author     Dimitry Andric <dim@FreeBSD.org>  2021-04-26 11:23:24 +0000
committer  Dimitry Andric <dim@FreeBSD.org>  2021-06-13 20:01:15 +0000
commit     d409305fa3838fb39b38c26fc085fb729b8766d5 (patch)
tree       fd234b27775fb59a57266cf36a05ec916e79a85f /contrib/llvm-project/llvm/lib/Target/RISCV
parent     e8d8bef961a50d4dc22501cde4fb9fb0be1b2532 (diff)
parent     b4125f7d51da2bb55d3b850dba9a69c201c3422c (diff)
Diffstat (limited to 'contrib/llvm-project/llvm/lib/Target/RISCV')
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp        |   6
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp |   6
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVCleanupVSETVLI.cpp              |  37
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp           |  11
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp                | 118
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h                  |   5
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrFormatsV.td                 |  23
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoB.td                    |  67
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoV.td                    | 116
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td             | 239
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td          |   6
11 files changed, 252 insertions, 382 deletions
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index e7e590153605..dcf7525d7458 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -2126,7 +2126,7 @@ bool RISCVAsmParser::parseDirectiveAttribute() {
if (getFeatureBits(RISCV::FeatureStdExtB))
formalArchStr = (Twine(formalArchStr) + "_b0p93").str();
if (getFeatureBits(RISCV::FeatureStdExtV))
- formalArchStr = (Twine(formalArchStr) + "_v1p0").str();
+ formalArchStr = (Twine(formalArchStr) + "_v0p10").str();
if (getFeatureBits(RISCV::FeatureExtZfh))
formalArchStr = (Twine(formalArchStr) + "_zfh0p1").str();
if (getFeatureBits(RISCV::FeatureExtZba))
@@ -2152,9 +2152,9 @@ bool RISCVAsmParser::parseDirectiveAttribute() {
if (getFeatureBits(RISCV::FeatureExtZbt))
formalArchStr = (Twine(formalArchStr) + "_zbt0p93").str();
if (getFeatureBits(RISCV::FeatureExtZvamo))
- formalArchStr = (Twine(formalArchStr) + "_zvamo1p0").str();
+ formalArchStr = (Twine(formalArchStr) + "_zvamo0p10").str();
if (getFeatureBits(RISCV::FeatureStdExtZvlsseg))
- formalArchStr = (Twine(formalArchStr) + "_zvlsseg1p0").str();
+ formalArchStr = (Twine(formalArchStr) + "_zvlsseg0p10").str();
getTargetStreamer().emitTextAttribute(Tag, formalArchStr);
}
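
The two hunks above downgrade the vector-related suffixes emitted into the ELF arch attribute from 1.0 ("v1p0") to 0.10 ("v0p10"), matching the not-yet-ratified v0.10 vector spec. A minimal standalone sketch of the string-building pattern, assuming a simplified Features struct and a hypothetical buildArchString helper (not LLVM API):

```cpp
// Illustrative model only: Features and buildArchString are invented here;
// the real code appends to formalArchStr/Arch guarded by feature bits.
#include <iostream>
#include <string>

struct Features {
  bool StdExtV = false;
  bool ExtZvamo = false;
  bool StdExtZvlsseg = false;
};

std::string buildArchString(const Features &F) {
  std::string Arch = "rv64i2p0"; // base ISA, fixed here for illustration
  if (F.StdExtV)
    Arch += "_v0p10";       // was "_v1p0" before this change
  if (F.ExtZvamo)
    Arch += "_zvamo0p10";   // was "_zvamo1p0"
  if (F.StdExtZvlsseg)
    Arch += "_zvlsseg0p10"; // was "_zvlsseg1p0"
  return Arch;
}

int main() {
  Features F;
  F.StdExtV = F.ExtZvamo = F.StdExtZvlsseg = true;
  std::cout << buildArchString(F) << '\n'; // rv64i2p0_v0p10_zvamo0p10_zvlsseg0p10
}
```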
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp
index 72434a15bedb..13c4b84aa300 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp
@@ -63,7 +63,7 @@ void RISCVTargetStreamer::emitTargetAttributes(const MCSubtargetInfo &STI) {
if (STI.hasFeature(RISCV::FeatureStdExtB))
Arch += "_b0p93";
if (STI.hasFeature(RISCV::FeatureStdExtV))
- Arch += "_v1p0";
+ Arch += "_v0p10";
if (STI.hasFeature(RISCV::FeatureExtZfh))
Arch += "_zfh0p1";
if (STI.hasFeature(RISCV::FeatureExtZba))
@@ -89,9 +89,9 @@ void RISCVTargetStreamer::emitTargetAttributes(const MCSubtargetInfo &STI) {
if (STI.hasFeature(RISCV::FeatureExtZbt))
Arch += "_zbt0p93";
if (STI.hasFeature(RISCV::FeatureExtZvamo))
- Arch += "_zvamo1p0";
+ Arch += "_zvamo0p10";
if (STI.hasFeature(RISCV::FeatureStdExtZvlsseg))
- Arch += "_zvlsseg1p0";
+ Arch += "_zvlsseg0p10";
emitTextAttribute(RISCVAttrs::ARCH, Arch);
}
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVCleanupVSETVLI.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVCleanupVSETVLI.cpp
index 6a12f99b8903..ae32cbd1ae59 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVCleanupVSETVLI.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVCleanupVSETVLI.cpp
@@ -59,7 +59,8 @@ bool RISCVCleanupVSETVLI::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
for (auto MII = MBB.begin(), MIE = MBB.end(); MII != MIE;) {
MachineInstr &MI = *MII++;
- if (MI.getOpcode() != RISCV::PseudoVSETVLI) {
+ if (MI.getOpcode() != RISCV::PseudoVSETVLI &&
+ MI.getOpcode() != RISCV::PseudoVSETIVLI) {
if (PrevVSETVLI &&
(MI.isCall() || MI.modifiesRegister(RISCV::VL) ||
MI.modifiesRegister(RISCV::VTYPE))) {
@@ -69,26 +70,48 @@ bool RISCVCleanupVSETVLI::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
continue;
}
- // If we don't have a previous VSETVLI or the VL output isn't dead, we
+ // If we don't have a previous VSET{I}VLI or the VL output isn't dead, we
// can't remove this VSETVLI.
if (!PrevVSETVLI || !MI.getOperand(0).isDead()) {
PrevVSETVLI = &MI;
continue;
}
- Register PrevAVLReg = PrevVSETVLI->getOperand(1).getReg();
- Register AVLReg = MI.getOperand(1).getReg();
+ // If a previous "set vl" instruction opcode is different from this one, we
+ // can't differentiate the AVL values.
+ if (PrevVSETVLI->getOpcode() != MI.getOpcode()) {
+ PrevVSETVLI = &MI;
+ continue;
+ }
+
+ // The remaining two cases are
+ // 1. PrevVSETVLI = PseudoVSETVLI
+ // MI = PseudoVSETVLI
+ //
+ // 2. PrevVSETVLI = PseudoVSETIVLI
+ // MI = PseudoVSETIVLI
+ Register AVLReg;
+ bool SameAVL = false;
+ if (MI.getOpcode() == RISCV::PseudoVSETVLI) {
+ AVLReg = MI.getOperand(1).getReg();
+ SameAVL = PrevVSETVLI->getOperand(1).getReg() == AVLReg;
+ } else { // RISCV::PseudoVSETIVLI
+ SameAVL =
+ PrevVSETVLI->getOperand(1).getImm() == MI.getOperand(1).getImm();
+ }
int64_t PrevVTYPEImm = PrevVSETVLI->getOperand(2).getImm();
int64_t VTYPEImm = MI.getOperand(2).getImm();
- // Does this VSETVLI use the same AVL register and VTYPE immediate?
- if (PrevAVLReg != AVLReg || PrevVTYPEImm != VTYPEImm) {
+ // Does this VSET{I}VLI use the same AVL register/value and VTYPE immediate?
+ if (!SameAVL || PrevVTYPEImm != VTYPEImm) {
PrevVSETVLI = &MI;
continue;
}
// If the AVLReg is X0 we need to look at the output VL of both VSETVLIs.
- if (AVLReg == RISCV::X0) {
+ if ((MI.getOpcode() == RISCV::PseudoVSETVLI) && (AVLReg == RISCV::X0)) {
+ assert((PrevVSETVLI->getOpcode() == RISCV::PseudoVSETVLI) &&
+ "Unexpected vsetvli opcode.");
Register PrevOutVL = PrevVSETVLI->getOperand(0).getReg();
Register OutVL = MI.getOperand(0).getReg();
// We can't remove if the previous VSETVLI left VL unchanged and the
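
The reworked loop above extends the cleanup pass to PseudoVSETIVLI: a later vset instruction is removable only when its VL result is dead, its opcode matches the previous one (register AVLs and immediate AVLs are not comparable), and both the AVL and the vtype immediate are equal. A standalone model of that test, simplifying away the AVL == X0 special case handled at the end of the hunk:

```cpp
// Simplified sketch (not LLVM code): operands reduced to plain fields, and
// the X0 output-VL check for PseudoVSETVLI is deliberately omitted.
#include <cstdint>
#include <iostream>

struct VSet {
  bool UsesImmAVL;   // true for PseudoVSETIVLI, false for PseudoVSETVLI
  uint64_t AVL;      // register number or immediate value
  int64_t VTypeImm;  // encoded vtype operand
  bool VLOutputDead; // VL result register is unused
};

// Returns true when Cur can be deleted given Prev already executed.
bool isRedundant(const VSet &Prev, const VSet &Cur) {
  if (!Cur.VLOutputDead)
    return false;                        // VL result still consumed
  if (Prev.UsesImmAVL != Cur.UsesImmAVL)
    return false;                        // can't compare reg AVL with imm AVL
  return Prev.AVL == Cur.AVL && Prev.VTypeImm == Cur.VTypeImm;
}

int main() {
  VSet Prev{true, 16, 0x08, false};
  VSet Cur{true, 16, 0x08, true};
  std::cout << std::boolalpha << isRedundant(Prev, Cur) << '\n'; // true
}
```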
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
index 5f50892ca886..ec9a39569952 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
@@ -103,6 +103,7 @@ bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB,
case RISCV::PseudoLA_TLS_GD:
return expandLoadTLSGDAddress(MBB, MBBI, NextMBBI);
case RISCV::PseudoVSETVLI:
+ case RISCV::PseudoVSETIVLI:
return expandVSetVL(MBB, MBBI);
case RISCV::PseudoVMCLR_M_B1:
case RISCV::PseudoVMCLR_M_B2:
@@ -217,9 +218,15 @@ bool RISCVExpandPseudo::expandVSetVL(MachineBasicBlock &MBB,
DebugLoc DL = MBBI->getDebugLoc();
- assert(MBBI->getOpcode() == RISCV::PseudoVSETVLI &&
+ assert((MBBI->getOpcode() == RISCV::PseudoVSETVLI ||
+ MBBI->getOpcode() == RISCV::PseudoVSETIVLI) &&
"Unexpected pseudo instruction");
- const MCInstrDesc &Desc = TII->get(RISCV::VSETVLI);
+ unsigned Opcode;
+ if (MBBI->getOpcode() == RISCV::PseudoVSETVLI)
+ Opcode = RISCV::VSETVLI;
+ else
+ Opcode = RISCV::VSETIVLI;
+ const MCInstrDesc &Desc = TII->get(Opcode);
assert(Desc.getNumOperands() == 3 && "Unexpected instruction format");
Register DstReg = MBBI->getOperand(0).getReg();
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 7b0f38671f06..43bf16c53a62 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -569,12 +569,14 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
SDValue VLOperand = Node->getOperand(2);
if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
- if (C->isNullValue()) {
- VLOperand = SDValue(
- CurDAG->getMachineNode(RISCV::ADDI, DL, XLenVT,
- CurDAG->getRegister(RISCV::X0, XLenVT),
- CurDAG->getTargetConstant(0, DL, XLenVT)),
- 0);
+ uint64_t AVL = C->getZExtValue();
+ if (isUInt<5>(AVL)) {
+ SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
+ ReplaceNode(Node,
+ CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, XLenVT,
+ MVT::Other, VLImm, VTypeIOp,
+ /* Chain */ Node->getOperand(0)));
+ return;
}
}
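
With vsetivli available, selection can fold a small constant AVL straight into the instruction's 5-bit immediate instead of materializing it in a register; isUInt<5> gates the transform, and it also subsumes the old isNullValue special case, since an immediate 0 never touches x0. A hedged sketch of the rule, with isUIntN standing in for llvm::isUInt:

```cpp
// Sketch under assumptions: isUIntN is a local reimplementation of
// llvm::isUInt, and the printed assembly is illustrative.
#include <cstdint>
#include <iostream>

template <unsigned N> bool isUIntN(uint64_t V) {
  return N >= 64 || V < (UINT64_C(1) << N);
}

int main() {
  for (uint64_t AVL : {0u, 16u, 31u, 32u}) {
    if (isUIntN<5>(AVL)) // fits the uimm field -> immediate form
      std::cout << "vsetivli rd, " << AVL << ", vtypei\n";
    else                 // too large -> load into a register first
      std::cout << "li tmp, " << AVL << "; vsetvli rd, tmp, vtypei\n";
  }
}
```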
@@ -824,93 +826,6 @@ bool RISCVDAGToDAGISel::MatchSRLIW(SDNode *N) const {
return (Mask | maskTrailingOnes<uint64_t>(ShAmt)) == 0xffffffff;
}
-// Check that it is a SLOI (Shift Left Ones Immediate). A PatFrag has already
-// determined it has the right structure:
-//
-// (OR (SHL RS1, VC2), VC1)
-//
-// Check that VC1, the mask used to fill with ones, is compatible
-// with VC2, the shamt:
-//
-// VC1 == maskTrailingOnes(VC2)
-//
-bool RISCVDAGToDAGISel::MatchSLOI(SDNode *N) const {
- assert(N->getOpcode() == ISD::OR);
- assert(N->getOperand(0).getOpcode() == ISD::SHL);
- assert(isa<ConstantSDNode>(N->getOperand(1)));
- assert(isa<ConstantSDNode>(N->getOperand(0).getOperand(1)));
-
- SDValue Shl = N->getOperand(0);
- if (Subtarget->is64Bit()) {
- uint64_t VC1 = N->getConstantOperandVal(1);
- uint64_t VC2 = Shl.getConstantOperandVal(1);
- return VC1 == maskTrailingOnes<uint64_t>(VC2);
- }
-
- uint32_t VC1 = N->getConstantOperandVal(1);
- uint32_t VC2 = Shl.getConstantOperandVal(1);
- return VC1 == maskTrailingOnes<uint32_t>(VC2);
-}
-
-// Check that it is a SROI (Shift Right Ones Immediate). A PatFrag has already
-// determined it has the right structure:
-//
-// (OR (SRL RS1, VC2), VC1)
-//
-// Check that VC1, the mask used to fill with ones, is compatible
-// with VC2, the shamt:
-//
-// VC1 == maskLeadingOnes(VC2)
-//
-bool RISCVDAGToDAGISel::MatchSROI(SDNode *N) const {
- assert(N->getOpcode() == ISD::OR);
- assert(N->getOperand(0).getOpcode() == ISD::SRL);
- assert(isa<ConstantSDNode>(N->getOperand(1)));
- assert(isa<ConstantSDNode>(N->getOperand(0).getOperand(1)));
-
- SDValue Srl = N->getOperand(0);
- if (Subtarget->is64Bit()) {
- uint64_t VC1 = N->getConstantOperandVal(1);
- uint64_t VC2 = Srl.getConstantOperandVal(1);
- return VC1 == maskLeadingOnes<uint64_t>(VC2);
- }
-
- uint32_t VC1 = N->getConstantOperandVal(1);
- uint32_t VC2 = Srl.getConstantOperandVal(1);
- return VC1 == maskLeadingOnes<uint32_t>(VC2);
-}
-
-// Check that it is a SROIW (Shift Right Ones Immediate i32 on RV64). A PatFrag
-// has already determined it has the right structure:
-//
-// (OR (SRL RS1, VC2), VC1)
-//
-// and then we check that VC1, the mask used to fill with ones, is compatible
-// with VC2, the shamt:
-//
-// VC2 < 32
-// VC1 == maskTrailingZeros<uint64_t>(32 - VC2)
-//
-bool RISCVDAGToDAGISel::MatchSROIW(SDNode *N) const {
- assert(N->getOpcode() == ISD::OR);
- assert(N->getOperand(0).getOpcode() == ISD::SRL);
- assert(isa<ConstantSDNode>(N->getOperand(1)));
- assert(isa<ConstantSDNode>(N->getOperand(0).getOperand(1)));
-
- // The IsRV64 predicate is checked after PatFrag predicates so we can get
- // here even on RV32.
- if (!Subtarget->is64Bit())
- return false;
-
- SDValue Srl = N->getOperand(0);
- uint64_t VC1 = N->getConstantOperandVal(1);
- uint64_t VC2 = Srl.getConstantOperandVal(1);
-
- // Immediate range should be enforced by uimm5 predicate.
- assert(VC2 < 32 && "Unexpected immediate");
- return VC1 == maskTrailingZeros<uint64_t>(32 - VC2);
-}
-
// Check that it is a SLLIUW (Shift Logical Left Immediate Unsigned i32
// on RV64).
// SLLIUW is the same as SLLI except for the fact that it clears the bits
@@ -946,6 +861,23 @@ bool RISCVDAGToDAGISel::MatchSLLIUW(SDNode *N) const {
return (VC1 >> VC2) == UINT64_C(0xFFFFFFFF);
}
+// X0 has special meaning for vsetvl/vsetvli.
+// rd | rs1 | AVL value | Effect on vl
+//--------------------------------------------------------------
+// !X0 | X0 | VLMAX | Set vl to VLMAX
+// X0 | X0 | Value in vl | Keep current vl, just change vtype.
+bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
+ // If the VL value is a constant 0, manually select it to an ADDI with 0
+ // immediate to prevent the default selection path from matching it to X0.
+ auto *C = dyn_cast<ConstantSDNode>(N);
+ if (C && C->isNullValue())
+ VL = SDValue(selectImm(CurDAG, SDLoc(N), 0, Subtarget->getXLenVT()), 0);
+ else
+ VL = N;
+
+ return true;
+}
+
bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
if (N.getOpcode() != ISD::SPLAT_VECTOR &&
N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64)
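
The selectVLOp comment table above captures why a constant-zero AVL must be routed through an ADDI: x0 in the AVL position does not mean "zero elements", it selects VLMAX (or leaves vl untouched when rd is also x0). A small plain-C++ rendering of that table; the function name is hypothetical:

```cpp
// Models the rd/rs1 table for vsetvli; not LLVM API.
#include <iostream>
#include <string>

std::string vsetvliEffect(bool RdIsX0, bool Rs1IsX0) {
  if (!Rs1IsX0)
    return "vl = min(AVL in rs1, VLMAX)";
  if (!RdIsX0)
    return "vl = VLMAX";                 // rs1 == x0, rd != x0
  return "vl unchanged, only vtype set"; // rs1 == x0, rd == x0
}

int main() {
  // A naive match of AVL 0 to x0 would yield VLMAX instead of 0, hence
  // the selector emits `addi tmp, x0, 0` and uses tmp as the AVL.
  std::cout << vsetvliEffect(/*RdIsX0=*/false, /*Rs1IsX0=*/true) << '\n';
}
```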
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
index 23601c3b8f06..6099586d049d 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -46,11 +46,10 @@ public:
bool SelectAddrFI(SDValue Addr, SDValue &Base);
bool MatchSRLIW(SDNode *N) const;
- bool MatchSLOI(SDNode *N) const;
- bool MatchSROI(SDNode *N) const;
- bool MatchSROIW(SDNode *N) const;
bool MatchSLLIUW(SDNode *N) const;
+ bool selectVLOp(SDValue N, SDValue &VL);
+
bool selectVSplat(SDValue N, SDValue &SplatVal);
bool selectVSplatSimm5(SDValue N, SDValue &SplatVal);
bool selectVSplatUimm5(SDValue N, SDValue &SplatVal);
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrFormatsV.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrFormatsV.td
index 147993127e78..80f46b73bfd7 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrFormatsV.td
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrFormatsV.td
@@ -38,9 +38,11 @@ class RISCVLSUMOP<bits<5> val> {
bits<5> Value = val;
}
def LUMOPUnitStride : RISCVLSUMOP<0b00000>;
+def LUMOPUnitStrideMask : RISCVLSUMOP<0b01011>;
def LUMOPUnitStrideWholeReg : RISCVLSUMOP<0b01000>;
def LUMOPUnitStrideFF: RISCVLSUMOP<0b10000>;
def SUMOPUnitStride : RISCVLSUMOP<0b00000>;
+def SUMOPUnitStrideMask : RISCVLSUMOP<0b01011>;
def SUMOPUnitStrideWholeReg : RISCVLSUMOP<0b01000>;
class RISCVAMOOP<bits<5> val> {
@@ -63,10 +65,23 @@ def LSWidth8 : RISCVWidth<0b0000>;
def LSWidth16 : RISCVWidth<0b0101>;
def LSWidth32 : RISCVWidth<0b0110>;
def LSWidth64 : RISCVWidth<0b0111>;
-def LSWidth128 : RISCVWidth<0b1000>;
-def LSWidth256 : RISCVWidth<0b1101>;
-def LSWidth512 : RISCVWidth<0b1110>;
-def LSWidth1024 : RISCVWidth<0b1111>;
+
+class RVInstSetiVLi<dag outs, dag ins, string opcodestr, string argstr>
+ : RVInst<outs, ins, opcodestr, argstr, [], InstFormatI> {
+ bits<5> uimm;
+ bits<5> rd;
+ bits<10> vtypei;
+
+ let Inst{31} = 1;
+ let Inst{30} = 1;
+ let Inst{29-20} = vtypei{9-0};
+ let Inst{19-15} = uimm;
+ let Inst{14-12} = 0b111;
+ let Inst{11-7} = rd;
+ let Opcode = OPC_OP_V.Value;
+
+ let Defs = [VTYPE, VL];
+}
class RVInstSetVLi<dag outs, dag ins, string opcodestr, string argstr>
: RVInst<outs, ins, opcodestr, argstr, [], InstFormatI> {
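
The new RVInstSetiVLi format fixes Inst{31-30} to 0b11 and packs a 10-bit vtypei alongside the 5-bit unsigned AVL immediate. A standalone encoder sketch of that layout, assuming OPC_OP_V is the usual 0b1010111 vector major opcode:

```cpp
// Bit-for-bit rendering of the RVInstSetiVLi layout above; the helper name
// and the OP-V opcode value are assumptions, not taken from this patch.
#include <cstdint>
#include <cstdio>

uint32_t encodeVSETIVLI(uint32_t rd, uint32_t uimm, uint32_t vtypei) {
  uint32_t Inst = 0;
  Inst |= 1u << 31;               // Inst{31} = 1
  Inst |= 1u << 30;               // Inst{30} = 1
  Inst |= (vtypei & 0x3FF) << 20; // Inst{29-20} = vtypei
  Inst |= (uimm & 0x1F) << 15;    // Inst{19-15} = uimm (the AVL)
  Inst |= 0b111u << 12;           // Inst{14-12} = funct3
  Inst |= (rd & 0x1F) << 7;       // Inst{11-7} = rd
  Inst |= 0b1010111u;             // OP-V major opcode (assumed)
  return Inst;
}

int main() {
  std::printf("0x%08x\n", encodeVSETIVLI(/*rd=*/5, /*uimm=*/16, /*vtypei=*/0x08));
}
```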
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoB.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
index 1bc288b5177c..7888ac7bac8e 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
@@ -45,25 +45,6 @@ def shfl_uimm : Operand<XLenVT>, ImmLeaf<XLenVT, [{
}];
}
-
-// Check that it is a SLOI (Shift Left Ones Immediate).
-def SLOIPat : PatFrag<(ops node:$A, node:$B),
- (or (shl node:$A, node:$B), imm), [{
- return MatchSLOI(N);
-}]>;
-
-// Check that it is a SROI (Shift Right Ones Immediate).
-def SROIPat : PatFrag<(ops node:$A, node:$B),
- (or (srl node:$A, node:$B), imm), [{
- return MatchSROI(N);
-}]>;
-
-// Check that it is a SROIW (Shift Right Ones Immediate i32 on RV64).
-def SROIWPat : PatFrag<(ops node:$A, node:$B),
- (or (srl node:$A, node:$B), imm), [{
- return MatchSROIW(N);
-}]>;
-
// Checks if this mask has a single 0 bit and cannot be used with ANDI.
def BCLRMask : ImmLeaf<XLenVT, [{
if (Subtarget->is64Bit())
@@ -210,11 +191,6 @@ def SH2ADD : ALU_rr<0b0010000, 0b100, "sh2add">, Sched<[]>;
def SH3ADD : ALU_rr<0b0010000, 0b110, "sh3add">, Sched<[]>;
} // Predicates = [HasStdExtZba]
-let Predicates = [HasStdExtZbp] in {
-def SLO : ALU_rr<0b0010000, 0b001, "slo">, Sched<[]>;
-def SRO : ALU_rr<0b0010000, 0b101, "sro">, Sched<[]>;
-} // Predicates = [HasStdExtZbp]
-
let Predicates = [HasStdExtZbbOrZbp] in {
def ROL : ALU_rr<0b0110000, 0b001, "rol">, Sched<[]>;
def ROR : ALU_rr<0b0110000, 0b101, "ror">, Sched<[]>;
@@ -238,11 +214,6 @@ def XPERMB : ALU_rr<0b0010100, 0b100, "xperm.b">, Sched<[]>;
def XPERMH : ALU_rr<0b0010100, 0b110, "xperm.h">, Sched<[]>;
} // Predicates = [HasStdExtZbp]
-let Predicates = [HasStdExtZbp] in {
-def SLOI : RVBShift_ri<0b00100, 0b001, OPC_OP_IMM, "sloi">, Sched<[]>;
-def SROI : RVBShift_ri<0b00100, 0b101, OPC_OP_IMM, "sroi">, Sched<[]>;
-} // Predicates = [HasStdExtZbp]
-
let Predicates = [HasStdExtZbbOrZbp] in
def RORI : RVBShift_ri<0b01100, 0b101, OPC_OP_IMM, "rori">, Sched<[]>;
@@ -369,11 +340,6 @@ def SH2ADDUW : ALUW_rr<0b0010000, 0b100, "sh2add.uw">, Sched<[]>;
def SH3ADDUW : ALUW_rr<0b0010000, 0b110, "sh3add.uw">, Sched<[]>;
} // Predicates = [HasStdExtZbb, IsRV64]
-let Predicates = [HasStdExtZbp, IsRV64] in {
-def SLOW : ALUW_rr<0b0010000, 0b001, "slow">, Sched<[]>;
-def SROW : ALUW_rr<0b0010000, 0b101, "srow">, Sched<[]>;
-} // Predicates = [HasStdExtZbp, IsRV64]
-
let Predicates = [HasStdExtZbbOrZbp, IsRV64] in {
def ROLW : ALUW_rr<0b0110000, 0b001, "rolw">, Sched<[]>;
def RORW : ALUW_rr<0b0110000, 0b101, "rorw">, Sched<[]>;
@@ -395,11 +361,6 @@ let Predicates = [HasStdExtZbp, IsRV64] in {
def XPERMW : ALU_rr<0b0010100, 0b000, "xperm.w">, Sched<[]>;
} // Predicates = [HasStdExtZbp, IsRV64]
-let Predicates = [HasStdExtZbp, IsRV64] in {
-def SLOIW : RVBShiftW_ri<0b0010000, 0b001, OPC_OP_IMM_32, "sloiw">, Sched<[]>;
-def SROIW : RVBShiftW_ri<0b0010000, 0b101, OPC_OP_IMM_32, "sroiw">, Sched<[]>;
-} // Predicates = [HasStdExtZbp, IsRV64]
-
let Predicates = [HasStdExtZbbOrZbp, IsRV64] in
def RORIW : RVBShiftW_ri<0b0110000, 0b101, OPC_OP_IMM_32, "roriw">, Sched<[]>;
@@ -673,13 +634,6 @@ def : Pat<(or GPR:$rs1, (not GPR:$rs2)), (ORN GPR:$rs1, GPR:$rs2)>;
def : Pat<(xor GPR:$rs1, (not GPR:$rs2)), (XNOR GPR:$rs1, GPR:$rs2)>;
} // Predicates = [HasStdExtZbbOrZbp]
-let Predicates = [HasStdExtZbp] in {
-def : Pat<(not (shiftop<shl> (not GPR:$rs1), GPR:$rs2)),
- (SLO GPR:$rs1, GPR:$rs2)>;
-def : Pat<(not (shiftop<srl> (not GPR:$rs1), GPR:$rs2)),
- (SRO GPR:$rs1, GPR:$rs2)>;
-} // Predicates = [HasStdExtZbp]
-
let Predicates = [HasStdExtZbbOrZbp] in {
def : Pat<(rotl GPR:$rs1, GPR:$rs2), (ROL GPR:$rs1, GPR:$rs2)>;
def : Pat<(rotr GPR:$rs1, GPR:$rs2), (ROR GPR:$rs1, GPR:$rs2)>;
@@ -710,13 +664,6 @@ def : Pat<(and (srl GPR:$rs1, uimmlog2xlen:$shamt), (XLenVT 1)),
(BEXTI GPR:$rs1, uimmlog2xlen:$shamt)>;
}
-let Predicates = [HasStdExtZbp] in {
-def : Pat<(SLOIPat GPR:$rs1, uimmlog2xlen:$shamt),
- (SLOI GPR:$rs1, uimmlog2xlen:$shamt)>;
-def : Pat<(SROIPat GPR:$rs1, uimmlog2xlen:$shamt),
- (SROI GPR:$rs1, uimmlog2xlen:$shamt)>;
-} // Predicates = [HasStdExtZbp]
-
// There's no encoding for roli in the 'B' extension as it can be
// implemented with rori by negating the immediate.
let Predicates = [HasStdExtZbbOrZbp] in {
@@ -936,13 +883,6 @@ def : Pat<(add (SLLIUWPat GPR:$rs1, (XLenVT 3)), GPR:$rs2),
(SH3ADDUW GPR:$rs1, GPR:$rs2)>;
} // Predicates = [HasStdExtZba, IsRV64]
-let Predicates = [HasStdExtZbp, IsRV64] in {
-def : Pat<(not (shiftopw<riscv_sllw> (not GPR:$rs1), GPR:$rs2)),
- (SLOW GPR:$rs1, GPR:$rs2)>;
-def : Pat<(not (shiftopw<riscv_srlw> (not GPR:$rs1), GPR:$rs2)),
- (SROW GPR:$rs1, GPR:$rs2)>;
-} // Predicates = [HasStdExtZbp, IsRV64]
-
let Predicates = [HasStdExtZbbOrZbp, IsRV64] in {
def : Pat<(riscv_rolw GPR:$rs1, GPR:$rs2),
(ROLW GPR:$rs1, GPR:$rs2)>;
@@ -983,13 +923,6 @@ def : Pat<(xor (assertsexti32 GPR:$rs1), BSETINVWMask:$mask),
} // Predicates = [HasStdExtZbs, IsRV64]
let Predicates = [HasStdExtZbp, IsRV64] in {
-def : Pat<(sext_inreg (SLOIPat GPR:$rs1, uimm5:$shamt), i32),
- (SLOIW GPR:$rs1, uimm5:$shamt)>;
-def : Pat<(SROIWPat GPR:$rs1, uimm5:$shamt),
- (SROIW GPR:$rs1, uimm5:$shamt)>;
-} // Predicates = [HasStdExtZbp, IsRV64]
-
-let Predicates = [HasStdExtZbp, IsRV64] in {
def : Pat<(riscv_rorw (riscv_greviw GPR:$rs1, 24), (i64 16)), (GREVIW GPR:$rs1, 8)>;
def : Pat<(riscv_rolw (riscv_greviw GPR:$rs1, 24), (i64 16)), (GREVIW GPR:$rs1, 8)>;
def : Pat<(riscv_greviw GPR:$rs1, timm:$shamt), (GREVIW GPR:$rs1, timm:$shamt)>;
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
index 4f9e9cfbdb98..b3fc76aee161 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
///
/// This file describes the RISC-V instructions from the standard 'V' Vector
-/// extension, version 0.9.
+/// extension, version 0.10.
/// This version is still experimental as the 'V' extension hasn't been
/// ratified yet.
///
@@ -82,6 +82,12 @@ def simm5_plus1 : Operand<XLenVT>, ImmLeaf<XLenVT,
//===----------------------------------------------------------------------===//
let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
+// load vd, (rs1)
+class VUnitStrideLoadMask<string opcodestr>
+ : RVInstVLU<0b000, LSWidth8.Value{3}, LUMOPUnitStrideMask, LSWidth8.Value{2-0},
+ (outs VR:$vd),
+ (ins GPR:$rs1), opcodestr, "$vd, (${rs1})">;
+
// load vd, (rs1), vm
class VUnitStrideLoad<RISCVLSUMOP lumop, RISCVWidth width,
string opcodestr>
@@ -138,6 +144,12 @@ class VIndexedSegmentLoad<bits<3> nf, RISCVMOP mop, RISCVWidth width,
let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
// store vd, vs3, (rs1), vm
+class VUnitStrideStoreMask<string opcodestr>
+ : RVInstVSU<0b000, LSWidth8.Value{3}, SUMOPUnitStrideMask, LSWidth8.Value{2-0},
+ (outs), (ins VR:$vs3, GPR:$rs1), opcodestr,
+ "$vs3, (${rs1})">;
+
+// store vd, vs3, (rs1), vm
class VUnitStrideStore<RISCVLSUMOP sumop, RISCVWidth width,
string opcodestr>
: RVInstVSU<0b000, width.Value{3}, sumop, width.Value{2-0},
@@ -423,10 +435,6 @@ multiclass VWholeLoad<bits<3> nf, string opcodestr> {
def E16_V : VWholeLoad<nf, LSWidth16, opcodestr # "e16.v">;
def E32_V : VWholeLoad<nf, LSWidth32, opcodestr # "e32.v">;
def E64_V : VWholeLoad<nf, LSWidth64, opcodestr # "e64.v">;
- def E128_V : VWholeLoad<nf, LSWidth128, opcodestr # "e128.v">;
- def E256_V : VWholeLoad<nf, LSWidth256, opcodestr # "e256.v">;
- def E512_V : VWholeLoad<nf, LSWidth512, opcodestr # "e512.v">;
- def E1024_V : VWholeLoad<nf, LSWidth1024, opcodestr # "e1024.v">;
}
//===----------------------------------------------------------------------===//
@@ -438,6 +446,9 @@ let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
def VSETVLI : RVInstSetVLi<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp:$vtypei),
"vsetvli", "$rd, $rs1, $vtypei">;
+def VSETIVLI : RVInstSetiVLi<(outs GPR:$rd), (ins uimm5:$uimm, VTypeIOp:$vtypei),
+ "vsetivli", "$rd, $uimm, $vtypei">;
+
def VSETVL : RVInstSetVL<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2),
"vsetvl", "$rd, $rs1, $rs2">;
} // hasSideEffects = 1, mayLoad = 0, mayStore = 0
@@ -447,47 +458,30 @@ def VLE8_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth8, "vle8.v">;
def VLE16_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth16, "vle16.v">;
def VLE32_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth32, "vle32.v">;
def VLE64_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth64, "vle64.v">;
-def VLE128_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth128, "vle128.v">;
-def VLE256_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth256, "vle256.v">;
-def VLE512_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth512, "vle512.v">;
-def VLE1024_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth1024, "vle1024.v">;
def VLE8FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth8, "vle8ff.v">;
def VLE16FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth16, "vle16ff.v">;
def VLE32FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth32, "vle32ff.v">;
def VLE64FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth64, "vle64ff.v">;
-def VLE128FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth128, "vle128ff.v">;
-def VLE256FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth256, "vle256ff.v">;
-def VLE512FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth512, "vle512ff.v">;
-def VLE1024FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth1024, "vle1024ff.v">;
+
+def VLE1_V : VUnitStrideLoadMask<"vle1.v">;
+def VSE1_V : VUnitStrideStoreMask<"vse1.v">;
def VSE8_V : VUnitStrideStore<SUMOPUnitStride, LSWidth8, "vse8.v">;
def VSE16_V : VUnitStrideStore<SUMOPUnitStride, LSWidth16, "vse16.v">;
def VSE32_V : VUnitStrideStore<SUMOPUnitStride, LSWidth32, "vse32.v">;
def VSE64_V : VUnitStrideStore<SUMOPUnitStride, LSWidth64, "vse64.v">;
-def VSE128_V : VUnitStrideStore<SUMOPUnitStride, LSWidth128, "vse128.v">;
-def VSE256_V : VUnitStrideStore<SUMOPUnitStride, LSWidth256, "vse256.v">;
-def VSE512_V : VUnitStrideStore<SUMOPUnitStride, LSWidth512, "vse512.v">;
-def VSE1024_V : VUnitStrideStore<SUMOPUnitStride, LSWidth1024, "vse1024.v">;
// Vector Strided Instructions
def VLSE8_V : VStridedLoad<LSWidth8, "vlse8.v">;
def VLSE16_V : VStridedLoad<LSWidth16, "vlse16.v">;
def VLSE32_V : VStridedLoad<LSWidth32, "vlse32.v">;
def VLSE64_V : VStridedLoad<LSWidth64, "vlse64.v">;
-def VLSE128_V : VStridedLoad<LSWidth128, "vlse128.v">;
-def VLSE256_V : VStridedLoad<LSWidth256, "vlse256.v">;
-def VLSE512_V : VStridedLoad<LSWidth512, "vlse512.v">;
-def VLSE1024_V : VStridedLoad<LSWidth1024, "vlse1024.v">;
def VSSE8_V : VStridedStore<LSWidth8, "vsse8.v">;
def VSSE16_V : VStridedStore<LSWidth16, "vsse16.v">;
def VSSE32_V : VStridedStore<LSWidth32, "vsse32.v">;
def VSSE64_V : VStridedStore<LSWidth64, "vsse64.v">;
-def VSSE128_V : VStridedStore<LSWidth128, "vsse128.v">;
-def VSSE256_V : VStridedStore<LSWidth256, "vsse256.v">;
-def VSSE512_V : VStridedStore<LSWidth512, "vsse512.v">;
-def VSSE1024_V : VStridedStore<LSWidth1024, "vsse1024.v">;
// Vector Indexed Instructions
def VLUXEI8_V : VIndexedLoad<MOPLDIndexedUnord, LSWidth8, "vluxei8.v">;
@@ -510,19 +504,19 @@ def VSOXEI16_V : VIndexedStore<MOPSTIndexedOrder, LSWidth16, "vsoxei16.v">;
def VSOXEI32_V : VIndexedStore<MOPSTIndexedOrder, LSWidth32, "vsoxei32.v">;
def VSOXEI64_V : VIndexedStore<MOPSTIndexedOrder, LSWidth64, "vsoxei64.v">;
-defm VL1R : VWholeLoad<1, "vl1r">;
-defm VL2R : VWholeLoad<2, "vl2r">;
-defm VL4R : VWholeLoad<4, "vl4r">;
-defm VL8R : VWholeLoad<8, "vl8r">;
+defm VL1R : VWholeLoad<0, "vl1r">;
+defm VL2R : VWholeLoad<1, "vl2r">;
+defm VL4R : VWholeLoad<3, "vl4r">;
+defm VL8R : VWholeLoad<7, "vl8r">;
def : InstAlias<"vl1r.v $vd, (${rs1})", (VL1RE8_V VR:$vd, GPR:$rs1)>;
def : InstAlias<"vl2r.v $vd, (${rs1})", (VL2RE8_V VR:$vd, GPR:$rs1)>;
def : InstAlias<"vl4r.v $vd, (${rs1})", (VL4RE8_V VR:$vd, GPR:$rs1)>;
def : InstAlias<"vl8r.v $vd, (${rs1})", (VL8RE8_V VR:$vd, GPR:$rs1)>;
-def VS1R_V : VWholeStore<1, "vs1r.v">;
-def VS2R_V : VWholeStore<2, "vs2r.v">;
-def VS4R_V : VWholeStore<4, "vs4r.v">;
-def VS8R_V : VWholeStore<8, "vs8r.v">;
+def VS1R_V : VWholeStore<0, "vs1r.v">;
+def VS2R_V : VWholeStore<1, "vs2r.v">;
+def VS4R_V : VWholeStore<3, "vs4r.v">;
+def VS8R_V : VWholeStore<7, "vs8r.v">;
// Vector Single-Width Integer Add and Subtract
defm VADD_V : VALU_IV_V_X_I<"vadd", 0b000000>;
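
The VL1R/VS1R template arguments change from the register count to count-minus-one because the nf field in the v0.10 encoding stores (number of registers - 1). A one-line check of that mapping:

```cpp
// Assumption: encodeNF mirrors the spec's nf field, which this patch's
// 1/2/4/8 -> 0/1/3/7 argument change reflects.
#include <cassert>

constexpr unsigned encodeNF(unsigned Regs) { return Regs - 1; }

int main() {
  assert(encodeNF(1) == 0 && encodeNF(2) == 1 &&
         encodeNF(4) == 3 && encodeNF(8) == 7);
}
```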
@@ -806,8 +800,8 @@ defm VFWNMSAC_V : VALUr_FV_V_F<"vfwnmsac", 0b111111>;
// Vector Floating-Point Square-Root Instruction
defm VFSQRT_V : VALU_FV_VS2<"vfsqrt.v", 0b010011, 0b00000>;
-defm VFRSQRTE7_V : VALU_FV_VS2<"vfrsqrte7.v", 0b010011, 0b00100>;
-defm VFRECE7_V : VALU_FV_VS2<"vfrece7.v", 0b010011, 0b00101>;
+defm VFRSQRT7_V : VALU_FV_VS2<"vfrsqrt7.v", 0b010011, 0b00100>;
+defm VFREC7_V : VALU_FV_VS2<"vfrec7.v", 0b010011, 0b00101>;
// Vector Floating-Point MIN/MAX Instructions
defm VFMIN_V : VALU_FV_V_F<"vfmin", 0b000100>;
@@ -1058,47 +1052,27 @@ let Predicates = [HasStdExtZvlsseg] in {
def VLSEG#nf#E16_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth16, "vlseg"#nf#"e16.v">;
def VLSEG#nf#E32_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth32, "vlseg"#nf#"e32.v">;
def VLSEG#nf#E64_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth64, "vlseg"#nf#"e64.v">;
- def VLSEG#nf#E128_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth128, "vlseg"#nf#"e128.v">;
- def VLSEG#nf#E256_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth256, "vlseg"#nf#"e256.v">;
- def VLSEG#nf#E512_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth512, "vlseg"#nf#"e512.v">;
- def VLSEG#nf#E1024_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth1024, "vlseg"#nf#"e1024.v">;
def VLSEG#nf#E8FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth8, "vlseg"#nf#"e8ff.v">;
def VLSEG#nf#E16FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth16, "vlseg"#nf#"e16ff.v">;
def VLSEG#nf#E32FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth32, "vlseg"#nf#"e32ff.v">;
def VLSEG#nf#E64FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth64, "vlseg"#nf#"e64ff.v">;
- def VLSEG#nf#E128FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth128, "vlseg"#nf#"e128ff.v">;
- def VLSEG#nf#E256FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth256, "vlseg"#nf#"e256ff.v">;
- def VLSEG#nf#E512FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth512, "vlseg"#nf#"e512ff.v">;
- def VLSEG#nf#E1024FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth1024, "vlseg"#nf#"e1024ff.v">;
def VSSEG#nf#E8_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth8, "vsseg"#nf#"e8.v">;
def VSSEG#nf#E16_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth16, "vsseg"#nf#"e16.v">;
def VSSEG#nf#E32_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth32, "vsseg"#nf#"e32.v">;
def VSSEG#nf#E64_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth64, "vsseg"#nf#"e64.v">;
- def VSSEG#nf#E128_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth128, "vsseg"#nf#"e128.v">;
- def VSSEG#nf#E256_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth256, "vsseg"#nf#"e256.v">;
- def VSSEG#nf#E512_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth512, "vsseg"#nf#"e512.v">;
- def VSSEG#nf#E1024_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth1024, "vsseg"#nf#"e1024.v">;
// Vector Strided Instructions
def VLSSEG#nf#E8_V : VStridedSegmentLoad<!add(nf, -1), LSWidth8, "vlsseg"#nf#"e8.v">;
def VLSSEG#nf#E16_V : VStridedSegmentLoad<!add(nf, -1), LSWidth16, "vlsseg"#nf#"e16.v">;
def VLSSEG#nf#E32_V : VStridedSegmentLoad<!add(nf, -1), LSWidth32, "vlsseg"#nf#"e32.v">;
def VLSSEG#nf#E64_V : VStridedSegmentLoad<!add(nf, -1), LSWidth64, "vlsseg"#nf#"e64.v">;
- def VLSSEG#nf#E128_V : VStridedSegmentLoad<!add(nf, -1), LSWidth128, "vlsseg"#nf#"e128.v">;
- def VLSSEG#nf#E256_V : VStridedSegmentLoad<!add(nf, -1), LSWidth256, "vlsseg"#nf#"e256.v">;
- def VLSSEG#nf#E512_V : VStridedSegmentLoad<!add(nf, -1), LSWidth512, "vlsseg"#nf#"e512.v">;
- def VLSSEG#nf#E1024_V : VStridedSegmentLoad<!add(nf, -1), LSWidth1024, "vlsseg"#nf#"e1024.v">;
def VSSSEG#nf#E8_V : VStridedSegmentStore<!add(nf, -1), LSWidth8, "vssseg"#nf#"e8.v">;
def VSSSEG#nf#E16_V : VStridedSegmentStore<!add(nf, -1), LSWidth16, "vssseg"#nf#"e16.v">;
def VSSSEG#nf#E32_V : VStridedSegmentStore<!add(nf, -1), LSWidth32, "vssseg"#nf#"e32.v">;
def VSSSEG#nf#E64_V : VStridedSegmentStore<!add(nf, -1), LSWidth64, "vssseg"#nf#"e64.v">;
- def VSSSEG#nf#E128_V : VStridedSegmentStore<!add(nf, -1), LSWidth128, "vssseg"#nf#"e128.v">;
- def VSSSEG#nf#E256_V : VStridedSegmentStore<!add(nf, -1), LSWidth256, "vssseg"#nf#"e256.v">;
- def VSSSEG#nf#E512_V : VStridedSegmentStore<!add(nf, -1), LSWidth512, "vssseg"#nf#"e512.v">;
- def VSSSEG#nf#E1024_V : VStridedSegmentStore<!add(nf, -1), LSWidth1024, "vssseg"#nf#"e1024.v">;
// Vector Indexed Instructions
def VLUXSEG#nf#EI8_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
@@ -1109,14 +1083,6 @@ let Predicates = [HasStdExtZvlsseg] in {
LSWidth32, "vluxseg"#nf#"ei32.v">;
def VLUXSEG#nf#EI64_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
LSWidth64, "vluxseg"#nf#"ei64.v">;
- def VLUXSEG#nf#EI128_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
- LSWidth128, "vluxseg"#nf#"ei128.v">;
- def VLUXSEG#nf#EI256_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
- LSWidth256, "vluxseg"#nf#"ei256.v">;
- def VLUXSEG#nf#EI512_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
- LSWidth512, "vluxseg"#nf#"ei512.v">;
- def VLUXSEG#nf#EI1024_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
- LSWidth1024, "vluxseg"#nf#"ei1024.v">;
def VLOXSEG#nf#EI8_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
LSWidth8, "vloxseg"#nf#"ei8.v">;
@@ -1126,14 +1092,6 @@ let Predicates = [HasStdExtZvlsseg] in {
LSWidth32, "vloxseg"#nf#"ei32.v">;
def VLOXSEG#nf#EI64_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
LSWidth64, "vloxseg"#nf#"ei64.v">;
- def VLOXSEG#nf#EI128_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
- LSWidth128, "vloxseg"#nf#"ei128.v">;
- def VLOXSEG#nf#EI256_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
- LSWidth256, "vloxseg"#nf#"ei256.v">;
- def VLOXSEG#nf#EI512_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
- LSWidth512, "vloxseg"#nf#"ei512.v">;
- def VLOXSEG#nf#EI1024_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
- LSWidth1024, "vloxseg"#nf#"ei1024.v">;
def VSUXSEG#nf#EI8_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
LSWidth8, "vsuxseg"#nf#"ei8.v">;
@@ -1143,14 +1101,6 @@ let Predicates = [HasStdExtZvlsseg] in {
LSWidth32, "vsuxseg"#nf#"ei32.v">;
def VSUXSEG#nf#EI64_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
LSWidth64, "vsuxseg"#nf#"ei64.v">;
- def VSUXSEG#nf#EI128_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
- LSWidth128, "vsuxseg"#nf#"ei128.v">;
- def VSUXSEG#nf#EI256_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
- LSWidth256, "vsuxseg"#nf#"ei256.v">;
- def VSUXSEG#nf#EI512_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
- LSWidth512, "vsuxseg"#nf#"ei512.v">;
- def VSUXSEG#nf#EI1024_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
- LSWidth1024, "vsuxseg"#nf#"ei1024.v">;
def VSOXSEG#nf#EI8_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
LSWidth8, "vsoxseg"#nf#"ei8.v">;
@@ -1160,14 +1110,6 @@ let Predicates = [HasStdExtZvlsseg] in {
LSWidth32, "vsoxseg"#nf#"ei32.v">;
def VSOXSEG#nf#EI64_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
LSWidth64, "vsoxseg"#nf#"ei64.v">;
- def VSOXSEG#nf#EI128_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
- LSWidth128, "vsoxseg"#nf#"ei128.v">;
- def VSOXSEG#nf#EI256_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
- LSWidth256, "vsoxseg"#nf#"ei256.v">;
- def VSOXSEG#nf#EI512_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
- LSWidth512, "vsoxseg"#nf#"ei512.v">;
- def VSOXSEG#nf#EI1024_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
- LSWidth1024, "vsoxseg"#nf#"ei1024.v">;
}
} // Predicates = [HasStdExtZvlsseg]
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 06e4d053d5d7..60bd1b24cab8 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
///
/// This file contains the required infrastructure to support code generation
-/// for the standard 'V' (Vector) extension, version 0.9. This version is still
+/// for the standard 'V' (Vector) extension, version 0.10. This version is still
/// experimental as the 'V' extension hasn't been ratified yet.
///
/// This file is included from RISCVInstrInfoV.td
@@ -42,17 +42,7 @@ def riscv_read_vl : SDNode<"RISCVISD::READ_VL",
//--------------------------------------------------------------
// !X0 | X0 | VLMAX | Set vl to VLMAX
// X0 | X0 | Value in vl | Keep current vl, just change vtype.
-def NoX0 : SDNodeXForm<undef,
-[{
- auto *C = dyn_cast<ConstantSDNode>(N);
- if (C && C->isNullValue()) {
- SDLoc DL(N);
- return SDValue(CurDAG->getMachineNode(RISCV::ADDI, DL, Subtarget->getXLenVT(),
- CurDAG->getRegister(RISCV::X0, Subtarget->getXLenVT()),
- CurDAG->getTargetConstant(0, DL, Subtarget->getXLenVT())), 0);
- }
- return SDValue(N, 0);
-}]>;
+def VLOp : ComplexPattern<XLenVT, 1, "selectVLOp">;
def DecImm : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(N->getSExtValue() - 1, SDLoc(N),
@@ -1228,6 +1218,14 @@ multiclass VPseudoUSLoad {
}
}
+multiclass VPseudoLoadMask {
+ foreach mti = AllMasks in {
+ let VLMul = mti.LMul.value in {
+ def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR>;
+ }
+ }
+}
+
multiclass VPseudoSLoad {
foreach lmul = MxList.m in {
defvar LInfo = lmul.MX;
@@ -1264,6 +1262,14 @@ multiclass VPseudoUSStore {
}
}
+multiclass VPseudoStoreMask {
+ foreach mti = AllMasks in {
+ let VLMul = mti.LMul.value in {
+ def "_V_" # mti.BX : VPseudoUSStoreNoMask<VR>;
+ }
+ }
+}
+
multiclass VPseudoSStore {
foreach lmul = MxList.m in {
defvar LInfo = lmul.MX;
@@ -1951,10 +1957,10 @@ class VPatUnaryNoMask<string intrinsic_name,
VReg op2_reg_class> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
(op2_type op2_reg_class:$rs2),
- (XLenVT GPR:$vl))),
+ (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
(op2_type op2_reg_class:$rs2),
- (NoX0 GPR:$vl), sew)>;
+ GPR:$vl, sew)>;
class VPatUnaryMask<string intrinsic_name,
string inst,
@@ -1970,21 +1976,21 @@ class VPatUnaryMask<string intrinsic_name,
(result_type result_reg_class:$merge),
(op2_type op2_reg_class:$rs2),
(mask_type V0),
- (XLenVT GPR:$vl))),
+ (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK")
(result_type result_reg_class:$merge),
(op2_type op2_reg_class:$rs2),
- (mask_type V0), (NoX0 GPR:$vl), sew)>;
+ (mask_type V0), GPR:$vl, sew)>;
class VPatMaskUnaryNoMask<string intrinsic_name,
string inst,
MTypeInfo mti> :
Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name)
(mti.Mask VR:$rs2),
- (XLenVT GPR:$vl))),
+ (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>(inst#"_M_"#mti.BX)
(mti.Mask VR:$rs2),
- (NoX0 GPR:$vl), mti.SEW)>;
+ GPR:$vl, mti.SEW)>;
class VPatMaskUnaryMask<string intrinsic_name,
string inst,
@@ -1993,11 +1999,11 @@ class VPatMaskUnaryMask<string intrinsic_name,
(mti.Mask VR:$merge),
(mti.Mask VR:$rs2),
(mti.Mask V0),
- (XLenVT GPR:$vl))),
+ (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK")
(mti.Mask VR:$merge),
(mti.Mask VR:$rs2),
- (mti.Mask V0), (NoX0 GPR:$vl), mti.SEW)>;
+ (mti.Mask V0), GPR:$vl, mti.SEW)>;
class VPatUnaryAnyMask<string intrinsic,
string inst,
@@ -2013,12 +2019,12 @@ class VPatUnaryAnyMask<string intrinsic,
(result_type result_reg_class:$merge),
(op1_type op1_reg_class:$rs1),
(mask_type VR:$rs2),
- (XLenVT GPR:$vl))),
+ (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
(result_type result_reg_class:$merge),
(op1_type op1_reg_class:$rs1),
(mask_type VR:$rs2),
- (NoX0 GPR:$vl), sew)>;
+ GPR:$vl, sew)>;
class VPatBinaryNoMask<string intrinsic_name,
string inst,
@@ -2031,11 +2037,11 @@ class VPatBinaryNoMask<string intrinsic_name,
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (XLenVT GPR:$vl))),
+ (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>(inst)
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (NoX0 GPR:$vl), sew)>;
+ GPR:$vl, sew)>;
class VPatBinaryMask<string intrinsic_name,
string inst,
@@ -2052,12 +2058,12 @@ class VPatBinaryMask<string intrinsic_name,
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(mask_type V0),
- (XLenVT GPR:$vl))),
+ (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>(inst#"_MASK")
(result_type result_reg_class:$merge),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type V0), (NoX0 GPR:$vl), sew)>;
+ (mask_type V0), GPR:$vl, sew)>;
class VPatTernaryNoMask<string intrinsic,
string inst,
@@ -2075,12 +2081,12 @@ class VPatTernaryNoMask<string intrinsic,
(result_type result_reg_class:$rs3),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (XLenVT GPR:$vl))),
+ (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
result_reg_class:$rs3,
(op1_type op1_reg_class:$rs1),
op2_kind:$rs2,
- (NoX0 GPR:$vl), sew)>;
+ GPR:$vl, sew)>;
class VPatTernaryMask<string intrinsic,
string inst,
@@ -2099,13 +2105,13 @@ class VPatTernaryMask<string intrinsic,
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(mask_type V0),
- (XLenVT GPR:$vl))),
+ (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX # "_MASK")
result_reg_class:$rs3,
(op1_type op1_reg_class:$rs1),
op2_kind:$rs2,
(mask_type V0),
- (NoX0 GPR:$vl), sew)>;
+ GPR:$vl, sew)>;
class VPatAMOWDNoMask<string intrinsic_name,
string inst,
@@ -2119,10 +2125,10 @@ class VPatAMOWDNoMask<string intrinsic_name,
GPR:$rs1,
(op1_type op1_reg_class:$vs2),
(result_type vlmul.vrclass:$vd),
- (XLenVT GPR:$vl))),
+ (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>(inst # "_WD_" # vlmul.MX # "_" # emul.MX)
$rs1, $vs2, $vd,
- (NoX0 GPR:$vl), sew)>;
+ GPR:$vl, sew)>;
class VPatAMOWDMask<string intrinsic_name,
string inst,
@@ -2138,10 +2144,10 @@ class VPatAMOWDMask<string intrinsic_name,
(op1_type op1_reg_class:$vs2),
(result_type vlmul.vrclass:$vd),
(mask_type V0),
- (XLenVT GPR:$vl))),
+ (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>(inst # "_WD_" # vlmul.MX # "_" # emul.MX # "_MASK")
$rs1, $vs2, $vd,
- (mask_type V0), (NoX0 GPR:$vl), sew)>;
+ (mask_type V0), GPR:$vl, sew)>;
multiclass VPatUSLoad<string intrinsic,
string inst,
@@ -2153,14 +2159,14 @@ multiclass VPatUSLoad<string intrinsic,
{
defvar Intr = !cast<Intrinsic>(intrinsic);
defvar Pseudo = !cast<Instruction>(inst#"_V_"#vlmul.MX);
- def : Pat<(type (Intr GPR:$rs1, GPR:$vl)),
- (Pseudo $rs1, (NoX0 GPR:$vl), sew)>;
+ def : Pat<(type (Intr GPR:$rs1, (XLenVT (VLOp GPR:$vl)))),
+ (Pseudo $rs1, GPR:$vl, sew)>;
defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
defvar PseudoMask = !cast<Instruction>(inst#"_V_"#vlmul.MX#"_MASK");
def : Pat<(type (IntrMask (type GetVRegNoV0<reg_class>.R:$merge),
- GPR:$rs1, (mask_type V0), GPR:$vl)),
+ GPR:$rs1, (mask_type V0), (XLenVT (VLOp GPR:$vl)))),
(PseudoMask $merge,
- $rs1, (mask_type V0), (NoX0 GPR:$vl), sew)>;
+ $rs1, (mask_type V0), GPR:$vl, sew)>;
}
multiclass VPatUSLoadFF<string inst,
@@ -2171,13 +2177,13 @@ multiclass VPatUSLoadFF<string inst,
VReg reg_class>
{
defvar Pseudo = !cast<Instruction>(inst#"_V_"#vlmul.MX);
- def : Pat<(type (riscv_vleff GPR:$rs1, GPR:$vl)),
- (Pseudo $rs1, (NoX0 GPR:$vl), sew)>;
+ def : Pat<(type (riscv_vleff GPR:$rs1, (XLenVT (VLOp GPR:$vl)))),
+ (Pseudo $rs1, GPR:$vl, sew)>;
defvar PseudoMask = !cast<Instruction>(inst#"_V_"#vlmul.MX#"_MASK");
def : Pat<(type (riscv_vleff_mask (type GetVRegNoV0<reg_class>.R:$merge),
- GPR:$rs1, (mask_type V0), GPR:$vl)),
+ GPR:$rs1, (mask_type V0), (XLenVT (VLOp GPR:$vl)))),
(PseudoMask $merge,
- $rs1, (mask_type V0), (NoX0 GPR:$vl), sew)>;
+ $rs1, (mask_type V0), GPR:$vl, sew)>;
}
multiclass VPatSLoad<string intrinsic,
@@ -2190,14 +2196,14 @@ multiclass VPatSLoad<string intrinsic,
{
defvar Intr = !cast<Intrinsic>(intrinsic);
defvar Pseudo = !cast<Instruction>(inst#"_V_"#vlmul.MX);
- def : Pat<(type (Intr GPR:$rs1, GPR:$rs2, GPR:$vl)),
- (Pseudo $rs1, $rs2, (NoX0 GPR:$vl), sew)>;
+ def : Pat<(type (Intr GPR:$rs1, GPR:$rs2, (XLenVT (VLOp GPR:$vl)))),
+ (Pseudo $rs1, $rs2, GPR:$vl, sew)>;
defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
defvar PseudoMask = !cast<Instruction>(inst#"_V_"#vlmul.MX#"_MASK");
def : Pat<(type (IntrMask (type GetVRegNoV0<reg_class>.R:$merge),
- GPR:$rs1, GPR:$rs2, (mask_type V0), GPR:$vl)),
+ GPR:$rs1, GPR:$rs2, (mask_type V0), (XLenVT (VLOp GPR:$vl)))),
(PseudoMask $merge,
- $rs1, $rs2, (mask_type V0), (NoX0 GPR:$vl), sew)>;
+ $rs1, $rs2, (mask_type V0), GPR:$vl, sew)>;
}
multiclass VPatILoad<string intrinsic,
@@ -2213,16 +2219,16 @@ multiclass VPatILoad<string intrinsic,
{
defvar Intr = !cast<Intrinsic>(intrinsic);
defvar Pseudo = !cast<Instruction>(inst#"_V_"#idx_vlmul.MX#"_"#vlmul.MX);
- def : Pat<(type (Intr GPR:$rs1, (idx_type idx_reg_class:$rs2), GPR:$vl)),
- (Pseudo $rs1, $rs2, (NoX0 GPR:$vl), sew)>;
+ def : Pat<(type (Intr GPR:$rs1, (idx_type idx_reg_class:$rs2), (XLenVT (VLOp GPR:$vl)))),
+ (Pseudo $rs1, $rs2, GPR:$vl, sew)>;
defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
defvar PseudoMask = !cast<Instruction>(inst#"_V_"#idx_vlmul.MX#"_"#vlmul.MX#"_MASK");
def : Pat<(type (IntrMask (type GetVRegNoV0<reg_class>.R:$merge),
GPR:$rs1, (idx_type idx_reg_class:$rs2),
- (mask_type V0), GPR:$vl)),
+ (mask_type V0), (XLenVT (VLOp GPR:$vl)))),
(PseudoMask $merge,
- $rs1, $rs2, (mask_type V0), (NoX0 GPR:$vl), sew)>;
+ $rs1, $rs2, (mask_type V0), GPR:$vl, sew)>;
}
multiclass VPatUSStore<string intrinsic,
@@ -2235,12 +2241,12 @@ multiclass VPatUSStore<string intrinsic,
{
defvar Intr = !cast<Intrinsic>(intrinsic);
defvar Pseudo = !cast<Instruction>(inst#"_V_"#vlmul.MX);
- def : Pat<(Intr (type reg_class:$rs3), GPR:$rs1, GPR:$vl),
- (Pseudo $rs3, $rs1, (NoX0 GPR:$vl), sew)>;
+ def : Pat<(Intr (type reg_class:$rs3), GPR:$rs1, (XLenVT (VLOp GPR:$vl))),
+ (Pseudo $rs3, $rs1, GPR:$vl, sew)>;
defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
defvar PseudoMask = !cast<Instruction>(inst#"_V_"#vlmul.MX#"_MASK");
- def : Pat<(IntrMask (type reg_class:$rs3), GPR:$rs1, (mask_type V0), GPR:$vl),
- (PseudoMask $rs3, $rs1, (mask_type V0), (NoX0 GPR:$vl), sew)>;
+ def : Pat<(IntrMask (type reg_class:$rs3), GPR:$rs1, (mask_type V0), (XLenVT (VLOp GPR:$vl))),
+ (PseudoMask $rs3, $rs1, (mask_type V0), GPR:$vl, sew)>;
}
multiclass VPatSStore<string intrinsic,
@@ -2253,12 +2259,12 @@ multiclass VPatSStore<string intrinsic,
{
defvar Intr = !cast<Intrinsic>(intrinsic);
defvar Pseudo = !cast<Instruction>(inst#"_V_"#vlmul.MX);
- def : Pat<(Intr (type reg_class:$rs3), GPR:$rs1, GPR:$rs2, GPR:$vl),
- (Pseudo $rs3, $rs1, $rs2, (NoX0 GPR:$vl), sew)>;
+ def : Pat<(Intr (type reg_class:$rs3), GPR:$rs1, GPR:$rs2, (XLenVT (VLOp GPR:$vl))),
+ (Pseudo $rs3, $rs1, $rs2, GPR:$vl, sew)>;
defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
defvar PseudoMask = !cast<Instruction>(inst#"_V_"#vlmul.MX#"_MASK");
- def : Pat<(IntrMask (type reg_class:$rs3), GPR:$rs1, GPR:$rs2, (mask_type V0), GPR:$vl),
- (PseudoMask $rs3, $rs1, $rs2, (mask_type V0), (NoX0 GPR:$vl), sew)>;
+ def : Pat<(IntrMask (type reg_class:$rs3), GPR:$rs1, GPR:$rs2, (mask_type V0), (XLenVT (VLOp GPR:$vl))),
+ (PseudoMask $rs3, $rs1, $rs2, (mask_type V0), GPR:$vl, sew)>;
}
multiclass VPatIStore<string intrinsic,
@@ -2275,13 +2281,13 @@ multiclass VPatIStore<string intrinsic,
defvar Intr = !cast<Intrinsic>(intrinsic);
defvar Pseudo = !cast<Instruction>(inst#"_V_"#idx_vlmul.MX#"_"#vlmul.MX);
def : Pat<(Intr (type reg_class:$rs3), GPR:$rs1,
- (idx_type idx_reg_class:$rs2), GPR:$vl),
- (Pseudo $rs3, $rs1, $rs2, (NoX0 GPR:$vl), sew)>;
+ (idx_type idx_reg_class:$rs2), (XLenVT (VLOp GPR:$vl))),
+ (Pseudo $rs3, $rs1, $rs2, GPR:$vl, sew)>;
defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
defvar PseudoMask = !cast<Instruction>(inst#"_V_"#idx_vlmul.MX#"_"#vlmul.MX#"_MASK");
def : Pat<(IntrMask (type reg_class:$rs3), GPR:$rs1,
- (idx_type idx_reg_class:$rs2), (mask_type V0), GPR:$vl),
- (PseudoMask $rs3, $rs1, $rs2, (mask_type V0), (NoX0 GPR:$vl), sew)>;
+ (idx_type idx_reg_class:$rs2), (mask_type V0), (XLenVT (VLOp GPR:$vl))),
+ (PseudoMask $rs3, $rs1, $rs2, (mask_type V0), GPR:$vl, sew)>;
}
multiclass VPatUnaryS_M<string intrinsic_name,
@@ -2289,13 +2295,13 @@ multiclass VPatUnaryS_M<string intrinsic_name,
{
foreach mti = AllMasks in {
def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name)
- (mti.Mask VR:$rs1), GPR:$vl)),
+ (mti.Mask VR:$rs1), (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>(inst#"_M_"#mti.BX) $rs1,
- (NoX0 GPR:$vl), mti.SEW)>;
+ GPR:$vl, mti.SEW)>;
def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name # "_mask")
- (mti.Mask VR:$rs1), (mti.Mask V0), GPR:$vl)),
+ (mti.Mask VR:$rs1), (mti.Mask V0), (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK") $rs1,
- (mti.Mask V0), (NoX0 GPR:$vl), mti.SEW)>;
+ (mti.Mask V0), GPR:$vl, mti.SEW)>;
}
}
@@ -2360,24 +2366,24 @@ multiclass VPatNullaryV<string intrinsic, string instruction>
{
foreach vti = AllIntegerVectors in {
def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic)
- (XLenVT GPR:$vl))),
+ (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>(instruction#"_V_" # vti.LMul.MX)
- (NoX0 GPR:$vl), vti.SEW)>;
+ GPR:$vl, vti.SEW)>;
def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic # "_mask")
(vti.Vector vti.RegClass:$merge),
- (vti.Mask V0), (XLenVT GPR:$vl))),
+ (vti.Mask V0), (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>(instruction#"_V_" # vti.LMul.MX # "_MASK")
vti.RegClass:$merge, (vti.Mask V0),
- (NoX0 GPR:$vl), vti.SEW)>;
+ GPR:$vl, vti.SEW)>;
}
}
multiclass VPatNullaryM<string intrinsic, string inst> {
foreach mti = AllMasks in
def : Pat<(mti.Mask (!cast<Intrinsic>(intrinsic)
- (XLenVT GPR:$vl))),
+ (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>(inst#"_M_"#mti.BX)
- (NoX0 GPR:$vl), mti.SEW)>;
+ GPR:$vl, mti.SEW)>;
}
multiclass VPatBinary<string intrinsic,
@@ -2414,11 +2420,11 @@ multiclass VPatBinaryCarryIn<string intrinsic,
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(mask_type V0),
- (XLenVT GPR:$vl))),
+ (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (mask_type V0), (NoX0 GPR:$vl), sew)>;
+ (mask_type V0), GPR:$vl, sew)>;
}
multiclass VPatBinaryMaskOut<string intrinsic,
@@ -2435,11 +2441,11 @@ multiclass VPatBinaryMaskOut<string intrinsic,
def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (XLenVT GPR:$vl))),
+ (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- (NoX0 GPR:$vl), sew)>;
+ GPR:$vl, sew)>;
}
multiclass VPatConversion<string intrinsic,
@@ -3125,7 +3131,7 @@ def PseudoReadVL : Pseudo<(outs GPR:$rd), (ins),
// Pseudos.
let hasSideEffects = 1, mayLoad = 0, mayStore = 0, Defs = [VL, VTYPE] in {
def PseudoVSETVLI : Pseudo<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp:$vtypei), []>;
-
+def PseudoVSETIVLI : Pseudo<(outs GPR:$rd), (ins uimm5:$rs1, VTypeIOp:$vtypei), []>;
}
//===----------------------------------------------------------------------===//
@@ -3142,6 +3148,9 @@ foreach eew = EEWList in {
defm PseudoVSE # eew : VPseudoUSStore;
}
+defm PseudoVLE1 : VPseudoLoadMask;
+defm PseudoVSE1 : VPseudoStoreMask;
+
//===----------------------------------------------------------------------===//
// 7.5 Vector Strided Instructions
//===----------------------------------------------------------------------===//
@@ -3437,12 +3446,12 @@ defm PseudoVFSQRT : VPseudoUnaryV_V;
//===----------------------------------------------------------------------===//
// 14.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
//===----------------------------------------------------------------------===//
-defm PseudoVFRSQRTE7 : VPseudoUnaryV_V;
+defm PseudoVFRSQRT7 : VPseudoUnaryV_V;
//===----------------------------------------------------------------------===//
// 14.10. Vector Floating-Point Reciprocal Estimate Instruction
//===----------------------------------------------------------------------===//
-defm PseudoVFRECE7 : VPseudoUnaryV_V;
+defm PseudoVFREC7 : VPseudoUnaryV_V;
//===----------------------------------------------------------------------===//
// 14.11. Vector Floating-Point Min/Max Instructions
@@ -3719,6 +3728,15 @@ foreach vti = AllVectors in
vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;
}
+foreach vti = AllMasks in {
+ defvar PseudoVLE1 = !cast<Instruction>("PseudoVLE1_V_"#vti.BX);
+ def : Pat<(vti.Mask (int_riscv_vle1 GPR:$rs1, (XLenVT (VLOp GPR:$vl)))),
+ (PseudoVLE1 $rs1, GPR:$vl, vti.SEW)>;
+ defvar PseudoVSE1 = !cast<Instruction>("PseudoVSE1_V_"#vti.BX);
+ def : Pat<(int_riscv_vse1 (vti.Mask VR:$rs3), GPR:$rs1, (XLenVT (VLOp GPR:$vl))),
+ (PseudoVSE1 $rs3, $rs1, GPR:$vl, vti.SEW)>;
+}
+
//===----------------------------------------------------------------------===//
// 7.5 Vector Strided Instructions
//===----------------------------------------------------------------------===//
@@ -3886,62 +3904,63 @@ defm "" : VPatBinaryM_VX_VI<"int_riscv_vmsgt", "PseudoVMSGT", AllIntegerVectors>
// instruction.
foreach vti = AllIntegerVectors in {
def : Pat<(vti.Mask (int_riscv_vmslt (vti.Vector vti.RegClass:$rs1),
- (vti.Scalar simm5_plus1:$rs2), GPR:$vl)),
+ (vti.Scalar simm5_plus1:$rs2), (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>("PseudoVMSLE_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
(DecImm simm5_plus1:$rs2),
- (NoX0 GPR:$vl),
+ GPR:$vl,
vti.SEW)>;
def : Pat<(vti.Mask (int_riscv_vmslt_mask (vti.Mask V0),
(vti.Vector vti.RegClass:$rs1),
(vti.Scalar simm5_plus1:$rs2),
(vti.Mask VR:$merge),
- GPR:$vl)),
+ (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>("PseudoVMSLE_VI_"#vti.LMul.MX#"_MASK")
VR:$merge,
vti.RegClass:$rs1,
(DecImm simm5_plus1:$rs2),
(vti.Mask V0),
- (NoX0 GPR:$vl),
+ GPR:$vl,
vti.SEW)>;
- def : Pat<(vti.Mask (int_riscv_vmsltu (vti.Vector vti.RegClass:$rs1),
- (vti.Scalar simm5_plus1:$rs2), GPR:$vl)),
+ def : Pat<(vti.Mask (int_riscv_vmsltu (vti.Vector vti.RegClass:$rs1),
+ (vti.Scalar simm5_plus1:$rs2),
+ (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>("PseudoVMSLEU_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
(DecImm simm5_plus1:$rs2),
- (NoX0 GPR:$vl),
+ GPR:$vl,
vti.SEW)>;
def : Pat<(vti.Mask (int_riscv_vmsltu_mask (vti.Mask V0),
(vti.Vector vti.RegClass:$rs1),
(vti.Scalar simm5_plus1:$rs2),
(vti.Mask VR:$merge),
- GPR:$vl)),
+ (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>("PseudoVMSLEU_VI_"#vti.LMul.MX#"_MASK")
VR:$merge,
vti.RegClass:$rs1,
(DecImm simm5_plus1:$rs2),
(vti.Mask V0),
- (NoX0 GPR:$vl),
+ GPR:$vl,
vti.SEW)>;
// Special cases to avoid matching vmsltu.vi 0 (always false) to
// vmsleu.vi -1 (always true). Instead match to vmsne.vv.
def : Pat<(vti.Mask (int_riscv_vmsltu (vti.Vector vti.RegClass:$rs1),
- (vti.Scalar 0), GPR:$vl)),
+ (vti.Scalar 0), (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>("PseudoVMSNE_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
vti.RegClass:$rs1,
- (NoX0 GPR:$vl),
+ GPR:$vl,
vti.SEW)>;
def : Pat<(vti.Mask (int_riscv_vmsltu_mask (vti.Mask V0),
(vti.Vector vti.RegClass:$rs1),
(vti.Scalar 0),
(vti.Mask VR:$merge),
- GPR:$vl)),
+ (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>("PseudoVMSNE_VV_"#vti.LMul.MX#"_MASK")
VR:$merge,
vti.RegClass:$rs1,
vti.RegClass:$rs1,
(vti.Mask V0),
- (NoX0 GPR:$vl),
+ GPR:$vl,
vti.SEW)>;
}
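
The patterns above implement the vmslt(u).vi rewrite: there is no less-than immediate form, so x < N is matched as x <= N-1 via DecImm, and unsigned N == 0 is special-cased to an always-false vmsne.vv so the immediate never wraps to vmsleu.vi -1 (always true). A plain-C++ sketch of the unmasked unsigned case:

```cpp
// Illustrative only: models the DecImm rewrite for int_riscv_vmsltu with a
// constant operand; masked variants follow the same rule.
#include <cstdint>
#include <iostream>
#include <string>

std::string selectVmsltu(int64_t Imm) {
  if (Imm == 0) // x < 0 is never true for unsigned x
    return "vmsne.vv vd, vs1, vs1";
  return "vmsleu.vi vd, vs1, " + std::to_string(Imm - 1); // DecImm
}

int main() {
  std::cout << selectVmsltu(0) << '\n';  // vmsne.vv vd, vs1, vs1
  std::cout << selectVmsltu(16) << '\n'; // vmsleu.vi vd, vs1, 15
}
```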
@@ -4002,18 +4021,18 @@ defm "" : VPatBinaryV_VM_XM_IM<"int_riscv_vmerge", "PseudoVMERGE">;
//===----------------------------------------------------------------------===//
foreach vti = AllVectors in {
def : Pat<(vti.Vector (int_riscv_vmv_v_v (vti.Vector vti.RegClass:$rs1),
- GPR:$vl)),
+ (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>("PseudoVMV_V_V_"#vti.LMul.MX)
- $rs1, (NoX0 GPR:$vl), vti.SEW)>;
+ $rs1, GPR:$vl, vti.SEW)>;
}
foreach vti = AllIntegerVectors in {
- def : Pat<(vti.Vector (int_riscv_vmv_v_x GPR:$rs2, GPR:$vl)),
+ def : Pat<(vti.Vector (int_riscv_vmv_v_x GPR:$rs2, (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>("PseudoVMV_V_X_"#vti.LMul.MX)
- $rs2, (NoX0 GPR:$vl), vti.SEW)>;
- def : Pat<(vti.Vector (int_riscv_vmv_v_x simm5:$imm5, GPR:$vl)),
+ $rs2, GPR:$vl, vti.SEW)>;
+ def : Pat<(vti.Vector (int_riscv_vmv_v_x simm5:$imm5, (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>("PseudoVMV_V_I_"#vti.LMul.MX)
- simm5:$imm5, (NoX0 GPR:$vl), vti.SEW)>;
+ simm5:$imm5, GPR:$vl, vti.SEW)>;
}
//===----------------------------------------------------------------------===//
@@ -4109,12 +4128,12 @@ defm "" : VPatUnaryV_V<"int_riscv_vfsqrt", "PseudoVFSQRT", AllFloatVectors>;
//===----------------------------------------------------------------------===//
// 14.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
//===----------------------------------------------------------------------===//
-defm "" : VPatUnaryV_V<"int_riscv_vfrsqrte7", "PseudoVFRSQRTE7", AllFloatVectors>;
+defm "" : VPatUnaryV_V<"int_riscv_vfrsqrt7", "PseudoVFRSQRT7", AllFloatVectors>;
//===----------------------------------------------------------------------===//
// 14.10. Vector Floating-Point Reciprocal Estimate Instruction
//===----------------------------------------------------------------------===//
-defm "" : VPatUnaryV_V<"int_riscv_vfrece7", "PseudoVFRECE7", AllFloatVectors>;
+defm "" : VPatUnaryV_V<"int_riscv_vfrec7", "PseudoVFREC7", AllFloatVectors>;
//===----------------------------------------------------------------------===//
// 14.11. Vector Floating-Point Min/Max Instructions
@@ -4157,8 +4176,8 @@ foreach fvti = AllFloatVectors in {
defvar instr = !cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX);
def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$rs2),
(fvti.Scalar (fpimm0)),
- (fvti.Mask V0), (XLenVT GPR:$vl))),
- (instr fvti.RegClass:$rs2, 0, (fvti.Mask V0), (NoX0 GPR:$vl), fvti.SEW)>;
+ (fvti.Mask V0), (XLenVT (VLOp GPR:$vl)))),
+ (instr fvti.RegClass:$rs2, 0, (fvti.Mask V0), GPR:$vl, fvti.SEW)>;
}
//===----------------------------------------------------------------------===//
@@ -4167,16 +4186,16 @@ foreach fvti = AllFloatVectors in {
foreach fvti = AllFloatVectors in {
// If we're splatting fpimm0, use vmv.v.x vd, x0.
def : Pat<(fvti.Vector (int_riscv_vfmv_v_f
- (fvti.Scalar (fpimm0)), GPR:$vl)),
+ (fvti.Scalar (fpimm0)), (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX)
- 0, (NoX0 GPR:$vl), fvti.SEW)>;
+ 0, GPR:$vl, fvti.SEW)>;
def : Pat<(fvti.Vector (int_riscv_vfmv_v_f
- (fvti.Scalar fvti.ScalarRegClass:$rs2), GPR:$vl)),
+ (fvti.Scalar fvti.ScalarRegClass:$rs2), (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>("PseudoVFMV_V_" # fvti.ScalarSuffix # "_" #
fvti.LMul.MX)
(fvti.Scalar fvti.ScalarRegClass:$rs2),
- (NoX0 GPR:$vl), fvti.SEW)>;
+ GPR:$vl, fvti.SEW)>;
}
//===----------------------------------------------------------------------===//
@@ -4321,9 +4340,9 @@ foreach vti = AllIntegerVectors in {
def : Pat<(riscv_vmv_x_s (vti.Vector vti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVMV_X_S_" # vti.LMul.MX) $rs2, vti.SEW)>;
def : Pat<(vti.Vector (int_riscv_vmv_s_x (vti.Vector vti.RegClass:$rs1),
- GPR:$rs2, GPR:$vl)),
+ GPR:$rs2, (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>("PseudoVMV_S_X_" # vti.LMul.MX)
- (vti.Vector $rs1), $rs2, (NoX0 GPR:$vl), vti.SEW)>;
+ (vti.Vector $rs1), $rs2, GPR:$vl, vti.SEW)>;
}
} // Predicates = [HasStdExtV]
@@ -4339,12 +4358,12 @@ foreach fvti = AllFloatVectors in {
(instr $rs2, fvti.SEW)>;
def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1),
- (fvti.Scalar fvti.ScalarRegClass:$rs2), GPR:$vl)),
+ (fvti.Scalar fvti.ScalarRegClass:$rs2), (XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>("PseudoVFMV_S_"#fvti.ScalarSuffix#"_" #
fvti.LMul.MX)
(fvti.Vector $rs1),
(fvti.Scalar fvti.ScalarRegClass:$rs2),
- (NoX0 GPR:$vl), fvti.SEW)>;
+ GPR:$vl, fvti.SEW)>;
}
} // Predicates = [HasStdExtV, HasStdExtF]
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index aea3d0e17ccc..dee67708bed1 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -8,7 +8,7 @@
///
/// This file contains the required infrastructure and SDNode patterns to
/// support code generation for the standard 'V' (Vector) extension, version
-/// 0.9. This version is still experimental as the 'V' extension hasn't been
+/// 0.10. This version is still experimental as the 'V' extension hasn't been
/// ratified yet.
///
/// This file is included from and depends upon RISCVInstrInfoVPseudos.td
@@ -384,8 +384,8 @@ defm "" : VPatBinarySDNode_VV_VX<mulhs, "PseudoVMULH">;
defm "" : VPatBinarySDNode_VV_VX<mulhu, "PseudoVMULHU">;
// 12.11. Vector Integer Divide Instructions
-defm "" : VPatBinarySDNode_VV_VX<sdiv, "PseudoVDIVU">;
-defm "" : VPatBinarySDNode_VV_VX<udiv, "PseudoVDIV">;
+defm "" : VPatBinarySDNode_VV_VX<udiv, "PseudoVDIVU">;
+defm "" : VPatBinarySDNode_VV_VX<sdiv, "PseudoVDIV">;
defm "" : VPatBinarySDNode_VV_VX<urem, "PseudoVREMU">;
defm "" : VPatBinarySDNode_VV_VX<srem, "PseudoVREM">;