aboutsummaryrefslogtreecommitdiff
path: root/contrib/llvm-project/llvm/lib/Target/RISCV
diff options
context:
space:
mode:
authorDimitry Andric <dim@FreeBSD.org>2020-07-31 22:23:32 +0000
committerDimitry Andric <dim@FreeBSD.org>2020-07-31 22:23:32 +0000
commit979e22ff1ac2a50acbf94e28576a058db89003b5 (patch)
treef9ebe42670b788a1aed8dd616ec64fd518115aa9 /contrib/llvm-project/llvm/lib/Target/RISCV
parent590d96feea75246dee213cb528930df8f6234b87 (diff)
parent899468a0006db4146d9b229234a183f499f7bcd2 (diff)
Notes
Diffstat (limited to 'contrib/llvm-project/llvm/lib/Target/RISCV')
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp324
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h9
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp27
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoB.td429
4 files changed, 783 insertions, 6 deletions
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index a0ae05081adc..7570385e38e3 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -184,6 +184,330 @@ bool RISCVDAGToDAGISel::SelectAddrFI(SDValue Addr, SDValue &Base) {
return false;
}
+// Check that it is a SLOI (Shift Left Ones Immediate). We first check that
+// it is the right node tree:
+//
+// (OR (SHL RS1, VC2), VC1)
+//
+// and then we check that VC1, the mask used to fill with ones, is compatible
+// with VC2, the shamt:
+//
+// VC1 == maskTrailingOnes<uint64_t>(VC2)
+
+bool RISCVDAGToDAGISel::SelectSLOI(SDValue N, SDValue &RS1, SDValue &Shamt) {
+ MVT XLenVT = Subtarget->getXLenVT();
+ if (N.getOpcode() == ISD::OR) {
+ SDValue Or = N;
+ if (Or.getOperand(0).getOpcode() == ISD::SHL) {
+ SDValue Shl = Or.getOperand(0);
+ if (isa<ConstantSDNode>(Shl.getOperand(1)) &&
+ isa<ConstantSDNode>(Or.getOperand(1))) {
+ if (XLenVT == MVT::i64) {
+ uint64_t VC1 = Or.getConstantOperandVal(1);
+ uint64_t VC2 = Shl.getConstantOperandVal(1);
+ if (VC1 == maskTrailingOnes<uint64_t>(VC2)) {
+ RS1 = Shl.getOperand(0);
+ Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
+ Shl.getOperand(1).getValueType());
+ return true;
+ }
+ }
+ if (XLenVT == MVT::i32) {
+ uint32_t VC1 = Or.getConstantOperandVal(1);
+ uint32_t VC2 = Shl.getConstantOperandVal(1);
+ if (VC1 == maskTrailingOnes<uint32_t>(VC2)) {
+ RS1 = Shl.getOperand(0);
+ Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
+ Shl.getOperand(1).getValueType());
+ return true;
+ }
+ }
+ }
+ }
+ }
+ return false;
+}
+
+// Check that it is a SROI (Shift Right Ones Immediate). We first check that
+// it is the right node tree:
+//
+// (OR (SRL RS1, VC2), VC1)
+//
+// and then we check that VC1, the mask used to fill with ones, is compatible
+// with VC2, the shamt:
+//
+// VC1 == maskLeadingOnes<uint64_t>(VC2)
+
+bool RISCVDAGToDAGISel::SelectSROI(SDValue N, SDValue &RS1, SDValue &Shamt) {
+ MVT XLenVT = Subtarget->getXLenVT();
+ if (N.getOpcode() == ISD::OR) {
+ SDValue Or = N;
+ if (Or.getOperand(0).getOpcode() == ISD::SRL) {
+ SDValue Srl = Or.getOperand(0);
+ if (isa<ConstantSDNode>(Srl.getOperand(1)) &&
+ isa<ConstantSDNode>(Or.getOperand(1))) {
+ if (XLenVT == MVT::i64) {
+ uint64_t VC1 = Or.getConstantOperandVal(1);
+ uint64_t VC2 = Srl.getConstantOperandVal(1);
+ if (VC1 == maskLeadingOnes<uint64_t>(VC2)) {
+ RS1 = Srl.getOperand(0);
+ Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
+ Srl.getOperand(1).getValueType());
+ return true;
+ }
+ }
+ if (XLenVT == MVT::i32) {
+ uint32_t VC1 = Or.getConstantOperandVal(1);
+ uint32_t VC2 = Srl.getConstantOperandVal(1);
+ if (VC1 == maskLeadingOnes<uint32_t>(VC2)) {
+ RS1 = Srl.getOperand(0);
+ Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
+ Srl.getOperand(1).getValueType());
+ return true;
+ }
+ }
+ }
+ }
+ }
+ return false;
+}
+
+// Check that it is a RORI (Rotate Right Immediate). We first check that
+// it is the right node tree:
+//
+// (ROTL RS1, VC)
+//
+// The compiler translates immediate rotations to the right given by the call
+// to the rotateright32/rotateright64 intrinsics as rotations to the left.
+// Since the rotation to the left can be easily emulated as a rotation to the
+// right by negating the constant, there is no encoding for ROLI.
+// We then select the immediate left rotations as RORI by the complementary
+// constant:
+//
+// Shamt == XLen - VC
+
+bool RISCVDAGToDAGISel::SelectRORI(SDValue N, SDValue &RS1, SDValue &Shamt) {
+ MVT XLenVT = Subtarget->getXLenVT();
+ if (N.getOpcode() == ISD::ROTL) {
+ if (isa<ConstantSDNode>(N.getOperand(1))) {
+ if (XLenVT == MVT::i64) {
+ uint64_t VC = N.getConstantOperandVal(1);
+ Shamt = CurDAG->getTargetConstant((64 - VC), SDLoc(N),
+ N.getOperand(1).getValueType());
+ RS1 = N.getOperand(0);
+ return true;
+ }
+ if (XLenVT == MVT::i32) {
+ uint32_t VC = N.getConstantOperandVal(1);
+ Shamt = CurDAG->getTargetConstant((32 - VC), SDLoc(N),
+ N.getOperand(1).getValueType());
+ RS1 = N.getOperand(0);
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+
+// Check that it is a SLLIUW (Shift Logical Left Immediate Unsigned i32
+// on RV64).
+// SLLIUW is the same as SLLI except for the fact that it clears the bits
+// XLEN-1:32 of the input RS1 before shifting.
+// We first check that it is the right node tree:
+//
+// (AND (SHL RS1, VC2), VC1)
+//
+// We check that VC2, the shamt is less than 32, otherwise the pattern is
+// exactly the same as SLLI and we give priority to that.
+// Finally we check that VC1, the mask used to clear the upper 32 bits
+// of RS1, is correct:
+//
+// VC1 == (0xFFFFFFFF << VC2)
+
+bool RISCVDAGToDAGISel::SelectSLLIUW(SDValue N, SDValue &RS1, SDValue &Shamt) {
+ if (N.getOpcode() == ISD::AND && Subtarget->getXLenVT() == MVT::i64) {
+ SDValue And = N;
+ if (And.getOperand(0).getOpcode() == ISD::SHL) {
+ SDValue Shl = And.getOperand(0);
+ if (isa<ConstantSDNode>(Shl.getOperand(1)) &&
+ isa<ConstantSDNode>(And.getOperand(1))) {
+ uint64_t VC1 = And.getConstantOperandVal(1);
+ uint64_t VC2 = Shl.getConstantOperandVal(1);
+ if (VC2 < 32 && VC1 == ((uint64_t)0xFFFFFFFF << VC2)) {
+ RS1 = Shl.getOperand(0);
+ Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
+ Shl.getOperand(1).getValueType());
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+}
+
+// Check that it is a SLOIW (Shift Left Ones Immediate i32 on RV64).
+// We first check that it is the right node tree:
+//
+// (SIGN_EXTEND_INREG (OR (SHL RS1, VC2), VC1))
+//
+// and then we check that VC1, the mask used to fill with ones, is compatible
+// with VC2, the shamt:
+//
+// VC1 == maskTrailingOnes<uint32_t>(VC2)
+
+bool RISCVDAGToDAGISel::SelectSLOIW(SDValue N, SDValue &RS1, SDValue &Shamt) {
+ if (Subtarget->getXLenVT() == MVT::i64 &&
+ N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
+ cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
+ if (N.getOperand(0).getOpcode() == ISD::OR) {
+ SDValue Or = N.getOperand(0);
+ if (Or.getOperand(0).getOpcode() == ISD::SHL) {
+ SDValue Shl = Or.getOperand(0);
+ if (isa<ConstantSDNode>(Shl.getOperand(1)) &&
+ isa<ConstantSDNode>(Or.getOperand(1))) {
+ uint32_t VC1 = Or.getConstantOperandVal(1);
+ uint32_t VC2 = Shl.getConstantOperandVal(1);
+ if (VC1 == maskTrailingOnes<uint32_t>(VC2)) {
+ RS1 = Shl.getOperand(0);
+ Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
+ Shl.getOperand(1).getValueType());
+ return true;
+ }
+ }
+ }
+ }
+ }
+ return false;
+}
+
+// Check that it is a SROIW (Shift Right Ones Immediate i32 on RV64).
+// We first check that it is the right node tree:
+//
+// (OR (SRL RS1, VC2), VC1)
+//
+// and then we check that VC1, the mask used to fill with ones, is compatible
+// with VC2, the shamt:
+//
+// VC1 == maskLeadingOnes<uint32_t>(VC2)
+
+bool RISCVDAGToDAGISel::SelectSROIW(SDValue N, SDValue &RS1, SDValue &Shamt) {
+ if (N.getOpcode() == ISD::OR && Subtarget->getXLenVT() == MVT::i64) {
+ SDValue Or = N;
+ if (Or.getOperand(0).getOpcode() == ISD::SRL) {
+ SDValue Srl = Or.getOperand(0);
+ if (isa<ConstantSDNode>(Srl.getOperand(1)) &&
+ isa<ConstantSDNode>(Or.getOperand(1))) {
+ uint32_t VC1 = Or.getConstantOperandVal(1);
+ uint32_t VC2 = Srl.getConstantOperandVal(1);
+ if (VC1 == maskLeadingOnes<uint32_t>(VC2)) {
+ RS1 = Srl.getOperand(0);
+ Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
+ Srl.getOperand(1).getValueType());
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+}
+
+// Check that it is a RORIW (i32 Right Rotate Immediate on RV64).
+// We first check that it is the right node tree:
+//
+// (SIGN_EXTEND_INREG (OR (SHL (AssertSext RS1, i32), VC2),
+// (SRL (AND (AssertSext RS2, i32), VC3), VC1)))
+//
+// Then we check that the constant operands respect these constraints:
+//
+// VC2 == 32 - VC1
+// VC3 == maskLeadingOnes<uint32_t>(VC2)
+//
+// being VC1 the Shamt we need, VC2 the complementary of Shamt over 32
+// and VC3 a 32 bit mask of (32 - VC1) leading ones.
+
+bool RISCVDAGToDAGISel::SelectRORIW(SDValue N, SDValue &RS1, SDValue &Shamt) {
+ if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
+ Subtarget->getXLenVT() == MVT::i64 &&
+ cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
+ if (N.getOperand(0).getOpcode() == ISD::OR) {
+ SDValue Or = N.getOperand(0);
+ if (Or.getOperand(0).getOpcode() == ISD::SHL &&
+ Or.getOperand(1).getOpcode() == ISD::SRL) {
+ SDValue Shl = Or.getOperand(0);
+ SDValue Srl = Or.getOperand(1);
+ if (Srl.getOperand(0).getOpcode() == ISD::AND) {
+ SDValue And = Srl.getOperand(0);
+ if (isa<ConstantSDNode>(Srl.getOperand(1)) &&
+ isa<ConstantSDNode>(Shl.getOperand(1)) &&
+ isa<ConstantSDNode>(And.getOperand(1))) {
+ uint32_t VC1 = Srl.getConstantOperandVal(1);
+ uint32_t VC2 = Shl.getConstantOperandVal(1);
+ uint32_t VC3 = And.getConstantOperandVal(1);
+ if (VC2 == (32 - VC1) &&
+ VC3 == maskLeadingOnes<uint32_t>(VC2)) {
+ RS1 = Shl.getOperand(0);
+ Shamt = CurDAG->getTargetConstant(VC1, SDLoc(N),
+ Srl.getOperand(1).getValueType());
+ return true;
+ }
+ }
+ }
+ }
+ }
+ }
+ return false;
+}
+
+// Check that it is a FSRIW (i32 Funnel Shift Right Immediate on RV64).
+// We first check that it is the right node tree:
+//
+// (SIGN_EXTEND_INREG (OR (SHL (AssertSext RS1, i32), VC2),
+// (SRL (AND (AssertSext RS2, i32), VC3), VC1)))
+//
+// Then we check that the constant operands respect these constraints:
+//
+// VC2 == 32 - VC1
+// VC3 == maskLeadingOnes<uint32_t>(VC2)
+//
+// being VC1 the Shamt we need, VC2 the complementary of Shamt over 32
+// and VC3 a 32 bit mask of (32 - VC1) leading ones.
+
+bool RISCVDAGToDAGISel::SelectFSRIW(SDValue N, SDValue &RS1, SDValue &RS2,
+ SDValue &Shamt) {
+ if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
+ Subtarget->getXLenVT() == MVT::i64 &&
+ cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
+ if (N.getOperand(0).getOpcode() == ISD::OR) {
+ SDValue Or = N.getOperand(0);
+ if (Or.getOperand(0).getOpcode() == ISD::SHL &&
+ Or.getOperand(1).getOpcode() == ISD::SRL) {
+ SDValue Shl = Or.getOperand(0);
+ SDValue Srl = Or.getOperand(1);
+ if (Srl.getOperand(0).getOpcode() == ISD::AND) {
+ SDValue And = Srl.getOperand(0);
+ if (isa<ConstantSDNode>(Srl.getOperand(1)) &&
+ isa<ConstantSDNode>(Shl.getOperand(1)) &&
+ isa<ConstantSDNode>(And.getOperand(1))) {
+ uint32_t VC1 = Srl.getConstantOperandVal(1);
+ uint32_t VC2 = Shl.getConstantOperandVal(1);
+ uint32_t VC3 = And.getConstantOperandVal(1);
+ if (VC2 == (32 - VC1) &&
+ VC3 == maskLeadingOnes<uint32_t>(VC2)) {
+ RS1 = Shl.getOperand(0);
+ RS2 = And.getOperand(0);
+ Shamt = CurDAG->getTargetConstant(VC1, SDLoc(N),
+ Srl.getOperand(1).getValueType());
+ return true;
+ }
+ }
+ }
+ }
+ }
+ }
+ return false;
+}
+
// Merge an ADDI into the offset of a load/store instruction where possible.
// (load (addi base, off1), off2) -> (load base, off1+off2)
// (store val, (addi base, off1), off2) -> (store val, base, off1+off2)
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
index dcf733ec3675..0ca12510a230 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -45,6 +45,15 @@ public:
bool SelectAddrFI(SDValue Addr, SDValue &Base);
+ bool SelectSLOI(SDValue N, SDValue &RS1, SDValue &Shamt);
+ bool SelectSROI(SDValue N, SDValue &RS1, SDValue &Shamt);
+ bool SelectRORI(SDValue N, SDValue &RS1, SDValue &Shamt);
+ bool SelectSLLIUW(SDValue N, SDValue &RS1, SDValue &Shamt);
+ bool SelectSLOIW(SDValue N, SDValue &RS1, SDValue &Shamt);
+ bool SelectSROIW(SDValue N, SDValue &RS1, SDValue &Shamt);
+ bool SelectRORIW(SDValue N, SDValue &RS1, SDValue &Shamt);
+ bool SelectFSRIW(SDValue N, SDValue &RS1, SDValue &RS2, SDValue &Shamt);
+
// Include the pieces autogenerated from the target description.
#include "RISCVGenDAGISel.inc"
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 91fc69b5bc10..03d9eefd59d0 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -149,12 +149,27 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
- setOperationAction(ISD::ROTL, XLenVT, Expand);
- setOperationAction(ISD::ROTR, XLenVT, Expand);
- setOperationAction(ISD::BSWAP, XLenVT, Expand);
- setOperationAction(ISD::CTTZ, XLenVT, Expand);
- setOperationAction(ISD::CTLZ, XLenVT, Expand);
- setOperationAction(ISD::CTPOP, XLenVT, Expand);
+ if (!(Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp())) {
+ setOperationAction(ISD::ROTL, XLenVT, Expand);
+ setOperationAction(ISD::ROTR, XLenVT, Expand);
+ }
+
+ if (!Subtarget.hasStdExtZbp())
+ setOperationAction(ISD::BSWAP, XLenVT, Expand);
+
+ if (!Subtarget.hasStdExtZbb()) {
+ setOperationAction(ISD::CTTZ, XLenVT, Expand);
+ setOperationAction(ISD::CTLZ, XLenVT, Expand);
+ setOperationAction(ISD::CTPOP, XLenVT, Expand);
+ }
+
+ if (Subtarget.hasStdExtZbp())
+ setOperationAction(ISD::BITREVERSE, XLenVT, Legal);
+
+ if (Subtarget.hasStdExtZbt()) {
+ setOperationAction(ISD::FSHL, XLenVT, Legal);
+ setOperationAction(ISD::FSHR, XLenVT, Legal);
+ }
ISD::CondCode FPCCToExtend[] = {
ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoB.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
index 34a463626e29..afac509f743d 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
@@ -632,3 +632,432 @@ let Predicates = [HasStdExtZbproposedc, HasStdExtZbbOrZbp, HasStdExtC, IsRV64] i
def : CompressPat<(PACK GPRC:$rs1, GPRC:$rs1, X0),
(C_ZEXTW GPRC:$rs1)>;
} // Predicates = [HasStdExtZbproposedc, HasStdExtC, IsRV64]
+
+//===----------------------------------------------------------------------===//
+// Codegen patterns
+//===----------------------------------------------------------------------===//
+def SLOIPat : ComplexPattern<XLenVT, 2, "SelectSLOI", [or]>;
+def SROIPat : ComplexPattern<XLenVT, 2, "SelectSROI", [or]>;
+def RORIPat : ComplexPattern<XLenVT, 2, "SelectRORI", [rotl]>;
+def SLLIUWPat : ComplexPattern<i64, 2, "SelectSLLIUW", [and]>;
+def SLOIWPat : ComplexPattern<i64, 2, "SelectSLOIW", [sext_inreg]>;
+def SROIWPat : ComplexPattern<i64, 2, "SelectSROIW", [or]>;
+def RORIWPat : ComplexPattern<i64, 2, "SelectRORIW", [sext_inreg]>;
+def FSRIWPat : ComplexPattern<i64, 3, "SelectFSRIW", [sext_inreg]>;
+
+let Predicates = [HasStdExtZbbOrZbp] in {
+def : Pat<(and GPR:$rs1, (not GPR:$rs2)), (ANDN GPR:$rs1, GPR:$rs2)>;
+def : Pat<(or GPR:$rs1, (not GPR:$rs2)), (ORN GPR:$rs1, GPR:$rs2)>;
+def : Pat<(xor GPR:$rs1, (not GPR:$rs2)), (XNOR GPR:$rs1, GPR:$rs2)>;
+} // Predicates = [HasStdExtZbbOrZbp]
+
+let Predicates = [HasStdExtZbb] in {
+def : Pat<(xor (shl (xor GPR:$rs1, -1), GPR:$rs2), -1),
+ (SLO GPR:$rs1, GPR:$rs2)>;
+def : Pat<(xor (srl (xor GPR:$rs1, -1), GPR:$rs2), -1),
+ (SRO GPR:$rs1, GPR:$rs2)>;
+} // Predicates = [HasStdExtZbb]
+
+let Predicates = [HasStdExtZbbOrZbp] in {
+def : Pat<(rotl GPR:$rs1, GPR:$rs2), (ROL GPR:$rs1, GPR:$rs2)>;
+def : Pat<(fshl GPR:$rs1, GPR:$rs1, GPR:$rs2), (ROL GPR:$rs1, GPR:$rs2)>;
+def : Pat<(rotr GPR:$rs1, GPR:$rs2), (ROR GPR:$rs1, GPR:$rs2)>;
+def : Pat<(fshr GPR:$rs1, GPR:$rs1, GPR:$rs2), (ROR GPR:$rs1, GPR:$rs2)>;
+} // Predicates = [HasStdExtZbbOrZbp]
+
+let Predicates = [HasStdExtZbs, IsRV32] in
+def : Pat<(and (xor (shl 1, (and GPR:$rs2, 31)), -1), GPR:$rs1),
+ (SBCLR GPR:$rs1, GPR:$rs2)>;
+let Predicates = [HasStdExtZbs, IsRV64] in
+def : Pat<(and (xor (shl 1, (and GPR:$rs2, 63)), -1), GPR:$rs1),
+ (SBCLR GPR:$rs1, GPR:$rs2)>;
+
+let Predicates = [HasStdExtZbs] in
+def : Pat<(and (rotl -2, GPR:$rs2), GPR:$rs1), (SBCLR GPR:$rs1, GPR:$rs2)>;
+
+let Predicates = [HasStdExtZbs, IsRV32] in
+def : Pat<(or (shl 1, (and GPR:$rs2, 31)), GPR:$rs1),
+ (SBSET GPR:$rs1, GPR:$rs2)>;
+let Predicates = [HasStdExtZbs, IsRV64] in
+def : Pat<(or (shl 1, (and GPR:$rs2, 63)), GPR:$rs1),
+ (SBSET GPR:$rs1, GPR:$rs2)>;
+
+let Predicates = [HasStdExtZbs, IsRV32] in
+def : Pat<(xor (shl 1, (and GPR:$rs2, 31)), GPR:$rs1),
+ (SBINV GPR:$rs1, GPR:$rs2)>;
+let Predicates = [HasStdExtZbs, IsRV64] in
+def : Pat<(xor (shl 1, (and GPR:$rs2, 63)), GPR:$rs1),
+ (SBINV GPR:$rs1, GPR:$rs2)>;
+
+let Predicates = [HasStdExtZbs, IsRV32] in
+def : Pat<(and (srl GPR:$rs1, (and GPR:$rs2, 31)), 1),
+ (SBEXT GPR:$rs1, GPR:$rs2)>;
+
+let Predicates = [HasStdExtZbs, IsRV64] in
+def : Pat<(and (srl GPR:$rs1, (and GPR:$rs2, 63)), 1),
+ (SBEXT GPR:$rs1, GPR:$rs2)>;
+
+let Predicates = [HasStdExtZbb] in {
+def : Pat<(SLOIPat GPR:$rs1, uimmlog2xlen:$shamt),
+ (SLOI GPR:$rs1, uimmlog2xlen:$shamt)>;
+def : Pat<(SROIPat GPR:$rs1, uimmlog2xlen:$shamt),
+ (SROI GPR:$rs1, uimmlog2xlen:$shamt)>;
+} // Predicates = [HasStdExtZbb]
+
+// There's no encoding for roli in the current version of the 'B' extension
+// (v0.92) as it can be implemented with rori by negating the immediate.
+// For this reason we pattern-match only against rori[w].
+let Predicates = [HasStdExtZbbOrZbp] in
+def : Pat<(RORIPat GPR:$rs1, uimmlog2xlen:$shamt),
+ (RORI GPR:$rs1, uimmlog2xlen:$shamt)>;
+
+// We don't pattern-match sbclri[w], sbseti[w], sbinvi[w] because they are
+// pattern-matched by simple andi, ori, and xori.
+let Predicates = [HasStdExtZbs] in
+def : Pat<(and (srl GPR:$rs1, uimmlog2xlen:$shamt), (XLenVT 1)),
+ (SBEXTI GPR:$rs1, uimmlog2xlen:$shamt)>;
+
+let Predicates = [HasStdExtZbp, IsRV32] in {
+def : Pat<(or (or (and (srl GPR:$rs1, (i32 1)), (i32 0x55555555)), GPR:$rs1),
+ (and (shl GPR:$rs1, (i32 1)), (i32 0xAAAAAAAA))),
+ (GORCI GPR:$rs1, (i32 1))>;
+def : Pat<(or (or (and (srl GPR:$rs1, (i32 2)), (i32 0x33333333)), GPR:$rs1),
+ (and (shl GPR:$rs1, (i32 2)), (i32 0xCCCCCCCC))),
+ (GORCI GPR:$rs1, (i32 2))>;
+def : Pat<(or (or (and (srl GPR:$rs1, (i32 4)), (i32 0x0F0F0F0F)), GPR:$rs1),
+ (and (shl GPR:$rs1, (i32 4)), (i32 0xF0F0F0F0))),
+ (GORCI GPR:$rs1, (i32 4))>;
+def : Pat<(or (or (and (srl GPR:$rs1, (i32 8)), (i32 0x00FF00FF)), GPR:$rs1),
+ (and (shl GPR:$rs1, (i32 8)), (i32 0xFF00FF00))),
+ (GORCI GPR:$rs1, (i32 8))>;
+def : Pat<(or (or (srl GPR:$rs1, (i32 16)), GPR:$rs1),
+ (shl GPR:$rs1, (i32 16))),
+ (GORCI GPR:$rs1, (i32 16))>;
+} // Predicates = [HasStdExtZbp, IsRV32]
+
+let Predicates = [HasStdExtZbp, IsRV64] in {
+def : Pat<(or (or (and (srl GPR:$rs1, (i64 1)), (i64 0x5555555555555555)),
+ GPR:$rs1),
+ (and (shl GPR:$rs1, (i64 1)), (i64 0xAAAAAAAAAAAAAAAA))),
+ (GORCI GPR:$rs1, (i64 1))>;
+def : Pat<(or (or (and (srl GPR:$rs1, (i64 2)), (i64 0x3333333333333333)),
+ GPR:$rs1),
+ (and (shl GPR:$rs1, (i64 2)), (i64 0xCCCCCCCCCCCCCCCC))),
+ (GORCI GPR:$rs1, (i64 2))>;
+def : Pat<(or (or (and (srl GPR:$rs1, (i64 4)), (i64 0x0F0F0F0F0F0F0F0F)),
+ GPR:$rs1),
+ (and (shl GPR:$rs1, (i64 4)), (i64 0xF0F0F0F0F0F0F0F0))),
+ (GORCI GPR:$rs1, (i64 4))>;
+def : Pat<(or (or (and (srl GPR:$rs1, (i64 8)), (i64 0x00FF00FF00FF00FF)),
+ GPR:$rs1),
+ (and (shl GPR:$rs1, (i64 8)), (i64 0xFF00FF00FF00FF00))),
+ (GORCI GPR:$rs1, (i64 8))>;
+def : Pat<(or (or (and (srl GPR:$rs1, (i64 16)), (i64 0x0000FFFF0000FFFF)),
+ GPR:$rs1),
+ (and (shl GPR:$rs1, (i64 16)), (i64 0xFFFF0000FFFF0000))),
+ (GORCI GPR:$rs1, (i64 16))>;
+def : Pat<(or (or (srl GPR:$rs1, (i64 32)), GPR:$rs1),
+ (shl GPR:$rs1, (i64 32))),
+ (GORCI GPR:$rs1, (i64 32))>;
+} // Predicates = [HasStdExtZbp, IsRV64]
+
+let Predicates = [HasStdExtZbp, IsRV32] in {
+def : Pat<(or (and (shl GPR:$rs1, (i32 1)), (i32 0xAAAAAAAA)),
+ (and (srl GPR:$rs1, (i32 1)), (i32 0x55555555))),
+ (GREVI GPR:$rs1, (i32 1))>;
+def : Pat<(or (and (shl GPR:$rs1, (i32 2)), (i32 0xCCCCCCCC)),
+ (and (srl GPR:$rs1, (i32 2)), (i32 0x33333333))),
+ (GREVI GPR:$rs1, (i32 2))>;
+def : Pat<(or (and (shl GPR:$rs1, (i32 4)), (i32 0xF0F0F0F0)),
+ (and (srl GPR:$rs1, (i32 4)), (i32 0x0F0F0F0F))),
+ (GREVI GPR:$rs1, (i32 4))>;
+def : Pat<(or (and (shl GPR:$rs1, (i32 8)), (i32 0xFF00FF00)),
+ (and (srl GPR:$rs1, (i32 8)), (i32 0x00FF00FF))),
+ (GREVI GPR:$rs1, (i32 8))>;
+def : Pat<(rotr (bswap GPR:$rs1), (i32 16)), (GREVI GPR:$rs1, (i32 8))>;
+def : Pat<(or (shl GPR:$rs1, (i32 16)), (srl GPR:$rs1, (i32 16))),
+ (GREVI GPR:$rs1, (i32 16))>;
+def : Pat<(rotl GPR:$rs1, (i32 16)), (GREVI GPR:$rs1, (i32 16))>;
+def : Pat<(bswap GPR:$rs1), (GREVI GPR:$rs1, (i32 24))>;
+def : Pat<(bitreverse GPR:$rs1), (GREVI GPR:$rs1, (i32 31))>;
+} // Predicates = [HasStdExtZbp, IsRV32]
+
+let Predicates = [HasStdExtZbp, IsRV64] in {
+def : Pat<(or (and (shl GPR:$rs1, (i64 1)), (i64 0xAAAAAAAAAAAAAAAA)),
+ (and (srl GPR:$rs1, (i64 1)), (i64 0x5555555555555555))),
+ (GREVI GPR:$rs1, (i64 1))>;
+def : Pat<(or (and (shl GPR:$rs1, (i64 2)), (i64 0xCCCCCCCCCCCCCCCC)),
+ (and (srl GPR:$rs1, (i64 2)), (i64 0x3333333333333333))),
+ (GREVI GPR:$rs1, (i64 2))>;
+def : Pat<(or (and (shl GPR:$rs1, (i64 4)), (i64 0xF0F0F0F0F0F0F0F0)),
+ (and (srl GPR:$rs1, (i64 4)), (i64 0x0F0F0F0F0F0F0F0F))),
+ (GREVI GPR:$rs1, (i64 4))>;
+def : Pat<(or (and (shl GPR:$rs1, (i64 8)), (i64 0xFF00FF00FF00FF00)),
+ (and (srl GPR:$rs1, (i64 8)), (i64 0x00FF00FF00FF00FF))),
+ (GREVI GPR:$rs1, (i64 8))>;
+def : Pat<(or (and (shl GPR:$rs1, (i64 16)), (i64 0xFFFF0000FFFF0000)),
+ (and (srl GPR:$rs1, (i64 16)), (i64 0x0000FFFF0000FFFF))),
+ (GREVI GPR:$rs1, (i64 16))>;
+def : Pat<(or (shl GPR:$rs1, (i64 32)), (srl GPR:$rs1, (i64 32))),
+ (GREVI GPR:$rs1, (i64 32))>;
+def : Pat<(rotl GPR:$rs1, (i64 32)), (GREVI GPR:$rs1, (i64 32))>;
+def : Pat<(bswap GPR:$rs1), (GREVI GPR:$rs1, (i64 56))>;
+def : Pat<(bitreverse GPR:$rs1), (GREVI GPR:$rs1, (i64 63))>;
+} // Predicates = [HasStdExtZbp, IsRV64]
+
+let Predicates = [HasStdExtZbt] in {
+def : Pat<(or (and (xor GPR:$rs2, -1), GPR:$rs3), (and GPR:$rs2, GPR:$rs1)),
+ (CMIX GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
+def : Pat<(riscv_selectcc GPR:$rs2, (XLenVT 0), (XLenVT 17), GPR:$rs3, GPR:$rs1),
+ (CMOV GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
+def : Pat<(fshl GPR:$rs1, GPR:$rs2, GPR:$rs3),
+ (FSL GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
+def : Pat<(fshr GPR:$rs1, GPR:$rs2, GPR:$rs3),
+ (FSR GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
+def : Pat<(fshr GPR:$rs1, GPR:$rs2, uimmlog2xlen:$shamt),
+ (FSRI GPR:$rs1, GPR:$rs2, uimmlog2xlen:$shamt)>;
+} // Predicates = [HasStdExtZbt]
+
+let Predicates = [HasStdExtZbb] in {
+def : Pat<(ctlz GPR:$rs1), (CLZ GPR:$rs1)>;
+def : Pat<(cttz GPR:$rs1), (CTZ GPR:$rs1)>;
+def : Pat<(ctpop GPR:$rs1), (PCNT GPR:$rs1)>;
+} // Predicates = [HasStdExtZbb]
+
+let Predicates = [HasStdExtZbb, IsRV32] in
+def : Pat<(sra (shl GPR:$rs1, (i32 24)), (i32 24)), (SEXTB GPR:$rs1)>;
+let Predicates = [HasStdExtZbb, IsRV64] in
+def : Pat<(sra (shl GPR:$rs1, (i64 56)), (i64 56)), (SEXTB GPR:$rs1)>;
+
+let Predicates = [HasStdExtZbb, IsRV32] in
+def : Pat<(sra (shl GPR:$rs1, (i32 16)), (i32 16)), (SEXTH GPR:$rs1)>;
+let Predicates = [HasStdExtZbb, IsRV64] in
+def : Pat<(sra (shl GPR:$rs1, (i64 48)), (i64 48)), (SEXTH GPR:$rs1)>;
+
+let Predicates = [HasStdExtZbb] in {
+def : Pat<(smin GPR:$rs1, GPR:$rs2), (MIN GPR:$rs1, GPR:$rs2)>;
+def : Pat<(riscv_selectcc GPR:$rs1, GPR:$rs2, (XLenVT 20), GPR:$rs1, GPR:$rs2),
+ (MIN GPR:$rs1, GPR:$rs2)>;
+def : Pat<(smax GPR:$rs1, GPR:$rs2), (MAX GPR:$rs1, GPR:$rs2)>;
+def : Pat<(riscv_selectcc GPR:$rs2, GPR:$rs1, (XLenVT 20), GPR:$rs1, GPR:$rs2),
+ (MAX GPR:$rs1, GPR:$rs2)>;
+def : Pat<(umin GPR:$rs1, GPR:$rs2), (MINU GPR:$rs1, GPR:$rs2)>;
+def : Pat<(riscv_selectcc GPR:$rs1, GPR:$rs2, (XLenVT 12), GPR:$rs1, GPR:$rs2),
+ (MINU GPR:$rs1, GPR:$rs2)>;
+def : Pat<(umax GPR:$rs1, GPR:$rs2), (MAXU GPR:$rs1, GPR:$rs2)>;
+def : Pat<(riscv_selectcc GPR:$rs2, GPR:$rs1, (XLenVT 12), GPR:$rs1, GPR:$rs2),
+ (MAXU GPR:$rs1, GPR:$rs2)>;
+} // Predicates = [HasStdExtZbb]
+
+let Predicates = [HasStdExtZbbOrZbp, IsRV32] in
+def : Pat<(or (and GPR:$rs1, 0x0000FFFF), (shl GPR:$rs2, (i32 16))),
+ (PACK GPR:$rs1, GPR:$rs2)>;
+let Predicates = [HasStdExtZbbOrZbp, IsRV64] in
+def : Pat<(or (and GPR:$rs1, 0x00000000FFFFFFFF), (shl GPR:$rs2, (i64 32))),
+ (PACK GPR:$rs1, GPR:$rs2)>;
+let Predicates = [HasStdExtZbbOrZbp, IsRV32] in
+def : Pat<(or (and GPR:$rs2, 0xFFFF0000), (srl GPR:$rs1, (i32 16))),
+ (PACKU GPR:$rs1, GPR:$rs2)>;
+let Predicates = [HasStdExtZbbOrZbp, IsRV64] in
+def : Pat<(or (and GPR:$rs2, 0xFFFFFFFF00000000), (srl GPR:$rs1, (i64 32))),
+ (PACKU GPR:$rs1, GPR:$rs2)>;
+let Predicates = [HasStdExtZbbOrZbp] in
+def : Pat<(or (and (shl GPR:$rs2, (XLenVT 8)), 0xFF00),
+ (and GPR:$rs1, 0x00FF)),
+ (PACKH GPR:$rs1, GPR:$rs2)>;
+
+let Predicates = [HasStdExtZbp, IsRV32] in {
+def : Pat<(or (or (and (shl GPR:$rs1, (i32 8)), (i32 0x00FF0000)),
+ (and GPR:$rs1, (i32 0xFF0000FF))),
+ (and (srl GPR:$rs1, (i32 8)), (i32 0x0000FF00))),
+ (SHFLI GPR:$rs1, (i32 8))>;
+def : Pat<(or (or (and (shl GPR:$rs1, (i32 4)), (i32 0x0F000F00)),
+ (and GPR:$rs1, (i32 0xF00FF00F))),
+ (and (srl GPR:$rs1, (i32 4)), (i32 0x00F000F0))),
+ (SHFLI GPR:$rs1, (i32 4))>;
+def : Pat<(or (or (and (shl GPR:$rs1, (i32 2)), (i32 0x30303030)),
+ (and GPR:$rs1, (i32 0xC3C3C3C3))),
+ (and (srl GPR:$rs1, (i32 2)), (i32 0x0C0C0C0C))),
+ (SHFLI GPR:$rs1, (i32 2))>;
+def : Pat<(or (or (and (shl GPR:$rs1, (i32 1)), (i32 0x44444444)),
+ (and GPR:$rs1, (i32 0x99999999))),
+ (and (srl GPR:$rs1, (i32 1)), (i32 0x22222222))),
+ (SHFLI GPR:$rs1, (i32 1))>;
+} // Predicates = [HasStdExtZbp, IsRV32]
+
+let Predicates = [HasStdExtZbp, IsRV64] in {
+def : Pat<(or (or (and (shl GPR:$rs1, (i64 16)), (i64 0x0000FFFF00000000)),
+ (and GPR:$rs1, (i64 0xFFFF00000000FFFF))),
+ (and (srl GPR:$rs1, (i64 16)), (i64 0x00000000FFFF0000))),
+ (SHFLI GPR:$rs1, (i64 16))>;
+def : Pat<(or (or (and (shl GPR:$rs1, (i64 8)), (i64 0x00FF000000FF0000)),
+ (and GPR:$rs1, (i64 0xFF0000FFFF0000FF))),
+ (and (srl GPR:$rs1, (i64 8)), (i64 0x0000FF000000FF00))),
+ (SHFLI GPR:$rs1, (i64 8))>;
+def : Pat<(or (or (and (shl GPR:$rs1, (i64 4)), (i64 0x0F000F000F000F00)),
+ (and GPR:$rs1, (i64 0xF00FF00FF00FF00F))),
+ (and (srl GPR:$rs1, (i64 4)), (i64 0x00F000F000F000F0))),
+ (SHFLI GPR:$rs1, (i64 4))>;
+def : Pat<(or (or (and (shl GPR:$rs1, (i64 2)), (i64 0x3030303030303030)),
+ (and GPR:$rs1, (i64 0xC3C3C3C3C3C3C3C3))),
+ (and (srl GPR:$rs1, (i64 2)), (i64 0x0C0C0C0C0C0C0C0C))),
+ (SHFLI GPR:$rs1, (i64 2))>;
+def : Pat<(or (or (and (shl GPR:$rs1, (i64 1)), (i64 0x4444444444444444)),
+ (and GPR:$rs1, (i64 0x9999999999999999))),
+ (and (srl GPR:$rs1, (i64 1)), (i64 0x2222222222222222))),
+ (SHFLI GPR:$rs1, (i64 1))>;
+} // Predicates = [HasStdExtZbp, IsRV64]
+
+let Predicates = [HasStdExtZbb, IsRV64] in {
+def : Pat<(and (add GPR:$rs, simm12:$simm12), (i64 0xFFFFFFFF)),
+ (ADDIWU GPR:$rs, simm12:$simm12)>;
+def : Pat<(SLLIUWPat GPR:$rs1, uimmlog2xlen:$shamt),
+ (SLLIUW GPR:$rs1, uimmlog2xlen:$shamt)>;
+def : Pat<(and (add GPR:$rs1, GPR:$rs2), (i64 0xFFFFFFFF)),
+ (ADDWU GPR:$rs1, GPR:$rs2)>;
+def : Pat<(and (sub GPR:$rs1, GPR:$rs2), (i64 0xFFFFFFFF)),
+ (SUBWU GPR:$rs1, GPR:$rs2)>;
+def : Pat<(add GPR:$rs1, (and GPR:$rs2, (i64 0xFFFFFFFF))),
+ (ADDUW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(sub GPR:$rs1, (and GPR:$rs2, (i64 0xFFFFFFFF))),
+ (SUBUW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(xor (riscv_sllw (xor GPR:$rs1, -1), GPR:$rs2), -1),
+ (SLOW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(xor (riscv_srlw (xor GPR:$rs1, -1), GPR:$rs2), -1),
+ (SROW GPR:$rs1, GPR:$rs2)>;
+} // Predicates = [HasStdExtZbb, IsRV64]
+
+let Predicates = [HasStdExtZbbOrZbp, IsRV64] in {
+def : Pat<(or (riscv_sllw (assertsexti32 GPR:$rs1), (assertsexti32 GPR:$rs2)),
+ (riscv_srlw (assertsexti32 GPR:$rs1),
+ (sub (i64 0), (assertsexti32 GPR:$rs2)))),
+ (ROLW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(or (riscv_sllw (assertsexti32 GPR:$rs1),
+ (sub (i64 0), (assertsexti32 GPR:$rs2))),
+ (riscv_srlw (assertsexti32 GPR:$rs1), (assertsexti32 GPR:$rs2))),
+ (RORW GPR:$rs1, GPR:$rs2)>;
+} // Predicates = [HasStdExtZbbOrZbp, IsRV64]
+
+let Predicates = [HasStdExtZbs, IsRV64] in {
+def : Pat<(and (xor (riscv_sllw 1, (assertsexti32 GPR:$rs2)), -1),
+ (assertsexti32 GPR:$rs1)),
+ (SBCLRW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(or (riscv_sllw 1, (assertsexti32 GPR:$rs2)),
+ (assertsexti32 GPR:$rs1)),
+ (SBSETW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(xor (riscv_sllw 1, (assertsexti32 GPR:$rs2)),
+ (assertsexti32 GPR:$rs1)),
+ (SBINVW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(and (riscv_srlw (assertsexti32 GPR:$rs1), (assertsexti32 GPR:$rs2)),
+ 1),
+ (SBEXTW GPR:$rs1, GPR:$rs2)>;
+} // Predicates = [HasStdExtZbs, IsRV64]
+
+let Predicates = [HasStdExtZbb, IsRV64] in {
+def : Pat<(SLOIWPat GPR:$rs1, uimmlog2xlen:$shamt),
+ (SLOIW GPR:$rs1, uimmlog2xlen:$shamt)>;
+def : Pat<(SROIWPat GPR:$rs1, uimmlog2xlen:$shamt),
+ (SROIW GPR:$rs1, uimmlog2xlen:$shamt)>;
+} // Predicates = [HasStdExtZbb, IsRV64]
+
+let Predicates = [HasStdExtZbbOrZbp, IsRV64] in
+def : Pat<(RORIWPat GPR:$rs1, uimmlog2xlen:$shamt),
+ (RORIW GPR:$rs1, uimmlog2xlen:$shamt)>;
+
+let Predicates = [HasStdExtZbp, IsRV64] in {
+def : Pat<(sext_inreg (or (or (and (srl GPR:$rs1, (i64 1)), (i64 0x55555555)),
+ GPR:$rs1),
+ (and (shl GPR:$rs1, (i64 1)), (i64 0xAAAAAAAA))),
+ i32),
+ (GORCIW GPR:$rs1, (i64 1))>;
+def : Pat<(sext_inreg (or (or (and (srl GPR:$rs1, (i64 2)), (i64 0x33333333)),
+ GPR:$rs1),
+ (and (shl GPR:$rs1, (i64 2)), (i64 0xCCCCCCCC))),
+ i32),
+ (GORCIW GPR:$rs1, (i64 2))>;
+def : Pat<(sext_inreg (or (or (and (srl GPR:$rs1, (i64 4)), (i64 0x0F0F0F0F)),
+ GPR:$rs1),
+ (and (shl GPR:$rs1, (i64 4)), (i64 0xF0F0F0F0))),
+ i32),
+ (GORCIW GPR:$rs1, (i64 4))>;
+def : Pat<(sext_inreg (or (or (and (srl GPR:$rs1, (i64 8)), (i64 0x00FF00FF)),
+ GPR:$rs1),
+ (and (shl GPR:$rs1, (i64 8)), (i64 0xFF00FF00))),
+ i32),
+ (GORCIW GPR:$rs1, (i64 8))>;
+def : Pat<(sext_inreg (or (or (and (srl GPR:$rs1, (i64 16)), (i64 0x0000FFFF)),
+ GPR:$rs1),
+ (and (shl GPR:$rs1, (i64 16)), (i64 0xFFFF0000))),
+ i32),
+ (GORCIW GPR:$rs1, (i64 16))>;
+def : Pat<(sext_inreg (or (or (srl (and GPR:$rs1, (i64 0xFFFF0000)), (i64 16)),
+ GPR:$rs1),
+ (shl GPR:$rs1, (i64 16))), i32),
+ (GORCIW GPR:$rs1, (i64 16))>;
+
+def : Pat<(sext_inreg (or (and (shl GPR:$rs1, (i64 1)), (i64 0xAAAAAAAA)),
+ (and (srl GPR:$rs1, (i64 1)), (i64 0x55555555))),
+ i32),
+ (GREVIW GPR:$rs1, (i64 1))>;
+def : Pat<(sext_inreg (or (and (shl GPR:$rs1, (i64 2)), (i64 0xCCCCCCCC)),
+ (and (srl GPR:$rs1, (i64 2)), (i64 0x33333333))),
+ i32),
+ (GREVIW GPR:$rs1, (i64 2))>;
+def : Pat<(sext_inreg (or (and (shl GPR:$rs1, (i64 4)), (i64 0xF0F0F0F0)),
+ (and (srl GPR:$rs1, (i64 4)), (i64 0x0F0F0F0F))),
+ i32),
+ (GREVIW GPR:$rs1, (i64 4))>;
+def : Pat<(sext_inreg (or (and (shl GPR:$rs1, (i64 8)), (i64 0xFF00FF00)),
+ (and (srl GPR:$rs1, (i64 8)), (i64 0x00FF00FF))),
+ i32),
+ (GREVIW GPR:$rs1, (i64 8))>;
+def : Pat<(sext_inreg (or (shl GPR:$rs1, (i64 16)),
+ (srl (and GPR:$rs1, 0xFFFF0000), (i64 16))), i32),
+ (GREVIW GPR:$rs1, (i64 16))>;
+def : Pat<(sra (bswap GPR:$rs1), (i64 32)), (GREVIW GPR:$rs1, (i64 24))>;
+def : Pat<(sra (bitreverse GPR:$rs1), (i64 32)), (GREVIW GPR:$rs1, (i64 31))>;
+} // Predicates = [HasStdExtZbp, IsRV64]
+
+let Predicates = [HasStdExtZbt, IsRV64] in {
+def : Pat<(riscv_selectcc (and (assertsexti32 GPR:$rs3), 31),
+ (i64 0),
+ (i64 17),
+ (assertsexti32 GPR:$rs1),
+ (or (riscv_sllw (assertsexti32 GPR:$rs1),
+ (and (assertsexti32 GPR:$rs3), 31)),
+ (riscv_srlw (assertsexti32 GPR:$rs2),
+ (sub (i64 32),
+ (assertsexti32 GPR:$rs3))))),
+ (FSLW GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
+def : Pat<(riscv_selectcc (and (assertsexti32 GPR:$rs3), 31),
+ (i64 0),
+ (i64 17),
+ (assertsexti32 GPR:$rs2),
+ (or (riscv_sllw (assertsexti32 GPR:$rs1),
+ (sub (i64 32),
+ (assertsexti32 GPR:$rs3))),
+ (riscv_srlw (assertsexti32 GPR:$rs2),
+ (and (assertsexti32 GPR:$rs3), 31)))),
+ (FSRW GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
+def : Pat<(FSRIWPat GPR:$rs1, GPR:$rs2, uimmlog2xlen:$shamt),
+ (FSRIW GPR:$rs1, GPR:$rs2, uimmlog2xlen:$shamt)>;
+} // Predicates = [HasStdExtZbt, IsRV64]
+
+let Predicates = [HasStdExtZbb, IsRV64] in {
+def : Pat<(add (ctlz (and GPR:$rs1, (i64 0xFFFFFFFF))), (i64 -32)),
+ (CLZW GPR:$rs1)>;
+// We don't pattern-match CTZW here as it has the same pattern and result as
+// RV64 CTZ
+def : Pat<(ctpop (and GPR:$rs1, (i64 0xFFFFFFFF))), (PCNTW GPR:$rs1)>;
+} // Predicates = [HasStdExtZbb, IsRV64]
+
+let Predicates = [HasStdExtZbbOrZbp, IsRV64] in {
+def : Pat<(sext_inreg (or (shl (assertsexti32 GPR:$rs2), (i64 16)),
+ (and (assertsexti32 GPR:$rs1), 0x000000000000FFFF)),
+ i32),
+ (PACKW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(or (and (assertsexti32 GPR:$rs2), 0xFFFFFFFFFFFF0000),
+ (srl (and (assertsexti32 GPR:$rs1), 0x00000000FFFF0000),
+ (i64 16))),
+ (PACKUW GPR:$rs1, GPR:$rs2)>;
+} // Predicates = [HasStdExtZbbOrZbp, IsRV64]