about summary refs log tree commit diff
path: root/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
diff options
context:
space:
mode:
author	Dimitry Andric <dim@FreeBSD.org>	2024-07-27 23:34:35 +0000
committer	Dimitry Andric <dim@FreeBSD.org>	2024-10-23 18:26:01 +0000
commit	0fca6ea1d4eea4c934cfff25ac9ee8ad6fe95583 (patch)
tree	6cf5ab1f05330c6773b1f3f64799d56a9c7a1faa /contrib/llvm-project/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
parent	6b9f7133aba44189d9625c352bc2c2a59baf18ef (diff)
parent	ac9a064cb179f3425b310fa2847f8764ac970a4d (diff)
Diffstat (limited to 'contrib/llvm-project/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp')
-rw-r--r--	contrib/llvm-project/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp	66
1 files changed, 45 insertions, 21 deletions
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
index d290dd82b760..79bcf5e8cd30 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
@@ -45,7 +45,6 @@ public:
bool isKImmOperand(const MachineOperand &Src) const;
bool isKUImmOperand(const MachineOperand &Src) const;
bool isKImmOrKUImmOperand(const MachineOperand &Src, bool &IsUnsigned) const;
- bool isReverseInlineImm(const MachineOperand &Src, int32_t &ReverseImm) const;
void copyExtraImplicitOps(MachineInstr &NewMI, MachineInstr &MI) const;
void shrinkScalarCompare(MachineInstr &MI) const;
void shrinkMIMG(MachineInstr &MI) const;
@@ -183,15 +182,36 @@ bool SIShrinkInstructions::isKImmOrKUImmOperand(const MachineOperand &Src,
return false;
}
-/// \returns true if the constant in \p Src should be replaced with a bitreverse
-/// of an inline immediate.
-bool SIShrinkInstructions::isReverseInlineImm(const MachineOperand &Src,
- int32_t &ReverseImm) const {
- if (!isInt<32>(Src.getImm()) || TII->isInlineConstant(Src))
- return false;
+/// \returns the opcode of an instruction a move immediate of the constant \p
+/// Src can be replaced with if the constant is replaced with \p ModifiedImm.
+/// i.e.
+///
+/// If the bitreverse of a constant is an inline immediate, reverse the
+/// immediate and return the bitreverse opcode.
+///
+/// If the bitwise negation of a constant is an inline immediate, reverse the
+/// immediate and return the bitwise not opcode.
+static unsigned canModifyToInlineImmOp32(const SIInstrInfo *TII,
+ const MachineOperand &Src,
+ int32_t &ModifiedImm, bool Scalar) {
+ if (TII->isInlineConstant(Src))
+ return 0;
+ int32_t SrcImm = static_cast<int32_t>(Src.getImm());
+
+ if (!Scalar) {
+ // We could handle the scalar case with here, but we would need to check
+ // that SCC is not live as S_NOT_B32 clobbers it. It's probably not worth
+ // it, as the reasonable values are already covered by s_movk_i32.
+ ModifiedImm = ~SrcImm;
+ if (TII->isInlineConstant(APInt(32, ModifiedImm)))
+ return AMDGPU::V_NOT_B32_e32;
+ }
+
+ ModifiedImm = reverseBits<int32_t>(SrcImm);
+ if (TII->isInlineConstant(APInt(32, ModifiedImm)))
+ return Scalar ? AMDGPU::S_BREV_B32 : AMDGPU::V_BFREV_B32_e32;
- ReverseImm = reverseBits<int32_t>(static_cast<int32_t>(Src.getImm()));
- return ReverseImm >= -16 && ReverseImm <= 64;
+ return 0;
}
/// Copy implicit register operands from specified instruction to this
@@ -251,9 +271,9 @@ void SIShrinkInstructions::shrinkScalarCompare(MachineInstr &MI) const {
const MCInstrDesc &NewDesc = TII->get(SOPKOpc);
- if ((TII->sopkIsZext(SOPKOpc) && isKUImmOperand(Src1)) ||
- (!TII->sopkIsZext(SOPKOpc) && isKImmOperand(Src1))) {
- if (!TII->sopkIsZext(SOPKOpc))
+ if ((SIInstrInfo::sopkIsZext(SOPKOpc) && isKUImmOperand(Src1)) ||
+ (!SIInstrInfo::sopkIsZext(SOPKOpc) && isKImmOperand(Src1))) {
+ if (!SIInstrInfo::sopkIsZext(SOPKOpc))
Src1.setImm(SignExtend64(Src1.getImm(), 32));
MI.setDesc(NewDesc);
}
@@ -801,10 +821,12 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
// XXX - not exactly a check for post-regalloc run.
MachineOperand &Src = MI.getOperand(1);
if (Src.isImm() && MI.getOperand(0).getReg().isPhysical()) {
- int32_t ReverseImm;
- if (isReverseInlineImm(Src, ReverseImm)) {
- MI.setDesc(TII->get(AMDGPU::V_BFREV_B32_e32));
- Src.setImm(ReverseImm);
+ int32_t ModImm;
+ unsigned ModOpcode =
+ canModifyToInlineImmOp32(TII, Src, ModImm, /*Scalar=*/false);
+ if (ModOpcode != 0) {
+ MI.setDesc(TII->get(ModOpcode));
+ Src.setImm(static_cast<int64_t>(ModImm));
continue;
}
}
@@ -863,13 +885,15 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
MachineOperand &Src = MI.getOperand(1);
if (Src.isImm() && Dst.getReg().isPhysical()) {
- int32_t ReverseImm;
+ unsigned ModOpc;
+ int32_t ModImm;
if (isKImmOperand(Src)) {
MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
Src.setImm(SignExtend64(Src.getImm(), 32));
- } else if (isReverseInlineImm(Src, ReverseImm)) {
- MI.setDesc(TII->get(AMDGPU::S_BREV_B32));
- Src.setImm(ReverseImm);
+ } else if ((ModOpc = canModifyToInlineImmOp32(TII, Src, ModImm,
+ /*Scalar=*/true))) {
+ MI.setDesc(TII->get(ModOpc));
+ Src.setImm(static_cast<int64_t>(ModImm));
}
}
@@ -1014,7 +1038,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
// Copy deadness from the old explicit vcc def to the new implicit def.
if (SDst && SDst->isDead())
- Inst32->findRegisterDefOperand(VCCReg)->setIsDead();
+ Inst32->findRegisterDefOperand(VCCReg, /*TRI=*/nullptr)->setIsDead();
MI.eraseFromParent();
foldImmediates(*Inst32);