Diffstat (limited to 'lib/Target/RISCV/RISCVExpandPseudoInsts.cpp'):
 lib/Target/RISCV/RISCVExpandPseudoInsts.cpp | 196 +++++++++++++++++++++-----
 1 file changed, 178 insertions(+), 18 deletions(-)
diff --git a/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
index 35c185aa5edd..1c5171a7b7a4 100644
--- a/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
+++ b/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
@@ -1,9 +1,8 @@
//===-- RISCVExpandPseudoInsts.cpp - Expand pseudo instructions -----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -55,6 +54,22 @@ private:
bool expandAtomicCmpXchg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI, bool IsMasked,
int Width, MachineBasicBlock::iterator &NextMBBI);
+ bool expandAuipcInstPair(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI,
+ unsigned FlagsHi, unsigned SecondOpcode);
+ bool expandLoadLocalAddress(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI);
+ bool expandLoadAddress(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI);
+ bool expandLoadTLSIEAddress(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI);
+ bool expandLoadTLSGDAddress(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI);
};
char RISCVExpandPseudo::ID = 0;
@@ -87,6 +102,9 @@ bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB,
case RISCV::PseudoAtomicLoadNand32:
return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 32,
NextMBBI);
+ case RISCV::PseudoAtomicLoadNand64:
+ return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 64,
+ NextMBBI);
case RISCV::PseudoMaskedAtomicSwap32:
return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Xchg, true, 32,
NextMBBI);
@@ -111,8 +129,18 @@ bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB,
NextMBBI);
case RISCV::PseudoCmpXchg32:
return expandAtomicCmpXchg(MBB, MBBI, false, 32, NextMBBI);
+ case RISCV::PseudoCmpXchg64:
+ return expandAtomicCmpXchg(MBB, MBBI, false, 64, NextMBBI);
case RISCV::PseudoMaskedCmpXchg32:
return expandAtomicCmpXchg(MBB, MBBI, true, 32, NextMBBI);
+ case RISCV::PseudoLLA:
+ return expandLoadLocalAddress(MBB, MBBI, NextMBBI);
+ case RISCV::PseudoLA:
+ return expandLoadAddress(MBB, MBBI, NextMBBI);
+ case RISCV::PseudoLA_TLS_IE:
+ return expandLoadTLSIEAddress(MBB, MBBI, NextMBBI);
+ case RISCV::PseudoLA_TLS_GD:
+ return expandLoadTLSGDAddress(MBB, MBBI, NextMBBI);
}
return false;
@@ -152,12 +180,61 @@ static unsigned getSCForRMW32(AtomicOrdering Ordering) {
}
}
+static unsigned getLRForRMW64(AtomicOrdering Ordering) {
+ switch (Ordering) {
+ default:
+ llvm_unreachable("Unexpected AtomicOrdering");
+ case AtomicOrdering::Monotonic:
+ return RISCV::LR_D;
+ case AtomicOrdering::Acquire:
+ return RISCV::LR_D_AQ;
+ case AtomicOrdering::Release:
+ return RISCV::LR_D;
+ case AtomicOrdering::AcquireRelease:
+ return RISCV::LR_D_AQ;
+ case AtomicOrdering::SequentiallyConsistent:
+ return RISCV::LR_D_AQ_RL;
+ }
+}
+
+static unsigned getSCForRMW64(AtomicOrdering Ordering) {
+ switch (Ordering) {
+ default:
+ llvm_unreachable("Unexpected AtomicOrdering");
+ case AtomicOrdering::Monotonic:
+ return RISCV::SC_D;
+ case AtomicOrdering::Acquire:
+ return RISCV::SC_D;
+ case AtomicOrdering::Release:
+ return RISCV::SC_D_RL;
+ case AtomicOrdering::AcquireRelease:
+ return RISCV::SC_D_RL;
+ case AtomicOrdering::SequentiallyConsistent:
+ return RISCV::SC_D_AQ_RL;
+ }
+}
+
+static unsigned getLRForRMW(AtomicOrdering Ordering, int Width) {
+ if (Width == 32)
+ return getLRForRMW32(Ordering);
+ if (Width == 64)
+ return getLRForRMW64(Ordering);
+ llvm_unreachable("Unexpected LR width\n");
+}
+
+static unsigned getSCForRMW(AtomicOrdering Ordering, int Width) {
+ if (Width == 32)
+ return getSCForRMW32(Ordering);
+ if (Width == 64)
+ return getSCForRMW64(Ordering);
+ llvm_unreachable("Unexpected SC width\n");
+}
+
static void doAtomicBinOpExpansion(const RISCVInstrInfo *TII, MachineInstr &MI,
DebugLoc DL, MachineBasicBlock *ThisMBB,
MachineBasicBlock *LoopMBB,
MachineBasicBlock *DoneMBB,
AtomicRMWInst::BinOp BinOp, int Width) {
- assert(Width == 32 && "RV64 atomic expansion currently unsupported");
unsigned DestReg = MI.getOperand(0).getReg();
unsigned ScratchReg = MI.getOperand(1).getReg();
unsigned AddrReg = MI.getOperand(2).getReg();
@@ -166,11 +243,11 @@ static void doAtomicBinOpExpansion(const RISCVInstrInfo *TII, MachineInstr &MI,
static_cast<AtomicOrdering>(MI.getOperand(4).getImm());
// .loop:
- // lr.w dest, (addr)
+ // lr.[w|d] dest, (addr)
// binop scratch, dest, val
- // sc.w scratch, scratch, (addr)
+ // sc.[w|d] scratch, scratch, (addr)
// bnez scratch, loop
- BuildMI(LoopMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
+ BuildMI(LoopMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
.addReg(AddrReg);
switch (BinOp) {
default:
@@ -184,7 +261,7 @@ static void doAtomicBinOpExpansion(const RISCVInstrInfo *TII, MachineInstr &MI,
.addImm(-1);
break;
}
- BuildMI(LoopMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg)
+ BuildMI(LoopMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
.addReg(AddrReg)
.addReg(ScratchReg);
BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
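
A sketch of what this now emits for PseudoAtomicLoadNand64 at
sequentially consistent ordering (register names are placeholders; the
not is folded into an xori with -1, as in the BuildMI calls above):

.loop:
        lr.d.aqrl dest, (addr)
        and       scratch, dest, val
        xori      scratch, scratch, -1
        sc.d.aqrl scratch, scratch, (addr)
        bnez      scratch, .loop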
@@ -219,7 +296,7 @@ static void doMaskedAtomicBinOpExpansion(
const RISCVInstrInfo *TII, MachineInstr &MI, DebugLoc DL,
MachineBasicBlock *ThisMBB, MachineBasicBlock *LoopMBB,
MachineBasicBlock *DoneMBB, AtomicRMWInst::BinOp BinOp, int Width) {
- assert(Width == 32 && "RV64 atomic expansion currently unsupported");
+ assert(Width == 32 && "Should never need to expand masked 64-bit operations");
unsigned DestReg = MI.getOperand(0).getReg();
unsigned ScratchReg = MI.getOperand(1).getReg();
unsigned AddrReg = MI.getOperand(2).getReg();
@@ -333,7 +410,7 @@ bool RISCVExpandPseudo::expandAtomicMinMaxOp(
MachineBasicBlock::iterator &NextMBBI) {
assert(IsMasked == true &&
"Should only need to expand masked atomic max/min");
- assert(Width == 32 && "RV64 atomic expansion currently unsupported");
+ assert(Width == 32 && "Should never need to expand masked 64-bit operations");
MachineInstr &MI = *MBBI;
DebugLoc DL = MI.getDebugLoc();
@@ -451,7 +528,6 @@ bool RISCVExpandPseudo::expandAtomicMinMaxOp(
bool RISCVExpandPseudo::expandAtomicCmpXchg(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, bool IsMasked,
int Width, MachineBasicBlock::iterator &NextMBBI) {
- assert(Width == 32 && "RV64 atomic expansion currently unsupported");
MachineInstr &MI = *MBBI;
DebugLoc DL = MI.getDebugLoc();
MachineFunction *MF = MBB.getParent();
@@ -483,18 +559,18 @@ bool RISCVExpandPseudo::expandAtomicCmpXchg(
if (!IsMasked) {
// .loophead:
- // lr.w dest, (addr)
+ // lr.[w|d] dest, (addr)
// bne dest, cmpval, done
- BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
+ BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
.addReg(AddrReg);
BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE))
.addReg(DestReg)
.addReg(CmpValReg)
.addMBB(DoneMBB);
// .looptail:
- // sc.w scratch, newval, (addr)
+ // sc.[w|d] scratch, newval, (addr)
// bnez scratch, loophead
- BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg)
+ BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
.addReg(AddrReg)
.addReg(NewValReg);
BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
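
For the new unmasked 64-bit cmpxchg at seq_cst, the resulting sequence
is roughly (placeholder register names):

.loophead:
        lr.d.aqrl dest, (addr)
        bne       dest, cmpval, .done
.looptail:
        sc.d.aqrl scratch, newval, (addr)
        bnez      scratch, .loophead
.done: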
@@ -507,7 +583,7 @@ bool RISCVExpandPseudo::expandAtomicCmpXchg(
// and scratch, dest, mask
// bne scratch, cmpval, done
unsigned MaskReg = MI.getOperand(5).getReg();
- BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
+ BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
.addReg(AddrReg);
BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), ScratchReg)
.addReg(DestReg)
@@ -525,7 +601,7 @@ bool RISCVExpandPseudo::expandAtomicCmpXchg(
// bnez scratch, loophead
insertMaskedMerge(TII, DL, LoopTailMBB, ScratchReg, DestReg, NewValReg,
MaskReg, ScratchReg);
- BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg)
+ BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
.addReg(AddrReg)
.addReg(ScratchReg);
BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
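
The masked variant stays 32-bit only. Assuming insertMaskedMerge emits
the standard masked-merge (scratch = dest ^ ((dest ^ newval) & mask)),
the masked expansion at seq_cst is roughly:

.loophead:
        lr.w.aqrl dest, (addr)
        and       scratch, dest, mask
        bne       scratch, cmpval, .done
.looptail:
        xor       scratch, dest, newval
        and       scratch, scratch, mask
        xor       scratch, dest, scratch
        sc.w.aqrl scratch, scratch, (addr)
        bnez      scratch, .loophead
.done: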
@@ -545,6 +621,90 @@ bool RISCVExpandPseudo::expandAtomicCmpXchg(
return true;
}
+bool RISCVExpandPseudo::expandAuipcInstPair(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI, unsigned FlagsHi,
+ unsigned SecondOpcode) {
+ MachineFunction *MF = MBB.getParent();
+ MachineInstr &MI = *MBBI;
+ DebugLoc DL = MI.getDebugLoc();
+
+ unsigned DestReg = MI.getOperand(0).getReg();
+ const MachineOperand &Symbol = MI.getOperand(1);
+
+ MachineBasicBlock *NewMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+
+ // Tell AsmPrinter that we unconditionally want the symbol of this label to be
+ // emitted.
+ NewMBB->setLabelMustBeEmitted();
+
+ MF->insert(++MBB.getIterator(), NewMBB);
+
+ BuildMI(NewMBB, DL, TII->get(RISCV::AUIPC), DestReg)
+ .addDisp(Symbol, 0, FlagsHi);
+ BuildMI(NewMBB, DL, TII->get(SecondOpcode), DestReg)
+ .addReg(DestReg)
+ .addMBB(NewMBB, RISCVII::MO_PCREL_LO);
+
+ // Move all the rest of the instructions to NewMBB.
+ NewMBB->splice(NewMBB->end(), &MBB, std::next(MBBI), MBB.end());
+ // Update machine-CFG edges.
+ NewMBB->transferSuccessorsAndUpdatePHIs(&MBB);
+ // Make the original basic block fall-through to the new.
+ MBB.addSuccessor(NewMBB);
+
+ // Make sure live-ins are correctly attached to this new basic block.
+ LivePhysRegs LiveRegs;
+ computeAndAddLiveIns(LiveRegs, *NewMBB);
+
+ NextMBBI = MBB.end();
+ MI.eraseFromParent();
+ return true;
+}
+
+bool RISCVExpandPseudo::expandLoadLocalAddress(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI) {
+ return expandAuipcInstPair(MBB, MBBI, NextMBBI, RISCVII::MO_PCREL_HI,
+ RISCV::ADDI);
+}
+
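For PseudoLLA this produces the usual pc-relative pair, with the label
of NewMBB anchoring the %pcrel_lo (a0 and sym are placeholders):

.Lpcrel_hi0:
        auipc a0, %pcrel_hi(sym)
        addi  a0, a0, %pcrel_lo(.Lpcrel_hi0)
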
+bool RISCVExpandPseudo::expandLoadAddress(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI) {
+ MachineFunction *MF = MBB.getParent();
+
+ unsigned SecondOpcode;
+ unsigned FlagsHi;
+ if (MF->getTarget().isPositionIndependent()) {
+ const auto &STI = MF->getSubtarget<RISCVSubtarget>();
+ SecondOpcode = STI.is64Bit() ? RISCV::LD : RISCV::LW;
+ FlagsHi = RISCVII::MO_GOT_HI;
+ } else {
+ SecondOpcode = RISCV::ADDI;
+ FlagsHi = RISCVII::MO_PCREL_HI;
+ }
+ return expandAuipcInstPair(MBB, MBBI, NextMBBI, FlagsHi, SecondOpcode);
+}
+
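When position independent, PseudoLA instead loads the address from the
GOT; a sketch for RV64 (lw in place of ld on RV32):

.Lpcrel_hi1:
        auipc a0, %got_pcrel_hi(sym)
        ld    a0, %pcrel_lo(.Lpcrel_hi1)(a0)

Non-PIC code falls back to the same auipc/addi pair as PseudoLLA.
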
+bool RISCVExpandPseudo::expandLoadTLSIEAddress(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI) {
+ MachineFunction *MF = MBB.getParent();
+
+ const auto &STI = MF->getSubtarget<RISCVSubtarget>();
+ unsigned SecondOpcode = STI.is64Bit() ? RISCV::LD : RISCV::LW;
+ return expandAuipcInstPair(MBB, MBBI, NextMBBI, RISCVII::MO_TLS_GOT_HI,
+ SecondOpcode);
+}
+
+bool RISCVExpandPseudo::expandLoadTLSGDAddress(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI) {
+ return expandAuipcInstPair(MBB, MBBI, NextMBBI, RISCVII::MO_TLS_GD_HI,
+ RISCV::ADDI);
+}
+
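The TLS pseudos expand to the psABI sequences; sketches for RV64, with
a0 and sym as placeholders (lw replaces ld on RV32):

# PseudoLA_TLS_IE: load the thread-pointer offset from the GOT.
.Lpcrel_hi2:
        auipc a0, %tls_ie_pcrel_hi(sym)
        ld    a0, %pcrel_lo(.Lpcrel_hi2)(a0)

# PseudoLA_TLS_GD: compute the address of the tls_index object
# passed to __tls_get_addr.
.Lpcrel_hi3:
        auipc a0, %tls_gd_pcrel_hi(sym)
        addi  a0, a0, %pcrel_lo(.Lpcrel_hi3)
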
} // end of anonymous namespace
INITIALIZE_PASS(RISCVExpandPseudo, "riscv-expand-pseudo",