summaryrefslogtreecommitdiff
path: root/lib/Target/AMDGPU
diff options
context:
space:
mode:
Diffstat (limited to 'lib/Target/AMDGPU')
-rw-r--r--lib/Target/AMDGPU/AMDGPU.h4
-rw-r--r--lib/Target/AMDGPU/AMDGPU.td21
-rw-r--r--lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp8
-rw-r--r--lib/Target/AMDGPU/AMDGPUISelLowering.cpp10
-rw-r--r--lib/Target/AMDGPU/AMDGPUISelLowering.h2
-rw-r--r--lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp3
-rw-r--r--lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp1
-rw-r--r--lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp2881
-rw-r--r--lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp7
-rw-r--r--lib/Target/AMDGPU/AMDGPUSubtarget.cpp3
-rw-r--r--lib/Target/AMDGPU/AMDGPUSubtarget.h15
-rw-r--r--lib/Target/AMDGPU/AMDGPUTargetMachine.cpp18
-rw-r--r--lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp34
-rw-r--r--lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h3
-rw-r--r--lib/Target/AMDGPU/CMakeLists.txt1
-rw-r--r--lib/Target/AMDGPU/FLATInstructions.td34
-rw-r--r--lib/Target/AMDGPU/GCNRegPressure.cpp153
-rw-r--r--lib/Target/AMDGPU/GCNRegPressure.h61
-rw-r--r--lib/Target/AMDGPU/GCNSchedStrategy.cpp282
-rw-r--r--lib/Target/AMDGPU/GCNSchedStrategy.h24
-rw-r--r--lib/Target/AMDGPU/SIAnnotateControlFlow.cpp2
-rw-r--r--lib/Target/AMDGPU/SIISelLowering.cpp22
-rw-r--r--lib/Target/AMDGPU/SIISelLowering.h1
-rw-r--r--lib/Target/AMDGPU/SIInstrInfo.cpp310
-rw-r--r--lib/Target/AMDGPU/SIInstrInfo.h33
-rw-r--r--lib/Target/AMDGPU/SIInstructions.td7
-rw-r--r--lib/Target/AMDGPU/VOP2Instructions.td51
-rw-r--r--lib/Target/AMDGPU/VOP3Instructions.td3
28 files changed, 3744 insertions, 250 deletions
diff --git a/lib/Target/AMDGPU/AMDGPU.h b/lib/Target/AMDGPU/AMDGPU.h
index 8f6e1e7d8846..3f89702bed50 100644
--- a/lib/Target/AMDGPU/AMDGPU.h
+++ b/lib/Target/AMDGPU/AMDGPU.h
@@ -50,6 +50,10 @@ FunctionPass *createSIDebuggerInsertNopsPass();
FunctionPass *createSIInsertWaitsPass();
FunctionPass *createSIInsertWaitcntsPass();
FunctionPass *createAMDGPUCodeGenPreparePass(const GCNTargetMachine *TM = nullptr);
+FunctionPass *createAMDGPUMachineCFGStructurizerPass();
+
+void initializeAMDGPUMachineCFGStructurizerPass(PassRegistry&);
+extern char &AMDGPUMachineCFGStructurizerID;
ModulePass *createAMDGPUAnnotateKernelFeaturesPass(const TargetMachine *TM = nullptr);
void initializeAMDGPUAnnotateKernelFeaturesPass(PassRegistry &);
diff --git a/lib/Target/AMDGPU/AMDGPU.td b/lib/Target/AMDGPU/AMDGPU.td
index 2e5b78bbf7ef..b279bd61e180 100644
--- a/lib/Target/AMDGPU/AMDGPU.td
+++ b/lib/Target/AMDGPU/AMDGPU.td
@@ -61,6 +61,24 @@ def FeatureFlatAddressSpace : SubtargetFeature<"flat-address-space",
"Support flat address space"
>;
+def FeatureFlatInstOffsets : SubtargetFeature<"flat-inst-offsets",
+ "FlatInstOffsets",
+ "true",
+ "Flat instructions have immediate offset addressing mode"
+>;
+
+def FeatureFlatGlobalInsts : SubtargetFeature<"flat-global-insts",
+ "FlatGlobalInsts",
+ "true",
+ "Have global_* flat memory instructions"
+>;
+
+def FeatureFlatScratchInsts : SubtargetFeature<"flat-scratch-insts",
+ "FlatScratchInsts",
+ "true",
+ "Have scratch_* flat memory instructions"
+>;
+
def FeatureUnalignedBufferAccess : SubtargetFeature<"unaligned-buffer-access",
"UnalignedBufferAccess",
"true",
@@ -407,7 +425,8 @@ def FeatureGFX9 : SubtargetFeatureGeneration<"GFX9",
FeatureGCN3Encoding, FeatureCIInsts, Feature16BitInsts,
FeatureSMemRealTime, FeatureScalarStores, FeatureInv2PiInlineImm,
FeatureApertureRegs, FeatureGFX9Insts, FeatureVOP3P, FeatureVGPRIndexMode,
- FeatureFastFMAF32, FeatureDPP
+ FeatureFastFMAF32, FeatureDPP,
+ FeatureFlatInstOffsets, FeatureFlatGlobalInsts, FeatureFlatScratchInsts
]
>;
diff --git a/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index ccae36ced1f8..7c99752b881f 100644
--- a/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -136,8 +136,7 @@ private:
bool SelectMUBUFIntrinsicVOffset(SDValue Offset, SDValue &SOffset,
SDValue &ImmOffset, SDValue &VOffset) const;
- bool SelectFlat(SDValue Addr, SDValue &VAddr,
- SDValue &SLC, SDValue &TFE) const;
+ bool SelectFlat(SDValue Addr, SDValue &VAddr, SDValue &SLC) const;
bool SelectSMRDOffset(SDValue ByteOffsetNode, SDValue &Offset,
bool &Imm) const;
@@ -1278,10 +1277,9 @@ bool AMDGPUDAGToDAGISel::SelectMUBUFIntrinsicVOffset(SDValue Offset,
bool AMDGPUDAGToDAGISel::SelectFlat(SDValue Addr,
SDValue &VAddr,
- SDValue &SLC,
- SDValue &TFE) const {
+ SDValue &SLC) const {
VAddr = Addr;
- TFE = SLC = CurDAG->getTargetConstant(0, SDLoc(), MVT::i1);
+ SLC = CurDAG->getTargetConstant(0, SDLoc(), MVT::i1);
return true;
}
diff --git a/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 915d1d9e0e68..f80652b87373 100644
--- a/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -567,13 +567,19 @@ static bool hasSourceMods(const SDNode *N) {
case AMDGPUISD::INTERP_P1:
case AMDGPUISD::INTERP_P2:
case AMDGPUISD::DIV_SCALE:
+
+ // TODO: Should really be looking at the users of the bitcast. These are
+ // problematic because bitcasts are used to legalize all stores to integer
+ // types.
+ case ISD::BITCAST:
return false;
default:
return true;
}
}
-static bool allUsesHaveSourceMods(const SDNode *N, unsigned CostThreshold = 4) {
+bool AMDGPUTargetLowering::allUsesHaveSourceMods(const SDNode *N,
+ unsigned CostThreshold) {
// Some users (such as 3-operand FMA/MAD) must use a VOP3 encoding, and thus
// it is truly free to use a source modifier in all cases. If there are
// multiple users but for each one will necessitate using VOP3, there will be
@@ -2299,7 +2305,7 @@ static bool isU24(SDValue Op, SelectionDAG &DAG) {
EVT VT = Op.getValueType();
DAG.computeKnownBits(Op, Known);
- return (VT.getSizeInBits() - Known.Zero.countLeadingOnes()) <= 24;
+ return (VT.getSizeInBits() - Known.countMinLeadingZeros()) <= 24;
}
static bool isI24(SDValue Op, SelectionDAG &DAG) {
diff --git a/lib/Target/AMDGPU/AMDGPUISelLowering.h b/lib/Target/AMDGPU/AMDGPUISelLowering.h
index e1a5a2072418..4c588a7bafd0 100644
--- a/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -132,6 +132,8 @@ public:
return false;
}
+ static bool allUsesHaveSourceMods(const SDNode *N,
+ unsigned CostThreshold = 4);
bool isFAbsFree(EVT VT) const override;
bool isFNegFree(EVT VT) const override;
bool isTruncateFree(EVT Src, EVT Dest) const override;
diff --git a/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 8867ed689a31..a7eac080f885 100644
--- a/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -127,9 +127,9 @@ bool AMDGPUInstructionSelector::selectG_STORE(MachineInstr &I) const {
.add(I.getOperand(1))
.add(I.getOperand(0))
.addImm(0)
- .addImm(0)
.addImm(0);
+
// Now that we selected an opcode, we need to constrain the register
// operands to use appropriate classes.
bool Ret = constrainSelectedInstRegOperands(*Flat, TII, TRI, RBI);
@@ -393,7 +393,6 @@ bool AMDGPUInstructionSelector::selectG_LOAD(MachineInstr &I) const {
.add(I.getOperand(0))
.addReg(PtrReg)
.addImm(0)
- .addImm(0)
.addImm(0);
bool Ret = constrainSelectedInstRegOperands(*Flat, TII, TRI, RBI);
diff --git a/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index a2567a549028..9de302994e68 100644
--- a/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -33,6 +33,7 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo() {
const LLT P1 = LLT::pointer(1, 64);
const LLT P2 = LLT::pointer(2, 64);
+ setAction({G_CONSTANT, S32}, Legal);
setAction({G_CONSTANT, S64}, Legal);
setAction({G_GEP, P1}, Legal);
diff --git a/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp b/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
new file mode 100644
index 000000000000..6d2785ba1c60
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
@@ -0,0 +1,2881 @@
+//===- AMDGPUMachineCFGStructurizer.cpp - Machine code if conversion pass. ===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the machine instruction level CFG structurizer pass.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPU.h"
+#include "SIInstrInfo.h"
+#include "AMDGPUSubtarget.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/CFG.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegionInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+#include <tuple>
+using namespace llvm;
+
+#define DEBUG_TYPE "amdgpucfgstructurizer"
+
+namespace {
+class PHILinearizeDestIterator;
+
+class PHILinearize {
+ friend class PHILinearizeDestIterator;
+
+public:
+ typedef std::pair<unsigned, MachineBasicBlock *> PHISourceT;
+
+private:
+ typedef DenseSet<PHISourceT> PHISourcesT;
+ typedef struct {
+ unsigned DestReg;
+ DebugLoc DL;
+ PHISourcesT Sources;
+ } PHIInfoElementT;
+ typedef SmallPtrSet<PHIInfoElementT *, 2> PHIInfoT;
+ PHIInfoT PHIInfo;
+
+ static unsigned phiInfoElementGetDest(PHIInfoElementT *Info);
+ static void phiInfoElementSetDef(PHIInfoElementT *Info, unsigned NewDef);
+ static PHISourcesT &phiInfoElementGetSources(PHIInfoElementT *Info);
+ static void phiInfoElementAddSource(PHIInfoElementT *Info, unsigned SourceReg,
+ MachineBasicBlock *SourceMBB);
+ static void phiInfoElementRemoveSource(PHIInfoElementT *Info,
+ unsigned SourceReg,
+ MachineBasicBlock *SourceMBB);
+ PHIInfoElementT *findPHIInfoElement(unsigned DestReg);
+ PHIInfoElementT *findPHIInfoElementFromSource(unsigned SourceReg,
+ MachineBasicBlock *SourceMBB);
+
+public:
+ bool findSourcesFromMBB(MachineBasicBlock *SourceMBB,
+ SmallVector<unsigned, 4> &Sources);
+ void addDest(unsigned DestReg, const DebugLoc &DL);
+ void replaceDef(unsigned OldDestReg, unsigned NewDestReg);
+ void deleteDef(unsigned DestReg);
+ void addSource(unsigned DestReg, unsigned SourceReg,
+ MachineBasicBlock *SourceMBB);
+ void removeSource(unsigned DestReg, unsigned SourceReg,
+ MachineBasicBlock *SourceMBB = nullptr);
+ bool findDest(unsigned SourceReg, MachineBasicBlock *SourceMBB,
+ unsigned &DestReg);
+ bool isSource(unsigned Reg, MachineBasicBlock *SourceMBB = nullptr);
+ unsigned getNumSources(unsigned DestReg);
+ void dump(MachineRegisterInfo *MRI);
+ void clear();
+
+ typedef PHISourcesT::iterator source_iterator;
+ typedef PHILinearizeDestIterator dest_iterator;
+
+ dest_iterator dests_begin();
+ dest_iterator dests_end();
+
+ source_iterator sources_begin(unsigned Reg);
+ source_iterator sources_end(unsigned Reg);
+};
+
+class PHILinearizeDestIterator {
+private:
+ PHILinearize::PHIInfoT::iterator Iter;
+
+public:
+ unsigned operator*() { return PHILinearize::phiInfoElementGetDest(*Iter); }
+ PHILinearizeDestIterator &operator++() {
+ ++Iter;
+ return *this;
+ }
+ bool operator==(const PHILinearizeDestIterator &I) const {
+ return I.Iter == Iter;
+ }
+ bool operator!=(const PHILinearizeDestIterator &I) const {
+ return I.Iter != Iter;
+ }
+
+ PHILinearizeDestIterator(PHILinearize::PHIInfoT::iterator I) : Iter(I) {}
+};
+
+unsigned PHILinearize::phiInfoElementGetDest(PHIInfoElementT *Info) {
+ return Info->DestReg;
+}
+
+void PHILinearize::phiInfoElementSetDef(PHIInfoElementT *Info,
+ unsigned NewDef) {
+ Info->DestReg = NewDef;
+}
+
+PHILinearize::PHISourcesT &
+PHILinearize::phiInfoElementGetSources(PHIInfoElementT *Info) {
+ return Info->Sources;
+}
+
+void PHILinearize::phiInfoElementAddSource(PHIInfoElementT *Info,
+ unsigned SourceReg,
+ MachineBasicBlock *SourceMBB) {
+ // Assertion ensures we don't use the same SourceMBB for the
+ // sources, because we cannot have different registers with
+ // identical predecessors, but we can have the same register for
+ // multiple predecessors.
+#if !defined(NDEBUG)
+ for (auto SI : phiInfoElementGetSources(Info)) {
+ assert((SI.second != SourceMBB || SourceReg == SI.first));
+ }
+#endif
+
+ phiInfoElementGetSources(Info).insert(PHISourceT(SourceReg, SourceMBB));
+}
+
+void PHILinearize::phiInfoElementRemoveSource(PHIInfoElementT *Info,
+ unsigned SourceReg,
+ MachineBasicBlock *SourceMBB) {
+ auto &Sources = phiInfoElementGetSources(Info);
+ SmallVector<PHISourceT, 4> ElimiatedSources;
+ for (auto SI : Sources) {
+ if (SI.first == SourceReg &&
+ (SI.second == nullptr || SI.second == SourceMBB)) {
+ ElimiatedSources.push_back(PHISourceT(SI.first, SI.second));
+ }
+ }
+
+ for (auto &Source : ElimiatedSources) {
+ Sources.erase(Source);
+ }
+}
+
+PHILinearize::PHIInfoElementT *
+PHILinearize::findPHIInfoElement(unsigned DestReg) {
+ for (auto I : PHIInfo) {
+ if (phiInfoElementGetDest(I) == DestReg) {
+ return I;
+ }
+ }
+ return nullptr;
+}
+
+PHILinearize::PHIInfoElementT *
+PHILinearize::findPHIInfoElementFromSource(unsigned SourceReg,
+ MachineBasicBlock *SourceMBB) {
+ for (auto I : PHIInfo) {
+ for (auto SI : phiInfoElementGetSources(I)) {
+ if (SI.first == SourceReg &&
+ (SI.second == nullptr || SI.second == SourceMBB)) {
+ return I;
+ }
+ }
+ }
+ return nullptr;
+}
+
+bool PHILinearize::findSourcesFromMBB(MachineBasicBlock *SourceMBB,
+ SmallVector<unsigned, 4> &Sources) {
+ bool FoundSource = false;
+ for (auto I : PHIInfo) {
+ for (auto SI : phiInfoElementGetSources(I)) {
+ if (SI.second == SourceMBB) {
+ FoundSource = true;
+ Sources.push_back(SI.first);
+ }
+ }
+ }
+ return FoundSource;
+}
+
+void PHILinearize::addDest(unsigned DestReg, const DebugLoc &DL) {
+ assert(findPHIInfoElement(DestReg) == nullptr && "Dest already exsists");
+ PHISourcesT EmptySet;
+ PHIInfoElementT *NewElement = new PHIInfoElementT();
+ NewElement->DestReg = DestReg;
+ NewElement->DL = DL;
+ NewElement->Sources = EmptySet;
+ PHIInfo.insert(NewElement);
+}
+
+void PHILinearize::replaceDef(unsigned OldDestReg, unsigned NewDestReg) {
+ phiInfoElementSetDef(findPHIInfoElement(OldDestReg), NewDestReg);
+}
+
+void PHILinearize::deleteDef(unsigned DestReg) {
+ PHIInfoElementT *InfoElement = findPHIInfoElement(DestReg);
+ PHIInfo.erase(InfoElement);
+ delete InfoElement;
+}
+
+void PHILinearize::addSource(unsigned DestReg, unsigned SourceReg,
+ MachineBasicBlock *SourceMBB) {
+ phiInfoElementAddSource(findPHIInfoElement(DestReg), SourceReg, SourceMBB);
+}
+
+void PHILinearize::removeSource(unsigned DestReg, unsigned SourceReg,
+ MachineBasicBlock *SourceMBB) {
+ phiInfoElementRemoveSource(findPHIInfoElement(DestReg), SourceReg, SourceMBB);
+}
+
+bool PHILinearize::findDest(unsigned SourceReg, MachineBasicBlock *SourceMBB,
+ unsigned &DestReg) {
+ PHIInfoElementT *InfoElement =
+ findPHIInfoElementFromSource(SourceReg, SourceMBB);
+ if (InfoElement != nullptr) {
+ DestReg = phiInfoElementGetDest(InfoElement);
+ return true;
+ }
+ return false;
+}
+
+bool PHILinearize::isSource(unsigned Reg, MachineBasicBlock *SourceMBB) {
+ unsigned DestReg;
+ return findDest(Reg, SourceMBB, DestReg);
+}
+
+unsigned PHILinearize::getNumSources(unsigned DestReg) {
+ return phiInfoElementGetSources(findPHIInfoElement(DestReg)).size();
+}
+
+void PHILinearize::dump(MachineRegisterInfo *MRI) {
+ const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
+ dbgs() << "=PHIInfo Start=\n";
+ for (auto PII : this->PHIInfo) {
+ PHIInfoElementT &Element = *PII;
+ dbgs() << "Dest: " << PrintReg(Element.DestReg, TRI)
+ << " Sources: {";
+ for (auto &SI : Element.Sources) {
+ dbgs() << PrintReg(SI.first, TRI) << "(BB#"
+ << SI.second->getNumber() << "),";
+ }
+ dbgs() << "}\n";
+ }
+ dbgs() << "=PHIInfo End=\n";
+}
+
+void PHILinearize::clear() { PHIInfo = PHIInfoT(); }
+
+PHILinearize::dest_iterator PHILinearize::dests_begin() {
+ return PHILinearizeDestIterator(PHIInfo.begin());
+}
+
+PHILinearize::dest_iterator PHILinearize::dests_end() {
+ return PHILinearizeDestIterator(PHIInfo.end());
+}
+
+PHILinearize::source_iterator PHILinearize::sources_begin(unsigned Reg) {
+ auto InfoElement = findPHIInfoElement(Reg);
+ return phiInfoElementGetSources(InfoElement).begin();
+}
+PHILinearize::source_iterator PHILinearize::sources_end(unsigned Reg) {
+ auto InfoElement = findPHIInfoElement(Reg);
+ return phiInfoElementGetSources(InfoElement).end();
+}
+
+class RegionMRT;
+class MBBMRT;
+
+static unsigned getPHINumInputs(MachineInstr &PHI) {
+ assert(PHI.isPHI());
+ return (PHI.getNumOperands() - 1) / 2;
+}
+
+static MachineBasicBlock *getPHIPred(MachineInstr &PHI, unsigned Index) {
+ assert(PHI.isPHI());
+ return PHI.getOperand(Index * 2 + 2).getMBB();
+}
+
+static void setPhiPred(MachineInstr &PHI, unsigned Index,
+ MachineBasicBlock *NewPred) {
+ PHI.getOperand(Index * 2 + 2).setMBB(NewPred);
+}
+
+static unsigned getPHISourceReg(MachineInstr &PHI, unsigned Index) {
+ assert(PHI.isPHI());
+ return PHI.getOperand(Index * 2 + 1).getReg();
+}
+
+static unsigned getPHIDestReg(MachineInstr &PHI) {
+ assert(PHI.isPHI());
+ return PHI.getOperand(0).getReg();
+}
+
+class LinearizedRegion {
+protected:
+ MachineBasicBlock *Entry;
+ // The exit block is part of the region, and is the last
+ // merge block before exiting the region.
+ MachineBasicBlock *Exit;
+ DenseSet<unsigned> LiveOuts;
+ SmallPtrSet<MachineBasicBlock *, 1> MBBs;
+ bool HasLoop;
+ LinearizedRegion *Parent;
+ RegionMRT *RMRT;
+
+ void storeLiveOutReg(MachineBasicBlock *MBB, unsigned Reg,
+ MachineInstr *DefInstr, const MachineRegisterInfo *MRI,
+ const TargetRegisterInfo *TRI, PHILinearize &PHIInfo);
+
+ void storeLiveOutRegRegion(RegionMRT *Region, unsigned Reg,
+ MachineInstr *DefInstr,
+ const MachineRegisterInfo *MRI,
+ const TargetRegisterInfo *TRI,
+ PHILinearize &PHIInfo);
+
+ void storeMBBLiveOuts(MachineBasicBlock *MBB, const MachineRegisterInfo *MRI,
+ const TargetRegisterInfo *TRI, PHILinearize &PHIInfo,
+ RegionMRT *TopRegion);
+
+ void storeLiveOuts(MachineBasicBlock *MBB, const MachineRegisterInfo *MRI,
+ const TargetRegisterInfo *TRI, PHILinearize &PHIInfo);
+
+ void storeLiveOuts(RegionMRT *Region, const MachineRegisterInfo *MRI,
+ const TargetRegisterInfo *TRI, PHILinearize &PHIInfo,
+ RegionMRT *TopRegion = nullptr);
+
+public:
+ void setRegionMRT(RegionMRT *Region) { RMRT = Region; }
+
+ RegionMRT *getRegionMRT() { return RMRT; }
+
+ void setParent(LinearizedRegion *P) { Parent = P; }
+
+ LinearizedRegion *getParent() { return Parent; }
+
+ void print(raw_ostream &OS, const TargetRegisterInfo *TRI = nullptr);
+
+ void setBBSelectRegIn(unsigned Reg);
+
+ unsigned getBBSelectRegIn();
+
+ void setBBSelectRegOut(unsigned Reg, bool IsLiveOut);
+
+ unsigned getBBSelectRegOut();
+
+ void setHasLoop(bool Value);
+
+ bool getHasLoop();
+
+ void addLiveOut(unsigned VReg);
+
+ void removeLiveOut(unsigned Reg);
+
+ void replaceLiveOut(unsigned OldReg, unsigned NewReg);
+
+ void replaceRegister(unsigned Register, unsigned NewRegister,
+ MachineRegisterInfo *MRI, bool ReplaceInside,
+ bool ReplaceOutside, bool IncludeLoopPHIs);
+
+ void replaceRegisterInsideRegion(unsigned Register, unsigned NewRegister,
+ bool IncludeLoopPHIs,
+ MachineRegisterInfo *MRI);
+
+ void replaceRegisterOutsideRegion(unsigned Register, unsigned NewRegister,
+ bool IncludeLoopPHIs,
+ MachineRegisterInfo *MRI);
+
+ DenseSet<unsigned> *getLiveOuts();
+
+ void setEntry(MachineBasicBlock *NewEntry);
+
+ MachineBasicBlock *getEntry();
+
+ void setExit(MachineBasicBlock *NewExit);
+
+ MachineBasicBlock *getExit();
+
+ void addMBB(MachineBasicBlock *MBB);
+
+ void addMBBs(LinearizedRegion *InnerRegion);
+
+ bool contains(MachineBasicBlock *MBB);
+
+ bool isLiveOut(unsigned Reg);
+
+ bool hasNoDef(unsigned Reg, MachineRegisterInfo *MRI);
+
+ void removeFalseRegisterKills(MachineRegisterInfo *MRI);
+
+ void initLiveOut(RegionMRT *Region, const MachineRegisterInfo *MRI,
+ const TargetRegisterInfo *TRI, PHILinearize &PHIInfo);
+
+ LinearizedRegion(MachineBasicBlock *MBB, const MachineRegisterInfo *MRI,
+ const TargetRegisterInfo *TRI, PHILinearize &PHIInfo);
+
+ LinearizedRegion();
+
+ ~LinearizedRegion();
+};
+
+class MRT {
+protected:
+ RegionMRT *Parent;
+ unsigned BBSelectRegIn;
+ unsigned BBSelectRegOut;
+
+public:
+ unsigned getBBSelectRegIn() { return BBSelectRegIn; }
+
+ unsigned getBBSelectRegOut() { return BBSelectRegOut; }
+
+ void setBBSelectRegIn(unsigned Reg) { BBSelectRegIn = Reg; }
+
+ void setBBSelectRegOut(unsigned Reg) { BBSelectRegOut = Reg; }
+
+ virtual RegionMRT *getRegionMRT() { return nullptr; }
+
+ virtual MBBMRT *getMBBMRT() { return nullptr; }
+
+ bool isRegion() { return getRegionMRT() != nullptr; }
+
+ bool isMBB() { return getMBBMRT() != nullptr; }
+
+ bool isRoot() { return Parent == nullptr; }
+
+ void setParent(RegionMRT *Region) { Parent = Region; }
+
+ RegionMRT *getParent() { return Parent; }
+
+ static MachineBasicBlock *
+ initializeMRT(MachineFunction &MF, const MachineRegionInfo *RegionInfo,
+ DenseMap<MachineRegion *, RegionMRT *> &RegionMap);
+
+ static RegionMRT *buildMRT(MachineFunction &MF,
+ const MachineRegionInfo *RegionInfo,
+ const SIInstrInfo *TII,
+ MachineRegisterInfo *MRI);
+
+ virtual void dump(const TargetRegisterInfo *TRI, int depth = 0) = 0;
+
+ void dumpDepth(int depth) {
+ for (int i = depth; i > 0; --i) {
+ dbgs() << " ";
+ }
+ }
+
+ virtual ~MRT() {}
+};
+
+class MBBMRT : public MRT {
+ MachineBasicBlock *MBB;
+
+public:
+ virtual MBBMRT *getMBBMRT() { return this; }
+
+ MachineBasicBlock *getMBB() { return MBB; }
+
+ virtual void dump(const TargetRegisterInfo *TRI, int depth = 0) {
+ dumpDepth(depth);
+ dbgs() << "MBB: " << getMBB()->getNumber();
+ dbgs() << " In: " << PrintReg(getBBSelectRegIn(), TRI);
+ dbgs() << ", Out: " << PrintReg(getBBSelectRegOut(), TRI) << "\n";
+ }
+
+ MBBMRT(MachineBasicBlock *BB) : MBB(BB) {
+ setParent(nullptr);
+ setBBSelectRegOut(0);
+ setBBSelectRegIn(0);
+ }
+};
+
+class RegionMRT : public MRT {
+protected:
+ MachineRegion *Region;
+ LinearizedRegion *LRegion;
+ MachineBasicBlock *Succ;
+
+ SetVector<MRT *> Children;
+
+public:
+ virtual RegionMRT *getRegionMRT() { return this; }
+
+ void setLinearizedRegion(LinearizedRegion *LinearizeRegion) {
+ LRegion = LinearizeRegion;
+ }
+
+ LinearizedRegion *getLinearizedRegion() { return LRegion; }
+
+ MachineRegion *getMachineRegion() { return Region; }
+
+ unsigned getInnerOutputRegister() {
+ return (*(Children.begin()))->getBBSelectRegOut();
+ }
+
+ void addChild(MRT *Tree) { Children.insert(Tree); }
+
+ SetVector<MRT *> *getChildren() { return &Children; }
+
+ virtual void dump(const TargetRegisterInfo *TRI, int depth = 0) {
+ dumpDepth(depth);
+ dbgs() << "Region: " << (void *)Region;
+ dbgs() << " In: " << PrintReg(getBBSelectRegIn(), TRI);
+ dbgs() << ", Out: " << PrintReg(getBBSelectRegOut(), TRI) << "\n";
+
+ dumpDepth(depth);
+ if (getSucc())
+ dbgs() << "Succ: " << getSucc()->getNumber() << "\n";
+ else
+ dbgs() << "Succ: none \n";
+ for (auto MRTI : Children) {
+ MRTI->dump(TRI, depth + 1);
+ }
+ }
+
+ MRT *getEntryTree() { return Children.back(); }
+
+ MRT *getExitTree() { return Children.front(); }
+
+ MachineBasicBlock *getEntry() {
+ MRT *Tree = Children.back();
+ return (Tree->isRegion()) ? Tree->getRegionMRT()->getEntry()
+ : Tree->getMBBMRT()->getMBB();
+ }
+
+ MachineBasicBlock *getExit() {
+ MRT *Tree = Children.front();
+ return (Tree->isRegion()) ? Tree->getRegionMRT()->getExit()
+ : Tree->getMBBMRT()->getMBB();
+ }
+
+ void setSucc(MachineBasicBlock *MBB) { Succ = MBB; }
+
+ MachineBasicBlock *getSucc() { return Succ; }
+
+ bool contains(MachineBasicBlock *MBB) {
+ for (auto CI : Children) {
+ if (CI->isMBB()) {
+ if (MBB == CI->getMBBMRT()->getMBB()) {
+ return true;
+ }
+ } else {
+ if (CI->getRegionMRT()->contains(MBB)) {
+ return true;
+ } else if (CI->getRegionMRT()->getLinearizedRegion() != nullptr &&
+ CI->getRegionMRT()->getLinearizedRegion()->contains(MBB)) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ void replaceLiveOutReg(unsigned Register, unsigned NewRegister) {
+ LinearizedRegion *LRegion = getLinearizedRegion();
+ LRegion->replaceLiveOut(Register, NewRegister);
+ for (auto &CI : Children) {
+ if (CI->isRegion()) {
+ CI->getRegionMRT()->replaceLiveOutReg(Register, NewRegister);
+ }
+ }
+ }
+
+ RegionMRT(MachineRegion *MachineRegion)
+ : Region(MachineRegion), LRegion(nullptr), Succ(nullptr) {
+ setParent(nullptr);
+ setBBSelectRegOut(0);
+ setBBSelectRegIn(0);
+ }
+
+ virtual ~RegionMRT() {
+ if (LRegion) {
+ delete LRegion;
+ }
+
+ for (auto CI : Children) {
+ delete &(*CI);
+ }
+ }
+};
+
+static unsigned createBBSelectReg(const SIInstrInfo *TII,
+ MachineRegisterInfo *MRI) {
+ return MRI->createVirtualRegister(TII->getPreferredSelectRegClass(32));
+}
+
+MachineBasicBlock *
+MRT::initializeMRT(MachineFunction &MF, const MachineRegionInfo *RegionInfo,
+ DenseMap<MachineRegion *, RegionMRT *> &RegionMap) {
+ for (auto &MFI : MF) {
+ MachineBasicBlock *ExitMBB = &MFI;
+ if (ExitMBB->succ_size() == 0) {
+ return ExitMBB;
+ }
+ }
+ llvm_unreachable("CFG has no exit block");
+ return nullptr;
+}
+
+RegionMRT *MRT::buildMRT(MachineFunction &MF,
+ const MachineRegionInfo *RegionInfo,
+ const SIInstrInfo *TII, MachineRegisterInfo *MRI) {
+ SmallPtrSet<MachineRegion *, 4> PlacedRegions;
+ DenseMap<MachineRegion *, RegionMRT *> RegionMap;
+ MachineRegion *TopLevelRegion = RegionInfo->getTopLevelRegion();
+ RegionMRT *Result = new RegionMRT(TopLevelRegion);
+ RegionMap[TopLevelRegion] = Result;
+
+ // Insert the exit block first, we need it to be the merge node
+ // for the top level region.
+ MachineBasicBlock *Exit = initializeMRT(MF, RegionInfo, RegionMap);
+
+ unsigned BBSelectRegIn = createBBSelectReg(TII, MRI);
+ MBBMRT *ExitMRT = new MBBMRT(Exit);
+ RegionMap[RegionInfo->getRegionFor(Exit)]->addChild(ExitMRT);
+ ExitMRT->setBBSelectRegIn(BBSelectRegIn);
+
+ for (auto MBBI : post_order(&(MF.front()))) {
+ MachineBasicBlock *MBB = &(*MBBI);
+
+ // Skip Exit since we already added it
+ if (MBB == Exit) {
+ continue;
+ }
+
+ DEBUG(dbgs() << "Visiting BB#" << MBB->getNumber() << "\n");
+ MBBMRT *NewMBB = new MBBMRT(MBB);
+ MachineRegion *Region = RegionInfo->getRegionFor(MBB);
+
+ // Ensure we have the MRT region
+ if (RegionMap.count(Region) == 0) {
+ RegionMRT *NewMRTRegion = new RegionMRT(Region);
+ RegionMap[Region] = NewMRTRegion;
+
+ // Ensure all parents are in the RegionMap
+ MachineRegion *Parent = Region->getParent();
+ while (RegionMap.count(Parent) == 0) {
+ RegionMRT *NewMRTParent = new RegionMRT(Parent);
+ NewMRTParent->addChild(NewMRTRegion);
+ NewMRTRegion->setParent(NewMRTParent);
+ RegionMap[Parent] = NewMRTParent;
+ NewMRTRegion = NewMRTParent;
+ Parent = Parent->getParent();
+ }
+ RegionMap[Parent]->addChild(NewMRTRegion);
+ NewMRTRegion->setParent(RegionMap[Parent]);
+ }
+
+ // Add MBB to Region MRT
+ RegionMap[Region]->addChild(NewMBB);
+ NewMBB->setParent(RegionMap[Region]);
+ RegionMap[Region]->setSucc(Region->getExit());
+ }
+ return Result;
+}
+
+void LinearizedRegion::storeLiveOutReg(MachineBasicBlock *MBB, unsigned Reg,
+ MachineInstr *DefInstr,
+ const MachineRegisterInfo *MRI,
+ const TargetRegisterInfo *TRI,
+ PHILinearize &PHIInfo) {
+ if (TRI->isVirtualRegister(Reg)) {
+ DEBUG(dbgs() << "Considering Register: " << PrintReg(Reg, TRI) << "\n");
+ // If this is a source register to a PHI we are chaining, it
+ // must be live out.
+ if (PHIInfo.isSource(Reg)) {
+ DEBUG(dbgs() << "Add LiveOut (PHI): " << PrintReg(Reg, TRI) << "\n");
+ addLiveOut(Reg);
+ } else {
+ // If this is live out of the MBB
+ for (auto &UI : MRI->use_operands(Reg)) {
+ if (UI.getParent()->getParent() != MBB) {
+ DEBUG(dbgs() << "Add LiveOut (MBB BB#" << MBB->getNumber()
+ << "): " << PrintReg(Reg, TRI) << "\n");
+ addLiveOut(Reg);
+ } else {
+ // If the use is in the same MBB we have to make sure
+ // it is after the def, otherwise it is live out in a loop
+ MachineInstr *UseInstr = UI.getParent();
+ for (MachineBasicBlock::instr_iterator
+ MII = UseInstr->getIterator(),
+ MIE = UseInstr->getParent()->instr_end();
+ MII != MIE; ++MII) {
+ if ((&(*MII)) == DefInstr) {
+ DEBUG(dbgs() << "Add LiveOut (Loop): " << PrintReg(Reg, TRI)
+ << "\n");
+ addLiveOut(Reg);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+void LinearizedRegion::storeLiveOutRegRegion(RegionMRT *Region, unsigned Reg,
+ MachineInstr *DefInstr,
+ const MachineRegisterInfo *MRI,
+ const TargetRegisterInfo *TRI,
+ PHILinearize &PHIInfo) {
+ if (TRI->isVirtualRegister(Reg)) {
+ DEBUG(dbgs() << "Considering Register: " << PrintReg(Reg, TRI) << "\n");
+ for (auto &UI : MRI->use_operands(Reg)) {
+ if (!Region->contains(UI.getParent()->getParent())) {
+ DEBUG(dbgs() << "Add LiveOut (Region " << (void *)Region
+ << "): " << PrintReg(Reg, TRI) << "\n");
+ addLiveOut(Reg);
+ }
+ }
+ }
+}
+
+void LinearizedRegion::storeLiveOuts(MachineBasicBlock *MBB,
+ const MachineRegisterInfo *MRI,
+ const TargetRegisterInfo *TRI,
+ PHILinearize &PHIInfo) {
+ DEBUG(dbgs() << "-Store Live Outs Begin (BB#" << MBB->getNumber() << ")-\n");
+ for (auto &II : *MBB) {
+ for (auto &RI : II.defs()) {
+ storeLiveOutReg(MBB, RI.getReg(), RI.getParent(), MRI, TRI, PHIInfo);
+ }
+ for (auto &IRI : II.implicit_operands()) {
+ if (IRI.isDef()) {
+ storeLiveOutReg(MBB, IRI.getReg(), IRI.getParent(), MRI, TRI, PHIInfo);
+ }
+ }
+ }
+
+ // If we have a successor with a PHI, source coming from this MBB we have to
+ // add the register as live out
+ for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
+ E = MBB->succ_end();
+ SI != E; ++SI) {
+ for (auto &II : *(*SI)) {
+ if (II.isPHI()) {
+ MachineInstr &PHI = II;
+ int numPreds = getPHINumInputs(PHI);
+ for (int i = 0; i < numPreds; ++i) {
+ if (getPHIPred(PHI, i) == MBB) {
+ unsigned PHIReg = getPHISourceReg(PHI, i);
+ DEBUG(dbgs() << "Add LiveOut (PhiSource BB#" << MBB->getNumber()
+ << " -> BB#" << (*SI)->getNumber()
+ << "): " << PrintReg(PHIReg, TRI) << "\n");
+ addLiveOut(PHIReg);
+ }
+ }
+ }
+ }
+ }
+
+ DEBUG(dbgs() << "-Store Live Outs Endn-\n");
+}
+
+void LinearizedRegion::storeMBBLiveOuts(MachineBasicBlock *MBB,
+ const MachineRegisterInfo *MRI,
+ const TargetRegisterInfo *TRI,
+ PHILinearize &PHIInfo,
+ RegionMRT *TopRegion) {
+ for (auto &II : *MBB) {
+ for (auto &RI : II.defs()) {
+ storeLiveOutRegRegion(TopRegion, RI.getReg(), RI.getParent(), MRI, TRI,
+ PHIInfo);
+ }
+ for (auto &IRI : II.implicit_operands()) {
+ if (IRI.isDef()) {
+ storeLiveOutRegRegion(TopRegion, IRI.getReg(), IRI.getParent(), MRI,
+ TRI, PHIInfo);
+ }
+ }
+ }
+}
+
+// Collect the live-out registers of Region. All defs in the region's child
+// MBBs and sub-regions are examined; additionally, when called on the
+// outermost region (CurrentTopRegion == nullptr), PHI inputs in the region
+// successor that come from inside the region are added as live-outs.
+void LinearizedRegion::storeLiveOuts(RegionMRT *Region,
+                                     const MachineRegisterInfo *MRI,
+                                     const TargetRegisterInfo *TRI,
+                                     PHILinearize &PHIInfo,
+                                     RegionMRT *CurrentTopRegion) {
+  MachineBasicBlock *Exit = Region->getSucc();
+
+  RegionMRT *TopRegion =
+      CurrentTopRegion == nullptr ? Region : CurrentTopRegion;
+
+  // Check if exit is end of function, if so, no live outs.
+  if (Exit == nullptr)
+    return;
+
+  auto Children = Region->getChildren();
+  for (auto CI : *Children) {
+    if (CI->isMBB()) {
+      auto MBB = CI->getMBBMRT()->getMBB();
+      storeMBBLiveOuts(MBB, MRI, TRI, PHIInfo, TopRegion);
+    } else {
+      LinearizedRegion *SubRegion = CI->getRegionMRT()->getLinearizedRegion();
+      // We should be limited to only store registers that are live out from
+      // the linearized region.
+      for (auto MBBI : SubRegion->MBBs) {
+        storeMBBLiveOuts(MBBI, MRI, TRI, PHIInfo, TopRegion);
+      }
+    }
+  }
+
+  if (CurrentTopRegion == nullptr) {
+    auto Succ = Region->getSucc();
+    for (auto &II : *Succ) {
+      if (II.isPHI()) {
+        MachineInstr &PHI = II;
+        int numPreds = getPHINumInputs(PHI);
+        for (int i = 0; i < numPreds; ++i) {
+          // A PHI input whose predecessor lies inside the region carries a
+          // value out of the region.
+          if (Region->contains(getPHIPred(PHI, i))) {
+            unsigned PHIReg = getPHISourceReg(PHI, i);
+            DEBUG(dbgs() << "Add Region LiveOut (" << (void *)Region
+                         << "): " << PrintReg(PHIReg, TRI) << "\n");
+            addLiveOut(PHIReg);
+          }
+        }
+      }
+    }
+  }
+}
+
+// Dump a human-readable summary of this linearized region: member MBB
+// numbers, entry/exit block numbers (-1 for a missing exit), the bb-select
+// in/out registers, and the live-out register set.
+void LinearizedRegion::print(raw_ostream &OS, const TargetRegisterInfo *TRI) {
+  OS << "Linearized Region {";
+  bool IsFirst = true;
+  for (const auto &MBB : MBBs) {
+    if (IsFirst) {
+      IsFirst = false;
+    } else {
+      OS << " ,";
+    }
+    OS << MBB->getNumber();
+  }
+  OS << "} (" << Entry->getNumber() << ", "
+     << (Exit == nullptr ? -1 : Exit->getNumber())
+     << "): In:" << PrintReg(getBBSelectRegIn(), TRI)
+     << " Out:" << PrintReg(getBBSelectRegOut(), TRI) << " {";
+  for (auto &LI : LiveOuts) {
+    OS << PrintReg(LI, TRI) << " ";
+  }
+  OS << "} \n";
+}
+
+// Forward to the owning RegionMRT's incoming bb-select register.
+unsigned LinearizedRegion::getBBSelectRegIn() {
+  return getRegionMRT()->getBBSelectRegIn();
+}
+
+// Forward to the owning RegionMRT's outgoing bb-select register.
+unsigned LinearizedRegion::getBBSelectRegOut() {
+  return getRegionMRT()->getBBSelectRegOut();
+}
+
+// Record whether this linearized region contains a loop.
+void LinearizedRegion::setHasLoop(bool Value) { HasLoop = Value; }
+
+// Query whether this linearized region contains a loop.
+bool LinearizedRegion::getHasLoop() { return HasLoop; }
+
+// Add VReg to the region's live-out set (duplicates are harmless).
+void LinearizedRegion::addLiveOut(unsigned VReg) { LiveOuts.insert(VReg); }
+
+// Remove Reg from the live-out set if present; no-op otherwise.
+void LinearizedRegion::removeLiveOut(unsigned Reg) {
+  if (isLiveOut(Reg))
+    LiveOuts.erase(Reg);
+}
+
+// If OldReg is a live-out of this region, substitute NewReg for it.
+void LinearizedRegion::replaceLiveOut(unsigned OldReg, unsigned NewReg) {
+  if (isLiveOut(OldReg)) {
+    removeLiveOut(OldReg);
+    addLiveOut(NewReg);
+  }
+}
+
+// Rewrite uses of Register to NewRegister. ReplaceInside/ReplaceOutside
+// select whether uses inside and/or outside this region are rewritten;
+// IncludeLoopPHI additionally rewrites PHIs in the region entry block.
+// Defs are never rewritten, and live-out sets of this region and all its
+// ancestors are updated when replacing outside uses.
+void LinearizedRegion::replaceRegister(unsigned Register, unsigned NewRegister,
+                                       MachineRegisterInfo *MRI,
+                                       bool ReplaceInside, bool ReplaceOutside,
+                                       bool IncludeLoopPHI) {
+  assert(Register != NewRegister && "Cannot replace a reg with itself");
+
+  // Fixed typo in debug output: "Pepareing" -> "Preparing".
+  DEBUG(dbgs() << "Preparing to replace register (region): "
+               << PrintReg(Register, MRI->getTargetRegisterInfo()) << " with "
+               << PrintReg(NewRegister, MRI->getTargetRegisterInfo()) << "\n");
+
+  // If we are replacing outside, we also need to update the LiveOuts
+  // of this region and every ancestor region.
+  if (ReplaceOutside &&
+      (isLiveOut(Register) || this->getParent()->isLiveOut(Register))) {
+    LinearizedRegion *Current = this;
+    while (Current != nullptr && Current->getEntry() != nullptr) {
+      DEBUG(dbgs() << "Region before register replace\n");
+      DEBUG(Current->print(dbgs(), MRI->getTargetRegisterInfo()));
+      Current->replaceLiveOut(Register, NewRegister);
+      DEBUG(dbgs() << "Region after register replace\n");
+      DEBUG(Current->print(dbgs(), MRI->getTargetRegisterInfo()));
+      Current = Current->getParent();
+    }
+  }
+
+  // Walk all reg operands of Register; advance the iterator before any
+  // rewrite because setReg() invalidates the current position.
+  for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(Register),
+                                         E = MRI->reg_end();
+       I != E;) {
+    MachineOperand &O = *I;
+    ++I;
+
+    // We don't rewrite defs.
+    if (O.isDef())
+      continue;
+
+    bool IsInside = contains(O.getParent()->getParent());
+    // Entry-block PHIs sit on the region boundary: only rewrite them when
+    // IncludeLoopPHI is requested.
+    bool IsLoopPHI = IsInside && (O.getParent()->isPHI() &&
+                                  O.getParent()->getParent() == getEntry());
+    bool ShouldReplace = (IsInside && ReplaceInside) ||
+                         (!IsInside && ReplaceOutside) ||
+                         (IncludeLoopPHI && IsLoopPHI);
+    if (ShouldReplace) {
+
+      if (TargetRegisterInfo::isPhysicalRegister(NewRegister)) {
+        DEBUG(dbgs() << "Trying to substitute physical register: "
+                     << PrintReg(NewRegister, MRI->getTargetRegisterInfo())
+                     << "\n");
+        llvm_unreachable("Cannot substitute physical registers");
+      } else {
+        DEBUG(dbgs() << "Replacing register (region): "
+                     << PrintReg(Register, MRI->getTargetRegisterInfo())
+                     << " with "
+                     << PrintReg(NewRegister, MRI->getTargetRegisterInfo())
+                     << "\n");
+        O.setReg(NewRegister);
+      }
+    }
+  }
+}
+
+// Rewrite only the uses of Register that lie inside this region.
+void LinearizedRegion::replaceRegisterInsideRegion(unsigned Register,
+                                                   unsigned NewRegister,
+                                                   bool IncludeLoopPHIs,
+                                                   MachineRegisterInfo *MRI) {
+  replaceRegister(Register, NewRegister, MRI, true, false, IncludeLoopPHIs);
+}
+
+// Rewrite only the uses of Register that lie outside this region.
+void LinearizedRegion::replaceRegisterOutsideRegion(unsigned Register,
+                                                    unsigned NewRegister,
+                                                    bool IncludeLoopPHIs,
+                                                    MachineRegisterInfo *MRI) {
+  replaceRegister(Register, NewRegister, MRI, false, true, IncludeLoopPHIs);
+}
+
+// Expose the live-out register set (owned by this region).
+DenseSet<unsigned> *LinearizedRegion::getLiveOuts() { return &LiveOuts; }
+
+// Set the region's entry basic block.
+void LinearizedRegion::setEntry(MachineBasicBlock *NewEntry) {
+  Entry = NewEntry;
+}
+
+// Get the region's entry basic block.
+MachineBasicBlock *LinearizedRegion::getEntry() { return Entry; }
+
+// Set the region's exit basic block (may be nullptr).
+void LinearizedRegion::setExit(MachineBasicBlock *NewExit) { Exit = NewExit; }
+
+// Get the region's exit basic block (may be nullptr).
+MachineBasicBlock *LinearizedRegion::getExit() { return Exit; }
+
+// Add a basic block to this region's membership set.
+void LinearizedRegion::addMBB(MachineBasicBlock *MBB) { MBBs.insert(MBB); }
+
+// Absorb every basic block of InnerRegion into this region.
+void LinearizedRegion::addMBBs(LinearizedRegion *InnerRegion) {
+  for (const auto &MBB : InnerRegion->MBBs) {
+    addMBB(MBB);
+  }
+}
+
+// True if MBB is a member of this region.
+bool LinearizedRegion::contains(MachineBasicBlock *MBB) {
+  return MBBs.count(MBB) == 1;
+}
+
+// True if Reg is recorded as a live-out of this region.
+bool LinearizedRegion::isLiveOut(unsigned Reg) {
+  return LiveOuts.count(Reg) == 1;
+}
+
+// True if Reg has no defining instruction in the function.
+bool LinearizedRegion::hasNoDef(unsigned Reg, MachineRegisterInfo *MRI) {
+  return MRI->def_begin(Reg) == MRI->def_end();
+}
+
+// After the code has been structurized, what was flagged as kills
+// before are no longer register kills. Clear the kill flag on any use of a
+// virtual register whose (single) def lives in a different block.
+void LinearizedRegion::removeFalseRegisterKills(MachineRegisterInfo *MRI) {
+  const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
+  for (auto MBBI : MBBs) {
+    MachineBasicBlock *MBB = MBBI;
+    for (auto &II : *MBB) {
+      for (auto &RI : II.uses()) {
+        if (RI.isReg()) {
+          unsigned Reg = RI.getReg();
+          if (TRI->isVirtualRegister(Reg)) {
+            // Registers with no def at all cannot have their kill state
+            // reasoned about; skip them.
+            if (hasNoDef(Reg, MRI))
+              continue;
+            if (!MRI->hasOneDef(Reg)) {
+              DEBUG(this->getEntry()->getParent()->dump());
+              DEBUG(dbgs() << PrintReg(Reg, TRI) << "\n");
+            }
+
+            // NOTE(review): this "has NO defs" branch is unreachable — the
+            // hasNoDef() check above already continues in that case.
+            if (MRI->def_begin(Reg) == MRI->def_end()) {
+              DEBUG(dbgs() << "Register "
+                           << PrintReg(Reg, MRI->getTargetRegisterInfo())
+                           << " has NO defs\n");
+            } else if (!MRI->hasOneDef(Reg)) {
+              DEBUG(dbgs() << "Register "
+                           << PrintReg(Reg, MRI->getTargetRegisterInfo())
+                           << " has multiple defs\n");
+            }
+
+            assert(MRI->hasOneDef(Reg) && "Register has multiple definitions");
+            MachineOperand *Def = &(*(MRI->def_begin(Reg)));
+            MachineOperand *UseOperand = &(RI);
+            // A kill flag is only false when the use is in a different MBB
+            // than the def (the value may still be needed elsewhere).
+            bool UseIsOutsideDefMBB = Def->getParent()->getParent() != MBB;
+            if (UseIsOutsideDefMBB && UseOperand->isKill()) {
+              DEBUG(dbgs() << "Removing kill flag on register: "
+                           << PrintReg(Reg, TRI) << "\n");
+              UseOperand->setIsKill(false);
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
+// Initialize this region's live-out set from the given MRT region.
+void LinearizedRegion::initLiveOut(RegionMRT *Region,
+                                   const MachineRegisterInfo *MRI,
+                                   const TargetRegisterInfo *TRI,
+                                   PHILinearize &PHIInfo) {
+  storeLiveOuts(Region, MRI, TRI, PHIInfo);
+}
+
+// Construct a single-block region: MBB is entry, exit, and sole member,
+// and its live-outs are computed immediately.
+LinearizedRegion::LinearizedRegion(MachineBasicBlock *MBB,
+                                   const MachineRegisterInfo *MRI,
+                                   const TargetRegisterInfo *TRI,
+                                   PHILinearize &PHIInfo) {
+  setEntry(MBB);
+  setExit(MBB);
+  storeLiveOuts(MBB, MRI, TRI, PHIInfo);
+  MBBs.insert(MBB);
+  Parent = nullptr;
+}
+
+// Construct an empty region with no entry, exit, or parent.
+LinearizedRegion::LinearizedRegion() {
+  setEntry(nullptr);
+  setExit(nullptr);
+  Parent = nullptr;
+}
+
+// Trivial destructor: the region owns no heap resources directly.
+LinearizedRegion::~LinearizedRegion() {}
+
+// Machine-function pass that structurizes the machine CFG: it walks the
+// MachineRegionInfo region tree (see getAnalysisUsage) and rewrites each
+// region's blocks, PHIs, and terminators into a linearized form, tracking
+// the "bb select" register that chooses the next block.
+class AMDGPUMachineCFGStructurizer : public MachineFunctionPass {
+private:
+  const MachineRegionInfo *Regions;
+  const SIInstrInfo *TII;
+  const TargetRegisterInfo *TRI;
+  MachineRegisterInfo *MRI;
+  unsigned BBSelectRegister;
+  PHILinearize PHIInfo;
+  DenseMap<MachineBasicBlock *, MachineBasicBlock *> FallthroughMap;
+
+  // PHI bookkeeping: classify and record PHI inputs relative to a region.
+  void getPHIRegionIndices(RegionMRT *Region, MachineInstr &PHI,
+                           SmallVector<unsigned, 2> &RegionIndices);
+  void getPHIRegionIndices(LinearizedRegion *Region, MachineInstr &PHI,
+                           SmallVector<unsigned, 2> &RegionIndices);
+  void getPHINonRegionIndices(LinearizedRegion *Region, MachineInstr &PHI,
+                              SmallVector<unsigned, 2> &PHINonRegionIndices);
+
+  void storePHILinearizationInfoDest(
+      unsigned LDestReg, MachineInstr &PHI,
+      SmallVector<unsigned, 2> *RegionIndices = nullptr);
+
+  unsigned storePHILinearizationInfo(MachineInstr &PHI,
+                                     SmallVector<unsigned, 2> *RegionIndices);
+
+  void extractKilledPHIs(MachineBasicBlock *MBB);
+
+  bool shrinkPHI(MachineInstr &PHI, SmallVector<unsigned, 2> &PHIIndices,
+                 unsigned *ReplaceReg);
+
+  bool shrinkPHI(MachineInstr &PHI, unsigned CombinedSourceReg,
+                 MachineBasicBlock *SourceMBB,
+                 SmallVector<unsigned, 2> &PHIIndices, unsigned *ReplaceReg);
+
+  void replacePHI(MachineInstr &PHI, unsigned CombinedSourceReg,
+                  MachineBasicBlock *LastMerge,
+                  SmallVector<unsigned, 2> &PHIRegionIndices);
+  void replaceEntryPHI(MachineInstr &PHI, unsigned CombinedSourceReg,
+                       MachineBasicBlock *IfMBB,
+                       SmallVector<unsigned, 2> &PHIRegionIndices);
+  void replaceLiveOutRegs(MachineInstr &PHI,
+                          SmallVector<unsigned, 2> &PHIRegionIndices,
+                          unsigned CombinedSourceReg,
+                          LinearizedRegion *LRegion);
+  void rewriteRegionExitPHI(RegionMRT *Region, MachineBasicBlock *LastMerge,
+                            MachineInstr &PHI, LinearizedRegion *LRegion);
+
+  void rewriteRegionExitPHIs(RegionMRT *Region, MachineBasicBlock *LastMerge,
+                             LinearizedRegion *LRegion);
+  void rewriteRegionEntryPHI(LinearizedRegion *Region, MachineBasicBlock *IfMBB,
+                             MachineInstr &PHI);
+  void rewriteRegionEntryPHIs(LinearizedRegion *Region,
+                              MachineBasicBlock *IfMBB);
+
+  // Region-shape transforms and CFG surgery helpers.
+  bool regionIsSimpleIf(RegionMRT *Region);
+
+  void transformSimpleIfRegion(RegionMRT *Region);
+
+  void eliminateDeadBranchOperands(MachineBasicBlock::instr_iterator &II);
+
+  void insertUnconditionalBranch(MachineBasicBlock *MBB,
+                                 MachineBasicBlock *Dest,
+                                 const DebugLoc &DL = DebugLoc());
+
+  MachineBasicBlock *createLinearizedExitBlock(RegionMRT *Region);
+
+  void insertMergePHI(MachineBasicBlock *IfBB, MachineBasicBlock *CodeBB,
+                      MachineBasicBlock *MergeBB, unsigned DestRegister,
+                      unsigned IfSourceRegister, unsigned CodeSourceRegister,
+                      bool IsUndefIfSource = false);
+
+  MachineBasicBlock *createIfBlock(MachineBasicBlock *MergeBB,
+                                   MachineBasicBlock *CodeBBStart,
+                                   MachineBasicBlock *CodeBBEnd,
+                                   MachineBasicBlock *SelectBB, unsigned IfReg,
+                                   bool InheritPreds);
+
+  void prunePHIInfo(MachineBasicBlock *MBB);
+  void createEntryPHI(LinearizedRegion *CurrentRegion, unsigned DestReg);
+
+  void createEntryPHIs(LinearizedRegion *CurrentRegion);
+  void resolvePHIInfos(MachineBasicBlock *FunctionEntry);
+
+  void replaceRegisterWith(unsigned Register, unsigned NewRegister);
+
+  MachineBasicBlock *createIfRegion(MachineBasicBlock *MergeBB,
+                                    MachineBasicBlock *CodeBB,
+                                    LinearizedRegion *LRegion,
+                                    unsigned BBSelectRegIn,
+                                    unsigned BBSelectRegOut);
+
+  MachineBasicBlock *
+  createIfRegion(MachineBasicBlock *MergeMBB, LinearizedRegion *InnerRegion,
+                 LinearizedRegion *CurrentRegion, MachineBasicBlock *SelectBB,
+                 unsigned BBSelectRegIn, unsigned BBSelectRegOut);
+  void ensureCondIsNotKilled(SmallVector<MachineOperand, 1> Cond);
+
+  void rewriteCodeBBTerminator(MachineBasicBlock *CodeBB,
+                               MachineBasicBlock *MergeBB,
+                               unsigned BBSelectReg);
+
+  MachineInstr *getDefInstr(unsigned Reg);
+  void insertChainedPHI(MachineBasicBlock *IfBB, MachineBasicBlock *CodeBB,
+                        MachineBasicBlock *MergeBB,
+                        LinearizedRegion *InnerRegion, unsigned DestReg,
+                        unsigned SourceReg);
+  bool containsDef(MachineBasicBlock *MBB, LinearizedRegion *InnerRegion,
+                   unsigned Register);
+  void rewriteLiveOutRegs(MachineBasicBlock *IfBB, MachineBasicBlock *CodeBB,
+                          MachineBasicBlock *MergeBB,
+                          LinearizedRegion *InnerRegion,
+                          LinearizedRegion *LRegion);
+
+  void splitLoopPHI(MachineInstr &PHI, MachineBasicBlock *Entry,
+                    MachineBasicBlock *EntrySucc, LinearizedRegion *LRegion);
+  void splitLoopPHIs(MachineBasicBlock *Entry, MachineBasicBlock *EntrySucc,
+                     LinearizedRegion *LRegion);
+
+  MachineBasicBlock *splitExit(LinearizedRegion *LRegion);
+
+  MachineBasicBlock *splitEntry(LinearizedRegion *LRegion);
+
+  LinearizedRegion *initLinearizedRegion(RegionMRT *Region);
+
+  bool structurizeComplexRegion(RegionMRT *Region);
+
+  bool structurizeRegion(RegionMRT *Region);
+
+  bool structurizeRegions(RegionMRT *Region, bool isTopRegion);
+
+public:
+  static char ID;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addRequired<MachineRegionInfoPass>();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+  AMDGPUMachineCFGStructurizer() : MachineFunctionPass(ID) {
+    initializeAMDGPUMachineCFGStructurizerPass(*PassRegistry::getPassRegistry());
+  }
+
+  void initFallthroughMap(MachineFunction &MF);
+
+  void createLinearizedRegion(RegionMRT *Region, unsigned SelectOut);
+
+  unsigned initializeSelectRegisters(MRT *MRT, unsigned ExistingExitReg,
+                                     MachineRegisterInfo *MRI,
+                                     const SIInstrInfo *TII);
+
+  RegionMRT *RMRT;
+  void setRegionMRT(RegionMRT *RegionTree) { RMRT = RegionTree; }
+
+  RegionMRT *getRegionMRT() { return RMRT; }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+};
+}
+
+// Unique pass identifier (address is what matters, not the value).
+char AMDGPUMachineCFGStructurizer::ID = 0;
+
+// A region is a "simple if" when its entry has exactly two successors:
+// one edge that bypasses straight to the region successor, and one
+// single-exit "if" block whose only successor is the region successor.
+bool AMDGPUMachineCFGStructurizer::regionIsSimpleIf(RegionMRT *Region) {
+  MachineBasicBlock *Entry = Region->getEntry();
+  MachineBasicBlock *Succ = Region->getSucc();
+
+  // Anything other than a two-way branch cannot be a simple if.
+  if (Entry->succ_size() != 2)
+    return false;
+
+  bool SawBypassEdge = false;
+  bool SawIfBlock = false;
+  for (MachineBasicBlock *Current : Entry->successors()) {
+    if (Current == Succ)
+      SawBypassEdge = true;
+    else if (Current->succ_size() == 1 && *(Current->succ_begin()) == Succ)
+      SawIfBlock = true;
+  }
+
+  // Both shapes must be present for the simple-if pattern.
+  return SawIfBlock && SawBypassEdge;
+}
+
+// Delegate a simple-if region to the target hook that converts a
+// non-uniform if region between Entry and Exit.
+void AMDGPUMachineCFGStructurizer::transformSimpleIfRegion(RegionMRT *Region) {
+  MachineBasicBlock *Entry = Region->getEntry();
+  MachineBasicBlock *Exit = Region->getExit();
+  TII->convertNonUniformIfRegion(Entry, Exit);
+}
+
+// If MBB has exactly one successor, point every MBB operand of its
+// terminators at that successor (stale targets can remain after the CFG
+// has been rewritten).
+static void fixMBBTerminator(MachineBasicBlock *MBB) {
+
+  if (MBB->succ_size() == 1) {
+    auto *Succ = *(MBB->succ_begin());
+    for (auto &TI : MBB->terminators()) {
+      for (auto &UI : TI.uses()) {
+        if (UI.isMBB() && UI.getMBB() != Succ) {
+          UI.setMBB(Succ);
+        }
+      }
+    }
+  }
+}
+
+// Classify the exit block's successors as the (at most one) internal and
+// (at most one) external successor, then redirect any terminator target
+// that matches neither to the external successor.
+static void fixRegionTerminator(RegionMRT *Region) {
+  MachineBasicBlock *InternalSucc = nullptr;
+  MachineBasicBlock *ExternalSucc = nullptr;
+  LinearizedRegion *LRegion = Region->getLinearizedRegion();
+  auto Exit = LRegion->getExit();
+
+  SmallPtrSet<MachineBasicBlock *, 2> Successors;
+  for (MachineBasicBlock::const_succ_iterator SI = Exit->succ_begin(),
+                                              SE = Exit->succ_end();
+       SI != SE; ++SI) {
+    MachineBasicBlock *Succ = *SI;
+    if (LRegion->contains(Succ)) {
+      // Do not allow re-assign
+      assert(InternalSucc == nullptr);
+      InternalSucc = Succ;
+    } else {
+      // Do not allow re-assign
+      assert(ExternalSucc == nullptr);
+      ExternalSucc = Succ;
+    }
+  }
+
+  // NOTE(review): if Exit has no external successor, stray targets below
+  // would be redirected to nullptr — confirm callers guarantee one exists.
+  for (auto &TI : Exit->terminators()) {
+    for (auto &UI : TI.uses()) {
+      if (UI.isMBB()) {
+        auto Target = UI.getMBB();
+        if (Target != InternalSucc && Target != ExternalSucc) {
+          UI.setMBB(ExternalSucc);
+        }
+      }
+    }
+  }
+}
+
+// If a region is just a sequence of regions (and the exit
+// block in the case of the top level region), we can simply skip
+// linearizing it, because it is already linear.
+bool regionIsSequence(RegionMRT *Region) {
+  auto Children = Region->getChildren();
+  for (auto CI : *Children) {
+    if (!CI->isRegion()) {
+      // Any multi-way branch among the child MBBs breaks linearity.
+      if (CI->getMBBMRT()->getMBB()->succ_size() > 1) {
+        return false;
+      }
+    }
+  }
+  return true;
+}
+
+// Repair the terminators of every child of Region: plain MBBs get their
+// single-successor targets fixed, sub-regions get their exit terminators
+// redirected.
+void fixupRegionExits(RegionMRT *Region) {
+  auto Children = Region->getChildren();
+  for (auto CI : *Children) {
+    if (!CI->isRegion()) {
+      fixMBBTerminator(CI->getMBBMRT()->getMBB());
+    } else {
+      fixRegionTerminator(CI->getRegionMRT());
+    }
+  }
+}
+
+// Collect the input indices of PHI whose predecessor block lies inside
+// the (MRT) region.
+void AMDGPUMachineCFGStructurizer::getPHIRegionIndices(
+    RegionMRT *Region, MachineInstr &PHI,
+    SmallVector<unsigned, 2> &PHIRegionIndices) {
+  unsigned NumInputs = getPHINumInputs(PHI);
+  for (unsigned i = 0; i < NumInputs; ++i) {
+    MachineBasicBlock *Pred = getPHIPred(PHI, i);
+    if (Region->contains(Pred)) {
+      PHIRegionIndices.push_back(i);
+    }
+  }
+}
+
+// Collect the input indices of PHI whose predecessor block lies inside
+// the linearized region.
+void AMDGPUMachineCFGStructurizer::getPHIRegionIndices(
+    LinearizedRegion *Region, MachineInstr &PHI,
+    SmallVector<unsigned, 2> &PHIRegionIndices) {
+  unsigned NumInputs = getPHINumInputs(PHI);
+  for (unsigned i = 0; i < NumInputs; ++i) {
+    MachineBasicBlock *Pred = getPHIPred(PHI, i);
+    if (Region->contains(Pred)) {
+      PHIRegionIndices.push_back(i);
+    }
+  }
+}
+
+// Collect the input indices of PHI whose predecessor block lies outside
+// the linearized region.
+void AMDGPUMachineCFGStructurizer::getPHINonRegionIndices(
+    LinearizedRegion *Region, MachineInstr &PHI,
+    SmallVector<unsigned, 2> &PHINonRegionIndices) {
+  unsigned NumInputs = getPHINumInputs(PHI);
+  for (unsigned i = 0; i < NumInputs; ++i) {
+    MachineBasicBlock *Pred = getPHIPred(PHI, i);
+    if (!Region->contains(Pred)) {
+      PHINonRegionIndices.push_back(i);
+    }
+  }
+}
+
+// Record PHI's (source register, predecessor) pairs as sources of LDestReg
+// in PHIInfo. When RegionIndices is given, only those inputs are recorded;
+// otherwise all inputs are.
+void AMDGPUMachineCFGStructurizer::storePHILinearizationInfoDest(
+    unsigned LDestReg, MachineInstr &PHI,
+    SmallVector<unsigned, 2> *RegionIndices) {
+  if (RegionIndices) {
+    for (auto i : *RegionIndices) {
+      PHIInfo.addSource(LDestReg, getPHISourceReg(PHI, i), getPHIPred(PHI, i));
+    }
+  } else {
+    unsigned NumInputs = getPHINumInputs(PHI);
+    for (unsigned i = 0; i < NumInputs; ++i) {
+      PHIInfo.addSource(LDestReg, getPHISourceReg(PHI, i), getPHIPred(PHI, i));
+    }
+  }
+}
+
+// Create a fresh virtual register of the PHI dest's class, register it as a
+// linearization destination, and record the selected PHI inputs as its
+// sources. Returns the new register.
+unsigned AMDGPUMachineCFGStructurizer::storePHILinearizationInfo(
+    MachineInstr &PHI, SmallVector<unsigned, 2> *RegionIndices) {
+  unsigned DestReg = getPHIDestReg(PHI);
+  unsigned LinearizeDestReg =
+      MRI->createVirtualRegister(MRI->getRegClass(DestReg));
+  PHIInfo.addDest(LinearizeDestReg, PHI.getDebugLoc());
+  storePHILinearizationInfoDest(LinearizeDestReg, PHI, RegionIndices);
+  return LinearizeDestReg;
+}
+
+// Record every PHI in MBB into PHIInfo (dest plus all sources) and then
+// erase the PHI instructions themselves. The PHIs are collected first and
+// erased afterwards so the block iteration is never invalidated.
+void AMDGPUMachineCFGStructurizer::extractKilledPHIs(MachineBasicBlock *MBB) {
+  // We need to create a new chain for the killed phi, but there is no
+  // need to do the renaming outside or inside the block.
+  SmallPtrSet<MachineInstr *, 2> PHIs;
+  for (MachineBasicBlock::instr_iterator I = MBB->instr_begin(),
+                                         E = MBB->instr_end();
+       I != E; ++I) {
+    MachineInstr &Instr = *I;
+    if (Instr.isPHI()) {
+      unsigned PHIDestReg = getPHIDestReg(Instr);
+      // Fixed typo in debug output: "Extractking" -> "Extracting".
+      DEBUG(dbgs() << "Extracting killed phi:\n");
+      DEBUG(Instr.dump());
+      PHIs.insert(&Instr);
+      PHIInfo.addDest(PHIDestReg, Instr.getDebugLoc());
+      storePHILinearizationInfoDest(PHIDestReg, Instr);
+    }
+  }
+
+  for (auto PI : PHIs) {
+    PI->eraseFromParent();
+  }
+}
+
+// Return true if Index appears in PHIRegionIndices.
+// Take the vector by const reference: the previous signature copied the
+// whole SmallVector on every call (and this is called inside loops).
+static bool isPHIRegionIndex(const SmallVector<unsigned, 2> &PHIRegionIndices,
+                             unsigned Index) {
+  for (auto i : PHIRegionIndices) {
+    if (i == Index)
+      return true;
+  }
+  return false;
+}
+
+// Convenience overload: shrink PHI without inserting a combined source
+// (no register, no source block).
+bool AMDGPUMachineCFGStructurizer::shrinkPHI(MachineInstr &PHI,
+                                       SmallVector<unsigned, 2> &PHIIndices,
+                                       unsigned *ReplaceReg) {
+  return shrinkPHI(PHI, 0, nullptr, PHIIndices, ReplaceReg);
+}
+
+// Rebuild PHI without the inputs listed in PHIIndices, optionally adding
+// (CombinedSourceReg, SourceMBB) as a new input. If exactly one external
+// input remains, no new PHI is built: *ReplaceReg receives that input's
+// register and true is returned so the caller can substitute it directly.
+// The original PHI is always erased.
+bool AMDGPUMachineCFGStructurizer::shrinkPHI(MachineInstr &PHI,
+                                       unsigned CombinedSourceReg,
+                                       MachineBasicBlock *SourceMBB,
+                                       SmallVector<unsigned, 2> &PHIIndices,
+                                       unsigned *ReplaceReg) {
+  DEBUG(dbgs() << "Shrink PHI: ");
+  DEBUG(PHI.dump());
+  DEBUG(dbgs() << " to " << PrintReg(getPHIDestReg(PHI), TRI)
+               << "<def> = PHI(");
+
+  bool Replaced = false;
+  unsigned NumInputs = getPHINumInputs(PHI);
+  // -1: none seen yet; >=0: exactly one external input (its index);
+  // -2: more than one external input.
+  int SingleExternalEntryIndex = -1;
+  for (unsigned i = 0; i < NumInputs; ++i) {
+    if (!isPHIRegionIndex(PHIIndices, i)) {
+      if (SingleExternalEntryIndex == -1) {
+        // Single entry
+        SingleExternalEntryIndex = i;
+      } else {
+        // Multiple entries
+        SingleExternalEntryIndex = -2;
+      }
+    }
+  }
+
+  if (SingleExternalEntryIndex > -1) {
+    *ReplaceReg = getPHISourceReg(PHI, SingleExternalEntryIndex);
+    // We should not rewrite the code, we should only pick up the single value
+    // that represents the shrunk PHI.
+    Replaced = true;
+  } else {
+    MachineBasicBlock *MBB = PHI.getParent();
+    MachineInstrBuilder MIB =
+        BuildMI(*MBB, PHI, PHI.getDebugLoc(), TII->get(TargetOpcode::PHI),
+                getPHIDestReg(PHI));
+    if (SourceMBB) {
+      MIB.addReg(CombinedSourceReg);
+      MIB.addMBB(SourceMBB);
+      DEBUG(dbgs() << PrintReg(CombinedSourceReg, TRI) << ", BB#"
+                   << SourceMBB->getNumber());
+    }
+
+    // Re-add every input not slated for removal.
+    for (unsigned i = 0; i < NumInputs; ++i) {
+      if (isPHIRegionIndex(PHIIndices, i)) {
+        continue;
+      }
+      unsigned SourceReg = getPHISourceReg(PHI, i);
+      MachineBasicBlock *SourcePred = getPHIPred(PHI, i);
+      MIB.addReg(SourceReg);
+      MIB.addMBB(SourcePred);
+      DEBUG(dbgs() << PrintReg(SourceReg, TRI) << ", BB#"
+                   << SourcePred->getNumber());
+    }
+    DEBUG(dbgs() << ")\n");
+  }
+  PHI.eraseFromParent();
+  return Replaced;
+}
+
+// Replace PHI's in-region inputs with a single (CombinedSourceReg,
+// LastMerge) input, keeping all external inputs. If the PHI has no
+// external inputs at all, the dest register is simply replaced by
+// CombinedSourceReg everywhere. The original PHI is erased either way.
+void AMDGPUMachineCFGStructurizer::replacePHI(
+    MachineInstr &PHI, unsigned CombinedSourceReg, MachineBasicBlock *LastMerge,
+    SmallVector<unsigned, 2> &PHIRegionIndices) {
+  DEBUG(dbgs() << "Replace PHI: ");
+  DEBUG(PHI.dump());
+  DEBUG(dbgs() << " with " << PrintReg(getPHIDestReg(PHI), TRI)
+               << "<def> = PHI(");
+
+  bool HasExternalEdge = false;
+  unsigned NumInputs = getPHINumInputs(PHI);
+  for (unsigned i = 0; i < NumInputs; ++i) {
+    if (!isPHIRegionIndex(PHIRegionIndices, i)) {
+      HasExternalEdge = true;
+    }
+  }
+
+  if (HasExternalEdge) {
+    MachineBasicBlock *MBB = PHI.getParent();
+    MachineInstrBuilder MIB =
+        BuildMI(*MBB, PHI, PHI.getDebugLoc(), TII->get(TargetOpcode::PHI),
+                getPHIDestReg(PHI));
+    MIB.addReg(CombinedSourceReg);
+    MIB.addMBB(LastMerge);
+    DEBUG(dbgs() << PrintReg(CombinedSourceReg, TRI) << ", BB#"
+                 << LastMerge->getNumber());
+    for (unsigned i = 0; i < NumInputs; ++i) {
+      if (isPHIRegionIndex(PHIRegionIndices, i)) {
+        continue;
+      }
+      unsigned SourceReg = getPHISourceReg(PHI, i);
+      MachineBasicBlock *SourcePred = getPHIPred(PHI, i);
+      MIB.addReg(SourceReg);
+      MIB.addMBB(SourcePred);
+      DEBUG(dbgs() << PrintReg(SourceReg, TRI) << ", BB#"
+                   << SourcePred->getNumber());
+    }
+    DEBUG(dbgs() << ")\n");
+  } else {
+    // No external inputs: the PHI collapses to the combined register.
+    replaceRegisterWith(getPHIDestReg(PHI), CombinedSourceReg);
+  }
+  PHI.eraseFromParent();
+}
+
+// Replace the region-internal inputs of an entry-block PHI with a single
+// (CombinedSourceReg, IfMBB) input. If every input was region-internal,
+// the PHI collapses: its dest register is replaced by CombinedSourceReg.
+// The original PHI is erased in both cases.
+void AMDGPUMachineCFGStructurizer::replaceEntryPHI(
+    MachineInstr &PHI, unsigned CombinedSourceReg, MachineBasicBlock *IfMBB,
+    SmallVector<unsigned, 2> &PHIRegionIndices) {
+
+  DEBUG(dbgs() << "Replace entry PHI: ");
+  DEBUG(PHI.dump());
+  DEBUG(dbgs() << " with ");
+
+  unsigned NumInputs = getPHINumInputs(PHI);
+  unsigned NumNonRegionInputs = NumInputs;
+  for (unsigned i = 0; i < NumInputs; ++i) {
+    if (isPHIRegionIndex(PHIRegionIndices, i)) {
+      NumNonRegionInputs--;
+    }
+  }
+
+  if (NumNonRegionInputs == 0) {
+    auto DestReg = getPHIDestReg(PHI);
+    replaceRegisterWith(DestReg, CombinedSourceReg);
+    DEBUG(dbgs() << " register " << PrintReg(CombinedSourceReg, TRI) << "\n");
+    PHI.eraseFromParent();
+  } else {
+    DEBUG(dbgs() << PrintReg(getPHIDestReg(PHI), TRI) << "<def> = PHI(");
+    MachineBasicBlock *MBB = PHI.getParent();
+    MachineInstrBuilder MIB =
+        BuildMI(*MBB, PHI, PHI.getDebugLoc(), TII->get(TargetOpcode::PHI),
+                getPHIDestReg(PHI));
+    MIB.addReg(CombinedSourceReg);
+    MIB.addMBB(IfMBB);
+    DEBUG(dbgs() << PrintReg(CombinedSourceReg, TRI) << ", BB#"
+                 << IfMBB->getNumber());
+    // (Removed a redundant inner 'unsigned NumInputs' declaration that
+    // shadowed the identical outer variable.)
+    for (unsigned i = 0; i < NumInputs; ++i) {
+      if (isPHIRegionIndex(PHIRegionIndices, i)) {
+        continue;
+      }
+      unsigned SourceReg = getPHISourceReg(PHI, i);
+      MachineBasicBlock *SourcePred = getPHIPred(PHI, i);
+      MIB.addReg(SourceReg);
+      MIB.addMBB(SourcePred);
+      DEBUG(dbgs() << PrintReg(SourceReg, TRI) << ", BB#"
+                   << SourcePred->getNumber());
+    }
+    DEBUG(dbgs() << ")\n");
+    PHI.eraseFromParent();
+  }
+}
+
+// After a PHI replacement, update LRegion's live-out set: drop replaced
+// source registers that are now dead (no use outside their def block), and
+// add CombinedSourceReg if any replaced source was a live-out.
+void AMDGPUMachineCFGStructurizer::replaceLiveOutRegs(
+    MachineInstr &PHI, SmallVector<unsigned, 2> &PHIRegionIndices,
+    unsigned CombinedSourceReg, LinearizedRegion *LRegion) {
+  bool WasLiveOut = false;
+  for (auto PII : PHIRegionIndices) {
+    unsigned Reg = getPHISourceReg(PHI, PII);
+    if (LRegion->isLiveOut(Reg)) {
+      bool IsDead = true;
+
+      // Check if register is live out of the basic block
+      MachineBasicBlock *DefMBB = getDefInstr(Reg)->getParent();
+      for (auto UI = MRI->use_begin(Reg), E = MRI->use_end(); UI != E; ++UI) {
+        if ((*UI).getParent()->getParent() != DefMBB) {
+          IsDead = false;
+        }
+      }
+
+      DEBUG(dbgs() << "Register " << PrintReg(Reg, TRI) << " is "
+                   << (IsDead ? "dead" : "alive") << " after PHI replace\n");
+      if (IsDead) {
+        LRegion->removeLiveOut(Reg);
+      }
+      WasLiveOut = true;
+    }
+  }
+
+  if (WasLiveOut)
+    LRegion->addLiveOut(CombinedSourceReg);
+}
+
+// Rewrite one PHI in the region successor: fold all in-region inputs into
+// a fresh linearized register fed from LastMerge, then fix up live-outs.
+void AMDGPUMachineCFGStructurizer::rewriteRegionExitPHI(RegionMRT *Region,
+                                                  MachineBasicBlock *LastMerge,
+                                                  MachineInstr &PHI,
+                                                  LinearizedRegion *LRegion) {
+  SmallVector<unsigned, 2> PHIRegionIndices;
+  getPHIRegionIndices(Region, PHI, PHIRegionIndices);
+  unsigned LinearizedSourceReg =
+      storePHILinearizationInfo(PHI, &PHIRegionIndices);
+
+  replacePHI(PHI, LinearizedSourceReg, LastMerge, PHIRegionIndices);
+  replaceLiveOutRegs(PHI, PHIRegionIndices, LinearizedSourceReg, LRegion);
+}
+
+// Rewrite one PHI in the region entry: fold all out-of-region inputs into
+// a fresh linearized register fed from IfMBB.
+void AMDGPUMachineCFGStructurizer::rewriteRegionEntryPHI(LinearizedRegion *Region,
+                                                   MachineBasicBlock *IfMBB,
+                                                   MachineInstr &PHI) {
+  SmallVector<unsigned, 2> PHINonRegionIndices;
+  getPHINonRegionIndices(Region, PHI, PHINonRegionIndices);
+  unsigned LinearizedSourceReg =
+      storePHILinearizationInfo(PHI, &PHINonRegionIndices);
+  replaceEntryPHI(PHI, LinearizedSourceReg, IfMBB, PHINonRegionIndices);
+}
+
+// Gather pointers to all PHI instructions in MBB so callers can rewrite
+// them without invalidating a live block iteration.
+static void collectPHIs(MachineBasicBlock *MBB,
+                        SmallVector<MachineInstr *, 2> &PHIs) {
+  for (auto &BBI : *MBB) {
+    if (BBI.isPHI()) {
+      PHIs.push_back(&BBI);
+    }
+  }
+}
+
+// Rewrite every PHI in the region successor block; no-op when the region
+// has no successor (end of function).
+void AMDGPUMachineCFGStructurizer::rewriteRegionExitPHIs(RegionMRT *Region,
+                                                   MachineBasicBlock *LastMerge,
+                                                   LinearizedRegion *LRegion) {
+  SmallVector<MachineInstr *, 2> PHIs;
+  auto Exit = Region->getSucc();
+  if (Exit == nullptr)
+    return;
+
+  collectPHIs(Exit, PHIs);
+
+  for (auto PHII : PHIs) {
+    rewriteRegionExitPHI(Region, LastMerge, *PHII, LRegion);
+  }
+}
+
+// Rewrite every PHI in the region entry block against the new IfMBB.
+void AMDGPUMachineCFGStructurizer::rewriteRegionEntryPHIs(LinearizedRegion *Region,
+                                                    MachineBasicBlock *IfMBB) {
+  SmallVector<MachineInstr *, 2> PHIs;
+  auto Entry = Region->getEntry();
+
+  collectPHIs(Entry, PHIs);
+
+  for (auto PHII : PHIs) {
+    rewriteRegionEntryPHI(Region, IfMBB, *PHII);
+  }
+}
+
+// Make MBB branch unconditionally to Dest: replace an existing terminator
+// tail if there is one, and emit an explicit branch unless Dest is the
+// layout fall-through block.
+void AMDGPUMachineCFGStructurizer::insertUnconditionalBranch(MachineBasicBlock *MBB,
+                                                       MachineBasicBlock *Dest,
+                                                       const DebugLoc &DL) {
+  DEBUG(dbgs() << "Inserting unconditional branch: " << MBB->getNumber()
+               << " -> " << Dest->getNumber() << "\n");
+  MachineBasicBlock::instr_iterator Terminator = MBB->getFirstInstrTerminator();
+  bool HasTerminator = Terminator != MBB->instr_end();
+  if (HasTerminator) {
+    TII->ReplaceTailWithBranchTo(Terminator, Dest);
+  }
+  // Only emit a branch when Dest does not immediately follow MBB in layout.
+  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(Dest)) {
+    TII->insertUnconditionalBranch(*MBB, Dest, DL);
+  }
+}
+
+// Return the unique successor-less block of MF, or nullptr if there are
+// zero or several such blocks.
+static MachineBasicBlock *getSingleExitNode(MachineFunction &MF) {
+  MachineBasicBlock *result = nullptr;
+  for (auto &MFI : MF) {
+    if (MFI.succ_size() == 0) {
+      if (result == nullptr) {
+        result = &MFI;
+      } else {
+        // Second exit block found: not a single-exit function.
+        return nullptr;
+      }
+    }
+  }
+
+  return result;
+}
+
+// True if MF has exactly one successor-less block.
+static bool hasOneExitNode(MachineFunction &MF) {
+  return getSingleExitNode(MF) != nullptr;
+}
+
+// Return the block that will serve as the merged exit of the linearized
+// region: the function's existing single exit when the region has no
+// successor, otherwise a freshly created block branching to the successor.
+MachineBasicBlock *
+AMDGPUMachineCFGStructurizer::createLinearizedExitBlock(RegionMRT *Region) {
+  auto Exit = Region->getSucc();
+
+  // If the exit is the end of the function, we just use the existing
+  // function exit block.
+  MachineFunction *MF = Region->getEntry()->getParent();
+  if (Exit == nullptr && hasOneExitNode(*MF)) {
+    return &(*(--(Region->getEntry()->getParent()->end())));
+  }
+
+  MachineBasicBlock *LastMerge = MF->CreateMachineBasicBlock();
+  if (Exit == nullptr) {
+    MachineFunction::iterator ExitIter = MF->end();
+    MF->insert(ExitIter, LastMerge);
+  } else {
+    MachineFunction::iterator ExitIter = Exit->getIterator();
+    MF->insert(ExitIter, LastMerge);
+    LastMerge->addSuccessor(Exit);
+    insertUnconditionalBranch(LastMerge, Exit);
+    DEBUG(dbgs() << "Created exit block: " << LastMerge->getNumber() << "\n");
+  }
+  return LastMerge;
+}
+
+// Insert a two-input PHI at the top of MergeBB merging IfSourceRegister
+// (from IfBB) and CodeSourceRegister (from CodeBB) into DestRegister.
+// Skipped when MergeBB is the function exit block.
+void AMDGPUMachineCFGStructurizer::insertMergePHI(MachineBasicBlock *IfBB,
+                                            MachineBasicBlock *CodeBB,
+                                            MachineBasicBlock *MergeBB,
+                                            unsigned DestRegister,
+                                            unsigned IfSourceRegister,
+                                            unsigned CodeSourceRegister,
+                                            bool IsUndefIfSource) {
+  // If this is the function exit block, we don't need a phi.
+  if (MergeBB->succ_begin() == MergeBB->succ_end()) {
+    return;
+  }
+  DEBUG(dbgs() << "Merge PHI (BB#" << MergeBB->getNumber()
+               << "): " << PrintReg(DestRegister, TRI) << "<def> = PHI("
+               << PrintReg(IfSourceRegister, TRI) << ", BB#"
+               << IfBB->getNumber() << PrintReg(CodeSourceRegister, TRI)
+               << ", BB#" << CodeBB->getNumber() << ")\n");
+  const DebugLoc &DL = MergeBB->findDebugLoc(MergeBB->begin());
+  MachineInstrBuilder MIB = BuildMI(*MergeBB, MergeBB->instr_begin(), DL,
+                                    TII->get(TargetOpcode::PHI), DestRegister);
+  // NOTE(review): "&& false" deliberately disables the undef-source path,
+  // making IsUndefIfSource a no-op — confirm this is intentional.
+  if (IsUndefIfSource && false) {
+    MIB.addReg(IfSourceRegister, RegState::Undef);
+  } else {
+    MIB.addReg(IfSourceRegister);
+  }
+  MIB.addMBB(IfBB);
+  MIB.addReg(CodeSourceRegister);
+  MIB.addMBB(CodeBB);
+}
+
+// Remove every CFG successor edge of MBB except a self-loop edge.
+// The successor list is snapshotted first: MachineBasicBlock's
+// removeSuccessor erases from the underlying successor vector, so erasing
+// while walking succ_begin()/succ_end() invalidates the iterators.
+static void removeExternalCFGSuccessors(MachineBasicBlock *MBB) {
+  SmallVector<MachineBasicBlock *, 4> Succs(MBB->succ_begin(),
+                                            MBB->succ_end());
+  for (MachineBasicBlock *Succ : Succs) {
+    if (Succ != MBB) {
+      MBB->removeSuccessor(Succ);
+    }
+  }
+}
+
+// Collect and remove all CFG edges entering StartMBB or leaving EndMBB,
+// except the back-edges that keep the structurized region intact.
+static void removeExternalCFGEdges(MachineBasicBlock *StartMBB,
+                                   MachineBasicBlock *EndMBB) {
+
+  // We have to check against the StartMBB successor because a
+  // structurized region with a loop will have the entry block split,
+  // and the backedge will go to the entry successor.
+  DenseSet<std::pair<MachineBasicBlock *, MachineBasicBlock *>> Succs;
+  unsigned SuccSize = StartMBB->succ_size();
+  if (SuccSize > 0) {
+    MachineBasicBlock *StartMBBSucc = *(StartMBB->succ_begin());
+    for (MachineBasicBlock::succ_iterator PI = EndMBB->succ_begin(),
+                                          E = EndMBB->succ_end();
+         PI != E; ++PI) {
+      // Either we have a back-edge to the entry block, or a back-edge to the
+      // successor of the entry block since the block may be split.
+      if ((*PI) != StartMBB &&
+          !((*PI) == StartMBBSucc && StartMBB != EndMBB && SuccSize == 1)) {
+        Succs.insert(
+            std::pair<MachineBasicBlock *, MachineBasicBlock *>(EndMBB, *PI));
+      }
+    }
+  }
+
+  // All predecessor edges into StartMBB (other than from EndMBB) are
+  // external and must go as well.
+  for (MachineBasicBlock::pred_iterator PI = StartMBB->pred_begin(),
+                                        E = StartMBB->pred_end();
+       PI != E; ++PI) {
+    if ((*PI) != EndMBB) {
+      Succs.insert(
+          std::pair<MachineBasicBlock *, MachineBasicBlock *>(*PI, StartMBB));
+    }
+  }
+
+  // Edges were collected first so the removals cannot invalidate the
+  // iterations above.
+  for (auto SI : Succs) {
+    std::pair<MachineBasicBlock *, MachineBasicBlock *> Edge = SI;
+    DEBUG(dbgs() << "Removing edge: BB#" << Edge.first->getNumber() << " -> BB#"
+                 << Edge.second->getNumber() << "\n");
+    Edge.first->removeSuccessor(Edge.second);
+  }
+}
+
+// Create and insert a new "if" block in front of the CodeBBStart..CodeBBEnd
+// range: external CFG edges are removed, the code range is spliced before
+// MergeBB, and the new block gets a conditional branch (on IfReg comparing
+// against SelectBB's number) that either enters the code range or skips to
+// MergeBB. Returns the new if block.
+MachineBasicBlock *AMDGPUMachineCFGStructurizer::createIfBlock(
+    MachineBasicBlock *MergeBB, MachineBasicBlock *CodeBBStart,
+    MachineBasicBlock *CodeBBEnd, MachineBasicBlock *SelectBB, unsigned IfReg,
+    bool InheritPreds) {
+  MachineFunction *MF = MergeBB->getParent();
+  MachineBasicBlock *IfBB = MF->CreateMachineBasicBlock();
+
+  if (InheritPreds) {
+    for (MachineBasicBlock::pred_iterator PI = CodeBBStart->pred_begin(),
+                                          E = CodeBBStart->pred_end();
+         PI != E; ++PI) {
+      if ((*PI) != CodeBBEnd) {
+        MachineBasicBlock *Pred = (*PI);
+        Pred->addSuccessor(IfBB);
+      }
+    }
+  }
+
+  removeExternalCFGEdges(CodeBBStart, CodeBBEnd);
+
+  auto CodeBBStartI = CodeBBStart->getIterator();
+  auto CodeBBEndI = CodeBBEnd->getIterator();
+  auto MergeIter = MergeBB->getIterator();
+  MF->insert(MergeIter, IfBB);
+  MF->splice(MergeIter, CodeBBStartI, ++CodeBBEndI);
+  IfBB->addSuccessor(MergeBB);
+  IfBB->addSuccessor(CodeBBStart);
+
+  DEBUG(dbgs() << "Created If block: " << IfBB->getNumber() << "\n");
+  // Ensure that the MergeBB is a successor of the CodeEndBB.
+  if (!CodeBBEnd->isSuccessor(MergeBB))
+    CodeBBEnd->addSuccessor(MergeBB);
+
+  DEBUG(dbgs() << "Moved MBB#" << CodeBBStart->getNumber() << " through MBB#"
+               << CodeBBEnd->getNumber() << "\n");
+
+  // If we have a single predecessor we can find a reasonable debug location
+  MachineBasicBlock *SinglePred =
+      CodeBBStart->pred_size() == 1 ? *(CodeBBStart->pred_begin()) : nullptr;
+  const DebugLoc &DL = SinglePred
+                    ? SinglePred->findDebugLoc(SinglePred->getFirstTerminator())
+                    : DebugLoc();
+
+  unsigned Reg =
+      TII->insertEQ(IfBB, IfBB->begin(), DL, IfReg,
+                    SelectBB->getNumber() /* CodeBBStart->getNumber() */);
+  // The very first block of the function must seed the select register.
+  if (&(*(IfBB->getParent()->begin())) == IfBB) {
+    TII->materializeImmediate(*IfBB, IfBB->begin(), DL, IfReg,
+                              CodeBBStart->getNumber());
+  }
+  MachineOperand RegOp = MachineOperand::CreateReg(Reg, false, false, true);
+  ArrayRef<MachineOperand> Cond(RegOp);
+  TII->insertBranch(*IfBB, MergeBB, CodeBBStart, Cond, DL);
+
+  return IfBB;
+}
+
+// Clear the kill flag on every use of the (single) condition register so the
+// register stays live for the vector select inserted after the branch.
+// NOTE(review): Cond is received by value; cheap for a 1-element SmallVector
+// and the signature must match the out-of-view declaration.
+void AMDGPUMachineCFGStructurizer::ensureCondIsNotKilled(
+    SmallVector<MachineOperand, 1> Cond) {
+  if (Cond.size() != 1)
+    return;
+  if (!Cond[0].isReg())
+    return;
+
+  unsigned CondReg = Cond[0].getReg();
+  for (auto UI = MRI->use_begin(CondReg), E = MRI->use_end(); UI != E; ++UI) {
+    (*UI).setIsKill(false);
+  }
+}
+
+// Rewrite CodeBB's terminator: instead of branching to its old successors,
+// record the number of the block that would have executed next in
+// BBSelectReg and branch unconditionally to MergeBB.
+void AMDGPUMachineCFGStructurizer::rewriteCodeBBTerminator(MachineBasicBlock *CodeBB,
+                                                     MachineBasicBlock *MergeBB,
+                                                     unsigned BBSelectReg) {
+  MachineBasicBlock *TrueBB = nullptr;
+  MachineBasicBlock *FalseBB = nullptr;
+  SmallVector<MachineOperand, 1> Cond;
+  MachineBasicBlock *FallthroughBB = FallthroughMap[CodeBB];
+  TII->analyzeBranch(*CodeBB, TrueBB, FalseBB, Cond);
+
+  const DebugLoc &DL = CodeBB->findDebugLoc(CodeBB->getFirstTerminator());
+
+  if (FalseBB == nullptr && TrueBB == nullptr && FallthroughBB == nullptr) {
+    // This is an exit block, hence no successors. We will assign the
+    // bb select register to the entry block.
+    TII->materializeImmediate(*CodeBB, CodeBB->getFirstTerminator(), DL,
+                              BBSelectReg,
+                              CodeBB->getParent()->begin()->getNumber());
+    insertUnconditionalBranch(CodeBB, MergeBB, DL);
+    return;
+  }
+
+  // Fold the pre-recorded fallthrough successor into TrueBB/FalseBB.
+  if (FalseBB == nullptr && TrueBB == nullptr) {
+    TrueBB = FallthroughBB;
+  } else if (TrueBB != nullptr) {
+    FalseBB =
+        (FallthroughBB && (FallthroughBB != TrueBB)) ? FallthroughBB : FalseBB;
+  }
+
+  if ((TrueBB != nullptr && FalseBB == nullptr) || (TrueBB == FalseBB)) {
+    // Single successor: just materialize its block number.
+    TII->materializeImmediate(*CodeBB, CodeBB->getFirstTerminator(), DL,
+                              BBSelectReg, TrueBB->getNumber());
+  } else {
+    // Two-way branch: select between the two target block numbers.
+    const TargetRegisterClass *RegClass = MRI->getRegClass(BBSelectReg);
+    unsigned TrueBBReg = MRI->createVirtualRegister(RegClass);
+    unsigned FalseBBReg = MRI->createVirtualRegister(RegClass);
+    TII->materializeImmediate(*CodeBB, CodeBB->getFirstTerminator(), DL,
+                              TrueBBReg, TrueBB->getNumber());
+    TII->materializeImmediate(*CodeBB, CodeBB->getFirstTerminator(), DL,
+                              FalseBBReg, FalseBB->getNumber());
+    ensureCondIsNotKilled(Cond);
+    TII->insertVectorSelect(*CodeBB, CodeBB->getFirstTerminator(), DL,
+                            BBSelectReg, Cond, TrueBBReg, FalseBBReg);
+  }
+
+  insertUnconditionalBranch(CodeBB, MergeBB, DL);
+}
+
+// Return the single defining instruction of virtual register Reg. The pass
+// relies on SSA form here; in debug builds diagnostics are printed before
+// the assertion fires for zero or multiple definitions.
+MachineInstr *AMDGPUMachineCFGStructurizer::getDefInstr(unsigned Reg) {
+  if (MRI->def_begin(Reg) == MRI->def_end()) {
+    DEBUG(dbgs() << "Register " << PrintReg(Reg, MRI->getTargetRegisterInfo())
+                 << " has NO defs\n");
+  } else if (!MRI->hasOneDef(Reg)) {
+    DEBUG(dbgs() << "Register " << PrintReg(Reg, MRI->getTargetRegisterInfo())
+                 << " has multiple defs\n");
+    DEBUG(dbgs() << "DEFS BEGIN:\n");
+    for (auto DI = MRI->def_begin(Reg), DE = MRI->def_end(); DI != DE; ++DI) {
+      DEBUG(DI->getParent()->dump());
+    }
+    DEBUG(dbgs() << "DEFS END\n");
+  }
+
+  assert(MRI->hasOneDef(Reg) && "Register has multiple definitions");
+  return (*(MRI->def_begin(Reg))).getParent();
+}
+
+// Add a DestReg/SourceReg pair to an existing PHI chain: either fold an
+// inner PHI-def away by renaming, or insert a merge PHI in MergeBB that
+// continues (or terminates) the chain.
+void AMDGPUMachineCFGStructurizer::insertChainedPHI(MachineBasicBlock *IfBB,
+                                              MachineBasicBlock *CodeBB,
+                                              MachineBasicBlock *MergeBB,
+                                              LinearizedRegion *InnerRegion,
+                                              unsigned DestReg,
+                                              unsigned SourceReg) {
+  // In this function we know we are part of a chain already, so we need
+  // to add the registers to the existing chain, and rename the register
+  // inside the region.
+  bool IsSingleBB = InnerRegion->getEntry() == InnerRegion->getExit();
+  MachineInstr *DefInstr = getDefInstr(SourceReg);
+  if (DefInstr->isPHI() && DefInstr->getParent() == CodeBB && IsSingleBB) {
+    // Handle the case where the def is a PHI-def inside a basic
+    // block, then we only need to do renaming. Special care needs to
+    // be taken if the PHI-def is part of an existing chain, or if a
+    // new one needs to be created.
+    InnerRegion->replaceRegisterInsideRegion(SourceReg, DestReg, true, MRI);
+
+    // We collect all PHI Information, and if we are at the region entry,
+    // all PHIs will be removed, and then re-introduced if needed.
+    storePHILinearizationInfoDest(DestReg, *DefInstr);
+    // We have picked up all the information we need now and can remove
+    // the PHI
+    PHIInfo.removeSource(DestReg, SourceReg, CodeBB);
+    DefInstr->eraseFromParent();
+  } else {
+    // If this is not a phi-def, or it is a phi-def but from a linearized region
+    if (IsSingleBB && DefInstr->getParent() == InnerRegion->getEntry()) {
+      // If this is a single BB and the definition is in this block we
+      // need to replace any uses outside the region.
+      InnerRegion->replaceRegisterOutsideRegion(SourceReg, DestReg, false, MRI);
+    }
+    const TargetRegisterClass *RegClass = MRI->getRegClass(DestReg);
+    unsigned NextDestReg = MRI->createVirtualRegister(RegClass);
+    bool IsLastDef = PHIInfo.getNumSources(DestReg) == 1;
+    DEBUG(dbgs() << "Insert Chained PHI\n");
+    insertMergePHI(IfBB, InnerRegion->getExit(), MergeBB, DestReg, NextDestReg,
+                   SourceReg, IsLastDef);
+
+    PHIInfo.removeSource(DestReg, SourceReg, CodeBB);
+    if (IsLastDef) {
+      // The chain ends here; give the remaining PHI input a dummy def to
+      // keep the function in SSA form.
+      const DebugLoc &DL = IfBB->findDebugLoc(IfBB->getFirstTerminator());
+      TII->materializeImmediate(*IfBB, IfBB->getFirstTerminator(), DL,
+                                NextDestReg, 0);
+      PHIInfo.deleteDef(DestReg);
+    } else {
+      PHIInfo.replaceDef(DestReg, NextDestReg);
+    }
+  }
+}
+
+// Return true if the single definition of Register lives in MBB or anywhere
+// inside InnerRegion. Look the defining instruction up once instead of twice
+// as the original did.
+bool AMDGPUMachineCFGStructurizer::containsDef(MachineBasicBlock *MBB,
+                                         LinearizedRegion *InnerRegion,
+                                         unsigned Register) {
+  MachineBasicBlock *DefMBB = getDefInstr(Register)->getParent();
+  return DefMBB == MBB || InnerRegion->contains(DefMBB);
+}
+
+// For every live-out register of InnerRegion that is defined in CodeBB,
+// insert the MergeBB PHIs needed to keep the value live across the newly
+// created if-structure; then process any PHI chains sourced from CodeBB.
+void AMDGPUMachineCFGStructurizer::rewriteLiveOutRegs(MachineBasicBlock *IfBB,
+                                                MachineBasicBlock *CodeBB,
+                                                MachineBasicBlock *MergeBB,
+                                                LinearizedRegion *InnerRegion,
+                                                LinearizedRegion *LRegion) {
+  DenseSet<unsigned> *LiveOuts = InnerRegion->getLiveOuts();
+  SmallVector<unsigned, 4> OldLiveOuts;
+  bool IsSingleBB = InnerRegion->getEntry() == InnerRegion->getExit();
+  // Snapshot the live-out set; it is modified while we rewrite below.
+  for (auto OLI : *LiveOuts) {
+    OldLiveOuts.push_back(OLI);
+  }
+
+  for (auto LI : OldLiveOuts) {
+    DEBUG(dbgs() << "LiveOut: " << PrintReg(LI, TRI));
+    if (!containsDef(CodeBB, InnerRegion, LI) ||
+        (!IsSingleBB && (getDefInstr(LI)->getParent() == LRegion->getExit()))) {
+      // If the register simply lives through the CodeBB, we don't have
+      // to rewrite anything since the register is not defined in this
+      // part of the code.
+      DEBUG(dbgs() << "- through");
+      continue;
+    }
+    DEBUG(dbgs() << "\n");
+    unsigned Reg = LI;
+    if (/*!PHIInfo.isSource(Reg) &&*/ Reg != InnerRegion->getBBSelectRegOut()) {
+      // If the register is live out, we do want to create a phi,
+      // unless it is from the Exit block, because in that case there
+      // is already a PHI, and no need to create a new one.
+
+      // If the register is just a live out def and not part of a phi
+      // chain, we need to create a PHI node to handle the if region,
+      // and replace all uses outside of the region with the new dest
+      // register, unless it is the outgoing BB select register. We have
+      // already created phi nodes for these.
+      const TargetRegisterClass *RegClass = MRI->getRegClass(Reg);
+      unsigned PHIDestReg = MRI->createVirtualRegister(RegClass);
+      unsigned IfSourceReg = MRI->createVirtualRegister(RegClass);
+      // Create initializer, this value is never used, but is needed
+      // to satisfy SSA.
+      DEBUG(dbgs() << "Initializer for reg: " << PrintReg(Reg) << "\n");
+      TII->materializeImmediate(*IfBB, IfBB->getFirstTerminator(), DebugLoc(),
+                                IfSourceReg, 0);
+
+      InnerRegion->replaceRegisterOutsideRegion(Reg, PHIDestReg, true, MRI);
+      DEBUG(dbgs() << "Insert Non-Chained Live out PHI\n");
+      insertMergePHI(IfBB, InnerRegion->getExit(), MergeBB, PHIDestReg,
+                     IfSourceReg, Reg, true);
+    }
+  }
+
+  // Handle the chained definitions in PHIInfo, checking if this basic block
+  // is a source block for a definition.
+  SmallVector<unsigned, 4> Sources;
+  if (PHIInfo.findSourcesFromMBB(CodeBB, Sources)) {
+    DEBUG(dbgs() << "Inserting PHI Live Out from BB#" << CodeBB->getNumber()
+                 << "\n");
+    for (auto SI : Sources) {
+      unsigned DestReg;
+      PHIInfo.findDest(SI, CodeBB, DestReg);
+      insertChainedPHI(IfBB, CodeBB, MergeBB, InnerRegion, DestReg, SI);
+    }
+    DEBUG(dbgs() << "Insertion done.\n");
+  }
+
+  DEBUG(PHIInfo.dump(MRI));
+}
+
+// Drop redundant PHI sources: if MBB itself provides a source for a PHI
+// destination, MBB is always executed when the region is executed, so every
+// source from any other block can never be selected and is removed.
+void AMDGPUMachineCFGStructurizer::prunePHIInfo(MachineBasicBlock *MBB) {
+  DEBUG(dbgs() << "Before PHI Prune\n");
+  DEBUG(PHIInfo.dump(MRI));
+  SmallVector<std::tuple<unsigned, unsigned, MachineBasicBlock *>, 4>
+      EliminatedSources;
+  for (auto DRI = PHIInfo.dests_begin(), DE = PHIInfo.dests_end(); DRI != DE;
+       ++DRI) {
+
+    unsigned DestReg = *DRI;
+    auto SE = PHIInfo.sources_end(DestReg);
+
+    bool MBBContainsPHISource = false;
+    // Check if there is a PHI source in this MBB
+    for (auto SRI = PHIInfo.sources_begin(DestReg); SRI != SE; ++SRI) {
+      unsigned SourceReg = (*SRI).first;
+      MachineOperand *Def = &(*(MRI->def_begin(SourceReg)));
+      if (Def->getParent()->getParent() == MBB) {
+        MBBContainsPHISource = true;
+      }
+    }
+
+    // If so, all other sources are useless since we know this block
+    // is always executed when the region is executed.
+    if (MBBContainsPHISource) {
+      for (auto SRI = PHIInfo.sources_begin(DestReg); SRI != SE; ++SRI) {
+        PHILinearize::PHISourceT Source = *SRI;
+        unsigned SourceReg = Source.first;
+        MachineBasicBlock *SourceMBB = Source.second;
+        MachineOperand *Def = &(*(MRI->def_begin(SourceReg)));
+        if (Def->getParent()->getParent() != MBB) {
+          EliminatedSources.push_back(
+              std::make_tuple(DestReg, SourceReg, SourceMBB));
+        }
+      }
+    }
+  }
+
+  // Remove the collected sources after iteration, so PHIInfo is not
+  // mutated while its source lists are being walked.
+  for (auto &SourceInfo : EliminatedSources) {
+    PHIInfo.removeSource(std::get<0>(SourceInfo), std::get<1>(SourceInfo),
+                         std::get<2>(SourceInfo));
+  }
+  DEBUG(dbgs() << "After PHI Prune\n");
+  DEBUG(PHIInfo.dump(MRI));
+}
+
+// Create the PHI in the region entry that merges the values of DestReg
+// arriving from outside the region with the value produced on the region's
+// backedge(s). Multiple internal sources are folded into a single backedge
+// value through chained intermediate PHIs.
+void AMDGPUMachineCFGStructurizer::createEntryPHI(LinearizedRegion *CurrentRegion,
+                                            unsigned DestReg) {
+  MachineBasicBlock *Entry = CurrentRegion->getEntry();
+  MachineBasicBlock *Exit = CurrentRegion->getExit();
+
+  DEBUG(dbgs() << "RegionExit: " << Exit->getNumber()
+               << " Pred: " << (*(Entry->pred_begin()))->getNumber() << "\n");
+
+  int NumSources = 0;
+  auto SE = PHIInfo.sources_end(DestReg);
+
+  for (auto SRI = PHIInfo.sources_begin(DestReg); SRI != SE; ++SRI) {
+    NumSources++;
+  }
+
+  if (NumSources == 1) {
+    // A single source needs no PHI; just rename.
+    auto SRI = PHIInfo.sources_begin(DestReg);
+    unsigned SourceReg = (*SRI).first;
+    replaceRegisterWith(DestReg, SourceReg);
+  } else {
+    const DebugLoc &DL = Entry->findDebugLoc(Entry->begin());
+    MachineInstrBuilder MIB = BuildMI(*Entry, Entry->instr_begin(), DL,
+                                      TII->get(TargetOpcode::PHI), DestReg);
+    DEBUG(dbgs() << "Entry PHI " << PrintReg(DestReg, TRI) << "<def> = PHI(");
+
+    unsigned CurrentBackedgeReg = 0;
+
+    for (auto SRI = PHIInfo.sources_begin(DestReg); SRI != SE; ++SRI) {
+      unsigned SourceReg = (*SRI).first;
+
+      if (CurrentRegion->contains((*SRI).second)) {
+        if (CurrentBackedgeReg == 0) {
+          CurrentBackedgeReg = SourceReg;
+        } else {
+          MachineInstr *PHIDefInstr = getDefInstr(SourceReg);
+          MachineBasicBlock *PHIDefMBB = PHIDefInstr->getParent();
+          const TargetRegisterClass *RegClass =
+              MRI->getRegClass(CurrentBackedgeReg);
+          unsigned NewBackedgeReg = MRI->createVirtualRegister(RegClass);
+          // NOTE(review): assumes the defining PHI's relevant input sits at
+          // operand indices 0/1 -- TODO confirm for PHIs with more inputs.
+          MachineInstrBuilder BackedgePHI =
+              BuildMI(*PHIDefMBB, PHIDefMBB->instr_begin(), DL,
+                      TII->get(TargetOpcode::PHI), NewBackedgeReg);
+          BackedgePHI.addReg(CurrentBackedgeReg);
+          BackedgePHI.addMBB(getPHIPred(*PHIDefInstr, 0));
+          BackedgePHI.addReg(getPHISourceReg(*PHIDefInstr, 1));
+          BackedgePHI.addMBB((*SRI).second);
+          // Emit the trace BEFORE updating CurrentBackedgeReg; the original
+          // overwrote it first, so the debug output showed the new register
+          // as its own PHI input instead of the previous backedge value.
+          DEBUG(dbgs() << "Inserting backedge PHI: "
+                       << PrintReg(NewBackedgeReg, TRI) << "<def> = PHI("
+                       << PrintReg(CurrentBackedgeReg, TRI) << ", BB#"
+                       << getPHIPred(*PHIDefInstr, 0)->getNumber() << ", "
+                       << PrintReg(getPHISourceReg(*PHIDefInstr, 1), TRI)
+                       << ", BB#" << (*SRI).second->getNumber());
+          CurrentBackedgeReg = NewBackedgeReg;
+        }
+      } else {
+        MIB.addReg(SourceReg);
+        MIB.addMBB((*SRI).second);
+        DEBUG(dbgs() << PrintReg(SourceReg, TRI) << ", BB#"
+                     << (*SRI).second->getNumber() << ", ");
+      }
+    }
+
+    // Add the final backedge register source to the entry phi
+    if (CurrentBackedgeReg != 0) {
+      MIB.addReg(CurrentBackedgeReg);
+      MIB.addMBB(Exit);
+      DEBUG(dbgs() << PrintReg(CurrentBackedgeReg, TRI) << ", BB#"
+                   << Exit->getNumber() << ")\n");
+    } else {
+      DEBUG(dbgs() << ")\n");
+    }
+  }
+}
+
+// Materialize an entry PHI for every destination registered in PHIInfo,
+// then reset the PHI bookkeeping for the next region.
+void AMDGPUMachineCFGStructurizer::createEntryPHIs(LinearizedRegion *CurrentRegion) {
+  DEBUG(PHIInfo.dump(MRI));
+
+  auto DestIt = PHIInfo.dests_begin();
+  const auto DestEnd = PHIInfo.dests_end();
+  while (DestIt != DestEnd) {
+    createEntryPHI(CurrentRegion, *DestIt);
+    ++DestIt;
+  }
+  PHIInfo.clear();
+}
+
+// Rewrite every operand referring to Register to use NewRegister, then drop
+// the PHI bookkeeping and region live-out entries for the old register.
+void AMDGPUMachineCFGStructurizer::replaceRegisterWith(unsigned Register,
+                                                 unsigned NewRegister) {
+  assert(Register != NewRegister && "Cannot replace a reg with itself");
+
+  // Advance the iterator before rewriting the operand: setReg() unlinks the
+  // operand from Register's use/def list, which would invalidate I.
+  for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(Register),
+                                         E = MRI->reg_end();
+       I != E;) {
+    MachineOperand &O = *I;
+    ++I;
+    if (TargetRegisterInfo::isPhysicalRegister(NewRegister)) {
+      DEBUG(dbgs() << "Trying to substitute physical register: "
+                   << PrintReg(NewRegister, MRI->getTargetRegisterInfo())
+                   << "\n");
+      llvm_unreachable("Cannot substitute physical registers");
+      // We don't handle physical registers, but if we need to
+      // in the future This is how we do it:
+      // O.substPhysReg(NewRegister, *TRI);
+    } else {
+      DEBUG(dbgs() << "Replacing register: "
+                   << PrintReg(Register, MRI->getTargetRegisterInfo())
+                   << " with "
+                   << PrintReg(NewRegister, MRI->getTargetRegisterInfo())
+                   << "\n");
+      O.setReg(NewRegister);
+    }
+  }
+  PHIInfo.deleteDef(Register);
+
+  getRegionMRT()->replaceLiveOutReg(Register, NewRegister);
+
+  DEBUG(PHIInfo.dump(MRI));
+}
+
+// At the function entry every remaining PHI destination must have exactly
+// one source; fold each destination into its single source register.
+void AMDGPUMachineCFGStructurizer::resolvePHIInfos(MachineBasicBlock *FunctionEntry) {
+  DEBUG(dbgs() << "Resolve PHI Infos\n");
+  DEBUG(PHIInfo.dump(MRI));
+  for (auto DRI = PHIInfo.dests_begin(), DE = PHIInfo.dests_end(); DRI != DE;
+       ++DRI) {
+    unsigned DestReg = *DRI;
+    DEBUG(dbgs() << "DestReg: " << PrintReg(DestReg, TRI) << "\n");
+    auto SRI = PHIInfo.sources_begin(DestReg);
+    unsigned SourceReg = (*SRI).first;
+    DEBUG(dbgs() << "DestReg: " << PrintReg(DestReg, TRI)
+                 << " SourceReg: " << PrintReg(SourceReg, TRI) << "\n");
+
+    // NOTE(review): the ++SRI side effect disappears in NDEBUG builds;
+    // harmless here because SRI is not used again afterwards.
+    assert(PHIInfo.sources_end(DestReg) == ++SRI &&
+           "More than one phi source in entry node");
+    replaceRegisterWith(DestReg, SourceReg);
+  }
+}
+
+// A block is the function entry iff it is the first block of its function.
+static bool isFunctionEntryBlock(MachineBasicBlock *MBB) {
+  return MBB == &MBB->getParent()->front();
+}
+
+// Linearize a single code block CodeBB: rewrite its terminator to set the
+// bb-select register, and (except for the non-loop entry special cases)
+// wrap it in a newly created if-block that decides whether CodeBB runs.
+// Returns the if-block, or nullptr when no if-block was needed.
+MachineBasicBlock *AMDGPUMachineCFGStructurizer::createIfRegion(
+    MachineBasicBlock *MergeBB, MachineBasicBlock *CodeBB,
+    LinearizedRegion *CurrentRegion, unsigned BBSelectRegIn,
+    unsigned BBSelectRegOut) {
+  if (isFunctionEntryBlock(CodeBB) && !CurrentRegion->getHasLoop()) {
+    // Handle non-loop function entry block; loops targeting the entry
+    // block take the internal-block path below instead.
+    rewriteCodeBBTerminator(CodeBB, MergeBB, BBSelectRegOut);
+    resolvePHIInfos(CodeBB);
+    removeExternalCFGSuccessors(CodeBB);
+    CodeBB->addSuccessor(MergeBB);
+    CurrentRegion->addMBB(CodeBB);
+    return nullptr;
+  }
+  if (CurrentRegion->getEntry() == CodeBB && !CurrentRegion->getHasLoop()) {
+    // Handle non-loop region entry block.
+    MachineFunction *MF = MergeBB->getParent();
+    auto MergeIter = MergeBB->getIterator();
+    auto CodeBBStartIter = CodeBB->getIterator();
+    auto CodeBBEndIter = ++(CodeBB->getIterator());
+    if (CodeBBEndIter != MergeIter) {
+      MF->splice(MergeIter, CodeBBStartIter, CodeBBEndIter);
+    }
+    rewriteCodeBBTerminator(CodeBB, MergeBB, BBSelectRegOut);
+    prunePHIInfo(CodeBB);
+    createEntryPHIs(CurrentRegion);
+    removeExternalCFGSuccessors(CodeBB);
+    CodeBB->addSuccessor(MergeBB);
+    CurrentRegion->addMBB(CodeBB);
+    return nullptr;
+  } else {
+    // Handle internal block.
+    const TargetRegisterClass *RegClass = MRI->getRegClass(BBSelectRegIn);
+    unsigned CodeBBSelectReg = MRI->createVirtualRegister(RegClass);
+    rewriteCodeBBTerminator(CodeBB, MergeBB, CodeBBSelectReg);
+    bool IsRegionEntryBB = CurrentRegion->getEntry() == CodeBB;
+    MachineBasicBlock *IfBB = createIfBlock(MergeBB, CodeBB, CodeBB, CodeBB,
+                                            BBSelectRegIn, IsRegionEntryBB);
+    CurrentRegion->addMBB(IfBB);
+    // If this is the entry block we need to make the If block the new
+    // linearized region entry.
+    if (IsRegionEntryBB) {
+      CurrentRegion->setEntry(IfBB);
+
+      if (CurrentRegion->getHasLoop()) {
+        MachineBasicBlock *RegionExit = CurrentRegion->getExit();
+        MachineBasicBlock *ETrueBB = nullptr;
+        MachineBasicBlock *EFalseBB = nullptr;
+        SmallVector<MachineOperand, 1> ECond;
+
+        const DebugLoc &DL = DebugLoc();
+        TII->analyzeBranch(*RegionExit, ETrueBB, EFalseBB, ECond);
+        TII->removeBranch(*RegionExit);
+
+        // We need to create a backedge if there is a loop
+        unsigned Reg = TII->insertNE(
+            RegionExit, RegionExit->instr_end(), DL,
+            CurrentRegion->getRegionMRT()->getInnerOutputRegister(),
+            CurrentRegion->getRegionMRT()->getEntry()->getNumber());
+        MachineOperand RegOp =
+            MachineOperand::CreateReg(Reg, false, false, true);
+        ArrayRef<MachineOperand> Cond(RegOp);
+        DEBUG(dbgs() << "RegionExitReg: ");
+        DEBUG(Cond[0].print(dbgs(), TRI));
+        DEBUG(dbgs() << "\n");
+        TII->insertBranch(*RegionExit, CurrentRegion->getEntry(), RegionExit,
+                          Cond, DebugLoc());
+        RegionExit->addSuccessor(CurrentRegion->getEntry());
+      }
+    }
+    CurrentRegion->addMBB(CodeBB);
+    LinearizedRegion InnerRegion(CodeBB, MRI, TRI, PHIInfo);
+
+    InnerRegion.setParent(CurrentRegion);
+    DEBUG(dbgs() << "Insert BB Select PHI (BB)\n");
+    insertMergePHI(IfBB, CodeBB, MergeBB, BBSelectRegOut, BBSelectRegIn,
+                   CodeBBSelectReg);
+    InnerRegion.addMBB(MergeBB);
+
+    DEBUG(InnerRegion.print(dbgs(), TRI));
+    rewriteLiveOutRegs(IfBB, CodeBB, MergeBB, &InnerRegion, CurrentRegion);
+    extractKilledPHIs(CodeBB);
+    if (IsRegionEntryBB) {
+      createEntryPHIs(CurrentRegion);
+    }
+    return IfBB;
+  }
+}
+
+// Linearize an already-linearized inner region: wrap it in an if-block keyed
+// on BBSelectRegIn, merge its bb-select and live-out values in MergeBB, and
+// wire up the loop backedge when the inner region is the loop entry.
+MachineBasicBlock *AMDGPUMachineCFGStructurizer::createIfRegion(
+    MachineBasicBlock *MergeBB, LinearizedRegion *InnerRegion,
+    LinearizedRegion *CurrentRegion, MachineBasicBlock *SelectBB,
+    unsigned BBSelectRegIn, unsigned BBSelectRegOut) {
+  unsigned CodeBBSelectReg =
+      InnerRegion->getRegionMRT()->getInnerOutputRegister();
+  MachineBasicBlock *CodeEntryBB = InnerRegion->getEntry();
+  MachineBasicBlock *CodeExitBB = InnerRegion->getExit();
+  MachineBasicBlock *IfBB = createIfBlock(MergeBB, CodeEntryBB, CodeExitBB,
+                                          SelectBB, BBSelectRegIn, true);
+  CurrentRegion->addMBB(IfBB);
+  bool isEntry = CurrentRegion->getEntry() == InnerRegion->getEntry();
+  if (isEntry) {
+
+    if (CurrentRegion->getHasLoop()) {
+      MachineBasicBlock *RegionExit = CurrentRegion->getExit();
+      MachineBasicBlock *ETrueBB = nullptr;
+      MachineBasicBlock *EFalseBB = nullptr;
+      SmallVector<MachineOperand, 1> ECond;
+
+      const DebugLoc &DL = DebugLoc();
+      TII->analyzeBranch(*RegionExit, ETrueBB, EFalseBB, ECond);
+      TII->removeBranch(*RegionExit);
+
+      // We need to create a backedge if there is a loop
+      unsigned Reg =
+          TII->insertNE(RegionExit, RegionExit->instr_end(), DL,
+                        CurrentRegion->getRegionMRT()->getInnerOutputRegister(),
+                        CurrentRegion->getRegionMRT()->getEntry()->getNumber());
+      MachineOperand RegOp = MachineOperand::CreateReg(Reg, false, false, true);
+      ArrayRef<MachineOperand> Cond(RegOp);
+      DEBUG(dbgs() << "RegionExitReg: ");
+      DEBUG(Cond[0].print(dbgs(), TRI));
+      DEBUG(dbgs() << "\n");
+      TII->insertBranch(*RegionExit, CurrentRegion->getEntry(), RegionExit,
+                        Cond, DebugLoc());
+      RegionExit->addSuccessor(IfBB);
+    }
+  }
+  CurrentRegion->addMBBs(InnerRegion);
+  DEBUG(dbgs() << "Insert BB Select PHI (region)\n");
+  insertMergePHI(IfBB, CodeExitBB, MergeBB, BBSelectRegOut, BBSelectRegIn,
+                 CodeBBSelectReg);
+
+  rewriteLiveOutRegs(IfBB, /* CodeEntryBB */ CodeExitBB, MergeBB, InnerRegion,
+                     CurrentRegion);
+
+  rewriteRegionEntryPHIs(InnerRegion, IfBB);
+
+  // The new if-block becomes the region entry (two consecutive identical
+  // conditions were folded into one here).
+  if (isEntry) {
+    CurrentRegion->setEntry(IfBB);
+    createEntryPHIs(CurrentRegion);
+  }
+
+  return IfBB;
+}
+
+// Rewrite one entry-block PHI after the entry has been split: the single
+// input that comes from inside the region moves into a new PHI placed in
+// EntrySucc, so Entry's PHI no longer carries a backedge input.
+void AMDGPUMachineCFGStructurizer::splitLoopPHI(MachineInstr &PHI,
+                                          MachineBasicBlock *Entry,
+                                          MachineBasicBlock *EntrySucc,
+                                          LinearizedRegion *LRegion) {
+  SmallVector<unsigned, 2> PHIRegionIndices;
+  getPHIRegionIndices(LRegion, PHI, PHIRegionIndices);
+
+  // Exactly one PHI input may originate inside the region.
+  assert(PHIRegionIndices.size() == 1);
+
+  unsigned RegionIndex = PHIRegionIndices[0];
+  unsigned RegionSourceReg = getPHISourceReg(PHI, RegionIndex);
+  MachineBasicBlock *RegionSourceMBB = getPHIPred(PHI, RegionIndex);
+  unsigned PHIDest = getPHIDestReg(PHI);
+  unsigned PHISource = PHIDest;
+  unsigned ReplaceReg;
+
+  // If the shrunken PHI collapses to a single value, feed that value into
+  // the new PHI instead of the old destination.
+  if (shrinkPHI(PHI, PHIRegionIndices, &ReplaceReg)) {
+    PHISource = ReplaceReg;
+  }
+
+  const TargetRegisterClass *RegClass = MRI->getRegClass(PHIDest);
+  unsigned NewDestReg = MRI->createVirtualRegister(RegClass);
+  LRegion->replaceRegisterInsideRegion(PHIDest, NewDestReg, false, MRI);
+  MachineInstrBuilder MIB =
+      BuildMI(*EntrySucc, EntrySucc->instr_begin(), PHI.getDebugLoc(),
+              TII->get(TargetOpcode::PHI), NewDestReg);
+  DEBUG(dbgs() << "Split Entry PHI " << PrintReg(NewDestReg, TRI)
+               << "<def> = PHI(");
+  MIB.addReg(PHISource);
+  MIB.addMBB(Entry);
+  DEBUG(dbgs() << PrintReg(PHISource, TRI) << ", BB#" << Entry->getNumber());
+  MIB.addReg(RegionSourceReg);
+  MIB.addMBB(RegionSourceMBB);
+  DEBUG(dbgs() << " ," << PrintReg(RegionSourceReg, TRI) << ", BB#"
+               << RegionSourceMBB->getNumber() << ")\n");
+}
+
+// Apply splitLoopPHI to every PHI of the entry block. The PHIs are gathered
+// up front because splitLoopPHI rewrites the block while we iterate.
+void AMDGPUMachineCFGStructurizer::splitLoopPHIs(MachineBasicBlock *Entry,
+                                           MachineBasicBlock *EntrySucc,
+                                           LinearizedRegion *LRegion) {
+  SmallVector<MachineInstr *, 2> EntryPHIs;
+  collectPHIs(Entry, EntryPHIs);
+
+  for (MachineInstr *PHI : EntryPHIs)
+    splitLoopPHI(*PHI, Entry, EntrySucc, LRegion);
+}
+
+// Split the exit block so that we can insert an end control flow
+// instruction; the new block becomes the region's exit and takes over the
+// edge to the region successor (including its PHI predecessor entries).
+MachineBasicBlock *
+AMDGPUMachineCFGStructurizer::splitExit(LinearizedRegion *LRegion) {
+  auto MRTRegion = LRegion->getRegionMRT();
+  auto Exit = LRegion->getExit();
+  auto MF = Exit->getParent();
+  auto Succ = MRTRegion->getSucc();
+
+  auto NewExit = MF->CreateMachineBasicBlock();
+  auto AfterExitIter = Exit->getIterator();
+  AfterExitIter++;
+  MF->insert(AfterExitIter, NewExit);
+  Exit->removeSuccessor(Succ);
+  Exit->addSuccessor(NewExit);
+  NewExit->addSuccessor(Succ);
+  insertUnconditionalBranch(NewExit, Succ);
+  LRegion->addMBB(NewExit);
+  LRegion->setExit(NewExit);
+
+  DEBUG(dbgs() << "Created new exit block: " << NewExit->getNumber() << "\n");
+
+  // Replace any PHI Predecessors in the successor with NewExit
+  for (auto &II : *Succ) {
+    MachineInstr &Instr = II;
+
+    // If we are past the PHI instructions we are done
+    if (!Instr.isPHI())
+      break;
+
+    int numPreds = getPHINumInputs(Instr);
+    for (int i = 0; i < numPreds; ++i) {
+      auto Pred = getPHIPred(Instr, i);
+      if (Pred == Exit) {
+        setPhiPred(Instr, i, NewExit);
+      }
+    }
+  }
+
+  return NewExit;
+}
+
+
+// Split the block containing I at that point: everything from I to the end
+// moves into a newly created fall-through successor, which also inherits the
+// old block's successors and PHI uses.
+static MachineBasicBlock *split(MachineBasicBlock::iterator I) {
+  // Create the fall-through block.
+  MachineBasicBlock *MBB = (*I).getParent();
+  MachineFunction *MF = MBB->getParent();
+  MachineBasicBlock *SuccMBB = MF->CreateMachineBasicBlock();
+  auto MBBIter = ++(MBB->getIterator());
+  MF->insert(MBBIter, SuccMBB);
+  SuccMBB->transferSuccessorsAndUpdatePHIs(MBB);
+  MBB->addSuccessor(SuccMBB);
+
+  // Splice the code over.
+  SuccMBB->splice(SuccMBB->end(), MBB, I, MBB->end());
+
+  return SuccMBB;
+}
+
+// Split the entry block separating PHI-nodes and the rest of the code.
+// This is needed to insert an initializer for the bb select register
+// in loop regions.
+
+MachineBasicBlock *
+AMDGPUMachineCFGStructurizer::splitEntry(LinearizedRegion *LRegion) {
+  MachineBasicBlock *Entry = LRegion->getEntry();
+  MachineBasicBlock *EntrySucc = split(Entry->getFirstNonPHI());
+  MachineBasicBlock *Exit = LRegion->getExit();
+
+  DEBUG(dbgs() << "Split BB#" << Entry->getNumber() << " to BB#"
+               << Entry->getNumber() << " -> BB#" << EntrySucc->getNumber()
+               << "\n");
+  LRegion->addMBB(EntrySucc);
+
+  // Make the backedge go to Entry Succ
+  if (Exit->isSuccessor(Entry)) {
+    Exit->removeSuccessor(Entry);
+  }
+  Exit->addSuccessor(EntrySucc);
+  MachineInstr &Branch = *(Exit->instr_rbegin());
+  // Retarget the terminator's MBB operands from Entry to EntrySucc.
+  for (auto &UI : Branch.uses()) {
+    if (UI.isMBB() && UI.getMBB() == Entry) {
+      UI.setMBB(EntrySucc);
+    }
+  }
+
+  splitLoopPHIs(Entry, EntrySucc, LRegion);
+
+  return EntrySucc;
+}
+
+// Prepare the region's LinearizedRegion: compute its live-out set and seed
+// the entry block before linearization begins.
+LinearizedRegion *
+AMDGPUMachineCFGStructurizer::initLinearizedRegion(RegionMRT *Region) {
+  LinearizedRegion *LRegion = Region->getLinearizedRegion();
+  LRegion->initLiveOut(Region, MRI, TRI, PHIInfo);
+  LRegion->setEntry(Region->getEntry());
+  return LRegion;
+}
+
+// Disconnect the region's blocks from the old region successor.
+// The predecessor list is snapshotted first: removeSuccessor() also erases
+// the corresponding entry from Exit's predecessor list, which would
+// invalidate a live pred_iterator (and its cached end) mid-loop.
+static void removeOldExitPreds(RegionMRT *Region) {
+  MachineBasicBlock *Exit = Region->getSucc();
+  if (Exit == nullptr) {
+    return;
+  }
+  SmallVector<MachineBasicBlock *, 8> Preds(Exit->pred_begin(),
+                                            Exit->pred_end());
+  for (MachineBasicBlock *Pred : Preds) {
+    if (Region->contains(Pred)) {
+      Pred->removeSuccessor(Exit);
+    }
+  }
+}
+
+// True if any successor of MBB is already in the visited-block set MBBs.
+static bool mbbHasBackEdge(MachineBasicBlock *MBB,
+                           SmallPtrSet<MachineBasicBlock *, 8> &MBBs) {
+  for (MachineBasicBlock *Succ : MBB->successors()) {
+    if (MBBs.count(Succ) != 0) {
+      return true;
+    }
+  }
+  return false;
+}
+
+// Return true if the MRT subtree contains a block with an edge back to a
+// block visited earlier in the (reversed post-order) walk. MBBs accumulates
+// the visited blocks across recursive calls.
+static bool containsNewBackedge(MRT *Tree,
+                                SmallPtrSet<MachineBasicBlock *, 8> &MBBs) {
+  // Need to traverse this in reverse since it is in post order.
+  if (Tree == nullptr)
+    return false;
+
+  if (Tree->isMBB()) {
+    MachineBasicBlock *MBB = Tree->getMBBMRT()->getMBB();
+    MBBs.insert(MBB);
+    if (mbbHasBackEdge(MBB, MBBs)) {
+      return true;
+    }
+  } else {
+    RegionMRT *Region = Tree->getRegionMRT();
+    SetVector<MRT *> *Children = Region->getChildren();
+    for (auto CI = Children->rbegin(), CE = Children->rend(); CI != CE; ++CI) {
+      if (containsNewBackedge(*CI, MBBs))
+        return true;
+    }
+  }
+  return false;
+}
+
+// Convenience overload: scan a whole region starting from an empty set.
+static bool containsNewBackedge(RegionMRT *Region) {
+  SmallPtrSet<MachineBasicBlock *, 8> MBBs;
+  return containsNewBackedge(Region, MBBs);
+}
+
+// Fully linearize a complex (non-sequence) region: create the shared exit
+// block, turn every child (block or inner region) into an if-guarded segment
+// chained through bb-select registers, and wire up the loop backedge if the
+// region contains one. Always returns true (the region was restructured).
+bool AMDGPUMachineCFGStructurizer::structurizeComplexRegion(RegionMRT *Region) {
+  auto *LRegion = initLinearizedRegion(Region);
+  LRegion->setHasLoop(containsNewBackedge(Region));
+  MachineBasicBlock *LastMerge = createLinearizedExitBlock(Region);
+  MachineBasicBlock *CurrentMerge = LastMerge;
+  LRegion->addMBB(LastMerge);
+  LRegion->setExit(LastMerge);
+
+  rewriteRegionExitPHIs(Region, LastMerge, LRegion);
+  removeOldExitPreds(Region);
+
+  DEBUG(PHIInfo.dump(MRI));
+
+  SetVector<MRT *> *Children = Region->getChildren();
+  DEBUG(dbgs() << "===========If Region Start===============\n");
+  if (LRegion->getHasLoop()) {
+    DEBUG(dbgs() << "Has Backedge: Yes\n");
+  } else {
+    DEBUG(dbgs() << "Has Backedge: No\n");
+  }
+
+  unsigned BBSelectRegIn;
+  unsigned BBSelectRegOut;
+  for (auto CI = Children->begin(), CE = Children->end(); CI != CE; ++CI) {
+    DEBUG(dbgs() << "CurrentRegion: \n");
+    DEBUG(LRegion->print(dbgs(), TRI));
+
+    MRT *Child = (*CI);
+
+    if (Child->isRegion()) {
+
+      LinearizedRegion *InnerLRegion =
+          Child->getRegionMRT()->getLinearizedRegion();
+      // We found the block is the exit of an inner region, we need
+      // to put it in the current linearized region.
+
+      DEBUG(dbgs() << "Linearizing region: ");
+      DEBUG(InnerLRegion->print(dbgs(), TRI));
+      DEBUG(dbgs() << "\n");
+
+      MachineBasicBlock *InnerEntry = InnerLRegion->getEntry();
+      if ((&(*(InnerEntry->getParent()->begin()))) == InnerEntry) {
+        // Entry has already been linearized, no need to do this region.
+        unsigned OuterSelect = InnerLRegion->getBBSelectRegOut();
+        unsigned InnerSelectReg =
+            InnerLRegion->getRegionMRT()->getInnerOutputRegister();
+        // Two separate statements; the original joined them with an
+        // accidental comma operator.
+        replaceRegisterWith(InnerSelectReg, OuterSelect);
+        resolvePHIInfos(InnerEntry);
+        if (!InnerLRegion->getExit()->isSuccessor(CurrentMerge))
+          InnerLRegion->getExit()->addSuccessor(CurrentMerge);
+        continue;
+      }
+
+      BBSelectRegOut = Child->getBBSelectRegOut();
+      BBSelectRegIn = Child->getBBSelectRegIn();
+
+      DEBUG(dbgs() << "BBSelectRegIn: " << PrintReg(BBSelectRegIn, TRI)
+                   << "\n");
+      DEBUG(dbgs() << "BBSelectRegOut: " << PrintReg(BBSelectRegOut, TRI)
+                   << "\n");
+
+      MachineBasicBlock *IfEnd = CurrentMerge;
+      CurrentMerge = createIfRegion(CurrentMerge, InnerLRegion, LRegion,
+                                    Child->getRegionMRT()->getEntry(),
+                                    BBSelectRegIn, BBSelectRegOut);
+      TII->convertNonUniformIfRegion(CurrentMerge, IfEnd);
+    } else {
+      MachineBasicBlock *MBB = Child->getMBBMRT()->getMBB();
+      DEBUG(dbgs() << "Linearizing block: " << MBB->getNumber() << "\n");
+
+      if (MBB == getSingleExitNode(*(MBB->getParent()))) {
+        // If this is the exit block then we need to skip to the next.
+        // The "in" register will be transferred to "out" in the next
+        // iteration.
+        continue;
+      }
+
+      BBSelectRegOut = Child->getBBSelectRegOut();
+      BBSelectRegIn = Child->getBBSelectRegIn();
+
+      DEBUG(dbgs() << "BBSelectRegIn: " << PrintReg(BBSelectRegIn, TRI)
+                   << "\n");
+      DEBUG(dbgs() << "BBSelectRegOut: " << PrintReg(BBSelectRegOut, TRI)
+                   << "\n");
+
+      MachineBasicBlock *IfEnd = CurrentMerge;
+      // This is a basic block that is not part of an inner region, we
+      // need to put it in the current linearized region.
+      CurrentMerge = createIfRegion(CurrentMerge, MBB, LRegion, BBSelectRegIn,
+                                    BBSelectRegOut);
+      if (CurrentMerge) {
+        TII->convertNonUniformIfRegion(CurrentMerge, IfEnd);
+      }
+
+      DEBUG(PHIInfo.dump(MRI));
+    }
+  }
+
+  LRegion->removeFalseRegisterKills(MRI);
+
+  if (LRegion->getHasLoop()) {
+    MachineBasicBlock *NewSucc = splitEntry(LRegion);
+    if (isFunctionEntryBlock(LRegion->getEntry())) {
+      resolvePHIInfos(LRegion->getEntry());
+    }
+    const DebugLoc &DL = NewSucc->findDebugLoc(NewSucc->getFirstNonPHI());
+    unsigned InReg = LRegion->getBBSelectRegIn();
+    unsigned InnerSelectReg =
+        MRI->createVirtualRegister(MRI->getRegClass(InReg));
+    unsigned NewInReg = MRI->createVirtualRegister(MRI->getRegClass(InReg));
+    TII->materializeImmediate(*(LRegion->getEntry()),
+                              LRegion->getEntry()->getFirstTerminator(), DL,
+                              NewInReg, Region->getEntry()->getNumber());
+    // Need to be careful about updating the registers inside the region.
+    LRegion->replaceRegisterInsideRegion(InReg, InnerSelectReg, false, MRI);
+    DEBUG(dbgs() << "Loop BBSelect Merge PHI:\n");
+    insertMergePHI(LRegion->getEntry(), LRegion->getExit(), NewSucc,
+                   InnerSelectReg, NewInReg,
+                   LRegion->getRegionMRT()->getInnerOutputRegister());
+    splitExit(LRegion);
+    TII->convertNonUniformLoopRegion(NewSucc, LastMerge);
+  }
+
+  if (Region->isRoot()) {
+    TII->insertReturn(*LastMerge);
+  }
+
+  DEBUG(Region->getEntry()->getParent()->dump());
+  DEBUG(LRegion->print(dbgs(), TRI));
+  DEBUG(PHIInfo.dump(MRI));
+
+  DEBUG(dbgs() << "===========If Region End===============\n");
+
+  Region->setLinearizedRegion(LRegion);
+  return true;
+}
+
+// Dispatch on region shape: sequences only need exit fixup, everything else
+// is fully linearized.
+// NOTE(review): the "false &&" permanently disables the simple-if transform,
+// and the result of structurizeComplexRegion() is discarded, so this
+// function can only report a change on the disabled path -- verify this is
+// intentional before relying on the return value.
+bool AMDGPUMachineCFGStructurizer::structurizeRegion(RegionMRT *Region) {
+  if (false && regionIsSimpleIf(Region)) {
+    transformSimpleIfRegion(Region);
+    return true;
+  } else if (regionIsSequence(Region)) {
+    fixupRegionExits(Region);
+    return false;
+  } else {
+    structurizeComplexRegion(Region);
+  }
+  return false;
+}
+
+// NOTE(review): leftover debugging scaffold -- the "|| true" below makes the
+// counter irrelevant and every region is always structurized.
+static int structurize_once = 0;
+
+// Recursively structurize the region tree bottom-up (children first), so
+// inner regions are linearized before their parent.
+bool AMDGPUMachineCFGStructurizer::structurizeRegions(RegionMRT *Region,
+                                                bool isTopRegion) {
+  bool Changed = false;
+
+  auto Children = Region->getChildren();
+  for (auto CI : *Children) {
+    if (CI->isRegion()) {
+      Changed |= structurizeRegions(CI->getRegionMRT(), false);
+    }
+  }
+
+  if (structurize_once < 2 || true) {
+    Changed |= structurizeRegion(Region);
+    structurize_once++;
+  }
+  return Changed;
+}
+
+// Record each block's fall-through successor (if any) before the CFG is
+// restructured; rewriteCodeBBTerminator consults this map later.
+void AMDGPUMachineCFGStructurizer::initFallthroughMap(MachineFunction &MF) {
+  DEBUG(dbgs() << "Fallthrough Map:\n");
+  for (auto &MBBI : MF) {
+    MachineBasicBlock *MBB = MBBI.getFallThrough();
+    if (MBB != nullptr) {
+      DEBUG(dbgs() << "Fallthrough: " << MBBI.getNumber() << " -> "
+                   << MBB->getNumber() << "\n");
+    }
+    FallthroughMap[&MBBI] = MBB;
+  }
+}
+
+// Create the LinearizedRegion object for Region and register the outgoing
+// bb-select register as a live-out.
+// NOTE(review): ownership of the raw 'new' appears to transfer to Region via
+// setLinearizedRegion -- confirm it is released with the MRT.
+void AMDGPUMachineCFGStructurizer::createLinearizedRegion(RegionMRT *Region,
+                                                    unsigned SelectOut) {
+  LinearizedRegion *LRegion = new LinearizedRegion();
+  if (SelectOut) {
+    LRegion->addLiveOut(SelectOut);
+    DEBUG(dbgs() << "Add LiveOut (BBSelect): " << PrintReg(SelectOut, TRI)
+                 << "\n");
+  }
+  LRegion->setRegionMRT(Region);
+  Region->setLinearizedRegion(LRegion);
+  LRegion->setParent(Region->getParent()
+                         ? Region->getParent()->getLinearizedRegion()
+                         : nullptr);
+}
+
+// Walk the MRT assigning each node its incoming and outgoing bb-select
+// registers, creating the LinearizedRegion objects for region nodes.
+// Returns the select register feeding the node. The parameter was renamed
+// from "MRT" to Node: the old name shadowed the type MRT itself.
+unsigned
+AMDGPUMachineCFGStructurizer::initializeSelectRegisters(MRT *Node, unsigned SelectOut,
+                                                  MachineRegisterInfo *MRI,
+                                                  const SIInstrInfo *TII) {
+  if (Node->isRegion()) {
+    RegionMRT *Region = Node->getRegionMRT();
+    Region->setBBSelectRegOut(SelectOut);
+    unsigned InnerSelectOut = createBBSelectReg(TII, MRI);
+
+    // Fixme: Move linearization creation to the original spot
+    createLinearizedRegion(Region, SelectOut);
+
+    for (auto CI = Region->getChildren()->begin(),
+              CE = Region->getChildren()->end();
+         CI != CE; ++CI) {
+      InnerSelectOut =
+          initializeSelectRegisters((*CI), InnerSelectOut, MRI, TII);
+    }
+    Node->setBBSelectRegIn(InnerSelectOut);
+    return InnerSelectOut;
+  } else {
+    Node->setBBSelectRegOut(SelectOut);
+    unsigned NewSelectIn = createBBSelectReg(TII, MRI);
+    Node->setBBSelectRegIn(NewSelectIn);
+    return NewSelectIn;
+  }
+}
+
+// Debug-time verification that every PHI input operand in the function is a
+// register (PHI operand layout: [dest, (reg, mbb) pairs...]).
+static void checkRegOnlyPHIInputs(MachineFunction &MF) {
+  for (auto &MBBI : MF) {
+    for (MachineBasicBlock::instr_iterator I = MBBI.instr_begin(),
+                                           E = MBBI.instr_end();
+         I != E; ++I) {
+      MachineInstr &Instr = *I;
+      if (Instr.isPHI()) {
+        int numPreds = getPHINumInputs(Instr);
+        for (int i = 0; i < numPreds; ++i) {
+          assert(Instr.getOperand(i * 2 + 1).isReg() &&
+                 "PHI Operand not a register");
+        }
+      }
+    }
+  }
+}
+
+
+// Register the pass with the legacy PassManager; the MachineRegionInfo
+// analysis is required by runOnMachineFunction below.
+INITIALIZE_PASS_BEGIN(AMDGPUMachineCFGStructurizer, "amdgpu-machine-cfg-structurizer",
+                      "AMDGPU Machine CFG Structurizer", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineRegionInfoPass)
+INITIALIZE_PASS_END(AMDGPUMachineCFGStructurizer, "amdgpu-machine-cfg-structurizer",
+                    "AMDGPU Machine CFG Structurizer", false, false)
+
+char AMDGPUMachineCFGStructurizerID = AMDGPUMachineCFGStructurizer::ID;
+
+
+// Pass entry point: build the MRT over the machine region info, assign the
+// bb-select registers, and structurize every region bottom-up.
+bool AMDGPUMachineCFGStructurizer::runOnMachineFunction(MachineFunction &MF) {
+  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
+  // Assign the class member rather than declaring a local of the same name:
+  // the helper methods (createIfBlock, rewriteCodeBBTerminator, ...) all
+  // dereference this->TII, which a shadowing local would leave unset here.
+  TII = ST.getInstrInfo();
+  TRI = ST.getRegisterInfo();
+  MRI = &(MF.getRegInfo());
+  initFallthroughMap(MF);
+
+  checkRegOnlyPHIInputs(MF);
+  DEBUG(dbgs() << "----STRUCTURIZER START----\n");
+  DEBUG(MF.dump());
+
+  Regions = &(getAnalysis<MachineRegionInfoPass>().getRegionInfo());
+  DEBUG(Regions->dump());
+
+  RegionMRT *RTree = MRT::buildMRT(MF, Regions, TII, MRI);
+  setRegionMRT(RTree);
+  initializeSelectRegisters(RTree, 0, MRI, TII);
+  DEBUG(RTree->dump(TRI));
+  bool result = structurizeRegions(RTree, true);
+  delete RTree;
+  DEBUG(dbgs() << "----STRUCTURIZER END----\n");
+  // Rebuild the fallthrough map for the restructured CFG.
+  initFallthroughMap(MF);
+  return result;
+}
+
+// Factory used by the AMDGPU target pass pipeline to create this pass.
+FunctionPass *llvm::createAMDGPUMachineCFGStructurizerPass() {
+  return new AMDGPUMachineCFGStructurizer();
+}
diff --git a/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index 36dcc699d4ea..e40f39557747 100644
--- a/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -397,14 +397,17 @@ static Value* GEPToVectorIndex(GetElementPtrInst *GEP) {
// instructions.
static bool canVectorizeInst(Instruction *Inst, User *User) {
switch (Inst->getOpcode()) {
- case Instruction::Load:
+ case Instruction::Load: {
+ LoadInst *LI = cast<LoadInst>(Inst);
+ return !LI->isVolatile();
+ }
case Instruction::BitCast:
case Instruction::AddrSpaceCast:
return true;
case Instruction::Store: {
// Must be the stored pointer operand, not a stored value.
StoreInst *SI = cast<StoreInst>(Inst);
- return SI->getPointerOperand() == User;
+ return (SI->getPointerOperand() == User) && !SI->isVolatile();
}
default:
return false;
diff --git a/lib/Target/AMDGPU/AMDGPUSubtarget.cpp b/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
index 972c28579f7a..6e301b4ad527 100644
--- a/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
+++ b/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
@@ -125,6 +125,9 @@ AMDGPUSubtarget::AMDGPUSubtarget(const Triple &TT, StringRef GPU, StringRef FS,
HasSDWA(false),
HasDPP(false),
FlatAddressSpace(false),
+ FlatInstOffsets(false),
+ FlatGlobalInsts(false),
+ FlatScratchInsts(false),
R600ALUInst(false),
CaymanISA(false),
diff --git a/lib/Target/AMDGPU/AMDGPUSubtarget.h b/lib/Target/AMDGPU/AMDGPUSubtarget.h
index a5cda817ac11..bed7d326b3dd 100644
--- a/lib/Target/AMDGPU/AMDGPUSubtarget.h
+++ b/lib/Target/AMDGPU/AMDGPUSubtarget.h
@@ -145,6 +145,9 @@ protected:
bool HasSDWA;
bool HasDPP;
bool FlatAddressSpace;
+ bool FlatInstOffsets;
+ bool FlatGlobalInsts;
+ bool FlatScratchInsts;
bool R600ALUInst;
bool CaymanISA;
bool CFALUBug;
@@ -380,6 +383,18 @@ public:
return FlatAddressSpace;
}
+ bool hasFlatInstOffsets() const {
+ return FlatInstOffsets;
+ }
+
+ bool hasFlatGlobalInsts() const {
+ return FlatGlobalInsts;
+ }
+
+ bool hasFlatScratchInsts() const {
+ return FlatScratchInsts;
+ }
+
bool isMesaKernel(const MachineFunction &MF) const {
return isMesa3DOS() && !AMDGPU::isShader(MF.getFunction()->getCallingConv());
}
diff --git a/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index cd5bad04d0b3..386a88b0520f 100644
--- a/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -118,6 +118,13 @@ static cl::opt<bool> EnableSIInsertWaitcntsPass(
cl::desc("Use new waitcnt insertion pass"),
cl::init(false));
+// Option to run late CFG structurizer
+static cl::opt<bool> LateCFGStructurize(
+ "amdgpu-late-structurize",
+ cl::desc("Enable late CFG structurization"),
+ cl::init(false),
+ cl::Hidden);
+
extern "C" void LLVMInitializeAMDGPUTarget() {
// Register the target
RegisterTargetMachine<R600TargetMachine> X(getTheAMDGPUTarget());
@@ -702,11 +709,15 @@ bool GCNPassConfig::addPreISel() {
// Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
// regions formed by them.
addPass(&AMDGPUUnifyDivergentExitNodesID);
- addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions
+ if (!LateCFGStructurize) {
+ addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions
+ }
addPass(createSinkingPass());
addPass(createSITypeRewriter());
addPass(createAMDGPUAnnotateUniformValues());
- addPass(createSIAnnotateControlFlowPass());
+ if (!LateCFGStructurize) {
+ addPass(createSIAnnotateControlFlowPass());
+ }
return false;
}
@@ -770,6 +781,9 @@ bool GCNPassConfig::addGlobalInstructionSelect() {
#endif
void GCNPassConfig::addPreRegAlloc() {
+ if (LateCFGStructurize) {
+ addPass(createAMDGPUMachineCFGStructurizerPass());
+ }
addPass(createSIWholeQuadModePass());
}
diff --git a/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index c9482c37ec80..beafebc1284a 100644
--- a/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -363,13 +363,22 @@ int AMDGPUTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
unsigned Index) {
switch (Opcode) {
case Instruction::ExtractElement:
- case Instruction::InsertElement:
+ case Instruction::InsertElement: {
+ unsigned EltSize
+ = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
+ if (EltSize < 32) {
+ if (EltSize == 16 && Index == 0 && ST->has16BitInsts())
+ return 0;
+ return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
+ }
+
// Extracts are just reads of a subregister, so are free. Inserts are
// considered free because we don't want to have any cost for scalarizing
// operations, and we don't have to copy into a different register class.
// Dynamic indexing isn't free and is best avoided.
return Index == ~0u ? 2 : 0;
+ }
default:
return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
}
@@ -479,3 +488,26 @@ bool AMDGPUTTIImpl::isSourceOfDivergence(const Value *V) const {
return false;
}
+
+unsigned AMDGPUTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
+ Type *SubTp) {
+ if (ST->hasVOP3PInsts()) {
+ VectorType *VT = cast<VectorType>(Tp);
+ if (VT->getNumElements() == 2 &&
+ DL.getTypeSizeInBits(VT->getElementType()) == 16) {
+ // With op_sel VOP3P instructions freely can access the low half or high
+ // half of a register, so any swizzle is free.
+
+ switch (Kind) {
+ case TTI::SK_Broadcast:
+ case TTI::SK_Reverse:
+ case TTI::SK_PermuteSingleSrc:
+ return 0;
+ default:
+ break;
+ }
+ }
+ }
+
+ return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
+}
diff --git a/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h b/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
index 71d6306bc1a5..e0024e21e82b 100644
--- a/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
+++ b/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
@@ -114,6 +114,9 @@ public:
}
unsigned getVectorSplitCost() { return 0; }
+
+ unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
+ Type *SubTp);
};
} // end namespace llvm
diff --git a/lib/Target/AMDGPU/CMakeLists.txt b/lib/Target/AMDGPU/CMakeLists.txt
index 7c0ef4aeac3c..cafce0164fa9 100644
--- a/lib/Target/AMDGPU/CMakeLists.txt
+++ b/lib/Target/AMDGPU/CMakeLists.txt
@@ -48,6 +48,7 @@ add_llvm_target(AMDGPUCodeGen
AMDGPUISelDAGToDAG.cpp
AMDGPULowerIntrinsics.cpp
AMDGPUMCInstLower.cpp
+ AMDGPUMachineCFGStructurizer.cpp
AMDGPUMachineFunction.cpp
AMDGPUUnifyMetadata.cpp
AMDGPUOpenCLImageTypeLoweringPass.cpp
diff --git a/lib/Target/AMDGPU/FLATInstructions.td b/lib/Target/AMDGPU/FLATInstructions.td
index b0ac0e689a0b..8ba9efd42c70 100644
--- a/lib/Target/AMDGPU/FLATInstructions.td
+++ b/lib/Target/AMDGPU/FLATInstructions.td
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
-def FLATAtomic : ComplexPattern<i64, 3, "SelectFlat">;
+def FLATAtomic : ComplexPattern<i64, 2, "SelectFlat">;
//===----------------------------------------------------------------------===//
// FLAT classes
@@ -62,7 +62,9 @@ class FLAT_Real <bits<7> op, FLAT_Pseudo ps> :
bits<8> vdst;
bits<1> slc;
bits<1> glc;
- bits<1> tfe;
+
+ // We don't use tfe right now, and it was removed in gfx9.
+ bits<1> tfe = 0;
// 15-0 is reserved.
let Inst{16} = !if(ps.has_glc, glc, ps.glcValue);
@@ -79,8 +81,8 @@ class FLAT_Real <bits<7> op, FLAT_Pseudo ps> :
class FLAT_Load_Pseudo <string opName, RegisterClass regClass> : FLAT_Pseudo<
opName,
(outs regClass:$vdst),
- (ins VReg_64:$vaddr, GLC:$glc, slc:$slc, tfe:$tfe),
- " $vdst, $vaddr$glc$slc$tfe"> {
+ (ins VReg_64:$vaddr, GLC:$glc, slc:$slc),
+ " $vdst, $vaddr$glc$slc"> {
let has_data = 0;
let mayLoad = 1;
}
@@ -88,8 +90,8 @@ class FLAT_Load_Pseudo <string opName, RegisterClass regClass> : FLAT_Pseudo<
class FLAT_Store_Pseudo <string opName, RegisterClass vdataClass> : FLAT_Pseudo<
opName,
(outs),
- (ins VReg_64:$vaddr, vdataClass:$vdata, GLC:$glc, slc:$slc, tfe:$tfe),
- " $vaddr, $vdata$glc$slc$tfe"> {
+ (ins VReg_64:$vaddr, vdataClass:$vdata, GLC:$glc, slc:$slc),
+ " $vaddr, $vdata$glc$slc"> {
let mayLoad = 0;
let mayStore = 1;
let has_vdst = 0;
@@ -105,8 +107,8 @@ multiclass FLAT_Atomic_Pseudo<
def "" : FLAT_Pseudo <opName,
(outs),
- (ins VReg_64:$vaddr, data_rc:$vdata, slc:$slc, tfe:$tfe),
- " $vaddr, $vdata$slc$tfe",
+ (ins VReg_64:$vaddr, data_rc:$vdata, slc:$slc),
+ " $vaddr, $vdata$slc",
[]>,
AtomicNoRet <NAME, 0> {
let mayLoad = 1;
@@ -119,10 +121,10 @@ multiclass FLAT_Atomic_Pseudo<
def _RTN : FLAT_Pseudo <opName,
(outs vdst_rc:$vdst),
- (ins VReg_64:$vaddr, data_rc:$vdata, slc:$slc, tfe:$tfe),
- " $vdst, $vaddr, $vdata glc$slc$tfe",
+ (ins VReg_64:$vaddr, data_rc:$vdata, slc:$slc),
+ " $vdst, $vaddr, $vdata glc$slc",
[(set vt:$vdst,
- (atomic (FLATAtomic i64:$vaddr, i1:$slc, i1:$tfe), data_vt:$vdata))]>,
+ (atomic (FLATAtomic i64:$vaddr, i1:$slc), data_vt:$vdata))]>,
AtomicNoRet <NAME, 1> {
let mayLoad = 1;
let mayStore = 1;
@@ -311,30 +313,30 @@ def flat_truncstorei16 : flat_st <truncstorei16>;
// Patterns for global loads with no offset.
class FlatLoadPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : Pat <
(vt (node i64:$addr)),
- (inst $addr, 0, 0, 0)
+ (inst $addr, 0, 0)
>;
class FlatLoadAtomicPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : Pat <
(vt (node i64:$addr)),
- (inst $addr, 1, 0, 0)
+ (inst $addr, 1, 0)
>;
class FlatStorePat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : Pat <
(node vt:$data, i64:$addr),
- (inst $addr, $data, 0, 0, 0)
+ (inst $addr, $data, 0, 0)
>;
class FlatStoreAtomicPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : Pat <
// atomic store follows atomic binop convention so the address comes
// first.
(node i64:$addr, vt:$data),
- (inst $addr, $data, 1, 0, 0)
+ (inst $addr, $data, 1, 0)
>;
class FlatAtomicPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt,
ValueType data_vt = vt> : Pat <
(vt (node i64:$addr, data_vt:$data)),
- (inst $addr, $data, 0, 0)
+ (inst $addr, $data, 0)
>;
let Predicates = [isCIVI] in {
diff --git a/lib/Target/AMDGPU/GCNRegPressure.cpp b/lib/Target/AMDGPU/GCNRegPressure.cpp
index bf16a8216001..8066428fe44a 100644
--- a/lib/Target/AMDGPU/GCNRegPressure.cpp
+++ b/lib/Target/AMDGPU/GCNRegPressure.cpp
@@ -27,7 +27,7 @@ void llvm::printLivesAt(SlotIndex SI,
unsigned Num = 0;
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
const unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
- if (MRI.reg_nodbg_empty(Reg))
+ if (!LIS.hasInterval(Reg))
continue;
const auto &LI = LIS.getInterval(Reg);
if (LI.hasSubRanges()) {
@@ -131,13 +131,13 @@ bool GCNRegPressure::less(const SISubtarget &ST,
const GCNRegPressure& O,
unsigned MaxOccupancy) const {
const auto SGPROcc = std::min(MaxOccupancy,
- ST.getOccupancyWithNumSGPRs(getSGRPNum()));
+ ST.getOccupancyWithNumSGPRs(getSGPRNum()));
const auto VGPROcc = std::min(MaxOccupancy,
- ST.getOccupancyWithNumVGPRs(getVGRPNum()));
+ ST.getOccupancyWithNumVGPRs(getVGPRNum()));
const auto OtherSGPROcc = std::min(MaxOccupancy,
- ST.getOccupancyWithNumSGPRs(O.getSGRPNum()));
+ ST.getOccupancyWithNumSGPRs(O.getSGPRNum()));
const auto OtherVGPROcc = std::min(MaxOccupancy,
- ST.getOccupancyWithNumVGPRs(O.getVGRPNum()));
+ ST.getOccupancyWithNumVGPRs(O.getVGPRNum()));
const auto Occ = std::min(SGPROcc, VGPROcc);
const auto OtherOcc = std::min(OtherSGPROcc, OtherVGPROcc);
@@ -167,17 +167,17 @@ bool GCNRegPressure::less(const SISubtarget &ST,
return VW < OtherVW;
}
}
- return SGPRImportant ? (getSGRPNum() < O.getSGRPNum()):
- (getVGRPNum() < O.getVGRPNum());
+ return SGPRImportant ? (getSGPRNum() < O.getSGPRNum()):
+ (getVGPRNum() < O.getVGPRNum());
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD
void GCNRegPressure::print(raw_ostream &OS, const SISubtarget *ST) const {
- OS << "VGPRs: " << getVGRPNum();
- if (ST) OS << "(O" << ST->getOccupancyWithNumVGPRs(getVGRPNum()) << ')';
- OS << ", SGPRs: " << getSGRPNum();
- if (ST) OS << "(O" << ST->getOccupancyWithNumSGPRs(getSGRPNum()) << ')';
+ OS << "VGPRs: " << getVGPRNum();
+ if (ST) OS << "(O" << ST->getOccupancyWithNumVGPRs(getVGPRNum()) << ')';
+ OS << ", SGPRs: " << getSGPRNum();
+ if (ST) OS << "(O" << ST->getOccupancyWithNumSGPRs(getSGPRNum()) << ')';
OS << ", LVGPR WT: " << getVGPRTuplesWeight()
<< ", LSGPR WT: " << getSGPRTuplesWeight();
if (ST) OS << " -> Occ: " << getOccupancy(*ST);
@@ -192,7 +192,6 @@ LaneBitmask llvm::getLiveLaneMask(unsigned Reg,
SlotIndex SI,
const LiveIntervals &LIS,
const MachineRegisterInfo &MRI) {
- assert(!MRI.reg_nodbg_empty(Reg));
LaneBitmask LiveMask;
const auto &LI = LIS.getInterval(Reg);
if (LI.hasSubRanges()) {
@@ -214,7 +213,7 @@ GCNRPTracker::LiveRegSet llvm::getLiveRegs(SlotIndex SI,
GCNRPTracker::LiveRegSet LiveRegs;
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
auto Reg = TargetRegisterInfo::index2VirtReg(I);
- if (MRI.reg_nodbg_empty(Reg))
+ if (!LIS.hasInterval(Reg))
continue;
auto LiveMask = getLiveLaneMask(Reg, SI, LIS, MRI);
if (LiveMask.any())
@@ -223,13 +222,7 @@ GCNRPTracker::LiveRegSet llvm::getLiveRegs(SlotIndex SI,
return LiveRegs;
}
-void GCNUpwardRPTracker::reset(const MachineInstr &MI) {
- MRI = &MI.getParent()->getParent()->getRegInfo();
- LiveRegs = getLiveRegsAfter(MI, LIS);
- MaxPressure = CurPressure = getRegPressure(*MRI, LiveRegs);
-}
-
-LaneBitmask GCNUpwardRPTracker::getDefRegMask(const MachineOperand &MO) const {
+LaneBitmask GCNRPTracker::getDefRegMask(const MachineOperand &MO) const {
assert(MO.isDef() && MO.isReg() &&
TargetRegisterInfo::isVirtualRegister(MO.getReg()));
@@ -241,7 +234,7 @@ LaneBitmask GCNUpwardRPTracker::getDefRegMask(const MachineOperand &MO) const {
MRI->getTargetRegisterInfo()->getSubRegIndexLaneMask(MO.getSubReg());
}
-LaneBitmask GCNUpwardRPTracker::getUsedRegMask(const MachineOperand &MO) const {
+LaneBitmask GCNRPTracker::getUsedRegMask(const MachineOperand &MO) const {
assert(MO.isUse() && MO.isReg() &&
TargetRegisterInfo::isVirtualRegister(MO.getReg()));
@@ -259,6 +252,18 @@ LaneBitmask GCNUpwardRPTracker::getUsedRegMask(const MachineOperand &MO) const {
return getLiveLaneMask(MO.getReg(), SI, LIS, *MRI);
}
+void GCNUpwardRPTracker::reset(const MachineInstr &MI,
+ const LiveRegSet *LiveRegsCopy) {
+ MRI = &MI.getParent()->getParent()->getRegInfo();
+ if (LiveRegsCopy) {
+ if (&LiveRegs != LiveRegsCopy)
+ LiveRegs = *LiveRegsCopy;
+ } else {
+ LiveRegs = getLiveRegsAfter(MI, LIS);
+ }
+ MaxPressure = CurPressure = getRegPressure(*MRI, LiveRegs);
+}
+
void GCNUpwardRPTracker::recede(const MachineInstr &MI) {
assert(MRI && "call reset first");
@@ -297,6 +302,100 @@ void GCNUpwardRPTracker::recede(const MachineInstr &MI) {
MaxPressure = max(MaxPressure, CurPressure);
}
+bool GCNDownwardRPTracker::reset(const MachineInstr &MI,
+ const LiveRegSet *LiveRegsCopy) {
+ MRI = &MI.getParent()->getParent()->getRegInfo();
+ LastTrackedMI = nullptr;
+ MBBEnd = MI.getParent()->end();
+ NextMI = &MI;
+ NextMI = skipDebugInstructionsForward(NextMI, MBBEnd);
+ if (NextMI == MBBEnd)
+ return false;
+ if (LiveRegsCopy) {
+ if (&LiveRegs != LiveRegsCopy)
+ LiveRegs = *LiveRegsCopy;
+ } else {
+ LiveRegs = getLiveRegsBefore(*NextMI, LIS);
+ }
+ MaxPressure = CurPressure = getRegPressure(*MRI, LiveRegs);
+ return true;
+}
+
+bool GCNDownwardRPTracker::advanceBeforeNext() {
+ assert(MRI && "call reset first");
+
+ NextMI = skipDebugInstructionsForward(NextMI, MBBEnd);
+ if (NextMI == MBBEnd)
+ return false;
+
+ SlotIndex SI = LIS.getInstructionIndex(*NextMI).getBaseIndex();
+ assert(SI.isValid());
+
+ // Remove dead registers or mask bits.
+ for (auto &It : LiveRegs) {
+ const LiveInterval &LI = LIS.getInterval(It.first);
+ if (LI.hasSubRanges()) {
+ for (const auto &S : LI.subranges()) {
+ if (!S.liveAt(SI)) {
+ auto PrevMask = It.second;
+ It.second &= ~S.LaneMask;
+ CurPressure.inc(It.first, PrevMask, It.second, *MRI);
+ }
+ }
+ } else if (!LI.liveAt(SI)) {
+ auto PrevMask = It.second;
+ It.second = LaneBitmask::getNone();
+ CurPressure.inc(It.first, PrevMask, It.second, *MRI);
+ }
+ if (It.second.none())
+ LiveRegs.erase(It.first);
+ }
+
+ MaxPressure = max(MaxPressure, CurPressure);
+
+ return true;
+}
+
+void GCNDownwardRPTracker::advanceToNext() {
+ LastTrackedMI = &*NextMI++;
+
+ // Add new registers or mask bits.
+ for (const auto &MO : LastTrackedMI->defs()) {
+ if (!MO.isReg())
+ continue;
+ unsigned Reg = MO.getReg();
+ if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ continue;
+ auto &LiveMask = LiveRegs[Reg];
+ auto PrevMask = LiveMask;
+ LiveMask |= getDefRegMask(MO);
+ CurPressure.inc(Reg, PrevMask, LiveMask, *MRI);
+ }
+
+ MaxPressure = max(MaxPressure, CurPressure);
+}
+
+bool GCNDownwardRPTracker::advance() {
+  // If reset was just called, the live set is already up to date.
+ if ((NextMI == MBBEnd) || (LastTrackedMI && !advanceBeforeNext()))
+ return false;
+ advanceToNext();
+ return true;
+}
+
+bool GCNDownwardRPTracker::advance(MachineBasicBlock::const_iterator End) {
+ while (NextMI != End)
+ if (!advance()) return false;
+ return true;
+}
+
+bool GCNDownwardRPTracker::advance(MachineBasicBlock::const_iterator Begin,
+ MachineBasicBlock::const_iterator End,
+ const LiveRegSet *LiveRegsCopy) {
+ reset(*Begin, LiveRegsCopy);
+ return advance(End);
+}
+
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD
static void reportMismatch(const GCNRPTracker::LiveRegSet &LISLR,
@@ -352,4 +451,16 @@ bool GCNUpwardRPTracker::isValid() const {
return true;
}
+void GCNRPTracker::printLiveRegs(raw_ostream &OS, const LiveRegSet& LiveRegs,
+ const MachineRegisterInfo &MRI) {
+ const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
+ for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
+ unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
+ auto It = LiveRegs.find(Reg);
+ if (It != LiveRegs.end() && It->second.any())
+ OS << ' ' << PrintVRegOrUnit(Reg, TRI) << ':'
+ << PrintLaneMask(It->second);
+ }
+ OS << '\n';
+}
#endif
diff --git a/lib/Target/AMDGPU/GCNRegPressure.h b/lib/Target/AMDGPU/GCNRegPressure.h
index 82e76a7bfddc..9875ca6a6d16 100644
--- a/lib/Target/AMDGPU/GCNRegPressure.h
+++ b/lib/Target/AMDGPU/GCNRegPressure.h
@@ -33,19 +33,19 @@ struct GCNRegPressure {
clear();
}
- bool empty() const { return getSGRPNum() == 0 && getVGRPNum() == 0; }
+ bool empty() const { return getSGPRNum() == 0 && getVGPRNum() == 0; }
void clear() { std::fill(&Value[0], &Value[TOTAL_KINDS], 0); }
- unsigned getSGRPNum() const { return Value[SGPR32]; }
- unsigned getVGRPNum() const { return Value[VGPR32]; }
+ unsigned getSGPRNum() const { return Value[SGPR32]; }
+ unsigned getVGPRNum() const { return Value[VGPR32]; }
unsigned getVGPRTuplesWeight() const { return Value[VGPR_TUPLE]; }
unsigned getSGPRTuplesWeight() const { return Value[SGPR_TUPLE]; }
unsigned getOccupancy(const SISubtarget &ST) const {
- return std::min(ST.getOccupancyWithNumSGPRs(getSGRPNum()),
- ST.getOccupancyWithNumVGPRs(getVGRPNum()));
+ return std::min(ST.getOccupancyWithNumSGPRs(getSGPRNum()),
+ ST.getOccupancyWithNumVGPRs(getVGPRNum()));
}
void inc(unsigned Reg,
@@ -92,16 +92,21 @@ public:
typedef DenseMap<unsigned, LaneBitmask> LiveRegSet;
protected:
+ const LiveIntervals &LIS;
LiveRegSet LiveRegs;
GCNRegPressure CurPressure, MaxPressure;
const MachineInstr *LastTrackedMI = nullptr;
mutable const MachineRegisterInfo *MRI = nullptr;
- GCNRPTracker() {}
+ GCNRPTracker(const LiveIntervals &LIS_) : LIS(LIS_) {}
+ LaneBitmask getDefRegMask(const MachineOperand &MO) const;
+ LaneBitmask getUsedRegMask(const MachineOperand &MO) const;
public:
// live regs for the current state
const decltype(LiveRegs) &getLiveRegs() const { return LiveRegs; }
const MachineInstr *getLastTrackedMI() const { return LastTrackedMI; }
+ void clearMaxPressure() { MaxPressure.clear(); }
+
// returns MaxPressure, resetting it
decltype(MaxPressure) moveMaxPressure() {
auto Res = MaxPressure;
@@ -111,17 +116,16 @@ public:
decltype(LiveRegs) moveLiveRegs() {
return std::move(LiveRegs);
}
+ static void printLiveRegs(raw_ostream &OS, const LiveRegSet& LiveRegs,
+ const MachineRegisterInfo &MRI);
};
class GCNUpwardRPTracker : public GCNRPTracker {
- const LiveIntervals &LIS;
- LaneBitmask getDefRegMask(const MachineOperand &MO) const;
- LaneBitmask getUsedRegMask(const MachineOperand &MO) const;
public:
- GCNUpwardRPTracker(const LiveIntervals &LIS_) : LIS(LIS_) {}
+ GCNUpwardRPTracker(const LiveIntervals &LIS_) : GCNRPTracker(LIS_) {}
// reset tracker to the point just below MI
// filling live regs upon this point using LIS
- void reset(const MachineInstr &MI);
+ void reset(const MachineInstr &MI, const LiveRegSet *LiveRegs = nullptr);
// move to the state just above the MI
void recede(const MachineInstr &MI);
@@ -131,6 +135,41 @@ public:
bool isValid() const;
};
+class GCNDownwardRPTracker : public GCNRPTracker {
+ // Last position of reset or advanceBeforeNext
+ MachineBasicBlock::const_iterator NextMI;
+
+ MachineBasicBlock::const_iterator MBBEnd;
+
+public:
+ GCNDownwardRPTracker(const LiveIntervals &LIS_) : GCNRPTracker(LIS_) {}
+
+ const MachineBasicBlock::const_iterator getNext() const { return NextMI; }
+
+  // Reset the tracker to the point just before MI, filling the live
+  // register set at this point using LIS.
+  // Returns false if the block contains only debug values.
+ bool reset(const MachineInstr &MI, const LiveRegSet *LiveRegs = nullptr);
+
+ // Move to the state right before the next MI. Returns false if reached
+ // end of the block.
+ bool advanceBeforeNext();
+
+ // Move to the state at the MI, advanceBeforeNext has to be called first.
+ void advanceToNext();
+
+ // Move to the state at the next MI. Returns false if reached end of block.
+ bool advance();
+
+ // Advance instructions until before End.
+ bool advance(MachineBasicBlock::const_iterator End);
+
+ // Reset to Begin and advance to End.
+ bool advance(MachineBasicBlock::const_iterator Begin,
+ MachineBasicBlock::const_iterator End,
+ const LiveRegSet *LiveRegsCopy = nullptr);
+};
+
LaneBitmask getLiveLaneMask(unsigned Reg,
SlotIndex SI,
const LiveIntervals &LIS,
diff --git a/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index 630442625aa3..8ec46665daf5 100644
--- a/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -316,46 +316,57 @@ GCNScheduleDAGMILive::GCNScheduleDAGMILive(MachineSchedContext *C,
MFI(*MF.getInfo<SIMachineFunctionInfo>()),
StartingOccupancy(ST.getOccupancyWithLocalMemSize(MFI.getLDSSize(),
*MF.getFunction())),
- MinOccupancy(StartingOccupancy), Stage(0) {
+ MinOccupancy(StartingOccupancy), Stage(0), RegionIdx(0) {
DEBUG(dbgs() << "Starting occupancy is " << StartingOccupancy << ".\n");
}
void GCNScheduleDAGMILive::schedule() {
+ if (Stage == 0) {
+ // Just record regions at the first pass.
+ Regions.push_back(std::make_pair(RegionBegin, RegionEnd));
+ return;
+ }
+
std::vector<MachineInstr*> Unsched;
Unsched.reserve(NumRegionInstrs);
for (auto &I : *this)
Unsched.push_back(&I);
- std::pair<unsigned, unsigned> PressureBefore;
+ GCNRegPressure PressureBefore;
if (LIS) {
- DEBUG(dbgs() << "Pressure before scheduling:\n");
- discoverLiveIns();
- PressureBefore = getRealRegPressure();
+ PressureBefore = Pressure[RegionIdx];
+
+ DEBUG(dbgs() << "Pressure before scheduling:\nRegion live-ins:";
+ GCNRPTracker::printLiveRegs(dbgs(), LiveIns[RegionIdx], MRI);
+ dbgs() << "Region live-in pressure: ";
+ llvm::getRegPressure(MRI, LiveIns[RegionIdx]).print(dbgs());
+ dbgs() << "Region register pressure: ";
+ PressureBefore.print(dbgs()));
}
ScheduleDAGMILive::schedule();
- if (Stage == 0)
- Regions.push_back(std::make_pair(RegionBegin, RegionEnd));
+ Regions[RegionIdx] = std::make_pair(RegionBegin, RegionEnd);
if (!LIS)
return;
// Check the results of scheduling.
GCNMaxOccupancySchedStrategy &S = (GCNMaxOccupancySchedStrategy&)*SchedImpl;
- DEBUG(dbgs() << "Pressure after scheduling:\n");
auto PressureAfter = getRealRegPressure();
- LiveIns.clear();
- if (PressureAfter.first <= S.SGPRCriticalLimit &&
- PressureAfter.second <= S.VGPRCriticalLimit) {
+ DEBUG(dbgs() << "Pressure after scheduling: "; PressureAfter.print(dbgs()));
+
+ if (PressureAfter.getSGPRNum() <= S.SGPRCriticalLimit &&
+ PressureAfter.getVGPRNum() <= S.VGPRCriticalLimit) {
+ Pressure[RegionIdx] = PressureAfter;
DEBUG(dbgs() << "Pressure in desired limits, done.\n");
return;
}
- unsigned WavesAfter = getMaxWaves(PressureAfter.first,
- PressureAfter.second, MF);
- unsigned WavesBefore = getMaxWaves(PressureBefore.first,
- PressureBefore.second, MF);
+ unsigned WavesAfter = getMaxWaves(PressureAfter.getSGPRNum(),
+ PressureAfter.getVGPRNum(), MF);
+ unsigned WavesBefore = getMaxWaves(PressureBefore.getSGPRNum(),
+ PressureBefore.getVGPRNum(), MF);
DEBUG(dbgs() << "Occupancy before scheduling: " << WavesBefore <<
", after " << WavesAfter << ".\n");
@@ -368,8 +379,10 @@ void GCNScheduleDAGMILive::schedule() {
<< MinOccupancy << ".\n");
}
- if (WavesAfter >= WavesBefore)
+ if (WavesAfter >= WavesBefore) {
+ Pressure[RegionIdx] = PressureAfter;
return;
+ }
DEBUG(dbgs() << "Attempting to revert scheduling.\n");
RegionEnd = RegionBegin;
@@ -398,166 +411,139 @@ void GCNScheduleDAGMILive::schedule() {
DEBUG(dbgs() << "Scheduling " << *MI);
}
RegionBegin = Unsched.front()->getIterator();
- if (Stage == 0)
- Regions.back() = std::make_pair(RegionBegin, RegionEnd);
+ Regions[RegionIdx] = std::make_pair(RegionBegin, RegionEnd);
placeDebugValues();
}
-static inline void setMask(const MachineRegisterInfo &MRI,
- const SIRegisterInfo *SRI, unsigned Reg,
- LaneBitmask &PrevMask, LaneBitmask NewMask,
- unsigned &SGPRs, unsigned &VGPRs) {
- int NewRegs = countPopulation(NewMask.getAsInteger()) -
- countPopulation(PrevMask.getAsInteger());
- if (SRI->isSGPRReg(MRI, Reg))
- SGPRs += NewRegs;
- if (SRI->isVGPR(MRI, Reg))
- VGPRs += NewRegs;
- assert ((int)SGPRs >= 0 && (int)VGPRs >= 0);
- PrevMask = NewMask;
+GCNRegPressure GCNScheduleDAGMILive::getRealRegPressure() const {
+ GCNDownwardRPTracker RPTracker(*LIS);
+ RPTracker.advance(begin(), end(), &LiveIns[RegionIdx]);
+ return RPTracker.moveMaxPressure();
}
-void GCNScheduleDAGMILive::discoverLiveIns() {
- unsigned SGPRs = 0;
- unsigned VGPRs = 0;
+void GCNScheduleDAGMILive::computeBlockPressure(const MachineBasicBlock *MBB) {
+ GCNDownwardRPTracker RPTracker(*LIS);
+
+  // If the block has exactly one successor, then the live-ins of that
+  // successor are the live-outs of the current block. We can reuse the
+  // computed live set if the successor is scheduled after the current block.
+ const MachineBasicBlock *OnlySucc = nullptr;
+ if (MBB->succ_size() == 1 && !(*MBB->succ_begin())->empty()) {
+ SlotIndexes *Ind = LIS->getSlotIndexes();
+ if (Ind->getMBBStartIdx(MBB) < Ind->getMBBStartIdx(*MBB->succ_begin()))
+ OnlySucc = *MBB->succ_begin();
+ }
- auto &MI = *begin()->getParent()->getFirstNonDebugInstr();
- const SIRegisterInfo *SRI = static_cast<const SIRegisterInfo*>(TRI);
- SlotIndex SI = LIS->getInstructionIndex(MI).getBaseIndex();
- assert (SI.isValid());
-
- DEBUG(dbgs() << "Region live-ins:");
- for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
- unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
- if (MRI.reg_nodbg_empty(Reg))
- continue;
- const LiveInterval &LI = LIS->getInterval(Reg);
- LaneBitmask LaneMask = LaneBitmask::getNone();
- if (LI.hasSubRanges()) {
- for (const auto &S : LI.subranges())
- if (S.liveAt(SI))
- LaneMask |= S.LaneMask;
- } else if (LI.liveAt(SI)) {
- LaneMask = MRI.getMaxLaneMaskForVReg(Reg);
- }
+ // Scheduler sends regions from the end of the block upwards.
+ size_t CurRegion = RegionIdx;
+ for (size_t E = Regions.size(); CurRegion != E; ++CurRegion)
+ if (Regions[CurRegion].first->getParent() != MBB)
+ break;
+ --CurRegion;
+
+ auto I = MBB->begin();
+ auto LiveInIt = MBBLiveIns.find(MBB);
+ if (LiveInIt != MBBLiveIns.end()) {
+ auto LiveIn = std::move(LiveInIt->second);
+ RPTracker.reset(*MBB->begin(), &LiveIn);
+ MBBLiveIns.erase(LiveInIt);
+ } else {
+ I = Regions[CurRegion].first;
+ RPTracker.reset(*I);
+ }
- if (LaneMask.any()) {
- setMask(MRI, SRI, Reg, LiveIns[Reg], LaneMask, SGPRs, VGPRs);
+ for ( ; ; ) {
+ I = RPTracker.getNext();
- DEBUG(dbgs() << ' ' << PrintVRegOrUnit(Reg, SRI) << ':'
- << PrintLaneMask(LiveIns[Reg]));
+ if (Regions[CurRegion].first == I) {
+ LiveIns[CurRegion] = RPTracker.getLiveRegs();
+ RPTracker.clearMaxPressure();
}
- }
- LiveInPressure = std::make_pair(SGPRs, VGPRs);
+ if (Regions[CurRegion].second == I) {
+ Pressure[CurRegion] = RPTracker.moveMaxPressure();
+ if (CurRegion-- == RegionIdx)
+ break;
+ }
+ RPTracker.advanceToNext();
+ RPTracker.advanceBeforeNext();
+ }
- DEBUG(dbgs() << "\nLive-in pressure:\nSGPR = " << SGPRs
- << "\nVGPR = " << VGPRs << '\n');
+ if (OnlySucc) {
+ if (I != MBB->end()) {
+ RPTracker.advanceToNext();
+ RPTracker.advance(MBB->end());
+ }
+ RPTracker.reset(*OnlySucc->begin(), &RPTracker.getLiveRegs());
+ RPTracker.advanceBeforeNext();
+ MBBLiveIns[OnlySucc] = RPTracker.moveLiveRegs();
+ }
}
-std::pair<unsigned, unsigned>
-GCNScheduleDAGMILive::getRealRegPressure() const {
- unsigned SGPRs, MaxSGPRs, VGPRs, MaxVGPRs;
- SGPRs = MaxSGPRs = LiveInPressure.first;
- VGPRs = MaxVGPRs = LiveInPressure.second;
-
- const SIRegisterInfo *SRI = static_cast<const SIRegisterInfo*>(TRI);
- DenseMap<unsigned, LaneBitmask> LiveRegs(LiveIns);
+void GCNScheduleDAGMILive::finalizeSchedule() {
+ GCNMaxOccupancySchedStrategy &S = (GCNMaxOccupancySchedStrategy&)*SchedImpl;
+ DEBUG(dbgs() << "All regions recorded, starting actual scheduling.\n");
- for (const MachineInstr &MI : *this) {
- if (MI.isDebugValue())
- continue;
- SlotIndex SI = LIS->getInstructionIndex(MI).getBaseIndex();
- assert (SI.isValid());
+ LiveIns.resize(Regions.size());
+ Pressure.resize(Regions.size());
- // Remove dead registers or mask bits.
- for (auto &It : LiveRegs) {
- if (It.second.none())
- continue;
- const LiveInterval &LI = LIS->getInterval(It.first);
- if (LI.hasSubRanges()) {
- for (const auto &S : LI.subranges())
- if (!S.liveAt(SI))
- setMask(MRI, SRI, It.first, It.second, It.second & ~S.LaneMask,
- SGPRs, VGPRs);
- } else if (!LI.liveAt(SI)) {
- setMask(MRI, SRI, It.first, It.second, LaneBitmask::getNone(),
- SGPRs, VGPRs);
- }
- }
+ do {
+ Stage++;
+ RegionIdx = 0;
+ MachineBasicBlock *MBB = nullptr;
- // Add new registers or mask bits.
- for (const auto &MO : MI.defs()) {
- if (!MO.isReg())
- continue;
- unsigned Reg = MO.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
- continue;
- unsigned SubRegIdx = MO.getSubReg();
- LaneBitmask LaneMask = SubRegIdx != 0
- ? TRI->getSubRegIndexLaneMask(SubRegIdx)
- : MRI.getMaxLaneMaskForVReg(Reg);
- LaneBitmask &LM = LiveRegs[Reg];
- setMask(MRI, SRI, Reg, LM, LM | LaneMask, SGPRs, VGPRs);
- }
- MaxSGPRs = std::max(MaxSGPRs, SGPRs);
- MaxVGPRs = std::max(MaxVGPRs, VGPRs);
- }
+ if (Stage > 1) {
+ // Retry function scheduling if we found resulting occupancy and it is
+ // lower than used for first pass scheduling. This will give more freedom
+ // to schedule low register pressure blocks.
+ // Code is partially copied from MachineSchedulerBase::scheduleRegions().
- DEBUG(dbgs() << "Real region's register pressure:\nSGPR = " << MaxSGPRs
- << "\nVGPR = " << MaxVGPRs << '\n');
+ if (!LIS || StartingOccupancy <= MinOccupancy)
+ break;
- return std::make_pair(MaxSGPRs, MaxVGPRs);
-}
+ DEBUG(dbgs()
+ << "Retrying function scheduling with lowest recorded occupancy "
+ << MinOccupancy << ".\n");
-void GCNScheduleDAGMILive::finalizeSchedule() {
- // Retry function scheduling if we found resulting occupancy and it is
- // lower than used for first pass scheduling. This will give more freedom
- // to schedule low register pressure blocks.
- // Code is partially copied from MachineSchedulerBase::scheduleRegions().
+ S.setTargetOccupancy(MinOccupancy);
+ }
- if (!LIS || StartingOccupancy <= MinOccupancy)
- return;
+ for (auto Region : Regions) {
+ RegionBegin = Region.first;
+ RegionEnd = Region.second;
- DEBUG(dbgs() << "Retrying function scheduling with lowest recorded occupancy "
- << MinOccupancy << ".\n");
+ if (RegionBegin->getParent() != MBB) {
+ if (MBB) finishBlock();
+ MBB = RegionBegin->getParent();
+ startBlock(MBB);
+ if (Stage == 1)
+ computeBlockPressure(MBB);
+ }
- Stage++;
- GCNMaxOccupancySchedStrategy &S = (GCNMaxOccupancySchedStrategy&)*SchedImpl;
- S.setTargetOccupancy(MinOccupancy);
+ unsigned NumRegionInstrs = std::distance(begin(), end());
+ enterRegion(MBB, begin(), end(), NumRegionInstrs);
- MachineBasicBlock *MBB = nullptr;
- for (auto Region : Regions) {
- RegionBegin = Region.first;
- RegionEnd = Region.second;
+ // Skip empty scheduling regions (0 or 1 schedulable instructions).
+ if (begin() == end() || begin() == std::prev(end())) {
+ exitRegion();
+ continue;
+ }
- if (RegionBegin->getParent() != MBB) {
- if (MBB) finishBlock();
- MBB = RegionBegin->getParent();
- startBlock(MBB);
- }
+ DEBUG(dbgs() << "********** MI Scheduling **********\n");
+ DEBUG(dbgs() << MF.getName()
+ << ":BB#" << MBB->getNumber() << " " << MBB->getName()
+ << "\n From: " << *begin() << " To: ";
+ if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
+ else dbgs() << "End";
+ dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
- unsigned NumRegionInstrs = std::distance(begin(), end());
- enterRegion(MBB, begin(), end(), NumRegionInstrs);
+ schedule();
- // Skip empty scheduling regions (0 or 1 schedulable instructions).
- if (begin() == end() || begin() == std::prev(end())) {
exitRegion();
- continue;
+ ++RegionIdx;
}
- DEBUG(dbgs() << "********** MI Scheduling **********\n");
- DEBUG(dbgs() << MF.getName()
- << ":BB#" << MBB->getNumber() << " " << MBB->getName()
- << "\n From: " << *begin() << " To: ";
- if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
- else dbgs() << "End";
- dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
+ finishBlock();
- schedule();
-
- exitRegion();
- }
- finishBlock();
- LiveIns.shrink_and_clear();
+ } while (Stage < 2);
}
diff --git a/lib/Target/AMDGPU/GCNSchedStrategy.h b/lib/Target/AMDGPU/GCNSchedStrategy.h
index 15af232704ff..3ed3cd5b3b1c 100644
--- a/lib/Target/AMDGPU/GCNSchedStrategy.h
+++ b/lib/Target/AMDGPU/GCNSchedStrategy.h
@@ -14,6 +14,7 @@
#ifndef LLVM_LIB_TARGET_AMDGPU_GCNSCHEDSTRATEGY_H
#define LLVM_LIB_TARGET_AMDGPU_GCNSCHEDSTRATEGY_H
+#include "GCNRegPressure.h"
#include "llvm/CodeGen/MachineScheduler.h"
namespace llvm {
@@ -74,21 +75,28 @@ class GCNScheduleDAGMILive : public ScheduleDAGMILive {
// Scheduling stage number.
unsigned Stage;
+ // Current region index.
+ size_t RegionIdx;
+
// Vecor of regions recorder for later rescheduling
SmallVector<std::pair<MachineBasicBlock::iterator,
MachineBasicBlock::iterator>, 32> Regions;
- // Region live-ins.
- DenseMap<unsigned, LaneBitmask> LiveIns;
+ // Region live-in cache.
+ SmallVector<GCNRPTracker::LiveRegSet, 32> LiveIns;
+
+ // Region pressure cache.
+ SmallVector<GCNRegPressure, 32> Pressure;
+
+ // Temporary basic block live-in cache.
+ DenseMap<const MachineBasicBlock*, GCNRPTracker::LiveRegSet> MBBLiveIns;
- // Number of live-ins to the current region, first SGPR then VGPR.
- std::pair<unsigned, unsigned> LiveInPressure;
+ // Return current region pressure.
+ GCNRegPressure getRealRegPressure() const;
- // Collect current region live-ins.
- void discoverLiveIns();
+ // Compute and cache live-ins and pressure for all regions in block.
+ void computeBlockPressure(const MachineBasicBlock *MBB);
- // Return current region pressure. First value is SGPR number, second is VGPR.
- std::pair<unsigned, unsigned> getRealRegPressure() const;
public:
GCNScheduleDAGMILive(MachineSchedContext *C,
diff --git a/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp b/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
index d8cb98fe1b19..8cb35c506135 100644
--- a/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
+++ b/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
@@ -126,7 +126,7 @@ bool SIAnnotateControlFlow::doInitialization(Module &M) {
Void = Type::getVoidTy(Context);
Boolean = Type::getInt1Ty(Context);
Int64 = Type::getInt64Ty(Context);
- ReturnStruct = StructType::get(Boolean, Int64, (Type *)nullptr);
+ ReturnStruct = StructType::get(Boolean, Int64);
BoolTrue = ConstantInt::getTrue(Context);
BoolFalse = ConstantInt::getFalse(Context);
diff --git a/lib/Target/AMDGPU/SIISelLowering.cpp b/lib/Target/AMDGPU/SIISelLowering.cpp
index cc93c27731ff..48a14e4dbea2 100644
--- a/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -488,6 +488,7 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
setTargetDAGCombine(ISD::FCANONICALIZE);
setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
setTargetDAGCombine(ISD::ZERO_EXTEND);
+ setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
// All memory operations. Some folding on the pointer operand is done to help
// matching the constant offsets in the addressing modes.
@@ -2003,6 +2004,7 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
break;
}
assert(Found);
+ (void)Found;
// This should be before all vector instructions.
BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg)
@@ -4604,6 +4606,24 @@ SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N,
return SDValue();
}
+SDValue SITargetLowering::performExtractVectorEltCombine(
+ SDNode *N, DAGCombinerInfo &DCI) const {
+ SDValue Vec = N->getOperand(0);
+
+  SelectionDAG &DAG = DCI.DAG;
+ if (Vec.getOpcode() == ISD::FNEG && allUsesHaveSourceMods(N)) {
+ SDLoc SL(N);
+ EVT EltVT = N->getValueType(0);
+ SDValue Idx = N->getOperand(1);
+ SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
+ Vec.getOperand(0), Idx);
+ return DAG.getNode(ISD::FNEG, SL, EltVT, Elt);
+ }
+
+ return SDValue();
+}
+
+
unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
const SDNode *N0,
const SDNode *N1) const {
@@ -4891,6 +4911,8 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
break;
}
+ case ISD::EXTRACT_VECTOR_ELT:
+ return performExtractVectorEltCombine(N, DCI);
}
return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
}
diff --git a/lib/Target/AMDGPU/SIISelLowering.h b/lib/Target/AMDGPU/SIISelLowering.h
index d177777ad5ee..046e677756d1 100644
--- a/lib/Target/AMDGPU/SIISelLowering.h
+++ b/lib/Target/AMDGPU/SIISelLowering.h
@@ -100,6 +100,7 @@ class SITargetLowering final : public AMDGPUTargetLowering {
SDValue performMinMaxCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performFMed3Combine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performCvtPkRTZCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue performExtractVectorEltCombine(SDNode *N, DAGCombinerInfo &DCI) const;
unsigned getFusedOpcode(const SelectionDAG &DAG,
const SDNode *N0, const SDNode *N1) const;
diff --git a/lib/Target/AMDGPU/SIInstrInfo.cpp b/lib/Target/AMDGPU/SIInstrInfo.cpp
index 92e452a3d6a0..065fd09eb356 100644
--- a/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -496,6 +496,188 @@ int SIInstrInfo::commuteOpcode(unsigned Opcode) const {
return Opcode;
}
+void SIInstrInfo::materializeImmediate(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const DebugLoc &DL, unsigned DestReg,
+ int64_t Value) const {
+ MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+ const TargetRegisterClass *RegClass = MRI.getRegClass(DestReg);
+ if (RegClass == &AMDGPU::SReg_32RegClass ||
+ RegClass == &AMDGPU::SGPR_32RegClass ||
+ RegClass == &AMDGPU::SReg_32_XM0RegClass ||
+ RegClass == &AMDGPU::SReg_32_XM0_XEXECRegClass) {
+ BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
+ .addImm(Value);
+ return;
+ }
+
+ if (RegClass == &AMDGPU::SReg_64RegClass ||
+ RegClass == &AMDGPU::SGPR_64RegClass ||
+ RegClass == &AMDGPU::SReg_64_XEXECRegClass) {
+ BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
+ .addImm(Value);
+ return;
+ }
+
+ if (RegClass == &AMDGPU::VGPR_32RegClass) {
+ BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
+ .addImm(Value);
+ return;
+ }
+ if (RegClass == &AMDGPU::VReg_64RegClass) {
+ BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), DestReg)
+ .addImm(Value);
+ return;
+ }
+
+ unsigned EltSize = 4;
+ unsigned Opcode = AMDGPU::V_MOV_B32_e32;
+ if (RI.isSGPRClass(RegClass)) {
+ if (RI.getRegSizeInBits(*RegClass) > 32) {
+ Opcode = AMDGPU::S_MOV_B64;
+ EltSize = 8;
+ } else {
+ Opcode = AMDGPU::S_MOV_B32;
+ EltSize = 4;
+ }
+ }
+
+ ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RegClass, EltSize);
+ for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
+ int64_t IdxValue = Idx == 0 ? Value : 0;
+
+ MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
+ get(Opcode), RI.getSubReg(DestReg, Idx));
+ Builder.addImm(IdxValue);
+ }
+}
+
+const TargetRegisterClass *
+SIInstrInfo::getPreferredSelectRegClass(unsigned Size) const {
+ return &AMDGPU::VGPR_32RegClass;
+}
+
+void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DstReg,
+ ArrayRef<MachineOperand> Cond,
+ unsigned TrueReg,
+ unsigned FalseReg) const {
+ MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+ assert(MRI.getRegClass(DstReg) == &AMDGPU::VGPR_32RegClass &&
+ "Not a VGPR32 reg");
+
+ if (Cond.size() == 1) {
+ BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
+ .addReg(FalseReg)
+ .addReg(TrueReg)
+ .add(Cond[0]);
+ } else if (Cond.size() == 2) {
+ assert(Cond[0].isImm() && "Cond[0] is not an immediate");
+ switch (Cond[0].getImm()) {
+ case SIInstrInfo::SCC_TRUE: {
+ unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
+ BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg)
+ .addImm(-1)
+ .addImm(0);
+ BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
+ .addReg(FalseReg)
+ .addReg(TrueReg)
+ .addReg(SReg);
+ break;
+ }
+ case SIInstrInfo::SCC_FALSE: {
+ unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
+ BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg)
+ .addImm(0)
+ .addImm(-1);
+ BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
+ .addReg(FalseReg)
+ .addReg(TrueReg)
+ .addReg(SReg);
+ break;
+ }
+ case SIInstrInfo::VCCNZ: {
+ MachineOperand RegOp = Cond[1];
+ RegOp.setImplicit(false);
+ BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
+ .addReg(FalseReg)
+ .addReg(TrueReg)
+ .add(RegOp);
+ break;
+ }
+ case SIInstrInfo::VCCZ: {
+ MachineOperand RegOp = Cond[1];
+ RegOp.setImplicit(false);
+ BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
+ .addReg(TrueReg)
+ .addReg(FalseReg)
+ .add(RegOp);
+ break;
+ }
+ case SIInstrInfo::EXECNZ: {
+ unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
+ unsigned SReg2 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
+ BuildMI(MBB, I, DL, get(AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
+ .addImm(0);
+ BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg)
+ .addImm(-1)
+ .addImm(0);
+ BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
+ .addReg(FalseReg)
+ .addReg(TrueReg)
+ .addReg(SReg);
+ break;
+ }
+ case SIInstrInfo::EXECZ: {
+ unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
+ unsigned SReg2 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
+ BuildMI(MBB, I, DL, get(AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
+ .addImm(0);
+ BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg)
+ .addImm(0)
+ .addImm(-1);
+ BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
+ .addReg(FalseReg)
+ .addReg(TrueReg)
+ .addReg(SReg);
+ llvm_unreachable("Unhandled branch predicate EXECZ");
+ break;
+ }
+ default:
+ llvm_unreachable("invalid branch predicate");
+ }
+ } else {
+ llvm_unreachable("Can only handle Cond size 1 or 2");
+ }
+}
+
+unsigned SIInstrInfo::insertEQ(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator I,
+ const DebugLoc &DL,
+ unsigned SrcReg, int Value) const {
+ MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
+ unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
+ BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg)
+ .addImm(Value)
+ .addReg(SrcReg);
+
+ return Reg;
+}
+
+unsigned SIInstrInfo::insertNE(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator I,
+ const DebugLoc &DL,
+ unsigned SrcReg, int Value) const {
+ MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
+ unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
+ BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg)
+ .addImm(Value)
+ .addReg(SrcReg);
+
+ return Reg;
+}
+
unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {
if (RI.getRegSizeInBits(*DstRC) == 32) {
@@ -834,6 +1016,20 @@ void SIInstrInfo::insertNoop(MachineBasicBlock &MBB,
insertWaitStates(MBB, MI, 1);
}
+void SIInstrInfo::insertReturn(MachineBasicBlock &MBB) const {
+ auto MF = MBB.getParent();
+ SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
+
+ assert(Info->isEntryFunction());
+
+ if (MBB.succ_empty()) {
+ bool HasNoTerminator = MBB.getFirstTerminator() == MBB.end();
+ if (HasNoTerminator)
+ BuildMI(MBB, MBB.end(), DebugLoc(),
+ get(Info->returnsVoid() ? AMDGPU::S_ENDPGM : AMDGPU::SI_RETURN_TO_EPILOG));
+ }
+}
+
unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) const {
switch (MI.getOpcode()) {
default: return 1; // FIXME: Do wait states equal cycles?
@@ -1241,14 +1437,20 @@ bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB,
return false;
}
- BranchPredicate Pred = getBranchPredicate(I->getOpcode());
- if (Pred == INVALID_BR)
- return true;
+ MachineBasicBlock *CondBB = nullptr;
- MachineBasicBlock *CondBB = I->getOperand(0).getMBB();
- Cond.push_back(MachineOperand::CreateImm(Pred));
- Cond.push_back(I->getOperand(1)); // Save the branch register.
+ if (I->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
+ CondBB = I->getOperand(1).getMBB();
+ Cond.push_back(I->getOperand(0));
+ } else {
+ BranchPredicate Pred = getBranchPredicate(I->getOpcode());
+ if (Pred == INVALID_BR)
+ return true;
+ CondBB = I->getOperand(0).getMBB();
+ Cond.push_back(MachineOperand::CreateImm(Pred));
+ Cond.push_back(I->getOperand(1)); // Save the branch register.
+ }
++I;
if (I == MBB.end()) {
@@ -1351,6 +1553,13 @@ unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB,
return 1;
}
+  if (Cond.size() == 1 && Cond[0].isReg()) {
+ BuildMI(&MBB, DL, get(AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO))
+ .add(Cond[0])
+ .addMBB(TBB);
+ return 1;
+ }
+
assert(TBB && Cond[0].isImm());
unsigned Opcode
@@ -1390,9 +1599,16 @@ unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB,
bool SIInstrInfo::reverseBranchCondition(
SmallVectorImpl<MachineOperand> &Cond) const {
- assert(Cond.size() == 2);
- Cond[0].setImm(-Cond[0].getImm());
- return false;
+ if (Cond.size() != 2) {
+ return true;
+ }
+
+ if (Cond[0].isImm()) {
+ Cond[0].setImm(-Cond[0].getImm());
+ return false;
+ }
+
+ return true;
}
bool SIInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
@@ -3920,6 +4136,82 @@ bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const {
return false;
}
+bool SIInstrInfo::isNonUniformBranchInstr(MachineInstr &Branch) const {
+ return Branch.getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO;
+}
+
+void SIInstrInfo::convertNonUniformIfRegion(MachineBasicBlock *IfEntry,
+ MachineBasicBlock *IfEnd) const {
+ MachineBasicBlock::iterator TI = IfEntry->getFirstTerminator();
+ assert(TI != IfEntry->end());
+
+ MachineInstr *Branch = &(*TI);
+ MachineFunction *MF = IfEntry->getParent();
+ MachineRegisterInfo &MRI = IfEntry->getParent()->getRegInfo();
+
+ if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
+ unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
+ MachineInstr *SIIF =
+ BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_IF), DstReg)
+ .add(Branch->getOperand(0))
+ .add(Branch->getOperand(1));
+ MachineInstr *SIEND =
+ BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_END_CF))
+ .addReg(DstReg);
+
+ IfEntry->erase(TI);
+ IfEntry->insert(IfEntry->end(), SIIF);
+ IfEnd->insert(IfEnd->getFirstNonPHI(), SIEND);
+ }
+}
+
+void SIInstrInfo::convertNonUniformLoopRegion(
+ MachineBasicBlock *LoopEntry, MachineBasicBlock *LoopEnd) const {
+ MachineBasicBlock::iterator TI = LoopEnd->getFirstTerminator();
+ // We expect 2 terminators, one conditional and one unconditional.
+ assert(TI != LoopEnd->end());
+
+ MachineInstr *Branch = &(*TI);
+ MachineFunction *MF = LoopEnd->getParent();
+ MachineRegisterInfo &MRI = LoopEnd->getParent()->getRegInfo();
+
+ if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
+
+ unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
+ unsigned BackEdgeReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
+ MachineInstrBuilder HeaderPHIBuilder =
+ BuildMI(*(MF), Branch->getDebugLoc(), get(TargetOpcode::PHI), DstReg);
+ for (MachineBasicBlock::pred_iterator PI = LoopEntry->pred_begin(),
+ E = LoopEntry->pred_end();
+ PI != E; ++PI) {
+ if (*PI == LoopEnd) {
+ HeaderPHIBuilder.addReg(BackEdgeReg);
+ } else {
+ MachineBasicBlock *PMBB = *PI;
+ unsigned ZeroReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
+ materializeImmediate(*PMBB, PMBB->getFirstTerminator(), DebugLoc(),
+ ZeroReg, 0);
+ HeaderPHIBuilder.addReg(ZeroReg);
+ }
+ HeaderPHIBuilder.addMBB(*PI);
+ }
+ MachineInstr *HeaderPhi = HeaderPHIBuilder;
+ MachineInstr *SIIFBREAK = BuildMI(*(MF), Branch->getDebugLoc(),
+ get(AMDGPU::SI_IF_BREAK), BackEdgeReg)
+ .addReg(DstReg)
+ .add(Branch->getOperand(0));
+ MachineInstr *SILOOP =
+ BuildMI(*(MF), Branch->getDebugLoc(), get(AMDGPU::SI_LOOP))
+ .addReg(BackEdgeReg)
+ .addMBB(LoopEntry);
+
+ LoopEntry->insert(LoopEntry->begin(), HeaderPhi);
+ LoopEnd->erase(TI);
+ LoopEnd->insert(LoopEnd->end(), SIIFBREAK);
+ LoopEnd->insert(LoopEnd->end(), SILOOP);
+ }
+}
+
ArrayRef<std::pair<int, const char *>>
SIInstrInfo::getSerializableTargetIndices() const {
static const std::pair<int, const char *> TargetIndices[] = {
diff --git a/lib/Target/AMDGPU/SIInstrInfo.h b/lib/Target/AMDGPU/SIInstrInfo.h
index 03a5ef74b179..f6e5e8883f63 100644
--- a/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/lib/Target/AMDGPU/SIInstrInfo.h
@@ -143,6 +143,23 @@ public:
RegScavenger *RS, unsigned TmpReg,
unsigned Offset, unsigned Size) const;
+ void materializeImmediate(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const DebugLoc &DL,
+ unsigned DestReg,
+ int64_t Value) const;
+
+ const TargetRegisterClass *getPreferredSelectRegClass(
+ unsigned Size) const;
+
+ unsigned insertNE(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator I, const DebugLoc &DL,
+ unsigned SrcReg, int Value) const;
+
+ unsigned insertEQ(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator I, const DebugLoc &DL,
+ unsigned SrcReg, int Value) const;
+
void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, unsigned SrcReg,
bool isKill, int FrameIndex,
@@ -193,7 +210,7 @@ public:
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const override;
+ bool AllowModify = false) const override;
unsigned removeBranch(MachineBasicBlock &MBB,
int *BytesRemoved = nullptr) const override;
@@ -218,6 +235,11 @@ public:
unsigned DstReg, ArrayRef<MachineOperand> Cond,
unsigned TrueReg, unsigned FalseReg) const override;
+ void insertVectorSelect(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, const DebugLoc &DL,
+ unsigned DstReg, ArrayRef<MachineOperand> Cond,
+ unsigned TrueReg, unsigned FalseReg) const;
+
bool
areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
AliasAnalysis *AA = nullptr) const override;
@@ -705,6 +727,7 @@ public:
void insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const override;
+ void insertReturn(MachineBasicBlock &MBB) const;
/// \brief Return the number of wait states that result from executing this
/// instruction.
unsigned getNumWaitStates(const MachineInstr &MI) const;
@@ -750,6 +773,14 @@ public:
bool mayAccessFlatAddressSpace(const MachineInstr &MI) const;
+ bool isNonUniformBranchInstr(MachineInstr &Instr) const;
+
+ void convertNonUniformIfRegion(MachineBasicBlock *IfEntry,
+ MachineBasicBlock *IfEnd) const;
+
+ void convertNonUniformLoopRegion(MachineBasicBlock *LoopEntry,
+ MachineBasicBlock *LoopEnd) const;
+
ArrayRef<std::pair<int, const char *>>
getSerializableTargetIndices() const override;
diff --git a/lib/Target/AMDGPU/SIInstructions.td b/lib/Target/AMDGPU/SIInstructions.td
index 7ccb54f54e34..3b4bdc864253 100644
--- a/lib/Target/AMDGPU/SIInstructions.td
+++ b/lib/Target/AMDGPU/SIInstructions.td
@@ -174,6 +174,13 @@ def SI_MASK_BRANCH : VPseudoInstSI <
let isTerminator = 1 in {
+ def SI_NON_UNIFORM_BRCOND_PSEUDO : CFPseudoInstSI <
+ (outs),
+ (ins SReg_64:$vcc, brtarget:$target),
+ [(brcond i1:$vcc, bb:$target)]> {
+ let Size = 12;
+}
+
def SI_IF: CFPseudoInstSI <
(outs SReg_64:$dst), (ins SReg_64:$vcc, brtarget:$target),
[(set i64:$dst, (AMDGPUif i1:$vcc, bb:$target))], 1, 1> {
diff --git a/lib/Target/AMDGPU/VOP2Instructions.td b/lib/Target/AMDGPU/VOP2Instructions.td
index 2281f338ab45..4a11d9471f1d 100644
--- a/lib/Target/AMDGPU/VOP2Instructions.td
+++ b/lib/Target/AMDGPU/VOP2Instructions.td
@@ -164,8 +164,11 @@ multiclass VOP2eInst <string opName,
class VOP_MADAK <ValueType vt> : VOPProfile <[vt, vt, vt, vt]> {
field Operand ImmOpType = !if(!eq(vt.Size, 32), f32kimm, f16kimm);
field dag Ins32 = (ins VCSrc_f32:$src0, VGPR_32:$src1, ImmOpType:$imm);
- field string Asm32 = "$vdst, $src0, $src1, $imm";
field bit HasExt = 0;
+
+ // Hack to stop printing _e64
+ let DstRC = RegisterOperand<VGPR_32>;
+ field string Asm32 = " $vdst, $src0, $src1, $imm";
}
def VOP_MADAK_F16 : VOP_MADAK <f16>;
@@ -174,8 +177,11 @@ def VOP_MADAK_F32 : VOP_MADAK <f32>;
class VOP_MADMK <ValueType vt> : VOPProfile <[vt, vt, vt, vt]> {
field Operand ImmOpType = !if(!eq(vt.Size, 32), f32kimm, f16kimm);
field dag Ins32 = (ins VCSrc_f32:$src0, ImmOpType:$imm, VGPR_32:$src1);
- field string Asm32 = "$vdst, $src0, $imm, $src1";
field bit HasExt = 0;
+
+ // Hack to stop printing _e64
+ let DstRC = RegisterOperand<VGPR_32>;
+ field string Asm32 = " $vdst, $src0, $imm, $src1";
}
def VOP_MADMK_F16 : VOP_MADMK <f16>;
@@ -298,7 +304,7 @@ def VOP_WRITELANE : VOPProfile<[i32, i32, i32]> {
let SubtargetPredicate = isGCN in {
defm V_CNDMASK_B32 : VOP2eInst <"v_cndmask_b32", VOP2e_I32_I32_I32_I1>;
-def V_MADMK_F32 : VOP2_Pseudo <"v_madmk_f32", VOP_MADMK_F32>;
+def V_MADMK_F32 : VOP2_Pseudo <"v_madmk_f32", VOP_MADMK_F32, [], "">;
let isCommutable = 1 in {
defm V_ADD_F32 : VOP2Inst <"v_add_f32", VOP_F32_F32_F32, fadd>;
@@ -328,7 +334,7 @@ let Constraints = "$vdst = $src2", DisableEncoding="$src2",
defm V_MAC_F32 : VOP2Inst <"v_mac_f32", VOP_MAC_F32>;
}
-def V_MADAK_F32 : VOP2_Pseudo <"v_madak_f32", VOP_MADAK_F32>;
+def V_MADAK_F32 : VOP2_Pseudo <"v_madak_f32", VOP_MADAK_F32, [], "">;
// No patterns so that the scalar instructions are always selected.
// The scalar versions will be replaced with vector when needed later.
@@ -383,7 +389,7 @@ defm V_LSHL_B32 : VOP2Inst <"v_lshl_b32", VOP_I32_I32_I32>;
let SubtargetPredicate = isVI in {
-def V_MADMK_F16 : VOP2_Pseudo <"v_madmk_f16", VOP_MADMK_F16>;
+def V_MADMK_F16 : VOP2_Pseudo <"v_madmk_f16", VOP_MADMK_F16, [], "">;
defm V_LSHLREV_B16 : VOP2Inst <"v_lshlrev_b16", VOP_I16_I16_I16>;
defm V_LSHRREV_B16 : VOP2Inst <"v_lshrrev_b16", VOP_I16_I16_I16>;
defm V_ASHRREV_I16 : VOP2Inst <"v_ashrrev_i16", VOP_I16_I16_I16>;
@@ -394,7 +400,7 @@ defm V_ADD_F16 : VOP2Inst <"v_add_f16", VOP_F16_F16_F16, fadd>;
defm V_SUB_F16 : VOP2Inst <"v_sub_f16", VOP_F16_F16_F16, fsub>;
defm V_SUBREV_F16 : VOP2Inst <"v_subrev_f16", VOP_F16_F16_F16, null_frag, "v_sub_f16">;
defm V_MUL_F16 : VOP2Inst <"v_mul_f16", VOP_F16_F16_F16, fmul>;
-def V_MADAK_F16 : VOP2_Pseudo <"v_madak_f16", VOP_MADAK_F16>;
+def V_MADAK_F16 : VOP2_Pseudo <"v_madak_f16", VOP_MADAK_F16, [], "">;
defm V_ADD_U16 : VOP2Inst <"v_add_u16", VOP_I16_I16_I16>;
defm V_SUB_U16 : VOP2Inst <"v_sub_u16" , VOP_I16_I16_I16>;
defm V_SUBREV_U16 : VOP2Inst <"v_subrev_u16", VOP_I16_I16_I16, null_frag, "v_sub_u16">;
@@ -651,6 +657,17 @@ multiclass VOP2_Real_e64_vi <bits<10> op> {
VOP3e_vi <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
}
+multiclass VOP2_Real_e64only_vi <bits<10> op> {
+ def _e64_vi :
+ VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
+ VOP3e_vi <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl> {
+ // Hack to stop printing _e64
+ VOP3_Pseudo ps = !cast<VOP3_Pseudo>(NAME#"_e64");
+ let OutOperandList = (outs VGPR_32:$vdst);
+ let AsmString = ps.Mnemonic # " " # ps.AsmOperands;
+ }
+}
+
multiclass Base_VOP2be_Real_e32e64_vi <bits<6> op> : VOP2_Real_e32_vi<op> {
def _e64_vi :
VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
@@ -718,17 +735,17 @@ defm V_SUBBREV_U32 : VOP2be_Real_e32e64_vi <0x1e>;
defm V_READLANE_B32 : VOP32_Real_vi <0x289>;
defm V_WRITELANE_B32 : VOP32_Real_vi <0x28a>;
-defm V_BFM_B32 : VOP2_Real_e64_vi <0x293>;
-defm V_BCNT_U32_B32 : VOP2_Real_e64_vi <0x28b>;
-defm V_MBCNT_LO_U32_B32 : VOP2_Real_e64_vi <0x28c>;
-defm V_MBCNT_HI_U32_B32 : VOP2_Real_e64_vi <0x28d>;
-defm V_LDEXP_F32 : VOP2_Real_e64_vi <0x288>;
-defm V_CVT_PKACCUM_U8_F32 : VOP2_Real_e64_vi <0x1f0>;
-defm V_CVT_PKNORM_I16_F32 : VOP2_Real_e64_vi <0x294>;
-defm V_CVT_PKNORM_U16_F32 : VOP2_Real_e64_vi <0x295>;
-defm V_CVT_PKRTZ_F16_F32 : VOP2_Real_e64_vi <0x296>;
-defm V_CVT_PK_U16_U32 : VOP2_Real_e64_vi <0x297>;
-defm V_CVT_PK_I16_I32 : VOP2_Real_e64_vi <0x298>;
+defm V_BFM_B32 : VOP2_Real_e64only_vi <0x293>;
+defm V_BCNT_U32_B32 : VOP2_Real_e64only_vi <0x28b>;
+defm V_MBCNT_LO_U32_B32 : VOP2_Real_e64only_vi <0x28c>;
+defm V_MBCNT_HI_U32_B32 : VOP2_Real_e64only_vi <0x28d>;
+defm V_LDEXP_F32 : VOP2_Real_e64only_vi <0x288>;
+defm V_CVT_PKACCUM_U8_F32 : VOP2_Real_e64only_vi <0x1f0>;
+defm V_CVT_PKNORM_I16_F32 : VOP2_Real_e64only_vi <0x294>;
+defm V_CVT_PKNORM_U16_F32 : VOP2_Real_e64only_vi <0x295>;
+defm V_CVT_PKRTZ_F16_F32 : VOP2_Real_e64only_vi <0x296>;
+defm V_CVT_PK_U16_U32 : VOP2_Real_e64only_vi <0x297>;
+defm V_CVT_PK_I16_I32 : VOP2_Real_e64only_vi <0x298>;
defm V_ADD_F16 : VOP2_Real_e32e64_vi <0x1f>;
defm V_SUB_F16 : VOP2_Real_e32e64_vi <0x20>;
diff --git a/lib/Target/AMDGPU/VOP3Instructions.td b/lib/Target/AMDGPU/VOP3Instructions.td
index 217a07488853..ffa6c60d6b1f 100644
--- a/lib/Target/AMDGPU/VOP3Instructions.td
+++ b/lib/Target/AMDGPU/VOP3Instructions.td
@@ -232,7 +232,6 @@ def V_ASHRREV_I64 : VOP3Inst <"v_ashrrev_i64", VOP3_Profile<VOP_I64_I32_I64>>;
let SubtargetPredicate = isCIVI in {
-def V_MQSAD_U16_U8 : VOP3Inst <"v_mqsad_u16_u8", VOP3_Profile<VOP_I32_I32_I32>>;
def V_QSAD_PK_U16_U8 : VOP3Inst <"v_qsad_pk_u16_u8", VOP3_Profile<VOP_I64_I64_I32_I64>, int_amdgcn_qsad_pk_u16_u8>;
def V_MQSAD_U32_U8 : VOP3Inst <"v_mqsad_u32_u8", VOP3_Profile<VOP_V4I32_I64_I32_V4I32>, int_amdgcn_mqsad_u32_u8>;
@@ -402,7 +401,6 @@ multiclass VOP3be_Real_ci<bits<9> op> {
}
}
-defm V_MQSAD_U16_U8 : VOP3_Real_ci <0x172>;
defm V_QSAD_PK_U16_U8 : VOP3_Real_ci <0x172>;
defm V_MQSAD_U32_U8 : VOP3_Real_ci <0x175>;
defm V_MAD_U64_U32 : VOP3be_Real_ci <0x176>;
@@ -426,7 +424,6 @@ multiclass VOP3be_Real_vi<bits<10> op> {
} // End AssemblerPredicates = [isVI], DecoderNamespace = "VI"
-defm V_MQSAD_U16_U8 : VOP3_Real_vi <0x172>;
defm V_MAD_U64_U32 : VOP3be_Real_vi <0x1E8>;
defm V_MAD_I64_I32 : VOP3be_Real_vi <0x1E9>;