path: root/contrib/llvm-project/llvm/lib/CodeGen/MachineScheduler.cpp
author    Dimitry Andric <dim@FreeBSD.org>    2020-01-22 20:31:01 +0000
committer Dimitry Andric <dim@FreeBSD.org>    2020-01-22 20:31:01 +0000
commit    8bcb0991864975618c09697b1aca10683346d9f0 (patch)
tree      0afab28faa50e5f27698f8dd6c1921fff8d25e39 /contrib/llvm-project/llvm/lib/CodeGen/MachineScheduler.cpp
parent    b14637d118e110006a149a79b649c5695e7f419a (diff)
parent    1d5ae1026e831016fc29fd927877c86af904481f (diff)
Diffstat (limited to 'contrib/llvm-project/llvm/lib/CodeGen/MachineScheduler.cpp')
-rw-r--r--    contrib/llvm-project/llvm/lib/CodeGen/MachineScheduler.cpp    59
1 file changed, 30 insertions, 29 deletions
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/MachineScheduler.cpp b/contrib/llvm-project/llvm/lib/CodeGen/MachineScheduler.cpp
index ae1170ad1be6..f0721ea3b76d 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -82,6 +82,10 @@ cl::opt<bool>
DumpCriticalPathLength("misched-dcpl", cl::Hidden,
cl::desc("Print critical path length to stdout"));
+cl::opt<bool> VerifyScheduling(
+ "verify-misched", cl::Hidden,
+ cl::desc("Verify machine instrs before and after machine scheduling"));
+
} // end namespace llvm
#ifndef NDEBUG
@@ -122,9 +126,6 @@ static cl::opt<bool> EnableMemOpCluster("misched-cluster", cl::Hidden,
cl::desc("Enable memop clustering."),
cl::init(true));
-static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
- cl::desc("Verify machine instrs before and after machine scheduling"));
-
// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;
@@ -198,6 +199,7 @@ char &llvm::MachineSchedulerID = MachineScheduler::ID;
INITIALIZE_PASS_BEGIN(MachineScheduler, DEBUG_TYPE,
"Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
@@ -210,7 +212,7 @@ MachineScheduler::MachineScheduler() : MachineSchedulerBase(ID) {
void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
- AU.addRequiredID(MachineDominatorsID);
+ AU.addRequired<MachineDominatorTree>();
AU.addRequired<MachineLoopInfo>();
AU.addRequired<AAResultsWrapperPass>();
AU.addRequired<TargetPassConfig>();
@@ -234,7 +236,7 @@ PostMachineScheduler::PostMachineScheduler() : MachineSchedulerBase(ID) {
void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
- AU.addRequiredID(MachineDominatorsID);
+ AU.addRequired<MachineDominatorTree>();
AU.addRequired<MachineLoopInfo>();
AU.addRequired<TargetPassConfig>();
MachineFunctionPass::getAnalysisUsage(AU);
@@ -933,8 +935,8 @@ void ScheduleDAGMILive::collectVRegUses(SUnit &SU) {
if (TrackLaneMasks && !MO.isUse())
continue;
- unsigned Reg = MO.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ Register Reg = MO.getReg();
+ if (!Register::isVirtualRegister(Reg))
continue;
// Ignore re-defs.
@@ -985,7 +987,7 @@ void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
"ShouldTrackLaneMasks requires ShouldTrackPressure");
}
-// Setup the register pressure trackers for the top scheduled top and bottom
+// Setup the register pressure trackers for the top scheduled and bottom
// scheduled regions.
void ScheduleDAGMILive::initRegPressure() {
VRegUses.clear();
@@ -1095,7 +1097,7 @@ void ScheduleDAGMILive::updatePressureDiffs(
for (const RegisterMaskPair &P : LiveUses) {
unsigned Reg = P.RegUnit;
/// FIXME: Currently assuming single-use physregs.
- if (!TRI->isVirtualRegister(Reg))
+ if (!Register::isVirtualRegister(Reg))
continue;
if (ShouldTrackLaneMasks) {
@@ -1319,8 +1321,8 @@ unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
// Visit each live out vreg def to find def/use pairs that cross iterations.
for (const RegisterMaskPair &P : RPTracker.getPressure().LiveOutRegs) {
unsigned Reg = P.RegUnit;
- if (!TRI->isVirtualRegister(Reg))
- continue;
+ if (!Register::isVirtualRegister(Reg))
+ continue;
const LiveInterval &LI = LIS->getInterval(Reg);
const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
if (!DefVNI)
@@ -1538,14 +1540,14 @@ namespace llvm {
std::unique_ptr<ScheduleDAGMutation>
createLoadClusterDAGMutation(const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI) {
- return EnableMemOpCluster ? llvm::make_unique<LoadClusterMutation>(TII, TRI)
+ return EnableMemOpCluster ? std::make_unique<LoadClusterMutation>(TII, TRI)
: nullptr;
}
std::unique_ptr<ScheduleDAGMutation>
createStoreClusterDAGMutation(const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI) {
- return EnableMemOpCluster ? llvm::make_unique<StoreClusterMutation>(TII, TRI)
+ return EnableMemOpCluster ? std::make_unique<StoreClusterMutation>(TII, TRI)
: nullptr;
}
@@ -1657,7 +1659,7 @@ namespace llvm {
std::unique_ptr<ScheduleDAGMutation>
createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI) {
- return llvm::make_unique<CopyConstrain>(TII, TRI);
+ return std::make_unique<CopyConstrain>(TII, TRI);
}
} // end namespace llvm
@@ -1687,13 +1689,13 @@ void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
// Check for pure vreg copies.
const MachineOperand &SrcOp = Copy->getOperand(1);
- unsigned SrcReg = SrcOp.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(SrcReg) || !SrcOp.readsReg())
+ Register SrcReg = SrcOp.getReg();
+ if (!Register::isVirtualRegister(SrcReg) || !SrcOp.readsReg())
return;
const MachineOperand &DstOp = Copy->getOperand(0);
- unsigned DstReg = DstOp.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(DstReg) || DstOp.isDead())
+ Register DstReg = DstOp.getReg();
+ if (!Register::isVirtualRegister(DstReg) || DstOp.isDead())
return;
// Check if either the dest or source is local. If it's live across a back
@@ -2914,14 +2916,12 @@ int biasPhysReg(const SUnit *SU, bool isTop) {
unsigned UnscheduledOper = isTop ? 0 : 1;
// If we have already scheduled the physreg produce/consumer, immediately
// schedule the copy.
- if (TargetRegisterInfo::isPhysicalRegister(
- MI->getOperand(ScheduledOper).getReg()))
+ if (Register::isPhysicalRegister(MI->getOperand(ScheduledOper).getReg()))
return 1;
// If the physreg is at the boundary, defer it. Otherwise schedule it
// immediately to free the dependent. We can hoist the copy later.
bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
- if (TargetRegisterInfo::isPhysicalRegister(
- MI->getOperand(UnscheduledOper).getReg()))
+ if (Register::isPhysicalRegister(MI->getOperand(UnscheduledOper).getReg()))
return AtBoundary ? -1 : 1;
}
@@ -2931,7 +2931,7 @@ int biasPhysReg(const SUnit *SU, bool isTop) {
// physical registers.
bool DoBias = true;
for (const MachineOperand &Op : MI->defs()) {
- if (Op.isReg() && !TargetRegisterInfo::isPhysicalRegister(Op.getReg())) {
+ if (Op.isReg() && !Register::isPhysicalRegister(Op.getReg())) {
DoBias = false;
break;
}
@@ -3259,7 +3259,8 @@ void GenericScheduler::reschedulePhysReg(SUnit *SU, bool isTop) {
// Find already scheduled copies with a single physreg dependence and move
// them just above the scheduled instruction.
for (SDep &Dep : Deps) {
- if (Dep.getKind() != SDep::Data || !TRI->isPhysicalRegister(Dep.getReg()))
+ if (Dep.getKind() != SDep::Data ||
+ !Register::isPhysicalRegister(Dep.getReg()))
continue;
SUnit *DepSU = Dep.getSUnit();
if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
@@ -3298,7 +3299,7 @@ void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
/// default scheduler if the target does not set a default.
ScheduleDAGMILive *llvm::createGenericSchedLive(MachineSchedContext *C) {
ScheduleDAGMILive *DAG =
- new ScheduleDAGMILive(C, llvm::make_unique<GenericScheduler>(C));
+ new ScheduleDAGMILive(C, std::make_unique<GenericScheduler>(C));
// Register DAG post-processors.
//
// FIXME: extend the mutation API to allow earlier mutations to instantiate
@@ -3450,7 +3451,7 @@ void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
}
ScheduleDAGMI *llvm::createGenericSchedPostRA(MachineSchedContext *C) {
- return new ScheduleDAGMI(C, llvm::make_unique<PostGenericScheduler>(C),
+ return new ScheduleDAGMI(C, std::make_unique<PostGenericScheduler>(C),
/*RemoveKillFlags=*/true);
}
@@ -3561,10 +3562,10 @@ public:
} // end anonymous namespace
static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
- return new ScheduleDAGMILive(C, llvm::make_unique<ILPScheduler>(true));
+ return new ScheduleDAGMILive(C, std::make_unique<ILPScheduler>(true));
}
static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
- return new ScheduleDAGMILive(C, llvm::make_unique<ILPScheduler>(false));
+ return new ScheduleDAGMILive(C, std::make_unique<ILPScheduler>(false));
}
static MachineSchedRegistry ILPMaxRegistry(
@@ -3658,7 +3659,7 @@ static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
assert((TopDown || !ForceTopDown) &&
"-misched-topdown incompatible with -misched-bottomup");
return new ScheduleDAGMILive(
- C, llvm::make_unique<InstructionShuffler>(Alternate, TopDown));
+ C, std::make_unique<InstructionShuffler>(Alternate, TopDown));
}
static MachineSchedRegistry ShufflerRegistry(