Diffstat (limited to 'lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp')
-rw-r--r--  lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp | 215
1 file changed, 126 insertions(+), 89 deletions(-)
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
index 70b1fa77a0991..49f304c8cc869 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
@@ -1,4 +1,4 @@
-//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
+//===- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler ------===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,23 +16,47 @@
//===----------------------------------------------------------------------===//
#include "ScheduleDAGSDNodes.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineValueType.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
-#include "llvm/IR/DataLayout.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/InlineAsm.h"
+#include "llvm/MC/MCInstrDesc.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetLowering.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetSubtargetInfo.h"
-#include <climits>
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <cstdlib>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <utility>
+#include <vector>
+
using namespace llvm;
#define DEBUG_TYPE "pre-RA-sched"
@@ -46,6 +70,7 @@ static RegisterScheduler
burrListDAGScheduler("list-burr",
"Bottom-up register reduction list scheduling",
createBURRListDAGScheduler);
+
static RegisterScheduler
sourceListDAGScheduler("source",
"Similar to list-burr but schedules in source "
@@ -105,6 +130,7 @@ static cl::opt<unsigned> AvgIPC(
cl::desc("Average inst/cycle whan no target itinerary exists."));
namespace {
+
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation. This supports both top-down and bottom-up scheduling.
@@ -112,7 +138,6 @@ namespace {
class ScheduleDAGRRList : public ScheduleDAGSDNodes {
private:
/// NeedLatency - True if the scheduler will make use of latency information.
- ///
bool NeedLatency;
/// AvailableQueue - The priority queue to use for the available SUnits.
@@ -122,13 +147,13 @@ private:
/// been issued, but their results are not ready yet (due to the latency of
/// the operation). Once the operands becomes available, the instruction is
/// added to the AvailableQueue.
- std::vector<SUnit*> PendingQueue;
+ std::vector<SUnit *> PendingQueue;
/// HazardRec - The hazard recognizer to use.
ScheduleHazardRecognizer *HazardRec;
/// CurCycle - The current scheduler state corresponds to this cycle.
- unsigned CurCycle;
+ unsigned CurCycle = 0;
/// MinAvailableCycle - Cycle of the soonest available instruction.
unsigned MinAvailableCycle;
@@ -147,7 +172,9 @@ private:
// Collect interferences between physical register use/defs.
// Each interference is an SUnit and set of physical registers.
SmallVector<SUnit*, 4> Interferences;
- typedef DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMapT;
+
+ using LRegsMapT = DenseMap<SUnit *, SmallVector<unsigned, 4>>;
+
LRegsMapT LRegsMap;
/// Topo - A topological ordering for SUnits which permits fast IsReachable
@@ -163,9 +190,8 @@ public:
SchedulingPriorityQueue *availqueue,
CodeGenOpt::Level OptLevel)
: ScheduleDAGSDNodes(mf),
- NeedLatency(needlatency), AvailableQueue(availqueue), CurCycle(0),
+ NeedLatency(needlatency), AvailableQueue(availqueue),
Topo(SUnits, nullptr) {
-
const TargetSubtargetInfo &STI = mf.getSubtarget();
if (DisableSchedCycles || !NeedLatency)
HazardRec = new ScheduleHazardRecognizer();
@@ -267,6 +293,7 @@ private:
return !NeedLatency;
}
};
+
} // end anonymous namespace
/// GetCostForDef - Looks up the register class and cost for a given definition.
@@ -319,13 +346,13 @@ static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
- DEBUG(dbgs()
- << "********** List Scheduling BB#" << BB->getNumber()
- << " '" << BB->getName() << "' **********\n");
+ DEBUG(dbgs() << "********** List Scheduling " << printMBBReference(*BB)
+ << " '" << BB->getName() << "' **********\n");
CurCycle = 0;
IssueCount = 0;
- MinAvailableCycle = DisableSchedCycles ? 0 : UINT_MAX;
+ MinAvailableCycle =
+ DisableSchedCycles ? 0 : std::numeric_limits<unsigned>::max();
NumLiveRegs = 0;
// Allocate slots for each physical register, plus one for a special register
// to track the virtual resource of a calling sequence.
@@ -409,7 +436,7 @@ static bool IsChainDependent(SDNode *Outer, SDNode *Inner,
unsigned NestLevel,
const TargetInstrInfo *TII) {
SDNode *N = Outer;
- for (;;) {
+ while (true) {
if (N == Inner)
return true;
// For a TokenFactor, examine each operand. There may be multiple ways
@@ -456,7 +483,7 @@ static bool IsChainDependent(SDNode *Outer, SDNode *Inner,
static SDNode *
FindCallSeqStart(SDNode *N, unsigned &NestLevel, unsigned &MaxNest,
const TargetInstrInfo *TII) {
- for (;;) {
+ while (true) {
// For a TokenFactor, examine each operand. There may be multiple ways
// to get to the CALLSEQ_BEGIN, but we need to find the path with the
// most nesting in order to ensure that we find the corresponding match.
@@ -550,6 +577,7 @@ void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
unsigned NestLevel = 0;
unsigned MaxNest = 0;
SDNode *N = FindCallSeqStart(Node, NestLevel, MaxNest, TII);
+ assert(N && "Must find call sequence start");
SUnit *Def = &SUnits[N->getNodeId()];
CallSeqEndForStart[Def] = SU;
@@ -571,7 +599,7 @@ void ScheduleDAGRRList::ReleasePending() {
// If the available queue is empty, it is safe to reset MinAvailableCycle.
if (AvailableQueue->empty())
- MinAvailableCycle = UINT_MAX;
+ MinAvailableCycle = std::numeric_limits<unsigned>::max();
// Check to see if any of the pending instructions are ready to issue. If
// so, add them to the available queue.
@@ -791,7 +819,8 @@ void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
AvailableQueue->remove(PredSU);
}
- assert(PredSU->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
+ assert(PredSU->NumSuccsLeft < std::numeric_limits<unsigned>::max() &&
+ "NumSuccsLeft will overflow!");
++PredSU->NumSuccsLeft;
}
@@ -821,9 +850,13 @@ void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
SUNode = SUNode->getGluedNode()) {
if (SUNode->isMachineOpcode() &&
SUNode->getMachineOpcode() == TII->getCallFrameSetupOpcode()) {
+ SUnit *SeqEnd = CallSeqEndForStart[SU];
+ assert(SeqEnd && "Call sequence start/end must be known");
+ assert(!LiveRegDefs[CallResource]);
+ assert(!LiveRegGens[CallResource]);
++NumLiveRegs;
LiveRegDefs[CallResource] = SU;
- LiveRegGens[CallResource] = CallSeqEndForStart[SU];
+ LiveRegGens[CallResource] = SeqEnd;
}
}
@@ -835,6 +868,8 @@ void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
if (SUNode->isMachineOpcode() &&
SUNode->getMachineOpcode() == TII->getCallFrameDestroyOpcode()) {
assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
+ assert(LiveRegDefs[CallResource]);
+ assert(LiveRegGens[CallResource]);
--NumLiveRegs;
LiveRegDefs[CallResource] = nullptr;
LiveRegGens[CallResource] = nullptr;
@@ -891,7 +926,7 @@ void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
if (LookAhead == 0)
return;
- std::vector<SUnit*>::const_iterator I = (Sequence.end() - LookAhead);
+ std::vector<SUnit *>::const_iterator I = (Sequence.end() - LookAhead);
unsigned HazardCycle = (*I)->getHeight();
for (auto E = Sequence.end(); I != E; ++I) {
SUnit *SU = *I;
@@ -1319,8 +1354,7 @@ DelayForLiveRegsBottomUp(SUnit *SU, SmallVectorImpl<unsigned> &LRegs) {
// If we're in the middle of scheduling a call, don't begin scheduling
// another call. Also, don't allow any physical registers to be live across
// the call.
- if ((Node->getMachineOpcode() == TII->getCallFrameDestroyOpcode()) ||
- (Node->getMachineOpcode() == TII->getCallFrameSetupOpcode())) {
+ if (Node->getMachineOpcode() == TII->getCallFrameDestroyOpcode()) {
// Check the special calling-sequence resource.
unsigned CallResource = TRI->getNumRegs();
if (LiveRegDefs[CallResource]) {
@@ -1390,27 +1424,32 @@ void ScheduleDAGRRList::releaseInterferences(unsigned Reg) {
/// (3) No Interferences: may unschedule to break register interferences.
SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
SUnit *CurSU = AvailableQueue->empty() ? nullptr : AvailableQueue->pop();
- while (CurSU) {
- SmallVector<unsigned, 4> LRegs;
- if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
- break;
- DEBUG(dbgs() << " Interfering reg " <<
- (LRegs[0] == TRI->getNumRegs() ? "CallResource"
- : TRI->getName(LRegs[0]))
- << " SU #" << CurSU->NodeNum << '\n');
- std::pair<LRegsMapT::iterator, bool> LRegsPair =
- LRegsMap.insert(std::make_pair(CurSU, LRegs));
- if (LRegsPair.second) {
- CurSU->isPending = true; // This SU is not in AvailableQueue right now.
- Interferences.push_back(CurSU);
- }
- else {
- assert(CurSU->isPending && "Interferences are pending");
- // Update the interference with current live regs.
- LRegsPair.first->second = LRegs;
+ auto FindAvailableNode = [&]() {
+ while (CurSU) {
+ SmallVector<unsigned, 4> LRegs;
+ if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
+ break;
+ DEBUG(dbgs() << " Interfering reg ";
+ if (LRegs[0] == TRI->getNumRegs())
+ dbgs() << "CallResource";
+ else
+ dbgs() << printReg(LRegs[0], TRI);
+ dbgs() << " SU #" << CurSU->NodeNum << '\n');
+ std::pair<LRegsMapT::iterator, bool> LRegsPair =
+ LRegsMap.insert(std::make_pair(CurSU, LRegs));
+ if (LRegsPair.second) {
+ CurSU->isPending = true; // This SU is not in AvailableQueue right now.
+ Interferences.push_back(CurSU);
+ }
+ else {
+ assert(CurSU->isPending && "Interferences are pending");
+ // Update the interference with current live regs.
+ LRegsPair.first->second = LRegs;
+ }
+ CurSU = AvailableQueue->pop();
}
- CurSU = AvailableQueue->pop();
- }
+ };
+ FindAvailableNode();
if (CurSU)
return CurSU;
@@ -1423,7 +1462,7 @@ SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
// Try unscheduling up to the point where it's safe to schedule
// this node.
SUnit *BtSU = nullptr;
- unsigned LiveCycle = UINT_MAX;
+ unsigned LiveCycle = std::numeric_limits<unsigned>::max();
for (unsigned Reg : LRegs) {
if (LiveRegGens[Reg]->getHeight() < LiveCycle) {
BtSU = LiveRegGens[Reg];
@@ -1447,13 +1486,16 @@ SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
// If one or more successors has been unscheduled, then the current
// node is no longer available.
- if (!TrySU->isAvailable || !TrySU->NodeQueueId)
+ if (!TrySU->isAvailable || !TrySU->NodeQueueId) {
+ DEBUG(dbgs() << "TrySU not available; choosing node from queue\n");
CurSU = AvailableQueue->pop();
- else {
+ } else {
+ DEBUG(dbgs() << "TrySU available\n");
// Available and in AvailableQueue
AvailableQueue->remove(TrySU);
CurSU = TrySU;
}
+ FindAvailableNode();
// Interferences has been mutated. We must break.
break;
}
@@ -1540,7 +1582,8 @@ void ScheduleDAGRRList::ListScheduleBottomUp() {
while (AvailableQueue->empty() && !PendingQueue.empty()) {
// Advance the cycle to free resources. Skip ahead to the next ready SU.
- assert(MinAvailableCycle < UINT_MAX && "MinAvailableCycle uninitialized");
+ assert(MinAvailableCycle < std::numeric_limits<unsigned>::max() &&
+ "MinAvailableCycle uninitialized");
AdvanceToCycle(std::max(CurCycle + 1, MinAvailableCycle));
}
}
@@ -1553,17 +1596,11 @@ void ScheduleDAGRRList::ListScheduleBottomUp() {
#endif
}
-//===----------------------------------------------------------------------===//
-// RegReductionPriorityQueue Definition
-//===----------------------------------------------------------------------===//
-//
-// This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
-// to reduce register pressure.
-//
namespace {
+
class RegReductionPQBase;
-struct queue_sort : public std::binary_function<SUnit*, SUnit*, bool> {
+struct queue_sort {
bool isReady(SUnit* SU, unsigned CurCycle) const { return true; }
};
@@ -1571,6 +1608,7 @@ struct queue_sort : public std::binary_function<SUnit*, SUnit*, bool> {
template<class SF>
struct reverse_sort : public queue_sort {
SF &SortFunc;
+
reverse_sort(SF &sf) : SortFunc(sf) {}
bool operator()(SUnit* left, SUnit* right) const {
@@ -1590,6 +1628,7 @@ struct bu_ls_rr_sort : public queue_sort {
};
RegReductionPQBase *SPQ;
+
bu_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
bool operator()(SUnit* left, SUnit* right) const;
@@ -1603,8 +1642,8 @@ struct src_ls_rr_sort : public queue_sort {
};
RegReductionPQBase *SPQ;
- src_ls_rr_sort(RegReductionPQBase *spq)
- : SPQ(spq) {}
+
+ src_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
bool operator()(SUnit* left, SUnit* right) const;
};
@@ -1617,8 +1656,8 @@ struct hybrid_ls_rr_sort : public queue_sort {
};
RegReductionPQBase *SPQ;
- hybrid_ls_rr_sort(RegReductionPQBase *spq)
- : SPQ(spq) {}
+
+ hybrid_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
bool isReady(SUnit *SU, unsigned CurCycle) const;
@@ -1634,8 +1673,8 @@ struct ilp_ls_rr_sort : public queue_sort {
};
RegReductionPQBase *SPQ;
- ilp_ls_rr_sort(RegReductionPQBase *spq)
- : SPQ(spq) {}
+
+ ilp_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
bool isReady(SUnit *SU, unsigned CurCycle) const;
@@ -1644,8 +1683,8 @@ struct ilp_ls_rr_sort : public queue_sort {
class RegReductionPQBase : public SchedulingPriorityQueue {
protected:
- std::vector<SUnit*> Queue;
- unsigned CurQueueId;
+ std::vector<SUnit *> Queue;
+ unsigned CurQueueId = 0;
bool TracksRegPressure;
bool SrcOrder;
@@ -1656,13 +1695,12 @@ protected:
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
const TargetLowering *TLI;
- ScheduleDAGRRList *scheduleDAG;
+ ScheduleDAGRRList *scheduleDAG = nullptr;
// SethiUllmanNumbers - The SethiUllman number for each node.
std::vector<unsigned> SethiUllmanNumbers;
/// RegPressure - Tracking current reg pressure per register class.
- ///
std::vector<unsigned> RegPressure;
/// RegLimit - Tracking the number of allocatable registers per register
@@ -1677,9 +1715,8 @@ public:
const TargetInstrInfo *tii,
const TargetRegisterInfo *tri,
const TargetLowering *tli)
- : SchedulingPriorityQueue(hasReadyFilter),
- CurQueueId(0), TracksRegPressure(tracksrp), SrcOrder(srcorder),
- MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(nullptr) {
+ : SchedulingPriorityQueue(hasReadyFilter), TracksRegPressure(tracksrp),
+ SrcOrder(srcorder), MF(mf), TII(tii), TRI(tri), TLI(tli) {
if (TracksRegPressure) {
unsigned NumRC = TRI->getNumRegClasses();
RegLimit.resize(NumRC);
@@ -1730,7 +1767,7 @@ public:
void remove(SUnit *SU) override {
assert(!Queue.empty() && "Queue is empty!");
assert(SU->NodeQueueId != 0 && "Not in queue!");
- std::vector<SUnit *>::iterator I = find(Queue, SU);
+ std::vector<SUnit *>::iterator I = llvm::find(Queue, SU);
if (I != std::prev(Queue.end()))
std::swap(*I, Queue.back());
Queue.pop_back();
@@ -1759,7 +1796,7 @@ protected:
};
template<class SF>
-static SUnit *popFromQueueImpl(std::vector<SUnit*> &Q, SF &Picker) {
+static SUnit *popFromQueueImpl(std::vector<SUnit *> &Q, SF &Picker) {
std::vector<SUnit *>::iterator Best = Q.begin();
for (auto I = std::next(Q.begin()), E = Q.end(); I != E; ++I)
if (Picker(*Best, *I))
@@ -1772,7 +1809,7 @@ static SUnit *popFromQueueImpl(std::vector<SUnit*> &Q, SF &Picker) {
}
template<class SF>
-SUnit *popFromQueue(std::vector<SUnit*> &Q, SF &Picker, ScheduleDAG *DAG) {
+SUnit *popFromQueue(std::vector<SUnit *> &Q, SF &Picker, ScheduleDAG *DAG) {
#ifndef NDEBUG
if (DAG->StressSched) {
reverse_sort<SF> RPicker(Picker);
@@ -1783,6 +1820,13 @@ SUnit *popFromQueue(std::vector<SUnit*> &Q, SF &Picker, ScheduleDAG *DAG) {
return popFromQueueImpl(Q, Picker);
}
+//===----------------------------------------------------------------------===//
+// RegReductionPriorityQueue Definition
+//===----------------------------------------------------------------------===//
+//
+// This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
+// to reduce register pressure.
+//
template<class SF>
class RegReductionPriorityQueue : public RegReductionPQBase {
SF Picker;
@@ -1815,7 +1859,7 @@ public:
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void dump(ScheduleDAG *DAG) const override {
// Emulate pop() without clobbering NodeQueueIds.
- std::vector<SUnit*> DumpQueue = Queue;
+ std::vector<SUnit *> DumpQueue = Queue;
SF DumpPicker = Picker;
while (!DumpQueue.empty()) {
SUnit *SU = popFromQueue(DumpQueue, DumpPicker, scheduleDAG);
@@ -1826,17 +1870,11 @@ public:
#endif
};
-typedef RegReductionPriorityQueue<bu_ls_rr_sort>
-BURegReductionPriorityQueue;
-
-typedef RegReductionPriorityQueue<src_ls_rr_sort>
-SrcRegReductionPriorityQueue;
+using BURegReductionPriorityQueue = RegReductionPriorityQueue<bu_ls_rr_sort>;
+using SrcRegReductionPriorityQueue = RegReductionPriorityQueue<src_ls_rr_sort>;
+using HybridBURRPriorityQueue = RegReductionPriorityQueue<hybrid_ls_rr_sort>;
+using ILPBURRPriorityQueue = RegReductionPriorityQueue<ilp_ls_rr_sort>;
-typedef RegReductionPriorityQueue<hybrid_ls_rr_sort>
-HybridBURRPriorityQueue;
-
-typedef RegReductionPriorityQueue<ilp_ls_rr_sort>
-ILPBURRPriorityQueue;
} // end anonymous namespace
//===----------------------------------------------------------------------===//
@@ -2855,7 +2893,6 @@ static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
/// This results in the store being scheduled immediately
/// after N, which shortens the U->N live range, reducing
/// register pressure.
-///
void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
// Visit all the nodes in topological order, working top-down.
for (SUnit &SU : *SUnits) {
@@ -3022,7 +3059,7 @@ void RegReductionPQBase::AddPseudoTwoAddrDeps() {
// Public Constructor Functions
//===----------------------------------------------------------------------===//
-llvm::ScheduleDAGSDNodes *
+ScheduleDAGSDNodes *
llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level OptLevel) {
const TargetSubtargetInfo &STI = IS->MF->getSubtarget();
@@ -3036,7 +3073,7 @@ llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
return SD;
}
-llvm::ScheduleDAGSDNodes *
+ScheduleDAGSDNodes *
llvm::createSourceListDAGScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level OptLevel) {
const TargetSubtargetInfo &STI = IS->MF->getSubtarget();
@@ -3050,7 +3087,7 @@ llvm::createSourceListDAGScheduler(SelectionDAGISel *IS,
return SD;
}
-llvm::ScheduleDAGSDNodes *
+ScheduleDAGSDNodes *
llvm::createHybridListDAGScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level OptLevel) {
const TargetSubtargetInfo &STI = IS->MF->getSubtarget();
@@ -3066,7 +3103,7 @@ llvm::createHybridListDAGScheduler(SelectionDAGISel *IS,
return SD;
}
-llvm::ScheduleDAGSDNodes *
+ScheduleDAGSDNodes *
llvm::createILPListDAGScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level OptLevel) {
const TargetSubtargetInfo &STI = IS->MF->getSubtarget();