aboutsummaryrefslogtreecommitdiff
path: root/lib/CodeGen/ScheduleDAGInstrs.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'lib/CodeGen/ScheduleDAGInstrs.cpp')
-rw-r--r--lib/CodeGen/ScheduleDAGInstrs.cpp66
1 files changed, 54 insertions, 12 deletions
diff --git a/lib/CodeGen/ScheduleDAGInstrs.cpp b/lib/CodeGen/ScheduleDAGInstrs.cpp
index 99406ed1496a..d5ad7e92299d 100644
--- a/lib/CodeGen/ScheduleDAGInstrs.cpp
+++ b/lib/CodeGen/ScheduleDAGInstrs.cpp
@@ -1,9 +1,8 @@
//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -115,7 +114,7 @@ ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
: ScheduleDAG(mf), MLI(mli), MFI(mf.getFrameInfo()),
RemoveKillFlags(RemoveKillFlags),
UnknownValue(UndefValue::get(
- Type::getVoidTy(mf.getFunction().getContext()))) {
+ Type::getVoidTy(mf.getFunction().getContext()))), Topo(SUnits, &ExitSU) {
DbgValues.clear();
const TargetSubtargetInfo &ST = mf.getSubtarget();
@@ -132,7 +131,8 @@ static bool getUnderlyingObjectsForInstr(const MachineInstr *MI,
const DataLayout &DL) {
auto allMMOsOkay = [&]() {
for (const MachineMemOperand *MMO : MI->memoperands()) {
- if (MMO->isVolatile())
+ // TODO: Figure out whether isAtomic is really necessary (see D57601).
+ if (MMO->isVolatile() || MMO->isAtomic())
return false;
if (const PseudoSourceValue *PSV = MMO->getPseudoValue()) {
@@ -743,6 +743,14 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
// done.
Value2SUsMap NonAliasStores, NonAliasLoads(1 /*TrueMemOrderLatency*/);
+ // Track all instructions that may raise floating-point exceptions.
// These do not depend on one another (or normal loads or stores), but
+ // must not be rescheduled across global barriers. Note that we don't
+ // really need a "map" here since we don't track those MIs by value;
+ // using the same Value2SUsMap data type here is simply a matter of
+ // convenience.
+ Value2SUsMap FPExceptions;
+
// Remove any stale debug info; sometimes BuildSchedGraph is called again
// without emitting the info from the previous call.
DbgValues.clear();
@@ -870,10 +878,26 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
addBarrierChain(Loads);
addBarrierChain(NonAliasStores);
addBarrierChain(NonAliasLoads);
+ addBarrierChain(FPExceptions);
continue;
}
+ // Instructions that may raise FP exceptions may not be moved
+ // across any global barriers.
+ if (MI.mayRaiseFPException()) {
+ if (BarrierChain)
+ BarrierChain->addPredBarrier(SU);
+
+ FPExceptions.insert(SU, UnknownValue);
+
+ if (FPExceptions.size() >= HugeRegion) {
+ LLVM_DEBUG(dbgs() << "Reducing FPExceptions map.\n";);
+ Value2SUsMap empty;
+ reduceHugeMemNodeMaps(FPExceptions, empty, getReductionSize());
+ }
+ }
+
// If it's not a store or a variant load, we're done.
if (!MI.mayStore() &&
!(MI.mayLoad() && !MI.isDereferenceableInvariantLoad(AA)))
@@ -968,6 +992,8 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
Uses.clear();
CurrentVRegDefs.clear();
CurrentVRegUses.clear();
+
+ Topo.MarkDirty();
}
raw_ostream &llvm::operator<<(raw_ostream &OS, const PseudoSourceValue* PSV) {
@@ -1089,22 +1115,21 @@ void ScheduleDAGInstrs::fixupKills(MachineBasicBlock &MBB) {
if (!MI.isBundled()) {
toggleKills(MRI, LiveRegs, MI, true);
} else {
- MachineBasicBlock::instr_iterator First = MI.getIterator();
- if (MI.isBundle()) {
+ MachineBasicBlock::instr_iterator Bundle = MI.getIterator();
+ if (MI.isBundle())
toggleKills(MRI, LiveRegs, MI, false);
- ++First;
- }
+
// Some targets make the (questionable) assumption that the instructions
// inside the bundle are ordered and consequently only the last use of
// a register inside the bundle can kill it.
- MachineBasicBlock::instr_iterator I = std::next(First);
+ MachineBasicBlock::instr_iterator I = std::next(Bundle);
while (I->isBundledWithSucc())
++I;
do {
if (!I->isDebugInstr())
toggleKills(MRI, LiveRegs, *I, true);
--I;
- } while(I != First);
+ } while (I != Bundle);
}
}
}
@@ -1146,6 +1171,23 @@ std::string ScheduleDAGInstrs::getDAGName() const {
return "dag." + BB->getFullName();
}
+bool ScheduleDAGInstrs::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
+ return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
+}
+
+bool ScheduleDAGInstrs::addEdge(SUnit *SuccSU, const SDep &PredDep) {
+ if (SuccSU != &ExitSU) {
+ // Do not use WillCreateCycle, it assumes SD scheduling.
+ // If Pred is reachable from Succ, then the edge creates a cycle.
+ if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
+ return false;
+ Topo.AddPredQueued(SuccSU, PredDep.getSUnit());
+ }
+ SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
+ // Return true regardless of whether a new edge needed to be inserted.
+ return true;
+}
+
//===----------------------------------------------------------------------===//
// SchedDFSResult Implementation
//===----------------------------------------------------------------------===//