Diffstat (limited to 'lib/CodeGen/ScheduleDAGInstrs.cpp')
-rw-r--r--  lib/CodeGen/ScheduleDAGInstrs.cpp  374
1 file changed, 283 insertions, 91 deletions
diff --git a/lib/CodeGen/ScheduleDAGInstrs.cpp b/lib/CodeGen/ScheduleDAGInstrs.cpp
index 7f1f9c4e7be1..0f8b21c1c1f3 100644
--- a/lib/CodeGen/ScheduleDAGInstrs.cpp
+++ b/lib/CodeGen/ScheduleDAGInstrs.cpp
@@ -12,7 +12,6 @@
 //
 //===----------------------------------------------------------------------===//
 
-#define DEBUG_TYPE "misched"
 #include "llvm/CodeGen/ScheduleDAGInstrs.h"
 #include "llvm/ADT/MapVector.h"
 #include "llvm/ADT/SmallPtrSet.h"
@@ -21,6 +20,7 @@
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineMemOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/PseudoSourceValue.h"
@@ -40,17 +40,24 @@
 
 using namespace llvm;
 
+#define DEBUG_TYPE "misched"
+
 static cl::opt<bool> EnableAASchedMI("enable-aa-sched-mi", cl::Hidden,
     cl::ZeroOrMore, cl::init(false),
     cl::desc("Enable use of AA during MI GAD construction"));
 
+static cl::opt<bool> UseTBAA("use-tbaa-in-sched-mi", cl::Hidden,
+    cl::init(true), cl::desc("Enable use of TBAA during MI GAD construction"));
+
 ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                      const MachineLoopInfo &mli,
                                      const MachineDominatorTree &mdt,
                                      bool IsPostRAFlag,
+                                     bool RemoveKillFlags,
                                      LiveIntervals *lis)
   : ScheduleDAG(mf), MLI(mli), MDT(mdt), MFI(mf.getFrameInfo()), LIS(lis),
-    IsPostRA(IsPostRAFlag), CanHandleTerminators(false), FirstDbgValue(0) {
+    IsPostRA(IsPostRAFlag), RemoveKillFlags(RemoveKillFlags),
+    CanHandleTerminators(false), FirstDbgValue(nullptr) {
   assert((IsPostRA || LIS) && "PreRA scheduling requires LiveIntervals");
   DbgValues.clear();
   assert(!(IsPostRA && MRI.getNumVirtRegs()) &&
@@ -92,7 +99,7 @@ static const Value *getUnderlyingObjectFromInt(const Value *V) {
 /// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
 static void getUnderlyingObjects(const Value *V,
                                  SmallVectorImpl<Value *> &Objects) {
-  SmallPtrSet<const Value*, 16> Visited;
+  SmallPtrSet<const Value *, 16> Visited;
   SmallVector<const Value *, 4> Working(1, V);
   do {
     V = Working.pop_back_val();
@@ -118,7 +125,8 @@ static void getUnderlyingObjects(const Value *V,
   } while (!Working.empty());
 }
 
-typedef SmallVector<PointerIntPair<const Value *, 1, bool>, 4>
+typedef PointerUnion<const Value *, const PseudoSourceValue *> ValueType;
+typedef SmallVector<PointerIntPair<ValueType, 1, bool>, 4>
 UnderlyingObjectsVector;
 
 /// getUnderlyingObjectsForInstr - If this machine instr has memory reference
@@ -128,10 +136,23 @@ static void getUnderlyingObjectsForInstr(const MachineInstr *MI,
                                          const MachineFrameInfo *MFI,
                                          UnderlyingObjectsVector &Objects) {
   if (!MI->hasOneMemOperand() ||
-      !(*MI->memoperands_begin())->getValue() ||
+      (!(*MI->memoperands_begin())->getValue() &&
+       !(*MI->memoperands_begin())->getPseudoValue()) ||
       (*MI->memoperands_begin())->isVolatile())
     return;
 
+  if (const PseudoSourceValue *PSV =
+      (*MI->memoperands_begin())->getPseudoValue()) {
+    // For now, ignore PseudoSourceValues which may alias LLVM IR values
+    // because the code that uses this function has no way to cope with
+    // such aliases.
+    if (!PSV->isAliased(MFI)) {
+      bool MayAlias = PSV->mayAlias(MFI);
+      Objects.push_back(UnderlyingObjectsVector::value_type(PSV, MayAlias));
+    }
+    return;
+  }
+
   const Value *V = (*MI->memoperands_begin())->getValue();
   if (!V)
     return;
@@ -141,26 +162,14 @@ static void getUnderlyingObjectsForInstr(const MachineInstr *MI,
 
   for (SmallVectorImpl<Value *>::iterator I = Objs.begin(), IE = Objs.end();
        I != IE; ++I) {
-    bool MayAlias = true;
     V = *I;
 
-    if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
-      // For now, ignore PseudoSourceValues which may alias LLVM IR values
-      // because the code that uses this function has no way to cope with
-      // such aliases.
-
-      if (PSV->isAliased(MFI)) {
-        Objects.clear();
-        return;
-      }
-
-      MayAlias = PSV->mayAlias(MFI);
-    } else if (!isIdentifiedObject(V)) {
+    if (!isIdentifiedObject(V)) {
       Objects.clear();
       return;
     }
 
-    Objects.push_back(UnderlyingObjectsVector::value_type(V, MayAlias));
+    Objects.push_back(UnderlyingObjectsVector::value_type(V, true));
   }
 }
@@ -170,7 +179,7 @@ void ScheduleDAGInstrs::startBlock(MachineBasicBlock *bb) {
 }
 
 void ScheduleDAGInstrs::finishBlock() {
   // Subclasses should no longer refer to the old block.
-  BB = 0;
+  BB = nullptr;
 }
 
 /// Initialize the DAG and common scheduler state for the current scheduling
@@ -202,7 +211,7 @@ void ScheduleDAGInstrs::exitRegion() {
 /// are too high to be hidden by the branch or when the liveout registers
 /// used by instructions in the fallthrough block.
 void ScheduleDAGInstrs::addSchedBarrierDeps() {
-  MachineInstr *ExitMI = RegionEnd != BB->end() ? &*RegionEnd : 0;
+  MachineInstr *ExitMI = RegionEnd != BB->end() ? &*RegionEnd : nullptr;
   ExitSU.setInstr(ExitMI);
   bool AllDepKnown = ExitMI &&
     (ExitMI->isCall() || ExitMI->isBarrier());
@@ -259,7 +268,7 @@ void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) {
       // Adjust the dependence latency using operand def/use information,
       // then allow the target to perform its own adjustments.
       int UseOp = I->OpIdx;
-      MachineInstr *RegUse = 0;
+      MachineInstr *RegUse = nullptr;
       SDep Dep;
       if (UseOp < 0)
         Dep = SDep(SU, SDep::Artificial);
@@ -284,8 +293,8 @@ void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) {
 /// this SUnit to following instructions in the same scheduling region that
 /// depend the physical register referenced at OperIdx.
 void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
-  const MachineInstr *MI = SU->getInstr();
-  const MachineOperand &MO = MI->getOperand(OperIdx);
+  MachineInstr *MI = SU->getInstr();
+  MachineOperand &MO = MI->getOperand(OperIdx);
 
   // Optionally add output and anti dependencies. For anti
   // dependencies we use a latency of 0 because for a multi-issue
@@ -323,6 +332,8 @@ void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
     // retrieve the existing SUnits list for this register's uses.
     // Push this SUnit on the use list.
     Uses.insert(PhysRegSUOper(SU, OperIdx, MO.getReg()));
+    if (RemoveKillFlags)
+      MO.setIsKill(false);
   }
   else {
     addPhysRegDataDeps(SU, OperIdx);
@@ -468,6 +479,15 @@ static inline bool isUnsafeMemoryObject(MachineInstr *MI,
   if ((*MI->memoperands_begin())->isVolatile() ||
       MI->hasUnmodeledSideEffects())
     return true;
+
+  if ((*MI->memoperands_begin())->getPseudoValue()) {
+    // Similarly to getUnderlyingObjectForInstr:
+    // For now, ignore PseudoSourceValues which may alias LLVM IR values
+    // because the code that uses this function has no way to cope with
+    // such aliases.
+    return true;
+  }
+
   const Value *V = (*MI->memoperands_begin())->getValue();
   if (!V)
     return true;
@@ -476,19 +496,8 @@ static inline bool isUnsafeMemoryObject(MachineInstr *MI,
   getUnderlyingObjects(V, Objs);
   for (SmallVectorImpl<Value *>::iterator I = Objs.begin(),
          IE = Objs.end(); I != IE; ++I) {
-    V = *I;
-
-    if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
-      // Similarly to getUnderlyingObjectForInstr:
-      // For now, ignore PseudoSourceValues which may alias LLVM IR values
-      // because the code that uses this function has no way to cope with
-      // such aliases.
-      if (PSV->isAliased(MFI))
-        return true;
-    }
-
     // Does this pointer refer to a distinct and identifiable object?
-    if (!isIdentifiedObject(V))
+    if (!isIdentifiedObject(*I))
       return true;
   }
 
@@ -507,6 +516,10 @@ static bool MIsNeedChainEdge(AliasAnalysis *AA, const MachineFrameInfo *MFI,
   if (MIa == MIb)
     return false;
 
+  // FIXME: Need to handle multiple memory operands to support all targets.
+  if (!MIa->hasOneMemOperand() || !MIb->hasOneMemOperand())
+    return true;
+
   if (isUnsafeMemoryObject(MIa, MFI) || isUnsafeMemoryObject(MIb, MFI))
     return true;
 
@@ -522,9 +535,8 @@ static bool MIsNeedChainEdge(AliasAnalysis *AA, const MachineFrameInfo *MFI,
   MachineMemOperand *MMOa = *MIa->memoperands_begin();
   MachineMemOperand *MMOb = *MIb->memoperands_begin();
 
-  // FIXME: Need to handle multiple memory operands to support all targets.
-  if (!MIa->hasOneMemOperand() || !MIb->hasOneMemOperand())
-    llvm_unreachable("Multiple memory operands.");
+  if (!MMOa->getValue() || !MMOb->getValue())
+    return true;
 
   // The following interface to AA is fashioned after DAGCombiner::isAlias
   // and operates with MachineMemOperand offset with some important
@@ -550,10 +562,10 @@ static bool MIsNeedChainEdge(AliasAnalysis *AA, const MachineFrameInfo *MFI,
   int64_t Overlapb = MMOb->getSize() + MMOb->getOffset() - MinOffset;
 
   AliasAnalysis::AliasResult AAResult = AA->alias(
-  AliasAnalysis::Location(MMOa->getValue(), Overlapa,
-                          MMOa->getTBAAInfo()),
-  AliasAnalysis::Location(MMOb->getValue(), Overlapb,
-                          MMOb->getTBAAInfo()));
+      AliasAnalysis::Location(MMOa->getValue(), Overlapa,
+                              UseTBAA ? MMOa->getTBAAInfo() : nullptr),
+      AliasAnalysis::Location(MMOb->getValue(), Overlapb,
+                              UseTBAA ? MMOb->getTBAAInfo() : nullptr));
 
   return (AAResult != AliasAnalysis::NoAlias);
 }
@@ -687,10 +699,36 @@ void ScheduleDAGInstrs::initSUnits() {
 
     // Assign the Latency field of SU using target-provided information.
     SU->Latency = SchedModel.computeInstrLatency(SU->getInstr());
+
+    // If this SUnit uses a reserved or unbuffered resource, mark it as such.
+    //
+    // Reserved resources block an instruction from issuing and stall the
+    // entire pipeline. These are identified by BufferSize=0.
+    //
+    // Unbuffered resources prevent execution of subsequent instructions that
+    // require the same resources. This is used for in-order execution pipelines
+    // within an out-of-order core. These are identified by BufferSize=1.
+    if (SchedModel.hasInstrSchedModel()) {
+      const MCSchedClassDesc *SC = getSchedClass(SU);
+      for (TargetSchedModel::ProcResIter
+             PI = SchedModel.getWriteProcResBegin(SC),
+             PE = SchedModel.getWriteProcResEnd(SC); PI != PE; ++PI) {
+        switch (SchedModel.getProcResource(PI->ProcResourceIdx)->BufferSize) {
+        case 0:
+          SU->hasReservedResource = true;
+          break;
+        case 1:
+          SU->isUnbuffered = true;
+          break;
+        default:
+          break;
+        }
+      }
+    }
   }
 }
 
-/// If RegPressure is non null, compute register pressure as a side effect. The
+/// If RegPressure is non-null, compute register pressure as a side effect. The
 /// DAG builder is an efficient place to do it because it already visits
 /// operands.
 void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
@@ -699,7 +737,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
   const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
   bool UseAA = EnableAASchedMI.getNumOccurrences() > 0 ? EnableAASchedMI
                                                        : ST.useAA();
-  AliasAnalysis *AAForDep = UseAA ? AA : 0;
+  AliasAnalysis *AAForDep = UseAA ? AA : nullptr;
 
   MISUnitMap.clear();
   ScheduleDAG::clearDAG();
@@ -714,20 +752,20 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
   // to top.
 
   // Remember where a generic side-effecting instruction is as we procede.
-  SUnit *BarrierChain = 0, *AliasChain = 0;
+  SUnit *BarrierChain = nullptr, *AliasChain = nullptr;
 
   // Memory references to specific known memory locations are tracked
   // so that they can be given more precise dependencies. We track
   // separately the known memory locations that may alias and those
   // that are known not to alias
-  MapVector<const Value *, SUnit *> AliasMemDefs, NonAliasMemDefs;
-  MapVector<const Value *, std::vector<SUnit *> > AliasMemUses, NonAliasMemUses;
+  MapVector<ValueType, std::vector<SUnit *> > AliasMemDefs, NonAliasMemDefs;
+  MapVector<ValueType, std::vector<SUnit *> > AliasMemUses, NonAliasMemUses;
   std::set<SUnit*> RejectMemNodes;
 
   // Remove any stale debug info; sometimes BuildSchedGraph is called again
   // without emitting the info from the previous call.
   DbgValues.clear();
-  FirstDbgValue = NULL;
+  FirstDbgValue = nullptr;
 
   assert(Defs.empty() && Uses.empty() &&
          "Only BuildGraph should update Defs/Uses");
@@ -744,13 +782,13 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
   addSchedBarrierDeps();
 
   // Walk the list of instructions, from bottom moving up.
-  MachineInstr *DbgMI = NULL;
+  MachineInstr *DbgMI = nullptr;
   for (MachineBasicBlock::iterator MII = RegionEnd, MIE = RegionBegin;
        MII != MIE; --MII) {
-    MachineInstr *MI = prior(MII);
+    MachineInstr *MI = std::prev(MII);
     if (MI && DbgMI) {
       DbgValues.push_back(std::make_pair(DbgMI, MI));
-      DbgMI = NULL;
+      DbgMI = nullptr;
     }
 
     if (MI->isDebugValue()) {
@@ -761,13 +799,15 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
     assert(SU && "No SUnit mapped to this MI");
 
     if (RPTracker) {
-      PressureDiff *PDiff = PDiffs ? &(*PDiffs)[SU->NodeNum] : 0;
-      RPTracker->recede(/*LiveUses=*/0, PDiff);
-      assert(RPTracker->getPos() == prior(MII) && "RPTracker can't find MI");
+      PressureDiff *PDiff = PDiffs ? &(*PDiffs)[SU->NodeNum] : nullptr;
+      RPTracker->recede(/*LiveUses=*/nullptr, PDiff);
+      assert(RPTracker->getPos() == std::prev(MII) &&
+             "RPTracker can't find MI");
     }
 
-    assert((CanHandleTerminators || (!MI->isTerminator() && !MI->isLabel())) &&
-           "Cannot schedule terminators or labels!");
+    assert(
+        (CanHandleTerminators || (!MI->isTerminator() && !MI->isPosition())) &&
+        "Cannot schedule terminators or labels!");
 
     // Add register-based dependencies (data, anti, and output).
     bool HasVRegDef = false;
@@ -815,11 +855,13 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
     if (isGlobalMemoryObject(AA, MI)) {
       // Be conservative with these and add dependencies on all memory
      // references, even those that are known to not alias.
-      for (MapVector<const Value *, SUnit *>::iterator I =
+      for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
             NonAliasMemDefs.begin(), E = NonAliasMemDefs.end(); I != E; ++I) {
-        I->second->addPred(SDep(SU, SDep::Barrier));
+        for (unsigned i = 0, e = I->second.size(); i != e; ++i) {
+          I->second[i]->addPred(SDep(SU, SDep::Barrier));
+        }
       }
-      for (MapVector<const Value *, std::vector<SUnit *> >::iterator I =
+      for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
            NonAliasMemUses.begin(), E = NonAliasMemUses.end(); I != E; ++I) {
         for (unsigned i = 0, e = I->second.size(); i != e; ++i) {
           SDep Dep(SU, SDep::Barrier);
@@ -853,10 +895,12 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
       for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
         addChainDependency(AAForDep, MFI, SU, PendingLoads[k], RejectMemNodes,
                            TrueMemOrderLatency);
-      for (MapVector<const Value *, SUnit *>::iterator I = AliasMemDefs.begin(),
-           E = AliasMemDefs.end(); I != E; ++I)
-        addChainDependency(AAForDep, MFI, SU, I->second, RejectMemNodes);
-      for (MapVector<const Value *, std::vector<SUnit *> >::iterator I =
+      for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
+           AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I) {
+        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
+          addChainDependency(AAForDep, MFI, SU, I->second[i], RejectMemNodes);
+      }
+      for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
            AliasMemUses.begin(), E = AliasMemUses.end(); I != E; ++I) {
         for (unsigned i = 0, e = I->second.size(); i != e; ++i)
           addChainDependency(AAForDep, MFI, SU, I->second[i], RejectMemNodes,
@@ -879,7 +923,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
       bool MayAlias = false;
       for (UnderlyingObjectsVector::iterator K = Objs.begin(), KE = Objs.end();
            K != KE; ++K) {
-        const Value *V = K->getPointer();
+        ValueType V = K->getPointer();
         bool ThisMayAlias = K->getInt();
         if (ThisMayAlias)
           MayAlias = true;
@@ -887,24 +931,34 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
         // A store to a specific PseudoSourceValue. Add precise dependencies.
         // Record the def in MemDefs, first adding a dep if there is
         // an existing def.
-        MapVector<const Value *, SUnit *>::iterator I =
+        MapVector<ValueType, std::vector<SUnit *> >::iterator I =
           ((ThisMayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
-        MapVector<const Value *, SUnit *>::iterator IE =
+        MapVector<ValueType, std::vector<SUnit *> >::iterator IE =
          ((ThisMayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
         if (I != IE) {
-          addChainDependency(AAForDep, MFI, SU, I->second, RejectMemNodes,
-                             0, true);
-          I->second = SU;
+          for (unsigned i = 0, e = I->second.size(); i != e; ++i)
+            addChainDependency(AAForDep, MFI, SU, I->second[i], RejectMemNodes,
+                               0, true);
+
+          // If we're not using AA, then we only need one store per object.
+          if (!AAForDep)
+            I->second.clear();
+          I->second.push_back(SU);
         } else {
-          if (ThisMayAlias)
-            AliasMemDefs[V] = SU;
-          else
-            NonAliasMemDefs[V] = SU;
+          if (ThisMayAlias) {
+            if (!AAForDep)
+              AliasMemDefs[V].clear();
+            AliasMemDefs[V].push_back(SU);
+          } else {
+            if (!AAForDep)
+              NonAliasMemDefs[V].clear();
+            NonAliasMemDefs[V].push_back(SU);
+          }
         }
         // Handle the uses in MemUses, if there are any.
-        MapVector<const Value *, std::vector<SUnit *> >::iterator J =
+        MapVector<ValueType, std::vector<SUnit *> >::iterator J =
          ((ThisMayAlias) ? AliasMemUses.find(V) : NonAliasMemUses.find(V));
-        MapVector<const Value *, std::vector<SUnit *> >::iterator JE =
+        MapVector<ValueType, std::vector<SUnit *> >::iterator JE =
          ((ThisMayAlias) ? AliasMemUses.end() : NonAliasMemUses.end());
         if (J != JE) {
           for (unsigned i = 0, e = J->second.size(); i != e; ++i)
@@ -933,11 +987,6 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
       // we have lost all RejectMemNodes below barrier.
       if (BarrierChain)
         BarrierChain->addPred(SDep(SU, SDep::Barrier));
-
-      if (!ExitSU.isPred(SU))
-        // Push store's up a bit to avoid them getting in between cmp
-        // and branches.
-        ExitSU.addPred(SDep(SU, SDep::Artificial));
     } else if (MI->mayLoad()) {
       bool MayAlias = true;
       if (MI->isInvariantLoad(AA)) {
@@ -949,9 +998,11 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
         if (Objs.empty()) {
           // A load with no underlying object. Depend on all
           // potentially aliasing stores.
-          for (MapVector<const Value *, SUnit *>::iterator I =
+          for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
                  AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I)
-            addChainDependency(AAForDep, MFI, SU, I->second, RejectMemNodes);
+            for (unsigned i = 0, e = I->second.size(); i != e; ++i)
+              addChainDependency(AAForDep, MFI, SU, I->second[i],
+                                 RejectMemNodes);
 
           PendingLoads.push_back(SU);
           MayAlias = true;
@@ -961,20 +1012,21 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
 
         for (UnderlyingObjectsVector::iterator
              J = Objs.begin(), JE = Objs.end(); J != JE; ++J) {
-          const Value *V = J->getPointer();
+          ValueType V = J->getPointer();
           bool ThisMayAlias = J->getInt();
 
           if (ThisMayAlias)
             MayAlias = true;
 
           // A load from a specific PseudoSourceValue. Add precise dependencies.
-          MapVector<const Value *, SUnit *>::iterator I =
+          MapVector<ValueType, std::vector<SUnit *> >::iterator I =
             ((ThisMayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
-          MapVector<const Value *, SUnit *>::iterator IE =
+          MapVector<ValueType, std::vector<SUnit *> >::iterator IE =
            ((ThisMayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
           if (I != IE)
-            addChainDependency(AAForDep, MFI, SU, I->second, RejectMemNodes,
-                               0, true);
+            for (unsigned i = 0, e = I->second.size(); i != e; ++i)
+              addChainDependency(AAForDep, MFI, SU, I->second[i],
+                                 RejectMemNodes, 0, true);
           if (ThisMayAlias)
             AliasMemUses[V].push_back(SU);
           else
@@ -999,6 +1051,145 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
   PendingLoads.clear();
 }
 
+/// \brief Initialize register live-range state for updating kills.
+void ScheduleDAGInstrs::startBlockForKills(MachineBasicBlock *BB) {
+  // Start with no live registers.
+  LiveRegs.reset();
+
+  // Examine the live-in regs of all successors.
+  for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
+       SE = BB->succ_end(); SI != SE; ++SI) {
+    for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
+         E = (*SI)->livein_end(); I != E; ++I) {
+      unsigned Reg = *I;
+      // Repeat, for reg and all subregs.
+      for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
+           SubRegs.isValid(); ++SubRegs)
+        LiveRegs.set(*SubRegs);
+    }
+  }
+}
+
+bool ScheduleDAGInstrs::toggleKillFlag(MachineInstr *MI, MachineOperand &MO) {
+  // Setting kill flag...
+  if (!MO.isKill()) {
+    MO.setIsKill(true);
+    return false;
+  }
+
+  // If MO itself is live, clear the kill flag...
+  if (LiveRegs.test(MO.getReg())) {
+    MO.setIsKill(false);
+    return false;
+  }
+
+  // If any subreg of MO is live, then create an imp-def for that
+  // subreg and keep MO marked as killed.
+  MO.setIsKill(false);
+  bool AllDead = true;
+  const unsigned SuperReg = MO.getReg();
+  MachineInstrBuilder MIB(MF, MI);
+  for (MCSubRegIterator SubRegs(SuperReg, TRI); SubRegs.isValid(); ++SubRegs) {
+    if (LiveRegs.test(*SubRegs)) {
+      MIB.addReg(*SubRegs, RegState::ImplicitDefine);
+      AllDead = false;
+    }
+  }
+
+  if(AllDead)
+    MO.setIsKill(true);
+  return false;
+}
+
+// FIXME: Reuse the LivePhysRegs utility for this.
+void ScheduleDAGInstrs::fixupKills(MachineBasicBlock *MBB) {
+  DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');
+
+  LiveRegs.resize(TRI->getNumRegs());
+  BitVector killedRegs(TRI->getNumRegs());
+
+  startBlockForKills(MBB);
+
+  // Examine block from end to start...
+  unsigned Count = MBB->size();
+  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
+       I != E; --Count) {
+    MachineInstr *MI = --I;
+    if (MI->isDebugValue())
+      continue;
+
+    // Update liveness.  Registers that are defed but not used in this
+    // instruction are now dead. Mark register and all subregs as they
+    // are completely defined.
+    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+      MachineOperand &MO = MI->getOperand(i);
+      if (MO.isRegMask())
+        LiveRegs.clearBitsNotInMask(MO.getRegMask());
+      if (!MO.isReg()) continue;
+      unsigned Reg = MO.getReg();
+      if (Reg == 0) continue;
+      if (!MO.isDef()) continue;
+      // Ignore two-addr defs.
+      if (MI->isRegTiedToUseOperand(i)) continue;
+
+      // Repeat for reg and all subregs.
+      for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
+           SubRegs.isValid(); ++SubRegs)
+        LiveRegs.reset(*SubRegs);
+    }
+
+    // Examine all used registers and set/clear kill flag. When a
+    // register is used multiple times we only set the kill flag on
+    // the first use. Don't set kill flags on undef operands.
+    killedRegs.reset();
+    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+      MachineOperand &MO = MI->getOperand(i);
+      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
+      unsigned Reg = MO.getReg();
+      if ((Reg == 0) || MRI.isReserved(Reg)) continue;
+
+      bool kill = false;
+      if (!killedRegs.test(Reg)) {
+        kill = true;
+        // A register is not killed if any subregs are live...
+        for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
+          if (LiveRegs.test(*SubRegs)) {
+            kill = false;
+            break;
+          }
+        }
+
+        // If subreg is not live, then register is killed if it became
+        // live in this instruction
+        if (kill)
+          kill = !LiveRegs.test(Reg);
+      }
+
+      if (MO.isKill() != kill) {
+        DEBUG(dbgs() << "Fixing " << MO << " in ");
+        // Warning: toggleKillFlag may invalidate MO.
+        toggleKillFlag(MI, MO);
+        DEBUG(MI->dump());
+      }
+
+      killedRegs.set(Reg);
+    }
+
+    // Mark any used register (that is not using undef) and subregs as
+    // now live...
+    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+      MachineOperand &MO = MI->getOperand(i);
+      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
+      unsigned Reg = MO.getReg();
+      if ((Reg == 0) || MRI.isReserved(Reg)) continue;
+
+      for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
+           SubRegs.isValid(); ++SubRegs)
+        LiveRegs.set(*SubRegs);
+    }
+  }
+}
+
 void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
   SU->getInstr()->dump();
@@ -1234,7 +1425,7 @@ public:
 
   const SDep *backtrack() {
     DFSStack.pop_back();
-    return DFSStack.empty() ? 0 : llvm::prior(DFSStack.back().second);
+    return DFSStack.empty() ? nullptr : std::prev(DFSStack.back().second);
   }
 
   const SUnit *getCurr() const { return DFSStack.back().first; }
@@ -1317,7 +1508,7 @@ void SchedDFSResult::scheduleTree(unsigned SubtreeID) {
   }
 }
 
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD
 void ILPValue::print(raw_ostream &OS) const {
   OS << InstrCount << " / " << Length << " = ";
   if (!Length)
@@ -1326,16 +1517,17 @@ void ILPValue::print(raw_ostream &OS) const {
     OS << format("%g", ((double)InstrCount / Length));
 }
 
+LLVM_DUMP_METHOD
 void ILPValue::dump() const {
   dbgs() << *this << '\n';
 }
 
 namespace llvm {
 
+LLVM_DUMP_METHOD
 raw_ostream &operator<<(raw_ostream &OS, const ILPValue &Val) {
   Val.print(OS);
   return OS;
 }
 
 } // namespace llvm
-#endif // !NDEBUG || LLVM_ENABLE_DUMP
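Note: the largest behavioural change in this diff replaces the single `SUnit *` tracked per underlying memory object (`MapVector<const Value *, SUnit *>`) with a `std::vector<SUnit *>`, so several stores to the same object can be remembered when alias analysis is in use; without AA the list is cleared before each insertion, preserving the old one-store-per-object behaviour. The following standalone sketch models that bookkeeping with plain STL types; it is not LLVM code, and the `Object`/`StoreId` names and `recordStore` helper are invented for illustration.

```cpp
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Hypothetical stand-ins for an underlying memory object and a store's SUnit.
using Object = std::string;
using StoreId = int;

// Records a store against its underlying object. When alias analysis is not
// in use (UseAA == false), only the most recent store per object is kept,
// mirroring the pre-patch single-SUnit-per-object behaviour.
void recordStore(std::map<Object, std::vector<StoreId>> &MemDefs,
                 const Object &Obj, StoreId SU, bool UseAA) {
  std::vector<StoreId> &Stores = MemDefs[Obj];
  // A real scheduler would add chain dependencies on every entry in
  // Stores here before updating the list.
  if (!UseAA)
    Stores.clear(); // one store per object is enough without AA
  Stores.push_back(SU);
}

int main() {
  std::map<Object, std::vector<StoreId>> NoAA, WithAA;
  for (StoreId SU : {1, 2, 3}) {
    recordStore(NoAA, "a", SU, /*UseAA=*/false);
    recordStore(WithAA, "a", SU, /*UseAA=*/true);
  }
  std::cout << "without AA: " << NoAA["a"].size() << " store(s)\n";  // 1
  std::cout << "with AA:    " << WithAA["a"].size() << " store(s)\n"; // 3
  return 0;
}
```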
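The `initSUnits()` hunk classifies each scheduling unit by the buffer size of the processor resources it writes: `BufferSize == 0` marks a reserved resource that stalls the whole pipeline, while `BufferSize == 1` marks an unbuffered, in-order resource. A minimal model of that classification, using invented `Resource`/`UnitFlags` types rather than the real `TargetSchedModel` API, might look like:

```cpp
#include <iostream>
#include <vector>

// Hypothetical stand-ins for per-resource and per-SUnit state.
struct Resource { int BufferSize; };
struct UnitFlags { bool hasReservedResource = false; bool isUnbuffered = false; };

// Mirrors the switch in the patched initSUnits(): BufferSize 0 is a reserved
// resource, BufferSize 1 is an unbuffered in-order pipe inside an out-of-order
// core, and anything larger is treated as fully buffered.
UnitFlags classify(const std::vector<Resource> &WrittenResources) {
  UnitFlags F;
  for (const Resource &R : WrittenResources) {
    switch (R.BufferSize) {
    case 0: F.hasReservedResource = true; break;
    case 1: F.isUnbuffered = true; break;
    default: break;
    }
  }
  return F;
}

int main() {
  UnitFlags F = classify({{0}, {4}, {1}});
  std::cout << "reserved=" << F.hasReservedResource
            << " unbuffered=" << F.isUnbuffered << '\n'; // reserved=1 unbuffered=1
  return 0;
}
```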
