Diffstat (limited to 'lib/CodeGen/RegAllocFast.cpp')
-rw-r--r--   lib/CodeGen/RegAllocFast.cpp   767
1 file changed, 391 insertions(+), 376 deletions(-)
diff --git a/lib/CodeGen/RegAllocFast.cpp b/lib/CodeGen/RegAllocFast.cpp
index 7b57c6cbcdb83..eb3a4e481f5d3 100644
--- a/lib/CodeGen/RegAllocFast.cpp
+++ b/lib/CodeGen/RegAllocFast.cpp
@@ -54,7 +54,7 @@ using namespace llvm;
STATISTIC(NumStores, "Number of stores added");
STATISTIC(NumLoads , "Number of loads added");
-STATISTIC(NumCopies, "Number of copies coalesced");
+STATISTIC(NumCoalesced, "Number of copies coalesced");
static RegisterRegAlloc
fastRegAlloc("fast", "fast register allocator", createFastRegisterAllocator);
@@ -88,7 +88,7 @@ namespace {
unsigned short LastOpNum = 0; ///< OpNum on LastUse.
bool Dirty = false; ///< Register needs spill.
- explicit LiveReg(unsigned v) : VirtReg(v) {}
+ explicit LiveReg(unsigned VirtReg) : VirtReg(VirtReg) {}
unsigned getSparseSetIndex() const {
return TargetRegisterInfo::virtReg2Index(VirtReg);
@@ -96,14 +96,13 @@ namespace {
};
using LiveRegMap = SparseSet<LiveReg>;
-
/// This map contains entries for each virtual register that is currently
/// available in a physical register.
LiveRegMap LiveVirtRegs;
- DenseMap<unsigned, SmallVector<MachineInstr *, 4>> LiveDbgValueMap;
+ DenseMap<unsigned, SmallVector<MachineInstr *, 2>> LiveDbgValueMap;
- /// Track the state of a physical register.
+ /// State of a physical register.
enum RegState {
/// A disabled register is not available for allocation, but an alias may
/// be in use. A register can only be moved out of the disabled state if
@@ -123,18 +122,18 @@ namespace {
/// register. In that case, LiveVirtRegs contains the inverse mapping.
};
- /// One of the RegState enums, or a virtreg.
+ /// Maps each physical register to a RegState enum or a virtual register.
std::vector<unsigned> PhysRegState;
SmallVector<unsigned, 16> VirtDead;
SmallVector<MachineInstr *, 32> Coalesced;
- /// Set of register units.
- using UsedInInstrSet = SparseSet<unsigned>;
-
+ using RegUnitSet = SparseSet<uint16_t, identity<uint16_t>>;
/// Set of register units that are used in the current instruction, and so
/// cannot be allocated.
- UsedInInstrSet UsedInInstr;
+ RegUnitSet UsedInInstr;
+
+ void setPhysRegState(MCPhysReg PhysReg, unsigned NewState);
/// Mark a physreg as used in this instruction.
void markRegUsedInInstr(MCPhysReg PhysReg) {
@@ -150,12 +149,8 @@ namespace {
return false;
}
- /// This flag is set when LiveRegMap will be cleared completely after
- /// spilling all live registers. LiveRegMap entries should not be erased.
- bool isBulkSpilling = false;
-
enum : unsigned {
- spillClean = 1,
+ spillClean = 50,
spillDirty = 100,
spillImpossible = ~0u
};
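The reweighted constants above (spillClean was 1 before this patch) drive the candidate selection in the allocVirtReg hunk further down: a candidate with spill cost 0 is taken immediately, otherwise the cheapest one wins, and spillImpossible candidates are never chosen. A minimal standalone sketch of that policy, with a hypothetical cost callback standing in for calcSpillCost:

#include <functional>
#include <vector>

// Same weights as the enum above; ~0u marks candidates that cannot be spilled.
enum : unsigned { spillClean = 50, spillDirty = 100, spillImpossible = ~0u };

// Pick a register from an allocation order given a spill-cost estimate.
// Cost 0 is taken immediately; otherwise the cheapest candidate wins.
// Returns 0 when every candidate is spillImpossible.
unsigned pickPhysReg(const std::vector<unsigned> &AllocationOrder,
                     const std::function<unsigned(unsigned)> &SpillCost) {
  unsigned BestReg = 0;
  unsigned BestCost = spillImpossible;
  for (unsigned PhysReg : AllocationOrder) {
    unsigned Cost = SpillCost(PhysReg);
    if (Cost == 0)
      return PhysReg;
    if (Cost < BestCost) {
      BestReg = PhysReg;
      BestCost = Cost;
    }
  }
  return BestReg;
}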
@@ -180,16 +175,18 @@ namespace {
private:
bool runOnMachineFunction(MachineFunction &MF) override;
+
void allocateBasicBlock(MachineBasicBlock &MBB);
+ void allocateInstruction(MachineInstr &MI);
+ void handleDebugValue(MachineInstr &MI);
void handleThroughOperands(MachineInstr &MI,
SmallVectorImpl<unsigned> &VirtDead);
- int getStackSpaceFor(unsigned VirtReg, const TargetRegisterClass &RC);
bool isLastUseOfLocalReg(const MachineOperand &MO) const;
void addKillFlag(const LiveReg &LRI);
- void killVirtReg(LiveRegMap::iterator LRI);
+ void killVirtReg(LiveReg &LR);
void killVirtReg(unsigned VirtReg);
- void spillVirtReg(MachineBasicBlock::iterator MI, LiveRegMap::iterator);
+ void spillVirtReg(MachineBasicBlock::iterator MI, LiveReg &LR);
void spillVirtReg(MachineBasicBlock::iterator MI, unsigned VirtReg);
void usePhysReg(MachineOperand &MO);
@@ -206,15 +203,19 @@ namespace {
return LiveVirtRegs.find(TargetRegisterInfo::virtReg2Index(VirtReg));
}
- LiveRegMap::iterator assignVirtToPhysReg(unsigned VirtReg, MCPhysReg PhysReg);
- LiveRegMap::iterator allocVirtReg(MachineInstr &MI, LiveRegMap::iterator,
- unsigned Hint);
- LiveRegMap::iterator defineVirtReg(MachineInstr &MI, unsigned OpNum,
- unsigned VirtReg, unsigned Hint);
- LiveRegMap::iterator reloadVirtReg(MachineInstr &MI, unsigned OpNum,
- unsigned VirtReg, unsigned Hint);
+ void allocVirtReg(MachineInstr &MI, LiveReg &LR, unsigned Hint);
+ MCPhysReg defineVirtReg(MachineInstr &MI, unsigned OpNum, unsigned VirtReg,
+ unsigned Hint);
+ LiveReg &reloadVirtReg(MachineInstr &MI, unsigned OpNum, unsigned VirtReg,
+ unsigned Hint);
void spillAll(MachineBasicBlock::iterator MI);
- bool setPhysReg(MachineInstr &MI, unsigned OpNum, MCPhysReg PhysReg);
+ bool setPhysReg(MachineInstr &MI, MachineOperand &MO, MCPhysReg PhysReg);
+
+ int getStackSpaceFor(unsigned VirtReg);
+ void spill(MachineBasicBlock::iterator Before, unsigned VirtReg,
+ MCPhysReg AssignedReg, bool Kill);
+ void reload(MachineBasicBlock::iterator Before, unsigned VirtReg,
+ MCPhysReg PhysReg);
void dumpState();
};
@@ -226,10 +227,13 @@ char RegAllocFast::ID = 0;
INITIALIZE_PASS(RegAllocFast, "regallocfast", "Fast Register Allocator", false,
false)
+void RegAllocFast::setPhysRegState(MCPhysReg PhysReg, unsigned NewState) {
+ PhysRegState[PhysReg] = NewState;
+}
+
/// This allocates space for the specified virtual register to be held on the
/// stack.
-int RegAllocFast::getStackSpaceFor(unsigned VirtReg,
- const TargetRegisterClass &RC) {
+int RegAllocFast::getStackSpaceFor(unsigned VirtReg) {
// Find the location Reg would belong...
int SS = StackSlotForVirtReg[VirtReg];
// Already has space allocated?
@@ -237,6 +241,7 @@ int RegAllocFast::getStackSpaceFor(unsigned VirtReg,
return SS;
// Allocate a new stack object for this spill location...
+ const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
unsigned Size = TRI->getSpillSize(RC);
unsigned Align = TRI->getSpillAlignment(RC);
int FrameIdx = MFI->CreateSpillStackObject(Size, Align);
@@ -246,6 +251,46 @@ int RegAllocFast::getStackSpaceFor(unsigned VirtReg,
return FrameIdx;
}
+/// Insert spill instruction for \p AssignedReg before \p Before. Update
+/// DBG_VALUEs with \p VirtReg operands to refer to the stack slot.
+void RegAllocFast::spill(MachineBasicBlock::iterator Before, unsigned VirtReg,
+ MCPhysReg AssignedReg, bool Kill) {
+ LLVM_DEBUG(dbgs() << "Spilling " << printReg(VirtReg, TRI)
+ << " in " << printReg(AssignedReg, TRI));
+ int FI = getStackSpaceFor(VirtReg);
+ LLVM_DEBUG(dbgs() << " to stack slot #" << FI << '\n');
+
+ const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
+ TII->storeRegToStackSlot(*MBB, Before, AssignedReg, Kill, FI, &RC, TRI);
+ ++NumStores;
+
+ // If this register is used by a DBG_VALUE then insert a new DBG_VALUE to
+ // identify the spilled location as the place to find the corresponding
+ // variable's value.
+ SmallVectorImpl<MachineInstr *> &LRIDbgValues = LiveDbgValueMap[VirtReg];
+ for (MachineInstr *DBG : LRIDbgValues) {
+ MachineInstr *NewDV = buildDbgValueForSpill(*MBB, Before, *DBG, FI);
+ assert(NewDV->getParent() == MBB && "dangling parent pointer");
+ (void)NewDV;
+ LLVM_DEBUG(dbgs() << "Inserting debug info due to spill:\n" << *NewDV);
+ }
+ // Now that this register is spilled, there should not be any DBG_VALUE
+ // pointing to this register; they all point to the spilled value now.
+ LRIDbgValues.clear();
+}
+
+/// Insert a reload instruction for \p VirtReg into \p PhysReg before \p Before.
+void RegAllocFast::reload(MachineBasicBlock::iterator Before, unsigned VirtReg,
+ MCPhysReg PhysReg) {
+ LLVM_DEBUG(dbgs() << "Reloading " << printReg(VirtReg, TRI) << " into "
+ << printReg(PhysReg, TRI) << '\n');
+ int FI = getStackSpaceFor(VirtReg);
+ const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
+ TII->loadRegFromStackSlot(*MBB, Before, PhysReg, FI, &RC, TRI);
+ ++NumLoads;
+}
+
/// Return true if MO is the only remaining reference to its virtual register,
/// and it is guaranteed to be a block-local register.
bool RegAllocFast::isLastUseOfLocalReg(const MachineOperand &MO) const {
@@ -281,14 +326,12 @@ void RegAllocFast::addKillFlag(const LiveReg &LR) {
}
/// Mark virtreg as no longer available.
-void RegAllocFast::killVirtReg(LiveRegMap::iterator LRI) {
- addKillFlag(*LRI);
- assert(PhysRegState[LRI->PhysReg] == LRI->VirtReg &&
+void RegAllocFast::killVirtReg(LiveReg &LR) {
+ addKillFlag(LR);
+ assert(PhysRegState[LR.PhysReg] == LR.VirtReg &&
"Broken RegState mapping");
- PhysRegState[LRI->PhysReg] = regFree;
- // Erase from LiveVirtRegs unless we're spilling in bulk.
- if (!isBulkSpilling)
- LiveVirtRegs.erase(LRI);
+ setPhysRegState(LR.PhysReg, regFree);
+ LR.PhysReg = 0;
}
/// Mark virtreg as no longer available.
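With isBulkSpilling gone (removed earlier in this patch), a killed LiveReg now stays in LiveVirtRegs and simply has its PhysReg cleared to 0; that zero acts as a "not currently assigned" sentinel which every caller checks, and spillAll below skips such entries before clearing the whole map in one go. A small standalone sketch of the pattern, using illustrative types rather than the real LiveReg/SparseSet:

#include <cassert>
#include <vector>

// Illustrative stand-in for a LiveRegMap entry; not the real LiveReg.
struct LiveRegEntry {
  unsigned VirtReg = 0;
  unsigned PhysReg = 0; // 0 means: not currently assigned to a physreg
  bool Dirty = false;
};

// Killing clears the assignment but keeps the entry, so a bulk-spill loop
// can keep iterating over the container without erasing mid-iteration.
void killEntry(LiveRegEntry &LR) {
  assert(LR.PhysReg != 0 && "entry is not live in a register");
  LR.PhysReg = 0;
}

void spillAllEntries(std::vector<LiveRegEntry> &LiveRegs) {
  for (LiveRegEntry &LR : LiveRegs) {
    if (!LR.PhysReg)
      continue; // already killed earlier; nothing to do
    killEntry(LR);
  }
  LiveRegs.clear(); // one bulk clear at the end, as spillAll does below
}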
@@ -296,8 +339,8 @@ void RegAllocFast::killVirtReg(unsigned VirtReg) {
assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
"killVirtReg needs a virtual register");
LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
- if (LRI != LiveVirtRegs.end())
- killVirtReg(LRI);
+ if (LRI != LiveVirtRegs.end() && LRI->PhysReg)
+ killVirtReg(*LRI);
}
/// This method spills the value specified by VirtReg into the corresponding
@@ -307,63 +350,41 @@ void RegAllocFast::spillVirtReg(MachineBasicBlock::iterator MI,
assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
"Spilling a physical register is illegal!");
LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
- assert(LRI != LiveVirtRegs.end() && "Spilling unmapped virtual register");
- spillVirtReg(MI, LRI);
+ assert(LRI != LiveVirtRegs.end() && LRI->PhysReg &&
+ "Spilling unmapped virtual register");
+ spillVirtReg(MI, *LRI);
}
/// Do the actual work of spilling.
-void RegAllocFast::spillVirtReg(MachineBasicBlock::iterator MI,
- LiveRegMap::iterator LRI) {
- LiveReg &LR = *LRI;
- assert(PhysRegState[LR.PhysReg] == LRI->VirtReg && "Broken RegState mapping");
+void RegAllocFast::spillVirtReg(MachineBasicBlock::iterator MI, LiveReg &LR) {
+ assert(PhysRegState[LR.PhysReg] == LR.VirtReg && "Broken RegState mapping");
if (LR.Dirty) {
// If this physreg is used by the instruction, we want to kill it on the
// instruction, not on the spill.
bool SpillKill = MachineBasicBlock::iterator(LR.LastUse) != MI;
LR.Dirty = false;
- LLVM_DEBUG(dbgs() << "Spilling " << printReg(LRI->VirtReg, TRI) << " in "
- << printReg(LR.PhysReg, TRI));
- const TargetRegisterClass &RC = *MRI->getRegClass(LRI->VirtReg);
- int FI = getStackSpaceFor(LRI->VirtReg, RC);
- LLVM_DEBUG(dbgs() << " to stack slot #" << FI << "\n");
- TII->storeRegToStackSlot(*MBB, MI, LR.PhysReg, SpillKill, FI, &RC, TRI);
- ++NumStores; // Update statistics
-
- // If this register is used by DBG_VALUE then insert new DBG_VALUE to
- // identify spilled location as the place to find corresponding variable's
- // value.
- SmallVectorImpl<MachineInstr *> &LRIDbgValues =
- LiveDbgValueMap[LRI->VirtReg];
- for (MachineInstr *DBG : LRIDbgValues) {
- MachineInstr *NewDV = buildDbgValueForSpill(*MBB, MI, *DBG, FI);
- assert(NewDV->getParent() == MBB && "dangling parent pointer");
- (void)NewDV;
- LLVM_DEBUG(dbgs() << "Inserting debug info due to spill:"
- << "\n"
- << *NewDV);
- }
- // Now this register is spilled there is should not be any DBG_VALUE
- // pointing to this register because they are all pointing to spilled value
- // now.
- LRIDbgValues.clear();
+
+ spill(MI, LR.VirtReg, LR.PhysReg, SpillKill);
+
if (SpillKill)
LR.LastUse = nullptr; // Don't kill register again
}
- killVirtReg(LRI);
+ killVirtReg(LR);
}
/// Spill all dirty virtregs without killing them.
void RegAllocFast::spillAll(MachineBasicBlock::iterator MI) {
- if (LiveVirtRegs.empty()) return;
- isBulkSpilling = true;
+ if (LiveVirtRegs.empty())
+ return;
// The LiveRegMap is keyed by an unsigned (the virtreg number), so the order
// of spilling here is deterministic, if arbitrary.
- for (LiveRegMap::iterator I = LiveVirtRegs.begin(), E = LiveVirtRegs.end();
- I != E; ++I)
- spillVirtReg(MI, I);
+ for (LiveReg &LR : LiveVirtRegs) {
+ if (!LR.PhysReg)
+ continue;
+ spillVirtReg(MI, LR);
+ }
LiveVirtRegs.clear();
- isBulkSpilling = false;
}
/// Handle the direct use of a physical register. Check that the register is
@@ -417,12 +438,12 @@ void RegAllocFast::usePhysReg(MachineOperand &MO) {
case regFree:
if (TRI->isSuperRegister(PhysReg, Alias)) {
// Leave the superregister in the working set.
- PhysRegState[Alias] = regFree;
+ setPhysRegState(Alias, regFree);
MO.getParent()->addRegisterKilled(Alias, TRI, true);
return;
}
// Some other alias was in the working set - clear it.
- PhysRegState[Alias] = regDisabled;
+ setPhysRegState(Alias, regDisabled);
break;
default:
llvm_unreachable("Instruction uses an alias of an allocated register");
@@ -430,7 +451,7 @@ void RegAllocFast::usePhysReg(MachineOperand &MO) {
}
// All aliases are disabled, bring register into working set.
- PhysRegState[PhysReg] = regFree;
+ setPhysRegState(PhysReg, regFree);
MO.setIsKill();
}
@@ -448,12 +469,12 @@ void RegAllocFast::definePhysReg(MachineBasicBlock::iterator MI,
LLVM_FALLTHROUGH;
case regFree:
case regReserved:
- PhysRegState[PhysReg] = NewState;
+ setPhysRegState(PhysReg, NewState);
return;
}
// This is a disabled register, disable all aliases.
- PhysRegState[PhysReg] = NewState;
+ setPhysRegState(PhysReg, NewState);
for (MCRegAliasIterator AI(PhysReg, TRI, false); AI.isValid(); ++AI) {
MCPhysReg Alias = *AI;
switch (unsigned VirtReg = PhysRegState[Alias]) {
@@ -464,7 +485,7 @@ void RegAllocFast::definePhysReg(MachineBasicBlock::iterator MI,
LLVM_FALLTHROUGH;
case regFree:
case regReserved:
- PhysRegState[Alias] = regDisabled;
+ setPhysRegState(Alias, regDisabled);
if (TRI->isSuperRegister(PhysReg, Alias))
return;
break;
@@ -472,9 +493,9 @@ void RegAllocFast::definePhysReg(MachineBasicBlock::iterator MI,
}
}
-/// Return the cost of spilling clearing out PhysReg and aliases so it is
-/// free for allocation. Returns 0 when PhysReg is free or disabled with all
-/// aliases disabled - it can be allocated directly.
+/// Return the cost of spilling and clearing out PhysReg and its aliases so it
+/// is free for allocation. Returns 0 when PhysReg is free or disabled with all
+/// aliases disabled - it can be allocated directly.
/// \returns spillImpossible when PhysReg or an alias can't be spilled.
unsigned RegAllocFast::calcSpillCost(MCPhysReg PhysReg) const {
if (isRegUsedInInstr(PhysReg)) {
@@ -492,9 +513,10 @@ unsigned RegAllocFast::calcSpillCost(MCPhysReg PhysReg) const {
<< printReg(PhysReg, TRI) << " is reserved already.\n");
return spillImpossible;
default: {
- LiveRegMap::const_iterator I = findLiveVirtReg(VirtReg);
- assert(I != LiveVirtRegs.end() && "Missing VirtReg entry");
- return I->Dirty ? spillDirty : spillClean;
+ LiveRegMap::const_iterator LRI = findLiveVirtReg(VirtReg);
+ assert(LRI != LiveVirtRegs.end() && LRI->PhysReg &&
+ "Missing VirtReg entry");
+ return LRI->Dirty ? spillDirty : spillClean;
}
}
@@ -512,9 +534,10 @@ unsigned RegAllocFast::calcSpillCost(MCPhysReg PhysReg) const {
case regReserved:
return spillImpossible;
default: {
- LiveRegMap::const_iterator I = findLiveVirtReg(VirtReg);
- assert(I != LiveVirtRegs.end() && "Missing VirtReg entry");
- Cost += I->Dirty ? spillDirty : spillClean;
+ LiveRegMap::const_iterator LRI = findLiveVirtReg(VirtReg);
+ assert(LRI != LiveVirtRegs.end() && LRI->PhysReg &&
+ "Missing VirtReg entry");
+ Cost += LRI->Dirty ? spillDirty : spillClean;
break;
}
}
@@ -526,31 +549,27 @@ unsigned RegAllocFast::calcSpillCost(MCPhysReg PhysReg) const {
/// proper container for VirtReg now. The physical register must not be used
/// for anything else when this is called.
void RegAllocFast::assignVirtToPhysReg(LiveReg &LR, MCPhysReg PhysReg) {
- LLVM_DEBUG(dbgs() << "Assigning " << printReg(LR.VirtReg, TRI) << " to "
- << printReg(PhysReg, TRI) << "\n");
- PhysRegState[PhysReg] = LR.VirtReg;
- assert(!LR.PhysReg && "Already assigned a physreg");
+ unsigned VirtReg = LR.VirtReg;
+ LLVM_DEBUG(dbgs() << "Assigning " << printReg(VirtReg, TRI) << " to "
+ << printReg(PhysReg, TRI) << '\n');
+ assert(LR.PhysReg == 0 && "Already assigned a physreg");
+ assert(PhysReg != 0 && "Trying to assign no register");
LR.PhysReg = PhysReg;
-}
-
-RegAllocFast::LiveRegMap::iterator
-RegAllocFast::assignVirtToPhysReg(unsigned VirtReg, MCPhysReg PhysReg) {
- LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
- assert(LRI != LiveVirtRegs.end() && "VirtReg disappeared");
- assignVirtToPhysReg(*LRI, PhysReg);
- return LRI;
+ setPhysRegState(PhysReg, VirtReg);
}
/// Allocates a physical register for VirtReg.
-RegAllocFast::LiveRegMap::iterator RegAllocFast::allocVirtReg(MachineInstr &MI,
- LiveRegMap::iterator LRI, unsigned Hint) {
- const unsigned VirtReg = LRI->VirtReg;
+void RegAllocFast::allocVirtReg(MachineInstr &MI, LiveReg &LR, unsigned Hint) {
+ const unsigned VirtReg = LR.VirtReg;
assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
"Can only allocate virtual registers");
- // Take hint when possible.
const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
+ LLVM_DEBUG(dbgs() << "Search register for " << printReg(VirtReg)
+ << " in class " << TRI->getRegClassName(&RC) << '\n');
+
+ // Take hint when possible.
if (TargetRegisterInfo::isPhysicalRegister(Hint) &&
MRI->isAllocatable(Hint) && RC.contains(Hint)) {
// Ignore the hint if we would have to spill a dirty register.
@@ -558,67 +577,62 @@ RegAllocFast::LiveRegMap::iterator RegAllocFast::allocVirtReg(MachineInstr &MI,
if (Cost < spillDirty) {
if (Cost)
definePhysReg(MI, Hint, regFree);
- // definePhysReg may kill virtual registers and modify LiveVirtRegs.
- // That invalidates LRI, so run a new lookup for VirtReg.
- return assignVirtToPhysReg(VirtReg, Hint);
+ assignVirtToPhysReg(LR, Hint);
+ return;
}
}
// First try to find a completely free register.
- ArrayRef<MCPhysReg> AO = RegClassInfo.getOrder(&RC);
- for (MCPhysReg PhysReg : AO) {
+ ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
+ for (MCPhysReg PhysReg : AllocationOrder) {
if (PhysRegState[PhysReg] == regFree && !isRegUsedInInstr(PhysReg)) {
- assignVirtToPhysReg(*LRI, PhysReg);
- return LRI;
+ assignVirtToPhysReg(LR, PhysReg);
+ return;
}
}
- LLVM_DEBUG(dbgs() << "Allocating " << printReg(VirtReg) << " from "
- << TRI->getRegClassName(&RC) << "\n");
-
- unsigned BestReg = 0;
+ MCPhysReg BestReg = 0;
unsigned BestCost = spillImpossible;
- for (MCPhysReg PhysReg : AO) {
+ for (MCPhysReg PhysReg : AllocationOrder) {
+ LLVM_DEBUG(dbgs() << "\tRegister: " << printReg(PhysReg, TRI) << ' ');
unsigned Cost = calcSpillCost(PhysReg);
- LLVM_DEBUG(dbgs() << "\tRegister: " << printReg(PhysReg, TRI) << "\n");
- LLVM_DEBUG(dbgs() << "\tCost: " << Cost << "\n");
- LLVM_DEBUG(dbgs() << "\tBestCost: " << BestCost << "\n");
- // Cost is 0 when all aliases are already disabled.
+ LLVM_DEBUG(dbgs() << "Cost: " << Cost << " BestCost: " << BestCost << '\n');
+ // Immediately take a register with cost 0.
if (Cost == 0) {
- assignVirtToPhysReg(*LRI, PhysReg);
- return LRI;
+ assignVirtToPhysReg(LR, PhysReg);
+ return;
+ }
+ if (Cost < BestCost) {
+ BestReg = PhysReg;
+ BestCost = Cost;
}
- if (Cost < BestCost)
- BestReg = PhysReg, BestCost = Cost;
}
- if (BestReg) {
- definePhysReg(MI, BestReg, regFree);
- // definePhysReg may kill virtual registers and modify LiveVirtRegs.
- // That invalidates LRI, so run a new lookup for VirtReg.
- return assignVirtToPhysReg(VirtReg, BestReg);
+ if (!BestReg) {
+ // Nothing we can do: Report an error and keep going with an invalid
+ // allocation.
+ if (MI.isInlineAsm())
+ MI.emitError("inline assembly requires more registers than available");
+ else
+ MI.emitError("ran out of registers during register allocation");
+ definePhysReg(MI, *AllocationOrder.begin(), regFree);
+ assignVirtToPhysReg(LR, *AllocationOrder.begin());
+ return;
}
- // Nothing we can do. Report an error and keep going with a bad allocation.
- if (MI.isInlineAsm())
- MI.emitError("inline assembly requires more registers than available");
- else
- MI.emitError("ran out of registers during register allocation");
- definePhysReg(MI, *AO.begin(), regFree);
- return assignVirtToPhysReg(VirtReg, *AO.begin());
+ definePhysReg(MI, BestReg, regFree);
+ assignVirtToPhysReg(LR, BestReg);
}
/// Allocates a register for VirtReg and mark it as dirty.
-RegAllocFast::LiveRegMap::iterator RegAllocFast::defineVirtReg(MachineInstr &MI,
- unsigned OpNum,
- unsigned VirtReg,
- unsigned Hint) {
+MCPhysReg RegAllocFast::defineVirtReg(MachineInstr &MI, unsigned OpNum,
+ unsigned VirtReg, unsigned Hint) {
assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
"Not a virtual register");
LiveRegMap::iterator LRI;
bool New;
std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
- if (New) {
+ if (!LRI->PhysReg) {
// If there is no hint, peek at the only use of this register.
if ((!Hint || !TargetRegisterInfo::isPhysicalRegister(Hint)) &&
MRI->hasOneNonDBGUse(VirtReg)) {
@@ -627,7 +641,7 @@ RegAllocFast::LiveRegMap::iterator RegAllocFast::defineVirtReg(MachineInstr &MI,
if (UseMI.isCopyLike())
Hint = UseMI.getOperand(0).getReg();
}
- LRI = allocVirtReg(MI, LRI, Hint);
+ allocVirtReg(MI, *LRI, Hint);
} else if (LRI->LastUse) {
// Redefining a live register - kill at the last use, unless it is this
// instruction defining VirtReg multiple times.
@@ -639,40 +653,35 @@ RegAllocFast::LiveRegMap::iterator RegAllocFast::defineVirtReg(MachineInstr &MI,
LRI->LastOpNum = OpNum;
LRI->Dirty = true;
markRegUsedInInstr(LRI->PhysReg);
- return LRI;
+ return LRI->PhysReg;
}
/// Make sure VirtReg is available in a physreg and return it.
-RegAllocFast::LiveRegMap::iterator RegAllocFast::reloadVirtReg(MachineInstr &MI,
- unsigned OpNum,
- unsigned VirtReg,
- unsigned Hint) {
+RegAllocFast::LiveReg &RegAllocFast::reloadVirtReg(MachineInstr &MI,
+ unsigned OpNum,
+ unsigned VirtReg,
+ unsigned Hint) {
assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
"Not a virtual register");
LiveRegMap::iterator LRI;
bool New;
std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
MachineOperand &MO = MI.getOperand(OpNum);
- if (New) {
- LRI = allocVirtReg(MI, LRI, Hint);
- const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
- int FrameIndex = getStackSpaceFor(VirtReg, RC);
- LLVM_DEBUG(dbgs() << "Reloading " << printReg(VirtReg, TRI) << " into "
- << printReg(LRI->PhysReg, TRI) << "\n");
- TII->loadRegFromStackSlot(*MBB, MI, LRI->PhysReg, FrameIndex, &RC, TRI);
- ++NumLoads;
+ if (!LRI->PhysReg) {
+ allocVirtReg(MI, *LRI, Hint);
+ reload(MI, VirtReg, LRI->PhysReg);
} else if (LRI->Dirty) {
if (isLastUseOfLocalReg(MO)) {
- LLVM_DEBUG(dbgs() << "Killing last use: " << MO << "\n");
+ LLVM_DEBUG(dbgs() << "Killing last use: " << MO << '\n');
if (MO.isUse())
MO.setIsKill();
else
MO.setIsDead();
} else if (MO.isKill()) {
- LLVM_DEBUG(dbgs() << "Clearing dubious kill: " << MO << "\n");
+ LLVM_DEBUG(dbgs() << "Clearing dubious kill: " << MO << '\n');
MO.setIsKill(false);
} else if (MO.isDead()) {
- LLVM_DEBUG(dbgs() << "Clearing dubious dead: " << MO << "\n");
+ LLVM_DEBUG(dbgs() << "Clearing dubious dead: " << MO << '\n');
MO.setIsDead(false);
}
} else if (MO.isKill()) {
@@ -680,25 +689,24 @@ RegAllocFast::LiveRegMap::iterator RegAllocFast::reloadVirtReg(MachineInstr &MI,
// register would be killed immediately, and there might be a second use:
// %foo = OR killed %x, %x
// This would cause a second reload of %x into a different register.
- LLVM_DEBUG(dbgs() << "Clearing clean kill: " << MO << "\n");
+ LLVM_DEBUG(dbgs() << "Clearing clean kill: " << MO << '\n');
MO.setIsKill(false);
} else if (MO.isDead()) {
- LLVM_DEBUG(dbgs() << "Clearing clean dead: " << MO << "\n");
+ LLVM_DEBUG(dbgs() << "Clearing clean dead: " << MO << '\n');
MO.setIsDead(false);
}
assert(LRI->PhysReg && "Register not assigned");
LRI->LastUse = &MI;
LRI->LastOpNum = OpNum;
markRegUsedInInstr(LRI->PhysReg);
- return LRI;
+ return *LRI;
}
/// Changes operand OpNum in MI to refer to PhysReg, considering subregs. This
/// may invalidate any operand pointers. Return true if the operand kills its
/// register.
-bool RegAllocFast::setPhysReg(MachineInstr &MI, unsigned OpNum,
+bool RegAllocFast::setPhysReg(MachineInstr &MI, MachineOperand &MO,
MCPhysReg PhysReg) {
- MachineOperand &MO = MI.getOperand(OpNum);
bool Dead = MO.isDead();
if (!MO.getSubReg()) {
MO.setReg(PhysReg);
@@ -761,7 +769,7 @@ void RegAllocFast::handleThroughOperands(MachineInstr &MI,
SmallVector<unsigned, 8> PartialDefs;
LLVM_DEBUG(dbgs() << "Allocating tied uses.\n");
for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
- const MachineOperand &MO = MI.getOperand(I);
+ MachineOperand &MO = MI.getOperand(I);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue;
@@ -770,17 +778,17 @@ void RegAllocFast::handleThroughOperands(MachineInstr &MI,
LLVM_DEBUG(dbgs() << "Operand " << I << "(" << MO
<< ") is tied to operand " << MI.findTiedOperandIdx(I)
<< ".\n");
- LiveRegMap::iterator LRI = reloadVirtReg(MI, I, Reg, 0);
- MCPhysReg PhysReg = LRI->PhysReg;
- setPhysReg(MI, I, PhysReg);
+ LiveReg &LR = reloadVirtReg(MI, I, Reg, 0);
+ MCPhysReg PhysReg = LR.PhysReg;
+ setPhysReg(MI, MO, PhysReg);
// Note: we don't update the def operand yet. That would cause the normal
// def-scan to attempt spilling.
} else if (MO.getSubReg() && MI.readsVirtualRegister(Reg)) {
- LLVM_DEBUG(dbgs() << "Partial redefine: " << MO << "\n");
+ LLVM_DEBUG(dbgs() << "Partial redefine: " << MO << '\n');
// Reload the register, but don't assign to the operand just yet.
// That would confuse the later phys-def processing pass.
- LiveRegMap::iterator LRI = reloadVirtReg(MI, I, Reg, 0);
- PartialDefs.push_back(LRI->PhysReg);
+ LiveReg &LR = reloadVirtReg(MI, I, Reg, 0);
+ PartialDefs.push_back(LR.PhysReg);
}
}
@@ -793,9 +801,8 @@ void RegAllocFast::handleThroughOperands(MachineInstr &MI,
if (!MO.isEarlyClobber())
continue;
// Note: defineVirtReg may invalidate MO.
- LiveRegMap::iterator LRI = defineVirtReg(MI, I, Reg, 0);
- MCPhysReg PhysReg = LRI->PhysReg;
- if (setPhysReg(MI, I, PhysReg))
+ MCPhysReg PhysReg = defineVirtReg(MI, I, Reg, 0);
+ if (setPhysReg(MI, MI.getOperand(I), PhysReg))
VirtDead.push_back(Reg);
}
@@ -828,11 +835,12 @@ void RegAllocFast::dumpState() {
break;
default: {
dbgs() << '=' << printReg(PhysRegState[Reg]);
- LiveRegMap::iterator I = findLiveVirtReg(PhysRegState[Reg]);
- assert(I != LiveVirtRegs.end() && "Missing VirtReg entry");
- if (I->Dirty)
+ LiveRegMap::iterator LRI = findLiveVirtReg(PhysRegState[Reg]);
+ assert(LRI != LiveVirtRegs.end() && LRI->PhysReg &&
+ "Missing VirtReg entry");
+ if (LRI->Dirty)
dbgs() << "*";
- assert(I->PhysReg == Reg && "Bad inverse map");
+ assert(LRI->PhysReg == Reg && "Bad inverse map");
break;
}
}
@@ -841,6 +849,8 @@ void RegAllocFast::dumpState() {
// Check that LiveVirtRegs is the inverse.
for (LiveRegMap::iterator i = LiveVirtRegs.begin(),
e = LiveVirtRegs.end(); i != e; ++i) {
+ if (!i->PhysReg)
+ continue;
assert(TargetRegisterInfo::isVirtualRegister(i->VirtReg) &&
"Bad map key");
assert(TargetRegisterInfo::isPhysicalRegister(i->PhysReg) &&
@@ -850,6 +860,199 @@ void RegAllocFast::dumpState() {
}
#endif
+void RegAllocFast::allocateInstruction(MachineInstr &MI) {
+ const MCInstrDesc &MCID = MI.getDesc();
+
+ // If this is a copy, we may be able to coalesce.
+ unsigned CopySrcReg = 0;
+ unsigned CopyDstReg = 0;
+ unsigned CopySrcSub = 0;
+ unsigned CopyDstSub = 0;
+ if (MI.isCopy()) {
+ CopyDstReg = MI.getOperand(0).getReg();
+ CopySrcReg = MI.getOperand(1).getReg();
+ CopyDstSub = MI.getOperand(0).getSubReg();
+ CopySrcSub = MI.getOperand(1).getSubReg();
+ }
+
+ // Track registers used by instruction.
+ UsedInInstr.clear();
+
+ // First scan.
+ // Mark physreg uses and early clobbers as used.
+ // Find the end of the virtreg operands
+ unsigned VirtOpEnd = 0;
+ bool hasTiedOps = false;
+ bool hasEarlyClobbers = false;
+ bool hasPartialRedefs = false;
+ bool hasPhysDefs = false;
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI.getOperand(i);
+ // Make sure MRI knows about registers clobbered by regmasks.
+ if (MO.isRegMask()) {
+ MRI->addPhysRegsUsedFromRegMask(MO.getRegMask());
+ continue;
+ }
+ if (!MO.isReg()) continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg) continue;
+ if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ VirtOpEnd = i+1;
+ if (MO.isUse()) {
+ hasTiedOps = hasTiedOps ||
+ MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1;
+ } else {
+ if (MO.isEarlyClobber())
+ hasEarlyClobbers = true;
+ if (MO.getSubReg() && MI.readsVirtualRegister(Reg))
+ hasPartialRedefs = true;
+ }
+ continue;
+ }
+ if (!MRI->isAllocatable(Reg)) continue;
+ if (MO.isUse()) {
+ usePhysReg(MO);
+ } else if (MO.isEarlyClobber()) {
+ definePhysReg(MI, Reg,
+ (MO.isImplicit() || MO.isDead()) ? regFree : regReserved);
+ hasEarlyClobbers = true;
+ } else
+ hasPhysDefs = true;
+ }
+
+ // The instruction may have virtual register operands that must be allocated
+ // the same register at use-time and def-time: early clobbers and tied
+ // operands. If there are also physical defs, these registers must avoid
+ // both physical defs and uses, making them more constrained than normal
+ // operands.
+ // Similarly, if there are multiple defs and tied operands, we must make
+ // sure the same register is allocated to uses and defs.
+ // We didn't detect inline asm tied operands above, so just make this extra
+ // pass for all inline asm.
+ if (MI.isInlineAsm() || hasEarlyClobbers || hasPartialRedefs ||
+ (hasTiedOps && (hasPhysDefs || MCID.getNumDefs() > 1))) {
+ handleThroughOperands(MI, VirtDead);
+ // Don't attempt coalescing when we have funny stuff going on.
+ CopyDstReg = 0;
+ // Pretend we have early clobbers so the use operands get marked below.
+ // This is not necessary for the common case of a single tied use.
+ hasEarlyClobbers = true;
+ }
+
+ // Second scan.
+ // Allocate virtreg uses.
+ for (unsigned I = 0; I != VirtOpEnd; ++I) {
+ MachineOperand &MO = MI.getOperand(I);
+ if (!MO.isReg()) continue;
+ unsigned Reg = MO.getReg();
+ if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue;
+ if (MO.isUse()) {
+ LiveReg &LR = reloadVirtReg(MI, I, Reg, CopyDstReg);
+ MCPhysReg PhysReg = LR.PhysReg;
+ CopySrcReg = (CopySrcReg == Reg || CopySrcReg == PhysReg) ? PhysReg : 0;
+ if (setPhysReg(MI, MO, PhysReg))
+ killVirtReg(LR);
+ }
+ }
+
+ // Track registers defined by instruction - early clobbers and tied uses at
+ // this point.
+ UsedInInstr.clear();
+ if (hasEarlyClobbers) {
+ for (const MachineOperand &MO : MI.operands()) {
+ if (!MO.isReg()) continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
+ // Look for physreg defs and tied uses.
+ if (!MO.isDef() && !MO.isTied()) continue;
+ markRegUsedInInstr(Reg);
+ }
+ }
+
+ unsigned DefOpEnd = MI.getNumOperands();
+ if (MI.isCall()) {
+ // Spill all virtregs before a call. This serves one purpose: If an
+ // exception is thrown, the landing pad is going to expect to find
+ // registers in their spill slots.
+ // Note: although this is appealing to just consider all definitions
+ // as call-clobbered, this is not correct because some of those
+ // definitions may be used later on and we do not want to reuse
+ // those for virtual registers in between.
+ LLVM_DEBUG(dbgs() << " Spilling remaining registers before call.\n");
+ spillAll(MI);
+ }
+
+ // Third scan.
+ // Allocate defs and collect dead defs.
+ for (unsigned I = 0; I != DefOpEnd; ++I) {
+ const MachineOperand &MO = MI.getOperand(I);
+ if (!MO.isReg() || !MO.isDef() || !MO.getReg() || MO.isEarlyClobber())
+ continue;
+ unsigned Reg = MO.getReg();
+
+ if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
+ if (!MRI->isAllocatable(Reg)) continue;
+ definePhysReg(MI, Reg, MO.isDead() ? regFree : regReserved);
+ continue;
+ }
+ MCPhysReg PhysReg = defineVirtReg(MI, I, Reg, CopySrcReg);
+ if (setPhysReg(MI, MI.getOperand(I), PhysReg)) {
+ VirtDead.push_back(Reg);
+ CopyDstReg = 0; // cancel coalescing;
+ } else
+ CopyDstReg = (CopyDstReg == Reg || CopyDstReg == PhysReg) ? PhysReg : 0;
+ }
+
+ // Kill dead defs after the scan to ensure that multiple defs of the same
+ // register are allocated identically. We didn't need to do this for uses
+ // because we are creating our own kill flags, and they are always at the
+ // last use.
+ for (unsigned VirtReg : VirtDead)
+ killVirtReg(VirtReg);
+ VirtDead.clear();
+
+ LLVM_DEBUG(dbgs() << "<< " << MI);
+ if (CopyDstReg && CopyDstReg == CopySrcReg && CopyDstSub == CopySrcSub) {
+ LLVM_DEBUG(dbgs() << "Mark identity copy for removal\n");
+ Coalesced.push_back(&MI);
+ }
+}
+
+void RegAllocFast::handleDebugValue(MachineInstr &MI) {
+ MachineOperand &MO = MI.getOperand(0);
+
+ // Ignore DBG_VALUEs that aren't based on virtual registers. These are
+ // mostly constants and frame indices.
+ if (!MO.isReg())
+ return;
+ unsigned Reg = MO.getReg();
+ if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ return;
+
+ // See if this virtual register has already been allocated to a physical
+ // register or spilled to a stack slot.
+ LiveRegMap::iterator LRI = findLiveVirtReg(Reg);
+ if (LRI != LiveVirtRegs.end() && LRI->PhysReg) {
+ setPhysReg(MI, MO, LRI->PhysReg);
+ } else {
+ int SS = StackSlotForVirtReg[Reg];
+ if (SS != -1) {
+ // Modify DBG_VALUE now that the value is in a spill slot.
+ updateDbgValueForSpill(MI, SS);
+ LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << MI);
+ return;
+ }
+
+ // We can't allocate a physreg for a DebugValue, sorry!
+ LLVM_DEBUG(dbgs() << "Unable to allocate vreg used by DBG_VALUE");
+ MO.setReg(0);
+ }
+
+ // If Reg hasn't been spilled, put this DBG_VALUE in LiveDbgValueMap so
+ // that future spills of Reg will have DBG_VALUEs.
+ LiveDbgValueMap[Reg].push_back(&MI);
+}
+
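Taken together with the new spill helper earlier in the patch, handleDebugValue above implements straightforward bookkeeping: DBG_VALUEs that still refer to a virtual register are recorded in LiveDbgValueMap, and when that register is later spilled, the recorded entries are used to emit debug info pointing at the stack slot. A toy sketch of that bookkeeping, with made-up types in place of MachineInstr and buildDbgValueForSpill:

#include <unordered_map>
#include <vector>

// Made-up stand-in for a DBG_VALUE's location: either a register or a slot.
struct DbgValueSketch {
  unsigned VirtReg;
  int FrameIndex = -1; // -1: still described by a register
};

// Remember DBG_VALUEs per virtual register and rewrite them on spill.
class DbgValueTracker {
  std::unordered_map<unsigned, std::vector<DbgValueSketch *>> LiveDbgValues;

public:
  // Called when a DBG_VALUE referring to VirtReg is seen (handleDebugValue).
  void trackUse(unsigned VirtReg, DbgValueSketch *DV) {
    LiveDbgValues[VirtReg].push_back(DV);
  }

  // Called when VirtReg is spilled to FrameIndex (the spill() helper). The
  // LLVM code inserts fresh DBG_VALUEs instead of editing in place; either
  // way the recorded list is cleared afterwards.
  void spilled(unsigned VirtReg, int FrameIndex) {
    auto It = LiveDbgValues.find(VirtReg);
    if (It == LiveDbgValues.end())
      return;
    for (DbgValueSketch *DV : It->second)
      DV->FrameIndex = FrameIndex;
    It->second.clear();
  }
};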
void RegAllocFast::allocateBasicBlock(MachineBasicBlock &MBB) {
this->MBB = &MBB;
LLVM_DEBUG(dbgs() << "\nAllocating " << MBB);
@@ -869,206 +1072,19 @@ void RegAllocFast::allocateBasicBlock(MachineBasicBlock &MBB) {
// Otherwise, sequentially allocate each instruction in the MBB.
for (MachineInstr &MI : MBB) {
- const MCInstrDesc &MCID = MI.getDesc();
- LLVM_DEBUG(dbgs() << "\n>> " << MI << "Regs:"; dumpState());
+ LLVM_DEBUG(
+ dbgs() << "\n>> " << MI << "Regs:";
+ dumpState()
+ );
- // Debug values are not allowed to change codegen in any way.
+ // Special handling for debug values. Note that they are not allowed to
+ // affect codegen of the other instructions in any way.
if (MI.isDebugValue()) {
- MachineInstr *DebugMI = &MI;
- MachineOperand &MO = DebugMI->getOperand(0);
-
- // Ignore DBG_VALUEs that aren't based on virtual registers. These are
- // mostly constants and frame indices.
- if (!MO.isReg())
- continue;
- unsigned Reg = MO.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
- continue;
-
- // See if this virtual register has already been allocated to a physical
- // register or spilled to a stack slot.
- LiveRegMap::iterator LRI = findLiveVirtReg(Reg);
- if (LRI != LiveVirtRegs.end())
- setPhysReg(*DebugMI, 0, LRI->PhysReg);
- else {
- int SS = StackSlotForVirtReg[Reg];
- if (SS != -1) {
- // Modify DBG_VALUE now that the value is in a spill slot.
- updateDbgValueForSpill(*DebugMI, SS);
- LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:"
- << "\t" << *DebugMI);
- continue;
- }
-
- // We can't allocate a physreg for a DebugValue, sorry!
- LLVM_DEBUG(dbgs() << "Unable to allocate vreg used by DBG_VALUE");
- MO.setReg(0);
- }
-
- // If Reg hasn't been spilled, put this DBG_VALUE in LiveDbgValueMap so
- // that future spills of Reg will have DBG_VALUEs.
- LiveDbgValueMap[Reg].push_back(DebugMI);
+ handleDebugValue(MI);
continue;
}
- if (MI.isDebugLabel())
- continue;
-
- // If this is a copy, we may be able to coalesce.
- unsigned CopySrcReg = 0;
- unsigned CopyDstReg = 0;
- unsigned CopySrcSub = 0;
- unsigned CopyDstSub = 0;
- if (MI.isCopy()) {
- CopyDstReg = MI.getOperand(0).getReg();
- CopySrcReg = MI.getOperand(1).getReg();
- CopyDstSub = MI.getOperand(0).getSubReg();
- CopySrcSub = MI.getOperand(1).getSubReg();
- }
-
- // Track registers used by instruction.
- UsedInInstr.clear();
-
- // First scan.
- // Mark physreg uses and early clobbers as used.
- // Find the end of the virtreg operands
- unsigned VirtOpEnd = 0;
- bool hasTiedOps = false;
- bool hasEarlyClobbers = false;
- bool hasPartialRedefs = false;
- bool hasPhysDefs = false;
- for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI.getOperand(i);
- // Make sure MRI knows about registers clobbered by regmasks.
- if (MO.isRegMask()) {
- MRI->addPhysRegsUsedFromRegMask(MO.getRegMask());
- continue;
- }
- if (!MO.isReg()) continue;
- unsigned Reg = MO.getReg();
- if (!Reg) continue;
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
- VirtOpEnd = i+1;
- if (MO.isUse()) {
- hasTiedOps = hasTiedOps ||
- MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1;
- } else {
- if (MO.isEarlyClobber())
- hasEarlyClobbers = true;
- if (MO.getSubReg() && MI.readsVirtualRegister(Reg))
- hasPartialRedefs = true;
- }
- continue;
- }
- if (!MRI->isAllocatable(Reg)) continue;
- if (MO.isUse()) {
- usePhysReg(MO);
- } else if (MO.isEarlyClobber()) {
- definePhysReg(MI, Reg,
- (MO.isImplicit() || MO.isDead()) ? regFree : regReserved);
- hasEarlyClobbers = true;
- } else
- hasPhysDefs = true;
- }
-
- // The instruction may have virtual register operands that must be allocated
- // the same register at use-time and def-time: early clobbers and tied
- // operands. If there are also physical defs, these registers must avoid
- // both physical defs and uses, making them more constrained than normal
- // operands.
- // Similarly, if there are multiple defs and tied operands, we must make
- // sure the same register is allocated to uses and defs.
- // We didn't detect inline asm tied operands above, so just make this extra
- // pass for all inline asm.
- if (MI.isInlineAsm() || hasEarlyClobbers || hasPartialRedefs ||
- (hasTiedOps && (hasPhysDefs || MCID.getNumDefs() > 1))) {
- handleThroughOperands(MI, VirtDead);
- // Don't attempt coalescing when we have funny stuff going on.
- CopyDstReg = 0;
- // Pretend we have early clobbers so the use operands get marked below.
- // This is not necessary for the common case of a single tied use.
- hasEarlyClobbers = true;
- }
-
- // Second scan.
- // Allocate virtreg uses.
- for (unsigned I = 0; I != VirtOpEnd; ++I) {
- const MachineOperand &MO = MI.getOperand(I);
- if (!MO.isReg()) continue;
- unsigned Reg = MO.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue;
- if (MO.isUse()) {
- LiveRegMap::iterator LRI = reloadVirtReg(MI, I, Reg, CopyDstReg);
- MCPhysReg PhysReg = LRI->PhysReg;
- CopySrcReg = (CopySrcReg == Reg || CopySrcReg == PhysReg) ? PhysReg : 0;
- if (setPhysReg(MI, I, PhysReg))
- killVirtReg(LRI);
- }
- }
-
- // Track registers defined by instruction - early clobbers and tied uses at
- // this point.
- UsedInInstr.clear();
- if (hasEarlyClobbers) {
- for (const MachineOperand &MO : MI.operands()) {
- if (!MO.isReg()) continue;
- unsigned Reg = MO.getReg();
- if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
- // Look for physreg defs and tied uses.
- if (!MO.isDef() && !MO.isTied()) continue;
- markRegUsedInInstr(Reg);
- }
- }
-
- unsigned DefOpEnd = MI.getNumOperands();
- if (MI.isCall()) {
- // Spill all virtregs before a call. This serves one purpose: If an
- // exception is thrown, the landing pad is going to expect to find
- // registers in their spill slots.
- // Note: although this is appealing to just consider all definitions
- // as call-clobbered, this is not correct because some of those
- // definitions may be used later on and we do not want to reuse
- // those for virtual registers in between.
- LLVM_DEBUG(dbgs() << " Spilling remaining registers before call.\n");
- spillAll(MI);
- }
-
- // Third scan.
- // Allocate defs and collect dead defs.
- for (unsigned I = 0; I != DefOpEnd; ++I) {
- const MachineOperand &MO = MI.getOperand(I);
- if (!MO.isReg() || !MO.isDef() || !MO.getReg() || MO.isEarlyClobber())
- continue;
- unsigned Reg = MO.getReg();
-
- if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
- if (!MRI->isAllocatable(Reg)) continue;
- definePhysReg(MI, Reg, MO.isDead() ? regFree : regReserved);
- continue;
- }
- LiveRegMap::iterator LRI = defineVirtReg(MI, I, Reg, CopySrcReg);
- MCPhysReg PhysReg = LRI->PhysReg;
- if (setPhysReg(MI, I, PhysReg)) {
- VirtDead.push_back(Reg);
- CopyDstReg = 0; // cancel coalescing;
- } else
- CopyDstReg = (CopyDstReg == Reg || CopyDstReg == PhysReg) ? PhysReg : 0;
- }
-
- // Kill dead defs after the scan to ensure that multiple defs of the same
- // register are allocated identically. We didn't need to do this for uses
- // because we are crerating our own kill flags, and they are always at the
- // last use.
- for (unsigned VirtReg : VirtDead)
- killVirtReg(VirtReg);
- VirtDead.clear();
-
- if (CopyDstReg && CopyDstReg == CopySrcReg && CopyDstSub == CopySrcSub) {
- LLVM_DEBUG(dbgs() << "-- coalescing: " << MI);
- Coalesced.push_back(&MI);
- } else {
- LLVM_DEBUG(dbgs() << "<< " << MI);
- }
+ allocateInstruction(MI);
}
// Spill all physical registers holding virtual registers now.
@@ -1079,12 +1095,11 @@ void RegAllocFast::allocateBasicBlock(MachineBasicBlock &MBB) {
// LiveVirtRegs might refer to the instrs.
for (MachineInstr *MI : Coalesced)
MBB.erase(MI);
- NumCopies += Coalesced.size();
+ NumCoalesced += Coalesced.size();
LLVM_DEBUG(MBB.dump());
}
-/// Allocates registers for a function.
bool RegAllocFast::runOnMachineFunction(MachineFunction &MF) {
LLVM_DEBUG(dbgs() << "********** FAST REGISTER ALLOCATION **********\n"
<< "********** Function: " << MF.getName() << '\n');