Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/CodeGenPrepare.cpp                    | 17
-rw-r--r--  lib/CodeGen/InlineSpiller.cpp                     |  7
-rw-r--r--  lib/CodeGen/RegAllocBase.cpp                      |  9
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp  | 21
4 files changed, 32 insertions, 22 deletions
diff --git a/lib/CodeGen/CodeGenPrepare.cpp b/lib/CodeGen/CodeGenPrepare.cpp
index 45dc13d58de7..dc02a00e0fcc 100644
--- a/lib/CodeGen/CodeGenPrepare.cpp
+++ b/lib/CodeGen/CodeGenPrepare.cpp
@@ -4016,14 +4016,18 @@ static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
   return true;
 }
 
+// Max number of memory uses to look at before aborting the search to conserve
+// compile time.
+static constexpr int MaxMemoryUsesToScan = 20;
+
 /// Recursively walk all the uses of I until we find a memory use.
 /// If we find an obviously non-foldable instruction, return true.
 /// Add the ultimately found memory instructions to MemoryUses.
 static bool FindAllMemoryUses(
     Instruction *I,
     SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses,
-    SmallPtrSetImpl<Instruction *> &ConsideredInsts,
-    const TargetLowering &TLI, const TargetRegisterInfo &TRI) {
+    SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI,
+    const TargetRegisterInfo &TRI, int SeenInsts = 0) {
   // If we already considered this instruction, we're done.
   if (!ConsideredInsts.insert(I).second)
     return false;
@@ -4036,8 +4040,12 @@ static bool FindAllMemoryUses(
 
   // Loop over all the uses, recursively processing them.
   for (Use &U : I->uses()) {
-    Instruction *UserI = cast<Instruction>(U.getUser());
+    // Conservatively return true if we're seeing a large number or a deep chain
+    // of users. This avoids excessive compilation times in pathological cases.
+    if (SeenInsts++ >= MaxMemoryUsesToScan)
+      return true;
 
+    Instruction *UserI = cast<Instruction>(U.getUser());
     if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
       MemoryUses.push_back(std::make_pair(LI, U.getOperandNo()));
       continue;
@@ -4082,7 +4090,8 @@ static bool FindAllMemoryUses(
       continue;
     }
 
-    if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI))
+    if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI,
+                          SeenInsts))
       return true;
   }
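
The hunks above cap the recursive use-walk in FindAllMemoryUses at a fixed budget. Below is a minimal standalone sketch of the same bounded-scan pattern; Node, Users and findMemoryUses are hypothetical stand-ins, not the LLVM IR types. As in the patch, the budget is passed by value, so each recursive call carries the count of users seen so far along its own branch:

    #include <unordered_set>
    #include <vector>

    struct Node {
      std::vector<Node *> Users;
      bool IsMemoryUse = false;
    };

    static constexpr int MaxNodesToScan = 20; // mirrors MaxMemoryUsesToScan

    // Returns true to mean "give up": the scan budget ran out, matching
    // the conservative early-out added to FindAllMemoryUses above.
    static bool findMemoryUses(Node *N, std::vector<Node *> &Found,
                               std::unordered_set<Node *> &Seen,
                               int SeenNodes = 0) {
      if (!Seen.insert(N).second)
        return false; // already considered this node
      for (Node *User : N->Users) {
        if (SeenNodes++ >= MaxNodesToScan)
          return true; // too many users: conservatively abort
        if (User->IsMemoryUse) {
          Found.push_back(User);
          continue;
        }
        if (findMemoryUses(User, Found, Seen, SeenNodes))
          return true;
      }
      return false;
    }
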
diff --git a/lib/CodeGen/InlineSpiller.cpp b/lib/CodeGen/InlineSpiller.cpp
index 4e6a3ec21866..eda4f74c7874 100644
--- a/lib/CodeGen/InlineSpiller.cpp
+++ b/lib/CodeGen/InlineSpiller.cpp
@@ -643,8 +643,11 @@ void InlineSpiller::reMaterializeAll() {
       Edit->eraseVirtReg(Reg);
       continue;
     }
-    assert((LIS.hasInterval(Reg) && !LIS.getInterval(Reg).empty()) &&
-           "Reg with empty interval has reference");
+
+    assert(LIS.hasInterval(Reg) &&
+           (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) &&
+           "Empty and not used live-range?!");
+
     RegsToSpill[ResultPos++] = Reg;
   }
   RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
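
The strengthened assertion can be read as a predicate over the registers that survive the filtering loop: an interval must exist, and it may be empty only if the register still has non-debug uses. A sketch restating it as a standalone helper; isSaneSpillReg is hypothetical, and the include paths are an assumption based on the in-tree layout of this era:

    #include "llvm/CodeGen/LiveIntervalAnalysis.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"
    using namespace llvm;

    // Fails only for a live range that is both empty and unused; such a
    // register should already have been erased by the loop above.
    static bool isSaneSpillReg(const LiveIntervals &LIS,
                               const MachineRegisterInfo &MRI, unsigned Reg) {
      return LIS.hasInterval(Reg) &&
             (!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg));
    }
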
diff --git a/lib/CodeGen/RegAllocBase.cpp b/lib/CodeGen/RegAllocBase.cpp
index a7b7a9f8ab15..7b4fbace2c1c 100644
--- a/lib/CodeGen/RegAllocBase.cpp
+++ b/lib/CodeGen/RegAllocBase.cpp
@@ -133,18 +133,19 @@ void RegAllocBase::allocatePhysRegs() {
     if (AvailablePhysReg)
       Matrix->assign(*VirtReg, AvailablePhysReg);
 
-    for (VirtRegVec::iterator I = SplitVRegs.begin(), E = SplitVRegs.end();
-         I != E; ++I) {
-      LiveInterval *SplitVirtReg = &LIS->getInterval(*I);
+    for (unsigned Reg : SplitVRegs) {
+      assert(LIS->hasInterval(Reg));
+
+      LiveInterval *SplitVirtReg = &LIS->getInterval(Reg);
       assert(!VRM->hasPhys(SplitVirtReg->reg) && "Register already assigned");
       if (MRI->reg_nodbg_empty(SplitVirtReg->reg)) {
+        assert(SplitVirtReg->empty() && "Non-empty but used interval");
         DEBUG(dbgs() << "not queueing unused " << *SplitVirtReg << '\n');
         aboutToRemoveInterval(*SplitVirtReg);
         LIS->removeInterval(SplitVirtReg->reg);
         continue;
       }
       DEBUG(dbgs() << "queuing new interval: " << *SplitVirtReg << "\n");
-      assert(!SplitVirtReg->empty() && "expecting non-empty interval");
       assert(TargetRegisterInfo::isVirtualRegister(SplitVirtReg->reg) &&
              "expect split value in virtual register");
       enqueue(SplitVirtReg);
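
This hunk converts the iterator loop to a range-based for and moves the emptiness check to the point where it is actually implied: a split register with no non-debug uses must have an empty live range, and only used, non-empty intervals are re-queued. A reduced sketch of that control flow over hypothetical stand-in types (Interval, requeueSplitRegs), not the LLVM classes:

    #include <cassert>
    #include <vector>

    struct Interval {
      unsigned Reg;
      bool Empty;         // stands in for LiveInterval::empty()
      bool HasNonDbgUses; // stands in for !MRI->reg_nodbg_empty(Reg)
    };

    void requeueSplitRegs(std::vector<Interval> &SplitVRegs,
                          std::vector<Interval *> &Queue) {
      for (Interval &LI : SplitVRegs) {
        if (!LI.HasNonDbgUses) {
          // Unused after splitting implies an empty live range; such
          // intervals are dropped rather than re-queued.
          assert(LI.Empty && "Non-empty but used interval");
          continue;
        }
        Queue.push_back(&LI); // used interval: allocate it later
      }
    }
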
diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index d41054b15bbc..0cad20db0964 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -2965,7 +2965,12 @@ static inline bool isSETCCorConvertedSETCC(SDValue N) {
   else if (N.getOpcode() == ISD::SIGN_EXTEND)
     N = N.getOperand(0);
 
-  return (N.getOpcode() == ISD::SETCC);
+  if (isLogicalMaskOp(N.getOpcode()))
+    return isSETCCorConvertedSETCC(N.getOperand(0)) &&
+           isSETCCorConvertedSETCC(N.getOperand(1));
+
+  return (N.getOpcode() == ISD::SETCC ||
+          ISD::isBuildVectorOfConstantSDNodes(N.getNode()));
 }
 
 #endif
@@ -2973,28 +2978,20 @@ static inline bool isSETCCorConvertedSETCC(SDValue N) {
 // to ToMaskVT if needed with vector extension or truncation.
 SDValue DAGTypeLegalizer::convertMask(SDValue InMask, EVT MaskVT,
                                       EVT ToMaskVT) {
-  LLVMContext &Ctx = *DAG.getContext();
-
   // Currently a SETCC or a AND/OR/XOR with two SETCCs are handled.
-  unsigned InMaskOpc = InMask->getOpcode();
-
   // FIXME: This code seems to be too restrictive, we might consider
   // generalizing it or dropping it.
-  assert((InMaskOpc == ISD::SETCC ||
-          ISD::isBuildVectorOfConstantSDNodes(InMask.getNode()) ||
-          (isLogicalMaskOp(InMaskOpc) &&
-           isSETCCorConvertedSETCC(InMask->getOperand(0)) &&
-           isSETCCorConvertedSETCC(InMask->getOperand(1)))) &&
-         "Unexpected mask argument.");
+  assert(isSETCCorConvertedSETCC(InMask) && "Unexpected mask argument.");
 
   // Make a new Mask node, with a legal result VT.
   SmallVector<SDValue, 4> Ops;
   for (unsigned i = 0; i < InMask->getNumOperands(); ++i)
     Ops.push_back(InMask->getOperand(i));
-  SDValue Mask = DAG.getNode(InMaskOpc, SDLoc(InMask), MaskVT, Ops);
+  SDValue Mask = DAG.getNode(InMask->getOpcode(), SDLoc(InMask), MaskVT, Ops);
 
   // If MaskVT has smaller or bigger elements than ToMaskVT, a vector sign
   // extend or truncate is needed.
+  LLVMContext &Ctx = *DAG.getContext();
   unsigned MaskScalarBits = MaskVT.getScalarSizeInBits();
   unsigned ToMaskScalBits = ToMaskVT.getScalarSizeInBits();
   if (MaskScalarBits < ToMaskScalBits) {
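
With isSETCCorConvertedSETCC now recursing through AND/OR/XOR mask operations and also accepting constant build vectors, convertMask can replace its long inline assertion with a single call to the predicate. A toy version of that recursive shape, over a hypothetical Node/Kind type instead of SDValue:

    enum class Kind { SetCC, ZExt, SExt, And, Or, Xor, ConstVec, Other };

    struct Node {
      Kind K;
      const Node *Op0 = nullptr;
      const Node *Op1 = nullptr;
    };

    static bool isLogicalMaskOp(Kind K) {
      return K == Kind::And || K == Kind::Or || K == Kind::Xor;
    }

    static bool isSetCCOrConverted(const Node *N) {
      // Look through the zero/sign extension that legalization inserts.
      if (N->K == Kind::ZExt || N->K == Kind::SExt)
        N = N->Op0;
      // Recurse through logical combinations of masks, as the new
      // isSETCCorConvertedSETCC does.
      if (isLogicalMaskOp(N->K))
        return isSetCCOrConverted(N->Op0) && isSetCCOrConverted(N->Op1);
      return N->K == Kind::SetCC || N->K == Kind::ConstVec;
    }
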