Diffstat (limited to 'contrib/llvm-project/llvm/lib/Analysis/ScalarEvolutionExpander.cpp')
-rw-r--r--  contrib/llvm-project/llvm/lib/Analysis/ScalarEvolutionExpander.cpp | 23
1 file changed, 15 insertions(+), 8 deletions(-)
diff --git a/contrib/llvm-project/llvm/lib/Analysis/ScalarEvolutionExpander.cpp b/contrib/llvm-project/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
index e8a95d35482c..dc5d02aa3a3c 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -240,9 +240,6 @@ Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
-/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
-/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
-/// check to see if the divide was folded.
static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder,
                              const SCEV *Factor, ScalarEvolution &SE,
                              const DataLayout &DL) {
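
The doc comment above describes a divide-with-remainder test on SCEV operands. A minimal sketch of the constant case using LLVM's APInt arithmetic; factorConstant and its signature are invented for illustration (this is not the helper in this file, which works on SCEV nodes):

    #include "llvm/ADT/APInt.h"
    using llvm::APInt;

    // Split V into Q*Factor + R with signed division, mirroring how a
    // SCEVConstant can be factored even when not evenly divisible.
    // Assumes V and Factor have the same bit width.
    static bool factorConstant(const APInt &V, const APInt &Factor,
                               APInt &Q, APInt &R) {
      if (Factor == 0)      // cannot factor out zero
        return false;
      Q = V.sdiv(Factor);   // quotient replaces the original value
      R = V.srem(Factor);   // the "reasonable remainder"
      return true;          // V == Q * Factor + R
    }
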
@@ -417,7 +414,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

-  Type *IntPtrTy = DL.getIntPtrType(PTy);
+  Type *IntIdxTy = DL.getIndexType(PTy);

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
@@ -429,7 +426,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
  // array indexing.
  SmallVector<const SCEV *, 8> ScaledOps;
  if (ElTy->isSized()) {
-    const SCEV *ElSize = SE.getSizeOfExpr(IntPtrTy, ElTy);
+    const SCEV *ElSize = SE.getSizeOfExpr(IntIdxTy, ElTy);
    if (!ElSize->isZero()) {
      SmallVector<const SCEV *, 8> NewOps;
      for (const SCEV *Op : Ops) {
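
The switch from getIntPtrType to getIndexType matters on targets where the width used for GEP index arithmetic differs from the pointer width (e.g. pointers carrying non-address bits). Both calls below are existing DataLayout API; the function and variable names are ours, for illustration only:

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Type.h"
    using namespace llvm;

    // Given a pointer type, compare the two integer widths a target exposes.
    static void showWidths(const DataLayout &DL, Type *PTy) {
      Type *PtrWidthTy = DL.getIntPtrType(PTy); // as wide as the pointer itself
      Type *IdxWidthTy = DL.getIndexType(PTy);  // width used for GEP indexing
      // On a datalayout such as "p:64:64:64:32" these differ: 64-bit pointers
      // are indexed with 32-bit offsets, and SCEV must use the index width.
      (void)PtrWidthTy;
      (void)IdxWidthTy;
    }
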
@@ -1486,7 +1483,18 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
}

Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
-  if (!CanonicalMode) return expandAddRecExprLiterally(S);
+  // In canonical mode we compute the addrec as an expression of a canonical IV
+  // using evaluateAtIteration and expand the resulting SCEV expression. This
+  // way we avoid introducing new IVs to carry on the computation of the addrec
+  // throughout the loop.
+  //
+  // For nested addrecs evaluateAtIteration might need a canonical IV of a
+  // type wider than the addrec itself. Emitting a canonical IV of the
+  // proper type might produce non-legal types, for example expanding an i64
+  // {0,+,2,+,1} addrec would need an i65 canonical IV. To avoid this, just
+  // fall back to non-canonical mode for nested addrecs.
+  if (!CanonicalMode || (S->getNumOperands() > 2))
+    return expandAddRecExprLiterally(S);

  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();
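
To see why a nested addrec demands a wider IV: evaluateAtIteration expands {A,+,B,+,C} via binomial coefficients, value(i) = A + B*C(i,1) + C*C(i,2). The sketch below is an illustrative scalar model of that closed form, not SCEV's evaluateAtIteration API:

    #include <cstdint>

    // value(i) = A + B*i + C*i*(i-1)/2 for the nested addrec {A,+,B,+,C}.
    // The product i*(i-1) of two 64-bit terms is what pushes the exact
    // computation to 65 bits, hence the i65 canonical IV mentioned above.
    static uint64_t evalAtIteration(uint64_t A, uint64_t B, uint64_t C,
                                    uint64_t i) {
      return A + B * i + C * ((i * (i - 1)) / 2); // wraps mod 2^64 in C++
    }
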
@@ -2094,11 +2102,10 @@ SCEVExpander::getRelatedExistingExpansion(const SCEV *S, const Instruction *At,
  for (BasicBlock *BB : ExitingBlocks) {
    ICmpInst::Predicate Pred;
    Instruction *LHS, *RHS;
-    BasicBlock *TrueBB, *FalseBB;

    if (!match(BB->getTerminator(),
               m_Br(m_ICmp(Pred, m_Instruction(LHS), m_Instruction(RHS)),
-                    TrueBB, FalseBB)))
+                    m_BasicBlock(), m_BasicBlock())))
      continue;

    if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
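
The argument-less m_BasicBlock() matcher accepts any successor without binding a variable, which is why the TrueBB/FalseBB locals could be dropped above. When the successors are actually needed, PatternMatch also offers a binding overload; a small sketch, where matchCondBranchOnICmp is a hypothetical helper of ours:

    #include "llvm/IR/Instructions.h"
    #include "llvm/IR/PatternMatch.h"
    using namespace llvm;
    using namespace llvm::PatternMatch;

    // Match "br (icmp LHS, RHS), TrueBB, FalseBB" and capture all operands.
    static bool matchCondBranchOnICmp(BasicBlock *BB) {
      ICmpInst::Predicate Pred;
      Instruction *LHS, *RHS;
      BasicBlock *TrueBB, *FalseBB;
      // m_BasicBlock(X) binds the matched successor to X; the argument-less
      // form used in the patch matches any successor and binds nothing.
      return match(BB->getTerminator(),
                   m_Br(m_ICmp(Pred, m_Instruction(LHS), m_Instruction(RHS)),
                        m_BasicBlock(TrueBB), m_BasicBlock(FalseBB)));
    }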