author     Dimitry Andric <dim@FreeBSD.org>   2017-05-29 16:25:25 +0000
committer  Dimitry Andric <dim@FreeBSD.org>   2017-05-29 16:25:25 +0000
commit     ab44ce3d598882e51a25eb82eb7ae6308de85ae6 (patch)
tree       568d786a59d49bef961dcb9bd09d422701b9da5b /lib/Analysis
parent     b5630dbadf9a2a06754194387d6b0fd9962a67f1 (diff)
Diffstat (limited to 'lib/Analysis')
-rw-r--r--  lib/Analysis/ConstantFolding.cpp            7
-rw-r--r--  lib/Analysis/InstructionSimplify.cpp      152
-rw-r--r--  lib/Analysis/Lint.cpp                       8
-rw-r--r--  lib/Analysis/LoopPass.cpp                  23
-rw-r--r--  lib/Analysis/ScalarEvolution.cpp           74
-rw-r--r--  lib/Analysis/ScalarEvolutionExpander.cpp   20
-rw-r--r--  lib/Analysis/TargetTransformInfo.cpp        4
-rw-r--r--  lib/Analysis/ValueTracking.cpp              6
8 files changed, 177 insertions, 117 deletions
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index 0ca712bbfe70..79517ec6a3a8 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -687,11 +687,8 @@ Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
// bits.
if (Opc == Instruction::And) {
- unsigned BitWidth = DL.getTypeSizeInBits(Op0->getType()->getScalarType());
- KnownBits Known0(BitWidth);
- KnownBits Known1(BitWidth);
- computeKnownBits(Op0, Known0, DL);
- computeKnownBits(Op1, Known1, DL);
+ KnownBits Known0 = computeKnownBits(Op0, DL);
+ KnownBits Known1 = computeKnownBits(Op1, DL);
if ((Known1.One | Known0.Zero).isAllOnesValue()) {
// All the bits of Op0 that the 'and' could be masking are already zero.
return Op0;
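
The hunk above depends on a simple bit-level invariant: when every bit position is either known to be one in Op1 or known to be zero in Op0, the 'and' cannot clear any bit that is actually set in Op0, so Op0 & Op1 == Op0. A minimal stand-alone sketch of that invariant using plain integers instead of LLVM's KnownBits/APInt (all constants below are illustrative, not taken from the commit):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t Op0        = 0x000000F0;
  uint32_t Op1        = 0x0000FFFF;
  uint32_t Known1One  = 0x0000FFFF; // bits known to be one in Op1
  uint32_t Known0Zero = 0xFFFFFF0F; // bits known to be zero in Op0

  // The fold's precondition: (Known1.One | Known0.Zero).isAllOnesValue().
  assert((Known1One | Known0Zero) == 0xFFFFFFFFu);
  // Then the 'and' cannot mask off any set bit of Op0, so Op0 & Op1 == Op0.
  assert((Op0 & Op1) == Op0);
  return 0;
}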
diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp
index 2e72d5aa8269..122442bafb11 100644
--- a/lib/Analysis/InstructionSimplify.cpp
+++ b/lib/Analysis/InstructionSimplify.cpp
@@ -688,9 +688,7 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
if (isNUW)
return Op0;
- unsigned BitWidth = Op1->getType()->getScalarSizeInBits();
- KnownBits Known(BitWidth);
- computeKnownBits(Op1, Known, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
+ KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
if (Known.Zero.isMaxSignedValue()) {
// Op1 is either 0 or the minimum signed value. If the sub is NSW, then
// Op1 must be 0 because negating the minimum signed value is undefined.
@@ -1309,15 +1307,13 @@ static Value *SimplifyShift(Instruction::BinaryOps Opcode, Value *Op0,
// If any bits in the shift amount make that value greater than or equal to
// the number of bits in the type, the shift is undefined.
- unsigned BitWidth = Op1->getType()->getScalarSizeInBits();
- KnownBits Known(BitWidth);
- computeKnownBits(Op1, Known, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
- if (Known.One.getLimitedValue() >= BitWidth)
+ KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
+ if (Known.One.getLimitedValue() >= Known.getBitWidth())
return UndefValue::get(Op0->getType());
// If all valid bits in the shift amount are known zero, the first operand is
// unchanged.
- unsigned NumValidShiftBits = Log2_32_Ceil(BitWidth);
+ unsigned NumValidShiftBits = Log2_32_Ceil(Known.getBitWidth());
if (Known.countMinTrailingZeros() >= NumValidShiftBits)
return Op0;
@@ -1343,9 +1339,7 @@ static Value *SimplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0,
// The low bit cannot be shifted out of an exact shift if it is set.
if (isExact) {
- unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
- KnownBits Op0Known(BitWidth);
- computeKnownBits(Op0, Op0Known, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
+ KnownBits Op0Known = computeKnownBits(Op0, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
if (Op0Known.One[0])
return Op0;
}
@@ -1428,6 +1422,8 @@ Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
return ::SimplifyAShrInst(Op0, Op1, isExact, Q, RecursionLimit);
}
+/// Commuted variants are assumed to be handled by calling this function again
+/// with the parameters swapped.
static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
ICmpInst *UnsignedICmp, bool IsAnd) {
Value *X, *Y;
@@ -1560,20 +1556,8 @@ static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1,
return nullptr;
}
-/// Commuted variants are assumed to be handled by calling this function again
-/// with the parameters swapped.
-static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
- if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true))
- return X;
-
- if (Value *X = simplifyAndOfICmpsWithSameOperands(Op0, Op1))
- return X;
-
- if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true))
- return X;
-
+static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1) {
// (icmp (add V, C0), C1) & (icmp V, C0)
- Type *ITy = Op0->getType();
ICmpInst::Predicate Pred0, Pred1;
const APInt *C0, *C1;
Value *V;
@@ -1587,6 +1571,7 @@ static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
if (AddInst->getOperand(1) != Op1->getOperand(1))
return nullptr;
+ Type *ITy = Op0->getType();
bool isNSW = AddInst->hasNoSignedWrap();
bool isNUW = AddInst->hasNoUnsignedWrap();
@@ -1617,18 +1602,29 @@ static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
return nullptr;
}
-/// Commuted variants are assumed to be handled by calling this function again
-/// with the parameters swapped.
-static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
- if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false))
+static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
+ if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true))
+ return X;
+ if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/true))
return X;
- if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1))
+ if (Value *X = simplifyAndOfICmpsWithSameOperands(Op0, Op1))
+ return X;
+ if (Value *X = simplifyAndOfICmpsWithSameOperands(Op1, Op0))
return X;
- if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false))
+ if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true))
+ return X;
+
+ if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1))
+ return X;
+ if (Value *X = simplifyAndOfICmpsWithAdd(Op1, Op0))
return X;
+ return nullptr;
+}
+
+static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1) {
// (icmp (add V, C0), C1) | (icmp V, C0)
ICmpInst::Predicate Pred0, Pred1;
const APInt *C0, *C1;
@@ -1674,19 +1670,24 @@ static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
return nullptr;
}
-static Value *simplifyPossiblyCastedAndOrOfICmps(ICmpInst *Cmp0, ICmpInst *Cmp1,
- bool IsAnd, CastInst *Cast) {
- Value *V =
- IsAnd ? simplifyAndOfICmps(Cmp0, Cmp1) : simplifyOrOfICmps(Cmp0, Cmp1);
- if (!V)
- return nullptr;
- if (!Cast)
- return V;
+static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
+ if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false))
+ return X;
+ if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/false))
+ return X;
- // If we looked through casts, we can only handle a constant simplification
- // because we are not allowed to create a cast instruction here.
- if (auto *C = dyn_cast<Constant>(V))
- return ConstantExpr::getCast(Cast->getOpcode(), C, Cast->getType());
+ if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1))
+ return X;
+ if (Value *X = simplifyOrOfICmpsWithSameOperands(Op1, Op0))
+ return X;
+
+ if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false))
+ return X;
+
+ if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1))
+ return X;
+ if (Value *X = simplifyOrOfICmpsWithAdd(Op1, Op0))
+ return X;
return nullptr;
}
@@ -1706,11 +1707,18 @@ static Value *simplifyAndOrOfICmps(Value *Op0, Value *Op1, bool IsAnd) {
if (!Cmp0 || !Cmp1)
return nullptr;
- if (Value *V = simplifyPossiblyCastedAndOrOfICmps(Cmp0, Cmp1, IsAnd, Cast0))
- return V;
- if (Value *V = simplifyPossiblyCastedAndOrOfICmps(Cmp1, Cmp0, IsAnd, Cast0))
+ Value *V =
+ IsAnd ? simplifyAndOfICmps(Cmp0, Cmp1) : simplifyOrOfICmps(Cmp0, Cmp1);
+ if (!V)
+ return nullptr;
+ if (!Cast0)
return V;
+ // If we looked through casts, we can only handle a constant simplification
+ // because we are not allowed to create a cast instruction here.
+ if (auto *C = dyn_cast<Constant>(V))
+ return ConstantExpr::getCast(Cast0->getOpcode(), C, Cast0->getType());
+
return nullptr;
}
@@ -1927,37 +1935,27 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
MaxRecurse))
return V;
- // (A & C)|(B & D)
- Value *C = nullptr, *D = nullptr;
- if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
- match(Op1, m_And(m_Value(B), m_Value(D)))) {
- ConstantInt *C1 = dyn_cast<ConstantInt>(C);
- ConstantInt *C2 = dyn_cast<ConstantInt>(D);
- if (C1 && C2 && (C1->getValue() == ~C2->getValue())) {
+ // (A & C1)|(B & C2)
+ const APInt *C1, *C2;
+ if (match(Op0, m_And(m_Value(A), m_APInt(C1))) &&
+ match(Op1, m_And(m_Value(B), m_APInt(C2)))) {
+ if (*C1 == ~*C2) {
// (A & C1)|(B & C2)
// If we have: ((V + N) & C1) | (V & C2)
// .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
// replace with V+N.
- Value *V1, *V2;
- if ((C2->getValue() & (C2->getValue() + 1)) == 0 && // C2 == 0+1+
- match(A, m_Add(m_Value(V1), m_Value(V2)))) {
+ Value *N;
+ if (C2->isMask() && // C2 == 0+1+
+ match(A, m_c_Add(m_Specific(B), m_Value(N)))) {
// Add commutes, try both ways.
- if (V1 == B &&
- MaskedValueIsZero(V2, C2->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
- return A;
- if (V2 == B &&
- MaskedValueIsZero(V1, C2->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
+ if (MaskedValueIsZero(N, *C2, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
return A;
}
// Or commutes, try both ways.
- if ((C1->getValue() & (C1->getValue() + 1)) == 0 &&
- match(B, m_Add(m_Value(V1), m_Value(V2)))) {
+ if (C1->isMask() &&
+ match(B, m_c_Add(m_Specific(A), m_Value(N)))) {
// Add commutes, try both ways.
- if (V1 == A &&
- MaskedValueIsZero(V2, C1->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
- return B;
- if (V2 == A &&
- MaskedValueIsZero(V1, C1->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
+ if (MaskedValueIsZero(N, *C1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
return B;
}
}
@@ -3372,9 +3370,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if (ICmpInst::isEquality(Pred)) {
const APInt *RHSVal;
if (match(RHS, m_APInt(RHSVal))) {
- unsigned BitWidth = RHSVal->getBitWidth();
- KnownBits LHSKnown(BitWidth);
- computeKnownBits(LHS, LHSKnown, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
+ KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
if (LHSKnown.Zero.intersects(*RHSVal) ||
!LHSKnown.One.isSubsetOf(*RHSVal))
return Pred == ICmpInst::ICMP_EQ ? ConstantInt::getFalse(ITy)
@@ -3539,6 +3535,10 @@ static const Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
if (V == Op)
return RepOp;
+ // We cannot replace a constant, and shouldn't even try.
+ if (isa<Constant>(Op))
+ return nullptr;
+
auto *I = dyn_cast<Instruction>(V);
if (!I)
return nullptr;
@@ -4444,19 +4444,21 @@ static Value *SimplifyIntrinsic(Function *F, IterTy ArgBegin, IterTy ArgEnd,
case Intrinsic::uadd_with_overflow:
case Intrinsic::sadd_with_overflow: {
// X + undef -> undef
- if (isa<UndefValue>(RHS))
+ if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS))
return UndefValue::get(ReturnType);
return nullptr;
}
case Intrinsic::umul_with_overflow:
case Intrinsic::smul_with_overflow: {
+ // 0 * X -> { 0, false }
// X * 0 -> { 0, false }
- if (match(RHS, m_Zero()))
+ if (match(LHS, m_Zero()) || match(RHS, m_Zero()))
return Constant::getNullValue(ReturnType);
+ // undef * X -> { 0, false }
// X * undef -> { 0, false }
- if (match(RHS, m_Undef()))
+ if (match(LHS, m_Undef()) || match(RHS, m_Undef()))
return Constant::getNullValue(ReturnType);
return nullptr;
@@ -4680,9 +4682,7 @@ Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
// In general, it is possible for computeKnownBits to determine all bits in a
// value even when the operands are not all constants.
if (!Result && I->getType()->isIntOrIntVectorTy()) {
- unsigned BitWidth = I->getType()->getScalarSizeInBits();
- KnownBits Known(BitWidth);
- computeKnownBits(I, Known, Q.DL, /*Depth*/ 0, Q.AC, I, Q.DT, ORE);
+ KnownBits Known = computeKnownBits(I, Q.DL, /*Depth*/ 0, Q.AC, I, Q.DT, ORE);
if (Known.isConstant())
Result = ConstantInt::get(I->getType(), Known.getConstant());
}
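
The (A & C1)|(B & C2) hunk in SimplifyOrInst above folds to A = B + N when C2 == ~C1, C2 is a low-bit mask, and N has no bits inside C2: adding N cannot change B's low bits, so combining the high bits of A with the low bits of B reproduces A exactly. A small self-contained check of that identity with plain integers (the concrete values are illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t C2 = 0x0000FFFF;   // low-bit mask: C2 == 0+1+
  uint32_t C1 = ~C2;          // C1 == ~C2
  uint32_t B  = 0x12345678;
  uint32_t N  = 0xABCD0000;   // N & C2 == 0, i.e. MaskedValueIsZero(N, C2)
  uint32_t A  = B + N;        // A matches m_c_Add(m_Specific(B), m_Value(N))

  assert((N & C2) == 0);
  // Adding N leaves the low (C2) bits of B untouched, so the 'or' rebuilds A.
  assert(((A & C1) | (B & C2)) == A);
  return 0;
}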
diff --git a/lib/Analysis/Lint.cpp b/lib/Analysis/Lint.cpp
index 471ccb62970d..e6391792bc23 100644
--- a/lib/Analysis/Lint.cpp
+++ b/lib/Analysis/Lint.cpp
@@ -534,9 +534,7 @@ static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
VectorType *VecTy = dyn_cast<VectorType>(V->getType());
if (!VecTy) {
- unsigned BitWidth = V->getType()->getIntegerBitWidth();
- KnownBits Known(BitWidth);
- computeKnownBits(V, Known, DL, 0, AC, dyn_cast<Instruction>(V), DT);
+ KnownBits Known = computeKnownBits(V, DL, 0, AC, dyn_cast<Instruction>(V), DT);
return Known.isZero();
}
@@ -550,14 +548,12 @@ static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
// For a vector, KnownZero will only be true if all values are zero, so check
// this per component
- unsigned BitWidth = VecTy->getElementType()->getIntegerBitWidth();
for (unsigned I = 0, N = VecTy->getNumElements(); I != N; ++I) {
Constant *Elem = C->getAggregateElement(I);
if (isa<UndefValue>(Elem))
return true;
- KnownBits Known(BitWidth);
- computeKnownBits(Elem, Known, DL);
+ KnownBits Known = computeKnownBits(Elem, DL);
if (Known.isZero())
return true;
}
diff --git a/lib/Analysis/LoopPass.cpp b/lib/Analysis/LoopPass.cpp
index 0b5f6266e373..e988f6444a58 100644
--- a/lib/Analysis/LoopPass.cpp
+++ b/lib/Analysis/LoopPass.cpp
@@ -73,30 +73,23 @@ LPPassManager::LPPassManager()
CurrentLoop = nullptr;
}
-// Inset loop into loop nest (LoopInfo) and loop queue (LQ).
-Loop &LPPassManager::addLoop(Loop *ParentLoop) {
- // Create a new loop. LI will take ownership.
- Loop *L = new Loop();
-
- // Insert into the loop nest and the loop queue.
- if (!ParentLoop) {
+// Insert loop into loop nest (LoopInfo) and loop queue (LQ).
+void LPPassManager::addLoop(Loop &L) {
+ if (!L.getParentLoop()) {
// This is the top level loop.
- LI->addTopLevelLoop(L);
- LQ.push_front(L);
- return *L;
+ LQ.push_front(&L);
+ return;
}
- ParentLoop->addChildLoop(L);
// Insert L into the loop queue after the parent loop.
for (auto I = LQ.begin(), E = LQ.end(); I != E; ++I) {
- if (*I == L->getParentLoop()) {
+ if (*I == L.getParentLoop()) {
// deque does not support insert after.
++I;
- LQ.insert(I, 1, L);
- break;
+ LQ.insert(I, 1, &L);
+ return;
}
}
- return *L;
}
/// cloneBasicBlockSimpleAnalysis - Invoke cloneBasicBlockAnalysis hook for
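
The queue insertion in addLoop above works around std::deque having no insert-after primitive: locate the parent, step the iterator one element forward, and insert before that position. The same pattern, reduced to a self-contained snippet with plain ints (values chosen only for illustration):

#include <cassert>
#include <deque>

int main() {
  std::deque<int> LQ{1, 2, 4};
  // deque does not support insert after: find the parent, advance the
  // iterator past it, then insert before the new position.
  for (auto I = LQ.begin(), E = LQ.end(); I != E; ++I) {
    if (*I == 2) {
      ++I;                 // move past the "parent" element
      LQ.insert(I, 1, 3);  // insert one copy of 3 before that position
      break;
    }
  }
  assert((LQ == std::deque<int>{1, 2, 3, 4}));
  return 0;
}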
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 78ded8141c08..d280fda0a162 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -2178,6 +2178,63 @@ StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
return Flags;
}
+bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L,
+ DominatorTree &DT, LoopInfo &LI) {
+ if (!isLoopInvariant(S, L))
+ return false;
+ // If a value depends on a SCEVUnknown which is defined after the loop, we
+ // conservatively assume that we cannot calculate it at the loop's entry.
+ struct FindDominatedSCEVUnknown {
+ bool Found = false;
+ const Loop *L;
+ DominatorTree &DT;
+ LoopInfo &LI;
+
+ FindDominatedSCEVUnknown(const Loop *L, DominatorTree &DT, LoopInfo &LI)
+ : L(L), DT(DT), LI(LI) {}
+
+ bool checkSCEVUnknown(const SCEVUnknown *SU) {
+ if (auto *I = dyn_cast<Instruction>(SU->getValue())) {
+ if (DT.dominates(L->getHeader(), I->getParent()))
+ Found = true;
+ else
+ assert(DT.dominates(I->getParent(), L->getHeader()) &&
+ "No dominance relationship between SCEV and loop?");
+ }
+ return false;
+ }
+
+ bool follow(const SCEV *S) {
+ switch (static_cast<SCEVTypes>(S->getSCEVType())) {
+ case scConstant:
+ return false;
+ case scAddRecExpr:
+ case scTruncate:
+ case scZeroExtend:
+ case scSignExtend:
+ case scAddExpr:
+ case scMulExpr:
+ case scUMaxExpr:
+ case scSMaxExpr:
+ case scUDivExpr:
+ return true;
+ case scUnknown:
+ return checkSCEVUnknown(cast<SCEVUnknown>(S));
+ case scCouldNotCompute:
+ llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
+ }
+ return false;
+ }
+
+ bool isDone() { return Found; }
+ };
+
+ FindDominatedSCEVUnknown FSU(L, DT, LI);
+ SCEVTraversal<FindDominatedSCEVUnknown> ST(FSU);
+ ST.visitAll(S);
+ return !FSU.Found;
+}
+
/// Get a canonical add expression, or something simpler if possible.
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
SCEV::NoWrapFlags Flags,
@@ -2459,7 +2516,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
const Loop *AddRecLoop = AddRec->getLoop();
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- if (isLoopInvariant(Ops[i], AddRecLoop)) {
+ if (isAvailableAtLoopEntry(Ops[i], AddRecLoop, DT, LI)) {
LIOps.push_back(Ops[i]);
Ops.erase(Ops.begin()+i);
--i; --e;
@@ -2734,7 +2791,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
const Loop *AddRecLoop = AddRec->getLoop();
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- if (isLoopInvariant(Ops[i], AddRecLoop)) {
+ if (isAvailableAtLoopEntry(Ops[i], AddRecLoop, DT, LI)) {
LIOps.push_back(Ops[i]);
Ops.erase(Ops.begin()+i);
--i; --e;
@@ -4648,10 +4705,7 @@ uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) {
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
// For a SCEVUnknown, ask ValueTracking.
- unsigned BitWidth = getTypeSizeInBits(U->getType());
- KnownBits Known(BitWidth);
- computeKnownBits(U->getValue(), Known, getDataLayout(), 0, &AC,
- nullptr, &DT);
+ KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT);
return Known.countMinTrailingZeros();
}
@@ -4831,8 +4885,7 @@ ScalarEvolution::getRange(const SCEV *S,
const DataLayout &DL = getDataLayout();
if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
// For a SCEVUnknown, ask ValueTracking.
- KnownBits Known(BitWidth);
- computeKnownBits(U->getValue(), Known, DL, 0, &AC, nullptr, &DT);
+ KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
if (Known.One != ~Known.Zero + 1)
ConservativeResult =
ConservativeResult.intersectWith(ConstantRange(Known.One,
@@ -9537,8 +9590,11 @@ struct SCEVCollectAddRecMultiplies {
bool HasAddRec = false;
SmallVector<const SCEV *, 0> Operands;
for (auto Op : Mul->operands()) {
- if (isa<SCEVUnknown>(Op)) {
+ const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
+ if (Unknown && !isa<CallInst>(Unknown->getValue())) {
Operands.push_back(Op);
+ } else if (Unknown) {
+ HasAddRec = true;
} else {
bool ContainsAddRec;
SCEVHasAddRec ContiansAddRec(ContainsAddRec);
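
isAvailableAtLoopEntry above drives SCEVTraversal with a visitor implementing the follow()/isDone() contract: follow() decides whether to descend into an expression's operands, and isDone() lets the walk stop as soon as a dominated SCEVUnknown has been found. The sketch below mimics that contract with a toy node type; none of these names are LLVM's, it only illustrates the early-exit worklist walk:

#include <cassert>
#include <vector>

struct Node {
  int Kind;                  // 0 = constant, 1 = unknown leaf, 2 = compound
  std::vector<Node *> Ops;
};

struct FindUnknown {
  bool Found = false;
  bool follow(const Node *N) {
    if (N->Kind == 1)        // "SCEVUnknown"-like leaf: record the hit
      Found = true;
    return N->Kind == 2;     // only compound nodes have operands to visit
  }
  bool isDone() const { return Found; }
};

template <typename Visitor> void visitAll(const Node *Root, Visitor &V) {
  std::vector<const Node *> Worklist{Root};
  while (!Worklist.empty() && !V.isDone()) {
    const Node *N = Worklist.back();
    Worklist.pop_back();
    if (V.follow(N))
      for (const Node *Op : N->Ops)
        Worklist.push_back(Op);
  }
}

int main() {
  Node Leaf{1, {}}, Konst{0, {}};
  Node Add{2, {&Konst, &Leaf}};
  FindUnknown FU;
  visitAll(&Add, FU);
  assert(FU.Found);          // the walk stops once the leaf is seen
  return 0;
}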
diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index 86cbd79aa84e..f9b9df2bc707 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -1305,12 +1305,17 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
// Expand the core addrec. If we need post-loop scaling, force it to
// expand to an integer type to avoid the need for additional casting.
Type *ExpandTy = PostLoopScale ? IntTy : STy;
+ // We can't use a pointer type for the addrec if the pointer type is
+ // non-integral.
+ Type *AddRecPHIExpandTy =
+ DL.isNonIntegralPointerType(STy) ? Normalized->getType() : ExpandTy;
+
// In some cases, we decide to reuse an existing phi node but need to truncate
// it and/or invert the step.
Type *TruncTy = nullptr;
bool InvertStep = false;
- PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy,
- TruncTy, InvertStep);
+ PHINode *PN = getAddRecExprPHILiterally(Normalized, L, AddRecPHIExpandTy,
+ IntTy, TruncTy, InvertStep);
// Accommodate post-inc mode, if necessary.
Value *Result;
@@ -1383,8 +1388,15 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
// Re-apply any non-loop-dominating offset.
if (PostLoopOffset) {
if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
- const SCEV *const OffsetArray[1] = { PostLoopOffset };
- Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
+ if (Result->getType()->isIntegerTy()) {
+ Value *Base = expandCodeFor(PostLoopOffset, ExpandTy);
+ const SCEV *const OffsetArray[1] = {SE.getUnknown(Result)};
+ Result = expandAddToGEP(OffsetArray, OffsetArray + 1, PTy, IntTy, Base);
+ } else {
+ const SCEV *const OffsetArray[1] = {PostLoopOffset};
+ Result =
+ expandAddToGEP(OffsetArray, OffsetArray + 1, PTy, IntTy, Result);
+ }
} else {
Result = InsertNoopCastOfTo(Result, IntTy);
Result = Builder.CreateAdd(Result,
diff --git a/lib/Analysis/TargetTransformInfo.cpp b/lib/Analysis/TargetTransformInfo.cpp
index 8a5d10473662..7a8d4f3be24f 100644
--- a/lib/Analysis/TargetTransformInfo.cpp
+++ b/lib/Analysis/TargetTransformInfo.cpp
@@ -149,6 +149,10 @@ bool TargetTransformInfo::isLegalMaskedScatter(Type *DataType) const {
return TTIImpl->isLegalMaskedGather(DataType);
}
+bool TargetTransformInfo::prefersVectorizedAddressing() const {
+ return TTIImpl->prefersVectorizedAddressing();
+}
+
int TargetTransformInfo::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
int64_t BaseOffset,
bool HasBaseReg,
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index 8e6c1096eec8..bd79cd56a18b 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -149,8 +149,10 @@ static KnownBits computeKnownBits(const Value *V, unsigned Depth,
KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
unsigned Depth, AssumptionCache *AC,
const Instruction *CxtI,
- const DominatorTree *DT) {
- return ::computeKnownBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
+ const DominatorTree *DT,
+ OptimizationRemarkEmitter *ORE) {
+ return ::computeKnownBits(V, Depth,
+ Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
}
bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,