Diffstat (limited to 'lib/Analysis')
 lib/Analysis/ConstantFolding.cpp       |   9
 lib/Analysis/InstructionSimplify.cpp   | 361
 lib/Analysis/LazyValueInfo.cpp         |  14
 lib/Analysis/Lint.cpp                  |   4
 lib/Analysis/ModuleSummaryAnalysis.cpp |  43
 lib/Analysis/ScalarEvolution.cpp       | 128
 lib/Analysis/TargetLibraryInfo.cpp     |   4
 lib/Analysis/ValueTracking.cpp         |  79
 8 files changed, 360 insertions(+), 282 deletions(-)
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index 863fbdba7e67f..130e917e49d75 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -701,11 +701,10 @@ Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
return Op1;
}
- APInt KnownZero = Known0.Zero | Known1.Zero;
- APInt KnownOne = Known0.One & Known1.One;
- if ((KnownZero | KnownOne).isAllOnesValue()) {
- return ConstantInt::get(Op0->getType(), KnownOne);
- }
+ Known0.Zero |= Known1.Zero;
+ Known0.One &= Known1.One;
+ if (Known0.isConstant())
+ return ConstantInt::get(Op0->getType(), Known0.getConstant());
}
// If the constant expr is something like &A[123] - &A[4].f, fold this into a
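The rewritten fold updates Known0 in place and uses the new KnownBits::isConstant()/getConstant() helpers instead of recomputing the masks by hand. A minimal standalone sketch of the underlying transfer function for 'and' (toy 8-bit masks, not LLVM's KnownBits class), with a brute-force soundness check:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Toy analogue of llvm::KnownBits: Zero marks bits known to be 0, One marks
// bits known to be 1; the two masks never overlap for valid facts.
struct Known8 {
  uint8_t Zero = 0, One = 0;
  bool isConstant() const { return uint8_t(Zero | One) == 0xFF; }
  uint8_t getConstant() const { return One; }
  bool admits(uint8_t V) const { return (V & Zero) == 0 && (V & One) == One; }
};

// The in-place update from the hunk above: a bit of (x & y) is known zero if
// it is known zero in either input, and known one only if known one in both.
Known8 andBits(Known8 A, Known8 B) {
  return {uint8_t(A.Zero | B.Zero), uint8_t(A.One & B.One)};
}

int main() {
  Known8 X{uint8_t(~0xCA), 0xCA}; // x fully known: 0xCA
  Known8 Y{uint8_t(~0xF0), 0xF0}; // y fully known: 0xF0
  Known8 R = andBits(X, Y);
  assert(R.isConstant() && R.getConstant() == (0xCA & 0xF0));

  Known8 P{0x0F, 0x00}; // low nibble of p known zero
  Known8 Q{0x00, 0x80}; // top bit of q known one
  Known8 S = andBits(P, Q);
  for (unsigned p = 0; p < 256; ++p)
    for (unsigned q = 0; q < 256; ++q)
      if (P.admits(p) && Q.admits(q))
        assert(S.admits(p & q));
  puts("known-bits 'and' transfer function checked");
}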
diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp
index 7aa6abf8fa488..4a713f441ce87 100644
--- a/lib/Analysis/InstructionSimplify.cpp
+++ b/lib/Analysis/InstructionSimplify.cpp
@@ -1495,36 +1495,87 @@ static Value *simplifyAndOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
/// Commuted variants are assumed to be handled by calling this function again
/// with the parameters swapped.
-static Value *SimplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
+static Value *simplifyOrOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
+ ICmpInst::Predicate Pred0, Pred1;
+ Value *A, *B;
+ if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
+ !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
+ return nullptr;
+
+ // We have (icmp Pred0, A, B) | (icmp Pred1, A, B).
+ // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
+ // can eliminate Op0 from this 'or'.
+ if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
+ return Op1;
+
+ // Check for any combination of predicates that cover the entire range of
+ // possibilities.
+ if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
+ (Pred0 == ICmpInst::ICMP_NE && ICmpInst::isTrueWhenEqual(Pred1)) ||
+ (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGE) ||
+ (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGE))
+ return getTrue(Op0->getType());
+
+ return nullptr;
+}
+
+/// Test if a pair of compares with a shared operand and 2 constants has an
+/// empty set intersection, full set union, or if one compare is a superset of
+/// the other.
+static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1,
+ bool IsAnd) {
+ // Look for this pattern: {and/or} (icmp X, C0), (icmp X, C1).
+ if (Cmp0->getOperand(0) != Cmp1->getOperand(0))
+ return nullptr;
+
+ const APInt *C0, *C1;
+ if (!match(Cmp0->getOperand(1), m_APInt(C0)) ||
+ !match(Cmp1->getOperand(1), m_APInt(C1)))
+ return nullptr;
+
+ auto Range0 = ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0);
+ auto Range1 = ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1);
+
+ // For and-of-compares, check if the intersection is empty:
+ // (icmp X, C0) && (icmp X, C1) --> empty set --> false
+ if (IsAnd && Range0.intersectWith(Range1).isEmptySet())
+ return getFalse(Cmp0->getType());
+
+ // For or-of-compares, check if the union is full:
+ // (icmp X, C0) || (icmp X, C1) --> full set --> true
+ if (!IsAnd && Range0.unionWith(Range1).isFullSet())
+ return getTrue(Cmp0->getType());
+
+ // Is one range a superset of the other?
+ // If this is and-of-compares, take the smaller set:
+ // (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42
+ // If this is or-of-compares, take the larger set:
+ // (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4
+ if (Range0.contains(Range1))
+ return IsAnd ? Cmp1 : Cmp0;
+ if (Range1.contains(Range0))
+ return IsAnd ? Cmp0 : Cmp1;
+
+ return nullptr;
+}
+
+/// Commuted variants are assumed to be handled by calling this function again
+/// with the parameters swapped.
+static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true))
return X;
if (Value *X = simplifyAndOfICmpsWithSameOperands(Op0, Op1))
return X;
- // FIXME: This should be shared with or-of-icmps.
- // Look for this pattern: (icmp V, C0) & (icmp V, C1)).
+ if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true))
+ return X;
+
+ // (icmp (add V, C0), C1) & (icmp V, C0)
Type *ITy = Op0->getType();
ICmpInst::Predicate Pred0, Pred1;
const APInt *C0, *C1;
Value *V;
- if (match(Op0, m_ICmp(Pred0, m_Value(V), m_APInt(C0))) &&
- match(Op1, m_ICmp(Pred1, m_Specific(V), m_APInt(C1)))) {
- // Make a constant range that's the intersection of the two icmp ranges.
- // If the intersection is empty, we know that the result is false.
- auto Range0 = ConstantRange::makeExactICmpRegion(Pred0, *C0);
- auto Range1 = ConstantRange::makeExactICmpRegion(Pred1, *C1);
- if (Range0.intersectWith(Range1).isEmptySet())
- return getFalse(ITy);
-
- // If a range is a superset of the other, the smaller set is all we need.
- if (Range0.contains(Range1))
- return Op1;
- if (Range1.contains(Range0))
- return Op0;
- }
-
- // (icmp (add V, C0), C1) & (icmp V, C0)
if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
return nullptr;
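The new shared helper above replaces the and-only ConstantRange logic that the rest of this hunk deletes, parameterizing it on IsAnd. A brute-force sanity check of the three folds it performs, written against plain C++ comparisons rather than the LLVM API:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Exhaustively verify, over all i8 values of X, the folds performed via
// ConstantRange in simplifyAndOrOfICmpsWithConstants: empty intersection
// --> false, full union --> true, superset --> keep the smaller set for
// 'and' and the larger set for 'or'.
int main() {
  for (int v = -128; v <= 127; ++v) {
    int8_t X = (int8_t)v;
    assert(((X < 5) && (X > 10)) == false);    // empty intersection
    assert(((X < 11) || (X > 4)) == true);     // full union
    assert(((X > 4) && (X > 42)) == (X > 42)); // and: smaller set wins
    assert(((X > 4) || (X > 42)) == (X > 4));  // or: larger set wins
  }
  puts("icmp and/or constant-range folds verified over i8");
}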
@@ -1565,6 +1616,103 @@ static Value *SimplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
return nullptr;
}
+/// Commuted variants are assumed to be handled by calling this function again
+/// with the parameters swapped.
+static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
+ if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false))
+ return X;
+
+ if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1))
+ return X;
+
+ if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false))
+ return X;
+
+ // (icmp (add V, C0), C1) | (icmp V, C0)
+ ICmpInst::Predicate Pred0, Pred1;
+ const APInt *C0, *C1;
+ Value *V;
+ if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
+ return nullptr;
+
+ if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
+ return nullptr;
+
+ auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
+ if (AddInst->getOperand(1) != Op1->getOperand(1))
+ return nullptr;
+
+ Type *ITy = Op0->getType();
+ bool isNSW = AddInst->hasNoSignedWrap();
+ bool isNUW = AddInst->hasNoUnsignedWrap();
+
+ const APInt Delta = *C1 - *C0;
+ if (C0->isStrictlyPositive()) {
+ if (Delta == 2) {
+ if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
+ return getTrue(ITy);
+ if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && isNSW)
+ return getTrue(ITy);
+ }
+ if (Delta == 1) {
+ if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
+ return getTrue(ITy);
+ if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && isNSW)
+ return getTrue(ITy);
+ }
+ }
+ if (C0->getBoolValue() && isNUW) {
+ if (Delta == 2)
+ if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
+ return getTrue(ITy);
+ if (Delta == 1)
+ if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
+ return getTrue(ITy);
+ }
+
+ return nullptr;
+}
+
+static Value *simplifyPossiblyCastedAndOrOfICmps(ICmpInst *Cmp0, ICmpInst *Cmp1,
+ bool IsAnd, CastInst *Cast) {
+ Value *V =
+ IsAnd ? simplifyAndOfICmps(Cmp0, Cmp1) : simplifyOrOfICmps(Cmp0, Cmp1);
+ if (!V)
+ return nullptr;
+ if (!Cast)
+ return V;
+
+ // If we looked through casts, we can only handle a constant simplification
+ // because we are not allowed to create a cast instruction here.
+ if (auto *C = dyn_cast<Constant>(V))
+ return ConstantExpr::getCast(Cast->getOpcode(), C, Cast->getType());
+
+ return nullptr;
+}
+
+static Value *simplifyAndOrOfICmps(Value *Op0, Value *Op1, bool IsAnd) {
+ // Look through casts of the 'and' operands to find compares.
+ auto *Cast0 = dyn_cast<CastInst>(Op0);
+ auto *Cast1 = dyn_cast<CastInst>(Op1);
+ if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
+ Cast0->getSrcTy() == Cast1->getSrcTy()) {
+ Op0 = Cast0->getOperand(0);
+ Op1 = Cast1->getOperand(0);
+ }
+
+ auto *Cmp0 = dyn_cast<ICmpInst>(Op0);
+ auto *Cmp1 = dyn_cast<ICmpInst>(Op1);
+ if (!Cmp0 || !Cmp1)
+ return nullptr;
+
+ if (Value *V = simplifyPossiblyCastedAndOrOfICmps(Cmp0, Cmp1, IsAnd, Cast0))
+ return V;
+ if (Value *V = simplifyPossiblyCastedAndOrOfICmps(Cmp1, Cmp0, IsAnd, Cast0))
+ return V;
+
+ return nullptr;
+}
+
/// Given operands for an And, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
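simplifyPossiblyCastedAndOrOfICmps above only reuses the i1-level result through matching casts when it is a constant: InstructionSimplify may not create new instructions, and only a constant can be re-cast for free via ConstantExpr::getCast. A toy model of that restriction, with zext from i1 modeled as bool-to-uint8_t widening:

#include <cassert>
#include <cstdint>
#include <cstdio>

// zext i1 -> i8, as a plain function.
uint8_t zext1(bool B) { return B ? 1 : 0; }

int main() {
  for (int x = -128; x <= 127; ++x) {
    // (zext (icmp sgt x, 4)) & (zext (icmp sgt x, 42)): the i1-level fold
    // keeps the tighter compare, but that result is not a constant, so the
    // simplifier would bail rather than materialize a fresh zext(x > 42).
    assert((zext1(x > 4) & zext1(x > 42)) == zext1(x > 42));
    // (zext (x < 5)) & (zext (x > 10)) folds to the i1 constant false, which
    // re-casts for free: zext(false) == 0.
    assert((zext1(x < 5) & zext1(x > 10)) == 0);
  }
  puts("cast-wrapped icmp folds behave as modeled");
}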
@@ -1615,32 +1763,8 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
return Op1;
}
- if (auto *ICILHS = dyn_cast<ICmpInst>(Op0)) {
- if (auto *ICIRHS = dyn_cast<ICmpInst>(Op1)) {
- if (Value *V = SimplifyAndOfICmps(ICILHS, ICIRHS))
- return V;
- if (Value *V = SimplifyAndOfICmps(ICIRHS, ICILHS))
- return V;
- }
- }
-
- // The compares may be hidden behind casts. Look through those and try the
- // same folds as above.
- auto *Cast0 = dyn_cast<CastInst>(Op0);
- auto *Cast1 = dyn_cast<CastInst>(Op1);
- if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
- Cast0->getSrcTy() == Cast1->getSrcTy()) {
- auto *Cmp0 = dyn_cast<ICmpInst>(Cast0->getOperand(0));
- auto *Cmp1 = dyn_cast<ICmpInst>(Cast1->getOperand(0));
- if (Cmp0 && Cmp1) {
- Instruction::CastOps CastOpc = Cast0->getOpcode();
- Type *ResultType = Cast0->getType();
- if (auto *V = dyn_cast_or_null<Constant>(SimplifyAndOfICmps(Cmp0, Cmp1)))
- return ConstantExpr::getCast(CastOpc, V, ResultType);
- if (auto *V = dyn_cast_or_null<Constant>(SimplifyAndOfICmps(Cmp1, Cmp0)))
- return ConstantExpr::getCast(CastOpc, V, ResultType);
- }
- }
+ if (Value *V = simplifyAndOrOfICmps(Op0, Op1, true))
+ return V;
// Try some generic simplifications for associative operations.
if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q,
@@ -1678,86 +1802,6 @@ Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
return ::SimplifyAndInst(Op0, Op1, Q, RecursionLimit);
}
-/// Commuted variants are assumed to be handled by calling this function again
-/// with the parameters swapped.
-static Value *simplifyOrOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
- ICmpInst::Predicate Pred0, Pred1;
- Value *A ,*B;
- if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
- !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
- return nullptr;
-
- // We have (icmp Pred0, A, B) | (icmp Pred1, A, B).
- // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
- // can eliminate Op0 from this 'or'.
- if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
- return Op1;
-
- // Check for any combination of predicates that cover the entire range of
- // possibilities.
- if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
- (Pred0 == ICmpInst::ICMP_NE && ICmpInst::isTrueWhenEqual(Pred1)) ||
- (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGE) ||
- (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGE))
- return getTrue(Op0->getType());
-
- return nullptr;
-}
-
-/// Commuted variants are assumed to be handled by calling this function again
-/// with the parameters swapped.
-static Value *SimplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
- if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false))
- return X;
-
- if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1))
- return X;
-
- // (icmp (add V, C0), C1) | (icmp V, C0)
- ICmpInst::Predicate Pred0, Pred1;
- const APInt *C0, *C1;
- Value *V;
- if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
- return nullptr;
-
- if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
- return nullptr;
-
- auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
- if (AddInst->getOperand(1) != Op1->getOperand(1))
- return nullptr;
-
- Type *ITy = Op0->getType();
- bool isNSW = AddInst->hasNoSignedWrap();
- bool isNUW = AddInst->hasNoUnsignedWrap();
-
- const APInt Delta = *C1 - *C0;
- if (C0->isStrictlyPositive()) {
- if (Delta == 2) {
- if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
- return getTrue(ITy);
- if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && isNSW)
- return getTrue(ITy);
- }
- if (Delta == 1) {
- if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
- return getTrue(ITy);
- if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && isNSW)
- return getTrue(ITy);
- }
- }
- if (C0->getBoolValue() && isNUW) {
- if (Delta == 2)
- if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
- return getTrue(ITy);
- if (Delta == 1)
- if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
- return getTrue(ITy);
- }
-
- return nullptr;
-}
-
/// Given operands for an Or, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
@@ -1826,14 +1870,8 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))))
return Op0;
- if (auto *ICILHS = dyn_cast<ICmpInst>(Op0)) {
- if (auto *ICIRHS = dyn_cast<ICmpInst>(Op1)) {
- if (Value *V = SimplifyOrOfICmps(ICILHS, ICIRHS))
- return V;
- if (Value *V = SimplifyOrOfICmps(ICIRHS, ICILHS))
- return V;
- }
- }
+ if (Value *V = simplifyAndOrOfICmps(Op0, Op1, false))
+ return V;
// Try some generic simplifications for associative operations.
if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q,
@@ -4056,20 +4094,13 @@ static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
unsigned MaskNumElts = Mask->getType()->getVectorNumElements();
unsigned InVecNumElts = InVecTy->getVectorNumElements();
- auto *Op0Const = dyn_cast<Constant>(Op0);
- auto *Op1Const = dyn_cast<Constant>(Op1);
-
- // If all operands are constant, constant fold the shuffle.
- if (Op0Const && Op1Const)
- return ConstantFoldShuffleVectorInstruction(Op0Const, Op1Const, Mask);
-
SmallVector<int, 32> Indices;
ShuffleVectorInst::getShuffleMask(Mask, Indices);
assert(MaskNumElts == Indices.size() &&
"Size of Indices not same as number of mask elements?");
- // If only one of the operands is constant, constant fold the shuffle if the
- // mask does not select elements from the variable operand.
+ // Canonicalization: if the mask does not select elements from an input vector,
+ // replace that input vector with undef.
bool MaskSelects0 = false, MaskSelects1 = false;
for (unsigned i = 0; i != MaskNumElts; ++i) {
if (Indices[i] == -1)
@@ -4079,23 +4110,41 @@ static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
else
MaskSelects1 = true;
}
- if (!MaskSelects0 && Op1Const)
- return ConstantFoldShuffleVectorInstruction(UndefValue::get(InVecTy),
- Op1Const, Mask);
- if (!MaskSelects1 && Op0Const)
- return ConstantFoldShuffleVectorInstruction(Op0Const,
- UndefValue::get(InVecTy), Mask);
+ if (!MaskSelects0)
+ Op0 = UndefValue::get(InVecTy);
+ if (!MaskSelects1)
+ Op1 = UndefValue::get(InVecTy);
+
+ auto *Op0Const = dyn_cast<Constant>(Op0);
+ auto *Op1Const = dyn_cast<Constant>(Op1);
+
+ // If all operands are constant, constant fold the shuffle.
+ if (Op0Const && Op1Const)
+ return ConstantFoldShuffleVectorInstruction(Op0Const, Op1Const, Mask);
+
+ // Canonicalization: if only one input vector is constant, it shall be the
+ // second one.
+ if (Op0Const && !Op1Const) {
+ std::swap(Op0, Op1);
+ for (int &Idx : Indices) {
+ if (Idx == -1)
+ continue;
+ Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
+ assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
+ "shufflevector mask index out of range");
+ }
+ Mask = ConstantDataVector::get(
+ Mask->getContext(),
+ makeArrayRef(reinterpret_cast<uint32_t *>(Indices.data()),
+ MaskNumElts));
+ }
// A shuffle of a splat is always the splat itself. Legal if the shuffle's
// value type is same as the input vectors' type.
if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
- if (!MaskSelects1 && RetTy == InVecTy &&
+ if (isa<UndefValue>(Op1) && RetTy == InVecTy &&
OpShuf->getMask()->getSplatValue())
return Op0;
- if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op1))
- if (!MaskSelects0 && RetTy == InVecTy &&
- OpShuf->getMask()->getSplatValue())
- return Op1;
// Don't fold a shuffle with undef mask elements. This may get folded in a
// better way using demanded bits or other analysis.
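The swap-plus-remap canonicalization above relies on shufflevector mask semantics: index i < N selects lane i of the first input, N <= i < 2N selects lane i - N of the second, so swapping the operands while flipping every defined index across N preserves the result. A small self-contained check of that invariant (std::vector stand-ins, not LLVM types):

#include <cassert>
#include <cstdio>
#include <vector>

// Toy shufflevector: -1 is an undef lane (modeled here as 0).
static std::vector<int> shuffle(const std::vector<int> &Op0,
                                const std::vector<int> &Op1,
                                const std::vector<int> &Mask) {
  int N = (int)Op0.size();
  std::vector<int> R;
  for (int Idx : Mask)
    R.push_back(Idx < 0 ? 0 : (Idx < N ? Op0[Idx] : Op1[Idx - N]));
  return R;
}

int main() {
  std::vector<int> A = {10, 11, 12, 13}; // the constant input
  std::vector<int> B = {20, 21, 22, 23}; // the variable input
  std::vector<int> Mask = {0, 5, 2, 7};  // selects from both inputs

  // The patch's canonicalization: put the constant input second by swapping
  // the operands and flipping each defined index across N.
  int N = (int)A.size();
  std::vector<int> Swapped = Mask;
  for (int &Idx : Swapped)
    if (Idx >= 0)
      Idx = Idx < N ? Idx + N : Idx - N;

  assert(shuffle(A, B, Mask) == shuffle(B, A, Swapped));
  puts("operand swap plus mask remap is value-preserving");
}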
@@ -4595,8 +4644,8 @@ Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
unsigned BitWidth = I->getType()->getScalarSizeInBits();
KnownBits Known(BitWidth);
computeKnownBits(I, Known, Q.DL, /*Depth*/ 0, Q.AC, I, Q.DT, ORE);
- if ((Known.Zero | Known.One).isAllOnesValue())
- Result = ConstantInt::get(I->getType(), Known.One);
+ if (Known.isConstant())
+ Result = ConstantInt::get(I->getType(), Known.getConstant());
}
/// If called on unreachable code, the above logic may report that the
diff --git a/lib/Analysis/LazyValueInfo.cpp b/lib/Analysis/LazyValueInfo.cpp
index a98383eaf4aa0..a2b9015a8a1d8 100644
--- a/lib/Analysis/LazyValueInfo.cpp
+++ b/lib/Analysis/LazyValueInfo.cpp
@@ -142,7 +142,7 @@ public:
return Val;
}
- ConstantRange getConstantRange() const {
+ const ConstantRange &getConstantRange() const {
assert(isConstantRange() &&
"Cannot get the constant-range of a non-constant-range!");
return Range;
@@ -250,7 +250,7 @@ public:
if (NewR.isFullSet())
markOverdefined();
else
- markConstantRange(NewR);
+ markConstantRange(std::move(NewR));
}
};
@@ -1079,8 +1079,8 @@ bool LazyValueInfoImpl::solveBlockValueSelect(LVILatticeVal &BBLV,
}
if (TrueVal.isConstantRange() && FalseVal.isConstantRange()) {
- ConstantRange TrueCR = TrueVal.getConstantRange();
- ConstantRange FalseCR = FalseVal.getConstantRange();
+ const ConstantRange &TrueCR = TrueVal.getConstantRange();
+ const ConstantRange &FalseCR = FalseVal.getConstantRange();
Value *LHS = nullptr;
Value *RHS = nullptr;
SelectPatternResult SPR = matchSelectPattern(SI, LHS, RHS);
@@ -1649,7 +1649,7 @@ Constant *LazyValueInfo::getConstant(Value *V, BasicBlock *BB,
if (Result.isConstant())
return Result.getConstant();
if (Result.isConstantRange()) {
- ConstantRange CR = Result.getConstantRange();
+ const ConstantRange &CR = Result.getConstantRange();
if (const APInt *SingleVal = CR.getSingleElement())
return ConstantInt::get(V->getContext(), *SingleVal);
}
@@ -1686,7 +1686,7 @@ Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
if (Result.isConstant())
return Result.getConstant();
if (Result.isConstantRange()) {
- ConstantRange CR = Result.getConstantRange();
+ const ConstantRange &CR = Result.getConstantRange();
if (const APInt *SingleVal = CR.getSingleElement())
return ConstantInt::get(V->getContext(), *SingleVal);
}
@@ -1712,7 +1712,7 @@ static LazyValueInfo::Tristate getPredicateResult(unsigned Pred, Constant *C,
ConstantInt *CI = dyn_cast<ConstantInt>(C);
if (!CI) return LazyValueInfo::Unknown;
- ConstantRange CR = Result.getConstantRange();
+ const ConstantRange &CR = Result.getConstantRange();
if (Pred == ICmpInst::ICMP_EQ) {
if (!CR.contains(CI->getValue()))
return LazyValueInfo::False;
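The LazyValueInfo changes are pure copy-avoidance: a ConstantRange carries two APInts, so queries now borrow it by const reference and freshly built ranges are moved into the lattice value. A minimal sketch of the pattern with a stand-in Range type:

#include <utility>
#include <vector>

struct Range { std::vector<unsigned> Words; }; // stand-in for ConstantRange

class LatticeVal {
  Range R;
public:
  // Before: 'Range getRange() const' copied the whole range on every query.
  const Range &getRange() const { return R; }
  // Callers that just built a range hand it over without a copy.
  void markRange(Range NewR) { R = std::move(NewR); }
};

int main() {
  LatticeVal V;
  V.markRange(Range{{1, 2, 3}});  // moved in, not copied
  const Range &CR = V.getRange(); // borrowed, not copied
  (void)CR;
}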
diff --git a/lib/Analysis/Lint.cpp b/lib/Analysis/Lint.cpp
index 598138246445e..471ccb62970d4 100644
--- a/lib/Analysis/Lint.cpp
+++ b/lib/Analysis/Lint.cpp
@@ -537,7 +537,7 @@ static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
unsigned BitWidth = V->getType()->getIntegerBitWidth();
KnownBits Known(BitWidth);
computeKnownBits(V, Known, DL, 0, AC, dyn_cast<Instruction>(V), DT);
- return Known.Zero.isAllOnesValue();
+ return Known.isZero();
}
// Per-component check doesn't work with zeroinitializer
@@ -558,7 +558,7 @@ static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
KnownBits Known(BitWidth);
computeKnownBits(Elem, Known, DL);
- if (Known.Zero.isAllOnesValue())
+ if (Known.isZero())
return true;
}
diff --git a/lib/Analysis/ModuleSummaryAnalysis.cpp b/lib/Analysis/ModuleSummaryAnalysis.cpp
index a83412506a07b..99f900ae3932c 100644
--- a/lib/Analysis/ModuleSummaryAnalysis.cpp
+++ b/lib/Analysis/ModuleSummaryAnalysis.cpp
@@ -37,7 +37,8 @@ using namespace llvm;
// Walk through the operands of a given User via worklist iteration and populate
// the set of GlobalValue references encountered. Invoked either on an
// Instruction or a GlobalVariable (which walks its initializer).
-static void findRefEdges(const User *CurUser, SetVector<ValueInfo> &RefEdges,
+static void findRefEdges(ModuleSummaryIndex &Index, const User *CurUser,
+ SetVector<ValueInfo> &RefEdges,
SmallPtrSet<const User *, 8> &Visited) {
SmallVector<const User *, 32> Worklist;
Worklist.push_back(CurUser);
@@ -61,7 +62,7 @@ static void findRefEdges(const User *CurUser, SetVector<ValueInfo> &RefEdges,
// the reference set unless it is a callee. Callees are handled
// specially by WriteFunction and are added to a separate list.
if (!(CS && CS.isCallee(&OI)))
- RefEdges.insert(GV);
+ RefEdges.insert(Index.getOrInsertValueInfo(GV));
continue;
}
Worklist.push_back(Operand);
@@ -198,7 +199,7 @@ computeFunctionSummary(ModuleSummaryIndex &Index, const Module &M,
if (isa<DbgInfoIntrinsic>(I))
continue;
++NumInsts;
- findRefEdges(&I, RefEdges, Visited);
+ findRefEdges(Index, &I, RefEdges, Visited);
auto CS = ImmutableCallSite(&I);
if (!CS)
continue;
@@ -239,7 +240,9 @@ computeFunctionSummary(ModuleSummaryIndex &Index, const Module &M,
// to record the call edge to the alias in that case. Eventually
// an alias summary will be created to associate the alias and
// aliasee.
- CallGraphEdges[cast<GlobalValue>(CalledValue)].updateHotness(Hotness);
+ CallGraphEdges[Index.getOrInsertValueInfo(
+ cast<GlobalValue>(CalledValue))]
+ .updateHotness(Hotness);
} else {
// Skip inline assembly calls.
if (CI && CI->isInlineAsm())
@@ -254,15 +257,16 @@ computeFunctionSummary(ModuleSummaryIndex &Index, const Module &M,
ICallAnalysis.getPromotionCandidatesForInstruction(
&I, NumVals, TotalCount, NumCandidates);
for (auto &Candidate : CandidateProfileData)
- CallGraphEdges[Candidate.Value].updateHotness(
- getHotness(Candidate.Count, PSI));
+ CallGraphEdges[Index.getOrInsertValueInfo(Candidate.Value)]
+ .updateHotness(getHotness(Candidate.Count, PSI));
}
}
// Explicitly add hot edges to enforce importing for designated GUIDs for
// sample PGO, to enable the same inlines as the profiled optimized binary.
for (auto &I : F.getImportGUIDs())
- CallGraphEdges[I].updateHotness(CalleeInfo::HotnessType::Hot);
+ CallGraphEdges[Index.getOrInsertValueInfo(I)].updateHotness(
+ CalleeInfo::HotnessType::Hot);
bool NonRenamableLocal = isNonRenamableLocal(F);
bool NotEligibleForImport =
@@ -288,7 +292,7 @@ computeVariableSummary(ModuleSummaryIndex &Index, const GlobalVariable &V,
DenseSet<GlobalValue::GUID> &CantBePromoted) {
SetVector<ValueInfo> RefEdges;
SmallPtrSet<const User *, 8> Visited;
- findRefEdges(&V, RefEdges, Visited);
+ findRefEdges(Index, &V, RefEdges, Visited);
bool NonRenamableLocal = isNonRenamableLocal(V);
GlobalValueSummary::GVFlags Flags(V.getLinkage(), NonRenamableLocal,
/* LiveRoot = */ false);
@@ -317,12 +321,9 @@ computeAliasSummary(ModuleSummaryIndex &Index, const GlobalAlias &A,
// Set LiveRoot flag on entries matching the given value name.
static void setLiveRoot(ModuleSummaryIndex &Index, StringRef Name) {
- auto SummaryList =
- Index.findGlobalValueSummaryList(GlobalValue::getGUID(Name));
- if (SummaryList == Index.end())
- return;
- for (auto &Summary : SummaryList->second)
- Summary->setLiveRoot();
+ if (ValueInfo VI = Index.getValueInfo(GlobalValue::getGUID(Name)))
+ for (auto &Summary : VI.getSummaryList())
+ Summary->setLiveRoot();
}
ModuleSummaryIndex llvm::buildModuleSummaryIndex(
@@ -446,12 +447,16 @@ ModuleSummaryIndex llvm::buildModuleSummaryIndex(
}
for (auto &GlobalList : Index) {
- assert(GlobalList.second.size() == 1 &&
+ // Ignore entries for references that are undefined in the current module.
+ if (GlobalList.second.SummaryList.empty())
+ continue;
+
+ assert(GlobalList.second.SummaryList.size() == 1 &&
"Expected module's index to have one summary per GUID");
- auto &Summary = GlobalList.second[0];
+ auto &Summary = GlobalList.second.SummaryList[0];
bool AllRefsCanBeExternallyReferenced =
llvm::all_of(Summary->refs(), [&](const ValueInfo &VI) {
- return !CantBePromoted.count(VI.getValue()->getGUID());
+ return !CantBePromoted.count(VI.getGUID());
});
if (!AllRefsCanBeExternallyReferenced) {
Summary->setNotEligibleToImport();
@@ -461,9 +466,7 @@ ModuleSummaryIndex llvm::buildModuleSummaryIndex(
if (auto *FuncSummary = dyn_cast<FunctionSummary>(Summary.get())) {
bool AllCallsCanBeExternallyReferenced = llvm::all_of(
FuncSummary->calls(), [&](const FunctionSummary::EdgeTy &Edge) {
- auto GUID = Edge.first.isGUID() ? Edge.first.getGUID()
- : Edge.first.getValue()->getGUID();
- return !CantBePromoted.count(GUID);
+ return !CantBePromoted.count(Edge.first.getGUID());
});
if (!AllCallsCanBeExternallyReferenced)
Summary->setNotEligibleToImport();
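The ModuleSummaryAnalysis changes re-key the edge and reference containers by a ValueInfo handle obtained from the index itself, which is also why the loop above now skips GUIDs whose SummaryList is empty: get-or-insert can create entries for symbols with no summary in the current module. A toy index illustrating the split between the inserting and lookup-only accessors (illustrative names, not the real LLVM types):

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <string>
#include <unordered_map>
#include <vector>

using GUID = uint64_t;
struct PerGUIDInfo { std::vector<std::string> SummaryList; };

class ToyIndex {
  std::unordered_map<GUID, PerGUIDInfo> Map;
public:
  // Always yields a usable entry, inserting an empty one if needed.
  PerGUIDInfo &getOrInsertValueInfo(GUID G) { return Map[G]; }
  // Lookup-only variant; failure is signaled by a null pointer.
  PerGUIDInfo *getValueInfo(GUID G) {
    auto It = Map.find(G);
    return It == Map.end() ? nullptr : &It->second;
  }
};

int main() {
  ToyIndex Index;
  Index.getOrInsertValueInfo(42).SummaryList.push_back("summary for 42");
  assert(Index.getValueInfo(42) && !Index.getValueInfo(7));
  // Recording an edge to GUID 7 creates an entry with no summary; the new
  // guard in buildModuleSummaryIndex skips exactly these entries.
  Index.getOrInsertValueInfo(7);
  assert(Index.getValueInfo(7)->SummaryList.empty());
  puts("get-or-insert index behaves as expected");
}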
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index bd747f7c0b7a1..01dca0793145f 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -2970,7 +2970,7 @@ static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
else if (ABW < BBW)
A = A.zext(BBW);
- return APIntOps::GreatestCommonDivisor(A, B);
+ return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B));
}
/// Get a canonical unsigned division expression, or something simpler if
@@ -4083,6 +4083,56 @@ static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
return None;
}
+/// A helper function for createAddRecFromPHI to handle simple cases.
+///
+/// This function tries to find an AddRec expression for the simplest (yet most
+/// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
+/// If it fails, createAddRecFromPHI will use a more general, but slow,
+/// technique for finding the AddRec expression.
+const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
+ Value *BEValueV,
+ Value *StartValueV) {
+ const Loop *L = LI.getLoopFor(PN->getParent());
+ assert(L && L->getHeader() == PN->getParent());
+ assert(BEValueV && StartValueV);
+
+ auto BO = MatchBinaryOp(BEValueV, DT);
+ if (!BO)
+ return nullptr;
+
+ if (BO->Opcode != Instruction::Add)
+ return nullptr;
+
+ const SCEV *Accum = nullptr;
+ if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
+ Accum = getSCEV(BO->RHS);
+ else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
+ Accum = getSCEV(BO->LHS);
+
+ if (!Accum)
+ return nullptr;
+
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
+ if (BO->IsNUW)
+ Flags = setFlags(Flags, SCEV::FlagNUW);
+ if (BO->IsNSW)
+ Flags = setFlags(Flags, SCEV::FlagNSW);
+
+ const SCEV *StartVal = getSCEV(StartValueV);
+ const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
+
+ ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
+
+ // We can add Flags to the post-inc expression only if we
+ // know that it is *undefined behavior* for BEValueV to
+ // overflow.
+ if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
+ if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
+ (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
+
+ return PHISCEV;
+}
+
const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
const Loop *L = LI.getLoopFor(PN->getParent());
if (!L || L->getHeader() != PN->getParent())
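createSimpleAffineAddRec short-circuits the common loop-header shape PN = phi [Start, preheader], [PN + Step, latch] with a loop-invariant Step, mapping it directly to the AddRec {Start,+,Step}. A tiny simulation confirming the closed form the fast path assumes:

#include <cassert>
#include <cstdio>

int main() {
  const int Start = 7, Step = 3;
  int PN = Start;
  for (int i = 0; i < 100; ++i) {
    assert(PN == Start + i * Step); // AddRec value on iteration i
    PN = PN + Step;                 // back-edge value: OP(Self, Invariant)
  }
  puts("{Start,+,Step} closed form matches the PHI recurrence");
}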
@@ -4111,10 +4161,16 @@ const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
if (!BEValueV || !StartValueV)
return nullptr;
- // While we are analyzing this PHI node, handle its value symbolically.
- const SCEV *SymbolicName = getUnknown(PN);
assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
"PHI node already processed?");
+
+ // First, try to find an AddRec expression without creating a fictitious
+ // symbolic value for PN.
+ if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
+ return S;
+
+ // Handle PHI node value symbolically.
+ const SCEV *SymbolicName = getUnknown(PN);
ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});
// Using this symbolic name for the PHI, analyze the value coming around
@@ -4189,7 +4245,7 @@ const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
// We can add Flags to the post-inc expression only if we
- // know that it us *undefined behavior* for BEValueV to
+ // know that it is *undefined behavior* for BEValueV to
// overflow.
if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
@@ -4744,7 +4800,7 @@ ScalarEvolution::getRange(const SCEV *S,
}
}
- return setRange(AddRec, SignHint, ConservativeResult);
+ return setRange(AddRec, SignHint, std::move(ConservativeResult));
}
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
@@ -4775,10 +4831,10 @@ ScalarEvolution::getRange(const SCEV *S,
APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1));
}
- return setRange(U, SignHint, ConservativeResult);
+ return setRange(U, SignHint, std::move(ConservativeResult));
}
- return setRange(S, SignHint, ConservativeResult);
+ return setRange(S, SignHint, std::move(ConservativeResult));
}
// Given a StartRange, Step and MaxBECount for an expression compute a range of
@@ -4786,8 +4842,8 @@ ScalarEvolution::getRange(const SCEV *S,
// from StartRange and then is changed by Step up to MaxBECount times. Signed
// argument defines if we treat Step as signed or unsigned.
static ConstantRange getRangeForAffineARHelper(APInt Step,
- ConstantRange StartRange,
- APInt MaxBECount,
+ const ConstantRange &StartRange,
+ const APInt &MaxBECount,
unsigned BitWidth, bool Signed) {
// If either Step or MaxBECount is 0, then the expression won't change, and we
// just need to return the initial range.
@@ -4826,8 +4882,8 @@ static ConstantRange getRangeForAffineARHelper(APInt Step,
// if the expression is decreasing and will be increased by Offset otherwise.
APInt StartLower = StartRange.getLower();
APInt StartUpper = StartRange.getUpper() - 1;
- APInt MovedBoundary =
- Descending ? (StartLower - Offset) : (StartUpper + Offset);
+ APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
+ : (StartUpper + std::move(Offset));
// It's possible that the new minimum/maximum value will fall into the initial
// range (due to wrap around). This means that the expression can take any
@@ -4835,21 +4891,18 @@ static ConstantRange getRangeForAffineARHelper(APInt Step,
if (StartRange.contains(MovedBoundary))
return ConstantRange(BitWidth, /* isFullSet = */ true);
- APInt NewLower, NewUpper;
- if (Descending) {
- NewLower = MovedBoundary;
- NewUpper = StartUpper;
- } else {
- NewLower = StartLower;
- NewUpper = MovedBoundary;
- }
+ APInt NewLower =
+ Descending ? std::move(MovedBoundary) : std::move(StartLower);
+ APInt NewUpper =
+ Descending ? std::move(StartUpper) : std::move(MovedBoundary);
+ NewUpper += 1;
// If we end up with full range, return a proper full range.
- if (NewLower == NewUpper + 1)
+ if (NewLower == NewUpper)
return ConstantRange(BitWidth, /* isFullSet = */ true);
// No overflow detected, return [StartLower, StartUpper + Offset + 1) range.
- return ConstantRange(NewLower, NewUpper + 1);
+ return ConstantRange(std::move(NewLower), std::move(NewUpper));
}
ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
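getRangeForAffineARHelper moves one boundary of the start range by Offset = |Step| * MaxBECount in the direction of travel; the refactoring above only changes how the new bounds are materialized (moves instead of copies, with the +1 folded into NewUpper up front). A sketch of the bound itself for the increasing, non-wrapping case:

#include <cassert>
#include <cstdio>

int main() {
  const unsigned StartLower = 100, StartUpper = 110; // start in [100, 110)
  const unsigned Step = 3, MaxBECount = 20;
  const unsigned Offset = Step * MaxBECount;

  for (unsigned S = StartLower; S < StartUpper; ++S)
    for (unsigned BE = 0; BE <= MaxBECount; ++BE) {
      unsigned V = S + BE * Step; // value after BE back-edges
      assert(V >= StartLower && V < StartUpper + Offset);
    }
  puts("affine range widening bound holds");
}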
@@ -7323,7 +7376,6 @@ SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
const APInt &M = MC->getAPInt();
const APInt &N = NC->getAPInt();
APInt Two(BitWidth, 2);
- APInt Four(BitWidth, 4);
{
using namespace APIntOps;
@@ -7339,7 +7391,7 @@ SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
// Compute the B^2-4ac term.
APInt SqrtTerm(B);
SqrtTerm *= B;
- SqrtTerm -= Four * (A * C);
+ SqrtTerm -= 4 * (A * C);
if (SqrtTerm.isNegative()) {
// The loop is provably infinite.
@@ -8887,7 +8939,7 @@ bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
if (!Addend)
return false;
- APInt ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
+ const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
// `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
// antecedent "`FoundLHS` `Pred` `FoundRHS`".
@@ -8899,7 +8951,7 @@ bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
// We can also compute the range of values for `LHS` that satisfy the
// consequent, "`LHS` `Pred` `RHS`":
- APInt ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
+ const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
ConstantRange SatisfyingLHSRange =
ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS);
@@ -8924,7 +8976,7 @@ bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
.getSignedMax();
// SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
- return (MaxValue - MaxStrideMinusOne).slt(MaxRHS);
+ return (std::move(MaxValue) - std::move(MaxStrideMinusOne)).slt(MaxRHS);
}
APInt MaxRHS = getUnsignedRange(RHS).getUnsignedMax();
@@ -8933,7 +8985,7 @@ bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
.getUnsignedMax();
// UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
- return (MaxValue - MaxStrideMinusOne).ult(MaxRHS);
+ return (std::move(MaxValue) - std::move(MaxStrideMinusOne)).ult(MaxRHS);
}
bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
@@ -8950,7 +9002,7 @@ bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
.getSignedMax();
// SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
- return (MinValue + MaxStrideMinusOne).sgt(MinRHS);
+ return (std::move(MinValue) + std::move(MaxStrideMinusOne)).sgt(MinRHS);
}
APInt MinRHS = getUnsignedRange(RHS).getUnsignedMin();
@@ -8959,7 +9011,7 @@ bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
.getUnsignedMax();
// UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
- return (MinValue + MaxStrideMinusOne).ugt(MinRHS);
+ return (std::move(MinValue) + std::move(MaxStrideMinusOne)).ugt(MinRHS);
}
const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step,
@@ -9250,9 +9302,8 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
// the upper value of the range must be the first possible exit value.
// If A is negative then the lower of the range is the last possible loop
// value. Also note that we already checked for a full range.
- APInt One(BitWidth,1);
APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt();
- APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();
+ APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower();
// The exit value should be (End+A)/A.
APInt ExitVal = (End + A).udiv(A);
@@ -9268,7 +9319,7 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
// Ensure that the previous value is in the range. This is a sanity check.
assert(Range.contains(
EvaluateConstantChrecAtConstant(this,
- ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) &&
+ ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) &&
"Linear scev computation is off in a bad way!");
return SE.getConstant(ExitValue);
} else if (isQuadratic()) {
@@ -9574,7 +9625,7 @@ const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
SmallVectorImpl<const SCEV *> &Sizes,
- const SCEV *ElementSize) const {
+ const SCEV *ElementSize) {
if (Terms.size() < 1 || !ElementSize)
return;
@@ -9590,7 +9641,7 @@ void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
});
// Remove duplicates.
- std::sort(Terms.begin(), Terms.end());
+ array_pod_sort(Terms.begin(), Terms.end());
Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());
// Put larger terms first.
@@ -9598,13 +9649,11 @@ void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
return numberOfTerms(LHS) > numberOfTerms(RHS);
});
- ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
-
// Try to divide all terms by the element size. If term is not divisible by
// element size, proceed with the original term.
for (const SCEV *&Term : Terms) {
const SCEV *Q, *R;
- SCEVDivision::divide(SE, Term, ElementSize, &Q, &R);
+ SCEVDivision::divide(*this, Term, ElementSize, &Q, &R);
if (!Q->isZero())
Term = Q;
}
@@ -9613,7 +9662,7 @@ void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
// Remove constant factors.
for (const SCEV *T : Terms)
- if (const SCEV *NewT = removeConstantFactors(SE, T))
+ if (const SCEV *NewT = removeConstantFactors(*this, T))
NewTerms.push_back(NewT);
DEBUG({
@@ -9622,8 +9671,7 @@ void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
dbgs() << *T << "\n";
});
- if (NewTerms.empty() ||
- !findArrayDimensionsRec(SE, NewTerms, Sizes)) {
+ if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) {
Sizes.clear();
return;
}
diff --git a/lib/Analysis/TargetLibraryInfo.cpp b/lib/Analysis/TargetLibraryInfo.cpp
index be734fa91425d..848e1b4717b59 100644
--- a/lib/Analysis/TargetLibraryInfo.cpp
+++ b/lib/Analysis/TargetLibraryInfo.cpp
@@ -1176,6 +1176,10 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1) == SizeTTy && FTy.getParamType(2) == SizeTTy);
+ case LibFunc_wcslen:
+ return (NumParams == 1 && FTy.getParamType(0)->isPointerTy() &&
+ FTy.getReturnType()->isIntegerTy());
+
case LibFunc::NumLibFuncs:
break;
}
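The added LibFunc_wcslen case accepts any prototype with a single pointer parameter and an integer return, i.e. the usual C declaration size_t wcslen(const wchar_t *s). A trivial usage check:

#include <cassert>
#include <cwchar>

int main() {
  assert(wcslen(L"abc") == 3); // counts wide characters, excluding the NUL
  return 0;
}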
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index 6ec175fc84e29..a7f3ff672aefc 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -59,8 +59,8 @@ static cl::opt<bool>
DontImproveNonNegativePhiBits("dont-improve-non-negative-phi-bits",
cl::Hidden, cl::init(true));
-/// Returns the bitwidth of the given scalar or pointer type (if unknown returns
-/// 0). For vector types, returns the element type's bitwidth.
+/// Returns the bitwidth of the given scalar or pointer type. For vector types,
+/// returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
if (unsigned BitWidth = Ty->getScalarSizeInBits())
return BitWidth;
@@ -342,7 +342,6 @@ static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
// Also compute a conservative estimate for high known-0 bits.
// More trickiness is possible, but this is sufficient for the
// interesting case of alignment computation.
- Known.One.clearAllBits();
unsigned TrailZ = Known.Zero.countTrailingOnes() +
Known2.Zero.countTrailingOnes();
unsigned LeadZ = std::max(Known.Zero.countLeadingOnes() +
@@ -351,7 +350,7 @@ static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
TrailZ = std::min(TrailZ, BitWidth);
LeadZ = std::min(LeadZ, BitWidth);
- Known.Zero.clearAllBits();
+ Known.resetAll();
Known.Zero.setLowBits(TrailZ);
Known.Zero.setHighBits(LeadZ);
@@ -529,15 +528,13 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
assert(BitWidth == 1 && "assume operand is not i1?");
- Known.Zero.clearAllBits();
- Known.One.setAllBits();
+ Known.setAllOnes();
return;
}
if (match(Arg, m_Not(m_Specific(V))) &&
isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
assert(BitWidth == 1 && "assume operand is not i1?");
- Known.Zero.setAllBits();
- Known.One.clearAllBits();
+ Known.setAllZero();
return;
}
@@ -719,7 +716,7 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
KnownBits RHSKnown(BitWidth);
computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
- if (RHSKnown.One.isAllOnesValue() || RHSKnown.isNonNegative()) {
+ if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
// We know that the sign bit is zero.
Known.makeNonNegative();
}
@@ -741,7 +738,7 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
KnownBits RHSKnown(BitWidth);
computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
- if (RHSKnown.Zero.isAllOnesValue() || RHSKnown.isNegative()) {
+ if (RHSKnown.isZero() || RHSKnown.isNegative()) {
// We know that the sign bit is one.
Known.makeNegative();
}
@@ -776,8 +773,7 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
// behavior, or we might have a bug in the compiler. We can't assert/crash, so
// clear out the known bits, try to warn the user, and hope for the best.
if (Known.Zero.intersects(Known.One)) {
- Known.Zero.clearAllBits();
- Known.One.clearAllBits();
+ Known.resetAll();
if (Q.ORE) {
auto *CxtI = const_cast<Instruction *>(Q.CxtI);
@@ -813,10 +809,8 @@ static void computeKnownBitsFromShiftOperator(
// If there is conflict between Known.Zero and Known.One, this must be an
// overflowing left shift, so the shift result is undefined. Clear Known
// bits so that other code could propagate this undef.
- if ((Known.Zero & Known.One) != 0) {
- Known.Zero.clearAllBits();
- Known.One.clearAllBits();
- }
+ if ((Known.Zero & Known.One) != 0)
+ Known.resetAll();
return;
}
@@ -826,8 +820,7 @@ static void computeKnownBitsFromShiftOperator(
// If the shift amount could be greater than or equal to the bit-width of the LHS, the
// value could be undef, so we don't know anything about it.
if ((~Known.Zero).uge(BitWidth)) {
- Known.Zero.clearAllBits();
- Known.One.clearAllBits();
+ Known.resetAll();
return;
}
@@ -839,8 +832,7 @@ static void computeKnownBitsFromShiftOperator(
// It would be more-clearly correct to use the two temporaries for this
// calculation. Reusing the APInts here to prevent unnecessary allocations.
- Known.Zero.clearAllBits();
- Known.One.clearAllBits();
+ Known.resetAll();
// If we know the shifter operand is nonzero, we can sometimes infer more
// known bits. However this is expensive to compute, so be lazy about it and
@@ -886,10 +878,8 @@ static void computeKnownBitsFromShiftOperator(
// return anything we'd like, but we need to make sure the sets of known bits
// stay disjoint (it should be better for some other code to actually
// propagate the undef than to pick a value here using known bits).
- if (Known.Zero.intersects(Known.One)) {
- Known.Zero.clearAllBits();
- Known.One.clearAllBits();
- }
+ if (Known.Zero.intersects(Known.One))
+ Known.resetAll();
}
static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
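Most of the ValueTracking churn around this function swaps paired clearAllBits()/setAllBits() calls for the new KnownBits helpers (resetAll, setAllZero, setAllOnes). The conflict case is the interesting one: if some bit is claimed both known-0 and known-1, no value satisfies the facts, so the analysis resets to "know nothing" rather than propagate an impossible state. A toy version:

#include <cassert>
#include <cstdint>
#include <cstdio>

struct Known8 {
  uint8_t Zero = 0, One = 0;
  bool hasConflict() const { return (Zero & One) != 0; }
  void resetAll() { Zero = 0; One = 0; } // the new single-call helper
};

int main() {
  Known8 K{0x01, 0x01}; // bit 0 claimed both known-0 and known-1
  assert(K.hasConflict());
  K.resetAll();         // was: two separate clearAllBits() calls
  assert(!K.hasConflict() && K.Zero == 0 && K.One == 0);
  puts("conflicting known bits reset to the bottom state");
}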
@@ -924,7 +914,7 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
m_Value(Y))) ||
match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
m_Value(Y))))) {
- Known2.Zero.clearAllBits(); Known2.One.clearAllBits();
+ Known2.resetAll();
computeKnownBits(Y, Known2, Depth + 1, Q);
if (Known2.One.countTrailingOnes() > 0)
Known.Zero.setBit(0);
@@ -965,8 +955,7 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
unsigned LeadZ = Known2.Zero.countLeadingOnes();
- Known2.One.clearAllBits();
- Known2.Zero.clearAllBits();
+ Known2.resetAll();
computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
unsigned RHSUnknownLeadingOnes = Known2.One.countLeadingZeros();
if (RHSUnknownLeadingOnes != BitWidth)
@@ -1051,11 +1040,9 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
SrcBitWidth = Q.DL.getTypeSizeInBits(SrcTy->getScalarType());
assert(SrcBitWidth && "SrcBitWidth can't be zero");
- Known.Zero = Known.Zero.zextOrTrunc(SrcBitWidth);
- Known.One = Known.One.zextOrTrunc(SrcBitWidth);
+ Known = Known.zextOrTrunc(SrcBitWidth);
computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
- Known.Zero = Known.Zero.zextOrTrunc(BitWidth);
- Known.One = Known.One.zextOrTrunc(BitWidth);
+ Known = Known.zextOrTrunc(BitWidth);
// Any top bits are known to be zero.
if (BitWidth > SrcBitWidth)
Known.Zero.setBitsFrom(SrcBitWidth);
@@ -1076,13 +1063,11 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
// Compute the bits in the result that are not present in the input.
unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
- Known.Zero = Known.Zero.trunc(SrcBitWidth);
- Known.One = Known.One.trunc(SrcBitWidth);
+ Known = Known.trunc(SrcBitWidth);
computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
// If the sign bit of the input is known set or clear, then we know the
// top bits of the result.
- Known.Zero = Known.Zero.sext(BitWidth);
- Known.One = Known.One.sext(BitWidth);
+ Known = Known.sext(BitWidth);
break;
}
case Instruction::Shl: {
@@ -1202,8 +1187,7 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
unsigned Leaders = std::max(Known.Zero.countLeadingOnes(),
Known2.Zero.countLeadingOnes());
- Known.One.clearAllBits();
- Known.Zero.clearAllBits();
+ Known.resetAll();
Known.Zero.setHighBits(Leaders);
break;
}
@@ -1504,8 +1488,7 @@ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
}
// Null and aggregate-zero are all-zeros.
if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
- Known.One.clearAllBits();
- Known.Zero.setAllBits();
+ Known.setAllZero();
return;
}
// Handle a constant vector by taking the intersection of the known bits of
@@ -1532,8 +1515,7 @@ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
Constant *Element = CV->getAggregateElement(i);
auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
if (!ElementCI) {
- Known.Zero.clearAllBits();
- Known.One.clearAllBits();
+ Known.resetAll();
return;
}
Elt = ElementCI->getValue();
@@ -1544,7 +1526,7 @@ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
}
// Start out not knowing anything.
- Known.Zero.clearAllBits(); Known.One.clearAllBits();
+ Known.resetAll();
// We can't imply anything about undefs.
if (isa<UndefValue>(V))
@@ -1590,13 +1572,7 @@ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
/// Convenience wrapper around computeKnownBits.
void ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
unsigned Depth, const Query &Q) {
- unsigned BitWidth = getBitWidth(V->getType(), Q.DL);
- if (!BitWidth) {
- KnownZero = false;
- KnownOne = false;
- return;
- }
- KnownBits Bits(BitWidth);
+ KnownBits Bits(getBitWidth(V->getType(), Q.DL));
computeKnownBits(V, Bits, Depth, Q);
KnownOne = Bits.isNegative();
KnownZero = Bits.isNonNegative();
@@ -1847,7 +1823,7 @@ bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
// shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
// if the lowest bit is shifted off the end.
- if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) {
+ if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
// shl nuw can't remove any non-zero bits.
const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
if (BO->hasNoUnsignedWrap())
@@ -1906,7 +1882,7 @@ bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
// If X and Y are both negative (as signed values) then their sum is not
// zero unless both X and Y equal INT_MIN.
- if (BitWidth && XKnownNegative && YKnownNegative) {
+ if (XKnownNegative && YKnownNegative) {
KnownBits Known(BitWidth);
APInt Mask = APInt::getSignedMaxValue(BitWidth);
// The sign bit of X is set. If some other bit is set then X is not equal
@@ -1971,7 +1947,6 @@ bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
return true;
}
- if (!BitWidth) return false;
KnownBits Known(BitWidth);
computeKnownBits(V, Known, Depth, Q);
return Known.One != 0;
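One of the isKnownNonZero facts retained above: shl X, Y is non-zero when X is odd and the shift amount is in range, because X's low set bit lands at position Y rather than being shifted out. An exhaustive i8 check:

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  for (unsigned x = 1; x < 256; x += 2) // every odd i8 value
    for (unsigned y = 0; y < 8; ++y)    // every in-range shift amount
      assert(uint8_t(x << y) != 0);
  puts("shl of an odd value with an in-range shift is never zero");
}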