author    Dimitry Andric <dim@FreeBSD.org>  2020-01-17 20:45:01 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2020-01-17 20:45:01 +0000
commit    706b4fc47bbc608932d3b491ae19a3b9cde9497b
tree      4adf86a776049cbf7f69a1929c4babcbbef925eb  /llvm/lib/Analysis/ValueTracking.cpp
parent    7cc9cf2bf09f069cb2dd947ead05d0b54301fb71
Diffstat (limited to 'llvm/lib/Analysis/ValueTracking.cpp')
-rw-r--r--  llvm/lib/Analysis/ValueTracking.cpp | 223
1 file changed, 200 insertions(+), 23 deletions(-)
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index bbf3899918367..ad6765e2514b4 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -51,6 +51,8 @@
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicsAArch64.h"
+#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
@@ -88,7 +90,7 @@ static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
if (unsigned BitWidth = Ty->getScalarSizeInBits())
return BitWidth;
- return DL.getIndexTypeSizeInBits(Ty);
+ return DL.getPointerTypeSizeInBits(Ty);
}
namespace {
@@ -564,17 +566,83 @@ bool llvm::isValidAssumeForContext(const Instruction *Inv,
if (Inv == CxtI)
return false;
- // The context comes first, but they're both in the same block. Make sure
- // there is nothing in between that might interrupt the control flow.
- for (BasicBlock::const_iterator I =
- std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
- I != IE; ++I)
+ // The context comes first, but they're both in the same block.
+ // Make sure there is nothing in between that might interrupt
+ // the control flow, not even CxtI itself.
+ for (BasicBlock::const_iterator I(CxtI), IE(Inv); I != IE; ++I)
if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
return false;
return !isEphemeralValueOf(Inv, CxtI);
}
+static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
+ // Use of assumptions is context-sensitive. If we don't have a context, we
+ // cannot use them!
+ if (!Q.AC || !Q.CxtI)
+ return false;
+
+ // Note that the patterns below need to be kept in sync with the code
+ // in AssumptionCache::updateAffectedValues.
+
+ auto CmpExcludesZero = [V](ICmpInst *Cmp) {
+ auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
+
+ Value *RHS;
+ CmpInst::Predicate Pred;
+ if (!match(Cmp, m_c_ICmp(Pred, m_V, m_Value(RHS))))
+ return false;
+ // Canonicalize 'v' to be on the LHS of the comparison.
+ if (Cmp->getOperand(1) != RHS)
+ Pred = CmpInst::getSwappedPredicate(Pred);
+
+ // assume(v u> y) -> assume(v != 0)
+ if (Pred == ICmpInst::ICMP_UGT)
+ return true;
+
+ // assume(v != 0)
+ // We special-case this one to ensure that we handle `assume(v != null)`.
+ if (Pred == ICmpInst::ICMP_NE)
+ return match(RHS, m_Zero());
+
+ // All other predicates - rely on generic ConstantRange handling.
+ ConstantInt *CI;
+ if (!match(RHS, m_ConstantInt(CI)))
+ return false;
+ ConstantRange RHSRange(CI->getValue());
+ ConstantRange TrueValues =
+ ConstantRange::makeAllowedICmpRegion(Pred, RHSRange);
+ return !TrueValues.contains(APInt::getNullValue(CI->getBitWidth()));
+ };
+
+ for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
+ if (!AssumeVH)
+ continue;
+ CallInst *I = cast<CallInst>(AssumeVH);
+ assert(I->getFunction() == Q.CxtI->getFunction() &&
+ "Got assumption for the wrong function!");
+ if (Q.isExcluded(I))
+ continue;
+
+  // Warning: This loop can end up being somewhat performance sensitive.
+  // We're running this loop once for each value queried, resulting in a
+  // runtime of ~O(#assumes * #values).
+
+ assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
+ "must be an assume intrinsic");
+
+ Value *Arg = I->getArgOperand(0);
+ ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
+ if (!Cmp)
+ continue;
+
+ if (CmpExcludesZero(Cmp) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
+ return true;
+ }
+
+ return false;
+}
+
static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
unsigned Depth, const Query &Q) {
// Use of assumptions is context-sensitive. If we don't have a context, we
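Note: the ConstantRange fallback in the isKnownNonZeroFromAssume helper above generalizes the two special-cased predicates: any assume of the form "v <pred> C" proves v non-zero whenever the set of values satisfying the predicate excludes zero. A minimal standalone sketch of that check, assuming the LLVM C++ headers (the helper name is illustrative):

    #include "llvm/ADT/APInt.h"
    #include "llvm/IR/ConstantRange.h"
    #include "llvm/IR/InstrTypes.h"
    using namespace llvm;

    // Hypothetical helper: does "v <Pred> C" rule out v == 0?
    static bool predicateExcludesZero(CmpInst::Predicate Pred, const APInt &C) {
      ConstantRange TrueValues =
          ConstantRange::makeAllowedICmpRegion(Pred, ConstantRange(C));
      // E.g. for Pred == ICMP_UGT and C == 5, TrueValues is [6, 0), i.e.
      // 6..UINT_MAX, which excludes zero, so the assume proves v != 0.
      return !TrueValues.contains(APInt::getNullValue(C.getBitWidth()));
    }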
@@ -915,7 +983,7 @@ static void computeKnownBitsFromShiftOperator(
// If the shift amount could be greater than or equal to the bit-width of the
// LHS, the value could be poison, but bail out because the check below is
// expensive. TODO: Should we just carry on?
- if ((~Known.Zero).uge(BitWidth)) {
+ if (Known.getMaxValue().uge(BitWidth)) {
Known.resetAll();
return;
}
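Note: for a KnownBits value, getMaxValue() returns ~Zero (every bit not known to be zero set), so the rewritten condition is equivalent to the old (~Known.Zero).uge(BitWidth) but states the intent directly. A sketch of the bail-out in isolation, assuming llvm/Support/KnownBits.h (the helper name is illustrative):

    #include "llvm/Support/KnownBits.h"
    using namespace llvm;

    // Conservatively report that a shift amount might be >= the bit width,
    // in which case the shifted value could be poison.
    static bool shiftAmountMayBeTooLarge(const KnownBits &ShiftAmt,
                                         unsigned BitWidth) {
      // getMaxValue() sets every bit not known zero: it equals ~ShiftAmt.Zero.
      return ShiftAmt.getMaxValue().uge(BitWidth);
    }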
@@ -1135,7 +1203,7 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
// which fall through here.
Type *ScalarTy = SrcTy->getScalarType();
SrcBitWidth = ScalarTy->isPointerTy() ?
- Q.DL.getIndexTypeSizeInBits(ScalarTy) :
+ Q.DL.getPointerTypeSizeInBits(ScalarTy) :
Q.DL.getTypeSizeInBits(ScalarTy);
assert(SrcBitWidth && "SrcBitWidth can't be zero");
@@ -1353,6 +1421,8 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
for (unsigned i = 0; i != 2; ++i) {
Value *L = P->getIncomingValue(i);
Value *R = P->getIncomingValue(!i);
+ Instruction *RInst = P->getIncomingBlock(!i)->getTerminator();
+ Instruction *LInst = P->getIncomingBlock(i)->getTerminator();
Operator *LU = dyn_cast<Operator>(L);
if (!LU)
continue;
@@ -1374,13 +1444,22 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
L = LL;
else
continue; // Check for recurrence with L and R flipped.
+
+ // Change the context instruction to the "edge" that flows into the
+ // phi. This is important because that is where the value is actually
+ // "evaluated" even though it is used later somewhere else. (see also
+ // D69571).
+ Query RecQ = Q;
+
// Ok, we have a PHI of the form L op= R. Check for low
// zero bits.
- computeKnownBits(R, Known2, Depth + 1, Q);
+ RecQ.CxtI = RInst;
+ computeKnownBits(R, Known2, Depth + 1, RecQ);
// We need to take the minimum number of known bits
KnownBits Known3(Known);
- computeKnownBits(L, Known3, Depth + 1, Q);
+ RecQ.CxtI = LInst;
+ computeKnownBits(L, Known3, Depth + 1, RecQ);
Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
Known3.countMinTrailingZeros()));
@@ -1436,14 +1515,22 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
Known.Zero.setAllBits();
Known.One.setAllBits();
- for (Value *IncValue : P->incoming_values()) {
+ for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
+ Value *IncValue = P->getIncomingValue(u);
// Skip direct self references.
if (IncValue == P) continue;
+ // Change the context instruction to the "edge" that flows into the
+ // phi. This is important because that is where the value is actually
+ // "evaluated" even though it is used later somewhere else. (see also
+ // D69571).
+ Query RecQ = Q;
+ RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();
+
Known2 = KnownBits(BitWidth);
// Recurse, but cap the recursion to one level, because we don't
// want to waste time spinning around in loops.
- computeKnownBits(IncValue, Known2, MaxDepth - 1, Q);
+ computeKnownBits(IncValue, Known2, MaxDepth - 1, RecQ);
Known.Zero &= Known2.Zero;
Known.One &= Known2.One;
// If all bits have been ruled out, there's no need to check
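Note: the edge-sensitive context in both phi transforms above matters because a fact (an assume, a dominating condition) can hold on one incoming edge without holding at the phi itself; the incoming value is really "evaluated" where it flows into the phi. A condensed sketch of the choice, with an illustrative helper name:

    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // The context at which a phi's incoming value is evaluated is the
    // terminator of the corresponding predecessor block, so facts established
    // on that edge apply to the incoming value.
    static const Instruction *contextForIncoming(const PHINode *P, unsigned u) {
      return P->getIncomingBlock(u)->getTerminator();
    }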
@@ -1643,7 +1730,7 @@ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
Type *ScalarTy = V->getType()->getScalarType();
unsigned ExpectedWidth = ScalarTy->isPointerTy() ?
- Q.DL.getIndexTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
+ Q.DL.getPointerTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
assert(ExpectedWidth == BitWidth && "V and Known should have same BitWidth");
(void)BitWidth;
(void)ExpectedWidth;
@@ -1902,8 +1989,8 @@ static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
static bool isKnownNonNullFromDominatingCondition(const Value *V,
const Instruction *CtxI,
const DominatorTree *DT) {
- assert(V->getType()->isPointerTy() && "V must be pointer type");
- assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull");
+ if (isa<Constant>(V))
+ return false;
if (!CtxI || !DT)
return false;
@@ -1924,6 +2011,15 @@ static bool isKnownNonNullFromDominatingCondition(const Value *V,
Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
return true;
+  // If the value is used as the pointer operand of a load/store, then it
+  // must be non-null.
+ if (V == getLoadStorePointerOperand(U)) {
+ const Instruction *I = cast<Instruction>(U);
+ if (!NullPointerIsDefined(I->getFunction(),
+ V->getType()->getPointerAddressSpace()) &&
+ DT->dominates(I, CtxI))
+ return true;
+ }
+
// Consider only compare instructions uniquely controlling a branch
CmpInst::Predicate Pred;
if (!match(const_cast<User *>(U),
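Note: the load/store clause above rests on the rule that dereferencing a pointer is undefined behavior when null is not a defined address in the pointer's address space, so any dominating memory access through V proves it non-null. A source-level illustration of what this enables (hypothetical function, default address space):

    #include <cstdlib>

    void f(int *p) {
      *p = 0;            // the store dereferences p: p must be non-null here
      if (p == nullptr)  // dominated by the store, so this folds to false
        std::abort();    // and the dead branch can be removed
    }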
@@ -2050,6 +2146,9 @@ bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
}
}
+ if (isKnownNonZeroFromAssume(V, Q))
+ return true;
+
// Some of the tests below are recursive, so bail out if we hit the limit.
if (Depth++ >= MaxDepth)
return false;
@@ -2078,12 +2177,11 @@ bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
}
}
+ if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
+ return true;
// Check for recursive pointer simplifications.
if (V->getType()->isPointerTy()) {
- if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
- return true;
-
// Look through bitcast operations, GEPs, and int2ptr instructions as they
// do not alter the value, or at least not the nullness property of the
// value, e.g., int2ptr is allowed to zero/sign extend the value.
@@ -2380,7 +2478,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
Type *ScalarTy = V->getType()->getScalarType();
unsigned TyBits = ScalarTy->isPointerTy() ?
- Q.DL.getIndexTypeSizeInBits(ScalarTy) :
+ Q.DL.getPointerTypeSizeInBits(ScalarTy) :
Q.DL.getTypeSizeInBits(ScalarTy);
unsigned Tmp, Tmp2;
@@ -3095,6 +3193,58 @@ bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
}
+bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
+ unsigned Depth) {
+ assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type");
+
+ // If we're told that infinities won't happen, assume they won't.
+ if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
+ if (FPMathOp->hasNoInfs())
+ return true;
+
+ // Handle scalar constants.
+ if (auto *CFP = dyn_cast<ConstantFP>(V))
+ return !CFP->isInfinity();
+
+ if (Depth == MaxDepth)
+ return false;
+
+ if (auto *Inst = dyn_cast<Instruction>(V)) {
+ switch (Inst->getOpcode()) {
+ case Instruction::Select: {
+ return isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1) &&
+ isKnownNeverInfinity(Inst->getOperand(2), TLI, Depth + 1);
+ }
+ case Instruction::UIToFP:
+    // If the input type fits into the floating-point type, the result is
+    // finite.
+ return ilogb(APFloat::getLargest(
+ Inst->getType()->getScalarType()->getFltSemantics())) >=
+ (int)Inst->getOperand(0)->getType()->getScalarSizeInBits();
+ default:
+ break;
+ }
+ }
+
+ // Bail out for constant expressions, but try to handle vector constants.
+ if (!V->getType()->isVectorTy() || !isa<Constant>(V))
+ return false;
+
+ // For vectors, verify that each element is not infinity.
+ unsigned NumElts = V->getType()->getVectorNumElements();
+ for (unsigned i = 0; i != NumElts; ++i) {
+ Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
+ if (!Elt)
+ return false;
+ if (isa<UndefValue>(Elt))
+ continue;
+ auto *CElt = dyn_cast<ConstantFP>(Elt);
+ if (!CElt || CElt->isInfinity())
+ return false;
+ }
+ // All elements were confirmed non-infinity or undefined.
+ return true;
+}
+
bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
unsigned Depth) {
assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
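Note: the UIToFP case in isKnownNeverInfinity above is a worked bound: an N-bit unsigned integer is at most 2^N - 1, so the conversion can never round up to infinity when the destination format can represent values of at least 2^N - 1. A standalone check mirroring that comparison, assuming the LLVM APFloat headers (the helper name is illustrative):

    #include "llvm/ADT/APFloat.h"
    using namespace llvm;

    // Can "uitofp iN ... to FP" ever produce +infinity? Not when the format's
    // largest finite exponent is at least N.
    static bool uitofpIsAlwaysFinite(unsigned SrcBits, const fltSemantics &Sem) {
      // For IEEE single, ilogb(largest) == 127: i32 (127 >= 32) is always
      // finite, while i128 (127 < 128) can round to +infinity.
      return ilogb(APFloat::getLargest(Sem)) >= (int)SrcBits;
    }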
@@ -3114,13 +3264,26 @@ bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
if (auto *Inst = dyn_cast<Instruction>(V)) {
switch (Inst->getOpcode()) {
case Instruction::FAdd:
- case Instruction::FMul:
case Instruction::FSub:
+ // Adding positive and negative infinity produces NaN.
+ return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
+ isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
+ (isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) ||
+ isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1));
+
+ case Instruction::FMul:
+    // Zero multiplied by infinity produces NaN.
+    // FIXME: If neither side can be zero, fmul never produces NaN.
+ return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
+ isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) &&
+ isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
+ isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1);
+
case Instruction::FDiv:
- case Instruction::FRem: {
- // TODO: Need isKnownNeverInfinity
+ case Instruction::FRem:
+ // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN.
return false;
- }
+
case Instruction::Select: {
return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
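Note: the FAdd/FSub rule above encodes the only way a sum of two non-NaN operands can produce NaN: adding infinities of opposite sign (for fsub, subtracting infinities of the same sign). A numeric illustration in plain C++, following IEEE-754 semantics:

    #include <cstdio>
    #include <limits>

    int main() {
      double inf = std::numeric_limits<double>::infinity();
      std::printf("%f\n", inf + (-inf)); // nan: both operands were infinite
      std::printf("%f\n", inf + 1.0);    // inf: one finite operand, no NaN
      std::printf("%f\n", 0.0 * inf);    // nan: the FMul hazard checked above
    }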
@@ -4222,6 +4385,20 @@ bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
}
+bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V) {
+ // If the value is a freeze instruction, then it can never
+ // be undef or poison.
+ if (isa<FreezeInst>(V))
+ return true;
+ // TODO: Some instructions are guaranteed to return neither undef
+ // nor poison if their arguments are not poison/undef.
+
+ // TODO: Deal with other Constant subclasses.
+ if (isa<ConstantInt>(V) || isa<GlobalVariable>(V))
+ return true;
+
+ return false;
+}
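Note: freeze is the canonical producer of such a value: it returns its operand unchanged unless the operand is undef or poison, in which case it returns an arbitrary but fixed value. A minimal sketch (illustrative helper, assuming LLVM's IRBuilder) of obtaining a value for which the new query returns true:

    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    // Freezing V yields a value that is guaranteed to be neither undef nor
    // poison, so isGuaranteedNotToBeUndefOrPoison(Frozen) returns true.
    static Value *makeWellDefined(IRBuilder<> &B, Value *V) {
      return B.CreateFreeze(V);
    }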
OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
const DataLayout &DL,