Diffstat (limited to 'lib/Analysis/ValueTracking.cpp')
-rw-r--r--  lib/Analysis/ValueTracking.cpp  658
1 file changed, 386 insertions, 272 deletions
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index c70906dcc629..bbf389991836 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -558,12 +558,18 @@ bool llvm::isValidAssumeForContext(const Instruction *Inv,
return true;
}
+ // Don't let an assume affect itself - this would cause the problems
+ // `isEphemeralValueOf` is trying to prevent, and it would also make
+ // the loop below go out of bounds.
+ if (Inv == CxtI)
+ return false;
+
// The context comes first, but they're both in the same block. Make sure
// there is nothing in between that might interrupt the control flow.
for (BasicBlock::const_iterator I =
std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
I != IE; ++I)
- if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
+ if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
return false;
return !isEphemeralValueOf(Inv, CxtI);
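The new early exit guards against an assume being asked to justify itself: with `Inv == CxtI`, the scan that starts at `std::next(CxtI)` would never meet its end iterator inside the block. A standalone sketch of the same scan (plain C++, not the LLVM API; `FakeInst` and `assumeValidAtContext` are made-up names for illustration):

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

struct FakeInst {
  bool GuaranteedToTransfer; // models isGuaranteedToTransferExecutionToSuccessor
};

// True if the assume at AssumeIdx may be used at CtxIdx, given that both live
// in the same block and the context instruction comes first.
bool assumeValidAtContext(const std::vector<FakeInst> &Block,
                          std::size_t CtxIdx, std::size_t AssumeIdx) {
  if (AssumeIdx == CtxIdx) // an assume must not justify itself
    return false;
  assert(CtxIdx < AssumeIdx && "context instruction is expected to come first");
  // Every instruction strictly between the context and the assume must be
  // guaranteed to reach its successor, or the assume may never execute.
  for (std::size_t I = CtxIdx + 1; I != AssumeIdx; ++I)
    if (!Block[I].GuaranteedToTransfer)
      return false;
  return true;
}

int main() {
  std::vector<FakeInst> Block = {{true}, {false}, {true}}; // middle op may not return
  return assumeValidAtContext(Block, 0, 2) ? 1 : 0;        // returns 0: scan rejects it
}
```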
@@ -1049,7 +1055,7 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
break;
}
case Instruction::Select: {
- const Value *LHS, *RHS;
+ const Value *LHS = nullptr, *RHS = nullptr;
SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
if (SelectPatternResult::isMinOrMax(SPF)) {
computeKnownBits(RHS, Known, Depth + 1, Q);
@@ -1095,7 +1101,8 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
// RHS from matchSelectPattern returns the negation part of abs pattern.
// If the negate has an NSW flag we can assume the sign bit of the result
// will be 0 because that makes abs(INT_MIN) undefined.
- if (Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
+ if (match(RHS, m_Neg(m_Specific(LHS))) &&
+ Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
MaxHighZeros = 1;
}
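The added `match(RHS, m_Neg(m_Specific(LHS)))` check makes sure the nsw flag being inspected really belongs to the negation of the select's other arm. The sign-bit claim itself rests on abs(INT_MIN) being undefined under nsw; a plain C++ illustration (not LLVM IR) of what a wrapping negate does instead:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  int32_t X = INT32_MIN;
  // A wrapping negate, i.e. "sub 0, X" without the nsw flag.
  int32_t NegX = static_cast<int32_t>(0u - static_cast<uint32_t>(X));
  int32_t Abs = X < 0 ? NegX : X;
  std::printf("wrapping abs(INT32_MIN) = %d\n", Abs); // prints -2147483648
  return 0;
}
```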
@@ -1366,7 +1373,7 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
else if (LR == I)
L = LL;
else
- break;
+ continue; // Check for recurrence with L and R flipped.
// Ok, we have a PHI of the form L op= R. Check for low
// zero bits.
computeKnownBits(R, Known2, Depth + 1, Q);
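Switching `break` to `continue` lets the loop retry the recurrence match with L and R swapped. The low-zero-bit fact being extracted above can be checked with a tiny standalone loop (plain C++, illustrative only): if the start value and the step share trailing zero bits, every value of the induction PHI keeps them.

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t Start = 8, Step = 4; // both multiples of 4: two known low zero bits
  uint32_t I = Start;
  for (int N = 0; N < 5; ++N) {
    std::printf("i = %2u  (i & 3) = %u\n", I, I & 3u);
    I += Step;
  }
  return 0;
}
```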
@@ -1714,9 +1721,9 @@ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
// Aligned pointers have trailing zeros - refine Known.Zero set
if (V->getType()->isPointerTy()) {
- unsigned Align = V->getPointerAlignment(Q.DL);
+ const MaybeAlign Align = V->getPointerAlignment(Q.DL);
if (Align)
- Known.Zero.setLowBits(countTrailingZeros(Align));
+ Known.Zero.setLowBits(countTrailingZeros(Align->value()));
}
// computeKnownBitsFromAssume strictly refines Known.
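The alignment refinement above turns a known pointer alignment into known-zero low bits: an A-byte-aligned pointer has countTrailingZeros(A) trailing zero bits. A quick standalone check (plain C++):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  alignas(16) static char Buf[16];
  uintptr_t P = reinterpret_cast<uintptr_t>(Buf);
  std::printf("p & 0xF = %u (16-byte alignment => 4 known zero low bits)\n",
              static_cast<unsigned>(P & 0xF));
  return 0;
}
```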
@@ -2066,7 +2073,7 @@ bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
if (const auto *Call = dyn_cast<CallBase>(V)) {
if (Call->isReturnNonNull())
return true;
- if (const auto *RP = getArgumentAliasingToReturnedPointer(Call))
+ if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
return isKnownNonZero(RP, Depth, Q);
}
}
@@ -2300,7 +2307,7 @@ static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
cast<Operator>(Select)->getOpcode() == Instruction::Select &&
"Input should be a Select!");
- const Value *LHS, *RHS, *LHS2, *RHS2;
+ const Value *LHS = nullptr, *RHS = nullptr;
SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
if (SPF != SPF_SMAX && SPF != SPF_SMIN)
return false;
@@ -2308,6 +2315,7 @@ static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
if (!match(RHS, m_APInt(CLow)))
return false;
+ const Value *LHS2 = nullptr, *RHS2 = nullptr;
SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
if (getInverseMinMaxFlavor(SPF) != SPF2)
return false;
@@ -2384,253 +2392,256 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
if (Depth == MaxDepth)
return 1; // Limit search depth.
- const Operator *U = dyn_cast<Operator>(V);
- switch (Operator::getOpcode(V)) {
- default: break;
- case Instruction::SExt:
- Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
- return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
+ if (auto *U = dyn_cast<Operator>(V)) {
+ switch (Operator::getOpcode(V)) {
+ default: break;
+ case Instruction::SExt:
+ Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
+ return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
- case Instruction::SDiv: {
- const APInt *Denominator;
- // sdiv X, C -> adds log(C) sign bits.
- if (match(U->getOperand(1), m_APInt(Denominator))) {
+ case Instruction::SDiv: {
+ const APInt *Denominator;
+ // sdiv X, C -> adds log(C) sign bits.
+ if (match(U->getOperand(1), m_APInt(Denominator))) {
- // Ignore non-positive denominator.
- if (!Denominator->isStrictlyPositive())
- break;
+ // Ignore non-positive denominator.
+ if (!Denominator->isStrictlyPositive())
+ break;
- // Calculate the incoming numerator bits.
- unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
+ // Calculate the incoming numerator bits.
+ unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
- // Add floor(log(C)) bits to the numerator bits.
- return std::min(TyBits, NumBits + Denominator->logBase2());
+ // Add floor(log(C)) bits to the numerator bits.
+ return std::min(TyBits, NumBits + Denominator->logBase2());
+ }
+ break;
}
- break;
- }
-
- case Instruction::SRem: {
- const APInt *Denominator;
- // srem X, C -> we know that the result is within [-C+1,C) when C is a
- // positive constant. This let us put a lower bound on the number of sign
- // bits.
- if (match(U->getOperand(1), m_APInt(Denominator))) {
-
- // Ignore non-positive denominator.
- if (!Denominator->isStrictlyPositive())
- break;
- // Calculate the incoming numerator bits. SRem by a positive constant
- // can't lower the number of sign bits.
- unsigned NumrBits =
- ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
+ case Instruction::SRem: {
+ const APInt *Denominator;
+ // srem X, C -> we know that the result is within [-C+1,C) when C is a
+ // positive constant. This lets us put a lower bound on the number of sign
+ // bits.
+ if (match(U->getOperand(1), m_APInt(Denominator))) {
- // Calculate the leading sign bit constraints by examining the
- // denominator. Given that the denominator is positive, there are two
- // cases:
- //
- // 1. the numerator is positive. The result range is [0,C) and [0,C) u<
- // (1 << ceilLogBase2(C)).
- //
- // 2. the numerator is negative. Then the result range is (-C,0] and
- // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
- //
- // Thus a lower bound on the number of sign bits is `TyBits -
- // ceilLogBase2(C)`.
+ // Ignore non-positive denominator.
+ if (!Denominator->isStrictlyPositive())
+ break;
- unsigned ResBits = TyBits - Denominator->ceilLogBase2();
- return std::max(NumrBits, ResBits);
+ // Calculate the incoming numerator bits. SRem by a positive constant
+ // can't lower the number of sign bits.
+ unsigned NumrBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
+
+ // Calculate the leading sign bit constraints by examining the
+ // denominator. Given that the denominator is positive, there are two
+ // cases:
+ //
+ // 1. the numerator is positive. The result range is [0,C) and [0,C) u<
+ // (1 << ceilLogBase2(C)).
+ //
+ // 2. the numerator is negative. Then the result range is (-C,0] and
+ // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
+ //
+ // Thus a lower bound on the number of sign bits is `TyBits -
+ // ceilLogBase2(C)`.
+
+ unsigned ResBits = TyBits - Denominator->ceilLogBase2();
+ return std::max(NumrBits, ResBits);
+ }
+ break;
}
- break;
- }
- case Instruction::AShr: {
- Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
- // ashr X, C -> adds C sign bits. Vectors too.
- const APInt *ShAmt;
- if (match(U->getOperand(1), m_APInt(ShAmt))) {
- if (ShAmt->uge(TyBits))
- break; // Bad shift.
- unsigned ShAmtLimited = ShAmt->getZExtValue();
- Tmp += ShAmtLimited;
- if (Tmp > TyBits) Tmp = TyBits;
- }
- return Tmp;
- }
- case Instruction::Shl: {
- const APInt *ShAmt;
- if (match(U->getOperand(1), m_APInt(ShAmt))) {
- // shl destroys sign bits.
+ case Instruction::AShr: {
Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
- if (ShAmt->uge(TyBits) || // Bad shift.
- ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
- Tmp2 = ShAmt->getZExtValue();
- return Tmp - Tmp2;
+ // ashr X, C -> adds C sign bits. Vectors too.
+ const APInt *ShAmt;
+ if (match(U->getOperand(1), m_APInt(ShAmt))) {
+ if (ShAmt->uge(TyBits))
+ break; // Bad shift.
+ unsigned ShAmtLimited = ShAmt->getZExtValue();
+ Tmp += ShAmtLimited;
+ if (Tmp > TyBits) Tmp = TyBits;
+ }
+ return Tmp;
}
- break;
- }
- case Instruction::And:
- case Instruction::Or:
- case Instruction::Xor: // NOT is handled here.
- // Logical binary ops preserve the number of sign bits at the worst.
- Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
- if (Tmp != 1) {
- Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
- FirstAnswer = std::min(Tmp, Tmp2);
- // We computed what we know about the sign bits as our first
- // answer. Now proceed to the generic code that uses
- // computeKnownBits, and pick whichever answer is better.
+ case Instruction::Shl: {
+ const APInt *ShAmt;
+ if (match(U->getOperand(1), m_APInt(ShAmt))) {
+ // shl destroys sign bits.
+ Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
+ if (ShAmt->uge(TyBits) || // Bad shift.
+ ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
+ Tmp2 = ShAmt->getZExtValue();
+ return Tmp - Tmp2;
+ }
+ break;
}
- break;
-
- case Instruction::Select: {
- // If we have a clamp pattern, we know that the number of sign bits will be
- // the minimum of the clamp min/max range.
- const Value *X;
- const APInt *CLow, *CHigh;
- if (isSignedMinMaxClamp(U, X, CLow, CHigh))
- return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
-
- Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
- if (Tmp == 1) break;
- Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
- return std::min(Tmp, Tmp2);
- }
-
- case Instruction::Add:
- // Add can have at most one carry bit. Thus we know that the output
- // is, at worst, one more bit than the inputs.
- Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
- if (Tmp == 1) break;
-
- // Special case decrementing a value (ADD X, -1):
- if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
- if (CRHS->isAllOnesValue()) {
- KnownBits Known(TyBits);
- computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
-
- // If the input is known to be 0 or 1, the output is 0/-1, which is all
- // sign bits set.
- if ((Known.Zero | 1).isAllOnesValue())
- return TyBits;
-
- // If we are subtracting one from a positive number, there is no carry
- // out of the result.
- if (Known.isNonNegative())
- return Tmp;
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor: // NOT is handled here.
+ // Logical binary ops preserve the number of sign bits at the worst.
+ Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
+ if (Tmp != 1) {
+ Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
+ FirstAnswer = std::min(Tmp, Tmp2);
+ // We computed what we know about the sign bits as our first
+ // answer. Now proceed to the generic code that uses
+ // computeKnownBits, and pick whichever answer is better.
}
+ break;
- Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
- if (Tmp2 == 1) break;
- return std::min(Tmp, Tmp2)-1;
+ case Instruction::Select: {
+ // If we have a clamp pattern, we know that the number of sign bits will
+ // be the minimum of the clamp min/max range.
+ const Value *X;
+ const APInt *CLow, *CHigh;
+ if (isSignedMinMaxClamp(U, X, CLow, CHigh))
+ return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
+
+ Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
+ if (Tmp == 1) break;
+ Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
+ return std::min(Tmp, Tmp2);
+ }
- case Instruction::Sub:
- Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
- if (Tmp2 == 1) break;
-
- // Handle NEG.
- if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
- if (CLHS->isNullValue()) {
- KnownBits Known(TyBits);
- computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
- // If the input is known to be 0 or 1, the output is 0/-1, which is all
- // sign bits set.
- if ((Known.Zero | 1).isAllOnesValue())
- return TyBits;
-
- // If the input is known to be positive (the sign bit is known clear),
- // the output of the NEG has the same number of sign bits as the input.
- if (Known.isNonNegative())
- return Tmp2;
-
- // Otherwise, we treat this like a SUB.
- }
+ case Instruction::Add:
+ // Add can have at most one carry bit. Thus we know that the output
+ // is, at worst, one more bit than the inputs.
+ Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
+ if (Tmp == 1) break;
+
+ // Special case decrementing a value (ADD X, -1):
+ if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
+ if (CRHS->isAllOnesValue()) {
+ KnownBits Known(TyBits);
+ computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
+
+ // If the input is known to be 0 or 1, the output is 0/-1, which is
+ // all sign bits set.
+ if ((Known.Zero | 1).isAllOnesValue())
+ return TyBits;
+
+ // If we are subtracting one from a positive number, there is no carry
+ // out of the result.
+ if (Known.isNonNegative())
+ return Tmp;
+ }
- // Sub can have at most one carry bit. Thus we know that the output
- // is, at worst, one more bit than the inputs.
- Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
- if (Tmp == 1) break;
- return std::min(Tmp, Tmp2)-1;
+ Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
+ if (Tmp2 == 1) break;
+ return std::min(Tmp, Tmp2) - 1;
- case Instruction::Mul: {
- // The output of the Mul can be at most twice the valid bits in the inputs.
- unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
- if (SignBitsOp0 == 1) break;
- unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
- if (SignBitsOp1 == 1) break;
- unsigned OutValidBits =
- (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
- return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
- }
+ case Instruction::Sub:
+ Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
+ if (Tmp2 == 1) break;
+
+ // Handle NEG.
+ if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
+ if (CLHS->isNullValue()) {
+ KnownBits Known(TyBits);
+ computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
+ // If the input is known to be 0 or 1, the output is 0/-1, which is
+ // all sign bits set.
+ if ((Known.Zero | 1).isAllOnesValue())
+ return TyBits;
+
+ // If the input is known to be positive (the sign bit is known clear),
+ // the output of the NEG has the same number of sign bits as the
+ // input.
+ if (Known.isNonNegative())
+ return Tmp2;
+
+ // Otherwise, we treat this like a SUB.
+ }
- case Instruction::PHI: {
- const PHINode *PN = cast<PHINode>(U);
- unsigned NumIncomingValues = PN->getNumIncomingValues();
- // Don't analyze large in-degree PHIs.
- if (NumIncomingValues > 4) break;
- // Unreachable blocks may have zero-operand PHI nodes.
- if (NumIncomingValues == 0) break;
-
- // Take the minimum of all incoming values. This can't infinitely loop
- // because of our depth threshold.
- Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
- for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
- if (Tmp == 1) return Tmp;
- Tmp = std::min(
- Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
+ // Sub can have at most one carry bit. Thus we know that the output
+ // is, at worst, one more bit than the inputs.
+ Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
+ if (Tmp == 1) break;
+ return std::min(Tmp, Tmp2) - 1;
+
+ case Instruction::Mul: {
+ // The output of the Mul can be at most twice the valid bits in the
+ // inputs.
+ unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
+ if (SignBitsOp0 == 1) break;
+ unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
+ if (SignBitsOp1 == 1) break;
+ unsigned OutValidBits =
+ (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
+ return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
}
- return Tmp;
- }
- case Instruction::Trunc:
- // FIXME: it's tricky to do anything useful for this, but it is an important
- // case for targets like X86.
- break;
+ case Instruction::PHI: {
+ const PHINode *PN = cast<PHINode>(U);
+ unsigned NumIncomingValues = PN->getNumIncomingValues();
+ // Don't analyze large in-degree PHIs.
+ if (NumIncomingValues > 4) break;
+ // Unreachable blocks may have zero-operand PHI nodes.
+ if (NumIncomingValues == 0) break;
+
+ // Take the minimum of all incoming values. This can't infinitely loop
+ // because of our depth threshold.
+ Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
+ for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
+ if (Tmp == 1) return Tmp;
+ Tmp = std::min(
+ Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
+ }
+ return Tmp;
+ }
- case Instruction::ExtractElement:
- // Look through extract element. At the moment we keep this simple and skip
- // tracking the specific element. But at least we might find information
- // valid for all elements of the vector (for example if vector is sign
- // extended, shifted, etc).
- return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
-
- case Instruction::ShuffleVector: {
- // TODO: This is copied almost directly from the SelectionDAG version of
- // ComputeNumSignBits. It would be better if we could share common
- // code. If not, make sure that changes are translated to the DAG.
-
- // Collect the minimum number of sign bits that are shared by every vector
- // element referenced by the shuffle.
- auto *Shuf = cast<ShuffleVectorInst>(U);
- int NumElts = Shuf->getOperand(0)->getType()->getVectorNumElements();
- int NumMaskElts = Shuf->getMask()->getType()->getVectorNumElements();
- APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
- for (int i = 0; i != NumMaskElts; ++i) {
- int M = Shuf->getMaskValue(i);
- assert(M < NumElts * 2 && "Invalid shuffle mask constant");
- // For undef elements, we don't know anything about the common state of
- // the shuffle result.
- if (M == -1)
- return 1;
- if (M < NumElts)
- DemandedLHS.setBit(M % NumElts);
- else
- DemandedRHS.setBit(M % NumElts);
+ case Instruction::Trunc:
+ // FIXME: it's tricky to do anything useful for this, but it is an
+ // important case for targets like X86.
+ break;
+
+ case Instruction::ExtractElement:
+ // Look through extract element. At the moment we keep this simple and
+ // skip tracking the specific element. But at least we might find
+ // information valid for all elements of the vector (for example if vector
+ // is sign extended, shifted, etc).
+ return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
+
+ case Instruction::ShuffleVector: {
+ // TODO: This is copied almost directly from the SelectionDAG version of
+ // ComputeNumSignBits. It would be better if we could share common
+ // code. If not, make sure that changes are translated to the DAG.
+
+ // Collect the minimum number of sign bits that are shared by every vector
+ // element referenced by the shuffle.
+ auto *Shuf = cast<ShuffleVectorInst>(U);
+ int NumElts = Shuf->getOperand(0)->getType()->getVectorNumElements();
+ int NumMaskElts = Shuf->getMask()->getType()->getVectorNumElements();
+ APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
+ for (int i = 0; i != NumMaskElts; ++i) {
+ int M = Shuf->getMaskValue(i);
+ assert(M < NumElts * 2 && "Invalid shuffle mask constant");
+ // For undef elements, we don't know anything about the common state of
+ // the shuffle result.
+ if (M == -1)
+ return 1;
+ if (M < NumElts)
+ DemandedLHS.setBit(M % NumElts);
+ else
+ DemandedRHS.setBit(M % NumElts);
+ }
+ Tmp = std::numeric_limits<unsigned>::max();
+ if (!!DemandedLHS)
+ Tmp = ComputeNumSignBits(Shuf->getOperand(0), Depth + 1, Q);
+ if (!!DemandedRHS) {
+ Tmp2 = ComputeNumSignBits(Shuf->getOperand(1), Depth + 1, Q);
+ Tmp = std::min(Tmp, Tmp2);
+ }
+ // If we don't know anything, early out and try computeKnownBits
+ // fall-back.
+ if (Tmp == 1)
+ break;
+ assert(Tmp <= V->getType()->getScalarSizeInBits() &&
+ "Failed to determine minimum sign bits");
+ return Tmp;
}
- Tmp = std::numeric_limits<unsigned>::max();
- if (!!DemandedLHS)
- Tmp = ComputeNumSignBits(Shuf->getOperand(0), Depth + 1, Q);
- if (!!DemandedRHS) {
- Tmp2 = ComputeNumSignBits(Shuf->getOperand(1), Depth + 1, Q);
- Tmp = std::min(Tmp, Tmp2);
}
- // If we don't know anything, early out and try computeKnownBits fall-back.
- if (Tmp == 1)
- break;
- assert(Tmp <= V->getType()->getScalarSizeInBits() &&
- "Failed to determine minimum sign bits");
- return Tmp;
- }
}
// Finally, if we can prove that the top bits of the result are 0's or 1's,
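The hunk above is mostly a re-indentation of ComputeNumSignBitsImpl under the new `if (auto *U = dyn_cast<Operator>(V))` guard, so the per-opcode rules are unchanged. Two of them can be sanity-checked with ordinary integer arithmetic (standalone C++, not the LLVM API; `numSignBits` below is only a stand-in for ComputeNumSignBits on i32):

```cpp
#include <cstdint>
#include <cstdio>

// Number of leading bits equal to the sign bit, including the sign bit itself.
static unsigned numSignBits(int32_t V) {
  unsigned N = 0;
  for (int Bit = 31; Bit >= 0 && ((V >> Bit) & 1) == ((V >> 31) & 1); --Bit)
    ++N;
  return N;
}

int main() {
  int32_t X = -6000; // 19 sign bits
  // sdiv X, C adds floor(log2(C)) sign bits: 19 + log2(16) = 23 here.
  std::printf("signbits(x)      = %u\n", numSignBits(X));
  std::printf("signbits(x / 16) = %u\n", numSignBits(X / 16));
  // mul guarantees at least TyBits - OutValidBits + 1 = 32 - 28 + 1 = 5 here.
  std::printf("signbits(x * x)  = %u\n", numSignBits(X * X));
  return 0;
}
```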
@@ -2655,8 +2666,6 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
/// through SExt instructions only if LookThroughSExt is true.
bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
bool LookThroughSExt, unsigned Depth) {
- const unsigned MaxDepth = 6;
-
assert(V && "No Value?");
assert(Depth <= MaxDepth && "Limit Search Depth");
assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
@@ -3651,23 +3660,28 @@ uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
return Len == ~0ULL ? 1 : Len;
}
-const Value *llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call) {
+const Value *
+llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
+ bool MustPreserveNullness) {
assert(Call &&
"getArgumentAliasingToReturnedPointer only works on nonnull calls");
if (const Value *RV = Call->getReturnedArgOperand())
return RV;
// This can be used only as a aliasing property.
- if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(Call))
+ if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
+ Call, MustPreserveNullness))
return Call->getArgOperand(0);
return nullptr;
}
bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
- const CallBase *Call) {
+ const CallBase *Call, bool MustPreserveNullness) {
return Call->getIntrinsicID() == Intrinsic::launder_invariant_group ||
Call->getIntrinsicID() == Intrinsic::strip_invariant_group ||
Call->getIntrinsicID() == Intrinsic::aarch64_irg ||
- Call->getIntrinsicID() == Intrinsic::aarch64_tagp;
+ Call->getIntrinsicID() == Intrinsic::aarch64_tagp ||
+ (!MustPreserveNullness &&
+ Call->getIntrinsicID() == Intrinsic::ptrmask);
}
/// \p PN defines a loop-variant pointer to an object. Check if the
@@ -3725,7 +3739,7 @@ Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
// because it should be in sync with CaptureTracking. Not using it may
// cause weird miscompilations where 2 aliasing pointers are assumed to
// noalias.
- if (auto *RP = getArgumentAliasingToReturnedPointer(Call)) {
+ if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
V = RP;
continue;
}
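GetUnderlyingObject only needs the aliasing fact, so it passes MustPreserveNullness = false here, while isKnownNonZero (earlier in this diff) passes true. The distinction exists because of llvm.ptrmask: masking a non-null address can produce null, so the result-aliases-argument relation must not be used to propagate non-nullness. A plain C++ picture of the mask (not the intrinsic itself):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  uintptr_t NonNull = 0x7;                      // a non-null input address
  uintptr_t Masked = NonNull & ~uintptr_t(0xF); // align-down mask, as ptrmask does
  std::printf("masked = %llu\n", static_cast<unsigned long long>(Masked)); // 0
  return 0;
}
```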
@@ -3865,6 +3879,18 @@ bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
return true;
}
+bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
+ if (!LI.isUnordered())
+ return true;
+ const Function &F = *LI.getFunction();
+ // Speculative load may create a race that did not exist in the source.
+ return F.hasFnAttribute(Attribute::SanitizeThread) ||
+ // Speculative load may load data from dirty regions.
+ F.hasFnAttribute(Attribute::SanitizeAddress) ||
+ F.hasFnAttribute(Attribute::SanitizeHWAddress);
+}
+
+
bool llvm::isSafeToSpeculativelyExecute(const Value *V,
const Instruction *CtxI,
const DominatorTree *DT) {
@@ -3909,17 +3935,12 @@ bool llvm::isSafeToSpeculativelyExecute(const Value *V,
}
case Instruction::Load: {
const LoadInst *LI = cast<LoadInst>(Inst);
- if (!LI->isUnordered() ||
- // Speculative load may create a race that did not exist in the source.
- LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
- // Speculative load may load data from dirty regions.
- LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
- LI->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
+ if (mustSuppressSpeculation(*LI))
return false;
const DataLayout &DL = LI->getModule()->getDataLayout();
- return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
- LI->getType(), LI->getAlignment(),
- DL, CtxI, DT);
+ return isDereferenceableAndAlignedPointer(
+ LI->getPointerOperand(), LI->getType(), MaybeAlign(LI->getAlignment()),
+ DL, CtxI, DT);
}
case Instruction::Call: {
auto *CI = cast<const CallInst>(Inst);
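With the sanitizer checks factored into mustSuppressSpeculation, the load case reduces to an unordered load whose pointer is dereferenceable and aligned at the context point. What speculating a load means, in plain C++ terms (illustrative only):

```cpp
#include <cstdio>

int pick(const int *P, bool UseP, int Fallback) {
  // Guarded form of the original program: return UseP ? *P : Fallback;
  // Speculated form: the load has been hoisted above the condition. This is
  // only sound if P can be read on every path, which is what the
  // dereferenceability-and-alignment check above establishes.
  int Loaded = *P;
  return UseP ? Loaded : Fallback;
}

int main() {
  int X = 41;
  std::printf("%d\n", pick(&X, true, 0));
  return 0;
}
```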
@@ -4221,22 +4242,9 @@ OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
}
bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
- // A memory operation returns normally if it isn't volatile. A volatile
- // operation is allowed to trap.
- //
- // An atomic operation isn't guaranteed to return in a reasonable amount of
- // time because it's possible for another thread to interfere with it for an
+ // Note: An atomic operation isn't guaranteed to return in a reasonable amount
+ // of time because it's possible for another thread to interfere with it for an
// arbitrary length of time, but programs aren't allowed to rely on that.
- if (const LoadInst *LI = dyn_cast<LoadInst>(I))
- return !LI->isVolatile();
- if (const StoreInst *SI = dyn_cast<StoreInst>(I))
- return !SI->isVolatile();
- if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
- return !CXI->isVolatile();
- if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
- return !RMWI->isVolatile();
- if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
- return !MII->isVolatile();
// If there is no successor, then execution can't transfer to it.
if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
@@ -4277,10 +4285,7 @@ bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
// FIXME: This isn't aggressive enough; a call which only writes to a global
// is guaranteed to return.
- return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
- match(I, m_Intrinsic<Intrinsic::assume>()) ||
- match(I, m_Intrinsic<Intrinsic::sideeffect>()) ||
- match(I, m_Intrinsic<Intrinsic::experimental_widenable_condition>());
+ return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory();
}
// Other instructions return normally.
@@ -4572,12 +4577,12 @@ static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
// TODO: Allow FP min/max with nnan/nsz.
assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
- Value *A, *B;
+ Value *A = nullptr, *B = nullptr;
SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
if (!SelectPatternResult::isMinOrMax(L.Flavor))
return {SPF_UNKNOWN, SPNB_NA, false};
- Value *C, *D;
+ Value *C = nullptr, *D = nullptr;
SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
if (L.Flavor != R.Flavor)
return {SPF_UNKNOWN, SPNB_NA, false};
@@ -5627,8 +5632,8 @@ static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower,
}
static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
- APInt &Upper) {
- const Value *LHS, *RHS;
+ APInt &Upper, const InstrInfoQuery &IIQ) {
+ const Value *LHS = nullptr, *RHS = nullptr;
SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
if (R.Flavor == SPF_UNKNOWN)
return;
@@ -5640,7 +5645,8 @@ static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
// then the result of abs(X) is [0..SIGNED_MAX],
// otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
Lower = APInt::getNullValue(BitWidth);
- if (cast<Instruction>(RHS)->hasNoSignedWrap())
+ if (match(RHS, m_Neg(m_Specific(LHS))) &&
+ IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
Upper = APInt::getSignedMaxValue(BitWidth) + 1;
else
Upper = APInt::getSignedMinValue(BitWidth) + 1;
@@ -5694,7 +5700,7 @@ ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo) {
else if (auto *II = dyn_cast<IntrinsicInst>(V))
setLimitsForIntrinsic(*II, Lower, Upper);
else if (auto *SI = dyn_cast<SelectInst>(V))
- setLimitsForSelectPattern(*SI, Lower, Upper);
+ setLimitsForSelectPattern(*SI, Lower, Upper, IIQ);
ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper);
@@ -5704,3 +5710,111 @@ ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo) {
return CR;
}
+
+static Optional<int64_t>
+getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
+ // Skip over the first indices.
+ gep_type_iterator GTI = gep_type_begin(GEP);
+ for (unsigned i = 1; i != Idx; ++i, ++GTI)
+ /*skip along*/;
+
+ // Compute the offset implied by the rest of the indices.
+ int64_t Offset = 0;
+ for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
+ ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
+ if (!OpC)
+ return None;
+ if (OpC->isZero())
+ continue; // No offset.
+
+ // Handle struct indices, which add their field offset to the pointer.
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
+ Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
+ continue;
+ }
+
+ // Otherwise, we have a sequential type like an array or vector. Multiply
+ // the index by the ElementSize.
+ uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
+ Offset += Size * OpC->getSExtValue();
+ }
+
+ return Offset;
+}
+
+Optional<int64_t> llvm::isPointerOffset(const Value *Ptr1, const Value *Ptr2,
+ const DataLayout &DL) {
+ Ptr1 = Ptr1->stripPointerCasts();
+ Ptr2 = Ptr2->stripPointerCasts();
+
+ // Handle the trivial case first.
+ if (Ptr1 == Ptr2) {
+ return 0;
+ }
+
+ const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
+ const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);
+
+ // If one pointer is a GEP see if the GEP is a constant offset from the base,
+ // as in "P" and "gep P, 1".
+ // Also do this iteratively to handle the following case:
+ // Ptr_t1 = GEP Ptr1, c1
+ // Ptr_t2 = GEP Ptr_t1, c2
+ // Ptr2 = GEP Ptr_t2, c3
+ // where we will return c1+c2+c3.
+ // TODO: Handle the case when both Ptr1 and Ptr2 are GEPs of some common base
+ // -- replace getOffsetFromBase with getOffsetAndBase, check that the bases
+ // are the same, and return the difference between offsets.
+ auto getOffsetFromBase = [&DL](const GEPOperator *GEP,
+ const Value *Ptr) -> Optional<int64_t> {
+ const GEPOperator *GEP_T = GEP;
+ int64_t OffsetVal = 0;
+ bool HasSameBase = false;
+ while (GEP_T) {
+ auto Offset = getOffsetFromIndex(GEP_T, 1, DL);
+ if (!Offset)
+ return None;
+ OffsetVal += *Offset;
+ auto Op0 = GEP_T->getOperand(0)->stripPointerCasts();
+ if (Op0 == Ptr) {
+ HasSameBase = true;
+ break;
+ }
+ GEP_T = dyn_cast<GEPOperator>(Op0);
+ }
+ if (!HasSameBase)
+ return None;
+ return OffsetVal;
+ };
+
+ if (GEP1) {
+ auto Offset = getOffsetFromBase(GEP1, Ptr2);
+ if (Offset)
+ return -*Offset;
+ }
+ if (GEP2) {
+ auto Offset = getOffsetFromBase(GEP2, Ptr1);
+ if (Offset)
+ return Offset;
+ }
+
+ // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
+ // base. After that base, they may have some number of common (and
+ // potentially variable) indices. After that they handle some constant
+ // offset, which determines their offset from each other. At this point, we
+ // handle no other case.
+ if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
+ return None;
+
+ // Skip any common indices and track the GEP types.
+ unsigned Idx = 1;
+ for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
+ if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
+ break;
+
+ auto Offset1 = getOffsetFromIndex(GEP1, Idx, DL);
+ auto Offset2 = getOffsetFromIndex(GEP2, Idx, DL);
+ if (!Offset1 || !Offset2)
+ return None;
+ return *Offset2 - *Offset1;
+}
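A minimal usage sketch of the new isPointerOffset() helper, assuming the usual LLVM headers and link setup (module, function, and value names are made up for the illustration): build `q = gep inbounds i32* p, 3` and ask for the byte distance from p to q, which should come back as 3 * 4 = 12.

```cpp
#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  M.setDataLayout("e-p:64:64"); // 64-bit pointers, default i32 layout

  // void f(i32* p) { q = getelementptr inbounds i32, i32* p, i64 3 }
  auto *FTy = FunctionType::get(Type::getVoidTy(Ctx),
                                {Type::getInt32PtrTy(Ctx)}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", &M);
  BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
  IRBuilder<> B(BB);

  Value *P = &*F->arg_begin();
  Value *Q = B.CreateConstInBoundsGEP1_64(P, 3, "q");
  B.CreateRetVoid();

  if (Optional<int64_t> Off = isPointerOffset(P, Q, M.getDataLayout()))
    outs() << "offset(q, p) = " << *Off << " bytes\n"; // expect 12
  return 0;
}
```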