Diffstat (limited to 'lib/Analysis/ValueTracking.cpp')
 lib/Analysis/ValueTracking.cpp | 1204 +++++++++++++++++++++++---------------
 1 file changed, 739 insertions(+), 465 deletions(-)
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index 0446426c0e66..c70906dcc629 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -1,9 +1,8 @@
//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -39,7 +38,6 @@
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
-#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
@@ -617,237 +615,242 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
if (Depth == MaxDepth)
continue;
+ ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
+ if (!Cmp)
+ continue;
+
Value *A, *B;
- auto m_V = m_CombineOr(m_Specific(V),
- m_CombineOr(m_PtrToInt(m_Specific(V)),
- m_BitCast(m_Specific(V))));
+ auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
CmpInst::Predicate Pred;
uint64_t C;
- // assume(v = a)
- if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
- Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
- KnownBits RHSKnown(BitWidth);
- computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
- Known.Zero |= RHSKnown.Zero;
- Known.One |= RHSKnown.One;
- // assume(v & b = a)
- } else if (match(Arg,
- m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
- Pred == ICmpInst::ICMP_EQ &&
- isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
- KnownBits RHSKnown(BitWidth);
- computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
- KnownBits MaskKnown(BitWidth);
- computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));
-
- // For those bits in the mask that are known to be one, we can propagate
- // known bits from the RHS to V.
- Known.Zero |= RHSKnown.Zero & MaskKnown.One;
- Known.One |= RHSKnown.One & MaskKnown.One;
- // assume(~(v & b) = a)
- } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
- m_Value(A))) &&
- Pred == ICmpInst::ICMP_EQ &&
- isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
- KnownBits RHSKnown(BitWidth);
- computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
- KnownBits MaskKnown(BitWidth);
- computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));
-
- // For those bits in the mask that are known to be one, we can propagate
- // inverted known bits from the RHS to V.
- Known.Zero |= RHSKnown.One & MaskKnown.One;
- Known.One |= RHSKnown.Zero & MaskKnown.One;
- // assume(v | b = a)
- } else if (match(Arg,
- m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
- Pred == ICmpInst::ICMP_EQ &&
- isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
- KnownBits RHSKnown(BitWidth);
- computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
- KnownBits BKnown(BitWidth);
- computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
-
- // For those bits in B that are known to be zero, we can propagate known
- // bits from the RHS to V.
- Known.Zero |= RHSKnown.Zero & BKnown.Zero;
- Known.One |= RHSKnown.One & BKnown.Zero;
- // assume(~(v | b) = a)
- } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
- m_Value(A))) &&
- Pred == ICmpInst::ICMP_EQ &&
- isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
- KnownBits RHSKnown(BitWidth);
- computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
- KnownBits BKnown(BitWidth);
- computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
-
- // For those bits in B that are known to be zero, we can propagate
- // inverted known bits from the RHS to V.
- Known.Zero |= RHSKnown.One & BKnown.Zero;
- Known.One |= RHSKnown.Zero & BKnown.Zero;
- // assume(v ^ b = a)
- } else if (match(Arg,
- m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
- Pred == ICmpInst::ICMP_EQ &&
- isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
- KnownBits RHSKnown(BitWidth);
- computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
- KnownBits BKnown(BitWidth);
- computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
-
- // For those bits in B that are known to be zero, we can propagate known
- // bits from the RHS to V. For those bits in B that are known to be one,
- // we can propagate inverted known bits from the RHS to V.
- Known.Zero |= RHSKnown.Zero & BKnown.Zero;
- Known.One |= RHSKnown.One & BKnown.Zero;
- Known.Zero |= RHSKnown.One & BKnown.One;
- Known.One |= RHSKnown.Zero & BKnown.One;
- // assume(~(v ^ b) = a)
- } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
- m_Value(A))) &&
- Pred == ICmpInst::ICMP_EQ &&
- isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
- KnownBits RHSKnown(BitWidth);
- computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
- KnownBits BKnown(BitWidth);
- computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
-
- // For those bits in B that are known to be zero, we can propagate
- // inverted known bits from the RHS to V. For those bits in B that are
- // known to be one, we can propagate known bits from the RHS to V.
- Known.Zero |= RHSKnown.One & BKnown.Zero;
- Known.One |= RHSKnown.Zero & BKnown.Zero;
- Known.Zero |= RHSKnown.Zero & BKnown.One;
- Known.One |= RHSKnown.One & BKnown.One;
- // assume(v << c = a)
- } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
- m_Value(A))) &&
- Pred == ICmpInst::ICMP_EQ &&
- isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
- C < BitWidth) {
- KnownBits RHSKnown(BitWidth);
- computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
- // For those bits in RHS that are known, we can propagate them to known
- // bits in V shifted to the right by C.
- RHSKnown.Zero.lshrInPlace(C);
- Known.Zero |= RHSKnown.Zero;
- RHSKnown.One.lshrInPlace(C);
- Known.One |= RHSKnown.One;
- // assume(~(v << c) = a)
- } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
- m_Value(A))) &&
- Pred == ICmpInst::ICMP_EQ &&
- isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
- C < BitWidth) {
- KnownBits RHSKnown(BitWidth);
- computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
- // For those bits in RHS that are known, we can propagate them inverted
- // to known bits in V shifted to the right by C.
- RHSKnown.One.lshrInPlace(C);
- Known.Zero |= RHSKnown.One;
- RHSKnown.Zero.lshrInPlace(C);
- Known.One |= RHSKnown.Zero;
- // assume(v >> c = a)
- } else if (match(Arg,
- m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
- m_Value(A))) &&
- Pred == ICmpInst::ICMP_EQ &&
- isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
- C < BitWidth) {
- KnownBits RHSKnown(BitWidth);
- computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
- // For those bits in RHS that are known, we can propagate them to known
- // bits in V shifted to the right by C.
- Known.Zero |= RHSKnown.Zero << C;
- Known.One |= RHSKnown.One << C;
- // assume(~(v >> c) = a)
- } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
- m_Value(A))) &&
- Pred == ICmpInst::ICMP_EQ &&
- isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
- C < BitWidth) {
- KnownBits RHSKnown(BitWidth);
- computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
- // For those bits in RHS that are known, we can propagate them inverted
- // to known bits in V shifted to the right by C.
- Known.Zero |= RHSKnown.One << C;
- Known.One |= RHSKnown.Zero << C;
- // assume(v >=_s c) where c is non-negative
- } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
- Pred == ICmpInst::ICMP_SGE &&
- isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
- KnownBits RHSKnown(BitWidth);
- computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
-
- if (RHSKnown.isNonNegative()) {
- // We know that the sign bit is zero.
- Known.makeNonNegative();
+ switch (Cmp->getPredicate()) {
+ default:
+ break;
+ case ICmpInst::ICMP_EQ:
+ // assume(v = a)
+ if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
+ KnownBits RHSKnown(BitWidth);
+ computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+ Known.Zero |= RHSKnown.Zero;
+ Known.One |= RHSKnown.One;
+ // assume(v & b = a)
+ } else if (match(Cmp,
+ m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
+ KnownBits RHSKnown(BitWidth);
+ computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+ KnownBits MaskKnown(BitWidth);
+ computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));
+
+ // For those bits in the mask that are known to be one, we can propagate
+ // known bits from the RHS to V.
+ Known.Zero |= RHSKnown.Zero & MaskKnown.One;
+ Known.One |= RHSKnown.One & MaskKnown.One;
+ // assume(~(v & b) = a)
+ } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
+ m_Value(A))) &&
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
+ KnownBits RHSKnown(BitWidth);
+ computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+ KnownBits MaskKnown(BitWidth);
+ computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));
+
+ // For those bits in the mask that are known to be one, we can propagate
+ // inverted known bits from the RHS to V.
+ Known.Zero |= RHSKnown.One & MaskKnown.One;
+ Known.One |= RHSKnown.Zero & MaskKnown.One;
+ // assume(v | b = a)
+ } else if (match(Cmp,
+ m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
+ KnownBits RHSKnown(BitWidth);
+ computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+ KnownBits BKnown(BitWidth);
+ computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
+
+ // For those bits in B that are known to be zero, we can propagate known
+ // bits from the RHS to V.
+ Known.Zero |= RHSKnown.Zero & BKnown.Zero;
+ Known.One |= RHSKnown.One & BKnown.Zero;
+ // assume(~(v | b) = a)
+ } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
+ m_Value(A))) &&
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
+ KnownBits RHSKnown(BitWidth);
+ computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+ KnownBits BKnown(BitWidth);
+ computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
+
+ // For those bits in B that are known to be zero, we can propagate
+ // inverted known bits from the RHS to V.
+ Known.Zero |= RHSKnown.One & BKnown.Zero;
+ Known.One |= RHSKnown.Zero & BKnown.Zero;
+ // assume(v ^ b = a)
+ } else if (match(Cmp,
+ m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
+ KnownBits RHSKnown(BitWidth);
+ computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+ KnownBits BKnown(BitWidth);
+ computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
+
+ // For those bits in B that are known to be zero, we can propagate known
+ // bits from the RHS to V. For those bits in B that are known to be one,
+ // we can propagate inverted known bits from the RHS to V.
+ Known.Zero |= RHSKnown.Zero & BKnown.Zero;
+ Known.One |= RHSKnown.One & BKnown.Zero;
+ Known.Zero |= RHSKnown.One & BKnown.One;
+ Known.One |= RHSKnown.Zero & BKnown.One;
+ // assume(~(v ^ b) = a)
+ } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
+ m_Value(A))) &&
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
+ KnownBits RHSKnown(BitWidth);
+ computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+ KnownBits BKnown(BitWidth);
+ computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
+
+ // For those bits in B that are known to be zero, we can propagate
+ // inverted known bits from the RHS to V. For those bits in B that are
+ // known to be one, we can propagate known bits from the RHS to V.
+ Known.Zero |= RHSKnown.One & BKnown.Zero;
+ Known.One |= RHSKnown.Zero & BKnown.Zero;
+ Known.Zero |= RHSKnown.Zero & BKnown.One;
+ Known.One |= RHSKnown.One & BKnown.One;
+ // assume(v << c = a)
+ } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
+ m_Value(A))) &&
+ isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
+ KnownBits RHSKnown(BitWidth);
+ computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+ // For those bits in RHS that are known, we can propagate them to known
+ // bits in V shifted to the right by C.
+ RHSKnown.Zero.lshrInPlace(C);
+ Known.Zero |= RHSKnown.Zero;
+ RHSKnown.One.lshrInPlace(C);
+ Known.One |= RHSKnown.One;
+ // assume(~(v << c) = a)
+ } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
+ m_Value(A))) &&
+ isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
+ KnownBits RHSKnown(BitWidth);
+ computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+ // For those bits in RHS that are known, we can propagate them inverted
+ // to known bits in V shifted to the right by C.
+ RHSKnown.One.lshrInPlace(C);
+ Known.Zero |= RHSKnown.One;
+ RHSKnown.Zero.lshrInPlace(C);
+ Known.One |= RHSKnown.Zero;
+ // assume(v >> c = a)
+ } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
+ m_Value(A))) &&
+ isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
+ KnownBits RHSKnown(BitWidth);
+ computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+ // For those bits in RHS that are known, we can propagate them to known
+ // bits in V shifted to the right by C.
+ Known.Zero |= RHSKnown.Zero << C;
+ Known.One |= RHSKnown.One << C;
+ // assume(~(v >> c) = a)
+ } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
+ m_Value(A))) &&
+ isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
+ KnownBits RHSKnown(BitWidth);
+ computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+ // For those bits in RHS that are known, we can propagate them inverted
+ // to known bits in V shifted to the right by C.
+ Known.Zero |= RHSKnown.One << C;
+ Known.One |= RHSKnown.Zero << C;
}
- // assume(v >_s c) where c is at least -1.
- } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
- Pred == ICmpInst::ICMP_SGT &&
- isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
- KnownBits RHSKnown(BitWidth);
- computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
-
- if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
- // We know that the sign bit is zero.
- Known.makeNonNegative();
+ break;
+ case ICmpInst::ICMP_SGE:
+ // assume(v >=_s c) where c is non-negative
+ if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
+ KnownBits RHSKnown(BitWidth);
+ computeKnownBits(A, RHSKnown, Depth + 1, Query(Q, I));
+
+ if (RHSKnown.isNonNegative()) {
+ // We know that the sign bit is zero.
+ Known.makeNonNegative();
+ }
}
- // assume(v <=_s c) where c is negative
- } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
- Pred == ICmpInst::ICMP_SLE &&
- isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
- KnownBits RHSKnown(BitWidth);
- computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
-
- if (RHSKnown.isNegative()) {
- // We know that the sign bit is one.
- Known.makeNegative();
+ break;
+ case ICmpInst::ICMP_SGT:
+ // assume(v >_s c) where c is at least -1.
+ if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
+ KnownBits RHSKnown(BitWidth);
+ computeKnownBits(A, RHSKnown, Depth + 1, Query(Q, I));
+
+ if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
+ // We know that the sign bit is zero.
+ Known.makeNonNegative();
+ }
}
- // assume(v <_s c) where c is non-positive
- } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
- Pred == ICmpInst::ICMP_SLT &&
- isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
- KnownBits RHSKnown(BitWidth);
- computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
-
- if (RHSKnown.isZero() || RHSKnown.isNegative()) {
- // We know that the sign bit is one.
- Known.makeNegative();
+ break;
+ case ICmpInst::ICMP_SLE:
+ // assume(v <=_s c) where c is negative
+ if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
+ KnownBits RHSKnown(BitWidth);
+ computeKnownBits(A, RHSKnown, Depth + 1, Query(Q, I));
+
+ if (RHSKnown.isNegative()) {
+ // We know that the sign bit is one.
+ Known.makeNegative();
+ }
}
- // assume(v <=_u c)
- } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
- Pred == ICmpInst::ICMP_ULE &&
- isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
- KnownBits RHSKnown(BitWidth);
- computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
-
- // Whatever high bits in c are zero are known to be zero.
- Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
- // assume(v <_u c)
- } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
- Pred == ICmpInst::ICMP_ULT &&
- isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
- KnownBits RHSKnown(BitWidth);
- computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
-
- // If the RHS is known zero, then this assumption must be wrong (nothing
- // is unsigned less than zero). Signal a conflict and get out of here.
- if (RHSKnown.isZero()) {
- Known.Zero.setAllBits();
- Known.One.setAllBits();
- break;
+ break;
+ case ICmpInst::ICMP_SLT:
+ // assume(v <_s c) where c is non-positive
+ if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
+ KnownBits RHSKnown(BitWidth);
+ computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+
+ if (RHSKnown.isZero() || RHSKnown.isNegative()) {
+ // We know that the sign bit is one.
+ Known.makeNegative();
+ }
}
-
- // Whatever high bits in c are zero are known to be zero (if c is a power
- // of 2, then one more).
- if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
- Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
- else
+ break;
+ case ICmpInst::ICMP_ULE:
+ // assume(v <=_u c)
+ if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
+ KnownBits RHSKnown(BitWidth);
+ computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+
+ // Whatever high bits in c are zero are known to be zero.
Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
+ }
+ break;
+ case ICmpInst::ICMP_ULT:
+ // assume(v <_u c)
+ if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
+ isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
+ KnownBits RHSKnown(BitWidth);
+ computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+
+ // If the RHS is known zero, then this assumption must be wrong (nothing
+ // is unsigned less than zero). Signal a conflict and get out of here.
+ if (RHSKnown.isZero()) {
+ Known.Zero.setAllBits();
+ Known.One.setAllBits();
+ break;
+ }
+
+ // Whatever high bits in c are zero are known to be zero (if c is a power
+ // of 2, then one more).
+ if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
+ Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
+ else
+ Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
+ }
+ break;
}
}
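
The assume(v & b == a) case above propagates RHS bits through the mask's
known-one bits. A minimal standalone sketch of that rule, using plain 8-bit
integers instead of LLVM's KnownBits class (all names and the 8-bit width are
illustrative, not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      // assume((v & 0xF0) == 0x50): b and a are fully-known constants here.
      uint8_t MaskKnownOne = 0xF0; // bits of b known to be one
      uint8_t RHSOne = 0x50;       // bits of a known to be one
      uint8_t RHSZero = 0xAF;      // bits of a known to be zero (~0x50)

      // Where the mask is known one, the RHS bits transfer directly to v.
      uint8_t KnownOne = RHSOne & MaskKnownOne;   // 0x50: bits 6 and 4 of v
      uint8_t KnownZero = RHSZero & MaskKnownOne; // 0xA0: bits 7 and 5 of v
      assert(KnownOne == 0x50 && KnownZero == 0xA0);
      // Bits 3..0 of v stay unknown: the assume says nothing about them.
      return 0;
    }
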
@@ -1129,12 +1132,9 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
Q.DL.getTypeSizeInBits(ScalarTy);
assert(SrcBitWidth && "SrcBitWidth can't be zero");
- Known = Known.zextOrTrunc(SrcBitWidth);
+ Known = Known.zextOrTrunc(SrcBitWidth, false);
computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
- Known = Known.zextOrTrunc(BitWidth);
- // Any top bits are known to be zero.
- if (BitWidth > SrcBitWidth)
- Known.Zero.setBitsFrom(SrcBitWidth);
+ Known = Known.zextOrTrunc(BitWidth, true /* ExtendedBitsAreKnownZero */);
break;
}
case Instruction::BitCast: {
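
The ZExt/Trunc hunk above folds the manual setBitsFrom() into the second
zextOrTrunc() parameter; the invariant being recorded is simply that
zero-extension makes the new high bits known zero. A throwaway sketch of that
invariant in plain C++ (widths chosen arbitrarily):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint8_t Src = 0x8C;               // nothing is known about this i8 value
      uint32_t Ext = Src;               // zext i8 -> i32
      assert((Ext & 0xFFFFFF00u) == 0); // bits 31..8 are known zero regardless
      return 0;
    }
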
@@ -1527,6 +1527,37 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
break;
}
+ case Intrinsic::uadd_sat:
+ case Intrinsic::usub_sat: {
+ bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
+ computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
+ computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
+
+ // Add: Leading ones of either operand are preserved.
+ // Sub: Leading zeros of LHS and leading ones of RHS are preserved
+ // as leading zeros in the result.
+ unsigned LeadingKnown;
+ if (IsAdd)
+ LeadingKnown = std::max(Known.countMinLeadingOnes(),
+ Known2.countMinLeadingOnes());
+ else
+ LeadingKnown = std::max(Known.countMinLeadingZeros(),
+ Known2.countMinLeadingOnes());
+
+ Known = KnownBits::computeForAddSub(
+ IsAdd, /* NSW */ false, Known, Known2);
+
+ // We select between the operation result and all-ones/zero
+ // respectively, so we can preserve known ones/zeros.
+ if (IsAdd) {
+ Known.One.setHighBits(LeadingKnown);
+ Known.Zero.clearAllBits();
+ } else {
+ Known.Zero.setHighBits(LeadingKnown);
+ Known.One.clearAllBits();
+ }
+ break;
+ }
case Intrinsic::x86_sse42_crc32_64_64:
Known.Zero.setBitsFrom(32);
break;
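
The reasoning in the uadd.sat branch is that a saturating unsigned add never
produces a value below either operand, so a run of leading ones in either
input survives into the result. A brute-force check of that claim at 8 bits
(uadd_sat here is a local stand-in for the intrinsic):

    #include <cassert>
    #include <cstdint>

    static uint8_t uadd_sat(uint8_t a, uint8_t b) { // stand-in for llvm.uadd.sat.i8
      unsigned s = unsigned(a) + unsigned(b);
      return s > 0xFF ? 0xFF : uint8_t(s);
    }

    int main() {
      for (unsigned b = 0; b <= 0xFF; ++b) {
        uint8_t r = uadd_sat(0xE7, uint8_t(b)); // LHS has three leading ones
        assert((r & 0xE0) == 0xE0);             // they survive in the result
      }
      return 0;
    }
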
@@ -1967,6 +1998,15 @@ bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
// Must be non-zero due to null test above.
return true;
+ if (auto *CE = dyn_cast<ConstantExpr>(C)) {
+ // See the comment for IntToPtr/PtrToInt instructions below.
+ if (CE->getOpcode() == Instruction::IntToPtr ||
+ CE->getOpcode() == Instruction::PtrToInt)
+ if (Q.DL.getTypeSizeInBits(CE->getOperand(0)->getType()) <=
+ Q.DL.getTypeSizeInBits(CE->getType()))
+ return isKnownNonZero(CE->getOperand(0), Depth, Q);
+ }
+
// For constant vectors, check that all elements are undefined or known
// non-zero to determine that the whole vector is known non-zero.
if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
@@ -2037,11 +2077,33 @@ bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
return true;
+ // Look through bitcast operations, GEPs, and int2ptr instructions as they
+ // do not alter the value, or at least not the nullness property of the
+ // value, e.g., int2ptr is allowed to zero/sign extend the value.
+ //
+ // Note that we have to take special care to avoid looking through
+ // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
+ // as casts that can alter the value, e.g., AddrSpaceCasts.
if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
if (isGEPKnownNonNull(GEP, Depth, Q))
return true;
+
+ if (auto *BCO = dyn_cast<BitCastOperator>(V))
+ return isKnownNonZero(BCO->getOperand(0), Depth, Q);
+
+ if (auto *I2P = dyn_cast<IntToPtrInst>(V))
+ if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()) <=
+ Q.DL.getTypeSizeInBits(I2P->getDestTy()))
+ return isKnownNonZero(I2P->getOperand(0), Depth, Q);
}
+ // Similar to int2ptr above, we can look through ptr2int here if the cast
+ // is a no-op or an extend and not a truncate.
+ if (auto *P2I = dyn_cast<PtrToIntInst>(V))
+ if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()) <=
+ Q.DL.getTypeSizeInBits(P2I->getDestTy()))
+ return isKnownNonZero(P2I->getOperand(0), Depth, Q);
+
unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
// X | Y != 0 if X != 0 or Y != 0.
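
The size checks guarding the int2ptr/ptr2int cases above exist because only a
no-op or widening cast preserves non-zeroness; a narrowing one can drop the
set bits entirely. A short demonstration with plain integers:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t NonZero = 0x100000000ull;      // != 0 at 64 bits
      assert(uint32_t(NonZero) == 0);         // truncation destroys non-zeroness
      assert(uint64_t(uint32_t(0x80u)) != 0); // zero-extension preserves it
      return 0;
    }
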
@@ -3082,6 +3144,11 @@ bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
case Intrinsic::sqrt:
return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
+ case Intrinsic::minnum:
+ case Intrinsic::maxnum:
+ // If either operand is not NaN, the result is not NaN.
+ return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
+ isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
default:
return false;
}
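
The minnum/maxnum case relies on IEEE minNum/maxNum semantics: when exactly
one input is NaN, the other operand is returned, so a single NaN-free operand
suffices. C's fmin/fmax follow the same rule, which allows a quick sanity
check (the semantics, not this snippet, are what the patch depends on):

    #include <cassert>
    #include <cmath>

    int main() {
      double r = std::fmin(NAN, 2.0); // fmin returns the non-NaN operand
      assert(!std::isnan(r) && r == 2.0);
      r = std::fmax(3.0, NAN);
      assert(!std::isnan(r) && r == 3.0);
      return 0;
    }
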
@@ -3107,7 +3174,7 @@ bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
return true;
}
-Value *llvm::isBytewiseValue(Value *V) {
+Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
// All byte-wide stores are splatable, even of arbitrary variables.
if (V->getType()->isIntegerTy(8))
@@ -3120,6 +3187,10 @@ Value *llvm::isBytewiseValue(Value *V) {
if (isa<UndefValue>(V))
return UndefInt8;
+ const uint64_t Size = DL.getTypeStoreSize(V->getType());
+ if (!Size)
+ return UndefInt8;
+
Constant *C = dyn_cast<Constant>(V);
if (!C) {
// Conceptually, we could handle things like:
@@ -3146,7 +3217,8 @@ Value *llvm::isBytewiseValue(Value *V) {
else if (CFP->getType()->isDoubleTy())
Ty = Type::getInt64Ty(Ctx);
// Don't handle long double formats, which have strange constraints.
- return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty)) : nullptr;
+ return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
+ : nullptr;
}
// We can handle constant integers that are multiple of 8 bits.
@@ -3159,6 +3231,17 @@ Value *llvm::isBytewiseValue(Value *V) {
}
}
+ if (auto *CE = dyn_cast<ConstantExpr>(C)) {
+ if (CE->getOpcode() == Instruction::IntToPtr) {
+ auto PS = DL.getPointerSizeInBits(
+ cast<PointerType>(CE->getType())->getAddressSpace());
+ return isBytewiseValue(
+ ConstantExpr::getIntegerCast(CE->getOperand(0),
+ Type::getIntNTy(Ctx, PS), false),
+ DL);
+ }
+ }
+
auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
if (LHS == RHS)
return LHS;
@@ -3174,20 +3257,15 @@ Value *llvm::isBytewiseValue(Value *V) {
if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
Value *Val = UndefInt8;
for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
- if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I)))))
+ if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
return nullptr;
return Val;
}
- if (isa<ConstantVector>(C)) {
- Constant *Splat = cast<ConstantVector>(C)->getSplatValue();
- return Splat ? isBytewiseValue(Splat) : nullptr;
- }
-
- if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) {
+ if (isa<ConstantAggregate>(C)) {
Value *Val = UndefInt8;
for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
- if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I)))))
+ if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
return nullptr;
return Val;
}
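
For context, the "bytewise value" isBytewiseValue computes is the single byte
a memset could splat to materialize the constant; the new DataLayout parameter
is what lets it reason about pointer-sized constants such as inttoptr
expressions. The underlying byte-splat idea in miniature:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      // A constant whose bytes are all equal is memset-able: 0xABABABAB is
      // the byte 0xAB repeated four times, so an isBytewiseValue-style
      // analysis would report i8 0xAB for it.
      uint32_t Splat;
      std::memset(&Splat, 0xAB, sizeof(Splat));
      assert(Splat == 0xABABABABu);
      return 0;
    }
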
@@ -3363,57 +3441,6 @@ Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
return nullptr;
}
-/// Analyze the specified pointer to see if it can be expressed as a base
-/// pointer plus a constant offset. Return the base and offset to the caller.
-Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
- const DataLayout &DL) {
- unsigned BitWidth = DL.getIndexTypeSizeInBits(Ptr->getType());
- APInt ByteOffset(BitWidth, 0);
-
- // We walk up the defs but use a visited set to handle unreachable code. In
- // that case, we stop after accumulating the cycle once (not that it
- // matters).
- SmallPtrSet<Value *, 16> Visited;
- while (Visited.insert(Ptr).second) {
- if (Ptr->getType()->isVectorTy())
- break;
-
- if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
- // If one of the values we have visited is an addrspacecast, then
- // the pointer type of this GEP may be different from the type
- // of the Ptr parameter which was passed to this function. This
- // means when we construct GEPOffset, we need to use the size
- // of GEP's pointer type rather than the size of the original
- // pointer type.
- APInt GEPOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
- if (!GEP->accumulateConstantOffset(DL, GEPOffset))
- break;
-
- APInt OrigByteOffset(ByteOffset);
- ByteOffset += GEPOffset.sextOrTrunc(ByteOffset.getBitWidth());
- if (ByteOffset.getMinSignedBits() > 64) {
- // Stop traversal if the pointer offset wouldn't fit into int64_t
- // (this should be removed if Offset is updated to an APInt)
- ByteOffset = OrigByteOffset;
- break;
- }
-
- Ptr = GEP->getPointerOperand();
- } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
- Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
- Ptr = cast<Operator>(Ptr)->getOperand(0);
- } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
- if (GA->isInterposable())
- break;
- Ptr = GA->getAliasee();
- } else {
- break;
- }
- }
- Offset = ByteOffset.getSExtValue();
- return Ptr;
-}
-
bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
unsigned CharSize) {
// Make sure the GEP has exactly three arguments.
@@ -3638,7 +3665,9 @@ const Value *llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call) {
bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
const CallBase *Call) {
return Call->getIntrinsicID() == Intrinsic::launder_invariant_group ||
- Call->getIntrinsicID() == Intrinsic::strip_invariant_group;
+ Call->getIntrinsicID() == Intrinsic::strip_invariant_group ||
+ Call->getIntrinsicID() == Intrinsic::aarch64_irg ||
+ Call->getIntrinsicID() == Intrinsic::aarch64_tagp;
}
/// \p PN defines a loop-variant pointer to an object. Check if the
@@ -3717,26 +3746,27 @@ Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
return V;
}
-void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
+void llvm::GetUnderlyingObjects(const Value *V,
+ SmallVectorImpl<const Value *> &Objects,
const DataLayout &DL, LoopInfo *LI,
unsigned MaxLookup) {
- SmallPtrSet<Value *, 4> Visited;
- SmallVector<Value *, 4> Worklist;
+ SmallPtrSet<const Value *, 4> Visited;
+ SmallVector<const Value *, 4> Worklist;
Worklist.push_back(V);
do {
- Value *P = Worklist.pop_back_val();
+ const Value *P = Worklist.pop_back_val();
P = GetUnderlyingObject(P, DL, MaxLookup);
if (!Visited.insert(P).second)
continue;
- if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
+ if (auto *SI = dyn_cast<SelectInst>(P)) {
Worklist.push_back(SI->getTrueValue());
Worklist.push_back(SI->getFalseValue());
continue;
}
- if (PHINode *PN = dyn_cast<PHINode>(P)) {
+ if (auto *PN = dyn_cast<PHINode>(P)) {
// If this PHI changes the underlying object in every iteration of the
// loop, don't look through it. Consider:
// int **A;
@@ -3797,10 +3827,10 @@ bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
do {
V = Working.pop_back_val();
- SmallVector<Value *, 4> Objs;
- GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL);
+ SmallVector<const Value *, 4> Objs;
+ GetUnderlyingObjects(V, Objs, DL);
- for (Value *V : Objs) {
+ for (const Value *V : Objs) {
if (!Visited.insert(V).second)
continue;
if (Operator::getOpcode(V) == Instruction::IntToPtr) {
@@ -3888,7 +3918,8 @@ bool llvm::isSafeToSpeculativelyExecute(const Value *V,
return false;
const DataLayout &DL = LI->getModule()->getDataLayout();
return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
- LI->getAlignment(), DL, CtxI, DT);
+ LI->getType(), LI->getAlignment(),
+ DL, CtxI, DT);
}
case Instruction::Call: {
auto *CI = cast<const CallInst>(Inst);
@@ -3901,6 +3932,7 @@ bool llvm::isSafeToSpeculativelyExecute(const Value *V,
case Instruction::VAArg:
case Instruction::Alloca:
case Instruction::Invoke:
+ case Instruction::CallBr:
case Instruction::PHI:
case Instruction::Store:
case Instruction::Ret:
@@ -3926,51 +3958,46 @@ bool llvm::mayBeMemoryDependent(const Instruction &I) {
return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
}
+/// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
+static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
+ switch (OR) {
+ case ConstantRange::OverflowResult::MayOverflow:
+ return OverflowResult::MayOverflow;
+ case ConstantRange::OverflowResult::AlwaysOverflowsLow:
+ return OverflowResult::AlwaysOverflowsLow;
+ case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
+ return OverflowResult::AlwaysOverflowsHigh;
+ case ConstantRange::OverflowResult::NeverOverflows:
+ return OverflowResult::NeverOverflows;
+ }
+ llvm_unreachable("Unknown OverflowResult");
+}
+
+/// Combine constant ranges from computeConstantRange() and computeKnownBits().
+static ConstantRange computeConstantRangeIncludingKnownBits(
+ const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
+ AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
+ OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
+ KnownBits Known = computeKnownBits(
+ V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
+ ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
+ ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
+ ConstantRange::PreferredRangeType RangeType =
+ ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
+ return CR1.intersectWith(CR2, RangeType);
+}
+
OverflowResult llvm::computeOverflowForUnsignedMul(
const Value *LHS, const Value *RHS, const DataLayout &DL,
AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
bool UseInstrInfo) {
- // Multiplying n * m significant bits yields a result of n + m significant
- // bits. If the total number of significant bits does not exceed the
- // result bit width (minus 1), there is no overflow.
- // This means if we have enough leading zero bits in the operands
- // we can guarantee that the result does not overflow.
- // Ref: "Hacker's Delight" by Henry Warren
- unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
- KnownBits LHSKnown(BitWidth);
- KnownBits RHSKnown(BitWidth);
- computeKnownBits(LHS, LHSKnown, DL, /*Depth=*/0, AC, CxtI, DT, nullptr,
- UseInstrInfo);
- computeKnownBits(RHS, RHSKnown, DL, /*Depth=*/0, AC, CxtI, DT, nullptr,
- UseInstrInfo);
- // Note that underestimating the number of zero bits gives a more
- // conservative answer.
- unsigned ZeroBits = LHSKnown.countMinLeadingZeros() +
- RHSKnown.countMinLeadingZeros();
- // First handle the easy case: if we have enough zero bits there's
- // definitely no overflow.
- if (ZeroBits >= BitWidth)
- return OverflowResult::NeverOverflows;
-
- // Get the largest possible values for each operand.
- APInt LHSMax = ~LHSKnown.Zero;
- APInt RHSMax = ~RHSKnown.Zero;
-
- // We know the multiply operation doesn't overflow if the maximum values for
- // each operand will not overflow after we multiply them together.
- bool MaxOverflow;
- (void)LHSMax.umul_ov(RHSMax, MaxOverflow);
- if (!MaxOverflow)
- return OverflowResult::NeverOverflows;
-
- // We know it always overflows if multiplying the smallest possible values for
- // the operands also results in overflow.
- bool MinOverflow;
- (void)LHSKnown.One.umul_ov(RHSKnown.One, MinOverflow);
- if (MinOverflow)
- return OverflowResult::AlwaysOverflows;
-
- return OverflowResult::MayOverflow;
+ KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
+ nullptr, UseInstrInfo);
+ KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
+ nullptr, UseInstrInfo);
+ ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
+ ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
+ return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
}
OverflowResult
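
The rewritten computeOverflowForUnsignedMul delegates to
ConstantRange::unsignedMulMayOverflow, whose "never overflows" direction
reduces to multiplying the ranges' unsigned maxima. A miniature of that check
at 8 bits (neverOverflowsU8 is an illustrative helper, not an LLVM API):

    #include <cassert>

    // True iff no pair of values bounded by these maxima can exceed 8 bits.
    static bool neverOverflowsU8(unsigned LHSMax, unsigned RHSMax) {
      return LHSMax * RHSMax <= 0xFF;
    }

    int main() {
      // Known-bits "high nibble is zero" bounds both operands by 15.
      assert(neverOverflowsU8(15, 15));  // 15 * 15 = 225 always fits
      assert(!neverOverflowsU8(16, 16)); // 16 * 16 = 256 may overflow
      return 0;
    }
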
@@ -4020,69 +4047,13 @@ OverflowResult llvm::computeOverflowForUnsignedAdd(
const Value *LHS, const Value *RHS, const DataLayout &DL,
AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
bool UseInstrInfo) {
- KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
- nullptr, UseInstrInfo);
- if (LHSKnown.isNonNegative() || LHSKnown.isNegative()) {
- KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
- nullptr, UseInstrInfo);
-
- if (LHSKnown.isNegative() && RHSKnown.isNegative()) {
- // The sign bit is set in both cases: this MUST overflow.
- return OverflowResult::AlwaysOverflows;
- }
-
- if (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()) {
- // The sign bit is clear in both cases: this CANNOT overflow.
- return OverflowResult::NeverOverflows;
- }
- }
-
- return OverflowResult::MayOverflow;
-}
-
-/// Return true if we can prove that adding the two values of the
-/// knownbits will not overflow.
-/// Otherwise return false.
-static bool checkRippleForSignedAdd(const KnownBits &LHSKnown,
- const KnownBits &RHSKnown) {
- // Addition of two 2's complement numbers having opposite signs will never
- // overflow.
- if ((LHSKnown.isNegative() && RHSKnown.isNonNegative()) ||
- (LHSKnown.isNonNegative() && RHSKnown.isNegative()))
- return true;
-
- // If either of the values is known to be non-negative, adding them can only
- // overflow if the second is also non-negative, so we can assume that.
- // Two non-negative numbers will only overflow if there is a carry to the
- // sign bit, so we can check if even when the values are as big as possible
- // there is no overflow to the sign bit.
- if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) {
- APInt MaxLHS = ~LHSKnown.Zero;
- MaxLHS.clearSignBit();
- APInt MaxRHS = ~RHSKnown.Zero;
- MaxRHS.clearSignBit();
- APInt Result = std::move(MaxLHS) + std::move(MaxRHS);
- return Result.isSignBitClear();
- }
-
- // If either of the values is known to be negative, adding them can only
- // overflow if the second is also negative, so we can assume that.
- // Two negative number will only overflow if there is no carry to the sign
- // bit, so we can check if even when the values are as small as possible
- // there is overflow to the sign bit.
- if (LHSKnown.isNegative() || RHSKnown.isNegative()) {
- APInt MinLHS = LHSKnown.One;
- MinLHS.clearSignBit();
- APInt MinRHS = RHSKnown.One;
- MinRHS.clearSignBit();
- APInt Result = std::move(MinLHS) + std::move(MinRHS);
- return Result.isSignBitSet();
- }
-
- // If we reached here it means that we know nothing about the sign bits.
- // In this case we can't know if there will be an overflow, since by
- // changing the sign bits any two values can be made to overflow.
- return false;
+ ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
+ LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
+ nullptr, UseInstrInfo);
+ ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
+ RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
+ nullptr, UseInstrInfo);
+ return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
}
static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
@@ -4114,30 +4085,35 @@ static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
return OverflowResult::NeverOverflows;
- KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
- KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
-
- if (checkRippleForSignedAdd(LHSKnown, RHSKnown))
- return OverflowResult::NeverOverflows;
+ ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
+ LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
+ ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
+ RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
+ OverflowResult OR =
+ mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
+ if (OR != OverflowResult::MayOverflow)
+ return OR;
// The remaining code needs Add to be available. Early returns if not so.
if (!Add)
return OverflowResult::MayOverflow;
// If the sign of Add is the same as at least one of the operands, this add
- // CANNOT overflow. This is particularly useful when the sum is
- // @llvm.assume'ed non-negative rather than proved so from analyzing its
- // operands.
+ // CANNOT overflow. If this can be determined from the known bits of the
+ // operands the above signedAddMayOverflow() check will have already done so.
+ // The only other way to improve on the known bits is from an assumption, so
+ // call computeKnownBitsFromAssume() directly.
bool LHSOrRHSKnownNonNegative =
- (LHSKnown.isNonNegative() || RHSKnown.isNonNegative());
+ (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
bool LHSOrRHSKnownNegative =
- (LHSKnown.isNegative() || RHSKnown.isNegative());
+ (LHSRange.isAllNegative() || RHSRange.isAllNegative());
if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
- KnownBits AddKnown = computeKnownBits(Add, DL, /*Depth=*/0, AC, CxtI, DT);
+ KnownBits AddKnown(LHSRange.getBitWidth());
+ computeKnownBitsFromAssume(
+ Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
- (AddKnown.isNegative() && LHSOrRHSKnownNegative)) {
+ (AddKnown.isNegative() && LHSOrRHSKnownNegative))
return OverflowResult::NeverOverflows;
- }
}
return OverflowResult::MayOverflow;
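
The "sign of Add matches the sign of an operand" rule the comment describes
can be checked exhaustively at a small width: with one operand known
non-negative, a non-negative wrapped sum implies the add did not overflow. A
brute-force verification (8-bit width chosen to keep the search trivial):

    #include <cassert>

    int main() {
      for (int a = -128; a <= 127; ++a)
        for (int b = 0; b <= 127; ++b) {       // RHS known non-negative
          int w = ((a + b) % 256 + 256) % 256; // 8-bit wrapping add
          if (w > 127) w -= 256;               // reinterpret as signed i8
          if (w >= 0)                          // sum sign matches RHS sign
            assert(w == a + b);                // => no signed overflow
        }
      return 0;
    }
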
@@ -4149,20 +4125,11 @@ OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT) {
- KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
- if (LHSKnown.isNonNegative() || LHSKnown.isNegative()) {
- KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
-
- // If the LHS is negative and the RHS is non-negative, no unsigned wrap.
- if (LHSKnown.isNegative() && RHSKnown.isNonNegative())
- return OverflowResult::NeverOverflows;
-
- // If the LHS is non-negative and the RHS negative, we always wrap.
- if (LHSKnown.isNonNegative() && RHSKnown.isNegative())
- return OverflowResult::AlwaysOverflows;
- }
-
- return OverflowResult::MayOverflow;
+ ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
+ LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
+ ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
+ RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
+ return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
}
OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
@@ -4177,37 +4144,19 @@ OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
return OverflowResult::NeverOverflows;
- KnownBits LHSKnown = computeKnownBits(LHS, DL, 0, AC, CxtI, DT);
-
- KnownBits RHSKnown = computeKnownBits(RHS, DL, 0, AC, CxtI, DT);
-
- // Subtraction of two 2's complement numbers having identical signs will
- // never overflow.
- if ((LHSKnown.isNegative() && RHSKnown.isNegative()) ||
- (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()))
- return OverflowResult::NeverOverflows;
-
- // TODO: implement logic similar to checkRippleForAdd
- return OverflowResult::MayOverflow;
+ ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
+ LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
+ ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
+ RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
+ return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
}
-bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
+bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
const DominatorTree &DT) {
-#ifndef NDEBUG
- auto IID = II->getIntrinsicID();
- assert((IID == Intrinsic::sadd_with_overflow ||
- IID == Intrinsic::uadd_with_overflow ||
- IID == Intrinsic::ssub_with_overflow ||
- IID == Intrinsic::usub_with_overflow ||
- IID == Intrinsic::smul_with_overflow ||
- IID == Intrinsic::umul_with_overflow) &&
- "Not an overflow intrinsic!");
-#endif
-
SmallVector<const BranchInst *, 2> GuardingBranches;
SmallVector<const ExtractValueInst *, 2> Results;
- for (const User *U : II->users()) {
+ for (const User *U : WO->users()) {
if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
@@ -4307,6 +4256,11 @@ bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
if (!CS.doesNotThrow())
return false;
+ // A function that doesn't throw and has the "willreturn" attribute will
+ // always return.
+ if (CS.hasFnAttr(Attribute::WillReturn))
+ return true;
+
// Non-throwing call sites can loop infinitely, call exit/pthread_exit
// etc. and thus not return. However, LLVM already assumes that
//
@@ -4325,7 +4279,8 @@ bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
// is guaranteed to return.
return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
match(I, m_Intrinsic<Intrinsic::assume>()) ||
- match(I, m_Intrinsic<Intrinsic::sideeffect>());
+ match(I, m_Intrinsic<Intrinsic::sideeffect>()) ||
+ match(I, m_Intrinsic<Intrinsic::experimental_widenable_condition>());
}
// Other instructions return normally.
@@ -4333,7 +4288,7 @@ bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
}
bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
- // TODO: This is slightly consdervative for invoke instruction since exiting
+ // TODO: This is slightly conservative for invoke instruction since exiting
// via an exception *is* normal control for them.
for (auto I = BB->begin(), E = BB->end(); I != E; ++I)
if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
@@ -4357,6 +4312,8 @@ bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
}
bool llvm::propagatesFullPoison(const Instruction *I) {
+ // TODO: This should include all instructions apart from phis, selects and
+ // call-like instructions.
switch (I->getOpcode()) {
case Instruction::Add:
case Instruction::Sub:
@@ -4409,10 +4366,21 @@ const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
return I->getOperand(1);
default:
+ // Note: It's really tempting to think that a conditional branch or
+ // switch should be listed here, but that's incorrect. It's not the
+ // branch on poison that is UB; it is executing a side-effecting
+ // instruction that follows the branch.
return nullptr;
}
}
+bool llvm::mustTriggerUB(const Instruction *I,
+ const SmallSet<const Value *, 16>& KnownPoison) {
+ auto *NotPoison = getGuaranteedNonFullPoisonOp(I);
+ return (NotPoison && KnownPoison.count(NotPoison));
+}
+
+
bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) {
// We currently only look for uses of poison values within the same basic
// block, as that makes it easier to guarantee that the uses will be
@@ -4436,8 +4404,7 @@ bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) {
while (Iter++ < MaxDepth) {
for (auto &I : make_range(Begin, End)) {
if (&I != PoisonI) {
- const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I);
- if (NotPoison != nullptr && YieldsPoison.count(NotPoison))
+ if (mustTriggerUB(&I, YieldsPoison))
return true;
if (!isGuaranteedToTransferExecutionToSuccessor(&I))
return false;
@@ -4926,6 +4893,10 @@ static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
return {SPF_ABS, SPNB_NA, false};
+ // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
+ if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
+ return {SPF_ABS, SPNB_NA, false};
+
// (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
// (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
@@ -5084,11 +5055,19 @@ SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
+ Value *TrueVal = SI->getTrueValue();
+ Value *FalseVal = SI->getFalseValue();
+
+ return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
+ CastOp, Depth);
+}
+
+SelectPatternResult llvm::matchDecomposedSelectPattern(
+ CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
+ Instruction::CastOps *CastOp, unsigned Depth) {
CmpInst::Predicate Pred = CmpI->getPredicate();
Value *CmpLHS = CmpI->getOperand(0);
Value *CmpRHS = CmpI->getOperand(1);
- Value *TrueVal = SI->getTrueValue();
- Value *FalseVal = SI->getFalseValue();
FastMathFlags FMF;
if (isa<FPMathOperator>(CmpI))
FMF = CmpI->getFastMathFlags();
@@ -5430,3 +5409,298 @@ Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
bool CondIsTrue = TrueBB == ContextBB;
return isImpliedCondition(PredCond, Cond, DL, CondIsTrue);
}
+
+static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
+ APInt &Upper, const InstrInfoQuery &IIQ) {
+ unsigned Width = Lower.getBitWidth();
+ const APInt *C;
+ switch (BO.getOpcode()) {
+ case Instruction::Add:
+ if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
+ // FIXME: If we have both nuw and nsw, we should reduce the range further.
+ if (IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
+ // 'add nuw x, C' produces [C, UINT_MAX].
+ Lower = *C;
+ } else if (IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
+ if (C->isNegative()) {
+ // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
+ Lower = APInt::getSignedMinValue(Width);
+ Upper = APInt::getSignedMaxValue(Width) + *C + 1;
+ } else {
+ // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
+ Lower = APInt::getSignedMinValue(Width) + *C;
+ Upper = APInt::getSignedMaxValue(Width) + 1;
+ }
+ }
+ }
+ break;
+
+ case Instruction::And:
+ if (match(BO.getOperand(1), m_APInt(C)))
+ // 'and x, C' produces [0, C].
+ Upper = *C + 1;
+ break;
+
+ case Instruction::Or:
+ if (match(BO.getOperand(1), m_APInt(C)))
+ // 'or x, C' produces [C, UINT_MAX].
+ Lower = *C;
+ break;
+
+ case Instruction::AShr:
+ if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
+ // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
+ Lower = APInt::getSignedMinValue(Width).ashr(*C);
+ Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
+ } else if (match(BO.getOperand(0), m_APInt(C))) {
+ unsigned ShiftAmount = Width - 1;
+ if (!C->isNullValue() && IIQ.isExact(&BO))
+ ShiftAmount = C->countTrailingZeros();
+ if (C->isNegative()) {
+ // 'ashr C, x' produces [C, C >> (Width-1)]
+ Lower = *C;
+ Upper = C->ashr(ShiftAmount) + 1;
+ } else {
+ // 'ashr C, x' produces [C >> (Width-1), C]
+ Lower = C->ashr(ShiftAmount);
+ Upper = *C + 1;
+ }
+ }
+ break;
+
+ case Instruction::LShr:
+ if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
+ // 'lshr x, C' produces [0, UINT_MAX >> C].
+ Upper = APInt::getAllOnesValue(Width).lshr(*C) + 1;
+ } else if (match(BO.getOperand(0), m_APInt(C))) {
+ // 'lshr C, x' produces [C >> (Width-1), C].
+ unsigned ShiftAmount = Width - 1;
+ if (!C->isNullValue() && IIQ.isExact(&BO))
+ ShiftAmount = C->countTrailingZeros();
+ Lower = C->lshr(ShiftAmount);
+ Upper = *C + 1;
+ }
+ break;
+
+ case Instruction::Shl:
+ if (match(BO.getOperand(0), m_APInt(C))) {
+ if (IIQ.hasNoUnsignedWrap(&BO)) {
+ // 'shl nuw C, x' produces [C, C << CLZ(C)]
+ Lower = *C;
+ Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
+ } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
+ if (C->isNegative()) {
+ // 'shl nsw C, x' produces [C << CLO(C)-1, C]
+ unsigned ShiftAmount = C->countLeadingOnes() - 1;
+ Lower = C->shl(ShiftAmount);
+ Upper = *C + 1;
+ } else {
+ // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
+ unsigned ShiftAmount = C->countLeadingZeros() - 1;
+ Lower = *C;
+ Upper = C->shl(ShiftAmount) + 1;
+ }
+ }
+ }
+ break;
+
+ case Instruction::SDiv:
+ if (match(BO.getOperand(1), m_APInt(C))) {
+ APInt IntMin = APInt::getSignedMinValue(Width);
+ APInt IntMax = APInt::getSignedMaxValue(Width);
+ if (C->isAllOnesValue()) {
+ // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
+ Lower = IntMin + 1;
+ Upper = IntMax + 1;
+ } else if (C->countLeadingZeros() < Width - 1) {
+ // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
+ // where C != -1 and C != 0 and C != 1
+ Lower = IntMin.sdiv(*C);
+ Upper = IntMax.sdiv(*C);
+ if (Lower.sgt(Upper))
+ std::swap(Lower, Upper);
+ Upper = Upper + 1;
+ assert(Upper != Lower && "Upper part of range has wrapped!");
+ }
+ } else if (match(BO.getOperand(0), m_APInt(C))) {
+ if (C->isMinSignedValue()) {
+ // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
+ Lower = *C;
+ Upper = Lower.lshr(1) + 1;
+ } else {
+ // 'sdiv C, x' produces [-|C|, |C|].
+ Upper = C->abs() + 1;
+ Lower = (-Upper) + 1;
+ }
+ }
+ break;
+
+ case Instruction::UDiv:
+ if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
+ // 'udiv x, C' produces [0, UINT_MAX / C].
+ Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
+ } else if (match(BO.getOperand(0), m_APInt(C))) {
+ // 'udiv C, x' produces [0, C].
+ Upper = *C + 1;
+ }
+ break;
+
+ case Instruction::SRem:
+ if (match(BO.getOperand(1), m_APInt(C))) {
+ // 'srem x, C' produces (-|C|, |C|).
+ Upper = C->abs();
+ Lower = (-Upper) + 1;
+ }
+ break;
+
+ case Instruction::URem:
+ if (match(BO.getOperand(1), m_APInt(C)))
+ // 'urem x, C' produces [0, C).
+ Upper = *C;
+ break;
+
+ default:
+ break;
+ }
+}
+
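
Spot-checking one of the rules above: 'add nuw x, C' produces [C, UINT_MAX],
because a sum that cannot wrap is at least as large as either addend. An
exhaustive 8-bit check (C = 12 is arbitrary):

    #include <cassert>
    #include <cstdint>

    int main() {
      const unsigned C = 12;
      for (unsigned x = 0; x + C <= 0xFF; ++x) { // nuw: the add never wraps
        uint8_t r = uint8_t(x + C);
        assert(r >= C); // the result lies in [C, UINT8_MAX]
      }
      return 0;
    }
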
+static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower,
+ APInt &Upper) {
+ unsigned Width = Lower.getBitWidth();
+ const APInt *C;
+ switch (II.getIntrinsicID()) {
+ case Intrinsic::uadd_sat:
+ // uadd.sat(x, C) produces [C, UINT_MAX].
+ if (match(II.getOperand(0), m_APInt(C)) ||
+ match(II.getOperand(1), m_APInt(C)))
+ Lower = *C;
+ break;
+ case Intrinsic::sadd_sat:
+ if (match(II.getOperand(0), m_APInt(C)) ||
+ match(II.getOperand(1), m_APInt(C))) {
+ if (C->isNegative()) {
+ // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
+ Lower = APInt::getSignedMinValue(Width);
+ Upper = APInt::getSignedMaxValue(Width) + *C + 1;
+ } else {
+ // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
+ Lower = APInt::getSignedMinValue(Width) + *C;
+ Upper = APInt::getSignedMaxValue(Width) + 1;
+ }
+ }
+ break;
+ case Intrinsic::usub_sat:
+ // usub.sat(C, x) produces [0, C].
+ if (match(II.getOperand(0), m_APInt(C)))
+ Upper = *C + 1;
+ // usub.sat(x, C) produces [0, UINT_MAX - C].
+ else if (match(II.getOperand(1), m_APInt(C)))
+ Upper = APInt::getMaxValue(Width) - *C + 1;
+ break;
+ case Intrinsic::ssub_sat:
+ if (match(II.getOperand(0), m_APInt(C))) {
+ if (C->isNegative()) {
+ // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
+ Lower = APInt::getSignedMinValue(Width);
+ Upper = *C - APInt::getSignedMinValue(Width) + 1;
+ } else {
+ // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
+ Lower = *C - APInt::getSignedMaxValue(Width);
+ Upper = APInt::getSignedMaxValue(Width) + 1;
+ }
+ } else if (match(II.getOperand(1), m_APInt(C))) {
+ if (C->isNegative()) {
+ // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX].
+ Lower = APInt::getSignedMinValue(Width) - *C;
+ Upper = APInt::getSignedMaxValue(Width) + 1;
+ } else {
+ // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
+ Lower = APInt::getSignedMinValue(Width);
+ Upper = APInt::getSignedMaxValue(Width) - *C + 1;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
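
Likewise for the saturating cases: usub.sat(x, C) clamps at zero and can
never exceed UINT_MAX - C. An 8-bit brute-force check (usub_sat is a local
stand-in for the intrinsic; C = 40 is arbitrary):

    #include <cassert>
    #include <cstdint>

    static uint8_t usub_sat(uint8_t a, uint8_t b) { // stand-in for llvm.usub.sat.i8
      return a > b ? a - b : 0;
    }

    int main() {
      const uint8_t C = 40;
      for (unsigned x = 0; x <= 0xFF; ++x)
        assert(usub_sat(uint8_t(x), C) <= 0xFF - C); // range is [0, 255 - 40]
      return 0;
    }
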
+static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
+ APInt &Upper) {
+ const Value *LHS, *RHS;
+ SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
+ if (R.Flavor == SPF_UNKNOWN)
+ return;
+
+ unsigned BitWidth = SI.getType()->getScalarSizeInBits();
+
+ if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
+ // If the negation part of the abs (in RHS) has the NSW flag,
+ // then the result of abs(X) is [0..SIGNED_MAX],
+ // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
+ Lower = APInt::getNullValue(BitWidth);
+ if (cast<Instruction>(RHS)->hasNoSignedWrap())
+ Upper = APInt::getSignedMaxValue(BitWidth) + 1;
+ else
+ Upper = APInt::getSignedMinValue(BitWidth) + 1;
+ return;
+ }
+
+ if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
+ // The result of -abs(X) is <= 0.
+ Lower = APInt::getSignedMinValue(BitWidth);
+ Upper = APInt(BitWidth, 1);
+ return;
+ }
+
+ const APInt *C;
+ if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
+ return;
+
+ switch (R.Flavor) {
+ case SPF_UMIN:
+ Upper = *C + 1;
+ break;
+ case SPF_UMAX:
+ Lower = *C;
+ break;
+ case SPF_SMIN:
+ Lower = APInt::getSignedMinValue(BitWidth);
+ Upper = *C + 1;
+ break;
+ case SPF_SMAX:
+ Lower = *C;
+ Upper = APInt::getSignedMaxValue(BitWidth) + 1;
+ break;
+ default:
+ break;
+ }
+}
+
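
The abs range logic above is subtle: without nsw on the negation, abs can
still produce SIGNED_MIN, because negating SIGNED_MIN wraps back to itself.
Demonstrated at 8 bits:

    #include <cassert>
    #include <cstdint>

    int main() {
      int8_t X = INT8_MIN;
      int8_t Neg = int8_t(0 - uint8_t(X)); // wrapping negation (no nsw)
      assert(Neg == INT8_MIN);             // abs(-128) == -128 in 8 bits,
      return 0;                            // so the range must reach SIGNED_MIN
    }
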
+ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo) {
+ assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");
+
+ const APInt *C;
+ if (match(V, m_APInt(C)))
+ return ConstantRange(*C);
+
+ InstrInfoQuery IIQ(UseInstrInfo);
+ unsigned BitWidth = V->getType()->getScalarSizeInBits();
+ APInt Lower = APInt(BitWidth, 0);
+ APInt Upper = APInt(BitWidth, 0);
+ if (auto *BO = dyn_cast<BinaryOperator>(V))
+ setLimitsForBinOp(*BO, Lower, Upper, IIQ);
+ else if (auto *II = dyn_cast<IntrinsicInst>(V))
+ setLimitsForIntrinsic(*II, Lower, Upper);
+ else if (auto *SI = dyn_cast<SelectInst>(V))
+ setLimitsForSelectPattern(*SI, Lower, Upper);
+
+ ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper);
+
+ if (auto *I = dyn_cast<Instruction>(V))
+ if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
+ CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));
+
+ return CR;
+}