author     Dimitry Andric <dim@FreeBSD.org>    2024-07-27 23:34:35 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2024-10-23 18:26:01 +0000
commit     0fca6ea1d4eea4c934cfff25ac9ee8ad6fe95583 (patch)
tree       6cf5ab1f05330c6773b1f3f64799d56a9c7a1faa /contrib/llvm-project/llvm/lib/Analysis/InstructionSimplify.cpp
parent     6b9f7133aba44189d9625c352bc2c2a59baf18ef (diff)
parent     ac9a064cb179f3425b310fa2847f8764ac970a4d (diff)
Diffstat (limited to 'contrib/llvm-project/llvm/lib/Analysis/InstructionSimplify.cpp')
-rw-r--r--  contrib/llvm-project/llvm/lib/Analysis/InstructionSimplify.cpp  432
1 file changed, 262 insertions(+), 170 deletions(-)
diff --git a/contrib/llvm-project/llvm/lib/Analysis/InstructionSimplify.cpp b/contrib/llvm-project/llvm/lib/Analysis/InstructionSimplify.cpp
index 8dcffe45c644..3a7ae577bb06 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -39,6 +39,7 @@
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
+#include "llvm/IR/Statepoint.h"
#include "llvm/Support/KnownBits.h"
#include <algorithm>
#include <optional>
@@ -70,8 +71,8 @@ static Value *simplifyXorInst(Value *, Value *, const SimplifyQuery &,
unsigned);
static Value *simplifyCastInst(unsigned, Value *, Type *, const SimplifyQuery &,
unsigned);
-static Value *simplifyGEPInst(Type *, Value *, ArrayRef<Value *>, bool,
- const SimplifyQuery &, unsigned);
+static Value *simplifyGEPInst(Type *, Value *, ArrayRef<Value *>,
+ GEPNoWrapFlags, const SimplifyQuery &, unsigned);
static Value *simplifySelectInst(Value *, Value *, Value *,
const SimplifyQuery &, unsigned);
static Value *simplifyInstructionWithOperands(Instruction *I,
@@ -1225,13 +1226,29 @@ static Value *simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
return V;
// (X << Y) % X -> 0
- if (Q.IIQ.UseInstrInfo &&
- ((Opcode == Instruction::SRem &&
- match(Op0, m_NSWShl(m_Specific(Op1), m_Value()))) ||
- (Opcode == Instruction::URem &&
- match(Op0, m_NUWShl(m_Specific(Op1), m_Value())))))
- return Constant::getNullValue(Op0->getType());
+ if (Q.IIQ.UseInstrInfo) {
+ if ((Opcode == Instruction::SRem &&
+ match(Op0, m_NSWShl(m_Specific(Op1), m_Value()))) ||
+ (Opcode == Instruction::URem &&
+ match(Op0, m_NUWShl(m_Specific(Op1), m_Value()))))
+ return Constant::getNullValue(Op0->getType());
+ const APInt *C0;
+ if (match(Op1, m_APInt(C0))) {
+ // (srem (mul nsw X, C1), C0) -> 0 if C1 s% C0 == 0
+ // (urem (mul nuw X, C1), C0) -> 0 if C1 u% C0 == 0
+ if (Opcode == Instruction::SRem
+ ? match(Op0,
+ m_NSWMul(m_Value(), m_CheckedInt([C0](const APInt &C) {
+ return C.srem(*C0).isZero();
+ })))
+ : match(Op0,
+ m_NUWMul(m_Value(), m_CheckedInt([C0](const APInt &C) {
+ return C.urem(*C0).isZero();
+ }))))
+ return Constant::getNullValue(Op0->getType());
+ }
+ }
return nullptr;
}
@@ -1512,7 +1529,7 @@ static Value *simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact,
// -1 >>a X --> -1
// (-1 << X) a>> X --> -1
- // Do not return Op0 because it may contain undef elements if it's a vector.
+ // We could return the original -1 constant to preserve poison elements.
if (match(Op0, m_AllOnes()) ||
match(Op0, m_Shl(m_AllOnes(), m_Specific(Op1))))
return Constant::getAllOnesValue(Op0->getType());
@@ -1585,12 +1602,10 @@ static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
if (match(UnsignedICmp,
m_c_ICmp(UnsignedPred, m_Specific(Y), m_Specific(A)))) {
if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd &&
- EqPred == ICmpInst::ICMP_NE &&
- isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
+ EqPred == ICmpInst::ICMP_NE && isKnownNonZero(B, Q))
return UnsignedICmp;
if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd &&
- EqPred == ICmpInst::ICMP_EQ &&
- isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
+ EqPred == ICmpInst::ICMP_EQ && isKnownNonZero(B, Q))
return UnsignedICmp;
}
}
@@ -1608,13 +1623,13 @@ static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
// X > Y && Y == 0 --> Y == 0 iff X != 0
// X > Y || Y == 0 --> X > Y iff X != 0
if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
- isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
+ isKnownNonZero(X, Q))
return IsAnd ? ZeroICmp : UnsignedICmp;
// X <= Y && Y != 0 --> X <= Y iff X != 0
// X <= Y || Y != 0 --> Y != 0 iff X != 0
if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
- isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
+ isKnownNonZero(X, Q))
return IsAnd ? UnsignedICmp : ZeroICmp;
// The transforms below here are expected to be handled more generally with
@@ -1853,39 +1868,31 @@ static Value *simplifyAndOrOfFCmps(const SimplifyQuery &Q, FCmpInst *LHS,
if (LHS0->getType() != RHS0->getType())
return nullptr;
- const DataLayout &DL = Q.DL;
- const TargetLibraryInfo *TLI = Q.TLI;
-
FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
- if ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) ||
- (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO && !IsAnd)) {
- // (fcmp ord NNAN, X) & (fcmp ord X, Y) --> fcmp ord X, Y
- // (fcmp ord NNAN, X) & (fcmp ord Y, X) --> fcmp ord Y, X
- // (fcmp ord X, NNAN) & (fcmp ord X, Y) --> fcmp ord X, Y
- // (fcmp ord X, NNAN) & (fcmp ord Y, X) --> fcmp ord Y, X
- // (fcmp uno NNAN, X) | (fcmp uno X, Y) --> fcmp uno X, Y
- // (fcmp uno NNAN, X) | (fcmp uno Y, X) --> fcmp uno Y, X
- // (fcmp uno X, NNAN) | (fcmp uno X, Y) --> fcmp uno X, Y
- // (fcmp uno X, NNAN) | (fcmp uno Y, X) --> fcmp uno Y, X
- if (((LHS1 == RHS0 || LHS1 == RHS1) &&
- isKnownNeverNaN(LHS0, DL, TLI, 0, Q.AC, Q.CxtI, Q.DT)) ||
- ((LHS0 == RHS0 || LHS0 == RHS1) &&
- isKnownNeverNaN(LHS1, DL, TLI, 0, Q.AC, Q.CxtI, Q.DT)))
- return RHS;
-
- // (fcmp ord X, Y) & (fcmp ord NNAN, X) --> fcmp ord X, Y
- // (fcmp ord Y, X) & (fcmp ord NNAN, X) --> fcmp ord Y, X
- // (fcmp ord X, Y) & (fcmp ord X, NNAN) --> fcmp ord X, Y
- // (fcmp ord Y, X) & (fcmp ord X, NNAN) --> fcmp ord Y, X
- // (fcmp uno X, Y) | (fcmp uno NNAN, X) --> fcmp uno X, Y
- // (fcmp uno Y, X) | (fcmp uno NNAN, X) --> fcmp uno Y, X
- // (fcmp uno X, Y) | (fcmp uno X, NNAN) --> fcmp uno X, Y
- // (fcmp uno Y, X) | (fcmp uno X, NNAN) --> fcmp uno Y, X
- if (((RHS1 == LHS0 || RHS1 == LHS1) &&
- isKnownNeverNaN(RHS0, DL, TLI, 0, Q.AC, Q.CxtI, Q.DT)) ||
- ((RHS0 == LHS0 || RHS0 == LHS1) &&
- isKnownNeverNaN(RHS1, DL, TLI, 0, Q.AC, Q.CxtI, Q.DT)))
- return LHS;
+ if ((PredL == FCmpInst::FCMP_ORD || PredL == FCmpInst::FCMP_UNO) &&
+ ((FCmpInst::isOrdered(PredR) && IsAnd) ||
+ (FCmpInst::isUnordered(PredR) && !IsAnd))) {
+ // (fcmp ord X, 0) & (fcmp o** X, Y) --> fcmp o** X, Y
+ // (fcmp uno X, 0) & (fcmp o** X, Y) --> false
+ // (fcmp uno X, 0) | (fcmp u** X, Y) --> fcmp u** X, Y
+ // (fcmp ord X, 0) | (fcmp u** X, Y) --> true
+ if ((LHS0 == RHS0 || LHS0 == RHS1) && match(LHS1, m_PosZeroFP()))
+ return FCmpInst::isOrdered(PredL) == FCmpInst::isOrdered(PredR)
+ ? static_cast<Value *>(RHS)
+ : ConstantInt::getBool(LHS->getType(), !IsAnd);
+ }
+
+ if ((PredR == FCmpInst::FCMP_ORD || PredR == FCmpInst::FCMP_UNO) &&
+ ((FCmpInst::isOrdered(PredL) && IsAnd) ||
+ (FCmpInst::isUnordered(PredL) && !IsAnd))) {
+ // (fcmp o** X, Y) & (fcmp ord X, 0) --> fcmp o** X, Y
+ // (fcmp o** X, Y) & (fcmp uno X, 0) --> false
+ // (fcmp u** X, Y) | (fcmp uno X, 0) --> fcmp u** X, Y
+ // (fcmp u** X, Y) | (fcmp ord X, 0) --> true
+ if ((RHS0 == LHS0 || RHS0 == LHS1) && match(RHS1, m_PosZeroFP()))
+ return FCmpInst::isOrdered(PredL) == FCmpInst::isOrdered(PredR)
+ ? static_cast<Value *>(LHS)
+ : ConstantInt::getBool(LHS->getType(), !IsAnd);
}
return nullptr;
@@ -1968,13 +1975,16 @@ static Value *simplifyAndOrWithICmpEq(unsigned Opcode, Value *Op0, Value *Op1,
return nullptr;
};
- if (Value *Res =
- simplifyWithOpReplaced(Op1, A, B, Q, /* AllowRefinement */ true,
- /* DropFlags */ nullptr, MaxRecurse))
+ // In the final case (Res == Absorber with inverted predicate), it is safe to
+ // refine poison during simplification, but not undef. For simplicity always
+ // disable undef-based folds here.
+ if (Value *Res = simplifyWithOpReplaced(Op1, A, B, Q.getWithoutUndef(),
+ /* AllowRefinement */ true,
+ /* DropFlags */ nullptr, MaxRecurse))
return Simplify(Res);
- if (Value *Res =
- simplifyWithOpReplaced(Op1, B, A, Q, /* AllowRefinement */ true,
- /* DropFlags */ nullptr, MaxRecurse))
+ if (Value *Res = simplifyWithOpReplaced(Op1, B, A, Q.getWithoutUndef(),
+ /* AllowRefinement */ true,
+ /* DropFlags */ nullptr, MaxRecurse))
return Simplify(Res);
return nullptr;
@@ -2019,7 +2029,7 @@ static Value *simplifyAndCommutative(Value *Op0, Value *Op1,
// (X | ~Y) & (X | Y) --> X
Value *X, *Y;
if (match(Op0, m_c_Or(m_Value(X), m_Not(m_Value(Y)))) &&
- match(Op1, m_c_Or(m_Deferred(X), m_Deferred(Y))))
+ match(Op1, m_c_Or(m_Specific(X), m_Specific(Y))))
return X;
// If we have a multiplication overflow check that is being 'and'ed with a
@@ -2284,7 +2294,7 @@ static Value *simplifyOrLogic(Value *X, Value *Y) {
// (B ^ ~A) | (A & B) --> B ^ ~A
// (~A ^ B) | (B & A) --> ~A ^ B
// (B ^ ~A) | (B & A) --> B ^ ~A
- if (match(X, m_c_Xor(m_NotForbidUndef(m_Value(A)), m_Value(B))) &&
+ if (match(X, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
match(Y, m_c_And(m_Specific(A), m_Specific(B))))
return X;
@@ -2301,31 +2311,29 @@ static Value *simplifyOrLogic(Value *X, Value *Y) {
// (B & ~A) | ~(A | B) --> ~A
// (B & ~A) | ~(B | A) --> ~A
Value *NotA;
- if (match(X,
- m_c_And(m_CombineAnd(m_Value(NotA), m_NotForbidUndef(m_Value(A))),
- m_Value(B))) &&
+ if (match(X, m_c_And(m_CombineAnd(m_Value(NotA), m_Not(m_Value(A))),
+ m_Value(B))) &&
match(Y, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
return NotA;
// The same is true of Logical And
// TODO: This could share the logic of the version above if there was a
// version of LogicalAnd that allowed more than just i1 types.
- if (match(X, m_c_LogicalAnd(
- m_CombineAnd(m_Value(NotA), m_NotForbidUndef(m_Value(A))),
- m_Value(B))) &&
+ if (match(X, m_c_LogicalAnd(m_CombineAnd(m_Value(NotA), m_Not(m_Value(A))),
+ m_Value(B))) &&
match(Y, m_Not(m_c_LogicalOr(m_Specific(A), m_Specific(B)))))
return NotA;
// ~(A ^ B) | (A & B) --> ~(A ^ B)
// ~(A ^ B) | (B & A) --> ~(A ^ B)
Value *NotAB;
- if (match(X, m_CombineAnd(m_NotForbidUndef(m_Xor(m_Value(A), m_Value(B))),
+ if (match(X, m_CombineAnd(m_Not(m_Xor(m_Value(A), m_Value(B))),
m_Value(NotAB))) &&
match(Y, m_c_And(m_Specific(A), m_Specific(B))))
return NotAB;
// ~(A & B) | (A ^ B) --> ~(A & B)
// ~(A & B) | (B ^ A) --> ~(A & B)
- if (match(X, m_CombineAnd(m_NotForbidUndef(m_And(m_Value(A), m_Value(B))),
+ if (match(X, m_CombineAnd(m_Not(m_And(m_Value(A), m_Value(B))),
m_Value(NotAB))) &&
match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
return NotAB;
@@ -2555,9 +2563,8 @@ static Value *simplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
// The 'not' op must contain a complete -1 operand (no undef elements for
// vector) for the transform to be safe.
Value *NotA;
- if (match(X,
- m_c_Or(m_CombineAnd(m_NotForbidUndef(m_Value(A)), m_Value(NotA)),
- m_Value(B))) &&
+ if (match(X, m_c_Or(m_CombineAnd(m_Not(m_Value(A)), m_Value(NotA)),
+ m_Value(B))) &&
match(Y, m_c_And(m_Specific(A), m_Specific(B))))
return NotA;
@@ -2716,8 +2723,6 @@ static Constant *computePointerICmp(CmpInst::Predicate Pred, Value *LHS,
assert(LHS->getType() == RHS->getType() && "Must have same types");
const DataLayout &DL = Q.DL;
const TargetLibraryInfo *TLI = Q.TLI;
- const DominatorTree *DT = Q.DT;
- const Instruction *CxtI = Q.CxtI;
// We can only fold certain predicates on pointer comparisons.
switch (Pred) {
@@ -2822,11 +2827,9 @@ static Constant *computePointerICmp(CmpInst::Predicate Pred, Value *LHS,
// the other operand can not be based on the alloc - if it were, then
// the cmp itself would be a capture.
Value *MI = nullptr;
- if (isAllocLikeFn(LHS, TLI) &&
- llvm::isKnownNonZero(RHS, DL, 0, nullptr, CxtI, DT))
+ if (isAllocLikeFn(LHS, TLI) && llvm::isKnownNonZero(RHS, Q))
MI = LHS;
- else if (isAllocLikeFn(RHS, TLI) &&
- llvm::isKnownNonZero(LHS, DL, 0, nullptr, CxtI, DT))
+ else if (isAllocLikeFn(RHS, TLI) && llvm::isKnownNonZero(LHS, Q))
MI = RHS;
if (MI) {
// FIXME: This is incorrect, see PR54002. While we can assume that the
@@ -2982,12 +2985,12 @@ static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
return getTrue(ITy);
case ICmpInst::ICMP_EQ:
case ICmpInst::ICMP_ULE:
- if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
+ if (isKnownNonZero(LHS, Q))
return getFalse(ITy);
break;
case ICmpInst::ICMP_NE:
case ICmpInst::ICMP_UGT:
- if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
+ if (isKnownNonZero(LHS, Q))
return getTrue(ITy);
break;
case ICmpInst::ICMP_SLT: {
@@ -3002,8 +3005,7 @@ static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
KnownBits LHSKnown = computeKnownBits(LHS, /* Depth */ 0, Q);
if (LHSKnown.isNegative())
return getTrue(ITy);
- if (LHSKnown.isNonNegative() &&
- isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
+ if (LHSKnown.isNonNegative() && isKnownNonZero(LHS, Q))
return getFalse(ITy);
break;
}
@@ -3019,8 +3021,7 @@ static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
KnownBits LHSKnown = computeKnownBits(LHS, /* Depth */ 0, Q);
if (LHSKnown.isNegative())
return getFalse(ITy);
- if (LHSKnown.isNonNegative() &&
- isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
+ if (LHSKnown.isNonNegative() && isKnownNonZero(LHS, Q))
return getTrue(ITy);
break;
}
@@ -3034,21 +3035,20 @@ static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS,
Type *ITy = getCompareTy(RHS); // The return type.
Value *X;
+ const APInt *C;
+ if (!match(RHS, m_APIntAllowPoison(C)))
+ return nullptr;
+
// Sign-bit checks can be optimized to true/false after unsigned
// floating-point casts:
// icmp slt (bitcast (uitofp X)), 0 --> false
// icmp sgt (bitcast (uitofp X)), -1 --> true
- if (match(LHS, m_BitCast(m_UIToFP(m_Value(X))))) {
- if (Pred == ICmpInst::ICMP_SLT && match(RHS, m_Zero()))
- return ConstantInt::getFalse(ITy);
- if (Pred == ICmpInst::ICMP_SGT && match(RHS, m_AllOnes()))
- return ConstantInt::getTrue(ITy);
+ if (match(LHS, m_ElementWiseBitCast(m_UIToFP(m_Value(X))))) {
+ bool TrueIfSigned;
+ if (isSignBitCheck(Pred, *C, TrueIfSigned))
+ return ConstantInt::getBool(ITy, !TrueIfSigned);
}
- const APInt *C;
- if (!match(RHS, m_APIntAllowUndef(C)))
- return nullptr;
-
// Rule out tautological comparisons (eg., ult 0 or uge 0).
ConstantRange RHS_CR = ConstantRange::makeExactICmpRegion(Pred, *C);
if (RHS_CR.isEmptySet())
@@ -3069,9 +3069,9 @@ static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS,
// (mul nuw/nsw X, MulC) == C --> false (if C is not a multiple of MulC)
const APInt *MulC;
if (IIQ.UseInstrInfo && ICmpInst::isEquality(Pred) &&
- ((match(LHS, m_NUWMul(m_Value(), m_APIntAllowUndef(MulC))) &&
+ ((match(LHS, m_NUWMul(m_Value(), m_APIntAllowPoison(MulC))) &&
*MulC != 0 && C->urem(*MulC) != 0) ||
- (match(LHS, m_NSWMul(m_Value(), m_APIntAllowUndef(MulC))) &&
+ (match(LHS, m_NSWMul(m_Value(), m_APIntAllowPoison(MulC))) &&
*MulC != 0 && C->srem(*MulC) != 0)))
return ConstantInt::get(ITy, Pred == ICmpInst::ICMP_NE);
@@ -3174,7 +3174,7 @@ static Value *simplifyICmpWithBinOpOnLHS(CmpInst::Predicate Pred,
const APInt *C;
if ((match(LBO, m_LShr(m_Specific(RHS), m_APInt(C))) && *C != 0) ||
(match(LBO, m_UDiv(m_Specific(RHS), m_APInt(C))) && *C != 1)) {
- if (isKnownNonZero(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) {
+ if (isKnownNonZero(RHS, Q)) {
switch (Pred) {
default:
break;
@@ -3216,7 +3216,7 @@ static Value *simplifyICmpWithBinOpOnLHS(CmpInst::Predicate Pred,
// (sub C, X) == X, C is odd --> false
// (sub C, X) != X, C is odd --> true
- if (match(LBO, m_Sub(m_APIntAllowUndef(C), m_Specific(RHS))) &&
+ if (match(LBO, m_Sub(m_APIntAllowPoison(C), m_Specific(RHS))) &&
(*C & 1) == 1 && ICmpInst::isEquality(Pred))
return (Pred == ICmpInst::ICMP_EQ) ? getFalse(ITy) : getTrue(ITy);
@@ -3249,8 +3249,8 @@ static bool trySimplifyICmpWithAdds(CmpInst::Predicate Pred, Value *LHS,
Value *X;
const APInt *C1, *C2;
- if (!match(LHS, m_c_Add(m_Value(X), m_APInt(C1))) ||
- !match(RHS, m_c_Add(m_Specific(X), m_APInt(C2))))
+ if (!match(LHS, m_Add(m_Value(X), m_APInt(C1))) ||
+ !match(RHS, m_Add(m_Specific(X), m_APInt(C2))))
return false;
return (C1->slt(*C2) && C1->isNonNegative()) ||
@@ -3367,7 +3367,7 @@ static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
// (C2 << X) != C --> true
const APInt *C;
if (match(LHS, m_Shl(m_Power2(), m_Value())) &&
- match(RHS, m_APIntAllowUndef(C)) && !C->isPowerOf2()) {
+ match(RHS, m_APIntAllowPoison(C)) && !C->isPowerOf2()) {
// C2 << X can equal zero in some circumstances.
// This simplification might be unsafe if C is zero.
//
@@ -3407,7 +3407,7 @@ static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
if (!NUW || (ICmpInst::isSigned(Pred) && !NSW) ||
- !isKnownNonZero(LBO->getOperand(0), Q.DL))
+ !isKnownNonZero(LBO->getOperand(0), Q))
break;
if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(1),
RBO->getOperand(1), Q, MaxRecurse - 1))
@@ -3732,6 +3732,21 @@ static Value *simplifyICmpWithIntrinsicOnLHS(CmpInst::Predicate Pred,
}
}
+/// Helper method to get range from metadata or attribute.
+static std::optional<ConstantRange> getRange(Value *V,
+ const InstrInfoQuery &IIQ) {
+ if (Instruction *I = dyn_cast<Instruction>(V))
+ if (MDNode *MD = IIQ.getMetadata(I, LLVMContext::MD_range))
+ return getConstantRangeFromMetadata(*MD);
+
+ if (const Argument *A = dyn_cast<Argument>(V))
+ return A->getRange();
+ else if (const CallBase *CB = dyn_cast<CallBase>(V))
+ return CB->getRange();
+
+ return std::nullopt;
+}
+
/// Given operands for an ICmpInst, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
@@ -3779,24 +3794,14 @@ static Value *simplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// If both operands have range metadata, use the metadata
// to simplify the comparison.
- if (isa<Instruction>(RHS) && isa<Instruction>(LHS)) {
- auto RHS_Instr = cast<Instruction>(RHS);
- auto LHS_Instr = cast<Instruction>(LHS);
-
- if (Q.IIQ.getMetadata(RHS_Instr, LLVMContext::MD_range) &&
- Q.IIQ.getMetadata(LHS_Instr, LLVMContext::MD_range)) {
- auto RHS_CR = getConstantRangeFromMetadata(
- *RHS_Instr->getMetadata(LLVMContext::MD_range));
- auto LHS_CR = getConstantRangeFromMetadata(
- *LHS_Instr->getMetadata(LLVMContext::MD_range));
-
- if (LHS_CR.icmp(Pred, RHS_CR))
- return ConstantInt::getTrue(RHS->getContext());
-
- if (LHS_CR.icmp(CmpInst::getInversePredicate(Pred), RHS_CR))
- return ConstantInt::getFalse(RHS->getContext());
+ if (std::optional<ConstantRange> RhsCr = getRange(RHS, Q.IIQ))
+ if (std::optional<ConstantRange> LhsCr = getRange(LHS, Q.IIQ)) {
+ if (LhsCr->icmp(Pred, *RhsCr))
+ return ConstantInt::getTrue(ITy);
+
+ if (LhsCr->icmp(CmpInst::getInversePredicate(Pred), *RhsCr))
+ return ConstantInt::getFalse(ITy);
}
- }
// Compare of cast, for example (zext X) != 0 -> X != 0
if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) {
@@ -3962,12 +3967,14 @@ static Value *simplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// LHS >s RHS.
case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_SGE:
- return ConstantExpr::getICmp(ICmpInst::ICMP_SLT, C,
- Constant::getNullValue(C->getType()));
+ return ConstantFoldCompareInstOperands(
+ ICmpInst::ICMP_SLT, C, Constant::getNullValue(C->getType()),
+ Q.DL);
case ICmpInst::ICMP_SLT:
case ICmpInst::ICMP_SLE:
- return ConstantExpr::getICmp(ICmpInst::ICMP_SGE, C,
- Constant::getNullValue(C->getType()));
+ return ConstantFoldCompareInstOperands(
+ ICmpInst::ICMP_SGE, C, Constant::getNullValue(C->getType()),
+ Q.DL);
// If LHS is non-negative then LHS <u RHS. If LHS is negative then
// LHS >u RHS.
@@ -4107,14 +4114,21 @@ static Value *simplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// This catches the 2 variable input case, constants are handled below as a
// class-like compare.
if (Pred == FCmpInst::FCMP_ORD || Pred == FCmpInst::FCMP_UNO) {
+ KnownFPClass RHSClass =
+ computeKnownFPClass(RHS, fcAllFlags, /*Depth=*/0, Q);
+ KnownFPClass LHSClass =
+ computeKnownFPClass(LHS, fcAllFlags, /*Depth=*/0, Q);
+
if (FMF.noNaNs() ||
- (isKnownNeverNaN(RHS, Q.DL, Q.TLI, 0, Q.AC, Q.CxtI, Q.DT) &&
- isKnownNeverNaN(LHS, Q.DL, Q.TLI, 0, Q.AC, Q.CxtI, Q.DT)))
+ (RHSClass.isKnownNeverNaN() && LHSClass.isKnownNeverNaN()))
return ConstantInt::get(RetTy, Pred == FCmpInst::FCMP_ORD);
+
+ if (RHSClass.isKnownAlwaysNaN() || LHSClass.isKnownAlwaysNaN())
+ return ConstantInt::get(RetTy, Pred == CmpInst::FCMP_UNO);
}
const APFloat *C = nullptr;
- match(RHS, m_APFloatAllowUndef(C));
+ match(RHS, m_APFloatAllowPoison(C));
std::optional<KnownFPClass> FullKnownClassLHS;
// Lazily compute the possible classes for LHS. Avoid computing it twice if
@@ -4123,8 +4137,7 @@ static Value *simplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
fcAllFlags) {
if (FullKnownClassLHS)
return *FullKnownClassLHS;
- return computeKnownFPClass(LHS, FMF, Q.DL, InterestedFlags, 0, Q.TLI, Q.AC,
- Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo);
+ return computeKnownFPClass(LHS, FMF, InterestedFlags, 0, Q);
};
if (C && Q.CxtI) {
@@ -4290,6 +4303,9 @@ static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
bool AllowRefinement,
SmallVectorImpl<Instruction *> *DropFlags,
unsigned MaxRecurse) {
+ assert((AllowRefinement || !Q.CanUseUndef) &&
+ "If AllowRefinement=false then CanUseUndef=false");
+
// Trivial replacement.
if (V == Op)
return RepOp;
@@ -4337,6 +4353,11 @@ static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
} else {
NewOps.push_back(InstOp);
}
+
+ // Bail out if any operand is undef and SimplifyQuery disables undef
+ // simplification. Constant folding currently doesn't respect this option.
+ if (isa<UndefValue>(NewOps.back()) && !Q.CanUseUndef)
+ return nullptr;
}
if (!AnyReplaced)
@@ -4445,7 +4466,7 @@ static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
return nullptr;
}
Constant *Res = ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI);
- if (DropFlags && Res && I->hasPoisonGeneratingFlagsOrMetadata())
+ if (DropFlags && Res && I->hasPoisonGeneratingAnnotations())
DropFlags->push_back(I);
return Res;
}
@@ -4457,6 +4478,11 @@ Value *llvm::simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
const SimplifyQuery &Q,
bool AllowRefinement,
SmallVectorImpl<Instruction *> *DropFlags) {
+ // If refinement is disabled, also disable undef simplifications (which are
+ // always refinements) in SimplifyQuery.
+ if (!AllowRefinement)
+ return ::simplifyWithOpReplaced(V, Op, RepOp, Q.getWithoutUndef(),
+ AllowRefinement, DropFlags, RecursionLimit);
return ::simplifyWithOpReplaced(V, Op, RepOp, Q, AllowRefinement, DropFlags,
RecursionLimit);
}
@@ -4596,7 +4622,7 @@ static Value *simplifySelectWithICmpEq(Value *CmpLHS, Value *CmpRHS,
Value *TrueVal, Value *FalseVal,
const SimplifyQuery &Q,
unsigned MaxRecurse) {
- if (simplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q,
+ if (simplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q.getWithoutUndef(),
/* AllowRefinement */ false,
/* DropFlags */ nullptr, MaxRecurse) == TrueVal)
return FalseVal;
@@ -4951,7 +4977,7 @@ Value *llvm::simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
/// Given operands for an GetElementPtrInst, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyGEPInst(Type *SrcTy, Value *Ptr,
- ArrayRef<Value *> Indices, bool InBounds,
+ ArrayRef<Value *> Indices, GEPNoWrapFlags NW,
const SimplifyQuery &Q, unsigned) {
// The type of the GEP pointer operand.
unsigned AS =
@@ -5055,7 +5081,7 @@ static Value *simplifyGEPInst(Type *SrcTy, Value *Ptr,
// gep (gep V, C), (sub 0, V) -> C
if (match(Indices.back(),
- m_Sub(m_Zero(), m_PtrToInt(m_Specific(StrippedBasePtr)))) &&
+ m_Neg(m_PtrToInt(m_Specific(StrippedBasePtr)))) &&
!BasePtrOffset.isZero()) {
auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset);
return ConstantExpr::getIntToPtr(CI, GEPTy);
@@ -5076,17 +5102,17 @@ static Value *simplifyGEPInst(Type *SrcTy, Value *Ptr,
return nullptr;
if (!ConstantExpr::isSupportedGetElementPtr(SrcTy))
- return ConstantFoldGetElementPtr(SrcTy, cast<Constant>(Ptr), InBounds,
- std::nullopt, Indices);
+ return ConstantFoldGetElementPtr(SrcTy, cast<Constant>(Ptr), std::nullopt,
+ Indices);
- auto *CE = ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ptr), Indices,
- InBounds);
+ auto *CE =
+ ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ptr), Indices, NW);
return ConstantFoldConstant(CE, Q.DL);
}
Value *llvm::simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef<Value *> Indices,
- bool InBounds, const SimplifyQuery &Q) {
- return ::simplifyGEPInst(SrcTy, Ptr, Indices, InBounds, Q, RecursionLimit);
+ GEPNoWrapFlags NW, const SimplifyQuery &Q) {
+ return ::simplifyGEPInst(SrcTy, Ptr, Indices, NW, Q, RecursionLimit);
}
/// Given operands for an InsertValueInst, see if we can fold the result.
@@ -5256,11 +5282,16 @@ static Value *simplifyPHINode(PHINode *PN, ArrayRef<Value *> IncomingValues,
// If all of the PHI's incoming values are the same then replace the PHI node
// with the common value.
Value *CommonValue = nullptr;
+ bool HasPoisonInput = false;
bool HasUndefInput = false;
for (Value *Incoming : IncomingValues) {
// If the incoming value is the phi node itself, it can safely be skipped.
if (Incoming == PN)
continue;
+ if (isa<PoisonValue>(Incoming)) {
+ HasPoisonInput = true;
+ continue;
+ }
if (Q.isUndefValue(Incoming)) {
// Remember that we saw an undef value, but otherwise ignore them.
HasUndefInput = true;
@@ -5271,12 +5302,13 @@ static Value *simplifyPHINode(PHINode *PN, ArrayRef<Value *> IncomingValues,
CommonValue = Incoming;
}
- // If CommonValue is null then all of the incoming values were either undef or
- // equal to the phi node itself.
+ // If CommonValue is null then all of the incoming values were either undef,
+ // poison or equal to the phi node itself.
if (!CommonValue)
- return UndefValue::get(PN->getType());
+ return HasUndefInput ? UndefValue::get(PN->getType())
+ : PoisonValue::get(PN->getType());
- if (HasUndefInput) {
+ if (HasPoisonInput || HasUndefInput) {
// If we have a PHI node like phi(X, undef, X), where X is defined by some
// instruction, we cannot return X as the result of the PHI node unless it
// dominates the PHI block.
@@ -5317,6 +5349,14 @@ static Value *simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
if (Op->getType() == Ty)
return Op;
+ // ptrtoint (ptradd (Ptr, X - ptrtoint(Ptr))) -> X
+ Value *Ptr, *X;
+ if (CastOpc == Instruction::PtrToInt &&
+ match(Op, m_PtrAdd(m_Value(Ptr),
+ m_Sub(m_Value(X), m_PtrToInt(m_Deferred(Ptr))))) &&
+ X->getType() == Ty && Ty == Q.DL.getIndexType(Ptr->getType()))
+ return X;
+
return nullptr;
}
@@ -5356,9 +5396,6 @@ static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1,
SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse);
}
- // TODO: Look through bitcasts? What if the bitcast changes the vector element
- // size?
-
// The source operand is not a shuffle. Initialize the root vector value for
// this shuffle if that has not been done yet.
if (!RootVec)
@@ -5636,7 +5673,7 @@ simplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
// fadd X, 0 ==> X, when we know X is not -0
if (canIgnoreSNaN(ExBehavior, FMF))
if (match(Op1, m_PosZeroFP()) &&
- (FMF.noSignedZeros() || cannotBeNegativeZero(Op0, Q.DL, Q.TLI)))
+ (FMF.noSignedZeros() || cannotBeNegativeZero(Op0, /*Depth=*/0, Q)))
return Op0;
if (!isDefaultFPEnvironment(ExBehavior, Rounding))
@@ -5698,7 +5735,7 @@ simplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
// fsub X, -0 ==> X, when we know X is not -0
if (canIgnoreSNaN(ExBehavior, FMF))
if (match(Op1, m_NegZeroFP()) &&
- (FMF.noSignedZeros() || cannotBeNegativeZero(Op0, Q.DL, Q.TLI)))
+ (FMF.noSignedZeros() || cannotBeNegativeZero(Op0, /*Depth=*/0, Q)))
return Op0;
// fsub -0.0, (fsub -0.0, X) ==> X
@@ -5766,11 +5803,16 @@ static Value *simplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
if (FMF.noNaNs() && FMF.noSignedZeros())
return ConstantFP::getZero(Op0->getType());
- // +normal number * (-)0.0 --> (-)0.0
- if (isKnownNeverInfOrNaN(Op0, Q.DL, Q.TLI, 0, Q.AC, Q.CxtI, Q.DT) &&
- // TODO: Check SignBit from computeKnownFPClass when it's more complete.
- SignBitMustBeZero(Op0, Q.DL, Q.TLI))
- return Op1;
+ KnownFPClass Known =
+ computeKnownFPClass(Op0, FMF, fcInf | fcNan, /*Depth=*/0, Q);
+ if (Known.isKnownNever(fcInf | fcNan)) {
+ // +normal number * (-)0.0 --> (-)0.0
+ if (Known.SignBit == false)
+ return Op1;
+ // -normal number * (-)0.0 --> -(-)0.0
+ if (Known.SignBit == true)
+ return foldConstant(Instruction::FNeg, Op1, Q);
+ }
}
// sqrt(X) * sqrt(X) --> X, if we can:
@@ -6114,7 +6156,8 @@ static Value *simplifyRelativeLoad(Constant *Ptr, Constant *Offset,
if (OffsetInt.srem(4) != 0)
return nullptr;
- Constant *Loaded = ConstantFoldLoadFromConstPtr(Ptr, Int32Ty, OffsetInt, DL);
+ Constant *Loaded =
+ ConstantFoldLoadFromConstPtr(Ptr, Int32Ty, std::move(OffsetInt), DL);
if (!Loaded)
return nullptr;
@@ -6222,7 +6265,7 @@ static Value *simplifyUnaryIntrinsic(Function *F, Value *Op0,
Value *X;
switch (IID) {
case Intrinsic::fabs:
- if (SignBitMustBeZero(Op0, Q.DL, Q.TLI))
+ if (computeKnownFPSignBit(Op0, /*Depth=*/0, Q) == false)
return Op0;
break;
case Intrinsic::bswap:
@@ -6289,11 +6332,11 @@ static Value *simplifyUnaryIntrinsic(Function *F, Value *Op0,
m_Intrinsic<Intrinsic::pow>(m_SpecificFP(10.0), m_Value(X)))))
return X;
break;
- case Intrinsic::experimental_vector_reverse:
- // experimental.vector.reverse(experimental.vector.reverse(x)) -> x
+ case Intrinsic::vector_reverse:
+ // vector.reverse(vector.reverse(x)) -> x
if (match(Op0, m_VecReverse(m_Value(X))))
return X;
- // experimental.vector.reverse(splat(X)) -> splat(X)
+ // vector.reverse(splat(X)) -> splat(X)
if (isSplatValue(Op0))
return Op0;
break;
@@ -6383,11 +6426,10 @@ static Value *foldMinimumMaximumSharedOp(Intrinsic::ID IID, Value *Op0,
return nullptr;
}
-static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
- const SimplifyQuery &Q,
- const CallBase *Call) {
- Intrinsic::ID IID = F->getIntrinsicID();
- Type *ReturnType = F->getReturnType();
+Value *llvm::simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType,
+ Value *Op0, Value *Op1,
+ const SimplifyQuery &Q,
+ const CallBase *Call) {
unsigned BitWidth = ReturnType->getScalarSizeInBits();
switch (IID) {
case Intrinsic::abs:
@@ -6468,7 +6510,7 @@ static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
ReturnType, MinMaxIntrinsic::getSaturationPoint(IID, BitWidth));
const APInt *C;
- if (match(Op1, m_APIntAllowUndef(C))) {
+ if (match(Op1, m_APIntAllowPoison(C))) {
// Clamp to limit value. For example:
// umax(i8 %x, i8 255) --> 255
if (*C == MinMaxIntrinsic::getSaturationPoint(IID, BitWidth))
@@ -6510,6 +6552,25 @@ static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
break;
}
+ case Intrinsic::scmp:
+ case Intrinsic::ucmp: {
+ // Fold to a constant if the relationship between operands can be
+ // established with certainty
+ if (isICmpTrue(CmpInst::ICMP_EQ, Op0, Op1, Q, RecursionLimit))
+ return Constant::getNullValue(ReturnType);
+
+ ICmpInst::Predicate PredGT =
+ IID == Intrinsic::scmp ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
+ if (isICmpTrue(PredGT, Op0, Op1, Q, RecursionLimit))
+ return ConstantInt::get(ReturnType, 1);
+
+ ICmpInst::Predicate PredLT =
+ IID == Intrinsic::scmp ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
+ if (isICmpTrue(PredLT, Op0, Op1, Q, RecursionLimit))
+ return ConstantInt::getSigned(ReturnType, -1);
+
+ break;
+ }
case Intrinsic::usub_with_overflow:
case Intrinsic::ssub_with_overflow:
// X - X -> { 0, false }
@@ -6645,19 +6706,21 @@ static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
// float, if the ninf flag is set.
const APFloat *C;
if (match(Op1, m_APFloat(C)) &&
- (C->isInfinity() || (Call->hasNoInfs() && C->isLargest()))) {
+ (C->isInfinity() || (Call && Call->hasNoInfs() && C->isLargest()))) {
// minnum(X, -inf) -> -inf
// maxnum(X, +inf) -> +inf
// minimum(X, -inf) -> -inf if nnan
// maximum(X, +inf) -> +inf if nnan
- if (C->isNegative() == IsMin && (!PropagateNaN || Call->hasNoNaNs()))
+ if (C->isNegative() == IsMin &&
+ (!PropagateNaN || (Call && Call->hasNoNaNs())))
return ConstantFP::get(ReturnType, *C);
// minnum(X, +inf) -> X if nnan
// maxnum(X, -inf) -> X if nnan
// minimum(X, +inf) -> X
// maximum(X, -inf) -> X
- if (C->isNegative() != IsMin && (PropagateNaN || Call->hasNoNaNs()))
+ if (C->isNegative() != IsMin &&
+ (PropagateNaN || (Call && Call->hasNoNaNs())))
return Op0;
}
@@ -6671,8 +6734,6 @@ static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
break;
}
case Intrinsic::vector_extract: {
- Type *ReturnType = F->getReturnType();
-
// (extract_vector (insert_vector _, X, 0), 0) -> X
unsigned IdxN = cast<ConstantInt>(Op1)->getZExtValue();
Value *X = nullptr;
@@ -6719,7 +6780,8 @@ static Value *simplifyIntrinsic(CallBase *Call, Value *Callee,
return simplifyUnaryIntrinsic(F, Args[0], Q, Call);
if (NumOperands == 2)
- return simplifyBinaryIntrinsic(F, Args[0], Args[1], Q, Call);
+ return simplifyBinaryIntrinsic(IID, F->getReturnType(), Args[0], Args[1], Q,
+ Call);
// Handle intrinsics with 3 or more arguments.
switch (IID) {
@@ -6856,6 +6918,27 @@ static Value *simplifyIntrinsic(CallBase *Call, Value *Callee,
}
case Intrinsic::experimental_constrained_ldexp:
return simplifyLdexp(Args[0], Args[1], Q, true);
+ case Intrinsic::experimental_gc_relocate: {
+ GCRelocateInst &GCR = *cast<GCRelocateInst>(Call);
+ Value *DerivedPtr = GCR.getDerivedPtr();
+ Value *BasePtr = GCR.getBasePtr();
+
+ // Undef is undef, even after relocation.
+ if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {
+ return UndefValue::get(GCR.getType());
+ }
+
+ if (auto *PT = dyn_cast<PointerType>(GCR.getType())) {
+ // For now, the assumption is that the relocation of null will be null
+ // for most any collector. If this ever changes, a corresponding hook
+ // should be added to GCStrategy and this code should check it first.
+ if (isa<ConstantPointerNull>(DerivedPtr)) {
+ // Use null-pointer of gc_relocate's type to replace it.
+ return ConstantPointerNull::get(PT);
+ }
+ }
+ return nullptr;
+ }
default:
return nullptr;
}
@@ -6948,8 +7031,8 @@ Value *llvm::simplifyLoadInst(LoadInst *LI, Value *PtrOp,
// If GlobalVariable's initializer is uniform, then return the constant
// regardless of its offset.
- if (Constant *C =
- ConstantFoldLoadFromUniformValue(GV->getInitializer(), LI->getType()))
+ if (Constant *C = ConstantFoldLoadFromUniformValue(GV->getInitializer(),
+ LI->getType(), Q.DL))
return C;
// Try to convert operand into a constant by stripping offsets while looking
@@ -6961,7 +7044,8 @@ Value *llvm::simplifyLoadInst(LoadInst *LI, Value *PtrOp,
if (PtrOp == GV) {
// Index size may have changed due to address space casts.
Offset = Offset.sextOrTrunc(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()));
- return ConstantFoldLoadFromConstPtr(GV, LI->getType(), Offset, Q.DL);
+ return ConstantFoldLoadFromConstPtr(GV, LI->getType(), std::move(Offset),
+ Q.DL);
}
return nullptr;
@@ -7060,7 +7144,7 @@ static Value *simplifyInstructionWithOperands(Instruction *I,
case Instruction::GetElementPtr: {
auto *GEPI = cast<GetElementPtrInst>(I);
return simplifyGEPInst(GEPI->getSourceElementType(), NewOps[0],
- ArrayRef(NewOps).slice(1), GEPI->isInBounds(), Q,
+ ArrayRef(NewOps).slice(1), GEPI->getNoWrapFlags(), Q,
MaxRecurse);
}
case Instruction::InsertValue: {
@@ -7119,7 +7203,7 @@ Value *llvm::simplifyInstruction(Instruction *I, const SimplifyQuery &SQ) {
/// If called on unreachable code, the instruction may simplify to itself.
/// Make life easier for users by detecting that case here, and returning a
/// safe value instead.
- return Result == I ? UndefValue::get(I->getType()) : Result;
+ return Result == I ? PoisonValue::get(I->getType()) : Result;
}
/// Implementation of recursive simplification through an instruction's
@@ -7141,7 +7225,7 @@ static bool replaceAndRecursivelySimplifyImpl(
SmallSetVector<Instruction *, 8> *UnsimplifiedUsers = nullptr) {
bool Simplified = false;
SmallSetVector<Instruction *, 8> Worklist;
- const DataLayout &DL = I->getModule()->getDataLayout();
+ const DataLayout &DL = I->getDataLayout();
// If we have an explicit value to collapse to, do that round of the
// simplification loop by hand initially.
@@ -7206,7 +7290,7 @@ const SimplifyQuery getBestSimplifyQuery(Pass &P, Function &F) {
auto *TLI = TLIWP ? &TLIWP->getTLI(F) : nullptr;
auto *ACWP = P.getAnalysisIfAvailable<AssumptionCacheTracker>();
auto *AC = ACWP ? &ACWP->getAssumptionCache(F) : nullptr;
- return {F.getParent()->getDataLayout(), TLI, DT, AC};
+ return {F.getDataLayout(), TLI, DT, AC};
}
const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &AR,
@@ -7220,10 +7304,18 @@ const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &AM,
auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F);
auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F);
auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F);
- return {F.getParent()->getDataLayout(), TLI, DT, AC};
+ return {F.getDataLayout(), TLI, DT, AC};
}
template const SimplifyQuery getBestSimplifyQuery(AnalysisManager<Function> &,
Function &);
+
+bool SimplifyQuery::isUndefValue(Value *V) const {
+ if (!CanUseUndef)
+ return false;
+
+ return match(V, m_Undef());
+}
+
} // namespace llvm
void InstSimplifyFolder::anchor() {}
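
As a quick illustration of two of the folds this revision adds (the srem-of-nsw-multiply case in simplifyRem and the new scmp/ucmp handling in simplifyBinaryIntrinsic), the LLVM IR sketch below shows inputs the updated simplifier should reduce to constants. The function names are invented for this example; it is not taken from the commit's test suite.

; srem of a multiply whose constant is divisible by the divisor:
; (srem (mul nsw X, 6), 3) -> 0 because 6 s% 3 == 0
define i32 @srem_mul_nsw(i32 %x) {
  %m = mul nsw i32 %x, 6
  %r = srem i32 %m, 3
  ret i32 %r          ; simplifies to ret i32 0
}

; scmp(X, X) -> 0: the operands are known equal, so the new
; isICmpTrue(ICMP_EQ, ...) check fires and returns the null value.
define i8 @scmp_same(i32 %x) {
  %c = call i8 @llvm.scmp.i8.i32(i32 %x, i32 %x)
  ret i8 %c           ; simplifies to ret i8 0
}

declare i8 @llvm.scmp.i8.i32(i32, i32)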