author    | Dimitry Andric <dim@FreeBSD.org> | 2019-01-19 10:01:25 +0000
committer | Dimitry Andric <dim@FreeBSD.org> | 2019-01-19 10:01:25 +0000
commit    | d8e91e46262bc44006913e6796843909f1ac7bcd (patch)
tree      | 7d0c143d9b38190e0fa0180805389da22cd834c5 /lib/Analysis/InstructionSimplify.cpp
parent    | b7eb8e35e481a74962664b63dfb09483b200209a (diff)
Diffstat (limited to 'lib/Analysis/InstructionSimplify.cpp')
-rw-r--r-- | lib/Analysis/InstructionSimplify.cpp | 518
1 file changed, 420 insertions(+), 98 deletions(-)
diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp
index 7fc7c15a0c25..ccf907c144f0 100644
--- a/lib/Analysis/InstructionSimplify.cpp
+++ b/lib/Analysis/InstructionSimplify.cpp
@@ -861,8 +861,10 @@ static Value *SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
 
   // (X / Y) * Y -> X if the division is exact.
   Value *X = nullptr;
-  if (match(Op0, m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) || // (X / Y) * Y
-      match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0)))))   // Y * (X / Y)
+  if (Q.IIQ.UseInstrInfo &&
+      (match(Op0,
+             m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) ||     // (X / Y) * Y
+       match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0)))))) // Y * (X / Y)
     return X;
 
   // i1 mul -> and.
@@ -1035,8 +1037,8 @@ static Value *simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
   if (match(Op0, m_c_Mul(m_Value(X), m_Specific(Op1)))) {
     auto *Mul = cast<OverflowingBinaryOperator>(Op0);
     // If the Mul does not overflow, then we are good to go.
-    if ((IsSigned && Mul->hasNoSignedWrap()) ||
-        (!IsSigned && Mul->hasNoUnsignedWrap()))
+    if ((IsSigned && Q.IIQ.hasNoSignedWrap(Mul)) ||
+        (!IsSigned && Q.IIQ.hasNoUnsignedWrap(Mul)))
       return X;
     // If X has the form X = A / Y, then X * Y cannot overflow.
     if ((IsSigned && match(X, m_SDiv(m_Value(), m_Specific(Op1)))) ||
@@ -1094,10 +1096,11 @@ static Value *simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
     return Op0;
 
   // (X << Y) % X -> 0
-  if ((Opcode == Instruction::SRem &&
-       match(Op0, m_NSWShl(m_Specific(Op1), m_Value()))) ||
-      (Opcode == Instruction::URem &&
-       match(Op0, m_NUWShl(m_Specific(Op1), m_Value()))))
+  if (Q.IIQ.UseInstrInfo &&
+      ((Opcode == Instruction::SRem &&
+        match(Op0, m_NSWShl(m_Specific(Op1), m_Value()))) ||
+       (Opcode == Instruction::URem &&
+        match(Op0, m_NUWShl(m_Specific(Op1), m_Value())))))
     return Constant::getNullValue(Op0->getType());
 
   // If the operation is with the result of a select instruction, check whether
@@ -1295,7 +1298,8 @@ static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
 
   // (X >> A) << A -> X
   Value *X;
-  if (match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
+  if (Q.IIQ.UseInstrInfo &&
+      match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
     return X;
 
   // shl nuw i8 C, %x -> C iff C has sign bit set.
@@ -1338,7 +1342,7 @@ static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
     const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
     const unsigned Width = Op0->getType()->getScalarSizeInBits();
     const unsigned EffWidthY = Width - YKnown.countMinLeadingZeros();
-    if (EffWidthY <= ShRAmt->getZExtValue())
+    if (ShRAmt->uge(EffWidthY))
       return X;
   }
 
@@ -1365,7 +1369,7 @@ static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
 
   // (X << A) >> A -> X
   Value *X;
-  if (match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
+  if (Q.IIQ.UseInstrInfo && match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
     return X;
 
   // Arithmetic shifting an all-sign-bit value is a no-op.
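All of the `Q.IIQ.*` calls introduced above funnel through `InstrInfoQuery`, which puts every use of instruction-carried information (nuw/nsw/exact flags, !range metadata) behind a single `UseInstrInfo` switch, so callers that simplify speculative values (NewGVN being the usual example) can ask for flag-free answers. A minimal sketch of the idea, abbreviated; the real class lives in llvm/Analysis/InstructionSimplify.h:

    #include "llvm/IR/Instruction.h"
    #include "llvm/IR/Metadata.h"
    using namespace llvm;

    // Sketch: with UseInstrInfo == false, poison-generating flags and range
    // metadata are treated as absent, because they may not hold for the
    // value a caller speculatively substituted into the expression.
    struct InstrInfoQuery {
      bool UseInstrInfo = true;

      template <class InstT> bool hasNoUnsignedWrap(const InstT *Op) const {
        return UseInstrInfo && Op->hasNoUnsignedWrap();
      }
      template <class InstT> bool hasNoSignedWrap(const InstT *Op) const {
        return UseInstrInfo && Op->hasNoSignedWrap();
      }
      template <class InstT> bool isExact(const InstT *Op) const {
        return UseInstrInfo && Op->isExact();
      }
      MDNode *getMetadata(const Instruction *I, unsigned KindID) const {
        return UseInstrInfo ? I->getMetadata(KindID) : nullptr;
      }
    };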
@@ -1552,7 +1556,8 @@ static Value *simplifyAndOrOfICmpsWithZero(ICmpInst *Cmp0, ICmpInst *Cmp1,
   return nullptr;
 }
 
-static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1) {
+static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
+                                        const InstrInfoQuery &IIQ) {
   // (icmp (add V, C0), C1) & (icmp V, C0)
   ICmpInst::Predicate Pred0, Pred1;
   const APInt *C0, *C1;
@@ -1563,13 +1568,13 @@ static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1) {
   if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
     return nullptr;
 
-  auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
+  auto *AddInst = cast<OverflowingBinaryOperator>(Op0->getOperand(0));
   if (AddInst->getOperand(1) != Op1->getOperand(1))
     return nullptr;
 
   Type *ITy = Op0->getType();
-  bool isNSW = AddInst->hasNoSignedWrap();
-  bool isNUW = AddInst->hasNoUnsignedWrap();
+  bool isNSW = IIQ.hasNoSignedWrap(AddInst);
+  bool isNUW = IIQ.hasNoUnsignedWrap(AddInst);
 
   const APInt Delta = *C1 - *C0;
   if (C0->isStrictlyPositive()) {
@@ -1598,7 +1603,8 @@ static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1) {
   return nullptr;
 }
 
-static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
+static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1,
+                                 const InstrInfoQuery &IIQ) {
   if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true))
     return X;
   if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/true))
@@ -1615,15 +1621,16 @@ static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
   if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, true))
     return X;
 
-  if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1))
+  if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1, IIQ))
     return X;
-  if (Value *X = simplifyAndOfICmpsWithAdd(Op1, Op0))
+  if (Value *X = simplifyAndOfICmpsWithAdd(Op1, Op0, IIQ))
     return X;
 
   return nullptr;
 }
 
-static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1) {
+static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
+                                       const InstrInfoQuery &IIQ) {
   // (icmp (add V, C0), C1) | (icmp V, C0)
   ICmpInst::Predicate Pred0, Pred1;
   const APInt *C0, *C1;
@@ -1639,8 +1646,8 @@ static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1) {
     return nullptr;
 
   Type *ITy = Op0->getType();
-  bool isNSW = AddInst->hasNoSignedWrap();
-  bool isNUW = AddInst->hasNoUnsignedWrap();
+  bool isNSW = IIQ.hasNoSignedWrap(AddInst);
+  bool isNUW = IIQ.hasNoUnsignedWrap(AddInst);
 
   const APInt Delta = *C1 - *C0;
   if (C0->isStrictlyPositive()) {
@@ -1669,7 +1676,8 @@ static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1) {
   return nullptr;
 }
 
-static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
+static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1,
+                                const InstrInfoQuery &IIQ) {
   if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false))
     return X;
   if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/false))
@@ -1686,15 +1694,16 @@ static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
   if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, false))
     return X;
 
-  if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1))
+  if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1, IIQ))
     return X;
-  if (Value *X = simplifyOrOfICmpsWithAdd(Op1, Op0))
+  if (Value *X = simplifyOrOfICmpsWithAdd(Op1, Op0, IIQ))
     return X;
 
   return nullptr;
 }
 
-static Value *simplifyAndOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS, bool IsAnd) {
+static Value *simplifyAndOrOfFCmps(const TargetLibraryInfo *TLI,
+                                   FCmpInst *LHS, FCmpInst *RHS, bool IsAnd) {
   Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
   Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
   if (LHS0->getType() != RHS0->getType())
     return nullptr;
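One way to see why the nsw/nuw flags matter in simplifyAndOfICmpsWithAdd: with no-wrap known, each compare pins V to a constant range, and the conjunction folds when those ranges cannot overlap. A hedged sketch using ConstantRange — this is an alternative formulation of the same reasoning, not the Delta-based code above, and the helper name is made up:

    #include "llvm/IR/ConstantRange.h"
    #include "llvm/IR/InstrTypes.h"
    using namespace llvm;

    // (icmp Pred0 (add V, C0), C1) & (icmp Pred1 V, C2) is false whenever the
    // two implied ranges for V are disjoint. The no-wrap flags are what let
    // the code above treat the add as not having wrapped when it reasons
    // about signed/unsigned orderings of V and V + C0.
    static bool andOfICmpsIsFalse(CmpInst::Predicate Pred0, const APInt &C0,
                                  const APInt &C1, CmpInst::Predicate Pred1,
                                  const APInt &C2) {
      ConstantRange FromAdd =
          ConstantRange::makeExactICmpRegion(Pred0, C1).subtract(C0);
      ConstantRange FromCmp = ConstantRange::makeExactICmpRegion(Pred1, C2);
      return FromAdd.intersectWith(FromCmp).isEmptySet();
    }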
@@ -1711,8 +1720,8 @@ static Value *simplifyAndOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS, bool IsAnd) {
     // (fcmp uno NNAN, X) | (fcmp uno Y, X) --> fcmp uno Y, X
     // (fcmp uno X, NNAN) | (fcmp uno X, Y) --> fcmp uno X, Y
     // (fcmp uno X, NNAN) | (fcmp uno Y, X) --> fcmp uno Y, X
-    if ((isKnownNeverNaN(LHS0) && (LHS1 == RHS0 || LHS1 == RHS1)) ||
-        (isKnownNeverNaN(LHS1) && (LHS0 == RHS0 || LHS0 == RHS1)))
+    if ((isKnownNeverNaN(LHS0, TLI) && (LHS1 == RHS0 || LHS1 == RHS1)) ||
+        (isKnownNeverNaN(LHS1, TLI) && (LHS0 == RHS0 || LHS0 == RHS1)))
       return RHS;
 
     // (fcmp ord X, Y) & (fcmp ord NNAN, X) --> fcmp ord X, Y
@@ -1723,15 +1732,16 @@ static Value *simplifyAndOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS, bool IsAnd) {
     // (fcmp uno Y, X) | (fcmp uno NNAN, X) --> fcmp uno Y, X
     // (fcmp uno X, Y) | (fcmp uno X, NNAN) --> fcmp uno X, Y
     // (fcmp uno Y, X) | (fcmp uno X, NNAN) --> fcmp uno Y, X
-    if ((isKnownNeverNaN(RHS0) && (RHS1 == LHS0 || RHS1 == LHS1)) ||
-        (isKnownNeverNaN(RHS1) && (RHS0 == LHS0 || RHS0 == LHS1)))
+    if ((isKnownNeverNaN(RHS0, TLI) && (RHS1 == LHS0 || RHS1 == LHS1)) ||
+        (isKnownNeverNaN(RHS1, TLI) && (RHS0 == LHS0 || RHS0 == LHS1)))
       return LHS;
   }
 
   return nullptr;
 }
 
-static Value *simplifyAndOrOfCmps(Value *Op0, Value *Op1, bool IsAnd) {
+static Value *simplifyAndOrOfCmps(const SimplifyQuery &Q,
+                                  Value *Op0, Value *Op1, bool IsAnd) {
   // Look through casts of the 'and' operands to find compares.
   auto *Cast0 = dyn_cast<CastInst>(Op0);
   auto *Cast1 = dyn_cast<CastInst>(Op1);
@@ -1745,13 +1755,13 @@ static Value *simplifyAndOrOfCmps(Value *Op0, Value *Op1, bool IsAnd) {
   auto *ICmp0 = dyn_cast<ICmpInst>(Op0);
   auto *ICmp1 = dyn_cast<ICmpInst>(Op1);
   if (ICmp0 && ICmp1)
-    V = IsAnd ? simplifyAndOfICmps(ICmp0, ICmp1) :
-        simplifyOrOfICmps(ICmp0, ICmp1);
+    V = IsAnd ? simplifyAndOfICmps(ICmp0, ICmp1, Q.IIQ)
+              : simplifyOrOfICmps(ICmp0, ICmp1, Q.IIQ);
 
   auto *FCmp0 = dyn_cast<FCmpInst>(Op0);
   auto *FCmp1 = dyn_cast<FCmpInst>(Op1);
   if (FCmp0 && FCmp1)
-    V = simplifyAndOrOfFCmps(FCmp0, FCmp1, IsAnd);
+    V = simplifyAndOrOfFCmps(Q.TLI, FCmp0, FCmp1, IsAnd);
 
   if (!V)
     return nullptr;
@@ -1831,7 +1841,7 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
       return Op1;
   }
 
-  if (Value *V = simplifyAndOrOfCmps(Op0, Op1, true))
+  if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, true))
     return V;
 
   // Try some generic simplifications for associative operations.
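A plain-C++ check of the logic behind the ord/uno folds above (illustrative only): "fcmp ord a, b" is !isnan(a) && !isnan(b), so once one operand is known never-NaN the second conjunct is implied by the first and can be dropped.

    #include <cassert>
    #include <cmath>

    static bool ord(double A, double B) { return !std::isnan(A) && !std::isnan(B); }

    int main() {
      const double NNAN = 1.0; // stands in for a value proven non-NaN (via TLI)
      for (double X : {0.0, 3.5, (double)NAN})
        for (double Y : {-2.0, (double)NAN})
          // (fcmp ord X, Y) & (fcmp ord NNAN, X) --> fcmp ord X, Y
          assert((ord(X, Y) && ord(NNAN, X)) == ord(X, Y));
    }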
@@ -1863,6 +1873,40 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                                           MaxRecurse))
       return V;
 
+  // Assuming the effective width of Y is not larger than A, i.e. all bits
+  // from X and Y are disjoint in (X << A) | Y,
+  // if the mask of this AND op covers all bits of X or Y, while it covers
+  // no bits from the other, we can bypass this AND op. E.g.,
+  // ((X << A) | Y) & Mask -> Y,
+  //     if Mask = ((1 << effective_width_of(Y)) - 1)
+  // ((X << A) | Y) & Mask -> X << A,
+  //     if Mask = ((1 << effective_width_of(X)) - 1) << A
+  // SimplifyDemandedBits in InstCombine can optimize the general case.
+  // This pattern aims to help other passes for a common case.
+  Value *Y, *XShifted;
+  if (match(Op1, m_APInt(Mask)) &&
+      match(Op0, m_c_Or(m_CombineAnd(m_NUWShl(m_Value(X), m_APInt(ShAmt)),
+                                     m_Value(XShifted)),
+                        m_Value(Y)))) {
+    const unsigned Width = Op0->getType()->getScalarSizeInBits();
+    const unsigned ShftCnt = ShAmt->getLimitedValue(Width);
+    const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
+    const unsigned EffWidthY = Width - YKnown.countMinLeadingZeros();
+    if (EffWidthY <= ShftCnt) {
+      const KnownBits XKnown = computeKnownBits(X, Q.DL, 0, Q.AC, Q.CxtI,
+                                                Q.DT);
+      const unsigned EffWidthX = Width - XKnown.countMinLeadingZeros();
+      const APInt EffBitsY = APInt::getLowBitsSet(Width, EffWidthY);
+      const APInt EffBitsX = APInt::getLowBitsSet(Width, EffWidthX) << ShftCnt;
+      // If the mask is extracting all bits from X or Y as is, we can skip
+      // this AND op.
+      if (EffBitsY.isSubsetOf(*Mask) && !EffBitsX.intersects(*Mask))
+        return Y;
+      if (EffBitsX.isSubsetOf(*Mask) && !EffBitsY.intersects(*Mask))
+        return XShifted;
+    }
+  }
+
   return nullptr;
 }
 
@@ -1947,7 +1991,7 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
        match(Op0, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B)))))
     return Op0;
 
-  if (Value *V = simplifyAndOrOfCmps(Op0, Op1, false))
+  if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, false))
     return V;
 
   // Try some generic simplifications for associative operations.
@@ -2108,13 +2152,15 @@
 static Constant *
 computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
                    const DominatorTree *DT, CmpInst::Predicate Pred,
                    AssumptionCache *AC, const Instruction *CxtI,
-                   Value *LHS, Value *RHS) {
+                   const InstrInfoQuery &IIQ, Value *LHS, Value *RHS) {
   // First, skip past any trivial no-ops.
   LHS = LHS->stripPointerCasts();
   RHS = RHS->stripPointerCasts();
 
   // A non-null pointer is not equal to a null pointer.
-  if (llvm::isKnownNonZero(LHS, DL) && isa<ConstantPointerNull>(RHS) &&
+  if (llvm::isKnownNonZero(LHS, DL, 0, nullptr, nullptr, nullptr,
+                           IIQ.UseInstrInfo) &&
+      isa<ConstantPointerNull>(RHS) &&
       (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE))
     return ConstantInt::get(GetCompareTy(LHS),
                             !CmpInst::isTrueWhenEqual(Pred));
@@ -2379,12 +2425,12 @@ static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
       return getTrue(ITy);
   case ICmpInst::ICMP_EQ:
   case ICmpInst::ICMP_ULE:
-    if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
+    if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
      return getFalse(ITy);
    break;
   case ICmpInst::ICMP_NE:
   case ICmpInst::ICMP_UGT:
-    if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
+    if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
       return getTrue(ITy);
     break;
   case ICmpInst::ICMP_SLT: {
@@ -2429,17 +2475,18 @@ static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
 /// Many binary operators with a constant operand have an easy-to-compute
 /// range of outputs. This can be used to fold a comparison to always true or
 /// always false.
-static void setLimitsForBinOp(BinaryOperator &BO, APInt &Lower, APInt &Upper) {
+static void setLimitsForBinOp(BinaryOperator &BO, APInt &Lower, APInt &Upper,
+                              const InstrInfoQuery &IIQ) {
   unsigned Width = Lower.getBitWidth();
   const APInt *C;
   switch (BO.getOpcode()) {
   case Instruction::Add:
     if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
       // FIXME: If we have both nuw and nsw, we should reduce the range further.
-      if (BO.hasNoUnsignedWrap()) {
+      if (IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
         // 'add nuw x, C' produces [C, UINT_MAX].
         Lower = *C;
-      } else if (BO.hasNoSignedWrap()) {
+      } else if (IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
         if (C->isNegative()) {
           // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
           Lower = APInt::getSignedMinValue(Width);
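The disjoint-bits `and` fold added above is easy to sanity-check in plain C++ at a fixed width — an illustration of the transform itself, not of the known-bits analysis:

    #include <cassert>
    #include <cstdint>

    int main() {
      // i32, ShftCnt = 16, and Y with effective width <= 16: X<<16 and Y
      // occupy disjoint bits, so a mask covering exactly one side's bits
      // returns that side unchanged.
      for (uint32_t X : {0u, 1u, 0xABCDu})
        for (uint32_t Y : {0u, 7u, 0xFFFFu}) {
          uint32_t Or = (X << 16) | Y;
          assert((Or & 0x0000FFFFu) == Y);         // Mask covers only Y's bits
          assert((Or & 0xFFFF0000u) == (X << 16)); // Mask covers only X's bits
        }
    }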
@@ -2472,7 +2519,7 @@ static void setLimitsForBinOp(BinaryOperator &BO, APInt &Lower, APInt &Upper)
       Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
     } else if (match(BO.getOperand(0), m_APInt(C))) {
       unsigned ShiftAmount = Width - 1;
-      if (!C->isNullValue() && BO.isExact())
+      if (!C->isNullValue() && IIQ.isExact(&BO))
         ShiftAmount = C->countTrailingZeros();
       if (C->isNegative()) {
         // 'ashr C, x' produces [C, C >> (Width-1)]
@@ -2493,7 +2540,7 @@ static void setLimitsForBinOp(BinaryOperator &BO, APInt &Lower, APInt &Upper)
     } else if (match(BO.getOperand(0), m_APInt(C))) {
       // 'lshr C, x' produces [C >> (Width-1), C].
       unsigned ShiftAmount = Width - 1;
-      if (!C->isNullValue() && BO.isExact())
+      if (!C->isNullValue() && IIQ.isExact(&BO))
         ShiftAmount = C->countTrailingZeros();
       Lower = C->lshr(ShiftAmount);
       Upper = *C + 1;
@@ -2502,7 +2549,7 @@ static void setLimitsForBinOp(BinaryOperator &BO, APInt &Lower, APInt &Upper)
 
   case Instruction::Shl:
     if (match(BO.getOperand(0), m_APInt(C))) {
-      if (BO.hasNoUnsignedWrap()) {
+      if (IIQ.hasNoUnsignedWrap(&BO)) {
         // 'shl nuw C, x' produces [C, C << CLZ(C)]
         Lower = *C;
         Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
@@ -2583,8 +2630,72 @@ static void setLimitsForBinOp(BinaryOperator &BO, APInt &Lower, APInt &Upper)
   }
 }
 
+/// Some intrinsics with a constant operand have an easy-to-compute range of
+/// outputs. This can be used to fold a comparison to always true or always
+/// false.
+static void setLimitsForIntrinsic(IntrinsicInst &II, APInt &Lower,
+                                  APInt &Upper) {
+  unsigned Width = Lower.getBitWidth();
+  const APInt *C;
+  switch (II.getIntrinsicID()) {
+  case Intrinsic::uadd_sat:
+    // uadd.sat(x, C) produces [C, UINT_MAX].
+    if (match(II.getOperand(0), m_APInt(C)) ||
+        match(II.getOperand(1), m_APInt(C)))
+      Lower = *C;
+    break;
+  case Intrinsic::sadd_sat:
+    if (match(II.getOperand(0), m_APInt(C)) ||
+        match(II.getOperand(1), m_APInt(C))) {
+      if (C->isNegative()) {
+        // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
+        Lower = APInt::getSignedMinValue(Width);
+        Upper = APInt::getSignedMaxValue(Width) + *C + 1;
+      } else {
+        // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
+        Lower = APInt::getSignedMinValue(Width) + *C;
+        Upper = APInt::getSignedMaxValue(Width) + 1;
+      }
+    }
+    break;
+  case Intrinsic::usub_sat:
+    // usub.sat(C, x) produces [0, C].
+    if (match(II.getOperand(0), m_APInt(C)))
+      Upper = *C + 1;
+    // usub.sat(x, C) produces [0, UINT_MAX - C].
+    else if (match(II.getOperand(1), m_APInt(C)))
+      Upper = APInt::getMaxValue(Width) - *C + 1;
+    break;
+  case Intrinsic::ssub_sat:
+    if (match(II.getOperand(0), m_APInt(C))) {
+      if (C->isNegative()) {
+        // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
+        Lower = APInt::getSignedMinValue(Width);
+        Upper = *C - APInt::getSignedMinValue(Width) + 1;
+      } else {
+        // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
+        Lower = *C - APInt::getSignedMaxValue(Width);
+        Upper = APInt::getSignedMaxValue(Width) + 1;
+      }
+    } else if (match(II.getOperand(1), m_APInt(C))) {
+      if (C->isNegative()) {
+        // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]:
+        Lower = APInt::getSignedMinValue(Width) - *C;
+        Upper = APInt::getSignedMaxValue(Width) + 1;
+      } else {
+        // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
+        Lower = APInt::getSignedMinValue(Width);
+        Upper = APInt::getSignedMaxValue(Width) - *C + 1;
+      }
+    }
+    break;
+  default:
+    break;
+  }
+}
+
 static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS,
-                                       Value *RHS) {
+                                       Value *RHS, const InstrInfoQuery &IIQ) {
   Type *ITy = GetCompareTy(RHS); // The return type.
 
   Value *X;
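The saturating-add ranges recorded by setLimitsForIntrinsic can be verified exhaustively at i8 — a plain-C++ model of sadd.sat, names illustrative:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    static int8_t sadd_sat_i8(int8_t A, int8_t B) {
      return int8_t(std::clamp(int(A) + int(B), -128, 127));
    }

    int main() {
      // sadd.sat(x, +20) produces [SINT_MIN + 20, SINT_MAX] = [-108, 127],
      // so e.g. "icmp slt (sadd.sat x, 20), -108" can fold to false.
      for (int X = -128; X <= 127; ++X)
        assert(sadd_sat_i8(int8_t(X), 20) >= -108);
    }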
@@ -2615,13 +2726,15 @@ static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS,
     APInt Lower = APInt(Width, 0);
     APInt Upper = APInt(Width, 0);
     if (auto *BO = dyn_cast<BinaryOperator>(LHS))
-      setLimitsForBinOp(*BO, Lower, Upper);
+      setLimitsForBinOp(*BO, Lower, Upper, IIQ);
+    else if (auto *II = dyn_cast<IntrinsicInst>(LHS))
+      setLimitsForIntrinsic(*II, Lower, Upper);
     ConstantRange LHS_CR = Lower != Upper ? ConstantRange(Lower, Upper)
                                           : ConstantRange(Width, true);
 
     if (auto *I = dyn_cast<Instruction>(LHS))
-      if (auto *Ranges = I->getMetadata(LLVMContext::MD_range))
+      if (auto *Ranges = IIQ.getMetadata(I, LLVMContext::MD_range))
         LHS_CR = LHS_CR.intersectWith(getConstantRangeFromMetadata(*Ranges));
 
     if (!LHS_CR.isFullSet()) {
@@ -2654,16 +2767,20 @@ static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
       B = LBO->getOperand(1);
       NoLHSWrapProblem =
           ICmpInst::isEquality(Pred) ||
-          (CmpInst::isUnsigned(Pred) && LBO->hasNoUnsignedWrap()) ||
-          (CmpInst::isSigned(Pred) && LBO->hasNoSignedWrap());
+          (CmpInst::isUnsigned(Pred) &&
+           Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO))) ||
+          (CmpInst::isSigned(Pred) &&
+           Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)));
     }
     if (RBO && RBO->getOpcode() == Instruction::Add) {
       C = RBO->getOperand(0);
       D = RBO->getOperand(1);
       NoRHSWrapProblem =
           ICmpInst::isEquality(Pred) ||
-          (CmpInst::isUnsigned(Pred) && RBO->hasNoUnsignedWrap()) ||
-          (CmpInst::isSigned(Pred) && RBO->hasNoSignedWrap());
+          (CmpInst::isUnsigned(Pred) &&
+           Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(RBO))) ||
+          (CmpInst::isSigned(Pred) &&
+           Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RBO)));
     }
 
     // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
@@ -2881,7 +2998,8 @@ static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
       //  - The shift is nuw, we can't shift out the one bit.
       //  - CI2 is one
       //  - CI isn't zero
-      if (LBO->hasNoSignedWrap() || LBO->hasNoUnsignedWrap() ||
+      if (Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
+          Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
           CI2Val->isOneValue() || !CI->isZero()) {
         if (Pred == ICmpInst::ICMP_EQ)
           return ConstantInt::getFalse(RHS->getContext());
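Once a [Lower, Upper) range is known for the LHS, simplifyICmpWithConstant only has to compare regions. A hedged sketch of that final step — the helper name is made up; the real logic is inlined in the function above:

    #include "llvm/IR/ConstantRange.h"
    #include "llvm/IR/InstrTypes.h"
    using namespace llvm;

    // Returns 1 (always true), 0 (always false) or -1 (unknown) for
    // "icmp Pred LHS, C" given a range LHS_CR known to contain LHS.
    static int foldICmpByRange(CmpInst::Predicate Pred,
                               const ConstantRange &LHS_CR, const APInt &C) {
      ConstantRange TrueRegion = ConstantRange::makeExactICmpRegion(Pred, C);
      if (TrueRegion.contains(LHS_CR))
        return 1;
      if (TrueRegion.inverse().contains(LHS_CR))
        return 0;
      return -1;
    }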
@@ -2905,29 +3023,31 @@ static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
       break;
     case Instruction::UDiv:
     case Instruction::LShr:
-      if (ICmpInst::isSigned(Pred) || !LBO->isExact() || !RBO->isExact())
+      if (ICmpInst::isSigned(Pred) || !Q.IIQ.isExact(LBO) ||
+          !Q.IIQ.isExact(RBO))
         break;
       if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
                                       RBO->getOperand(0), Q, MaxRecurse - 1))
         return V;
       break;
     case Instruction::SDiv:
-      if (!ICmpInst::isEquality(Pred) || !LBO->isExact() || !RBO->isExact())
+      if (!ICmpInst::isEquality(Pred) || !Q.IIQ.isExact(LBO) ||
+          !Q.IIQ.isExact(RBO))
         break;
       if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
                                       RBO->getOperand(0), Q, MaxRecurse - 1))
         return V;
       break;
     case Instruction::AShr:
-      if (!LBO->isExact() || !RBO->isExact())
+      if (!Q.IIQ.isExact(LBO) || !Q.IIQ.isExact(RBO))
         break;
       if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
                                       RBO->getOperand(0), Q, MaxRecurse - 1))
         return V;
       break;
     case Instruction::Shl: {
-      bool NUW = LBO->hasNoUnsignedWrap() && RBO->hasNoUnsignedWrap();
-      bool NSW = LBO->hasNoSignedWrap() && RBO->hasNoSignedWrap();
+      bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
+      bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
       if (!NUW && !NSW)
         break;
       if (!NSW && ICmpInst::isSigned(Pred))
@@ -2942,6 +3062,44 @@ static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
   return nullptr;
 }
 
+static Value *simplifyICmpWithAbsNabs(CmpInst::Predicate Pred, Value *Op0,
+                                      Value *Op1) {
+  // We need a comparison with a constant.
+  const APInt *C;
+  if (!match(Op1, m_APInt(C)))
+    return nullptr;
+
+  // matchSelectPattern returns the negation part of an abs pattern in SP1.
+  // If the negate has an NSW flag, abs(INT_MIN) is undefined. Without that
+  // constraint, we can't make a contiguous range for the result of abs.
+  ICmpInst::Predicate AbsPred = ICmpInst::BAD_ICMP_PREDICATE;
+  Value *SP0, *SP1;
+  SelectPatternFlavor SPF = matchSelectPattern(Op0, SP0, SP1).Flavor;
+  if (SPF == SelectPatternFlavor::SPF_ABS &&
+      cast<Instruction>(SP1)->hasNoSignedWrap())
+    // The result of abs(X) is >= 0 (with nsw).
+    AbsPred = ICmpInst::ICMP_SGE;
+  if (SPF == SelectPatternFlavor::SPF_NABS)
+    // The result of -abs(X) is <= 0.
+    AbsPred = ICmpInst::ICMP_SLE;
+
+  if (AbsPred == ICmpInst::BAD_ICMP_PREDICATE)
+    return nullptr;
+
+  // If there is no intersection between abs/nabs and the range of this icmp,
+  // the icmp must be false. If the abs/nabs range is a subset of the icmp
+  // range, the icmp must be true.
+  APInt Zero = APInt::getNullValue(C->getBitWidth());
+  ConstantRange AbsRange = ConstantRange::makeExactICmpRegion(AbsPred, Zero);
+  ConstantRange CmpRange = ConstantRange::makeExactICmpRegion(Pred, *C);
+  if (AbsRange.intersectWith(CmpRange).isEmptySet())
+    return getFalse(GetCompareTy(Op0));
+  if (CmpRange.contains(AbsRange))
+    return getTrue(GetCompareTy(Op0));
+
+  return nullptr;
+}
+
 /// Simplify integer comparisons where at least one operand of the compare
 /// matches an integer min/max idiom.
 static Value *simplifyICmpWithMinMax(CmpInst::Predicate Pred, Value *LHS,
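A plain-C++ instance of the new abs/nabs range fold — nsw on the negation is what excludes INT_MIN and keeps the abs range contiguous:

    #include <cassert>
    #include <cstdint>

    int main() {
      // With nsw, abs(X) lies in [0, INT_MAX], so "abs(X) >= -5" is always
      // true and "abs(X) < 0" is always false, whatever X is.
      for (int32_t X : {-1000, -1, 0, 1, 123456}) { // nsw excludes INT32_MIN
        int32_t Abs = X < 0 ? -X : X;
        assert(Abs >= -5 && !(Abs < 0));
      }
    }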
@@ -3175,7 +3333,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
   if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q))
     return V;
 
-  if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS))
+  if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS, Q.IIQ))
     return V;
 
   // If both operands have range metadata, use the metadata
@@ -3184,8 +3342,8 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
     auto RHS_Instr = cast<Instruction>(RHS);
     auto LHS_Instr = cast<Instruction>(LHS);
 
-    if (RHS_Instr->getMetadata(LLVMContext::MD_range) &&
-        LHS_Instr->getMetadata(LLVMContext::MD_range)) {
+    if (Q.IIQ.getMetadata(RHS_Instr, LLVMContext::MD_range) &&
+        Q.IIQ.getMetadata(LHS_Instr, LLVMContext::MD_range)) {
       auto RHS_CR = getConstantRangeFromMetadata(
           *RHS_Instr->getMetadata(LLVMContext::MD_range));
       auto LHS_CR = getConstantRangeFromMetadata(
@@ -3363,7 +3521,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
 
   // icmp eq|ne X, Y -> false|true if X != Y
   if (ICmpInst::isEquality(Pred) &&
-      isKnownNonEqual(LHS, RHS, Q.DL, Q.AC, Q.CxtI, Q.DT)) {
+      isKnownNonEqual(LHS, RHS, Q.DL, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo)) {
     return Pred == ICmpInst::ICMP_NE ? getTrue(ITy) : getFalse(ITy);
   }
 
@@ -3373,11 +3531,14 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
   if (Value *V = simplifyICmpWithMinMax(Pred, LHS, RHS, Q, MaxRecurse))
     return V;
 
+  if (Value *V = simplifyICmpWithAbsNabs(Pred, LHS, RHS))
+    return V;
+
   // Simplify comparisons of related pointers using a powerful, recursive
   // GEP-walk when we have target data available..
   if (LHS->getType()->isPointerTy())
-    if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI, LHS,
-                                     RHS))
+    if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI,
+                                     Q.IIQ, LHS, RHS))
       return C;
   if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
     if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
@@ -3386,7 +3547,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
           Q.DL.getTypeSizeInBits(CRHS->getPointerOperandType()) ==
               Q.DL.getTypeSizeInBits(CRHS->getType()))
         if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI,
-                                         CLHS->getPointerOperand(),
+                                         Q.IIQ, CLHS->getPointerOperand(),
                                          CRHS->getPointerOperand()))
           return C;
 
@@ -3457,13 +3618,11 @@ static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
   if (Pred == FCmpInst::FCMP_TRUE)
     return getTrue(RetTy);
 
-  // UNO/ORD predicates can be trivially folded if NaNs are ignored.
-  if (FMF.noNaNs()) {
-    if (Pred == FCmpInst::FCMP_UNO)
-      return getFalse(RetTy);
-    if (Pred == FCmpInst::FCMP_ORD)
-      return getTrue(RetTy);
-  }
+  // Fold (un)ordered comparison if we can determine there are no NaNs.
+  if (Pred == FCmpInst::FCMP_UNO || Pred == FCmpInst::FCMP_ORD)
+    if (FMF.noNaNs() ||
+        (isKnownNeverNaN(LHS, Q.TLI) && isKnownNeverNaN(RHS, Q.TLI)))
+      return ConstantInt::get(RetTy, Pred == FCmpInst::FCMP_ORD);
 
   // NaN is unordered; NaN is not ordered.
   assert((FCmpInst::isOrdered(Pred) || FCmpInst::isUnordered(Pred)) &&
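The both-operands-have-!range fold above follows the same region logic as the constant case; a hedged sketch of how the metadata path fits together now that lookups go through IIQ (the helper name is hypothetical):

    #include "llvm/Analysis/InstructionSimplify.h"
    #include "llvm/IR/ConstantRange.h"
    #include "llvm/IR/Instruction.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Metadata.h"
    using namespace llvm;

    // 1 = always true, 0 = always false, -1 = unknown. With UseInstrInfo off,
    // IIQ.getMetadata returns null and the fold is skipped entirely.
    static int foldICmpByRangeMD(CmpInst::Predicate Pred, const Instruction *L,
                                 const Instruction *R, const InstrInfoQuery &IIQ) {
      MDNode *LMD = IIQ.getMetadata(L, LLVMContext::MD_range);
      MDNode *RMD = IIQ.getMetadata(R, LLVMContext::MD_range);
      if (!LMD || !RMD)
        return -1;
      ConstantRange LCR = getConstantRangeFromMetadata(*LMD);
      ConstantRange RCR = getConstantRangeFromMetadata(*RMD);
      if (ConstantRange::makeSatisfyingICmpRegion(Pred, RCR).contains(LCR))
        return 1;
      if (ConstantRange::makeSatisfyingICmpRegion(
              CmpInst::getInversePredicate(Pred), RCR).contains(LCR))
        return 0;
      return -1;
    }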
@@ -3518,12 +3677,19 @@ static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
     }
     if (C->isZero()) {
       switch (Pred) {
+      case FCmpInst::FCMP_OGE:
+        if (FMF.noNaNs() && CannotBeOrderedLessThanZero(LHS, Q.TLI))
+          return getTrue(RetTy);
+        break;
       case FCmpInst::FCMP_UGE:
         if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
           return getTrue(RetTy);
         break;
+      case FCmpInst::FCMP_ULT:
+        if (FMF.noNaNs() && CannotBeOrderedLessThanZero(LHS, Q.TLI))
+          return getFalse(RetTy);
+        break;
       case FCmpInst::FCMP_OLT:
-        // X < 0
         if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
           return getFalse(RetTy);
         break;
@@ -3600,11 +3766,10 @@ static const Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
     //
     // We can't replace %sel with %add unless we strip away the flags.
     if (isa<OverflowingBinaryOperator>(B))
-      if (B->hasNoSignedWrap() || B->hasNoUnsignedWrap())
-        return nullptr;
-    if (isa<PossiblyExactOperator>(B))
-      if (B->isExact())
+      if (Q.IIQ.hasNoSignedWrap(B) || Q.IIQ.hasNoUnsignedWrap(B))
         return nullptr;
+    if (isa<PossiblyExactOperator>(B) && Q.IIQ.isExact(B))
+      return nullptr;
 
     if (MaxRecurse) {
       if (B->getOperand(0) == Op)
@@ -3738,6 +3903,28 @@ static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal,
     if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, Y,
                                          Pred == ICmpInst::ICMP_EQ))
       return V;
+
+    // Test for zero-shift-guard-ops around funnel shifts. These are used to
+    // avoid UB from oversized shifts in raw IR rotate patterns, but the
+    // intrinsics do not have that problem.
+    Value *ShAmt;
+    auto isFsh = m_CombineOr(m_Intrinsic<Intrinsic::fshl>(m_Value(X), m_Value(),
+                                                          m_Value(ShAmt)),
+                             m_Intrinsic<Intrinsic::fshr>(m_Value(), m_Value(X),
+                                                          m_Value(ShAmt)));
+    // (ShAmt != 0) ? fshl(X, *, ShAmt) : X --> fshl(X, *, ShAmt)
+    // (ShAmt != 0) ? fshr(*, X, ShAmt) : X --> fshr(*, X, ShAmt)
+    // (ShAmt == 0) ? fshl(X, *, ShAmt) : X --> X
+    // (ShAmt == 0) ? fshr(*, X, ShAmt) : X --> X
+    if (match(TrueVal, isFsh) && FalseVal == X && CmpLHS == ShAmt)
+      return Pred == ICmpInst::ICMP_NE ? TrueVal : X;
+
+    // (ShAmt == 0) ? X : fshl(X, *, ShAmt) --> fshl(X, *, ShAmt)
+    // (ShAmt == 0) ? X : fshr(*, X, ShAmt) --> fshr(*, X, ShAmt)
+    // (ShAmt != 0) ? X : fshl(X, *, ShAmt) --> X
+    // (ShAmt != 0) ? X : fshr(*, X, ShAmt) --> X
+    if (match(FalseVal, isFsh) && TrueVal == X && CmpLHS == ShAmt)
+      return Pred == ICmpInst::ICMP_EQ ? FalseVal : X;
   }
 
   // Check for other compares that behave like bit test.
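A plain-C++ analogue of the zero-shift-guard fold: once a rotate is expressed as a funnel-shift intrinsic, which is defined for a shift amount of 0, the guarding select is redundant because rotl(x, 0) == x.

    #include <cassert>
    #include <cstdint>

    // Raw IR rotates need the guard because (x << 0) | (x >> 32) would be UB;
    // this formulation keeps both shift amounts in range.
    static uint32_t rotl(uint32_t X, uint32_t S) {
      return (X << (S % 32)) | (X >> ((32 - S) % 32));
    }

    int main() {
      for (uint32_t S : {0u, 1u, 13u, 31u}) {
        uint32_t X = 0xDEADBEEFu;
        uint32_t Guarded = (S == 0) ? X : rotl(X, S);
        assert(Guarded == rotl(X, S)); // the select can be dropped
      }
    }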
@@ -3775,6 +3962,34 @@ static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal,
   return nullptr;
 }
 
+/// Try to simplify a select instruction when its condition operand is a
+/// floating-point comparison.
+static Value *simplifySelectWithFCmp(Value *Cond, Value *T, Value *F) {
+  FCmpInst::Predicate Pred;
+  if (!match(Cond, m_FCmp(Pred, m_Specific(T), m_Specific(F))) &&
+      !match(Cond, m_FCmp(Pred, m_Specific(F), m_Specific(T))))
+    return nullptr;
+
+  // TODO: The transform may not be valid with -0.0. An incomplete way of
+  // testing for that possibility is to check if at least one operand is a
+  // non-zero constant.
+  const APFloat *C;
+  if ((match(T, m_APFloat(C)) && C->isNonZero()) ||
+      (match(F, m_APFloat(C)) && C->isNonZero())) {
+    // (T == F) ? T : F --> F
+    // (F == T) ? T : F --> F
+    if (Pred == FCmpInst::FCMP_OEQ)
+      return F;
+
+    // (T != F) ? T : F --> T
+    // (F != T) ? T : F --> T
+    if (Pred == FCmpInst::FCMP_UNE)
+      return T;
+  }
+
+  return nullptr;
+}
+
 /// Given operands for a SelectInst, see if we can fold the result.
 /// If not, this returns null.
 static Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
@@ -3811,9 +4026,16 @@ static Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
           simplifySelectWithICmpCond(Cond, TrueVal, FalseVal, Q, MaxRecurse))
     return V;
 
+  if (Value *V = simplifySelectWithFCmp(Cond, TrueVal, FalseVal))
+    return V;
+
   if (Value *V = foldSelectWithBinaryOp(Cond, TrueVal, FalseVal))
     return V;
 
+  Optional<bool> Imp = isImpliedByDomCondition(Cond, Q.CxtI, Q.DL);
+  if (Imp)
+    return *Imp ? TrueVal : FalseVal;
+
   return nullptr;
 }
 
@@ -4325,6 +4547,14 @@ static Value *SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
       match(Op1, m_FSub(m_AnyZeroFP(), m_Specific(Op0)))))
     return ConstantFP::getNullValue(Op0->getType());
 
+  // (X - Y) + Y --> X
+  // Y + (X - Y) --> X
+  Value *X;
+  if (FMF.noSignedZeros() && FMF.allowReassoc() &&
+      (match(Op0, m_FSub(m_Value(X), m_Specific(Op1))) ||
+       match(Op1, m_FSub(m_Value(X), m_Specific(Op0)))))
+    return X;
+
   return nullptr;
 }
 
@@ -4362,6 +4592,13 @@ static Value *SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
   if (FMF.noNaNs() && Op0 == Op1)
     return Constant::getNullValue(Op0->getType());
 
+  // Y - (Y - X) --> X
+  // (X + Y) - Y --> X
+  if (FMF.noSignedZeros() && FMF.allowReassoc() &&
+      (match(Op1, m_FSub(m_Specific(Op0), m_Value(X))) ||
+       match(Op0, m_c_FAdd(m_Specific(Op1), m_Value(X)))))
+    return X;
+
   return nullptr;
 }
 
@@ -4442,10 +4679,8 @@ static Value *SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
     // -X /  X -> -1.0 and
     //  X / -X -> -1.0 are legal when NaNs are ignored.
     // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored.
-    if ((BinaryOperator::isFNeg(Op0, /*IgnoreZeroSign=*/true) &&
-         BinaryOperator::getFNegArgument(Op0) == Op1) ||
-        (BinaryOperator::isFNeg(Op1, /*IgnoreZeroSign=*/true) &&
-         BinaryOperator::getFNegArgument(Op1) == Op0))
+    if (match(Op0, m_FNegNSZ(m_Specific(Op1))) ||
+        match(Op1, m_FNegNSZ(m_Specific(Op0))))
       return ConstantFP::get(Op0->getType(), -1.0);
   }
 
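Why the new (X - Y) + Y --> X fold is gated on both reassoc and nsz — two quick counterexamples in plain C++ for the unflagged case:

    #include <cassert>
    #include <cmath>

    int main() {
      // reassoc: rounding can make (X - Y) + Y lose X entirely.
      double X = 1.0, Y = 1e100;
      assert((X - Y) + Y == 0.0 && X == 1.0); // result is not X
      // nsz: the sign of zero is not preserved.
      double Z = -0.0, W = 0.0;
      assert(std::signbit(Z) && !std::signbit((Z - W) + W)); // -0.0 becomes +0.0
    }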
@@ -4747,6 +4982,40 @@ static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
     if (match(Op0, m_Undef()) || match(Op1, m_Undef()))
       return Constant::getNullValue(ReturnType);
     break;
+  case Intrinsic::uadd_sat:
+    // sat(MAX + X) -> MAX
+    // sat(X + MAX) -> MAX
+    if (match(Op0, m_AllOnes()) || match(Op1, m_AllOnes()))
+      return Constant::getAllOnesValue(ReturnType);
+    LLVM_FALLTHROUGH;
+  case Intrinsic::sadd_sat:
+    // sat(X + undef) -> -1
+    // sat(undef + X) -> -1
+    // For unsigned: Assume undef is MAX, thus we saturate to MAX (-1).
+    // For signed: Assume undef is ~X, in which case X + ~X = -1.
+    if (match(Op0, m_Undef()) || match(Op1, m_Undef()))
+      return Constant::getAllOnesValue(ReturnType);
+
+    // X + 0 -> X
+    if (match(Op1, m_Zero()))
+      return Op0;
+    // 0 + X -> X
+    if (match(Op0, m_Zero()))
+      return Op1;
+    break;
+  case Intrinsic::usub_sat:
+    // sat(0 - X) -> 0, sat(X - MAX) -> 0
+    if (match(Op0, m_Zero()) || match(Op1, m_AllOnes()))
+      return Constant::getNullValue(ReturnType);
+    LLVM_FALLTHROUGH;
+  case Intrinsic::ssub_sat:
+    // X - X -> 0, X - undef -> 0, undef - X -> 0
+    if (Op0 == Op1 || match(Op0, m_Undef()) || match(Op1, m_Undef()))
+      return Constant::getNullValue(ReturnType);
+    // X - 0 -> X
+    if (match(Op1, m_Zero()))
+      return Op0;
+    break;
   case Intrinsic::load_relative:
     if (auto *C0 = dyn_cast<Constant>(Op0))
       if (auto *C1 = dyn_cast<Constant>(Op1))
@@ -4764,10 +5033,51 @@ static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
     break;
   case Intrinsic::maxnum:
   case Intrinsic::minnum:
-    // If one argument is NaN, return the other argument.
-    if (match(Op0, m_NaN())) return Op1;
-    if (match(Op1, m_NaN())) return Op0;
+  case Intrinsic::maximum:
+  case Intrinsic::minimum: {
+    // If the arguments are the same, this is a no-op.
+    if (Op0 == Op1) return Op0;
+
+    // If one argument is undef, return the other argument.
+    if (match(Op0, m_Undef()))
+      return Op1;
+    if (match(Op1, m_Undef()))
+      return Op0;
+
+    // If one argument is NaN, return other or NaN appropriately.
+    bool PropagateNaN = IID == Intrinsic::minimum || IID == Intrinsic::maximum;
+    if (match(Op0, m_NaN()))
+      return PropagateNaN ? Op0 : Op1;
+    if (match(Op1, m_NaN()))
+      return PropagateNaN ? Op1 : Op0;
+
+    // Min/max of the same operation with common operand:
+    // m(m(X, Y)), X --> m(X, Y) (4 commuted variants)
+    if (auto *M0 = dyn_cast<IntrinsicInst>(Op0))
+      if (M0->getIntrinsicID() == IID &&
+          (M0->getOperand(0) == Op1 || M0->getOperand(1) == Op1))
+        return Op0;
+    if (auto *M1 = dyn_cast<IntrinsicInst>(Op1))
+      if (M1->getIntrinsicID() == IID &&
+          (M1->getOperand(0) == Op0 || M1->getOperand(1) == Op0))
+        return Op1;
+
+    // min(X, -Inf) --> -Inf (and commuted variant)
+    // max(X, +Inf) --> +Inf (and commuted variant)
+    bool UseNegInf = IID == Intrinsic::minnum || IID == Intrinsic::minimum;
+    const APFloat *C;
+    if ((match(Op0, m_APFloat(C)) && C->isInfinity() &&
+         C->isNegative() == UseNegInf) ||
+        (match(Op1, m_APFloat(C)) && C->isInfinity() &&
+         C->isNegative() == UseNegInf))
+      return ConstantFP::getInfinity(ReturnType, UseNegInf);
+
+    // TODO: minnum(nnan x, inf) -> x
+    // TODO: minnum(nnan ninf x, flt_max) -> x
+    // TODO: maxnum(nnan x, -inf) -> x
+    // TODO: maxnum(nnan ninf x, -flt_max) -> x
     break;
+  }
   default:
     break;
   }
@@ -4802,7 +5112,16 @@ static Value *simplifyIntrinsic(Function *F, IterTy ArgBegin, IterTy ArgEnd,
   }
   case Intrinsic::fshl:
   case Intrinsic::fshr: {
-    Value *ShAmtArg = ArgBegin[2];
+    Value *Op0 = ArgBegin[0], *Op1 = ArgBegin[1], *ShAmtArg = ArgBegin[2];
+
+    // If both operands are undef, the result is undef.
+    if (match(Op0, m_Undef()) && match(Op1, m_Undef()))
+      return UndefValue::get(F->getReturnType());
+
+    // If shift amount is undef, assume it is zero.
+    if (match(ShAmtArg, m_Undef()))
+      return ArgBegin[IID == Intrinsic::fshl ? 0 : 1];
+
     const APInt *ShAmtC;
     if (match(ShAmtArg, m_APInt(ShAmtC))) {
       // If there's effectively no shift, return the 1st arg or 2nd arg.
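The dividing line the new min/max code encodes: minnum/maxnum return the other operand when exactly one input is NaN (IEEE-754 minNum behavior, which C's fmin models), while the newer minimum/maximum intrinsics propagate the NaN. A plain-C++ illustration:

    #include <cassert>
    #include <cmath>

    // minimum-style semantics: NaN propagates.
    static double minimum(double A, double B) {
      if (std::isnan(A) || std::isnan(B))
        return NAN;
      return A < B ? A : B;
    }

    int main() {
      assert(std::fmin(NAN, 2.0) == 2.0);    // minnum-style: NaN is dropped
      assert(std::isnan(minimum(NAN, 2.0))); // minimum-style: NaN wins
    }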
@@ -4889,18 +5208,20 @@ Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
                               I->getFastMathFlags(), Q);
     break;
   case Instruction::Add:
-    Result = SimplifyAddInst(I->getOperand(0), I->getOperand(1),
-                             cast<BinaryOperator>(I)->hasNoSignedWrap(),
-                             cast<BinaryOperator>(I)->hasNoUnsignedWrap(), Q);
+    Result =
+        SimplifyAddInst(I->getOperand(0), I->getOperand(1),
+                        Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
+                        Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
     break;
   case Instruction::FSub:
     Result = SimplifyFSubInst(I->getOperand(0), I->getOperand(1),
                               I->getFastMathFlags(), Q);
     break;
   case Instruction::Sub:
-    Result = SimplifySubInst(I->getOperand(0), I->getOperand(1),
-                             cast<BinaryOperator>(I)->hasNoSignedWrap(),
-                             cast<BinaryOperator>(I)->hasNoUnsignedWrap(), Q);
+    Result =
+        SimplifySubInst(I->getOperand(0), I->getOperand(1),
+                        Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
+                        Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
     break;
   case Instruction::FMul:
     Result = SimplifyFMulInst(I->getOperand(0), I->getOperand(1),
@@ -4930,17 +5251,18 @@ Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
                               I->getFastMathFlags(), Q);
     break;
   case Instruction::Shl:
-    Result = SimplifyShlInst(I->getOperand(0), I->getOperand(1),
-                             cast<BinaryOperator>(I)->hasNoSignedWrap(),
-                             cast<BinaryOperator>(I)->hasNoUnsignedWrap(), Q);
+    Result =
+        SimplifyShlInst(I->getOperand(0), I->getOperand(1),
+                        Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
+                        Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
     break;
   case Instruction::LShr:
     Result = SimplifyLShrInst(I->getOperand(0), I->getOperand(1),
-                              cast<BinaryOperator>(I)->isExact(), Q);
+                              Q.IIQ.isExact(cast<BinaryOperator>(I)), Q);
     break;
   case Instruction::AShr:
     Result = SimplifyAShrInst(I->getOperand(0), I->getOperand(1),
-                              cast<BinaryOperator>(I)->isExact(), Q);
+                              Q.IIQ.isExact(cast<BinaryOperator>(I)), Q);
     break;
   case Instruction::And:
     Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1), Q);
@@ -5066,7 +5388,7 @@ static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
 
     // Gracefully handle edge cases where the instruction is not wired into any
     // parent block.
-    if (I->getParent() && !I->isEHPad() && !isa<TerminatorInst>(I) &&
+    if (I->getParent() && !I->isEHPad() && !I->isTerminator() &&
        !I->mayHaveSideEffects())
      I->eraseFromParent();
   } else {
@@ -5095,7 +5417,7 @@ static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
 
     // Gracefully handle edge cases where the instruction is not wired into any
    // parent block.
-    if (I->getParent() && !I->isEHPad() && !isa<TerminatorInst>(I) &&
+    if (I->getParent() && !I->isEHPad() && !I->isTerminator() &&
         !I->mayHaveSideEffects())
       I->eraseFromParent();
   }