path: root/lib/Transforms/InstCombine
Diffstat (limited to 'lib/Transforms/InstCombine')
-rw-r--r--  lib/Transforms/InstCombine/InstCombineAddSub.cpp            115
-rw-r--r--  lib/Transforms/InstCombine/InstCombineAndOrXor.cpp          331
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCalls.cpp              42
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCasts.cpp              30
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCompares.cpp           67
-rw-r--r--  lib/Transforms/InstCombine/InstCombineInternal.h             22
-rw-r--r--  lib/Transforms/InstCombine/InstCombineSelect.cpp              9
-rw-r--r--  lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp  431
-rw-r--r--  lib/Transforms/InstCombine/InstructionCombining.cpp          43
9 files changed, 534 insertions, 556 deletions
diff --git a/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index e30a4bafb9b0c..030461004f568 100644
--- a/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -17,6 +17,7 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/PatternMatch.h"
+#include "llvm/Support/KnownBits.h"
using namespace llvm;
using namespace PatternMatch;
@@ -794,6 +795,11 @@ unsigned FAddCombine::calcInstrNumber(const AddendVect &Opnds) {
if (Opnd->isConstant())
continue;
+ // The constant check above is really for a few special constant
+ // coefficients.
+ if (isa<UndefValue>(Opnd->getSymVal()))
+ continue;
+
const FAddendCoef &CE = Opnd->getCoef();
if (CE.isMinusOne() || CE.isMinusTwo())
NegOpndNum++;
@@ -894,24 +900,22 @@ bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS,
return true;
unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
- APInt LHSKnownZero(BitWidth, 0);
- APInt LHSKnownOne(BitWidth, 0);
- computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, 0, &CxtI);
+ KnownBits LHSKnown(BitWidth);
+ computeKnownBits(LHS, LHSKnown, 0, &CxtI);
- APInt RHSKnownZero(BitWidth, 0);
- APInt RHSKnownOne(BitWidth, 0);
- computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, 0, &CxtI);
+ KnownBits RHSKnown(BitWidth);
+ computeKnownBits(RHS, RHSKnown, 0, &CxtI);
// Addition of two 2's complement numbers having opposite signs will never
// overflow.
- if ((LHSKnownOne[BitWidth - 1] && RHSKnownZero[BitWidth - 1]) ||
- (LHSKnownZero[BitWidth - 1] && RHSKnownOne[BitWidth - 1]))
+ if ((LHSKnown.One[BitWidth - 1] && RHSKnown.Zero[BitWidth - 1]) ||
+ (LHSKnown.Zero[BitWidth - 1] && RHSKnown.One[BitWidth - 1]))
return true;
// Check if carry bit of addition will not cause overflow.
- if (checkRippleForAdd(LHSKnownZero, RHSKnownZero))
+ if (checkRippleForAdd(LHSKnown.Zero, RHSKnown.Zero))
return true;
- if (checkRippleForAdd(RHSKnownZero, LHSKnownZero))
+ if (checkRippleForAdd(RHSKnown.Zero, LHSKnown.Zero))
return true;
return false;
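
The hunk above only swaps the paired KnownZero/KnownOne APInts for the new KnownBits struct (Known.Zero holds bits known clear, Known.One bits known set); the overflow argument itself is unchanged: two addends with opposite known signs cannot overflow a signed add. A standalone C++ sketch of that argument, separate from the patch and using plain fixed-width integers rather than the LLVM API:

#include <cassert>

// Exhaustive check over 8-bit signed values: if the signs differ, the true
// sum stays inside [-128, 127], so the signed addition cannot overflow.
int main() {
  for (int a = -128; a <= 127; ++a)
    for (int b = -128; b <= 127; ++b)
      if ((a < 0) != (b < 0)) {
        int sum = a + b;                    // computed in int, no wraparound
        assert(sum >= -128 && sum <= 127);  // fits back into int8_t
      }
  return 0;
}
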
@@ -931,18 +935,16 @@ bool InstCombiner::WillNotOverflowSignedSub(Value *LHS, Value *RHS,
return true;
unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
- APInt LHSKnownZero(BitWidth, 0);
- APInt LHSKnownOne(BitWidth, 0);
- computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, 0, &CxtI);
+ KnownBits LHSKnown(BitWidth);
+ computeKnownBits(LHS, LHSKnown, 0, &CxtI);
- APInt RHSKnownZero(BitWidth, 0);
- APInt RHSKnownOne(BitWidth, 0);
- computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, 0, &CxtI);
+ KnownBits RHSKnown(BitWidth);
+ computeKnownBits(RHS, RHSKnown, 0, &CxtI);
// Subtraction of two 2's complement numbers having identical signs will
// never overflow.
- if ((LHSKnownOne[BitWidth - 1] && RHSKnownOne[BitWidth - 1]) ||
- (LHSKnownZero[BitWidth - 1] && RHSKnownZero[BitWidth - 1]))
+ if ((LHSKnown.One[BitWidth - 1] && RHSKnown.One[BitWidth - 1]) ||
+ (LHSKnown.Zero[BitWidth - 1] && RHSKnown.Zero[BitWidth - 1]))
return true;
// TODO: implement logic similar to checkRippleForAdd
@@ -1113,10 +1115,9 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
// a sub and fuse this add with it.
if (LHS->hasOneUse() && (XorRHS->getValue()+1).isPowerOf2()) {
IntegerType *IT = cast<IntegerType>(I.getType());
- APInt LHSKnownOne(IT->getBitWidth(), 0);
- APInt LHSKnownZero(IT->getBitWidth(), 0);
- computeKnownBits(XorLHS, LHSKnownZero, LHSKnownOne, 0, &I);
- if ((XorRHS->getValue() | LHSKnownZero).isAllOnesValue())
+ KnownBits LHSKnown(IT->getBitWidth());
+ computeKnownBits(XorLHS, LHSKnown, 0, &I);
+ if ((XorRHS->getValue() | LHSKnown.Zero).isAllOnesValue())
return BinaryOperator::CreateSub(ConstantExpr::getAdd(XorRHS, CI),
XorLHS);
}
@@ -1385,39 +1386,58 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
// integer add followed by a promotion.
if (SIToFPInst *LHSConv = dyn_cast<SIToFPInst>(LHS)) {
Value *LHSIntVal = LHSConv->getOperand(0);
+ Type *FPType = LHSConv->getType();
+
+ // TODO: This check is overly conservative. In many cases known bits
+ // analysis can tell us that the result of the addition has less significant
+ // bits than the integer type can hold.
+ auto IsValidPromotion = [](Type *FTy, Type *ITy) {
+ Type *FScalarTy = FTy->getScalarType();
+ Type *IScalarTy = ITy->getScalarType();
+
+ // Do we have enough bits in the significand to represent the result of
+ // the integer addition?
+ unsigned MaxRepresentableBits =
+ APFloat::semanticsPrecision(FScalarTy->getFltSemantics());
+ return IScalarTy->getIntegerBitWidth() <= MaxRepresentableBits;
+ };
// (fadd double (sitofp x), fpcst) --> (sitofp (add int x, intcst))
// ... if the constant fits in the integer value. This is useful for things
// like (double)(x & 1234) + 4.0 -> (double)((X & 1234)+4) which no longer
// requires a constant pool load, and generally allows the add to be better
// instcombined.
- if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS)) {
- Constant *CI =
- ConstantExpr::getFPToSI(CFP, LHSIntVal->getType());
- if (LHSConv->hasOneUse() &&
- ConstantExpr::getSIToFP(CI, I.getType()) == CFP &&
- WillNotOverflowSignedAdd(LHSIntVal, CI, I)) {
- // Insert the new integer add.
- Value *NewAdd = Builder->CreateNSWAdd(LHSIntVal,
- CI, "addconv");
- return new SIToFPInst(NewAdd, I.getType());
+ if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS))
+ if (IsValidPromotion(FPType, LHSIntVal->getType())) {
+ Constant *CI =
+ ConstantExpr::getFPToSI(CFP, LHSIntVal->getType());
+ if (LHSConv->hasOneUse() &&
+ ConstantExpr::getSIToFP(CI, I.getType()) == CFP &&
+ WillNotOverflowSignedAdd(LHSIntVal, CI, I)) {
+ // Insert the new integer add.
+ Value *NewAdd = Builder->CreateNSWAdd(LHSIntVal,
+ CI, "addconv");
+ return new SIToFPInst(NewAdd, I.getType());
+ }
}
- }
// (fadd double (sitofp x), (sitofp y)) --> (sitofp (add int x, y))
if (SIToFPInst *RHSConv = dyn_cast<SIToFPInst>(RHS)) {
Value *RHSIntVal = RHSConv->getOperand(0);
-
- // Only do this if x/y have the same type, if at least one of them has a
- // single use (so we don't increase the number of int->fp conversions),
- // and if the integer add will not overflow.
- if (LHSIntVal->getType() == RHSIntVal->getType() &&
- (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
- WillNotOverflowSignedAdd(LHSIntVal, RHSIntVal, I)) {
- // Insert the new integer add.
- Value *NewAdd = Builder->CreateNSWAdd(LHSIntVal,
- RHSIntVal, "addconv");
- return new SIToFPInst(NewAdd, I.getType());
+ // It's enough to check LHS types only because we require int types to
+ // be the same for this transform.
+ if (IsValidPromotion(FPType, LHSIntVal->getType())) {
+ // Only do this if x/y have the same type, if at least one of them has a
+ // single use (so we don't increase the number of int->fp conversions),
+ // and if the integer add will not overflow.
+ if (LHSIntVal->getType() == RHSIntVal->getType() &&
+ (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
+ WillNotOverflowSignedAdd(LHSIntVal, RHSIntVal, I)) {
+ // Insert the new integer add.
+ Value *NewAdd = Builder->CreateNSWAdd(LHSIntVal,
+ RHSIntVal, "addconv");
+ return new SIToFPInst(NewAdd, I.getType());
+ }
}
}
}
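
IsValidPromotion gates the sitofp folds on APFloat::semanticsPrecision: the integer type must be no wider than the significand, otherwise the float type cannot represent every integer result exactly. A small standalone C++ illustration of that bound (double carries a 53-bit significand, float only 24; not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  // float (24-bit significand) cannot distinguish 2^24 and 2^24 + 1 ...
  assert((float)((1 << 24) + 1) == (float)(1 << 24));
  // ... while double (53-bit significand) represents every int32_t exactly.
  assert((double)INT32_MAX == 2147483647.0);
  assert((double)INT32_MAX != (double)(INT32_MAX - 1));
  return 0;
}
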
@@ -1617,10 +1637,9 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
// Turn this into a xor if LHS is 2^n-1 and the remaining bits are known
// zero.
if (Op0C->isMask()) {
- APInt RHSKnownZero(BitWidth, 0);
- APInt RHSKnownOne(BitWidth, 0);
- computeKnownBits(Op1, RHSKnownZero, RHSKnownOne, 0, &I);
- if ((*Op0C | RHSKnownZero).isAllOnesValue())
+ KnownBits RHSKnown(BitWidth);
+ computeKnownBits(Op1, RHSKnown, 0, &I);
+ if ((*Op0C | RHSKnown.Zero).isAllOnesValue())
return BinaryOperator::CreateXor(Op1, Op0);
}
}
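
The fold above relies on a borrow-free subtraction: when Op0C is a mask 2^n-1 and every other bit of Op1 is known zero, C - X and C ^ X agree. A standalone exhaustive check in plain C++ (illustrative only, not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  for (unsigned n = 0; n <= 8; ++n) {
    uint8_t c = (uint8_t)((1u << n) - 1);   // c = 2^n - 1, i.e. isMask()
    for (unsigned x = 0; x <= c; ++x)       // x has no bits outside c
      assert((uint8_t)(c - x) == (uint8_t)(c ^ x));
  }
  return 0;
}
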
diff --git a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 3a98e8937bda7..a97b5a9ec0bb8 100644
--- a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -906,15 +906,6 @@ Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
switch (PredL) {
default:
llvm_unreachable("Unknown integer condition code!");
- case ICmpInst::ICMP_EQ:
- switch (PredR) {
- default:
- llvm_unreachable("Unknown integer condition code!");
- case ICmpInst::ICMP_NE: // (X == 13 & X != 15) -> X == 13
- case ICmpInst::ICMP_ULT: // (X == 13 & X < 15) -> X == 13
- case ICmpInst::ICMP_SLT: // (X == 13 & X < 15) -> X == 13
- return LHS;
- }
case ICmpInst::ICMP_NE:
switch (PredR) {
default:
@@ -930,43 +921,15 @@ Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
if (LHSC == SubOne(RHSC)) // (X != 13 & X s< 14) -> X < 13
return Builder->CreateICmpSLT(LHS0, LHSC);
break; // (X != 13 & X s< 15) -> no change
- case ICmpInst::ICMP_EQ: // (X != 13 & X == 15) -> X == 15
- case ICmpInst::ICMP_UGT: // (X != 13 & X u> 15) -> X u> 15
- case ICmpInst::ICMP_SGT: // (X != 13 & X s> 15) -> X s> 15
- return RHS;
case ICmpInst::ICMP_NE:
// Potential folds for this case should already be handled.
break;
}
break;
- case ICmpInst::ICMP_ULT:
- switch (PredR) {
- default:
- llvm_unreachable("Unknown integer condition code!");
- case ICmpInst::ICMP_EQ: // (X u< 13 & X == 15) -> false
- case ICmpInst::ICMP_UGT: // (X u< 13 & X u> 15) -> false
- return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
- case ICmpInst::ICMP_NE: // (X u< 13 & X != 15) -> X u< 13
- case ICmpInst::ICMP_ULT: // (X u< 13 & X u< 15) -> X u< 13
- return LHS;
- }
- break;
- case ICmpInst::ICMP_SLT:
- switch (PredR) {
- default:
- llvm_unreachable("Unknown integer condition code!");
- case ICmpInst::ICMP_NE: // (X s< 13 & X != 15) -> X < 13
- case ICmpInst::ICMP_SLT: // (X s< 13 & X s< 15) -> X < 13
- return LHS;
- }
- break;
case ICmpInst::ICMP_UGT:
switch (PredR) {
default:
llvm_unreachable("Unknown integer condition code!");
- case ICmpInst::ICMP_EQ: // (X u> 13 & X == 15) -> X == 15
- case ICmpInst::ICMP_UGT: // (X u> 13 & X u> 15) -> X u> 15
- return RHS;
case ICmpInst::ICMP_NE:
if (RHSC == AddOne(LHSC)) // (X u> 13 & X != 14) -> X u> 14
return Builder->CreateICmp(PredL, LHS0, RHSC);
@@ -980,9 +943,6 @@ Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
switch (PredR) {
default:
llvm_unreachable("Unknown integer condition code!");
- case ICmpInst::ICMP_EQ: // (X s> 13 & X == 15) -> X == 15
- case ICmpInst::ICMP_SGT: // (X s> 13 & X s> 15) -> X s> 15
- return RHS;
case ICmpInst::ICMP_NE:
if (RHSC == AddOne(LHSC)) // (X s> 13 & X != 14) -> X s> 14
return Builder->CreateICmp(PredL, LHS0, RHSC);
@@ -1234,6 +1194,56 @@ static Instruction *foldBoolSextMaskToSelect(BinaryOperator &I) {
return nullptr;
}
+static Instruction *foldAndToXor(BinaryOperator &I,
+ InstCombiner::BuilderTy &Builder) {
+ assert(I.getOpcode() == Instruction::And);
+ Value *Op0 = I.getOperand(0);
+ Value *Op1 = I.getOperand(1);
+ Value *A, *B;
+
+ // Operand complexity canonicalization guarantees that the 'or' is Op0.
+ // (A | B) & ~(A & B) --> A ^ B
+ // (A | B) & ~(B & A) --> A ^ B
+ if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
+ match(Op1, m_Not(m_c_And(m_Specific(A), m_Specific(B)))))
+ return BinaryOperator::CreateXor(A, B);
+
+ // (A | ~B) & (~A | B) --> ~(A ^ B)
+ // (A | ~B) & (B | ~A) --> ~(A ^ B)
+ // (~B | A) & (~A | B) --> ~(A ^ B)
+ // (~B | A) & (B | ~A) --> ~(A ^ B)
+ if (match(Op0, m_c_Or(m_Value(A), m_Not(m_Value(B)))) &&
+ match(Op1, m_c_Or(m_Not(m_Specific(A)), m_Value(B))))
+ return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
+
+ return nullptr;
+}
+
+static Instruction *foldOrToXor(BinaryOperator &I,
+ InstCombiner::BuilderTy &Builder) {
+ assert(I.getOpcode() == Instruction::Or);
+ Value *Op0 = I.getOperand(0);
+ Value *Op1 = I.getOperand(1);
+ Value *A, *B;
+
+ // Operand complexity canonicalization guarantees that the 'and' is Op0.
+ // (A & B) | ~(A | B) --> ~(A ^ B)
+ // (A & B) | ~(B | A) --> ~(A ^ B)
+ if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
+ match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
+ return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
+
+ // (A & ~B) | (~A & B) --> A ^ B
+ // (A & ~B) | (B & ~A) --> A ^ B
+ // (~B & A) | (~A & B) --> A ^ B
+ // (~B & A) | (B & ~A) --> A ^ B
+ if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
+ match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B))))
+ return BinaryOperator::CreateXor(A, B);
+
+ return nullptr;
+}
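
A standalone byte-wise verification of the four rewrites these two helpers perform (plain C++, separate from the patch; the commuted variants follow by symmetry):

#include <cassert>
#include <cstdint>

int main() {
  for (unsigned a = 0; a < 256; ++a)
    for (unsigned b = 0; b < 256; ++b) {
      uint8_t A = (uint8_t)a, B = (uint8_t)b;
      assert((uint8_t)((A | B) & ~(A & B)) == (uint8_t)(A ^ B));    // and -> xor
      assert((uint8_t)((A | ~B) & (~A | B)) == (uint8_t)~(A ^ B));  // and -> not(xor)
      assert((uint8_t)((A & B) | ~(A | B)) == (uint8_t)~(A ^ B));   // or  -> not(xor)
      assert((uint8_t)((A & ~B) | (~A & B)) == (uint8_t)(A ^ B));   // or  -> xor
    }
  return 0;
}
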
+
// FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
// here. We should standardize that construct where it is needed or choose some
// other way to ensure that commutated variants of patterns are not missed.
@@ -1247,15 +1257,19 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
if (Value *V = SimplifyAndInst(Op0, Op1, DL, &TLI, &DT, &AC))
return replaceInstUsesWith(I, V);
- // (A|B)&(A|C) -> A|(B&C) etc
- if (Value *V = SimplifyUsingDistributiveLaws(I))
- return replaceInstUsesWith(I, V);
-
// See if we can simplify any instructions used by the instruction whose sole
// purpose is to compute bits we don't care about.
if (SimplifyDemandedInstructionBits(I))
return &I;
+ // Do this before using distributive laws to catch simple and/or/not patterns.
+ if (Instruction *Xor = foldAndToXor(I, *Builder))
+ return Xor;
+
+ // (A|B)&(A|C) -> A|(B&C) etc
+ if (Value *V = SimplifyUsingDistributiveLaws(I))
+ return replaceInstUsesWith(I, V);
+
if (Value *V = SimplifyBSwap(I))
return replaceInstUsesWith(I, V);
@@ -1366,19 +1380,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
return DeMorgan;
{
- Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
- // (A|B) & ~(A&B) -> A^B
- if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
- match(Op1, m_Not(m_And(m_Value(C), m_Value(D)))) &&
- ((A == C && B == D) || (A == D && B == C)))
- return BinaryOperator::CreateXor(A, B);
-
- // ~(A&B) & (A|B) -> A^B
- if (match(Op1, m_Or(m_Value(A), m_Value(B))) &&
- match(Op0, m_Not(m_And(m_Value(C), m_Value(D)))) &&
- ((A == C && B == D) || (A == D && B == C)))
- return BinaryOperator::CreateXor(A, B);
-
+ Value *A = nullptr, *B = nullptr, *C = nullptr;
// A&(A^B) => A & ~B
{
Value *tmpOp0 = Op0;
@@ -1405,11 +1407,9 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
}
// (A&((~A)|B)) -> A&B
- if (match(Op0, m_Or(m_Not(m_Specific(Op1)), m_Value(A))) ||
- match(Op0, m_Or(m_Value(A), m_Not(m_Specific(Op1)))))
+ if (match(Op0, m_c_Or(m_Not(m_Specific(Op1)), m_Value(A))))
return BinaryOperator::CreateAnd(A, Op1);
- if (match(Op1, m_Or(m_Not(m_Specific(Op0)), m_Value(A))) ||
- match(Op1, m_Or(m_Value(A), m_Not(m_Specific(Op0)))))
+ if (match(Op1, m_c_Or(m_Not(m_Specific(Op0)), m_Value(A))))
return BinaryOperator::CreateAnd(A, Op0);
// (A ^ B) & ((B ^ C) ^ A) -> (A ^ B) & ~C
@@ -1425,13 +1425,18 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
return BinaryOperator::CreateAnd(Op1, Builder->CreateNot(C));
// (A | B) & ((~A) ^ B) -> (A & B)
- if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
- match(Op1, m_Xor(m_Not(m_Specific(A)), m_Specific(B))))
+ // (A | B) & (B ^ (~A)) -> (A & B)
+ // (B | A) & ((~A) ^ B) -> (A & B)
+ // (B | A) & (B ^ (~A)) -> (A & B)
+ if (match(Op1, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
+ match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
return BinaryOperator::CreateAnd(A, B);
// ((~A) ^ B) & (A | B) -> (A & B)
// ((~A) ^ B) & (B | A) -> (A & B)
- if (match(Op0, m_Xor(m_Not(m_Value(A)), m_Value(B))) &&
+ // (B ^ (~A)) & (A | B) -> (A & B)
+ // (B ^ (~A)) & (B | A) -> (A & B)
+ if (match(Op0, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
match(Op1, m_c_Or(m_Specific(A), m_Specific(B))))
return BinaryOperator::CreateAnd(A, B);
}
@@ -2037,15 +2042,19 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
if (Value *V = SimplifyOrInst(Op0, Op1, DL, &TLI, &DT, &AC))
return replaceInstUsesWith(I, V);
- // (A&B)|(A&C) -> A&(B|C) etc
- if (Value *V = SimplifyUsingDistributiveLaws(I))
- return replaceInstUsesWith(I, V);
-
// See if we can simplify any instructions used by the instruction whose sole
// purpose is to compute bits we don't care about.
if (SimplifyDemandedInstructionBits(I))
return &I;
+ // Do this before using distributive laws to catch simple and/or/not patterns.
+ if (Instruction *Xor = foldOrToXor(I, *Builder))
+ return Xor;
+
+ // (A&B)|(A&C) -> A&(B|C) etc
+ if (Value *V = SimplifyUsingDistributiveLaws(I))
+ return replaceInstUsesWith(I, V);
+
if (Value *V = SimplifyBSwap(I))
return replaceInstUsesWith(I, V);
@@ -2105,19 +2114,6 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
match(Op0, m_c_And(m_Specific(A), m_Value(B))))
return BinaryOperator::CreateOr(Op1, B);
- // (A & ~B) | (A ^ B) -> (A ^ B)
- // (~B & A) | (A ^ B) -> (A ^ B)
- if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
- match(Op1, m_Xor(m_Specific(A), m_Specific(B))))
- return BinaryOperator::CreateXor(A, B);
-
- // Commute the 'or' operands.
- // (A ^ B) | (A & ~B) -> (A ^ B)
- // (A ^ B) | (~B & A) -> (A ^ B)
- if (match(Op1, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
- match(Op0, m_Xor(m_Specific(A), m_Specific(B))))
- return BinaryOperator::CreateXor(A, B);
-
// (A & C)|(B & D)
Value *C = nullptr, *D = nullptr;
if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
@@ -2182,23 +2178,6 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
return replaceInstUsesWith(I, V);
}
- // ((A&~B)|(~A&B)) -> A^B
- if ((match(C, m_Not(m_Specific(D))) &&
- match(B, m_Not(m_Specific(A)))))
- return BinaryOperator::CreateXor(A, D);
- // ((~B&A)|(~A&B)) -> A^B
- if ((match(A, m_Not(m_Specific(D))) &&
- match(B, m_Not(m_Specific(C)))))
- return BinaryOperator::CreateXor(C, D);
- // ((A&~B)|(B&~A)) -> A^B
- if ((match(C, m_Not(m_Specific(B))) &&
- match(D, m_Not(m_Specific(A)))))
- return BinaryOperator::CreateXor(A, B);
- // ((~B&A)|(B&~A)) -> A^B
- if ((match(A, m_Not(m_Specific(B))) &&
- match(D, m_Not(m_Specific(C)))))
- return BinaryOperator::CreateXor(C, B);
-
// ((A|B)&1)|(B&-2) -> (A&1) | B
if (match(A, m_Or(m_Value(V1), m_Specific(B))) ||
match(A, m_Or(m_Specific(B), m_Value(V1)))) {
@@ -2374,6 +2353,58 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
return Changed ? &I : nullptr;
}
+/// A ^ B can be specified using other logic ops in a variety of patterns. We
+/// can fold these early and efficiently by morphing an existing instruction.
+static Instruction *foldXorToXor(BinaryOperator &I) {
+ assert(I.getOpcode() == Instruction::Xor);
+ Value *Op0 = I.getOperand(0);
+ Value *Op1 = I.getOperand(1);
+ Value *A, *B;
+
+ // There are 4 commuted variants for each of the basic patterns.
+
+ // (A & B) ^ (A | B) -> A ^ B
+ // (A & B) ^ (B | A) -> A ^ B
+ // (A | B) ^ (A & B) -> A ^ B
+ // (A | B) ^ (B & A) -> A ^ B
+ if ((match(Op0, m_And(m_Value(A), m_Value(B))) &&
+ match(Op1, m_c_Or(m_Specific(A), m_Specific(B)))) ||
+ (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
+ match(Op1, m_c_And(m_Specific(A), m_Specific(B))))) {
+ I.setOperand(0, A);
+ I.setOperand(1, B);
+ return &I;
+ }
+
+ // (A | ~B) ^ (~A | B) -> A ^ B
+ // (~B | A) ^ (~A | B) -> A ^ B
+ // (~A | B) ^ (A | ~B) -> A ^ B
+ // (B | ~A) ^ (A | ~B) -> A ^ B
+ if ((match(Op0, m_c_Or(m_Value(A), m_Not(m_Value(B)))) &&
+ match(Op1, m_Or(m_Not(m_Specific(A)), m_Specific(B)))) ||
+ (match(Op0, m_c_Or(m_Not(m_Value(A)), m_Value(B))) &&
+ match(Op1, m_Or(m_Specific(A), m_Not(m_Specific(B)))))) {
+ I.setOperand(0, A);
+ I.setOperand(1, B);
+ return &I;
+ }
+
+ // (A & ~B) ^ (~A & B) -> A ^ B
+ // (~B & A) ^ (~A & B) -> A ^ B
+ // (~A & B) ^ (A & ~B) -> A ^ B
+ // (B & ~A) ^ (A & ~B) -> A ^ B
+ if ((match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
+ match(Op1, m_And(m_Not(m_Specific(A)), m_Specific(B)))) ||
+ (match(Op0, m_c_And(m_Not(m_Value(A)), m_Value(B))) &&
+ match(Op1, m_And(m_Specific(A), m_Not(m_Specific(B)))))) {
+ I.setOperand(0, A);
+ I.setOperand(1, B);
+ return &I;
+ }
+
+ return nullptr;
+}
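
The same kind of byte-wise check for the three xor-shaped patterns matched above (standalone C++, illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  for (unsigned a = 0; a < 256; ++a)
    for (unsigned b = 0; b < 256; ++b) {
      uint8_t A = (uint8_t)a, B = (uint8_t)b;
      assert((uint8_t)((A & B) ^ (A | B)) == (uint8_t)(A ^ B));
      assert((uint8_t)((A | ~B) ^ (~A | B)) == (uint8_t)(A ^ B));
      assert((uint8_t)((A & ~B) ^ (~A & B)) == (uint8_t)(A ^ B));
    }
  return 0;
}
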
+
// FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
// here. We should standardize that construct where it is needed or choose some
// other way to ensure that commutated variants of patterns are not missed.
@@ -2387,6 +2418,9 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
if (Value *V = SimplifyXorInst(Op0, Op1, DL, &TLI, &DT, &AC))
return replaceInstUsesWith(I, V);
+ if (Instruction *NewXor = foldXorToXor(I))
+ return NewXor;
+
// (A&B)^(A&C) -> A&(B^C) etc
if (Value *V = SimplifyUsingDistributiveLaws(I))
return replaceInstUsesWith(I, V);
@@ -2399,44 +2433,39 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
if (Value *V = SimplifyBSwap(I))
return replaceInstUsesWith(I, V);
- // Is this a ~ operation?
- if (Value *NotOp = dyn_castNotVal(&I)) {
- if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(NotOp)) {
- if (Op0I->getOpcode() == Instruction::And ||
- Op0I->getOpcode() == Instruction::Or) {
- // ~(~X & Y) --> (X | ~Y) - De Morgan's Law
- // ~(~X | Y) === (X & ~Y) - De Morgan's Law
- if (dyn_castNotVal(Op0I->getOperand(1)))
- Op0I->swapOperands();
- if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0))) {
- Value *NotY =
- Builder->CreateNot(Op0I->getOperand(1),
- Op0I->getOperand(1)->getName()+".not");
- if (Op0I->getOpcode() == Instruction::And)
- return BinaryOperator::CreateOr(Op0NotVal, NotY);
- return BinaryOperator::CreateAnd(Op0NotVal, NotY);
- }
-
- // ~(X & Y) --> (~X | ~Y) - De Morgan's Law
- // ~(X | Y) === (~X & ~Y) - De Morgan's Law
- if (IsFreeToInvert(Op0I->getOperand(0),
- Op0I->getOperand(0)->hasOneUse()) &&
- IsFreeToInvert(Op0I->getOperand(1),
- Op0I->getOperand(1)->hasOneUse())) {
- Value *NotX =
- Builder->CreateNot(Op0I->getOperand(0), "notlhs");
- Value *NotY =
- Builder->CreateNot(Op0I->getOperand(1), "notrhs");
- if (Op0I->getOpcode() == Instruction::And)
- return BinaryOperator::CreateOr(NotX, NotY);
- return BinaryOperator::CreateAnd(NotX, NotY);
- }
+ // Is this a 'not' (~) fed by a binary operator?
+ BinaryOperator *NotOp;
+ if (match(&I, m_Not(m_BinOp(NotOp)))) {
+ if (NotOp->getOpcode() == Instruction::And ||
+ NotOp->getOpcode() == Instruction::Or) {
+ // ~(~X & Y) --> (X | ~Y) - De Morgan's Law
+ // ~(~X | Y) === (X & ~Y) - De Morgan's Law
+ if (dyn_castNotVal(NotOp->getOperand(1)))
+ NotOp->swapOperands();
+ if (Value *Op0NotVal = dyn_castNotVal(NotOp->getOperand(0))) {
+ Value *NotY = Builder->CreateNot(
+ NotOp->getOperand(1), NotOp->getOperand(1)->getName() + ".not");
+ if (NotOp->getOpcode() == Instruction::And)
+ return BinaryOperator::CreateOr(Op0NotVal, NotY);
+ return BinaryOperator::CreateAnd(Op0NotVal, NotY);
+ }
- } else if (Op0I->getOpcode() == Instruction::AShr) {
- // ~(~X >>s Y) --> (X >>s Y)
- if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0)))
- return BinaryOperator::CreateAShr(Op0NotVal, Op0I->getOperand(1));
+ // ~(X & Y) --> (~X | ~Y) - De Morgan's Law
+ // ~(X | Y) === (~X & ~Y) - De Morgan's Law
+ if (IsFreeToInvert(NotOp->getOperand(0),
+ NotOp->getOperand(0)->hasOneUse()) &&
+ IsFreeToInvert(NotOp->getOperand(1),
+ NotOp->getOperand(1)->hasOneUse())) {
+ Value *NotX = Builder->CreateNot(NotOp->getOperand(0), "notlhs");
+ Value *NotY = Builder->CreateNot(NotOp->getOperand(1), "notrhs");
+ if (NotOp->getOpcode() == Instruction::And)
+ return BinaryOperator::CreateOr(NotX, NotY);
+ return BinaryOperator::CreateAnd(NotX, NotY);
}
+ } else if (NotOp->getOpcode() == Instruction::AShr) {
+ // ~(~X >>s Y) --> (X >>s Y)
+ if (Value *Op0NotVal = dyn_castNotVal(NotOp->getOperand(0)))
+ return BinaryOperator::CreateAShr(Op0NotVal, NotOp->getOperand(1));
}
}
@@ -2574,40 +2603,6 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
{
Value *A, *B, *C, *D;
- // (A & B)^(A | B) -> A ^ B
- if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
- match(Op1, m_Or(m_Value(C), m_Value(D)))) {
- if ((A == C && B == D) || (A == D && B == C))
- return BinaryOperator::CreateXor(A, B);
- }
- // (A | B)^(A & B) -> A ^ B
- if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
- match(Op1, m_And(m_Value(C), m_Value(D)))) {
- if ((A == C && B == D) || (A == D && B == C))
- return BinaryOperator::CreateXor(A, B);
- }
- // (A | ~B) ^ (~A | B) -> A ^ B
- // (~B | A) ^ (~A | B) -> A ^ B
- if (match(Op0, m_c_Or(m_Value(A), m_Not(m_Value(B)))) &&
- match(Op1, m_Or(m_Not(m_Specific(A)), m_Specific(B))))
- return BinaryOperator::CreateXor(A, B);
-
- // (~A | B) ^ (A | ~B) -> A ^ B
- if (match(Op0, m_Or(m_Not(m_Value(A)), m_Value(B))) &&
- match(Op1, m_Or(m_Specific(A), m_Not(m_Specific(B))))) {
- return BinaryOperator::CreateXor(A, B);
- }
- // (A & ~B) ^ (~A & B) -> A ^ B
- // (~B & A) ^ (~A & B) -> A ^ B
- if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
- match(Op1, m_And(m_Not(m_Specific(A)), m_Specific(B))))
- return BinaryOperator::CreateXor(A, B);
-
- // (~A & B) ^ (A & ~B) -> A ^ B
- if (match(Op0, m_And(m_Not(m_Value(A)), m_Value(B))) &&
- match(Op1, m_And(m_Specific(A), m_Not(m_Specific(B))))) {
- return BinaryOperator::CreateXor(A, B);
- }
// (A ^ C)^(A | B) -> ((~A) & B) ^ C
if (match(Op0, m_Xor(m_Value(D), m_Value(C))) &&
match(Op1, m_Or(m_Value(A), m_Value(B)))) {
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index e7aa1a4573714..313ab13b9e2b7 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -44,6 +44,7 @@
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
@@ -1378,14 +1379,13 @@ static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombiner &IC) {
return nullptr;
unsigned BitWidth = IT->getBitWidth();
- APInt KnownZero(BitWidth, 0);
- APInt KnownOne(BitWidth, 0);
- IC.computeKnownBits(Op0, KnownZero, KnownOne, 0, &II);
+ KnownBits Known(BitWidth);
+ IC.computeKnownBits(Op0, Known, 0, &II);
// Create a mask for bits above (ctlz) or below (cttz) the first known one.
bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
- unsigned NumMaskBits = IsTZ ? KnownOne.countTrailingZeros()
- : KnownOne.countLeadingZeros();
+ unsigned NumMaskBits = IsTZ ? Known.One.countTrailingZeros()
+ : Known.One.countLeadingZeros();
APInt Mask = IsTZ ? APInt::getLowBitsSet(BitWidth, NumMaskBits)
: APInt::getHighBitsSet(BitWidth, NumMaskBits);
@@ -1393,7 +1393,7 @@ static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombiner &IC) {
// zero, this value is constant.
// FIXME: This should be in InstSimplify because we're replacing an
// instruction with a constant.
- if ((Mask & KnownZero) == Mask) {
+ if (Mask.isSubsetOf(Known.Zero)) {
auto *C = ConstantInt::get(IT, APInt(BitWidth, NumMaskBits));
return IC.replaceInstUsesWith(II, C);
}
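
The cttz/ctlz logic above builds a mask of the positions below (cttz) or above (ctlz) the first bit known to be one; if every bit of that mask is also known zero, the count is a constant. A standalone sketch of the cttz case, with illustrative knownOne/knownZero values standing in for Known.One/Known.Zero (not the patch's code):

#include <cassert>
#include <cstdint>

// Count trailing zeros of a nonzero value (portable helper for the sketch).
static unsigned ctz(uint32_t v) {
  unsigned n = 0;
  while ((v & 1) == 0) { v >>= 1; ++n; }
  return n;
}

int main() {
  uint32_t knownOne  = 0b100;            // bit 2 known set (example value)
  uint32_t knownZero = 0b011;            // bits 0..1 known clear (example value)
  unsigned numMaskBits = ctz(knownOne);  // bits below the first known one
  uint32_t mask = (1u << numMaskBits) - 1;
  assert((mask & ~knownZero) == 0);      // the Mask.isSubsetOf(Known.Zero) test
  // Every value consistent with those known bits has exactly that many
  // trailing zeros, so cttz folds to the constant numMaskBits.
  for (uint32_t x = 0; x < 1024; ++x)
    if ((x & knownZero) == 0 && (x & knownOne) == knownOne)
      assert(ctz(x) == numMaskBits);
  return 0;
}
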
@@ -1401,7 +1401,7 @@ static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombiner &IC) {
// If the input to cttz/ctlz is known to be non-zero,
// then change the 'ZeroIsUndef' parameter to 'true'
// because we know the zero behavior can't affect the result.
- if (KnownOne != 0 || isKnownNonZero(Op0, IC.getDataLayout())) {
+ if (Known.One != 0 || isKnownNonZero(Op0, IC.getDataLayout())) {
if (!match(II.getArgOperand(1), m_One())) {
II.setOperand(1, IC.Builder->getTrue());
return &II;
@@ -3432,8 +3432,26 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (auto *CSrc0 = dyn_cast<Constant>(Src0)) {
if (auto *CSrc1 = dyn_cast<Constant>(Src1)) {
Constant *CCmp = ConstantExpr::getCompare(CCVal, CSrc0, CSrc1);
- return replaceInstUsesWith(*II,
- ConstantExpr::getSExt(CCmp, II->getType()));
+ if (CCmp->isNullValue()) {
+ return replaceInstUsesWith(
+ *II, ConstantExpr::getSExt(CCmp, II->getType()));
+ }
+
+ // The result of V_ICMP/V_FCMP assembly instructions (which this
+ // intrinsic exposes) is one bit per thread, masked with the EXEC
+ // register (which contains the bitmask of live threads). So a
+ // comparison that always returns true is the same as a read of the
+ // EXEC register.
+ Value *NewF = Intrinsic::getDeclaration(
+ II->getModule(), Intrinsic::read_register, II->getType());
+ Metadata *MDArgs[] = {MDString::get(II->getContext(), "exec")};
+ MDNode *MD = MDNode::get(II->getContext(), MDArgs);
+ Value *Args[] = {MetadataAsValue::get(II->getContext(), MD)};
+ CallInst *NewCall = Builder->CreateCall(NewF, Args);
+ NewCall->addAttribute(AttributeList::FunctionIndex,
+ Attribute::Convergent);
+ NewCall->takeName(II);
+ return replaceInstUsesWith(*II, NewCall);
}
// Canonicalize constants to RHS.
@@ -3599,9 +3617,9 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// If there is a dominating assume with the same condition as this one,
// then this one is redundant, and should be removed.
- APInt KnownZero(1, 0), KnownOne(1, 0);
- computeKnownBits(IIOperand, KnownZero, KnownOne, 0, II);
- if (KnownOne.isAllOnesValue())
+ KnownBits Known(1);
+ computeKnownBits(IIOperand, Known, 0, II);
+ if (Known.One.isAllOnesValue())
return eraseInstFromFunction(*II);
// Update the cache of affected values for this assumption (we might be
diff --git a/lib/Transforms/InstCombine/InstCombineCasts.cpp b/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 9127ddca59150..312d9baae43a6 100644
--- a/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -14,9 +14,10 @@
#include "InstCombineInternal.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/PatternMatch.h"
-#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Support/KnownBits.h"
using namespace llvm;
using namespace PatternMatch;
@@ -676,11 +677,10 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, ZExtInst &CI,
// This only works for EQ and NE
ICI->isEquality()) {
// If Op1C some other power of two, convert:
- uint32_t BitWidth = Op1C->getType()->getBitWidth();
- APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- computeKnownBits(ICI->getOperand(0), KnownZero, KnownOne, 0, &CI);
+ KnownBits Known(Op1C->getType()->getBitWidth());
+ computeKnownBits(ICI->getOperand(0), Known, 0, &CI);
- APInt KnownZeroMask(~KnownZero);
+ APInt KnownZeroMask(~Known.Zero);
if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
if (!DoTransform) return ICI;
@@ -726,13 +726,13 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, ZExtInst &CI,
Value *LHS = ICI->getOperand(0);
Value *RHS = ICI->getOperand(1);
- APInt KnownZeroLHS(BitWidth, 0), KnownOneLHS(BitWidth, 0);
- APInt KnownZeroRHS(BitWidth, 0), KnownOneRHS(BitWidth, 0);
- computeKnownBits(LHS, KnownZeroLHS, KnownOneLHS, 0, &CI);
- computeKnownBits(RHS, KnownZeroRHS, KnownOneRHS, 0, &CI);
+ KnownBits KnownLHS(BitWidth);
+ KnownBits KnownRHS(BitWidth);
+ computeKnownBits(LHS, KnownLHS, 0, &CI);
+ computeKnownBits(RHS, KnownRHS, 0, &CI);
- if (KnownZeroLHS == KnownZeroRHS && KnownOneLHS == KnownOneRHS) {
- APInt KnownBits = KnownZeroLHS | KnownOneLHS;
+ if (KnownLHS.Zero == KnownRHS.Zero && KnownLHS.One == KnownRHS.One) {
+ APInt KnownBits = KnownLHS.Zero | KnownLHS.One;
APInt UnknownBit = ~KnownBits;
if (UnknownBit.countPopulation() == 1) {
if (!DoTransform) return ICI;
@@ -740,7 +740,7 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, ZExtInst &CI,
Value *Result = Builder->CreateXor(LHS, RHS);
// Mask off any bits that are set and won't be shifted away.
- if (KnownOneLHS.uge(UnknownBit))
+ if (KnownLHS.One.uge(UnknownBit))
Result = Builder->CreateAnd(Result,
ConstantInt::get(ITy, UnknownBit));
@@ -1049,10 +1049,10 @@ Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) {
if (ICI->hasOneUse() &&
ICI->isEquality() && (Op1C->isZero() || Op1C->getValue().isPowerOf2())){
unsigned BitWidth = Op1C->getType()->getBitWidth();
- APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- computeKnownBits(Op0, KnownZero, KnownOne, 0, &CI);
+ KnownBits Known(BitWidth);
+ computeKnownBits(Op0, Known, 0, &CI);
- APInt KnownZeroMask(~KnownZero);
+ APInt KnownZeroMask(~Known.Zero);
if (KnownZeroMask.isPowerOf2()) {
Value *In = ICI->getOperand(0);
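
For the KnownZeroMask.isPowerOf2() case in transformZExtICmp: when only one bit of the compared value can be nonzero, zext(icmp ne X, 0) is just that bit shifted down to position 0. A standalone check in plain C++ (illustrative, not the patch's code):

#include <cassert>
#include <cstdint>

int main() {
  for (unsigned k = 0; k < 8; ++k)
    for (unsigned x = 0; x < 256; ++x) {
      uint8_t v = (uint8_t)(x & (1u << k));        // only bit k can be set
      uint8_t viaCmp = (v != 0) ? 1 : 0;           // zext(icmp ne v, 0)
      uint8_t viaShift = (uint8_t)((v >> k) & 1);  // lshr-based form
      assert(viaCmp == viaShift);
    }
  return 0;
}
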
diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 003029ae39d56..d846a631b96f6 100644
--- a/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -26,6 +26,7 @@
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/KnownBits.h"
using namespace llvm;
using namespace PatternMatch;
@@ -175,19 +176,18 @@ static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C) {
/// Given a signed integer type and a set of known zero and one bits, compute
/// the maximum and minimum values that could have the specified known zero and
/// known one bits, returning them in Min/Max.
-static void computeSignedMinMaxValuesFromKnownBits(const APInt &KnownZero,
- const APInt &KnownOne,
+/// TODO: Move to method on KnownBits struct?
+static void computeSignedMinMaxValuesFromKnownBits(const KnownBits &Known,
APInt &Min, APInt &Max) {
- assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
- KnownZero.getBitWidth() == Min.getBitWidth() &&
- KnownZero.getBitWidth() == Max.getBitWidth() &&
+ assert(Known.getBitWidth() == Min.getBitWidth() &&
+ Known.getBitWidth() == Max.getBitWidth() &&
"KnownZero, KnownOne and Min, Max must have equal bitwidth.");
- APInt UnknownBits = ~(KnownZero|KnownOne);
+ APInt UnknownBits = ~(Known.Zero|Known.One);
// The minimum value is when all unknown bits are zeros, EXCEPT for the sign
// bit if it is unknown.
- Min = KnownOne;
- Max = KnownOne|UnknownBits;
+ Min = Known.One;
+ Max = Known.One|UnknownBits;
if (UnknownBits.isNegative()) { // Sign bit is unknown
Min.setBit(Min.getBitWidth()-1);
@@ -198,19 +198,18 @@ static void computeSignedMinMaxValuesFromKnownBits(const APInt &KnownZero,
/// Given an unsigned integer type and a set of known zero and one bits, compute
/// the maximum and minimum values that could have the specified known zero and
/// known one bits, returning them in Min/Max.
-static void computeUnsignedMinMaxValuesFromKnownBits(const APInt &KnownZero,
- const APInt &KnownOne,
+/// TODO: Move to method on KnownBits struct?
+static void computeUnsignedMinMaxValuesFromKnownBits(const KnownBits &Known,
APInt &Min, APInt &Max) {
- assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
- KnownZero.getBitWidth() == Min.getBitWidth() &&
- KnownZero.getBitWidth() == Max.getBitWidth() &&
+ assert(Known.getBitWidth() == Min.getBitWidth() &&
+ Known.getBitWidth() == Max.getBitWidth() &&
"Ty, KnownZero, KnownOne and Min, Max must have equal bitwidth.");
- APInt UnknownBits = ~(KnownZero|KnownOne);
+ APInt UnknownBits = ~(Known.Zero|Known.One);
// The minimum value is when the unknown bits are all zeros.
- Min = KnownOne;
+ Min = Known.One;
// The maximum value is when the unknown bits are all ones.
- Max = KnownOne|UnknownBits;
+ Max = Known.One|UnknownBits;
}
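
A worked example of the unsigned helper above, using illustrative 4-bit known-bit values: Min is the known-one bits, Max additionally sets every unknown bit. Standalone C++ sketch, not part of the patch:

#include <cassert>
#include <cstdint>

int main() {
  const uint8_t width4   = 0x0F;                     // model a 4-bit value
  const uint8_t knownOne = 0x01, knownZero = 0x0C;   // example known bits
  const uint8_t unknown  = (uint8_t)(~(knownOne | knownZero) & width4);
  const uint8_t min = knownOne;                      // all unknown bits zero
  const uint8_t max = (uint8_t)(knownOne | unknown); // all unknown bits one
  for (uint8_t x = 0; x <= width4; ++x)
    if ((x & knownZero) == 0 && (x & knownOne) == knownOne)
      assert(min <= x && x <= max);
  return 0;
}
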
/// This is called when we see this pattern:
@@ -1479,14 +1478,14 @@ Instruction *InstCombiner::foldICmpTruncConstant(ICmpInst &Cmp,
// of the high bits truncated out of x are known.
unsigned DstBits = Trunc->getType()->getScalarSizeInBits(),
SrcBits = X->getType()->getScalarSizeInBits();
- APInt KnownZero(SrcBits, 0), KnownOne(SrcBits, 0);
- computeKnownBits(X, KnownZero, KnownOne, 0, &Cmp);
+ KnownBits Known(SrcBits);
+ computeKnownBits(X, Known, 0, &Cmp);
// If all the high bits are known, we can do this xform.
- if ((KnownZero | KnownOne).countLeadingOnes() >= SrcBits - DstBits) {
+ if ((Known.Zero | Known.One).countLeadingOnes() >= SrcBits - DstBits) {
// Pull in the high bits from known-ones set.
APInt NewRHS = C->zext(SrcBits);
- NewRHS |= KnownOne & APInt::getHighBitsSet(SrcBits, SrcBits - DstBits);
+ NewRHS |= Known.One & APInt::getHighBitsSet(SrcBits, SrcBits - DstBits);
return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), NewRHS));
}
}
@@ -4001,16 +4000,16 @@ Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
IsSignBit = isSignBitCheck(Pred, *CmpC, UnusedBit);
}
- APInt Op0KnownZero(BitWidth, 0), Op0KnownOne(BitWidth, 0);
- APInt Op1KnownZero(BitWidth, 0), Op1KnownOne(BitWidth, 0);
+ KnownBits Op0Known(BitWidth);
+ KnownBits Op1Known(BitWidth);
if (SimplifyDemandedBits(&I, 0,
getDemandedBitsLHSMask(I, BitWidth, IsSignBit),
- Op0KnownZero, Op0KnownOne, 0))
+ Op0Known, 0))
return &I;
if (SimplifyDemandedBits(&I, 1, APInt::getAllOnesValue(BitWidth),
- Op1KnownZero, Op1KnownOne, 0))
+ Op1Known, 0))
return &I;
// Given the known and unknown bits, compute a range that the LHS could be
@@ -4019,15 +4018,11 @@ Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0);
APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0);
if (I.isSigned()) {
- computeSignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne, Op0Min,
- Op0Max);
- computeSignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne, Op1Min,
- Op1Max);
+ computeSignedMinMaxValuesFromKnownBits(Op0Known, Op0Min, Op0Max);
+ computeSignedMinMaxValuesFromKnownBits(Op1Known, Op1Min, Op1Max);
} else {
- computeUnsignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne, Op0Min,
- Op0Max);
- computeUnsignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne, Op1Min,
- Op1Max);
+ computeUnsignedMinMaxValuesFromKnownBits(Op0Known, Op0Min, Op0Max);
+ computeUnsignedMinMaxValuesFromKnownBits(Op1Known, Op1Min, Op1Max);
}
// If Min and Max are known to be the same, then SimplifyDemandedBits
@@ -4054,8 +4049,8 @@ Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
// If all bits are known zero except for one, then we know at most one bit
// is set. If the comparison is against zero, then this is a check to see if
// *that* bit is set.
- APInt Op0KnownZeroInverted = ~Op0KnownZero;
- if (~Op1KnownZero == 0) {
+ APInt Op0KnownZeroInverted = ~Op0Known.Zero;
+ if (~Op1Known.Zero == 0) {
// If the LHS is an AND with the same constant, look through it.
Value *LHS = nullptr;
const APInt *LHSC;
@@ -4193,8 +4188,8 @@ Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
// Turn a signed comparison into an unsigned one if both operands are known to
// have the same sign.
if (I.isSigned() &&
- ((Op0KnownZero.isNegative() && Op1KnownZero.isNegative()) ||
- (Op0KnownOne.isNegative() && Op1KnownOne.isNegative())))
+ ((Op0Known.Zero.isNegative() && Op1Known.Zero.isNegative()) ||
+ (Op0Known.One.isNegative() && Op1Known.One.isNegative())))
return new ICmpInst(I.getUnsignedPredicate(), Op0, Op1);
return nullptr;
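
The final fold above turns a signed compare into an unsigned one when both sign bits are known to agree. A standalone exhaustive check over 8-bit values of why that is safe (plain C++, not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  for (int a = -128; a <= 127; ++a)
    for (int b = -128; b <= 127; ++b) {
      if ((a < 0) != (b < 0))
        continue;                        // only cases with matching signs
      bool slt = (int8_t)a  < (int8_t)b;
      bool ult = (uint8_t)a < (uint8_t)b;
      assert(slt == ult);                // signed and unsigned order agree
    }
  return 0;
}
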
diff --git a/lib/Transforms/InstCombine/InstCombineInternal.h b/lib/Transforms/InstCombine/InstCombineInternal.h
index 71000063ab3cb..776686d3d1171 100644
--- a/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -489,10 +489,9 @@ public:
return nullptr; // Don't do anything with FI
}
- void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
+ void computeKnownBits(Value *V, KnownBits &Known,
unsigned Depth, Instruction *CxtI) const {
- return llvm::computeKnownBits(V, KnownZero, KnownOne, DL, Depth, &AC, CxtI,
- &DT);
+ return llvm::computeKnownBits(V, Known, DL, Depth, &AC, CxtI, &DT);
}
bool MaskedValueIsZero(Value *V, const APInt &Mask, unsigned Depth = 0,
@@ -536,24 +535,23 @@ private:
/// \brief Attempts to replace V with a simpler value based on the demanded
/// bits.
- Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask, APInt &KnownZero,
- APInt &KnownOne, unsigned Depth,
- Instruction *CxtI);
+ Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask, KnownBits &Known,
+ unsigned Depth, Instruction *CxtI);
bool SimplifyDemandedBits(Instruction *I, unsigned Op,
- const APInt &DemandedMask, APInt &KnownZero,
- APInt &KnownOne, unsigned Depth = 0);
+ const APInt &DemandedMask, KnownBits &Known,
+ unsigned Depth = 0);
/// Helper routine of SimplifyDemandedUseBits. It computes KnownZero/KnownOne
/// bits. It also tries to handle simplifications that can be done based on
/// DemandedMask, but without modifying the Instruction.
Value *SimplifyMultipleUseDemandedBits(Instruction *I,
const APInt &DemandedMask,
- APInt &KnownZero, APInt &KnownOne,
+ KnownBits &Known,
unsigned Depth, Instruction *CxtI);
/// Helper routine of SimplifyDemandedUseBits. It tries to simplify demanded
/// bit for "r1 = shr x, c1; r2 = shl r1, c2" instruction sequence.
- Value *SimplifyShrShlDemandedBits(Instruction *Lsr, Instruction *Sftl,
- const APInt &DemandedMask, APInt &KnownZero,
- APInt &KnownOne);
+ Value *simplifyShrShlDemandedBits(
+ Instruction *Shr, const APInt &ShrOp1, Instruction *Shl,
+ const APInt &ShlOp1, const APInt &DemandedMask, KnownBits &Known);
/// \brief Tries to simplify operands to an integer instruction based on its
/// demanded bits.
diff --git a/lib/Transforms/InstCombine/InstCombineSelect.cpp b/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 5d6d899da4b5f..76829c5e457bf 100644
--- a/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -17,6 +17,7 @@
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
+#include "llvm/Support/KnownBits.h"
using namespace llvm;
using namespace PatternMatch;
@@ -1476,11 +1477,11 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
// The motivation for this call into value tracking is to take advantage of
// the assumption cache, so make sure that is populated.
if (!CondVal->getType()->isVectorTy() && !AC.assumptions().empty()) {
- APInt KnownOne(1, 0), KnownZero(1, 0);
- computeKnownBits(CondVal, KnownZero, KnownOne, 0, &SI);
- if (KnownOne == 1)
+ KnownBits Known(1);
+ computeKnownBits(CondVal, Known, 0, &SI);
+ if (Known.One == 1)
return replaceInstUsesWith(SI, TrueVal);
- if (KnownZero == 1)
+ if (Known.Zero == 1)
return replaceInstUsesWith(SI, FalseVal);
}
diff --git a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index 2ba052b7e02d3..8d0ed8532779e 100644
--- a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -16,6 +16,7 @@
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
+#include "llvm/Support/KnownBits.h"
using namespace llvm;
using namespace llvm::PatternMatch;
@@ -26,7 +27,7 @@ using namespace llvm::PatternMatch;
/// constant integer. If so, check to see if there are any bits set in the
/// constant that are not demanded. If so, shrink the constant and return true.
static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
- APInt Demanded) {
+ const APInt &Demanded) {
assert(I && "No instruction?");
assert(OpNo < I->getNumOperands() && "Operand index too large");
@@ -37,13 +38,11 @@ static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
return false;
// If there are no bits set that aren't demanded, nothing to do.
- Demanded = Demanded.zextOrTrunc(C->getBitWidth());
if (C->isSubsetOf(Demanded))
return false;
// This instruction is producing bits that are not demanded. Shrink the RHS.
- Demanded &= *C;
- I->setOperand(OpNo, ConstantInt::get(Op->getType(), Demanded));
+ I->setOperand(OpNo, ConstantInt::get(Op->getType(), *C & Demanded));
return true;
}
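
ShrinkDemandedConstant now replaces the constant with *C & Demanded (after the C->isSubsetOf(Demanded) early exit). A standalone example with illustrative values showing why the demanded bits are unaffected by shrinking the constant:

#include <cassert>
#include <cstdint>

int main() {
  const uint8_t demanded = 0x0F;                           // only the low nibble is used
  const uint8_t wideC = 0xF3, narrowC = wideC & demanded;  // constant shrinks 0xF3 -> 0x03
  for (unsigned x = 0; x < 256; ++x)
    assert(((x & wideC) & demanded) == ((x & narrowC) & demanded));
  return 0;
}
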
@@ -54,10 +53,10 @@ static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
/// the instruction has any properties that allow us to simplify its operands.
bool InstCombiner::SimplifyDemandedInstructionBits(Instruction &Inst) {
unsigned BitWidth = Inst.getType()->getScalarSizeInBits();
- APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
+ KnownBits Known(BitWidth);
APInt DemandedMask(APInt::getAllOnesValue(BitWidth));
- Value *V = SimplifyDemandedUseBits(&Inst, DemandedMask, KnownZero, KnownOne,
+ Value *V = SimplifyDemandedUseBits(&Inst, DemandedMask, Known,
0, &Inst);
if (!V) return false;
if (V == &Inst) return true;
@@ -70,11 +69,11 @@ bool InstCombiner::SimplifyDemandedInstructionBits(Instruction &Inst) {
/// change and false otherwise.
bool InstCombiner::SimplifyDemandedBits(Instruction *I, unsigned OpNo,
const APInt &DemandedMask,
- APInt &KnownZero, APInt &KnownOne,
+ KnownBits &Known,
unsigned Depth) {
Use &U = I->getOperandUse(OpNo);
- Value *NewVal = SimplifyDemandedUseBits(U.get(), DemandedMask, KnownZero,
- KnownOne, Depth, I);
+ Value *NewVal = SimplifyDemandedUseBits(U.get(), DemandedMask, Known,
+ Depth, I);
if (!NewVal) return false;
U = NewVal;
return true;
@@ -88,15 +87,16 @@ bool InstCombiner::SimplifyDemandedBits(Instruction *I, unsigned OpNo,
/// with a constant or one of its operands. In such cases, this function does
/// the replacement and returns true. In all other cases, it returns false after
/// analyzing the expression and setting KnownOne and known to be one in the
-/// expression. KnownZero contains all the bits that are known to be zero in the
-/// expression. These are provided to potentially allow the caller (which might
-/// recursively be SimplifyDemandedBits itself) to simplify the expression.
-/// KnownOne and KnownZero always follow the invariant that:
-/// KnownOne & KnownZero == 0.
-/// That is, a bit can't be both 1 and 0. Note that the bits in KnownOne and
-/// KnownZero may only be accurate for those bits set in DemandedMask. Note also
-/// that the bitwidth of V, DemandedMask, KnownZero and KnownOne must all be the
-/// same.
+/// expression. Known.Zero contains all the bits that are known to be zero in
+/// the expression. These are provided to potentially allow the caller (which
+/// might recursively be SimplifyDemandedBits itself) to simplify the
+/// expression.
+/// Known.One and Known.Zero always follow the invariant that:
+/// Known.One & Known.Zero == 0.
+/// That is, a bit can't be both 1 and 0. Note that the bits in Known.One and
+/// Known.Zero may only be accurate for those bits set in DemandedMask. Note
+/// also that the bitwidth of V, DemandedMask, Known.Zero and Known.One must all
+/// be the same.
///
/// This returns null if it did not change anything and it permits no
/// simplification. This returns V itself if it did some simplification of V's
@@ -104,8 +104,7 @@ bool InstCombiner::SimplifyDemandedBits(Instruction *I, unsigned OpNo,
/// some other non-null value if it found out that V is equal to another value
/// in the context where the specified bits are demanded, but not for all users.
Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
- APInt &KnownZero, APInt &KnownOne,
- unsigned Depth,
+ KnownBits &Known, unsigned Depth,
Instruction *CxtI) {
assert(V != nullptr && "Null pointer of Value???");
assert(Depth <= 6 && "Limit Search Depth");
@@ -113,18 +112,16 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
Type *VTy = V->getType();
assert(
(!VTy->isIntOrIntVectorTy() || VTy->getScalarSizeInBits() == BitWidth) &&
- KnownZero.getBitWidth() == BitWidth &&
- KnownOne.getBitWidth() == BitWidth &&
- "Value *V, DemandedMask, KnownZero and KnownOne "
- "must have same BitWidth");
+ Known.getBitWidth() == BitWidth &&
+ "Value *V, DemandedMask and Known must have same BitWidth");
if (isa<Constant>(V)) {
- computeKnownBits(V, KnownZero, KnownOne, Depth, CxtI);
+ computeKnownBits(V, Known, Depth, CxtI);
return nullptr;
}
- KnownZero.clearAllBits();
- KnownOne.clearAllBits();
+ Known.Zero.clearAllBits();
+ Known.One.clearAllBits();
if (DemandedMask == 0) // Not demanding any bits from V.
return UndefValue::get(VTy);
@@ -133,20 +130,17 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
Instruction *I = dyn_cast<Instruction>(V);
if (!I) {
- computeKnownBits(V, KnownZero, KnownOne, Depth, CxtI);
+ computeKnownBits(V, Known, Depth, CxtI);
return nullptr; // Only analyze instructions.
}
// If there are multiple uses of this value and we aren't at the root, then
// we can't do any simplifications of the operands, because DemandedMask
// only reflects the bits demanded by *one* of the users.
- if (Depth != 0 && !I->hasOneUse()) {
- return SimplifyMultipleUseDemandedBits(I, DemandedMask, KnownZero, KnownOne,
- Depth, CxtI);
- }
+ if (Depth != 0 && !I->hasOneUse())
+ return SimplifyMultipleUseDemandedBits(I, DemandedMask, Known, Depth, CxtI);
- APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
- APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
+ KnownBits LHSKnown(BitWidth), RHSKnown(BitWidth);
// If this is the root being simplified, allow it to have multiple uses,
// just set the DemandedMask to all bits so that we can try to simplify the
@@ -157,22 +151,21 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
switch (I->getOpcode()) {
default:
- computeKnownBits(I, KnownZero, KnownOne, Depth, CxtI);
+ computeKnownBits(I, Known, Depth, CxtI);
break;
case Instruction::And: {
// If either the LHS or the RHS are Zero, the result is zero.
- if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnownZero, RHSKnownOne,
- Depth + 1) ||
- SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnownZero, LHSKnownZero,
- LHSKnownOne, Depth + 1))
+ if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
+ SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnown.Zero, LHSKnown,
+ Depth + 1))
return I;
- assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
- assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");
+ assert(!(RHSKnown.Zero & RHSKnown.One) && "Bits known to be one AND zero?");
+ assert(!(LHSKnown.Zero & LHSKnown.One) && "Bits known to be one AND zero?");
// Output known-0 are known to be clear if zero in either the LHS | RHS.
- APInt IKnownZero = RHSKnownZero | LHSKnownZero;
+ APInt IKnownZero = RHSKnown.Zero | LHSKnown.Zero;
// Output known-1 bits are only known if set in both the LHS & RHS.
- APInt IKnownOne = RHSKnownOne & LHSKnownOne;
+ APInt IKnownOne = RHSKnown.One & LHSKnown.One;
// If the client is only demanding bits that we know, return the known
// constant.
@@ -181,33 +174,32 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// If all of the demanded bits are known 1 on one side, return the other.
// These bits cannot contribute to the result of the 'and'.
- if (DemandedMask.isSubsetOf(LHSKnownZero | RHSKnownOne))
+ if (DemandedMask.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
return I->getOperand(0);
- if (DemandedMask.isSubsetOf(RHSKnownZero | LHSKnownOne))
+ if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
return I->getOperand(1);
// If the RHS is a constant, see if we can simplify it.
- if (ShrinkDemandedConstant(I, 1, DemandedMask & ~LHSKnownZero))
+ if (ShrinkDemandedConstant(I, 1, DemandedMask & ~LHSKnown.Zero))
return I;
- KnownZero = std::move(IKnownZero);
- KnownOne = std::move(IKnownOne);
+ Known.Zero = std::move(IKnownZero);
+ Known.One = std::move(IKnownOne);
break;
}
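
For the "return the other operand" rule in the And case above: if every demanded bit is either known zero on the LHS or known one on the RHS, the 'and' agrees with its first operand on all demanded bits. A standalone check with illustrative known-bit values (not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  const uint8_t demanded     = 0x3C;  // example demanded mask
  const uint8_t lhsKnownZero = 0x30;  // bits 4..5 of a known clear (example)
  const uint8_t rhsKnownOne  = 0x0C;  // bits 2..3 of b known set (example)
  assert((demanded & ~(lhsKnownZero | rhsKnownOne)) == 0);  // the isSubsetOf test
  for (unsigned a = 0; a < 256; ++a)
    for (unsigned b = 0; b < 256; ++b) {
      if ((a & lhsKnownZero) != 0 || (b & rhsKnownOne) != rhsKnownOne)
        continue;                     // skip values inconsistent with the known bits
      assert(((a & b) & demanded) == (a & demanded));
    }
  return 0;
}
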
case Instruction::Or: {
// If either the LHS or the RHS are One, the result is One.
- if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnownZero, RHSKnownOne,
- Depth + 1) ||
- SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnownOne, LHSKnownZero,
- LHSKnownOne, Depth + 1))
+ if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
+ SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnown.One, LHSKnown,
+ Depth + 1))
return I;
- assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
- assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");
+ assert(!(RHSKnown.Zero & RHSKnown.One) && "Bits known to be one AND zero?");
+ assert(!(LHSKnown.Zero & LHSKnown.One) && "Bits known to be one AND zero?");
// Output known-0 bits are only known if clear in both the LHS & RHS.
- APInt IKnownZero = RHSKnownZero & LHSKnownZero;
- // Output known-1 are known to be set if set in either the LHS | RHS.
- APInt IKnownOne = RHSKnownOne | LHSKnownOne;
+ APInt IKnownZero = RHSKnown.Zero & LHSKnown.Zero;
+ // Output known-1 are known to be set if set in either the LHS | RHS.
+ APInt IKnownOne = RHSKnown.One | LHSKnown.One;
// If the client is only demanding bits that we know, return the known
// constant.
@@ -216,34 +208,32 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// If all of the demanded bits are known zero on one side, return the other.
// These bits cannot contribute to the result of the 'or'.
- if (DemandedMask.isSubsetOf(LHSKnownOne | RHSKnownZero))
+ if (DemandedMask.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
return I->getOperand(0);
- if (DemandedMask.isSubsetOf(RHSKnownOne | LHSKnownZero))
+ if (DemandedMask.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
return I->getOperand(1);
// If the RHS is a constant, see if we can simplify it.
if (ShrinkDemandedConstant(I, 1, DemandedMask))
return I;
- KnownZero = std::move(IKnownZero);
- KnownOne = std::move(IKnownOne);
+ Known.Zero = std::move(IKnownZero);
+ Known.One = std::move(IKnownOne);
break;
}
case Instruction::Xor: {
- if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnownZero, RHSKnownOne,
- Depth + 1) ||
- SimplifyDemandedBits(I, 0, DemandedMask, LHSKnownZero, LHSKnownOne,
- Depth + 1))
+ if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
+ SimplifyDemandedBits(I, 0, DemandedMask, LHSKnown, Depth + 1))
return I;
- assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
- assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");
+ assert(!(RHSKnown.Zero & RHSKnown.One) && "Bits known to be one AND zero?");
+ assert(!(LHSKnown.Zero & LHSKnown.One) && "Bits known to be one AND zero?");
// Output known-0 bits are known if clear or set in both the LHS & RHS.
- APInt IKnownZero = (RHSKnownZero & LHSKnownZero) |
- (RHSKnownOne & LHSKnownOne);
+ APInt IKnownZero = (RHSKnown.Zero & LHSKnown.Zero) |
+ (RHSKnown.One & LHSKnown.One);
// Output known-1 are known to be set if set in only one of the LHS, RHS.
- APInt IKnownOne = (RHSKnownZero & LHSKnownOne) |
- (RHSKnownOne & LHSKnownZero);
+ APInt IKnownOne = (RHSKnown.Zero & LHSKnown.One) |
+ (RHSKnown.One & LHSKnown.Zero);
// If the client is only demanding bits that we know, return the known
// constant.
@@ -252,15 +242,15 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// If all of the demanded bits are known zero on one side, return the other.
// These bits cannot contribute to the result of the 'xor'.
- if (DemandedMask.isSubsetOf(RHSKnownZero))
+ if (DemandedMask.isSubsetOf(RHSKnown.Zero))
return I->getOperand(0);
- if (DemandedMask.isSubsetOf(LHSKnownZero))
+ if (DemandedMask.isSubsetOf(LHSKnown.Zero))
return I->getOperand(1);
// If all of the demanded bits are known to be zero on one side or the
// other, turn this into an *inclusive* or.
// e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
- if (DemandedMask.isSubsetOf(RHSKnownZero | LHSKnownZero)) {
+ if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.Zero)) {
Instruction *Or =
BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
I->getName());
@@ -271,10 +261,10 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// bits on that side are also known to be set on the other side, turn this
// into an AND, as we know the bits will be cleared.
// e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
- if (DemandedMask.isSubsetOf(RHSKnownZero|RHSKnownOne) &&
- RHSKnownOne.isSubsetOf(LHSKnownOne)) {
+ if (DemandedMask.isSubsetOf(RHSKnown.Zero|RHSKnown.One) &&
+ RHSKnown.One.isSubsetOf(LHSKnown.One)) {
Constant *AndC = Constant::getIntegerValue(VTy,
- ~RHSKnownOne & DemandedMask);
+ ~RHSKnown.One & DemandedMask);
Instruction *And = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
return InsertNewInstWith(And, *I);
}
@@ -292,10 +282,10 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
if (LHSInst->getOpcode() == Instruction::And && LHSInst->hasOneUse() &&
isa<ConstantInt>(I->getOperand(1)) &&
isa<ConstantInt>(LHSInst->getOperand(1)) &&
- (LHSKnownOne & RHSKnownOne & DemandedMask) != 0) {
+ (LHSKnown.One & RHSKnown.One & DemandedMask) != 0) {
ConstantInt *AndRHS = cast<ConstantInt>(LHSInst->getOperand(1));
ConstantInt *XorRHS = cast<ConstantInt>(I->getOperand(1));
- APInt NewMask = ~(LHSKnownOne & RHSKnownOne & DemandedMask);
+ APInt NewMask = ~(LHSKnown.One & RHSKnown.One & DemandedMask);
Constant *AndC =
ConstantInt::get(I->getType(), NewMask & AndRHS->getValue());
@@ -309,9 +299,9 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
}
// Output known-0 bits are known if clear or set in both the LHS & RHS.
- KnownZero = std::move(IKnownZero);
+ Known.Zero = std::move(IKnownZero);
// Output known-1 bits are known to be set if set in only one of the LHS, RHS.
- KnownOne = std::move(IKnownOne);
+ Known.One = std::move(IKnownOne);
break;
}
case Instruction::Select:
@@ -321,13 +311,11 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
if (matchSelectPattern(I, LHS, RHS).Flavor != SPF_UNKNOWN)
return nullptr;
- if (SimplifyDemandedBits(I, 2, DemandedMask, RHSKnownZero, RHSKnownOne,
- Depth + 1) ||
- SimplifyDemandedBits(I, 1, DemandedMask, LHSKnownZero, LHSKnownOne,
- Depth + 1))
+ if (SimplifyDemandedBits(I, 2, DemandedMask, RHSKnown, Depth + 1) ||
+ SimplifyDemandedBits(I, 1, DemandedMask, LHSKnown, Depth + 1))
return I;
- assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
- assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");
+ assert(!(RHSKnown.Zero & RHSKnown.One) && "Bits known to be one AND zero?");
+ assert(!(LHSKnown.Zero & LHSKnown.One) && "Bits known to be one AND zero?");
// If the operands are constants, see if we can simplify them.
if (ShrinkDemandedConstant(I, 1, DemandedMask) ||
@@ -335,21 +323,20 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
return I;
// Only known if known in both the LHS and RHS.
- KnownOne = RHSKnownOne & LHSKnownOne;
- KnownZero = RHSKnownZero & LHSKnownZero;
+ Known.One = RHSKnown.One & LHSKnown.One;
+ Known.Zero = RHSKnown.Zero & LHSKnown.Zero;
break;
case Instruction::Trunc: {
unsigned truncBf = I->getOperand(0)->getType()->getScalarSizeInBits();
DemandedMask = DemandedMask.zext(truncBf);
- KnownZero = KnownZero.zext(truncBf);
- KnownOne = KnownOne.zext(truncBf);
- if (SimplifyDemandedBits(I, 0, DemandedMask, KnownZero, KnownOne,
- Depth + 1))
+ Known.Zero = Known.Zero.zext(truncBf);
+ Known.One = Known.One.zext(truncBf);
+ if (SimplifyDemandedBits(I, 0, DemandedMask, Known, Depth + 1))
return I;
DemandedMask = DemandedMask.trunc(BitWidth);
- KnownZero = KnownZero.trunc(BitWidth);
- KnownOne = KnownOne.trunc(BitWidth);
- assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
+ Known.Zero = Known.Zero.trunc(BitWidth);
+ Known.One = Known.One.trunc(BitWidth);
+ assert(!(Known.Zero & Known.One) && "Bits known to be one AND zero?");
break;
}
case Instruction::BitCast:
@@ -369,27 +356,25 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// Don't touch a vector-to-scalar bitcast.
return nullptr;
- if (SimplifyDemandedBits(I, 0, DemandedMask, KnownZero, KnownOne,
- Depth + 1))
+ if (SimplifyDemandedBits(I, 0, DemandedMask, Known, Depth + 1))
return I;
- assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
+ assert(!(Known.Zero & Known.One) && "Bits known to be one AND zero?");
break;
case Instruction::ZExt: {
// Compute the bits in the result that are not present in the input.
unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
DemandedMask = DemandedMask.trunc(SrcBitWidth);
- KnownZero = KnownZero.trunc(SrcBitWidth);
- KnownOne = KnownOne.trunc(SrcBitWidth);
- if (SimplifyDemandedBits(I, 0, DemandedMask, KnownZero, KnownOne,
- Depth + 1))
+ Known.Zero = Known.Zero.trunc(SrcBitWidth);
+ Known.One = Known.One.trunc(SrcBitWidth);
+ if (SimplifyDemandedBits(I, 0, DemandedMask, Known, Depth + 1))
return I;
DemandedMask = DemandedMask.zext(BitWidth);
- KnownZero = KnownZero.zext(BitWidth);
- KnownOne = KnownOne.zext(BitWidth);
- assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
+ Known.Zero = Known.Zero.zext(BitWidth);
+ Known.One = Known.One.zext(BitWidth);
+ assert(!(Known.Zero & Known.One) && "Bits known to be one AND zero?");
// The top bits are known to be zero.
- KnownZero.setBitsFrom(SrcBitWidth);
+ Known.Zero.setBitsFrom(SrcBitWidth);
break;
}
case Instruction::SExt: {
@@ -406,27 +391,26 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
InputDemandedBits.setBit(SrcBitWidth-1);
InputDemandedBits = InputDemandedBits.trunc(SrcBitWidth);
- KnownZero = KnownZero.trunc(SrcBitWidth);
- KnownOne = KnownOne.trunc(SrcBitWidth);
- if (SimplifyDemandedBits(I, 0, InputDemandedBits, KnownZero, KnownOne,
- Depth + 1))
+ Known.Zero = Known.Zero.trunc(SrcBitWidth);
+ Known.One = Known.One.trunc(SrcBitWidth);
+ if (SimplifyDemandedBits(I, 0, InputDemandedBits, Known, Depth + 1))
return I;
InputDemandedBits = InputDemandedBits.zext(BitWidth);
- KnownZero = KnownZero.zext(BitWidth);
- KnownOne = KnownOne.zext(BitWidth);
- assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
+ Known.Zero = Known.Zero.zext(BitWidth);
+ Known.One = Known.One.zext(BitWidth);
+ assert(!(Known.Zero & Known.One) && "Bits known to be one AND zero?");
// If the sign bit of the input is known set or clear, then we know the
// top bits of the result.
// If the input sign bit is known zero, or if the NewBits are not demanded,
// convert this into a zero extension.
- if (KnownZero[SrcBitWidth-1] || (NewBits & ~DemandedMask) == NewBits) {
+ if (Known.Zero[SrcBitWidth-1] || (NewBits & ~DemandedMask) == NewBits) {
// Convert to ZExt cast
CastInst *NewCast = new ZExtInst(I->getOperand(0), VTy, I->getName());
return InsertNewInstWith(NewCast, *I);
- } else if (KnownOne[SrcBitWidth-1]) { // Input sign bit known set
- KnownOne |= NewBits;
+ } else if (Known.One[SrcBitWidth-1]) { // Input sign bit known set
+ Known.One |= NewBits;
}
break;
}
@@ -440,11 +424,9 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// significant bit and all those below it.
APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth-NLZ));
if (ShrinkDemandedConstant(I, 0, DemandedFromOps) ||
- SimplifyDemandedBits(I, 0, DemandedFromOps, LHSKnownZero, LHSKnownOne,
- Depth + 1) ||
+ SimplifyDemandedBits(I, 0, DemandedFromOps, LHSKnown, Depth + 1) ||
ShrinkDemandedConstant(I, 1, DemandedFromOps) ||
- SimplifyDemandedBits(I, 1, DemandedFromOps, RHSKnownZero, RHSKnownOne,
- Depth + 1)) {
+ SimplifyDemandedBits(I, 1, DemandedFromOps, RHSKnown, Depth + 1)) {
// Disable the nsw and nuw flags here: We can no longer guarantee that
// we won't wrap after simplification. Removing the nsw/nuw flags is
// legal here because the top bit is not demanded.
@@ -456,30 +438,28 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// If we are known to be adding/subtracting zeros to every bit below
// the highest demanded bit, we just return the other side.
- if ((DemandedFromOps & RHSKnownZero) == DemandedFromOps)
+ if (DemandedFromOps.isSubsetOf(RHSKnown.Zero))
return I->getOperand(0);
// We can't do this with the LHS for subtraction.
if (I->getOpcode() == Instruction::Add &&
- (DemandedFromOps & LHSKnownZero) == DemandedFromOps)
+ DemandedFromOps.isSubsetOf(LHSKnown.Zero))
return I->getOperand(1);
}
// Otherwise just hand the add/sub off to computeKnownBits to fill in
// the known zeros and ones.
- computeKnownBits(V, KnownZero, KnownOne, Depth, CxtI);
+ computeKnownBits(V, Known, Depth, CxtI);
break;
}
- case Instruction::Shl:
- if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
- {
- Value *VarX; ConstantInt *C1;
- if (match(I->getOperand(0), m_Shr(m_Value(VarX), m_ConstantInt(C1)))) {
- Instruction *Shr = cast<Instruction>(I->getOperand(0));
- Value *R = SimplifyShrShlDemandedBits(Shr, I, DemandedMask,
- KnownZero, KnownOne);
- if (R)
- return R;
- }
+ case Instruction::Shl: {
+ const APInt *SA;
+ if (match(I->getOperand(1), m_APInt(SA))) {
+ const APInt *ShrAmt;
+ if (match(I->getOperand(0), m_Shr(m_Value(), m_APInt(ShrAmt)))) {
+ Instruction *Shr = cast<Instruction>(I->getOperand(0));
+ if (Value *R = simplifyShrShlDemandedBits(
+ Shr, *ShrAmt, I, *SA, DemandedMask, Known))
+ return R;
}
uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
@@ -492,17 +472,17 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
else if (IOp->hasNoUnsignedWrap())
DemandedMaskIn.setHighBits(ShiftAmt);
- if (SimplifyDemandedBits(I, 0, DemandedMaskIn, KnownZero, KnownOne,
- Depth + 1))
+ if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
return I;
- assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
- KnownZero <<= ShiftAmt;
- KnownOne <<= ShiftAmt;
+ assert(!(Known.Zero & Known.One) && "Bits known to be one AND zero?");
+ Known.Zero <<= ShiftAmt;
+ Known.One <<= ShiftAmt;
// low bits known zero.
if (ShiftAmt)
- KnownZero.setLowBits(ShiftAmt);
+ Known.Zero.setLowBits(ShiftAmt);
}
break;
+ }
case Instruction::LShr: {
const APInt *SA;
if (match(I->getOperand(1), m_APInt(SA))) {
@@ -516,14 +496,13 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
if (cast<LShrOperator>(I)->isExact())
DemandedMaskIn.setLowBits(ShiftAmt);
- if (SimplifyDemandedBits(I, 0, DemandedMaskIn, KnownZero, KnownOne,
- Depth + 1))
+ if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
return I;
- assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
- KnownZero.lshrInPlace(ShiftAmt);
- KnownOne.lshrInPlace(ShiftAmt);
+ assert(!(Known.Zero & Known.One) && "Bits known to be one AND zero?");
+ Known.Zero.lshrInPlace(ShiftAmt);
+ Known.One.lshrInPlace(ShiftAmt);
if (ShiftAmt)
- KnownZero.setHighBits(ShiftAmt); // high bits known zero.
+ Known.Zero.setHighBits(ShiftAmt); // high bits known zero.
}
break;
}
@@ -560,15 +539,14 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
if (cast<AShrOperator>(I)->isExact())
DemandedMaskIn.setLowBits(ShiftAmt);
- if (SimplifyDemandedBits(I, 0, DemandedMaskIn, KnownZero, KnownOne,
- Depth + 1))
+ if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
return I;
- assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
+ assert(!(Known.Zero & Known.One) && "Bits known to be one AND zero?");
// Compute the new bits that are at the top now.
APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
- KnownZero.lshrInPlace(ShiftAmt);
- KnownOne.lshrInPlace(ShiftAmt);
+ Known.Zero.lshrInPlace(ShiftAmt);
+ Known.One.lshrInPlace(ShiftAmt);
// Handle the sign bits.
APInt SignMask(APInt::getSignMask(BitWidth));
@@ -577,14 +555,14 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// If the input sign bit is known to be zero, or if none of the top bits
// are demanded, turn this into an unsigned shift right.
- if (BitWidth <= ShiftAmt || KnownZero[BitWidth-ShiftAmt-1] ||
- (HighBits & ~DemandedMask) == HighBits) {
+ if (BitWidth <= ShiftAmt || Known.Zero[BitWidth-ShiftAmt-1] ||
+ !DemandedMask.intersects(HighBits)) {
BinaryOperator *LShr = BinaryOperator::CreateLShr(I->getOperand(0),
I->getOperand(1));
LShr->setIsExact(cast<BinaryOperator>(I)->isExact());
return InsertNewInstWith(LShr, *I);
- } else if ((KnownOne & SignMask) != 0) { // New bits are known one.
- KnownOne |= HighBits;
+ } else if (Known.One.intersects(SignMask)) { // New bits are known one.
+ Known.One |= HighBits;
}
}
break;
@@ -602,25 +580,24 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
APInt LowBits = RA - 1;
APInt Mask2 = LowBits | APInt::getSignMask(BitWidth);
- if (SimplifyDemandedBits(I, 0, Mask2, LHSKnownZero, LHSKnownOne,
- Depth + 1))
+ if (SimplifyDemandedBits(I, 0, Mask2, LHSKnown, Depth + 1))
return I;
// The low bits of LHS are unchanged by the srem.
- KnownZero = LHSKnownZero & LowBits;
- KnownOne = LHSKnownOne & LowBits;
+ Known.Zero = LHSKnown.Zero & LowBits;
+ Known.One = LHSKnown.One & LowBits;
// If LHS is non-negative or has all low bits zero, then the upper bits
// are all zero.
- if (LHSKnownZero.isSignBitSet() || ((LHSKnownZero & LowBits) == LowBits))
- KnownZero |= ~LowBits;
+ if (LHSKnown.Zero.isSignBitSet() || LowBits.isSubsetOf(LHSKnown.Zero))
+ Known.Zero |= ~LowBits;
// If LHS is negative and not all low bits are zero, then the upper bits
// are all one.
- if (LHSKnownOne.isSignBitSet() && ((LHSKnownOne & LowBits) != 0))
- KnownOne |= ~LowBits;
+ if (LHSKnown.One.isSignBitSet() && LowBits.intersects(LHSKnown.One))
+ Known.One |= ~LowBits;
- assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
+ assert(!(Known.Zero & Known.One) && "Bits known to be one AND zero?");
break;
}
}
@@ -628,22 +605,21 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// The sign bit is the LHS's sign bit, except when the result of the
// remainder is zero.
if (DemandedMask.isSignBitSet()) {
- computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
- CxtI);
+ computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, CxtI);
// If it's known zero, our sign bit is also zero.
- if (LHSKnownZero.isSignBitSet())
- KnownZero.setSignBit();
+ if (LHSKnown.Zero.isSignBitSet())
+ Known.Zero.setSignBit();
}
break;
case Instruction::URem: {
- APInt KnownZero2(BitWidth, 0), KnownOne2(BitWidth, 0);
+ KnownBits Known2(BitWidth);
APInt AllOnes = APInt::getAllOnesValue(BitWidth);
- if (SimplifyDemandedBits(I, 0, AllOnes, KnownZero2, KnownOne2, Depth + 1) ||
- SimplifyDemandedBits(I, 1, AllOnes, KnownZero2, KnownOne2, Depth + 1))
+ if (SimplifyDemandedBits(I, 0, AllOnes, Known2, Depth + 1) ||
+ SimplifyDemandedBits(I, 1, AllOnes, Known2, Depth + 1))
return I;
- unsigned Leaders = KnownZero2.countLeadingOnes();
- KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & DemandedMask;
+ unsigned Leaders = Known2.Zero.countLeadingOnes();
+ Known.Zero = APInt::getHighBitsSet(BitWidth, Leaders) & DemandedMask;
break;
}
case Instruction::Call:
@@ -707,56 +683,54 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
return ConstantInt::getNullValue(VTy);
// We know that the upper bits are set to zero.
- KnownZero.setBitsFrom(ArgWidth);
+ Known.Zero.setBitsFrom(ArgWidth);
return nullptr;
}
case Intrinsic::x86_sse42_crc32_64_64:
- KnownZero.setBitsFrom(32);
+ Known.Zero.setBitsFrom(32);
return nullptr;
}
}
- computeKnownBits(V, KnownZero, KnownOne, Depth, CxtI);
+ computeKnownBits(V, Known, Depth, CxtI);
break;
}
// If the client is only demanding bits that we know, return the known
// constant.
- if (DemandedMask.isSubsetOf(KnownZero|KnownOne))
- return Constant::getIntegerValue(VTy, KnownOne);
+ if (DemandedMask.isSubsetOf(Known.Zero|Known.One))
+ return Constant::getIntegerValue(VTy, Known.One);
return nullptr;
}
-/// Helper routine of SimplifyDemandedUseBits. It computes KnownZero/KnownOne
+/// Helper routine of SimplifyDemandedUseBits. It computes Known
/// bits. It also tries to handle simplifications that can be done based on
/// DemandedMask, but without modifying the Instruction.
Value *InstCombiner::SimplifyMultipleUseDemandedBits(Instruction *I,
const APInt &DemandedMask,
- APInt &KnownZero,
- APInt &KnownOne,
+ KnownBits &Known,
unsigned Depth,
Instruction *CxtI) {
unsigned BitWidth = DemandedMask.getBitWidth();
Type *ITy = I->getType();
- APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
- APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
+ KnownBits LHSKnown(BitWidth);
+ KnownBits RHSKnown(BitWidth);
// Despite the fact that we can't simplify this instruction in all User's
- // context, we can at least compute the knownzero/knownone bits, and we can
+ // context, we can at least compute the known bits, and we can
// do simplifications that apply to *just* the one user if we know that
// this instruction has a simpler value in that context.
switch (I->getOpcode()) {
case Instruction::And: {
// If either the LHS or the RHS is Zero, the result is zero.
- computeKnownBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth + 1,
- CxtI);
- computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
+ computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
+ computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1,
CxtI);
// Output known-0 bits are known to be clear if zero in either the LHS | RHS.
- APInt IKnownZero = RHSKnownZero | LHSKnownZero;
+ APInt IKnownZero = RHSKnown.Zero | LHSKnown.Zero;
// Output known-1 bits are only known if set in both the LHS & RHS.
- APInt IKnownOne = RHSKnownOne & LHSKnownOne;
+ APInt IKnownOne = RHSKnown.One & LHSKnown.One;
// If the client is only demanding bits that we know, return the known
// constant.
@@ -766,13 +740,13 @@ Value *InstCombiner::SimplifyMultipleUseDemandedBits(Instruction *I,
// If all of the demanded bits are known 1 on one side, return the other.
// These bits cannot contribute to the result of the 'and' in this
// context.
- if (DemandedMask.isSubsetOf(LHSKnownZero | RHSKnownOne))
+ if (DemandedMask.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
return I->getOperand(0);
- if (DemandedMask.isSubsetOf(RHSKnownZero | LHSKnownOne))
+ if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
return I->getOperand(1);
- KnownZero = std::move(IKnownZero);
- KnownOne = std::move(IKnownOne);
+ Known.Zero = std::move(IKnownZero);
+ Known.One = std::move(IKnownOne);
break;
}
case Instruction::Or: {
@@ -780,15 +754,14 @@ Value *InstCombiner::SimplifyMultipleUseDemandedBits(Instruction *I,
// only bits from X or Y are demanded.
// If either the LHS or the RHS is One, the result is One.
- computeKnownBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth + 1,
- CxtI);
- computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
+ computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
+ computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1,
CxtI);
// Output known-0 bits are only known if clear in both the LHS & RHS.
- APInt IKnownZero = RHSKnownZero & LHSKnownZero;
+ APInt IKnownZero = RHSKnown.Zero & LHSKnown.Zero;
// Output known-1 bits are known to be set if set in either the LHS | RHS.
- APInt IKnownOne = RHSKnownOne | LHSKnownOne;
+ APInt IKnownOne = RHSKnown.One | LHSKnown.One;
// If the client is only demanding bits that we know, return the known
// constant.
@@ -798,30 +771,29 @@ Value *InstCombiner::SimplifyMultipleUseDemandedBits(Instruction *I,
// If all of the demanded bits are known zero on one side, return the
// other. These bits cannot contribute to the result of the 'or' in this
// context.
- if (DemandedMask.isSubsetOf(LHSKnownOne | RHSKnownZero))
+ if (DemandedMask.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
return I->getOperand(0);
- if (DemandedMask.isSubsetOf(RHSKnownOne | LHSKnownZero))
+ if (DemandedMask.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
return I->getOperand(1);
- KnownZero = std::move(IKnownZero);
- KnownOne = std::move(IKnownOne);
+ Known.Zero = std::move(IKnownZero);
+ Known.One = std::move(IKnownOne);
break;
}
case Instruction::Xor: {
// We can simplify (X^Y) -> X or Y in the user's context if we know that
// only bits from X or Y are demanded.
- computeKnownBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth + 1,
- CxtI);
- computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
+ computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
+ computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1,
CxtI);
// Output known-0 bits are known if clear or set in both the LHS & RHS.
- APInt IKnownZero = (RHSKnownZero & LHSKnownZero) |
- (RHSKnownOne & LHSKnownOne);
+ APInt IKnownZero = (RHSKnown.Zero & LHSKnown.Zero) |
+ (RHSKnown.One & LHSKnown.One);
// Output known-1 bits are known to be set if set in only one of the LHS, RHS.
- APInt IKnownOne = (RHSKnownZero & LHSKnownOne) |
- (RHSKnownOne & LHSKnownZero);
+ APInt IKnownOne = (RHSKnown.Zero & LHSKnown.One) |
+ (RHSKnown.One & LHSKnown.Zero);
// If the client is only demanding bits that we know, return the known
// constant.
@@ -830,25 +802,25 @@ Value *InstCombiner::SimplifyMultipleUseDemandedBits(Instruction *I,
// If all of the demanded bits are known zero on one side, return the
// other.
- if (DemandedMask.isSubsetOf(RHSKnownZero))
+ if (DemandedMask.isSubsetOf(RHSKnown.Zero))
return I->getOperand(0);
- if (DemandedMask.isSubsetOf(LHSKnownZero))
+ if (DemandedMask.isSubsetOf(LHSKnown.Zero))
return I->getOperand(1);
// Output known-0 bits are known if clear or set in both the LHS & RHS.
- KnownZero = std::move(IKnownZero);
+ Known.Zero = std::move(IKnownZero);
// Output known-1 bits are known to be set if set in only one of the LHS, RHS.
- KnownOne = std::move(IKnownOne);
+ Known.One = std::move(IKnownOne);
break;
}
default:
- // Compute the KnownZero/KnownOne bits to simplify things downstream.
- computeKnownBits(I, KnownZero, KnownOne, Depth, CxtI);
+ // Compute the Known bits to simplify things downstream.
+ computeKnownBits(I, Known, Depth, CxtI);
// If this user is only demanding bits that we know, return the known
// constant.
- if (DemandedMask.isSubsetOf(KnownZero|KnownOne))
- return Constant::getIntegerValue(ITy, KnownOne);
+ if (DemandedMask.isSubsetOf(Known.Zero|Known.One))
+ return Constant::getIntegerValue(ITy, Known.One);
break;
}
@@ -874,29 +846,26 @@ Value *InstCombiner::SimplifyMultipleUseDemandedBits(Instruction *I,
///
/// As with SimplifyDemandedUseBits, it returns NULL if the simplification was
/// not successful.
-Value *InstCombiner::SimplifyShrShlDemandedBits(Instruction *Shr,
- Instruction *Shl,
- const APInt &DemandedMask,
- APInt &KnownZero,
- APInt &KnownOne) {
-
- const APInt &ShlOp1 = cast<ConstantInt>(Shl->getOperand(1))->getValue();
- const APInt &ShrOp1 = cast<ConstantInt>(Shr->getOperand(1))->getValue();
+Value *
+InstCombiner::simplifyShrShlDemandedBits(Instruction *Shr, const APInt &ShrOp1,
+ Instruction *Shl, const APInt &ShlOp1,
+ const APInt &DemandedMask,
+ KnownBits &Known) {
if (!ShlOp1 || !ShrOp1)
- return nullptr; // Noop.
+ return nullptr; // No-op.
Value *VarX = Shr->getOperand(0);
Type *Ty = VarX->getType();
- unsigned BitWidth = Ty->getIntegerBitWidth();
+ unsigned BitWidth = Ty->getScalarSizeInBits();
if (ShlOp1.uge(BitWidth) || ShrOp1.uge(BitWidth))
return nullptr; // Undef.
unsigned ShlAmt = ShlOp1.getZExtValue();
unsigned ShrAmt = ShrOp1.getZExtValue();
- KnownOne.clearAllBits();
- KnownZero.setLowBits(ShlAmt - 1);
- KnownZero &= DemandedMask;
+ Known.One.clearAllBits();
+ Known.Zero.setLowBits(ShlAmt - 1);
+ Known.Zero &= DemandedMask;
APInt BitMask1(APInt::getAllOnesValue(BitWidth));
APInt BitMask2(APInt::getAllOnesValue(BitWidth));
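Aside (illustrative only, not part of the patch above): the demanded-bits simplifications in this file keep applying the same argument, e.g. for an 'or' the code returns operand 0 once every demanded bit is either known one in the LHS or known zero in the RHS. Below is a minimal standalone C++ sketch of that rule using plain 64-bit masks in place of llvm::APInt/KnownBits; MiniKnownBits and orSimplifiesToLHS are names invented for the example.

#include <cassert>
#include <cstdint>

struct MiniKnownBits {   // stand-in for llvm::KnownBits
  uint64_t Zero;         // bits proven to be 0
  uint64_t One;          // bits proven to be 1 (invariant: Zero & One == 0)
};

// For V = A | B: a demanded bit is independent of B whenever it is already
// known 1 in A (the OR forces it to 1) or known 0 in B (B contributes
// nothing). If that covers every demanded bit, V can be replaced by A.
static bool orSimplifiesToLHS(uint64_t DemandedMask, const MiniKnownBits &A,
                              const MiniKnownBits &B) {
  // DemandedMask is a subset of (A.One | B.Zero).
  return (DemandedMask & ~(A.One | B.Zero)) == 0;
}

int main() {
  MiniKnownBits A = {0x00, 0xF0}; // high nibble of A known to be 1
  MiniKnownBits B = {0x0F, 0x00}; // low nibble of B known to be 0
  assert(orSimplifiesToLHS(0xFF, A, B)); // every demanded bit is covered
  return 0;
}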
diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp
index 81f2d9fa179f9..4729c79ca4c3d 100644
--- a/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -60,6 +60,7 @@
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
@@ -641,14 +642,6 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
if (Value *R = SimplifyBinOp(TopLevelOpcode, B, C, DL)) {
// They do! Return "L op' R".
++NumExpand;
- // If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
- if ((L == A && R == B) ||
- (Instruction::isCommutative(InnerOpcode) && L == B && R == A))
- return Op0;
- // Otherwise return "L op' R" if it simplifies.
- if (Value *V = SimplifyBinOp(InnerOpcode, L, R, DL))
- return V;
- // Otherwise, create a new instruction.
C = Builder->CreateBinOp(InnerOpcode, L, R);
C->takeName(&I);
return C;
@@ -666,14 +659,6 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
if (Value *R = SimplifyBinOp(TopLevelOpcode, A, C, DL)) {
// They do! Return "L op' R".
++NumExpand;
- // If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
- if ((L == B && R == C) ||
- (Instruction::isCommutative(InnerOpcode) && L == C && R == B))
- return Op1;
- // Otherwise return "L op' R" if it simplifies.
- if (Value *V = SimplifyBinOp(InnerOpcode, L, R, DL))
- return V;
- // Otherwise, create a new instruction.
A = Builder->CreateBinOp(InnerOpcode, L, R);
A->takeName(&I);
return A;
@@ -2196,11 +2181,10 @@ Instruction *InstCombiner::visitReturnInst(ReturnInst &RI) {
// There might be assume intrinsics dominating this return that completely
// determine the value. If so, constant fold it.
- unsigned BitWidth = VTy->getPrimitiveSizeInBits();
- APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- computeKnownBits(ResultOp, KnownZero, KnownOne, 0, &RI);
- if ((KnownZero|KnownOne).isAllOnesValue())
- RI.setOperand(0, Constant::getIntegerValue(VTy, KnownOne));
+ KnownBits Known(VTy->getPrimitiveSizeInBits());
+ computeKnownBits(ResultOp, Known, 0, &RI);
+ if ((Known.Zero|Known.One).isAllOnesValue())
+ RI.setOperand(0, Constant::getIntegerValue(VTy, Known.One));
return nullptr;
}
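Aside (again illustrative only, not part of the patch): the visitReturnInst hunk above, and the InstCombiner::run hunk later in this file's diff, both rely on the same fold: once (Known.Zero | Known.One) covers every bit, the value is fully determined and must equal Known.One. A tiny sketch over 8-bit masks; allBitsKnown is a name invented for the example.

#include <cassert>
#include <cstdint>

// If every bit is known to be either 0 or 1, the only value consistent with
// that knowledge is the known-one mask itself.
static bool allBitsKnown(uint8_t KnownZero, uint8_t KnownOne) {
  return static_cast<uint8_t>(KnownZero | KnownOne) == 0xFF;
}

int main() {
  uint8_t KnownZero = 0xF0, KnownOne = 0x0F;
  if (allBitsKnown(KnownZero, KnownOne)) {
    // Mirrors Constant::getIntegerValue(VTy, Known.One) in the hunk above.
    uint8_t Folded = KnownOne;
    assert(Folded == 0x0F);
  }
  return 0;
}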
@@ -2279,10 +2263,10 @@ Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
}
unsigned BitWidth = cast<IntegerType>(Cond->getType())->getBitWidth();
- APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- computeKnownBits(Cond, KnownZero, KnownOne, 0, &SI);
- unsigned LeadingKnownZeros = KnownZero.countLeadingOnes();
- unsigned LeadingKnownOnes = KnownOne.countLeadingOnes();
+ KnownBits Known(BitWidth);
+ computeKnownBits(Cond, Known, 0, &SI);
+ unsigned LeadingKnownZeros = Known.Zero.countLeadingOnes();
+ unsigned LeadingKnownOnes = Known.One.countLeadingOnes();
// Compute the number of leading bits we can ignore.
// TODO: A better way to determine this would use ComputeNumSignBits().
@@ -2879,11 +2863,10 @@ bool InstCombiner::run() {
Type *Ty = I->getType();
if (ExpensiveCombines && !I->use_empty() && Ty->isIntOrIntVectorTy()) {
unsigned BitWidth = Ty->getScalarSizeInBits();
- APInt KnownZero(BitWidth, 0);
- APInt KnownOne(BitWidth, 0);
- computeKnownBits(I, KnownZero, KnownOne, /*Depth*/0, I);
- if ((KnownZero | KnownOne).isAllOnesValue()) {
- Constant *C = ConstantInt::get(Ty, KnownOne);
+ KnownBits Known(BitWidth);
+ computeKnownBits(I, Known, /*Depth*/0, I);
+ if ((Known.Zero | Known.One).isAllOnesValue()) {
+ Constant *C = ConstantInt::get(Ty, Known.One);
DEBUG(dbgs() << "IC: ConstFold (all bits known) to: " << *C <<
" from: " << *I << '\n');