Diffstat (limited to 'lib/Transforms/InstCombine/InstCombineAddSub.cpp')
-rw-r--r-- | lib/Transforms/InstCombine/InstCombineAddSub.cpp | 199
1 file changed, 63 insertions(+), 136 deletions(-)
diff --git a/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 153a186d5ed4..0ca62b7ae40c 100644
--- a/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -847,92 +847,6 @@ Value *FAddCombine::createAddendVal(const FAddend &Opnd, bool &NeedNeg) {
   return createFMul(OpndVal, Coeff.getValue(Instr->getType()));
 }
 
-/// \brief Return true if we can prove that adding the two values of the
-/// knownbits will not overflow.
-/// Otherwise return false.
-static bool checkRippleForAdd(const KnownBits &LHSKnown,
-                              const KnownBits &RHSKnown) {
-  // Addition of two 2's complement numbers having opposite signs will never
-  // overflow.
-  if ((LHSKnown.isNegative() && RHSKnown.isNonNegative()) ||
-      (LHSKnown.isNonNegative() && RHSKnown.isNegative()))
-    return true;
-
-  // If either of the values is known to be non-negative, adding them can only
-  // overflow if the second is also non-negative, so we can assume that.
-  // Two non-negative numbers will only overflow if there is a carry to the
-  // sign bit, so we can check if even when the values are as big as possible
-  // there is no overflow to the sign bit.
-  if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) {
-    APInt MaxLHS = ~LHSKnown.Zero;
-    MaxLHS.clearSignBit();
-    APInt MaxRHS = ~RHSKnown.Zero;
-    MaxRHS.clearSignBit();
-    APInt Result = std::move(MaxLHS) + std::move(MaxRHS);
-    return Result.isSignBitClear();
-  }
-
-  // If either of the values is known to be negative, adding them can only
-  // overflow if the second is also negative, so we can assume that.
-  // Two negative number will only overflow if there is no carry to the sign
-  // bit, so we can check if even when the values are as small as possible
-  // there is overflow to the sign bit.
-  if (LHSKnown.isNegative() || RHSKnown.isNegative()) {
-    APInt MinLHS = LHSKnown.One;
-    MinLHS.clearSignBit();
-    APInt MinRHS = RHSKnown.One;
-    MinRHS.clearSignBit();
-    APInt Result = std::move(MinLHS) + std::move(MinRHS);
-    return Result.isSignBitSet();
-  }
-
-  // If we reached here it means that we know nothing about the sign bits.
-  // In this case we can't know if there will be an overflow, since by
-  // changing the sign bits any two values can be made to overflow.
-  return false;
-}
-
-/// Return true if we can prove that:
-///    (sext (add LHS, RHS))  === (add (sext LHS), (sext RHS))
-/// This basically requires proving that the add in the original type would not
-/// overflow to change the sign bit or have a carry out.
-bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS,
-                                            Instruction &CxtI) {
-  // There are different heuristics we can use for this.  Here are some simple
-  // ones.
-
-  // If LHS and RHS each have at least two sign bits, the addition will look
-  // like
-  //
-  // XX..... +
-  // YY.....
-  //
-  // If the carry into the most significant position is 0, X and Y can't both
-  // be 1 and therefore the carry out of the addition is also 0.
-  //
-  // If the carry into the most significant position is 1, X and Y can't both
-  // be 0 and therefore the carry out of the addition is also 1.
-  //
-  // Since the carry into the most significant position is always equal to
-  // the carry out of the addition, there is no signed overflow.
-  if (ComputeNumSignBits(LHS, 0, &CxtI) > 1 &&
-      ComputeNumSignBits(RHS, 0, &CxtI) > 1)
-    return true;
-
-  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
-  KnownBits LHSKnown(BitWidth);
-  computeKnownBits(LHS, LHSKnown, 0, &CxtI);
-
-  KnownBits RHSKnown(BitWidth);
-  computeKnownBits(RHS, RHSKnown, 0, &CxtI);
-
-  // Check if carry bit of addition will not cause overflow.
-  if (checkRippleForAdd(LHSKnown, RHSKnown))
-    return true;
-
-  return false;
-}
-
 /// \brief Return true if we can prove that:
 ///    (sub LHS, RHS)  === (sub nsw LHS, RHS)
 /// This basically requires proving that the add in the original type would not
@@ -968,13 +882,9 @@ bool InstCombiner::WillNotOverflowSignedSub(Value *LHS, Value *RHS,
 bool InstCombiner::WillNotOverflowUnsignedSub(Value *LHS, Value *RHS,
                                               Instruction &CxtI) {
   // If the LHS is negative and the RHS is non-negative, no unsigned wrap.
-  bool LHSKnownNonNegative, LHSKnownNegative;
-  bool RHSKnownNonNegative, RHSKnownNegative;
-  ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, /*Depth=*/0,
-                 &CxtI);
-  ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, /*Depth=*/0,
-                 &CxtI);
-  if (LHSKnownNegative && RHSKnownNonNegative)
+  KnownBits LHSKnown = computeKnownBits(LHS, /*Depth=*/0, &CxtI);
+  KnownBits RHSKnown = computeKnownBits(RHS, /*Depth=*/0, &CxtI);
+  if (LHSKnown.isNegative() && RHSKnown.isNonNegative())
     return true;
 
   return false;
@@ -1041,6 +951,57 @@ static Value *checkForNegativeOperand(BinaryOperator &I,
   return nullptr;
 }
 
+static Instruction *foldAddWithConstant(BinaryOperator &Add,
+                                        InstCombiner::BuilderTy &Builder) {
+  Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1);
+  const APInt *C;
+  if (!match(Op1, m_APInt(C)))
+    return nullptr;
+
+  if (C->isSignMask()) {
+    // If wrapping is not allowed, then the addition must set the sign bit:
+    // X + (signmask) --> X | signmask
+    if (Add.hasNoSignedWrap() || Add.hasNoUnsignedWrap())
+      return BinaryOperator::CreateOr(Op0, Op1);
+
+    // If wrapping is allowed, then the addition flips the sign bit of LHS:
+    // X + (signmask) --> X ^ signmask
+    return BinaryOperator::CreateXor(Op0, Op1);
+  }
+
+  Value *X;
+  const APInt *C2;
+  Type *Ty = Add.getType();
+
+  // Is this add the last step in a convoluted sext?
+  // add(zext(xor i16 X, -32768), -32768) --> sext X
+  if (match(Op0, m_ZExt(m_Xor(m_Value(X), m_APInt(C2)))) &&
+      C2->isMinSignedValue() && C2->sext(Ty->getScalarSizeInBits()) == *C)
+    return CastInst::Create(Instruction::SExt, X, Ty);
+
+  // (add (zext (add nuw X, C2)), C) --> (zext (add nuw X, C2 + C))
+  // FIXME: This should check hasOneUse to not increase the instruction count?
+  if (C->isNegative() &&
+      match(Op0, m_ZExt(m_NUWAdd(m_Value(X), m_APInt(C2)))) &&
+      C->sge(-C2->sext(C->getBitWidth()))) {
+    Constant *NewC =
+        ConstantInt::get(X->getType(), *C2 + C->trunc(C2->getBitWidth()));
+    return new ZExtInst(Builder.CreateNUWAdd(X, NewC), Ty);
+  }
+
+  // Shifts and add used to flip and mask off the low bit:
+  // add (ashr (shl i32 X, 31), 31), 1 --> and (not X), 1
+  const APInt *C3;
+  if (*C == 1 && match(Op0, m_OneUse(m_AShr(m_Shl(m_Value(X), m_APInt(C2)),
+                                            m_APInt(C3)))) &&
+      C2 == C3 && *C2 == Ty->getScalarSizeInBits() - 1) {
+    Value *NotX = Builder.CreateNot(X);
+    return BinaryOperator::CreateAnd(NotX, ConstantInt::get(Ty, 1));
+  }
+
+  return nullptr;
+}
+
 Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
   bool Changed = SimplifyAssociativeOrCommutative(I);
   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
@@ -1056,41 +1017,11 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
   if (Value *V = SimplifyUsingDistributiveLaws(I))
     return replaceInstUsesWith(I, V);
 
-  const APInt *RHSC;
-  if (match(RHS, m_APInt(RHSC))) {
-    if (RHSC->isSignMask()) {
-      // If wrapping is not allowed, then the addition must set the sign bit:
-      // X + (signmask) --> X | signmask
-      if (I.hasNoSignedWrap() || I.hasNoUnsignedWrap())
-        return BinaryOperator::CreateOr(LHS, RHS);
-
-      // If wrapping is allowed, then the addition flips the sign bit of LHS:
-      // X + (signmask) --> X ^ signmask
-      return BinaryOperator::CreateXor(LHS, RHS);
-    }
-
-    // Is this add the last step in a convoluted sext?
-    Value *X;
-    const APInt *C;
-    if (match(LHS, m_ZExt(m_Xor(m_Value(X), m_APInt(C)))) &&
-        C->isMinSignedValue() &&
-        C->sext(LHS->getType()->getScalarSizeInBits()) == *RHSC) {
-      // add(zext(xor i16 X, -32768), -32768) --> sext X
-      return CastInst::Create(Instruction::SExt, X, LHS->getType());
-    }
-
-    if (RHSC->isNegative() &&
-        match(LHS, m_ZExt(m_NUWAdd(m_Value(X), m_APInt(C)))) &&
-        RHSC->sge(-C->sext(RHSC->getBitWidth()))) {
-      // (add (zext (add nuw X, C)), Val) -> (zext (add nuw X, C+Val))
-      Constant *NewC =
-          ConstantInt::get(X->getType(), *C + RHSC->trunc(C->getBitWidth()));
-      return new ZExtInst(Builder->CreateNUWAdd(X, NewC), I.getType());
-    }
-  }
+  if (Instruction *X = foldAddWithConstant(I, *Builder))
+    return X;
 
-  // FIXME: Use the match above instead of dyn_cast to allow these transforms
-  // for splat vectors.
+  // FIXME: This should be moved into the above helper function to allow these
+  // transforms for splat vectors.
   if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
     // zext(bool) + C -> bool ? C + 1 : C
     if (ZExtInst *ZI = dyn_cast<ZExtInst>(LHS))
@@ -1285,8 +1216,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
         Constant *CI =
             ConstantExpr::getTrunc(RHSC, LHSConv->getOperand(0)->getType());
         if (ConstantExpr::getZExt(CI, I.getType()) == RHSC &&
-            computeOverflowForUnsignedAdd(LHSConv->getOperand(0), CI, &I) ==
-                OverflowResult::NeverOverflows) {
+            willNotOverflowUnsignedAdd(LHSConv->getOperand(0), CI, I)) {
           // Insert the new, smaller add.
           Value *NewAdd =
               Builder->CreateNUWAdd(LHSConv->getOperand(0), CI, "addconv");
@@ -1303,9 +1233,8 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
       if (LHSConv->getOperand(0)->getType() ==
              RHSConv->getOperand(0)->getType() &&
          (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
-          computeOverflowForUnsignedAdd(LHSConv->getOperand(0),
-                                        RHSConv->getOperand(0),
-                                        &I) == OverflowResult::NeverOverflows) {
+          willNotOverflowUnsignedAdd(LHSConv->getOperand(0),
+                                     RHSConv->getOperand(0), I)) {
        // Insert the new integer add.
        Value *NewAdd = Builder->CreateNUWAdd(
            LHSConv->getOperand(0), RHSConv->getOperand(0), "addconv");
@@ -1347,15 +1276,13 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
   }
 
   // TODO(jingyue): Consider WillNotOverflowSignedAdd and
-  // WillNotOverflowUnsignedAdd to reduce the number of invocations of
+  // willNotOverflowUnsignedAdd to reduce the number of invocations of
   // computeKnownBits.
   if (!I.hasNoSignedWrap() && WillNotOverflowSignedAdd(LHS, RHS, I)) {
     Changed = true;
     I.setHasNoSignedWrap(true);
   }
-  if (!I.hasNoUnsignedWrap() &&
-      computeOverflowForUnsignedAdd(LHS, RHS, &I) ==
-          OverflowResult::NeverOverflows) {
+  if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedAdd(LHS, RHS, I)) {
     Changed = true;
     I.setHasNoUnsignedWrap(true);
   }
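
Note (illustrative, not part of the patch): the folds gathered into the new foldAddWithConstant helper rest on plain two's-complement identities. The hypothetical standalone program below spot-checks three of them in C++: the sign-mask add/xor equivalence, the shl/ashr low-bit flip, and the zext+xor "convoluted sext" pattern. It assumes wrapping unsigned arithmetic and typical (C++20) signed-shift behavior; the file name and test values are made up.

// illustrative_add_folds.cpp (hypothetical; not from this commit)
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t SignMask = 0x80000000u;
  const uint32_t Vals[] = {0u, 1u, 0x7fffffffu, 0x80000000u, 0xdeadbeefu};

  for (uint32_t X : Vals) {
    // X + signmask --> X ^ signmask: a wrapping add of the sign mask can
    // only flip the sign bit; the carry out of bit 31 is discarded.
    assert(X + SignMask == (X ^ SignMask));

    // add (ashr (shl i32 X, 31), 31), 1 --> and (not X), 1:
    // the shift pair smears the low bit across the word; adding 1 then
    // yields 1 when the low bit was 0 and 0 when it was 1.
    uint32_t Shl = X << 31;                         // shift done unsigned
    uint32_t AShr = (uint32_t)((int32_t)Shl >> 31); // arithmetic shift right
    assert(AShr + 1 == ((~X) & 1u));
  }

  // add(zext(xor i16 X, -32768), -32768) --> sext X: flipping the i16 sign
  // bit, zero-extending, and adding -32768 as i32 reconstructs the sign
  // extension of X.
  const int16_t Vals16[] = {-32768, -12345, -1, 0, 1, 12345, 32767};
  for (int16_t X : Vals16) {
    uint32_t ZExt = (uint16_t)X ^ 0x8000u;   // xor i16 X, -32768, then zext
    uint32_t Folded = ZExt + 0xffff8000u;    // add i32 -32768 (wrapping)
    uint32_t SExt = (uint32_t)(int32_t)X;    // what "sext X" produces
    assert(Folded == SExt);
  }
  return 0;
}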

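The KnownBits rewrite of WillNotOverflowUnsignedSub relies on the same style of sign-bit argument: a known-negative LHS is at least 2^(n-1) as an unsigned value, while a known-non-negative RHS is below 2^(n-1), so LHS - RHS stays in range and cannot wrap. A minimal exhaustive check at 8 bits (again a hypothetical sketch, not from the patch):

// illustrative_unsigned_sub.cpp (hypothetical; not from this commit)
#include <cassert>

int main() {
  // If LHS has its sign bit set (>= 128 unsigned) and RHS has it clear
  // (<= 127), the 8-bit unsigned subtraction LHS - RHS cannot wrap.
  for (unsigned LHS = 0x80; LHS <= 0xff; ++LHS)
    for (unsigned RHS = 0x00; RHS <= 0x7f; ++RHS) {
      int Exact = (int)LHS - (int)RHS; // exact result, no wrapping
      assert(Exact >= 0 && Exact <= 0xff);
    }
  return 0;
}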