diff options
Diffstat (limited to 'contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp')
| -rw-r--r-- | contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp | 346 | 
1 files changed, 210 insertions, 136 deletions
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp index fd34a244f271..7c46cfd28fc9 100644 --- a/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp +++ b/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp @@ -42,8 +42,9 @@  #include "llvm/Analysis/AssumptionCache.h"  #include "llvm/Analysis/CFG.h"  #include "llvm/Analysis/ConstantFolding.h" +#include "llvm/Analysis/EHPersonalities.h" +#include "llvm/Analysis/GlobalsModRef.h"  #include "llvm/Analysis/InstructionSimplify.h" -#include "llvm/Analysis/LibCallSemantics.h"  #include "llvm/Analysis/LoopInfo.h"  #include "llvm/Analysis/MemoryBuiltins.h"  #include "llvm/Analysis/TargetLibraryInfo.h" @@ -79,14 +80,12 @@ Value *InstCombiner::EmitGEPOffset(User *GEP) {    return llvm::EmitGEPOffset(Builder, DL, GEP);  } -/// ShouldChangeType - Return true if it is desirable to convert a computation -/// from 'From' to 'To'.  We don't want to convert from a legal to an illegal -/// type for example, or from a smaller to a larger illegal type. -bool InstCombiner::ShouldChangeType(Type *From, Type *To) const { -  assert(From->isIntegerTy() && To->isIntegerTy()); - -  unsigned FromWidth = From->getPrimitiveSizeInBits(); -  unsigned ToWidth = To->getPrimitiveSizeInBits(); +/// Return true if it is desirable to convert an integer computation from a +/// given bit width to a new bit width. +/// We don't want to convert from a legal to an illegal type for example or from +/// a smaller to a larger illegal type. +bool InstCombiner::ShouldChangeType(unsigned FromWidth, +                                    unsigned ToWidth) const {    bool FromLegal = DL.isLegalInteger(FromWidth);    bool ToLegal = DL.isLegalInteger(ToWidth); @@ -103,6 +102,17 @@ bool InstCombiner::ShouldChangeType(Type *From, Type *To) const {    return true;  } +/// Return true if it is desirable to convert a computation from 'From' to 'To'. 
+/// We don't want to convert from a legal to an illegal type for example or from +/// a smaller to a larger illegal type. +bool InstCombiner::ShouldChangeType(Type *From, Type *To) const { +  assert(From->isIntegerTy() && To->isIntegerTy()); + +  unsigned FromWidth = From->getPrimitiveSizeInBits(); +  unsigned ToWidth = To->getPrimitiveSizeInBits(); +  return ShouldChangeType(FromWidth, ToWidth); +} +  // Return true, if No Signed Wrap should be maintained for I.  // The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",  // where both B and C should be ConstantInts, results in a constant that does @@ -156,27 +166,26 @@ static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {    I.setFastMathFlags(FMF);  } -/// SimplifyAssociativeOrCommutative - This performs a few simplifications for -/// operators which are associative or commutative: -// -//  Commutative operators: -// -//  1. Order operands such that they are listed from right (least complex) to -//     left (most complex).  This puts constants before unary operators before -//     binary operators. -// -//  Associative operators: -// -//  2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies. -//  3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies. -// -//  Associative and commutative operators: -// -//  4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies. -//  5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies. -//  6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)" -//     if C1 and C2 are constants. -// +/// This performs a few simplifications for operators that are associative or +/// commutative: +/// +///  Commutative operators: +/// +///  1. Order operands such that they are listed from right (least complex) to +///     left (most complex).  This puts constants before unary operators before +///     binary operators. +/// +///  Associative operators: +/// +///  2. 
Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies. +///  3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies. +/// +///  Associative and commutative operators: +/// +///  4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies. +///  5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies. +///  6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)" +///     if C1 and C2 are constants.  bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {    Instruction::BinaryOps Opcode = I.getOpcode();    bool Changed = false; @@ -322,7 +331,7 @@ bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {    } while (1);  } -/// LeftDistributesOverRight - Whether "X LOp (Y ROp Z)" is always equal to +/// Return whether "X LOp (Y ROp Z)" is always equal to  /// "(X LOp Y) ROp (X LOp Z)".  static bool LeftDistributesOverRight(Instruction::BinaryOps LOp,                                       Instruction::BinaryOps ROp) { @@ -361,7 +370,7 @@ static bool LeftDistributesOverRight(Instruction::BinaryOps LOp,    }  } -/// RightDistributesOverLeft - Whether "(X LOp Y) ROp Z" is always equal to +/// Return whether "(X LOp Y) ROp Z" is always equal to  /// "(X ROp Z) LOp (Y ROp Z)".  
static bool RightDistributesOverLeft(Instruction::BinaryOps LOp,                                       Instruction::BinaryOps ROp) { @@ -519,7 +528,7 @@ static Value *tryFactorization(InstCombiner::BuilderTy *Builder,            if (isa<OverflowingBinaryOperator>(Op1))              HasNSW &= Op1->hasNoSignedWrap(); -        // We can propogate 'nsw' if we know that +        // We can propagate 'nsw' if we know that          //  %Y = mul nsw i16 %X, C          //  %Z = add nsw i16 %Y, %X          // => @@ -537,11 +546,11 @@ static Value *tryFactorization(InstCombiner::BuilderTy *Builder,    return SimplifiedInst;  } -/// SimplifyUsingDistributiveLaws - This tries to simplify binary operations -/// which some other binary operation distributes over either by factorizing -/// out common terms (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this -/// results in simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is -/// a win).  Returns the simplified value, or null if it didn't simplify. +/// This tries to simplify binary operations which some other binary operation +/// distributes over either by factorizing out common terms +/// (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this results in +/// simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is a win). +/// Returns the simplified value, or null if it didn't simplify.  
Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {    Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);    BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS); @@ -623,12 +632,38 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {        }    } +  // (op (select (a, c, b)), (select (a, d, b))) -> (select (a, (op c, d), 0)) +  // (op (select (a, b, c)), (select (a, b, d))) -> (select (a, 0, (op c, d))) +  if (auto *SI0 = dyn_cast<SelectInst>(LHS)) { +    if (auto *SI1 = dyn_cast<SelectInst>(RHS)) { +      if (SI0->getCondition() == SI1->getCondition()) { +        Value *SI = nullptr; +        if (Value *V = SimplifyBinOp(TopLevelOpcode, SI0->getFalseValue(), +                                     SI1->getFalseValue(), DL, TLI, DT, AC)) +          SI = Builder->CreateSelect(SI0->getCondition(), +                                     Builder->CreateBinOp(TopLevelOpcode, +                                                          SI0->getTrueValue(), +                                                          SI1->getTrueValue()), +                                     V); +        if (Value *V = SimplifyBinOp(TopLevelOpcode, SI0->getTrueValue(), +                                     SI1->getTrueValue(), DL, TLI, DT, AC)) +          SI = Builder->CreateSelect( +              SI0->getCondition(), V, +              Builder->CreateBinOp(TopLevelOpcode, SI0->getFalseValue(), +                                   SI1->getFalseValue())); +        if (SI) { +          SI->takeName(&I); +          return SI; +        } +      } +    } +  } +    return nullptr;  } -// dyn_castNegVal - Given a 'sub' instruction, return the RHS of the instruction -// if the LHS is a constant zero (which is the 'negate' form). -// +/// Given a 'sub' instruction, return the RHS of the instruction if the LHS is a +/// constant zero (which is the 'negate' form).  
Value *InstCombiner::dyn_castNegVal(Value *V) const {    if (BinaryOperator::isNeg(V))      return BinaryOperator::getNegArgument(V); @@ -644,10 +679,8 @@ Value *InstCombiner::dyn_castNegVal(Value *V) const {    return nullptr;  } -// dyn_castFNegVal - Given a 'fsub' instruction, return the RHS of the -// instruction if the LHS is a constant negative zero (which is the 'negate' -// form). -// +/// Given a 'fsub' instruction, return the RHS of the instruction if the LHS is +/// a constant negative zero (which is the 'negate' form).  Value *InstCombiner::dyn_castFNegVal(Value *V, bool IgnoreZeroSign) const {    if (BinaryOperator::isFNeg(V, IgnoreZeroSign))      return BinaryOperator::getFNegArgument(V); @@ -700,10 +733,10 @@ static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,    llvm_unreachable("Unknown binary instruction type!");  } -// FoldOpIntoSelect - Given an instruction with a select as one operand and a -// constant as the other operand, try to fold the binary operator into the -// select arguments.  This also works for Cast instructions, which obviously do -// not have a second operand. +/// Given an instruction with a select as one operand and a constant as the +/// other operand, try to fold the binary operator into the select arguments. +/// This also works for Cast instructions, which obviously do not have a second +/// operand.  Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {    // Don't modify shared select instructions    if (!SI->hasOneUse()) return nullptr; @@ -752,10 +785,9 @@ Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {    return nullptr;  } -/// FoldOpIntoPhi - Given a binary operator, cast instruction, or select which -/// has a PHI node as operand #0, see if we can fold the instruction into the -/// PHI (which is only possible if all operands to the PHI are constants). 
-/// +/// Given a binary operator, cast instruction, or select which has a PHI node as +/// operand #0, see if we can fold the instruction into the PHI (which is only +/// possible if all operands to the PHI are constants).  Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {    PHINode *PN = cast<PHINode>(I.getOperand(0));    unsigned NumPHIValues = PN->getNumIncomingValues(); @@ -819,7 +851,7 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {    NewPN->takeName(PN);    // If we are going to have to insert a new computation, do so right before the -  // predecessors terminator. +  // predecessor's terminator.    if (NonConstBB)      Builder->SetInsertPoint(NonConstBB->getTerminator()); @@ -893,10 +925,10 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {    return ReplaceInstUsesWith(I, NewPN);  } -/// FindElementAtOffset - Given a pointer type and a constant offset, determine -/// whether or not there is a sequence of GEP indices into the pointed type that -/// will land us at the specified offset.  If so, fill them into NewIndices and -/// return the resultant element type, otherwise return null. +/// Given a pointer type and a constant offset, determine whether or not there +/// is a sequence of GEP indices into the pointed type that will land us at the +/// specified offset. If so, fill them into NewIndices and return the resultant +/// element type, otherwise return null.  Type *InstCombiner::FindElementAtOffset(PointerType *PtrTy, int64_t Offset,                                          SmallVectorImpl<Value *> &NewIndices) {    Type *Ty = PtrTy->getElementType(); @@ -965,8 +997,8 @@ static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {    return true;  } -/// Descale - Return a value X such that Val = X * Scale, or null if none.  If -/// the multiplication is known not to overflow then NoSignedWrap is set. +/// Return a value X such that Val = X * Scale, or null if none. 
+/// If the multiplication is known not to overflow, then NoSignedWrap is set.  Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {    assert(isa<IntegerType>(Val->getType()) && "Can only descale integers!");    assert(cast<IntegerType>(Val->getType())->getBitWidth() == @@ -1008,11 +1040,11 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {    // 0'th operand of Val.    std::pair<Instruction*, unsigned> Parent; -  // RequireNoSignedWrap - Set if the transform requires a descaling at deeper -  // levels that doesn't overflow. +  // Set if the transform requires a descaling at deeper levels that doesn't +  // overflow.    bool RequireNoSignedWrap = false; -  // logScale - log base 2 of the scale.  Negative if not a power of 2. +  // Log base 2 of the scale. Negative if not a power of 2.    int32_t logScale = Scale.exactLogBase2();    for (;; Op = Parent.first->getOperand(Parent.second)) { // Drill down @@ -1213,16 +1245,11 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {  /// specified one but with other operands.  static Value *CreateBinOpAsGiven(BinaryOperator &Inst, Value *LHS, Value *RHS,                                   InstCombiner::BuilderTy *B) { -  Value *BORes = B->CreateBinOp(Inst.getOpcode(), LHS, RHS); -  if (BinaryOperator *NewBO = dyn_cast<BinaryOperator>(BORes)) { -    if (isa<OverflowingBinaryOperator>(NewBO)) { -      NewBO->setHasNoSignedWrap(Inst.hasNoSignedWrap()); -      NewBO->setHasNoUnsignedWrap(Inst.hasNoUnsignedWrap()); -    } -    if (isa<PossiblyExactOperator>(NewBO)) -      NewBO->setIsExact(Inst.isExact()); -  } -  return BORes; +  Value *BO = B->CreateBinOp(Inst.getOpcode(), LHS, RHS); +  // If LHS and RHS are constant, BO won't be a binary operator. +  if (BinaryOperator *NewBO = dyn_cast<BinaryOperator>(BO)) +    NewBO->copyIRFlags(&Inst); +  return BO;  }  /// \brief Makes transformation of binary operation specific for vector types. 
@@ -1256,9 +1283,8 @@ Value *InstCombiner::SimplifyVectorOp(BinaryOperator &Inst) {          LShuf->getMask() == RShuf->getMask()) {        Value *NewBO = CreateBinOpAsGiven(Inst, LShuf->getOperand(0),            RShuf->getOperand(0), Builder); -      Value *Res = Builder->CreateShuffleVector(NewBO, +      return Builder->CreateShuffleVector(NewBO,            UndefValue::get(NewBO->getType()), LShuf->getMask()); -      return Res;      }    } @@ -1294,18 +1320,11 @@ Value *InstCombiner::SimplifyVectorOp(BinaryOperator &Inst) {      }      if (MayChange) {        Constant *C2 = ConstantVector::get(C2M); -      Value *NewLHS, *NewRHS; -      if (isa<Constant>(LHS)) { -        NewLHS = C2; -        NewRHS = Shuffle->getOperand(0); -      } else { -        NewLHS = Shuffle->getOperand(0); -        NewRHS = C2; -      } +      Value *NewLHS = isa<Constant>(LHS) ? C2 : Shuffle->getOperand(0); +      Value *NewRHS = isa<Constant>(LHS) ? Shuffle->getOperand(0) : C2;        Value *NewBO = CreateBinOpAsGiven(Inst, NewLHS, NewRHS, Builder); -      Value *Res = Builder->CreateShuffleVector(NewBO, +      return Builder->CreateShuffleVector(NewBO,            UndefValue::get(Inst.getType()), Shuffle->getMask()); -      return Res;      }    } @@ -1323,7 +1342,8 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {    // Eliminate unneeded casts for indices, and replace indices which displace    // by multiples of a zero size type with zero.    
bool MadeChange = false; -  Type *IntPtrTy = DL.getIntPtrType(GEP.getPointerOperandType()); +  Type *IntPtrTy = +    DL.getIntPtrType(GEP.getPointerOperandType()->getScalarType());    gep_type_iterator GTI = gep_type_begin(GEP);    for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E; @@ -1333,21 +1353,25 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {      if (!SeqTy)        continue; +    // Index type should have the same width as IntPtr +    Type *IndexTy = (*I)->getType(); +    Type *NewIndexType = IndexTy->isVectorTy() ? +      VectorType::get(IntPtrTy, IndexTy->getVectorNumElements()) : IntPtrTy; +       // If the element type has zero size then any index over it is equivalent      // to an index of zero, so replace it with zero if it is not zero already.      if (SeqTy->getElementType()->isSized() &&          DL.getTypeAllocSize(SeqTy->getElementType()) == 0)        if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) { -        *I = Constant::getNullValue(IntPtrTy); +        *I = Constant::getNullValue(NewIndexType);          MadeChange = true;        } -    Type *IndexTy = (*I)->getType(); -    if (IndexTy != IntPtrTy) { +    if (IndexTy != NewIndexType) {        // If we are using a wider index than needed for this platform, shrink        // it to what we need.  If narrower, sign-extend it to what we need.        // This explicit cast can make subsequent optimizations more obvious. -      *I = Builder->CreateIntCast(*I, IntPtrTy, true); +      *I = Builder->CreateIntCast(*I, NewIndexType, true);        MadeChange = true;      }    } @@ -1421,8 +1445,13 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {        }      } -    GetElementPtrInst *NewGEP = cast<GetElementPtrInst>(Op1->clone()); +    // If not all GEPs are identical we'll have to create a new PHI node. +    // Check that the old PHI node has only one use so that it will get +    // removed. 
+    if (DI != -1 && !PN->hasOneUse()) +      return nullptr; +    GetElementPtrInst *NewGEP = cast<GetElementPtrInst>(Op1->clone());      if (DI == -1) {        // All the GEPs feeding the PHI are identical. Clone one down into our        // BB so that it can be merged with the current GEP. @@ -1432,11 +1461,13 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {        // All the GEPs feeding the PHI differ at a single offset. Clone a GEP        // into the current block so it can be merged, and create a new PHI to        // set that index. -      Instruction *InsertPt = Builder->GetInsertPoint(); -      Builder->SetInsertPoint(PN); -      PHINode *NewPN = Builder->CreatePHI(Op1->getOperand(DI)->getType(), -                                          PN->getNumOperands()); -      Builder->SetInsertPoint(InsertPt); +      PHINode *NewPN; +      { +        IRBuilderBase::InsertPointGuard Guard(*Builder); +        Builder->SetInsertPoint(PN); +        NewPN = Builder->CreatePHI(Op1->getOperand(DI)->getType(), +                                   PN->getNumOperands()); +      }        for (auto &I : PN->operands())          NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI), @@ -1790,7 +1821,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {            if (Instruction *I = visitBitCast(*BCI)) {              if (I != BCI) {                I->takeName(BCI); -              BCI->getParent()->getInstList().insert(BCI, I); +              BCI->getParent()->getInstList().insert(BCI->getIterator(), I);                ReplaceInstUsesWith(*BCI, I);              }              return &GEP; @@ -1931,7 +1962,7 @@ Instruction *InstCombiner::visitAllocSite(Instruction &MI) {      if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {        // Replace invoke with a NOP intrinsic to maintain the original CFG -      Module *M = II->getParent()->getParent()->getParent(); +      Module *M = II->getModule();        Function *F = 
Intrinsic::getDeclaration(M, Intrinsic::donothing);        InvokeInst::Create(F, II->getNormalDest(), II->getUnwindDest(),                           None, "", II->getParent()); @@ -2280,9 +2311,10 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {    }    if (LoadInst *L = dyn_cast<LoadInst>(Agg))      // If the (non-volatile) load only has one use, we can rewrite this to a -    // load from a GEP. This reduces the size of the load. -    // FIXME: If a load is used only by extractvalue instructions then this -    //        could be done regardless of having multiple uses. +    // load from a GEP. This reduces the size of the load. If a load is used +    // only by extractvalue instructions then this either must have been +    // optimized before, or it is a struct with padding, in which case we +    // don't want to do the transformation as it loses padding knowledge.      if (L->isSimple() && L->hasOneUse()) {        // extractvalue has integer indices, getelementptr has Value*s. Convert.        SmallVector<Value*, 4> Indices; @@ -2294,7 +2326,7 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {        // We need to insert these at the location of the old load, not at that of        // the extractvalue. -      Builder->SetInsertPoint(L->getParent(), L); +      Builder->SetInsertPoint(L);        Value *GEP = Builder->CreateInBoundsGEP(L->getType(),                                                L->getPointerOperand(), Indices);        // Returning the load directly will cause the main loop to insert it in @@ -2312,7 +2344,7 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {    return nullptr;  } -/// isCatchAll - Return 'true' if the given typeinfo will match anything. +/// Return 'true' if the given typeinfo will match anything.  
static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {    switch (Personality) {    case EHPersonality::GNU_C: @@ -2330,6 +2362,7 @@ static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {    case EHPersonality::MSVC_X86SEH:    case EHPersonality::MSVC_Win64SEH:    case EHPersonality::MSVC_CXX: +  case EHPersonality::CoreCLR:      return TypeInfo->isNullValue();    }    llvm_unreachable("invalid enum"); @@ -2441,10 +2474,24 @@ Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {              SawCatchAll = true;              break;            } -          if (AlreadyCaught.count(TypeInfo)) -            // Already caught by an earlier clause, so having it in the filter -            // is pointless. -            continue; + +          // Even if we've seen a type in a catch clause, we don't want to +          // remove it from the filter.  An unexpected type handler may be +          // set up for a call site which throws an exception of the same +          // type caught.  In order for the exception thrown by the unexpected +          // handler to propagate correctly, the filter must be correctly +          // described for the call site. +          // +          // Example: +          // +          // void unexpected() { throw 1;} +          // void foo() throw (int) { +          //   std::set_unexpected(unexpected); +          //   try { +          //     throw 2.0; +          //   } catch (int i) {} +          // } +            // There is no point in having multiple copies of the same typeinfo in            // a filter, so only add it if we didn't already.            
if (SeenInFilter.insert(TypeInfo).second) @@ -2637,15 +2684,15 @@ Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {    return nullptr;  } -/// TryToSinkInstruction - Try to move the specified instruction from its -/// current block into the beginning of DestBlock, which can only happen if it's -/// safe to move the instruction past all of the instructions between it and the -/// end of its block. +/// Try to move the specified instruction from its current block into the +/// beginning of DestBlock, which can only happen if it's safe to move the +/// instruction past all of the instructions between it and the end of its +/// block.  static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {    assert(I->hasOneUse() && "Invariants didn't hold!");    // Cannot move control-flow-involving, volatile loads, vaarg, etc. -  if (isa<PHINode>(I) || isa<LandingPadInst>(I) || I->mayHaveSideEffects() || +  if (isa<PHINode>(I) || I->isEHPad() || I->mayHaveSideEffects() ||        isa<TerminatorInst>(I))      return false; @@ -2654,17 +2701,24 @@ static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {          &DestBlock->getParent()->getEntryBlock())      return false; +  // Do not sink convergent call instructions. +  if (auto *CI = dyn_cast<CallInst>(I)) { +    if (CI->isConvergent()) +      return false; +  } +    // We can only sink load instructions if there is nothing between the load and    // the end of block that could change the value.    
if (I->mayReadFromMemory()) { -    for (BasicBlock::iterator Scan = I, E = I->getParent()->end(); +    for (BasicBlock::iterator Scan = I->getIterator(), +                              E = I->getParent()->end();           Scan != E; ++Scan)        if (Scan->mayWriteToMemory())          return false;    }    BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt(); -  I->moveBefore(InsertPos); +  I->moveBefore(&*InsertPos);    ++NumSunkInst;    return true;  } @@ -2698,6 +2752,27 @@ bool InstCombiner::run() {        }      } +    // In general, it is possible for computeKnownBits to determine all bits in a +    // value even when the operands are not all constants. +    if (!I->use_empty() && I->getType()->isIntegerTy()) { +      unsigned BitWidth = I->getType()->getScalarSizeInBits(); +      APInt KnownZero(BitWidth, 0); +      APInt KnownOne(BitWidth, 0); +      computeKnownBits(I, KnownZero, KnownOne, /*Depth*/0, I); +      if ((KnownZero | KnownOne).isAllOnesValue()) { +        Constant *C = ConstantInt::get(I->getContext(), KnownOne); +        DEBUG(dbgs() << "IC: ConstFold (all bits known) to: " << *C << +                        " from: " << *I << '\n'); + +        // Add operands to the worklist. +        ReplaceInstUsesWith(*I, C); +        ++NumConstProp; +        EraseInstFromFunction(*I); +        MadeIRChange = true; +        continue; +      } +    } +      // See if we can trivially sink this instruction to a successor basic block.      if (I->hasOneUse()) {        BasicBlock *BB = I->getParent(); @@ -2738,7 +2813,7 @@ bool InstCombiner::run() {      }      // Now that we have an instruction, try combining it to simplify it. -    Builder->SetInsertPoint(I->getParent(), I); +    Builder->SetInsertPoint(I);      Builder->SetCurrentDebugLocation(I->getDebugLoc());  #ifndef NDEBUG @@ -2768,7 +2843,7 @@ bool InstCombiner::run() {          // Insert the new instruction into the basic block...          
BasicBlock *InstParent = I->getParent(); -        BasicBlock::iterator InsertPos = I; +        BasicBlock::iterator InsertPos = I->getIterator();          // If we replace a PHI with something that isn't a PHI, fix up the          // insertion point. @@ -2801,8 +2876,8 @@ bool InstCombiner::run() {    return MadeIRChange;  } -/// AddReachableCodeToWorklist - Walk the function in depth-first order, adding -/// all reachable code to the worklist. +/// Walk the function in depth-first order, adding all reachable code to the +/// worklist.  ///  /// This has a couple of tricks to make the code faster and more powerful.  In  /// particular, we constant fold and DCE instructions as we go, to avoid adding @@ -2829,7 +2904,7 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB, const DataLayout &DL,        continue;      for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) { -      Instruction *Inst = BBI++; +      Instruction *Inst = &*BBI++;        // DCE instruction if trivially dead.        if (isInstructionTriviallyDead(Inst, TLI)) { @@ -2900,8 +2975,8 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB, const DataLayout &DL,        }      } -    for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) -      Worklist.push_back(TI->getSuccessor(i)); +    for (BasicBlock *SuccBB : TI->successors()) +      Worklist.push_back(SuccBB);    } while (!Worklist.empty());    // Once we've found all of the instructions to add to instcombine's worklist, @@ -2909,8 +2984,7 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB, const DataLayout &DL,    // of the function down.  This jives well with the way that it adds all uses    // of instructions to the worklist after doing a transformation, thus avoiding    // some N^2 behavior in pathological cases. 
-  ICWorklist.AddInitialGroup(&InstrsForInstCombineWorklist[0], -                             InstrsForInstCombineWorklist.size()); +  ICWorklist.AddInitialGroup(InstrsForInstCombineWorklist);    return MadeIRChange;  } @@ -2930,13 +3004,13 @@ static bool prepareICWorklistFromFunction(Function &F, const DataLayout &DL,    // track of which blocks we visit.    SmallPtrSet<BasicBlock *, 64> Visited;    MadeIRChange |= -      AddReachableCodeToWorklist(F.begin(), DL, Visited, ICWorklist, TLI); +      AddReachableCodeToWorklist(&F.front(), DL, Visited, ICWorklist, TLI);    // Do a quick scan over the function.  If we find any blocks that are    // unreachable, remove any instructions inside of them.  This prevents    // the instcombine code from having to deal with some bad special cases.    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) { -    if (Visited.count(BB)) +    if (Visited.count(&*BB))        continue;      // Delete the instructions backwards, as it has a reduced likelihood of @@ -2944,11 +3018,10 @@ static bool prepareICWorklistFromFunction(Function &F, const DataLayout &DL,      Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.      while (EndInst != BB->begin()) {        // Delete the next to last instruction. 
-      BasicBlock::iterator I = EndInst; -      Instruction *Inst = --I; -      if (!Inst->use_empty()) +      Instruction *Inst = &*--EndInst->getIterator(); +      if (!Inst->use_empty() && !Inst->getType()->isTokenTy())          Inst->replaceAllUsesWith(UndefValue::get(Inst->getType())); -      if (isa<LandingPadInst>(Inst)) { +      if (Inst->isEHPad()) {          EndInst = Inst;          continue;        } @@ -2956,7 +3029,8 @@ static bool prepareICWorklistFromFunction(Function &F, const DataLayout &DL,          ++NumDeadInst;          MadeIRChange = true;        } -      Inst->eraseFromParent(); +      if (!Inst->getType()->isTokenTy()) +        Inst->eraseFromParent();      }    } @@ -2968,8 +3042,6 @@ combineInstructionsOverFunction(Function &F, InstCombineWorklist &Worklist,                                  AliasAnalysis *AA, AssumptionCache &AC,                                  TargetLibraryInfo &TLI, DominatorTree &DT,                                  LoopInfo *LI = nullptr) { -  // Minimizing size? 
-  bool MinimizeSize = F.hasFnAttribute(Attribute::MinSize);    auto &DL = F.getParent()->getDataLayout();    /// Builder - This is an IRBuilder that automatically inserts new @@ -2992,7 +3064,7 @@ combineInstructionsOverFunction(Function &F, InstCombineWorklist &Worklist,      if (prepareICWorklistFromFunction(F, DL, &TLI, Worklist))        Changed = true; -    InstCombiner IC(Worklist, &Builder, MinimizeSize, +    InstCombiner IC(Worklist, &Builder, F.optForMinSize(),                      AA, &AC, &TLI, &DT, DL, LI);      if (IC.run())        Changed = true; @@ -3046,11 +3118,12 @@ public:  void InstructionCombiningPass::getAnalysisUsage(AnalysisUsage &AU) const {    AU.setPreservesCFG(); -  AU.addRequired<AliasAnalysis>(); +  AU.addRequired<AAResultsWrapperPass>();    AU.addRequired<AssumptionCacheTracker>();    AU.addRequired<TargetLibraryInfoWrapperPass>();    AU.addRequired<DominatorTreeWrapperPass>();    AU.addPreserved<DominatorTreeWrapperPass>(); +  AU.addPreserved<GlobalsAAWrapperPass>();  }  bool InstructionCombiningPass::runOnFunction(Function &F) { @@ -3058,7 +3131,7 @@ bool InstructionCombiningPass::runOnFunction(Function &F) {      return false;    // Required analyses. 
-  auto AA = &getAnalysis<AliasAnalysis>(); +  auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);    auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); @@ -3076,7 +3149,8 @@ INITIALIZE_PASS_BEGIN(InstructionCombiningPass, "instcombine",  INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)  INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)  INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) -INITIALIZE_AG_DEPENDENCY(AliasAnalysis) +INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) +INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)  INITIALIZE_PASS_END(InstructionCombiningPass, "instcombine",                      "Combine redundant instructions", false, false)  | 
