Diffstat (limited to 'contrib/llvm-project/llvm/lib/Transforms/Utils/InlineFunction.cpp')
-rw-r--r--  contrib/llvm-project/llvm/lib/Transforms/Utils/InlineFunction.cpp | 202
1 file changed, 102 insertions, 100 deletions
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/InlineFunction.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/InlineFunction.cpp
index 792aa8208f27..f4776589910f 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -539,12 +539,10 @@ static Value *getUnwindDestToken(Instruction *EHPad,
 static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
     BasicBlock *BB, BasicBlock *UnwindEdge,
     UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
-  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
-    Instruction *I = &*BBI++;
-
+  for (Instruction &I : llvm::make_early_inc_range(*BB)) {
     // We only need to check for function calls: inlined invoke
     // instructions require no special handling.
-    CallInst *CI = dyn_cast<CallInst>(I);
+    CallInst *CI = dyn_cast<CallInst>(&I);
     if (!CI || CI->doesNotThrow())
       continue;

@@ -830,6 +828,7 @@ static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart,
   }
 }

+namespace {
 /// Utility for cloning !noalias and !alias.scope metadata. When a code region
 /// using scoped alias metadata is inlined, the aliasing relationships may not
 /// hold between the two version. It is necessary to create a deep clone of the
@@ -851,6 +850,7 @@ public:
   /// metadata.
   void remap(Function::iterator FStart, Function::iterator FEnd);
 };
+} // namespace

 ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner(
     const Function *F) {
@@ -1179,14 +1179,8 @@ static bool MayContainThrowingOrExitingCall(Instruction *Begin,

   assert(Begin->getParent() == End->getParent() &&
          "Expected to be in same basic block!");
-  unsigned NumInstChecked = 0;
-  // Check that all instructions in the range [Begin, End) are guaranteed to
-  // transfer execution to successor.
-  for (auto &I : make_range(Begin->getIterator(), End->getIterator()))
-    if (NumInstChecked++ > InlinerAttributeWindow ||
-        !isGuaranteedToTransferExecutionToSuccessor(&I))
-      return true;
-  return false;
+  return !llvm::isGuaranteedToTransferExecutionToSuccessor(
+      Begin->getIterator(), End->getIterator(), InlinerAttributeWindow + 1);
 }

 static AttrBuilder IdentifyValidAttributes(CallBase &CB) {
@@ -1259,8 +1253,7 @@ static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap) {
     // existing attribute value (i.e. attributes such as dereferenceable,
     // dereferenceable_or_null etc). See AttrBuilder::merge for more details.
     AttributeList AL = NewRetVal->getAttributes();
-    AttributeList NewAL =
-        AL.addAttributes(Context, AttributeList::ReturnIndex, Valid);
+    AttributeList NewAL = AL.addRetAttributes(Context, Valid);
     NewRetVal->setAttributes(NewAL);
   }
 }
@@ -1376,13 +1369,13 @@ static void UpdateCallGraphAfterInlining(CallBase &CB,
   CallerNode->removeCallEdgeFor(*cast<CallBase>(&CB));
 }

-static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
-                                    BasicBlock *InsertBlock,
+static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src,
+                                    Module *M, BasicBlock *InsertBlock,
                                     InlineFunctionInfo &IFI) {
-  Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
   IRBuilder<> Builder(InsertBlock, InsertBlock->begin());

-  Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));
+  Value *Size =
+      Builder.getInt64(M->getDataLayout().getTypeStoreSize(ByValType));

   // Always generate a memcpy of alignment 1 here because we don't know
   // the alignment of the src pointer.  Other optimizations can infer
@@ -1393,13 +1386,13 @@ static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,

 /// When inlining a call site that has a byval argument,
 /// we have to make the implicit memcpy explicit by adding it.
-static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
+static Value *HandleByValArgument(Type *ByValType, Value *Arg,
+                                  Instruction *TheCall,
                                   const Function *CalledFunc,
                                   InlineFunctionInfo &IFI,
                                   unsigned ByValAlignment) {
-  PointerType *ArgTy = cast<PointerType>(Arg->getType());
-  Type *AggTy = ArgTy->getElementType();
-
+  assert(cast<PointerType>(Arg->getType())
+             ->isOpaqueOrPointeeTypeMatches(ByValType));
   Function *Caller = TheCall->getFunction();
   const DataLayout &DL = Caller->getParent()->getDataLayout();

@@ -1427,7 +1420,7 @@ static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
   }

   // Create the alloca.  If we have DataLayout, use nice alignment.
-  Align Alignment(DL.getPrefTypeAlignment(AggTy));
+  Align Alignment(DL.getPrefTypeAlignment(ByValType));

   // If the byval had an alignment specified, we *must* use at least that
   // alignment, as it is required by the byval argument (and uses of the
@@ -1435,7 +1428,7 @@ static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
   Alignment = max(Alignment, MaybeAlign(ByValAlignment));

   Value *NewAlloca =
-      new AllocaInst(AggTy, DL.getAllocaAddrSpace(), nullptr, Alignment,
+      new AllocaInst(ByValType, DL.getAllocaAddrSpace(), nullptr, Alignment,
                      Arg->getName(), &*Caller->begin()->begin());
   IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));

@@ -1607,8 +1600,7 @@ static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
                               const ProfileCount &CalleeEntryCount,
                               const CallBase &TheCall, ProfileSummaryInfo *PSI,
                               BlockFrequencyInfo *CallerBFI) {
-  if (!CalleeEntryCount.hasValue() || CalleeEntryCount.isSynthetic() ||
-      CalleeEntryCount.getCount() < 1)
+  if (CalleeEntryCount.isSynthetic() || CalleeEntryCount.getCount() < 1)
     return;
   auto CallSiteCount = PSI ? PSI->getProfileCount(TheCall, CallerBFI) : None;
   int64_t CallCount =
@@ -1617,40 +1609,39 @@ static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
 }

 void llvm::updateProfileCallee(
-    Function *Callee, int64_t entryDelta,
+    Function *Callee, int64_t EntryDelta,
     const ValueMap<const Value *, WeakTrackingVH> *VMap) {
   auto CalleeCount = Callee->getEntryCount();
   if (!CalleeCount.hasValue())
     return;

-  uint64_t priorEntryCount = CalleeCount.getCount();
-  uint64_t newEntryCount;
+  const uint64_t PriorEntryCount = CalleeCount->getCount();

   // Since CallSiteCount is an estimate, it could exceed the original callee
   // count and has to be set to 0 so guard against underflow.
-  if (entryDelta < 0 && static_cast<uint64_t>(-entryDelta) > priorEntryCount)
-    newEntryCount = 0;
-  else
-    newEntryCount = priorEntryCount + entryDelta;
+  const uint64_t NewEntryCount =
+      (EntryDelta < 0 && static_cast<uint64_t>(-EntryDelta) > PriorEntryCount)
+          ? 0
+          : PriorEntryCount + EntryDelta;

   // During inlining ?
   if (VMap) {
-    uint64_t cloneEntryCount = priorEntryCount - newEntryCount;
+    uint64_t CloneEntryCount = PriorEntryCount - NewEntryCount;
     for (auto Entry : *VMap)
       if (isa<CallInst>(Entry.first))
         if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
-          CI->updateProfWeight(cloneEntryCount, priorEntryCount);
+          CI->updateProfWeight(CloneEntryCount, PriorEntryCount);
   }

-  if (entryDelta) {
-    Callee->setEntryCount(newEntryCount);
+  if (EntryDelta) {
+    Callee->setEntryCount(NewEntryCount);

     for (BasicBlock &BB : *Callee)
       // No need to update the callsite if it is pruned during inlining.
       if (!VMap || VMap->count(&BB))
         for (Instruction &I : BB)
           if (CallInst *CI = dyn_cast<CallInst>(&I))
-            CI->updateProfWeight(newEntryCount, priorEntryCount);
+            CI->updateProfWeight(NewEntryCount, PriorEntryCount);
   }
 }
@@ -1672,66 +1663,69 @@ void llvm::updateProfileCallee(
 /// 3. Otherwise, a call to objc_retain is inserted if the call in the caller is
 ///    a retainRV call.
 static void
-inlineRetainOrClaimRVCalls(CallBase &CB,
+inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind,
                            const SmallVectorImpl<ReturnInst *> &Returns) {
   Module *Mod = CB.getModule();
-  bool IsRetainRV = objcarc::hasAttachedCallOpBundle(&CB, true),
+  assert(objcarc::isRetainOrClaimRV(RVCallKind) && "unexpected ARC function");
+  bool IsRetainRV = RVCallKind == objcarc::ARCInstKind::RetainRV,
        IsClaimRV = !IsRetainRV;

   for (auto *RI : Returns) {
     Value *RetOpnd = objcarc::GetRCIdentityRoot(RI->getOperand(0));
-    BasicBlock::reverse_iterator I = ++(RI->getIterator().getReverse());
-    BasicBlock::reverse_iterator EI = RI->getParent()->rend();
     bool InsertRetainCall = IsRetainRV;
     IRBuilder<> Builder(RI->getContext());

     // Walk backwards through the basic block looking for either a matching
     // autoreleaseRV call or an unannotated call.
-    for (; I != EI;) {
-      auto CurI = I++;
-
+    auto InstRange = llvm::make_range(++(RI->getIterator().getReverse()),
+                                      RI->getParent()->rend());
+    for (Instruction &I : llvm::make_early_inc_range(InstRange)) {
       // Ignore casts.
-      if (isa<CastInst>(*CurI))
+      if (isa<CastInst>(I))
        continue;

-      if (auto *II = dyn_cast<IntrinsicInst>(&*CurI)) {
-        if (II->getIntrinsicID() == Intrinsic::objc_autoreleaseReturnValue &&
-            II->hasNUses(0) &&
-            objcarc::GetRCIdentityRoot(II->getOperand(0)) == RetOpnd) {
-          // If we've found a matching authoreleaseRV call:
-          // - If claimRV is attached to the call, insert a call to objc_release
-          //   and erase the autoreleaseRV call.
-          // - If retainRV is attached to the call, just erase the autoreleaseRV
-          //   call.
-          if (IsClaimRV) {
-            Builder.SetInsertPoint(II);
-            Function *IFn =
-                Intrinsic::getDeclaration(Mod, Intrinsic::objc_release);
-            Value *BC =
-                Builder.CreateBitCast(RetOpnd, IFn->getArg(0)->getType());
-            Builder.CreateCall(IFn, BC, "");
-          }
-          II->eraseFromParent();
-          InsertRetainCall = false;
-        }
-      } else if (auto *CI = dyn_cast<CallInst>(&*CurI)) {
-        if (objcarc::GetRCIdentityRoot(CI) == RetOpnd &&
-            !objcarc::hasAttachedCallOpBundle(CI)) {
-          // If we've found an unannotated call that defines RetOpnd, add a
-          // "clang.arc.attachedcall" operand bundle.
-          Value *BundleArgs[] = {ConstantInt::get(
-              Builder.getInt64Ty(),
-              objcarc::getAttachedCallOperandBundleEnum(IsRetainRV))};
-          OperandBundleDef OB("clang.arc.attachedcall", BundleArgs);
-          auto *NewCall = CallBase::addOperandBundle(
-              CI, LLVMContext::OB_clang_arc_attachedcall, OB, CI);
-          NewCall->copyMetadata(*CI);
-          CI->replaceAllUsesWith(NewCall);
-          CI->eraseFromParent();
-          InsertRetainCall = false;
+      if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
+        if (II->getIntrinsicID() != Intrinsic::objc_autoreleaseReturnValue ||
+            !II->hasNUses(0) ||
+            objcarc::GetRCIdentityRoot(II->getOperand(0)) != RetOpnd)
+          break;
+
+        // If we've found a matching authoreleaseRV call:
+        // - If claimRV is attached to the call, insert a call to objc_release
+        //   and erase the autoreleaseRV call.
+        // - If retainRV is attached to the call, just erase the autoreleaseRV
+        //   call.
+        if (IsClaimRV) {
+          Builder.SetInsertPoint(II);
+          Function *IFn =
+              Intrinsic::getDeclaration(Mod, Intrinsic::objc_release);
+          Value *BC = Builder.CreateBitCast(RetOpnd, IFn->getArg(0)->getType());
+          Builder.CreateCall(IFn, BC, "");
         }
+        II->eraseFromParent();
+        InsertRetainCall = false;
+        break;
       }
+      auto *CI = dyn_cast<CallInst>(&I);
+
+      if (!CI)
+        break;
+
+      if (objcarc::GetRCIdentityRoot(CI) != RetOpnd ||
+          objcarc::hasAttachedCallOpBundle(CI))
+        break;
+
+      // If we've found an unannotated call that defines RetOpnd, add a
+      // "clang.arc.attachedcall" operand bundle.
+      Value *BundleArgs[] = {*objcarc::getAttachedARCFunction(&CB)};
+      OperandBundleDef OB("clang.arc.attachedcall", BundleArgs);
+      auto *NewCall = CallBase::addOperandBundle(
+          CI, LLVMContext::OB_clang_arc_attachedcall, OB, CI);
+      NewCall->copyMetadata(*CI);
+      CI->replaceAllUsesWith(NewCall);
+      CI->eraseFromParent();
+      InsertRetainCall = false;
       break;
     }
@@ -1895,8 +1889,13 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,

   { // Scope to destroy VMap after cloning.
     ValueToValueMapTy VMap;
+    struct ByValInit {
+      Value *Dst;
+      Value *Src;
+      Type *Ty;
+    };
     // Keep a list of pair (dst, src) to emit byval initializations.
-    SmallVector<std::pair<Value*, Value*>, 4> ByValInit;
+    SmallVector<ByValInit, 4> ByValInits;

     // When inlining a function that contains noalias scope metadata,
     // this metadata needs to be cloned so that the inlined blocks
@@ -1921,10 +1920,12 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
       // or readnone, because the copy would be unneeded: the callee doesn't
       // modify the struct.
       if (CB.isByValArgument(ArgNo)) {
-        ActualArg = HandleByValArgument(ActualArg, &CB, CalledFunc, IFI,
+        ActualArg = HandleByValArgument(CB.getParamByValType(ArgNo), ActualArg,
+                                        &CB, CalledFunc, IFI,
                                         CalledFunc->getParamAlignment(ArgNo));
         if (ActualArg != *AI)
-          ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
+          ByValInits.push_back(
+              {ActualArg, (Value *)*AI, CB.getParamByValType(ArgNo)});
       }

       VMap[&*I] = ActualArg;
@@ -1953,8 +1954,9 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
     FirstNewBlock = LastBlock; ++FirstNewBlock;

     // Insert retainRV/clainRV runtime calls.
-    if (objcarc::hasAttachedCallOpBundle(&CB))
-      inlineRetainOrClaimRVCalls(CB, Returns);
+    objcarc::ARCInstKind RVCallKind = objcarc::getAttachedARCFunctionKind(&CB);
+    if (RVCallKind != objcarc::ARCInstKind::None)
+      inlineRetainOrClaimRVCalls(CB, RVCallKind, Returns);

     // Updated caller/callee profiles only when requested. For sample loader
     // inlining, the context-sensitive inlinee profile doesn't need to be
@@ -1966,13 +1968,14 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
         updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
                         CalledFunc->front());

-      updateCallProfile(CalledFunc, VMap, CalledFunc->getEntryCount(), CB,
-                        IFI.PSI, IFI.CallerBFI);
+      if (auto Profile = CalledFunc->getEntryCount())
+        updateCallProfile(CalledFunc, VMap, *Profile, CB, IFI.PSI,
+                          IFI.CallerBFI);
     }

     // Inject byval arguments initialization.
-    for (std::pair<Value*, Value*> &Init : ByValInit)
-      HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
+    for (ByValInit &Init : ByValInits)
+      HandleByValArgumentInit(Init.Ty, Init.Dst, Init.Src, Caller->getParent(),
                               &*FirstNewBlock, IFI);

     Optional<OperandBundleUse> ParentDeopt =
@@ -2100,9 +2103,9 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
   SmallVector<Value*,4> VarArgsToForward;
   SmallVector<AttributeSet, 4> VarArgsAttrs;
   for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
-       i < CB.getNumArgOperands(); i++) {
+       i < CB.arg_size(); i++) {
     VarArgsToForward.push_back(CB.getArgOperand(i));
-    VarArgsAttrs.push_back(CB.getAttributes().getParamAttributes(i));
+    VarArgsAttrs.push_back(CB.getAttributes().getParamAttrs(i));
   }

   bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
@@ -2117,8 +2120,7 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,

     for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
          ++BB) {
-      for (auto II = BB->begin(); II != BB->end();) {
-        Instruction &I = *II++;
+      for (Instruction &I : llvm::make_early_inc_range(*BB)) {
         CallInst *CI = dyn_cast<CallInst>(&I);
         if (!CI)
           continue;
@@ -2135,15 +2137,15 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
           if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {
             for (unsigned ArgNo = 0;
                  ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo)
-              ArgAttrs.push_back(Attrs.getParamAttributes(ArgNo));
+              ArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));
           }

           // Add VarArg attributes.
           ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());
-          Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttributes(),
-                                     Attrs.getRetAttributes(), ArgAttrs);
+          Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttrs(),
+                                     Attrs.getRetAttrs(), ArgAttrs);
           // Add VarArgs to existing parameters.
-          SmallVector<Value *, 6> Params(CI->arg_operands());
+          SmallVector<Value *, 6> Params(CI->args());
           Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
           CallInst *NewCI = CallInst::Create(
               CI->getFunctionType(), CI->getCalledOperand(), Params, "", CI);
@@ -2295,8 +2297,8 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
          BB != E; ++BB) {
       // Add bundle operands to any top-level call sites.
       SmallVector<OperandBundleDef, 1> OpBundles;
-      for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) {
-        CallBase *I = dyn_cast<CallBase>(&*BBI++);
+      for (Instruction &II : llvm::make_early_inc_range(*BB)) {
+        CallBase *I = dyn_cast<CallBase>(&II);
         if (!I)
           continue;
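
A pattern repeated throughout the hunks above is the switch from manual iterator bookkeeping to llvm::make_early_inc_range, which advances the iterator before each loop body runs so the current instruction can be erased without invalidating the traversal. The standalone sketch below is illustrative only and not part of this commit (the helper name and the intrinsic it removes are made up for the example); it shows the same idiom against the LLVM C++ API:

#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"

using namespace llvm;

// Hypothetical helper: erase every llvm.donothing call in a block while
// walking it. Erasing the current instruction is safe because
// make_early_inc_range has already stepped past it.
static void removeDoNothingCalls(BasicBlock &BB) {
  for (Instruction &I : llvm::make_early_inc_range(BB)) {
    if (auto *CI = dyn_cast<CallInst>(&I))
      if (Function *Callee = CI->getCalledFunction())
        if (Callee->getIntrinsicID() == Intrinsic::donothing)
          CI->eraseFromParent();
  }
}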
