field | value | date
---|---|---
author | Dimitry Andric &lt;dim@FreeBSD.org&gt; | 2023-09-02 21:17:18 +0000
committer | Dimitry Andric &lt;dim@FreeBSD.org&gt; | 2023-12-08 17:34:50 +0000
commit | 06c3fb2749bda94cb5201f81ffdb8fa6c3161b2e |
tree | 62f873df87c7c675557a179e0c4c83fe9f3087bc /contrib/llvm-project/llvm/lib/Transforms/IPO/FunctionAttrs.cpp |
parent | cf037972ea8863e2bab7461d77345367d2c1e054 |
parent | 7fa27ce4a07f19b07799a767fc29416f3b625afb |
Diffstat (limited to 'contrib/llvm-project/llvm/lib/Transforms/IPO/FunctionAttrs.cpp')
-rw-r--r-- | contrib/llvm-project/llvm/lib/Transforms/IPO/FunctionAttrs.cpp | 308
1 file changed, 112 insertions, 196 deletions
```diff
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/FunctionAttrs.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
index 3f61dbe3354e..34299f9dbb23 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -50,8 +50,6 @@
 #include "llvm/IR/Use.h"
 #include "llvm/IR/User.h"
 #include "llvm/IR/Value.h"
-#include "llvm/InitializePasses.h"
-#include "llvm/Pass.h"
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Compiler.h"
@@ -154,7 +152,7 @@ static MemoryEffects checkFunctionMemoryAccess(Function &F, bool ThisBody,
     // If it's not an identified object, it might be an argument.
     if (!isIdentifiedObject(UO))
       ME |= MemoryEffects::argMemOnly(MR);
-    ME |= MemoryEffects(MemoryEffects::Other, MR);
+    ME |= MemoryEffects(IRMemLocation::Other, MR);
   };
   // Scan the function body for instructions that may read or write memory.
   for (Instruction &I : instructions(F)) {
@@ -181,17 +179,17 @@
       if (isa<PseudoProbeInst>(I))
         continue;
 
-      ME |= CallME.getWithoutLoc(MemoryEffects::ArgMem);
+      ME |= CallME.getWithoutLoc(IRMemLocation::ArgMem);
 
       // If the call accesses captured memory (currently part of "other") and
       // an argument is captured (currently not tracked), then it may also
      // access argument memory.
-      ModRefInfo OtherMR = CallME.getModRef(MemoryEffects::Other);
+      ModRefInfo OtherMR = CallME.getModRef(IRMemLocation::Other);
       ME |= MemoryEffects::argMemOnly(OtherMR);
 
       // Check whether all pointer arguments point to local memory, and
       // ignore calls that only access local memory.
-      ModRefInfo ArgMR = CallME.getModRef(MemoryEffects::ArgMem);
+      ModRefInfo ArgMR = CallME.getModRef(IRMemLocation::ArgMem);
       if (ArgMR != ModRefInfo::NoModRef) {
         for (const Use &U : Call->args()) {
           const Value *Arg = U;
@@ -640,7 +638,7 @@ determinePointerAccessAttrs(Argument *A,
         if (Visited.insert(&UU).second)
           Worklist.push_back(&UU);
       }
-      
+
       if (CB.doesNotAccessMemory())
         continue;
@@ -723,18 +721,18 @@ static void addArgumentReturnedAttrs(const SCCNodeSet &SCCNodes,
      continue;
 
    // There is nothing to do if an argument is already marked as 'returned'.
-    if (llvm::any_of(F->args(),
-                     [](const Argument &Arg) { return Arg.hasReturnedAttr(); }))
+    if (F->getAttributes().hasAttrSomewhere(Attribute::Returned))
      continue;
 
-    auto FindRetArg = [&]() -> Value * {
-      Value *RetArg = nullptr;
+    auto FindRetArg = [&]() -> Argument * {
+      Argument *RetArg = nullptr;
      for (BasicBlock &BB : *F)
        if (auto *Ret = dyn_cast<ReturnInst>(BB.getTerminator())) {
          // Note that stripPointerCasts should look through functions with
          // returned arguments.
-          Value *RetVal = Ret->getReturnValue()->stripPointerCasts();
-          if (!isa<Argument>(RetVal) || RetVal->getType() != F->getReturnType())
+          auto *RetVal =
+              dyn_cast<Argument>(Ret->getReturnValue()->stripPointerCasts());
+          if (!RetVal || RetVal->getType() != F->getReturnType())
            return nullptr;
 
          if (!RetArg)
@@ -746,9 +744,8 @@ static void addArgumentReturnedAttrs(const SCCNodeSet &SCCNodes,
      return RetArg;
    };
 
-    if (Value *RetArg = FindRetArg()) {
-      auto *A = cast<Argument>(RetArg);
-      A->addAttr(Attribute::Returned);
+    if (Argument *RetArg = FindRetArg()) {
+      RetArg->addAttr(Attribute::Returned);
      ++NumReturned;
      Changed.insert(F);
    }
@@ -1379,7 +1376,7 @@ static bool InstrBreaksNonConvergent(Instruction &I,
 
 /// Helper for NoUnwind inference predicate InstrBreaksAttribute.
 static bool InstrBreaksNonThrowing(Instruction &I, const SCCNodeSet &SCCNodes) {
-  if (!I.mayThrow())
+  if (!I.mayThrow(/* IncludePhaseOneUnwind */ true))
     return false;
   if (const auto *CI = dyn_cast<CallInst>(&I)) {
     if (Function *Callee = CI->getCalledFunction()) {
```
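Most of the hunks above are mechanical: the per-location enum moved from nested `MemoryEffects::*` constants to the standalone `IRMemLocation` enum. Below is a minimal sketch of that API as it is used in the hunks. The calls (`MemoryEffects::none`, `argMemOnly`, `getModRef`, `getWithoutLoc`, `isModSet`) are real LLVM 17 interfaces from `llvm/Support/ModRef.h`, but the program itself is illustrative, not code from this commit:

```cpp
// Illustrative sketch only: exercises the MemoryEffects/IRMemLocation API
// that the hunks above migrate to (LLVM 17, llvm/Support/ModRef.h).
#include "llvm/Support/ModRef.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  // Accumulate effects location by location, the same way
  // checkFunctionMemoryAccess folds in each scanned instruction.
  MemoryEffects ME = MemoryEffects::none();
  ME |= MemoryEffects::argMemOnly(ModRefInfo::Mod);           // writes an argument
  ME |= MemoryEffects(IRMemLocation::Other, ModRefInfo::Ref); // reads escaped memory

  // Per-location query, mirroring CallME.getModRef(IRMemLocation::ArgMem).
  ModRefInfo ArgMR = ME.getModRef(IRMemLocation::ArgMem);
  outs() << "writes arg mem: " << (isModSet(ArgMR) ? "yes" : "no") << "\n";

  // Dropping a location, mirroring CallME.getWithoutLoc(IRMemLocation::ArgMem).
  MemoryEffects NoArg = ME.getWithoutLoc(IRMemLocation::ArgMem);
  outs() << "read-only after drop: " << (NoArg.onlyReadsMemory() ? "yes" : "no")
         << "\n";
  return 0;
}
```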
```diff
@@ -1410,6 +1407,61 @@ static bool InstrBreaksNoFree(Instruction &I, const SCCNodeSet &SCCNodes) {
   return true;
 }
 
+// Return true if this is an atomic which has an ordering stronger than
+// unordered. Note that this is different than the predicate we use in
+// Attributor. Here we chose to be conservative and consider monotonic
+// operations potentially synchronizing. We generally don't do much with
+// monotonic operations, so this is simply risk reduction.
+static bool isOrderedAtomic(Instruction *I) {
+  if (!I->isAtomic())
+    return false;
+
+  if (auto *FI = dyn_cast<FenceInst>(I))
+    // All legal orderings for fence are stronger than monotonic.
+    return FI->getSyncScopeID() != SyncScope::SingleThread;
+  else if (isa<AtomicCmpXchgInst>(I) || isa<AtomicRMWInst>(I))
+    return true;
+  else if (auto *SI = dyn_cast<StoreInst>(I))
+    return !SI->isUnordered();
+  else if (auto *LI = dyn_cast<LoadInst>(I))
+    return !LI->isUnordered();
+  else {
+    llvm_unreachable("unknown atomic instruction?");
+  }
+}
+
+static bool InstrBreaksNoSync(Instruction &I, const SCCNodeSet &SCCNodes) {
+  // Volatile may synchronize
+  if (I.isVolatile())
+    return true;
+
+  // An ordered atomic may synchronize. (See comment about on monotonic.)
+  if (isOrderedAtomic(&I))
+    return true;
+
+  auto *CB = dyn_cast<CallBase>(&I);
+  if (!CB)
+    // Non call site cases covered by the two checks above
+    return false;
+
+  if (CB->hasFnAttr(Attribute::NoSync))
+    return false;
+
+  // Non volatile memset/memcpy/memmoves are nosync
+  // NOTE: Only intrinsics with volatile flags should be handled here. All
+  // others should be marked in Intrinsics.td.
+  if (auto *MI = dyn_cast<MemIntrinsic>(&I))
+    if (!MI->isVolatile())
+      return false;
+
+  // Speculatively assume in SCC.
+  if (Function *Callee = CB->getCalledFunction())
+    if (SCCNodes.contains(Callee))
+      return false;
+
+  return true;
+}
+
 /// Attempt to remove convergent function attribute when possible.
 ///
 /// Returns true if any changes to function attributes were made.
@@ -1441,9 +1493,7 @@ static void inferConvergent(const SCCNodeSet &SCCNodes,
 }
 
 /// Infer attributes from all functions in the SCC by scanning every
-/// instruction for compliance to the attribute assumptions. Currently it
-/// does:
-///   - addition of NoUnwind attribute
+/// instruction for compliance to the attribute assumptions.
 ///
 /// Returns true if any changes to function attributes were made.
 static void inferAttrsFromFunctionBodies(const SCCNodeSet &SCCNodes,
@@ -1495,6 +1545,22 @@
       },
      /* RequiresExactDefinition= */ true});
 
+  AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
+      Attribute::NoSync,
+      // Skip already marked functions.
+      [](const Function &F) { return F.hasNoSync(); },
+      // Instructions that break nosync assumption.
+      [&SCCNodes](Instruction &I) {
+        return InstrBreaksNoSync(I, SCCNodes);
+      },
+      [](Function &F) {
+        LLVM_DEBUG(dbgs()
+                   << "Adding nosync attr to fn " << F.getName() << "\n");
+        F.setNoSync();
+        ++NumNoSync;
+      },
+      /* RequiresExactDefinition= */ true});
+
   // Perform all the requested attribute inference actions.
   AI.run(SCCNodes, Changed);
 }
```
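This block relocates the `nosync` inference into the shared `AttributeInferer` walk in `inferAttrsFromFunctionBodies`; the standalone `addNoSyncAttr` copy of the same code is deleted further down. A hedged end-to-end sketch of the observable behavior follows. `function-attrs` is the new-pass-manager name for this pass; the IR module and the harness around it are illustrative assumptions, not material from the commit:

```cpp
// Illustrative harness only: runs the new-PM "function-attrs" CGSCC pass on
// two tiny functions and prints which of them is inferred nosync.
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  SMDiagnostic Diag;
  // @plain has only non-volatile, non-atomic accesses; @vol has a volatile
  // load, which InstrBreaksNoSync treats as potentially synchronizing.
  std::unique_ptr<Module> M = parseAssemblyString(R"IR(
    define internal i32 @plain(ptr %p) {
      %v = load i32, ptr %p
      ret i32 %v
    }
    define internal i32 @vol(ptr %p) {
      %v = load volatile i32, ptr %p
      ret i32 %v
    }
  )IR",
                                                  Diag, Ctx);
  if (!M)
    return 1;

  // Standard new-pass-manager boilerplate for running a CGSCC pass.
  PassBuilder PB;
  LoopAnalysisManager LAM;
  FunctionAnalysisManager FAM;
  CGSCCAnalysisManager CGAM;
  ModuleAnalysisManager MAM;
  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  ModulePassManager MPM;
  if (Error E = PB.parsePassPipeline(MPM, "cgscc(function-attrs)")) {
    consumeError(std::move(E));
    return 1;
  }
  MPM.run(*M, MAM);

  // Expected: "plain: nosync", "vol: may sync".
  for (const char *Name : {"plain", "vol"})
    outs() << Name << ": "
           << (M->getFunction(Name)->hasNoSync() ? "nosync" : "may sync")
           << "\n";
  return 0;
}
```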
```diff
@@ -1622,83 +1688,6 @@ static void addWillReturn(const SCCNodeSet &SCCNodes,
   }
 }
 
-// Return true if this is an atomic which has an ordering stronger than
-// unordered. Note that this is different than the predicate we use in
-// Attributor. Here we chose to be conservative and consider monotonic
-// operations potentially synchronizing. We generally don't do much with
-// monotonic operations, so this is simply risk reduction.
-static bool isOrderedAtomic(Instruction *I) {
-  if (!I->isAtomic())
-    return false;
-
-  if (auto *FI = dyn_cast<FenceInst>(I))
-    // All legal orderings for fence are stronger than monotonic.
-    return FI->getSyncScopeID() != SyncScope::SingleThread;
-  else if (isa<AtomicCmpXchgInst>(I) || isa<AtomicRMWInst>(I))
-    return true;
-  else if (auto *SI = dyn_cast<StoreInst>(I))
-    return !SI->isUnordered();
-  else if (auto *LI = dyn_cast<LoadInst>(I))
-    return !LI->isUnordered();
-  else {
-    llvm_unreachable("unknown atomic instruction?");
-  }
-}
-
-static bool InstrBreaksNoSync(Instruction &I, const SCCNodeSet &SCCNodes) {
-  // Volatile may synchronize
-  if (I.isVolatile())
-    return true;
-
-  // An ordered atomic may synchronize. (See comment about on monotonic.)
-  if (isOrderedAtomic(&I))
-    return true;
-
-  auto *CB = dyn_cast<CallBase>(&I);
-  if (!CB)
-    // Non call site cases covered by the two checks above
-    return false;
-
-  if (CB->hasFnAttr(Attribute::NoSync))
-    return false;
-
-  // Non volatile memset/memcpy/memmoves are nosync
-  // NOTE: Only intrinsics with volatile flags should be handled here. All
-  // others should be marked in Intrinsics.td.
-  if (auto *MI = dyn_cast<MemIntrinsic>(&I))
-    if (!MI->isVolatile())
-      return false;
-
-  // Speculatively assume in SCC.
-  if (Function *Callee = CB->getCalledFunction())
-    if (SCCNodes.contains(Callee))
-      return false;
-
-  return true;
-}
-
-// Infer the nosync attribute.
-static void addNoSyncAttr(const SCCNodeSet &SCCNodes,
-                          SmallSet<Function *, 8> &Changed) {
-  AttributeInferer AI;
-  AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
-      Attribute::NoSync,
-      // Skip already marked functions.
-      [](const Function &F) { return F.hasNoSync(); },
-      // Instructions that break nosync assumption.
-      [&SCCNodes](Instruction &I) {
-        return InstrBreaksNoSync(I, SCCNodes);
-      },
-      [](Function &F) {
-        LLVM_DEBUG(dbgs()
-                   << "Adding nosync attr to fn " << F.getName() << "\n");
-        F.setNoSync();
-        ++NumNoSync;
-      },
-      /* RequiresExactDefinition= */ true});
-  AI.run(SCCNodes, Changed);
-}
-
 static SCCNodesResult createSCCNodeSet(ArrayRef<Function *> Functions) {
   SCCNodesResult Res;
   Res.HasUnknownCall = false;
@@ -1756,8 +1745,6 @@ deriveAttrsInPostOrder(ArrayRef<Function *> Functions, AARGetterT &&AARGetter) {
     addNoRecurseAttrs(Nodes.SCCNodes, Changed);
   }
 
-  addNoSyncAttr(Nodes.SCCNodes, Changed);
-
   // Finally, infer the maximal set of attributes from the ones we've inferred
   // above. This is handling the cases where one attribute on a signature
   // implies another, but for implementation reasons the inference rule for
@@ -1774,6 +1761,13 @@ PreservedAnalyses
 PostOrderFunctionAttrsPass::run(LazyCallGraph::SCC &C,
                                 CGSCCAnalysisManager &AM, LazyCallGraph &CG,
                                 CGSCCUpdateResult &) {
+  // Skip non-recursive functions if requested.
+  if (C.size() == 1 && SkipNonRecursive) {
+    LazyCallGraph::Node &N = *C.begin();
+    if (!N->lookup(N))
+      return PreservedAnalyses::all();
+  }
+
   FunctionAnalysisManager &FAM =
       AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
@@ -1819,40 +1813,12 @@ PreservedAnalyses PostOrderFunctionAttrsPass::run(LazyCallGraph::SCC &C,
   return PA;
 }
 
-namespace {
-
-struct PostOrderFunctionAttrsLegacyPass : public CallGraphSCCPass {
-  // Pass identification, replacement for typeid
-  static char ID;
-
-  PostOrderFunctionAttrsLegacyPass() : CallGraphSCCPass(ID) {
-    initializePostOrderFunctionAttrsLegacyPassPass(
-        *PassRegistry::getPassRegistry());
-  }
-
-  bool runOnSCC(CallGraphSCC &SCC) override;
-
-  void getAnalysisUsage(AnalysisUsage &AU) const override {
-    AU.setPreservesCFG();
-    AU.addRequired<AssumptionCacheTracker>();
-    getAAResultsAnalysisUsage(AU);
-    CallGraphSCCPass::getAnalysisUsage(AU);
-  }
-};
-
-} // end anonymous namespace
-
-char PostOrderFunctionAttrsLegacyPass::ID = 0;
-INITIALIZE_PASS_BEGIN(PostOrderFunctionAttrsLegacyPass, "function-attrs",
-                      "Deduce function attributes", false, false)
-INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
-INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
-INITIALIZE_PASS_END(PostOrderFunctionAttrsLegacyPass, "function-attrs",
-                    "Deduce function attributes", false, false)
-
-Pass *llvm::createPostOrderFunctionAttrsLegacyPass() {
-  return new PostOrderFunctionAttrsLegacyPass();
+void PostOrderFunctionAttrsPass::printPipeline(
+    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
+  static_cast<PassInfoMixin<PostOrderFunctionAttrsPass> *>(this)->printPipeline(
+      OS, MapClassName2PassName);
+  if (SkipNonRecursive)
+    OS << "<skip-non-recursive>";
 }
 
 template <typename AARGetterT>
@@ -1865,48 +1831,6 @@ static bool runImpl(CallGraphSCC &SCC, AARGetterT AARGetter) {
   return !deriveAttrsInPostOrder(Functions, AARGetter).empty();
 }
 
-bool PostOrderFunctionAttrsLegacyPass::runOnSCC(CallGraphSCC &SCC) {
-  if (skipSCC(SCC))
-    return false;
-  return runImpl(SCC, LegacyAARGetter(*this));
-}
-
-namespace {
-
-struct ReversePostOrderFunctionAttrsLegacyPass : public ModulePass {
-  // Pass identification, replacement for typeid
-  static char ID;
-
-  ReversePostOrderFunctionAttrsLegacyPass() : ModulePass(ID) {
-    initializeReversePostOrderFunctionAttrsLegacyPassPass(
-        *PassRegistry::getPassRegistry());
-  }
-
-  bool runOnModule(Module &M) override;
-
-  void getAnalysisUsage(AnalysisUsage &AU) const override {
-    AU.setPreservesCFG();
-    AU.addRequired<CallGraphWrapperPass>();
-    AU.addPreserved<CallGraphWrapperPass>();
-  }
-};
-
-} // end anonymous namespace
-
-char ReversePostOrderFunctionAttrsLegacyPass::ID = 0;
-
-INITIALIZE_PASS_BEGIN(ReversePostOrderFunctionAttrsLegacyPass,
-                      "rpo-function-attrs", "Deduce function attributes in RPO",
-                      false, false)
-INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
-INITIALIZE_PASS_END(ReversePostOrderFunctionAttrsLegacyPass,
-                    "rpo-function-attrs", "Deduce function attributes in RPO",
-                    false, false)
-
-Pass *llvm::createReversePostOrderFunctionAttrsPass() {
-  return new ReversePostOrderFunctionAttrsLegacyPass();
-}
-
 static bool addNoRecurseAttrsTopDown(Function &F) {
   // We check the preconditions for the function prior to calling this to avoid
   // the cost of building up a reversible post-order list. We assert them here
```
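With the legacy-PM wrapper and its `INITIALIZE_PASS` boilerplate removed, the only pass plumbing left is the new `printPipeline` override, which round-trips the `SkipNonRecursive` flag into pipeline text so that `-passes='function-attrs<skip-non-recursive>'` reparses to the same configuration. A small sketch, assuming the LLVM 17 `PostOrderFunctionAttrsPass(bool)` constructor; the identity lambda stands in for PassBuilder's real class-name-to-pass-name mapping:

```cpp
// Illustrative sketch only: shows the printPipeline round-trip added above.
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO/FunctionAttrs.h"

using namespace llvm;

int main() {
  // Assumed LLVM 17 constructor; the flag is what run() checks before
  // skipping singleton, non-self-recursive SCCs.
  PostOrderFunctionAttrsPass P(/*SkipNonRecursive=*/true);

  // PassBuilder maps the class name to "function-attrs"; the identity
  // mapping below prints the raw class name instead, followed by the
  // "<skip-non-recursive>" suffix emitted by the override in this patch.
  P.printPipeline(outs(), [](StringRef Name) { return Name; });
  outs() << "\n";
  return 0;
}
```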
```diff
@@ -1939,7 +1863,7 @@ static bool addNoRecurseAttrsTopDown(Function &F) {
   return true;
 }
 
-static bool deduceFunctionAttributeInRPO(Module &M, CallGraph &CG) {
+static bool deduceFunctionAttributeInRPO(Module &M, LazyCallGraph &CG) {
   // We only have a post-order SCC traversal (because SCCs are inherently
   // discovered in post-order), so we accumulate them in a vector and then walk
   // it in reverse. This is simpler than using the RPO iterator infrastructure
@@ -1947,17 +1871,18 @@ static bool deduceFunctionAttributeInRPO(Module &M, CallGraph &CG) {
   // graph. We can also cheat egregiously because we're primarily interested in
   // synthesizing norecurse and so we can only save the singular SCCs as SCCs
   // with multiple functions in them will clearly be recursive.
-  SmallVector<Function *, 16> Worklist;
-  for (scc_iterator<CallGraph *> I = scc_begin(&CG); !I.isAtEnd(); ++I) {
-    if (I->size() != 1)
-      continue;
-    Function *F = I->front()->getFunction();
-    if (F && !F->isDeclaration() && !F->doesNotRecurse() &&
-        F->hasInternalLinkage())
-      Worklist.push_back(F);
+  SmallVector<Function *, 16> Worklist;
+  CG.buildRefSCCs();
+  for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs()) {
+    for (LazyCallGraph::SCC &SCC : RC) {
+      if (SCC.size() != 1)
+        continue;
+      Function &F = SCC.begin()->getFunction();
+      if (!F.isDeclaration() && !F.doesNotRecurse() && F.hasInternalLinkage())
+        Worklist.push_back(&F);
+    }
   }
-
   bool Changed = false;
   for (auto *F : llvm::reverse(Worklist))
     Changed |= addNoRecurseAttrsTopDown(*F);
 
@@ -1965,23 +1890,14 @@
   return Changed;
 }
 
-bool ReversePostOrderFunctionAttrsLegacyPass::runOnModule(Module &M) {
-  if (skipModule(M))
-    return false;
-
-  auto &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
-
-  return deduceFunctionAttributeInRPO(M, CG);
-}
-
 PreservedAnalyses
 ReversePostOrderFunctionAttrsPass::run(Module &M, ModuleAnalysisManager &AM) {
-  auto &CG = AM.getResult<CallGraphAnalysis>(M);
+  auto &CG = AM.getResult<LazyCallGraphAnalysis>(M);
 
   if (!deduceFunctionAttributeInRPO(M, CG))
     return PreservedAnalyses::all();
 
   PreservedAnalyses PA;
-  PA.preserve<CallGraphAnalysis>();
+  PA.preserve<LazyCallGraphAnalysis>();
   return PA;
 }
```
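The RPO pass now builds its worklist from `LazyCallGraph` ref-SCCs instead of the legacy `CallGraph`, so the module pass only depends on and preserves `LazyCallGraphAnalysis`. A hedged sketch of the visible effect: a `norecurse` caller should let the top-down walk mark an internal leaf callee `norecurse` as well. The IR module and the pipeline name `rpo-function-attrs` are assumptions based on the in-tree pass registry, not text from this commit, and the harness reuses the boilerplate from the nosync sketch above:

```cpp
// Illustrative only: drives ReversePostOrderFunctionAttrsPass through a
// pipeline string and checks the top-down norecurse propagation.
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  SMDiagnostic Diag;
  // @inner is internal and only called from @outer, which is already
  // norecurse, so the top-down walk can mark @inner norecurse too.
  std::unique_ptr<Module> M = parseAssemblyString(R"IR(
    define internal void @inner() {
      ret void
    }
    define void @outer() norecurse {
      call void @inner()
      ret void
    }
  )IR",
                                                  Diag, Ctx);
  if (!M)
    return 1;

  PassBuilder PB;
  LoopAnalysisManager LAM;
  FunctionAnalysisManager FAM;
  CGSCCAnalysisManager CGAM;
  ModuleAnalysisManager MAM;
  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  ModulePassManager MPM;
  if (Error E = PB.parsePassPipeline(MPM, "rpo-function-attrs")) {
    consumeError(std::move(E));
    return 1;
  }
  MPM.run(*M, MAM);

  // Expected "yes": every call site of @inner sits in a norecurse caller.
  outs() << "inner norecurse: "
         << (M->getFunction("inner")->doesNotRecurse() ? "yes" : "no") << "\n";
  return 0;
}
```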