about summary refs log tree commit diff
path: root/llvm/lib/Transforms/IPO/Attributor.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/lib/Transforms/IPO/Attributor.cpp')
-rw-r--r--llvm/lib/Transforms/IPO/Attributor.cpp462
1 files changed, 306 insertions, 156 deletions
diff --git a/llvm/lib/Transforms/IPO/Attributor.cpp b/llvm/lib/Transforms/IPO/Attributor.cpp
index d66140a726f6..b05b7990e3f0 100644
--- a/llvm/lib/Transforms/IPO/Attributor.cpp
+++ b/llvm/lib/Transforms/IPO/Attributor.cpp
@@ -15,29 +15,25 @@
#include "llvm/Transforms/IPO/Attributor.h"
-#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/TinyPtrVector.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/CallGraph.h"
+#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/InlineCost.h"
-#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
-#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/MustExecute.h"
-#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
-#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/IR/NoFolder.h"
#include "llvm/IR/ValueHandle.h"
-#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
@@ -50,6 +46,10 @@
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
+#ifdef EXPENSIVE_CHECKS
+#include "llvm/IR/Verifier.h"
+#endif
+
#include <cassert>
#include <string>
@@ -123,13 +123,13 @@ static cl::list<std::string>
SeedAllowList("attributor-seed-allow-list", cl::Hidden,
cl::desc("Comma seperated list of attribute names that are "
"allowed to be seeded."),
- cl::ZeroOrMore, cl::CommaSeparated);
+ cl::CommaSeparated);
static cl::list<std::string> FunctionSeedAllowList(
"attributor-function-seed-allow-list", cl::Hidden,
cl::desc("Comma seperated list of function names that are "
"allowed to be seeded."),
- cl::ZeroOrMore, cl::CommaSeparated);
+ cl::CommaSeparated);
#endif
static cl::opt<bool>
@@ -209,33 +209,25 @@ bool AA::isNoSyncInst(Attributor &A, const Instruction &I,
}
bool AA::isDynamicallyUnique(Attributor &A, const AbstractAttribute &QueryingAA,
- const Value &V) {
- if (auto *C = dyn_cast<Constant>(&V))
- return !C->isThreadDependent();
- // TODO: Inspect and cache more complex instructions.
- if (auto *CB = dyn_cast<CallBase>(&V))
- return CB->getNumOperands() == 0 && !CB->mayHaveSideEffects() &&
- !CB->mayReadFromMemory();
- const Function *Scope = nullptr;
- if (auto *I = dyn_cast<Instruction>(&V))
- Scope = I->getFunction();
- if (auto *A = dyn_cast<Argument>(&V))
- Scope = A->getParent();
- if (!Scope)
+ const Value &V, bool ForAnalysisOnly) {
+ // TODO: See the AAInstanceInfo class comment.
+ if (!ForAnalysisOnly)
return false;
- auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
- QueryingAA, IRPosition::function(*Scope), DepClassTy::OPTIONAL);
- return NoRecurseAA.isAssumedNoRecurse();
+ auto &InstanceInfoAA = A.getAAFor<AAInstanceInfo>(
+ QueryingAA, IRPosition::value(V), DepClassTy::OPTIONAL);
+ return InstanceInfoAA.isAssumedUniqueForAnalysis();
}
Constant *AA::getInitialValueForObj(Value &Obj, Type &Ty,
const TargetLibraryInfo *TLI) {
if (isa<AllocaInst>(Obj))
return UndefValue::get(&Ty);
- if (isAllocationFn(&Obj, TLI))
- return getInitialValueOfAllocation(&cast<CallBase>(Obj), TLI, &Ty);
+ if (Constant *Init = getInitialValueOfAllocation(&Obj, TLI, &Ty))
+ return Init;
auto *GV = dyn_cast<GlobalVariable>(&Obj);
- if (!GV || !GV->hasLocalLinkage())
+ if (!GV)
+ return nullptr;
+ if (!GV->hasLocalLinkage() && !(GV->isConstant() && GV->hasInitializer()))
return nullptr;
if (!GV->hasInitializer())
return UndefValue::get(&Ty);
@@ -252,19 +244,29 @@ bool AA::isValidInScope(const Value &V, const Function *Scope) {
return false;
}
-bool AA::isValidAtPosition(const Value &V, const Instruction &CtxI,
+bool AA::isValidAtPosition(const AA::ValueAndContext &VAC,
InformationCache &InfoCache) {
- if (isa<Constant>(V))
+ if (isa<Constant>(VAC.getValue()) || VAC.getValue() == VAC.getCtxI())
return true;
- const Function *Scope = CtxI.getFunction();
- if (auto *A = dyn_cast<Argument>(&V))
+ const Function *Scope = nullptr;
+ const Instruction *CtxI = VAC.getCtxI();
+ if (CtxI)
+ Scope = CtxI->getFunction();
+ if (auto *A = dyn_cast<Argument>(VAC.getValue()))
return A->getParent() == Scope;
- if (auto *I = dyn_cast<Instruction>(&V))
+ if (auto *I = dyn_cast<Instruction>(VAC.getValue())) {
if (I->getFunction() == Scope) {
- const DominatorTree *DT =
- InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Scope);
- return DT && DT->dominates(I, &CtxI);
+ if (const DominatorTree *DT =
+ InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
+ *Scope))
+ return DT->dominates(I, CtxI);
+ // Local dominance check mostly for the old PM passes.
+ if (CtxI && I->getParent() == CtxI->getParent())
+ return llvm::any_of(
+ make_range(I->getIterator(), I->getParent()->end()),
+ [&](const Instruction &AfterI) { return &AfterI == CtxI; });
}
+ }
return false;
}
@@ -295,11 +297,11 @@ AA::combineOptionalValuesInAAValueLatice(const Optional<Value *> &A,
const Optional<Value *> &B, Type *Ty) {
if (A == B)
return A;
- if (!B.hasValue())
+ if (!B)
return A;
if (*B == nullptr)
return nullptr;
- if (!A.hasValue())
+ if (!A)
return Ty ? getWithType(**B, *Ty) : nullptr;
if (*A == nullptr)
return nullptr;
@@ -314,21 +316,33 @@ AA::combineOptionalValuesInAAValueLatice(const Optional<Value *> &A,
return nullptr;
}
-bool AA::getPotentialCopiesOfStoredValue(
- Attributor &A, StoreInst &SI, SmallSetVector<Value *, 4> &PotentialCopies,
- const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation) {
+template <bool IsLoad, typename Ty>
+static bool getPotentialCopiesOfMemoryValue(
+ Attributor &A, Ty &I, SmallSetVector<Value *, 4> &PotentialCopies,
+ SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
+ const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
+ bool OnlyExact) {
+ LLVM_DEBUG(dbgs() << "Trying to determine the potential copies of " << I
+ << " (only exact: " << OnlyExact << ")\n";);
- Value &Ptr = *SI.getPointerOperand();
+ Value &Ptr = *I.getPointerOperand();
SmallVector<Value *, 8> Objects;
- if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, QueryingAA, &SI)) {
+ if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, QueryingAA, &I,
+ UsedAssumedInformation)) {
LLVM_DEBUG(
dbgs() << "Underlying objects stored into could not be determined\n";);
return false;
}
+ // Containers to remember the pointer infos and new copies while we are not
+ // sure that we can find all of them. If we abort we want to avoid spurious
+ // dependences and potential copies in the provided container.
SmallVector<const AAPointerInfo *> PIs;
SmallVector<Value *> NewCopies;
+ SmallVector<Instruction *> NewCopyOrigins;
+ const auto *TLI =
+ A.getInfoCache().getTargetLibraryInfoForFunction(*I.getFunction());
for (Value *Obj : Objects) {
LLVM_DEBUG(dbgs() << "Visit underlying object " << *Obj << "\n");
if (isa<UndefValue>(Obj))
@@ -336,7 +350,7 @@ bool AA::getPotentialCopiesOfStoredValue(
if (isa<ConstantPointerNull>(Obj)) {
// A null pointer access can be undefined but any offset from null may
// be OK. We do not try to optimize the latter.
- if (!NullPointerIsDefined(SI.getFunction(),
+ if (!NullPointerIsDefined(I.getFunction(),
Ptr.getType()->getPointerAddressSpace()) &&
A.getAssumedSimplified(Ptr, QueryingAA, UsedAssumedInformation) ==
Obj)
@@ -345,37 +359,74 @@ bool AA::getPotentialCopiesOfStoredValue(
dbgs() << "Underlying object is a valid nullptr, giving up.\n";);
return false;
}
+ // TODO: Use assumed noalias return.
if (!isa<AllocaInst>(Obj) && !isa<GlobalVariable>(Obj) &&
- !isNoAliasCall(Obj)) {
+ !(IsLoad ? isAllocationFn(Obj, TLI) : isNoAliasCall(Obj))) {
LLVM_DEBUG(dbgs() << "Underlying object is not supported yet: " << *Obj
<< "\n";);
return false;
}
if (auto *GV = dyn_cast<GlobalVariable>(Obj))
- if (!GV->hasLocalLinkage()) {
+ if (!GV->hasLocalLinkage() &&
+ !(GV->isConstant() && GV->hasInitializer())) {
LLVM_DEBUG(dbgs() << "Underlying object is global with external "
"linkage, not supported yet: "
<< *Obj << "\n";);
return false;
}
+ if (IsLoad) {
+ Value *InitialValue = AA::getInitialValueForObj(*Obj, *I.getType(), TLI);
+ if (!InitialValue)
+ return false;
+ NewCopies.push_back(InitialValue);
+ NewCopyOrigins.push_back(nullptr);
+ }
+
auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
- if (!Acc.isRead())
+ if ((IsLoad && !Acc.isWrite()) || (!IsLoad && !Acc.isRead()))
+ return true;
+ if (IsLoad && Acc.isWrittenValueYetUndetermined())
return true;
- auto *LI = dyn_cast<LoadInst>(Acc.getRemoteInst());
- if (!LI) {
- LLVM_DEBUG(dbgs() << "Underlying object read through a non-load "
- "instruction not supported yet: "
- << *Acc.getRemoteInst() << "\n";);
+ if (OnlyExact && !IsExact &&
+ !isa_and_nonnull<UndefValue>(Acc.getWrittenValue())) {
+ LLVM_DEBUG(dbgs() << "Non exact access " << *Acc.getRemoteInst()
+ << ", abort!\n");
return false;
}
- NewCopies.push_back(LI);
+ if (IsLoad) {
+ assert(isa<LoadInst>(I) && "Expected load or store instruction only!");
+ if (!Acc.isWrittenValueUnknown()) {
+ NewCopies.push_back(Acc.getWrittenValue());
+ NewCopyOrigins.push_back(Acc.getRemoteInst());
+ return true;
+ }
+ auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst());
+ if (!SI) {
+ LLVM_DEBUG(dbgs() << "Underlying object written through a non-store "
+ "instruction not supported yet: "
+ << *Acc.getRemoteInst() << "\n";);
+ return false;
+ }
+ NewCopies.push_back(SI->getValueOperand());
+ NewCopyOrigins.push_back(SI);
+ } else {
+ assert(isa<StoreInst>(I) && "Expected load or store instruction only!");
+ auto *LI = dyn_cast<LoadInst>(Acc.getRemoteInst());
+ if (!LI && OnlyExact) {
+ LLVM_DEBUG(dbgs() << "Underlying object read through a non-load "
+ "instruction not supported yet: "
+ << *Acc.getRemoteInst() << "\n";);
+ return false;
+ }
+ NewCopies.push_back(Acc.getRemoteInst());
+ }
return true;
};
auto &PI = A.getAAFor<AAPointerInfo>(QueryingAA, IRPosition::value(*Obj),
DepClassTy::NONE);
- if (!PI.forallInterferingAccesses(SI, CheckAccess)) {
+ if (!PI.forallInterferingAccesses(A, QueryingAA, I, CheckAccess)) {
LLVM_DEBUG(
dbgs()
<< "Failed to verify all interfering accesses for underlying object: "
@@ -385,16 +436,40 @@ bool AA::getPotentialCopiesOfStoredValue(
PIs.push_back(&PI);
}
+ // Only if we were successful collecting all potential copies do we record
+ // dependences (on non-fix AAPointerInfo AAs). We also only then modify the
+ // given PotentialCopies container.
for (auto *PI : PIs) {
if (!PI->getState().isAtFixpoint())
UsedAssumedInformation = true;
A.recordDependence(*PI, QueryingAA, DepClassTy::OPTIONAL);
}
PotentialCopies.insert(NewCopies.begin(), NewCopies.end());
+ PotentialValueOrigins.insert(NewCopyOrigins.begin(), NewCopyOrigins.end());
return true;
}
+bool AA::getPotentiallyLoadedValues(
+ Attributor &A, LoadInst &LI, SmallSetVector<Value *, 4> &PotentialValues,
+ SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
+ const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
+ bool OnlyExact) {
+ return getPotentialCopiesOfMemoryValue</* IsLoad */ true>(
+ A, LI, PotentialValues, PotentialValueOrigins, QueryingAA,
+ UsedAssumedInformation, OnlyExact);
+}
+
+bool AA::getPotentialCopiesOfStoredValue(
+ Attributor &A, StoreInst &SI, SmallSetVector<Value *, 4> &PotentialCopies,
+ const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
+ bool OnlyExact) {
+ SmallSetVector<Instruction *, 4> PotentialValueOrigins;
+ return getPotentialCopiesOfMemoryValue</* IsLoad */ false>(
+ A, SI, PotentialCopies, PotentialValueOrigins, QueryingAA,
+ UsedAssumedInformation, OnlyExact);
+}
+
static bool isAssumedReadOnlyOrReadNone(Attributor &A, const IRPosition &IRP,
const AbstractAttribute &QueryingAA,
bool RequireReadNone, bool &IsKnown) {
@@ -449,6 +524,8 @@ isPotentiallyReachable(Attributor &A, const Instruction &FromI,
SmallVector<const Instruction *> Worklist;
Worklist.push_back(&FromI);
+ const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
+ QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
while (!Worklist.empty()) {
const Instruction *CurFromI = Worklist.pop_back_val();
if (!Visited.insert(CurFromI).second)
@@ -468,7 +545,8 @@ isPotentiallyReachable(Attributor &A, const Instruction &FromI,
<< *ToI << " [Intra]\n");
if (Result)
return true;
- continue;
+ if (NoRecurseAA.isAssumedNoRecurse())
+ continue;
}
// TODO: If we can go arbitrarily backwards we will eventually reach an
@@ -514,10 +592,10 @@ isPotentiallyReachable(Attributor &A, const Instruction &FromI,
return true;
};
- bool AllCallSitesKnown;
+ bool UsedAssumedInformation = false;
Result = !A.checkForAllCallSites(CheckCallSite, *FromFn,
/* RequireAllCallSites */ true,
- &QueryingAA, AllCallSitesKnown);
+ &QueryingAA, UsedAssumedInformation);
if (Result) {
LLVM_DEBUG(dbgs() << "[AA] stepping back to call sites from " << *CurFromI
<< " in @" << FromFn->getName()
@@ -631,7 +709,7 @@ Argument *IRPosition::getAssociatedArgument() const {
assert(ACS.getCalledFunction()->arg_size() > u &&
"ACS mapped into var-args arguments!");
- if (CBCandidateArg.hasValue()) {
+ if (CBCandidateArg) {
CBCandidateArg = nullptr;
break;
}
@@ -640,7 +718,7 @@ Argument *IRPosition::getAssociatedArgument() const {
}
// If we found a unique callback candidate argument, return it.
- if (CBCandidateArg.hasValue() && CBCandidateArg.getValue())
+ if (CBCandidateArg && CBCandidateArg.getValue())
return CBCandidateArg.getValue();
// If no callbacks were found, or none used the underlying call site operand
@@ -949,22 +1027,24 @@ Attributor::getAssumedConstant(const IRPosition &IRP,
bool &UsedAssumedInformation) {
// First check all callbacks provided by outside AAs. If any of them returns
// a non-null value that is different from the associated value, or None, we
- // assume it's simpliied.
+ // assume it's simplified.
for (auto &CB : SimplificationCallbacks.lookup(IRP)) {
Optional<Value *> SimplifiedV = CB(IRP, &AA, UsedAssumedInformation);
- if (!SimplifiedV.hasValue())
+ if (!SimplifiedV)
return llvm::None;
if (isa_and_nonnull<Constant>(*SimplifiedV))
return cast<Constant>(*SimplifiedV);
return nullptr;
}
+ if (auto *C = dyn_cast<Constant>(&IRP.getAssociatedValue()))
+ return C;
const auto &ValueSimplifyAA =
getAAFor<AAValueSimplify>(AA, IRP, DepClassTy::NONE);
Optional<Value *> SimplifiedV =
ValueSimplifyAA.getAssumedSimplifiedValue(*this);
bool IsKnown = ValueSimplifyAA.isAtFixpoint();
UsedAssumedInformation |= !IsKnown;
- if (!SimplifiedV.hasValue()) {
+ if (!SimplifiedV) {
recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL);
return llvm::None;
}
@@ -987,18 +1067,18 @@ Attributor::getAssumedSimplified(const IRPosition &IRP,
bool &UsedAssumedInformation) {
// First check all callbacks provided by outside AAs. If any of them returns
// a non-null value that is different from the associated value, or None, we
- // assume it's simpliied.
+ // assume it's simplified.
for (auto &CB : SimplificationCallbacks.lookup(IRP))
return CB(IRP, AA, UsedAssumedInformation);
- // If no high-level/outside simplification occured, use AAValueSimplify.
+ // If no high-level/outside simplification occurred, use AAValueSimplify.
const auto &ValueSimplifyAA =
getOrCreateAAFor<AAValueSimplify>(IRP, AA, DepClassTy::NONE);
Optional<Value *> SimplifiedV =
ValueSimplifyAA.getAssumedSimplifiedValue(*this);
bool IsKnown = ValueSimplifyAA.isAtFixpoint();
UsedAssumedInformation |= !IsKnown;
- if (!SimplifiedV.hasValue()) {
+ if (!SimplifiedV) {
if (AA)
recordDependence(ValueSimplifyAA, *AA, DepClassTy::OPTIONAL);
return llvm::None;
@@ -1017,7 +1097,7 @@ Attributor::getAssumedSimplified(const IRPosition &IRP,
Optional<Value *> Attributor::translateArgumentToCallSiteContent(
Optional<Value *> V, CallBase &CB, const AbstractAttribute &AA,
bool &UsedAssumedInformation) {
- if (!V.hasValue())
+ if (!V)
return V;
if (*V == nullptr || isa<Constant>(*V))
return V;
@@ -1078,6 +1158,19 @@ bool Attributor::isAssumedDead(const Use &U,
BasicBlock *IncomingBB = PHI->getIncomingBlock(U);
return isAssumedDead(*IncomingBB->getTerminator(), QueryingAA, FnLivenessAA,
UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
+ if (!CheckBBLivenessOnly && SI->getPointerOperand() != U.get()) {
+ const IRPosition IRP = IRPosition::inst(*SI);
+ const AAIsDead &IsDeadAA =
+ getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
+ if (IsDeadAA.isRemovableStore()) {
+ if (QueryingAA)
+ recordDependence(IsDeadAA, *QueryingAA, DepClass);
+ if (!IsDeadAA.isKnown(AAIsDead::IS_REMOVABLE))
+ UsedAssumedInformation = true;
+ return true;
+ }
+ }
}
return isAssumedDead(IRPosition::inst(*UserI), QueryingAA, FnLivenessAA,
@@ -1191,6 +1284,7 @@ bool Attributor::checkForAllUses(
function_ref<bool(const Use &, bool &)> Pred,
const AbstractAttribute &QueryingAA, const Value &V,
bool CheckBBLivenessOnly, DepClassTy LivenessDepClass,
+ bool IgnoreDroppableUses,
function_ref<bool(const Use &OldU, const Use &NewU)> EquivalentUseCB) {
// Check the trivial case first as it catches void values.
@@ -1231,7 +1325,7 @@ bool Attributor::checkForAllUses(
LLVM_DEBUG(dbgs() << "[Attributor] Dead use, skip!\n");
continue;
}
- if (U->getUser()->isDroppable()) {
+ if (IgnoreDroppableUses && U->getUser()->isDroppable()) {
LLVM_DEBUG(dbgs() << "[Attributor] Droppable user, skip!\n");
continue;
}
@@ -1241,9 +1335,9 @@ bool Attributor::checkForAllUses(
if (!Visited.insert(U).second)
continue;
SmallSetVector<Value *, 4> PotentialCopies;
- if (AA::getPotentialCopiesOfStoredValue(*this, *SI, PotentialCopies,
- QueryingAA,
- UsedAssumedInformation)) {
+ if (AA::getPotentialCopiesOfStoredValue(
+ *this, *SI, PotentialCopies, QueryingAA, UsedAssumedInformation,
+ /* OnlyExact */ true)) {
LLVM_DEBUG(dbgs() << "[Attributor] Value is stored, continue with "
<< PotentialCopies.size()
<< " potential copies instead!\n");
@@ -1277,7 +1371,7 @@ bool Attributor::checkForAllUses(
bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
const AbstractAttribute &QueryingAA,
bool RequireAllCallSites,
- bool &AllCallSitesKnown) {
+ bool &UsedAssumedInformation) {
// We can try to determine information from
// the call sites. However, this is only possible all call sites are known,
// hence the function has internal linkage.
@@ -1286,31 +1380,26 @@ bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
if (!AssociatedFunction) {
LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
<< "\n");
- AllCallSitesKnown = false;
return false;
}
return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
- &QueryingAA, AllCallSitesKnown);
+ &QueryingAA, UsedAssumedInformation);
}
bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
const Function &Fn,
bool RequireAllCallSites,
const AbstractAttribute *QueryingAA,
- bool &AllCallSitesKnown) {
+ bool &UsedAssumedInformation) {
if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
LLVM_DEBUG(
dbgs()
<< "[Attributor] Function " << Fn.getName()
<< " has no internal linkage, hence not all call sites are known\n");
- AllCallSitesKnown = false;
return false;
}
- // If we do not require all call sites we might not see all.
- AllCallSitesKnown = RequireAllCallSites;
-
SmallVector<const Use *, 8> Uses(make_pointer_range(Fn.uses()));
for (unsigned u = 0; u < Uses.size(); ++u) {
const Use &U = *Uses[u];
@@ -1322,15 +1411,13 @@ bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
dbgs() << "[Attributor] Check use: " << *U << " in " << *U.getUser()
<< "\n";
});
- bool UsedAssumedInformation = false;
if (isAssumedDead(U, QueryingAA, nullptr, UsedAssumedInformation,
/* CheckBBLivenessOnly */ true)) {
LLVM_DEBUG(dbgs() << "[Attributor] Dead use, skip!\n");
continue;
}
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
- if (CE->isCast() && CE->getType()->isPointerTy() &&
- CE->getType()->getPointerElementType()->isFunctionTy()) {
+ if (CE->isCast() && CE->getType()->isPointerTy()) {
LLVM_DEBUG(
dbgs() << "[Attributor] Use, is constant cast expression, add "
<< CE->getNumUses()
@@ -1477,30 +1564,24 @@ static bool checkForAllInstructionsImpl(
}
bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
+ const Function *Fn,
const AbstractAttribute &QueryingAA,
const ArrayRef<unsigned> &Opcodes,
bool &UsedAssumedInformation,
bool CheckBBLivenessOnly,
bool CheckPotentiallyDead) {
-
- const IRPosition &IRP = QueryingAA.getIRPosition();
// Since we need to provide instructions we have to have an exact definition.
- const Function *AssociatedFunction = IRP.getAssociatedFunction();
- if (!AssociatedFunction)
- return false;
-
- if (AssociatedFunction->isDeclaration())
+ if (!Fn || Fn->isDeclaration())
return false;
// TODO: use the function scope once we have call site AAReturnedValues.
- const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
+ const IRPosition &QueryIRP = IRPosition::function(*Fn);
const auto *LivenessAA =
(CheckBBLivenessOnly || CheckPotentiallyDead)
? nullptr
: &(getAAFor<AAIsDead>(QueryingAA, QueryIRP, DepClassTy::NONE));
- auto &OpcodeInstMap =
- InfoCache.getOpcodeInstMapForFunction(*AssociatedFunction);
+ auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
if (!checkForAllInstructionsImpl(this, OpcodeInstMap, Pred, &QueryingAA,
LivenessAA, Opcodes, UsedAssumedInformation,
CheckBBLivenessOnly, CheckPotentiallyDead))
@@ -1509,6 +1590,19 @@ bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
return true;
}
+bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
+ const AbstractAttribute &QueryingAA,
+ const ArrayRef<unsigned> &Opcodes,
+ bool &UsedAssumedInformation,
+ bool CheckBBLivenessOnly,
+ bool CheckPotentiallyDead) {
+ const IRPosition &IRP = QueryingAA.getIRPosition();
+ const Function *AssociatedFunction = IRP.getAssociatedFunction();
+ return checkForAllInstructions(Pred, AssociatedFunction, QueryingAA, Opcodes,
+ UsedAssumedInformation, CheckBBLivenessOnly,
+ CheckPotentiallyDead);
+}
+
bool Attributor::checkForAllReadWriteInstructions(
function_ref<bool(Instruction &)> Pred, AbstractAttribute &QueryingAA,
bool &UsedAssumedInformation) {
@@ -1547,11 +1641,8 @@ void Attributor::runTillFixpoint() {
// the abstract analysis.
unsigned IterationCounter = 1;
- unsigned MaxFixedPointIterations;
- if (MaxFixpointIterations)
- MaxFixedPointIterations = MaxFixpointIterations.getValue();
- else
- MaxFixedPointIterations = SetFixpointIterations;
+ unsigned MaxIterations =
+ Configuration.MaxFixpointIterations.value_or(SetFixpointIterations);
SmallVector<AbstractAttribute *, 32> ChangedAAs;
SetVector<AbstractAttribute *> Worklist, InvalidAAs;
@@ -1636,21 +1727,20 @@ void Attributor::runTillFixpoint() {
QueryAAsAwaitingUpdate.end());
QueryAAsAwaitingUpdate.clear();
- } while (!Worklist.empty() && (IterationCounter++ < MaxFixedPointIterations ||
- VerifyMaxFixpointIterations));
+ } while (!Worklist.empty() &&
+ (IterationCounter++ < MaxIterations || VerifyMaxFixpointIterations));
- if (IterationCounter > MaxFixedPointIterations && !Worklist.empty()) {
+ if (IterationCounter > MaxIterations && !Functions.empty()) {
auto Remark = [&](OptimizationRemarkMissed ORM) {
return ORM << "Attributor did not reach a fixpoint after "
- << ore::NV("Iterations", MaxFixedPointIterations)
- << " iterations.";
+ << ore::NV("Iterations", MaxIterations) << " iterations.";
};
- Function *F = Worklist.front()->getIRPosition().getAssociatedFunction();
+ Function *F = Functions.front();
emitRemark<OptimizationRemarkMissed>(F, "FixedPoint", Remark);
}
LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
- << IterationCounter << "/" << MaxFixpointIterations
+ << IterationCounter << "/" << MaxIterations
<< " iterations\n");
// Reset abstract arguments not settled in a sound fixpoint by now. This
@@ -1684,11 +1774,9 @@ void Attributor::runTillFixpoint() {
<< " abstract attributes.\n";
});
- if (VerifyMaxFixpointIterations &&
- IterationCounter != MaxFixedPointIterations) {
+ if (VerifyMaxFixpointIterations && IterationCounter != MaxIterations) {
errs() << "\n[Attributor] Fixpoint iteration done after: "
- << IterationCounter << "/" << MaxFixedPointIterations
- << " iterations\n";
+ << IterationCounter << "/" << MaxIterations << " iterations\n";
llvm_unreachable("The fixpoint was not reached with exactly the number of "
"specified iterations!");
}
@@ -1725,6 +1813,9 @@ ChangeStatus Attributor::manifestAttributes() {
if (!State.isValidState())
continue;
+ if (AA->getCtxI() && !isRunOn(*AA->getAnchorScope()))
+ continue;
+
// Skip dead code.
bool UsedAssumedInformation = false;
if (isAssumedDead(*AA, nullptr, UsedAssumedInformation,
@@ -1774,7 +1865,7 @@ ChangeStatus Attributor::manifestAttributes() {
void Attributor::identifyDeadInternalFunctions() {
// Early exit if we don't intend to delete functions.
- if (!DeleteFns)
+ if (!Configuration.DeleteFns)
return;
// Identify dead internal functions and delete them. This happens outside
@@ -1795,7 +1886,7 @@ void Attributor::identifyDeadInternalFunctions() {
if (!F)
continue;
- bool AllCallSitesKnown;
+ bool UsedAssumedInformation = false;
if (checkForAllCallSites(
[&](AbstractCallSite ACS) {
Function *Callee = ACS.getInstruction()->getFunction();
@@ -1803,7 +1894,7 @@ void Attributor::identifyDeadInternalFunctions() {
(Functions.count(Callee) && Callee->hasLocalLinkage() &&
!LiveInternalFns.count(Callee));
},
- *F, true, nullptr, AllCallSitesKnown)) {
+ *F, true, nullptr, UsedAssumedInformation)) {
continue;
}
@@ -1826,7 +1917,8 @@ ChangeStatus Attributor::cleanupIR() {
<< ToBeDeletedBlocks.size() << " blocks and "
<< ToBeDeletedInsts.size() << " instructions and "
<< ToBeChangedValues.size() << " values and "
- << ToBeChangedUses.size() << " uses. "
+ << ToBeChangedUses.size() << " uses. To insert "
+ << ToBeChangedToUnreachableInsts.size() << " unreachables."
<< "Preserve manifest added " << ManifestAddedBlocks.size()
<< " blocks\n");
@@ -1844,12 +1936,15 @@ ChangeStatus Attributor::cleanupIR() {
NewV = Entry.first;
} while (true);
+ Instruction *I = dyn_cast<Instruction>(U->getUser());
+ assert((!I || isRunOn(*I->getFunction())) &&
+ "Cannot replace an instruction outside the current SCC!");
+
// Do not replace uses in returns if the value is a must-tail call we will
// not delete.
- if (auto *RI = dyn_cast<ReturnInst>(U->getUser())) {
+ if (auto *RI = dyn_cast_or_null<ReturnInst>(I)) {
if (auto *CI = dyn_cast<CallInst>(OldV->stripPointerCasts()))
- if (CI->isMustTailCall() &&
- (!ToBeDeletedInsts.count(CI) || !isRunOn(*CI->getCaller())))
+ if (CI->isMustTailCall() && !ToBeDeletedInsts.count(CI))
return;
// If we rewrite a return and the new value is not an argument, strip the
// `returned` attribute as it is wrong now.
@@ -1859,8 +1954,8 @@ ChangeStatus Attributor::cleanupIR() {
}
// Do not perform call graph altering changes outside the SCC.
- if (auto *CB = dyn_cast<CallBase>(U->getUser()))
- if (CB->isCallee(U) && !isRunOn(*CB->getCaller()))
+ if (auto *CB = dyn_cast_or_null<CallBase>(I))
+ if (CB->isCallee(U))
return;
LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
@@ -1908,8 +2003,12 @@ ChangeStatus Attributor::cleanupIR() {
for (auto &U : OldV->uses())
if (Entry.second || !U.getUser()->isDroppable())
Uses.push_back(&U);
- for (Use *U : Uses)
+ for (Use *U : Uses) {
+ if (auto *I = dyn_cast<Instruction>(U->getUser()))
+ if (!isRunOn(*I->getFunction()))
+ continue;
ReplaceUse(U, NewV);
+ }
}
for (auto &V : InvokeWithDeadSuccessor)
@@ -1940,15 +2039,15 @@ ChangeStatus Attributor::cleanupIR() {
}
}
for (Instruction *I : TerminatorsToFold) {
- if (!isRunOn(*I->getFunction()))
- continue;
+ assert(isRunOn(*I->getFunction()) &&
+ "Cannot replace a terminator outside the current SCC!");
CGModifiedFunctions.insert(I->getFunction());
ConstantFoldTerminator(I->getParent());
}
for (auto &V : ToBeChangedToUnreachableInsts)
if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
- if (!isRunOn(*I->getFunction()))
- continue;
+ assert(isRunOn(*I->getFunction()) &&
+ "Cannot replace an instruction outside the current SCC!");
CGModifiedFunctions.insert(I->getFunction());
changeToUnreachable(I);
}
@@ -1956,10 +2055,10 @@ ChangeStatus Attributor::cleanupIR() {
for (auto &V : ToBeDeletedInsts) {
if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
if (auto *CB = dyn_cast<CallBase>(I)) {
- if (!isRunOn(*I->getFunction()))
- continue;
+ assert(isRunOn(*I->getFunction()) &&
+ "Cannot delete an instruction outside the current SCC!");
if (!isa<IntrinsicInst>(CB))
- CGUpdater.removeCallSite(*CB);
+ Configuration.CGUpdater.removeCallSite(*CB);
}
I->dropDroppableUses();
CGModifiedFunctions.insert(I->getFunction());
@@ -1972,9 +2071,7 @@ ChangeStatus Attributor::cleanupIR() {
}
}
- llvm::erase_if(DeadInsts, [&](WeakTrackingVH I) {
- return !I || !isRunOn(*cast<Instruction>(I)->getFunction());
- });
+ llvm::erase_if(DeadInsts, [&](WeakTrackingVH I) { return !I; });
LLVM_DEBUG({
dbgs() << "[Attributor] DeadInsts size: " << DeadInsts.size() << "\n";
@@ -2010,12 +2107,12 @@ ChangeStatus Attributor::cleanupIR() {
for (Function *Fn : CGModifiedFunctions)
if (!ToBeDeletedFunctions.count(Fn) && Functions.count(Fn))
- CGUpdater.reanalyzeFunction(*Fn);
+ Configuration.CGUpdater.reanalyzeFunction(*Fn);
for (Function *Fn : ToBeDeletedFunctions) {
if (!Functions.count(Fn))
continue;
- CGUpdater.removeFunction(*Fn);
+ Configuration.CGUpdater.removeFunction(*Fn);
}
if (!ToBeChangedUses.empty())
@@ -2254,7 +2351,7 @@ bool Attributor::internalizeFunctions(SmallPtrSetImpl<Function *> &FnSet,
bool Attributor::isValidFunctionSignatureRewrite(
Argument &Arg, ArrayRef<Type *> ReplacementTypes) {
- if (!RewriteSignatures)
+ if (!Configuration.RewriteSignatures)
return false;
Function *Fn = Arg.getParent();
@@ -2290,9 +2387,9 @@ bool Attributor::isValidFunctionSignatureRewrite(
}
// Avoid callbacks for now.
- bool AllCallSitesKnown;
+ bool UsedAssumedInformation = false;
if (!checkForAllCallSites(CallSiteCanBeChanged, *Fn, true, nullptr,
- AllCallSitesKnown)) {
+ UsedAssumedInformation)) {
LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite all call sites\n");
return false;
}
@@ -2305,7 +2402,6 @@ bool Attributor::isValidFunctionSignatureRewrite(
// Forbid must-tail calls for now.
// TODO:
- bool UsedAssumedInformation = false;
auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
if (!checkForAllInstructionsImpl(nullptr, OpcodeInstMap, InstPred, nullptr,
nullptr, {Instruction::Call},
@@ -2370,7 +2466,7 @@ bool Attributor::shouldSeedAttribute(AbstractAttribute &AA) {
}
ChangeStatus Attributor::rewriteFunctionSignatures(
- SmallPtrSetImpl<Function *> &ModifiedFns) {
+ SmallSetVector<Function *, 8> &ModifiedFns) {
ChangeStatus Changed = ChangeStatus::UNCHANGED;
for (auto &It : ArgumentReplacementMap) {
@@ -2403,6 +2499,12 @@ ChangeStatus Attributor::rewriteFunctionSignatures(
}
}
+ uint64_t LargestVectorWidth = 0;
+ for (auto *I : NewArgumentTypes)
+ if (auto *VT = dyn_cast<llvm::VectorType>(I))
+ LargestVectorWidth = std::max(
+ LargestVectorWidth, VT->getPrimitiveSizeInBits().getKnownMinSize());
+
FunctionType *OldFnTy = OldFn->getFunctionType();
Type *RetTy = OldFnTy->getReturnType();
@@ -2432,6 +2534,7 @@ ChangeStatus Attributor::rewriteFunctionSignatures(
NewFn->setAttributes(AttributeList::get(
Ctx, OldFnAttributeList.getFnAttrs(), OldFnAttributeList.getRetAttrs(),
NewArgumentAttributes));
+ AttributeFuncs::updateMinLegalVectorWidthAttr(*NewFn, LargestVectorWidth);
// Since we have now created the new function, splice the body of the old
// function right into the new function, leaving the old rotting hulk of the
@@ -2509,14 +2612,17 @@ ChangeStatus Attributor::rewriteFunctionSignatures(
Ctx, OldCallAttributeList.getFnAttrs(),
OldCallAttributeList.getRetAttrs(), NewArgOperandAttributes));
+ AttributeFuncs::updateMinLegalVectorWidthAttr(*NewCB->getCaller(),
+ LargestVectorWidth);
+
CallSitePairs.push_back({OldCB, NewCB});
return true;
};
// Use the CallSiteReplacementCreator to create replacement call sites.
- bool AllCallSitesKnown;
+ bool UsedAssumedInformation = false;
bool Success = checkForAllCallSites(CallSiteReplacementCreator, *OldFn,
- true, nullptr, AllCallSitesKnown);
+ true, nullptr, UsedAssumedInformation);
(void)Success;
assert(Success && "Assumed call site replacement to succeed!");
@@ -2529,6 +2635,9 @@ ChangeStatus Attributor::rewriteFunctionSignatures(
ARIs[OldArgNum]) {
if (ARI->CalleeRepairCB)
ARI->CalleeRepairCB(*ARI, *NewFn, NewFnArgIt);
+ if (ARI->ReplacementTypes.empty())
+ OldFnArgIt->replaceAllUsesWith(
+ PoisonValue::get(OldFnArgIt->getType()));
NewFnArgIt += ARI->ReplacementTypes.size();
} else {
NewFnArgIt->takeName(&*OldFnArgIt);
@@ -2544,17 +2653,17 @@ ChangeStatus Attributor::rewriteFunctionSignatures(
assert(OldCB.getType() == NewCB.getType() &&
"Cannot handle call sites with different types!");
ModifiedFns.insert(OldCB.getFunction());
- CGUpdater.replaceCallSite(OldCB, NewCB);
+ Configuration.CGUpdater.replaceCallSite(OldCB, NewCB);
OldCB.replaceAllUsesWith(&NewCB);
OldCB.eraseFromParent();
}
// Replace the function in the call graph (if any).
- CGUpdater.replaceFunctionWith(*OldFn, *NewFn);
+ Configuration.CGUpdater.replaceFunctionWith(*OldFn, *NewFn);
// If the old function was modified and needed to be reanalyzed, the new one
// does now.
- if (ModifiedFns.erase(OldFn))
+ if (ModifiedFns.remove(OldFn))
ModifiedFns.insert(NewFn);
Changed = ChangeStatus::CHANGED;
@@ -2574,6 +2683,30 @@ void InformationCache::initializeInformationCache(const Function &CF,
// queried by abstract attributes during their initialization or update.
// This has to happen before we create attributes.
+ DenseMap<const Value *, Optional<short>> AssumeUsesMap;
+
+ // Add \p V to the assume uses map which tracks the number of uses outside of
+ // "visited" assumes. If no outside uses are left the value is added to the
+ // assume only use vector.
+ auto AddToAssumeUsesMap = [&](const Value &V) -> void {
+ SmallVector<const Instruction *> Worklist;
+ if (auto *I = dyn_cast<Instruction>(&V))
+ Worklist.push_back(I);
+ while (!Worklist.empty()) {
+ const Instruction *I = Worklist.pop_back_val();
+ Optional<short> &NumUses = AssumeUsesMap[I];
+ if (!NumUses)
+ NumUses = I->getNumUses();
+ NumUses = NumUses.getValue() - /* this assume */ 1;
+ if (NumUses.getValue() != 0)
+ continue;
+ AssumeOnlyValues.insert(I);
+ for (const Value *Op : I->operands())
+ if (auto *OpI = dyn_cast<Instruction>(Op))
+ Worklist.push_back(OpI);
+ }
+ };
+
for (Instruction &I : instructions(&F)) {
bool IsInterestingOpcode = false;
@@ -2594,6 +2727,7 @@ void InformationCache::initializeInformationCache(const Function &CF,
// For `must-tail` calls we remember the caller and callee.
if (auto *Assume = dyn_cast<AssumeInst>(&I)) {
fillMapFromAssume(*Assume, KnowledgeMap);
+ AddToAssumeUsesMap(*Assume->getArgOperand(0));
} else if (cast<CallInst>(I).isMustTailCall()) {
FI.ContainsMustTailCall = true;
if (const Function *Callee = cast<CallInst>(I).getCalledFunction())
@@ -2742,7 +2876,8 @@ void Attributor::identifyDefaultAbstractAttributes(Function &F) {
getOrCreateAAFor<AAIsDead>(RetPos);
// Every function might be simplified.
- getOrCreateAAFor<AAValueSimplify>(RetPos);
+ bool UsedAssumedInformation = false;
+ getAssumedSimplified(RetPos, nullptr, UsedAssumedInformation);
// Every returned value might be marked noundef.
getOrCreateAAFor<AANoUndef>(RetPos);
@@ -2834,7 +2969,8 @@ void Attributor::identifyDefaultAbstractAttributes(Function &F) {
if (!Callee->getReturnType()->isVoidTy() && !CB.use_empty()) {
IRPosition CBRetPos = IRPosition::callsite_returned(CB);
- getOrCreateAAFor<AAValueSimplify>(CBRetPos);
+ bool UsedAssumedInformation = false;
+ getAssumedSimplified(CBRetPos, nullptr, UsedAssumedInformation);
}
for (int I = 0, E = CB.arg_size(); I < E; ++I) {
@@ -2897,10 +3033,15 @@ void Attributor::identifyDefaultAbstractAttributes(Function &F) {
getOrCreateAAFor<AAAlign>(
IRPosition::value(*cast<LoadInst>(I).getPointerOperand()));
if (SimplifyAllLoads)
- getOrCreateAAFor<AAValueSimplify>(IRPosition::value(I));
- } else
- getOrCreateAAFor<AAAlign>(
- IRPosition::value(*cast<StoreInst>(I).getPointerOperand()));
+ getAssumedSimplified(IRPosition::value(I), nullptr,
+ UsedAssumedInformation);
+ } else {
+ auto &SI = cast<StoreInst>(I);
+ getOrCreateAAFor<AAIsDead>(IRPosition::inst(I));
+ getAssumedSimplified(IRPosition::value(*SI.getValueOperand()), nullptr,
+ UsedAssumedInformation);
+ getOrCreateAAFor<AAAlign>(IRPosition::value(*SI.getPointerOperand()));
+ }
return true;
};
Success = checkForAllInstructionsImpl(
@@ -2975,8 +3116,8 @@ raw_ostream &llvm::operator<<(raw_ostream &OS,
if (!S.isValidState())
OS << "full-set";
else {
- for (auto &it : S.getAssumedSet())
- OS << it << ", ";
+ for (auto &It : S.getAssumedSet())
+ OS << It << ", ";
if (S.undefIsContained())
OS << "undef ";
}
@@ -3018,8 +3159,12 @@ raw_ostream &llvm::operator<<(raw_ostream &OS,
OS << " [" << Acc.getKind() << "] " << *Acc.getRemoteInst();
if (Acc.getLocalInst() != Acc.getRemoteInst())
OS << " via " << *Acc.getLocalInst();
- if (Acc.getContent().hasValue())
- OS << " [" << *Acc.getContent() << "]";
+ if (Acc.getContent()) {
+ if (*Acc.getContent())
+ OS << " [" << **Acc.getContent() << "]";
+ else
+ OS << " [ <unknown> ]";
+ }
return OS;
}
///}
@@ -3032,7 +3177,7 @@ static bool runAttributorOnFunctions(InformationCache &InfoCache,
SetVector<Function *> &Functions,
AnalysisGetter &AG,
CallGraphUpdater &CGUpdater,
- bool DeleteFns) {
+ bool DeleteFns, bool IsModulePass) {
if (Functions.empty())
return false;
@@ -3045,8 +3190,10 @@ static bool runAttributorOnFunctions(InformationCache &InfoCache,
// Create an Attributor and initially empty information cache that is filled
// while we identify default attribute opportunities.
- Attributor A(Functions, InfoCache, CGUpdater, /* Allowed */ nullptr,
- DeleteFns);
+ AttributorConfig AC(CGUpdater);
+ AC.IsModulePass = IsModulePass;
+ AC.DeleteFns = DeleteFns;
+ Attributor A(Functions, InfoCache, AC);
// Create shallow wrappers for all functions that are not IPO amendable
if (AllowShallowWrappers)
@@ -3151,7 +3298,7 @@ PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
BumpPtrAllocator Allocator;
InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
- /* DeleteFns */ true)) {
+ /* DeleteFns */ true, /* IsModulePass */ true)) {
// FIXME: Think about passes we will preserve and add them here.
return PreservedAnalyses::none();
}
@@ -3179,7 +3326,8 @@ PreservedAnalyses AttributorCGSCCPass::run(LazyCallGraph::SCC &C,
BumpPtrAllocator Allocator;
InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
- /* DeleteFns */ false)) {
+ /* DeleteFns */ false,
+ /* IsModulePass */ false)) {
// FIXME: Think about passes we will preserve and add them here.
PreservedAnalyses PA;
PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
@@ -3255,7 +3403,8 @@ struct AttributorLegacyPass : public ModulePass {
BumpPtrAllocator Allocator;
InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
return runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
- /* DeleteFns*/ true);
+ /* DeleteFns*/ true,
+ /* IsModulePass */ true);
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
@@ -3292,7 +3441,8 @@ struct AttributorCGSCCLegacyPass : public CallGraphSCCPass {
BumpPtrAllocator Allocator;
InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
return runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
- /* DeleteFns */ false);
+ /* DeleteFns */ false,
+ /* IsModulePass */ false);
}
void getAnalysisUsage(AnalysisUsage &AU) const override {