summaryrefslogtreecommitdiff
path: root/llvm/lib/Transforms/IPO
diff options
context:
space:
mode:
authorDimitry Andric <dim@FreeBSD.org>2022-07-24 15:03:44 +0000
committerDimitry Andric <dim@FreeBSD.org>2022-07-24 15:03:44 +0000
commit4b4fe385e49bd883fd183b5f21c1ea486c722e61 (patch)
treec3d8fdb355c9c73e57723718c22103aaf7d15aa6 /llvm/lib/Transforms/IPO
parent1f917f69ff07f09b6dbb670971f57f8efe718b84 (diff)
Diffstat (limited to 'llvm/lib/Transforms/IPO')
-rw-r--r--llvm/lib/Transforms/IPO/Attributor.cpp278
-rw-r--r--llvm/lib/Transforms/IPO/AttributorAttributes.cpp2074
-rw-r--r--llvm/lib/Transforms/IPO/FunctionImport.cpp45
-rw-r--r--llvm/lib/Transforms/IPO/GlobalOpt.cpp2
-rw-r--r--llvm/lib/Transforms/IPO/IPO.cpp4
-rw-r--r--llvm/lib/Transforms/IPO/Internalize.cpp36
-rw-r--r--llvm/lib/Transforms/IPO/LowerTypeTests.cpp42
-rw-r--r--llvm/lib/Transforms/IPO/OpenMPOpt.cpp2
-rw-r--r--llvm/lib/Transforms/IPO/PassManagerBuilder.cpp106
-rw-r--r--llvm/lib/Transforms/IPO/SampleProfile.cpp102
-rw-r--r--llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp70
11 files changed, 1492 insertions, 1269 deletions
diff --git a/llvm/lib/Transforms/IPO/Attributor.cpp b/llvm/lib/Transforms/IPO/Attributor.cpp
index e5ff98e4f73f..37c773bd47d6 100644
--- a/llvm/lib/Transforms/IPO/Attributor.cpp
+++ b/llvm/lib/Transforms/IPO/Attributor.cpp
@@ -326,7 +326,7 @@ static bool getPotentialCopiesOfMemoryValue(
<< " (only exact: " << OnlyExact << ")\n";);
Value &Ptr = *I.getPointerOperand();
- SmallVector<Value *, 8> Objects;
+ SmallSetVector<Value *, 8> Objects;
if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, QueryingAA, &I,
UsedAssumedInformation)) {
LLVM_DEBUG(
@@ -343,6 +343,7 @@ static bool getPotentialCopiesOfMemoryValue(
const auto *TLI =
A.getInfoCache().getTargetLibraryInfoForFunction(*I.getFunction());
+ LLVM_DEBUG(dbgs() << "Visit " << Objects.size() << " objects:\n");
for (Value *Obj : Objects) {
LLVM_DEBUG(dbgs() << "Visit underlying object " << *Obj << "\n");
if (isa<UndefValue>(Obj))
@@ -352,8 +353,8 @@ static bool getPotentialCopiesOfMemoryValue(
// be OK. We do not try to optimize the latter.
if (!NullPointerIsDefined(I.getFunction(),
Ptr.getType()->getPointerAddressSpace()) &&
- A.getAssumedSimplified(Ptr, QueryingAA, UsedAssumedInformation) ==
- Obj)
+ A.getAssumedSimplified(Ptr, QueryingAA, UsedAssumedInformation,
+ AA::Interprocedural) == Obj)
continue;
LLVM_DEBUG(
dbgs() << "Underlying object is a valid nullptr, giving up.\n";);
@@ -375,25 +376,37 @@ static bool getPotentialCopiesOfMemoryValue(
return false;
}
- if (IsLoad) {
- Value *InitialValue = AA::getInitialValueForObj(*Obj, *I.getType(), TLI);
- if (!InitialValue)
- return false;
- NewCopies.push_back(InitialValue);
- NewCopyOrigins.push_back(nullptr);
- }
+ bool NullOnly = true;
+ bool NullRequired = false;
+ auto CheckForNullOnlyAndUndef = [&](Optional<Value *> V, bool IsExact) {
+ if (!V || *V == nullptr)
+ NullOnly = false;
+ else if (isa<UndefValue>(*V))
+ /* No op */;
+ else if (isa<Constant>(*V) && cast<Constant>(*V)->isNullValue())
+ NullRequired = !IsExact;
+ else
+ NullOnly = false;
+ };
auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
if ((IsLoad && !Acc.isWrite()) || (!IsLoad && !Acc.isRead()))
return true;
if (IsLoad && Acc.isWrittenValueYetUndetermined())
return true;
- if (OnlyExact && !IsExact &&
+ CheckForNullOnlyAndUndef(Acc.getContent(), IsExact);
+ if (OnlyExact && !IsExact && !NullOnly &&
!isa_and_nonnull<UndefValue>(Acc.getWrittenValue())) {
LLVM_DEBUG(dbgs() << "Non exact access " << *Acc.getRemoteInst()
<< ", abort!\n");
return false;
}
+ if (NullRequired && !NullOnly) {
+ LLVM_DEBUG(dbgs() << "Required all `null` accesses due to non exact "
+ "one, however found non-null one: "
+ << *Acc.getRemoteInst() << ", abort!\n");
+ return false;
+ }
if (IsLoad) {
assert(isa<LoadInst>(I) && "Expected load or store instruction only!");
if (!Acc.isWrittenValueUnknown()) {
@@ -424,15 +437,36 @@ static bool getPotentialCopiesOfMemoryValue(
return true;
};
+ // If the value has been written to we don't need the initial value of the
+ // object.
+ bool HasBeenWrittenTo = false;
+
auto &PI = A.getAAFor<AAPointerInfo>(QueryingAA, IRPosition::value(*Obj),
DepClassTy::NONE);
- if (!PI.forallInterferingAccesses(A, QueryingAA, I, CheckAccess)) {
+ if (!PI.forallInterferingAccesses(A, QueryingAA, I, CheckAccess,
+ HasBeenWrittenTo)) {
LLVM_DEBUG(
dbgs()
<< "Failed to verify all interfering accesses for underlying object: "
<< *Obj << "\n");
return false;
}
+
+ if (IsLoad && !HasBeenWrittenTo) {
+ Value *InitialValue = AA::getInitialValueForObj(*Obj, *I.getType(), TLI);
+ if (!InitialValue)
+ return false;
+ CheckForNullOnlyAndUndef(InitialValue, /* IsExact */ true);
+ if (NullRequired && !NullOnly) {
+ LLVM_DEBUG(dbgs() << "Non exact access but initial value that is not "
+ "null or undef, abort!\n");
+ return false;
+ }
+
+ NewCopies.push_back(InitialValue);
+ NewCopyOrigins.push_back(nullptr);
+ }
+
PIs.push_back(&PI);
}
@@ -520,12 +554,21 @@ isPotentiallyReachable(Attributor &A, const Instruction &FromI,
<< " from " << FromI << " [GBCB: " << bool(GoBackwardsCB)
<< "]\n");
+ // TODO: If we can go arbitrarily backwards we will eventually reach an
+ // entry point that can reach ToI. Only once this takes a set of blocks
+ // through which we cannot go, or once we track internal functions not
+ // accessible from the outside, it makes sense to perform backwards analysis
+ // in the absence of a GoBackwardsCB.
+ if (!GoBackwardsCB) {
+ LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
+ << " is not checked backwards, abort\n");
+ return true;
+ }
+
SmallPtrSet<const Instruction *, 8> Visited;
SmallVector<const Instruction *> Worklist;
Worklist.push_back(&FromI);
- const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
- QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
while (!Worklist.empty()) {
const Instruction *CurFromI = Worklist.pop_back_val();
if (!Visited.insert(CurFromI).second)
@@ -545,26 +588,13 @@ isPotentiallyReachable(Attributor &A, const Instruction &FromI,
<< *ToI << " [Intra]\n");
if (Result)
return true;
- if (NoRecurseAA.isAssumedNoRecurse())
- continue;
- }
-
- // TODO: If we can go arbitrarily backwards we will eventually reach an
- // entry point that can reach ToI. Only once this takes a set of blocks
- // through which we cannot go, or once we track internal functions not
- // accessible from the outside, it makes sense to perform backwards analysis
- // in the absence of a GoBackwardsCB.
- if (!GoBackwardsCB) {
- LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from "
- << *CurFromI << " is not checked backwards, abort\n");
- return true;
}
// Check if the current instruction is already known to reach the ToFn.
const auto &FnReachabilityAA = A.getAAFor<AAFunctionReachability>(
QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
bool Result = FnReachabilityAA.instructionCanReach(
- A, *CurFromI, ToFn, /* UseBackwards */ false);
+ A, *CurFromI, ToFn);
LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " in @" << FromFn->getName()
<< " " << (Result ? "can potentially " : "cannot ")
<< "reach @" << ToFn.getName() << " [FromFn]\n");
@@ -1038,60 +1068,74 @@ Attributor::getAssumedConstant(const IRPosition &IRP,
}
if (auto *C = dyn_cast<Constant>(&IRP.getAssociatedValue()))
return C;
- const auto &ValueSimplifyAA =
- getAAFor<AAValueSimplify>(AA, IRP, DepClassTy::NONE);
- Optional<Value *> SimplifiedV =
- ValueSimplifyAA.getAssumedSimplifiedValue(*this);
- bool IsKnown = ValueSimplifyAA.isAtFixpoint();
- UsedAssumedInformation |= !IsKnown;
- if (!SimplifiedV) {
- recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL);
- return llvm::None;
- }
- if (isa_and_nonnull<UndefValue>(SimplifiedV.value())) {
- recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL);
- return UndefValue::get(IRP.getAssociatedType());
+ SmallVector<AA::ValueAndContext> Values;
+ if (getAssumedSimplifiedValues(IRP, &AA, Values,
+ AA::ValueScope::Interprocedural,
+ UsedAssumedInformation)) {
+ if (Values.empty())
+ return llvm::None;
+ if (auto *C = dyn_cast_or_null<Constant>(
+ AAPotentialValues::getSingleValue(*this, AA, IRP, Values)))
+ return C;
}
- Constant *CI = dyn_cast_or_null<Constant>(SimplifiedV.value());
- if (CI)
- CI = dyn_cast_or_null<Constant>(
- AA::getWithType(*CI, *IRP.getAssociatedType()));
- if (CI)
- recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL);
- return CI;
+ return nullptr;
}
-Optional<Value *>
-Attributor::getAssumedSimplified(const IRPosition &IRP,
- const AbstractAttribute *AA,
- bool &UsedAssumedInformation) {
+Optional<Value *> Attributor::getAssumedSimplified(const IRPosition &IRP,
+ const AbstractAttribute *AA,
+ bool &UsedAssumedInformation,
+ AA::ValueScope S) {
// First check all callbacks provided by outside AAs. If any of them returns
// a non-null value that is different from the associated value, or None, we
// assume it's simplified.
for (auto &CB : SimplificationCallbacks.lookup(IRP))
return CB(IRP, AA, UsedAssumedInformation);
- // If no high-level/outside simplification occurred, use AAValueSimplify.
- const auto &ValueSimplifyAA =
- getOrCreateAAFor<AAValueSimplify>(IRP, AA, DepClassTy::NONE);
- Optional<Value *> SimplifiedV =
- ValueSimplifyAA.getAssumedSimplifiedValue(*this);
- bool IsKnown = ValueSimplifyAA.isAtFixpoint();
- UsedAssumedInformation |= !IsKnown;
- if (!SimplifiedV) {
- if (AA)
- recordDependence(ValueSimplifyAA, *AA, DepClassTy::OPTIONAL);
+ SmallVector<AA::ValueAndContext> Values;
+ if (!getAssumedSimplifiedValues(IRP, AA, Values, S, UsedAssumedInformation))
+ return &IRP.getAssociatedValue();
+ if (Values.empty())
return llvm::None;
+ if (AA)
+ if (Value *V = AAPotentialValues::getSingleValue(*this, *AA, IRP, Values))
+ return V;
+ if (IRP.getPositionKind() == IRPosition::IRP_RETURNED ||
+ IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED)
+ return nullptr;
+ return &IRP.getAssociatedValue();
+}
+
+bool Attributor::getAssumedSimplifiedValues(
+ const IRPosition &IRP, const AbstractAttribute *AA,
+ SmallVectorImpl<AA::ValueAndContext> &Values, AA::ValueScope S,
+ bool &UsedAssumedInformation) {
+ // First check all callbacks provided by outside AAs. If any of them returns
+ // a non-null value that is different from the associated value, or None, we
+ // assume it's simplified.
+ const auto &SimplificationCBs = SimplificationCallbacks.lookup(IRP);
+ for (auto &CB : SimplificationCBs) {
+ Optional<Value *> CBResult = CB(IRP, AA, UsedAssumedInformation);
+ if (!CBResult.has_value())
+ continue;
+ Value *V = CBResult.value();
+ if (!V)
+ return false;
+ if ((S & AA::ValueScope::Interprocedural) ||
+ AA::isValidInScope(*V, IRP.getAnchorScope()))
+ Values.push_back(AA::ValueAndContext{*V, nullptr});
+ else
+ return false;
}
- if (*SimplifiedV == nullptr)
- return const_cast<Value *>(&IRP.getAssociatedValue());
- if (Value *SimpleV =
- AA::getWithType(**SimplifiedV, *IRP.getAssociatedType())) {
- if (AA)
- recordDependence(ValueSimplifyAA, *AA, DepClassTy::OPTIONAL);
- return SimpleV;
- }
- return const_cast<Value *>(&IRP.getAssociatedValue());
+ if (!SimplificationCBs.empty())
+ return true;
+
+ // If no high-level/outside simplification occurred, use AAPotentialValues.
+ const auto &PotentialValuesAA =
+ getOrCreateAAFor<AAPotentialValues>(IRP, AA, DepClassTy::OPTIONAL);
+ if (!PotentialValuesAA.getAssumedSimplifiedValues(*this, Values, S))
+ return false;
+ UsedAssumedInformation |= !PotentialValuesAA.isAtFixpoint();
+ return true;
}
Optional<Value *> Attributor::translateArgumentToCallSiteContent(
@@ -1106,7 +1150,7 @@ Optional<Value *> Attributor::translateArgumentToCallSiteContent(
if (!Arg->hasPointeeInMemoryValueAttr())
return getAssumedSimplified(
IRPosition::callsite_argument(CB, Arg->getArgNo()), AA,
- UsedAssumedInformation);
+ UsedAssumedInformation, AA::Intraprocedural);
return nullptr;
}
@@ -1295,8 +1339,21 @@ bool Attributor::checkForAllUses(
SmallVector<const Use *, 16> Worklist;
SmallPtrSet<const Use *, 16> Visited;
- for (const Use &U : V.uses())
- Worklist.push_back(&U);
+ auto AddUsers = [&](const Value &V, const Use *OldUse) {
+ for (const Use &UU : V.uses()) {
+ if (OldUse && EquivalentUseCB && !EquivalentUseCB(*OldUse, UU)) {
+ LLVM_DEBUG(dbgs() << "[Attributor] Potential copy was "
+ "rejected by the equivalence call back: "
+ << *UU << "!\n");
+ return false;
+ }
+
+ Worklist.push_back(&UU);
+ }
+ return true;
+ };
+
+ AddUsers(V, /* OldUse */ nullptr);
LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
<< " initial uses to check\n");
@@ -1342,15 +1399,8 @@ bool Attributor::checkForAllUses(
<< PotentialCopies.size()
<< " potential copies instead!\n");
for (Value *PotentialCopy : PotentialCopies)
- for (const Use &CopyUse : PotentialCopy->uses()) {
- if (EquivalentUseCB && !EquivalentUseCB(*U, CopyUse)) {
- LLVM_DEBUG(dbgs() << "[Attributor] Potential copy was "
- "rejected by the equivalence call back: "
- << *CopyUse << "!\n");
- return false;
- }
- Worklist.push_back(&CopyUse);
- }
+ if (!AddUsers(*PotentialCopy, U))
+ return false;
continue;
}
}
@@ -1361,8 +1411,25 @@ bool Attributor::checkForAllUses(
return false;
if (!Follow)
continue;
- for (const Use &UU : U->getUser()->uses())
- Worklist.push_back(&UU);
+
+ User &Usr = *U->getUser();
+ AddUsers(Usr, /* OldUse */ nullptr);
+
+ auto *RI = dyn_cast<ReturnInst>(&Usr);
+ if (!RI)
+ continue;
+
+ Function &F = *RI->getFunction();
+ auto CallSitePred = [&](AbstractCallSite ACS) {
+ return AddUsers(*ACS.getInstruction(), U);
+ };
+ if (!checkForAllCallSites(CallSitePred, F, /* RequireAllCallSites */ true,
+ &QueryingAA, UsedAssumedInformation)) {
+ LLVM_DEBUG(dbgs() << "[Attributor] Could not follow return instruction "
+ "to all call sites: "
+ << *RI << "\n");
+ return false;
+ }
}
return true;
@@ -1918,7 +1985,8 @@ ChangeStatus Attributor::cleanupIR() {
<< ToBeDeletedInsts.size() << " instructions and "
<< ToBeChangedValues.size() << " values and "
<< ToBeChangedUses.size() << " uses. To insert "
- << ToBeChangedToUnreachableInsts.size() << " unreachables."
+ << ToBeChangedToUnreachableInsts.size()
+ << " unreachables.\n"
<< "Preserve manifest added " << ManifestAddedBlocks.size()
<< " blocks\n");
@@ -2046,6 +2114,8 @@ ChangeStatus Attributor::cleanupIR() {
}
for (auto &V : ToBeChangedToUnreachableInsts)
if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
+ LLVM_DEBUG(dbgs() << "[Attributor] Change to unreachable: " << *I
+ << "\n");
assert(isRunOn(*I->getFunction()) &&
"Cannot replace an instruction outside the current SCC!");
CGModifiedFunctions.insert(I->getFunction());
@@ -2877,7 +2947,8 @@ void Attributor::identifyDefaultAbstractAttributes(Function &F) {
// Every function might be simplified.
bool UsedAssumedInformation = false;
- getAssumedSimplified(RetPos, nullptr, UsedAssumedInformation);
+ getAssumedSimplified(RetPos, nullptr, UsedAssumedInformation,
+ AA::Intraprocedural);
// Every returned value might be marked noundef.
getOrCreateAAFor<AANoUndef>(RetPos);
@@ -2906,7 +2977,8 @@ void Attributor::identifyDefaultAbstractAttributes(Function &F) {
// interface though as outside AAs can register custom simplification
// callbacks.
bool UsedAssumedInformation = false;
- getAssumedSimplified(ArgPos, /* AA */ nullptr, UsedAssumedInformation);
+ getAssumedSimplified(ArgPos, /* AA */ nullptr, UsedAssumedInformation,
+ AA::Intraprocedural);
// Every argument might be dead.
getOrCreateAAFor<AAIsDead>(ArgPos);
@@ -2970,7 +3042,8 @@ void Attributor::identifyDefaultAbstractAttributes(Function &F) {
IRPosition CBRetPos = IRPosition::callsite_returned(CB);
bool UsedAssumedInformation = false;
- getAssumedSimplified(CBRetPos, nullptr, UsedAssumedInformation);
+ getAssumedSimplified(CBRetPos, nullptr, UsedAssumedInformation,
+ AA::Intraprocedural);
}
for (int I = 0, E = CB.arg_size(); I < E; ++I) {
@@ -2984,7 +3057,8 @@ void Attributor::identifyDefaultAbstractAttributes(Function &F) {
// Attributor interface though as outside AAs can register custom
// simplification callbacks.
bool UsedAssumedInformation = false;
- getAssumedSimplified(CBArgPos, /* AA */ nullptr, UsedAssumedInformation);
+ getAssumedSimplified(CBArgPos, /* AA */ nullptr, UsedAssumedInformation,
+ AA::Intraprocedural);
// Every call site argument might be marked "noundef".
getOrCreateAAFor<AANoUndef>(CBArgPos);
@@ -3034,12 +3108,12 @@ void Attributor::identifyDefaultAbstractAttributes(Function &F) {
IRPosition::value(*cast<LoadInst>(I).getPointerOperand()));
if (SimplifyAllLoads)
getAssumedSimplified(IRPosition::value(I), nullptr,
- UsedAssumedInformation);
+ UsedAssumedInformation, AA::Intraprocedural);
} else {
auto &SI = cast<StoreInst>(I);
getOrCreateAAFor<AAIsDead>(IRPosition::inst(I));
getAssumedSimplified(IRPosition::value(*SI.getValueOperand()), nullptr,
- UsedAssumedInformation);
+ UsedAssumedInformation, AA::Intraprocedural);
getOrCreateAAFor<AAAlign>(IRPosition::value(*SI.getPointerOperand()));
}
return true;
@@ -3126,6 +3200,26 @@ raw_ostream &llvm::operator<<(raw_ostream &OS,
return OS;
}
+raw_ostream &llvm::operator<<(raw_ostream &OS,
+ const PotentialLLVMValuesState &S) {
+ OS << "set-state(< {";
+ if (!S.isValidState())
+ OS << "full-set";
+ else {
+ for (auto &It : S.getAssumedSet()) {
+ if (auto *F = dyn_cast<Function>(It.first.getValue()))
+ OS << "@" << F->getName() << "[" << int(It.second) << "], ";
+ else
+ OS << *It.first.getValue() << "[" << int(It.second) << "], ";
+ }
+ if (S.undefIsContained())
+ OS << "undef ";
+ }
+ OS << "} >)";
+
+ return OS;
+}
+
void AbstractAttribute::print(raw_ostream &OS) const {
OS << "[";
OS << getName();
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index 1ff54b78e27e..660ff3ee9563 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -14,12 +14,14 @@
#include "llvm/Transforms/IPO/Attributor.h"
#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
@@ -35,11 +37,13 @@
#include "llvm/IR/Argument.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
@@ -72,6 +76,8 @@ static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
template <>
unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
+template <> unsigned llvm::PotentialLLVMValuesState::MaxPotentialValues = -1;
+
static cl::opt<unsigned, true> MaxPotentialValues(
"attributor-max-potential-values", cl::Hidden,
cl::desc("Maximum number of potential values to be "
@@ -79,6 +85,12 @@ static cl::opt<unsigned, true> MaxPotentialValues(
cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
cl::init(7));
+static cl::opt<int> MaxPotentialValuesIterations(
+ "attributor-max-potential-values-iterations", cl::Hidden,
+ cl::desc(
+ "Maximum number of iterations we keep dismantling potential values."),
+ cl::init(64));
+
static cl::opt<unsigned> MaxInterferingAccesses(
"attributor-max-interfering-accesses", cl::Hidden,
cl::desc("Maximum number of interfering accesses to "
@@ -162,6 +174,7 @@ PIPE_OPERATOR(AAValueConstantRange)
PIPE_OPERATOR(AAPrivatizablePtr)
PIPE_OPERATOR(AAUndefinedBehavior)
PIPE_OPERATOR(AAPotentialConstantValues)
+PIPE_OPERATOR(AAPotentialValues)
PIPE_OPERATOR(AANoUndef)
PIPE_OPERATOR(AACallEdges)
PIPE_OPERATOR(AAFunctionReachability)
@@ -293,228 +306,35 @@ static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
return Ptr;
}
-/// Recursively visit all values that might become \p IRP at some point. This
-/// will be done by looking through cast instructions, selects, phis, and calls
-/// with the "returned" attribute. Once we cannot look through the value any
-/// further, the callback \p VisitValueCB is invoked and passed the current
-/// value, the \p State, and a flag to indicate if we stripped anything.
-/// Stripped means that we unpacked the value associated with \p IRP at least
-/// once. Note that the value used for the callback may still be the value
-/// associated with \p IRP (due to PHIs). To limit how much effort is invested,
-/// we will never visit more values than specified by \p MaxValues.
-/// If \p VS does not contain the Interprocedural bit, only values valid in the
-/// scope of \p CtxI will be visited and simplification into other scopes is
-/// prevented.
-template <typename StateTy>
-static bool genericValueTraversal(
- Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA,
- StateTy &State,
- function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
- VisitValueCB,
- const Instruction *CtxI, bool &UsedAssumedInformation,
- bool UseValueSimplify = true, int MaxValues = 16,
- function_ref<Value *(Value *)> StripCB = nullptr,
- AA::ValueScope VS = AA::Interprocedural) {
-
- struct LivenessInfo {
- const AAIsDead *LivenessAA = nullptr;
- bool AnyDead = false;
- };
- SmallMapVector<const Function *, LivenessInfo, 4> LivenessAAs;
- auto GetLivenessInfo = [&](const Function &F) -> LivenessInfo & {
- LivenessInfo &LI = LivenessAAs[&F];
- if (!LI.LivenessAA)
- LI.LivenessAA = &A.getAAFor<AAIsDead>(QueryingAA, IRPosition::function(F),
- DepClassTy::NONE);
- return LI;
- };
-
- Value *InitialV = &IRP.getAssociatedValue();
- using Item = std::pair<Value *, const Instruction *>;
- SmallSet<Item, 16> Visited;
- SmallVector<Item, 16> Worklist;
- Worklist.push_back({InitialV, CtxI});
-
- int Iteration = 0;
- do {
- Item I = Worklist.pop_back_val();
- Value *V = I.first;
- CtxI = I.second;
- if (StripCB)
- V = StripCB(V);
-
- // Check if we should process the current value. To prevent endless
- // recursion keep a record of the values we followed!
- if (!Visited.insert(I).second)
- continue;
-
- // Make sure we limit the compile time for complex expressions.
- if (Iteration++ >= MaxValues) {
- LLVM_DEBUG(dbgs() << "Generic value traversal reached iteration limit: "
- << Iteration << "!\n");
- return false;
- }
-
- // Explicitly look through calls with a "returned" attribute if we do
- // not have a pointer as stripPointerCasts only works on them.
- Value *NewV = nullptr;
- if (V->getType()->isPointerTy()) {
- NewV = V->stripPointerCasts();
- } else {
- auto *CB = dyn_cast<CallBase>(V);
- if (CB && CB->getCalledFunction()) {
- for (Argument &Arg : CB->getCalledFunction()->args())
- if (Arg.hasReturnedAttr()) {
- NewV = CB->getArgOperand(Arg.getArgNo());
- break;
- }
- }
- }
- if (NewV && NewV != V) {
- Worklist.push_back({NewV, CtxI});
- continue;
- }
-
- // Look through select instructions, visit assumed potential values.
- if (auto *SI = dyn_cast<SelectInst>(V)) {
- Optional<Constant *> C = A.getAssumedConstant(
- *SI->getCondition(), QueryingAA, UsedAssumedInformation);
- bool NoValueYet = !C;
- if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
- continue;
- if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
- if (CI->isZero())
- Worklist.push_back({SI->getFalseValue(), CtxI});
- else
- Worklist.push_back({SI->getTrueValue(), CtxI});
- continue;
- }
- // We could not simplify the condition, assume both values.(
- Worklist.push_back({SI->getTrueValue(), CtxI});
- Worklist.push_back({SI->getFalseValue(), CtxI});
- continue;
- }
-
- // Look through phi nodes, visit all live operands.
- if (auto *PHI = dyn_cast<PHINode>(V)) {
- LivenessInfo &LI = GetLivenessInfo(*PHI->getFunction());
- for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
- BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
- if (LI.LivenessAA->isEdgeDead(IncomingBB, PHI->getParent())) {
- LI.AnyDead = true;
- UsedAssumedInformation |= !LI.LivenessAA->isAtFixpoint();
- continue;
- }
- Worklist.push_back(
- {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
- }
- continue;
- }
-
- if (auto *Arg = dyn_cast<Argument>(V)) {
- if ((VS & AA::Interprocedural) && !Arg->hasPassPointeeByValueCopyAttr()) {
- SmallVector<Item> CallSiteValues;
- bool UsedAssumedInformation = false;
- if (A.checkForAllCallSites(
- [&](AbstractCallSite ACS) {
- // Callbacks might not have a corresponding call site operand,
- // stick with the argument in that case.
- Value *CSOp = ACS.getCallArgOperand(*Arg);
- if (!CSOp)
- return false;
- CallSiteValues.push_back({CSOp, ACS.getInstruction()});
- return true;
- },
- *Arg->getParent(), true, &QueryingAA, UsedAssumedInformation)) {
- Worklist.append(CallSiteValues);
- continue;
- }
- }
- }
-
- if (UseValueSimplify && !isa<Constant>(V)) {
- Optional<Value *> SimpleV =
- A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
- if (!SimpleV)
- continue;
- Value *NewV = SimpleV.value();
- if (NewV && NewV != V) {
- if ((VS & AA::Interprocedural) || !CtxI ||
- AA::isValidInScope(*NewV, CtxI->getFunction())) {
- Worklist.push_back({NewV, CtxI});
- continue;
- }
- }
- }
-
- if (auto *LI = dyn_cast<LoadInst>(V)) {
- bool UsedAssumedInformation = false;
- // If we ask for the potentially loaded values from the initial pointer we
- // will simply end up here again. The load is as far as we can make it.
- if (LI->getPointerOperand() != InitialV) {
- SmallSetVector<Value *, 4> PotentialCopies;
- SmallSetVector<Instruction *, 4> PotentialValueOrigins;
- if (AA::getPotentiallyLoadedValues(A, *LI, PotentialCopies,
- PotentialValueOrigins, QueryingAA,
- UsedAssumedInformation,
- /* OnlyExact */ true)) {
- // Values have to be dynamically unique or we loose the fact that a
- // single llvm::Value might represent two runtime values (e.g., stack
- // locations in different recursive calls).
- bool DynamicallyUnique =
- llvm::all_of(PotentialCopies, [&A, &QueryingAA](Value *PC) {
- return AA::isDynamicallyUnique(A, QueryingAA, *PC);
- });
- if (DynamicallyUnique &&
- ((VS & AA::Interprocedural) || !CtxI ||
- llvm::all_of(PotentialCopies, [CtxI](Value *PC) {
- return AA::isValidInScope(*PC, CtxI->getFunction());
- }))) {
- for (auto *PotentialCopy : PotentialCopies)
- Worklist.push_back({PotentialCopy, CtxI});
- continue;
- }
- }
- }
- }
-
- // Once a leaf is reached we inform the user through the callback.
- if (!VisitValueCB(*V, CtxI, State, Iteration > 1)) {
- LLVM_DEBUG(dbgs() << "Generic value traversal visit callback failed for: "
- << *V << "!\n");
- return false;
- }
- } while (!Worklist.empty());
-
- // If we actually used liveness information so we have to record a dependence.
- for (auto &It : LivenessAAs)
- if (It.second.AnyDead)
- A.recordDependence(*It.second.LivenessAA, QueryingAA,
- DepClassTy::OPTIONAL);
-
- // All values have been visited.
- return true;
-}
-
bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
- SmallVectorImpl<Value *> &Objects,
+ SmallSetVector<Value *, 8> &Objects,
const AbstractAttribute &QueryingAA,
const Instruction *CtxI,
bool &UsedAssumedInformation,
- AA::ValueScope VS) {
- auto StripCB = [&](Value *V) { return getUnderlyingObject(V); };
- SmallPtrSet<Value *, 8> SeenObjects;
- auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *,
- SmallVectorImpl<Value *> &Objects,
- bool) -> bool {
- if (SeenObjects.insert(&Val).second)
- Objects.push_back(&Val);
+ AA::ValueScope S,
+ SmallPtrSetImpl<Value *> *SeenObjects) {
+ SmallPtrSet<Value *, 8> LocalSeenObjects;
+ if (!SeenObjects)
+ SeenObjects = &LocalSeenObjects;
+
+ SmallVector<AA::ValueAndContext> Values;
+ if (!A.getAssumedSimplifiedValues(IRPosition::value(Ptr), &QueryingAA, Values,
+ S, UsedAssumedInformation)) {
+ Objects.insert(const_cast<Value *>(&Ptr));
return true;
- };
- if (!genericValueTraversal<decltype(Objects)>(
- A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI,
- UsedAssumedInformation, true, 32, StripCB, VS))
- return false;
+ }
+
+ for (auto &VAC : Values) {
+ Value *UO = getUnderlyingObject(VAC.getValue());
+ if (UO && UO != VAC.getValue() && SeenObjects->insert(UO).second) {
+ if (!getAssumedUnderlyingObjects(A, *UO, Objects, QueryingAA,
+ VAC.getCtxI(), UsedAssumedInformation, S,
+ SeenObjects))
+ return false;
+ continue;
+ }
+ Objects.insert(VAC.getValue());
+ }
return true;
}
@@ -1122,9 +942,6 @@ struct AAPointerInfoImpl
using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}
- /// See AbstractAttribute::initialize(...).
- void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }
-
/// See AbstractAttribute::getAsStr().
const std::string getAsStr() const override {
return std::string("PointerInfo ") +
@@ -1144,9 +961,14 @@ struct AAPointerInfoImpl
const override {
return State::forallInterferingAccesses(OAS, CB);
}
- bool forallInterferingAccesses(
- Attributor &A, const AbstractAttribute &QueryingAA, Instruction &I,
- function_ref<bool(const Access &, bool)> UserCB) const override {
+
+ bool
+ forallInterferingAccesses(Attributor &A, const AbstractAttribute &QueryingAA,
+ Instruction &I,
+ function_ref<bool(const Access &, bool)> UserCB,
+ bool &HasBeenWrittenTo) const override {
+ HasBeenWrittenTo = false;
+
SmallPtrSet<const Access *, 8> DominatingWrites;
SmallVector<std::pair<const Access *, bool>, 8> InterferingAccesses;
@@ -1182,14 +1004,12 @@ struct AAPointerInfoImpl
const bool FindInterferingWrites = I.mayReadFromMemory();
const bool FindInterferingReads = I.mayWriteToMemory();
- const bool UseDominanceReasoning = FindInterferingWrites;
+ const bool UseDominanceReasoning =
+ FindInterferingWrites && NoRecurseAA.isKnownNoRecurse();
const bool CanUseCFGResoning = CanIgnoreThreading(I);
InformationCache &InfoCache = A.getInfoCache();
const DominatorTree *DT =
- NoRecurseAA.isKnownNoRecurse() && UseDominanceReasoning
- ? InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
- Scope)
- : nullptr;
+ InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(Scope);
enum GPUAddressSpace : unsigned {
Generic = 0,
@@ -1246,22 +1066,17 @@ struct AAPointerInfoImpl
(!FindInterferingReads || !Acc.isRead()))
return true;
+ bool Dominates = DT && Exact && Acc.isMustAccess() &&
+ (Acc.getLocalInst()->getFunction() == &Scope) &&
+ DT->dominates(Acc.getRemoteInst(), &I);
+ if (FindInterferingWrites && Dominates)
+ HasBeenWrittenTo = true;
+
// For now we only filter accesses based on CFG reasoning which does not
// work yet if we have threading effects, or the access is complicated.
- if (CanUseCFGResoning) {
- if ((!Acc.isWrite() ||
- !AA::isPotentiallyReachable(A, *Acc.getLocalInst(), I, QueryingAA,
- IsLiveInCalleeCB)) &&
- (!Acc.isRead() ||
- !AA::isPotentiallyReachable(A, I, *Acc.getLocalInst(), QueryingAA,
- IsLiveInCalleeCB)))
- return true;
- if (DT && Exact && (Acc.getLocalInst()->getFunction() == &Scope) &&
- IsSameThreadAsLoad(Acc)) {
- if (DT->dominates(Acc.getLocalInst(), &I))
- DominatingWrites.insert(&Acc);
- }
- }
+ if (CanUseCFGResoning && Dominates && UseDominanceReasoning &&
+ IsSameThreadAsLoad(Acc))
+ DominatingWrites.insert(&Acc);
InterferingAccesses.push_back({&Acc, Exact});
return true;
@@ -1269,19 +1084,27 @@ struct AAPointerInfoImpl
if (!State::forallInterferingAccesses(I, AccessCB))
return false;
- // If we cannot use CFG reasoning we only filter the non-write accesses
- // and are done here.
- if (!CanUseCFGResoning) {
- for (auto &It : InterferingAccesses)
- if (!UserCB(*It.first, It.second))
- return false;
- return true;
+ if (HasBeenWrittenTo) {
+ const Function *ScopePtr = &Scope;
+ IsLiveInCalleeCB = [ScopePtr](const Function &Fn) {
+ return ScopePtr != &Fn;
+ };
}
// Helper to determine if we can skip a specific write access. This is in
// the worst case quadratic as we are looking for another write that will
// hide the effect of this one.
auto CanSkipAccess = [&](const Access &Acc, bool Exact) {
+ if ((!Acc.isWrite() ||
+ !AA::isPotentiallyReachable(A, *Acc.getLocalInst(), I, QueryingAA,
+ IsLiveInCalleeCB)) &&
+ (!Acc.isRead() ||
+ !AA::isPotentiallyReachable(A, I, *Acc.getLocalInst(), QueryingAA,
+ IsLiveInCalleeCB)))
+ return true;
+
+ if (!DT || !UseDominanceReasoning)
+ return false;
if (!IsSameThreadAsLoad(Acc))
return false;
if (!DominatingWrites.count(&Acc))
@@ -1303,7 +1126,7 @@ struct AAPointerInfoImpl
// succeeded for all or not.
unsigned NumInterferingAccesses = InterferingAccesses.size();
for (auto &It : InterferingAccesses) {
- if (!DT || NumInterferingAccesses > MaxInterferingAccesses ||
+ if (NumInterferingAccesses > MaxInterferingAccesses ||
!CanSkipAccess(*It.first, It.second)) {
if (!UserCB(*It.first, It.second))
return false;
@@ -1339,8 +1162,9 @@ struct AAPointerInfoImpl
if (FromCallee) {
Content = A.translateArgumentToCallSiteContent(
RAcc.getContent(), CB, *this, UsedAssumedInformation);
- AK = AccessKind(
- AK & (IsByval ? AccessKind::AK_READ : AccessKind::AK_READ_WRITE));
+ AK =
+ AccessKind(AK & (IsByval ? AccessKind::AK_R : AccessKind::AK_RW));
+ AK = AccessKind(AK | (RAcc.isMayAccess() ? AK_MAY : AK_MUST));
}
Changed =
Changed | addAccess(A, OAS.getOffset(), OAS.getSize(), CB, Content,
@@ -1353,6 +1177,27 @@ struct AAPointerInfoImpl
/// Statistic tracking for all AAPointerInfo implementations.
/// See AbstractAttribute::trackStatistics().
void trackPointerInfoStatistics(const IRPosition &IRP) const {}
+
+ /// Dump the state into \p O.
+ void dumpState(raw_ostream &O) {
+ for (auto &It : AccessBins) {
+ O << "[" << It.first.getOffset() << "-"
+ << It.first.getOffset() + It.first.getSize()
+ << "] : " << It.getSecond()->size() << "\n";
+ for (auto &Acc : *It.getSecond()) {
+ O << " - " << Acc.getKind() << " - " << *Acc.getLocalInst() << "\n";
+ if (Acc.getLocalInst() != Acc.getRemoteInst())
+ O << " --> " << *Acc.getRemoteInst()
+ << "\n";
+ if (!Acc.isWrittenValueYetUndetermined()) {
+ if (Acc.getWrittenValue())
+ O << " - c: " << *Acc.getWrittenValue() << "\n";
+ else
+ O << " - c: <unknown>\n";
+ }
+ }
+ }
+ }
};
struct AAPointerInfoFloating : public AAPointerInfoImpl {
@@ -1360,9 +1205,6 @@ struct AAPointerInfoFloating : public AAPointerInfoImpl {
AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
: AAPointerInfoImpl(IRP, A) {}
- /// See AbstractAttribute::initialize(...).
- void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }
-
/// Deal with an access and signal if it was handled successfully.
bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
Optional<Value *> Content, AccessKind Kind, int64_t Offset,
@@ -1460,7 +1302,7 @@ struct AAPointerInfoFloating : public AAPointerInfoImpl {
Follow = true;
return true;
}
- if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
+ if (isa<CastInst>(Usr) || isa<SelectInst>(Usr) || isa<ReturnInst>(Usr))
return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
// For PHIs we need to take care of the recurrence explicitly as the value
@@ -1469,6 +1311,7 @@ struct AAPointerInfoFloating : public AAPointerInfoImpl {
if (isa<PHINode>(Usr)) {
// Note the order here, the Usr access might change the map, CurPtr is
// already in it though.
+ bool IsFirstPHIUser = !OffsetInfoMap.count(Usr);
OffsetInfo &UsrOI = OffsetInfoMap[Usr];
OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
// Check if the PHI is invariant (so far).
@@ -1484,52 +1327,69 @@ struct AAPointerInfoFloating : public AAPointerInfoImpl {
}
// Check if the PHI operand is not dependent on the PHI itself.
- // TODO: This is not great as we look at the pointer type. However, it
- // is unclear where the Offset size comes from with typeless pointers.
APInt Offset(
DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
0);
- if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
- DL, Offset, /* AllowNonInbounds */ true)) {
- if (Offset != PtrOI.Offset) {
- LLVM_DEBUG(dbgs()
- << "[AAPointerInfo] PHI operand pointer offset mismatch "
- << *CurPtr << " in " << *Usr << "\n");
- return false;
- }
- return HandlePassthroughUser(Usr, PtrOI, Follow);
+ Value *CurPtrBase = CurPtr->stripAndAccumulateConstantOffsets(
+ DL, Offset, /* AllowNonInbounds */ true);
+ auto It = OffsetInfoMap.find(CurPtrBase);
+ if (It != OffsetInfoMap.end()) {
+ Offset += It->getSecond().Offset;
+ if (IsFirstPHIUser || Offset == UsrOI.Offset)
+ return HandlePassthroughUser(Usr, PtrOI, Follow);
+ LLVM_DEBUG(dbgs()
+ << "[AAPointerInfo] PHI operand pointer offset mismatch "
+ << *CurPtr << " in " << *Usr << "\n");
+ } else {
+ LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
+ << *CurPtr << " in " << *Usr << "\n");
}
// TODO: Approximate in case we know the direction of the recurrence.
- LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
- << *CurPtr << " in " << *Usr << "\n");
UsrOI = PtrOI;
UsrOI.Offset = OffsetAndSize::Unknown;
Follow = true;
return true;
}
- if (auto *LoadI = dyn_cast<LoadInst>(Usr))
- return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
- AccessKind::AK_READ, OffsetInfoMap[CurPtr].Offset,
- Changed, LoadI->getType());
+ if (auto *LoadI = dyn_cast<LoadInst>(Usr)) {
+ // If the access is to a pointer that may or may not be the associated
+ // value, e.g. due to a PHI, we cannot assume it will be read.
+ AccessKind AK = AccessKind::AK_R;
+ if (getUnderlyingObject(CurPtr) == &AssociatedValue)
+ AK = AccessKind(AK | AccessKind::AK_MUST);
+ else
+ AK = AccessKind(AK | AccessKind::AK_MAY);
+ return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr, AK,
+ OffsetInfoMap[CurPtr].Offset, Changed,
+ LoadI->getType());
+ }
+
if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
if (StoreI->getValueOperand() == CurPtr) {
LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
<< *StoreI << "\n");
return false;
}
+ // If the access is to a pointer that may or may not be the associated
+ // value, e.g. due to a PHI, we cannot assume it will be written.
+ AccessKind AK = AccessKind::AK_W;
+ if (getUnderlyingObject(CurPtr) == &AssociatedValue)
+ AK = AccessKind(AK | AccessKind::AK_MUST);
+ else
+ AK = AccessKind(AK | AccessKind::AK_MAY);
bool UsedAssumedInformation = false;
- Optional<Value *> Content = A.getAssumedSimplified(
- *StoreI->getValueOperand(), *this, UsedAssumedInformation);
- return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
+ Optional<Value *> Content =
+ A.getAssumedSimplified(*StoreI->getValueOperand(), *this,
+ UsedAssumedInformation, AA::Interprocedural);
+ return handleAccess(A, *StoreI, *CurPtr, Content, AK,
OffsetInfoMap[CurPtr].Offset, Changed,
StoreI->getValueOperand()->getType());
}
if (auto *CB = dyn_cast<CallBase>(Usr)) {
if (CB->isLifetimeStartOrEnd())
return true;
- if (TLI && isFreeCall(CB, TLI))
+ if (getFreedOperand(CB, TLI) == U)
return true;
if (CB->isArgOperand(&U)) {
unsigned ArgNo = CB->getArgOperandNo(&U);
@@ -1539,7 +1399,7 @@ struct AAPointerInfoFloating : public AAPointerInfoImpl {
Changed = translateAndAddState(A, CSArgPI,
OffsetInfoMap[CurPtr].Offset, *CB) |
Changed;
- return true;
+ return isValidState();
}
LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
<< "\n");
@@ -1551,36 +1411,30 @@ struct AAPointerInfoFloating : public AAPointerInfoImpl {
return false;
};
auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
- if (OffsetInfoMap.count(NewU))
+ if (OffsetInfoMap.count(NewU)) {
+ LLVM_DEBUG({
+ if (!(OffsetInfoMap[NewU] == OffsetInfoMap[OldU])) {
+ dbgs() << "[AAPointerInfo] Equivalent use callback failed: "
+ << OffsetInfoMap[NewU].Offset << " vs "
+ << OffsetInfoMap[OldU].Offset << "\n";
+ }
+ });
return OffsetInfoMap[NewU] == OffsetInfoMap[OldU];
+ }
OffsetInfoMap[NewU] = OffsetInfoMap[OldU];
return true;
};
if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
/* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL,
- /* IgnoreDroppableUses */ true, EquivalentUseCB))
+ /* IgnoreDroppableUses */ true, EquivalentUseCB)) {
+ LLVM_DEBUG(
+ dbgs() << "[AAPointerInfo] Check for all uses failed, abort!\n");
return indicatePessimisticFixpoint();
+ }
LLVM_DEBUG({
dbgs() << "Accesses by bin after update:\n";
- for (auto &It : AccessBins) {
- dbgs() << "[" << It.first.getOffset() << "-"
- << It.first.getOffset() + It.first.getSize()
- << "] : " << It.getSecond()->size() << "\n";
- for (auto &Acc : *It.getSecond()) {
- dbgs() << " - " << Acc.getKind() << " - " << *Acc.getLocalInst()
- << "\n";
- if (Acc.getLocalInst() != Acc.getRemoteInst())
- dbgs() << " --> "
- << *Acc.getRemoteInst() << "\n";
- if (!Acc.isWrittenValueYetUndetermined()) {
- if (Acc.getWrittenValue())
- dbgs() << " - c: " << *Acc.getWrittenValue() << "\n";
- else
- dbgs() << " - c: <unknown>\n";
- }
- }
- }
+ dumpState(dbgs());
});
return Changed;
@@ -1643,16 +1497,22 @@ struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
unsigned ArgNo = getIRPosition().getCallSiteArgNo();
ChangeStatus Changed = ChangeStatus::UNCHANGED;
if (ArgNo == 0) {
- handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
- nullptr, LengthVal);
+ handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_MUST_WRITE, 0,
+ Changed, nullptr, LengthVal);
} else if (ArgNo == 1) {
- handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
+ handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_MUST_READ, 0, Changed,
nullptr, LengthVal);
} else {
LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
<< *MI << "\n");
return indicatePessimisticFixpoint();
}
+
+ LLVM_DEBUG({
+ dbgs() << "Accesses by bin after update:\n";
+ dumpState(dbgs());
+ });
+
return Changed;
}
@@ -1954,23 +1814,23 @@ bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
ChangeStatus Changed = ChangeStatus::UNCHANGED;
- auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret,
- bool) -> bool {
- assert(AA::isValidInScope(V, Ret.getFunction()) &&
- "Assumed returned value should be valid in function scope!");
- if (ReturnedValues[&V].insert(&Ret))
- Changed = ChangeStatus::CHANGED;
- return true;
- };
-
+ SmallVector<AA::ValueAndContext> Values;
bool UsedAssumedInformation = false;
auto ReturnInstCB = [&](Instruction &I) {
ReturnInst &Ret = cast<ReturnInst>(I);
- return genericValueTraversal<ReturnInst>(
- A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB,
- &I, UsedAssumedInformation, /* UseValueSimplify */ true,
- /* MaxValues */ 16,
- /* StripCB */ nullptr, AA::Intraprocedural);
+ Values.clear();
+ if (!A.getAssumedSimplifiedValues(IRPosition::value(*Ret.getReturnValue()),
+ *this, Values, AA::Intraprocedural,
+ UsedAssumedInformation))
+ Values.push_back({*Ret.getReturnValue(), Ret});
+
+ for (auto &VAC : Values) {
+ assert(AA::isValidInScope(*VAC.getValue(), Ret.getFunction()) &&
+ "Assumed returned value should be valid in function scope!");
+ if (ReturnedValues[VAC.getValue()].insert(&Ret))
+ Changed = ChangeStatus::CHANGED;
+ }
+ return true;
};
// Discover returned values from all live returned instructions in the
@@ -2472,6 +2332,18 @@ struct AANonNullFloating : public AANonNullImpl {
ChangeStatus updateImpl(Attributor &A) override {
const DataLayout &DL = A.getDataLayout();
+ bool Stripped;
+ bool UsedAssumedInformation = false;
+ SmallVector<AA::ValueAndContext> Values;
+ if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
+ AA::AnyScope, UsedAssumedInformation)) {
+ Values.push_back({getAssociatedValue(), getCtxI()});
+ Stripped = false;
+ } else {
+ Stripped = Values.size() != 1 ||
+ Values.front().getValue() != &getAssociatedValue();
+ }
+
DominatorTree *DT = nullptr;
AssumptionCache *AC = nullptr;
InformationCache &InfoCache = A.getInfoCache();
@@ -2480,8 +2352,8 @@ struct AANonNullFloating : public AANonNullImpl {
AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
}
- auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
- AANonNull::StateType &T, bool Stripped) -> bool {
+ AANonNull::StateType T;
+ auto VisitValueCB = [&](Value &V, const Instruction *CtxI) -> bool {
const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
DepClassTy::REQUIRED);
if (!Stripped && this == &AA) {
@@ -2495,12 +2367,9 @@ struct AANonNullFloating : public AANonNullImpl {
return T.isValidState();
};
- StateType T;
- bool UsedAssumedInformation = false;
- if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
- VisitValueCB, getCtxI(),
- UsedAssumedInformation))
- return indicatePessimisticFixpoint();
+ for (const auto &VAC : Values)
+ if (!VisitValueCB(*VAC.getValue(), VAC.getCtxI()))
+ return indicatePessimisticFixpoint();
return clampStateAndIndicateChange(getState(), T);
}
@@ -2753,8 +2622,9 @@ struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
if (!NoUndefAA.isKnownNoUndef())
continue;
bool UsedAssumedInformation = false;
- Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
- IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
+ Optional<Value *> SimplifiedVal =
+ A.getAssumedSimplified(IRPosition::value(*ArgVal), *this,
+ UsedAssumedInformation, AA::Interprocedural);
if (UsedAssumedInformation)
continue;
if (SimplifiedVal && !SimplifiedVal.value())
@@ -2925,8 +2795,9 @@ private:
Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
Instruction *I) {
bool UsedAssumedInformation = false;
- Optional<Value *> SimplifiedV = A.getAssumedSimplified(
- IRPosition::value(*V), *this, UsedAssumedInformation);
+ Optional<Value *> SimplifiedV =
+ A.getAssumedSimplified(IRPosition::value(*V), *this,
+ UsedAssumedInformation, AA::Interprocedural);
if (!UsedAssumedInformation) {
// Don't depend on assumed values.
if (!SimplifiedV) {
@@ -3369,7 +3240,9 @@ struct AANoAliasCallSiteArgument final : AANoAliasImpl {
}
}
- if (!AA::isPotentiallyReachable(A, *UserI, *getCtxI(), *this))
+ if (!AA::isPotentiallyReachable(
+ A, *UserI, *getCtxI(), *this,
+ [ScopeFn](const Function &Fn) { return &Fn != ScopeFn; }))
return true;
}
@@ -4364,10 +4237,23 @@ struct AADereferenceableFloating : AADereferenceableImpl {
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
+
+ bool Stripped;
+ bool UsedAssumedInformation = false;
+ SmallVector<AA::ValueAndContext> Values;
+ if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
+ AA::AnyScope, UsedAssumedInformation)) {
+ Values.push_back({getAssociatedValue(), getCtxI()});
+ Stripped = false;
+ } else {
+ Stripped = Values.size() != 1 ||
+ Values.front().getValue() != &getAssociatedValue();
+ }
+
const DataLayout &DL = A.getDataLayout();
+ DerefState T;
- auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
- bool Stripped) -> bool {
+ auto VisitValueCB = [&](const Value &V) -> bool {
unsigned IdxWidth =
DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
APInt Offset(IdxWidth, 0);
@@ -4420,12 +4306,9 @@ struct AADereferenceableFloating : AADereferenceableImpl {
return T.isValidState();
};
- DerefState T;
- bool UsedAssumedInformation = false;
- if (!genericValueTraversal<DerefState>(A, getIRPosition(), *this, T,
- VisitValueCB, getCtxI(),
- UsedAssumedInformation))
- return indicatePessimisticFixpoint();
+ for (const auto &VAC : Values)
+ if (!VisitValueCB(*VAC.getValue()))
+ return indicatePessimisticFixpoint();
return clampStateAndIndicateChange(getState(), T);
}
@@ -4652,8 +4535,20 @@ struct AAAlignFloating : AAAlignImpl {
ChangeStatus updateImpl(Attributor &A) override {
const DataLayout &DL = A.getDataLayout();
- auto VisitValueCB = [&](Value &V, const Instruction *,
- AAAlign::StateType &T, bool Stripped) -> bool {
+ bool Stripped;
+ bool UsedAssumedInformation = false;
+ SmallVector<AA::ValueAndContext> Values;
+ if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
+ AA::AnyScope, UsedAssumedInformation)) {
+ Values.push_back({getAssociatedValue(), getCtxI()});
+ Stripped = false;
+ } else {
+ Stripped = Values.size() != 1 ||
+ Values.front().getValue() != &getAssociatedValue();
+ }
+
+ StateType T;
+ auto VisitValueCB = [&](Value &V) -> bool {
if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V))
return true;
const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
@@ -4686,15 +4581,13 @@ struct AAAlignFloating : AAAlignImpl {
return T.isValidState();
};
- StateType T;
- bool UsedAssumedInformation = false;
- if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
- VisitValueCB, getCtxI(),
- UsedAssumedInformation))
- return indicatePessimisticFixpoint();
+ for (const auto &VAC : Values) {
+ if (!VisitValueCB(*VAC.getValue()))
+ return indicatePessimisticFixpoint();
+ }
- // TODO: If we know we visited all incoming values, thus no are assumed
- // dead, we can take the known information from the state T.
+ // TODO: If we know we visited all incoming values, thus no are assumed
+ // dead, we can take the known information from the state T.
return clampStateAndIndicateChange(getState(), T);
}
@@ -4941,7 +4834,9 @@ struct AAInstanceInfoImpl : public AAInstanceInfo {
return false;
// If this call base might reach the scope again we might forward the
// argument back here. This is very conservative.
- if (AA::isPotentiallyReachable(A, *CB, *Scope, *this, nullptr))
+ if (AA::isPotentiallyReachable(
+ A, *CB, *Scope, *this,
+ [Scope](const Function &Fn) { return &Fn != Scope; }))
return false;
return true;
}
@@ -5518,9 +5413,9 @@ struct AAValueSimplifyImpl : AAValueSimplify {
if (const auto &NewV = VMap.lookup(&V))
return NewV;
bool UsedAssumedInformation = false;
- Optional<Value *> SimpleV =
- A.getAssumedSimplified(V, QueryingAA, UsedAssumedInformation);
- if (!SimpleV)
+ Optional<Value *> SimpleV = A.getAssumedSimplified(
+ V, QueryingAA, UsedAssumedInformation, AA::Interprocedural);
+ if (!SimpleV.has_value())
return PoisonValue::get(&Ty);
Value *EffectiveV = &V;
if (SimpleV.value())
@@ -5561,8 +5456,8 @@ struct AAValueSimplifyImpl : AAValueSimplify {
bool UsedAssumedInformation = false;
Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
if (Simplify)
- QueryingValueSimplified =
- A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation);
+ QueryingValueSimplified = A.getAssumedSimplified(
+ IRP, QueryingAA, UsedAssumedInformation, AA::Interprocedural);
return unionAssumed(QueryingValueSimplified);
}
@@ -5763,209 +5658,11 @@ struct AAValueSimplifyFloating : AAValueSimplifyImpl {
indicatePessimisticFixpoint();
}
- /// Check if \p Cmp is a comparison we can simplify.
- ///
- /// We handle multiple cases, one in which at least one operand is an
- /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other
- /// operand. Return true if successful, in that case SimplifiedAssociatedValue
- /// will be updated.
- bool handleCmp(Attributor &A, CmpInst &Cmp) {
- auto Union = [&](Value &V) {
- SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
- SimplifiedAssociatedValue, &V, V.getType());
- return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
- };
-
- Value *LHS = Cmp.getOperand(0);
- Value *RHS = Cmp.getOperand(1);
-
- // Simplify the operands first.
- bool UsedAssumedInformation = false;
- const auto &SimplifiedLHS =
- A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
- *this, UsedAssumedInformation);
- if (!SimplifiedLHS)
- return true;
- if (!SimplifiedLHS.value())
- return false;
- LHS = *SimplifiedLHS;
-
- const auto &SimplifiedRHS =
- A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
- *this, UsedAssumedInformation);
- if (!SimplifiedRHS)
- return true;
- if (!SimplifiedRHS.value())
- return false;
- RHS = *SimplifiedRHS;
-
- LLVMContext &Ctx = Cmp.getContext();
- // Handle the trivial case first in which we don't even need to think about
- // null or non-null.
- if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) {
- Constant *NewVal =
- ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual());
- if (!Union(*NewVal))
- return false;
- if (!UsedAssumedInformation)
- indicateOptimisticFixpoint();
- return true;
- }
-
- // From now on we only handle equalities (==, !=).
- ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp);
- if (!ICmp || !ICmp->isEquality())
- return false;
-
- bool LHSIsNull = isa<ConstantPointerNull>(LHS);
- bool RHSIsNull = isa<ConstantPointerNull>(RHS);
- if (!LHSIsNull && !RHSIsNull)
- return false;
-
- // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
- // non-nullptr operand and if we assume it's non-null we can conclude the
- // result of the comparison.
- assert((LHSIsNull || RHSIsNull) &&
- "Expected nullptr versus non-nullptr comparison at this point");
-
- // The index is the operand that we assume is not null.
- unsigned PtrIdx = LHSIsNull;
- auto &PtrNonNullAA = A.getAAFor<AANonNull>(
- *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
- DepClassTy::REQUIRED);
- if (!PtrNonNullAA.isAssumedNonNull())
- return false;
- UsedAssumedInformation |= !PtrNonNullAA.isKnownNonNull();
-
- // The new value depends on the predicate, true for != and false for ==.
- Constant *NewVal = ConstantInt::get(
- Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_NE);
- if (!Union(*NewVal))
- return false;
-
- if (!UsedAssumedInformation)
- indicateOptimisticFixpoint();
-
- return true;
- }
-
- /// Use the generic, non-optimistic InstSimplfy functionality if we managed to
- /// simplify any operand of the instruction \p I. Return true if successful,
- /// in that case SimplifiedAssociatedValue will be updated.
- bool handleGenericInst(Attributor &A, Instruction &I) {
- bool SomeSimplified = false;
- bool UsedAssumedInformation = false;
-
- SmallVector<Value *, 8> NewOps(I.getNumOperands());
- int Idx = 0;
- for (Value *Op : I.operands()) {
- const auto &SimplifiedOp =
- A.getAssumedSimplified(IRPosition::value(*Op, getCallBaseContext()),
- *this, UsedAssumedInformation);
- // If we are not sure about any operand we are not sure about the entire
- // instruction, we'll wait.
- if (!SimplifiedOp)
- return true;
-
- if (SimplifiedOp.value())
- NewOps[Idx] = SimplifiedOp.value();
- else
- NewOps[Idx] = Op;
-
- SomeSimplified |= (NewOps[Idx] != Op);
- ++Idx;
- }
-
- // We won't bother with the InstSimplify interface if we didn't simplify any
- // operand ourselves.
- if (!SomeSimplified)
- return false;
-
- InformationCache &InfoCache = A.getInfoCache();
- Function *F = I.getFunction();
- const auto *DT =
- InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
- const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
- auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
- OptimizationRemarkEmitter *ORE = nullptr;
-
- const DataLayout &DL = I.getModule()->getDataLayout();
- SimplifyQuery Q(DL, TLI, DT, AC, &I);
- if (Value *SimplifiedI =
- simplifyInstructionWithOperands(&I, NewOps, Q, ORE)) {
- SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
- SimplifiedAssociatedValue, SimplifiedI, I.getType());
- return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
- }
- return false;
- }
-
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
auto Before = SimplifiedAssociatedValue;
-
- // Do not simplify loads that are only used in llvm.assume if we cannot also
- // remove all stores that may feed into the load. The reason is that the
- // assume is probably worth something as long as the stores are around.
- if (auto *LI = dyn_cast<LoadInst>(&getAssociatedValue())) {
- InformationCache &InfoCache = A.getInfoCache();
- if (InfoCache.isOnlyUsedByAssume(*LI)) {
- SmallSetVector<Value *, 4> PotentialCopies;
- SmallSetVector<Instruction *, 4> PotentialValueOrigins;
- bool UsedAssumedInformation = false;
- if (AA::getPotentiallyLoadedValues(A, *LI, PotentialCopies,
- PotentialValueOrigins, *this,
- UsedAssumedInformation,
- /* OnlyExact */ true)) {
- if (!llvm::all_of(PotentialValueOrigins, [&](Instruction *I) {
- if (!I)
- return true;
- if (auto *SI = dyn_cast<StoreInst>(I))
- return A.isAssumedDead(SI->getOperandUse(0), this,
- /* LivenessAA */ nullptr,
- UsedAssumedInformation,
- /* CheckBBLivenessOnly */ false);
- return A.isAssumedDead(*I, this, /* LivenessAA */ nullptr,
- UsedAssumedInformation,
- /* CheckBBLivenessOnly */ false);
- }))
- return indicatePessimisticFixpoint();
- }
- }
- }
-
- auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
- bool Stripped) -> bool {
- auto &AA = A.getAAFor<AAValueSimplify>(
- *this, IRPosition::value(V, getCallBaseContext()),
- DepClassTy::REQUIRED);
- if (!Stripped && this == &AA) {
-
- if (auto *I = dyn_cast<Instruction>(&V)) {
- if (auto *Cmp = dyn_cast<CmpInst>(&V))
- if (handleCmp(A, *Cmp))
- return true;
- if (handleGenericInst(A, *I))
- return true;
- }
- // TODO: Look the instruction and check recursively.
-
- LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
- << "\n");
- return false;
- }
- return checkAndUpdate(A, *this,
- IRPosition::value(V, getCallBaseContext()));
- };
-
- bool Dummy = false;
- bool UsedAssumedInformation = false;
- if (!genericValueTraversal<bool>(A, getIRPosition(), *this, Dummy,
- VisitValueCB, getCtxI(),
- UsedAssumedInformation,
- /* UseValueSimplify */ false))
- if (!askSimplifiedValueForOtherAAs(A))
- return indicatePessimisticFixpoint();
+ if (!askSimplifiedValueForOtherAAs(A))
+ return indicatePessimisticFixpoint();
// If a candicate was found in this update, return CHANGED.
return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
@@ -6122,6 +5819,8 @@ struct AAHeapToStackFunction final : public AAHeapToStack {
struct DeallocationInfo {
/// The call that deallocates the memory.
CallBase *const CB;
+ /// The value freed by the call.
+ Value *FreedOp;
/// Flag to indicate if we don't know all objects this deallocation might
/// free.
@@ -6153,14 +5852,14 @@ struct AAHeapToStackFunction final : public AAHeapToStack {
CallBase *CB = dyn_cast<CallBase>(&I);
if (!CB)
return true;
- if (isFreeCall(CB, TLI)) {
- DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB};
+ if (Value *FreedOp = getFreedOperand(CB, TLI)) {
+ DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB, FreedOp};
return true;
}
// To do heap to stack, we need to know that the allocation itself is
// removable once uses are rewritten, and that we can initialize the
// alloca to the same pattern as the original allocation result.
- if (isAllocationFn(CB, TLI) && isAllocRemovable(CB, TLI)) {
+ if (isRemovableAlloc(CB, TLI)) {
auto *I8Ty = Type::getInt8Ty(CB->getParent()->getContext());
if (nullptr != getInitialValueOfAllocation(CB, TLI, I8Ty)) {
AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB};
@@ -6427,44 +6126,36 @@ ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
/* CheckBBLivenessOnly */ true))
continue;
- // Use the optimistic version to get the freed objects, ignoring dead
- // branches etc.
- SmallVector<Value *, 8> Objects;
- if (!AA::getAssumedUnderlyingObjects(A, *DI.CB->getArgOperand(0), Objects,
- *this, DI.CB,
- UsedAssumedInformation)) {
- LLVM_DEBUG(
- dbgs()
- << "[H2S] Unexpected failure in getAssumedUnderlyingObjects!\n");
+ // Use the non-optimistic version to get the freed object.
+ Value *Obj = getUnderlyingObject(DI.FreedOp);
+ if (!Obj) {
+ LLVM_DEBUG(dbgs() << "[H2S] Unknown underlying object for free!\n");
DI.MightFreeUnknownObjects = true;
continue;
}
- // Check each object explicitly.
- for (auto *Obj : Objects) {
- // Free of null and undef can be ignored as no-ops (or UB in the latter
- // case).
- if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
- continue;
-
- CallBase *ObjCB = dyn_cast<CallBase>(Obj);
- if (!ObjCB) {
- LLVM_DEBUG(dbgs()
- << "[H2S] Free of a non-call object: " << *Obj << "\n");
- DI.MightFreeUnknownObjects = true;
- continue;
- }
+ // Free of null and undef can be ignored as no-ops (or UB in the latter
+ // case).
+ if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
+ continue;
- AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
- if (!AI) {
- LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
- << "\n");
- DI.MightFreeUnknownObjects = true;
- continue;
- }
+ CallBase *ObjCB = dyn_cast<CallBase>(Obj);
+ if (!ObjCB) {
+ LLVM_DEBUG(dbgs() << "[H2S] Free of a non-call object: " << *Obj
+ << "\n");
+ DI.MightFreeUnknownObjects = true;
+ continue;
+ }
- DI.PotentialAllocationCalls.insert(ObjCB);
+ AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
+ if (!AI) {
+ LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
+ << "\n");
+ DI.MightFreeUnknownObjects = true;
+ continue;
}
+
+ DI.PotentialAllocationCalls.insert(ObjCB);
}
};
@@ -7692,7 +7383,7 @@ bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
const Instruction *UserI) {
// The loaded value is unrelated to the pointer argument, no need to
// follow the users of the load.
- if (isa<LoadInst>(UserI))
+ if (isa<LoadInst>(UserI) || isa<ReturnInst>(UserI))
return false;
// By default we follow all uses assuming UserI might leak information on U,
@@ -7822,16 +7513,15 @@ struct AAMemoryLocationImpl : public AAMemoryLocation {
AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
: AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
- for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
- AccessKind2Accesses[u] = nullptr;
+ AccessKind2Accesses.fill(nullptr);
}
~AAMemoryLocationImpl() {
// The AccessSets are allocated via a BumpPtrAllocator, we call
// the destructor manually.
- for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
- if (AccessKind2Accesses[u])
- AccessKind2Accesses[u]->~AccessSet();
+ for (AccessSet *AS : AccessKind2Accesses)
+ if (AS)
+ AS->~AccessSet();
}
/// See AbstractAttribute::initialize(...).
@@ -7999,7 +7689,7 @@ protected:
/// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
/// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
- AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
+ std::array<AccessSet *, llvm::CTLog2<VALID_STATE>()> AccessKind2Accesses;
/// Categorize the pointer arguments of CB that might access memory in
/// AccessedLoc and update the state and access map accordingly.
@@ -8061,7 +7751,7 @@ void AAMemoryLocationImpl::categorizePtrValue(
<< Ptr << " ["
<< getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
- SmallVector<Value *, 8> Objects;
+ SmallSetVector<Value *, 8> Objects;
bool UsedAssumedInformation = false;
if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I,
UsedAssumedInformation,
@@ -8670,19 +8360,19 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
// Simplify the operands first.
bool UsedAssumedInformation = false;
- const auto &SimplifiedLHS =
- A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
- *this, UsedAssumedInformation);
- if (!SimplifiedLHS)
+ const auto &SimplifiedLHS = A.getAssumedSimplified(
+ IRPosition::value(*LHS, getCallBaseContext()), *this,
+ UsedAssumedInformation, AA::Interprocedural);
+ if (!SimplifiedLHS.has_value())
return true;
if (!SimplifiedLHS.value())
return false;
LHS = *SimplifiedLHS;
- const auto &SimplifiedRHS =
- A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
- *this, UsedAssumedInformation);
- if (!SimplifiedRHS)
+ const auto &SimplifiedRHS = A.getAssumedSimplified(
+ IRPosition::value(*RHS, getCallBaseContext()), *this,
+ UsedAssumedInformation, AA::Interprocedural);
+ if (!SimplifiedRHS.has_value())
return true;
if (!SimplifiedRHS.value())
return false;
@@ -8723,10 +8413,10 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
// Simplify the operand first.
bool UsedAssumedInformation = false;
- const auto &SimplifiedOpV =
- A.getAssumedSimplified(IRPosition::value(*OpV, getCallBaseContext()),
- *this, UsedAssumedInformation);
- if (!SimplifiedOpV)
+ const auto &SimplifiedOpV = A.getAssumedSimplified(
+ IRPosition::value(*OpV, getCallBaseContext()), *this,
+ UsedAssumedInformation, AA::Interprocedural);
+ if (!SimplifiedOpV.has_value())
return true;
if (!SimplifiedOpV.value())
return false;
@@ -8753,19 +8443,19 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
// Simplify the operands first.
bool UsedAssumedInformation = false;
- const auto &SimplifiedLHS =
- A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
- *this, UsedAssumedInformation);
- if (!SimplifiedLHS)
+ const auto &SimplifiedLHS = A.getAssumedSimplified(
+ IRPosition::value(*LHS, getCallBaseContext()), *this,
+ UsedAssumedInformation, AA::Interprocedural);
+ if (!SimplifiedLHS.has_value())
return true;
if (!SimplifiedLHS.value())
return false;
LHS = *SimplifiedLHS;
- const auto &SimplifiedRHS =
- A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
- *this, UsedAssumedInformation);
- if (!SimplifiedRHS)
+ const auto &SimplifiedRHS = A.getAssumedSimplified(
+ IRPosition::value(*RHS, getCallBaseContext()), *this,
+ UsedAssumedInformation, AA::Interprocedural);
+ if (!SimplifiedRHS.has_value())
return true;
if (!SimplifiedRHS.value())
return false;
@@ -8820,17 +8510,18 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
- auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
- IntegerRangeState &T, bool Stripped) -> bool {
+
+ IntegerRangeState T(getBitWidth());
+ auto VisitValueCB = [&](Value &V, const Instruction *CtxI) -> bool {
Instruction *I = dyn_cast<Instruction>(&V);
if (!I || isa<CallBase>(I)) {
// Simplify the operand first.
bool UsedAssumedInformation = false;
- const auto &SimplifiedOpV =
- A.getAssumedSimplified(IRPosition::value(V, getCallBaseContext()),
- *this, UsedAssumedInformation);
- if (!SimplifiedOpV)
+ const auto &SimplifiedOpV = A.getAssumedSimplified(
+ IRPosition::value(V, getCallBaseContext()), *this,
+ UsedAssumedInformation, AA::Interprocedural);
+ if (!SimplifiedOpV.has_value())
return true;
if (!SimplifiedOpV.value())
return false;
@@ -8880,13 +8571,7 @@ struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
return T.isValidState();
};
- IntegerRangeState T(getBitWidth());
-
- bool UsedAssumedInformation = false;
- if (!genericValueTraversal<IntegerRangeState>(A, getIRPosition(), *this, T,
- VisitValueCB, getCtxI(),
- UsedAssumedInformation,
- /* UseValueSimplify */ false))
+ if (!VisitValueCB(getAssociatedValue(), getCtxI()))
return indicatePessimisticFixpoint();
// Ensure that long def-use chains can't cause circular reasoning either by
@@ -8998,6 +8683,36 @@ struct AAPotentialConstantValuesImpl : AAPotentialConstantValues {
AAPotentialConstantValues::initialize(A);
}
+ bool fillSetWithConstantValues(Attributor &A, const IRPosition &IRP, SetTy &S,
+ bool &ContainsUndef) {
+ SmallVector<AA::ValueAndContext> Values;
+ bool UsedAssumedInformation = false;
+ if (!A.getAssumedSimplifiedValues(IRP, *this, Values, AA::Interprocedural,
+ UsedAssumedInformation)) {
+ if (!IRP.getAssociatedType()->isIntegerTy())
+ return false;
+ auto &PotentialValuesAA = A.getAAFor<AAPotentialConstantValues>(
+ *this, IRP, DepClassTy::REQUIRED);
+ if (!PotentialValuesAA.getState().isValidState())
+ return false;
+ ContainsUndef = PotentialValuesAA.getState().undefIsContained();
+ S = PotentialValuesAA.getState().getAssumedSet();
+ return true;
+ }
+
+ for (auto &It : Values) {
+ if (isa<UndefValue>(It.getValue()))
+ continue;
+ auto *CI = dyn_cast<ConstantInt>(It.getValue());
+ if (!CI)
+ return false;
+ S.insert(CI->getValue());
+ }
+ ContainsUndef = S.empty();
+
+ return true;
+ }
+
/// See AbstractAttribute::getAsStr().
const std::string getAsStr() const override {
std::string Str;
@@ -9186,50 +8901,22 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
Value *LHS = ICI->getOperand(0);
Value *RHS = ICI->getOperand(1);
- // Simplify the operands first.
- bool UsedAssumedInformation = false;
- const auto &SimplifiedLHS =
- A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
- *this, UsedAssumedInformation);
- if (!SimplifiedLHS)
- return ChangeStatus::UNCHANGED;
- if (!SimplifiedLHS.value())
- return indicatePessimisticFixpoint();
- LHS = *SimplifiedLHS;
-
- const auto &SimplifiedRHS =
- A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
- *this, UsedAssumedInformation);
- if (!SimplifiedRHS)
- return ChangeStatus::UNCHANGED;
- if (!SimplifiedRHS.value())
- return indicatePessimisticFixpoint();
- RHS = *SimplifiedRHS;
-
- if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
- return indicatePessimisticFixpoint();
-
- auto &LHSAA = A.getAAFor<AAPotentialConstantValues>(
- *this, IRPosition::value(*LHS), DepClassTy::REQUIRED);
- if (!LHSAA.isValidState())
- return indicatePessimisticFixpoint();
-
- auto &RHSAA = A.getAAFor<AAPotentialConstantValues>(
- *this, IRPosition::value(*RHS), DepClassTy::REQUIRED);
- if (!RHSAA.isValidState())
+ bool LHSContainsUndef = false, RHSContainsUndef = false;
+ SetTy LHSAAPVS, RHSAAPVS;
+ if (!fillSetWithConstantValues(A, IRPosition::value(*LHS), LHSAAPVS,
+ LHSContainsUndef) ||
+ !fillSetWithConstantValues(A, IRPosition::value(*RHS), RHSAAPVS,
+ RHSContainsUndef))
return indicatePessimisticFixpoint();
- const SetTy &LHSAAPVS = LHSAA.getAssumedSet();
- const SetTy &RHSAAPVS = RHSAA.getAssumedSet();
-
// TODO: make use of undef flag to limit potential values aggressively.
bool MaybeTrue = false, MaybeFalse = false;
const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
- if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
+ if (LHSContainsUndef && RHSContainsUndef) {
// The result of any comparison between undefs can be soundly replaced
// with undef.
unionAssumedWithUndef();
- } else if (LHSAA.undefIsContained()) {
+ } else if (LHSContainsUndef) {
for (const APInt &R : RHSAAPVS) {
bool CmpResult = calculateICmpInst(ICI, Zero, R);
MaybeTrue |= CmpResult;
@@ -9237,7 +8924,7 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
if (MaybeTrue & MaybeFalse)
return indicatePessimisticFixpoint();
}
- } else if (RHSAA.undefIsContained()) {
+ } else if (RHSContainsUndef) {
for (const APInt &L : LHSAAPVS) {
bool CmpResult = calculateICmpInst(ICI, L, Zero);
MaybeTrue |= CmpResult;
@@ -9269,29 +8956,7 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
Value *LHS = SI->getTrueValue();
Value *RHS = SI->getFalseValue();
- // Simplify the operands first.
bool UsedAssumedInformation = false;
- const auto &SimplifiedLHS =
- A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
- *this, UsedAssumedInformation);
- if (!SimplifiedLHS)
- return ChangeStatus::UNCHANGED;
- if (!SimplifiedLHS.value())
- return indicatePessimisticFixpoint();
- LHS = *SimplifiedLHS;
-
- const auto &SimplifiedRHS =
- A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
- *this, UsedAssumedInformation);
- if (!SimplifiedRHS)
- return ChangeStatus::UNCHANGED;
- if (!SimplifiedRHS.value())
- return indicatePessimisticFixpoint();
- RHS = *SimplifiedRHS;
-
- if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
- return indicatePessimisticFixpoint();
-
Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this,
UsedAssumedInformation);
@@ -9302,35 +8967,36 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
else if (C && *C && (*C)->isZeroValue())
OnlyRight = true;
- const AAPotentialConstantValues *LHSAA = nullptr, *RHSAA = nullptr;
- if (!OnlyRight) {
- LHSAA = &A.getAAFor<AAPotentialConstantValues>(
- *this, IRPosition::value(*LHS), DepClassTy::REQUIRED);
- if (!LHSAA->isValidState())
- return indicatePessimisticFixpoint();
- }
- if (!OnlyLeft) {
- RHSAA = &A.getAAFor<AAPotentialConstantValues>(
- *this, IRPosition::value(*RHS), DepClassTy::REQUIRED);
- if (!RHSAA->isValidState())
- return indicatePessimisticFixpoint();
- }
+ bool LHSContainsUndef = false, RHSContainsUndef = false;
+ SetTy LHSAAPVS, RHSAAPVS;
+ if (!OnlyRight && !fillSetWithConstantValues(A, IRPosition::value(*LHS),
+ LHSAAPVS, LHSContainsUndef))
+ return indicatePessimisticFixpoint();
- if (!LHSAA || !RHSAA) {
+ if (!OnlyLeft && !fillSetWithConstantValues(A, IRPosition::value(*RHS),
+ RHSAAPVS, RHSContainsUndef))
+ return indicatePessimisticFixpoint();
+
+ if (OnlyLeft || OnlyRight) {
// select (true/false), lhs, rhs
- auto *OpAA = LHSAA ? LHSAA : RHSAA;
+ auto *OpAA = OnlyLeft ? &LHSAAPVS : &RHSAAPVS;
+ auto Undef = OnlyLeft ? LHSContainsUndef : RHSContainsUndef;
- if (OpAA->undefIsContained())
+ if (Undef)
unionAssumedWithUndef();
- else
- unionAssumed(*OpAA);
+ else {
+ for (auto &It : *OpAA)
+ unionAssumed(It);
+ }
- } else if (LHSAA->undefIsContained() && RHSAA->undefIsContained()) {
+ } else if (LHSContainsUndef && RHSContainsUndef) {
// select i1 *, undef , undef => undef
unionAssumedWithUndef();
} else {
- unionAssumed(*LHSAA);
- unionAssumed(*RHSAA);
+ for (auto &It : LHSAAPVS)
+ unionAssumed(It);
+ for (auto &It : RHSAAPVS)
+ unionAssumed(It);
}
return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
: ChangeStatus::CHANGED;
@@ -9344,26 +9010,16 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
Value *Src = CI->getOperand(0);
- // Simplify the operand first.
- bool UsedAssumedInformation = false;
- const auto &SimplifiedSrc =
- A.getAssumedSimplified(IRPosition::value(*Src, getCallBaseContext()),
- *this, UsedAssumedInformation);
- if (!SimplifiedSrc)
- return ChangeStatus::UNCHANGED;
- if (!SimplifiedSrc.value())
+ bool SrcContainsUndef = false;
+ SetTy SrcPVS;
+ if (!fillSetWithConstantValues(A, IRPosition::value(*Src), SrcPVS,
+ SrcContainsUndef))
return indicatePessimisticFixpoint();
- Src = *SimplifiedSrc;
- auto &SrcAA = A.getAAFor<AAPotentialConstantValues>(
- *this, IRPosition::value(*Src), DepClassTy::REQUIRED);
- if (!SrcAA.isValidState())
- return indicatePessimisticFixpoint();
- const SetTy &SrcAAPVS = SrcAA.getAssumedSet();
- if (SrcAA.undefIsContained())
+ if (SrcContainsUndef)
unionAssumedWithUndef();
else {
- for (const APInt &S : SrcAAPVS) {
+ for (const APInt &S : SrcPVS) {
APInt T = calculateCastInst(CI, S, ResultBitWidth);
unionAssumed(T);
}
@@ -9377,53 +9033,26 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
Value *LHS = BinOp->getOperand(0);
Value *RHS = BinOp->getOperand(1);
- // Simplify the operands first.
- bool UsedAssumedInformation = false;
- const auto &SimplifiedLHS =
- A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
- *this, UsedAssumedInformation);
- if (!SimplifiedLHS)
- return ChangeStatus::UNCHANGED;
- if (!SimplifiedLHS.value())
+ bool LHSContainsUndef = false, RHSContainsUndef = false;
+ SetTy LHSAAPVS, RHSAAPVS;
+ if (!fillSetWithConstantValues(A, IRPosition::value(*LHS), LHSAAPVS,
+ LHSContainsUndef) ||
+ !fillSetWithConstantValues(A, IRPosition::value(*RHS), RHSAAPVS,
+ RHSContainsUndef))
return indicatePessimisticFixpoint();
- LHS = *SimplifiedLHS;
- const auto &SimplifiedRHS =
- A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
- *this, UsedAssumedInformation);
- if (!SimplifiedRHS)
- return ChangeStatus::UNCHANGED;
- if (!SimplifiedRHS.value())
- return indicatePessimisticFixpoint();
- RHS = *SimplifiedRHS;
-
- if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
- return indicatePessimisticFixpoint();
-
- auto &LHSAA = A.getAAFor<AAPotentialConstantValues>(
- *this, IRPosition::value(*LHS), DepClassTy::REQUIRED);
- if (!LHSAA.isValidState())
- return indicatePessimisticFixpoint();
-
- auto &RHSAA = A.getAAFor<AAPotentialConstantValues>(
- *this, IRPosition::value(*RHS), DepClassTy::REQUIRED);
- if (!RHSAA.isValidState())
- return indicatePessimisticFixpoint();
-
- const SetTy &LHSAAPVS = LHSAA.getAssumedSet();
- const SetTy &RHSAAPVS = RHSAA.getAssumedSet();
const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
// TODO: make use of undef flag to limit potential values aggressively.
- if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
+ if (LHSContainsUndef && RHSContainsUndef) {
if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
return indicatePessimisticFixpoint();
- } else if (LHSAA.undefIsContained()) {
+ } else if (LHSContainsUndef) {
for (const APInt &R : RHSAAPVS) {
if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
return indicatePessimisticFixpoint();
}
- } else if (RHSAA.undefIsContained()) {
+ } else if (RHSContainsUndef) {
for (const APInt &L : LHSAAPVS) {
if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
return indicatePessimisticFixpoint();
@@ -9440,35 +9069,6 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
: ChangeStatus::CHANGED;
}
- ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
- auto AssumedBefore = getAssumed();
- for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
- Value *IncomingValue = PHI->getIncomingValue(u);
-
- // Simplify the operand first.
- bool UsedAssumedInformation = false;
- const auto &SimplifiedIncomingValue = A.getAssumedSimplified(
- IRPosition::value(*IncomingValue, getCallBaseContext()), *this,
- UsedAssumedInformation);
- if (!SimplifiedIncomingValue)
- continue;
- if (!SimplifiedIncomingValue.value())
- return indicatePessimisticFixpoint();
- IncomingValue = *SimplifiedIncomingValue;
-
- auto &PotentialValuesAA = A.getAAFor<AAPotentialConstantValues>(
- *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED);
- if (!PotentialValuesAA.isValidState())
- return indicatePessimisticFixpoint();
- if (PotentialValuesAA.undefIsContained())
- unionAssumedWithUndef();
- else
- unionAssumed(PotentialValuesAA.getAssumed());
- }
- return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
- : ChangeStatus::CHANGED;
- }
-
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
Value &V = getAssociatedValue();
@@ -9486,9 +9086,6 @@ struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
if (auto *BinOp = dyn_cast<BinaryOperator>(I))
return updateWithBinaryOperator(A, BinOp);
- if (auto *PHI = dyn_cast<PHINode>(I))
- return updateWithPHINode(A, PHI);
-
return indicatePessimisticFixpoint();
}
@@ -9642,7 +9239,8 @@ struct AANoUndefImpl : AANoUndef {
// A position whose simplified value does not have any value is
// considered to be dead. We don't manifest noundef in such positions for
// the same reason above.
- if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation)
+ if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation,
+ AA::Interprocedural)
.has_value())
return ChangeStatus::UNCHANGED;
return AANoUndef::manifest(A);
@@ -9663,11 +9261,19 @@ struct AANoUndefFloating : public AANoUndefImpl {
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
- auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
- AANoUndef::StateType &T, bool Stripped) -> bool {
+
+ SmallVector<AA::ValueAndContext> Values;
+ bool UsedAssumedInformation = false;
+ if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
+ AA::AnyScope, UsedAssumedInformation)) {
+ Values.push_back({getAssociatedValue(), getCtxI()});
+ }
+
+ StateType T;
+ auto VisitValueCB = [&](Value &V, const Instruction *CtxI) -> bool {
const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
DepClassTy::REQUIRED);
- if (!Stripped && this == &AA) {
+ if (this == &AA) {
T.indicatePessimisticFixpoint();
} else {
const AANoUndef::StateType &S =
@@ -9677,12 +9283,9 @@ struct AANoUndefFloating : public AANoUndefImpl {
return T.isValidState();
};
- StateType T;
- bool UsedAssumedInformation = false;
- if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
- VisitValueCB, getCtxI(),
- UsedAssumedInformation))
- return indicatePessimisticFixpoint();
+ for (const auto &VAC : Values)
+ if (!VisitValueCB(*VAC.getValue(), VAC.getCtxI()))
+ return indicatePessimisticFixpoint();
return clampStateAndIndicateChange(getState(), T);
}
@@ -9782,8 +9385,7 @@ struct AACallEdgesCallSite : public AACallEdgesImpl {
ChangeStatus updateImpl(Attributor &A) override {
ChangeStatus Change = ChangeStatus::UNCHANGED;
- auto VisitValue = [&](Value &V, const Instruction *CtxI, bool &HasUnknown,
- bool Stripped) -> bool {
+ auto VisitValue = [&](Value &V, const Instruction *CtxI) -> bool {
if (Function *Fn = dyn_cast<Function>(&V)) {
addCalledFunction(Fn, Change);
} else {
@@ -9795,17 +9397,17 @@ struct AACallEdgesCallSite : public AACallEdgesImpl {
return true;
};
+ SmallVector<AA::ValueAndContext> Values;
// Process any value that we might call.
- auto ProcessCalledOperand = [&](Value *V) {
- bool DummyValue = false;
+ auto ProcessCalledOperand = [&](Value *V, Instruction *CtxI) {
bool UsedAssumedInformation = false;
- if (!genericValueTraversal<bool>(A, IRPosition::value(*V), *this,
- DummyValue, VisitValue, nullptr,
- UsedAssumedInformation, false)) {
- // If we haven't gone through all values, assume that there are unknown
- // callees.
- setHasUnknownCallee(true, Change);
+ Values.clear();
+ if (!A.getAssumedSimplifiedValues(IRPosition::value(*V), *this, Values,
+ AA::AnyScope, UsedAssumedInformation)) {
+ Values.push_back({*V, CtxI});
}
+ for (auto &VAC : Values)
+ VisitValue(*VAC.getValue(), VAC.getCtxI());
};
CallBase *CB = cast<CallBase>(getCtxI());
@@ -9828,13 +9430,13 @@ struct AACallEdgesCallSite : public AACallEdgesImpl {
}
// The most simple case.
- ProcessCalledOperand(CB->getCalledOperand());
+ ProcessCalledOperand(CB->getCalledOperand(), CB);
// Process callback functions.
SmallVector<const Use *, 4u> CallbackUses;
AbstractCallSite::getCallbackUses(*CB, CallbackUses);
for (const Use *U : CallbackUses)
- ProcessCalledOperand(U->get());
+ ProcessCalledOperand(U->get(), CB);
return Change;
}
@@ -9920,8 +9522,11 @@ private:
for (auto *AAEdges : AAEdgesList) {
if (AAEdges->hasUnknownCallee()) {
- if (!CanReachUnknownCallee)
+ if (!CanReachUnknownCallee) {
+ LLVM_DEBUG(dbgs()
+ << "[QueryResolver] Edges include unknown callee!\n");
Change = ChangeStatus::CHANGED;
+ }
CanReachUnknownCallee = true;
return Change;
}
@@ -10065,14 +9670,10 @@ public:
}
bool instructionCanReach(Attributor &A, const Instruction &Inst,
- const Function &Fn,
- bool UseBackwards) const override {
+ const Function &Fn) const override {
if (!isValidState())
return true;
- if (UseBackwards)
- return AA::isPotentiallyReachable(A, Inst, Fn, *this, nullptr);
-
const auto &Reachability = A.getAAFor<AAReachability>(
*this, IRPosition::function(*getAssociatedFunction()),
DepClassTy::REQUIRED);
@@ -10085,8 +9686,11 @@ public:
// This is a hack for us to be able to cache queries.
auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
QueryResolver &InstQSet = NonConstThis->InstQueries[&Inst];
- if (!AllKnown)
+ if (!AllKnown) {
+ LLVM_DEBUG(dbgs() << "[AAReachability] Not all reachable edges known, "
+ "may reach unknown callee!\n");
InstQSet.CanReachUnknownCallee = true;
+ }
return InstQSet.isReachable(A, *NonConstThis, CallEdges, Fn);
}
@@ -10119,8 +9723,11 @@ public:
bool AllKnown =
getReachableCallEdges(A, *Reachability, *InstPair.first, CallEdges);
// Update will return change if we this effects any queries.
- if (!AllKnown)
+ if (!AllKnown) {
+ LLVM_DEBUG(dbgs() << "[AAReachability] Not all reachable edges "
+ "known, may reach unknown callee!\n");
InstPair.second.CanReachUnknownCallee = true;
+ }
Change |= InstPair.second.update(A, *this, CallEdges);
}
}
@@ -10133,8 +9740,11 @@ public:
WholeFunction.Reachable.size() + WholeFunction.Unreachable.size();
return "FunctionReachability [" +
- std::to_string(WholeFunction.Reachable.size()) + "," +
- std::to_string(QueryCount) + "]";
+ (canReachUnknownCallee()
+ ? "unknown"
+ : (std::to_string(WholeFunction.Reachable.size()) + "," +
+ std::to_string(QueryCount))) +
+ "]";
}
void trackStatistics() const override {}
@@ -10156,6 +9766,822 @@ private:
};
} // namespace
+template <typename AAType>
+static Optional<Constant *>
+askForAssumedConstant(Attributor &A, const AbstractAttribute &QueryingAA,
+ const IRPosition &IRP, Type &Ty) {
+ if (!Ty.isIntegerTy())
+ return nullptr;
+
+ // This will also pass the call base context.
+ const auto &AA = A.getAAFor<AAType>(QueryingAA, IRP, DepClassTy::NONE);
+
+ Optional<Constant *> COpt = AA.getAssumedConstant(A);
+
+ if (!COpt.has_value()) {
+ A.recordDependence(AA, QueryingAA, DepClassTy::OPTIONAL);
+ return llvm::None;
+ }
+ if (auto *C = COpt.value()) {
+ A.recordDependence(AA, QueryingAA, DepClassTy::OPTIONAL);
+ return C;
+ }
+ return nullptr;
+}
+
+Value *AAPotentialValues::getSingleValue(
+ Attributor &A, const AbstractAttribute &AA, const IRPosition &IRP,
+ SmallVectorImpl<AA::ValueAndContext> &Values) {
+ Type &Ty = *IRP.getAssociatedType();
+ Optional<Value *> V;
+ for (auto &It : Values) {
+ V = AA::combineOptionalValuesInAAValueLatice(V, It.getValue(), &Ty);
+ if (V.has_value() && !V.value())
+ break;
+ }
+ if (!V.has_value())
+ return UndefValue::get(&Ty);
+ return V.value();
+}
+
+namespace {
+struct AAPotentialValuesImpl : AAPotentialValues {
+ using StateType = PotentialLLVMValuesState;
+
+ AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
+ : AAPotentialValues(IRP, A) {}
+
+ /// See AbstractAttribute::initialize(..).
+ void initialize(Attributor &A) override {
+ if (A.hasSimplificationCallback(getIRPosition())) {
+ indicatePessimisticFixpoint();
+ return;
+ }
+ Value *Stripped = getAssociatedValue().stripPointerCasts();
+ if (isa<Constant>(Stripped)) {
+ addValue(A, getState(), *Stripped, getCtxI(), AA::AnyScope,
+ getAnchorScope());
+ indicateOptimisticFixpoint();
+ return;
+ }
+ AAPotentialValues::initialize(A);
+ }
+
+ /// See AbstractAttribute::getAsStr().
+ const std::string getAsStr() const override {
+ std::string Str;
+ llvm::raw_string_ostream OS(Str);
+ OS << getState();
+ return OS.str();
+ }
+
+ template <typename AAType>
+ static Optional<Value *> askOtherAA(Attributor &A,
+ const AbstractAttribute &AA,
+ const IRPosition &IRP, Type &Ty) {
+ if (isa<Constant>(IRP.getAssociatedValue()))
+ return &IRP.getAssociatedValue();
+ Optional<Constant *> C = askForAssumedConstant<AAType>(A, AA, IRP, Ty);
+ if (!C)
+ return llvm::None;
+ if (C.value())
+ if (auto *CC = AA::getWithType(**C, Ty))
+ return CC;
+ return nullptr;
+ }
+
+ void addValue(Attributor &A, StateType &State, Value &V,
+ const Instruction *CtxI, AA::ValueScope S,
+ Function *AnchorScope) const {
+
+ IRPosition ValIRP = IRPosition::value(V);
+ if (auto *CB = dyn_cast_or_null<CallBase>(CtxI)) {
+ for (auto &U : CB->args()) {
+ if (U.get() != &V)
+ continue;
+ ValIRP = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
+ break;
+ }
+ }
+
+ Value *VPtr = &V;
+ if (ValIRP.getAssociatedType()->isIntegerTy()) {
+ Type &Ty = *getAssociatedType();
+ Optional<Value *> SimpleV =
+ askOtherAA<AAValueConstantRange>(A, *this, ValIRP, Ty);
+ if (SimpleV.has_value() && !SimpleV.value()) {
+ auto &PotentialConstantsAA = A.getAAFor<AAPotentialConstantValues>(
+ *this, ValIRP, DepClassTy::OPTIONAL);
+ if (PotentialConstantsAA.isValidState()) {
+ for (auto &It : PotentialConstantsAA.getAssumedSet()) {
+ State.unionAssumed({{*ConstantInt::get(&Ty, It), nullptr}, S});
+ }
+ assert(!PotentialConstantsAA.undefIsContained() &&
+ "Undef should be an explicit value!");
+ return;
+ }
+ }
+ if (!SimpleV.has_value())
+ return;
+
+ if (SimpleV.value())
+ VPtr = SimpleV.value();
+ }
+
+ if (isa<ConstantInt>(VPtr))
+ CtxI = nullptr;
+ if (!AA::isValidInScope(*VPtr, AnchorScope))
+ S = AA::ValueScope(S | AA::Interprocedural);
+
+ State.unionAssumed({{*VPtr, CtxI}, S});
+ }
+
+ /// Helper struct to tie a value+context pair together with the scope for
+ /// which this is the simplified version.
+ struct ItemInfo {
+ AA::ValueAndContext I;
+ AA::ValueScope S;
+ };
+
+ bool recurseForValue(Attributor &A, const IRPosition &IRP, AA::ValueScope S) {
+ SmallMapVector<AA::ValueAndContext, int, 8> ValueScopeMap;
+ for (auto CS : {AA::Intraprocedural, AA::Interprocedural}) {
+ if (!(CS & S))
+ continue;
+
+ bool UsedAssumedInformation = false;
+ SmallVector<AA::ValueAndContext> Values;
+ if (!A.getAssumedSimplifiedValues(IRP, this, Values, CS,
+ UsedAssumedInformation))
+ return false;
+
+ for (auto &It : Values)
+ ValueScopeMap[It] += CS;
+ }
+ for (auto &It : ValueScopeMap)
+ addValue(A, getState(), *It.first.getValue(), It.first.getCtxI(),
+ AA::ValueScope(It.second), getAnchorScope());
+
+ return true;
+ }
+
+ void giveUpOnIntraprocedural(Attributor &A) {
+ auto NewS = StateType::getBestState(getState());
+ for (auto &It : getAssumedSet()) {
+ if (It.second == AA::Intraprocedural)
+ continue;
+ addValue(A, NewS, *It.first.getValue(), It.first.getCtxI(),
+ AA::Interprocedural, getAnchorScope());
+ }
+ assert(!undefIsContained() && "Undef should be an explicit value!");
+ addValue(A, NewS, getAssociatedValue(), getCtxI(), AA::Intraprocedural,
+ getAnchorScope());
+ getState() = NewS;
+ }
+
+ /// See AbstractState::indicatePessimisticFixpoint(...).
+ ChangeStatus indicatePessimisticFixpoint() override {
+ getState() = StateType::getBestState(getState());
+ getState().unionAssumed({{getAssociatedValue(), getCtxI()}, AA::AnyScope});
+ AAPotentialValues::indicateOptimisticFixpoint();
+ return ChangeStatus::CHANGED;
+ }
+
+ /// See AbstractAttribute::updateImpl(...).
+ ChangeStatus updateImpl(Attributor &A) override {
+ return indicatePessimisticFixpoint();
+ }
+
+ /// See AbstractAttribute::manifest(...).
+ ChangeStatus manifest(Attributor &A) override {
+ SmallVector<AA::ValueAndContext> Values;
+ for (AA::ValueScope S : {AA::Interprocedural, AA::Intraprocedural}) {
+ Values.clear();
+ if (!getAssumedSimplifiedValues(A, Values, S))
+ continue;
+ Value &OldV = getAssociatedValue();
+ if (isa<UndefValue>(OldV))
+ continue;
+ Value *NewV = getSingleValue(A, *this, getIRPosition(), Values);
+ if (!NewV || NewV == &OldV)
+ continue;
+ if (getCtxI() &&
+ !AA::isValidAtPosition({*NewV, *getCtxI()}, A.getInfoCache()))
+ continue;
+ if (A.changeAfterManifest(getIRPosition(), *NewV))
+ return ChangeStatus::CHANGED;
+ }
+ return ChangeStatus::UNCHANGED;
+ }
+
+ bool getAssumedSimplifiedValues(Attributor &A,
+ SmallVectorImpl<AA::ValueAndContext> &Values,
+ AA::ValueScope S) const override {
+ if (!isValidState())
+ return false;
+ for (auto &It : getAssumedSet())
+ if (It.second & S)
+ Values.push_back(It.first);
+ assert(!undefIsContained() && "Undef should be an explicit value!");
+ return true;
+ }
+};
+
+struct AAPotentialValuesFloating : AAPotentialValuesImpl {
+ AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
+ : AAPotentialValuesImpl(IRP, A) {}
+
+ /// See AbstractAttribute::updateImpl(...).
+ ChangeStatus updateImpl(Attributor &A) override {
+ auto AssumedBefore = getAssumed();
+
+ genericValueTraversal(A);
+
+ return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED
+ : ChangeStatus::CHANGED;
+ }
+
+ /// Helper struct to remember which AAIsDead instances we actually used.
+ struct LivenessInfo {
+ const AAIsDead *LivenessAA = nullptr;
+ bool AnyDead = false;
+ };
+
+ /// Check if \p Cmp is a comparison we can simplify.
+ ///
+ /// We handle multiple cases, one in which at least one operand is an
+ /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other
+ /// operand. Return true if successful, in that case Worklist will be updated.
+ bool handleCmp(Attributor &A, CmpInst &Cmp, ItemInfo II,
+ SmallVectorImpl<ItemInfo> &Worklist) {
+ Value *LHS = Cmp.getOperand(0);
+ Value *RHS = Cmp.getOperand(1);
+
+ // Simplify the operands first.
+ bool UsedAssumedInformation = false;
+ const auto &SimplifiedLHS = A.getAssumedSimplified(
+ IRPosition::value(*LHS, getCallBaseContext()), *this,
+ UsedAssumedInformation, AA::Intraprocedural);
+ if (!SimplifiedLHS.has_value())
+ return true;
+ if (!SimplifiedLHS.value())
+ return false;
+ LHS = *SimplifiedLHS;
+
+ const auto &SimplifiedRHS = A.getAssumedSimplified(
+ IRPosition::value(*RHS, getCallBaseContext()), *this,
+ UsedAssumedInformation, AA::Intraprocedural);
+ if (!SimplifiedRHS.has_value())
+ return true;
+ if (!SimplifiedRHS.value())
+ return false;
+ RHS = *SimplifiedRHS;
+
+ LLVMContext &Ctx = Cmp.getContext();
+ // Handle the trivial case first in which we don't even need to think about
+ // null or non-null.
+ if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) {
+ Constant *NewV =
+ ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual());
+ addValue(A, getState(), *NewV, /* CtxI */ nullptr, II.S,
+ getAnchorScope());
+ return true;
+ }
+
+ // From now on we only handle equalities (==, !=).
+ ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp);
+ if (!ICmp || !ICmp->isEquality())
+ return false;
+
+ bool LHSIsNull = isa<ConstantPointerNull>(LHS);
+ bool RHSIsNull = isa<ConstantPointerNull>(RHS);
+ if (!LHSIsNull && !RHSIsNull)
+ return false;
+
+ // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
+ // non-nullptr operand and if we assume it's non-null we can conclude the
+ // result of the comparison.
+ assert((LHSIsNull || RHSIsNull) &&
+ "Expected nullptr versus non-nullptr comparison at this point");
+
+ // The index is the operand that we assume is not null.
+ unsigned PtrIdx = LHSIsNull;
+ auto &PtrNonNullAA = A.getAAFor<AANonNull>(
+ *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
+ DepClassTy::REQUIRED);
+ if (!PtrNonNullAA.isAssumedNonNull())
+ return false;
+
+ // The new value depends on the predicate, true for != and false for ==.
+ Constant *NewV = ConstantInt::get(Type::getInt1Ty(Ctx),
+ ICmp->getPredicate() == CmpInst::ICMP_NE);
+ addValue(A, getState(), *NewV, /* CtxI */ nullptr, II.S, getAnchorScope());
+ return true;
+ }
+
+ bool handleSelectInst(Attributor &A, SelectInst &SI, ItemInfo II,
+ SmallVectorImpl<ItemInfo> &Worklist) {
+ const Instruction *CtxI = II.I.getCtxI();
+ bool UsedAssumedInformation = false;
+
+ Optional<Constant *> C =
+ A.getAssumedConstant(*SI.getCondition(), *this, UsedAssumedInformation);
+ bool NoValueYet = !C.has_value();
+ if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
+ return true;
+ if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
+ if (CI->isZero())
+ Worklist.push_back({{*SI.getFalseValue(), CtxI}, II.S});
+ else
+ Worklist.push_back({{*SI.getTrueValue(), CtxI}, II.S});
+ } else {
+ // We could not simplify the condition, assume both values.
+ Worklist.push_back({{*SI.getTrueValue(), CtxI}, II.S});
+ Worklist.push_back({{*SI.getFalseValue(), CtxI}, II.S});
+ }
+ return true;
+ }
+
+ bool handleLoadInst(Attributor &A, LoadInst &LI, ItemInfo II,
+ SmallVectorImpl<ItemInfo> &Worklist) {
+ SmallSetVector<Value *, 4> PotentialCopies;
+ SmallSetVector<Instruction *, 4> PotentialValueOrigins;
+ bool UsedAssumedInformation = false;
+ if (!AA::getPotentiallyLoadedValues(A, LI, PotentialCopies,
+ PotentialValueOrigins, *this,
+ UsedAssumedInformation,
+ /* OnlyExact */ true)) {
+ LLVM_DEBUG(dbgs() << "[AAPotentialValues] Failed to get potentially "
+ "loaded values for load instruction "
+ << LI << "\n");
+ return false;
+ }
+
+ // Do not simplify loads that are only used in llvm.assume if we cannot also
+ // remove all stores that may feed into the load. The reason is that the
+ // assume is probably worth something as long as the stores are around.
+ InformationCache &InfoCache = A.getInfoCache();
+ if (InfoCache.isOnlyUsedByAssume(LI)) {
+ if (!llvm::all_of(PotentialValueOrigins, [&](Instruction *I) {
+ if (!I)
+ return true;
+ if (auto *SI = dyn_cast<StoreInst>(I))
+ return A.isAssumedDead(SI->getOperandUse(0), this,
+ /* LivenessAA */ nullptr,
+ UsedAssumedInformation,
+ /* CheckBBLivenessOnly */ false);
+ return A.isAssumedDead(*I, this, /* LivenessAA */ nullptr,
+ UsedAssumedInformation,
+ /* CheckBBLivenessOnly */ false);
+ })) {
+      LLVM_DEBUG(dbgs() << "[AAPotentialValues] Load is only used by assumes "
+ "and we cannot delete all the stores: "
+ << LI << "\n");
+ return false;
+ }
+ }
+
+    // Values have to be dynamically unique or we lose the fact that a
+ // single llvm::Value might represent two runtime values (e.g.,
+ // stack locations in different recursive calls).
+ const Instruction *CtxI = II.I.getCtxI();
+ bool ScopeIsLocal = (II.S & AA::Intraprocedural);
+ bool AllLocal = ScopeIsLocal;
+ bool DynamicallyUnique = llvm::all_of(PotentialCopies, [&](Value *PC) {
+ AllLocal &= AA::isValidInScope(*PC, getAnchorScope());
+ return AA::isDynamicallyUnique(A, *this, *PC);
+ });
+ if (!DynamicallyUnique) {
+ LLVM_DEBUG(dbgs() << "[AAPotentialValues] Not all potentially loaded "
+ "values are dynamically unique: "
+ << LI << "\n");
+ return false;
+ }
+
+ for (auto *PotentialCopy : PotentialCopies) {
+ if (AllLocal) {
+ Worklist.push_back({{*PotentialCopy, CtxI}, II.S});
+ } else {
+ Worklist.push_back({{*PotentialCopy, CtxI}, AA::Interprocedural});
+ }
+ }
+ if (!AllLocal && ScopeIsLocal)
+ addValue(A, getState(), LI, CtxI, AA::Intraprocedural, getAnchorScope());
+ return true;
+ }
+
+ bool handlePHINode(
+ Attributor &A, PHINode &PHI, ItemInfo II,
+ SmallVectorImpl<ItemInfo> &Worklist,
+ SmallMapVector<const Function *, LivenessInfo, 4> &LivenessAAs) {
+ auto GetLivenessInfo = [&](const Function &F) -> LivenessInfo & {
+ LivenessInfo &LI = LivenessAAs[&F];
+ if (!LI.LivenessAA)
+ LI.LivenessAA = &A.getAAFor<AAIsDead>(*this, IRPosition::function(F),
+ DepClassTy::NONE);
+ return LI;
+ };
+
+ LivenessInfo &LI = GetLivenessInfo(*PHI.getFunction());
+ for (unsigned u = 0, e = PHI.getNumIncomingValues(); u < e; u++) {
+ BasicBlock *IncomingBB = PHI.getIncomingBlock(u);
+ if (LI.LivenessAA->isEdgeDead(IncomingBB, PHI.getParent())) {
+ LI.AnyDead = true;
+ continue;
+ }
+ Worklist.push_back(
+ {{*PHI.getIncomingValue(u), IncomingBB->getTerminator()}, II.S});
+ }
+ return true;
+ }
+
+  /// Use the generic, non-optimistic InstSimplify functionality if we managed to
+ /// simplify any operand of the instruction \p I. Return true if successful,
+ /// in that case Worklist will be updated.
+ bool handleGenericInst(Attributor &A, Instruction &I, ItemInfo II,
+ SmallVectorImpl<ItemInfo> &Worklist) {
+ bool SomeSimplified = false;
+ bool UsedAssumedInformation = false;
+
+ SmallVector<Value *, 8> NewOps(I.getNumOperands());
+ int Idx = 0;
+ for (Value *Op : I.operands()) {
+ const auto &SimplifiedOp = A.getAssumedSimplified(
+ IRPosition::value(*Op, getCallBaseContext()), *this,
+ UsedAssumedInformation, AA::Intraprocedural);
+ // If we are not sure about any operand we are not sure about the entire
+ // instruction, we'll wait.
+ if (!SimplifiedOp.has_value())
+ return true;
+
+ if (SimplifiedOp.value())
+ NewOps[Idx] = SimplifiedOp.value();
+ else
+ NewOps[Idx] = Op;
+
+ SomeSimplified |= (NewOps[Idx] != Op);
+ ++Idx;
+ }
+
+ // We won't bother with the InstSimplify interface if we didn't simplify any
+ // operand ourselves.
+ if (!SomeSimplified)
+ return false;
+
+ InformationCache &InfoCache = A.getInfoCache();
+ Function *F = I.getFunction();
+ const auto *DT =
+ InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
+ const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
+ auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
+ OptimizationRemarkEmitter *ORE = nullptr;
+
+ const DataLayout &DL = I.getModule()->getDataLayout();
+ SimplifyQuery Q(DL, TLI, DT, AC, &I);
+ Value *NewV = simplifyInstructionWithOperands(&I, NewOps, Q, ORE);
+ if (!NewV || NewV == &I)
+ return false;
+
+ LLVM_DEBUG(dbgs() << "Generic inst " << I << " assumed simplified to "
+ << *NewV << "\n");
+ Worklist.push_back({{*NewV, II.I.getCtxI()}, II.S});
+ return true;
+ }
+
+ bool simplifyInstruction(
+ Attributor &A, Instruction &I, ItemInfo II,
+ SmallVectorImpl<ItemInfo> &Worklist,
+ SmallMapVector<const Function *, LivenessInfo, 4> &LivenessAAs) {
+ if (auto *CI = dyn_cast<CmpInst>(&I))
+ if (handleCmp(A, *CI, II, Worklist))
+ return true;
+
+ switch (I.getOpcode()) {
+ case Instruction::Select:
+ return handleSelectInst(A, cast<SelectInst>(I), II, Worklist);
+ case Instruction::PHI:
+ return handlePHINode(A, cast<PHINode>(I), II, Worklist, LivenessAAs);
+ case Instruction::Load:
+ return handleLoadInst(A, cast<LoadInst>(I), II, Worklist);
+ default:
+ return handleGenericInst(A, I, II, Worklist);
+ };
+ return false;
+ }
+
+ void genericValueTraversal(Attributor &A) {
+ SmallMapVector<const Function *, LivenessInfo, 4> LivenessAAs;
+
+ Value *InitialV = &getAssociatedValue();
+ SmallSet<AA::ValueAndContext, 16> Visited;
+ SmallVector<ItemInfo, 16> Worklist;
+ Worklist.push_back({{*InitialV, getCtxI()}, AA::AnyScope});
+
+ int Iteration = 0;
+ do {
+ ItemInfo II = Worklist.pop_back_val();
+ Value *V = II.I.getValue();
+ assert(V);
+ const Instruction *CtxI = II.I.getCtxI();
+ AA::ValueScope S = II.S;
+
+ // Check if we should process the current value. To prevent endless
+ // recursion keep a record of the values we followed!
+ if (!Visited.insert(II.I).second)
+ continue;
+
+ // Make sure we limit the compile time for complex expressions.
+ if (Iteration++ >= MaxPotentialValuesIterations) {
+ LLVM_DEBUG(dbgs() << "Generic value traversal reached iteration limit: "
+ << Iteration << "!\n");
+ addValue(A, getState(), *V, CtxI, S, getAnchorScope());
+ continue;
+ }
+
+ // Explicitly look through calls with a "returned" attribute if we do
+ // not have a pointer as stripPointerCasts only works on them.
+ Value *NewV = nullptr;
+ if (V->getType()->isPointerTy()) {
+ NewV = AA::getWithType(*V->stripPointerCasts(), *V->getType());
+ } else {
+ auto *CB = dyn_cast<CallBase>(V);
+ if (CB && CB->getCalledFunction()) {
+ for (Argument &Arg : CB->getCalledFunction()->args())
+ if (Arg.hasReturnedAttr()) {
+ NewV = CB->getArgOperand(Arg.getArgNo());
+ break;
+ }
+ }
+ }
+ if (NewV && NewV != V) {
+ Worklist.push_back({{*NewV, CtxI}, S});
+ continue;
+ }
+
+ if (auto *I = dyn_cast<Instruction>(V)) {
+ if (simplifyInstruction(A, *I, II, Worklist, LivenessAAs))
+ continue;
+ }
+
+ if (V != InitialV || isa<Argument>(V))
+ if (recurseForValue(A, IRPosition::value(*V), II.S))
+ continue;
+
+ // If we haven't stripped anything we give up.
+ if (V == InitialV && CtxI == getCtxI()) {
+ indicatePessimisticFixpoint();
+ return;
+ }
+
+ addValue(A, getState(), *V, CtxI, S, getAnchorScope());
+ } while (!Worklist.empty());
+
+    // If we actually used liveness information we have to record a
+ // dependence.
+ for (auto &It : LivenessAAs)
+ if (It.second.AnyDead)
+ A.recordDependence(*It.second.LivenessAA, *this, DepClassTy::OPTIONAL);
+ }
+
+ /// See AbstractAttribute::trackStatistics()
+ void trackStatistics() const override {
+ STATS_DECLTRACK_FLOATING_ATTR(potential_values)
+ }
+};
+
+struct AAPotentialValuesArgument final : AAPotentialValuesImpl {
+ using Base = AAPotentialValuesImpl;
+ AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
+ : Base(IRP, A) {}
+
+ /// See AbstractAttribute::initialize(..).
+ void initialize(Attributor &A) override {
+ auto &Arg = cast<Argument>(getAssociatedValue());
+ if (Arg.hasPointeeInMemoryValueAttr())
+ indicatePessimisticFixpoint();
+ }
+
+ /// See AbstractAttribute::updateImpl(...).
+ ChangeStatus updateImpl(Attributor &A) override {
+ auto AssumedBefore = getAssumed();
+
+ unsigned CSArgNo = getCallSiteArgNo();
+
+ bool UsedAssumedInformation = false;
+ SmallVector<AA::ValueAndContext> Values;
+ auto CallSitePred = [&](AbstractCallSite ACS) {
+ const auto CSArgIRP = IRPosition::callsite_argument(ACS, CSArgNo);
+ if (CSArgIRP.getPositionKind() == IRP_INVALID)
+ return false;
+
+ if (!A.getAssumedSimplifiedValues(CSArgIRP, this, Values,
+ AA::Interprocedural,
+ UsedAssumedInformation))
+ return false;
+
+ return isValidState();
+ };
+
+ if (!A.checkForAllCallSites(CallSitePred, *this,
+ /* RequireAllCallSites */ true,
+ UsedAssumedInformation))
+ return indicatePessimisticFixpoint();
+
+ Function *Fn = getAssociatedFunction();
+ bool AnyNonLocal = false;
+ for (auto &It : Values) {
+ if (isa<Constant>(It.getValue())) {
+ addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::AnyScope,
+ getAnchorScope());
+ continue;
+ }
+ if (!AA::isDynamicallyUnique(A, *this, *It.getValue()))
+ return indicatePessimisticFixpoint();
+
+ if (auto *Arg = dyn_cast<Argument>(It.getValue()))
+ if (Arg->getParent() == Fn) {
+ addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::AnyScope,
+ getAnchorScope());
+ continue;
+ }
+ addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::Interprocedural,
+ getAnchorScope());
+ AnyNonLocal = true;
+ }
+ if (undefIsContained())
+ unionAssumedWithUndef();
+ if (AnyNonLocal)
+ giveUpOnIntraprocedural(A);
+
+ return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED
+ : ChangeStatus::CHANGED;
+ }
+
+ /// See AbstractAttribute::trackStatistics()
+ void trackStatistics() const override {
+ STATS_DECLTRACK_ARG_ATTR(potential_values)
+ }
+};
+
+struct AAPotentialValuesReturned
+ : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
+ using Base =
+ AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
+ AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
+ : Base(IRP, A) {}
+
+ /// See AbstractAttribute::initialize(..).
+ void initialize(Attributor &A) override {
+ if (A.hasSimplificationCallback(getIRPosition()))
+ indicatePessimisticFixpoint();
+ else
+ AAPotentialValues::initialize(A);
+ }
+
+ ChangeStatus manifest(Attributor &A) override {
+ // We queried AAValueSimplify for the returned values so they will be
+ // replaced if a simplified form was found. Nothing to do here.
+ return ChangeStatus::UNCHANGED;
+ }
+
+ ChangeStatus indicatePessimisticFixpoint() override {
+ return AAPotentialValues::indicatePessimisticFixpoint();
+ }
+
+ /// See AbstractAttribute::trackStatistics()
+ void trackStatistics() const override {
+ STATS_DECLTRACK_FNRET_ATTR(potential_values)
+ }
+};
+
+struct AAPotentialValuesFunction : AAPotentialValuesImpl {
+ AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
+ : AAPotentialValuesImpl(IRP, A) {}
+
+ /// See AbstractAttribute::updateImpl(...).
+ ChangeStatus updateImpl(Attributor &A) override {
+ llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
+ "not be called");
+ }
+
+ /// See AbstractAttribute::trackStatistics()
+ void trackStatistics() const override {
+ STATS_DECLTRACK_FN_ATTR(potential_values)
+ }
+};
+
+struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
+ AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
+ : AAPotentialValuesFunction(IRP, A) {}
+
+ /// See AbstractAttribute::trackStatistics()
+ void trackStatistics() const override {
+ STATS_DECLTRACK_CS_ATTR(potential_values)
+ }
+};
+
+struct AAPotentialValuesCallSiteReturned : AAPotentialValuesImpl {
+ AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
+ : AAPotentialValuesImpl(IRP, A) {}
+
+ /// See AbstractAttribute::updateImpl(...).
+ ChangeStatus updateImpl(Attributor &A) override {
+ auto AssumedBefore = getAssumed();
+
+ Function *Callee = getAssociatedFunction();
+ if (!Callee)
+ return indicatePessimisticFixpoint();
+
+ bool UsedAssumedInformation = false;
+ auto *CB = cast<CallBase>(getCtxI());
+ if (CB->isMustTailCall() &&
+ !A.isAssumedDead(IRPosition::inst(*CB), this, nullptr,
+ UsedAssumedInformation))
+ return indicatePessimisticFixpoint();
+
+ SmallVector<AA::ValueAndContext> Values;
+ if (!A.getAssumedSimplifiedValues(IRPosition::returned(*Callee), this,
+ Values, AA::Intraprocedural,
+ UsedAssumedInformation))
+ return indicatePessimisticFixpoint();
+
+ Function *Caller = CB->getCaller();
+
+ bool AnyNonLocal = false;
+ for (auto &It : Values) {
+ Value *V = It.getValue();
+ Optional<Value *> CallerV = A.translateArgumentToCallSiteContent(
+ V, *CB, *this, UsedAssumedInformation);
+ if (!CallerV.has_value()) {
+ // Nothing to do as long as no value was determined.
+ continue;
+ }
+ V = CallerV.value() ? CallerV.value() : V;
+ if (AA::isDynamicallyUnique(A, *this, *V) &&
+ AA::isValidInScope(*V, Caller)) {
+ if (CallerV.value()) {
+ SmallVector<AA::ValueAndContext> ArgValues;
+ IRPosition IRP = IRPosition::value(*V);
+ if (auto *Arg = dyn_cast<Argument>(V))
+ if (Arg->getParent() == CB->getCalledFunction())
+ IRP = IRPosition::callsite_argument(*CB, Arg->getArgNo());
+ if (recurseForValue(A, IRP, AA::AnyScope))
+ continue;
+ }
+ addValue(A, getState(), *V, CB, AA::AnyScope, getAnchorScope());
+ } else {
+ AnyNonLocal = true;
+ break;
+ }
+ }
+ if (AnyNonLocal) {
+ Values.clear();
+ if (!A.getAssumedSimplifiedValues(IRPosition::returned(*Callee), this,
+ Values, AA::Interprocedural,
+ UsedAssumedInformation))
+ return indicatePessimisticFixpoint();
+ AnyNonLocal = false;
+ getState() = PotentialLLVMValuesState::getBestState();
+ for (auto &It : Values) {
+ Value *V = It.getValue();
+ if (!AA::isDynamicallyUnique(A, *this, *V))
+ return indicatePessimisticFixpoint();
+ if (AA::isValidInScope(*V, Caller)) {
+ addValue(A, getState(), *V, CB, AA::AnyScope, getAnchorScope());
+ } else {
+ AnyNonLocal = true;
+ addValue(A, getState(), *V, CB, AA::Interprocedural,
+ getAnchorScope());
+ }
+ }
+ if (AnyNonLocal)
+ giveUpOnIntraprocedural(A);
+ }
+ return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED
+ : ChangeStatus::CHANGED;
+ }
+
+ ChangeStatus indicatePessimisticFixpoint() override {
+ return AAPotentialValues::indicatePessimisticFixpoint();
+ }
+
+ /// See AbstractAttribute::trackStatistics()
+ void trackStatistics() const override {
+ STATS_DECLTRACK_CSRET_ATTR(potential_values)
+ }
+};
+
+struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
+ AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
+ : AAPotentialValuesFloating(IRP, A) {}
+
+ /// See AbstractAttribute::trackStatistics()
+ void trackStatistics() const override {
+ STATS_DECLTRACK_CSARG_ATTR(potential_values)
+ }
+};
+} // namespace
+
/// ---------------------- Assumption Propagation ------------------------------
namespace {
struct AAAssumptionInfoImpl : public AAAssumptionInfo {
@@ -10323,6 +10749,7 @@ const char AAMemoryBehavior::ID = 0;
const char AAMemoryLocation::ID = 0;
const char AAValueConstantRange::ID = 0;
const char AAPotentialConstantValues::ID = 0;
+const char AAPotentialValues::ID = 0;
const char AANoUndef::ID = 0;
const char AACallEdges::ID = 0;
const char AAFunctionReachability::ID = 0;
@@ -10441,6 +10868,7 @@ CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAInstanceInfo)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialConstantValues)
+CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)
diff --git a/llvm/lib/Transforms/IPO/FunctionImport.cpp b/llvm/lib/Transforms/IPO/FunctionImport.cpp
index 56e2df14ff38..360ec24a0509 100644
--- a/llvm/lib/Transforms/IPO/FunctionImport.cpp
+++ b/llvm/lib/Transforms/IPO/FunctionImport.cpp
@@ -1147,6 +1147,14 @@ void llvm::thinLTOInternalizeModule(Module &TheModule,
// Declare a callback for the internalize pass that will ask for every
// candidate GlobalValue if it can be internalized or not.
auto MustPreserveGV = [&](const GlobalValue &GV) -> bool {
+ // It may be the case that GV is on a chain of an ifunc, its alias and
+ // subsequent aliases. In this case, the summary for the value is not
+ // available.
+ if (isa<GlobalIFunc>(&GV) ||
+ (isa<GlobalAlias>(&GV) &&
+ isa<GlobalIFunc>(cast<GlobalAlias>(&GV)->getAliaseeObject())))
+ return true;
+
// Lookup the linkage recorded in the summaries during global analysis.
auto GS = DefinedGlobals.find(GV.getGUID());
if (GS == DefinedGlobals.end()) {
@@ -1277,7 +1285,7 @@ Expected<bool> FunctionImporter::importFunctions(
}
}
for (GlobalAlias &GA : SrcModule->aliases()) {
- if (!GA.hasName())
+ if (!GA.hasName() || isa<GlobalIFunc>(GA.getAliaseeObject()))
continue;
auto GUID = GA.getGUID();
auto Import = ImportGUIDs.count(GUID);
@@ -1413,29 +1421,6 @@ static bool doImportingForModule(Module &M) {
return *Result;
}
-namespace {
-
-/// Pass that performs cross-module function import provided a summary file.
-class FunctionImportLegacyPass : public ModulePass {
-public:
- /// Pass identification, replacement for typeid
- static char ID;
-
- explicit FunctionImportLegacyPass() : ModulePass(ID) {}
-
- /// Specify pass name for debug output
- StringRef getPassName() const override { return "Function Importing"; }
-
- bool runOnModule(Module &M) override {
- if (skipModule(M))
- return false;
-
- return doImportingForModule(M);
- }
-};
-
-} // end anonymous namespace
-
PreservedAnalyses FunctionImportPass::run(Module &M,
ModuleAnalysisManager &AM) {
if (!doImportingForModule(M))
@@ -1443,15 +1428,3 @@ PreservedAnalyses FunctionImportPass::run(Module &M,
return PreservedAnalyses::none();
}
-
-char FunctionImportLegacyPass::ID = 0;
-INITIALIZE_PASS(FunctionImportLegacyPass, "function-import",
- "Summary Based Function Import", false, false)
-
-namespace llvm {
-
-Pass *createFunctionImportPass() {
- return new FunctionImportLegacyPass();
-}
-
-} // end namespace llvm
diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index 1ad6e2b2a1d2..ec26db8bfc0b 100644
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -1040,7 +1040,7 @@ static bool tryToOptimizeStoreOfAllocationToGlobal(GlobalVariable *GV,
CallInst *CI,
const DataLayout &DL,
TargetLibraryInfo *TLI) {
- if (!isAllocRemovable(CI, TLI))
+ if (!isRemovableAlloc(CI, TLI))
// Must be able to remove the call when we get done..
return false;
diff --git a/llvm/lib/Transforms/IPO/IPO.cpp b/llvm/lib/Transforms/IPO/IPO.cpp
index ec2b80012ed6..dfd434e61d5b 100644
--- a/llvm/lib/Transforms/IPO/IPO.cpp
+++ b/llvm/lib/Transforms/IPO/IPO.cpp
@@ -44,7 +44,6 @@ void llvm::initializeIPO(PassRegistry &Registry) {
initializeLoopExtractorLegacyPassPass(Registry);
initializeBlockExtractorLegacyPassPass(Registry);
initializeSingleLoopExtractorPass(Registry);
- initializeLowerTypeTestsPass(Registry);
initializeMergeFunctionsLegacyPassPass(Registry);
initializePartialInlinerLegacyPassPass(Registry);
initializeAttributorLegacyPassPass(Registry);
@@ -60,9 +59,6 @@ void llvm::initializeIPO(PassRegistry &Registry) {
initializeStripNonDebugSymbolsPass(Registry);
initializeBarrierNoopPass(Registry);
initializeEliminateAvailableExternallyLegacyPassPass(Registry);
- initializeSampleProfileLoaderLegacyPassPass(Registry);
- initializeFunctionImportLegacyPassPass(Registry);
- initializeWholeProgramDevirtPass(Registry);
}
void LLVMInitializeIPO(LLVMPassRegistryRef R) {
diff --git a/llvm/lib/Transforms/IPO/Internalize.cpp b/llvm/lib/Transforms/IPO/Internalize.cpp
index 5aa5b905f06c..85b1a8303d33 100644
--- a/llvm/lib/Transforms/IPO/Internalize.cpp
+++ b/llvm/lib/Transforms/IPO/Internalize.cpp
@@ -28,6 +28,7 @@
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/GlobPattern.h"
#include "llvm/Support/LineIterator.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
@@ -40,13 +41,13 @@ STATISTIC(NumAliases, "Number of aliases internalized");
STATISTIC(NumFunctions, "Number of functions internalized");
STATISTIC(NumGlobals, "Number of global vars internalized");
-// APIFile - A file which contains a list of symbols that should not be marked
-// external.
+// APIFile - A file which contains a list of symbol glob patterns that should
+// not be marked external.
static cl::opt<std::string>
APIFile("internalize-public-api-file", cl::value_desc("filename"),
cl::desc("A file containing list of symbol names to preserve"));
-// APIList - A list of symbols that should not be marked internal.
+// APIList - A list of symbol glob patterns that should not be marked internal.
static cl::list<std::string>
APIList("internalize-public-api-list", cl::value_desc("list"),
cl::desc("A list of symbol names to preserve"), cl::CommaSeparated);
@@ -59,29 +60,44 @@ public:
PreserveAPIList() {
if (!APIFile.empty())
LoadFile(APIFile);
- ExternalNames.insert(APIList.begin(), APIList.end());
+ for (StringRef Pattern : APIList)
+ addGlob(Pattern);
}
bool operator()(const GlobalValue &GV) {
- return ExternalNames.count(GV.getName());
+ return llvm::any_of(
+ ExternalNames, [&](GlobPattern &GP) { return GP.match(GV.getName()); });
}
private:
// Contains the set of symbols loaded from file
- StringSet<> ExternalNames;
+ SmallVector<GlobPattern> ExternalNames;
+
+ void addGlob(StringRef Pattern) {
+ auto GlobOrErr = GlobPattern::create(Pattern);
+ if (!GlobOrErr) {
+ errs() << "WARNING: when loading pattern: '"
+ << toString(GlobOrErr.takeError()) << "' ignoring";
+ return;
+ }
+ ExternalNames.emplace_back(std::move(*GlobOrErr));
+ }
void LoadFile(StringRef Filename) {
// Load the APIFile...
- ErrorOr<std::unique_ptr<MemoryBuffer>> Buf =
+ ErrorOr<std::unique_ptr<MemoryBuffer>> BufOrErr =
MemoryBuffer::getFile(Filename);
- if (!Buf) {
+ if (!BufOrErr) {
errs() << "WARNING: Internalize couldn't load file '" << Filename
<< "'! Continuing as if it's empty.\n";
return; // Just continue as if the file were empty
}
- for (line_iterator I(*Buf->get(), true), E; I != E; ++I)
- ExternalNames.insert(*I);
+ Buf = std::move(*BufOrErr);
+ for (line_iterator I(*Buf, true), E; I != E; ++I)
+ addGlob(*I);
}
+
+ std::shared_ptr<MemoryBuffer> Buf;
};
} // end anonymous namespace
diff --git a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
index d5f1d291f41f..6bf25df101fa 100644
--- a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
+++ b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
@@ -528,50 +528,8 @@ public:
// arguments. For testing purposes only.
static bool runForTesting(Module &M);
};
-
-struct LowerTypeTests : public ModulePass {
- static char ID;
-
- bool UseCommandLine = false;
-
- ModuleSummaryIndex *ExportSummary;
- const ModuleSummaryIndex *ImportSummary;
- bool DropTypeTests;
-
- LowerTypeTests() : ModulePass(ID), UseCommandLine(true) {
- initializeLowerTypeTestsPass(*PassRegistry::getPassRegistry());
- }
-
- LowerTypeTests(ModuleSummaryIndex *ExportSummary,
- const ModuleSummaryIndex *ImportSummary, bool DropTypeTests)
- : ModulePass(ID), ExportSummary(ExportSummary),
- ImportSummary(ImportSummary),
- DropTypeTests(DropTypeTests || ClDropTypeTests) {
- initializeLowerTypeTestsPass(*PassRegistry::getPassRegistry());
- }
-
- bool runOnModule(Module &M) override {
- if (UseCommandLine)
- return LowerTypeTestsModule::runForTesting(M);
- return LowerTypeTestsModule(M, ExportSummary, ImportSummary, DropTypeTests)
- .lower();
- }
-};
-
} // end anonymous namespace
-char LowerTypeTests::ID = 0;
-
-INITIALIZE_PASS(LowerTypeTests, "lowertypetests", "Lower type metadata", false,
- false)
-
-ModulePass *
-llvm::createLowerTypeTestsPass(ModuleSummaryIndex *ExportSummary,
- const ModuleSummaryIndex *ImportSummary,
- bool DropTypeTests) {
- return new LowerTypeTests(ExportSummary, ImportSummary, DropTypeTests);
-}
-
/// Build a bit set for TypeId using the object layouts in
/// GlobalLayout.
BitSetInfo LowerTypeTestsModule::buildBitSet(
diff --git a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
index 8e0ca8c6c997..0b42fc151991 100644
--- a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
+++ b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
@@ -4808,7 +4808,7 @@ void OpenMPOpt::registerAAs(bool IsModulePass) {
if (auto *LI = dyn_cast<LoadInst>(&I)) {
bool UsedAssumedInformation = false;
A.getAssumedSimplified(IRPosition::value(*LI), /* AA */ nullptr,
- UsedAssumedInformation);
+ UsedAssumedInformation, AA::Interprocedural);
} else if (auto *SI = dyn_cast<StoreInst>(&I)) {
A.getOrCreateAAFor<AAIsDead>(IRPosition::value(*SI));
}
diff --git a/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp b/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
index 8eef82675e86..f1b6f2bb7de4 100644
--- a/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
+++ b/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
@@ -91,14 +91,6 @@ cl::opt<bool> EnableDFAJumpThreading("enable-dfa-jump-thread",
cl::desc("Enable DFA jump threading."),
cl::init(false), cl::Hidden);
-static cl::opt<bool>
- EnablePrepareForThinLTO("prepare-for-thinlto", cl::init(false), cl::Hidden,
- cl::desc("Enable preparation for ThinLTO."));
-
-static cl::opt<bool>
- EnablePerformThinLTO("perform-thinlto", cl::init(false), cl::Hidden,
- cl::desc("Enable performing ThinLTO."));
-
cl::opt<bool> EnableHotColdSplit("hot-cold-split",
cl::desc("Enable hot-cold splitting pass"));
@@ -192,15 +184,6 @@ PassManagerBuilder::PassManagerBuilder() {
VerifyInput = false;
VerifyOutput = false;
MergeFunctions = false;
- PrepareForLTO = false;
- EnablePGOInstrGen = false;
- EnablePGOCSInstrGen = false;
- EnablePGOCSInstrUse = false;
- PGOInstrGen = "";
- PGOInstrUse = "";
- PGOSampleUse = "";
- PrepareForThinLTO = EnablePrepareForThinLTO;
- PerformThinLTO = EnablePerformThinLTO;
DivergentTarget = false;
CallGraphProfile = true;
}
@@ -390,7 +373,7 @@ void PassManagerBuilder::addFunctionSimplificationPasses(
MPM.add(createLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
/*AllowSpeculation=*/false));
// Rotate Loop - disable header duplication at -Oz
- MPM.add(createLoopRotatePass(SizeLevel == 2 ? 0 : -1, PrepareForLTO));
+ MPM.add(createLoopRotatePass(SizeLevel == 2 ? 0 : -1, false));
// TODO: Investigate promotion cap for O1.
MPM.add(createLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
/*AllowSpeculation=*/true));
@@ -470,10 +453,6 @@ void PassManagerBuilder::addFunctionSimplificationPasses(
// Clean up after everything.
MPM.add(createInstructionCombiningPass());
addExtensionsToPM(EP_Peephole, MPM);
-
- if (EnableCHR && OptLevel >= 3 &&
- (!PGOInstrUse.empty() || !PGOSampleUse.empty() || EnablePGOCSInstrGen))
- MPM.add(createControlHeightReductionLegacyPass());
}
/// FIXME: Should LTO cause any differences to this set of passes?
@@ -598,15 +577,6 @@ void PassManagerBuilder::populateModulePassManager(
legacy::PassManagerBase &MPM) {
MPM.add(createAnnotation2MetadataLegacyPass());
- if (!PGOSampleUse.empty()) {
- MPM.add(createPruneEHPass());
- // In ThinLTO mode, when flattened profile is used, all the available
- // profile information will be annotated in PreLink phase so there is
- // no need to load the profile again in PostLink.
- if (!(FlattenedProfileUsed && PerformThinLTO))
- MPM.add(createSampleProfileLoaderPass(PGOSampleUse));
- }
-
// Allow forcing function attributes as a debugging and tuning aid.
MPM.add(createForceFunctionAttrsLegacyPass());
@@ -628,26 +598,8 @@ void PassManagerBuilder::populateModulePassManager(
else if (GlobalExtensionsNotEmpty() || !Extensions.empty())
MPM.add(createBarrierNoopPass());
- if (PerformThinLTO) {
- MPM.add(createLowerTypeTestsPass(nullptr, nullptr, true));
- // Drop available_externally and unreferenced globals. This is necessary
- // with ThinLTO in order to avoid leaving undefined references to dead
- // globals in the object file.
- MPM.add(createEliminateAvailableExternallyPass());
- MPM.add(createGlobalDCEPass());
- }
-
addExtensionsToPM(EP_EnabledOnOptLevel0, MPM);
- if (PrepareForLTO || PrepareForThinLTO) {
- MPM.add(createCanonicalizeAliasesPass());
- // Rename anon globals to be able to export them in the summary.
- // This has to be done after we add the extensions to the pass manager
- // as there could be passes (e.g. Adddress sanitizer) which introduce
- // new unnamed globals.
- MPM.add(createNameAnonGlobalPass());
- }
-
MPM.add(createAnnotationRemarksLegacyPass());
return;
}
@@ -658,25 +610,6 @@ void PassManagerBuilder::populateModulePassManager(
addInitialAliasAnalysisPasses(MPM);
- // For ThinLTO there are two passes of indirect call promotion. The
- // first is during the compile phase when PerformThinLTO=false and
- // intra-module indirect call targets are promoted. The second is during
- // the ThinLTO backend when PerformThinLTO=true, when we promote imported
- // inter-module indirect calls. For that we perform indirect call promotion
- // earlier in the pass pipeline, here before globalopt. Otherwise imported
- // available_externally functions look unreferenced and are removed.
- if (PerformThinLTO) {
- MPM.add(createLowerTypeTestsPass(nullptr, nullptr, true));
- }
-
- // For SamplePGO in ThinLTO compile phase, we do not want to unroll loops
- // as it will change the CFG too much to make the 2nd profile annotation
- // in backend more difficult.
- bool PrepareForThinLTOUsingPGOSampleProfile =
- PrepareForThinLTO && !PGOSampleUse.empty();
- if (PrepareForThinLTOUsingPGOSampleProfile)
- DisableUnrollLoops = true;
-
// Infer attributes about declarations if possible.
MPM.add(createInferFunctionAttrsLegacyPass());
@@ -744,7 +677,7 @@ void PassManagerBuilder::populateModulePassManager(
if (RunPartialInlining)
MPM.add(createPartialInliningPass());
- if (OptLevel > 1 && !PrepareForLTO && !PrepareForThinLTO)
+ if (OptLevel > 1)
// Remove avail extern fns and globals definitions if we aren't
// compiling an object file for later LTO. For LTO we want to preserve
// these so they are eligible for inlining at link-time. Note if they
@@ -756,9 +689,6 @@ void PassManagerBuilder::populateModulePassManager(
// and saves running remaining passes on the eliminated functions.
MPM.add(createEliminateAvailableExternallyPass());
- if (EnableOrderFileInstrumentation)
- MPM.add(createInstrOrderFilePass());
-
MPM.add(createReversePostOrderFunctionAttrsPass());
// The inliner performs some kind of dead code elimination as it goes,
@@ -772,24 +702,6 @@ void PassManagerBuilder::populateModulePassManager(
MPM.add(createGlobalDCEPass());
}
- // If we are planning to perform ThinLTO later, let's not bloat the code with
- // unrolling/vectorization/... now. We'll first run the inliner + CGSCC passes
- // during ThinLTO and perform the rest of the optimizations afterward.
- if (PrepareForThinLTO) {
- // Ensure we perform any last passes, but do so before renaming anonymous
- // globals in case the passes add any.
- addExtensionsToPM(EP_OptimizerLast, MPM);
- MPM.add(createCanonicalizeAliasesPass());
- // Rename anon globals to be able to export them in the summary.
- MPM.add(createNameAnonGlobalPass());
- return;
- }
-
- if (PerformThinLTO)
- // Optimize globals now when performing ThinLTO, this enables more
- // optimizations later.
- MPM.add(createGlobalOptimizerPass());
-
// Scheduling LoopVersioningLICM when inlining is over, because after that
// we may see more accurate aliasing. Reason to run this late is that too
// early versioning may prevent further inlining due to increase of code
@@ -834,7 +746,7 @@ void PassManagerBuilder::populateModulePassManager(
// Re-rotate loops in all our loop nests. These may have fallout out of
// rotated form due to GVN or other transformations, and the vectorizer relies
// on the rotated form. Disable header duplication at -Oz.
- MPM.add(createLoopRotatePass(SizeLevel == 2 ? 0 : -1, PrepareForLTO));
+ MPM.add(createLoopRotatePass(SizeLevel == 2 ? 0 : -1, false));
// Distribute loops to allow partial vectorization. I.e. isolate dependences
// into separate loop that would otherwise inhibit vectorization. This is
@@ -856,7 +768,7 @@ void PassManagerBuilder::populateModulePassManager(
// See comment in the new PM for justification of scheduling splitting at
// this stage (\ref buildModuleSimplificationPipeline).
- if (EnableHotColdSplit && !(PrepareForLTO || PrepareForThinLTO))
+ if (EnableHotColdSplit)
MPM.add(createHotColdSplittingPass());
if (EnableIROutliner)
@@ -865,10 +777,6 @@ void PassManagerBuilder::populateModulePassManager(
if (MergeFunctions)
MPM.add(createMergeFunctionsPass());
- // Add Module flag "CG Profile" based on Branch Frequency Information.
- if (CallGraphProfile)
- MPM.add(createCGProfileLegacyPass());
-
// LoopSink pass sinks instructions hoisted by LICM, which serves as a
// canonicalization pass that enables other optimizations. As a result,
// LoopSink pass needs to be a very late IR pass to avoid undoing LICM
@@ -889,12 +797,6 @@ void PassManagerBuilder::populateModulePassManager(
addExtensionsToPM(EP_OptimizerLast, MPM);
- if (PrepareForLTO) {
- MPM.add(createCanonicalizeAliasesPass());
- // Rename anon globals to be able to handle them in the summary
- MPM.add(createNameAnonGlobalPass());
- }
-
MPM.add(createAnnotationRemarksLegacyPass());
}
diff --git a/llvm/lib/Transforms/IPO/SampleProfile.cpp b/llvm/lib/Transforms/IPO/SampleProfile.cpp
index 55fee213cd5f..f76b886e810a 100644
--- a/llvm/lib/Transforms/IPO/SampleProfile.cpp
+++ b/llvm/lib/Transforms/IPO/SampleProfile.cpp
@@ -546,53 +546,6 @@ private:
return AnnotatedPassName.c_str();
}
};
-
-class SampleProfileLoaderLegacyPass : public ModulePass {
-public:
- // Class identification, replacement for typeinfo
- static char ID;
-
- SampleProfileLoaderLegacyPass(
- StringRef Name = SampleProfileFile,
- ThinOrFullLTOPhase LTOPhase = ThinOrFullLTOPhase::None)
- : ModulePass(ID), SampleLoader(
- Name, SampleProfileRemappingFile, LTOPhase,
- [&](Function &F) -> AssumptionCache & {
- return ACT->getAssumptionCache(F);
- },
- [&](Function &F) -> TargetTransformInfo & {
- return TTIWP->getTTI(F);
- },
- [&](Function &F) -> TargetLibraryInfo & {
- return TLIWP->getTLI(F);
- }) {
- initializeSampleProfileLoaderLegacyPassPass(
- *PassRegistry::getPassRegistry());
- }
-
- void dump() { SampleLoader.dump(); }
-
- bool doInitialization(Module &M) override {
- return SampleLoader.doInitialization(M);
- }
-
- StringRef getPassName() const override { return "Sample profile pass"; }
- bool runOnModule(Module &M) override;
-
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<AssumptionCacheTracker>();
- AU.addRequired<TargetTransformInfoWrapperPass>();
- AU.addRequired<TargetLibraryInfoWrapperPass>();
- AU.addRequired<ProfileSummaryInfoWrapperPass>();
- }
-
-private:
- SampleProfileLoader SampleLoader;
- AssumptionCacheTracker *ACT = nullptr;
- TargetTransformInfoWrapperPass *TTIWP = nullptr;
- TargetLibraryInfoWrapperPass *TLIWP = nullptr;
-};
-
} // end anonymous namespace
ErrorOr<uint64_t> SampleProfileLoader::getInstWeight(const Instruction &Inst) {
@@ -734,8 +687,8 @@ SampleProfileLoader::findIndirectCallFunctionSamples(
auto FSCompare = [](const FunctionSamples *L, const FunctionSamples *R) {
assert(L && R && "Expect non-null FunctionSamples");
- if (L->getEntrySamples() != R->getEntrySamples())
- return L->getEntrySamples() > R->getEntrySamples();
+ if (L->getHeadSamplesEstimate() != R->getHeadSamplesEstimate())
+ return L->getHeadSamplesEstimate() > R->getHeadSamplesEstimate();
return FunctionSamples::getGUID(L->getName()) <
FunctionSamples::getGUID(R->getName());
};
@@ -750,7 +703,7 @@ SampleProfileLoader::findIndirectCallFunctionSamples(
// as that already includes both inlined callee and non-inlined ones..
Sum = 0;
for (const auto *const FS : CalleeSamples) {
- Sum += FS->getEntrySamples();
+ Sum += FS->getHeadSamplesEstimate();
R.push_back(FS);
}
llvm::sort(R, FSCompare);
@@ -771,7 +724,7 @@ SampleProfileLoader::findIndirectCallFunctionSamples(
if (M->empty())
return R;
for (const auto &NameFS : *M) {
- Sum += NameFS.second.getEntrySamples();
+ Sum += NameFS.second.getHeadSamplesEstimate();
R.push_back(&NameFS.second);
}
llvm::sort(R, FSCompare);
@@ -1090,7 +1043,7 @@ void SampleProfileLoader::findExternalInlineCandidate(
bool PreInline =
UsePreInlinerDecision &&
CalleeSample->getContext().hasAttribute(ContextShouldBeInlined);
- if (!PreInline && CalleeSample->getEntrySamples() < Threshold)
+ if (!PreInline && CalleeSample->getHeadSamplesEstimate() < Threshold)
continue;
StringRef Name = CalleeSample->getFuncName();
@@ -1171,7 +1124,8 @@ bool SampleProfileLoader::inlineHotFunctions(
assert((!FunctionSamples::UseMD5 || FS->GUIDToFuncNameMap) &&
"GUIDToFuncNameMap has to be populated");
AllCandidates.push_back(CB);
- if (FS->getEntrySamples() > 0 || FunctionSamples::ProfileIsCS)
+ if (FS->getHeadSamplesEstimate() > 0 ||
+ FunctionSamples::ProfileIsCS)
LocalNotInlinedCallSites.try_emplace(CB, FS);
if (callsiteIsHot(FS, PSI, ProfAccForSymsInList))
Hot = true;
@@ -1211,7 +1165,7 @@ bool SampleProfileLoader::inlineHotFunctions(
if (!callsiteIsHot(FS, PSI, ProfAccForSymsInList))
continue;
- Candidate = {I, FS, FS->getEntrySamples(), 1.0};
+ Candidate = {I, FS, FS->getHeadSamplesEstimate(), 1.0};
if (tryPromoteAndInlineCandidate(F, Candidate, SumOrigin, Sum)) {
LocalNotInlinedCallSites.erase(I);
LocalChanged = true;
@@ -1325,7 +1279,7 @@ bool SampleProfileLoader::getInlineCandidate(InlineCandidate *NewCandidate,
Factor = Probe->Factor;
uint64_t CallsiteCount =
- CalleeSamples ? CalleeSamples->getEntrySamples() * Factor : 0;
+ CalleeSamples ? CalleeSamples->getHeadSamplesEstimate() * Factor : 0;
*NewCandidate = {CB, CalleeSamples, CallsiteCount, Factor};
return true;
}
@@ -1481,7 +1435,7 @@ bool SampleProfileLoader::inlineHotFunctionsWithPriority(
continue;
}
uint64_t EntryCountDistributed =
- FS->getEntrySamples() * Candidate.CallsiteDistribution;
+ FS->getHeadSamplesEstimate() * Candidate.CallsiteDistribution;
// In addition to regular inline cost check, we also need to make sure
// ICP isn't introducing excessive speculative checks even if individual
// target looks beneficial to promote and inline. That means we should
@@ -1568,7 +1522,7 @@ void SampleProfileLoader::promoteMergeNotInlinedContextSamples(
++NumCSNotInlined;
const FunctionSamples *FS = Pair.getSecond();
- if (FS->getTotalSamples() == 0 && FS->getEntrySamples() == 0) {
+ if (FS->getTotalSamples() == 0 && FS->getHeadSamplesEstimate() == 0) {
continue;
}
@@ -1586,7 +1540,7 @@ void SampleProfileLoader::promoteMergeNotInlinedContextSamples(
// Use entry samples as head samples during the merge, as inlinees
// don't have head samples.
const_cast<FunctionSamples *>(FS)->addHeadSamples(
- FS->getEntrySamples());
+ FS->getHeadSamplesEstimate());
// Note that we have to do the merge right after processing function.
// This allows OutlineFS's profile to be used for annotation during
@@ -1599,7 +1553,7 @@ void SampleProfileLoader::promoteMergeNotInlinedContextSamples(
} else {
auto pair =
notInlinedCallInfo.try_emplace(Callee, NotInlinedProfileInfo{0});
- pair.first->second.entryCount += FS->getEntrySamples();
+ pair.first->second.entryCount += FS->getHeadSamplesEstimate();
}
}
}
@@ -1663,7 +1617,7 @@ void SampleProfileLoader::generateMDProfMetadata(Function &F) {
if (const FunctionSamplesMap *M =
FS->findFunctionSamplesMapAt(CallSite)) {
for (const auto &NameFS : *M)
- Sum += NameFS.second.getEntrySamples();
+ Sum += NameFS.second.getHeadSamplesEstimate();
}
}
if (Sum)
@@ -1825,17 +1779,6 @@ bool SampleProfileLoader::emitAnnotations(Function &F) {
return Changed;
}
-char SampleProfileLoaderLegacyPass::ID = 0;
-
-INITIALIZE_PASS_BEGIN(SampleProfileLoaderLegacyPass, "sample-profile",
- "Sample Profile loader", false, false)
-INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
-INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
-INITIALIZE_PASS_END(SampleProfileLoaderLegacyPass, "sample-profile",
- "Sample Profile loader", false, false)
-
std::unique_ptr<ProfiledCallGraph>
SampleProfileLoader::buildProfiledCallGraph(CallGraph &CG) {
std::unique_ptr<ProfiledCallGraph> ProfiledCG;
@@ -2073,14 +2016,6 @@ bool SampleProfileLoader::doInitialization(Module &M,
return true;
}
-ModulePass *llvm::createSampleProfileLoaderPass() {
- return new SampleProfileLoaderLegacyPass();
-}
-
-ModulePass *llvm::createSampleProfileLoaderPass(StringRef Name) {
- return new SampleProfileLoaderLegacyPass(Name);
-}
-
bool SampleProfileLoader::runOnModule(Module &M, ModuleAnalysisManager *AM,
ProfileSummaryInfo *_PSI, CallGraph *CG) {
GUIDToFuncNameMapper Mapper(M, *Reader, GUIDToFuncNameMap);
@@ -2141,15 +2076,6 @@ bool SampleProfileLoader::runOnModule(Module &M, ModuleAnalysisManager *AM,
return retval;
}
-bool SampleProfileLoaderLegacyPass::runOnModule(Module &M) {
- ACT = &getAnalysis<AssumptionCacheTracker>();
- TTIWP = &getAnalysis<TargetTransformInfoWrapperPass>();
- TLIWP = &getAnalysis<TargetLibraryInfoWrapperPass>();
- ProfileSummaryInfo *PSI =
- &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
- return SampleLoader.runOnModule(M, nullptr, PSI, nullptr);
-}
-
bool SampleProfileLoader::runOnFunction(Function &F, ModuleAnalysisManager *AM) {
LLVM_DEBUG(dbgs() << "\n\nProcessing Function " << F.getName() << "\n");
DILocation2SampleMap.clear();
diff --git a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
index 898a213d0849..ad00c116ce0a 100644
--- a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
+++ b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
@@ -747,78 +747,8 @@ struct DevirtIndex {
void run();
};
-
-struct WholeProgramDevirt : public ModulePass {
- static char ID;
-
- bool UseCommandLine = false;
-
- ModuleSummaryIndex *ExportSummary = nullptr;
- const ModuleSummaryIndex *ImportSummary = nullptr;
-
- WholeProgramDevirt() : ModulePass(ID), UseCommandLine(true) {
- initializeWholeProgramDevirtPass(*PassRegistry::getPassRegistry());
- }
-
- WholeProgramDevirt(ModuleSummaryIndex *ExportSummary,
- const ModuleSummaryIndex *ImportSummary)
- : ModulePass(ID), ExportSummary(ExportSummary),
- ImportSummary(ImportSummary) {
- initializeWholeProgramDevirtPass(*PassRegistry::getPassRegistry());
- }
-
- bool runOnModule(Module &M) override {
- if (skipModule(M))
- return false;
-
- // In the new pass manager, we can request the optimization
- // remark emitter pass on a per-function-basis, which the
- // OREGetter will do for us.
- // In the old pass manager, this is harder, so we just build
- // an optimization remark emitter on the fly, when we need it.
- std::unique_ptr<OptimizationRemarkEmitter> ORE;
- auto OREGetter = [&](Function *F) -> OptimizationRemarkEmitter & {
- ORE = std::make_unique<OptimizationRemarkEmitter>(F);
- return *ORE;
- };
-
- auto LookupDomTree = [this](Function &F) -> DominatorTree & {
- return this->getAnalysis<DominatorTreeWrapperPass>(F).getDomTree();
- };
-
- if (UseCommandLine)
- return DevirtModule::runForTesting(M, LegacyAARGetter(*this), OREGetter,
- LookupDomTree);
-
- return DevirtModule(M, LegacyAARGetter(*this), OREGetter, LookupDomTree,
- ExportSummary, ImportSummary)
- .run();
- }
-
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<AssumptionCacheTracker>();
- AU.addRequired<TargetLibraryInfoWrapperPass>();
- AU.addRequired<DominatorTreeWrapperPass>();
- }
-};
-
} // end anonymous namespace
-INITIALIZE_PASS_BEGIN(WholeProgramDevirt, "wholeprogramdevirt",
- "Whole program devirtualization", false, false)
-INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
-INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_END(WholeProgramDevirt, "wholeprogramdevirt",
- "Whole program devirtualization", false, false)
-char WholeProgramDevirt::ID = 0;
-
-ModulePass *
-llvm::createWholeProgramDevirtPass(ModuleSummaryIndex *ExportSummary,
- const ModuleSummaryIndex *ImportSummary) {
- return new WholeProgramDevirt(ExportSummary, ImportSummary);
-}
-
PreservedAnalyses WholeProgramDevirtPass::run(Module &M,
ModuleAnalysisManager &AM) {
auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();