Diffstat (limited to 'contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp')
-rw-r--r--  contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp  235
1 file changed, 161 insertions(+), 74 deletions(-)
diff --git a/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 537813b6b752..1a24ae3dba15 100644
--- a/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -28,6 +28,7 @@
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Analysis/PhiValues.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
@@ -85,15 +86,16 @@ const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;
// depth otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;
-bool BasicAAResult::invalidate(Function &F, const PreservedAnalyses &PA,
+bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
FunctionAnalysisManager::Invalidator &Inv) {
// We don't care if this analysis itself is preserved, it has no state. But
// we need to check that the analyses it depends on have been. Note that we
// may be created without handles to some analyses and in that case don't
// depend on them.
- if (Inv.invalidate<AssumptionAnalysis>(F, PA) ||
- (DT && Inv.invalidate<DominatorTreeAnalysis>(F, PA)) ||
- (LI && Inv.invalidate<LoopAnalysis>(F, PA)))
+ if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
+ (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
+ (LI && Inv.invalidate<LoopAnalysis>(Fn, PA)) ||
+ (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
return true;
// Otherwise this analysis result remains valid.
@@ -132,7 +134,10 @@ static bool isNonEscapingLocalObject(const Value *V) {
/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
- if (isa<CallInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V))
+ if (ImmutableCallSite(V))
+ return true;
+
+ if (isa<Argument>(V))
return true;
// The load case works because isNonEscapingLocalObject considers all
@@ -147,10 +152,12 @@ static bool isEscapeSource(const Value *V) {
/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
const TargetLibraryInfo &TLI,
+ bool NullIsValidLoc,
bool RoundToAlign = false) {
uint64_t Size;
ObjectSizeOpts Opts;
Opts.RoundToAlign = RoundToAlign;
+ Opts.NullIsUnknownSize = NullIsValidLoc;
if (getObjectSize(V, Size, DL, &TLI, Opts))
return Size;
return MemoryLocation::UnknownSize;
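
A minimal standalone sketch (the helper name is hypothetical; llvm::getObjectSize and ObjectSizeOpts are the real APIs) of what the new NullIsValidLoc flag buys: without it, a null pointer in address space 0 reports object size 0, and the size-based callers below could conclude NoAlias even in functions where null is a real location.

    #include "llvm/Analysis/MemoryBuiltins.h"
    #include "llvm/Analysis/MemoryLocation.h"
    #include "llvm/Analysis/TargetLibraryInfo.h"

    static uint64_t sizeOrUnknown(const llvm::Value *V,
                                  const llvm::DataLayout &DL,
                                  const llvm::TargetLibraryInfo &TLI,
                                  bool NullIsValidLoc) {
      uint64_t Size;
      llvm::ObjectSizeOpts Opts;
      // When null is a valid location, its "object" has no knowable size;
      // report unknown rather than 0 so NoAlias proofs stay sound.
      Opts.NullIsUnknownSize = NullIsValidLoc;
      if (llvm::getObjectSize(V, Size, DL, &TLI, Opts))
        return Size;
      return llvm::MemoryLocation::UnknownSize;
    }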
@@ -160,7 +167,8 @@ static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
const DataLayout &DL,
- const TargetLibraryInfo &TLI) {
+ const TargetLibraryInfo &TLI,
+ bool NullIsValidLoc) {
// Note that the meanings of the "object" are slightly different in the
// following contexts:
// c1: llvm::getObjectSize()
@@ -192,15 +200,16 @@ static bool isObjectSmallerThan(const Value *V, uint64_t Size,
// This function needs to use the aligned object size because we allow
// reads a bit past the end given sufficient alignment.
- uint64_t ObjectSize = getObjectSize(V, DL, TLI, /*RoundToAlign*/ true);
+ uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
+ /*RoundToAlign*/ true);
return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}
/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
- const TargetLibraryInfo &TLI) {
- uint64_t ObjectSize = getObjectSize(V, DL, TLI);
+ const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
+ uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}
@@ -285,6 +294,19 @@ static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
case Instruction::Shl:
V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
+
+ // We're trying to linearize an expression of the kind:
+ // shl i8 -128, 36
+ // where the shift count exceeds the bitwidth of the type.
+ // We can't decompose this further (the expression would return
+ // a poison value).
+ if (Offset.getBitWidth() < RHS.getLimitedValue() ||
+ Scale.getBitWidth() < RHS.getLimitedValue()) {
+ Scale = 1;
+ Offset = 0;
+ return V;
+ }
+
Offset <<= RHS.getLimitedValue();
Scale <<= RHS.getLimitedValue();
// the semantics of nsw and nuw for left shifts don't match those of
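
As a standalone illustration of the new guard (plain APInt arithmetic; the function is a sketch, not part of the patch): for `shl i8 -128, 36` the decomposed Offset and Scale are 8 bits wide while the shift count is 36, so shifting would both model a poison value and trip APInt's shift assertion.

    #include "llvm/ADT/APInt.h"
    #include "llvm/IR/Value.h"

    static const llvm::Value *shiftGuardSketch(const llvm::Value *V,
                                               llvm::APInt &Scale,
                                               llvm::APInt &Offset,
                                               uint64_t ShAmt) {
      if (Offset.getBitWidth() < ShAmt || Scale.getBitWidth() < ShAmt) {
        Scale = 1;  // give up: identity decomposition V*1 + 0
        Offset = 0;
        return V;
      }
      Offset <<= unsigned(ShAmt); // safe: shift count fits the bit width
      Scale <<= unsigned(ShAmt);
      return V;
    }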
@@ -414,11 +436,21 @@ bool BasicAAResult::DecomposeGEPExpression(const Value *V,
const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
if (!GEPOp) {
- if (auto CS = ImmutableCallSite(V))
- if (const Value *RV = CS.getReturnedArgOperand()) {
- V = RV;
+ if (auto CS = ImmutableCallSite(V)) {
+      // CaptureTracking knows about special capturing properties of some
+      // intrinsics, such as launder.invariant.group, that cannot be expressed
+      // with attributes but still return a pointer aliasing their argument.
+      // Some analyses assume that a nocapture pointer is never returned from
+      // such an intrinsic (the function would otherwise have to carry the
+      // returned attribute), so it is crucial to use this helper and keep it
+      // in sync with CaptureTracking. Skipping it can cause subtle
+      // miscompilations where two aliasing pointers are reported as noalias.
+ if (auto *RP = getArgumentAliasingToReturnedPointer(CS)) {
+ V = RP;
continue;
}
+ }
// If it's not a GEP, hand it off to SimplifyInstruction to see if it
// can come up with something. This matches what GetUnderlyingObject does.
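
In IR terms, the case this guards against looks roughly like the following (illustrative only, not from the patch):

    // %q aliases %p, yet the intrinsic cannot carry the `returned`
    // attribute, so attribute-only stripping would miss the equivalence:
    //
    //   %q = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
    //
    // getArgumentAliasingToReturnedPointer(CS) recognizes such calls and
    // hands back %p, so DecomposeGEPExpression keeps walking from the
    // argument instead of stopping at the call.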
@@ -490,6 +522,13 @@ bool BasicAAResult::DecomposeGEPExpression(const Value *V,
Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
SExtBits, DL, 0, AC, DT, NSW, NUW);
+    // All GEP math happens in the width of the pointer type, so we can
+    // truncate the value to 64 bits: we don't currently handle pointers
+    // larger than 64 bits and would otherwise crash later.
+    // TODO: Make `Scale` an APInt to avoid this problem.
+ if (IndexScale.getBitWidth() > 64)
+ IndexScale = IndexScale.sextOrTrunc(64);
+
// The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
// This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
Decomposed.OtherOffset += IndexOffset.getSExtValue() * Scale;
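
A small self-contained example of the clamp (the 128-bit width is hypothetical):

    #include "llvm/ADT/APInt.h"

    static int64_t clampedScaleExample() {
      llvm::APInt IndexScale(128, 42);   // index math at a 128-bit width
      if (IndexScale.getBitWidth() > 64)
        IndexScale = IndexScale.sextOrTrunc(64); // sign-preserving narrow
      // getSExtValue() asserts once a value needs more than 64 significant
      // bits; the clamp above keeps the call below within that limit.
      return IndexScale.getSExtValue();  // 42
    }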
@@ -832,8 +871,11 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
IsMustAlias = false;
// Early return if we improved mod ref information
- if (!isModAndRefSet(Result))
+ if (!isModAndRefSet(Result)) {
+ if (isNoModRef(Result))
+ return ModRefInfo::NoModRef;
return IsMustAlias ? setMust(Result) : clearMust(Result);
+ }
}
// If the CallSite is to malloc or calloc, we can assume that it doesn't
@@ -854,7 +896,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
// operands, i.e., source and destination of any given memcpy must no-alias.
// If Loc must-aliases either one of these two locations, then it necessarily
// no-aliases the other.
- if (auto *Inst = dyn_cast<MemCpyInst>(CS.getInstruction())) {
+ if (auto *Inst = dyn_cast<AnyMemCpyInst>(CS.getInstruction())) {
AliasResult SrcAA, DestAA;
if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
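
Spelled out on a concrete transfer (illustrative IR; AnyMemCpyInst additionally matches the element-wise atomic memcpy intrinsic):

    // For any memcpy-like call:
    //
    //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i1 false)
    //
    // %dst and %src must not alias. Hence if Loc must-aliases %dst it
    // cannot overlap %src, and the call at most writes Loc (Mod); if Loc
    // must-aliases %src, the call at most reads it (Ref).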
@@ -958,12 +1000,12 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
/// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
/// both having the exact same pointer operand.
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
- uint64_t V1Size,
+ LocationSize V1Size,
const GEPOperator *GEP2,
- uint64_t V2Size,
+ LocationSize V2Size,
const DataLayout &DL) {
- assert(GEP1->getPointerOperand()->stripPointerCastsAndBarriers() ==
- GEP2->getPointerOperand()->stripPointerCastsAndBarriers() &&
+ assert(GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
+ GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
"Expected GEPs with the same pointer operand");
@@ -1135,8 +1177,8 @@ static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
// the highest %f1 can be is (%alloca + 3). This means %random can not be higher
// than (%alloca - 1), and so is not inbounds, a contradiction.
bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
- const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
- uint64_t ObjectAccessSize) {
+ const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
+ LocationSize ObjectAccessSize) {
// If the object access size is unknown, or the GEP isn't inbounds, bail.
if (ObjectAccessSize == MemoryLocation::UnknownSize || !GEPOp->isInBounds())
return false;
@@ -1153,13 +1195,13 @@ bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
DecompObject.OtherOffset;
// If the GEP has no variable indices, we know the precise offset
- // from the base, then use it. If the GEP has variable indices, we're in
- // a bit more trouble: we can't count on the constant offsets that come
- // from non-struct sources, since these can be "rewound" by a negative
- // variable offset. So use only offsets that came from structs.
+  // from the base and can use it. If the GEP has variable indices, we
+  // cannot compute an exact offset to distinguish the pointers, so return
+  // false in that case.
+ if (!DecompGEP.VarIndices.empty())
+ return false;
int64_t GEPBaseOffset = DecompGEP.StructOffset;
- if (DecompGEP.VarIndices.empty())
- GEPBaseOffset += DecompGEP.OtherOffset;
+ GEPBaseOffset += DecompGEP.OtherOffset;
return (GEPBaseOffset >= ObjectBaseOffset + (int64_t)ObjectAccessSize);
}
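
A worked instance of the final comparison, under the new constant-indices-only restriction (the numbers are invented for illustration):

    static bool negativeOffsetExample() {
      int64_t ObjectBaseOffset = 0;  // other object's decomposed base
      int64_t ObjectAccessSize = 4;  // its access covers [0, 4)
      int64_t GEPBaseOffset = 8;     // StructOffset + OtherOffset, with no
                                     // variable indices to rewind it
      // 8 >= 0 + 4: the inbounds GEP starts at or past the object's end,
      // so overlap would require a negative, non-inbounds offset.
      return GEPBaseOffset >= ObjectBaseOffset + ObjectAccessSize; // true
    }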
@@ -1170,11 +1212,11 @@ bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is GetUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
/// V2.
-AliasResult BasicAAResult::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
- const AAMDNodes &V1AAInfo, const Value *V2,
- uint64_t V2Size, const AAMDNodes &V2AAInfo,
- const Value *UnderlyingV1,
- const Value *UnderlyingV2) {
+AliasResult
+BasicAAResult::aliasGEP(const GEPOperator *GEP1, LocationSize V1Size,
+ const AAMDNodes &V1AAInfo, const Value *V2,
+ LocationSize V2Size, const AAMDNodes &V2AAInfo,
+ const Value *UnderlyingV1, const Value *UnderlyingV2) {
DecomposedGEP DecompGEP1, DecompGEP2;
bool GEP1MaxLookupReached =
DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
@@ -1241,8 +1283,8 @@ AliasResult BasicAAResult::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
// If we know the two GEPs are based off of the exact same pointer (and not
// just the same underlying object), see if that tells us anything about
// the resulting pointers.
- if (GEP1->getPointerOperand()->stripPointerCastsAndBarriers() ==
- GEP2->getPointerOperand()->stripPointerCastsAndBarriers() &&
+ if (GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
+ GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
// If we couldn't find anything interesting, don't abandon just yet.
@@ -1403,9 +1445,10 @@ static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
-AliasResult BasicAAResult::aliasSelect(const SelectInst *SI, uint64_t SISize,
+AliasResult BasicAAResult::aliasSelect(const SelectInst *SI,
+ LocationSize SISize,
const AAMDNodes &SIAAInfo,
- const Value *V2, uint64_t V2Size,
+ const Value *V2, LocationSize V2Size,
const AAMDNodes &V2AAInfo,
const Value *UnderV2) {
// If the values are Selects with the same condition, we can do a more precise
@@ -1438,9 +1481,10 @@ AliasResult BasicAAResult::aliasSelect(const SelectInst *SI, uint64_t SISize,
/// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
-AliasResult BasicAAResult::aliasPHI(const PHINode *PN, uint64_t PNSize,
+AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
const AAMDNodes &PNAAInfo, const Value *V2,
- uint64_t V2Size, const AAMDNodes &V2AAInfo,
+ LocationSize V2Size,
+ const AAMDNodes &V2AAInfo,
const Value *UnderV2) {
// Track phi nodes we have visited. We use this information when we determine
// value equivalence.
@@ -1485,34 +1529,70 @@ AliasResult BasicAAResult::aliasPHI(const PHINode *PN, uint64_t PNSize,
return Alias;
}
- SmallPtrSet<Value *, 4> UniqueSrc;
SmallVector<Value *, 4> V1Srcs;
bool isRecursive = false;
- for (Value *PV1 : PN->incoming_values()) {
- if (isa<PHINode>(PV1))
- // If any of the source itself is a PHI, return MayAlias conservatively
- // to avoid compile time explosion. The worst possible case is if both
- // sides are PHI nodes. In which case, this is O(m x n) time where 'm'
- // and 'n' are the number of PHI sources.
+ if (PV) {
+ // If we have PhiValues then use it to get the underlying phi values.
+ const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
+    // If we have more phi values than the search depth then return MayAlias
+    // conservatively to avoid compile time explosion. The worst possible
+    // case is when both sides are PHI nodes, in which case this is
+    // O(m x n) time where 'm' and 'n' are the number of PHI sources.
+ if (PhiValueSet.size() > MaxLookupSearchDepth)
return MayAlias;
-
- if (EnableRecPhiAnalysis)
- if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
- // Check whether the incoming value is a GEP that advances the pointer
- // result of this PHI node (e.g. in a loop). If this is the case, we
- // would recurse and always get a MayAlias. Handle this case specially
- // below.
- if (PV1GEP->getPointerOperand() == PN && PV1GEP->getNumIndices() == 1 &&
- isa<ConstantInt>(PV1GEP->idx_begin())) {
- isRecursive = true;
- continue;
+ // Add the values to V1Srcs
+ for (Value *PV1 : PhiValueSet) {
+ if (EnableRecPhiAnalysis) {
+ if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
+ // Check whether the incoming value is a GEP that advances the pointer
+ // result of this PHI node (e.g. in a loop). If this is the case, we
+ // would recurse and always get a MayAlias. Handle this case specially
+ // below.
+ if (PV1GEP->getPointerOperand() == PN && PV1GEP->getNumIndices() == 1 &&
+ isa<ConstantInt>(PV1GEP->idx_begin())) {
+ isRecursive = true;
+ continue;
+ }
}
}
-
- if (UniqueSrc.insert(PV1).second)
V1Srcs.push_back(PV1);
+ }
+ } else {
+    // If we don't have PhiValues then just look at the operands of the phi
+    // itself.
+    // FIXME: Remove this once we can guarantee that we always have PhiValues.
+ SmallPtrSet<Value *, 4> UniqueSrc;
+ for (Value *PV1 : PN->incoming_values()) {
+ if (isa<PHINode>(PV1))
+        // If any of the sources is itself a PHI, return MayAlias
+        // conservatively to avoid compile time explosion. The worst possible
+        // case is when both sides are PHI nodes, in which case this is
+        // O(m x n) time where 'm' and 'n' are the number of PHI sources.
+ return MayAlias;
+
+ if (EnableRecPhiAnalysis)
+ if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
+ // Check whether the incoming value is a GEP that advances the pointer
+ // result of this PHI node (e.g. in a loop). If this is the case, we
+ // would recurse and always get a MayAlias. Handle this case specially
+ // below.
+ if (PV1GEP->getPointerOperand() == PN && PV1GEP->getNumIndices() == 1 &&
+ isa<ConstantInt>(PV1GEP->idx_begin())) {
+ isRecursive = true;
+ continue;
+ }
+ }
+
+ if (UniqueSrc.insert(PV1).second)
+ V1Srcs.push_back(PV1);
+ }
}
+  // If V1Srcs is empty then the phi has no underlying non-phi value. This
+  // should only be possible in blocks unreachable from the entry block, but
+  // return MayAlias just in case.
+ if (V1Srcs.empty())
+ return MayAlias;
+
// If this PHI node is recursive, set the size of the accessed memory to
// unknown to represent all the possible values the GEP could advance the
// pointer to.
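
What the PhiValues query adds, sketched (assuming PN and PV as in the function above):

    // Phi-of-phi sources used to force an immediate MayAlias:
    //
    //   %a = phi i8* [ %x, %bb0 ], [ %b, %bb1 ]
    //   %b = phi i8* [ %y, %bb2 ], [ %a, %bb3 ]
    //
    // PV->getValuesForPhi(%a) looks through the cycle and yields just the
    // underlying non-phi values {%x, %y}, which fit in V1Srcs; the old
    // walk over incoming_values() gave up as soon as it saw %b.
    const llvm::PhiValues::ValueSet &Underlying = PV->getValuesForPhi(PN);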
@@ -1545,9 +1625,9 @@ AliasResult BasicAAResult::aliasPHI(const PHINode *PN, uint64_t PNSize,
/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
-AliasResult BasicAAResult::aliasCheck(const Value *V1, uint64_t V1Size,
+AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
AAMDNodes V1AAInfo, const Value *V2,
- uint64_t V2Size, AAMDNodes V2AAInfo,
+ LocationSize V2Size, AAMDNodes V2AAInfo,
const Value *O1, const Value *O2) {
// If either of the memory references is empty, it doesn't matter what the
// pointer values are.
@@ -1555,8 +1635,8 @@ AliasResult BasicAAResult::aliasCheck(const Value *V1, uint64_t V1Size,
return NoAlias;
// Strip off any casts if they exist.
- V1 = V1->stripPointerCastsAndBarriers();
- V2 = V2->stripPointerCastsAndBarriers();
+ V1 = V1->stripPointerCastsAndInvariantGroups();
+ V2 = V2->stripPointerCastsAndInvariantGroups();
// If V1 or V2 is undef, the result is NoAlias because we can always pick a
// value for undef that aliases nothing in the program.
@@ -1585,10 +1665,10 @@ AliasResult BasicAAResult::aliasCheck(const Value *V1, uint64_t V1Size,
// Null values in the default address space don't point to any object, so they
// don't alias any other pointer.
if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
- if (CPN->getType()->getAddressSpace() == 0)
+ if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
return NoAlias;
if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
- if (CPN->getType()->getAddressSpace() == 0)
+ if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
return NoAlias;
if (O1 != O2) {
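
The function-level escape hatch the new check respects, as an IR sketch:

    // With the "null-pointer-is-valid" string attribute, address-space-0
    // null may point at real memory, so the old bare address-space test
    // was not sound there:
    //
    //   define i8 @f(i8* %p) "null-pointer-is-valid"="true" {
    //     %v = load i8, i8* %p    ; %p may legitimately be null
    //     ret i8 %v
    //   }
    //
    // NullPointerIsDefined(&F, AS) is true for @f, so a null underlying
    // object no longer yields NoAlias.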
@@ -1624,10 +1704,11 @@ AliasResult BasicAAResult::aliasCheck(const Value *V1, uint64_t V1Size,
// If the size of one access is larger than the entire object on the other
// side, then we know such behavior is undefined and can assume no alias.
+ bool NullIsValidLocation = NullPointerIsDefined(&F);
if ((V1Size != MemoryLocation::UnknownSize &&
- isObjectSmallerThan(O2, V1Size, DL, TLI)) ||
+ isObjectSmallerThan(O2, V1Size, DL, TLI, NullIsValidLocation)) ||
(V2Size != MemoryLocation::UnknownSize &&
- isObjectSmallerThan(O1, V2Size, DL, TLI)))
+ isObjectSmallerThan(O1, V2Size, DL, TLI, NullIsValidLocation)))
return NoAlias;
// Check the cache before climbing up use-def chains. This also terminates
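
A concrete instance of the undefined behavior this rule exploits, at the C++ level:

    static void objectSizeUBExample() {
      char C;                               // object of size 1
      int *P = reinterpret_cast<int *>(&C); // 4-byte accesses through P
      (void)P;
      // Executing `*P = 0;` would be UB: a 4-byte access cannot lie inside
      // the 1-byte object. So for (P, 4 bytes) vs. C, isObjectSmallerThan
      // lets BasicAA answer NoAlias.
    }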
@@ -1687,8 +1768,8 @@ AliasResult BasicAAResult::aliasCheck(const Value *V1, uint64_t V1Size,
if (O1 == O2)
if (V1Size != MemoryLocation::UnknownSize &&
V2Size != MemoryLocation::UnknownSize &&
- (isObjectSize(O1, V1Size, DL, TLI) ||
- isObjectSize(O2, V2Size, DL, TLI)))
+ (isObjectSize(O1, V1Size, DL, TLI, NullIsValidLocation) ||
+ isObjectSize(O2, V2Size, DL, TLI, NullIsValidLocation)))
return AliasCache[Locs] = PartialAlias;
// Recurse back into the best AA results we have, potentially with refined
@@ -1771,8 +1852,8 @@ void BasicAAResult::GetIndexDifference(
}
bool BasicAAResult::constantOffsetHeuristic(
- const SmallVectorImpl<VariableGEPIndex> &VarIndices, uint64_t V1Size,
- uint64_t V2Size, int64_t BaseOffset, AssumptionCache *AC,
+ const SmallVectorImpl<VariableGEPIndex> &VarIndices, LocationSize V1Size,
+ LocationSize V2Size, int64_t BaseOffset, AssumptionCache *AC,
DominatorTree *DT) {
if (VarIndices.size() != 2 || V1Size == MemoryLocation::UnknownSize ||
V2Size == MemoryLocation::UnknownSize)
@@ -1832,10 +1913,12 @@ AnalysisKey BasicAA::Key;
BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
return BasicAAResult(F.getParent()->getDataLayout(),
+ F,
AM.getResult<TargetLibraryAnalysis>(F),
AM.getResult<AssumptionAnalysis>(F),
&AM.getResult<DominatorTreeAnalysis>(F),
- AM.getCachedResult<LoopAnalysis>(F));
+ AM.getCachedResult<LoopAnalysis>(F),
+ AM.getCachedResult<PhiValuesAnalysis>(F));
}
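
A hypothetical new-PM setup showing why getCachedResult matters here (the other analyses BasicAA requires are elided):

    #include "llvm/Analysis/BasicAliasAnalysis.h"
    #include "llvm/Analysis/PhiValues.h"
    #include "llvm/IR/PassManager.h"

    static void registerForBasicAA() {
      llvm::FunctionAnalysisManager FAM;
      FAM.registerPass([] { return llvm::PhiValuesAnalysis(); });
      FAM.registerPass([] { return llvm::BasicAA(); });
      // BasicAA::run only reads PhiValues from the cache: unless another
      // pass has already computed PhiValuesAnalysis for the function, PV
      // is null and aliasPHI falls back to incoming_values().
    }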
BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
@@ -1847,12 +1930,12 @@ char BasicAAWrapperPass::ID = 0;
void BasicAAWrapperPass::anchor() {}
INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basicaa",
- "Basic Alias Analysis (stateless AA impl)", true, true)
+ "Basic Alias Analysis (stateless AA impl)", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basicaa",
- "Basic Alias Analysis (stateless AA impl)", true, true)
+ "Basic Alias Analysis (stateless AA impl)", false, true)
FunctionPass *llvm::createBasicAAWrapperPass() {
return new BasicAAWrapperPass();
@@ -1863,10 +1946,12 @@ bool BasicAAWrapperPass::runOnFunction(Function &F) {
auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
+ auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();
- Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), TLIWP.getTLI(),
+ Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F, TLIWP.getTLI(),
ACT.getAssumptionCache(F), &DTWP.getDomTree(),
- LIWP ? &LIWP->getLoopInfo() : nullptr));
+ LIWP ? &LIWP->getLoopInfo() : nullptr,
+ PVWP ? &PVWP->getResult() : nullptr));
return false;
}
@@ -1876,11 +1961,13 @@ void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<AssumptionCacheTracker>();
AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
+ AU.addUsedIfAvailable<PhiValuesWrapperPass>();
}
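
Because the legacy-PM dependency is only addUsedIfAvailable, a client has to schedule PhiValues itself for BasicAA to benefit. A hypothetical pass (MyPass is not part of the patch, and whether BasicAA sees the result still depends on legacy-PM scheduling):

    #include "llvm/Analysis/AliasAnalysis.h"
    #include "llvm/Analysis/PhiValues.h"
    #include "llvm/Pass.h"

    struct MyPass : llvm::FunctionPass {
      static char ID;
      MyPass() : llvm::FunctionPass(ID) {}
      void getAnalysisUsage(llvm::AnalysisUsage &AU) const override {
        AU.addRequired<llvm::PhiValuesWrapperPass>(); // computed first
        AU.addRequired<llvm::AAResultsWrapperPass>(); // BasicAA can use it
        AU.setPreservesAll();
      }
      bool runOnFunction(llvm::Function &F) override { return false; }
    };
    char MyPass::ID = 0;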
BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
return BasicAAResult(
F.getParent()->getDataLayout(),
+ F,
P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
}