Diffstat (limited to 'lib/Analysis/BasicAliasAnalysis.cpp')
-rw-r--r--  lib/Analysis/BasicAliasAnalysis.cpp | 145
1 file changed, 95 insertions(+), 50 deletions(-)
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp
index 537813b6b752..96326347b712 100644
--- a/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/lib/Analysis/BasicAliasAnalysis.cpp
@@ -85,15 +85,15 @@ const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;
// depth otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;
-bool BasicAAResult::invalidate(Function &F, const PreservedAnalyses &PA,
+bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
FunctionAnalysisManager::Invalidator &Inv) {
// We don't care if this analysis itself is preserved, it has no state. But
// we need to check that the analyses it depends on have been. Note that we
// may be created without handles to some analyses and in that case don't
// depend on them.
- if (Inv.invalidate<AssumptionAnalysis>(F, PA) ||
- (DT && Inv.invalidate<DominatorTreeAnalysis>(F, PA)) ||
- (LI && Inv.invalidate<LoopAnalysis>(F, PA)))
+ if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
+ (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
+ (LI && Inv.invalidate<LoopAnalysis>(Fn, PA)))
return true;
// Otherwise this analysis result remains valid.
@@ -132,7 +132,10 @@ static bool isNonEscapingLocalObject(const Value *V) {
/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
- if (isa<CallInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V))
+ if (ImmutableCallSite(V))
+ return true;
+
+ if (isa<Argument>(V))
return true;
// The load case works because isNonEscapingLocalObject considers all
@@ -147,10 +150,12 @@ static bool isEscapeSource(const Value *V) {
/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
const TargetLibraryInfo &TLI,
+ bool NullIsValidLoc,
bool RoundToAlign = false) {
uint64_t Size;
ObjectSizeOpts Opts;
Opts.RoundToAlign = RoundToAlign;
+ Opts.NullIsUnknownSize = NullIsValidLoc;
if (getObjectSize(V, Size, DL, &TLI, Opts))
return Size;
return MemoryLocation::UnknownSize;
@@ -160,7 +165,8 @@ static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
const DataLayout &DL,
- const TargetLibraryInfo &TLI) {
+ const TargetLibraryInfo &TLI,
+ bool NullIsValidLoc) {
// Note that the meanings of the "object" are slightly different in the
// following contexts:
// c1: llvm::getObjectSize()
@@ -192,15 +198,16 @@ static bool isObjectSmallerThan(const Value *V, uint64_t Size,
// This function needs to use the aligned object size because we allow
// reads a bit past the end given sufficient alignment.
- uint64_t ObjectSize = getObjectSize(V, DL, TLI, /*RoundToAlign*/ true);
+ uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
+ /*RoundToAlign*/ true);
return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}
/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
- const TargetLibraryInfo &TLI) {
- uint64_t ObjectSize = getObjectSize(V, DL, TLI);
+ const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
+ uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}
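For context, the flag these helpers now thread through maps onto ObjectSizeOpts::NullIsUnknownSize, the same call that appears in the hunk above. A minimal usage sketch, with a hypothetical value V and the surrounding setup assumed:

    // When null is a valid location in this function, a null pointer's
    // "object size" must be treated as unknown rather than zero.
    ObjectSizeOpts Opts;
    Opts.RoundToAlign = false;
    Opts.NullIsUnknownSize = true; // i.e. NullIsValidLoc
    uint64_t Size;
    if (!getObjectSize(V, Size, DL, &TLI, Opts))
      Size = MemoryLocation::UnknownSize; // null no longer reports a size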
@@ -285,6 +292,19 @@ static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
case Instruction::Shl:
V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
+
+ // We're trying to linearize an expression of the kind:
+ // shl i8 -128, 36
+ // where the shift count exceeds the bitwidth of the type.
+ // We can't decompose this further (the expression would return
+ // a poison value).
+ if (Offset.getBitWidth() < RHS.getLimitedValue() ||
+ Scale.getBitWidth() < RHS.getLimitedValue()) {
+ Scale = 1;
+ Offset = 0;
+ return V;
+ }
+
Offset <<= RHS.getLimitedValue();
Scale <<= RHS.getLimitedValue();
// the semantics of nsw and nuw for left shifts don't match those of
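As a standalone illustration of the guard added above (a hedged sketch over APInt with a hypothetical helper name; it mirrors the bail-out condition only, not the rest of GetLinearExpression):

    #include "llvm/ADT/APInt.h"
    using namespace llvm;

    // True when folding the shift into Scale/Offset would be unsound: a
    // count exceeding the bit width makes `shl` yield poison, so the
    // decomposition stops and returns the operand unchanged.
    static bool shiftCountTooWide(const APInt &Offset, const APInt &ShAmt) {
      return Offset.getBitWidth() < ShAmt.getLimitedValue();
    }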
@@ -414,11 +434,21 @@ bool BasicAAResult::DecomposeGEPExpression(const Value *V,
const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
if (!GEPOp) {
- if (auto CS = ImmutableCallSite(V))
- if (const Value *RV = CS.getReturnedArgOperand()) {
- V = RV;
+ if (auto CS = ImmutableCallSite(V)) {
+ // CaptureTracking knows about special capturing properties of some
+ // intrinsics, such as launder.invariant.group, that can't be expressed
+ // with attributes but still return a pointer aliasing their argument.
+ // Because an analysis may assume that a nocapture pointer is never
+ // returned from such an intrinsic (the function would otherwise have to
+ // carry the 'returned' attribute), it is crucial to use this helper so
+ // we stay in sync with CaptureTracking. Not using it may cause weird
+ // miscompilations where two aliasing pointers are assumed to be noalias.
+ if (auto *RP = getArgumentAliasingToReturnedPointer(CS)) {
+ V = RP;
continue;
}
+ }
// If it's not a GEP, hand it off to SimplifyInstruction to see if it
// can come up with something. This matches what GetUnderlyingObject does.
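A hedged sketch of what getArgumentAliasingToReturnedPointer is expected to return (a simplified model with a hypothetical name; the real helper lives in ValueTracking and covers the general intrinsic case):

    // An intrinsic such as llvm.launder.invariant.group returns a pointer
    // aliasing its argument even without a 'returned' attribute, so the
    // decomposition may keep walking through it.
    static const Value *aliasingReturnedPointer(ImmutableCallSite CS) {
      if (const Value *RV = CS.getReturnedArgOperand())
        return RV; // explicit 'returned' attribute
      if (const auto *II = dyn_cast<IntrinsicInst>(CS.getInstruction()))
        if (II->getIntrinsicID() == Intrinsic::launder_invariant_group)
          return II->getArgOperand(0);
      return nullptr;
    }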
@@ -490,6 +520,13 @@ bool BasicAAResult::DecomposeGEPExpression(const Value *V,
Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
SExtBits, DL, 0, AC, DT, NSW, NUW);
+ // All GEP math happens in the width of the pointer type, so we can
+ // truncate the value to 64 bits, as we don't currently handle pointers
+ // larger than 64 bits and would crash later.
+ // TODO: Make `Scale` an APInt to avoid this problem.
+ if (IndexScale.getBitWidth() > 64)
+ IndexScale = IndexScale.sextOrTrunc(64);
+
// The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
// This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
Decomposed.OtherOffset += IndexOffset.getSExtValue() * Scale;
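The truncation itself is ordinary APInt arithmetic; an illustrative example with hypothetical values:

    // A wider-than-64-bit index is narrowed before being multiplied into
    // the offsets; sextOrTrunc sign-extends or truncates to the new width.
    APInt IndexScale(128, 3);
    if (IndexScale.getBitWidth() > 64)
      IndexScale = IndexScale.sextOrTrunc(64); // now 64 bits wide, still 3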
@@ -832,8 +869,11 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
IsMustAlias = false;
// Early return if we improved mod ref information
- if (!isModAndRefSet(Result))
+ if (!isModAndRefSet(Result)) {
+ if (isNoModRef(Result))
+ return ModRefInfo::NoModRef;
return IsMustAlias ? setMust(Result) : clearMust(Result);
+ }
}
// If the CallSite is to malloc or calloc, we can assume that it doesn't
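Why the extra early return matters, as a minimal sketch (helper names as used in the hunk, from llvm/Analysis/AliasAnalysis.h; the wrapper function is hypothetical):

    // The Must bit only qualifies an existing Mod/Ref result. Once the
    // result has degraded to NoModRef there is nothing left to qualify,
    // so return it directly instead of applying setMust/clearMust.
    static ModRefInfo finishResult(ModRefInfo Result, bool IsMustAlias) {
      if (isNoModRef(Result))
        return ModRefInfo::NoModRef;
      return IsMustAlias ? setMust(Result) : clearMust(Result);
    }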
@@ -854,7 +894,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
// operands, i.e., source and destination of any given memcpy must no-alias.
// If Loc must-aliases either one of these two locations, then it necessarily
// no-aliases the other.
- if (auto *Inst = dyn_cast<MemCpyInst>(CS.getInstruction())) {
+ if (auto *Inst = dyn_cast<AnyMemCpyInst>(CS.getInstruction())) {
AliasResult SrcAA, DestAA;
if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
@@ -958,12 +998,12 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
/// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
/// both having the exact same pointer operand.
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
- uint64_t V1Size,
+ LocationSize V1Size,
const GEPOperator *GEP2,
- uint64_t V2Size,
+ LocationSize V2Size,
const DataLayout &DL) {
- assert(GEP1->getPointerOperand()->stripPointerCastsAndBarriers() ==
- GEP2->getPointerOperand()->stripPointerCastsAndBarriers() &&
+ assert(GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
+ GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
"Expected GEPs with the same pointer operand");
@@ -1135,8 +1175,8 @@ static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
// the highest %f1 can be is (%alloca + 3). This means %random can not be higher
// than (%alloca - 1), and so is not inbounds, a contradiction.
bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
- const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
- uint64_t ObjectAccessSize) {
+ const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
+ LocationSize ObjectAccessSize) {
// If the object access size is unknown, or the GEP isn't inbounds, bail.
if (ObjectAccessSize == MemoryLocation::UnknownSize || !GEPOp->isInBounds())
return false;
@@ -1153,13 +1193,13 @@ bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
DecompObject.OtherOffset;
// If the GEP has no variable indices, we know the precise offset
- // from the base, then use it. If the GEP has variable indices, we're in
- // a bit more trouble: we can't count on the constant offsets that come
- // from non-struct sources, since these can be "rewound" by a negative
- // variable offset. So use only offsets that came from structs.
+ // from the base, so use it. If the GEP has variable indices, we can't
+ // compute the exact GEP offset to disambiguate the pointers, so return
+ // false in that case.
+ if (!DecompGEP.VarIndices.empty())
+ return false;
int64_t GEPBaseOffset = DecompGEP.StructOffset;
- if (DecompGEP.VarIndices.empty())
- GEPBaseOffset += DecompGEP.OtherOffset;
+ GEPBaseOffset += DecompGEP.OtherOffset;
return (GEPBaseOffset >= ObjectBaseOffset + (int64_t)ObjectAccessSize);
}
@@ -1170,11 +1210,11 @@ bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is GetUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
/// V2.
-AliasResult BasicAAResult::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
- const AAMDNodes &V1AAInfo, const Value *V2,
- uint64_t V2Size, const AAMDNodes &V2AAInfo,
- const Value *UnderlyingV1,
- const Value *UnderlyingV2) {
+AliasResult
+BasicAAResult::aliasGEP(const GEPOperator *GEP1, LocationSize V1Size,
+ const AAMDNodes &V1AAInfo, const Value *V2,
+ LocationSize V2Size, const AAMDNodes &V2AAInfo,
+ const Value *UnderlyingV1, const Value *UnderlyingV2) {
DecomposedGEP DecompGEP1, DecompGEP2;
bool GEP1MaxLookupReached =
DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
@@ -1241,8 +1281,8 @@ AliasResult BasicAAResult::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
// If we know the two GEPs are based off of the exact same pointer (and not
// just the same underlying object), see if that tells us anything about
// the resulting pointers.
- if (GEP1->getPointerOperand()->stripPointerCastsAndBarriers() ==
- GEP2->getPointerOperand()->stripPointerCastsAndBarriers() &&
+ if (GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
+ GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
// If we couldn't find anything interesting, don't abandon just yet.
@@ -1403,9 +1443,10 @@ static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
-AliasResult BasicAAResult::aliasSelect(const SelectInst *SI, uint64_t SISize,
+AliasResult BasicAAResult::aliasSelect(const SelectInst *SI,
+ LocationSize SISize,
const AAMDNodes &SIAAInfo,
- const Value *V2, uint64_t V2Size,
+ const Value *V2, LocationSize V2Size,
const AAMDNodes &V2AAInfo,
const Value *UnderV2) {
// If the values are Selects with the same condition, we can do a more precise
@@ -1438,9 +1479,10 @@ AliasResult BasicAAResult::aliasSelect(const SelectInst *SI, uint64_t SISize,
/// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
-AliasResult BasicAAResult::aliasPHI(const PHINode *PN, uint64_t PNSize,
+AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
const AAMDNodes &PNAAInfo, const Value *V2,
- uint64_t V2Size, const AAMDNodes &V2AAInfo,
+ LocationSize V2Size,
+ const AAMDNodes &V2AAInfo,
const Value *UnderV2) {
// Track phi nodes we have visited. We use this information when we determine
// value equivalence.
@@ -1545,9 +1587,9 @@ AliasResult BasicAAResult::aliasPHI(const PHINode *PN, uint64_t PNSize,
/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
-AliasResult BasicAAResult::aliasCheck(const Value *V1, uint64_t V1Size,
+AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
AAMDNodes V1AAInfo, const Value *V2,
- uint64_t V2Size, AAMDNodes V2AAInfo,
+ LocationSize V2Size, AAMDNodes V2AAInfo,
const Value *O1, const Value *O2) {
// If either of the memory references is empty, it doesn't matter what the
// pointer values are.
@@ -1555,8 +1597,8 @@ AliasResult BasicAAResult::aliasCheck(const Value *V1, uint64_t V1Size,
return NoAlias;
// Strip off any casts if they exist.
- V1 = V1->stripPointerCastsAndBarriers();
- V2 = V2->stripPointerCastsAndBarriers();
+ V1 = V1->stripPointerCastsAndInvariantGroups();
+ V2 = V2->stripPointerCastsAndInvariantGroups();
// If V1 or V2 is undef, the result is NoAlias because we can always pick a
// value for undef that aliases nothing in the program.
@@ -1585,10 +1627,10 @@ AliasResult BasicAAResult::aliasCheck(const Value *V1, uint64_t V1Size,
// Null values in the default address space don't point to any object, so they
// don't alias any other pointer.
if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
- if (CPN->getType()->getAddressSpace() == 0)
+ if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
return NoAlias;
if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
- if (CPN->getType()->getAddressSpace() == 0)
+ if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
return NoAlias;
if (O1 != O2) {
@@ -1624,10 +1666,11 @@ AliasResult BasicAAResult::aliasCheck(const Value *V1, uint64_t V1Size,
// If the size of one access is larger than the entire object on the other
// side, then we know such behavior is undefined and can assume no alias.
+ bool NullIsValidLocation = NullPointerIsDefined(&F);
if ((V1Size != MemoryLocation::UnknownSize &&
- isObjectSmallerThan(O2, V1Size, DL, TLI)) ||
+ isObjectSmallerThan(O2, V1Size, DL, TLI, NullIsValidLocation)) ||
(V2Size != MemoryLocation::UnknownSize &&
- isObjectSmallerThan(O1, V2Size, DL, TLI)))
+ isObjectSmallerThan(O1, V2Size, DL, TLI, NullIsValidLocation)))
return NoAlias;
// Check the cache before climbing up use-def chains. This also terminates
@@ -1687,8 +1730,8 @@ AliasResult BasicAAResult::aliasCheck(const Value *V1, uint64_t V1Size,
if (O1 == O2)
if (V1Size != MemoryLocation::UnknownSize &&
V2Size != MemoryLocation::UnknownSize &&
- (isObjectSize(O1, V1Size, DL, TLI) ||
- isObjectSize(O2, V2Size, DL, TLI)))
+ (isObjectSize(O1, V1Size, DL, TLI, NullIsValidLocation) ||
+ isObjectSize(O2, V2Size, DL, TLI, NullIsValidLocation)))
return AliasCache[Locs] = PartialAlias;
// Recurse back into the best AA results we have, potentially with refined
@@ -1771,8 +1814,8 @@ void BasicAAResult::GetIndexDifference(
}
bool BasicAAResult::constantOffsetHeuristic(
- const SmallVectorImpl<VariableGEPIndex> &VarIndices, uint64_t V1Size,
- uint64_t V2Size, int64_t BaseOffset, AssumptionCache *AC,
+ const SmallVectorImpl<VariableGEPIndex> &VarIndices, LocationSize V1Size,
+ LocationSize V2Size, int64_t BaseOffset, AssumptionCache *AC,
DominatorTree *DT) {
if (VarIndices.size() != 2 || V1Size == MemoryLocation::UnknownSize ||
V2Size == MemoryLocation::UnknownSize)
@@ -1832,6 +1875,7 @@ AnalysisKey BasicAA::Key;
BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
return BasicAAResult(F.getParent()->getDataLayout(),
+ F,
AM.getResult<TargetLibraryAnalysis>(F),
AM.getResult<AssumptionAnalysis>(F),
&AM.getResult<DominatorTreeAnalysis>(F),
@@ -1864,7 +1908,7 @@ bool BasicAAWrapperPass::runOnFunction(Function &F) {
auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
- Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), TLIWP.getTLI(),
+ Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F, TLIWP.getTLI(),
ACT.getAssumptionCache(F), &DTWP.getDomTree(),
LIWP ? &LIWP->getLoopInfo() : nullptr));
@@ -1881,6 +1925,7 @@ void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
return BasicAAResult(
F.getParent()->getDataLayout(),
+ F,
P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
}