| field | value | timestamp |
|---|---|---|
| author | Dimitry Andric <dim@FreeBSD.org> | 2017-12-18 20:10:56 +0000 |
| committer | Dimitry Andric <dim@FreeBSD.org> | 2017-12-18 20:10:56 +0000 |
| commit | 044eb2f6afba375a914ac9d8024f8f5142bb912e (patch) | |
| tree | 1475247dc9f9fe5be155ebd4c9069c75aadf8c20 /lib/Analysis/BasicAliasAnalysis.cpp | |
| parent | eb70dddbd77e120e5d490bd8fbe7ff3f8fa81c6b (diff) | |
Diffstat (limited to 'lib/Analysis/BasicAliasAnalysis.cpp')
| mode | path | changes |
|---|---|---|
| -rw-r--r-- | lib/Analysis/BasicAliasAnalysis.cpp | 103 |

1 file changed, 64 insertions, 39 deletions
```diff
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp
index e682a644ef2c..81b9f842249e 100644
--- a/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/lib/Analysis/BasicAliasAnalysis.cpp
@@ -14,6 +14,8 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/Analysis/BasicAliasAnalysis.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/AliasAnalysis.h"
@@ -23,21 +25,40 @@
 #include "llvm/Analysis/InstructionSimplify.h"
 #include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/Analysis/MemoryLocation.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constant.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/DerivedTypes.h"
 #include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
 #include "llvm/IR/GlobalAlias.h"
 #include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/IntrinsicInst.h"
-#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Metadata.h"
 #include "llvm/IR/Operator.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
 #include "llvm/Pass.h"
-#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/KnownBits.h"
-#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <cstdlib>
+#include <utility>
 
 #define DEBUG_TYPE "basicaa"
 
@@ -223,7 +244,6 @@ static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
 
   if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
     if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
-
       // If we've been called recursively, then Offset and Scale will be wider
       // than the BOp operands. We'll always zext it here as we'll process sign
       // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
@@ -574,7 +594,6 @@ bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
     // Otherwise be conservative.
     Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
-
  } while (!Worklist.empty() && --MaxLookup);
 
  Visited.clear();
@@ -598,6 +617,10 @@ FunctionModRefBehavior BasicAAResult::getModRefBehavior(ImmutableCallSite CS) {
 
   if (CS.onlyAccessesArgMemory())
     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
+  else if (CS.onlyAccessesInaccessibleMemory())
+    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
+  else if (CS.onlyAccessesInaccessibleMemOrArgMem())
+    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
 
   // If CS has operand bundles then aliasing attributes from the function it
   // calls do not directly apply to the CallSite. This can be made more
@@ -662,16 +685,15 @@ static bool isWriteOnlyParam(ImmutableCallSite CS, unsigned ArgIdx,
 
 ModRefInfo BasicAAResult::getArgModRefInfo(ImmutableCallSite CS,
                                            unsigned ArgIdx) {
-
   // Checking for known builtin intrinsics and target library functions.
   if (isWriteOnlyParam(CS, ArgIdx, TLI))
-    return MRI_Mod;
+    return ModRefInfo::Mod;
 
   if (CS.paramHasAttr(ArgIdx, Attribute::ReadOnly))
-    return MRI_Ref;
+    return ModRefInfo::Ref;
 
   if (CS.paramHasAttr(ArgIdx, Attribute::ReadNone))
-    return MRI_NoModRef;
+    return ModRefInfo::NoModRef;
 
   return AAResultBase::getArgModRefInfo(CS, ArgIdx);
 }
@@ -748,7 +770,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
   if (isa<AllocaInst>(Object))
     if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
       if (CI->isTailCall())
-        return MRI_NoModRef;
+        return ModRefInfo::NoModRef;
 
   // If the pointer is to a locally allocated object that does not escape,
   // then the call can not mod/ref the pointer unless the call takes the pointer
@@ -758,7 +780,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
 
     // Optimistically assume that call doesn't touch Object and check this
     // assumption in the following loop.
-    ModRefInfo Result = MRI_NoModRef;
+    ModRefInfo Result = ModRefInfo::NoModRef;
 
     unsigned OperandNo = 0;
     for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
@@ -787,21 +809,21 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
       // Operand aliases 'Object', but call doesn't modify it. Strengthen
       // initial assumption and keep looking in case if there are more aliases.
       if (CS.onlyReadsMemory(OperandNo)) {
-        Result = static_cast<ModRefInfo>(Result | MRI_Ref);
+        Result = setRef(Result);
        continue;
       }
       // Operand aliases 'Object' but call only writes into it.
       if (CS.doesNotReadMemory(OperandNo)) {
-        Result = static_cast<ModRefInfo>(Result | MRI_Mod);
+        Result = setMod(Result);
         continue;
       }
       // This operand aliases 'Object' and call reads and writes into it.
-      Result = MRI_ModRef;
+      Result = ModRefInfo::ModRef;
       break;
     }
 
     // Early return if we improved mod ref information
-    if (Result != MRI_ModRef)
+    if (!isModAndRefSet(Result))
       return Result;
   }
 
@@ -810,13 +832,13 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
   // routines do not read values visible in the IR. TODO: Consider special
   // casing realloc and strdup routines which access only their arguments as
   // well. Or alternatively, replace all of this with inaccessiblememonly once
-  // that's implemented fully. 
+  // that's implemented fully.
   auto *Inst = CS.getInstruction();
   if (isMallocOrCallocLikeFn(Inst, &TLI)) {
     // Be conservative if the accessed pointer may alias the allocation -
     // fallback to the generic handling below.
     if (getBestAAResults().alias(MemoryLocation(Inst), Loc) == NoAlias)
-      return MRI_NoModRef;
+      return ModRefInfo::NoModRef;
   }
 
   // The semantics of memcpy intrinsics forbid overlap between their respective
@@ -829,18 +851,18 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
     if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
                                           Loc)) == MustAlias)
       // Loc is exactly the memcpy source thus disjoint from memcpy dest.
-      return MRI_Ref;
+      return ModRefInfo::Ref;
     if ((DestAA = getBestAAResults().alias(MemoryLocation::getForDest(Inst),
                                            Loc)) == MustAlias)
       // The converse case.
-      return MRI_Mod;
+      return ModRefInfo::Mod;
 
     // It's also possible for Loc to alias both src and dest, or neither.
-    ModRefInfo rv = MRI_NoModRef;
+    ModRefInfo rv = ModRefInfo::NoModRef;
     if (SrcAA != NoAlias)
-      rv = static_cast<ModRefInfo>(rv | MRI_Ref);
+      rv = setRef(rv);
     if (DestAA != NoAlias)
-      rv = static_cast<ModRefInfo>(rv | MRI_Mod);
+      rv = setMod(rv);
     return rv;
   }
 
@@ -848,7 +870,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
   // proper control dependencies will be maintained, it never aliases any
   // particular memory location.
   if (isIntrinsicCall(CS, Intrinsic::assume))
-    return MRI_NoModRef;
+    return ModRefInfo::NoModRef;
 
   // Like assumes, guard intrinsics are also marked as arbitrarily writing so
   // that proper control dependencies are maintained but they never mods any
@@ -858,7 +880,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
   // heap state at the point the guard is issued needs to be consistent in case
   // the guard invokes the "deopt" continuation.
   if (isIntrinsicCall(CS, Intrinsic::experimental_guard))
-    return MRI_Ref;
+    return ModRefInfo::Ref;
 
   // Like assumes, invariant.start intrinsics were also marked as arbitrarily
   // writing so that proper control dependencies are maintained but they never
@@ -884,7 +906,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
   // rules of invariant.start) and print 40, while the first program always
   // prints 50.
   if (isIntrinsicCall(CS, Intrinsic::invariant_start))
-    return MRI_Ref;
+    return ModRefInfo::Ref;
   // The AAResultBase base class has some smarts, lets use them.
   return AAResultBase::getModRefInfo(CS, Loc);
 }
@@ -897,7 +919,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
   // particular memory location.
   if (isIntrinsicCall(CS1, Intrinsic::assume) ||
       isIntrinsicCall(CS2, Intrinsic::assume))
-    return MRI_NoModRef;
+    return ModRefInfo::NoModRef;
 
   // Like assumes, guard intrinsics are also marked as arbitrarily writing so
   // that proper control dependencies are maintained but they never mod any
@@ -911,10 +933,14 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
   // possibilities for guard intrinsics.
 
   if (isIntrinsicCall(CS1, Intrinsic::experimental_guard))
-    return getModRefBehavior(CS2) & MRI_Mod ? MRI_Ref : MRI_NoModRef;
+    return isModSet(createModRefInfo(getModRefBehavior(CS2)))
+               ? ModRefInfo::Ref
+               : ModRefInfo::NoModRef;
 
   if (isIntrinsicCall(CS2, Intrinsic::experimental_guard))
-    return getModRefBehavior(CS1) & MRI_Mod ? MRI_Mod : MRI_NoModRef;
+    return isModSet(createModRefInfo(getModRefBehavior(CS1)))
+               ? ModRefInfo::Mod
+               : ModRefInfo::NoModRef;
 
   // The AAResultBase base class has some smarts, lets use them.
   return AAResultBase::getModRefInfo(CS1, CS2);
@@ -927,7 +953,6 @@ static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                             const GEPOperator *GEP2,
                                             uint64_t V2Size,
                                             const DataLayout &DL) {
-
   assert(GEP1->getPointerOperand()->stripPointerCastsAndBarriers() ==
              GEP2->getPointerOperand()->stripPointerCastsAndBarriers() &&
          GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
@@ -1196,8 +1221,10 @@ AliasResult BasicAAResult::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
 
     // If we get a No or May, then return it immediately, no amount of analysis
     // will improve this situation.
-    if (BaseAlias != MustAlias)
+    if (BaseAlias != MustAlias) {
+      assert(BaseAlias == NoAlias || BaseAlias == MayAlias);
       return BaseAlias;
+    }
 
     // Otherwise, we have a MustAlias.  Since the base pointers alias each other
     // exactly, see if the computed offset from the common pointer tells us
@@ -1236,13 +1263,15 @@ AliasResult BasicAAResult::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
     AliasResult R = aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize,
                                AAMDNodes(), V2, MemoryLocation::UnknownSize,
                                V2AAInfo, nullptr, UnderlyingV2);
-    if (R != MustAlias)
+    if (R != MustAlias) {
       // If V2 may alias GEP base pointer, conservatively returns MayAlias.
       // If V2 is known not to alias GEP base pointer, then the two values
      // cannot alias per GEP semantics: "Any memory access must be done through
       // a pointer value associated with an address range of the memory access,
       // otherwise the behavior is undefined.".
+      assert(R == NoAlias || R == MayAlias);
       return R;
+    }
 
     // If the max search depth is reached the result is undefined
     if (GEP1MaxLookupReached)
@@ -1569,11 +1598,6 @@ AliasResult BasicAAResult::aliasCheck(const Value *V1, uint64_t V1Size,
       (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
     return NoAlias;
 
-  // Most objects can't alias null.
-  if ((isa<ConstantPointerNull>(O2) && isKnownNonNull(O1)) ||
-      (isa<ConstantPointerNull>(O1) && isKnownNonNull(O2)))
-    return NoAlias;
-
   // If one pointer is the result of a call/invoke or load and the other is a
   // non-escaping local object within the same function, then we know the
   // object couldn't escape to a point where the call could return it.
@@ -1652,9 +1676,9 @@ AliasResult BasicAAResult::aliasCheck(const Value *V1, uint64_t V1Size,
   // If both pointers are pointing into the same object and one of them
   // accesses the entire object, then the accesses must overlap in some way.
   if (O1 == O2)
-    if ((V1Size != MemoryLocation::UnknownSize &&
-         isObjectSize(O1, V1Size, DL, TLI)) ||
-        (V2Size != MemoryLocation::UnknownSize &&
+    if (V1Size != MemoryLocation::UnknownSize &&
+        V2Size != MemoryLocation::UnknownSize &&
+        (isObjectSize(O1, V1Size, DL, TLI) ||
          isObjectSize(O2, V2Size, DL, TLI)))
       return AliasCache[Locs] = PartialAlias;
 
@@ -1810,6 +1834,7 @@ BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
 }
 
 char BasicAAWrapperPass::ID = 0;
+
 void BasicAAWrapperPass::anchor() {}
 
 INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basicaa",
```
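The bulk of this diff is a mechanical migration from the old `MRI_*` enumerators, which were combined with bitwise OR and `static_cast<ModRefInfo>`, to the scoped `ModRefInfo` enum class and its helper functions (`setRef`, `setMod`, `isModSet`, `isModAndRefSet`, `createModRefInfo`). The sketch below is not LLVM's implementation; it is a minimal, self-contained illustration with simplified, hypothetical stand-in definitions, showing why `Result = setRef(Result)` is equivalent to the old bit-twiddling.

```cpp
#include <cassert>
#include <cstdint>

// Hypothetical, simplified stand-ins for LLVM's ModRefInfo helpers; the real
// definitions live in llvm/Analysis/AliasAnalysis.h and carry more detail.
enum class ModRefInfo : uint8_t {
  NoModRef = 0,
  Ref = 1,          // the operation may read the location
  Mod = 2,          // the operation may write the location
  ModRef = Ref | Mod,
};

static inline ModRefInfo setRef(ModRefInfo MRI) {
  return ModRefInfo(uint8_t(MRI) | uint8_t(ModRefInfo::Ref));
}
static inline ModRefInfo setMod(ModRefInfo MRI) {
  return ModRefInfo(uint8_t(MRI) | uint8_t(ModRefInfo::Mod));
}
static inline bool isModSet(ModRefInfo MRI) {
  return uint8_t(MRI) & uint8_t(ModRefInfo::Mod);
}
static inline bool isModAndRefSet(ModRefInfo MRI) {
  return (uint8_t(MRI) & uint8_t(ModRefInfo::ModRef)) ==
         uint8_t(ModRefInfo::ModRef);
}

int main() {
  // Old style: Result = static_cast<ModRefInfo>(Result | MRI_Ref);
  // New style: the helper hides the cast and keeps the enum scoped.
  ModRefInfo Result = ModRefInfo::NoModRef;
  Result = setRef(Result);          // an operand is only read
  assert(!isModAndRefSet(Result));  // the early-return condition still holds
  Result = setMod(Result);          // another operand is written
  assert(isModAndRefSet(Result) && isModSet(Result));
  return 0;
}
```

Accumulating through helpers keeps `ModRefInfo` a scoped enum with no implicit conversions to int, while preserving the bitwise semantics the previous plain-enum code relied on.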