diff options
| Field | Value | Detail |
|---|---|---|
| author | Dimitry Andric <dim@FreeBSD.org> | 2017-12-18 20:10:56 +0000 |
| committer | Dimitry Andric <dim@FreeBSD.org> | 2017-12-18 20:10:56 +0000 |
| commit | 044eb2f6afba375a914ac9d8024f8f5142bb912e (patch) | |
| tree | 1475247dc9f9fe5be155ebd4c9069c75aadf8c20 /lib/Analysis/MemorySSA.cpp | |
| parent | eb70dddbd77e120e5d490bd8fbe7ff3f8fa81c6b (diff) | |
Notes
Diffstat (limited to 'lib/Analysis/MemorySSA.cpp')
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | lib/Analysis/MemorySSA.cpp | 157 |

1 file changed, 98 insertions(+), 59 deletions(-)
| diff --git a/lib/Analysis/MemorySSA.cpp b/lib/Analysis/MemorySSA.cpp index 86de474c7aa9..8fe190e8bcf8 100644 --- a/lib/Analysis/MemorySSA.cpp +++ b/lib/Analysis/MemorySSA.cpp @@ -1,48 +1,63 @@ -//===-- MemorySSA.cpp - Memory SSA Builder---------------------------===// +//===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//  //  //                     The LLVM Compiler Infrastructure  //  // This file is distributed under the University of Illinois Open Source  // License. See LICENSE.TXT for details.  // -//===----------------------------------------------------------------===// +//===----------------------------------------------------------------------===//  //  // This file implements the MemorySSA class.  // -//===----------------------------------------------------------------===// +//===----------------------------------------------------------------------===// +  #include "llvm/Analysis/MemorySSA.h"  #include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/DenseMapInfo.h"  #include "llvm/ADT/DenseSet.h"  #include "llvm/ADT/DepthFirstIterator.h" -#include "llvm/ADT/GraphTraits.h" -#include "llvm/ADT/PostOrderIterator.h" +#include "llvm/ADT/Hashing.h" +#include "llvm/ADT/None.h" +#include "llvm/ADT/Optional.h"  #include "llvm/ADT/STLExtras.h" -#include "llvm/ADT/SmallBitVector.h"  #include "llvm/ADT/SmallPtrSet.h" -#include "llvm/ADT/SmallSet.h" -#include "llvm/ADT/Statistic.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/iterator.h" +#include "llvm/ADT/iterator_range.h"  #include "llvm/Analysis/AliasAnalysis.h" -#include "llvm/Analysis/CFG.h" -#include "llvm/Analysis/GlobalsModRef.h"  #include "llvm/Analysis/IteratedDominanceFrontier.h"  #include "llvm/Analysis/MemoryLocation.h" -#include "llvm/Analysis/PHITransAddr.h"  #include "llvm/IR/AssemblyAnnotationWriter.h" -#include "llvm/IR/DataLayout.h" +#include "llvm/IR/BasicBlock.h" +#include "llvm/IR/CallSite.h"  #include "llvm/IR/Dominators.h" -#include 
"llvm/IR/GlobalVariable.h" -#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/Instruction.h" +#include "llvm/IR/Instructions.h"  #include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/Intrinsics.h"  #include "llvm/IR/LLVMContext.h" -#include "llvm/IR/Metadata.h" -#include "llvm/IR/Module.h" -#include "llvm/IR/PatternMatch.h" +#include "llvm/IR/PassManager.h" +#include "llvm/IR/Use.h" +#include "llvm/Pass.h" +#include "llvm/Support/AtomicOrdering.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Compiler.h"  #include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h"  #include "llvm/Support/FormattedStream.h" +#include "llvm/Support/raw_ostream.h"  #include <algorithm> +#include <cassert> +#include <iterator> +#include <memory> +#include <utility> -#define DEBUG_TYPE "memoryssa"  using namespace llvm; + +#define DEBUG_TYPE "memoryssa" +  INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,                        true)  INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) @@ -66,30 +81,34 @@ static cl::opt<bool>                      cl::desc("Verify MemorySSA in legacy printer pass."));  namespace llvm { +  /// \brief An assembly annotator class to print Memory SSA information in  /// comments.  
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {    friend class MemorySSA; +    const MemorySSA *MSSA;  public:    MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {} -  virtual void emitBasicBlockStartAnnot(const BasicBlock *BB, -                                        formatted_raw_ostream &OS) { +  void emitBasicBlockStartAnnot(const BasicBlock *BB, +                                formatted_raw_ostream &OS) override {      if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))        OS << "; " << *MA << "\n";    } -  virtual void emitInstructionAnnot(const Instruction *I, -                                    formatted_raw_ostream &OS) { +  void emitInstructionAnnot(const Instruction *I, +                            formatted_raw_ostream &OS) override {      if (MemoryAccess *MA = MSSA->getMemoryAccess(I))        OS << "; " << *MA << "\n";    }  }; -} + +} // end namespace llvm  namespace { +  /// Our current alias analysis API differentiates heavily between calls and  /// non-calls, and functions called on one usually assert on the other.  /// This class encapsulates the distinction to simplify other code that wants @@ -97,7 +116,9 @@ namespace {  /// For example, this class is used as a densemap key in the use optimizer.  
class MemoryLocOrCall {  public: -  MemoryLocOrCall() : IsCall(false) {} +  bool IsCall = false; + +  MemoryLocOrCall() = default;    MemoryLocOrCall(MemoryUseOrDef *MUD)        : MemoryLocOrCall(MUD->getMemoryInst()) {}    MemoryLocOrCall(const MemoryUseOrDef *MUD) @@ -116,14 +137,13 @@ public:      }    } -  explicit MemoryLocOrCall(const MemoryLocation &Loc) -      : IsCall(false), Loc(Loc) {} +  explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {} -  bool IsCall;    ImmutableCallSite getCS() const {      assert(IsCall);      return CS;    } +    MemoryLocation getLoc() const {      assert(!IsCall);      return Loc; @@ -144,16 +164,20 @@ private:      MemoryLocation Loc;    };  }; -} + +} // end anonymous namespace  namespace llvm { +  template <> struct DenseMapInfo<MemoryLocOrCall> {    static inline MemoryLocOrCall getEmptyKey() {      return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());    } +    static inline MemoryLocOrCall getTombstoneKey() {      return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());    } +    static unsigned getHashValue(const MemoryLocOrCall &MLOC) {      if (MLOC.IsCall)        return hash_combine(MLOC.IsCall, @@ -162,6 +186,7 @@ template <> struct DenseMapInfo<MemoryLocOrCall> {      return hash_combine(          MLOC.IsCall, DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));    } +    static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {      return LHS == RHS;    } @@ -169,6 +194,8 @@ template <> struct DenseMapInfo<MemoryLocOrCall> {  enum class Reorderability { Always, IfNoAlias, Never }; +} // end namespace llvm +  /// This does one-way checks to see if Use could theoretically be hoisted above  /// MayClobber. This will not check the other way around.  
/// @@ -235,7 +262,7 @@ static bool instructionClobbersQuery(MemoryDef *MD,    if (UseCS) {      ModRefInfo I = AA.getModRefInfo(DefInst, UseCS); -    return I != MRI_NoModRef; +    return isModOrRefSet(I);    }    if (auto *DefLoad = dyn_cast<LoadInst>(DefInst)) { @@ -251,7 +278,7 @@ static bool instructionClobbersQuery(MemoryDef *MD,      }    } -  return AA.getModRefInfo(DefInst, UseLoc) & MRI_Mod; +  return isModSet(AA.getModRefInfo(DefInst, UseLoc));  }  static bool instructionClobbersQuery(MemoryDef *MD, const MemoryUseOrDef *MU, @@ -271,22 +298,21 @@ bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,                                          AliasAnalysis &AA) {    return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA);  } -}  namespace { +  struct UpwardsMemoryQuery {    // True if our original query started off as a call -  bool IsCall; +  bool IsCall = false;    // The pointer location we started the query with. This will be empty if    // IsCall is true.    MemoryLocation StartingLoc;    // This is the instruction we were querying about. 
-  const Instruction *Inst; +  const Instruction *Inst = nullptr;    // The MemoryAccess we actually got called with, used to test local domination -  const MemoryAccess *OriginalAccess; +  const MemoryAccess *OriginalAccess = nullptr; -  UpwardsMemoryQuery() -      : IsCall(false), Inst(nullptr), OriginalAccess(nullptr) {} +  UpwardsMemoryQuery() = default;    UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)        : IsCall(ImmutableCallSite(Inst)), Inst(Inst), OriginalAccess(Access) { @@ -295,6 +321,8 @@ struct UpwardsMemoryQuery {    }  }; +} // end anonymous namespace +  static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,                             AliasAnalysis &AA) {    Instruction *Inst = MD->getMemoryInst(); @@ -394,6 +422,8 @@ checkClobberSanity(MemoryAccess *Start, MemoryAccess *ClobberAt,           "ClobberAt never acted as a clobber");  } +namespace { +  /// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up  /// in one class.  
class ClobberWalker { @@ -569,7 +599,7 @@ class ClobberWalker {    struct generic_def_path_iterator        : public iterator_facade_base<generic_def_path_iterator<T, Walker>,                                      std::forward_iterator_tag, T *> { -    generic_def_path_iterator() : W(nullptr), N(None) {} +    generic_def_path_iterator() = default;      generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}      T &operator*() const { return curNode(); } @@ -588,8 +618,8 @@ class ClobberWalker {    private:      T &curNode() const { return W->Paths[*N]; } -    Walker *W; -    Optional<ListIndex> N; +    Walker *W = nullptr; +    Optional<ListIndex> N = None;    };    using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>; @@ -664,7 +694,7 @@ class ClobberWalker {      };      MemoryPhi *Current = Phi; -    while (1) { +    while (true) {        assert(!MSSA.isLiveOnEntryDef(Current) &&               "liveOnEntry wasn't treated as a clobber?"); @@ -842,30 +872,33 @@ struct RenamePassData {    RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,                   MemoryAccess *M)        : DTN(D), ChildIt(It), IncomingVal(M) {} +    void swap(RenamePassData &RHS) {      std::swap(DTN, RHS.DTN);      std::swap(ChildIt, RHS.ChildIt);      std::swap(IncomingVal, RHS.IncomingVal);    }  }; -} // anonymous namespace + +} // end anonymous namespace  namespace llvm { +  /// \brief A MemorySSAWalker that does AA walks to disambiguate accesses. It no  /// longer does caching on its own,  /// but the name has been retained for the moment.  
class MemorySSA::CachingWalker final : public MemorySSAWalker {    ClobberWalker Walker; -  bool AutoResetWalker; +  bool AutoResetWalker = true;    MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, UpwardsMemoryQuery &); -  void verifyRemoved(MemoryAccess *);  public:    CachingWalker(MemorySSA *, AliasAnalysis *, DominatorTree *); -  ~CachingWalker() override; +  ~CachingWalker() override = default;    using MemorySSAWalker::getClobberingMemoryAccess; +    MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;    MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,                                            const MemoryLocation &) override; @@ -884,6 +917,8 @@ public:    }  }; +} // end namespace llvm +  void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,                                      bool RenameAllUses) {    // Pass through values to our successors @@ -1032,17 +1067,20 @@ MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {    auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));    if (Res.second) -    Res.first->second = make_unique<AccessList>(); +    Res.first->second = llvm::make_unique<AccessList>();    return Res.first->second.get();  } +  MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {    auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));    if (Res.second) -    Res.first->second = make_unique<DefsList>(); +    Res.first->second = llvm::make_unique<DefsList>();    return Res.first->second.get();  } +namespace llvm { +  /// This class is a batch walker of all MemoryUse's in the program, and points  /// their defining access at the thing that actually clobbers them.  
Because it  /// is a batch walker that touches everything, it does not operate like the @@ -1077,15 +1115,19 @@ private:      unsigned long LastKill;      bool LastKillValid;    }; +    void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,                             SmallVectorImpl<MemoryAccess *> &,                             DenseMap<MemoryLocOrCall, MemlocStackInfo> &); +    MemorySSA *MSSA;    MemorySSAWalker *Walker;    AliasAnalysis *AA;    DominatorTree *DT;  }; +} // end namespace llvm +  /// Optimize the uses in a given block This is basically the SSA renaming  /// algorithm, with one caveat: We are able to use a single stack for all  /// MemoryUses.  This is because the set of *possible* reaching MemoryDefs is @@ -1281,8 +1323,9 @@ void MemorySSA::buildMemorySSA() {    // semantics do *not* imply that something with no immediate uses can simply    // be removed.    BasicBlock &StartingPoint = F.getEntryBlock(); -  LiveOnEntryDef = make_unique<MemoryDef>(F.getContext(), nullptr, nullptr, -                                          &StartingPoint, NextID++); +  LiveOnEntryDef = +      llvm::make_unique<MemoryDef>(F.getContext(), nullptr, nullptr, +                                   &StartingPoint, NextID++);    DenseMap<const BasicBlock *, unsigned int> BBNumbers;    unsigned NextBBNum = 0; @@ -1343,7 +1386,7 @@ MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() {    if (Walker)      return Walker.get(); -  Walker = make_unique<CachingWalker>(this, AA, DT); +  Walker = llvm::make_unique<CachingWalker>(this, AA, DT);    return Walker.get();  } @@ -1462,6 +1505,7 @@ static inline bool isOrdered(const Instruction *I) {    }    return false;  } +  /// \brief Helper function to create new memory accesses  MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) {    // The assume intrinsic has a control dependency which we model by claiming @@ -1473,7 +1517,7 @@ MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) {        
return nullptr;    // Find out what affect this instruction has on memory. -  ModRefInfo ModRef = AA->getModRefInfo(I); +  ModRefInfo ModRef = AA->getModRefInfo(I, None);    // The isOrdered check is used to ensure that volatiles end up as defs    // (atomics end up as ModRef right now anyway).  Until we separate the    // ordering chain from the memory chain, this enables people to see at least @@ -1482,8 +1526,8 @@ MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) {    // Separate memory aliasing and ordering into two different chains so that we    // can precisely represent both "what memory will this read/write/is clobbered    // by" and "what instructions can I move this past". -  bool Def = bool(ModRef & MRI_Mod) || isOrdered(I); -  bool Use = bool(ModRef & MRI_Ref); +  bool Def = isModSet(ModRef) || isOrdered(I); +  bool Use = isRefSet(ModRef);    // It's possible for an instruction to not modify memory at all. During    // construction, we ignore them. @@ -1675,7 +1719,6 @@ void MemorySSA::verifyDomination(Function &F) const {  /// \brief Verify the def-use lists in MemorySSA, by verifying that \p Use  /// appears in the use list of \p Def. -  void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {  #ifndef NDEBUG    // The live on entry use may cause us to get a NULL def here @@ -1739,7 +1782,6 @@ void MemorySSA::renumberBlock(const BasicBlock *B) const {  /// \returns True if \p Dominator dominates \p Dominatee.  
bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,                                   const MemoryAccess *Dominatee) const { -    const BasicBlock *DominatorBlock = Dominator->getBlock();    assert((DominatorBlock == Dominatee->getBlock()) && @@ -1887,7 +1929,7 @@ MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,                                                   FunctionAnalysisManager &AM) {    auto &DT = AM.getResult<DominatorTreeAnalysis>(F);    auto &AA = AM.getResult<AAManager>(F); -  return MemorySSAAnalysis::Result(make_unique<MemorySSA>(F, &AA, &DT)); +  return MemorySSAAnalysis::Result(llvm::make_unique<MemorySSA>(F, &AA, &DT));  }  PreservedAnalyses MemorySSAPrinterPass::run(Function &F, @@ -1936,9 +1978,7 @@ MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}  MemorySSA::CachingWalker::CachingWalker(MemorySSA *M, AliasAnalysis *A,                                          DominatorTree *D) -    : MemorySSAWalker(M), Walker(*M, *A, *D), AutoResetWalker(true) {} - -MemorySSA::CachingWalker::~CachingWalker() {} +    : MemorySSAWalker(M), Walker(*M, *A, *D) {}  void MemorySSA::CachingWalker::invalidateInfo(MemoryAccess *MA) {    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) @@ -2059,7 +2099,6 @@ MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(      return Use->getDefiningAccess();    return StartingAccess;  } -} // namespace llvm  void MemoryPhi::deleteMe(DerivedUser *Self) {    delete static_cast<MemoryPhi *>(Self); | 
