Diffstat (limited to 'lib/Analysis')
-rw-r--r--  lib/Analysis/CFLAndersAliasAnalysis.cpp     |  15
-rw-r--r--  lib/Analysis/CFLSteensAliasAnalysis.cpp     |  29
-rw-r--r--  lib/Analysis/InlineCost.cpp                 |  33
-rw-r--r--  lib/Analysis/IteratedDominanceFrontier.cpp  |  12
-rw-r--r--  lib/Analysis/MemoryBuiltins.cpp             |   4
-rw-r--r--  lib/Analysis/OptimizationDiagnosticInfo.cpp |  13
-rw-r--r--  lib/Analysis/RegionInfo.cpp                 |  40
-rw-r--r--  lib/Analysis/ScalarEvolution.cpp            | 243
-rw-r--r--  lib/Analysis/TargetTransformInfo.cpp        |   9
-rw-r--r--  lib/Analysis/TypeBasedAliasAnalysis.cpp     |  20
10 files changed, 206 insertions(+), 212 deletions(-)
diff --git a/lib/Analysis/CFLAndersAliasAnalysis.cpp b/lib/Analysis/CFLAndersAliasAnalysis.cpp
index ddd5123d0eff..0de7ad98af46 100644
--- a/lib/Analysis/CFLAndersAliasAnalysis.cpp
+++ b/lib/Analysis/CFLAndersAliasAnalysis.cpp
@@ -68,17 +68,6 @@ CFLAndersAAResult::CFLAndersAAResult(CFLAndersAAResult &&RHS)
: AAResultBase(std::move(RHS)), TLI(RHS.TLI) {}
CFLAndersAAResult::~CFLAndersAAResult() {}
-static const Function *parentFunctionOfValue(const Value *Val) {
- if (auto *Inst = dyn_cast<Instruction>(Val)) {
- auto *Bb = Inst->getParent();
- return Bb->getParent();
- }
-
- if (auto *Arg = dyn_cast<Argument>(Val))
- return Arg->getParent();
- return nullptr;
-}
-
namespace {
enum class MatchState : uint8_t {
@@ -789,10 +778,10 @@ void CFLAndersAAResult::scan(const Function &Fn) {
// resize and invalidating the reference returned by operator[]
auto FunInfo = buildInfoFrom(Fn);
Cache[&Fn] = std::move(FunInfo);
- Handles.push_front(FunctionHandle(const_cast<Function *>(&Fn), this));
+ Handles.emplace_front(const_cast<Function *>(&Fn), this);
}
-void CFLAndersAAResult::evict(const Function &Fn) { Cache.erase(&Fn); }
+void CFLAndersAAResult::evict(const Function *Fn) { Cache.erase(Fn); }
const Optional<CFLAndersAAResult::FunctionInfo> &
CFLAndersAAResult::ensureCached(const Function &Fn) {
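
The push_front → emplace_front change above constructs the FunctionHandle in
place from its constructor arguments, instead of building a temporary and
moving it into the list node. A minimal standalone sketch of the difference,
with placeholder types standing in for the real FunctionHandle:

    #include <forward_list>

    struct Handle {
      void *Fn;
      void *Owner;
      Handle(void *F, void *O) : Fn(F), Owner(O) {}
    };

    int main() {
      std::forward_list<Handle> Handles;
      void *Fn = nullptr, *Self = nullptr;
      Handles.push_front(Handle(Fn, Self)); // temporary constructed, then moved
      Handles.emplace_front(Fn, Self);      // constructed directly in the node
    }
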
diff --git a/lib/Analysis/CFLSteensAliasAnalysis.cpp b/lib/Analysis/CFLSteensAliasAnalysis.cpp
index 6e4263920e58..adbdd82012a3 100644
--- a/lib/Analysis/CFLSteensAliasAnalysis.cpp
+++ b/lib/Analysis/CFLSteensAliasAnalysis.cpp
@@ -88,19 +88,6 @@ const StratifiedIndex StratifiedLink::SetSentinel =
//===----------------------------------------------------------------------===//
/// Determines whether it would be pointless to add the given Value to our sets.
-static bool canSkipAddingToSets(Value *Val);
-
-static Optional<Function *> parentFunctionOfValue(Value *Val) {
- if (auto *Inst = dyn_cast<Instruction>(Val)) {
- auto *Bb = Inst->getParent();
- return Bb->getParent();
- }
-
- if (auto *Arg = dyn_cast<Argument>(Val))
- return Arg->getParent();
- return None;
-}
-
static bool canSkipAddingToSets(Value *Val) {
// Constants can share instances, which may falsely unify multiple
// sets, e.g. in
@@ -245,7 +232,7 @@ void CFLSteensAAResult::scan(Function *Fn) {
auto FunInfo = buildSetsFrom(Fn);
Cache[Fn] = std::move(FunInfo);
- Handles.push_front(FunctionHandle(Fn, this));
+ Handles.emplace_front(Fn, this);
}
void CFLSteensAAResult::evict(Function *Fn) { Cache.erase(Fn); }
@@ -281,9 +268,9 @@ AliasResult CFLSteensAAResult::query(const MemoryLocation &LocA,
return NoAlias;
Function *Fn = nullptr;
- auto MaybeFnA = parentFunctionOfValue(ValA);
- auto MaybeFnB = parentFunctionOfValue(ValB);
- if (!MaybeFnA.hasValue() && !MaybeFnB.hasValue()) {
+ Function *MaybeFnA = const_cast<Function *>(parentFunctionOfValue(ValA));
+ Function *MaybeFnB = const_cast<Function *>(parentFunctionOfValue(ValB));
+ if (!MaybeFnA && !MaybeFnB) {
// The only times this is known to happen are when globals + InlineAsm are
// involved
DEBUG(dbgs()
@@ -291,12 +278,12 @@ AliasResult CFLSteensAAResult::query(const MemoryLocation &LocA,
return MayAlias;
}
- if (MaybeFnA.hasValue()) {
- Fn = *MaybeFnA;
- assert((!MaybeFnB.hasValue() || *MaybeFnB == *MaybeFnA) &&
+ if (MaybeFnA) {
+ Fn = MaybeFnA;
+ assert((!MaybeFnB || MaybeFnB == MaybeFnA) &&
"Interprocedural queries not supported");
} else {
- Fn = *MaybeFnB;
+ Fn = MaybeFnB;
}
assert(Fn != nullptr);
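
Both CFL diffs delete their per-file copies of parentFunctionOfValue, and the
Steens query above now calls a version that returns const Function * instead of
Optional<Function *>. The deleted Anders-side copy shows the shape of the
helper; presumably a single shared definition now lives in a common header that
this diff does not touch:

    #include "llvm/IR/Argument.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/Instruction.h"
    using namespace llvm;

    // Reconstructed from the deleted copy above; a null return means the value
    // has no enclosing function (globals, inline asm, constants).
    static const Function *parentFunctionOfValue(const Value *Val) {
      if (auto *Inst = dyn_cast<Instruction>(Val))
        return Inst->getParent()->getParent();
      if (auto *Arg = dyn_cast<Argument>(Val))
        return Arg->getParent();
      return nullptr;
    }
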
diff --git a/lib/Analysis/InlineCost.cpp b/lib/Analysis/InlineCost.cpp
index 77ad6f1e166f..35693666aa03 100644
--- a/lib/Analysis/InlineCost.cpp
+++ b/lib/Analysis/InlineCost.cpp
@@ -66,6 +66,12 @@ static cl::opt<int>
cl::ZeroOrMore,
cl::desc("Threshold for hot callsites "));
+static cl::opt<int> ColdCallSiteRelFreq(
+ "cold-callsite-rel-freq", cl::Hidden, cl::init(2), cl::ZeroOrMore,
+ cl::desc("Maxmimum block frequency, expressed as a percentage of caller's "
+ "entry frequency, for a callsite to be cold in the absence of "
+ "profile information."));
+
namespace {
class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
@@ -172,6 +178,9 @@ class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
/// Return true if size growth is allowed when inlining the callee at CS.
bool allowSizeGrowth(CallSite CS);
+ /// Return true if \p CS is a cold callsite.
+ bool isColdCallSite(CallSite CS, BlockFrequencyInfo *CallerBFI);
+
// Custom analysis routines.
bool analyzeBlock(BasicBlock *BB, SmallPtrSetImpl<const Value *> &EphValues);
@@ -631,6 +640,26 @@ bool CallAnalyzer::allowSizeGrowth(CallSite CS) {
return true;
}
+bool CallAnalyzer::isColdCallSite(CallSite CS, BlockFrequencyInfo *CallerBFI) {
+ // If global profile summary is available, then callsite's coldness is
+ // determined based on that.
+ if (PSI->hasProfileSummary())
+ return PSI->isColdCallSite(CS, CallerBFI);
+ if (!CallerBFI)
+ return false;
+
+ // In the absence of global profile summary, determine if the callsite is cold
+ // relative to caller's entry. We could potentially cache the computation of
+ // scaled entry frequency, but the added complexity is not worth it unless
+ // this scaling shows up high in the profiles.
+ const BranchProbability ColdProb(ColdCallSiteRelFreq, 100);
+ auto CallSiteBB = CS.getInstruction()->getParent();
+ auto CallSiteFreq = CallerBFI->getBlockFreq(CallSiteBB);
+ auto CallerEntryFreq =
+ CallerBFI->getBlockFreq(&(CS.getCaller()->getEntryBlock()));
+ return CallSiteFreq < CallerEntryFreq * ColdProb;
+}
+
void CallAnalyzer::updateThreshold(CallSite CS, Function &Callee) {
// If no size growth is allowed for this inlining, set Threshold to 0.
if (!allowSizeGrowth(CS)) {
@@ -676,7 +705,7 @@ void CallAnalyzer::updateThreshold(CallSite CS, Function &Callee) {
if (PSI->isHotCallSite(CS, CallerBFI)) {
DEBUG(dbgs() << "Hot callsite.\n");
Threshold = Params.HotCallSiteThreshold.getValue();
- } else if (PSI->isColdCallSite(CS, CallerBFI)) {
+ } else if (isColdCallSite(CS, CallerBFI)) {
DEBUG(dbgs() << "Cold callsite.\n");
Threshold = MinIfValid(Threshold, Params.ColdCallSiteThreshold);
}
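
Worked example for the new static cold-callsite test: with the default
cold-callsite-rel-freq of 2, the comparison CallSiteFreq < CallerEntryFreq *
BranchProbability(2, 100) marks a callsite cold when its block frequency is
below 2% of the caller's entry frequency. A simplified integer model (the real
code uses LLVM's BlockFrequency and BranchProbability types, which also handle
scaling overflow):

    #include <cstdint>
    #include <iostream>

    static bool isColdCallSiteModel(uint64_t CallSiteFreq,
                                    uint64_t CallerEntryFreq,
                                    uint64_t RelFreqPercent = 2) {
      // Mirrors CallSiteFreq < CallerEntryFreq * BranchProbability(N, 100).
      return CallSiteFreq * 100 < CallerEntryFreq * RelFreqPercent;
    }

    int main() {
      // Caller entry frequency 1000: the cold cutoff is 20.
      std::cout << isColdCallSiteModel(15, 1000) << '\n'; // 1: cold
      std::cout << isColdCallSiteModel(50, 1000) << '\n'; // 0: not cold
    }
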
@@ -1010,7 +1039,7 @@ bool CallAnalyzer::visitSwitchInst(SwitchInst &SI) {
if (isa<ConstantInt>(V))
return true;
- // Assume the most general case where the swith is lowered into
+ // Assume the most general case where the switch is lowered into
// either a jump table, bit test, or a balanced binary tree consisting of
// case clusters without merging adjacent clusters with the same
// destination. We do not consider the switches that are lowered with a mix
diff --git a/lib/Analysis/IteratedDominanceFrontier.cpp b/lib/Analysis/IteratedDominanceFrontier.cpp
index 2a736ec0379c..0e02850df349 100644
--- a/lib/Analysis/IteratedDominanceFrontier.cpp
+++ b/lib/Analysis/IteratedDominanceFrontier.cpp
@@ -20,14 +20,6 @@ namespace llvm {
template <class NodeTy>
void IDFCalculator<NodeTy>::calculate(
SmallVectorImpl<BasicBlock *> &PHIBlocks) {
- // If we haven't computed dominator tree levels, do so now.
- if (DomLevels.empty()) {
- for (auto DFI = df_begin(DT.getRootNode()), DFE = df_end(DT.getRootNode());
- DFI != DFE; ++DFI) {
- DomLevels[*DFI] = DFI.getPathLength() - 1;
- }
- }
-
// Use a priority queue keyed on dominator tree level so that inserted nodes
// are handled from the bottom of the dominator tree upwards.
typedef std::pair<DomTreeNode *, unsigned> DomTreeNodePair;
@@ -37,7 +29,7 @@ void IDFCalculator<NodeTy>::calculate(
for (BasicBlock *BB : *DefBlocks) {
if (DomTreeNode *Node = DT.getNode(BB))
- PQ.push(std::make_pair(Node, DomLevels.lookup(Node)));
+ PQ.push({Node, Node->getLevel()});
}
SmallVector<DomTreeNode *, 32> Worklist;
@@ -72,7 +64,7 @@ void IDFCalculator<NodeTy>::calculate(
if (SuccNode->getIDom() == Node)
continue;
- unsigned SuccLevel = DomLevels.lookup(SuccNode);
+ const unsigned SuccLevel = SuccNode->getLevel();
if (SuccLevel > RootLevel)
continue;
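
The DomLevels side table disappears because dominator tree nodes now carry
their own level (DomTreeNode::getLevel()), so the bottom-up processing order
falls straight out of a priority queue keyed on that field. A generic sketch of
the pattern, with a stand-in node type:

    #include <queue>
    #include <utility>
    #include <vector>

    struct TreeNode { unsigned Level; };

    int main() {
      using Entry = std::pair<TreeNode *, unsigned>;
      auto ByLevel = [](const Entry &A, const Entry &B) {
        return A.second < B.second; // max-heap: deepest level popped first
      };
      std::priority_queue<Entry, std::vector<Entry>, decltype(ByLevel)>
          PQ(ByLevel);
      TreeNode N{3};
      PQ.push({&N, N.Level}); // was: PQ.push({Node, DomLevels.lookup(Node)})
      return PQ.top().second == 3 ? 0 : 1;
    }
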
diff --git a/lib/Analysis/MemoryBuiltins.cpp b/lib/Analysis/MemoryBuiltins.cpp
index 7983d62c2f7a..f88d54b21e1e 100644
--- a/lib/Analysis/MemoryBuiltins.cpp
+++ b/lib/Analysis/MemoryBuiltins.cpp
@@ -400,8 +400,8 @@ static APInt getSizeWithOverflow(const SizeOffsetType &Data) {
/// \brief Compute the size of the object pointed by Ptr. Returns true and the
/// object size in Size if successful, and false otherwise.
-/// If RoundToAlign is true, then Size is rounded up to the aligment of allocas,
-/// byval arguments, and global variables.
+/// If RoundToAlign is true, then Size is rounded up to the alignment of
+/// allocas, byval arguments, and global variables.
bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
const TargetLibraryInfo *TLI, ObjectSizeOpts Opts) {
ObjectSizeOffsetVisitor Visitor(DL, TLI, Ptr->getContext(), Opts);
diff --git a/lib/Analysis/OptimizationDiagnosticInfo.cpp b/lib/Analysis/OptimizationDiagnosticInfo.cpp
index e38e530c052d..eb259fd7a384 100644
--- a/lib/Analysis/OptimizationDiagnosticInfo.cpp
+++ b/lib/Analysis/OptimizationDiagnosticInfo.cpp
@@ -25,7 +25,7 @@ using namespace llvm;
OptimizationRemarkEmitter::OptimizationRemarkEmitter(const Function *F)
: F(F), BFI(nullptr) {
- if (!F->getContext().getDiagnosticHotnessRequested())
+ if (!F->getContext().getDiagnosticsHotnessRequested())
return;
// First create a dominator tree.
@@ -155,6 +155,13 @@ void OptimizationRemarkEmitter::emit(
DiagnosticInfoOptimizationBase &OptDiagBase) {
auto &OptDiag = cast<DiagnosticInfoIROptimization>(OptDiagBase);
computeHotness(OptDiag);
+ // If a diagnostic has a hotness value, then only emit it if its hotness
+ // meets the threshold.
+ if (OptDiag.getHotness() &&
+ *OptDiag.getHotness() <
+ F->getContext().getDiagnosticsHotnessThreshold()) {
+ return;
+ }
yaml::Output *Out = F->getContext().getDiagnosticsOutputFile();
if (Out) {
@@ -176,7 +183,7 @@ OptimizationRemarkEmitterWrapperPass::OptimizationRemarkEmitterWrapperPass()
bool OptimizationRemarkEmitterWrapperPass::runOnFunction(Function &Fn) {
BlockFrequencyInfo *BFI;
- if (Fn.getContext().getDiagnosticHotnessRequested())
+ if (Fn.getContext().getDiagnosticsHotnessRequested())
BFI = &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI();
else
BFI = nullptr;
@@ -198,7 +205,7 @@ OptimizationRemarkEmitterAnalysis::run(Function &F,
FunctionAnalysisManager &AM) {
BlockFrequencyInfo *BFI;
- if (F.getContext().getDiagnosticHotnessRequested())
+ if (F.getContext().getDiagnosticsHotnessRequested())
BFI = &AM.getResult<BlockFrequencyAnalysis>(F);
else
BFI = nullptr;
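
The new check in emit() drops any diagnostic that carries a hotness value below
LLVMContext's hotness threshold; diagnostics with no hotness attached still go
through. A minimal standalone model of that filter (std::optional standing in
for LLVM's Optional):

    #include <cstdint>
    #include <optional>

    static bool shouldEmit(std::optional<uint64_t> Hotness, uint64_t Threshold) {
      // No hotness information: always emit. Otherwise require the threshold.
      return !Hotness || *Hotness >= Threshold;
    }

    int main() {
      bool NoHotness = shouldEmit({}, 100);  // true
      bool Cold      = shouldEmit(50, 100);  // false: filtered out
      bool Hot       = shouldEmit(500, 100); // true
      return (NoHotness && !Cold && Hot) ? 0 : 1;
    }
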
diff --git a/lib/Analysis/RegionInfo.cpp b/lib/Analysis/RegionInfo.cpp
index 63ef8d28d44a..900487323005 100644
--- a/lib/Analysis/RegionInfo.cpp
+++ b/lib/Analysis/RegionInfo.cpp
@@ -10,28 +10,29 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/RegionInfo.h"
-#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/LoopInfo.h"
-#include "llvm/Analysis/RegionInfoImpl.h"
-#include "llvm/Analysis/RegionIterator.h"
-#include "llvm/IR/PassManager.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
#ifndef NDEBUG
#include "llvm/Analysis/RegionPrinter.h"
#endif
+#include "llvm/Analysis/RegionInfoImpl.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/raw_ostream.h"
using namespace llvm;
#define DEBUG_TYPE "region"
namespace llvm {
+
template class RegionBase<RegionTraits<Function>>;
template class RegionNodeBase<RegionTraits<Function>>;
template class RegionInfoBase<RegionTraits<Function>>;
-}
+
+} // end namespace llvm
STATISTIC(numRegions, "The # of regions");
STATISTIC(numSimpleRegions, "The # of simple regions");
@@ -44,7 +45,6 @@ VerifyRegionInfoX(
cl::location(RegionInfoBase<RegionTraits<Function>>::VerifyRegionInfo),
cl::desc("Verify region info (time consuming)"));
-
static cl::opt<Region::PrintStyle, true> printStyleX("print-region-style",
cl::location(RegionInfo::printStyle),
cl::Hidden,
@@ -56,7 +56,6 @@ static cl::opt<Region::PrintStyle, true> printStyleX("print-region-style",
clEnumValN(Region::PrintRN, "rn",
"print regions in detail with element_iterator")));
-
//===----------------------------------------------------------------------===//
// Region implementation
//
@@ -68,20 +67,15 @@ Region::Region(BasicBlock *Entry, BasicBlock *Exit,
}
-Region::~Region() { }
+Region::~Region() = default;
//===----------------------------------------------------------------------===//
// RegionInfo implementation
//
-RegionInfo::RegionInfo() :
- RegionInfoBase<RegionTraits<Function>>() {
-
-}
+RegionInfo::RegionInfo() = default;
-RegionInfo::~RegionInfo() {
-
-}
+RegionInfo::~RegionInfo() = default;
bool RegionInfo::invalidate(Function &F, const PreservedAnalyses &PA,
FunctionAnalysisManager::Invalidator &) {
@@ -126,9 +120,7 @@ RegionInfoPass::RegionInfoPass() : FunctionPass(ID) {
initializeRegionInfoPassPass(*PassRegistry::getPassRegistry());
}
-RegionInfoPass::~RegionInfoPass() {
-
-}
+RegionInfoPass::~RegionInfoPass() = default;
bool RegionInfoPass::runOnFunction(Function &F) {
releaseMemory();
@@ -181,10 +173,12 @@ INITIALIZE_PASS_END(RegionInfoPass, "regions",
// the link time optimization.
namespace llvm {
+
FunctionPass *createRegionInfoPass() {
return new RegionInfoPass();
}
-}
+
+} // end namespace llvm
//===----------------------------------------------------------------------===//
// RegionInfoAnalysis implementation
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 73a95ec405c7..678ad3af5e85 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -157,6 +157,11 @@ static cl::opt<unsigned> MaxConstantEvolvingDepth(
"scalar-evolution-max-constant-evolving-depth", cl::Hidden,
cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));
+static cl::opt<unsigned>
+ MaxExtDepth("scalar-evolution-max-ext-depth", cl::Hidden,
+ cl::desc("Maximum depth of recursive SExt/ZExt"),
+ cl::init(8));
+
//===----------------------------------------------------------------------===//
// SCEV class definitions
//===----------------------------------------------------------------------===//
@@ -1285,8 +1290,8 @@ static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
namespace {
struct ExtendOpTraitsBase {
- typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(
- const SCEV *, Type *, ScalarEvolution::ExtendCacheTy &Cache);
+ typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
+ unsigned);
};
// Used to make code generic over signed and unsigned overflow.
@@ -1315,9 +1320,8 @@ struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
}
};
-const ExtendOpTraitsBase::GetExtendExprTy
- ExtendOpTraits<SCEVSignExtendExpr>::GetExtendExpr =
- &ScalarEvolution::getSignExtendExprCached;
+const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
+ SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;
template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
@@ -1332,9 +1336,8 @@ struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
}
};
-const ExtendOpTraitsBase::GetExtendExprTy
- ExtendOpTraits<SCEVZeroExtendExpr>::GetExtendExpr =
- &ScalarEvolution::getZeroExtendExprCached;
+const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
+ SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;
}
// The recurrence AR has been shown to have no signed/unsigned wrap or something
@@ -1346,8 +1349,7 @@ const ExtendOpTraitsBase::GetExtendExprTy
// "sext/zext(PostIncAR)"
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
- ScalarEvolution *SE,
- ScalarEvolution::ExtendCacheTy &Cache) {
+ ScalarEvolution *SE, unsigned Depth) {
auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
@@ -1394,9 +1396,9 @@ static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
const SCEV *OperandExtendedStart =
- SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Cache),
- (SE->*GetExtendExpr)(Step, WideTy, Cache));
- if ((SE->*GetExtendExpr)(Start, WideTy, Cache) == OperandExtendedStart) {
+ SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
+ (SE->*GetExtendExpr)(Step, WideTy, Depth));
+ if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
if (PreAR && AR->getNoWrapFlags(WrapType)) {
// If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
// or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
@@ -1422,16 +1424,16 @@ static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
ScalarEvolution *SE,
- ScalarEvolution::ExtendCacheTy &Cache) {
+ unsigned Depth) {
auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
- const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Cache);
+ const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
if (!PreStart)
- return (SE->*GetExtendExpr)(AR->getStart(), Ty, Cache);
+ return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);
- return SE->getAddExpr(
- (SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty, Cache),
- (SE->*GetExtendExpr)(PreStart, Ty, Cache));
+ return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
+ Depth),
+ (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}
// Try to prove away overflow by looking at "nearby" add recurrences. A
@@ -1511,31 +1513,8 @@ bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
return false;
}
-const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty) {
- // Use the local cache to prevent exponential behavior of
- // getZeroExtendExprImpl.
- ExtendCacheTy Cache;
- return getZeroExtendExprCached(Op, Ty, Cache);
-}
-
-/// Query \p Cache before calling getZeroExtendExprImpl. If there is no
-/// related entry in the \p Cache, call getZeroExtendExprImpl and save
-/// the result in the \p Cache.
-const SCEV *ScalarEvolution::getZeroExtendExprCached(const SCEV *Op, Type *Ty,
- ExtendCacheTy &Cache) {
- auto It = Cache.find({Op, Ty});
- if (It != Cache.end())
- return It->second;
- const SCEV *ZExt = getZeroExtendExprImpl(Op, Ty, Cache);
- auto InsertResult = Cache.insert({{Op, Ty}, ZExt});
- assert(InsertResult.second && "Expect the key was not in the cache");
- (void)InsertResult;
- return ZExt;
-}
-
-/// The real implementation of getZeroExtendExpr.
-const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
- ExtendCacheTy &Cache) {
+const SCEV *
+ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
"This is not an extending conversion!");
assert(isSCEVable(Ty) &&
@@ -1545,11 +1524,11 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
// Fold if the operand is constant.
if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
return getConstant(
- cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));
+ cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));
// zext(zext(x)) --> zext(x)
if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
- return getZeroExtendExprCached(SZ->getOperand(), Ty, Cache);
+ return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);
// Before doing any expensive analysis, check to see if we've already
// computed a SCEV for this Op and Ty.
@@ -1559,6 +1538,12 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
ID.AddPointer(Ty);
void *IP = nullptr;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
+ if (Depth > MaxExtDepth) {
+ SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
+ Op, Ty);
+ UniqueSCEVs.InsertNode(S, IP);
+ return S;
+ }
// zext(trunc(x)) --> zext(x) or x or trunc(x)
if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
@@ -1593,8 +1578,8 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
// we don't need to do any further analysis.
if (AR->hasNoUnsignedWrap())
return getAddRecExpr(
- getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Cache),
- getZeroExtendExprCached(Step, Ty, Cache), L, AR->getNoWrapFlags());
+ getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
+ getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
// Check whether the backedge-taken count is SCEVCouldNotCompute.
// Note that this serves two purposes: It filters out loops that are
@@ -1618,22 +1603,29 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
if (MaxBECount == RecastedMaxBECount) {
Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
// Check whether Start+Step*MaxBECount has no unsigned overflow.
- const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
- const SCEV *ZAdd =
- getZeroExtendExprCached(getAddExpr(Start, ZMul), WideTy, Cache);
- const SCEV *WideStart = getZeroExtendExprCached(Start, WideTy, Cache);
+ const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
+ SCEV::FlagAnyWrap, Depth + 1);
+ const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
+ SCEV::FlagAnyWrap,
+ Depth + 1),
+ WideTy, Depth + 1);
+ const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
const SCEV *WideMaxBECount =
- getZeroExtendExprCached(CastedMaxBECount, WideTy, Cache);
- const SCEV *OperandExtendedAdd = getAddExpr(
- WideStart, getMulExpr(WideMaxBECount, getZeroExtendExprCached(
- Step, WideTy, Cache)));
+ getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
+ const SCEV *OperandExtendedAdd =
+ getAddExpr(WideStart,
+ getMulExpr(WideMaxBECount,
+ getZeroExtendExpr(Step, WideTy, Depth + 1),
+ SCEV::FlagAnyWrap, Depth + 1),
+ SCEV::FlagAnyWrap, Depth + 1);
if (ZAdd == OperandExtendedAdd) {
// Cache knowledge of AR NUW, which is propagated to this AddRec.
const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
// Return the expression with the addrec on the outside.
return getAddRecExpr(
- getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Cache),
- getZeroExtendExprCached(Step, Ty, Cache), L,
+ getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
+ Depth + 1),
+ getZeroExtendExpr(Step, Ty, Depth + 1), L,
AR->getNoWrapFlags());
}
// Similar to above, only this time treat the step value as signed.
@@ -1641,15 +1633,19 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
OperandExtendedAdd =
getAddExpr(WideStart,
getMulExpr(WideMaxBECount,
- getSignExtendExpr(Step, WideTy)));
+ getSignExtendExpr(Step, WideTy, Depth + 1),
+ SCEV::FlagAnyWrap, Depth + 1),
+ SCEV::FlagAnyWrap, Depth + 1);
if (ZAdd == OperandExtendedAdd) {
// Cache knowledge of AR NW, which is propagated to this AddRec.
// Negative step causes unsigned wrap, but it still can't self-wrap.
const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
// Return the expression with the addrec on the outside.
return getAddRecExpr(
- getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Cache),
- getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
+ getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
+ Depth + 1),
+ getSignExtendExpr(Step, Ty, Depth + 1), L,
+ AR->getNoWrapFlags());
}
}
}
@@ -1680,8 +1676,9 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
// Return the expression with the addrec on the outside.
return getAddRecExpr(
- getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Cache),
- getZeroExtendExprCached(Step, Ty, Cache), L,
+ getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
+ Depth + 1),
+ getZeroExtendExpr(Step, Ty, Depth + 1), L,
AR->getNoWrapFlags());
}
} else if (isKnownNegative(Step)) {
@@ -1697,8 +1694,10 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
// Return the expression with the addrec on the outside.
return getAddRecExpr(
- getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Cache),
- getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
+ getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
+ Depth + 1),
+ getSignExtendExpr(Step, Ty, Depth + 1), L,
+ AR->getNoWrapFlags());
}
}
}
@@ -1706,8 +1705,8 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
return getAddRecExpr(
- getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Cache),
- getZeroExtendExprCached(Step, Ty, Cache), L, AR->getNoWrapFlags());
+ getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
+ getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
}
}
@@ -1718,8 +1717,8 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
// commute the zero extension with the addition operation.
SmallVector<const SCEV *, 4> Ops;
for (const auto *Op : SA->operands())
- Ops.push_back(getZeroExtendExprCached(Op, Ty, Cache));
- return getAddExpr(Ops, SCEV::FlagNUW);
+ Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
+ return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
}
}
@@ -1732,31 +1731,8 @@ const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
return S;
}
-const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty) {
- // Use the local cache to prevent exponential behavior of
- // getSignExtendExprImpl.
- ExtendCacheTy Cache;
- return getSignExtendExprCached(Op, Ty, Cache);
-}
-
-/// Query \p Cache before calling getSignExtendExprImpl. If there is no
-/// related entry in the \p Cache, call getSignExtendExprImpl and save
-/// the result in the \p Cache.
-const SCEV *ScalarEvolution::getSignExtendExprCached(const SCEV *Op, Type *Ty,
- ExtendCacheTy &Cache) {
- auto It = Cache.find({Op, Ty});
- if (It != Cache.end())
- return It->second;
- const SCEV *SExt = getSignExtendExprImpl(Op, Ty, Cache);
- auto InsertResult = Cache.insert({{Op, Ty}, SExt});
- assert(InsertResult.second && "Expect the key was not in the cache");
- (void)InsertResult;
- return SExt;
-}
-
-/// The real implementation of getSignExtendExpr.
-const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
- ExtendCacheTy &Cache) {
+const SCEV *
+ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
"This is not an extending conversion!");
assert(isSCEVable(Ty) &&
@@ -1766,15 +1742,15 @@ const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
// Fold if the operand is constant.
if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
return getConstant(
- cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));
+ cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));
// sext(sext(x)) --> sext(x)
if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
- return getSignExtendExprCached(SS->getOperand(), Ty, Cache);
+ return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1);
// sext(zext(x)) --> zext(x)
if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
- return getZeroExtendExpr(SZ->getOperand(), Ty);
+ return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);
// Before doing any expensive analysis, check to see if we've already
// computed a SCEV for this Op and Ty.
@@ -1784,6 +1760,13 @@ const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
ID.AddPointer(Ty);
void *IP = nullptr;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
+ // Limit recursion depth.
+ if (Depth > MaxExtDepth) {
+ SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
+ Op, Ty);
+ UniqueSCEVs.InsertNode(S, IP);
+ return S;
+ }
// sext(trunc(x)) --> sext(x) or x or trunc(x)
if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
@@ -1809,8 +1792,9 @@ const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
const APInt &C2 = SC2->getAPInt();
if (C1.isStrictlyPositive() && C2.isStrictlyPositive() &&
C2.ugt(C1) && C2.isPowerOf2())
- return getAddExpr(getSignExtendExprCached(SC1, Ty, Cache),
- getSignExtendExprCached(SMul, Ty, Cache));
+ return getAddExpr(getSignExtendExpr(SC1, Ty, Depth + 1),
+ getSignExtendExpr(SMul, Ty, Depth + 1),
+ SCEV::FlagAnyWrap, Depth + 1);
}
}
}
@@ -1821,8 +1805,8 @@ const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
// commute the sign extension with the addition operation.
SmallVector<const SCEV *, 4> Ops;
for (const auto *Op : SA->operands())
- Ops.push_back(getSignExtendExprCached(Op, Ty, Cache));
- return getAddExpr(Ops, SCEV::FlagNSW);
+ Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
+ return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
}
}
// If the input value is a chrec scev, and we can prove that the value
@@ -1845,8 +1829,8 @@ const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
// we don't need to do any further analysis.
if (AR->hasNoSignedWrap())
return getAddRecExpr(
- getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Cache),
- getSignExtendExprCached(Step, Ty, Cache), L, SCEV::FlagNSW);
+ getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
+ getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);
// Check whether the backedge-taken count is SCEVCouldNotCompute.
// Note that this serves two purposes: It filters out loops that are
@@ -1870,22 +1854,29 @@ const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
if (MaxBECount == RecastedMaxBECount) {
Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
// Check whether Start+Step*MaxBECount has no signed overflow.
- const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
- const SCEV *SAdd =
- getSignExtendExprCached(getAddExpr(Start, SMul), WideTy, Cache);
- const SCEV *WideStart = getSignExtendExprCached(Start, WideTy, Cache);
+ const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
+ SCEV::FlagAnyWrap, Depth + 1);
+ const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
+ SCEV::FlagAnyWrap,
+ Depth + 1),
+ WideTy, Depth + 1);
+ const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
const SCEV *WideMaxBECount =
- getZeroExtendExpr(CastedMaxBECount, WideTy);
- const SCEV *OperandExtendedAdd = getAddExpr(
- WideStart, getMulExpr(WideMaxBECount, getSignExtendExprCached(
- Step, WideTy, Cache)));
+ getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
+ const SCEV *OperandExtendedAdd =
+ getAddExpr(WideStart,
+ getMulExpr(WideMaxBECount,
+ getSignExtendExpr(Step, WideTy, Depth + 1),
+ SCEV::FlagAnyWrap, Depth + 1),
+ SCEV::FlagAnyWrap, Depth + 1);
if (SAdd == OperandExtendedAdd) {
// Cache knowledge of AR NSW, which is propagated to this AddRec.
const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
// Return the expression with the addrec on the outside.
return getAddRecExpr(
- getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Cache),
- getSignExtendExprCached(Step, Ty, Cache), L,
+ getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
+ Depth + 1),
+ getSignExtendExpr(Step, Ty, Depth + 1), L,
AR->getNoWrapFlags());
}
// Similar to above, only this time treat the step value as unsigned.
@@ -1893,7 +1884,9 @@ const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
OperandExtendedAdd =
getAddExpr(WideStart,
getMulExpr(WideMaxBECount,
- getZeroExtendExpr(Step, WideTy)));
+ getZeroExtendExpr(Step, WideTy, Depth + 1),
+ SCEV::FlagAnyWrap, Depth + 1),
+ SCEV::FlagAnyWrap, Depth + 1);
if (SAdd == OperandExtendedAdd) {
// If AR wraps around then
//
@@ -1907,8 +1900,10 @@ const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
// Return the expression with the addrec on the outside.
return getAddRecExpr(
- getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Cache),
- getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
+ getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
+ Depth + 1),
+ getZeroExtendExpr(Step, Ty, Depth + 1), L,
+ AR->getNoWrapFlags());
}
}
}
@@ -1939,9 +1934,8 @@ const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
// Cache knowledge of AR NSW, then propagate NSW to the wide AddRec.
const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
return getAddRecExpr(
- getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Cache),
- getSignExtendExprCached(Step, Ty, Cache), L,
- AR->getNoWrapFlags());
+ getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
+ getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
}
}
@@ -1955,25 +1949,26 @@ const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty,
const APInt &C2 = SC2->getAPInt();
if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && C2.ugt(C1) &&
C2.isPowerOf2()) {
- Start = getSignExtendExprCached(Start, Ty, Cache);
+ Start = getSignExtendExpr(Start, Ty, Depth + 1);
const SCEV *NewAR = getAddRecExpr(getZero(AR->getType()), Step, L,
AR->getNoWrapFlags());
- return getAddExpr(Start, getSignExtendExprCached(NewAR, Ty, Cache));
+ return getAddExpr(Start, getSignExtendExpr(NewAR, Ty, Depth + 1),
+ SCEV::FlagAnyWrap, Depth + 1);
}
}
if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
return getAddRecExpr(
- getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Cache),
- getSignExtendExprCached(Step, Ty, Cache), L, AR->getNoWrapFlags());
+ getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
+ getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
}
}
// If the input value is provably positive and we could not simplify
// away the sext build a zext instead.
if (isKnownNonNegative(Op))
- return getZeroExtendExpr(Op, Ty);
+ return getZeroExtendExpr(Op, Ty, Depth + 1);
// The cast wasn't folded; create an explicit cast node.
// Recompute the insert position, as it may have been invalidated.
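
The overall shape of the ScalarEvolution change: the per-query ExtendCacheTy
memoization is dropped in favor of threading a Depth counter through
getZeroExtendExpr/getSignExtendExpr (and into getAddExpr/getMulExpr), with
UniqueSCEVs still providing global memoization; once Depth exceeds
scalar-evolution-max-ext-depth (default 8), the code stops trying to fold and
interns a plain sext/zext node. A generic sketch of this depth-limiting
pattern, with a toy folding rule standing in for the real SCEV rewrites:

    static const unsigned MaxDepth = 8; // cf. -scalar-evolution-max-ext-depth

    // Recursion carries an explicit depth; past the cutoff we return the
    // expression unfolded instead of recursing further, bounding the work
    // without needing a per-call cache.
    static unsigned long simplify(unsigned long Expr, unsigned Depth = 0) {
      if (Depth > MaxDepth)
        return Expr;             // give up: the "plain cast node" case
      if (Expr % 2 == 0)         // toy stand-in for a folding opportunity
        return simplify(Expr / 2, Depth + 1);
      return Expr;
    }

    int main() { return simplify(6) == 3 ? 0 : 1; }
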
diff --git a/lib/Analysis/TargetTransformInfo.cpp b/lib/Analysis/TargetTransformInfo.cpp
index 92328f6e5efd..f938a9a52065 100644
--- a/lib/Analysis/TargetTransformInfo.cpp
+++ b/lib/Analysis/TargetTransformInfo.cpp
@@ -89,8 +89,9 @@ TargetTransformInfo::getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
return TTIImpl->getEstimatedNumberOfCaseClusters(SI, JTSize);
}
-int TargetTransformInfo::getUserCost(const User *U) const {
- int Cost = TTIImpl->getUserCost(U);
+int TargetTransformInfo::getUserCost(const User *U,
+ ArrayRef<const Value *> Operands) const {
+ int Cost = TTIImpl->getUserCost(U, Operands);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
}
@@ -116,8 +117,8 @@ bool TargetTransformInfo::isLoweredToCall(const Function *F) const {
}
void TargetTransformInfo::getUnrollingPreferences(
- Loop *L, UnrollingPreferences &UP) const {
- return TTIImpl->getUnrollingPreferences(L, UP);
+ Loop *L, ScalarEvolution &SE, UnrollingPreferences &UP) const {
+ return TTIImpl->getUnrollingPreferences(L, SE, UP);
}
bool TargetTransformInfo::isLegalAddImmediate(int64_t Imm) const {
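
Both TTI hooks gain a context parameter: getUserCost now takes the operand list
explicitly, so a caller can cost an instruction against substituted (e.g.,
simplified) operands, and getUnrollingPreferences now receives ScalarEvolution.
A hedged sketch of the call-site pattern for the new getUserCost overload,
simply forwarding the instruction's own operands (costOf is an illustrative
helper, not part of the API):

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Analysis/TargetTransformInfo.h"
    #include "llvm/IR/Instruction.h"
    using namespace llvm;

    static int costOf(const TargetTransformInfo &TTI, const Instruction &I) {
      SmallVector<const Value *, 4> Operands;
      for (const Use &U : I.operands()) // pass the operands as-is here; a
        Operands.push_back(U.get());    // caller may substitute simplified ones
      return TTI.getUserCost(&I, Operands);
    }
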
diff --git a/lib/Analysis/TypeBasedAliasAnalysis.cpp b/lib/Analysis/TypeBasedAliasAnalysis.cpp
index cd9972ab56a6..86c528de267a 100644
--- a/lib/Analysis/TypeBasedAliasAnalysis.cpp
+++ b/lib/Analysis/TypeBasedAliasAnalysis.cpp
@@ -23,10 +23,10 @@
//
// The scalar TBAA metadata format is very simple. TBAA MDNodes have up to
// three fields, e.g.:
-// !0 = metadata !{ metadata !"an example type tree" }
-// !1 = metadata !{ metadata !"int", metadata !0 }
-// !2 = metadata !{ metadata !"float", metadata !0 }
-// !3 = metadata !{ metadata !"const float", metadata !2, i64 1 }
+// !0 = !{ !"an example type tree" }
+// !1 = !{ !"int", !0 }
+// !2 = !{ !"float", !0 }
+// !3 = !{ !"const float", !2, i64 1 }
//
// The first field is an identity field. It can be any value, usually
// an MDString, which uniquely identifies the type. The most important
@@ -74,13 +74,13 @@
// instruction. The base type is !4 (struct B), the access type is !2 (scalar
// type short) and the offset is 4.
//
-// !0 = metadata !{metadata !"Simple C/C++ TBAA"}
-// !1 = metadata !{metadata !"omnipotent char", metadata !0} // Scalar type node
-// !2 = metadata !{metadata !"short", metadata !1} // Scalar type node
-// !3 = metadata !{metadata !"A", metadata !2, i64 0} // Struct type node
-// !4 = metadata !{metadata !"B", metadata !2, i64 0, metadata !3, i64 4}
+// !0 = !{!"Simple C/C++ TBAA"}
+// !1 = !{!"omnipotent char", !0} // Scalar type node
+// !2 = !{!"short", !1} // Scalar type node
+// !3 = !{!"A", !2, i64 0} // Struct type node
+// !4 = !{!"B", !2, i64 0, !3, i64 4}
// // Struct type node
-// !5 = metadata !{metadata !4, metadata !2, i64 4} // Path tag node
+// !5 = !{!4, !2, i64 4} // Path tag node
//
// The struct type nodes and the scalar type nodes form a type DAG.
// Root (!0)