Diffstat (limited to 'llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp')
-rw-r--r--   llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp   102
1 file changed, 82 insertions(+), 20 deletions(-)
diff --git a/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp b/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
index 04c7e856b5d4a..7b03bbfcdfe4b 100644
--- a/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
+++ b/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
@@ -74,15 +74,16 @@ cl::opt<unsigned> MemOPSizeLarge(
namespace {
-cl::opt<bool> DoNameCompression("enable-name-compression",
- cl::desc("Enable name string compression"),
- cl::init(true));
-
cl::opt<bool> DoHashBasedCounterSplit(
"hash-based-counter-split",
cl::desc("Rename counter variable of a comdat function based on cfg hash"),
cl::init(true));
+cl::opt<bool> RuntimeCounterRelocation(
+ "runtime-counter-relocation",
+ cl::desc("Enable relocating counters at runtime."),
+ cl::init(false));
+
cl::opt<bool> ValueProfileStaticAlloc(
"vp-static-alloc",
cl::desc("Do static counter allocation for value profiler"),
@@ -109,6 +110,12 @@ cl::opt<bool> AtomicCounterUpdatePromoted(
" for promoted counters only"),
cl::init(false));
+cl::opt<bool> AtomicFirstCounter(
+ "atomic-first-counter", cl::ZeroOrMore,
+ cl::desc("Use atomic fetch add for first counter in a function (usually "
+ "the entry counter)"),
+ cl::init(false));
+
// If the option is not specified, the default behavior about whether
// counter promotion is done depends on how instrumentation lowering
// pipeline is set up, i.e., the default value of true of this option
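
With -atomic-first-counter, only the counter at index 0 of a function (normally the entry count) is updated atomically; the rest keep the cheaper non-atomic update (see the Index == 0 check in the lowerIncrement hunk below). A rough C++ model of the resulting behavior, not the pass's actual output:

#include <cstdint>

uint64_t Counters[2]; // [0] = function entry counter, [1] = an interior block

void instrumentedFunction(bool Taken) {
  // Entry counter: monotonic (relaxed) atomic add, so concurrent entries from
  // multiple threads are not lost.
  __atomic_fetch_add(&Counters[0], 1, __ATOMIC_RELAXED);
  if (Taken)
    Counters[1] += 1; // other counters keep the plain load/add/store update
}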
@@ -151,7 +158,9 @@ public:
InstrProfilingLegacyPass() : ModulePass(ID) {}
InstrProfilingLegacyPass(const InstrProfOptions &Options, bool IsCS = false)
- : ModulePass(ID), InstrProf(Options, IsCS) {}
+ : ModulePass(ID), InstrProf(Options, IsCS) {
+ initializeInstrProfilingLegacyPassPass(*PassRegistry::getPassRegistry());
+ }
StringRef getPassName() const override {
return "Frontend instrumentation-based coverage lowering";
@@ -242,9 +251,14 @@ public:
: LoopToCandidates(LoopToCands), ExitBlocks(), InsertPts(), L(CurLoop),
LI(LI), BFI(BFI) {
+ // Skip collection of ExitBlocks and InsertPts for loops that will not be
+ // able to have counters promoted.
SmallVector<BasicBlock *, 8> LoopExitBlocks;
SmallPtrSet<BasicBlock *, 8> BlockSet;
+
L.getExitBlocks(LoopExitBlocks);
+ if (!isPromotionPossible(&L, LoopExitBlocks))
+ return;
for (BasicBlock *ExitBlock : LoopExitBlocks) {
if (BlockSet.insert(ExitBlock).second) {
@@ -313,21 +327,31 @@ private:
return true;
}
- // Returns the max number of Counter Promotions for LP.
- unsigned getMaxNumOfPromotionsInLoop(Loop *LP) {
+ // Check whether the loop satisfies the basic conditions needed to perform
+ // Counter Promotions.
+ bool isPromotionPossible(Loop *LP,
+ const SmallVectorImpl<BasicBlock *> &LoopExitBlocks) {
// We can't insert into a catchswitch.
- SmallVector<BasicBlock *, 8> LoopExitBlocks;
- LP->getExitBlocks(LoopExitBlocks);
if (llvm::any_of(LoopExitBlocks, [](BasicBlock *Exit) {
return isa<CatchSwitchInst>(Exit->getTerminator());
}))
- return 0;
+ return false;
if (!LP->hasDedicatedExits())
- return 0;
+ return false;
BasicBlock *PH = LP->getLoopPreheader();
if (!PH)
+ return false;
+
+ return true;
+ }
+
+ // Returns the max number of Counter Promotions for LP.
+ unsigned getMaxNumOfPromotionsInLoop(Loop *LP) {
+ SmallVector<BasicBlock *, 8> LoopExitBlocks;
+ LP->getExitBlocks(LoopExitBlocks);
+ if (!isPromotionPossible(LP, LoopExitBlocks))
return 0;
SmallVector<BasicBlock *, 8> ExitingBlocks;
@@ -431,6 +455,13 @@ bool InstrProfiling::lowerIntrinsics(Function *F) {
return true;
}
+bool InstrProfiling::isRuntimeCounterRelocationEnabled() const {
+ if (RuntimeCounterRelocation.getNumOccurrences() > 0)
+ return RuntimeCounterRelocation;
+
+ return TT.isOSFuchsia();
+}
+
bool InstrProfiling::isCounterPromotionEnabled() const {
if (DoCounterPromotion.getNumOccurrences() > 0)
return DoCounterPromotion;
@@ -611,11 +642,19 @@ void InstrProfiling::lowerValueProfileInst(InstrProfValueProfileInst *Ind) {
llvm::InstrProfValueKind::IPVK_MemOPSize);
CallInst *Call = nullptr;
auto *TLI = &GetTLI(*Ind->getFunction());
+
+ // To support value profiling calls within Windows exception handlers, funclet
+ // information contained within operand bundles needs to be copied over to
+ // the library call. This is required for the IR to be processed by the
+ // WinEHPrepare pass.
+ SmallVector<OperandBundleDef, 1> OpBundles;
+ Ind->getOperandBundlesAsDefs(OpBundles);
if (!IsRange) {
Value *Args[3] = {Ind->getTargetValue(),
Builder.CreateBitCast(DataVar, Builder.getInt8PtrTy()),
Builder.getInt32(Index)};
- Call = Builder.CreateCall(getOrInsertValueProfilingCall(*M, *TLI), Args);
+ Call = Builder.CreateCall(getOrInsertValueProfilingCall(*M, *TLI), Args,
+ OpBundles);
} else {
Value *Args[6] = {
Ind->getTargetValue(),
@@ -624,8 +663,8 @@ void InstrProfiling::lowerValueProfileInst(InstrProfValueProfileInst *Ind) {
Builder.getInt64(MemOPSizeRangeStart),
Builder.getInt64(MemOPSizeRangeLast),
Builder.getInt64(MemOPSizeLarge == 0 ? INT64_MIN : MemOPSizeLarge)};
- Call =
- Builder.CreateCall(getOrInsertValueProfilingCall(*M, *TLI, true), Args);
+ Call = Builder.CreateCall(getOrInsertValueProfilingCall(*M, *TLI, true),
+ Args, OpBundles);
}
if (auto AK = TLI->getExtAttrForI32Param(false))
Call->addParamAttr(2, AK);
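
Without copying the operand bundles, a value-profiling intrinsic that sits inside a Windows EH funclet would be lowered into a call missing the "funclet" bundle, which WinEHPrepare rejects. A hedged source-level illustration of the scenario being fixed (not code from the patch):

#include <cstring>

void mayThrow();

void copyWithFallback(void *Dst, const void *Src, std::size_t N) {
  try {
    mayThrow();
  } catch (...) {
    // On Windows this handler becomes a funclet. The memcpy below gets a
    // memop-size value-profiling intrinsic, so the lowered
    // __llvm_profile_instrument_* call must inherit the handler's "funclet"
    // operand bundle for the IR to stay well formed.
    std::memcpy(Dst, Src, N);
  }
}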
@@ -641,7 +680,30 @@ void InstrProfiling::lowerIncrement(InstrProfIncrementInst *Inc) {
Value *Addr = Builder.CreateConstInBoundsGEP2_64(Counters->getValueType(),
Counters, 0, Index);
- if (Options.Atomic || AtomicCounterUpdateAll) {
+ if (isRuntimeCounterRelocationEnabled()) {
+ Type *Int64Ty = Type::getInt64Ty(M->getContext());
+ Type *Int64PtrTy = Type::getInt64PtrTy(M->getContext());
+ Function *Fn = Inc->getParent()->getParent();
+ Instruction &I = Fn->getEntryBlock().front();
+ LoadInst *LI = dyn_cast<LoadInst>(&I);
+ if (!LI) {
+ IRBuilder<> Builder(&I);
+ Type *Int64Ty = Type::getInt64Ty(M->getContext());
+ GlobalVariable *Bias = M->getGlobalVariable(getInstrProfCounterBiasVarName());
+ if (!Bias) {
+ Bias = new GlobalVariable(*M, Int64Ty, false, GlobalValue::LinkOnceODRLinkage,
+ Constant::getNullValue(Int64Ty),
+ getInstrProfCounterBiasVarName());
+ Bias->setVisibility(GlobalVariable::HiddenVisibility);
+ }
+ LI = Builder.CreateLoad(Int64Ty, Bias);
+ }
+ auto *Add = Builder.CreateAdd(Builder.CreatePtrToInt(Addr, Int64Ty), LI);
+ Addr = Builder.CreateIntToPtr(Add, Int64PtrTy);
+ }
+
+ if (Options.Atomic || AtomicCounterUpdateAll ||
+ (Index == 0 && AtomicFirstCounter)) {
Builder.CreateAtomicRMW(AtomicRMWInst::Add, Addr, Inc->getStep(),
AtomicOrdering::Monotonic);
} else {
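
Taken together, when runtime counter relocation is enabled the lowering loads the bias once at the top of the function, biases the counter address, and then performs either the atomic or the plain increment. A rough C++ model of the emitted code, again assuming the bias variable is named __llvm_profile_counter_bias (this models the IR, it is not code from the pass):

#include <cstdint>

extern "C" uint64_t __llvm_profile_counter_bias; // zero until the runtime publishes an offset

inline void incrementCounter(uint64_t *LinkTimeAddr, uint64_t Step, bool Atomic) {
  // Bias the link-time counter address by the runtime-published offset.
  auto *Real = reinterpret_cast<uint64_t *>(
      reinterpret_cast<uint64_t>(LinkTimeAddr) + __llvm_profile_counter_bias);
  if (Atomic)
    __atomic_fetch_add(Real, Step, __ATOMIC_RELAXED); // monotonic ordering
  else
    *Real += Step; // plain load/add/store, as in the non-atomic path
}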
@@ -916,7 +978,7 @@ void InstrProfiling::emitNameData() {
std::string CompressedNameStr;
if (Error E = collectPGOFuncNameStrings(ReferencedNames, CompressedNameStr,
- DoNameCompression)) {
+ DoInstrProfNameCompression)) {
report_fatal_error(toString(std::move(E)), false);
}
@@ -932,7 +994,7 @@ void InstrProfiling::emitNameData() {
// On COFF, it's important to reduce the alignment down to 1 to prevent the
// linker from inserting padding before the start of the names section or
// between names entries.
- NamesVar->setAlignment(Align::None());
+ NamesVar->setAlignment(Align(1));
UsedVars.push_back(NamesVar);
for (auto *NamePtr : ReferencedNames)
@@ -979,9 +1041,9 @@ void InstrProfiling::emitRegistration() {
}
bool InstrProfiling::emitRuntimeHook() {
- // We expect the linker to be invoked with -u<hook_var> flag for linux,
- // for which case there is no need to emit the user function.
- if (TT.isOSLinux())
+ // We expect the linker to be invoked with -u<hook_var> flag for Linux or
+ // Fuchsia, in which case there is no need to emit the user function.
+ if (TT.isOSLinux() || TT.isOSFuchsia())
return false;
// If the module's provided its own runtime, we don't need to do anything.
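
On Linux, and now on Fuchsia as well, the driver is expected to link with -u<hook_var>, which is enough to pull in the profile runtime, so the pass emits no hook-user function there. On other targets it still emits a small function that references the hook variable; roughly equivalent C++, with the names taken from compiler-rt's profile runtime as a hedged sketch of the emitted IR:

extern "C" int __llvm_profile_runtime; // hook variable defined by the profile runtime

// Referencing the hook from generated code forces the linker to keep the
// profile runtime; this models the emitted __llvm_profile_runtime_user().
extern "C" int __llvm_profile_runtime_user() { return __llvm_profile_runtime; }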