aboutsummaryrefslogtreecommitdiff
path: root/llvm/lib/Transforms/Instrumentation
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/lib/Transforms/Instrumentation')
-rw-r--r--llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp212
-rw-r--r--llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp25
-rw-r--r--llvm/lib/Transforms/Instrumentation/CGProfile.cpp2
-rw-r--r--llvm/lib/Transforms/Instrumentation/ControlHeightReduction.cpp14
-rw-r--r--llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp110
-rw-r--r--llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp57
-rw-r--r--llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp326
-rw-r--r--llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp6
-rw-r--r--llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp432
-rw-r--r--llvm/lib/Transforms/Instrumentation/Instrumentation.cpp7
-rw-r--r--llvm/lib/Transforms/Instrumentation/MemProfiler.cpp85
-rw-r--r--llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp625
-rw-r--r--llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp82
-rw-r--r--llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp2
-rw-r--r--llvm/lib/Transforms/Instrumentation/SanitizerBinaryMetadata.cpp25
-rw-r--r--llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp99
-rw-r--r--llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp86
17 files changed, 1176 insertions, 1019 deletions
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index bde5fba20f3b..b175e6f93f3e 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -201,8 +201,8 @@ static cl::opt<bool> ClRecover(
static cl::opt<bool> ClInsertVersionCheck(
"asan-guard-against-version-mismatch",
- cl::desc("Guard against compiler/runtime version mismatch."),
- cl::Hidden, cl::init(true));
+ cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden,
+ cl::init(true));
// This flag may need to be replaced with -f[no-]asan-reads.
static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
@@ -323,10 +323,9 @@ static cl::opt<unsigned> ClRealignStack(
static cl::opt<int> ClInstrumentationWithCallsThreshold(
"asan-instrumentation-with-call-threshold",
- cl::desc(
- "If the function being instrumented contains more than "
- "this number of memory accesses, use callbacks instead of "
- "inline checks (-1 means never use callbacks)."),
+ cl::desc("If the function being instrumented contains more than "
+ "this number of memory accesses, use callbacks instead of "
+ "inline checks (-1 means never use callbacks)."),
cl::Hidden, cl::init(7000));
static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
@@ -491,7 +490,8 @@ static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize,
bool IsMIPS32 = TargetTriple.isMIPS32();
bool IsMIPS64 = TargetTriple.isMIPS64();
bool IsArmOrThumb = TargetTriple.isARM() || TargetTriple.isThumb();
- bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64;
+ bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64 ||
+ TargetTriple.getArch() == Triple::aarch64_be;
bool IsLoongArch64 = TargetTriple.isLoongArch64();
bool IsRISCV64 = TargetTriple.getArch() == Triple::riscv64;
bool IsWindows = TargetTriple.isOSWindows();
@@ -644,8 +644,9 @@ namespace {
/// AddressSanitizer: instrument the code in module to find memory bugs.
struct AddressSanitizer {
AddressSanitizer(Module &M, const StackSafetyGlobalInfo *SSGI,
- bool CompileKernel = false, bool Recover = false,
- bool UseAfterScope = false,
+ int InstrumentationWithCallsThreshold,
+ uint32_t MaxInlinePoisoningSize, bool CompileKernel = false,
+ bool Recover = false, bool UseAfterScope = false,
AsanDetectStackUseAfterReturnMode UseAfterReturn =
AsanDetectStackUseAfterReturnMode::Runtime)
: CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
@@ -654,12 +655,19 @@ struct AddressSanitizer {
UseAfterScope(UseAfterScope || ClUseAfterScope),
UseAfterReturn(ClUseAfterReturn.getNumOccurrences() ? ClUseAfterReturn
: UseAfterReturn),
- SSGI(SSGI) {
+ SSGI(SSGI),
+ InstrumentationWithCallsThreshold(
+ ClInstrumentationWithCallsThreshold.getNumOccurrences() > 0
+ ? ClInstrumentationWithCallsThreshold
+ : InstrumentationWithCallsThreshold),
+ MaxInlinePoisoningSize(ClMaxInlinePoisoningSize.getNumOccurrences() > 0
+ ? ClMaxInlinePoisoningSize
+ : MaxInlinePoisoningSize) {
C = &(M.getContext());
DL = &M.getDataLayout();
LongSize = M.getDataLayout().getPointerSizeInBits();
IntptrTy = Type::getIntNTy(*C, LongSize);
- Int8PtrTy = Type::getInt8PtrTy(*C);
+ PtrTy = PointerType::getUnqual(*C);
Int32Ty = Type::getInt32Ty(*C);
TargetTriple = Triple(M.getTargetTriple());
@@ -751,8 +759,8 @@ private:
bool UseAfterScope;
AsanDetectStackUseAfterReturnMode UseAfterReturn;
Type *IntptrTy;
- Type *Int8PtrTy;
Type *Int32Ty;
+ PointerType *PtrTy;
ShadowMapping Mapping;
FunctionCallee AsanHandleNoReturnFunc;
FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction;
@@ -773,17 +781,22 @@ private:
FunctionCallee AMDGPUAddressShared;
FunctionCallee AMDGPUAddressPrivate;
+ int InstrumentationWithCallsThreshold;
+ uint32_t MaxInlinePoisoningSize;
};
class ModuleAddressSanitizer {
public:
- ModuleAddressSanitizer(Module &M, bool CompileKernel = false,
- bool Recover = false, bool UseGlobalsGC = true,
- bool UseOdrIndicator = true,
+ ModuleAddressSanitizer(Module &M, bool InsertVersionCheck,
+ bool CompileKernel = false, bool Recover = false,
+ bool UseGlobalsGC = true, bool UseOdrIndicator = true,
AsanDtorKind DestructorKind = AsanDtorKind::Global,
AsanCtorKind ConstructorKind = AsanCtorKind::Global)
: CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
: CompileKernel),
+ InsertVersionCheck(ClInsertVersionCheck.getNumOccurrences() > 0
+ ? ClInsertVersionCheck
+ : InsertVersionCheck),
Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
UseGlobalsGC(UseGlobalsGC && ClUseGlobalsGC && !this->CompileKernel),
// Enable aliases as they should have no downside with ODR indicators.
@@ -802,10 +815,13 @@ public:
// do globals-gc.
UseCtorComdat(UseGlobalsGC && ClWithComdat && !this->CompileKernel),
DestructorKind(DestructorKind),
- ConstructorKind(ConstructorKind) {
+ ConstructorKind(ClConstructorKind.getNumOccurrences() > 0
+ ? ClConstructorKind
+ : ConstructorKind) {
C = &(M.getContext());
int LongSize = M.getDataLayout().getPointerSizeInBits();
IntptrTy = Type::getIntNTy(*C, LongSize);
+ PtrTy = PointerType::getUnqual(*C);
TargetTriple = Triple(M.getTargetTriple());
Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);
@@ -819,11 +835,11 @@ public:
private:
void initializeCallbacks(Module &M);
- bool InstrumentGlobals(IRBuilder<> &IRB, Module &M, bool *CtorComdat);
+ void instrumentGlobals(IRBuilder<> &IRB, Module &M, bool *CtorComdat);
void InstrumentGlobalsCOFF(IRBuilder<> &IRB, Module &M,
ArrayRef<GlobalVariable *> ExtendedGlobals,
ArrayRef<Constant *> MetadataInitializers);
- void InstrumentGlobalsELF(IRBuilder<> &IRB, Module &M,
+ void instrumentGlobalsELF(IRBuilder<> &IRB, Module &M,
ArrayRef<GlobalVariable *> ExtendedGlobals,
ArrayRef<Constant *> MetadataInitializers,
const std::string &UniqueModuleId);
@@ -854,6 +870,7 @@ private:
int GetAsanVersion(const Module &M) const;
bool CompileKernel;
+ bool InsertVersionCheck;
bool Recover;
bool UseGlobalsGC;
bool UsePrivateAlias;
@@ -862,6 +879,7 @@ private:
AsanDtorKind DestructorKind;
AsanCtorKind ConstructorKind;
Type *IntptrTy;
+ PointerType *PtrTy;
LLVMContext *C;
Triple TargetTriple;
ShadowMapping Mapping;
@@ -1148,22 +1166,22 @@ AddressSanitizerPass::AddressSanitizerPass(
AsanCtorKind ConstructorKind)
: Options(Options), UseGlobalGC(UseGlobalGC),
UseOdrIndicator(UseOdrIndicator), DestructorKind(DestructorKind),
- ConstructorKind(ClConstructorKind) {}
+ ConstructorKind(ConstructorKind) {}
PreservedAnalyses AddressSanitizerPass::run(Module &M,
ModuleAnalysisManager &MAM) {
- ModuleAddressSanitizer ModuleSanitizer(M, Options.CompileKernel,
- Options.Recover, UseGlobalGC,
- UseOdrIndicator, DestructorKind,
- ConstructorKind);
+ ModuleAddressSanitizer ModuleSanitizer(
+ M, Options.InsertVersionCheck, Options.CompileKernel, Options.Recover,
+ UseGlobalGC, UseOdrIndicator, DestructorKind, ConstructorKind);
bool Modified = false;
auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
const StackSafetyGlobalInfo *const SSGI =
ClUseStackSafety ? &MAM.getResult<StackSafetyGlobalAnalysis>(M) : nullptr;
for (Function &F : M) {
- AddressSanitizer FunctionSanitizer(M, SSGI, Options.CompileKernel,
- Options.Recover, Options.UseAfterScope,
- Options.UseAfterReturn);
+ AddressSanitizer FunctionSanitizer(
+ M, SSGI, Options.InstrumentationWithCallsThreshold,
+ Options.MaxInlinePoisoningSize, Options.CompileKernel, Options.Recover,
+ Options.UseAfterScope, Options.UseAfterReturn);
const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
Modified |= FunctionSanitizer.instrumentFunction(F, &TLI);
}
@@ -1188,17 +1206,17 @@ static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize) {
/// Check if \p G has been created by a trusted compiler pass.
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G) {
// Do not instrument @llvm.global_ctors, @llvm.used, etc.
- if (G->getName().startswith("llvm.") ||
+ if (G->getName().starts_with("llvm.") ||
// Do not instrument gcov counter arrays.
- G->getName().startswith("__llvm_gcov_ctr") ||
+ G->getName().starts_with("__llvm_gcov_ctr") ||
// Do not instrument rtti proxy symbols for function sanitizer.
- G->getName().startswith("__llvm_rtti_proxy"))
+ G->getName().starts_with("__llvm_rtti_proxy"))
return true;
// Do not instrument asan globals.
- if (G->getName().startswith(kAsanGenPrefix) ||
- G->getName().startswith(kSanCovGenPrefix) ||
- G->getName().startswith(kODRGenPrefix))
+ if (G->getName().starts_with(kAsanGenPrefix) ||
+ G->getName().starts_with(kSanCovGenPrefix) ||
+ G->getName().starts_with(kODRGenPrefix))
return true;
return false;
@@ -1232,15 +1250,13 @@ Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
InstrumentationIRBuilder IRB(MI);
if (isa<MemTransferInst>(MI)) {
- IRB.CreateCall(
- isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
- {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
- IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
- IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
+ IRB.CreateCall(isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
+ {MI->getOperand(0), MI->getOperand(1),
+ IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
} else if (isa<MemSetInst>(MI)) {
IRB.CreateCall(
AsanMemset,
- {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
+ {MI->getOperand(0),
IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
}
@@ -1570,7 +1586,7 @@ void AddressSanitizer::instrumentMaskedLoadOrStore(
InstrumentedAddress = IRB.CreateExtractElement(Addr, Index);
} else if (Stride) {
Index = IRB.CreateMul(Index, Stride);
- Addr = IRB.CreateBitCast(Addr, Type::getInt8PtrTy(*C));
+ Addr = IRB.CreateBitCast(Addr, PointerType::getUnqual(*C));
InstrumentedAddress = IRB.CreateGEP(Type::getInt8Ty(*C), Addr, {Index});
} else {
InstrumentedAddress = IRB.CreateGEP(VTy, Addr, {Zero, Index});
@@ -1695,9 +1711,8 @@ Instruction *AddressSanitizer::instrumentAMDGPUAddress(
return InsertBefore;
// Instrument generic addresses in supported addressspaces.
IRBuilder<> IRB(InsertBefore);
- Value *AddrLong = IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy());
- Value *IsShared = IRB.CreateCall(AMDGPUAddressShared, {AddrLong});
- Value *IsPrivate = IRB.CreateCall(AMDGPUAddressPrivate, {AddrLong});
+ Value *IsShared = IRB.CreateCall(AMDGPUAddressShared, {Addr});
+ Value *IsPrivate = IRB.CreateCall(AMDGPUAddressPrivate, {Addr});
Value *IsSharedOrPrivate = IRB.CreateOr(IsShared, IsPrivate);
Value *Cmp = IRB.CreateNot(IsSharedOrPrivate);
Value *AddrSpaceZeroLanding =
@@ -1728,7 +1743,7 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
Module *M = IRB.GetInsertBlock()->getParent()->getParent();
IRB.CreateCall(
Intrinsic::getDeclaration(M, Intrinsic::asan_check_memaccess),
- {IRB.CreatePointerCast(Addr, Int8PtrTy),
+ {IRB.CreatePointerCast(Addr, PtrTy),
ConstantInt::get(Int32Ty, AccessInfo.Packed)});
return;
}
@@ -1869,7 +1884,7 @@ ModuleAddressSanitizer::getExcludedAliasedGlobal(const GlobalAlias &GA) const {
// When compiling the kernel, globals that are aliased by symbols prefixed
// by "__" are special and cannot be padded with a redzone.
- if (GA.getName().startswith("__"))
+ if (GA.getName().starts_with("__"))
return dyn_cast<GlobalVariable>(C->stripPointerCastsAndAliases());
return nullptr;
@@ -1939,9 +1954,9 @@ bool ModuleAddressSanitizer::shouldInstrumentGlobal(GlobalVariable *G) const {
// Do not instrument function pointers to initialization and termination
// routines: dynamic linker will not properly handle redzones.
- if (Section.startswith(".preinit_array") ||
- Section.startswith(".init_array") ||
- Section.startswith(".fini_array")) {
+ if (Section.starts_with(".preinit_array") ||
+ Section.starts_with(".init_array") ||
+ Section.starts_with(".fini_array")) {
return false;
}
@@ -1978,7 +1993,7 @@ bool ModuleAddressSanitizer::shouldInstrumentGlobal(GlobalVariable *G) const {
// those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
// them.
if (ParsedSegment == "__OBJC" ||
- (ParsedSegment == "__DATA" && ParsedSection.startswith("__objc_"))) {
+ (ParsedSegment == "__DATA" && ParsedSection.starts_with("__objc_"))) {
LLVM_DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
return false;
}
@@ -2006,7 +2021,7 @@ bool ModuleAddressSanitizer::shouldInstrumentGlobal(GlobalVariable *G) const {
if (CompileKernel) {
// Globals that prefixed by "__" are special and cannot be padded with a
// redzone.
- if (G->getName().startswith("__"))
+ if (G->getName().starts_with("__"))
return false;
}
@@ -2129,6 +2144,9 @@ ModuleAddressSanitizer::CreateMetadataGlobal(Module &M, Constant *Initializer,
M, Initializer->getType(), false, Linkage, Initializer,
Twine("__asan_global_") + GlobalValue::dropLLVMManglingEscape(OriginalName));
Metadata->setSection(getGlobalMetadataSection());
+ // Place metadata in a large section for x86-64 ELF binaries to mitigate
+ // relocation pressure.
+ setGlobalVariableLargeSection(TargetTriple, *Metadata);
return Metadata;
}
@@ -2177,7 +2195,7 @@ void ModuleAddressSanitizer::InstrumentGlobalsCOFF(
appendToCompilerUsed(M, MetadataGlobals);
}
-void ModuleAddressSanitizer::InstrumentGlobalsELF(
+void ModuleAddressSanitizer::instrumentGlobalsELF(
IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
ArrayRef<Constant *> MetadataInitializers,
const std::string &UniqueModuleId) {
@@ -2187,7 +2205,7 @@ void ModuleAddressSanitizer::InstrumentGlobalsELF(
// false negative odr violations at link time. If odr indicators are used, we
// keep the comdat sections, as link time odr violations will be dectected on
// the odr indicator symbols.
- bool UseComdatForGlobalsGC = UseOdrIndicator;
+ bool UseComdatForGlobalsGC = UseOdrIndicator && !UniqueModuleId.empty();
SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
@@ -2237,7 +2255,7 @@ void ModuleAddressSanitizer::InstrumentGlobalsELF(
// We also need to unregister globals at the end, e.g., when a shared library
// gets closed.
- if (DestructorKind != AsanDtorKind::None) {
+ if (DestructorKind != AsanDtorKind::None && !MetadataGlobals.empty()) {
IRBuilder<> IrbDtor(CreateAsanModuleDtor(M));
IrbDtor.CreateCall(AsanUnregisterElfGlobals,
{IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
@@ -2343,10 +2361,8 @@ void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray(
// redzones and inserts this function into llvm.global_ctors.
// Sets *CtorComdat to true if the global registration code emitted into the
// asan constructor is comdat-compatible.
-bool ModuleAddressSanitizer::InstrumentGlobals(IRBuilder<> &IRB, Module &M,
+void ModuleAddressSanitizer::instrumentGlobals(IRBuilder<> &IRB, Module &M,
bool *CtorComdat) {
- *CtorComdat = false;
-
// Build set of globals that are aliased by some GA, where
// getExcludedAliasedGlobal(GA) returns the relevant GlobalVariable.
SmallPtrSet<const GlobalVariable *, 16> AliasedGlobalExclusions;
@@ -2364,11 +2380,6 @@ bool ModuleAddressSanitizer::InstrumentGlobals(IRBuilder<> &IRB, Module &M,
}
size_t n = GlobalsToChange.size();
- if (n == 0) {
- *CtorComdat = true;
- return false;
- }
-
auto &DL = M.getDataLayout();
// A global is described by a structure
@@ -2391,8 +2402,11 @@ bool ModuleAddressSanitizer::InstrumentGlobals(IRBuilder<> &IRB, Module &M,
// We shouldn't merge same module names, as this string serves as unique
// module ID in runtime.
- GlobalVariable *ModuleName = createPrivateGlobalForString(
- M, M.getModuleIdentifier(), /*AllowMerging*/ false, kAsanGenPrefix);
+ GlobalVariable *ModuleName =
+ n != 0
+ ? createPrivateGlobalForString(M, M.getModuleIdentifier(),
+ /*AllowMerging*/ false, kAsanGenPrefix)
+ : nullptr;
for (size_t i = 0; i < n; i++) {
GlobalVariable *G = GlobalsToChange[i];
@@ -2455,7 +2469,7 @@ bool ModuleAddressSanitizer::InstrumentGlobals(IRBuilder<> &IRB, Module &M,
G->eraseFromParent();
NewGlobals[i] = NewGlobal;
- Constant *ODRIndicator = ConstantExpr::getNullValue(IRB.getInt8PtrTy());
+ Constant *ODRIndicator = ConstantPointerNull::get(PtrTy);
GlobalValue *InstrumentedGlobal = NewGlobal;
bool CanUsePrivateAliases =
@@ -2470,8 +2484,8 @@ bool ModuleAddressSanitizer::InstrumentGlobals(IRBuilder<> &IRB, Module &M,
// ODR should not happen for local linkage.
if (NewGlobal->hasLocalLinkage()) {
- ODRIndicator = ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, -1),
- IRB.getInt8PtrTy());
+ ODRIndicator =
+ ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, -1), PtrTy);
} else if (UseOdrIndicator) {
// With local aliases, we need to provide another externally visible
// symbol __odr_asan_XXX to detect ODR violation.
@@ -2517,19 +2531,27 @@ bool ModuleAddressSanitizer::InstrumentGlobals(IRBuilder<> &IRB, Module &M,
}
appendToCompilerUsed(M, ArrayRef<GlobalValue *>(GlobalsToAddToUsedList));
- std::string ELFUniqueModuleId =
- (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) ? getUniqueModuleId(&M)
- : "";
-
- if (!ELFUniqueModuleId.empty()) {
- InstrumentGlobalsELF(IRB, M, NewGlobals, Initializers, ELFUniqueModuleId);
+ if (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) {
+ // Use COMDAT and register globals even if n == 0 to ensure that (a) the
+ // linkage unit will only have one module constructor, and (b) the register
+ // function will be called. The module destructor is not created when n ==
+ // 0.
*CtorComdat = true;
- } else if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) {
- InstrumentGlobalsCOFF(IRB, M, NewGlobals, Initializers);
- } else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) {
- InstrumentGlobalsMachO(IRB, M, NewGlobals, Initializers);
+ instrumentGlobalsELF(IRB, M, NewGlobals, Initializers,
+ getUniqueModuleId(&M));
+ } else if (n == 0) {
+ // When UseGlobalsGC is false, COMDAT can still be used if n == 0, because
+ // all compile units will have identical module constructor/destructor.
+ *CtorComdat = TargetTriple.isOSBinFormatELF();
} else {
- InstrumentGlobalsWithMetadataArray(IRB, M, NewGlobals, Initializers);
+ *CtorComdat = false;
+ if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) {
+ InstrumentGlobalsCOFF(IRB, M, NewGlobals, Initializers);
+ } else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) {
+ InstrumentGlobalsMachO(IRB, M, NewGlobals, Initializers);
+ } else {
+ InstrumentGlobalsWithMetadataArray(IRB, M, NewGlobals, Initializers);
+ }
}
// Create calls for poisoning before initializers run and unpoisoning after.
@@ -2537,7 +2559,6 @@ bool ModuleAddressSanitizer::InstrumentGlobals(IRBuilder<> &IRB, Module &M,
createInitializerPoisonCalls(M, ModuleName);
LLVM_DEBUG(dbgs() << M);
- return true;
}
uint64_t
@@ -2588,7 +2609,7 @@ bool ModuleAddressSanitizer::instrumentModule(Module &M) {
} else {
std::string AsanVersion = std::to_string(GetAsanVersion(M));
std::string VersionCheckName =
- ClInsertVersionCheck ? (kAsanVersionCheckNamePrefix + AsanVersion) : "";
+ InsertVersionCheck ? (kAsanVersionCheckNamePrefix + AsanVersion) : "";
std::tie(AsanCtorFunction, std::ignore) =
createSanitizerCtorAndInitFunctions(M, kAsanModuleCtorName,
kAsanInitName, /*InitArgTypes=*/{},
@@ -2601,10 +2622,10 @@ bool ModuleAddressSanitizer::instrumentModule(Module &M) {
assert(AsanCtorFunction || ConstructorKind == AsanCtorKind::None);
if (AsanCtorFunction) {
IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator());
- InstrumentGlobals(IRB, M, &CtorComdat);
+ instrumentGlobals(IRB, M, &CtorComdat);
} else {
IRBuilder<> IRB(*C);
- InstrumentGlobals(IRB, M, &CtorComdat);
+ instrumentGlobals(IRB, M, &CtorComdat);
}
}
@@ -2684,15 +2705,12 @@ void AddressSanitizer::initializeCallbacks(Module &M, const TargetLibraryInfo *T
? std::string("")
: ClMemoryAccessCallbackPrefix;
AsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
- IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
- IRB.getInt8PtrTy(), IntptrTy);
- AsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy",
- IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
- IRB.getInt8PtrTy(), IntptrTy);
+ PtrTy, PtrTy, PtrTy, IntptrTy);
+ AsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy", PtrTy,
+ PtrTy, PtrTy, IntptrTy);
AsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
TLI->getAttrList(C, {1}, /*Signed=*/false),
- IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
- IRB.getInt32Ty(), IntptrTy);
+ PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);
AsanHandleNoReturnFunc =
M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy());
@@ -2705,10 +2723,10 @@ void AddressSanitizer::initializeCallbacks(Module &M, const TargetLibraryInfo *T
AsanShadowGlobal = M.getOrInsertGlobal("__asan_shadow",
ArrayType::get(IRB.getInt8Ty(), 0));
- AMDGPUAddressShared = M.getOrInsertFunction(
- kAMDGPUAddressSharedName, IRB.getInt1Ty(), IRB.getInt8PtrTy());
- AMDGPUAddressPrivate = M.getOrInsertFunction(
- kAMDGPUAddressPrivateName, IRB.getInt1Ty(), IRB.getInt8PtrTy());
+ AMDGPUAddressShared =
+ M.getOrInsertFunction(kAMDGPUAddressSharedName, IRB.getInt1Ty(), PtrTy);
+ AMDGPUAddressPrivate =
+ M.getOrInsertFunction(kAMDGPUAddressPrivateName, IRB.getInt1Ty(), PtrTy);
}
bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
@@ -2799,7 +2817,7 @@ bool AddressSanitizer::instrumentFunction(Function &F,
return false;
if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
if (!ClDebugFunc.empty() && ClDebugFunc == F.getName()) return false;
- if (F.getName().startswith("__asan_")) return false;
+ if (F.getName().starts_with("__asan_")) return false;
bool FunctionModified = false;
@@ -2890,9 +2908,9 @@ bool AddressSanitizer::instrumentFunction(Function &F,
}
}
- bool UseCalls = (ClInstrumentationWithCallsThreshold >= 0 &&
+ bool UseCalls = (InstrumentationWithCallsThreshold >= 0 &&
OperandsToInstrument.size() + IntrinToInstrument.size() >
- (unsigned)ClInstrumentationWithCallsThreshold);
+ (unsigned)InstrumentationWithCallsThreshold);
const DataLayout &DL = F.getParent()->getDataLayout();
ObjectSizeOpts ObjSizeOpts;
ObjSizeOpts.RoundToAlign = true;
@@ -3034,7 +3052,7 @@ void FunctionStackPoisoner::copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
Value *Poison = IRB.getIntN(StoreSizeInBytes * 8, Val);
IRB.CreateAlignedStore(
- Poison, IRB.CreateIntToPtr(Ptr, Poison->getType()->getPointerTo()),
+ Poison, IRB.CreateIntToPtr(Ptr, PointerType::getUnqual(Poison->getContext())),
Align(1));
i += StoreSizeInBytes;
@@ -3066,7 +3084,7 @@ void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
for (; j < End && ShadowMask[j] && Val == ShadowBytes[j]; ++j) {
}
- if (j - i >= ClMaxInlinePoisoningSize) {
+ if (j - i >= ASan.MaxInlinePoisoningSize) {
copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase);
IRB.CreateCall(AsanSetShadowFunc[Val],
{IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
@@ -3500,7 +3518,7 @@ void FunctionStackPoisoner::processStaticAllocas() {
IntptrTy, IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
IRBPoison.CreateStore(
Constant::getNullValue(IRBPoison.getInt8Ty()),
- IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy()));
+ IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getPtrTy()));
} else {
// For larger frames call __asan_stack_free_*.
IRBPoison.CreateCall(
diff --git a/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp b/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
index 709095184af5..ee5b81960417 100644
--- a/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
+++ b/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
@@ -37,6 +37,9 @@ using namespace llvm;
static cl::opt<bool> SingleTrapBB("bounds-checking-single-trap",
cl::desc("Use one trap block per function"));
+static cl::opt<bool> DebugTrapBB("bounds-checking-unique-traps",
+ cl::desc("Always use one trap per check"));
+
STATISTIC(ChecksAdded, "Bounds checks added");
STATISTIC(ChecksSkipped, "Bounds checks skipped");
STATISTIC(ChecksUnable, "Bounds checks unable to add");
@@ -180,19 +183,27 @@ static bool addBoundsChecking(Function &F, TargetLibraryInfo &TLI,
// will create a fresh block every time it is called.
BasicBlock *TrapBB = nullptr;
auto GetTrapBB = [&TrapBB](BuilderTy &IRB) {
- if (TrapBB && SingleTrapBB)
- return TrapBB;
-
Function *Fn = IRB.GetInsertBlock()->getParent();
- // FIXME: This debug location doesn't make a lot of sense in the
- // `SingleTrapBB` case.
auto DebugLoc = IRB.getCurrentDebugLocation();
IRBuilder<>::InsertPointGuard Guard(IRB);
+
+ if (TrapBB && SingleTrapBB && !DebugTrapBB)
+ return TrapBB;
+
TrapBB = BasicBlock::Create(Fn->getContext(), "trap", Fn);
IRB.SetInsertPoint(TrapBB);
- auto *F = Intrinsic::getDeclaration(Fn->getParent(), Intrinsic::trap);
- CallInst *TrapCall = IRB.CreateCall(F, {});
+ Intrinsic::ID IntrID = DebugTrapBB ? Intrinsic::ubsantrap : Intrinsic::trap;
+ auto *F = Intrinsic::getDeclaration(Fn->getParent(), IntrID);
+
+ CallInst *TrapCall;
+ if (DebugTrapBB) {
+ TrapCall =
+ IRB.CreateCall(F, ConstantInt::get(IRB.getInt8Ty(), Fn->size()));
+ } else {
+ TrapCall = IRB.CreateCall(F, {});
+ }
+
TrapCall->setDoesNotReturn();
TrapCall->setDoesNotThrow();
TrapCall->setDebugLoc(DebugLoc);
diff --git a/llvm/lib/Transforms/Instrumentation/CGProfile.cpp b/llvm/lib/Transforms/Instrumentation/CGProfile.cpp
index d53e12ad1ff5..e2e5f21b376b 100644
--- a/llvm/lib/Transforms/Instrumentation/CGProfile.cpp
+++ b/llvm/lib/Transforms/Instrumentation/CGProfile.cpp
@@ -66,7 +66,7 @@ static bool runCGProfilePass(
if (F.isDeclaration() || !F.getEntryCount())
continue;
auto &BFI = FAM.getResult<BlockFrequencyAnalysis>(F);
- if (BFI.getEntryFreq() == 0)
+ if (BFI.getEntryFreq() == BlockFrequency(0))
continue;
TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
for (auto &BB : F) {
diff --git a/llvm/lib/Transforms/Instrumentation/ControlHeightReduction.cpp b/llvm/lib/Transforms/Instrumentation/ControlHeightReduction.cpp
index 3e3be536defc..0a3d8d6000cf 100644
--- a/llvm/lib/Transforms/Instrumentation/ControlHeightReduction.cpp
+++ b/llvm/lib/Transforms/Instrumentation/ControlHeightReduction.cpp
@@ -1593,8 +1593,8 @@ static void insertTrivialPHIs(CHRScope *Scope,
// Insert a trivial phi for I (phi [&I, P0], [&I, P1], ...) at
// ExitBlock. Replace I with the new phi in UI unless UI is another
// phi at ExitBlock.
- PHINode *PN = PHINode::Create(I.getType(), pred_size(ExitBlock), "",
- &ExitBlock->front());
+ PHINode *PN = PHINode::Create(I.getType(), pred_size(ExitBlock), "");
+ PN->insertBefore(ExitBlock->begin());
for (BasicBlock *Pred : predecessors(ExitBlock)) {
PN->addIncoming(&I, Pred);
}
@@ -1777,6 +1777,13 @@ void CHR::cloneScopeBlocks(CHRScope *Scope,
BasicBlock *NewBB = CloneBasicBlock(BB, VMap, ".nonchr", &F);
NewBlocks.push_back(NewBB);
VMap[BB] = NewBB;
+
+ // Unreachable predecessors will not be cloned and will not have an edge
+ // to the cloned block. As such, also remove them from any phi nodes.
+ for (PHINode &PN : make_early_inc_range(NewBB->phis()))
+ PN.removeIncomingValueIf([&](unsigned Idx) {
+ return !DT.isReachableFromEntry(PN.getIncomingBlock(Idx));
+ });
}
// Place the cloned blocks right after the original blocks (right before the
@@ -1871,8 +1878,7 @@ void CHR::fixupBranchesAndSelects(CHRScope *Scope,
static_cast<uint32_t>(CHRBranchBias.scale(1000)),
static_cast<uint32_t>(CHRBranchBias.getCompl().scale(1000)),
};
- MDBuilder MDB(F.getContext());
- MergedBR->setMetadata(LLVMContext::MD_prof, MDB.createBranchWeights(Weights));
+ setBranchWeights(*MergedBR, Weights);
CHR_DEBUG(dbgs() << "CHR branch bias " << Weights[0] << ":" << Weights[1]
<< "\n");
}
diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index 8caee5bed8ed..2ba127bba6f6 100644
--- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -564,7 +564,7 @@ class DataFlowSanitizer {
/// getShadowTy([n x T]) = [n x getShadowTy(T)]
/// getShadowTy(other type) = i16
Type *getShadowTy(Type *OrigTy);
- /// Returns the shadow type of of V's type.
+ /// Returns the shadow type of V's type.
Type *getShadowTy(Value *V);
const uint64_t NumOfElementsInArgOrgTLS = ArgTLSSize / OriginWidthBytes;
@@ -1145,7 +1145,7 @@ bool DataFlowSanitizer::initializeModule(Module &M) {
Mod = &M;
Ctx = &M.getContext();
- Int8Ptr = Type::getInt8PtrTy(*Ctx);
+ Int8Ptr = PointerType::getUnqual(*Ctx);
OriginTy = IntegerType::get(*Ctx, OriginWidthBits);
OriginPtrTy = PointerType::getUnqual(OriginTy);
PrimitiveShadowTy = IntegerType::get(*Ctx, ShadowWidthBits);
@@ -1162,19 +1162,19 @@ bool DataFlowSanitizer::initializeModule(Module &M) {
FunctionType::get(IntegerType::get(*Ctx, 64), DFSanLoadLabelAndOriginArgs,
/*isVarArg=*/false);
DFSanUnimplementedFnTy = FunctionType::get(
- Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);
+ Type::getVoidTy(*Ctx), PointerType::getUnqual(*Ctx), /*isVarArg=*/false);
Type *DFSanWrapperExternWeakNullArgs[2] = {Int8Ptr, Int8Ptr};
DFSanWrapperExternWeakNullFnTy =
FunctionType::get(Type::getVoidTy(*Ctx), DFSanWrapperExternWeakNullArgs,
/*isVarArg=*/false);
Type *DFSanSetLabelArgs[4] = {PrimitiveShadowTy, OriginTy,
- Type::getInt8PtrTy(*Ctx), IntptrTy};
+ PointerType::getUnqual(*Ctx), IntptrTy};
DFSanSetLabelFnTy = FunctionType::get(Type::getVoidTy(*Ctx),
DFSanSetLabelArgs, /*isVarArg=*/false);
DFSanNonzeroLabelFnTy = FunctionType::get(Type::getVoidTy(*Ctx), std::nullopt,
/*isVarArg=*/false);
DFSanVarargWrapperFnTy = FunctionType::get(
- Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);
+ Type::getVoidTy(*Ctx), PointerType::getUnqual(*Ctx), /*isVarArg=*/false);
DFSanConditionalCallbackFnTy =
FunctionType::get(Type::getVoidTy(*Ctx), PrimitiveShadowTy,
/*isVarArg=*/false);
@@ -1288,7 +1288,7 @@ void DataFlowSanitizer::buildExternWeakCheckIfNeeded(IRBuilder<> &IRB,
// for a extern weak function, add a check here to help identify the issue.
if (GlobalValue::isExternalWeakLinkage(F->getLinkage())) {
std::vector<Value *> Args;
- Args.push_back(IRB.CreatePointerCast(F, IRB.getInt8PtrTy()));
+ Args.push_back(F);
Args.push_back(IRB.CreateGlobalStringPtr(F->getName()));
IRB.CreateCall(DFSanWrapperExternWeakNullFn, Args);
}
@@ -1553,7 +1553,7 @@ bool DataFlowSanitizer::runImpl(
assert(isa<Function>(C) && "Personality routine is not a function!");
Function *F = cast<Function>(C);
if (!isInstrumented(F))
- llvm::erase_value(FnsToInstrument, F);
+ llvm::erase(FnsToInstrument, F);
}
}
@@ -1575,7 +1575,7 @@ bool DataFlowSanitizer::runImpl(
// below will take care of instrumenting it.
Function *NewF =
buildWrapperFunction(F, "", GA.getLinkage(), F->getFunctionType());
- GA.replaceAllUsesWith(ConstantExpr::getBitCast(NewF, GA.getType()));
+ GA.replaceAllUsesWith(NewF);
NewF->takeName(&GA);
GA.eraseFromParent();
FnsToInstrument.push_back(NewF);
@@ -1622,9 +1622,6 @@ bool DataFlowSanitizer::runImpl(
WrapperLinkage, FT);
NewF->removeFnAttrs(ReadOnlyNoneAttrs);
- Value *WrappedFnCst =
- ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT));
-
// Extern weak functions can sometimes be null at execution time.
// Code will sometimes check if an extern weak function is null.
// This could look something like:
@@ -1657,9 +1654,9 @@ bool DataFlowSanitizer::runImpl(
}
return true;
};
- F.replaceUsesWithIf(WrappedFnCst, IsNotCmpUse);
+ F.replaceUsesWithIf(NewF, IsNotCmpUse);
- UnwrappedFnMap[WrappedFnCst] = &F;
+ UnwrappedFnMap[NewF] = &F;
*FI = NewF;
if (!F.isDeclaration()) {
@@ -2273,8 +2270,7 @@ std::pair<Value *, Value *> DFSanFunction::loadShadowOriginSansLoadTracking(
IRBuilder<> IRB(Pos);
CallInst *Call =
IRB.CreateCall(DFS.DFSanLoadLabelAndOriginFn,
- {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
- ConstantInt::get(DFS.IntptrTy, Size)});
+ {Addr, ConstantInt::get(DFS.IntptrTy, Size)});
Call->addRetAttr(Attribute::ZExt);
return {IRB.CreateTrunc(IRB.CreateLShr(Call, DFS.OriginWidthBits),
DFS.PrimitiveShadowTy),
@@ -2436,9 +2432,9 @@ void DFSanVisitor::visitLoadInst(LoadInst &LI) {
if (ClEventCallbacks) {
IRBuilder<> IRB(Pos);
- Value *Addr8 = IRB.CreateBitCast(LI.getPointerOperand(), DFSF.DFS.Int8Ptr);
+ Value *Addr = LI.getPointerOperand();
CallInst *CI =
- IRB.CreateCall(DFSF.DFS.DFSanLoadCallbackFn, {PrimitiveShadow, Addr8});
+ IRB.CreateCall(DFSF.DFS.DFSanLoadCallbackFn, {PrimitiveShadow, Addr});
CI->addParamAttr(0, Attribute::ZExt);
}
@@ -2530,10 +2526,9 @@ void DFSanFunction::storeOrigin(Instruction *Pos, Value *Addr, uint64_t Size,
}
if (shouldInstrumentWithCall()) {
- IRB.CreateCall(DFS.DFSanMaybeStoreOriginFn,
- {CollapsedShadow,
- IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
- ConstantInt::get(DFS.IntptrTy, Size), Origin});
+ IRB.CreateCall(
+ DFS.DFSanMaybeStoreOriginFn,
+ {CollapsedShadow, Addr, ConstantInt::get(DFS.IntptrTy, Size), Origin});
} else {
Value *Cmp = convertToBool(CollapsedShadow, IRB, "_dfscmp");
DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
@@ -2554,9 +2549,7 @@ void DFSanFunction::storeZeroPrimitiveShadow(Value *Addr, uint64_t Size,
IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidthBits);
Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0);
Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
- Value *ExtShadowAddr =
- IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowTy));
- IRB.CreateAlignedStore(ExtZeroShadow, ExtShadowAddr, ShadowAlign);
+ IRB.CreateAlignedStore(ExtZeroShadow, ShadowAddr, ShadowAlign);
// Do not write origins for 0 shadows because we do not trace origins for
// untainted sinks.
}
@@ -2611,11 +2604,9 @@ void DFSanFunction::storePrimitiveShadowOrigin(Value *Addr, uint64_t Size,
ShadowVec, PrimitiveShadow,
ConstantInt::get(Type::getInt32Ty(*DFS.Ctx), I));
}
- Value *ShadowVecAddr =
- IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowVecTy));
do {
Value *CurShadowVecAddr =
- IRB.CreateConstGEP1_32(ShadowVecTy, ShadowVecAddr, Offset);
+ IRB.CreateConstGEP1_32(ShadowVecTy, ShadowAddr, Offset);
IRB.CreateAlignedStore(ShadowVec, CurShadowVecAddr, ShadowAlign);
LeftSize -= ShadowVecSize;
++Offset;
@@ -2699,9 +2690,9 @@ void DFSanVisitor::visitStoreInst(StoreInst &SI) {
PrimitiveShadow, Origin, &SI);
if (ClEventCallbacks) {
IRBuilder<> IRB(&SI);
- Value *Addr8 = IRB.CreateBitCast(SI.getPointerOperand(), DFSF.DFS.Int8Ptr);
+ Value *Addr = SI.getPointerOperand();
CallInst *CI =
- IRB.CreateCall(DFSF.DFS.DFSanStoreCallbackFn, {PrimitiveShadow, Addr8});
+ IRB.CreateCall(DFSF.DFS.DFSanStoreCallbackFn, {PrimitiveShadow, Addr});
CI->addParamAttr(0, Attribute::ZExt);
}
}
@@ -2918,11 +2909,9 @@ void DFSanVisitor::visitMemSetInst(MemSetInst &I) {
Value *ValOrigin = DFSF.DFS.shouldTrackOrigins()
? DFSF.getOrigin(I.getValue())
: DFSF.DFS.ZeroOrigin;
- IRB.CreateCall(
- DFSF.DFS.DFSanSetLabelFn,
- {ValShadow, ValOrigin,
- IRB.CreateBitCast(I.getDest(), Type::getInt8PtrTy(*DFSF.DFS.Ctx)),
- IRB.CreateZExtOrTrunc(I.getLength(), DFSF.DFS.IntptrTy)});
+ IRB.CreateCall(DFSF.DFS.DFSanSetLabelFn,
+ {ValShadow, ValOrigin, I.getDest(),
+ IRB.CreateZExtOrTrunc(I.getLength(), DFSF.DFS.IntptrTy)});
}
void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) {
@@ -2933,28 +2922,24 @@ void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) {
if (DFSF.DFS.shouldTrackOrigins()) {
IRB.CreateCall(
DFSF.DFS.DFSanMemOriginTransferFn,
- {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
- IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
+ {I.getArgOperand(0), I.getArgOperand(1),
IRB.CreateIntCast(I.getArgOperand(2), DFSF.DFS.IntptrTy, false)});
}
- Value *RawDestShadow = DFSF.DFS.getShadowAddress(I.getDest(), &I);
+ Value *DestShadow = DFSF.DFS.getShadowAddress(I.getDest(), &I);
Value *SrcShadow = DFSF.DFS.getShadowAddress(I.getSource(), &I);
Value *LenShadow =
IRB.CreateMul(I.getLength(), ConstantInt::get(I.getLength()->getType(),
DFSF.DFS.ShadowWidthBytes));
- Type *Int8Ptr = Type::getInt8PtrTy(*DFSF.DFS.Ctx);
- Value *DestShadow = IRB.CreateBitCast(RawDestShadow, Int8Ptr);
- SrcShadow = IRB.CreateBitCast(SrcShadow, Int8Ptr);
auto *MTI = cast<MemTransferInst>(
IRB.CreateCall(I.getFunctionType(), I.getCalledOperand(),
{DestShadow, SrcShadow, LenShadow, I.getVolatileCst()}));
MTI->setDestAlignment(DFSF.getShadowAlign(I.getDestAlign().valueOrOne()));
MTI->setSourceAlignment(DFSF.getShadowAlign(I.getSourceAlign().valueOrOne()));
if (ClEventCallbacks) {
- IRB.CreateCall(DFSF.DFS.DFSanMemTransferCallbackFn,
- {RawDestShadow,
- IRB.CreateZExtOrTrunc(I.getLength(), DFSF.DFS.IntptrTy)});
+ IRB.CreateCall(
+ DFSF.DFS.DFSanMemTransferCallbackFn,
+ {DestShadow, IRB.CreateZExtOrTrunc(I.getLength(), DFSF.DFS.IntptrTy)});
}
}
@@ -3225,10 +3210,9 @@ void DFSanVisitor::visitLibAtomicLoad(CallBase &CB) {
// TODO: Support ClCombinePointerLabelsOnLoad
// TODO: Support ClEventCallbacks
- NextIRB.CreateCall(DFSF.DFS.DFSanMemShadowOriginTransferFn,
- {NextIRB.CreatePointerCast(DstPtr, NextIRB.getInt8PtrTy()),
- NextIRB.CreatePointerCast(SrcPtr, NextIRB.getInt8PtrTy()),
- NextIRB.CreateIntCast(Size, DFSF.DFS.IntptrTy, false)});
+ NextIRB.CreateCall(
+ DFSF.DFS.DFSanMemShadowOriginTransferFn,
+ {DstPtr, SrcPtr, NextIRB.CreateIntCast(Size, DFSF.DFS.IntptrTy, false)});
}
Value *DFSanVisitor::makeAddReleaseOrderingTable(IRBuilder<> &IRB) {
@@ -3264,10 +3248,9 @@ void DFSanVisitor::visitLibAtomicStore(CallBase &CB) {
// TODO: Support ClCombinePointerLabelsOnStore
// TODO: Support ClEventCallbacks
- IRB.CreateCall(DFSF.DFS.DFSanMemShadowOriginTransferFn,
- {IRB.CreatePointerCast(DstPtr, IRB.getInt8PtrTy()),
- IRB.CreatePointerCast(SrcPtr, IRB.getInt8PtrTy()),
- IRB.CreateIntCast(Size, DFSF.DFS.IntptrTy, false)});
+ IRB.CreateCall(
+ DFSF.DFS.DFSanMemShadowOriginTransferFn,
+ {DstPtr, SrcPtr, IRB.CreateIntCast(Size, DFSF.DFS.IntptrTy, false)});
}
void DFSanVisitor::visitLibAtomicExchange(CallBase &CB) {
@@ -3285,16 +3268,14 @@ void DFSanVisitor::visitLibAtomicExchange(CallBase &CB) {
// the additional complexity to address this is not warrented.
// Current Target to Dest
- IRB.CreateCall(DFSF.DFS.DFSanMemShadowOriginTransferFn,
- {IRB.CreatePointerCast(DstPtr, IRB.getInt8PtrTy()),
- IRB.CreatePointerCast(TargetPtr, IRB.getInt8PtrTy()),
- IRB.CreateIntCast(Size, DFSF.DFS.IntptrTy, false)});
+ IRB.CreateCall(
+ DFSF.DFS.DFSanMemShadowOriginTransferFn,
+ {DstPtr, TargetPtr, IRB.CreateIntCast(Size, DFSF.DFS.IntptrTy, false)});
// Current Src to Target (overriding)
- IRB.CreateCall(DFSF.DFS.DFSanMemShadowOriginTransferFn,
- {IRB.CreatePointerCast(TargetPtr, IRB.getInt8PtrTy()),
- IRB.CreatePointerCast(SrcPtr, IRB.getInt8PtrTy()),
- IRB.CreateIntCast(Size, DFSF.DFS.IntptrTy, false)});
+ IRB.CreateCall(
+ DFSF.DFS.DFSanMemShadowOriginTransferFn,
+ {TargetPtr, SrcPtr, IRB.CreateIntCast(Size, DFSF.DFS.IntptrTy, false)});
}
void DFSanVisitor::visitLibAtomicCompareExchange(CallBase &CB) {
@@ -3317,13 +3298,10 @@ void DFSanVisitor::visitLibAtomicCompareExchange(CallBase &CB) {
// If original call returned true, copy Desired to Target.
// If original call returned false, copy Target to Expected.
- NextIRB.CreateCall(
- DFSF.DFS.DFSanMemShadowOriginConditionalExchangeFn,
- {NextIRB.CreateIntCast(&CB, NextIRB.getInt8Ty(), false),
- NextIRB.CreatePointerCast(TargetPtr, NextIRB.getInt8PtrTy()),
- NextIRB.CreatePointerCast(ExpectedPtr, NextIRB.getInt8PtrTy()),
- NextIRB.CreatePointerCast(DesiredPtr, NextIRB.getInt8PtrTy()),
- NextIRB.CreateIntCast(Size, DFSF.DFS.IntptrTy, false)});
+ NextIRB.CreateCall(DFSF.DFS.DFSanMemShadowOriginConditionalExchangeFn,
+ {NextIRB.CreateIntCast(&CB, NextIRB.getInt8Ty(), false),
+ TargetPtr, ExpectedPtr, DesiredPtr,
+ NextIRB.CreateIntCast(Size, DFSF.DFS.IntptrTy, false)});
}
void DFSanVisitor::visitCallBase(CallBase &CB) {
diff --git a/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
index 21f0b1a92293..1ff0a34bae24 100644
--- a/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
+++ b/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
@@ -148,7 +148,7 @@ private:
std::string mangleName(const DICompileUnit *CU, GCovFileType FileType);
GCOVOptions Options;
- support::endianness Endian;
+ llvm::endianness Endian;
raw_ostream *os;
// Checksum, produced by hash of EdgeDestinations
@@ -750,7 +750,7 @@ static BasicBlock *getInstrBB(CFGMST<Edge, BBInfo> &MST, Edge &E,
#ifndef NDEBUG
static void dumpEdges(CFGMST<Edge, BBInfo> &MST, GCOVFunction &GF) {
size_t ID = 0;
- for (auto &E : make_pointee_range(MST.AllEdges)) {
+ for (const auto &E : make_pointee_range(MST.allEdges())) {
GCOVBlock &Src = E.SrcBB ? GF.getBlock(E.SrcBB) : GF.getEntryBlock();
GCOVBlock &Dst = E.DestBB ? GF.getBlock(E.DestBB) : GF.getReturnBlock();
dbgs() << " Edge " << ID++ << ": " << Src.Number << "->" << Dst.Number
@@ -788,8 +788,8 @@ bool GCOVProfiler::emitProfileNotes(
std::vector<uint8_t> EdgeDestinations;
SmallVector<std::pair<GlobalVariable *, MDNode *>, 8> CountersBySP;
- Endian = M->getDataLayout().isLittleEndian() ? support::endianness::little
- : support::endianness::big;
+ Endian = M->getDataLayout().isLittleEndian() ? llvm::endianness::little
+ : llvm::endianness::big;
unsigned FunctionIdent = 0;
for (auto &F : M->functions()) {
DISubprogram *SP = F.getSubprogram();
@@ -820,8 +820,8 @@ bool GCOVProfiler::emitProfileNotes(
CFGMST<Edge, BBInfo> MST(F, /*InstrumentFuncEntry_=*/false, BPI, BFI);
// getInstrBB can split basic blocks and push elements to AllEdges.
- for (size_t I : llvm::seq<size_t>(0, MST.AllEdges.size())) {
- auto &E = *MST.AllEdges[I];
+ for (size_t I : llvm::seq<size_t>(0, MST.numEdges())) {
+ auto &E = *MST.allEdges()[I];
// For now, disable spanning tree optimization when fork or exec* is
// used.
if (HasExecOrFork)
@@ -836,16 +836,16 @@ bool GCOVProfiler::emitProfileNotes(
// Some non-tree edges are IndirectBr which cannot be split. Ignore them
// as well.
- llvm::erase_if(MST.AllEdges, [](std::unique_ptr<Edge> &E) {
+ llvm::erase_if(MST.allEdges(), [](std::unique_ptr<Edge> &E) {
return E->Removed || (!E->InMST && !E->Place);
});
const size_t Measured =
std::stable_partition(
- MST.AllEdges.begin(), MST.AllEdges.end(),
+ MST.allEdges().begin(), MST.allEdges().end(),
[](std::unique_ptr<Edge> &E) { return E->Place; }) -
- MST.AllEdges.begin();
+ MST.allEdges().begin();
for (size_t I : llvm::seq<size_t>(0, Measured)) {
- Edge &E = *MST.AllEdges[I];
+ Edge &E = *MST.allEdges()[I];
GCOVBlock &Src =
E.SrcBB ? Func.getBlock(E.SrcBB) : Func.getEntryBlock();
GCOVBlock &Dst =
@@ -854,13 +854,13 @@ bool GCOVProfiler::emitProfileNotes(
E.DstNumber = Dst.Number;
}
std::stable_sort(
- MST.AllEdges.begin(), MST.AllEdges.begin() + Measured,
+ MST.allEdges().begin(), MST.allEdges().begin() + Measured,
[](const std::unique_ptr<Edge> &L, const std::unique_ptr<Edge> &R) {
return L->SrcNumber != R->SrcNumber ? L->SrcNumber < R->SrcNumber
: L->DstNumber < R->DstNumber;
});
- for (const Edge &E : make_pointee_range(MST.AllEdges)) {
+ for (const Edge &E : make_pointee_range(MST.allEdges())) {
GCOVBlock &Src =
E.SrcBB ? Func.getBlock(E.SrcBB) : Func.getEntryBlock();
GCOVBlock &Dst =
@@ -898,7 +898,9 @@ bool GCOVProfiler::emitProfileNotes(
if (Line == Loc.getLine()) continue;
Line = Loc.getLine();
- if (SP != getDISubprogram(Loc.getScope()))
+ MDNode *Scope = Loc.getScope();
+ // TODO: Handle blocks from another file due to #line, #include, etc.
+ if (isa<DILexicalBlockFile>(Scope) || SP != getDISubprogram(Scope))
continue;
GCOVLines &Lines = Block.getFile(Filename);
@@ -915,7 +917,7 @@ bool GCOVProfiler::emitProfileNotes(
CountersBySP.emplace_back(Counters, SP);
for (size_t I : llvm::seq<size_t>(0, Measured)) {
- const Edge &E = *MST.AllEdges[I];
+ const Edge &E = *MST.allEdges()[I];
IRBuilder<> Builder(E.Place, E.Place->getFirstInsertionPt());
Value *V = Builder.CreateConstInBoundsGEP2_64(
Counters->getValueType(), Counters, 0, I);
@@ -955,7 +957,7 @@ bool GCOVProfiler::emitProfileNotes(
continue;
}
os = &out;
- if (Endian == support::endianness::big) {
+ if (Endian == llvm::endianness::big) {
out.write("gcno", 4);
out.write(Options.Version, 4);
} else {
@@ -1029,9 +1031,9 @@ void GCOVProfiler::emitGlobalConstructor(
FunctionCallee GCOVProfiler::getStartFileFunc(const TargetLibraryInfo *TLI) {
Type *Args[] = {
- Type::getInt8PtrTy(*Ctx), // const char *orig_filename
- Type::getInt32Ty(*Ctx), // uint32_t version
- Type::getInt32Ty(*Ctx), // uint32_t checksum
+ PointerType::getUnqual(*Ctx), // const char *orig_filename
+ Type::getInt32Ty(*Ctx), // uint32_t version
+ Type::getInt32Ty(*Ctx), // uint32_t checksum
};
FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx), Args, false);
return M->getOrInsertFunction("llvm_gcda_start_file", FTy,
@@ -1051,8 +1053,8 @@ FunctionCallee GCOVProfiler::getEmitFunctionFunc(const TargetLibraryInfo *TLI) {
FunctionCallee GCOVProfiler::getEmitArcsFunc(const TargetLibraryInfo *TLI) {
Type *Args[] = {
- Type::getInt32Ty(*Ctx), // uint32_t num_counters
- Type::getInt64PtrTy(*Ctx), // uint64_t *counters
+ Type::getInt32Ty(*Ctx), // uint32_t num_counters
+ PointerType::getUnqual(*Ctx), // uint64_t *counters
};
FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx), Args, false);
return M->getOrInsertFunction("llvm_gcda_emit_arcs", FTy,
@@ -1098,19 +1100,16 @@ Function *GCOVProfiler::insertCounterWriteout(
// Collect the relevant data into a large constant data structure that we can
// walk to write out everything.
StructType *StartFileCallArgsTy = StructType::create(
- {Builder.getInt8PtrTy(), Builder.getInt32Ty(), Builder.getInt32Ty()},
+ {Builder.getPtrTy(), Builder.getInt32Ty(), Builder.getInt32Ty()},
"start_file_args_ty");
StructType *EmitFunctionCallArgsTy = StructType::create(
{Builder.getInt32Ty(), Builder.getInt32Ty(), Builder.getInt32Ty()},
"emit_function_args_ty");
- StructType *EmitArcsCallArgsTy = StructType::create(
- {Builder.getInt32Ty(), Builder.getInt64Ty()->getPointerTo()},
- "emit_arcs_args_ty");
- StructType *FileInfoTy =
- StructType::create({StartFileCallArgsTy, Builder.getInt32Ty(),
- EmitFunctionCallArgsTy->getPointerTo(),
- EmitArcsCallArgsTy->getPointerTo()},
- "file_info");
+ auto *PtrTy = Builder.getPtrTy();
+ StructType *EmitArcsCallArgsTy =
+ StructType::create({Builder.getInt32Ty(), PtrTy}, "emit_arcs_args_ty");
+ StructType *FileInfoTy = StructType::create(
+ {StartFileCallArgsTy, Builder.getInt32Ty(), PtrTy, PtrTy}, "file_info");
Constant *Zero32 = Builder.getInt32(0);
// Build an explicit array of two zeros for use in ConstantExpr GEP building.
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index 28db47a19092..f7f8fed643e9 100644
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -17,9 +17,11 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/StackSafetyAnalysis.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/BinaryFormat/ELF.h"
@@ -42,7 +44,6 @@
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
-#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
@@ -52,6 +53,7 @@
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/MemoryTaggingSupport.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
@@ -134,7 +136,7 @@ static cl::opt<size_t> ClMaxLifetimes(
static cl::opt<bool>
ClUseAfterScope("hwasan-use-after-scope",
cl::desc("detect use after scope within function"),
- cl::Hidden, cl::init(false));
+ cl::Hidden, cl::init(true));
static cl::opt<bool> ClGenerateTagsWithCalls(
"hwasan-generate-tags-with-calls",
@@ -223,6 +225,10 @@ static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks",
cl::desc("inline all checks"),
cl::Hidden, cl::init(false));
+static cl::opt<bool> ClInlineFastPathChecks("hwasan-inline-fast-path-checks",
+ cl::desc("inline all fast path checks"),
+ cl::Hidden, cl::init(false));
+
// Enabled from clang by "-fsanitize-hwaddress-experimental-aliasing".
static cl::opt<bool> ClUsePageAliases("hwasan-experimental-use-page-aliases",
cl::desc("Use page aliasing in HWASan"),
@@ -274,9 +280,18 @@ public:
initializeModule();
}
+ void sanitizeFunction(Function &F, FunctionAnalysisManager &FAM);
+
+private:
+ struct ShadowTagCheckInfo {
+ Instruction *TagMismatchTerm = nullptr;
+ Value *PtrLong = nullptr;
+ Value *AddrLong = nullptr;
+ Value *PtrTag = nullptr;
+ Value *MemTag = nullptr;
+ };
void setSSI(const StackSafetyGlobalInfo *S) { SSI = S; }
- void sanitizeFunction(Function &F, FunctionAnalysisManager &FAM);
void initializeModule();
void createHwasanCtorComdat();
@@ -291,18 +306,24 @@ public:
Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
int64_t getAccessInfo(bool IsWrite, unsigned AccessSizeIndex);
+ ShadowTagCheckInfo insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
+ DomTreeUpdater &DTU, LoopInfo *LI);
void instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
unsigned AccessSizeIndex,
- Instruction *InsertBefore);
+ Instruction *InsertBefore,
+ DomTreeUpdater &DTU, LoopInfo *LI);
void instrumentMemAccessInline(Value *Ptr, bool IsWrite,
unsigned AccessSizeIndex,
- Instruction *InsertBefore);
+ Instruction *InsertBefore, DomTreeUpdater &DTU,
+ LoopInfo *LI);
bool ignoreMemIntrinsic(MemIntrinsic *MI);
void instrumentMemIntrinsic(MemIntrinsic *MI);
- bool instrumentMemAccess(InterestingMemoryOperand &O);
+ bool instrumentMemAccess(InterestingMemoryOperand &O, DomTreeUpdater &DTU,
+ LoopInfo *LI);
bool ignoreAccess(Instruction *Inst, Value *Ptr);
void getInterestingMemoryOperands(
- Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);
+ Instruction *I, const TargetLibraryInfo &TLI,
+ SmallVectorImpl<InterestingMemoryOperand> &Interesting);
void tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
@@ -332,7 +353,6 @@ public:
void instrumentPersonalityFunctions();
-private:
LLVMContext *C;
Module &M;
const StackSafetyGlobalInfo *SSI;
@@ -364,7 +384,7 @@ private:
Type *VoidTy = Type::getVoidTy(M.getContext());
Type *IntptrTy;
- Type *Int8PtrTy;
+ PointerType *PtrTy;
Type *Int8Ty;
Type *Int32Ty;
Type *Int64Ty = Type::getInt64Ty(M.getContext());
@@ -372,6 +392,7 @@ private:
bool CompileKernel;
bool Recover;
bool OutlinedChecks;
+ bool InlineFastPath;
bool UseShortGranules;
bool InstrumentLandingPads;
bool InstrumentWithCalls;
@@ -420,6 +441,12 @@ PreservedAnalyses HWAddressSanitizerPass::run(Module &M,
HWASan.sanitizeFunction(F, FAM);
PreservedAnalyses PA = PreservedAnalyses::none();
+ // DominatorTreeAnalysis, PostDominatorTreeAnalysis, and LoopAnalysis
+ // are incrementally updated throughout this pass whenever
+ // SplitBlockAndInsertIfThen is called.
+ PA.preserve<DominatorTreeAnalysis>();
+ PA.preserve<PostDominatorTreeAnalysis>();
+ PA.preserve<LoopAnalysis>();
// GlobalsAA is considered stateless and does not get invalidated unless
// explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
// make changes that require GlobalsAA to be invalidated.
@@ -560,7 +587,7 @@ void HWAddressSanitizer::initializeModule() {
C = &(M.getContext());
IRBuilder<> IRB(*C);
IntptrTy = IRB.getIntPtrTy(DL);
- Int8PtrTy = IRB.getInt8PtrTy();
+ PtrTy = IRB.getPtrTy();
Int8Ty = IRB.getInt8Ty();
Int32Ty = IRB.getInt32Ty();
@@ -579,6 +606,13 @@ void HWAddressSanitizer::initializeModule() {
TargetTriple.isOSBinFormatELF() &&
(ClInlineAllChecks.getNumOccurrences() ? !ClInlineAllChecks : !Recover);
+ InlineFastPath =
+ (ClInlineFastPathChecks.getNumOccurrences()
+ ? ClInlineFastPathChecks
+ : !(TargetTriple.isAndroid() ||
+ TargetTriple.isOSFuchsia())); // These platforms may prefer less
+ // inlining to reduce binary size.
+
if (ClMatchAllTag.getNumOccurrences()) {
if (ClMatchAllTag != -1) {
MatchAllTag = ClMatchAllTag & 0xFF;
@@ -633,19 +667,19 @@ void HWAddressSanitizer::initializeCallbacks(Module &M) {
FunctionType::get(VoidTy, {IntptrTy, IntptrTy, Int8Ty}, false);
HwasanMemoryAccessCallbackFnTy =
FunctionType::get(VoidTy, {IntptrTy, Int8Ty}, false);
- HwasanMemTransferFnTy = FunctionType::get(
- Int8PtrTy, {Int8PtrTy, Int8PtrTy, IntptrTy, Int8Ty}, false);
- HwasanMemsetFnTy = FunctionType::get(
- Int8PtrTy, {Int8PtrTy, Int32Ty, IntptrTy, Int8Ty}, false);
+ HwasanMemTransferFnTy =
+ FunctionType::get(PtrTy, {PtrTy, PtrTy, IntptrTy, Int8Ty}, false);
+ HwasanMemsetFnTy =
+ FunctionType::get(PtrTy, {PtrTy, Int32Ty, IntptrTy, Int8Ty}, false);
} else {
HwasanMemoryAccessCallbackSizedFnTy =
FunctionType::get(VoidTy, {IntptrTy, IntptrTy}, false);
HwasanMemoryAccessCallbackFnTy =
FunctionType::get(VoidTy, {IntptrTy}, false);
HwasanMemTransferFnTy =
- FunctionType::get(Int8PtrTy, {Int8PtrTy, Int8PtrTy, IntptrTy}, false);
+ FunctionType::get(PtrTy, {PtrTy, PtrTy, IntptrTy}, false);
HwasanMemsetFnTy =
- FunctionType::get(Int8PtrTy, {Int8PtrTy, Int32Ty, IntptrTy}, false);
+ FunctionType::get(PtrTy, {PtrTy, Int32Ty, IntptrTy}, false);
}
for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
@@ -679,7 +713,7 @@ void HWAddressSanitizer::initializeCallbacks(Module &M) {
MemIntrinCallbackPrefix + "memset" + MatchAllStr, HwasanMemsetFnTy);
HwasanTagMemoryFunc = M.getOrInsertFunction("__hwasan_tag_memory", VoidTy,
- Int8PtrTy, Int8Ty, IntptrTy);
+ PtrTy, Int8Ty, IntptrTy);
HwasanGenerateTagFunc =
M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty);
@@ -699,7 +733,7 @@ Value *HWAddressSanitizer::getOpaqueNoopCast(IRBuilder<> &IRB, Value *Val) {
// This prevents code bloat as a result of rematerializing trivial definitions
// such as constants or global addresses at every load and store.
InlineAsm *Asm =
- InlineAsm::get(FunctionType::get(Int8PtrTy, {Val->getType()}, false),
+ InlineAsm::get(FunctionType::get(PtrTy, {Val->getType()}, false),
StringRef(""), StringRef("=r,0"),
/*hasSideEffects=*/false);
return IRB.CreateCall(Asm, {Val}, ".hwasan.shadow");
@@ -713,15 +747,15 @@ Value *HWAddressSanitizer::getShadowNonTls(IRBuilder<> &IRB) {
if (Mapping.Offset != kDynamicShadowSentinel)
return getOpaqueNoopCast(
IRB, ConstantExpr::getIntToPtr(
- ConstantInt::get(IntptrTy, Mapping.Offset), Int8PtrTy));
+ ConstantInt::get(IntptrTy, Mapping.Offset), PtrTy));
if (Mapping.InGlobal)
return getDynamicShadowIfunc(IRB);
Value *GlobalDynamicAddress =
IRB.GetInsertBlock()->getParent()->getParent()->getOrInsertGlobal(
- kHwasanShadowMemoryDynamicAddress, Int8PtrTy);
- return IRB.CreateLoad(Int8PtrTy, GlobalDynamicAddress);
+ kHwasanShadowMemoryDynamicAddress, PtrTy);
+ return IRB.CreateLoad(PtrTy, GlobalDynamicAddress);
}
bool HWAddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
@@ -748,7 +782,8 @@ bool HWAddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
}
void HWAddressSanitizer::getInterestingMemoryOperands(
- Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
+ Instruction *I, const TargetLibraryInfo &TLI,
+ SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
// Skip memory accesses inserted by another instrumentation.
if (I->hasMetadata(LLVMContext::MD_nosanitize))
return;
@@ -786,6 +821,7 @@ void HWAddressSanitizer::getInterestingMemoryOperands(
Type *Ty = CI->getParamByValType(ArgNo);
Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
}
+ maybeMarkSanitizerLibraryCallNoBuiltin(CI, &TLI);
}
}
@@ -824,7 +860,7 @@ Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
// Mem >> Scale
Value *Shadow = IRB.CreateLShr(Mem, Mapping.Scale);
if (Mapping.Offset == 0)
- return IRB.CreateIntToPtr(Shadow, Int8PtrTy);
+ return IRB.CreateIntToPtr(Shadow, PtrTy);
// (Mem >> Scale) + Offset
return IRB.CreateGEP(Int8Ty, ShadowBase, Shadow);
}
@@ -839,14 +875,48 @@ int64_t HWAddressSanitizer::getAccessInfo(bool IsWrite,
(AccessSizeIndex << HWASanAccessInfo::AccessSizeShift);
}
+HWAddressSanitizer::ShadowTagCheckInfo
+HWAddressSanitizer::insertShadowTagCheck(Value *Ptr, Instruction *InsertBefore,
+ DomTreeUpdater &DTU, LoopInfo *LI) {
+ ShadowTagCheckInfo R;
+
+ IRBuilder<> IRB(InsertBefore);
+
+ R.PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy);
+ R.PtrTag =
+ IRB.CreateTrunc(IRB.CreateLShr(R.PtrLong, PointerTagShift), Int8Ty);
+ R.AddrLong = untagPointer(IRB, R.PtrLong);
+ Value *Shadow = memToShadow(R.AddrLong, IRB);
+ R.MemTag = IRB.CreateLoad(Int8Ty, Shadow);
+ Value *TagMismatch = IRB.CreateICmpNE(R.PtrTag, R.MemTag);
+
+ if (MatchAllTag.has_value()) {
+ Value *TagNotIgnored = IRB.CreateICmpNE(
+ R.PtrTag, ConstantInt::get(R.PtrTag->getType(), *MatchAllTag));
+ TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
+ }
+
+ R.TagMismatchTerm = SplitBlockAndInsertIfThen(
+ TagMismatch, InsertBefore, false,
+ MDBuilder(*C).createBranchWeights(1, 100000), &DTU, LI);
+
+ return R;
+}
+
void HWAddressSanitizer::instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
unsigned AccessSizeIndex,
- Instruction *InsertBefore) {
+ Instruction *InsertBefore,
+ DomTreeUpdater &DTU,
+ LoopInfo *LI) {
assert(!UsePageAliases);
const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);
+
+ if (InlineFastPath)
+ InsertBefore =
+ insertShadowTagCheck(Ptr, InsertBefore, DTU, LI).TagMismatchTerm;
+
IRBuilder<> IRB(InsertBefore);
Module *M = IRB.GetInsertBlock()->getParent()->getParent();
- Ptr = IRB.CreateBitCast(Ptr, Int8PtrTy);
IRB.CreateCall(Intrinsic::getDeclaration(
M, UseShortGranules
? Intrinsic::hwasan_check_memaccess_shortgranules
@@ -856,55 +926,38 @@ void HWAddressSanitizer::instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
unsigned AccessSizeIndex,
- Instruction *InsertBefore) {
+ Instruction *InsertBefore,
+ DomTreeUpdater &DTU,
+ LoopInfo *LI) {
assert(!UsePageAliases);
const int64_t AccessInfo = getAccessInfo(IsWrite, AccessSizeIndex);
- IRBuilder<> IRB(InsertBefore);
-
- Value *PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy);
- Value *PtrTag =
- IRB.CreateTrunc(IRB.CreateLShr(PtrLong, PointerTagShift), Int8Ty);
- Value *AddrLong = untagPointer(IRB, PtrLong);
- Value *Shadow = memToShadow(AddrLong, IRB);
- Value *MemTag = IRB.CreateLoad(Int8Ty, Shadow);
- Value *TagMismatch = IRB.CreateICmpNE(PtrTag, MemTag);
-
- if (MatchAllTag.has_value()) {
- Value *TagNotIgnored = IRB.CreateICmpNE(
- PtrTag, ConstantInt::get(PtrTag->getType(), *MatchAllTag));
- TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
- }
- Instruction *CheckTerm =
- SplitBlockAndInsertIfThen(TagMismatch, InsertBefore, false,
- MDBuilder(*C).createBranchWeights(1, 100000));
+ ShadowTagCheckInfo TCI = insertShadowTagCheck(Ptr, InsertBefore, DTU, LI);
- IRB.SetInsertPoint(CheckTerm);
+ IRBuilder<> IRB(TCI.TagMismatchTerm);
Value *OutOfShortGranuleTagRange =
- IRB.CreateICmpUGT(MemTag, ConstantInt::get(Int8Ty, 15));
- Instruction *CheckFailTerm =
- SplitBlockAndInsertIfThen(OutOfShortGranuleTagRange, CheckTerm, !Recover,
- MDBuilder(*C).createBranchWeights(1, 100000));
+ IRB.CreateICmpUGT(TCI.MemTag, ConstantInt::get(Int8Ty, 15));
+ Instruction *CheckFailTerm = SplitBlockAndInsertIfThen(
+ OutOfShortGranuleTagRange, TCI.TagMismatchTerm, !Recover,
+ MDBuilder(*C).createBranchWeights(1, 100000), &DTU, LI);
- IRB.SetInsertPoint(CheckTerm);
- Value *PtrLowBits = IRB.CreateTrunc(IRB.CreateAnd(PtrLong, 15), Int8Ty);
+ IRB.SetInsertPoint(TCI.TagMismatchTerm);
+ Value *PtrLowBits = IRB.CreateTrunc(IRB.CreateAnd(TCI.PtrLong, 15), Int8Ty);
PtrLowBits = IRB.CreateAdd(
PtrLowBits, ConstantInt::get(Int8Ty, (1 << AccessSizeIndex) - 1));
- Value *PtrLowBitsOOB = IRB.CreateICmpUGE(PtrLowBits, MemTag);
- SplitBlockAndInsertIfThen(PtrLowBitsOOB, CheckTerm, false,
- MDBuilder(*C).createBranchWeights(1, 100000),
- (DomTreeUpdater *)nullptr, nullptr,
- CheckFailTerm->getParent());
+ Value *PtrLowBitsOOB = IRB.CreateICmpUGE(PtrLowBits, TCI.MemTag);
+ SplitBlockAndInsertIfThen(PtrLowBitsOOB, TCI.TagMismatchTerm, false,
+ MDBuilder(*C).createBranchWeights(1, 100000), &DTU,
+ LI, CheckFailTerm->getParent());
- IRB.SetInsertPoint(CheckTerm);
- Value *InlineTagAddr = IRB.CreateOr(AddrLong, 15);
- InlineTagAddr = IRB.CreateIntToPtr(InlineTagAddr, Int8PtrTy);
+ IRB.SetInsertPoint(TCI.TagMismatchTerm);
+ Value *InlineTagAddr = IRB.CreateOr(TCI.AddrLong, 15);
+ InlineTagAddr = IRB.CreateIntToPtr(InlineTagAddr, PtrTy);
Value *InlineTag = IRB.CreateLoad(Int8Ty, InlineTagAddr);
- Value *InlineTagMismatch = IRB.CreateICmpNE(PtrTag, InlineTag);
- SplitBlockAndInsertIfThen(InlineTagMismatch, CheckTerm, false,
- MDBuilder(*C).createBranchWeights(1, 100000),
- (DomTreeUpdater *)nullptr, nullptr,
- CheckFailTerm->getParent());
+ Value *InlineTagMismatch = IRB.CreateICmpNE(TCI.PtrTag, InlineTag);
+ SplitBlockAndInsertIfThen(InlineTagMismatch, TCI.TagMismatchTerm, false,
+ MDBuilder(*C).createBranchWeights(1, 100000), &DTU,
+ LI, CheckFailTerm->getParent());
IRB.SetInsertPoint(CheckFailTerm);
InlineAsm *Asm;
@@ -912,7 +965,7 @@ void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
case Triple::x86_64:
// The signal handler will find the data address in rdi.
Asm = InlineAsm::get(
- FunctionType::get(VoidTy, {PtrLong->getType()}, false),
+ FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
"int3\nnopl " +
itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)) +
"(%rax)",
@@ -923,7 +976,7 @@ void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
case Triple::aarch64_be:
// The signal handler will find the data address in x0.
Asm = InlineAsm::get(
- FunctionType::get(VoidTy, {PtrLong->getType()}, false),
+ FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
"brk #" + itostr(0x900 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
"{x0}",
/*hasSideEffects=*/true);
@@ -931,7 +984,7 @@ void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
case Triple::riscv64:
// The signal handler will find the data address in x10.
Asm = InlineAsm::get(
- FunctionType::get(VoidTy, {PtrLong->getType()}, false),
+ FunctionType::get(VoidTy, {TCI.PtrLong->getType()}, false),
"ebreak\naddiw x0, x11, " +
itostr(0x40 + (AccessInfo & HWASanAccessInfo::RuntimeMask)),
"{x10}",
@@ -940,9 +993,10 @@ void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
default:
report_fatal_error("unsupported architecture");
}
- IRB.CreateCall(Asm, PtrLong);
+ IRB.CreateCall(Asm, TCI.PtrLong);
if (Recover)
- cast<BranchInst>(CheckFailTerm)->setSuccessor(0, CheckTerm->getParent());
+ cast<BranchInst>(CheckFailTerm)
+ ->setSuccessor(0, TCI.TagMismatchTerm->getParent());
}
bool HWAddressSanitizer::ignoreMemIntrinsic(MemIntrinsic *MI) {
@@ -958,40 +1012,28 @@ bool HWAddressSanitizer::ignoreMemIntrinsic(MemIntrinsic *MI) {
void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
IRBuilder<> IRB(MI);
if (isa<MemTransferInst>(MI)) {
- if (UseMatchAllCallback) {
- IRB.CreateCall(
- isa<MemMoveInst>(MI) ? HwasanMemmove : HwasanMemcpy,
- {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
- IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
- IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false),
- ConstantInt::get(Int8Ty, *MatchAllTag)});
- } else {
- IRB.CreateCall(
- isa<MemMoveInst>(MI) ? HwasanMemmove : HwasanMemcpy,
- {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
- IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
- IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
- }
+ SmallVector<Value *, 4> Args{
+ MI->getOperand(0), MI->getOperand(1),
+ IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};
+
+ if (UseMatchAllCallback)
+ Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
+ IRB.CreateCall(isa<MemMoveInst>(MI) ? HwasanMemmove : HwasanMemcpy, Args);
} else if (isa<MemSetInst>(MI)) {
- if (UseMatchAllCallback) {
- IRB.CreateCall(
- HwasanMemset,
- {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
- IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
- IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false),
- ConstantInt::get(Int8Ty, *MatchAllTag)});
- } else {
- IRB.CreateCall(
- HwasanMemset,
- {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
- IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
- IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
- }
+ SmallVector<Value *, 4> Args{
+ MI->getOperand(0),
+ IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
+ IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)};
+ if (UseMatchAllCallback)
+ Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
+ IRB.CreateCall(HwasanMemset, Args);
}
MI->eraseFromParent();
}
-bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O) {
+bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O,
+ DomTreeUpdater &DTU,
+ LoopInfo *LI) {
Value *Addr = O.getPtr();
LLVM_DEBUG(dbgs() << "Instrumenting: " << O.getInsn() << "\n");
@@ -1006,34 +1048,26 @@ bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O) {
*O.Alignment >= O.TypeStoreSize / 8)) {
size_t AccessSizeIndex = TypeSizeToSizeIndex(O.TypeStoreSize);
if (InstrumentWithCalls) {
- if (UseMatchAllCallback) {
- IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
- {IRB.CreatePointerCast(Addr, IntptrTy),
- ConstantInt::get(Int8Ty, *MatchAllTag)});
- } else {
- IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
- IRB.CreatePointerCast(Addr, IntptrTy));
- }
+ SmallVector<Value *, 2> Args{IRB.CreatePointerCast(Addr, IntptrTy)};
+ if (UseMatchAllCallback)
+ Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
+ IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
+ Args);
} else if (OutlinedChecks) {
- instrumentMemAccessOutline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn());
+ instrumentMemAccessOutline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn(),
+ DTU, LI);
} else {
- instrumentMemAccessInline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn());
+ instrumentMemAccessInline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn(),
+ DTU, LI);
}
} else {
- if (UseMatchAllCallback) {
- IRB.CreateCall(
- HwasanMemoryAccessCallbackSized[O.IsWrite],
- {IRB.CreatePointerCast(Addr, IntptrTy),
- IRB.CreateUDiv(IRB.CreateTypeSize(IntptrTy, O.TypeStoreSize),
- ConstantInt::get(IntptrTy, 8)),
- ConstantInt::get(Int8Ty, *MatchAllTag)});
- } else {
- IRB.CreateCall(
- HwasanMemoryAccessCallbackSized[O.IsWrite],
- {IRB.CreatePointerCast(Addr, IntptrTy),
- IRB.CreateUDiv(IRB.CreateTypeSize(IntptrTy, O.TypeStoreSize),
- ConstantInt::get(IntptrTy, 8))});
- }
+ SmallVector<Value *, 3> Args{
+ IRB.CreatePointerCast(Addr, IntptrTy),
+ IRB.CreateUDiv(IRB.CreateTypeSize(IntptrTy, O.TypeStoreSize),
+ ConstantInt::get(IntptrTy, 8))};
+ if (UseMatchAllCallback)
+ Args.emplace_back(ConstantInt::get(Int8Ty, *MatchAllTag));
+ IRB.CreateCall(HwasanMemoryAccessCallbackSized[O.IsWrite], Args);
}
untagPointerOperand(O.getInsn(), Addr);
@@ -1049,7 +1083,7 @@ void HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag,
Tag = IRB.CreateTrunc(Tag, Int8Ty);
if (InstrumentWithCalls) {
IRB.CreateCall(HwasanTagMemoryFunc,
- {IRB.CreatePointerCast(AI, Int8PtrTy), Tag,
+ {IRB.CreatePointerCast(AI, PtrTy), Tag,
ConstantInt::get(IntptrTy, AlignedSize)});
} else {
size_t ShadowSize = Size >> Mapping.Scale;
@@ -1067,9 +1101,9 @@ void HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag,
const uint8_t SizeRemainder = Size % Mapping.getObjectAlignment().value();
IRB.CreateStore(ConstantInt::get(Int8Ty, SizeRemainder),
IRB.CreateConstGEP1_32(Int8Ty, ShadowPtr, ShadowSize));
- IRB.CreateStore(Tag, IRB.CreateConstGEP1_32(
- Int8Ty, IRB.CreatePointerCast(AI, Int8PtrTy),
- AlignedSize - 1));
+ IRB.CreateStore(
+ Tag, IRB.CreateConstGEP1_32(Int8Ty, IRB.CreatePointerCast(AI, PtrTy),
+ AlignedSize - 1));
}
}
}
@@ -1183,10 +1217,8 @@ Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty) {
// in Bionic's libc/private/bionic_tls.h.
Function *ThreadPointerFunc =
Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
- Value *SlotPtr = IRB.CreatePointerCast(
- IRB.CreateConstGEP1_32(Int8Ty, IRB.CreateCall(ThreadPointerFunc), 0x30),
- Ty->getPointerTo(0));
- return SlotPtr;
+ return IRB.CreateConstGEP1_32(Int8Ty, IRB.CreateCall(ThreadPointerFunc),
+ 0x30);
}
if (ThreadPtrGlobal)
return ThreadPtrGlobal;
@@ -1208,7 +1240,7 @@ Value *HWAddressSanitizer::getSP(IRBuilder<> &IRB) {
Module *M = F->getParent();
auto *GetStackPointerFn = Intrinsic::getDeclaration(
M, Intrinsic::frameaddress,
- IRB.getInt8PtrTy(M->getDataLayout().getAllocaAddrSpace()));
+ IRB.getPtrTy(M->getDataLayout().getAllocaAddrSpace()));
CachedSP = IRB.CreatePtrToInt(
IRB.CreateCall(GetStackPointerFn, {Constant::getNullValue(Int32Ty)}),
IntptrTy);
@@ -1271,8 +1303,8 @@ void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
// Store data to ring buffer.
Value *FrameRecordInfo = getFrameRecordInfo(IRB);
- Value *RecordPtr = IRB.CreateIntToPtr(ThreadLongMaybeUntagged,
- IntptrTy->getPointerTo(0));
+ Value *RecordPtr =
+ IRB.CreateIntToPtr(ThreadLongMaybeUntagged, IRB.getPtrTy(0));
IRB.CreateStore(FrameRecordInfo, RecordPtr);
// Update the ring buffer. Top byte of ThreadLong defines the size of the
@@ -1309,7 +1341,7 @@ void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
ThreadLongMaybeUntagged,
ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
- ShadowBase = IRB.CreateIntToPtr(ShadowBase, Int8PtrTy);
+ ShadowBase = IRB.CreateIntToPtr(ShadowBase, PtrTy);
}
}
@@ -1369,7 +1401,7 @@ bool HWAddressSanitizer::instrumentStack(memtag::StackInfo &SInfo,
size_t Size = memtag::getAllocaSizeInBytes(*AI);
size_t AlignedSize = alignTo(Size, Mapping.getObjectAlignment());
- Value *AICast = IRB.CreatePointerCast(AI, Int8PtrTy);
+ Value *AICast = IRB.CreatePointerCast(AI, PtrTy);
auto HandleLifetime = [&](IntrinsicInst *II) {
// Set the lifetime intrinsic to cover the whole alloca. This reduces the
@@ -1462,6 +1494,7 @@ void HWAddressSanitizer::sanitizeFunction(Function &F,
SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
SmallVector<Instruction *, 8> LandingPadVec;
+ const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
memtag::StackInfoBuilder SIB(SSI);
for (auto &Inst : instructions(F)) {
@@ -1472,7 +1505,7 @@ void HWAddressSanitizer::sanitizeFunction(Function &F,
if (InstrumentLandingPads && isa<LandingPadInst>(Inst))
LandingPadVec.push_back(&Inst);
- getInterestingMemoryOperands(&Inst, OperandsToInstrument);
+ getInterestingMemoryOperands(&Inst, TLI, OperandsToInstrument);
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst))
if (!ignoreMemIntrinsic(MI))
@@ -1528,8 +1561,13 @@ void HWAddressSanitizer::sanitizeFunction(Function &F,
}
}
+ DominatorTree *DT = FAM.getCachedResult<DominatorTreeAnalysis>(F);
+ PostDominatorTree *PDT = FAM.getCachedResult<PostDominatorTreeAnalysis>(F);
+ LoopInfo *LI = FAM.getCachedResult<LoopAnalysis>(F);
+ DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Lazy);
for (auto &Operand : OperandsToInstrument)
- instrumentMemAccess(Operand);
+ instrumentMemAccess(Operand, DTU, LI);
+ DTU.flush();
if (ClInstrumentMemIntrinsics && !IntrinToInstrument.empty()) {
for (auto *Inst : IntrinToInstrument)
@@ -1624,7 +1662,7 @@ void HWAddressSanitizer::instrumentGlobals() {
if (GV.hasSanitizerMetadata() && GV.getSanitizerMetadata().NoHWAddress)
continue;
- if (GV.isDeclarationForLinker() || GV.getName().startswith("llvm.") ||
+ if (GV.isDeclarationForLinker() || GV.getName().starts_with("llvm.") ||
GV.isThreadLocal())
continue;
@@ -1682,8 +1720,8 @@ void HWAddressSanitizer::instrumentPersonalityFunctions() {
return;
FunctionCallee HwasanPersonalityWrapper = M.getOrInsertFunction(
- "__hwasan_personality_wrapper", Int32Ty, Int32Ty, Int32Ty, Int64Ty,
- Int8PtrTy, Int8PtrTy, Int8PtrTy, Int8PtrTy, Int8PtrTy);
+ "__hwasan_personality_wrapper", Int32Ty, Int32Ty, Int32Ty, Int64Ty, PtrTy,
+ PtrTy, PtrTy, PtrTy, PtrTy);
FunctionCallee UnwindGetGR = M.getOrInsertFunction("_Unwind_GetGR", VoidTy);
FunctionCallee UnwindGetCFA = M.getOrInsertFunction("_Unwind_GetCFA", VoidTy);
@@ -1692,7 +1730,7 @@ void HWAddressSanitizer::instrumentPersonalityFunctions() {
if (P.first)
ThunkName += ("." + P.first->getName()).str();
FunctionType *ThunkFnTy = FunctionType::get(
- Int32Ty, {Int32Ty, Int32Ty, Int64Ty, Int8PtrTy, Int8PtrTy}, false);
+ Int32Ty, {Int32Ty, Int32Ty, Int64Ty, PtrTy, PtrTy}, false);
bool IsLocal = P.first && (!isa<GlobalValue>(P.first) ||
cast<GlobalValue>(P.first)->hasLocalLinkage());
auto *ThunkFn = Function::Create(ThunkFnTy,
@@ -1710,10 +1748,8 @@ void HWAddressSanitizer::instrumentPersonalityFunctions() {
HwasanPersonalityWrapper,
{ThunkFn->getArg(0), ThunkFn->getArg(1), ThunkFn->getArg(2),
ThunkFn->getArg(3), ThunkFn->getArg(4),
- P.first ? IRB.CreateBitCast(P.first, Int8PtrTy)
- : Constant::getNullValue(Int8PtrTy),
- IRB.CreateBitCast(UnwindGetGR.getCallee(), Int8PtrTy),
- IRB.CreateBitCast(UnwindGetCFA.getCallee(), Int8PtrTy)});
+ P.first ? P.first : Constant::getNullValue(PtrTy),
+ UnwindGetGR.getCallee(), UnwindGetCFA.getCallee()});
WrapperCall->setTailCall();
IRB.CreateRet(WrapperCall);
diff --git a/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp b/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
index 5c9799235017..7344fea17517 100644
--- a/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
+++ b/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
@@ -26,6 +26,7 @@
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PassManager.h"
+#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Value.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/Casting.h"
@@ -256,10 +257,7 @@ CallBase &llvm::pgo::promoteIndirectCall(CallBase &CB, Function *DirectCallee,
promoteCallWithIfThenElse(CB, DirectCallee, BranchWeights);
if (AttachProfToDirectCall) {
- MDBuilder MDB(NewInst.getContext());
- NewInst.setMetadata(
- LLVMContext::MD_prof,
- MDB.createBranchWeights({static_cast<uint32_t>(Count)}));
+ setBranchWeights(NewInst, {static_cast<uint32_t>(Count)});
}
using namespace ore;
diff --git a/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp b/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
index a7b1953ce81c..d3282779d9f5 100644
--- a/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
+++ b/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This pass lowers instrprof_* intrinsics emitted by a frontend for profiling.
+// This pass lowers instrprof_* intrinsics emitted by an instrumentor.
// It also builds the data structures and initialization code needed for
// updating execution counts and emitting the profile at runtime.
//
@@ -14,6 +14,7 @@
#include "llvm/Transforms/Instrumentation/InstrProfiling.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
@@ -23,6 +24,7 @@
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
@@ -47,6 +49,9 @@
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/TargetParser/Triple.h"
+#include "llvm/Transforms/Instrumentation.h"
+#include "llvm/Transforms/Instrumentation/PGOInstrumentation.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <algorithm>
@@ -190,7 +195,8 @@ public:
auto *OrigBiasInst = dyn_cast<BinaryOperator>(AddrInst->getOperand(0));
assert(OrigBiasInst->getOpcode() == Instruction::BinaryOps::Add);
Value *BiasInst = Builder.Insert(OrigBiasInst->clone());
- Addr = Builder.CreateIntToPtr(BiasInst, Ty->getPointerTo());
+ Addr = Builder.CreateIntToPtr(BiasInst,
+ PointerType::getUnqual(Ty->getContext()));
}
if (AtomicCounterUpdatePromoted)
// automic update currently can only be promoted across the current
@@ -241,7 +247,10 @@ public:
return;
for (BasicBlock *ExitBlock : LoopExitBlocks) {
- if (BlockSet.insert(ExitBlock).second) {
+ if (BlockSet.insert(ExitBlock).second &&
+ llvm::none_of(predecessors(ExitBlock), [&](const BasicBlock *Pred) {
+ return llvm::isPresplitCoroSuspendExitEdge(*Pred, *ExitBlock);
+ })) {
ExitBlocks.push_back(ExitBlock);
InsertPts.push_back(&*ExitBlock->getFirstInsertionPt());
}
@@ -430,6 +439,15 @@ bool InstrProfiling::lowerIntrinsics(Function *F) {
} else if (auto *IPVP = dyn_cast<InstrProfValueProfileInst>(&Instr)) {
lowerValueProfileInst(IPVP);
MadeChange = true;
+ } else if (auto *IPMP = dyn_cast<InstrProfMCDCBitmapParameters>(&Instr)) {
+ IPMP->eraseFromParent();
+ MadeChange = true;
+ } else if (auto *IPBU = dyn_cast<InstrProfMCDCTVBitmapUpdate>(&Instr)) {
+ lowerMCDCTestVectorBitmapUpdate(IPBU);
+ MadeChange = true;
+ } else if (auto *IPTU = dyn_cast<InstrProfMCDCCondBitmapUpdate>(&Instr)) {
+ lowerMCDCCondBitmapUpdate(IPTU);
+ MadeChange = true;
}
}
}
@@ -544,19 +562,27 @@ bool InstrProfiling::run(
// the instrumented function. This is counting the number of instrumented
// target value sites to enter it as field in the profile data variable.
for (Function &F : M) {
- InstrProfInstBase *FirstProfInst = nullptr;
- for (BasicBlock &BB : F)
- for (auto I = BB.begin(), E = BB.end(); I != E; I++)
+ InstrProfCntrInstBase *FirstProfInst = nullptr;
+ for (BasicBlock &BB : F) {
+ for (auto I = BB.begin(), E = BB.end(); I != E; I++) {
if (auto *Ind = dyn_cast<InstrProfValueProfileInst>(I))
computeNumValueSiteCounts(Ind);
- else if (FirstProfInst == nullptr &&
- (isa<InstrProfIncrementInst>(I) || isa<InstrProfCoverInst>(I)))
- FirstProfInst = dyn_cast<InstrProfInstBase>(I);
+ else {
+ if (FirstProfInst == nullptr &&
+ (isa<InstrProfIncrementInst>(I) || isa<InstrProfCoverInst>(I)))
+ FirstProfInst = dyn_cast<InstrProfCntrInstBase>(I);
+ // If the MCDCBitmapParameters intrinsic seen, create the bitmaps.
+ if (const auto &Params = dyn_cast<InstrProfMCDCBitmapParameters>(I))
+ static_cast<void>(getOrCreateRegionBitmaps(Params));
+ }
+ }
+ }
- // Value profiling intrinsic lowering requires per-function profile data
- // variable to be created first.
- if (FirstProfInst != nullptr)
+ // Use a profile intrinsic to create the region counters and data variable.
+ // Also create the data variable based on the MCDCParams.
+ if (FirstProfInst != nullptr) {
static_cast<void>(getOrCreateRegionCounters(FirstProfInst));
+ }
}
for (Function &F : M)
@@ -651,15 +677,11 @@ void InstrProfiling::lowerValueProfileInst(InstrProfValueProfileInst *Ind) {
SmallVector<OperandBundleDef, 1> OpBundles;
Ind->getOperandBundlesAsDefs(OpBundles);
if (!IsMemOpSize) {
- Value *Args[3] = {Ind->getTargetValue(),
- Builder.CreateBitCast(DataVar, Builder.getInt8PtrTy()),
- Builder.getInt32(Index)};
+ Value *Args[3] = {Ind->getTargetValue(), DataVar, Builder.getInt32(Index)};
Call = Builder.CreateCall(getOrInsertValueProfilingCall(*M, *TLI), Args,
OpBundles);
} else {
- Value *Args[3] = {Ind->getTargetValue(),
- Builder.CreateBitCast(DataVar, Builder.getInt8PtrTy()),
- Builder.getInt32(Index)};
+ Value *Args[3] = {Ind->getTargetValue(), DataVar, Builder.getInt32(Index)};
Call = Builder.CreateCall(
getOrInsertValueProfilingCall(*M, *TLI, ValueProfilingCallType::MemOp),
Args, OpBundles);
@@ -670,7 +692,7 @@ void InstrProfiling::lowerValueProfileInst(InstrProfValueProfileInst *Ind) {
Ind->eraseFromParent();
}
-Value *InstrProfiling::getCounterAddress(InstrProfInstBase *I) {
+Value *InstrProfiling::getCounterAddress(InstrProfCntrInstBase *I) {
auto *Counters = getOrCreateRegionCounters(I);
IRBuilder<> Builder(I);
@@ -710,6 +732,25 @@ Value *InstrProfiling::getCounterAddress(InstrProfInstBase *I) {
return Builder.CreateIntToPtr(Add, Addr->getType());
}
+Value *InstrProfiling::getBitmapAddress(InstrProfMCDCTVBitmapUpdate *I) {
+ auto *Bitmaps = getOrCreateRegionBitmaps(I);
+ IRBuilder<> Builder(I);
+
+ auto *Addr = Builder.CreateConstInBoundsGEP2_32(
+ Bitmaps->getValueType(), Bitmaps, 0, I->getBitmapIndex()->getZExtValue());
+
+ if (isRuntimeCounterRelocationEnabled()) {
+ LLVMContext &Ctx = M->getContext();
+ Ctx.diagnose(DiagnosticInfoPGOProfile(
+ M->getName().data(),
+ Twine("Runtime counter relocation is presently not supported for MC/DC "
+ "bitmaps."),
+ DS_Warning));
+ }
+
+ return Addr;
+}
+
void InstrProfiling::lowerCover(InstrProfCoverInst *CoverInstruction) {
auto *Addr = getCounterAddress(CoverInstruction);
IRBuilder<> Builder(CoverInstruction);
@@ -769,6 +810,86 @@ void InstrProfiling::lowerCoverageData(GlobalVariable *CoverageNamesVar) {
CoverageNamesVar->eraseFromParent();
}
+void InstrProfiling::lowerMCDCTestVectorBitmapUpdate(
+ InstrProfMCDCTVBitmapUpdate *Update) {
+ IRBuilder<> Builder(Update);
+ auto *Int8Ty = Type::getInt8Ty(M->getContext());
+ auto *Int8PtrTy = PointerType::getUnqual(M->getContext());
+ auto *Int32Ty = Type::getInt32Ty(M->getContext());
+ auto *Int64Ty = Type::getInt64Ty(M->getContext());
+ auto *MCDCCondBitmapAddr = Update->getMCDCCondBitmapAddr();
+ auto *BitmapAddr = getBitmapAddress(Update);
+
+ // Load Temp Val.
+ // %mcdc.temp = load i32, ptr %mcdc.addr, align 4
+ auto *Temp = Builder.CreateLoad(Int32Ty, MCDCCondBitmapAddr, "mcdc.temp");
+
+ // Calculate byte offset using div8.
+ // %1 = lshr i32 %mcdc.temp, 3
+ auto *BitmapByteOffset = Builder.CreateLShr(Temp, 0x3);
+
+ // Add byte offset to section base byte address.
+ // %2 = zext i32 %1 to i64
+ // %3 = add i64 ptrtoint (ptr @__profbm_test to i64), %2
+ auto *BitmapByteAddr =
+ Builder.CreateAdd(Builder.CreatePtrToInt(BitmapAddr, Int64Ty),
+ Builder.CreateZExtOrBitCast(BitmapByteOffset, Int64Ty));
+
+ // Convert to a pointer.
+ // %4 = inttoptr i32 %3 to ptr
+ BitmapByteAddr = Builder.CreateIntToPtr(BitmapByteAddr, Int8PtrTy);
+
+ // Calculate bit offset into bitmap byte by using div8 remainder (AND ~8)
+ // %5 = and i32 %mcdc.temp, 7
+ // %6 = trunc i32 %5 to i8
+ auto *BitToSet = Builder.CreateTrunc(Builder.CreateAnd(Temp, 0x7), Int8Ty);
+
+ // Shift bit offset left to form a bitmap.
+ // %7 = shl i8 1, %6
+ auto *ShiftedVal = Builder.CreateShl(Builder.getInt8(0x1), BitToSet);
+
+ // Load profile bitmap byte.
+ // %mcdc.bits = load i8, ptr %4, align 1
+ auto *Bitmap = Builder.CreateLoad(Int8Ty, BitmapByteAddr, "mcdc.bits");
+
+ // Perform logical OR of profile bitmap byte and shifted bit offset.
+ // %8 = or i8 %mcdc.bits, %7
+ auto *Result = Builder.CreateOr(Bitmap, ShiftedVal);
+
+ // Store the updated profile bitmap byte.
+ // store i8 %8, ptr %3, align 1
+ Builder.CreateStore(Result, BitmapByteAddr);
+ Update->eraseFromParent();
+}
+
+void InstrProfiling::lowerMCDCCondBitmapUpdate(
+ InstrProfMCDCCondBitmapUpdate *Update) {
+ IRBuilder<> Builder(Update);
+ auto *Int32Ty = Type::getInt32Ty(M->getContext());
+ auto *MCDCCondBitmapAddr = Update->getMCDCCondBitmapAddr();
+
+ // Load the MCDC temporary value from the stack.
+ // %mcdc.temp = load i32, ptr %mcdc.addr, align 4
+ auto *Temp = Builder.CreateLoad(Int32Ty, MCDCCondBitmapAddr, "mcdc.temp");
+
+ // Zero-extend the evaluated condition boolean value (0 or 1) by 32bits.
+ // %1 = zext i1 %tobool to i32
+ auto *CondV_32 = Builder.CreateZExt(Update->getCondBool(), Int32Ty);
+
+ // Shift the boolean value left (by the condition's ID) to form a bitmap.
+ // %2 = shl i32 %1, <Update->getCondID()>
+ auto *ShiftedVal = Builder.CreateShl(CondV_32, Update->getCondID());
+
+ // Perform logical OR of the bitmap against the loaded MCDC temporary value.
+ // %3 = or i32 %mcdc.temp, %2
+ auto *Result = Builder.CreateOr(Temp, ShiftedVal);
+
+ // Store the updated temporary value back to the stack.
+ // store i32 %3, ptr %mcdc.addr, align 4
+ Builder.CreateStore(Result, MCDCCondBitmapAddr);
+ Update->eraseFromParent();
+}
+
/// Get the name of a profiling variable for a particular function.
static std::string getVarName(InstrProfInstBase *Inc, StringRef Prefix,
bool &Renamed) {
@@ -784,7 +905,7 @@ static std::string getVarName(InstrProfInstBase *Inc, StringRef Prefix,
Renamed = true;
uint64_t FuncHash = Inc->getHash()->getZExtValue();
SmallVector<char, 24> HashPostfix;
- if (Name.endswith((Twine(".") + Twine(FuncHash)).toStringRef(HashPostfix)))
+ if (Name.ends_with((Twine(".") + Twine(FuncHash)).toStringRef(HashPostfix)))
return (Prefix + Name).str();
return (Prefix + Name + "." + Twine(FuncHash)).str();
}
@@ -878,7 +999,7 @@ static inline bool shouldUsePublicSymbol(Function *Fn) {
}
static inline Constant *getFuncAddrForProfData(Function *Fn) {
- auto *Int8PtrTy = Type::getInt8PtrTy(Fn->getContext());
+ auto *Int8PtrTy = PointerType::getUnqual(Fn->getContext());
// Store a nullptr in __llvm_profd, if we shouldn't use a real address
if (!shouldRecordFunctionAddr(Fn))
return ConstantPointerNull::get(Int8PtrTy);
@@ -886,7 +1007,7 @@ static inline Constant *getFuncAddrForProfData(Function *Fn) {
// If we can't use an alias, we must use the public symbol, even though this
// may require a symbolic relocation.
if (shouldUsePublicSymbol(Fn))
- return ConstantExpr::getBitCast(Fn, Int8PtrTy);
+ return Fn;
// When possible use a private alias to avoid symbolic relocations.
auto *GA = GlobalAlias::create(GlobalValue::LinkageTypes::PrivateLinkage,
@@ -909,7 +1030,7 @@ static inline Constant *getFuncAddrForProfData(Function *Fn) {
// appendToCompilerUsed(*Fn->getParent(), {GA});
- return ConstantExpr::getBitCast(GA, Int8PtrTy);
+ return GA;
}
static bool needsRuntimeRegistrationOfSectionRange(const Triple &TT) {
@@ -924,37 +1045,31 @@ static bool needsRuntimeRegistrationOfSectionRange(const Triple &TT) {
return true;
}
-GlobalVariable *
-InstrProfiling::createRegionCounters(InstrProfInstBase *Inc, StringRef Name,
- GlobalValue::LinkageTypes Linkage) {
- uint64_t NumCounters = Inc->getNumCounters()->getZExtValue();
- auto &Ctx = M->getContext();
- GlobalVariable *GV;
- if (isa<InstrProfCoverInst>(Inc)) {
- auto *CounterTy = Type::getInt8Ty(Ctx);
- auto *CounterArrTy = ArrayType::get(CounterTy, NumCounters);
- // TODO: `Constant::getAllOnesValue()` does not yet accept an array type.
- std::vector<Constant *> InitialValues(NumCounters,
- Constant::getAllOnesValue(CounterTy));
- GV = new GlobalVariable(*M, CounterArrTy, false, Linkage,
- ConstantArray::get(CounterArrTy, InitialValues),
- Name);
- GV->setAlignment(Align(1));
- } else {
- auto *CounterTy = ArrayType::get(Type::getInt64Ty(Ctx), NumCounters);
- GV = new GlobalVariable(*M, CounterTy, false, Linkage,
- Constant::getNullValue(CounterTy), Name);
- GV->setAlignment(Align(8));
- }
- return GV;
+void InstrProfiling::maybeSetComdat(GlobalVariable *GV, Function *Fn,
+ StringRef VarName) {
+ bool DataReferencedByCode = profDataReferencedByCode(*M);
+ bool NeedComdat = needsComdatForCounter(*Fn, *M);
+ bool UseComdat = (NeedComdat || TT.isOSBinFormatELF());
+
+ if (!UseComdat)
+ return;
+
+ StringRef GroupName =
+ TT.isOSBinFormatCOFF() && DataReferencedByCode ? GV->getName() : VarName;
+ Comdat *C = M->getOrInsertComdat(GroupName);
+ if (!NeedComdat)
+ C->setSelectionKind(Comdat::NoDeduplicate);
+ GV->setComdat(C);
+ // COFF doesn't allow the comdat group leader to have private linkage, so
+ // upgrade private linkage to internal linkage to produce a symbol table
+ // entry.
+ if (TT.isOSBinFormatCOFF() && GV->hasPrivateLinkage())
+ GV->setLinkage(GlobalValue::InternalLinkage);
}
-GlobalVariable *
-InstrProfiling::getOrCreateRegionCounters(InstrProfInstBase *Inc) {
+GlobalVariable *InstrProfiling::setupProfileSection(InstrProfInstBase *Inc,
+ InstrProfSectKind IPSK) {
GlobalVariable *NamePtr = Inc->getName();
- auto &PD = ProfileDataMap[NamePtr];
- if (PD.RegionCounters)
- return PD.RegionCounters;
// Match the linkage and visibility of the name global.
Function *Fn = Inc->getParent()->getParent();
@@ -993,42 +1108,101 @@ InstrProfiling::getOrCreateRegionCounters(InstrProfInstBase *Inc) {
// nodeduplicate COMDAT which is lowered to a zero-flag section group. This
// allows -z start-stop-gc to discard the entire group when the function is
// discarded.
- bool DataReferencedByCode = profDataReferencedByCode(*M);
- bool NeedComdat = needsComdatForCounter(*Fn, *M);
bool Renamed;
- std::string CntsVarName =
- getVarName(Inc, getInstrProfCountersVarPrefix(), Renamed);
- std::string DataVarName =
- getVarName(Inc, getInstrProfDataVarPrefix(), Renamed);
- auto MaybeSetComdat = [&](GlobalVariable *GV) {
- bool UseComdat = (NeedComdat || TT.isOSBinFormatELF());
- if (UseComdat) {
- StringRef GroupName = TT.isOSBinFormatCOFF() && DataReferencedByCode
- ? GV->getName()
- : CntsVarName;
- Comdat *C = M->getOrInsertComdat(GroupName);
- if (!NeedComdat)
- C->setSelectionKind(Comdat::NoDeduplicate);
- GV->setComdat(C);
- // COFF doesn't allow the comdat group leader to have private linkage, so
- // upgrade private linkage to internal linkage to produce a symbol table
- // entry.
- if (TT.isOSBinFormatCOFF() && GV->hasPrivateLinkage())
- GV->setLinkage(GlobalValue::InternalLinkage);
- }
- };
+ GlobalVariable *Ptr;
+ StringRef VarPrefix;
+ std::string VarName;
+ if (IPSK == IPSK_cnts) {
+ VarPrefix = getInstrProfCountersVarPrefix();
+ VarName = getVarName(Inc, VarPrefix, Renamed);
+ InstrProfCntrInstBase *CntrIncrement = dyn_cast<InstrProfCntrInstBase>(Inc);
+ Ptr = createRegionCounters(CntrIncrement, VarName, Linkage);
+ } else if (IPSK == IPSK_bitmap) {
+ VarPrefix = getInstrProfBitmapVarPrefix();
+ VarName = getVarName(Inc, VarPrefix, Renamed);
+ InstrProfMCDCBitmapInstBase *BitmapUpdate =
+ dyn_cast<InstrProfMCDCBitmapInstBase>(Inc);
+ Ptr = createRegionBitmaps(BitmapUpdate, VarName, Linkage);
+ } else {
+ llvm_unreachable("Profile Section must be for Counters or Bitmaps");
+ }
+
+ Ptr->setVisibility(Visibility);
+ // Put the counters and bitmaps in their own sections so linkers can
+ // remove unneeded sections.
+ Ptr->setSection(getInstrProfSectionName(IPSK, TT.getObjectFormat()));
+ Ptr->setLinkage(Linkage);
+ maybeSetComdat(Ptr, Fn, VarName);
+ return Ptr;
+}
+
+GlobalVariable *
+InstrProfiling::createRegionBitmaps(InstrProfMCDCBitmapInstBase *Inc,
+ StringRef Name,
+ GlobalValue::LinkageTypes Linkage) {
+ uint64_t NumBytes = Inc->getNumBitmapBytes()->getZExtValue();
+ auto *BitmapTy = ArrayType::get(Type::getInt8Ty(M->getContext()), NumBytes);
+ auto GV = new GlobalVariable(*M, BitmapTy, false, Linkage,
+ Constant::getNullValue(BitmapTy), Name);
+ GV->setAlignment(Align(1));
+ return GV;
+}
+
+GlobalVariable *
+InstrProfiling::getOrCreateRegionBitmaps(InstrProfMCDCBitmapInstBase *Inc) {
+ GlobalVariable *NamePtr = Inc->getName();
+ auto &PD = ProfileDataMap[NamePtr];
+ if (PD.RegionBitmaps)
+ return PD.RegionBitmaps;
+
+ // If RegionBitmaps doesn't already exist, create it by first setting up
+ // the corresponding profile section.
+ auto *BitmapPtr = setupProfileSection(Inc, IPSK_bitmap);
+ PD.RegionBitmaps = BitmapPtr;
+ PD.NumBitmapBytes = Inc->getNumBitmapBytes()->getZExtValue();
+ return PD.RegionBitmaps;
+}
+GlobalVariable *
+InstrProfiling::createRegionCounters(InstrProfCntrInstBase *Inc, StringRef Name,
+ GlobalValue::LinkageTypes Linkage) {
uint64_t NumCounters = Inc->getNumCounters()->getZExtValue();
- LLVMContext &Ctx = M->getContext();
+ auto &Ctx = M->getContext();
+ GlobalVariable *GV;
+ if (isa<InstrProfCoverInst>(Inc)) {
+ auto *CounterTy = Type::getInt8Ty(Ctx);
+ auto *CounterArrTy = ArrayType::get(CounterTy, NumCounters);
+ // TODO: `Constant::getAllOnesValue()` does not yet accept an array type.
+ std::vector<Constant *> InitialValues(NumCounters,
+ Constant::getAllOnesValue(CounterTy));
+ GV = new GlobalVariable(*M, CounterArrTy, false, Linkage,
+ ConstantArray::get(CounterArrTy, InitialValues),
+ Name);
+ GV->setAlignment(Align(1));
+ } else {
+ auto *CounterTy = ArrayType::get(Type::getInt64Ty(Ctx), NumCounters);
+ GV = new GlobalVariable(*M, CounterTy, false, Linkage,
+ Constant::getNullValue(CounterTy), Name);
+ GV->setAlignment(Align(8));
+ }
+ return GV;
+}
+
+GlobalVariable *
+InstrProfiling::getOrCreateRegionCounters(InstrProfCntrInstBase *Inc) {
+ GlobalVariable *NamePtr = Inc->getName();
+ auto &PD = ProfileDataMap[NamePtr];
+ if (PD.RegionCounters)
+ return PD.RegionCounters;
- auto *CounterPtr = createRegionCounters(Inc, CntsVarName, Linkage);
- CounterPtr->setVisibility(Visibility);
- CounterPtr->setSection(
- getInstrProfSectionName(IPSK_cnts, TT.getObjectFormat()));
- CounterPtr->setLinkage(Linkage);
- MaybeSetComdat(CounterPtr);
+ // If RegionCounters doesn't already exist, create it by first setting up
+ // the corresponding profile section.
+ auto *CounterPtr = setupProfileSection(Inc, IPSK_cnts);
PD.RegionCounters = CounterPtr;
+
if (DebugInfoCorrelate) {
+ LLVMContext &Ctx = M->getContext();
+ Function *Fn = Inc->getParent()->getParent();
if (auto *SP = Fn->getSubprogram()) {
DIBuilder DB(*M, true, SP->getUnit());
Metadata *FunctionNameAnnotation[] = {
@@ -1056,16 +1230,58 @@ InstrProfiling::getOrCreateRegionCounters(InstrProfInstBase *Inc) {
Annotations);
CounterPtr->addDebugInfo(DICounter);
DB.finalize();
- } else {
- std::string Msg = ("Missing debug info for function " + Fn->getName() +
- "; required for profile correlation.")
- .str();
- Ctx.diagnose(
- DiagnosticInfoPGOProfile(M->getName().data(), Msg, DS_Warning));
}
+
+ // Mark the counter variable as used so that it isn't optimized out.
+ CompilerUsedVars.push_back(PD.RegionCounters);
}
- auto *Int8PtrTy = Type::getInt8PtrTy(Ctx);
+ // Create the data variable (if it doesn't already exist).
+ createDataVariable(Inc);
+
+ return PD.RegionCounters;
+}
+
+void InstrProfiling::createDataVariable(InstrProfCntrInstBase *Inc) {
+ // When debug information is correlated to profile data, a data variable
+ // is not needed.
+ if (DebugInfoCorrelate)
+ return;
+
+ GlobalVariable *NamePtr = Inc->getName();
+ auto &PD = ProfileDataMap[NamePtr];
+
+ // Return if data variable was already created.
+ if (PD.DataVar)
+ return;
+
+ LLVMContext &Ctx = M->getContext();
+
+ Function *Fn = Inc->getParent()->getParent();
+ GlobalValue::LinkageTypes Linkage = NamePtr->getLinkage();
+ GlobalValue::VisibilityTypes Visibility = NamePtr->getVisibility();
+
+ // Due to the limitation of binder as of 2021/09/28, the duplicate weak
+ // symbols in the same csect won't be discarded. When there are duplicate weak
+ // symbols, we can NOT guarantee that the relocations get resolved to the
+ // intended weak symbol, so we can not ensure the correctness of the relative
+ // CounterPtr, so we have to use private linkage for counter and data symbols.
+ if (TT.isOSBinFormatXCOFF()) {
+ Linkage = GlobalValue::PrivateLinkage;
+ Visibility = GlobalValue::DefaultVisibility;
+ }
+
+ bool DataReferencedByCode = profDataReferencedByCode(*M);
+ bool NeedComdat = needsComdatForCounter(*Fn, *M);
+ bool Renamed;
+
+ // The Data Variable section is anchored to profile counters.
+ std::string CntsVarName =
+ getVarName(Inc, getInstrProfCountersVarPrefix(), Renamed);
+ std::string DataVarName =
+ getVarName(Inc, getInstrProfDataVarPrefix(), Renamed);
+
+ auto *Int8PtrTy = PointerType::getUnqual(Ctx);
// Allocate statically the array of pointers to value profile nodes for
// the current function.
Constant *ValuesPtrExpr = ConstantPointerNull::get(Int8PtrTy);
@@ -1079,19 +1295,18 @@ InstrProfiling::getOrCreateRegionCounters(InstrProfInstBase *Inc) {
*M, ValuesTy, false, Linkage, Constant::getNullValue(ValuesTy),
getVarName(Inc, getInstrProfValuesVarPrefix(), Renamed));
ValuesVar->setVisibility(Visibility);
+ setGlobalVariableLargeSection(TT, *ValuesVar);
ValuesVar->setSection(
getInstrProfSectionName(IPSK_vals, TT.getObjectFormat()));
ValuesVar->setAlignment(Align(8));
- MaybeSetComdat(ValuesVar);
- ValuesPtrExpr =
- ConstantExpr::getBitCast(ValuesVar, Type::getInt8PtrTy(Ctx));
+ maybeSetComdat(ValuesVar, Fn, CntsVarName);
+ ValuesPtrExpr = ValuesVar;
}
- if (DebugInfoCorrelate) {
- // Mark the counter variable as used so that it isn't optimized out.
- CompilerUsedVars.push_back(PD.RegionCounters);
- return PD.RegionCounters;
- }
+ uint64_t NumCounters = Inc->getNumCounters()->getZExtValue();
+ auto *CounterPtr = PD.RegionCounters;
+
+ uint64_t NumBitmapBytes = PD.NumBitmapBytes;
// Create data variable.
auto *IntPtrTy = M->getDataLayout().getIntPtrType(M->getContext());
@@ -1134,6 +1349,16 @@ InstrProfiling::getOrCreateRegionCounters(InstrProfInstBase *Inc) {
ConstantExpr::getSub(ConstantExpr::getPtrToInt(CounterPtr, IntPtrTy),
ConstantExpr::getPtrToInt(Data, IntPtrTy));
+ // Bitmaps are relative to the same data variable as profile counters.
+ GlobalVariable *BitmapPtr = PD.RegionBitmaps;
+ Constant *RelativeBitmapPtr = ConstantInt::get(IntPtrTy, 0);
+
+ if (BitmapPtr != nullptr) {
+ RelativeBitmapPtr =
+ ConstantExpr::getSub(ConstantExpr::getPtrToInt(BitmapPtr, IntPtrTy),
+ ConstantExpr::getPtrToInt(Data, IntPtrTy));
+ }
+
Constant *DataVals[] = {
#define INSTR_PROF_DATA(Type, LLVMType, Name, Init) Init,
#include "llvm/ProfileData/InstrProfData.inc"
@@ -1143,7 +1368,7 @@ InstrProfiling::getOrCreateRegionCounters(InstrProfInstBase *Inc) {
Data->setVisibility(Visibility);
Data->setSection(getInstrProfSectionName(IPSK_data, TT.getObjectFormat()));
Data->setAlignment(Align(INSTR_PROF_DATA_ALIGNMENT));
- MaybeSetComdat(Data);
+ maybeSetComdat(Data, Fn, CntsVarName);
PD.DataVar = Data;
@@ -1155,8 +1380,6 @@ InstrProfiling::getOrCreateRegionCounters(InstrProfInstBase *Inc) {
NamePtr->setLinkage(GlobalValue::PrivateLinkage);
// Collect the referenced names to be used by emitNameData.
ReferencedNames.push_back(NamePtr);
-
- return PD.RegionCounters;
}
void InstrProfiling::emitVNodes() {
@@ -1201,6 +1424,7 @@ void InstrProfiling::emitVNodes() {
auto *VNodesVar = new GlobalVariable(
*M, VNodesTy, false, GlobalValue::PrivateLinkage,
Constant::getNullValue(VNodesTy), getInstrProfVNodesVarName());
+ setGlobalVariableLargeSection(TT, *VNodesVar);
VNodesVar->setSection(
getInstrProfSectionName(IPSK_vnodes, TT.getObjectFormat()));
VNodesVar->setAlignment(M->getDataLayout().getABITypeAlign(VNodesTy));
@@ -1228,6 +1452,7 @@ void InstrProfiling::emitNameData() {
GlobalValue::PrivateLinkage, NamesVal,
getInstrProfNamesVarName());
NamesSize = CompressedNameStr.size();
+ setGlobalVariableLargeSection(TT, *NamesVar);
NamesVar->setSection(
getInstrProfSectionName(IPSK_name, TT.getObjectFormat()));
// On COFF, it's important to reduce the alignment down to 1 to prevent the
@@ -1248,7 +1473,7 @@ void InstrProfiling::emitRegistration() {
// Construct the function.
auto *VoidTy = Type::getVoidTy(M->getContext());
- auto *VoidPtrTy = Type::getInt8PtrTy(M->getContext());
+ auto *VoidPtrTy = PointerType::getUnqual(M->getContext());
auto *Int64Ty = Type::getInt64Ty(M->getContext());
auto *RegisterFTy = FunctionType::get(VoidTy, false);
auto *RegisterF = Function::Create(RegisterFTy, GlobalValue::InternalLinkage,
@@ -1265,10 +1490,10 @@ void InstrProfiling::emitRegistration() {
IRBuilder<> IRB(BasicBlock::Create(M->getContext(), "", RegisterF));
for (Value *Data : CompilerUsedVars)
if (!isa<Function>(Data))
- IRB.CreateCall(RuntimeRegisterF, IRB.CreateBitCast(Data, VoidPtrTy));
+ IRB.CreateCall(RuntimeRegisterF, Data);
for (Value *Data : UsedVars)
if (Data != NamesVar && !isa<Function>(Data))
- IRB.CreateCall(RuntimeRegisterF, IRB.CreateBitCast(Data, VoidPtrTy));
+ IRB.CreateCall(RuntimeRegisterF, Data);
if (NamesVar) {
Type *ParamTypes[] = {VoidPtrTy, Int64Ty};
@@ -1277,8 +1502,7 @@ void InstrProfiling::emitRegistration() {
auto *NamesRegisterF =
Function::Create(NamesRegisterTy, GlobalVariable::ExternalLinkage,
getInstrProfNamesRegFuncName(), M);
- IRB.CreateCall(NamesRegisterF, {IRB.CreateBitCast(NamesVar, VoidPtrTy),
- IRB.getInt64(NamesSize)});
+ IRB.CreateCall(NamesRegisterF, {NamesVar, IRB.getInt64(NamesSize)});
}
IRB.CreateRetVoid();
diff --git a/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp b/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp
index 806afc8fcdf7..199afbe966dd 100644
--- a/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp
+++ b/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp
@@ -85,3 +85,10 @@ Comdat *llvm::getOrCreateFunctionComdat(Function &F, Triple &T) {
return C;
}
+void llvm::setGlobalVariableLargeSection(Triple &TargetTriple,
+ GlobalVariable &GV) {
+ if (TargetTriple.getArch() == Triple::x86_64 &&
+ TargetTriple.getObjectFormat() == Triple::ELF) {
+ GV.setCodeModel(CodeModel::Large);
+ }
+}
diff --git a/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp b/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp
index 789ed005d03d..539b7441d24b 100644
--- a/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp
@@ -182,6 +182,7 @@ public:
C = &(M.getContext());
LongSize = M.getDataLayout().getPointerSizeInBits();
IntptrTy = Type::getIntNTy(*C, LongSize);
+ PtrTy = PointerType::getUnqual(*C);
}
/// If it is an interesting memory access, populate information
@@ -209,6 +210,7 @@ private:
LLVMContext *C;
int LongSize;
Type *IntptrTy;
+ PointerType *PtrTy;
ShadowMapping Mapping;
// These arrays is indexed by AccessIsWrite
@@ -267,15 +269,13 @@ Value *MemProfiler::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
void MemProfiler::instrumentMemIntrinsic(MemIntrinsic *MI) {
IRBuilder<> IRB(MI);
if (isa<MemTransferInst>(MI)) {
- IRB.CreateCall(
- isa<MemMoveInst>(MI) ? MemProfMemmove : MemProfMemcpy,
- {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
- IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
- IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
+ IRB.CreateCall(isa<MemMoveInst>(MI) ? MemProfMemmove : MemProfMemcpy,
+ {MI->getOperand(0), MI->getOperand(1),
+ IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
} else if (isa<MemSetInst>(MI)) {
IRB.CreateCall(
MemProfMemset,
- {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
+ {MI->getOperand(0),
IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
}
@@ -364,13 +364,13 @@ MemProfiler::isInterestingMemoryAccess(Instruction *I) const {
StringRef SectionName = GV->getSection();
// Check if the global is in the PGO counters section.
auto OF = Triple(I->getModule()->getTargetTriple()).getObjectFormat();
- if (SectionName.endswith(
+ if (SectionName.ends_with(
getInstrProfSectionName(IPSK_cnts, OF, /*AddSegmentInfo=*/false)))
return std::nullopt;
}
// Do not instrument accesses to LLVM internal variables.
- if (GV->getName().startswith("__llvm"))
+ if (GV->getName().starts_with("__llvm"))
return std::nullopt;
}
@@ -519,14 +519,12 @@ void MemProfiler::initializeCallbacks(Module &M) {
FunctionType::get(IRB.getVoidTy(), Args1, false));
}
MemProfMemmove = M.getOrInsertFunction(
- ClMemoryAccessCallbackPrefix + "memmove", IRB.getInt8PtrTy(),
- IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
+ ClMemoryAccessCallbackPrefix + "memmove", PtrTy, PtrTy, PtrTy, IntptrTy);
MemProfMemcpy = M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "memcpy",
- IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
- IRB.getInt8PtrTy(), IntptrTy);
- MemProfMemset = M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "memset",
- IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
- IRB.getInt32Ty(), IntptrTy);
+ PtrTy, PtrTy, PtrTy, IntptrTy);
+ MemProfMemset =
+ M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "memset", PtrTy,
+ PtrTy, IRB.getInt32Ty(), IntptrTy);
}
bool MemProfiler::maybeInsertMemProfInitAtFunctionEntry(Function &F) {
@@ -562,7 +560,7 @@ bool MemProfiler::instrumentFunction(Function &F) {
return false;
if (ClDebugFunc == F.getName())
return false;
- if (F.getName().startswith("__memprof_"))
+ if (F.getName().starts_with("__memprof_"))
return false;
bool FunctionModified = false;
@@ -628,7 +626,7 @@ static void addCallsiteMetadata(Instruction &I,
static uint64_t computeStackId(GlobalValue::GUID Function, uint32_t LineOffset,
uint32_t Column) {
- llvm::HashBuilder<llvm::TruncatedBLAKE3<8>, llvm::support::endianness::little>
+ llvm::HashBuilder<llvm::TruncatedBLAKE3<8>, llvm::endianness::little>
HashBuilder;
HashBuilder.add(Function, LineOffset, Column);
llvm::BLAKE3Result<8> Hash = HashBuilder.final();
@@ -678,13 +676,19 @@ static void readMemprof(Module &M, Function &F,
IndexedInstrProfReader *MemProfReader,
const TargetLibraryInfo &TLI) {
auto &Ctx = M.getContext();
-
- auto FuncName = getPGOFuncName(F);
+ // Previously we used getIRPGOFuncName() here. If F is local linkage,
+ // getIRPGOFuncName() returns FuncName with prefix 'FileName;'. But
+ // llvm-profdata uses FuncName in dwarf to create GUID which doesn't
+ // contain FileName's prefix. It caused local linkage function can't
+ // find MemProfRecord. So we use getName() now.
+ // 'unique-internal-linkage-names' can make MemProf work better for local
+ // linkage function.
+ auto FuncName = F.getName();
auto FuncGUID = Function::getGUID(FuncName);
- Expected<memprof::MemProfRecord> MemProfResult =
- MemProfReader->getMemProfRecord(FuncGUID);
- if (Error E = MemProfResult.takeError()) {
- handleAllErrors(std::move(E), [&](const InstrProfError &IPE) {
+ std::optional<memprof::MemProfRecord> MemProfRec;
+ auto Err = MemProfReader->getMemProfRecord(FuncGUID).moveInto(MemProfRec);
+ if (Err) {
+ handleAllErrors(std::move(Err), [&](const InstrProfError &IPE) {
auto Err = IPE.get();
bool SkipWarning = false;
LLVM_DEBUG(dbgs() << "Error in reading profile for Func " << FuncName
@@ -715,6 +719,12 @@ static void readMemprof(Module &M, Function &F,
return;
}
+ // Detect if there are non-zero column numbers in the profile. If not,
+ // treat all column numbers as 0 when matching (i.e. ignore any non-zero
+ // columns in the IR). The profiled binary might have been built with
+ // column numbers disabled, for example.
+ bool ProfileHasColumns = false;
+
// Build maps of the location hash to all profile data with that leaf location
// (allocation info and the callsites).
std::map<uint64_t, std::set<const AllocationInfo *>> LocHashToAllocInfo;
@@ -722,21 +732,22 @@ static void readMemprof(Module &M, Function &F,
// the frame array (see comments below where the map entries are added).
std::map<uint64_t, std::set<std::pair<const SmallVector<Frame> *, unsigned>>>
LocHashToCallSites;
- const auto MemProfRec = std::move(MemProfResult.get());
- for (auto &AI : MemProfRec.AllocSites) {
+ for (auto &AI : MemProfRec->AllocSites) {
// Associate the allocation info with the leaf frame. The later matching
// code will match any inlined call sequences in the IR with a longer prefix
// of call stack frames.
uint64_t StackId = computeStackId(AI.CallStack[0]);
LocHashToAllocInfo[StackId].insert(&AI);
+ ProfileHasColumns |= AI.CallStack[0].Column;
}
- for (auto &CS : MemProfRec.CallSites) {
+ for (auto &CS : MemProfRec->CallSites) {
// Need to record all frames from leaf up to and including this function,
// as any of these may or may not have been inlined at this point.
unsigned Idx = 0;
for (auto &StackFrame : CS) {
uint64_t StackId = computeStackId(StackFrame);
LocHashToCallSites[StackId].insert(std::make_pair(&CS, Idx++));
+ ProfileHasColumns |= StackFrame.Column;
// Once we find this function, we can stop recording.
if (StackFrame.Function == FuncGUID)
break;
@@ -785,21 +796,21 @@ static void readMemprof(Module &M, Function &F,
if (Name.empty())
Name = DIL->getScope()->getSubprogram()->getName();
auto CalleeGUID = Function::getGUID(Name);
- auto StackId =
- computeStackId(CalleeGUID, GetOffset(DIL), DIL->getColumn());
- // LeafFound will only be false on the first iteration, since we either
- // set it true or break out of the loop below.
+ auto StackId = computeStackId(CalleeGUID, GetOffset(DIL),
+ ProfileHasColumns ? DIL->getColumn() : 0);
+ // Check if we have found the profile's leaf frame. If yes, collect
+ // the rest of the call's inlined context starting here. If not, see if
+ // we find a match further up the inlined context (in case the profile
+ // was missing debug frames at the leaf).
if (!LeafFound) {
AllocInfoIter = LocHashToAllocInfo.find(StackId);
CallSitesIter = LocHashToCallSites.find(StackId);
- // Check if the leaf is in one of the maps. If not, no need to look
- // further at this call.
- if (AllocInfoIter == LocHashToAllocInfo.end() &&
- CallSitesIter == LocHashToCallSites.end())
- break;
- LeafFound = true;
+ if (AllocInfoIter != LocHashToAllocInfo.end() ||
+ CallSitesIter != LocHashToCallSites.end())
+ LeafFound = true;
}
- InlinedCallStack.push_back(StackId);
+ if (LeafFound)
+ InlinedCallStack.push_back(StackId);
}
// If leaf not in either of the maps, skip inst.
if (!LeafFound)
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 83d90049abc3..94af63da38c8 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -152,7 +152,6 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SetVector.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
@@ -550,6 +549,7 @@ public:
private:
friend struct MemorySanitizerVisitor;
+ friend struct VarArgHelperBase;
friend struct VarArgAMD64Helper;
friend struct VarArgMIPS64Helper;
friend struct VarArgAArch64Helper;
@@ -574,8 +574,9 @@ private:
Triple TargetTriple;
LLVMContext *C;
- Type *IntptrTy;
+ Type *IntptrTy; ///< Integer type with the size of a ptr in default AS.
Type *OriginTy;
+ PointerType *PtrTy; ///< Integer type with the size of a ptr in default AS.
// XxxTLS variables represent the per-thread state in MSan and per-task state
// in KMSAN.
@@ -595,16 +596,13 @@ private:
/// Thread-local origin storage for function return value.
Value *RetvalOriginTLS;
- /// Thread-local shadow storage for in-register va_arg function
- /// parameters (x86_64-specific).
+ /// Thread-local shadow storage for in-register va_arg function.
Value *VAArgTLS;
- /// Thread-local shadow storage for in-register va_arg function
- /// parameters (x86_64-specific).
+ /// Thread-local shadow storage for in-register va_arg function.
Value *VAArgOriginTLS;
- /// Thread-local shadow storage for va_arg overflow area
- /// (x86_64-specific).
+ /// Thread-local shadow storage for va_arg overflow area.
Value *VAArgOverflowSizeTLS;
/// Are the instrumentation callbacks set up?
@@ -823,11 +821,10 @@ void MemorySanitizer::createKernelApi(Module &M, const TargetLibraryInfo &TLI) {
PointerType::get(IRB.getInt8Ty(), 0), IRB.getInt64Ty());
// Functions for poisoning and unpoisoning memory.
- MsanPoisonAllocaFn =
- M.getOrInsertFunction("__msan_poison_alloca", IRB.getVoidTy(),
- IRB.getInt8PtrTy(), IntptrTy, IRB.getInt8PtrTy());
+ MsanPoisonAllocaFn = M.getOrInsertFunction(
+ "__msan_poison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
MsanUnpoisonAllocaFn = M.getOrInsertFunction(
- "__msan_unpoison_alloca", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy);
+ "__msan_unpoison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy);
}
static Constant *getOrInsertGlobal(Module &M, StringRef Name, Type *Ty) {
@@ -894,18 +891,18 @@ void MemorySanitizer::createUserspaceApi(Module &M, const TargetLibraryInfo &TLI
FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
FunctionName, TLI.getAttrList(C, {0, 2}, /*Signed=*/false),
- IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt8PtrTy(),
+ IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), PtrTy,
IRB.getInt32Ty());
}
- MsanSetAllocaOriginWithDescriptionFn = M.getOrInsertFunction(
- "__msan_set_alloca_origin_with_descr", IRB.getVoidTy(),
- IRB.getInt8PtrTy(), IntptrTy, IRB.getInt8PtrTy(), IRB.getInt8PtrTy());
- MsanSetAllocaOriginNoDescriptionFn = M.getOrInsertFunction(
- "__msan_set_alloca_origin_no_descr", IRB.getVoidTy(), IRB.getInt8PtrTy(),
- IntptrTy, IRB.getInt8PtrTy());
- MsanPoisonStackFn = M.getOrInsertFunction(
- "__msan_poison_stack", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy);
+ MsanSetAllocaOriginWithDescriptionFn =
+ M.getOrInsertFunction("__msan_set_alloca_origin_with_descr",
+ IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy, PtrTy);
+ MsanSetAllocaOriginNoDescriptionFn =
+ M.getOrInsertFunction("__msan_set_alloca_origin_no_descr",
+ IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
+ MsanPoisonStackFn = M.getOrInsertFunction("__msan_poison_stack",
+ IRB.getVoidTy(), PtrTy, IntptrTy);
}
/// Insert extern declaration of runtime-provided functions and globals.
@@ -923,16 +920,14 @@ void MemorySanitizer::initializeCallbacks(Module &M, const TargetLibraryInfo &TL
IRB.getInt32Ty());
MsanSetOriginFn = M.getOrInsertFunction(
"__msan_set_origin", TLI.getAttrList(C, {2}, /*Signed=*/false),
- IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy, IRB.getInt32Ty());
+ IRB.getVoidTy(), PtrTy, IntptrTy, IRB.getInt32Ty());
MemmoveFn =
- M.getOrInsertFunction("__msan_memmove", IRB.getInt8PtrTy(),
- IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
+ M.getOrInsertFunction("__msan_memmove", PtrTy, PtrTy, PtrTy, IntptrTy);
MemcpyFn =
- M.getOrInsertFunction("__msan_memcpy", IRB.getInt8PtrTy(),
- IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
- MemsetFn = M.getOrInsertFunction(
- "__msan_memset", TLI.getAttrList(C, {1}, /*Signed=*/true),
- IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy);
+ M.getOrInsertFunction("__msan_memcpy", PtrTy, PtrTy, PtrTy, IntptrTy);
+ MemsetFn = M.getOrInsertFunction("__msan_memset",
+ TLI.getAttrList(C, {1}, /*Signed=*/true),
+ PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);
MsanInstrumentAsmStoreFn =
M.getOrInsertFunction("__msan_instrument_asm_store", IRB.getVoidTy(),
@@ -1046,6 +1041,7 @@ void MemorySanitizer::initializeModule(Module &M) {
IRBuilder<> IRB(*C);
IntptrTy = IRB.getIntPtrTy(DL);
OriginTy = IRB.getInt32Ty();
+ PtrTy = IRB.getPtrTy();
ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);
@@ -1304,9 +1300,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
FunctionCallee Fn = MS.MaybeStoreOriginFn[SizeIndex];
Value *ConvertedShadow2 =
IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
- CallBase *CB = IRB.CreateCall(
- Fn, {ConvertedShadow2,
- IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()), Origin});
+ CallBase *CB = IRB.CreateCall(Fn, {ConvertedShadow2, Addr, Origin});
CB->addParamAttr(0, Attribute::ZExt);
CB->addParamAttr(2, Attribute::ZExt);
} else {
@@ -1676,7 +1670,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
VectTy->getElementCount());
}
assert(IntPtrTy == MS.IntptrTy);
- return ShadowTy->getPointerTo();
+ return PointerType::get(*MS.C, 0);
}
Constant *constToIntPtr(Type *IntPtrTy, uint64_t C) const {
@@ -1718,6 +1712,12 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
std::pair<Value *, Value *>
getShadowOriginPtrUserspace(Value *Addr, IRBuilder<> &IRB, Type *ShadowTy,
MaybeAlign Alignment) {
+ VectorType *VectTy = dyn_cast<VectorType>(Addr->getType());
+ if (!VectTy) {
+ assert(Addr->getType()->isPointerTy());
+ } else {
+ assert(VectTy->getElementType()->isPointerTy());
+ }
Type *IntptrTy = ptrToIntPtrType(Addr->getType());
Value *ShadowOffset = getShadowPtrOffset(Addr, IRB);
Value *ShadowLong = ShadowOffset;
@@ -1800,11 +1800,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// TODO: Support callbacs with vectors of addresses.
unsigned NumElements = cast<FixedVectorType>(VectTy)->getNumElements();
Value *ShadowPtrs = ConstantInt::getNullValue(
- FixedVectorType::get(ShadowTy->getPointerTo(), NumElements));
+ FixedVectorType::get(IRB.getPtrTy(), NumElements));
Value *OriginPtrs = nullptr;
if (MS.TrackOrigins)
OriginPtrs = ConstantInt::getNullValue(
- FixedVectorType::get(MS.OriginTy->getPointerTo(), NumElements));
+ FixedVectorType::get(IRB.getPtrTy(), NumElements));
for (unsigned i = 0; i < NumElements; ++i) {
Value *OneAddr =
IRB.CreateExtractElement(Addr, ConstantInt::get(IRB.getInt32Ty(), i));
@@ -1832,33 +1832,30 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// Compute the shadow address for a given function argument.
///
/// Shadow = ParamTLS+ArgOffset.
- Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB, int ArgOffset) {
+ Value *getShadowPtrForArgument(IRBuilder<> &IRB, int ArgOffset) {
Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
if (ArgOffset)
Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
- return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
- "_msarg");
+ return IRB.CreateIntToPtr(Base, IRB.getPtrTy(0), "_msarg");
}
/// Compute the origin address for a given function argument.
- Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB, int ArgOffset) {
+ Value *getOriginPtrForArgument(IRBuilder<> &IRB, int ArgOffset) {
if (!MS.TrackOrigins)
return nullptr;
Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
if (ArgOffset)
Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
- return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
- "_msarg_o");
+ return IRB.CreateIntToPtr(Base, IRB.getPtrTy(0), "_msarg_o");
}
/// Compute the shadow address for a retval.
- Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
- return IRB.CreatePointerCast(MS.RetvalTLS,
- PointerType::get(getShadowTy(A), 0), "_msret");
+ Value *getShadowPtrForRetval(IRBuilder<> &IRB) {
+ return IRB.CreatePointerCast(MS.RetvalTLS, IRB.getPtrTy(0), "_msret");
}
/// Compute the origin address for a retval.
- Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
+ Value *getOriginPtrForRetval() {
// We keep a single origin for the entire retval. Might be too optimistic.
return MS.RetvalOriginTLS;
}
@@ -1982,7 +1979,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
CpShadowPtr, Constant::getNullValue(EntryIRB.getInt8Ty()),
Size, ArgAlign);
} else {
- Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
+ Value *Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
const Align CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
Value *Cpy = EntryIRB.CreateMemCpy(CpShadowPtr, CopyAlign, Base,
CopyAlign, Size);
@@ -1991,7 +1988,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (MS.TrackOrigins) {
Value *OriginPtr =
- getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
+ getOriginPtrForArgument(EntryIRB, ArgOffset);
// FIXME: OriginSize should be:
// alignTo(V % kMinOriginAlignment + Size, kMinOriginAlignment)
unsigned OriginSize = alignTo(Size, kMinOriginAlignment);
@@ -2010,12 +2007,12 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
setOrigin(A, getCleanOrigin());
} else {
// Shadow over TLS
- Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
+ Value *Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg), Base,
kShadowTLSAlignment);
if (MS.TrackOrigins) {
Value *OriginPtr =
- getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
+ getOriginPtrForArgument(EntryIRB, ArgOffset);
setOrigin(A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));
}
}
@@ -2838,11 +2835,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void visitMemMoveInst(MemMoveInst &I) {
getShadow(I.getArgOperand(1)); // Ensure shadow initialized
IRBuilder<> IRB(&I);
- IRB.CreateCall(
- MS.MemmoveFn,
- {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
- IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
- IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
+ IRB.CreateCall(MS.MemmoveFn,
+ {I.getArgOperand(0), I.getArgOperand(1),
+ IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
I.eraseFromParent();
}
@@ -2863,11 +2858,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void visitMemCpyInst(MemCpyInst &I) {
getShadow(I.getArgOperand(1)); // Ensure shadow initialized
IRBuilder<> IRB(&I);
- IRB.CreateCall(
- MS.MemcpyFn,
- {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
- IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
- IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
+ IRB.CreateCall(MS.MemcpyFn,
+ {I.getArgOperand(0), I.getArgOperand(1),
+ IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
I.eraseFromParent();
}
@@ -2876,7 +2869,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
IRBuilder<> IRB(&I);
IRB.CreateCall(
MS.MemsetFn,
- {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
+ {I.getArgOperand(0),
IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
I.eraseFromParent();
@@ -3385,8 +3378,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Value *ShadowPtr =
getShadowOriginPtr(Addr, IRB, Ty, Align(1), /*isStore*/ true).first;
- IRB.CreateStore(getCleanShadow(Ty),
- IRB.CreatePointerCast(ShadowPtr, Ty->getPointerTo()));
+ IRB.CreateStore(getCleanShadow(Ty), ShadowPtr);
if (ClCheckAccessAddress)
insertShadowCheck(Addr, &I);
@@ -4162,7 +4154,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (Function *Func = CB.getCalledFunction()) {
// __sanitizer_unaligned_{load,store} functions may be called by users
// and always expects shadows in the TLS. So don't check them.
- MayCheckCall &= !Func->getName().startswith("__sanitizer_unaligned_");
+ MayCheckCall &= !Func->getName().starts_with("__sanitizer_unaligned_");
}
unsigned ArgOffset = 0;
@@ -4188,7 +4180,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// in that case getShadow() will copy the actual arg shadow to
// __msan_param_tls.
Value *ArgShadow = getShadow(A);
- Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
+ Value *ArgShadowBase = getShadowPtrForArgument(IRB, ArgOffset);
LLVM_DEBUG(dbgs() << " Arg#" << i << ": " << *A
<< " Shadow: " << *ArgShadow << "\n");
if (ByVal) {
@@ -4215,7 +4207,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Store = IRB.CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr,
Alignment, Size);
if (MS.TrackOrigins) {
- Value *ArgOriginBase = getOriginPtrForArgument(A, IRB, ArgOffset);
+ Value *ArgOriginBase = getOriginPtrForArgument(IRB, ArgOffset);
// FIXME: OriginSize should be:
// alignTo(A % kMinOriginAlignment + Size, kMinOriginAlignment)
unsigned OriginSize = alignTo(Size, kMinOriginAlignment);
@@ -4237,7 +4229,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Constant *Cst = dyn_cast<Constant>(ArgShadow);
if (MS.TrackOrigins && !(Cst && Cst->isNullValue())) {
IRB.CreateStore(getOrigin(A),
- getOriginPtrForArgument(A, IRB, ArgOffset));
+ getOriginPtrForArgument(IRB, ArgOffset));
}
}
(void)Store;
@@ -4269,7 +4261,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
IRBuilder<> IRBBefore(&CB);
// Until we have full dynamic coverage, make sure the retval shadow is 0.
- Value *Base = getShadowPtrForRetval(&CB, IRBBefore);
+ Value *Base = getShadowPtrForRetval(IRBBefore);
IRBBefore.CreateAlignedStore(getCleanShadow(&CB), Base,
kShadowTLSAlignment);
BasicBlock::iterator NextInsn;
@@ -4294,12 +4286,12 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
IRBuilder<> IRBAfter(&*NextInsn);
Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
- getShadowTy(&CB), getShadowPtrForRetval(&CB, IRBAfter),
+ getShadowTy(&CB), getShadowPtrForRetval(IRBAfter),
kShadowTLSAlignment, "_msret");
setShadow(&CB, RetvalShadow);
if (MS.TrackOrigins)
setOrigin(&CB, IRBAfter.CreateLoad(MS.OriginTy,
- getOriginPtrForRetval(IRBAfter)));
+ getOriginPtrForRetval()));
}
bool isAMustTailRetVal(Value *RetVal) {
@@ -4320,7 +4312,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// Don't emit the epilogue for musttail call returns.
if (isAMustTailRetVal(RetVal))
return;
- Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
+ Value *ShadowPtr = getShadowPtrForRetval(IRB);
bool HasNoUndef = F.hasRetAttribute(Attribute::NoUndef);
bool StoreShadow = !(MS.EagerChecks && HasNoUndef);
// FIXME: Consider using SpecialCaseList to specify a list of functions that
@@ -4340,7 +4332,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (StoreShadow) {
IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
if (MS.TrackOrigins && StoreOrigin)
- IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
+ IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval());
}
}
@@ -4374,8 +4366,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void poisonAllocaUserspace(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
if (PoisonStack && ClPoisonStackWithCall) {
- IRB.CreateCall(MS.MsanPoisonStackFn,
- {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len});
+ IRB.CreateCall(MS.MsanPoisonStackFn, {&I, Len});
} else {
Value *ShadowBase, *OriginBase;
std::tie(ShadowBase, OriginBase) = getShadowOriginPtr(
@@ -4390,13 +4381,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (ClPrintStackNames) {
Value *Descr = getLocalVarDescription(I);
IRB.CreateCall(MS.MsanSetAllocaOriginWithDescriptionFn,
- {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len,
- IRB.CreatePointerCast(Idptr, IRB.getInt8PtrTy()),
- IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy())});
+ {&I, Len, Idptr, Descr});
} else {
- IRB.CreateCall(MS.MsanSetAllocaOriginNoDescriptionFn,
- {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len,
- IRB.CreatePointerCast(Idptr, IRB.getInt8PtrTy())});
+ IRB.CreateCall(MS.MsanSetAllocaOriginNoDescriptionFn, {&I, Len, Idptr});
}
}
}
@@ -4404,12 +4391,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void poisonAllocaKmsan(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
Value *Descr = getLocalVarDescription(I);
if (PoisonStack) {
- IRB.CreateCall(MS.MsanPoisonAllocaFn,
- {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len,
- IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy())});
+ IRB.CreateCall(MS.MsanPoisonAllocaFn, {&I, Len, Descr});
} else {
- IRB.CreateCall(MS.MsanUnpoisonAllocaFn,
- {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len});
+ IRB.CreateCall(MS.MsanUnpoisonAllocaFn, {&I, Len});
}
}
@@ -4571,10 +4555,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
if (!ElemTy->isSized())
return;
- Value *Ptr = IRB.CreatePointerCast(Operand, IRB.getInt8PtrTy());
Value *SizeVal =
IRB.CreateTypeSize(MS.IntptrTy, DL.getTypeStoreSize(ElemTy));
- IRB.CreateCall(MS.MsanInstrumentAsmStoreFn, {Ptr, SizeVal});
+ IRB.CreateCall(MS.MsanInstrumentAsmStoreFn, {Operand, SizeVal});
}
/// Get the number of output arguments returned by pointers.
@@ -4668,8 +4651,91 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
};
+struct VarArgHelperBase : public VarArgHelper {
+ Function &F;
+ MemorySanitizer &MS;
+ MemorySanitizerVisitor &MSV;
+ SmallVector<CallInst *, 16> VAStartInstrumentationList;
+ const unsigned VAListTagSize;
+
+ VarArgHelperBase(Function &F, MemorySanitizer &MS,
+ MemorySanitizerVisitor &MSV, unsigned VAListTagSize)
+ : F(F), MS(MS), MSV(MSV), VAListTagSize(VAListTagSize) {}
+
+ Value *getShadowAddrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
+ Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
+ return IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
+ }
+
+ /// Compute the shadow address for a given va_arg.
+ Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
+ unsigned ArgOffset) {
+ Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
+ Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
+ return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
+ "_msarg_va_s");
+ }
+
+ /// Compute the shadow address for a given va_arg.
+ Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
+ unsigned ArgOffset, unsigned ArgSize) {
+ // Make sure we don't overflow __msan_va_arg_tls.
+ if (ArgOffset + ArgSize > kParamTLSSize)
+ return nullptr;
+ return getShadowPtrForVAArgument(Ty, IRB, ArgOffset);
+ }
+
+ /// Compute the origin address for a given va_arg.
+ Value *getOriginPtrForVAArgument(IRBuilder<> &IRB, int ArgOffset) {
+ Value *Base = IRB.CreatePointerCast(MS.VAArgOriginTLS, MS.IntptrTy);
+ // getOriginPtrForVAArgument() is always called after
+ // getShadowPtrForVAArgument(), so __msan_va_arg_origin_tls can never
+ // overflow.
+ Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
+ return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
+ "_msarg_va_o");
+ }
+
+ void CleanUnusedTLS(IRBuilder<> &IRB, Value *ShadowBase,
+ unsigned BaseOffset) {
+ // The tails of __msan_va_arg_tls is not large enough to fit full
+ // value shadow, but it will be copied to backup anyway. Make it
+ // clean.
+ if (BaseOffset >= kParamTLSSize)
+ return;
+ Value *TailSize =
+ ConstantInt::getSigned(IRB.getInt32Ty(), kParamTLSSize - BaseOffset);
+ IRB.CreateMemSet(ShadowBase, ConstantInt::getNullValue(IRB.getInt8Ty()),
+ TailSize, Align(8));
+ }
+
+ void unpoisonVAListTagForInst(IntrinsicInst &I) {
+ IRBuilder<> IRB(&I);
+ Value *VAListTag = I.getArgOperand(0);
+ const Align Alignment = Align(8);
+ auto [ShadowPtr, OriginPtr] = MSV.getShadowOriginPtr(
+ VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
+ // Unpoison the whole __va_list_tag.
+ IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
+ VAListTagSize, Alignment, false);
+ }
+
+ void visitVAStartInst(VAStartInst &I) override {
+ if (F.getCallingConv() == CallingConv::Win64)
+ return;
+ VAStartInstrumentationList.push_back(&I);
+ unpoisonVAListTagForInst(I);
+ }
+
+ void visitVACopyInst(VACopyInst &I) override {
+ if (F.getCallingConv() == CallingConv::Win64)
+ return;
+ unpoisonVAListTagForInst(I);
+ }
+};
+
/// AMD64-specific implementation of VarArgHelper.
-struct VarArgAMD64Helper : public VarArgHelper {
+struct VarArgAMD64Helper : public VarArgHelperBase {
// An unfortunate workaround for asymmetric lowering of va_arg stuff.
// See a comment in visitCallBase for more details.
static const unsigned AMD64GpEndOffset = 48; // AMD64 ABI Draft 0.99.6 p3.5.7
@@ -4678,20 +4744,15 @@ struct VarArgAMD64Helper : public VarArgHelper {
static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;
unsigned AMD64FpEndOffset;
- Function &F;
- MemorySanitizer &MS;
- MemorySanitizerVisitor &MSV;
AllocaInst *VAArgTLSCopy = nullptr;
AllocaInst *VAArgTLSOriginCopy = nullptr;
Value *VAArgOverflowSize = nullptr;
- SmallVector<CallInst *, 16> VAStartInstrumentationList;
-
enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
MemorySanitizerVisitor &MSV)
- : F(F), MS(MS), MSV(MSV) {
+ : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/24) {
AMD64FpEndOffset = AMD64FpEndOffsetSSE;
for (const auto &Attr : F.getAttributes().getFnAttrs()) {
if (Attr.isStringAttribute() &&
@@ -4706,6 +4767,8 @@ struct VarArgAMD64Helper : public VarArgHelper {
ArgKind classifyArgument(Value *arg) {
// A very rough approximation of X86_64 argument classification rules.
Type *T = arg->getType();
+ if (T->isX86_FP80Ty())
+ return AK_Memory;
if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
return AK_FloatingPoint;
if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
@@ -4728,6 +4791,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
unsigned FpOffset = AMD64GpEndOffset;
unsigned OverflowOffset = AMD64FpEndOffset;
const DataLayout &DL = F.getParent()->getDataLayout();
+
for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
@@ -4740,19 +4804,24 @@ struct VarArgAMD64Helper : public VarArgHelper {
assert(A->getType()->isPointerTy());
Type *RealTy = CB.getParamByValType(ArgNo);
uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
- Value *ShadowBase = getShadowPtrForVAArgument(
- RealTy, IRB, OverflowOffset, alignTo(ArgSize, 8));
+ uint64_t AlignedSize = alignTo(ArgSize, 8);
+ unsigned BaseOffset = OverflowOffset;
+ Value *ShadowBase =
+ getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset);
Value *OriginBase = nullptr;
if (MS.TrackOrigins)
- OriginBase = getOriginPtrForVAArgument(RealTy, IRB, OverflowOffset);
- OverflowOffset += alignTo(ArgSize, 8);
- if (!ShadowBase)
- continue;
+ OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
+ OverflowOffset += AlignedSize;
+
+ if (OverflowOffset > kParamTLSSize) {
+ CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
+ continue; // We have no space to copy shadow there.
+ }
+
Value *ShadowPtr, *OriginPtr;
std::tie(ShadowPtr, OriginPtr) =
MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), kShadowTLSAlignment,
/*isStore*/ false);
-
IRB.CreateMemCpy(ShadowBase, kShadowTLSAlignment, ShadowPtr,
kShadowTLSAlignment, ArgSize);
if (MS.TrackOrigins)
@@ -4767,37 +4836,42 @@ struct VarArgAMD64Helper : public VarArgHelper {
Value *ShadowBase, *OriginBase = nullptr;
switch (AK) {
case AK_GeneralPurpose:
- ShadowBase =
- getShadowPtrForVAArgument(A->getType(), IRB, GpOffset, 8);
+ ShadowBase = getShadowPtrForVAArgument(A->getType(), IRB, GpOffset);
if (MS.TrackOrigins)
- OriginBase = getOriginPtrForVAArgument(A->getType(), IRB, GpOffset);
+ OriginBase = getOriginPtrForVAArgument(IRB, GpOffset);
GpOffset += 8;
+ assert(GpOffset <= kParamTLSSize);
break;
case AK_FloatingPoint:
- ShadowBase =
- getShadowPtrForVAArgument(A->getType(), IRB, FpOffset, 16);
+ ShadowBase = getShadowPtrForVAArgument(A->getType(), IRB, FpOffset);
if (MS.TrackOrigins)
- OriginBase = getOriginPtrForVAArgument(A->getType(), IRB, FpOffset);
+ OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
FpOffset += 16;
+ assert(FpOffset <= kParamTLSSize);
break;
case AK_Memory:
if (IsFixed)
continue;
uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
+ uint64_t AlignedSize = alignTo(ArgSize, 8);
+ unsigned BaseOffset = OverflowOffset;
ShadowBase =
- getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset, 8);
- if (MS.TrackOrigins)
- OriginBase =
- getOriginPtrForVAArgument(A->getType(), IRB, OverflowOffset);
- OverflowOffset += alignTo(ArgSize, 8);
+ getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
+ if (MS.TrackOrigins) {
+ OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
+ }
+ OverflowOffset += AlignedSize;
+ if (OverflowOffset > kParamTLSSize) {
+ // We have no space to copy shadow there.
+ CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
+ continue;
+ }
}
// Take fixed arguments into account for GpOffset and FpOffset,
// but don't actually store shadows for them.
// TODO(glider): don't call get*PtrForVAArgument() for them.
if (IsFixed)
continue;
- if (!ShadowBase)
- continue;
Value *Shadow = MSV.getShadow(A);
IRB.CreateAlignedStore(Shadow, ShadowBase, kShadowTLSAlignment);
if (MS.TrackOrigins) {
@@ -4813,59 +4887,6 @@ struct VarArgAMD64Helper : public VarArgHelper {
IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
}
- /// Compute the shadow address for a given va_arg.
- Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
- unsigned ArgOffset, unsigned ArgSize) {
- // Make sure we don't overflow __msan_va_arg_tls.
- if (ArgOffset + ArgSize > kParamTLSSize)
- return nullptr;
- Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
- Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
- return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
- "_msarg_va_s");
- }
-
- /// Compute the origin address for a given va_arg.
- Value *getOriginPtrForVAArgument(Type *Ty, IRBuilder<> &IRB, int ArgOffset) {
- Value *Base = IRB.CreatePointerCast(MS.VAArgOriginTLS, MS.IntptrTy);
- // getOriginPtrForVAArgument() is always called after
- // getShadowPtrForVAArgument(), so __msan_va_arg_origin_tls can never
- // overflow.
- Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
- return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
- "_msarg_va_o");
- }
-
- void unpoisonVAListTagForInst(IntrinsicInst &I) {
- IRBuilder<> IRB(&I);
- Value *VAListTag = I.getArgOperand(0);
- Value *ShadowPtr, *OriginPtr;
- const Align Alignment = Align(8);
- std::tie(ShadowPtr, OriginPtr) =
- MSV.getShadowOriginPtr(VAListTag, IRB, IRB.getInt8Ty(), Alignment,
- /*isStore*/ true);
-
- // Unpoison the whole __va_list_tag.
- // FIXME: magic ABI constants.
- IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
- /* size */ 24, Alignment, false);
- // We shouldn't need to zero out the origins, as they're only checked for
- // nonzero shadow.
- }
-
- void visitVAStartInst(VAStartInst &I) override {
- if (F.getCallingConv() == CallingConv::Win64)
- return;
- VAStartInstrumentationList.push_back(&I);
- unpoisonVAListTagForInst(I);
- }
-
- void visitVACopyInst(VACopyInst &I) override {
- if (F.getCallingConv() == CallingConv::Win64)
- return;
- unpoisonVAListTagForInst(I);
- }
-
void finalizeInstrumentation() override {
assert(!VAArgOverflowSize && !VAArgTLSCopy &&
"finalizeInstrumentation called twice");
@@ -4902,7 +4923,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
NextNodeIRBuilder IRB(OrigInst);
Value *VAListTag = OrigInst->getArgOperand(0);
- Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
+ Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C); // i64*
Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
ConstantInt::get(MS.IntptrTy, 16)),
@@ -4919,7 +4940,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
if (MS.TrackOrigins)
IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
Alignment, AMD64FpEndOffset);
- Type *OverflowArgAreaPtrTy = Type::getInt64PtrTy(*MS.C);
+ Type *OverflowArgAreaPtrTy = PointerType::getUnqual(*MS.C); // i64*
Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
ConstantInt::get(MS.IntptrTy, 8)),
@@ -4945,18 +4966,14 @@ struct VarArgAMD64Helper : public VarArgHelper {
};
/// MIPS64-specific implementation of VarArgHelper.
-struct VarArgMIPS64Helper : public VarArgHelper {
- Function &F;
- MemorySanitizer &MS;
- MemorySanitizerVisitor &MSV;
+/// NOTE: This is also used for LoongArch64.
+struct VarArgMIPS64Helper : public VarArgHelperBase {
AllocaInst *VAArgTLSCopy = nullptr;
Value *VAArgSize = nullptr;
- SmallVector<CallInst *, 16> VAStartInstrumentationList;
-
VarArgMIPS64Helper(Function &F, MemorySanitizer &MS,
MemorySanitizerVisitor &MSV)
- : F(F), MS(MS), MSV(MSV) {}
+ : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/8) {}
void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
unsigned VAArgOffset = 0;
@@ -4986,42 +5003,6 @@ struct VarArgMIPS64Helper : public VarArgHelper {
IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
}
- /// Compute the shadow address for a given va_arg.
- Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
- unsigned ArgOffset, unsigned ArgSize) {
- // Make sure we don't overflow __msan_va_arg_tls.
- if (ArgOffset + ArgSize > kParamTLSSize)
- return nullptr;
- Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
- Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
- return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
- "_msarg");
- }
-
- void visitVAStartInst(VAStartInst &I) override {
- IRBuilder<> IRB(&I);
- VAStartInstrumentationList.push_back(&I);
- Value *VAListTag = I.getArgOperand(0);
- Value *ShadowPtr, *OriginPtr;
- const Align Alignment = Align(8);
- std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
- VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
- IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
- /* size */ 8, Alignment, false);
- }
-
- void visitVACopyInst(VACopyInst &I) override {
- IRBuilder<> IRB(&I);
- VAStartInstrumentationList.push_back(&I);
- Value *VAListTag = I.getArgOperand(0);
- Value *ShadowPtr, *OriginPtr;
- const Align Alignment = Align(8);
- std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
- VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
- IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
- /* size */ 8, Alignment, false);
- }
-
void finalizeInstrumentation() override {
assert(!VAArgSize && !VAArgTLSCopy &&
"finalizeInstrumentation called twice");
@@ -5051,7 +5032,7 @@ struct VarArgMIPS64Helper : public VarArgHelper {
CallInst *OrigInst = VAStartInstrumentationList[i];
NextNodeIRBuilder IRB(OrigInst);
Value *VAListTag = OrigInst->getArgOperand(0);
- Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
+ Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C); // i64*
Value *RegSaveAreaPtrPtr =
IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
PointerType::get(RegSaveAreaPtrTy, 0));
@@ -5069,7 +5050,7 @@ struct VarArgMIPS64Helper : public VarArgHelper {
};
/// AArch64-specific implementation of VarArgHelper.
-struct VarArgAArch64Helper : public VarArgHelper {
+struct VarArgAArch64Helper : public VarArgHelperBase {
static const unsigned kAArch64GrArgSize = 64;
static const unsigned kAArch64VrArgSize = 128;
@@ -5081,28 +5062,36 @@ struct VarArgAArch64Helper : public VarArgHelper {
AArch64VrBegOffset + kAArch64VrArgSize;
static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;
- Function &F;
- MemorySanitizer &MS;
- MemorySanitizerVisitor &MSV;
AllocaInst *VAArgTLSCopy = nullptr;
Value *VAArgOverflowSize = nullptr;
- SmallVector<CallInst *, 16> VAStartInstrumentationList;
-
enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
MemorySanitizerVisitor &MSV)
- : F(F), MS(MS), MSV(MSV) {}
+ : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/32) {}
- ArgKind classifyArgument(Value *arg) {
- Type *T = arg->getType();
- if (T->isFPOrFPVectorTy())
- return AK_FloatingPoint;
- if ((T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64) ||
- (T->isPointerTy()))
- return AK_GeneralPurpose;
- return AK_Memory;
+ // A very rough approximation of aarch64 argument classification rules.
+ std::pair<ArgKind, uint64_t> classifyArgument(Type *T) {
+ if (T->isIntOrPtrTy() && T->getPrimitiveSizeInBits() <= 64)
+ return {AK_GeneralPurpose, 1};
+ if (T->isFloatingPointTy() && T->getPrimitiveSizeInBits() <= 128)
+ return {AK_FloatingPoint, 1};
+
+ if (T->isArrayTy()) {
+ auto R = classifyArgument(T->getArrayElementType());
+ R.second *= T->getScalarType()->getArrayNumElements();
+ return R;
+ }
+
+ if (const FixedVectorType *FV = dyn_cast<FixedVectorType>(T)) {
+ auto R = classifyArgument(FV->getScalarType());
+ R.second *= FV->getNumElements();
+ return R;
+ }
+
+ LLVM_DEBUG(errs() << "Unknown vararg type: " << *T << "\n");
+ return {AK_Memory, 0};
}
// The instrumentation stores the argument shadow in a non ABI-specific
@@ -5110,7 +5099,7 @@ struct VarArgAArch64Helper : public VarArgHelper {
// like x86_64 case, lowers the va_args in the frontend and this pass only
// sees the low level code that deals with va_list internals).
// The first seven GR registers are saved in the first 56 bytes of the
- // va_arg tls arra, followers by the first 8 FP/SIMD registers, and then
+ // va_arg tls arra, followed by the first 8 FP/SIMD registers, and then
// the remaining arguments.
// Using constant offset within the va_arg TLS array allows fast copy
// in the finalize instrumentation.
@@ -5122,20 +5111,22 @@ struct VarArgAArch64Helper : public VarArgHelper {
const DataLayout &DL = F.getParent()->getDataLayout();
for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
- ArgKind AK = classifyArgument(A);
- if (AK == AK_GeneralPurpose && GrOffset >= AArch64GrEndOffset)
+ auto [AK, RegNum] = classifyArgument(A->getType());
+ if (AK == AK_GeneralPurpose &&
+ (GrOffset + RegNum * 8) > AArch64GrEndOffset)
AK = AK_Memory;
- if (AK == AK_FloatingPoint && VrOffset >= AArch64VrEndOffset)
+ if (AK == AK_FloatingPoint &&
+ (VrOffset + RegNum * 16) > AArch64VrEndOffset)
AK = AK_Memory;
Value *Base;
switch (AK) {
case AK_GeneralPurpose:
- Base = getShadowPtrForVAArgument(A->getType(), IRB, GrOffset, 8);
- GrOffset += 8;
+ Base = getShadowPtrForVAArgument(A->getType(), IRB, GrOffset);
+ GrOffset += 8 * RegNum;
break;
case AK_FloatingPoint:
- Base = getShadowPtrForVAArgument(A->getType(), IRB, VrOffset, 8);
- VrOffset += 16;
+ Base = getShadowPtrForVAArgument(A->getType(), IRB, VrOffset);
+ VrOffset += 16 * RegNum;
break;
case AK_Memory:
// Don't count fixed arguments in the overflow area - va_start will
@@ -5143,17 +5134,21 @@ struct VarArgAArch64Helper : public VarArgHelper {
if (IsFixed)
continue;
uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
- Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset,
- alignTo(ArgSize, 8));
- OverflowOffset += alignTo(ArgSize, 8);
+ uint64_t AlignedSize = alignTo(ArgSize, 8);
+ unsigned BaseOffset = OverflowOffset;
+ Base = getShadowPtrForVAArgument(A->getType(), IRB, BaseOffset);
+ OverflowOffset += AlignedSize;
+ if (OverflowOffset > kParamTLSSize) {
+ // We have no space to copy shadow there.
+ CleanUnusedTLS(IRB, Base, BaseOffset);
+ continue;
+ }
break;
}
// Count Gp/Vr fixed arguments to their respective offsets, but don't
// bother to actually store a shadow.
if (IsFixed)
continue;
- if (!Base)
- continue;
IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
}
Constant *OverflowSize =
@@ -5161,48 +5156,12 @@ struct VarArgAArch64Helper : public VarArgHelper {
IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
}
- /// Compute the shadow address for a given va_arg.
- Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
- unsigned ArgOffset, unsigned ArgSize) {
- // Make sure we don't overflow __msan_va_arg_tls.
- if (ArgOffset + ArgSize > kParamTLSSize)
- return nullptr;
- Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
- Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
- return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
- "_msarg");
- }
-
- void visitVAStartInst(VAStartInst &I) override {
- IRBuilder<> IRB(&I);
- VAStartInstrumentationList.push_back(&I);
- Value *VAListTag = I.getArgOperand(0);
- Value *ShadowPtr, *OriginPtr;
- const Align Alignment = Align(8);
- std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
- VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
- IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
- /* size */ 32, Alignment, false);
- }
-
- void visitVACopyInst(VACopyInst &I) override {
- IRBuilder<> IRB(&I);
- VAStartInstrumentationList.push_back(&I);
- Value *VAListTag = I.getArgOperand(0);
- Value *ShadowPtr, *OriginPtr;
- const Align Alignment = Align(8);
- std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
- VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
- IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
- /* size */ 32, Alignment, false);
- }
-
// Retrieve a va_list field of 'void*' size.
Value *getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
Value *SaveAreaPtrPtr = IRB.CreateIntToPtr(
IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
ConstantInt::get(MS.IntptrTy, offset)),
- Type::getInt64PtrTy(*MS.C));
+ PointerType::get(*MS.C, 0));
return IRB.CreateLoad(Type::getInt64Ty(*MS.C), SaveAreaPtrPtr);
}
@@ -5211,7 +5170,7 @@ struct VarArgAArch64Helper : public VarArgHelper {
Value *SaveAreaPtr = IRB.CreateIntToPtr(
IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
ConstantInt::get(MS.IntptrTy, offset)),
- Type::getInt32PtrTy(*MS.C));
+ PointerType::get(*MS.C, 0));
Value *SaveArea32 = IRB.CreateLoad(IRB.getInt32Ty(), SaveAreaPtr);
return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
}
@@ -5262,21 +5221,25 @@ struct VarArgAArch64Helper : public VarArgHelper {
// we need to adjust the offset for both GR and VR fields based on
// the __{gr,vr}_offs value (since they are stores based on incoming
// named arguments).
+ Type *RegSaveAreaPtrTy = IRB.getPtrTy();
// Read the stack pointer from the va_list.
- Value *StackSaveAreaPtr = getVAField64(IRB, VAListTag, 0);
+ Value *StackSaveAreaPtr =
+ IRB.CreateIntToPtr(getVAField64(IRB, VAListTag, 0), RegSaveAreaPtrTy);
// Read both the __gr_top and __gr_off and add them up.
Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
- Value *GrRegSaveAreaPtr = IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea);
+ Value *GrRegSaveAreaPtr = IRB.CreateIntToPtr(
+ IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea), RegSaveAreaPtrTy);
// Read both the __vr_top and __vr_off and add them up.
Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
- Value *VrRegSaveAreaPtr = IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea);
+ Value *VrRegSaveAreaPtr = IRB.CreateIntToPtr(
+ IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea), RegSaveAreaPtrTy);
// It does not know how many named arguments is being used and, on the
// callsite all the arguments were saved. Since __gr_off is defined as
@@ -5332,18 +5295,13 @@ struct VarArgAArch64Helper : public VarArgHelper {
};
/// PowerPC64-specific implementation of VarArgHelper.
-struct VarArgPowerPC64Helper : public VarArgHelper {
- Function &F;
- MemorySanitizer &MS;
- MemorySanitizerVisitor &MSV;
+struct VarArgPowerPC64Helper : public VarArgHelperBase {
AllocaInst *VAArgTLSCopy = nullptr;
Value *VAArgSize = nullptr;
- SmallVector<CallInst *, 16> VAStartInstrumentationList;
-
VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
MemorySanitizerVisitor &MSV)
- : F(F), MS(MS), MSV(MSV) {}
+ : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/8) {}
void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
// For PowerPC, we need to deal with alignment of stack arguments -
@@ -5431,43 +5389,6 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
}
- /// Compute the shadow address for a given va_arg.
- Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
- unsigned ArgOffset, unsigned ArgSize) {
- // Make sure we don't overflow __msan_va_arg_tls.
- if (ArgOffset + ArgSize > kParamTLSSize)
- return nullptr;
- Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
- Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
- return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
- "_msarg");
- }
-
- void visitVAStartInst(VAStartInst &I) override {
- IRBuilder<> IRB(&I);
- VAStartInstrumentationList.push_back(&I);
- Value *VAListTag = I.getArgOperand(0);
- Value *ShadowPtr, *OriginPtr;
- const Align Alignment = Align(8);
- std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
- VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
- IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
- /* size */ 8, Alignment, false);
- }
-
- void visitVACopyInst(VACopyInst &I) override {
- IRBuilder<> IRB(&I);
- Value *VAListTag = I.getArgOperand(0);
- Value *ShadowPtr, *OriginPtr;
- const Align Alignment = Align(8);
- std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
- VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
- // Unpoison the whole __va_list_tag.
- // FIXME: magic ABI constants.
- IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
- /* size */ 8, Alignment, false);
- }
-
void finalizeInstrumentation() override {
assert(!VAArgSize && !VAArgTLSCopy &&
"finalizeInstrumentation called twice");
@@ -5498,7 +5419,7 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
CallInst *OrigInst = VAStartInstrumentationList[i];
NextNodeIRBuilder IRB(OrigInst);
Value *VAListTag = OrigInst->getArgOperand(0);
- Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
+ Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C); // i64*
Value *RegSaveAreaPtrPtr =
IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
PointerType::get(RegSaveAreaPtrTy, 0));
@@ -5516,7 +5437,7 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
};
/// SystemZ-specific implementation of VarArgHelper.
-struct VarArgSystemZHelper : public VarArgHelper {
+struct VarArgSystemZHelper : public VarArgHelperBase {
static const unsigned SystemZGpOffset = 16;
static const unsigned SystemZGpEndOffset = 56;
static const unsigned SystemZFpOffset = 128;
@@ -5528,16 +5449,11 @@ struct VarArgSystemZHelper : public VarArgHelper {
static const unsigned SystemZOverflowArgAreaPtrOffset = 16;
static const unsigned SystemZRegSaveAreaPtrOffset = 24;
- Function &F;
- MemorySanitizer &MS;
- MemorySanitizerVisitor &MSV;
bool IsSoftFloatABI;
AllocaInst *VAArgTLSCopy = nullptr;
AllocaInst *VAArgTLSOriginCopy = nullptr;
Value *VAArgOverflowSize = nullptr;
- SmallVector<CallInst *, 16> VAStartInstrumentationList;
-
enum class ArgKind {
GeneralPurpose,
FloatingPoint,
@@ -5550,7 +5466,7 @@ struct VarArgSystemZHelper : public VarArgHelper {
VarArgSystemZHelper(Function &F, MemorySanitizer &MS,
MemorySanitizerVisitor &MSV)
- : F(F), MS(MS), MSV(MSV),
+ : VarArgHelperBase(F, MS, MSV, SystemZVAListTagSize),
IsSoftFloatABI(F.getFnAttribute("use-soft-float").getValueAsBool()) {}
ArgKind classifyArgument(Type *T) {
@@ -5711,39 +5627,8 @@ struct VarArgSystemZHelper : public VarArgHelper {
IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
}
- Value *getShadowAddrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
- Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
- return IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
- }
-
- Value *getOriginPtrForVAArgument(IRBuilder<> &IRB, int ArgOffset) {
- Value *Base = IRB.CreatePointerCast(MS.VAArgOriginTLS, MS.IntptrTy);
- Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
- return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
- "_msarg_va_o");
- }
-
- void unpoisonVAListTagForInst(IntrinsicInst &I) {
- IRBuilder<> IRB(&I);
- Value *VAListTag = I.getArgOperand(0);
- Value *ShadowPtr, *OriginPtr;
- const Align Alignment = Align(8);
- std::tie(ShadowPtr, OriginPtr) =
- MSV.getShadowOriginPtr(VAListTag, IRB, IRB.getInt8Ty(), Alignment,
- /*isStore*/ true);
- IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
- SystemZVAListTagSize, Alignment, false);
- }
-
- void visitVAStartInst(VAStartInst &I) override {
- VAStartInstrumentationList.push_back(&I);
- unpoisonVAListTagForInst(I);
- }
-
- void visitVACopyInst(VACopyInst &I) override { unpoisonVAListTagForInst(I); }
-
void copyRegSaveArea(IRBuilder<> &IRB, Value *VAListTag) {
- Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
+ Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C); // i64*
Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
IRB.CreateAdd(
IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
@@ -5767,8 +5652,10 @@ struct VarArgSystemZHelper : public VarArgHelper {
Alignment, RegSaveAreaSize);
}
+ // FIXME: This implementation limits OverflowOffset to kParamTLSSize, so we
+ // don't know real overflow size and can't clear shadow beyond kParamTLSSize.
void copyOverflowArea(IRBuilder<> &IRB, Value *VAListTag) {
- Type *OverflowArgAreaPtrTy = Type::getInt64PtrTy(*MS.C);
+ Type *OverflowArgAreaPtrTy = PointerType::getUnqual(*MS.C); // i64*
Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
IRB.CreateAdd(
IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
@@ -5836,6 +5723,10 @@ struct VarArgSystemZHelper : public VarArgHelper {
}
};
+// Loongarch64 is not a MIPS, but the current vargs calling convention matches
+// the MIPS.
+using VarArgLoongArch64Helper = VarArgMIPS64Helper;
+
/// A no-op implementation of VarArgHelper.
struct VarArgNoOpHelper : public VarArgHelper {
VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
@@ -5868,6 +5759,8 @@ static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
return new VarArgPowerPC64Helper(Func, Msan, Visitor);
else if (TargetTriple.getArch() == Triple::systemz)
return new VarArgSystemZHelper(Func, Msan, Visitor);
+ else if (TargetTriple.isLoongArch64())
+ return new VarArgLoongArch64Helper(Func, Msan, Visitor);
else
return new VarArgNoOpHelper(Func, Msan, Visitor);
}
diff --git a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
index 3c8f25d73c62..4a5a0b25bebb 100644
--- a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
+++ b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
@@ -327,7 +327,6 @@ extern cl::opt<PGOViewCountsType> PGOViewCounts;
// Defined in Analysis/BlockFrequencyInfo.cpp: -view-bfi-func-name=
extern cl::opt<std::string> ViewBlockFreqFuncName;
-extern cl::opt<bool> DebugInfoCorrelate;
} // namespace llvm
static cl::opt<bool>
@@ -525,6 +524,7 @@ public:
std::vector<std::vector<VPCandidateInfo>> ValueSites;
SelectInstVisitor SIVisitor;
std::string FuncName;
+ std::string DeprecatedFuncName;
GlobalVariable *FuncNameVar;
// CFG hash value for this function.
@@ -582,21 +582,22 @@ public:
if (!IsCS) {
NumOfPGOSelectInsts += SIVisitor.getNumOfSelectInsts();
NumOfPGOMemIntrinsics += ValueSites[IPVK_MemOPSize].size();
- NumOfPGOBB += MST.BBInfos.size();
+ NumOfPGOBB += MST.bbInfoSize();
ValueSites[IPVK_IndirectCallTarget] = VPC.get(IPVK_IndirectCallTarget);
} else {
NumOfCSPGOSelectInsts += SIVisitor.getNumOfSelectInsts();
NumOfCSPGOMemIntrinsics += ValueSites[IPVK_MemOPSize].size();
- NumOfCSPGOBB += MST.BBInfos.size();
+ NumOfCSPGOBB += MST.bbInfoSize();
}
- FuncName = getPGOFuncName(F);
+ FuncName = getIRPGOFuncName(F);
+ DeprecatedFuncName = getPGOFuncName(F);
computeCFGHash();
if (!ComdatMembers.empty())
renameComdatFunction();
LLVM_DEBUG(dumpInfo("after CFGMST"));
- for (auto &E : MST.AllEdges) {
+ for (const auto &E : MST.allEdges()) {
if (E->Removed)
continue;
IsCS ? NumOfCSPGOEdge++ : NumOfPGOEdge++;
@@ -639,7 +640,7 @@ void FuncPGOInstrumentation<Edge, BBInfo>::computeCFGHash() {
FunctionHash = (uint64_t)SIVisitor.getNumOfSelectInsts() << 56 |
(uint64_t)ValueSites[IPVK_IndirectCallTarget].size() << 48 |
//(uint64_t)ValueSites[IPVK_MemOPSize].size() << 40 |
- (uint64_t)MST.AllEdges.size() << 32 | JC.getCRC();
+ (uint64_t)MST.numEdges() << 32 | JC.getCRC();
} else {
// The higher 32 bits.
auto updateJCH = [&JCH](uint64_t Num) {
@@ -653,7 +654,7 @@ void FuncPGOInstrumentation<Edge, BBInfo>::computeCFGHash() {
if (BCI) {
updateJCH(BCI->getInstrumentedBlocksHash());
} else {
- updateJCH((uint64_t)MST.AllEdges.size());
+ updateJCH((uint64_t)MST.numEdges());
}
// Hash format for context sensitive profile. Reserve 4 bits for other
@@ -668,7 +669,7 @@ void FuncPGOInstrumentation<Edge, BBInfo>::computeCFGHash() {
LLVM_DEBUG(dbgs() << "Function Hash Computation for " << F.getName() << ":\n"
<< " CRC = " << JC.getCRC()
<< ", Selects = " << SIVisitor.getNumOfSelectInsts()
- << ", Edges = " << MST.AllEdges.size() << ", ICSites = "
+ << ", Edges = " << MST.numEdges() << ", ICSites = "
<< ValueSites[IPVK_IndirectCallTarget].size());
if (!PGOOldCFGHashing) {
LLVM_DEBUG(dbgs() << ", Memops = " << ValueSites[IPVK_MemOPSize].size()
@@ -756,8 +757,8 @@ void FuncPGOInstrumentation<Edge, BBInfo>::getInstrumentBBs(
// Use a worklist as we will update the vector during the iteration.
std::vector<Edge *> EdgeList;
- EdgeList.reserve(MST.AllEdges.size());
- for (auto &E : MST.AllEdges)
+ EdgeList.reserve(MST.numEdges());
+ for (const auto &E : MST.allEdges())
EdgeList.push_back(E.get());
for (auto &E : EdgeList) {
@@ -874,8 +875,7 @@ static void instrumentOneFunc(
F, TLI, ComdatMembers, true, BPI, BFI, IsCS, PGOInstrumentEntry,
PGOBlockCoverage);
- Type *I8PtrTy = Type::getInt8PtrTy(M->getContext());
- auto Name = ConstantExpr::getBitCast(FuncInfo.FuncNameVar, I8PtrTy);
+ auto Name = FuncInfo.FuncNameVar;
auto CFGHash = ConstantInt::get(Type::getInt64Ty(M->getContext()),
FuncInfo.FunctionHash);
if (PGOFunctionEntryCoverage) {
@@ -964,9 +964,8 @@ static void instrumentOneFunc(
populateEHOperandBundle(Cand, BlockColors, OpBundles);
Builder.CreateCall(
Intrinsic::getDeclaration(M, Intrinsic::instrprof_value_profile),
- {ConstantExpr::getBitCast(FuncInfo.FuncNameVar, I8PtrTy),
- Builder.getInt64(FuncInfo.FunctionHash), ToProfile,
- Builder.getInt32(Kind), Builder.getInt32(SiteIndex++)},
+ {FuncInfo.FuncNameVar, Builder.getInt64(FuncInfo.FunctionHash),
+ ToProfile, Builder.getInt32(Kind), Builder.getInt32(SiteIndex++)},
OpBundles);
}
} // IPVK_First <= Kind <= IPVK_Last
@@ -1164,12 +1163,12 @@ private:
} // end anonymous namespace
/// Set up InEdges/OutEdges for all BBs in the MST.
-static void
-setupBBInfoEdges(FuncPGOInstrumentation<PGOUseEdge, PGOUseBBInfo> &FuncInfo) {
+static void setupBBInfoEdges(
+ const FuncPGOInstrumentation<PGOUseEdge, PGOUseBBInfo> &FuncInfo) {
// This is not required when there is block coverage inference.
if (FuncInfo.BCI)
return;
- for (auto &E : FuncInfo.MST.AllEdges) {
+ for (const auto &E : FuncInfo.MST.allEdges()) {
if (E->Removed)
continue;
const BasicBlock *SrcBB = E->SrcBB;
@@ -1225,7 +1224,7 @@ bool PGOUseFunc::setInstrumentedCounts(
// Set the profile count the Instrumented edges. There are BBs that not in
// MST but not instrumented. Need to set the edge count value so that we can
// populate the profile counts later.
- for (auto &E : FuncInfo.MST.AllEdges) {
+ for (const auto &E : FuncInfo.MST.allEdges()) {
if (E->Removed || E->InMST)
continue;
const BasicBlock *SrcBB = E->SrcBB;
@@ -1336,7 +1335,8 @@ bool PGOUseFunc::readCounters(IndexedInstrProfReader *PGOReader, bool &AllZeros,
auto &Ctx = M->getContext();
uint64_t MismatchedFuncSum = 0;
Expected<InstrProfRecord> Result = PGOReader->getInstrProfRecord(
- FuncInfo.FuncName, FuncInfo.FunctionHash, &MismatchedFuncSum);
+ FuncInfo.FuncName, FuncInfo.FunctionHash, FuncInfo.DeprecatedFuncName,
+ &MismatchedFuncSum);
if (Error E = Result.takeError()) {
handleInstrProfError(std::move(E), MismatchedFuncSum);
return false;
@@ -1381,7 +1381,8 @@ bool PGOUseFunc::readCounters(IndexedInstrProfReader *PGOReader, bool &AllZeros,
void PGOUseFunc::populateCoverage(IndexedInstrProfReader *PGOReader) {
uint64_t MismatchedFuncSum = 0;
Expected<InstrProfRecord> Result = PGOReader->getInstrProfRecord(
- FuncInfo.FuncName, FuncInfo.FunctionHash, &MismatchedFuncSum);
+ FuncInfo.FuncName, FuncInfo.FunctionHash, FuncInfo.DeprecatedFuncName,
+ &MismatchedFuncSum);
if (auto Err = Result.takeError()) {
handleInstrProfError(std::move(Err), MismatchedFuncSum);
return;
@@ -1436,12 +1437,11 @@ void PGOUseFunc::populateCoverage(IndexedInstrProfReader *PGOReader) {
// If A is uncovered, set weight=1.
// This setup will allow BFI to give nonzero profile counts to only covered
// blocks.
- SmallVector<unsigned, 4> Weights;
+ SmallVector<uint32_t, 4> Weights;
for (auto *Succ : successors(&BB))
Weights.push_back((Coverage[Succ] || !Coverage[&BB]) ? 1 : 0);
if (Weights.size() >= 2)
- BB.getTerminator()->setMetadata(LLVMContext::MD_prof,
- MDB.createBranchWeights(Weights));
+ llvm::setBranchWeights(*BB.getTerminator(), Weights);
}
unsigned NumCorruptCoverage = 0;
@@ -1647,12 +1647,10 @@ void SelectInstVisitor::instrumentOneSelectInst(SelectInst &SI) {
Module *M = F.getParent();
IRBuilder<> Builder(&SI);
Type *Int64Ty = Builder.getInt64Ty();
- Type *I8PtrTy = Builder.getInt8PtrTy();
auto *Step = Builder.CreateZExt(SI.getCondition(), Int64Ty);
Builder.CreateCall(
Intrinsic::getDeclaration(M, Intrinsic::instrprof_increment_step),
- {ConstantExpr::getBitCast(FuncNameVar, I8PtrTy),
- Builder.getInt64(FuncHash), Builder.getInt32(TotalNumCtrs),
+ {FuncNameVar, Builder.getInt64(FuncHash), Builder.getInt32(TotalNumCtrs),
Builder.getInt32(*CurCtrIdx), Step});
++(*CurCtrIdx);
}
@@ -1757,17 +1755,10 @@ static void collectComdatMembers(
ComdatMembers.insert(std::make_pair(C, &GA));
}
-// Don't perform PGO instrumeatnion / profile-use.
-static bool skipPGO(const Function &F) {
+// Return true if we should not find instrumentation data for this function
+static bool skipPGOUse(const Function &F) {
if (F.isDeclaration())
return true;
- if (F.hasFnAttribute(llvm::Attribute::NoProfile))
- return true;
- if (F.hasFnAttribute(llvm::Attribute::SkipProfile))
- return true;
- if (F.getInstructionCount() < PGOFunctionSizeThreshold)
- return true;
-
// If there are too many critical edges, PGO might cause
// compiler time problem. Skip PGO if the number of
// critical edges execeed the threshold.
@@ -1785,7 +1776,19 @@ static bool skipPGO(const Function &F) {
<< " exceed the threshold. Skip PGO.\n");
return true;
}
+ return false;
+}
+// Return true if we should not instrument this function
+static bool skipPGOGen(const Function &F) {
+ if (skipPGOUse(F))
+ return true;
+ if (F.hasFnAttribute(llvm::Attribute::NoProfile))
+ return true;
+ if (F.hasFnAttribute(llvm::Attribute::SkipProfile))
+ return true;
+ if (F.getInstructionCount() < PGOFunctionSizeThreshold)
+ return true;
return false;
}
@@ -1801,7 +1804,7 @@ static bool InstrumentAllFunctions(
collectComdatMembers(M, ComdatMembers);
for (auto &F : M) {
- if (skipPGO(F))
+ if (skipPGOGen(F))
continue;
auto &TLI = LookupTLI(F);
auto *BPI = LookupBPI(F);
@@ -2028,7 +2031,7 @@ static bool annotateAllFunctions(
InstrumentFuncEntry = PGOInstrumentEntry;
bool HasSingleByteCoverage = PGOReader->hasSingleByteCoverage();
for (auto &F : M) {
- if (skipPGO(F))
+ if (skipPGOUse(F))
continue;
auto &TLI = LookupTLI(F);
auto *BPI = LookupBPI(F);
@@ -2201,7 +2204,6 @@ static std::string getSimpleNodeName(const BasicBlock *Node) {
void llvm::setProfMetadata(Module *M, Instruction *TI,
ArrayRef<uint64_t> EdgeCounts, uint64_t MaxCount) {
- MDBuilder MDB(M->getContext());
assert(MaxCount > 0 && "Bad max count");
uint64_t Scale = calculateCountScale(MaxCount);
SmallVector<unsigned, 4> Weights;
@@ -2215,7 +2217,7 @@ void llvm::setProfMetadata(Module *M, Instruction *TI,
misexpect::checkExpectAnnotations(*TI, Weights, /*IsFrontend=*/false);
- TI->setMetadata(LLVMContext::MD_prof, MDB.createBranchWeights(Weights));
+ setBranchWeights(*TI, Weights);
if (EmitBranchProbability) {
std::string BrCondStr = getBranchCondString(TI);
if (BrCondStr.empty())
diff --git a/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp b/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp
index 2906fe190984..fd0f69eca96e 100644
--- a/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp
+++ b/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp
@@ -378,7 +378,7 @@ bool MemOPSizeOpt::perform(MemOp MO) {
assert(It != DefaultBB->end());
BasicBlock *MergeBB = SplitBlock(DefaultBB, &(*It), DT);
MergeBB->setName("MemOP.Merge");
- BFI.setBlockFreq(MergeBB, OrigBBFreq.getFrequency());
+ BFI.setBlockFreq(MergeBB, OrigBBFreq);
DefaultBB->setName("MemOP.Default");
DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
diff --git a/llvm/lib/Transforms/Instrumentation/SanitizerBinaryMetadata.cpp b/llvm/lib/Transforms/Instrumentation/SanitizerBinaryMetadata.cpp
index d83a3a991c89..230bb8b0a5dc 100644
--- a/llvm/lib/Transforms/Instrumentation/SanitizerBinaryMetadata.cpp
+++ b/llvm/lib/Transforms/Instrumentation/SanitizerBinaryMetadata.cpp
@@ -198,17 +198,16 @@ bool SanitizerBinaryMetadata::run() {
// metadata features.
//
- auto *Int8PtrTy = IRB.getInt8PtrTy();
- auto *Int8PtrPtrTy = PointerType::getUnqual(Int8PtrTy);
+ auto *PtrTy = IRB.getPtrTy();
auto *Int32Ty = IRB.getInt32Ty();
- const std::array<Type *, 3> InitTypes = {Int32Ty, Int8PtrPtrTy, Int8PtrPtrTy};
+ const std::array<Type *, 3> InitTypes = {Int32Ty, PtrTy, PtrTy};
auto *Version = ConstantInt::get(Int32Ty, getVersion());
for (const MetadataInfo *MI : MIS) {
const std::array<Value *, InitTypes.size()> InitArgs = {
Version,
- getSectionMarker(getSectionStart(MI->SectionSuffix), Int8PtrTy),
- getSectionMarker(getSectionEnd(MI->SectionSuffix), Int8PtrTy),
+ getSectionMarker(getSectionStart(MI->SectionSuffix), PtrTy),
+ getSectionMarker(getSectionEnd(MI->SectionSuffix), PtrTy),
};
// We declare the _add and _del functions as weak, and only call them if
// there is a valid symbol linked. This allows building binaries with
@@ -306,11 +305,11 @@ bool isUARSafeCall(CallInst *CI) {
// It's safe to both pass pointers to local variables to them
// and to tail-call them.
return F && (F->isIntrinsic() || F->doesNotReturn() ||
- F->getName().startswith("__asan_") ||
- F->getName().startswith("__hwsan_") ||
- F->getName().startswith("__ubsan_") ||
- F->getName().startswith("__msan_") ||
- F->getName().startswith("__tsan_"));
+ F->getName().starts_with("__asan_") ||
+ F->getName().starts_with("__hwsan_") ||
+ F->getName().starts_with("__ubsan_") ||
+ F->getName().starts_with("__msan_") ||
+ F->getName().starts_with("__tsan_"));
}
bool hasUseAfterReturnUnsafeUses(Value &V) {
@@ -368,11 +367,11 @@ bool SanitizerBinaryMetadata::pretendAtomicAccess(const Value *Addr) {
const auto OF = Triple(Mod.getTargetTriple()).getObjectFormat();
const auto ProfSec =
getInstrProfSectionName(IPSK_cnts, OF, /*AddSegmentInfo=*/false);
- if (GV->getSection().endswith(ProfSec))
+ if (GV->getSection().ends_with(ProfSec))
return true;
}
- if (GV->getName().startswith("__llvm_gcov") ||
- GV->getName().startswith("__llvm_gcda"))
+ if (GV->getName().starts_with("__llvm_gcov") ||
+ GV->getName().starts_with("__llvm_gcda"))
return true;
return false;
diff --git a/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp b/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
index f22918141f6e..906687663519 100644
--- a/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
+++ b/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
@@ -261,9 +261,7 @@ private:
FunctionCallee SanCovTraceGepFunction;
FunctionCallee SanCovTraceSwitchFunction;
GlobalVariable *SanCovLowestStack;
- Type *Int128PtrTy, *IntptrTy, *IntptrPtrTy, *Int64Ty, *Int64PtrTy, *Int32Ty,
- *Int32PtrTy, *Int16PtrTy, *Int16Ty, *Int8Ty, *Int8PtrTy, *Int1Ty,
- *Int1PtrTy;
+ Type *PtrTy, *IntptrTy, *Int64Ty, *Int32Ty, *Int16Ty, *Int8Ty, *Int1Ty;
Module *CurModule;
std::string CurModuleUniqueId;
Triple TargetTriple;
@@ -331,11 +329,10 @@ ModuleSanitizerCoverage::CreateSecStartEnd(Module &M, const char *Section,
// Account for the fact that on windows-msvc __start_* symbols actually
// point to a uint64_t before the start of the array.
- auto SecStartI8Ptr = IRB.CreatePointerCast(SecStart, Int8PtrTy);
+ auto SecStartI8Ptr = IRB.CreatePointerCast(SecStart, PtrTy);
auto GEP = IRB.CreateGEP(Int8Ty, SecStartI8Ptr,
ConstantInt::get(IntptrTy, sizeof(uint64_t)));
- return std::make_pair(IRB.CreatePointerCast(GEP, PointerType::getUnqual(Ty)),
- SecEnd);
+ return std::make_pair(GEP, SecEnd);
}
Function *ModuleSanitizerCoverage::CreateInitCallsForSections(
@@ -345,7 +342,6 @@ Function *ModuleSanitizerCoverage::CreateInitCallsForSections(
auto SecStart = SecStartEnd.first;
auto SecEnd = SecStartEnd.second;
Function *CtorFunc;
- Type *PtrTy = PointerType::getUnqual(Ty);
std::tie(CtorFunc, std::ignore) = createSanitizerCtorAndInitFunctions(
M, CtorName, InitFunctionName, {PtrTy, PtrTy}, {SecStart, SecEnd});
assert(CtorFunc->getName() == CtorName);
@@ -391,15 +387,9 @@ bool ModuleSanitizerCoverage::instrumentModule(
FunctionPCsArray = nullptr;
FunctionCFsArray = nullptr;
IntptrTy = Type::getIntNTy(*C, DL->getPointerSizeInBits());
- IntptrPtrTy = PointerType::getUnqual(IntptrTy);
+ PtrTy = PointerType::getUnqual(*C);
Type *VoidTy = Type::getVoidTy(*C);
IRBuilder<> IRB(*C);
- Int128PtrTy = PointerType::getUnqual(IRB.getInt128Ty());
- Int64PtrTy = PointerType::getUnqual(IRB.getInt64Ty());
- Int16PtrTy = PointerType::getUnqual(IRB.getInt16Ty());
- Int32PtrTy = PointerType::getUnqual(IRB.getInt32Ty());
- Int8PtrTy = PointerType::getUnqual(IRB.getInt8Ty());
- Int1PtrTy = PointerType::getUnqual(IRB.getInt1Ty());
Int64Ty = IRB.getInt64Ty();
Int32Ty = IRB.getInt32Ty();
Int16Ty = IRB.getInt16Ty();
@@ -438,26 +428,26 @@ bool ModuleSanitizerCoverage::instrumentModule(
M.getOrInsertFunction(SanCovTraceConstCmp8, VoidTy, Int64Ty, Int64Ty);
// Loads.
- SanCovLoadFunction[0] = M.getOrInsertFunction(SanCovLoad1, VoidTy, Int8PtrTy);
+ SanCovLoadFunction[0] = M.getOrInsertFunction(SanCovLoad1, VoidTy, PtrTy);
SanCovLoadFunction[1] =
- M.getOrInsertFunction(SanCovLoad2, VoidTy, Int16PtrTy);
+ M.getOrInsertFunction(SanCovLoad2, VoidTy, PtrTy);
SanCovLoadFunction[2] =
- M.getOrInsertFunction(SanCovLoad4, VoidTy, Int32PtrTy);
+ M.getOrInsertFunction(SanCovLoad4, VoidTy, PtrTy);
SanCovLoadFunction[3] =
- M.getOrInsertFunction(SanCovLoad8, VoidTy, Int64PtrTy);
+ M.getOrInsertFunction(SanCovLoad8, VoidTy, PtrTy);
SanCovLoadFunction[4] =
- M.getOrInsertFunction(SanCovLoad16, VoidTy, Int128PtrTy);
+ M.getOrInsertFunction(SanCovLoad16, VoidTy, PtrTy);
// Stores.
SanCovStoreFunction[0] =
- M.getOrInsertFunction(SanCovStore1, VoidTy, Int8PtrTy);
+ M.getOrInsertFunction(SanCovStore1, VoidTy, PtrTy);
SanCovStoreFunction[1] =
- M.getOrInsertFunction(SanCovStore2, VoidTy, Int16PtrTy);
+ M.getOrInsertFunction(SanCovStore2, VoidTy, PtrTy);
SanCovStoreFunction[2] =
- M.getOrInsertFunction(SanCovStore4, VoidTy, Int32PtrTy);
+ M.getOrInsertFunction(SanCovStore4, VoidTy, PtrTy);
SanCovStoreFunction[3] =
- M.getOrInsertFunction(SanCovStore8, VoidTy, Int64PtrTy);
+ M.getOrInsertFunction(SanCovStore8, VoidTy, PtrTy);
SanCovStoreFunction[4] =
- M.getOrInsertFunction(SanCovStore16, VoidTy, Int128PtrTy);
+ M.getOrInsertFunction(SanCovStore16, VoidTy, PtrTy);
{
AttributeList AL;
@@ -470,7 +460,7 @@ bool ModuleSanitizerCoverage::instrumentModule(
SanCovTraceGepFunction =
M.getOrInsertFunction(SanCovTraceGep, VoidTy, IntptrTy);
SanCovTraceSwitchFunction =
- M.getOrInsertFunction(SanCovTraceSwitchName, VoidTy, Int64Ty, Int64PtrTy);
+ M.getOrInsertFunction(SanCovTraceSwitchName, VoidTy, Int64Ty, PtrTy);
Constant *SanCovLowestStackConstant =
M.getOrInsertGlobal(SanCovLowestStackName, IntptrTy);
@@ -487,7 +477,7 @@ bool ModuleSanitizerCoverage::instrumentModule(
SanCovTracePC = M.getOrInsertFunction(SanCovTracePCName, VoidTy);
SanCovTracePCGuard =
- M.getOrInsertFunction(SanCovTracePCGuardName, VoidTy, Int32PtrTy);
+ M.getOrInsertFunction(SanCovTracePCGuardName, VoidTy, PtrTy);
for (auto &F : M)
instrumentFunction(F, DTCallback, PDTCallback);
@@ -510,7 +500,7 @@ bool ModuleSanitizerCoverage::instrumentModule(
if (Ctor && Options.PCTable) {
auto SecStartEnd = CreateSecStartEnd(M, SanCovPCsSectionName, IntptrTy);
FunctionCallee InitFunction = declareSanitizerInitFunction(
- M, SanCovPCsInitName, {IntptrPtrTy, IntptrPtrTy});
+ M, SanCovPCsInitName, {PtrTy, PtrTy});
IRBuilder<> IRBCtor(Ctor->getEntryBlock().getTerminator());
IRBCtor.CreateCall(InitFunction, {SecStartEnd.first, SecStartEnd.second});
}
@@ -518,7 +508,7 @@ bool ModuleSanitizerCoverage::instrumentModule(
if (Ctor && Options.CollectControlFlow) {
auto SecStartEnd = CreateSecStartEnd(M, SanCovCFsSectionName, IntptrTy);
FunctionCallee InitFunction = declareSanitizerInitFunction(
- M, SanCovCFsInitName, {IntptrPtrTy, IntptrPtrTy});
+ M, SanCovCFsInitName, {PtrTy, PtrTy});
IRBuilder<> IRBCtor(Ctor->getEntryBlock().getTerminator());
IRBCtor.CreateCall(InitFunction, {SecStartEnd.first, SecStartEnd.second});
}
@@ -616,7 +606,7 @@ void ModuleSanitizerCoverage::instrumentFunction(
return;
if (F.getName().find(".module_ctor") != std::string::npos)
return; // Should not instrument sanitizer init functions.
- if (F.getName().startswith("__sanitizer_"))
+ if (F.getName().starts_with("__sanitizer_"))
return; // Don't instrument __sanitizer_* callbacks.
// Don't touch available_externally functions, their actual body is elewhere.
if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage)
@@ -744,19 +734,19 @@ ModuleSanitizerCoverage::CreatePCArray(Function &F,
IRBuilder<> IRB(&*F.getEntryBlock().getFirstInsertionPt());
for (size_t i = 0; i < N; i++) {
if (&F.getEntryBlock() == AllBlocks[i]) {
- PCs.push_back((Constant *)IRB.CreatePointerCast(&F, IntptrPtrTy));
+ PCs.push_back((Constant *)IRB.CreatePointerCast(&F, PtrTy));
PCs.push_back((Constant *)IRB.CreateIntToPtr(
- ConstantInt::get(IntptrTy, 1), IntptrPtrTy));
+ ConstantInt::get(IntptrTy, 1), PtrTy));
} else {
PCs.push_back((Constant *)IRB.CreatePointerCast(
- BlockAddress::get(AllBlocks[i]), IntptrPtrTy));
- PCs.push_back(Constant::getNullValue(IntptrPtrTy));
+ BlockAddress::get(AllBlocks[i]), PtrTy));
+ PCs.push_back(Constant::getNullValue(PtrTy));
}
}
- auto *PCArray = CreateFunctionLocalArrayInSection(N * 2, F, IntptrPtrTy,
+ auto *PCArray = CreateFunctionLocalArrayInSection(N * 2, F, PtrTy,
SanCovPCsSectionName);
PCArray->setInitializer(
- ConstantArray::get(ArrayType::get(IntptrPtrTy, N * 2), PCs));
+ ConstantArray::get(ArrayType::get(PtrTy, N * 2), PCs));
PCArray->setConstant(true);
return PCArray;
@@ -833,10 +823,9 @@ void ModuleSanitizerCoverage::InjectTraceForSwitch(
Int64Ty->getScalarSizeInBits())
Cond = IRB.CreateIntCast(Cond, Int64Ty, false);
for (auto It : SI->cases()) {
- Constant *C = It.getCaseValue();
- if (C->getType()->getScalarSizeInBits() <
- Int64Ty->getScalarSizeInBits())
- C = ConstantExpr::getCast(CastInst::ZExt, It.getCaseValue(), Int64Ty);
+ ConstantInt *C = It.getCaseValue();
+ if (C->getType()->getScalarSizeInBits() < 64)
+ C = ConstantInt::get(C->getContext(), C->getValue().zext(64));
Initializers.push_back(C);
}
llvm::sort(drop_begin(Initializers, 2),
@@ -850,7 +839,7 @@ void ModuleSanitizerCoverage::InjectTraceForSwitch(
ConstantArray::get(ArrayOfInt64Ty, Initializers),
"__sancov_gen_cov_switch_values");
IRB.CreateCall(SanCovTraceSwitchFunction,
- {Cond, IRB.CreatePointerCast(GV, Int64PtrTy)});
+ {Cond, IRB.CreatePointerCast(GV, PtrTy)});
}
}
}
@@ -895,16 +884,13 @@ void ModuleSanitizerCoverage::InjectTraceForLoadsAndStores(
: TypeSize == 128 ? 4
: -1;
};
- Type *PointerType[5] = {Int8PtrTy, Int16PtrTy, Int32PtrTy, Int64PtrTy,
- Int128PtrTy};
for (auto *LI : Loads) {
InstrumentationIRBuilder IRB(LI);
auto Ptr = LI->getPointerOperand();
int Idx = CallbackIdx(LI->getType());
if (Idx < 0)
continue;
- IRB.CreateCall(SanCovLoadFunction[Idx],
- IRB.CreatePointerCast(Ptr, PointerType[Idx]));
+ IRB.CreateCall(SanCovLoadFunction[Idx], Ptr);
}
for (auto *SI : Stores) {
InstrumentationIRBuilder IRB(SI);
@@ -912,8 +898,7 @@ void ModuleSanitizerCoverage::InjectTraceForLoadsAndStores(
int Idx = CallbackIdx(SI->getValueOperand()->getType());
if (Idx < 0)
continue;
- IRB.CreateCall(SanCovStoreFunction[Idx],
- IRB.CreatePointerCast(Ptr, PointerType[Idx]));
+ IRB.CreateCall(SanCovStoreFunction[Idx], Ptr);
}
}
@@ -978,7 +963,7 @@ void ModuleSanitizerCoverage::InjectCoverageAtBlock(Function &F, BasicBlock &BB,
auto GuardPtr = IRB.CreateIntToPtr(
IRB.CreateAdd(IRB.CreatePointerCast(FunctionGuardArray, IntptrTy),
ConstantInt::get(IntptrTy, Idx * 4)),
- Int32PtrTy);
+ PtrTy);
IRB.CreateCall(SanCovTracePCGuard, GuardPtr)->setCannotMerge();
}
if (Options.Inline8bitCounters) {
@@ -1008,7 +993,7 @@ void ModuleSanitizerCoverage::InjectCoverageAtBlock(Function &F, BasicBlock &BB,
Module *M = F.getParent();
Function *GetFrameAddr = Intrinsic::getDeclaration(
M, Intrinsic::frameaddress,
- IRB.getInt8PtrTy(M->getDataLayout().getAllocaAddrSpace()));
+ IRB.getPtrTy(M->getDataLayout().getAllocaAddrSpace()));
auto FrameAddrPtr =
IRB.CreateCall(GetFrameAddr, {Constant::getNullValue(Int32Ty)});
auto FrameAddrInt = IRB.CreatePtrToInt(FrameAddrPtr, IntptrTy);
@@ -1059,40 +1044,40 @@ void ModuleSanitizerCoverage::createFunctionControlFlow(Function &F) {
for (auto &BB : F) {
// blockaddress can not be used on function's entry block.
if (&BB == &F.getEntryBlock())
- CFs.push_back((Constant *)IRB.CreatePointerCast(&F, IntptrPtrTy));
+ CFs.push_back((Constant *)IRB.CreatePointerCast(&F, PtrTy));
else
CFs.push_back((Constant *)IRB.CreatePointerCast(BlockAddress::get(&BB),
- IntptrPtrTy));
+ PtrTy));
for (auto SuccBB : successors(&BB)) {
assert(SuccBB != &F.getEntryBlock());
CFs.push_back((Constant *)IRB.CreatePointerCast(BlockAddress::get(SuccBB),
- IntptrPtrTy));
+ PtrTy));
}
- CFs.push_back((Constant *)Constant::getNullValue(IntptrPtrTy));
+ CFs.push_back((Constant *)Constant::getNullValue(PtrTy));
for (auto &Inst : BB) {
if (CallBase *CB = dyn_cast<CallBase>(&Inst)) {
if (CB->isIndirectCall()) {
// TODO(navidem): handle indirect calls, for now mark its existence.
CFs.push_back((Constant *)IRB.CreateIntToPtr(
- ConstantInt::get(IntptrTy, -1), IntptrPtrTy));
+ ConstantInt::get(IntptrTy, -1), PtrTy));
} else {
auto CalledF = CB->getCalledFunction();
if (CalledF && !CalledF->isIntrinsic())
CFs.push_back(
- (Constant *)IRB.CreatePointerCast(CalledF, IntptrPtrTy));
+ (Constant *)IRB.CreatePointerCast(CalledF, PtrTy));
}
}
}
- CFs.push_back((Constant *)Constant::getNullValue(IntptrPtrTy));
+ CFs.push_back((Constant *)Constant::getNullValue(PtrTy));
}
FunctionCFsArray = CreateFunctionLocalArrayInSection(
- CFs.size(), F, IntptrPtrTy, SanCovCFsSectionName);
+ CFs.size(), F, PtrTy, SanCovCFsSectionName);
FunctionCFsArray->setInitializer(
- ConstantArray::get(ArrayType::get(IntptrPtrTy, CFs.size()), CFs));
+ ConstantArray::get(ArrayType::get(PtrTy, CFs.size()), CFs));
FunctionCFsArray->setConstant(true);
}
diff --git a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index ce35eefb63fa..8ee0bca7e354 100644
--- a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -205,7 +205,7 @@ void ThreadSanitizer::initialize(Module &M, const TargetLibraryInfo &TLI) {
Attr = Attr.addFnAttribute(Ctx, Attribute::NoUnwind);
// Initialize the callbacks.
TsanFuncEntry = M.getOrInsertFunction("__tsan_func_entry", Attr,
- IRB.getVoidTy(), IRB.getInt8PtrTy());
+ IRB.getVoidTy(), IRB.getPtrTy());
TsanFuncExit =
M.getOrInsertFunction("__tsan_func_exit", Attr, IRB.getVoidTy());
TsanIgnoreBegin = M.getOrInsertFunction("__tsan_ignore_thread_begin", Attr,
@@ -220,49 +220,49 @@ void ThreadSanitizer::initialize(Module &M, const TargetLibraryInfo &TLI) {
std::string BitSizeStr = utostr(BitSize);
SmallString<32> ReadName("__tsan_read" + ByteSizeStr);
TsanRead[i] = M.getOrInsertFunction(ReadName, Attr, IRB.getVoidTy(),
- IRB.getInt8PtrTy());
+ IRB.getPtrTy());
SmallString<32> WriteName("__tsan_write" + ByteSizeStr);
TsanWrite[i] = M.getOrInsertFunction(WriteName, Attr, IRB.getVoidTy(),
- IRB.getInt8PtrTy());
+ IRB.getPtrTy());
SmallString<64> UnalignedReadName("__tsan_unaligned_read" + ByteSizeStr);
TsanUnalignedRead[i] = M.getOrInsertFunction(
- UnalignedReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+ UnalignedReadName, Attr, IRB.getVoidTy(), IRB.getPtrTy());
SmallString<64> UnalignedWriteName("__tsan_unaligned_write" + ByteSizeStr);
TsanUnalignedWrite[i] = M.getOrInsertFunction(
- UnalignedWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+ UnalignedWriteName, Attr, IRB.getVoidTy(), IRB.getPtrTy());
SmallString<64> VolatileReadName("__tsan_volatile_read" + ByteSizeStr);
TsanVolatileRead[i] = M.getOrInsertFunction(
- VolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+ VolatileReadName, Attr, IRB.getVoidTy(), IRB.getPtrTy());
SmallString<64> VolatileWriteName("__tsan_volatile_write" + ByteSizeStr);
TsanVolatileWrite[i] = M.getOrInsertFunction(
- VolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+ VolatileWriteName, Attr, IRB.getVoidTy(), IRB.getPtrTy());
SmallString<64> UnalignedVolatileReadName("__tsan_unaligned_volatile_read" +
ByteSizeStr);
TsanUnalignedVolatileRead[i] = M.getOrInsertFunction(
- UnalignedVolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+ UnalignedVolatileReadName, Attr, IRB.getVoidTy(), IRB.getPtrTy());
SmallString<64> UnalignedVolatileWriteName(
"__tsan_unaligned_volatile_write" + ByteSizeStr);
TsanUnalignedVolatileWrite[i] = M.getOrInsertFunction(
- UnalignedVolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+ UnalignedVolatileWriteName, Attr, IRB.getVoidTy(), IRB.getPtrTy());
SmallString<64> CompoundRWName("__tsan_read_write" + ByteSizeStr);
TsanCompoundRW[i] = M.getOrInsertFunction(
- CompoundRWName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+ CompoundRWName, Attr, IRB.getVoidTy(), IRB.getPtrTy());
SmallString<64> UnalignedCompoundRWName("__tsan_unaligned_read_write" +
ByteSizeStr);
TsanUnalignedCompoundRW[i] = M.getOrInsertFunction(
- UnalignedCompoundRWName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+ UnalignedCompoundRWName, Attr, IRB.getVoidTy(), IRB.getPtrTy());
Type *Ty = Type::getIntNTy(Ctx, BitSize);
- Type *PtrTy = Ty->getPointerTo();
+ Type *PtrTy = PointerType::get(Ctx, 0);
SmallString<32> AtomicLoadName("__tsan_atomic" + BitSizeStr + "_load");
TsanAtomicLoad[i] =
M.getOrInsertFunction(AtomicLoadName,
@@ -318,9 +318,9 @@ void ThreadSanitizer::initialize(Module &M, const TargetLibraryInfo &TLI) {
}
TsanVptrUpdate =
M.getOrInsertFunction("__tsan_vptr_update", Attr, IRB.getVoidTy(),
- IRB.getInt8PtrTy(), IRB.getInt8PtrTy());
+ IRB.getPtrTy(), IRB.getPtrTy());
TsanVptrLoad = M.getOrInsertFunction("__tsan_vptr_read", Attr,
- IRB.getVoidTy(), IRB.getInt8PtrTy());
+ IRB.getVoidTy(), IRB.getPtrTy());
TsanAtomicThreadFence = M.getOrInsertFunction(
"__tsan_atomic_thread_fence",
TLI.getAttrList(&Ctx, {0}, /*Signed=*/true, /*Ret=*/false, Attr),
@@ -332,15 +332,15 @@ void ThreadSanitizer::initialize(Module &M, const TargetLibraryInfo &TLI) {
IRB.getVoidTy(), OrdTy);
MemmoveFn =
- M.getOrInsertFunction("__tsan_memmove", Attr, IRB.getInt8PtrTy(),
- IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
+ M.getOrInsertFunction("__tsan_memmove", Attr, IRB.getPtrTy(),
+ IRB.getPtrTy(), IRB.getPtrTy(), IntptrTy);
MemcpyFn =
- M.getOrInsertFunction("__tsan_memcpy", Attr, IRB.getInt8PtrTy(),
- IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
+ M.getOrInsertFunction("__tsan_memcpy", Attr, IRB.getPtrTy(),
+ IRB.getPtrTy(), IRB.getPtrTy(), IntptrTy);
MemsetFn = M.getOrInsertFunction(
"__tsan_memset",
TLI.getAttrList(&Ctx, {1}, /*Signed=*/true, /*Ret=*/false, Attr),
- IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy);
+ IRB.getPtrTy(), IRB.getPtrTy(), IRB.getInt32Ty(), IntptrTy);
}
static bool isVtableAccess(Instruction *I) {
@@ -360,15 +360,10 @@ static bool shouldInstrumentReadWriteFromAddress(const Module *M, Value *Addr) {
StringRef SectionName = GV->getSection();
// Check if the global is in the PGO counters section.
auto OF = Triple(M->getTargetTriple()).getObjectFormat();
- if (SectionName.endswith(
+ if (SectionName.ends_with(
getInstrProfSectionName(IPSK_cnts, OF, /*AddSegmentInfo=*/false)))
return false;
}
-
- // Check if the global is private gcov data.
- if (GV->getName().startswith("__llvm_gcov") ||
- GV->getName().startswith("__llvm_gcda"))
- return false;
}
// Do not instrument accesses from different address spaces; we cannot deal
@@ -522,6 +517,9 @@ bool ThreadSanitizer::sanitizeFunction(Function &F,
// Traverse all instructions, collect loads/stores/returns, check for calls.
for (auto &BB : F) {
for (auto &Inst : BB) {
+ // Skip instructions inserted by another instrumentation.
+ if (Inst.hasMetadata(LLVMContext::MD_nosanitize))
+ continue;
if (isTsanAtomic(&Inst))
AtomicAccesses.push_back(&Inst);
else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst))
@@ -613,17 +611,14 @@ bool ThreadSanitizer::instrumentLoadOrStore(const InstructionInfo &II,
StoredValue = IRB.CreateExtractElement(
StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
if (StoredValue->getType()->isIntegerTy())
- StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
+ StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getPtrTy());
// Call TsanVptrUpdate.
- IRB.CreateCall(TsanVptrUpdate,
- {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
- IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy())});
+ IRB.CreateCall(TsanVptrUpdate, {Addr, StoredValue});
NumInstrumentedVtableWrites++;
return true;
}
if (!IsWrite && isVtableAccess(II.Inst)) {
- IRB.CreateCall(TsanVptrLoad,
- IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
+ IRB.CreateCall(TsanVptrLoad, Addr);
NumInstrumentedVtableReads++;
return true;
}
@@ -655,7 +650,7 @@ bool ThreadSanitizer::instrumentLoadOrStore(const InstructionInfo &II,
else
OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
}
- IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
+ IRB.CreateCall(OnAccessFunc, Addr);
if (IsCompoundRW || IsWrite)
NumInstrumentedWrites++;
if (IsCompoundRW || !IsWrite)
@@ -691,17 +686,19 @@ static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
InstrumentationIRBuilder IRB(I);
if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
+ Value *Cast1 = IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false);
+ Value *Cast2 = IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false);
IRB.CreateCall(
MemsetFn,
- {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
- IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
- IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
+ {M->getArgOperand(0),
+ Cast1,
+ Cast2});
I->eraseFromParent();
} else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
IRB.CreateCall(
isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
- {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
- IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
+ {M->getArgOperand(0),
+ M->getArgOperand(1),
IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
I->eraseFromParent();
}
@@ -724,11 +721,7 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) {
int Idx = getMemoryAccessFuncIndex(OrigTy, Addr, DL);
if (Idx < 0)
return false;
- const unsigned ByteSize = 1U << Idx;
- const unsigned BitSize = ByteSize * 8;
- Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
- Type *PtrTy = Ty->getPointerTo();
- Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
+ Value *Args[] = {Addr,
createOrdering(&IRB, LI->getOrdering())};
Value *C = IRB.CreateCall(TsanAtomicLoad[Idx], Args);
Value *Cast = IRB.CreateBitOrPointerCast(C, OrigTy);
@@ -742,8 +735,7 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) {
const unsigned ByteSize = 1U << Idx;
const unsigned BitSize = ByteSize * 8;
Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
- Type *PtrTy = Ty->getPointerTo();
- Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
+ Value *Args[] = {Addr,
IRB.CreateBitOrPointerCast(SI->getValueOperand(), Ty),
createOrdering(&IRB, SI->getOrdering())};
CallInst *C = CallInst::Create(TsanAtomicStore[Idx], Args);
@@ -760,8 +752,7 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) {
const unsigned ByteSize = 1U << Idx;
const unsigned BitSize = ByteSize * 8;
Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
- Type *PtrTy = Ty->getPointerTo();
- Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
+ Value *Args[] = {Addr,
IRB.CreateIntCast(RMWI->getValOperand(), Ty, false),
createOrdering(&IRB, RMWI->getOrdering())};
CallInst *C = CallInst::Create(F, Args);
@@ -775,12 +766,11 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) {
const unsigned ByteSize = 1U << Idx;
const unsigned BitSize = ByteSize * 8;
Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
- Type *PtrTy = Ty->getPointerTo();
Value *CmpOperand =
IRB.CreateBitOrPointerCast(CASI->getCompareOperand(), Ty);
Value *NewOperand =
IRB.CreateBitOrPointerCast(CASI->getNewValOperand(), Ty);
- Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
+ Value *Args[] = {Addr,
CmpOperand,
NewOperand,
createOrdering(&IRB, CASI->getSuccessOrdering()),