author    Dimitry Andric <dim@FreeBSD.org>  2023-02-11 12:38:04 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2023-02-11 12:38:11 +0000
commit    e3b557809604d036af6e00c60f012c2025b59a5e (patch)
tree      8a11ba2269a3b669601e2fd41145b174008f4da8 /llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
parent    08e8dd7b9db7bb4a9de26d44c1cbfd24e869c014 (diff)
Diffstat (limited to 'llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp')
-rw-r--r--  llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp | 1667
1 file changed, 1030 insertions(+), 637 deletions(-)
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 4606bd5de6c3..fe8b8ce0dc86 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -145,13 +145,15 @@
#include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
@@ -184,6 +186,7 @@
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
@@ -202,6 +205,9 @@ using namespace llvm;
#define DEBUG_TYPE "msan"
+DEBUG_COUNTER(DebugInsertCheck, "msan-insert-check",
+ "Controls which checks to insert");
+
static const unsigned kOriginSize = 4;
static const Align kMinOriginAlignment = Align(4);
static const Align kShadowTLSAlignment = Align(8);
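The DEBUG_COUNTER added above lets a developer bisect which of the potentially
thousands of inserted checks causes a difference in behavior; insertShadowCheck
(further down in this patch) consults it before emitting each check. A minimal
sketch of the same pattern, with a hypothetical counter name and illustrative
command-line values:

#include "llvm/Support/DebugCounter.h"
using namespace llvm;

DEBUG_COUNTER(MyXformCounter, "my-pass-xform",
              "Controls which transforms of a hypothetical pass execute");

void maybeTransform() {
  // Driven from the command line, e.g.
  //   -debug-counter=my-pass-xform-skip=N,my-pass-xform-count=M
  // so only transforms N+1..N+M run and a miscompile can be bisected
  // down to a single transform site.
  if (!DebugCounter::shouldExecute(MyXformCounter))
    return;
  // ... perform the transform ...
}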
@@ -217,37 +223,48 @@ static const size_t kNumberOfAccessSizes = 4;
///
/// Adds a section to the MemorySanitizer report that points to the allocation
/// (stack or heap) the uninitialized bits came from originally.
-static cl::opt<int> ClTrackOrigins("msan-track-origins",
- cl::desc("Track origins (allocation sites) of poisoned memory"),
- cl::Hidden, cl::init(0));
+static cl::opt<int> ClTrackOrigins(
+ "msan-track-origins",
+ cl::desc("Track origins (allocation sites) of poisoned memory"), cl::Hidden,
+ cl::init(0));
static cl::opt<bool> ClKeepGoing("msan-keep-going",
- cl::desc("keep going after reporting a UMR"),
- cl::Hidden, cl::init(false));
+ cl::desc("keep going after reporting a UMR"),
+ cl::Hidden, cl::init(false));
+
+static cl::opt<bool>
+ ClPoisonStack("msan-poison-stack",
+ cl::desc("poison uninitialized stack variables"), cl::Hidden,
+ cl::init(true));
-static cl::opt<bool> ClPoisonStack("msan-poison-stack",
- cl::desc("poison uninitialized stack variables"),
- cl::Hidden, cl::init(true));
+static cl::opt<bool> ClPoisonStackWithCall(
+ "msan-poison-stack-with-call",
+ cl::desc("poison uninitialized stack variables with a call"), cl::Hidden,
+ cl::init(false));
-static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
- cl::desc("poison uninitialized stack variables with a call"),
- cl::Hidden, cl::init(false));
+static cl::opt<int> ClPoisonStackPattern(
+ "msan-poison-stack-pattern",
+ cl::desc("poison uninitialized stack variables with the given pattern"),
+ cl::Hidden, cl::init(0xff));
-static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
- cl::desc("poison uninitialized stack variables with the given pattern"),
- cl::Hidden, cl::init(0xff));
+static cl::opt<bool>
+ ClPrintStackNames("msan-print-stack-names",
+ cl::desc("Print name of local stack variable"),
+ cl::Hidden, cl::init(true));
static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
- cl::desc("poison undef temps"),
- cl::Hidden, cl::init(true));
+ cl::desc("poison undef temps"), cl::Hidden,
+ cl::init(true));
-static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
- cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
- cl::Hidden, cl::init(true));
+static cl::opt<bool>
+ ClHandleICmp("msan-handle-icmp",
+ cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
+ cl::Hidden, cl::init(true));
-static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
- cl::desc("exact handling of relational integer ICmp"),
- cl::Hidden, cl::init(false));
+static cl::opt<bool>
+ ClHandleICmpExact("msan-handle-icmp-exact",
+ cl::desc("exact handling of relational integer ICmp"),
+ cl::Hidden, cl::init(false));
static cl::opt<bool> ClHandleLifetimeIntrinsics(
"msan-handle-lifetime-intrinsics",
@@ -277,18 +294,20 @@ static cl::opt<bool> ClHandleAsmConservative(
// (e.g. only lower bits of address are garbage, or the access happens
// early at program startup where malloc-ed memory is more likely to
// be zeroed). As of 2012-08-28, this flag adds a 20% slowdown.
-static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
- cl::desc("report accesses through a pointer which has poisoned shadow"),
- cl::Hidden, cl::init(true));
+static cl::opt<bool> ClCheckAccessAddress(
+ "msan-check-access-address",
+ cl::desc("report accesses through a pointer which has poisoned shadow"),
+ cl::Hidden, cl::init(true));
static cl::opt<bool> ClEagerChecks(
"msan-eager-checks",
cl::desc("check arguments and return values at function call boundaries"),
cl::Hidden, cl::init(false));
-static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
- cl::desc("print out instructions with default strict semantics"),
- cl::Hidden, cl::init(false));
+static cl::opt<bool> ClDumpStrictInstructions(
+ "msan-dump-strict-instructions",
+ cl::desc("print out instructions with default strict semantics"),
+ cl::Hidden, cl::init(false));
static cl::opt<int> ClInstrumentationWithCallThreshold(
"msan-instrumentation-with-call-threshold",
@@ -308,18 +327,17 @@ static cl::opt<bool>
cl::desc("Apply no_sanitize to the whole file"), cl::Hidden,
cl::init(false));
-// This is an experiment to enable handling of cases where shadow is a non-zero
-// compile-time constant. For some unexplainable reason they were silently
-// ignored in the instrumentation.
-static cl::opt<bool> ClCheckConstantShadow("msan-check-constant-shadow",
- cl::desc("Insert checks for constant shadow values"),
- cl::Hidden, cl::init(false));
+static cl::opt<bool>
+ ClCheckConstantShadow("msan-check-constant-shadow",
+ cl::desc("Insert checks for constant shadow values"),
+ cl::Hidden, cl::init(true));
// This is off by default because of a bug in gold:
// https://sourceware.org/bugzilla/show_bug.cgi?id=19002
-static cl::opt<bool> ClWithComdat("msan-with-comdat",
- cl::desc("Place MSan constructors in comdat sections"),
- cl::Hidden, cl::init(false));
+static cl::opt<bool>
+ ClWithComdat("msan-with-comdat",
+ cl::desc("Place MSan constructors in comdat sections"),
+ cl::Hidden, cl::init(false));
// These options allow specifying custom memory map parameters.
// See MemoryMapParams for details.
@@ -339,6 +357,12 @@ static cl::opt<uint64_t> ClOriginBase("msan-origin-base",
cl::desc("Define custom MSan OriginBase"),
cl::Hidden, cl::init(0));
+static cl::opt<int>
+ ClDisambiguateWarning("msan-disambiguate-warning-threshold",
+ cl::desc("Define threshold for number of checks per "
+ "debug location to force origin update."),
+ cl::Hidden, cl::init(3));
+
const char kMsanModuleCtorName[] = "msan.module_ctor";
const char kMsanInitName[] = "__msan_init";
@@ -364,41 +388,34 @@ struct PlatformMemoryMapParams {
// i386 Linux
static const MemoryMapParams Linux_I386_MemoryMapParams = {
- 0x000080000000, // AndMask
- 0, // XorMask (not used)
- 0, // ShadowBase (not used)
- 0x000040000000, // OriginBase
+ 0x000080000000, // AndMask
+ 0, // XorMask (not used)
+ 0, // ShadowBase (not used)
+ 0x000040000000, // OriginBase
};
// x86_64 Linux
static const MemoryMapParams Linux_X86_64_MemoryMapParams = {
-#ifdef MSAN_LINUX_X86_64_OLD_MAPPING
- 0x400000000000, // AndMask
- 0, // XorMask (not used)
- 0, // ShadowBase (not used)
- 0x200000000000, // OriginBase
-#else
- 0, // AndMask (not used)
- 0x500000000000, // XorMask
- 0, // ShadowBase (not used)
- 0x100000000000, // OriginBase
-#endif
+ 0, // AndMask (not used)
+ 0x500000000000, // XorMask
+ 0, // ShadowBase (not used)
+ 0x100000000000, // OriginBase
};
// mips64 Linux
static const MemoryMapParams Linux_MIPS64_MemoryMapParams = {
- 0, // AndMask (not used)
- 0x008000000000, // XorMask
- 0, // ShadowBase (not used)
- 0x002000000000, // OriginBase
+ 0, // AndMask (not used)
+ 0x008000000000, // XorMask
+ 0, // ShadowBase (not used)
+ 0x002000000000, // OriginBase
};
// ppc64 Linux
static const MemoryMapParams Linux_PowerPC64_MemoryMapParams = {
- 0xE00000000000, // AndMask
- 0x100000000000, // XorMask
- 0x080000000000, // ShadowBase
- 0x1C0000000000, // OriginBase
+ 0xE00000000000, // AndMask
+ 0x100000000000, // XorMask
+ 0x080000000000, // ShadowBase
+ 0x1C0000000000, // OriginBase
};
// s390x Linux
@@ -411,57 +428,57 @@ static const MemoryMapParams Linux_S390X_MemoryMapParams = {
// aarch64 Linux
static const MemoryMapParams Linux_AArch64_MemoryMapParams = {
- 0, // AndMask (not used)
- 0x06000000000, // XorMask
- 0, // ShadowBase (not used)
- 0x01000000000, // OriginBase
+ 0, // AndMask (not used)
+ 0x0B00000000000, // XorMask
+ 0, // ShadowBase (not used)
+ 0x0200000000000, // OriginBase
};
// aarch64 FreeBSD
static const MemoryMapParams FreeBSD_AArch64_MemoryMapParams = {
- 0x1800000000000, // AndMask
- 0x0400000000000, // XorMask
- 0x0200000000000, // ShadowBase
- 0x0700000000000, // OriginBase
+ 0x1800000000000, // AndMask
+ 0x0400000000000, // XorMask
+ 0x0200000000000, // ShadowBase
+ 0x0700000000000, // OriginBase
};
// i386 FreeBSD
static const MemoryMapParams FreeBSD_I386_MemoryMapParams = {
- 0x000180000000, // AndMask
- 0x000040000000, // XorMask
- 0x000020000000, // ShadowBase
- 0x000700000000, // OriginBase
+ 0x000180000000, // AndMask
+ 0x000040000000, // XorMask
+ 0x000020000000, // ShadowBase
+ 0x000700000000, // OriginBase
};
// x86_64 FreeBSD
static const MemoryMapParams FreeBSD_X86_64_MemoryMapParams = {
- 0xc00000000000, // AndMask
- 0x200000000000, // XorMask
- 0x100000000000, // ShadowBase
- 0x380000000000, // OriginBase
+ 0xc00000000000, // AndMask
+ 0x200000000000, // XorMask
+ 0x100000000000, // ShadowBase
+ 0x380000000000, // OriginBase
};
// x86_64 NetBSD
static const MemoryMapParams NetBSD_X86_64_MemoryMapParams = {
- 0, // AndMask
- 0x500000000000, // XorMask
- 0, // ShadowBase
- 0x100000000000, // OriginBase
+ 0, // AndMask
+ 0x500000000000, // XorMask
+ 0, // ShadowBase
+ 0x100000000000, // OriginBase
};
static const PlatformMemoryMapParams Linux_X86_MemoryMapParams = {
- &Linux_I386_MemoryMapParams,
- &Linux_X86_64_MemoryMapParams,
+ &Linux_I386_MemoryMapParams,
+ &Linux_X86_64_MemoryMapParams,
};
static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams = {
- nullptr,
- &Linux_MIPS64_MemoryMapParams,
+ nullptr,
+ &Linux_MIPS64_MemoryMapParams,
};
static const PlatformMemoryMapParams Linux_PowerPC_MemoryMapParams = {
- nullptr,
- &Linux_PowerPC64_MemoryMapParams,
+ nullptr,
+ &Linux_PowerPC64_MemoryMapParams,
};
static const PlatformMemoryMapParams Linux_S390_MemoryMapParams = {
@@ -470,23 +487,23 @@ static const PlatformMemoryMapParams Linux_S390_MemoryMapParams = {
};
static const PlatformMemoryMapParams Linux_ARM_MemoryMapParams = {
- nullptr,
- &Linux_AArch64_MemoryMapParams,
+ nullptr,
+ &Linux_AArch64_MemoryMapParams,
};
static const PlatformMemoryMapParams FreeBSD_ARM_MemoryMapParams = {
- nullptr,
- &FreeBSD_AArch64_MemoryMapParams,
+ nullptr,
+ &FreeBSD_AArch64_MemoryMapParams,
};
static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams = {
- &FreeBSD_I386_MemoryMapParams,
- &FreeBSD_X86_64_MemoryMapParams,
+ &FreeBSD_I386_MemoryMapParams,
+ &FreeBSD_X86_64_MemoryMapParams,
};
static const PlatformMemoryMapParams NetBSD_X86_MemoryMapParams = {
- nullptr,
- &NetBSD_X86_64_MemoryMapParams,
+ nullptr,
+ &NetBSD_X86_64_MemoryMapParams,
};
namespace {
@@ -522,9 +539,9 @@ private:
friend struct VarArgSystemZHelper;
void initializeModule(Module &M);
- void initializeCallbacks(Module &M);
- void createKernelApi(Module &M);
- void createUserspaceApi(Module &M);
+ void initializeCallbacks(Module &M, const TargetLibraryInfo &TLI);
+ void createKernelApi(Module &M, const TargetLibraryInfo &TLI);
+ void createUserspaceApi(Module &M, const TargetLibraryInfo &TLI);
/// True if we're compiling the Linux kernel.
bool CompileKernel;
@@ -579,7 +596,9 @@ private:
/// Run-time helper that generates a new origin value for a stack
/// allocation.
- FunctionCallee MsanSetAllocaOrigin4Fn;
+ FunctionCallee MsanSetAllocaOriginWithDescriptionFn;
+ // No description version
+ FunctionCallee MsanSetAllocaOriginNoDescriptionFn;
/// Run-time helper that poisons stack on function entry.
FunctionCallee MsanPoisonStackFn;
@@ -655,20 +674,32 @@ MemorySanitizerOptions::MemorySanitizerOptions(int TO, bool R, bool K,
Recover(getOptOrDefault(ClKeepGoing, Kernel || R)),
EagerChecks(getOptOrDefault(ClEagerChecks, EagerChecks)) {}
-PreservedAnalyses MemorySanitizerPass::run(Function &F,
- FunctionAnalysisManager &FAM) {
- MemorySanitizer Msan(*F.getParent(), Options);
- if (Msan.sanitizeFunction(F, FAM.getResult<TargetLibraryAnalysis>(F)))
- return PreservedAnalyses::none();
- return PreservedAnalyses::all();
-}
+PreservedAnalyses MemorySanitizerPass::run(Module &M,
+ ModuleAnalysisManager &AM) {
+ bool Modified = false;
+ if (!Options.Kernel) {
+ insertModuleCtor(M);
+ Modified = true;
+ }
-PreservedAnalyses
-ModuleMemorySanitizerPass::run(Module &M, ModuleAnalysisManager &AM) {
- if (Options.Kernel)
+ auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
+ for (Function &F : M) {
+ if (F.empty())
+ continue;
+ MemorySanitizer Msan(*F.getParent(), Options);
+ Modified |=
+ Msan.sanitizeFunction(F, FAM.getResult<TargetLibraryAnalysis>(F));
+ }
+
+ if (!Modified)
return PreservedAnalyses::all();
- insertModuleCtor(M);
- return PreservedAnalyses::none();
+
+ PreservedAnalyses PA = PreservedAnalyses::none();
+ // GlobalsAA is considered stateless and does not get invalidated unless
+ // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
+ // make changes that require GlobalsAA to be invalidated.
+ PA.abandon<GlobalsAA>();
+ return PA;
}
void MemorySanitizerPass::printPipeline(
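The rewritten module pass above must explicitly abandon GlobalsAA because, as
the added comment notes, PreservedAnalyses::none() does not invalidate a
"stateless" analysis. A self-contained sketch of that new-PM pattern;
MyInstrumenterPass and instrumentEverything are hypothetical:

#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/IR/PassManager.h"
using namespace llvm;

struct MyInstrumenterPass : PassInfoMixin<MyInstrumenterPass> {
  bool instrumentEverything(Module &M); // hypothetical; defined elsewhere

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &) {
    if (!instrumentEverything(M))
      return PreservedAnalyses::all();
    // GlobalsAA caches mod/ref facts about functions; the inserted runtime
    // calls invalidate them, so the cached result must be dropped explicitly.
    PreservedAnalyses PA = PreservedAnalyses::none();
    PA.abandon<GlobalsAA>();
    return PA;
  }
};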
@@ -691,15 +722,15 @@ void MemorySanitizerPass::printPipeline(
/// Creates a private constant global for Str so that we can pass it to the
/// run-time lib.
-static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
- StringRef Str) {
+static GlobalVariable *createPrivateConstGlobalForString(Module &M,
+ StringRef Str) {
Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
- return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
+ return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/true,
GlobalValue::PrivateLinkage, StrConst, "");
}
/// Create KMSAN API callbacks.
-void MemorySanitizer::createKernelApi(Module &M) {
+void MemorySanitizer::createKernelApi(Module &M, const TargetLibraryInfo &TLI) {
IRBuilder<> IRB(*C);
// These will be initialized in insertKmsanPrologue().
@@ -711,8 +742,10 @@ void MemorySanitizer::createKernelApi(Module &M) {
VAArgOriginTLS = nullptr;
VAArgOverflowSizeTLS = nullptr;
- WarningFn = M.getOrInsertFunction("__msan_warning", IRB.getVoidTy(),
- IRB.getInt32Ty());
+ WarningFn = M.getOrInsertFunction("__msan_warning",
+ TLI.getAttrList(C, {0}, /*Signed=*/false),
+ IRB.getVoidTy(), IRB.getInt32Ty());
+
// Requests the per-task context state (kmsan_context_state*) from the
// runtime library.
MsanContextStateTy = StructType::get(
@@ -763,16 +796,23 @@ static Constant *getOrInsertGlobal(Module &M, StringRef Name, Type *Ty) {
}
/// Insert declarations for userspace-specific functions and globals.
-void MemorySanitizer::createUserspaceApi(Module &M) {
+void MemorySanitizer::createUserspaceApi(Module &M,
+                                         const TargetLibraryInfo &TLI) {
IRBuilder<> IRB(*C);
// Create the callback.
// FIXME: this function should have "Cold" calling conv,
// which is not yet implemented.
- StringRef WarningFnName = Recover ? "__msan_warning_with_origin"
- : "__msan_warning_with_origin_noreturn";
- WarningFn =
- M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), IRB.getInt32Ty());
+ if (TrackOrigins) {
+ StringRef WarningFnName = Recover ? "__msan_warning_with_origin"
+ : "__msan_warning_with_origin_noreturn";
+ WarningFn = M.getOrInsertFunction(WarningFnName,
+ TLI.getAttrList(C, {0}, /*Signed=*/false),
+ IRB.getVoidTy(), IRB.getInt32Ty());
+ } else {
+ StringRef WarningFnName =
+ Recover ? "__msan_warning" : "__msan_warning_noreturn";
+ WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy());
+ }
// Create the global TLS variables.
RetvalTLS =
@@ -804,37 +844,29 @@ void MemorySanitizer::createUserspaceApi(Module &M) {
AccessSizeIndex++) {
unsigned AccessSize = 1 << AccessSizeIndex;
std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
- SmallVector<std::pair<unsigned, Attribute>, 2> MaybeWarningFnAttrs;
- MaybeWarningFnAttrs.push_back(std::make_pair(
- AttributeList::FirstArgIndex, Attribute::get(*C, Attribute::ZExt)));
- MaybeWarningFnAttrs.push_back(std::make_pair(
- AttributeList::FirstArgIndex + 1, Attribute::get(*C, Attribute::ZExt)));
MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
- FunctionName, AttributeList::get(*C, MaybeWarningFnAttrs),
+ FunctionName, TLI.getAttrList(C, {0, 1}, /*Signed=*/false),
IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt32Ty());
FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
- SmallVector<std::pair<unsigned, Attribute>, 2> MaybeStoreOriginFnAttrs;
- MaybeStoreOriginFnAttrs.push_back(std::make_pair(
- AttributeList::FirstArgIndex, Attribute::get(*C, Attribute::ZExt)));
- MaybeStoreOriginFnAttrs.push_back(std::make_pair(
- AttributeList::FirstArgIndex + 2, Attribute::get(*C, Attribute::ZExt)));
MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
- FunctionName, AttributeList::get(*C, MaybeStoreOriginFnAttrs),
+ FunctionName, TLI.getAttrList(C, {0, 2}, /*Signed=*/false),
IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt8PtrTy(),
IRB.getInt32Ty());
}
- MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
- "__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
- IRB.getInt8PtrTy(), IntptrTy);
- MsanPoisonStackFn =
- M.getOrInsertFunction("__msan_poison_stack", IRB.getVoidTy(),
- IRB.getInt8PtrTy(), IntptrTy);
+ MsanSetAllocaOriginWithDescriptionFn = M.getOrInsertFunction(
+ "__msan_set_alloca_origin_with_descr", IRB.getVoidTy(),
+ IRB.getInt8PtrTy(), IntptrTy, IRB.getInt8PtrTy(), IRB.getInt8PtrTy());
+ MsanSetAllocaOriginNoDescriptionFn = M.getOrInsertFunction(
+ "__msan_set_alloca_origin_no_descr", IRB.getVoidTy(), IRB.getInt8PtrTy(),
+ IntptrTy, IRB.getInt8PtrTy());
+ MsanPoisonStackFn = M.getOrInsertFunction(
+ "__msan_poison_stack", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy);
}
/// Insert extern declaration of runtime-provided functions and globals.
-void MemorySanitizer::initializeCallbacks(Module &M) {
+void MemorySanitizer::initializeCallbacks(Module &M,
+                                          const TargetLibraryInfo &TLI) {
// Only do this once.
if (CallbacksInitialized)
return;
@@ -843,28 +875,30 @@ void MemorySanitizer::initializeCallbacks(Module &M) {
// Initialize callbacks that are common for kernel and userspace
// instrumentation.
MsanChainOriginFn = M.getOrInsertFunction(
- "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty());
- MsanSetOriginFn =
- M.getOrInsertFunction("__msan_set_origin", IRB.getVoidTy(),
- IRB.getInt8PtrTy(), IntptrTy, IRB.getInt32Ty());
- MemmoveFn = M.getOrInsertFunction(
- "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
- IRB.getInt8PtrTy(), IntptrTy);
- MemcpyFn = M.getOrInsertFunction(
- "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
- IntptrTy);
+ "__msan_chain_origin",
+ TLI.getAttrList(C, {0}, /*Signed=*/false, /*Ret=*/true), IRB.getInt32Ty(),
+ IRB.getInt32Ty());
+ MsanSetOriginFn = M.getOrInsertFunction(
+ "__msan_set_origin", TLI.getAttrList(C, {2}, /*Signed=*/false),
+ IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy, IRB.getInt32Ty());
+ MemmoveFn =
+ M.getOrInsertFunction("__msan_memmove", IRB.getInt8PtrTy(),
+ IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
+ MemcpyFn =
+ M.getOrInsertFunction("__msan_memcpy", IRB.getInt8PtrTy(),
+ IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy);
MemsetFn = M.getOrInsertFunction(
- "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
- IntptrTy);
+ "__msan_memset", TLI.getAttrList(C, {1}, /*Signed=*/true),
+ IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy);
MsanInstrumentAsmStoreFn =
M.getOrInsertFunction("__msan_instrument_asm_store", IRB.getVoidTy(),
PointerType::get(IRB.getInt8Ty(), 0), IntptrTy);
if (CompileKernel) {
- createKernelApi(M);
+ createKernelApi(M, TLI);
} else {
- createUserspaceApi(M);
+ createUserspaceApi(M, TLI);
}
CallbacksInitialized = true;
}
@@ -905,59 +939,59 @@ void MemorySanitizer::initializeModule(Module &M) {
} else {
Triple TargetTriple(M.getTargetTriple());
switch (TargetTriple.getOS()) {
- case Triple::FreeBSD:
- switch (TargetTriple.getArch()) {
- case Triple::aarch64:
- MapParams = FreeBSD_ARM_MemoryMapParams.bits64;
- break;
- case Triple::x86_64:
- MapParams = FreeBSD_X86_MemoryMapParams.bits64;
- break;
- case Triple::x86:
- MapParams = FreeBSD_X86_MemoryMapParams.bits32;
- break;
- default:
- report_fatal_error("unsupported architecture");
- }
+ case Triple::FreeBSD:
+ switch (TargetTriple.getArch()) {
+ case Triple::aarch64:
+ MapParams = FreeBSD_ARM_MemoryMapParams.bits64;
break;
- case Triple::NetBSD:
- switch (TargetTriple.getArch()) {
- case Triple::x86_64:
- MapParams = NetBSD_X86_MemoryMapParams.bits64;
- break;
- default:
- report_fatal_error("unsupported architecture");
- }
+ case Triple::x86_64:
+ MapParams = FreeBSD_X86_MemoryMapParams.bits64;
break;
- case Triple::Linux:
- switch (TargetTriple.getArch()) {
- case Triple::x86_64:
- MapParams = Linux_X86_MemoryMapParams.bits64;
- break;
- case Triple::x86:
- MapParams = Linux_X86_MemoryMapParams.bits32;
- break;
- case Triple::mips64:
- case Triple::mips64el:
- MapParams = Linux_MIPS_MemoryMapParams.bits64;
- break;
- case Triple::ppc64:
- case Triple::ppc64le:
- MapParams = Linux_PowerPC_MemoryMapParams.bits64;
- break;
- case Triple::systemz:
- MapParams = Linux_S390_MemoryMapParams.bits64;
- break;
- case Triple::aarch64:
- case Triple::aarch64_be:
- MapParams = Linux_ARM_MemoryMapParams.bits64;
- break;
- default:
- report_fatal_error("unsupported architecture");
- }
+ case Triple::x86:
+ MapParams = FreeBSD_X86_MemoryMapParams.bits32;
+ break;
+ default:
+ report_fatal_error("unsupported architecture");
+ }
+ break;
+ case Triple::NetBSD:
+ switch (TargetTriple.getArch()) {
+ case Triple::x86_64:
+ MapParams = NetBSD_X86_MemoryMapParams.bits64;
break;
default:
- report_fatal_error("unsupported operating system");
+ report_fatal_error("unsupported architecture");
+ }
+ break;
+ case Triple::Linux:
+ switch (TargetTriple.getArch()) {
+ case Triple::x86_64:
+ MapParams = Linux_X86_MemoryMapParams.bits64;
+ break;
+ case Triple::x86:
+ MapParams = Linux_X86_MemoryMapParams.bits32;
+ break;
+ case Triple::mips64:
+ case Triple::mips64el:
+ MapParams = Linux_MIPS_MemoryMapParams.bits64;
+ break;
+ case Triple::ppc64:
+ case Triple::ppc64le:
+ MapParams = Linux_PowerPC_MemoryMapParams.bits64;
+ break;
+ case Triple::systemz:
+ MapParams = Linux_S390_MemoryMapParams.bits64;
+ break;
+ case Triple::aarch64:
+ case Triple::aarch64_be:
+ MapParams = Linux_ARM_MemoryMapParams.bits64;
+ break;
+ default:
+ report_fatal_error("unsupported architecture");
+ }
+ break;
+ default:
+ report_fatal_error("unsupported operating system");
}
}
@@ -983,7 +1017,7 @@ void MemorySanitizer::initializeModule(Module &M) {
GlobalValue::WeakODRLinkage,
IRB.getInt32(Recover), "__msan_keep_going");
});
-}
+ }
}
namespace {
@@ -1023,12 +1057,22 @@ static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
MemorySanitizerVisitor &Visitor);
static unsigned TypeSizeToSizeIndex(unsigned TypeSize) {
- if (TypeSize <= 8) return 0;
+ if (TypeSize <= 8)
+ return 0;
return Log2_32_Ceil((TypeSize + 7) / 8);
}
namespace {
+/// Helper class to attach the debug information of the given instruction to
+/// new instructions inserted after it.
+class NextNodeIRBuilder : public IRBuilder<> {
+public:
+ explicit NextNodeIRBuilder(Instruction *IP) : IRBuilder<>(IP->getNextNode()) {
+ SetCurrentDebugLocation(IP->getDebugLoc());
+ }
+};
+
/// This class does all the work for a given function. Store and Load
/// instructions store and load corresponding shadow and origin
/// values. Most instructions propagate shadow from arguments to their
@@ -1039,7 +1083,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Function &F;
MemorySanitizer &MS;
SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
- ValueMap<Value*, Value*> ShadowMap, OriginMap;
+ ValueMap<Value *, Value *> ShadowMap, OriginMap;
std::unique_ptr<VarArgHelper> VAHelper;
const TargetLibraryInfo *TLI;
Instruction *FnPrologueEnd;
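TypeSizeToSizeIndex, reformatted above, selects which __msan_maybe_warning_N
callback services a value of a given bit width (index i maps to N = 2^i bytes,
N in {1, 2, 4, 8}). A standalone plain-C++ recreation of the mapping:

#include <bit>
#include <cstdio>

// Mirrors TypeSizeToSizeIndex(): map a bit width to a callback size index.
static unsigned typeSizeToSizeIndex(unsigned TypeSizeBits) {
  if (TypeSizeBits <= 8)
    return 0;
  unsigned Bytes = (TypeSizeBits + 7) / 8;
  return std::bit_width(Bytes - 1); // == Log2_32_Ceil(Bytes) for Bytes >= 2
}

int main() {
  for (unsigned Bits : {1u, 8u, 16u, 32u, 64u})
    std::printf("%2u bits -> __msan_maybe_warning_%u\n", Bits,
                1u << typeSizeToSizeIndex(Bits));
}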
@@ -1057,13 +1101,15 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Instruction *OrigIns;
ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
- : Shadow(S), Origin(O), OrigIns(I) {}
+ : Shadow(S), Origin(O), OrigIns(I) {}
};
SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
+ DenseMap<const DILocation *, int> LazyWarningDebugLocationCount;
bool InstrumentLifetimeStart = ClHandleLifetimeIntrinsics;
- SmallSet<AllocaInst *, 16> AllocaSet;
+ SmallSetVector<AllocaInst *, 16> AllocaSet;
SmallVector<std::pair<IntrinsicInst *, AllocaInst *>, 16> LifetimeStartList;
SmallVector<StoreInst *, 16> StoreList;
+ int64_t SplittableBlocksCount = 0;
MemorySanitizerVisitor(Function &F, MemorySanitizer &MS,
const TargetLibraryInfo &TLI)
@@ -1081,7 +1127,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// It's easier to remove unreachable blocks than deal with missing shadow.
removeUnreachableBlocks(F);
- MS.initializeCallbacks(*F.getParent());
+ MS.initializeCallbacks(*F.getParent(), TLI);
FnPrologueEnd = IRBuilder<>(F.getEntryBlock().getFirstNonPHI())
.CreateIntrinsic(Intrinsic::donothing, {}, {});
@@ -1095,20 +1141,36 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
<< F.getName() << "'\n");
}
+ bool instrumentWithCalls(Value *V) {
+ // Constants will likely be eliminated by follow-up passes.
+ if (isa<Constant>(V))
+ return false;
+
+ ++SplittableBlocksCount;
+ return ClInstrumentationWithCallThreshold >= 0 &&
+ SplittableBlocksCount > ClInstrumentationWithCallThreshold;
+ }
+
bool isInPrologue(Instruction &I) {
return I.getParent() == FnPrologueEnd->getParent() &&
(&I == FnPrologueEnd || I.comesBefore(FnPrologueEnd));
}
+ // Creates a new origin and records the stack trace. In general we can call
+ // this function for any origin manipulation we like. However, it costs
+ // runtime resources, so use it only where it can provide extra information
+ // helpful to the user.
Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
- if (MS.TrackOrigins <= 1) return V;
+ if (MS.TrackOrigins <= 1)
+ return V;
return IRB.CreateCall(MS.MsanChainOriginFn, V);
}
Value *originToIntptr(IRBuilder<> &IRB, Value *Origin) {
const DataLayout &DL = F.getParent()->getDataLayout();
unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
- if (IntptrSize == kOriginSize) return Origin;
+ if (IntptrSize == kOriginSize)
+ return Origin;
assert(IntptrSize == kOriginSize * 2);
Origin = IRB.CreateIntCast(Origin, MS.IntptrTy, /* isSigned */ false);
return IRB.CreateOr(Origin, IRB.CreateShl(Origin, kOriginSize * 8));
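instrumentWithCalls, added above, replaces the old whole-function decision
(one flag computed from InstrumentationList.size() + StoreList.size()) with a
running counter: the first ClInstrumentationWithCallThreshold splittable
checks stay inline, and the overflow is emitted as __msan_maybe_warning_N /
__msan_maybe_store_origin_N calls, bounding code growth in huge functions. A
condensed sketch of the policy, with hypothetical names:

// Sketch: inline the first Threshold checks, then switch to callbacks.
bool shouldUseCallback(bool IsConstantShadow, long long &EmittedSoFar,
                       int Threshold) {
  if (IsConstantShadow) // constants are likely folded away later anyway
    return false;
  ++EmittedSoFar;
  return Threshold >= 0 && EmittedSoFar > Threshold;
}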
@@ -1147,21 +1209,30 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
- Value *OriginPtr, Align Alignment, bool AsCall) {
+ Value *OriginPtr, Align Alignment) {
const DataLayout &DL = F.getParent()->getDataLayout();
const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
Value *ConvertedShadow = convertShadowToScalar(Shadow, IRB);
if (auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
- if (ClCheckConstantShadow && !ConstantShadow->isZeroValue())
+ if (!ClCheckConstantShadow || ConstantShadow->isZeroValue()) {
+ // Origin is not needed: value is initialized or const shadow is
+ // ignored.
+ return;
+ }
+ if (llvm::isKnownNonZero(ConvertedShadow, DL)) {
+ // Copy origin as the value is definitely uninitialized.
paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
OriginAlignment);
- return;
+ return;
+ }
+ // Fall back to a runtime check, which can still be optimized out later.
}
unsigned TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
- if (AsCall && SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
+ if (instrumentWithCalls(ConvertedShadow) &&
+ SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
FunctionCallee Fn = MS.MaybeStoreOriginFn[SizeIndex];
Value *ConvertedShadow2 =
IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
@@ -1180,7 +1251,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
}
- void materializeStores(bool InstrumentWithCalls) {
+ void materializeStores() {
for (StoreInst *SI : StoreList) {
IRBuilder<> IRB(SI);
Value *Val = SI->getValueOperand();
@@ -1202,40 +1273,62 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (MS.TrackOrigins && !SI->isAtomic())
storeOrigin(IRB, Addr, Shadow, getOrigin(Val), OriginPtr,
- OriginAlignment, InstrumentWithCalls);
+ OriginAlignment);
}
}
+ // Returns true if the debug location corresponds to multiple warnings.
+ bool shouldDisambiguateWarningLocation(const DebugLoc &DebugLoc) {
+ if (MS.TrackOrigins < 2)
+ return false;
+
+ if (LazyWarningDebugLocationCount.empty())
+ for (const auto &I : InstrumentationList)
+ ++LazyWarningDebugLocationCount[I.OrigIns->getDebugLoc()];
+
+ return LazyWarningDebugLocationCount[DebugLoc] >= ClDisambiguateWarning;
+ }
+
/// Helper function to insert a warning at IRB's current insert point.
void insertWarningFn(IRBuilder<> &IRB, Value *Origin) {
if (!Origin)
Origin = (Value *)IRB.getInt32(0);
assert(Origin->getType()->isIntegerTy());
- IRB.CreateCall(MS.WarningFn, Origin)->setCannotMerge();
- // FIXME: Insert UnreachableInst if !MS.Recover?
- // This may invalidate some of the following checks and needs to be done
- // at the very end.
- }
-
- void materializeOneCheck(Instruction *OrigIns, Value *Shadow, Value *Origin,
- bool AsCall) {
- IRBuilder<> IRB(OrigIns);
- LLVM_DEBUG(dbgs() << " SHAD0 : " << *Shadow << "\n");
- Value *ConvertedShadow = convertShadowToScalar(Shadow, IRB);
- LLVM_DEBUG(dbgs() << " SHAD1 : " << *ConvertedShadow << "\n");
- if (auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
- if (ClCheckConstantShadow && !ConstantShadow->isZeroValue()) {
- insertWarningFn(IRB, Origin);
+ if (shouldDisambiguateWarningLocation(IRB.getCurrentDebugLocation())) {
+ // Try to create an additional origin with the debug info of the last
+ // origin instruction. It may provide extra information to the user.
+ if (Instruction *OI = dyn_cast_or_null<Instruction>(Origin)) {
+ assert(MS.TrackOrigins);
+ auto NewDebugLoc = OI->getDebugLoc();
+ // Origin update with missing or the same debug location provides no
+ // additional value.
+ if (NewDebugLoc && NewDebugLoc != IRB.getCurrentDebugLocation()) {
+ // Insert the update just before the check, so the runtime is called
+ // only immediately before the report.
+ IRBuilder<> IRBOrigin(&*IRB.GetInsertPoint());
+ IRBOrigin.SetCurrentDebugLocation(NewDebugLoc);
+ Origin = updateOrigin(Origin, IRBOrigin);
+ }
}
- return;
}
- const DataLayout &DL = OrigIns->getModule()->getDataLayout();
+ if (MS.CompileKernel || MS.TrackOrigins)
+ IRB.CreateCall(MS.WarningFn, Origin)->setCannotMerge();
+ else
+ IRB.CreateCall(MS.WarningFn)->setCannotMerge();
+ // FIXME: Insert UnreachableInst if !MS.Recover?
+ // This may invalidate some of the following checks and needs to be done
+ // at the very end.
+ }
+ void materializeOneCheck(IRBuilder<> &IRB, Value *ConvertedShadow,
+ Value *Origin) {
+ const DataLayout &DL = F.getParent()->getDataLayout();
unsigned TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
- if (AsCall && SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
+ if (instrumentWithCalls(ConvertedShadow) &&
+ SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
FunctionCallee Fn = MS.MaybeWarningFn[SizeIndex];
Value *ConvertedShadow2 =
IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
@@ -1247,7 +1340,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
} else {
Value *Cmp = convertToBool(ConvertedShadow, IRB, "_mscmp");
Instruction *CheckTerm = SplitBlockAndInsertIfThen(
- Cmp, OrigIns,
+ Cmp, &*IRB.GetInsertPoint(),
/* Unreachable */ !MS.Recover, MS.ColdCallWeights);
IRB.SetInsertPoint(CheckTerm);
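The disambiguation path above relies on shouldDisambiguateWarningLocation,
which (with -msan-track-origins=2) lazily builds a histogram of pending checks
per debug location and flags locations hosting at least ClDisambiguateWarning
checks, so insertWarningFn can chain one extra origin right before the report.
The lazy-count idiom in isolation, with simplified stand-in types:

#include <unordered_map>
#include <vector>

struct Check { const void *DebugLoc; }; // stand-in for ShadowOriginAndInsertPoint

class WarningLocations {
  const std::vector<Check> &Checks;
  std::unordered_map<const void *, int> Count; // DenseMap in the real code

public:
  explicit WarningLocations(const std::vector<Check> &C) : Checks(C) {}

  // Mirrors shouldDisambiguateWarningLocation(): populate on first query.
  bool isAmbiguous(const void *Loc, int Threshold) {
    if (Count.empty())
      for (const Check &C : Checks)
        ++Count[C.DebugLoc];
    return Count[Loc] >= Threshold;
  }
};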
@@ -1256,13 +1349,77 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
}
- void materializeChecks(bool InstrumentWithCalls) {
- for (const auto &ShadowData : InstrumentationList) {
- Instruction *OrigIns = ShadowData.OrigIns;
- Value *Shadow = ShadowData.Shadow;
- Value *Origin = ShadowData.Origin;
- materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls);
+ void materializeInstructionChecks(
+ ArrayRef<ShadowOriginAndInsertPoint> InstructionChecks) {
+ const DataLayout &DL = F.getParent()->getDataLayout();
+ // Disable combining when tracking origins: each shadow is checked
+ // individually to pick the correct origin.
+ bool Combine = !MS.TrackOrigins;
+ Instruction *Instruction = InstructionChecks.front().OrigIns;
+ Value *Shadow = nullptr;
+ for (const auto &ShadowData : InstructionChecks) {
+ assert(ShadowData.OrigIns == Instruction);
+ IRBuilder<> IRB(Instruction);
+
+ Value *ConvertedShadow = ShadowData.Shadow;
+
+ if (auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
+ if (!ClCheckConstantShadow || ConstantShadow->isZeroValue()) {
+ // Skip, value is initialized or const shadow is ignored.
+ continue;
+ }
+ if (llvm::isKnownNonZero(ConvertedShadow, DL)) {
+ // Report as the value is definitely uninitialized.
+ insertWarningFn(IRB, ShadowData.Origin);
+ if (!MS.Recover)
+ return; // Always fail and stop here; no need to check the rest.
+ // Skip the entire instruction.
+ continue;
+ }
+ // Fall back to a runtime check, which can still be optimized out later.
+ }
+
+ if (!Combine) {
+ materializeOneCheck(IRB, ConvertedShadow, ShadowData.Origin);
+ continue;
+ }
+
+ if (!Shadow) {
+ Shadow = ConvertedShadow;
+ continue;
+ }
+
+ Shadow = convertToBool(Shadow, IRB, "_mscmp");
+ ConvertedShadow = convertToBool(ConvertedShadow, IRB, "_mscmp");
+ Shadow = IRB.CreateOr(Shadow, ConvertedShadow, "_msor");
+ }
+
+ if (Shadow) {
+ assert(Combine);
+ IRBuilder<> IRB(Instruction);
+ materializeOneCheck(IRB, Shadow, nullptr);
+ }
+ }
+
+ void materializeChecks() {
+ llvm::stable_sort(InstrumentationList,
+ [](const ShadowOriginAndInsertPoint &L,
+ const ShadowOriginAndInsertPoint &R) {
+ return L.OrigIns < R.OrigIns;
+ });
+
+ for (auto I = InstrumentationList.begin();
+ I != InstrumentationList.end();) {
+ auto J =
+ std::find_if(I + 1, InstrumentationList.end(),
+ [L = I->OrigIns](const ShadowOriginAndInsertPoint &R) {
+ return L != R.OrigIns;
+ });
+ // Process all checks of one instruction at once.
+ materializeInstructionChecks(ArrayRef<ShadowOriginAndInsertPoint>(I, J));
+ I = J;
}
+
LLVM_DEBUG(dbgs() << "DONE:\n" << F);
}
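materializeChecks now stable-sorts InstrumentationList by instruction and
peels off one equal-instruction run at a time, which is what enables the
per-instruction shadow combining above. The same sort-then-peel-runs idiom in
isolation, with a stand-in element type:

#include <algorithm>
#include <vector>

struct Item { int Key; }; // stand-in for ShadowOriginAndInsertPoint::OrigIns

template <typename Fn> void forEachRun(std::vector<Item> &L, Fn HandleRun) {
  std::stable_sort(L.begin(), L.end(),
                   [](const Item &A, const Item &B) { return A.Key < B.Key; });
  for (auto I = L.begin(); I != L.end();) {
    auto J = std::find_if(I + 1, L.end(),
                          [K = I->Key](const Item &R) { return K != R.Key; });
    HandleRun(I, J); // one run of equal keys, like one instruction's checks
    I = J;
  }
}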
@@ -1303,7 +1460,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
size_t NumValues = PN->getNumIncomingValues();
for (size_t v = 0; v < NumValues; v++) {
PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
- if (PNO) PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
+ if (PNO)
+ PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
}
}
@@ -1314,7 +1472,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (InstrumentLifetimeStart) {
for (auto Item : LifetimeStartList) {
instrumentAlloca(*Item.second, Item.first);
- AllocaSet.erase(Item.second);
+ AllocaSet.remove(Item.second);
}
}
// Poison the allocas for which we didn't instrument the corresponding
@@ -1322,24 +1480,18 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
for (AllocaInst *AI : AllocaSet)
instrumentAlloca(*AI);
- bool InstrumentWithCalls = ClInstrumentationWithCallThreshold >= 0 &&
- InstrumentationList.size() + StoreList.size() >
- (unsigned)ClInstrumentationWithCallThreshold;
-
// Insert shadow value checks.
- materializeChecks(InstrumentWithCalls);
+ materializeChecks();
// Delayed instrumentation of StoreInst.
// This may not add new address checks.
- materializeStores(InstrumentWithCalls);
+ materializeStores();
return true;
}
/// Compute the shadow type that corresponds to a given Value.
- Type *getShadowTy(Value *V) {
- return getShadowTy(V->getType());
- }
+ Type *getShadowTy(Value *V) { return getShadowTy(V->getType()); }
/// Compute the shadow type that corresponds to a given Type.
Type *getShadowTy(Type *OrigTy) {
@@ -1361,7 +1513,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
AT->getNumElements());
}
if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
- SmallVector<Type*, 4> Elements;
+ SmallVector<Type *, 4> Elements;
for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
Elements.push_back(getShadowTy(ST->getElementType(i)));
StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
@@ -1376,7 +1528,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Type *getShadowTyNoVec(Type *ty) {
if (VectorType *vt = dyn_cast<VectorType>(ty))
return IntegerType::get(*MS.C,
- vt->getPrimitiveSizeInBits().getFixedSize());
+ vt->getPrimitiveSizeInBits().getFixedValue());
return ty;
}
@@ -1428,36 +1580,66 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
return collapseArrayShadow(Array, V, IRB);
Type *Ty = V->getType();
Type *NoVecTy = getShadowTyNoVec(Ty);
- if (Ty == NoVecTy) return V;
+ if (Ty == NoVecTy)
+ return V;
return IRB.CreateBitCast(V, NoVecTy);
}
// Convert a scalar value to an i1 by comparing with 0
Value *convertToBool(Value *V, IRBuilder<> &IRB, const Twine &name = "") {
Type *VTy = V->getType();
- assert(VTy->isIntegerTy());
+ if (!VTy->isIntegerTy())
+ return convertToBool(convertShadowToScalar(V, IRB), IRB, name);
if (VTy->getIntegerBitWidth() == 1)
// Just converting a bool to a bool, so do nothing.
return V;
return IRB.CreateICmpNE(V, ConstantInt::get(VTy, 0), name);
}
+ Type *ptrToIntPtrType(Type *PtrTy) const {
+ if (FixedVectorType *VectTy = dyn_cast<FixedVectorType>(PtrTy)) {
+ return FixedVectorType::get(ptrToIntPtrType(VectTy->getElementType()),
+ VectTy->getNumElements());
+ }
+ assert(PtrTy->isIntOrPtrTy());
+ return MS.IntptrTy;
+ }
+
+ Type *getPtrToShadowPtrType(Type *IntPtrTy, Type *ShadowTy) const {
+ if (FixedVectorType *VectTy = dyn_cast<FixedVectorType>(IntPtrTy)) {
+ return FixedVectorType::get(
+ getPtrToShadowPtrType(VectTy->getElementType(), ShadowTy),
+ VectTy->getNumElements());
+ }
+ assert(IntPtrTy == MS.IntptrTy);
+ return ShadowTy->getPointerTo();
+ }
+
+ Constant *constToIntPtr(Type *IntPtrTy, uint64_t C) const {
+ if (FixedVectorType *VectTy = dyn_cast<FixedVectorType>(IntPtrTy)) {
+ return ConstantDataVector::getSplat(
+ VectTy->getNumElements(), constToIntPtr(VectTy->getElementType(), C));
+ }
+ assert(IntPtrTy == MS.IntptrTy);
+ return ConstantInt::get(MS.IntptrTy, C);
+ }
+
/// Compute the integer shadow offset that corresponds to a given
/// application address.
///
/// Offset = (Addr & ~AndMask) ^ XorMask
+ /// Addr can be a ptr or <N x ptr>. In both cases ShadowTy is the shadow
+ /// type of a single pointee.
+ /// Returns <shadow_ptr, origin_ptr> or <<N x shadow_ptr>, <N x origin_ptr>>.
Value *getShadowPtrOffset(Value *Addr, IRBuilder<> &IRB) {
- Value *OffsetLong = IRB.CreatePointerCast(Addr, MS.IntptrTy);
+ Type *IntptrTy = ptrToIntPtrType(Addr->getType());
+ Value *OffsetLong = IRB.CreatePointerCast(Addr, IntptrTy);
- uint64_t AndMask = MS.MapParams->AndMask;
- if (AndMask)
- OffsetLong =
- IRB.CreateAnd(OffsetLong, ConstantInt::get(MS.IntptrTy, ~AndMask));
+ if (uint64_t AndMask = MS.MapParams->AndMask)
+ OffsetLong = IRB.CreateAnd(OffsetLong, constToIntPtr(IntptrTy, ~AndMask));
- uint64_t XorMask = MS.MapParams->XorMask;
- if (XorMask)
- OffsetLong =
- IRB.CreateXor(OffsetLong, ConstantInt::get(MS.IntptrTy, XorMask));
+ if (uint64_t XorMask = MS.MapParams->XorMask)
+ OffsetLong = IRB.CreateXor(OffsetLong, constToIntPtr(IntptrTy, XorMask));
return OffsetLong;
}
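With the vector-aware helpers above, the mapping is still plain per-lane
integer arithmetic. A worked example using the x86_64 Linux parameters from
this patch (AndMask and ShadowBase are 0; XorMask 0x500000000000; OriginBase
0x100000000000), following Shadow = ShadowBase + ((Addr & ~AndMask) ^ XorMask)
and Origin = (OriginBase + Offset) & ~3:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t XorMask = 0x500000000000, OriginBase = 0x100000000000;
  uint64_t Addr = 0x700000001000;                  // an application address
  uint64_t Offset = Addr ^ XorMask;                // 0x200000001000 (AndMask unused)
  uint64_t Shadow = Offset;                        // ShadowBase is 0 (unused)
  uint64_t Origin = (OriginBase + Offset) & ~3ULL; // 0x300000001000, 4-aligned
  std::printf("shadow @ 0x%llx, origin @ 0x%llx\n",
              (unsigned long long)Shadow, (unsigned long long)Origin);
}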
@@ -1466,41 +1648,43 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
///
/// Shadow = ShadowBase + Offset
/// Origin = (OriginBase + Offset) & ~3ULL
+ /// Addr can be a ptr or <N x ptr>. In both cases ShadowTy is the shadow
+ /// type of a single pointee.
+ /// Returns <shadow_ptr, origin_ptr> or <<N x shadow_ptr>, <N x origin_ptr>>.
std::pair<Value *, Value *>
getShadowOriginPtrUserspace(Value *Addr, IRBuilder<> &IRB, Type *ShadowTy,
MaybeAlign Alignment) {
+ Type *IntptrTy = ptrToIntPtrType(Addr->getType());
Value *ShadowOffset = getShadowPtrOffset(Addr, IRB);
Value *ShadowLong = ShadowOffset;
- uint64_t ShadowBase = MS.MapParams->ShadowBase;
- if (ShadowBase != 0) {
+ if (uint64_t ShadowBase = MS.MapParams->ShadowBase) {
ShadowLong =
- IRB.CreateAdd(ShadowLong,
- ConstantInt::get(MS.IntptrTy, ShadowBase));
+ IRB.CreateAdd(ShadowLong, constToIntPtr(IntptrTy, ShadowBase));
}
- Value *ShadowPtr =
- IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
+ Value *ShadowPtr = IRB.CreateIntToPtr(
+ ShadowLong, getPtrToShadowPtrType(IntptrTy, ShadowTy));
+
Value *OriginPtr = nullptr;
if (MS.TrackOrigins) {
Value *OriginLong = ShadowOffset;
uint64_t OriginBase = MS.MapParams->OriginBase;
if (OriginBase != 0)
- OriginLong = IRB.CreateAdd(OriginLong,
- ConstantInt::get(MS.IntptrTy, OriginBase));
+ OriginLong =
+ IRB.CreateAdd(OriginLong, constToIntPtr(IntptrTy, OriginBase));
if (!Alignment || *Alignment < kMinOriginAlignment) {
uint64_t Mask = kMinOriginAlignment.value() - 1;
- OriginLong =
- IRB.CreateAnd(OriginLong, ConstantInt::get(MS.IntptrTy, ~Mask));
+ OriginLong = IRB.CreateAnd(OriginLong, constToIntPtr(IntptrTy, ~Mask));
}
- OriginPtr =
- IRB.CreateIntToPtr(OriginLong, PointerType::get(MS.OriginTy, 0));
+ OriginPtr = IRB.CreateIntToPtr(
+ OriginLong, getPtrToShadowPtrType(IntptrTy, MS.OriginTy));
}
return std::make_pair(ShadowPtr, OriginPtr);
}
- std::pair<Value *, Value *> getShadowOriginPtrKernel(Value *Addr,
- IRBuilder<> &IRB,
- Type *ShadowTy,
- bool isStore) {
+ std::pair<Value *, Value *> getShadowOriginPtrKernelNoVec(Value *Addr,
+ IRBuilder<> &IRB,
+ Type *ShadowTy,
+ bool isStore) {
Value *ShadowOriginPtrs;
const DataLayout &DL = F.getParent()->getDataLayout();
int Size = DL.getTypeStoreSize(ShadowTy);
@@ -1523,6 +1707,42 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
return std::make_pair(ShadowPtr, OriginPtr);
}
+ /// Addr can be a ptr or <N x ptr>. In both cases ShadowTy is the shadow
+ /// type of a single pointee.
+ /// Returns <shadow_ptr, origin_ptr> or <<N x shadow_ptr>, <N x origin_ptr>>.
+ std::pair<Value *, Value *> getShadowOriginPtrKernel(Value *Addr,
+ IRBuilder<> &IRB,
+ Type *ShadowTy,
+ bool isStore) {
+ FixedVectorType *VectTy = dyn_cast<FixedVectorType>(Addr->getType());
+ if (!VectTy) {
+ assert(Addr->getType()->isPointerTy());
+ return getShadowOriginPtrKernelNoVec(Addr, IRB, ShadowTy, isStore);
+ }
+
+ // TODO: Support callbacks with vectors of addresses.
+ unsigned NumElements = VectTy->getNumElements();
+ Value *ShadowPtrs = ConstantInt::getNullValue(
+ FixedVectorType::get(ShadowTy->getPointerTo(), NumElements));
+ Value *OriginPtrs = nullptr;
+ if (MS.TrackOrigins)
+ OriginPtrs = ConstantInt::getNullValue(
+ FixedVectorType::get(MS.OriginTy->getPointerTo(), NumElements));
+ for (unsigned i = 0; i < NumElements; ++i) {
+ Value *OneAddr =
+ IRB.CreateExtractElement(Addr, ConstantInt::get(IRB.getInt32Ty(), i));
+ auto [ShadowPtr, OriginPtr] =
+ getShadowOriginPtrKernelNoVec(OneAddr, IRB, ShadowTy, isStore);
+
+ ShadowPtrs = IRB.CreateInsertElement(
+ ShadowPtrs, ShadowPtr, ConstantInt::get(IRB.getInt32Ty(), i));
+ if (MS.TrackOrigins)
+ OriginPtrs = IRB.CreateInsertElement(
+ OriginPtrs, OriginPtr, ConstantInt::get(IRB.getInt32Ty(), i));
+ }
+ return {ShadowPtrs, OriginPtrs};
+ }
+
std::pair<Value *, Value *> getShadowOriginPtr(Value *Addr, IRBuilder<> &IRB,
Type *ShadowTy,
MaybeAlign Alignment,
@@ -1535,8 +1755,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// Compute the shadow address for a given function argument.
///
/// Shadow = ParamTLS+ArgOffset.
- Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
- int ArgOffset) {
+ Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB, int ArgOffset) {
Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
if (ArgOffset)
Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
@@ -1545,8 +1764,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
/// Compute the origin address for a given function argument.
- Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
- int ArgOffset) {
+ Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB, int ArgOffset) {
if (!MS.TrackOrigins)
return nullptr;
Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
@@ -1559,8 +1777,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// Compute the shadow address for a retval.
Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
return IRB.CreatePointerCast(MS.RetvalTLS,
- PointerType::get(getShadowTy(A), 0),
- "_msret");
+ PointerType::get(getShadowTy(A), 0), "_msret");
}
/// Compute the origin address for a retval.
@@ -1577,7 +1794,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// Set Origin to be the origin value for V.
void setOrigin(Value *V, Value *Origin) {
- if (!MS.TrackOrigins) return;
+ if (!MS.TrackOrigins)
+ return;
assert(!OriginMap.count(V) && "Values may only have one origin");
LLVM_DEBUG(dbgs() << "ORIGIN: " << *V << " ==> " << *Origin << "\n");
OriginMap[V] = Origin;
@@ -1594,9 +1812,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
///
/// Clean shadow (all zeroes) means all bits of the value are defined
/// (initialized).
- Constant *getCleanShadow(Value *V) {
- return getCleanShadow(V->getType());
- }
+ Constant *getCleanShadow(Value *V) { return getCleanShadow(V->getType()); }
/// Create a dirty shadow of a given shadow type.
Constant *getPoisonedShadow(Type *ShadowTy) {
@@ -1626,9 +1842,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
/// Create a clean (zero) origin.
- Value *getCleanOrigin() {
- return Constant::getNullValue(MS.OriginTy);
- }
+ Value *getCleanOrigin() { return Constant::getNullValue(MS.OriginTy); }
/// Get the shadow value for a given Value.
///
@@ -1680,7 +1894,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// argument shadow to the underlying memory.
// Figure out maximal valid memcpy alignment.
const Align ArgAlign = DL.getValueOrABITypeAlignment(
- MaybeAlign(FArg.getParamAlignment()), FArg.getParamByValType());
+ FArg.getParamAlign(), FArg.getParamByValType());
Value *CpShadowPtr, *CpOriginPtr;
std::tie(CpShadowPtr, CpOriginPtr) =
getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
@@ -1721,7 +1935,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// Shadow over TLS
Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg), Base,
- kShadowTLSAlignment);
+ kShadowTLSAlignment);
if (MS.TrackOrigins) {
Value *OriginPtr =
getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
@@ -1749,9 +1963,10 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// Get the origin for a value.
Value *getOrigin(Value *V) {
- if (!MS.TrackOrigins) return nullptr;
- if (!PropagateShadow) return getCleanOrigin();
- if (isa<Constant>(V)) return getCleanOrigin();
+ if (!MS.TrackOrigins)
+ return nullptr;
+ if (!PropagateShadow || isa<Constant>(V) || isa<InlineAsm>(V))
+ return getCleanOrigin();
assert((isa<Instruction>(V) || isa<Argument>(V)) &&
"Unexpected value type in getOrigin()");
if (Instruction *I = dyn_cast<Instruction>(V)) {
@@ -1774,7 +1989,14 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// UMR warning in runtime if the shadow value is not 0.
void insertShadowCheck(Value *Shadow, Value *Origin, Instruction *OrigIns) {
assert(Shadow);
- if (!InsertChecks) return;
+ if (!InsertChecks)
+ return;
+
+ if (!DebugCounter::shouldExecute(DebugInsertCheck)) {
+ LLVM_DEBUG(dbgs() << "Skipping check of " << *Shadow << " before "
+ << *OrigIns << "\n");
+ return;
+ }
#ifndef NDEBUG
Type *ShadowTy = Shadow->getType();
assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy) ||
@@ -1795,11 +2017,13 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Value *Shadow, *Origin;
if (ClCheckConstantShadow) {
Shadow = getShadow(Val);
- if (!Shadow) return;
+ if (!Shadow)
+ return;
Origin = getOrigin(Val);
} else {
Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
- if (!Shadow) return;
+ if (!Shadow)
+ return;
Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
}
insertShadowCheck(Shadow, Origin, OrigIns);
@@ -1807,17 +2031,17 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
switch (a) {
- case AtomicOrdering::NotAtomic:
- return AtomicOrdering::NotAtomic;
- case AtomicOrdering::Unordered:
- case AtomicOrdering::Monotonic:
- case AtomicOrdering::Release:
- return AtomicOrdering::Release;
- case AtomicOrdering::Acquire:
- case AtomicOrdering::AcquireRelease:
- return AtomicOrdering::AcquireRelease;
- case AtomicOrdering::SequentiallyConsistent:
- return AtomicOrdering::SequentiallyConsistent;
+ case AtomicOrdering::NotAtomic:
+ return AtomicOrdering::NotAtomic;
+ case AtomicOrdering::Unordered:
+ case AtomicOrdering::Monotonic:
+ case AtomicOrdering::Release:
+ return AtomicOrdering::Release;
+ case AtomicOrdering::Acquire:
+ case AtomicOrdering::AcquireRelease:
+ return AtomicOrdering::AcquireRelease;
+ case AtomicOrdering::SequentiallyConsistent:
+ return AtomicOrdering::SequentiallyConsistent;
}
llvm_unreachable("Unknown ordering");
}
@@ -1837,22 +2061,22 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
(int)AtomicOrderingCABI::seq_cst;
return ConstantDataVector::get(IRB.getContext(),
- makeArrayRef(OrderingTable, NumOrderings));
+ ArrayRef(OrderingTable, NumOrderings));
}
AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
switch (a) {
- case AtomicOrdering::NotAtomic:
- return AtomicOrdering::NotAtomic;
- case AtomicOrdering::Unordered:
- case AtomicOrdering::Monotonic:
- case AtomicOrdering::Acquire:
- return AtomicOrdering::Acquire;
- case AtomicOrdering::Release:
- case AtomicOrdering::AcquireRelease:
- return AtomicOrdering::AcquireRelease;
- case AtomicOrdering::SequentiallyConsistent:
- return AtomicOrdering::SequentiallyConsistent;
+ case AtomicOrdering::NotAtomic:
+ return AtomicOrdering::NotAtomic;
+ case AtomicOrdering::Unordered:
+ case AtomicOrdering::Monotonic:
+ case AtomicOrdering::Acquire:
+ return AtomicOrdering::Acquire;
+ case AtomicOrdering::Release:
+ case AtomicOrdering::AcquireRelease:
+ return AtomicOrdering::AcquireRelease;
+ case AtomicOrdering::SequentiallyConsistent:
+ return AtomicOrdering::SequentiallyConsistent;
}
llvm_unreachable("Unknown ordering");
}
@@ -1872,7 +2096,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
(int)AtomicOrderingCABI::seq_cst;
return ConstantDataVector::get(IRB.getContext(),
- makeArrayRef(OrderingTable, NumOrderings));
+ ArrayRef(OrderingTable, NumOrderings));
}
// ------------------- Visitors.
@@ -1893,7 +2117,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void visitLoadInst(LoadInst &I) {
assert(I.getType()->isSized() && "Load type must have size");
assert(!I.getMetadata(LLVMContext::MD_nosanitize));
- IRBuilder<> IRB(I.getNextNode());
+ NextNodeIRBuilder IRB(&I);
Type *ShadowTy = getShadowTy(&I);
Value *Addr = I.getPointerOperand();
Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
@@ -1940,7 +2164,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
IRBuilder<> IRB(&I);
Value *Addr = I.getOperand(0);
Value *Val = I.getOperand(1);
- Value *ShadowPtr = getShadowOriginPtr(Addr, IRB, Val->getType(), Align(1),
+ Value *ShadowPtr = getShadowOriginPtr(Addr, IRB, getShadowTy(Val), Align(1),
/*isStore*/ true)
.first;
@@ -1974,22 +2198,26 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
insertShadowCheck(I.getOperand(1), &I);
IRBuilder<> IRB(&I);
setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
- "_msprop"));
+ "_msprop"));
setOrigin(&I, getOrigin(&I, 0));
}
void visitInsertElementInst(InsertElementInst &I) {
insertShadowCheck(I.getOperand(2), &I);
IRBuilder<> IRB(&I);
- setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
- I.getOperand(2), "_msprop"));
+ auto *Shadow0 = getShadow(&I, 0);
+ auto *Shadow1 = getShadow(&I, 1);
+ setShadow(&I, IRB.CreateInsertElement(Shadow0, Shadow1, I.getOperand(2),
+ "_msprop"));
setOriginForNaryOp(I);
}
void visitShuffleVectorInst(ShuffleVectorInst &I) {
IRBuilder<> IRB(&I);
- setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
- I.getShuffleMask(), "_msprop"));
+ auto *Shadow0 = getShadow(&I, 0);
+ auto *Shadow1 = getShadow(&I, 1);
+ setShadow(&I, IRB.CreateShuffleVector(Shadow0, Shadow1, I.getShuffleMask(),
+ "_msprop"));
setOriginForNaryOp(I);
}
@@ -2027,23 +2255,23 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void visitPtrToIntInst(PtrToIntInst &I) {
IRBuilder<> IRB(&I);
setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
- "_msprop_ptrtoint"));
+ "_msprop_ptrtoint"));
setOrigin(&I, getOrigin(&I, 0));
}
void visitIntToPtrInst(IntToPtrInst &I) {
IRBuilder<> IRB(&I);
setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
- "_msprop_inttoptr"));
+ "_msprop_inttoptr"));
setOrigin(&I, getOrigin(&I, 0));
}
- void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
- void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
- void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
- void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
- void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
- void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }
+ void visitFPToSIInst(CastInst &I) { handleShadowOr(I); }
+ void visitFPToUIInst(CastInst &I) { handleShadowOr(I); }
+ void visitSIToFPInst(CastInst &I) { handleShadowOr(I); }
+ void visitUIToFPInst(CastInst &I) { handleShadowOr(I); }
+ void visitFPExtInst(CastInst &I) { handleShadowOr(I); }
+ void visitFPTruncInst(CastInst &I) { handleShadowOr(I); }
/// Propagate shadow for bitwise AND.
///
@@ -2109,8 +2337,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// not entirely initialized. If there is more than one such argument, the
/// rightmost of them is picked. It does not matter which one is picked if all
/// arguments are initialized.
- template <bool CombineShadow>
- class Combiner {
+ template <bool CombineShadow> class Combiner {
Value *Shadow = nullptr;
Value *Origin = nullptr;
IRBuilder<> &IRB;
@@ -2177,7 +2404,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// Propagate origin for arbitrary operation.
void setOriginForNaryOp(Instruction &I) {
- if (!MS.TrackOrigins) return;
+ if (!MS.TrackOrigins)
+ return;
IRBuilder<> IRB(&I);
OriginCombiner OC(this, IRB);
for (Use &Op : I.operands())
@@ -2211,7 +2439,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
return IRB.CreateIntCast(V, dstTy, Signed);
Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
Value *V2 =
- IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), Signed);
+ IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), Signed);
return IRB.CreateBitCast(V2, dstTy);
// TODO: handle struct types.
}
@@ -2347,10 +2575,10 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// Si = !(C & ~Sc) && Sc
Value *Zero = Constant::getNullValue(Sc->getType());
Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
- Value *Si =
- IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
- IRB.CreateICmpEQ(
- IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
+ Value *LHS = IRB.CreateICmpNE(Sc, Zero);
+ Value *RHS =
+ IRB.CreateICmpEQ(IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero);
+ Value *Si = IRB.CreateAnd(LHS, RHS);
Si->setName("_msprop_icmp");
setShadow(&I, Si);
setOriginForNaryOp(I);
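
The "_msprop_icmp" formula above can be checked on scalars. A minimal model (illustrative C++, not LLVM code), assuming C = A ^ B and Sc = Sa | Sb as in the surrounding code: the result of A == B is poisoned exactly when flipping only the undefined bits of C could change the outcome.

#include <cassert>
#include <cstdint>

// Si = !(C & ~Sc) && Sc: poisoned iff some bit of C is undefined and all
// *defined* bits of C are zero.
static bool eqResultPoisoned(uint8_t C, uint8_t Sc) {
  return (Sc != 0) && ((C & ~Sc) == 0);
}

int main() {
  // A defined bit of C is set -> A != B regardless of the undefined bits.
  assert(!eqResultPoisoned(/*C=*/0x10, /*Sc=*/0x01));
  // All defined bits are zero but one bit is undefined -> could go either way.
  assert(eqResultPoisoned(/*C=*/0x00, /*Sc=*/0x01));
  // Everything defined -> fully initialized result.
  assert(!eqResultPoisoned(/*C=*/0x00, /*Sc=*/0x00));
}
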
@@ -2365,8 +2593,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
      // Maximize the undefined shadow bit, minimize other undefined bits.
- return
- IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)), SaSignBit);
+ return IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)),
+ SaSignBit);
} else {
// Minimize undefined bits.
return IRB.CreateAnd(A, IRB.CreateNot(Sa));
@@ -2376,14 +2604,14 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// Build the highest possible value of V, taking into account V's
/// uninitialized bits.
Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
- bool isSigned) {
+ bool isSigned) {
if (isSigned) {
// Split shadow into sign bit and other bits.
Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
      // Minimize the undefined shadow bit, maximize other undefined bits.
- return
- IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)), SaOtherBits);
+ return IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)),
+ SaOtherBits);
} else {
// Maximize undefined bits.
return IRB.CreateOr(A, Sa);
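
A scalar model of these bounds for the unsigned case (illustrative C++, not LLVM code): clearing the undefined bits yields the lowest possible value, setting them yields the highest. A relational compare is provably initialized only when both extremes agree; the signed case differs only in that the sign bit is treated in the opposite direction, as in the code above.

#include <cassert>
#include <cstdint>

static uint8_t lowestU(uint8_t A, uint8_t Sa) { return A & ~Sa; }  // zeros
static uint8_t highestU(uint8_t A, uint8_t Sa) { return A | Sa; }  // ones

int main() {
  uint8_t A = 0b1010'1100, Sa = 0b0000'1111; // low nibble is undefined
  assert(lowestU(A, Sa) == 0b1010'0000);
  assert(highestU(A, Sa) == 0b1010'1111);
  // E.g. (lowestU > K) == (highestU > K) means the compare result is
  // independent of the undefined bits and therefore initialized.
}
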
@@ -2485,9 +2713,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
handleShadowOr(I);
}
- void visitFCmpInst(FCmpInst &I) {
- handleShadowOr(I);
- }
+ void visitFCmpInst(FCmpInst &I) { handleShadowOr(I); }
void handleShift(BinaryOperator &I) {
IRBuilder<> IRB(&I);
@@ -2495,8 +2721,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// Otherwise perform the same shift on S1.
Value *S1 = getShadow(&I, 0);
Value *S2 = getShadow(&I, 1);
- Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
- S2->getType());
+ Value *S2Conv =
+ IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)), S2->getType());
Value *V2 = I.getOperand(1);
Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
setShadow(&I, IRB.CreateOr(Shift, S2Conv));
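
A scalar model of the shift rule above (illustrative C++): the shadow of the first operand is shifted by the concrete shift amount, and if the shift amount itself carries any poison, the whole result is drowned via the sign-extended "S2 != 0" mask.

#include <cassert>
#include <cstdint>

static uint8_t shlShadow(uint8_t S1, uint8_t V2, uint8_t S2) {
  uint8_t Shift = (uint8_t)(S1 << V2);     // same shift applied to the shadow
  uint8_t S2Conv = S2 != 0 ? 0xFF : 0x00;  // sext(icmp ne S2, 0)
  return Shift | S2Conv;
}

int main() {
  assert(shlShadow(0b0000'0001, 3, 0) == 0b0000'1000); // shadow shifts along
  assert(shlShadow(0, 3, /*S2=*/1) == 0xFF);           // unknown shift amount
}
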
@@ -2545,10 +2771,20 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
I.eraseFromParent();
}
- // Similar to memmove: avoid copying shadow twice.
- // This is somewhat unfortunate as it may slowdown small constant memcpys.
- // FIXME: consider doing manual inline for small constant sizes and proper
- // alignment.
+ /// Instrument memcpy
+ ///
+ /// Similar to memmove: avoid copying shadow twice. This is somewhat
+  /// unfortunate as it may slow down small constant memcpys.
+ /// FIXME: consider doing manual inline for small constant sizes and proper
+ /// alignment.
+ ///
+ /// Note: This also handles memcpy.inline, which promises no calls to external
+ /// functions as an optimization. However, with instrumentation enabled this
+ /// is difficult to promise; additionally, we know that the MSan runtime
+ /// exists and provides __msan_memcpy(). Therefore, we assume that with
+ /// instrumentation it's safe to turn memcpy.inline into a call to
+ /// __msan_memcpy(). Should this be wrong, such as when implementing memcpy()
+ /// itself, instrumentation should be disabled with the no_sanitize attribute.
void visitMemCpyInst(MemCpyInst &I) {
getShadow(I.getArgOperand(1)); // Ensure shadow initialized
IRBuilder<> IRB(&I);
@@ -2571,13 +2807,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
I.eraseFromParent();
}
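
A toy model of what the __msan_memcpy rewrite must achieve (hypothetical parallel arrays, not the real runtime's shadow mapping): the copy has to move the shadow along with the data, so that poison travels with the bytes instead of being reported at the memcpy itself.

#include <cassert>
#include <cstddef>
#include <cstring>

static unsigned char Mem[32], Shadow[32]; // Shadow[i] != 0 -> Mem[i] poisoned

static void toy_msan_memcpy(size_t Dst, size_t Src, size_t N) {
  std::memmove(&Shadow[Dst], &Shadow[Src], N); // propagate initializedness
  std::memmove(&Mem[Dst], &Mem[Src], N);       // then copy the payload
}

int main() {
  Shadow[1] = 0xFF;            // byte 1 is uninitialized
  toy_msan_memcpy(16, 0, 4);
  assert(Shadow[17] == 0xFF);  // ...and so is its copy
  assert(Shadow[16] == 0 && Shadow[18] == 0);
}
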
- void visitVAStartInst(VAStartInst &I) {
- VAHelper->visitVAStartInst(I);
- }
+ void visitVAStartInst(VAStartInst &I) { VAHelper->visitVAStartInst(I); }
- void visitVACopyInst(VACopyInst &I) {
- VAHelper->visitVACopyInst(I);
- }
+ void visitVACopyInst(VACopyInst &I) { VAHelper->visitVACopyInst(I); }
/// Handle vector store-like intrinsics.
///
@@ -2585,7 +2817,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// has 1 pointer argument and 1 vector argument, returns void.
bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
IRBuilder<> IRB(&I);
- Value* Addr = I.getArgOperand(0);
+ Value *Addr = I.getArgOperand(0);
Value *Shadow = getShadow(&I, 1);
Value *ShadowPtr, *OriginPtr;
@@ -2599,7 +2831,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
insertShadowCheck(Addr, &I);
// FIXME: factor out common code from materializeStores
- if (MS.TrackOrigins) IRB.CreateStore(getOrigin(&I, 1), OriginPtr);
+ if (MS.TrackOrigins)
+ IRB.CreateStore(getOrigin(&I, 1), OriginPtr);
return true;
}
@@ -2645,8 +2878,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// Caller guarantees that this intrinsic does not access memory.
bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
Type *RetTy = I.getType();
- if (!(RetTy->isIntOrIntVectorTy() ||
- RetTy->isFPOrFPVectorTy() ||
+ if (!(RetTy->isIntOrIntVectorTy() || RetTy->isFPOrFPVectorTy() ||
RetTy->isX86_MMXTy()))
return false;
@@ -2681,19 +2913,15 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (NumArgOperands == 0)
return false;
- if (NumArgOperands == 2 &&
- I.getArgOperand(0)->getType()->isPointerTy() &&
+ if (NumArgOperands == 2 && I.getArgOperand(0)->getType()->isPointerTy() &&
I.getArgOperand(1)->getType()->isVectorTy() &&
- I.getType()->isVoidTy() &&
- !I.onlyReadsMemory()) {
+ I.getType()->isVoidTy() && !I.onlyReadsMemory()) {
// This looks like a vector store.
return handleVectorStoreIntrinsic(I);
}
- if (NumArgOperands == 1 &&
- I.getArgOperand(0)->getType()->isPointerTy() &&
- I.getType()->isVectorTy() &&
- I.onlyReadsMemory()) {
+ if (NumArgOperands == 1 && I.getArgOperand(0)->getType()->isPointerTy() &&
+ I.getType()->isVectorTy() && I.onlyReadsMemory()) {
// This looks like a vector load.
return handleVectorLoadIntrinsic(I);
}
@@ -2725,11 +2953,32 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Value *Op = I.getArgOperand(0);
Type *OpType = Op->getType();
Function *BswapFunc = Intrinsic::getDeclaration(
- F.getParent(), Intrinsic::bswap, makeArrayRef(&OpType, 1));
+ F.getParent(), Intrinsic::bswap, ArrayRef(&OpType, 1));
setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
setOrigin(&I, getOrigin(Op));
}
+ void handleCountZeroes(IntrinsicInst &I) {
+ IRBuilder<> IRB(&I);
+ Value *Src = I.getArgOperand(0);
+
+    // Set the output shadow based on the input shadow.
+ Value *BoolShadow = IRB.CreateIsNotNull(getShadow(Src), "_mscz_bs");
+
+    // If zero poison is requested, mix it into the shadow.
+ Constant *IsZeroPoison = cast<Constant>(I.getOperand(1));
+ if (!IsZeroPoison->isZeroValue()) {
+ Value *BoolZeroPoison = IRB.CreateIsNull(Src, "_mscz_bzp");
+ BoolShadow = IRB.CreateOr(BoolShadow, BoolZeroPoison, "_mscz_bs");
+ }
+
+ Value *OutputShadow =
+ IRB.CreateSExt(BoolShadow, getShadowTy(Src), "_mscz_os");
+
+ setShadow(&I, OutputShadow);
+ setOriginForNaryOp(I);
+ }
+
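
A scalar model of the ctlz/cttz rule above (illustrative C++): any poisoned input bit can change the count, so the result shadow is all-or-nothing; with is_zero_poison, a zero input poisons the result as well, since the intrinsic's result is undefined there.

#include <cassert>
#include <cstdint>

static uint32_t countZeroesShadow(uint32_t Src, uint32_t SrcShadow,
                                  bool IsZeroPoison) {
  bool BoolShadow = SrcShadow != 0;  // _mscz_bs
  if (IsZeroPoison)
    BoolShadow |= (Src == 0);        // _mscz_bzp mixed in
  return BoolShadow ? ~0u : 0u;      // sext -> _mscz_os
}

int main() {
  assert(countZeroesShadow(8, 0, true) == 0);    // clean, non-zero input
  assert(countZeroesShadow(8, 4, false) == ~0u); // poisoned input bit
  assert(countZeroesShadow(0, 0, true) == ~0u);  // zero input + zero-poison
}
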
// Instrument vector convert intrinsic.
//
// This function instruments intrinsics like cvtsi2ss:
@@ -2873,30 +3122,30 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// intrinsic.
Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
switch (id) {
- case Intrinsic::x86_sse2_packsswb_128:
- case Intrinsic::x86_sse2_packuswb_128:
- return Intrinsic::x86_sse2_packsswb_128;
+ case Intrinsic::x86_sse2_packsswb_128:
+ case Intrinsic::x86_sse2_packuswb_128:
+ return Intrinsic::x86_sse2_packsswb_128;
- case Intrinsic::x86_sse2_packssdw_128:
- case Intrinsic::x86_sse41_packusdw:
- return Intrinsic::x86_sse2_packssdw_128;
+ case Intrinsic::x86_sse2_packssdw_128:
+ case Intrinsic::x86_sse41_packusdw:
+ return Intrinsic::x86_sse2_packssdw_128;
- case Intrinsic::x86_avx2_packsswb:
- case Intrinsic::x86_avx2_packuswb:
- return Intrinsic::x86_avx2_packsswb;
+ case Intrinsic::x86_avx2_packsswb:
+ case Intrinsic::x86_avx2_packuswb:
+ return Intrinsic::x86_avx2_packsswb;
- case Intrinsic::x86_avx2_packssdw:
- case Intrinsic::x86_avx2_packusdw:
- return Intrinsic::x86_avx2_packssdw;
+ case Intrinsic::x86_avx2_packssdw:
+ case Intrinsic::x86_avx2_packusdw:
+ return Intrinsic::x86_avx2_packssdw;
- case Intrinsic::x86_mmx_packsswb:
- case Intrinsic::x86_mmx_packuswb:
- return Intrinsic::x86_mmx_packsswb;
+ case Intrinsic::x86_mmx_packsswb:
+ case Intrinsic::x86_mmx_packuswb:
+ return Intrinsic::x86_mmx_packsswb;
- case Intrinsic::x86_mmx_packssdw:
- return Intrinsic::x86_mmx_packssdw;
- default:
- llvm_unreachable("unexpected intrinsic id");
+ case Intrinsic::x86_mmx_packssdw:
+ return Intrinsic::x86_mmx_packssdw;
+ default:
+ llvm_unreachable("unexpected intrinsic id");
}
}
@@ -2923,10 +3172,10 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
S1 = IRB.CreateBitCast(S1, T);
S2 = IRB.CreateBitCast(S2, T);
}
- Value *S1_ext = IRB.CreateSExt(
- IRB.CreateICmpNE(S1, Constant::getNullValue(T)), T);
- Value *S2_ext = IRB.CreateSExt(
- IRB.CreateICmpNE(S2, Constant::getNullValue(T)), T);
+ Value *S1_ext =
+ IRB.CreateSExt(IRB.CreateICmpNE(S1, Constant::getNullValue(T)), T);
+ Value *S2_ext =
+ IRB.CreateSExt(IRB.CreateICmpNE(S2, Constant::getNullValue(T)), T);
if (isX86_MMX) {
Type *X86_MMXTy = Type::getX86_MMXTy(*MS.C);
S1_ext = IRB.CreateBitCast(S1_ext, X86_MMXTy);
@@ -2938,7 +3187,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Value *S =
IRB.CreateCall(ShadowFn, {S1_ext, S2_ext}, "_msprop_vector_pack");
- if (isX86_MMX) S = IRB.CreateBitCast(S, getShadowTy(&I));
+ if (isX86_MMX)
+ S = IRB.CreateBitCast(S, getShadowTy(&I));
setShadow(&I, S);
setOriginForNaryOp(I);
}
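
One way to see why shadows are always packed with the *signed* pack variant (a standalone sketch of the saturation math, not the pass's code): each element's shadow is first widened to 0 or -1 via sext of "shadow != 0". Signed saturation keeps -1 as -1 (all bits set), while unsigned saturation would clamp -1 to 0 and silently drop the poison.

#include <cassert>
#include <cstdint>

static int8_t packSS(int16_t V) {   // packsswb-style signed saturation
  return V > 127 ? 127 : (V < -128 ? -128 : (int8_t)V);
}
static uint8_t packUS(int16_t V) {  // packuswb-style unsigned saturation
  return V > 255 ? 255 : (V < 0 ? 0 : (uint8_t)V);
}

int main() {
  int16_t ElemShadow = -1;                     // fully poisoned element
  assert((uint8_t)packSS(ElemShadow) == 0xFF); // poison survives
  assert(packUS(ElemShadow) == 0x00);          // poison would be lost
}
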
@@ -2952,7 +3202,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
ResTy->getScalarSizeInBits() - SignificantBitsPerResultElement;
IRBuilder<> IRB(&I);
- Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
+ auto *Shadow0 = getShadow(&I, 0);
+ auto *Shadow1 = getShadow(&I, 1);
+ Value *S = IRB.CreateOr(Shadow0, Shadow1);
S = IRB.CreateBitCast(S, ResTy);
S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
ResTy);
@@ -2968,7 +3220,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
Type *ResTy = isX86_MMX ? getMMXVectorTy(EltSizeInBits * 2) : I.getType();
IRBuilder<> IRB(&I);
- Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
+ auto *Shadow0 = getShadow(&I, 0);
+ auto *Shadow1 = getShadow(&I, 1);
+ Value *S = IRB.CreateOr(Shadow0, Shadow1);
S = IRB.CreateBitCast(S, ResTy);
S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
ResTy);
@@ -2983,7 +3237,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void handleVectorComparePackedIntrinsic(IntrinsicInst &I) {
IRBuilder<> IRB(&I);
Type *ResTy = getShadowTy(&I);
- Value *S0 = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
+ auto *Shadow0 = getShadow(&I, 0);
+ auto *Shadow1 = getShadow(&I, 1);
+ Value *S0 = IRB.CreateOr(Shadow0, Shadow1);
Value *S = IRB.CreateSExt(
IRB.CreateICmpNE(S0, Constant::getNullValue(ResTy)), ResTy);
setShadow(&I, S);
@@ -2995,7 +3251,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// element of a vector, and comi* which return the result as i32.
void handleVectorCompareScalarIntrinsic(IntrinsicInst &I) {
IRBuilder<> IRB(&I);
- Value *S0 = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
+ auto *Shadow0 = getShadow(&I, 0);
+ auto *Shadow1 = getShadow(&I, 1);
+ Value *S0 = IRB.CreateOr(Shadow0, Shadow1);
Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&I));
setShadow(&I, S);
setOriginForNaryOp(I);
@@ -3047,7 +3305,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void handleStmxcsr(IntrinsicInst &I) {
IRBuilder<> IRB(&I);
- Value* Addr = I.getArgOperand(0);
+ Value *Addr = I.getArgOperand(0);
Type *Ty = IRB.getInt32Ty();
Value *ShadowPtr =
getShadowOriginPtr(Addr, IRB, Ty, Align(1), /*isStore*/ true).first;
@@ -3060,7 +3318,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
void handleLdmxcsr(IntrinsicInst &I) {
- if (!InsertChecks) return;
+ if (!InsertChecks)
+ return;
IRBuilder<> IRB(&I);
Value *Addr = I.getArgOperand(0);
@@ -3079,93 +3338,201 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
insertShadowCheck(Shadow, Origin, &I);
}
+ void handleMaskedExpandLoad(IntrinsicInst &I) {
+ IRBuilder<> IRB(&I);
+ Value *Ptr = I.getArgOperand(0);
+ Value *Mask = I.getArgOperand(1);
+ Value *PassThru = I.getArgOperand(2);
+
+ if (ClCheckAccessAddress) {
+ insertShadowCheck(Ptr, &I);
+ insertShadowCheck(Mask, &I);
+ }
+
+ if (!PropagateShadow) {
+ setShadow(&I, getCleanShadow(&I));
+ setOrigin(&I, getCleanOrigin());
+ return;
+ }
+
+ Type *ShadowTy = getShadowTy(&I);
+ Type *ElementShadowTy = cast<FixedVectorType>(ShadowTy)->getElementType();
+ auto [ShadowPtr, OriginPtr] =
+ getShadowOriginPtr(Ptr, IRB, ElementShadowTy, {}, /*isStore*/ false);
+
+ Value *Shadow = IRB.CreateMaskedExpandLoad(
+ ShadowTy, ShadowPtr, Mask, getShadow(PassThru), "_msmaskedexpload");
+
+ setShadow(&I, Shadow);
+
+ // TODO: Store origins.
+ setOrigin(&I, getCleanOrigin());
+ }
+
+ void handleMaskedCompressStore(IntrinsicInst &I) {
+ IRBuilder<> IRB(&I);
+ Value *Values = I.getArgOperand(0);
+ Value *Ptr = I.getArgOperand(1);
+ Value *Mask = I.getArgOperand(2);
+
+ if (ClCheckAccessAddress) {
+ insertShadowCheck(Ptr, &I);
+ insertShadowCheck(Mask, &I);
+ }
+
+ Value *Shadow = getShadow(Values);
+ Type *ElementShadowTy =
+ getShadowTy(cast<FixedVectorType>(Values->getType())->getElementType());
+ auto [ShadowPtr, OriginPtrs] =
+ getShadowOriginPtr(Ptr, IRB, ElementShadowTy, {}, /*isStore*/ true);
+
+ IRB.CreateMaskedCompressStore(Shadow, ShadowPtr, Mask);
+
+ // TODO: Store origins.
+ }
+
+ void handleMaskedGather(IntrinsicInst &I) {
+ IRBuilder<> IRB(&I);
+ Value *Ptrs = I.getArgOperand(0);
+ const Align Alignment(
+ cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
+ Value *Mask = I.getArgOperand(2);
+ Value *PassThru = I.getArgOperand(3);
+
+ Type *PtrsShadowTy = getShadowTy(Ptrs);
+ if (ClCheckAccessAddress) {
+ insertShadowCheck(Mask, &I);
+ Value *MaskedPtrShadow = IRB.CreateSelect(
+ Mask, getShadow(Ptrs), Constant::getNullValue((PtrsShadowTy)),
+ "_msmaskedptrs");
+ insertShadowCheck(MaskedPtrShadow, getOrigin(Ptrs), &I);
+ }
+
+ if (!PropagateShadow) {
+ setShadow(&I, getCleanShadow(&I));
+ setOrigin(&I, getCleanOrigin());
+ return;
+ }
+
+ Type *ShadowTy = getShadowTy(&I);
+ Type *ElementShadowTy = cast<FixedVectorType>(ShadowTy)->getElementType();
+ auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
+ Ptrs, IRB, ElementShadowTy, Alignment, /*isStore*/ false);
+
+ Value *Shadow =
+ IRB.CreateMaskedGather(ShadowTy, ShadowPtrs, Alignment, Mask,
+ getShadow(PassThru), "_msmaskedgather");
+
+ setShadow(&I, Shadow);
+
+ // TODO: Store origins.
+ setOrigin(&I, getCleanOrigin());
+ }
+
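
A per-lane model of the "_msmaskedptrs" check above (illustrative C++): the pointers of disabled lanes may legitimately be garbage, so their shadow is masked to zero with a select before the access check; only enabled lanes can trigger a report.

#include <cassert>
#include <cstdint>

static bool gatherPtrCheckFires(const bool Mask[4],
                                const uint64_t PtrShadow[4]) {
  for (int i = 0; i < 4; ++i)
    if (Mask[i] && PtrShadow[i] != 0) // select(Mask, Shadow, 0) != 0
      return true;
  return false;
}

int main() {
  bool Mask[4] = {true, false, true, false};
  uint64_t Shadow[4] = {0, ~0ull, 0, ~0ull}; // only disabled lanes poisoned
  assert(!gatherPtrCheckFires(Mask, Shadow));
  Shadow[0] = 1;                             // an enabled lane is poisoned
  assert(gatherPtrCheckFires(Mask, Shadow));
}
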
+ void handleMaskedScatter(IntrinsicInst &I) {
+ IRBuilder<> IRB(&I);
+ Value *Values = I.getArgOperand(0);
+ Value *Ptrs = I.getArgOperand(1);
+ const Align Alignment(
+ cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
+ Value *Mask = I.getArgOperand(3);
+
+ Type *PtrsShadowTy = getShadowTy(Ptrs);
+ if (ClCheckAccessAddress) {
+ insertShadowCheck(Mask, &I);
+ Value *MaskedPtrShadow = IRB.CreateSelect(
+ Mask, getShadow(Ptrs), Constant::getNullValue((PtrsShadowTy)),
+ "_msmaskedptrs");
+ insertShadowCheck(MaskedPtrShadow, getOrigin(Ptrs), &I);
+ }
+
+ Value *Shadow = getShadow(Values);
+ Type *ElementShadowTy =
+ getShadowTy(cast<FixedVectorType>(Values->getType())->getElementType());
+ auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
+ Ptrs, IRB, ElementShadowTy, Alignment, /*isStore*/ true);
+
+ IRB.CreateMaskedScatter(Shadow, ShadowPtrs, Alignment, Mask);
+
+    // TODO: Store origins.
+ }
+
void handleMaskedStore(IntrinsicInst &I) {
IRBuilder<> IRB(&I);
Value *V = I.getArgOperand(0);
- Value *Addr = I.getArgOperand(1);
+ Value *Ptr = I.getArgOperand(1);
const Align Alignment(
cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
Value *Mask = I.getArgOperand(3);
Value *Shadow = getShadow(V);
- Value *ShadowPtr;
- Value *OriginPtr;
- std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
- Addr, IRB, Shadow->getType(), Alignment, /*isStore*/ true);
-
if (ClCheckAccessAddress) {
- insertShadowCheck(Addr, &I);
- // Uninitialized mask is kind of like uninitialized address, but not as
- // scary.
+ insertShadowCheck(Ptr, &I);
insertShadowCheck(Mask, &I);
}
+ Value *ShadowPtr;
+ Value *OriginPtr;
+ std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
+ Ptr, IRB, Shadow->getType(), Alignment, /*isStore*/ true);
+
IRB.CreateMaskedStore(Shadow, ShadowPtr, Alignment, Mask);
- if (MS.TrackOrigins) {
- auto &DL = F.getParent()->getDataLayout();
- paintOrigin(IRB, getOrigin(V), OriginPtr,
- DL.getTypeStoreSize(Shadow->getType()),
- std::max(Alignment, kMinOriginAlignment));
- }
+ if (!MS.TrackOrigins)
+ return;
+
+ auto &DL = F.getParent()->getDataLayout();
+ paintOrigin(IRB, getOrigin(V), OriginPtr,
+ DL.getTypeStoreSize(Shadow->getType()),
+ std::max(Alignment, kMinOriginAlignment));
}
- bool handleMaskedLoad(IntrinsicInst &I) {
+ void handleMaskedLoad(IntrinsicInst &I) {
IRBuilder<> IRB(&I);
- Value *Addr = I.getArgOperand(0);
+ Value *Ptr = I.getArgOperand(0);
const Align Alignment(
cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
Value *Mask = I.getArgOperand(2);
Value *PassThru = I.getArgOperand(3);
- Type *ShadowTy = getShadowTy(&I);
- Value *ShadowPtr, *OriginPtr;
- if (PropagateShadow) {
- std::tie(ShadowPtr, OriginPtr) =
- getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
- setShadow(&I, IRB.CreateMaskedLoad(ShadowTy, ShadowPtr, Alignment, Mask,
- getShadow(PassThru), "_msmaskedld"));
- } else {
- setShadow(&I, getCleanShadow(&I));
- }
-
if (ClCheckAccessAddress) {
- insertShadowCheck(Addr, &I);
+ insertShadowCheck(Ptr, &I);
insertShadowCheck(Mask, &I);
}
- if (MS.TrackOrigins) {
- if (PropagateShadow) {
- // Choose between PassThru's and the loaded value's origins.
- Value *MaskedPassThruShadow = IRB.CreateAnd(
- getShadow(PassThru), IRB.CreateSExt(IRB.CreateNeg(Mask), ShadowTy));
-
- Value *Acc = IRB.CreateExtractElement(
- MaskedPassThruShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
- for (int i = 1, N = cast<FixedVectorType>(PassThru->getType())
- ->getNumElements();
- i < N; ++i) {
- Value *More = IRB.CreateExtractElement(
- MaskedPassThruShadow, ConstantInt::get(IRB.getInt32Ty(), i));
- Acc = IRB.CreateOr(Acc, More);
- }
+ if (!PropagateShadow) {
+ setShadow(&I, getCleanShadow(&I));
+ setOrigin(&I, getCleanOrigin());
+ return;
+ }
- Value *Origin = IRB.CreateSelect(
- IRB.CreateICmpNE(Acc, Constant::getNullValue(Acc->getType())),
- getOrigin(PassThru), IRB.CreateLoad(MS.OriginTy, OriginPtr));
+ Type *ShadowTy = getShadowTy(&I);
+ Value *ShadowPtr, *OriginPtr;
+ std::tie(ShadowPtr, OriginPtr) =
+ getShadowOriginPtr(Ptr, IRB, ShadowTy, Alignment, /*isStore*/ false);
+ setShadow(&I, IRB.CreateMaskedLoad(ShadowTy, ShadowPtr, Alignment, Mask,
+ getShadow(PassThru), "_msmaskedld"));
- setOrigin(&I, Origin);
- } else {
- setOrigin(&I, getCleanOrigin());
- }
- }
- return true;
+ if (!MS.TrackOrigins)
+ return;
+
+ // Choose between PassThru's and the loaded value's origins.
+ Value *MaskedPassThruShadow = IRB.CreateAnd(
+ getShadow(PassThru), IRB.CreateSExt(IRB.CreateNeg(Mask), ShadowTy));
+
+ Value *ConvertedShadow = convertShadowToScalar(MaskedPassThruShadow, IRB);
+ Value *NotNull = convertToBool(ConvertedShadow, IRB, "_mscmp");
+
+ Value *PtrOrigin = IRB.CreateLoad(MS.OriginTy, OriginPtr);
+ Value *Origin = IRB.CreateSelect(NotNull, getOrigin(PassThru), PtrOrigin);
+
+ setOrigin(&I, Origin);
}
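
A whole-value model of the origin choice above (illustrative C++): the result of a masked load mixes loaded lanes with PassThru lanes, but an origin is a single value, so one of the two candidates must be picked. If the masked PassThru shadow contributes any poison, PassThru's origin wins; otherwise the origin stored next to the shadow memory is used.

#include <cassert>
#include <cstdint>

static uint32_t chooseOrigin(uint64_t MaskedPassThruShadow,
                             uint32_t PassThruOrigin, uint32_t MemOrigin) {
  return MaskedPassThruShadow != 0 ? PassThruOrigin : MemOrigin;
}

int main() {
  assert(chooseOrigin(0, 7, 9) == 9);      // poison (if any) came from memory
  assert(chooseOrigin(~0ull, 7, 9) == 7);  // PassThru carried the poison
}
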
// Instrument BMI / BMI2 intrinsics.
// All of these intrinsics are Z = I(X, Y)
- // where the types of all operands and the result match, and are either i32 or i64.
- // The following instrumentation happens to work for all of them:
+ // where the types of all operands and the result match, and are either i32 or
+ // i64. The following instrumentation happens to work for all of them:
// Sz = I(Sx, Y) | (sext (Sy != 0))
void handleBmiIntrinsic(IntrinsicInst &I) {
IRBuilder<> IRB(&I);
@@ -3234,6 +3601,19 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
setOriginForNaryOp(I);
}
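
The BMI rule quoted above, Sz = I(Sx, Y) | (sext (Sy != 0)), on scalars (illustrative C++; a shift stands in for the BMI operation I): the same operation is run on X's shadow with the concrete Y, and the whole result is poisoned if Y itself is not fully initialized.

#include <cassert>
#include <cstdint>

static uint32_t bmiShadow(uint32_t (*I)(uint32_t, uint32_t), uint32_t Sx,
                          uint32_t Y, uint32_t Sy) {
  return I(Sx, Y) | (Sy != 0 ? ~0u : 0u);
}

static uint32_t shiftLeft(uint32_t X, uint32_t Y) { return X << (Y & 31); }

int main() {
  assert(bmiShadow(shiftLeft, 0b1, 4, 0) == 0b10000); // shadow follows the op
  assert(bmiShadow(shiftLeft, 0, 4, 1) == ~0u);       // poisoned Y
}
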
+ void handleVtestIntrinsic(IntrinsicInst &I) {
+ IRBuilder<> IRB(&I);
+ Value *Shadow0 = getShadow(&I, 0);
+ Value *Shadow1 = getShadow(&I, 1);
+ Value *Or = IRB.CreateOr(Shadow0, Shadow1);
+ Value *NZ = IRB.CreateICmpNE(Or, Constant::getNullValue(Or->getType()));
+ Value *Scalar = convertShadowToScalar(NZ, IRB);
+ Value *Shadow = IRB.CreateZExt(Scalar, getShadowTy(&I));
+
+ setShadow(&I, Shadow);
+ setOriginForNaryOp(I);
+ }
+
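
A model of the vtest/ptest rule above (illustrative C++): the i32 result (always 0 or 1) depends on every lane of both operands, so any poisoned bit in either operand poisons the result. Marking only the low bit of the i32 shadow, via the zext, is enough precisely because the intrinsic never produces anything but 0 or 1.

#include <cassert>
#include <cstdint>

static uint32_t vtestShadow(uint64_t Shadow0, uint64_t Shadow1) {
  bool AnyPoison = (Shadow0 | Shadow1) != 0;
  return AnyPoison ? 1u : 0u; // zext of the combined "is poisoned" bit
}

int main() {
  assert(vtestShadow(0, 0) == 0);
  assert(vtestShadow(0x10, 0) == 1);
}
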
void handleBinarySdSsIntrinsic(IntrinsicInst &I) {
IRBuilder<> IRB(&I);
unsigned Width =
@@ -3280,6 +3660,22 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
case Intrinsic::bswap:
handleBswap(I);
break;
+ case Intrinsic::ctlz:
+ case Intrinsic::cttz:
+ handleCountZeroes(I);
+ break;
+ case Intrinsic::masked_compressstore:
+ handleMaskedCompressStore(I);
+ break;
+ case Intrinsic::masked_expandload:
+ handleMaskedExpandLoad(I);
+ break;
+ case Intrinsic::masked_gather:
+ handleMaskedGather(I);
+ break;
+ case Intrinsic::masked_scatter:
+ handleMaskedScatter(I);
+ break;
case Intrinsic::masked_store:
handleMaskedStore(I);
break;
@@ -3495,11 +3891,10 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
handleVectorCompareScalarIntrinsic(I);
break;
- case Intrinsic::x86_sse_cmp_ps:
+ case Intrinsic::x86_avx_cmp_pd_256:
+ case Intrinsic::x86_avx_cmp_ps_256:
case Intrinsic::x86_sse2_cmp_pd:
- // FIXME: For x86_avx_cmp_pd_256 and x86_avx_cmp_ps_256 this function
- // generates reasonably looking IR that fails in the backend with "Do not
- // know how to split the result of this operator!".
+ case Intrinsic::x86_sse_cmp_ps:
handleVectorComparePackedIntrinsic(I);
break;
@@ -3531,6 +3926,27 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
handleBinarySdSsIntrinsic(I);
break;
+ case Intrinsic::x86_avx_vtestc_pd:
+ case Intrinsic::x86_avx_vtestc_pd_256:
+ case Intrinsic::x86_avx_vtestc_ps:
+ case Intrinsic::x86_avx_vtestc_ps_256:
+ case Intrinsic::x86_avx_vtestnzc_pd:
+ case Intrinsic::x86_avx_vtestnzc_pd_256:
+ case Intrinsic::x86_avx_vtestnzc_ps:
+ case Intrinsic::x86_avx_vtestnzc_ps_256:
+ case Intrinsic::x86_avx_vtestz_pd:
+ case Intrinsic::x86_avx_vtestz_pd_256:
+ case Intrinsic::x86_avx_vtestz_ps:
+ case Intrinsic::x86_avx_vtestz_ps_256:
+ case Intrinsic::x86_avx_ptestc_256:
+ case Intrinsic::x86_avx_ptestnzc_256:
+ case Intrinsic::x86_avx_ptestz_256:
+ case Intrinsic::x86_sse41_ptestc:
+ case Intrinsic::x86_sse41_ptestnzc:
+ case Intrinsic::x86_sse41_ptestz:
+ handleVtestIntrinsic(I);
+ break;
+
case Intrinsic::fshl:
case Intrinsic::fshr:
handleFunnelShift(I);
@@ -3564,9 +3980,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
IRB.CreateExtractElement(makeAddAcquireOrderingTable(IRB), Ordering);
CB.setArgOperand(3, NewOrdering);
- IRBuilder<> NextIRB(CB.getNextNode());
- NextIRB.SetCurrentDebugLocation(CB.getDebugLoc());
-
+ NextNodeIRBuilder NextIRB(&CB);
Value *SrcShadowPtr, *SrcOriginPtr;
std::tie(SrcShadowPtr, SrcOriginPtr) =
getShadowOriginPtr(SrcPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
@@ -3648,12 +4062,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// will become a non-readonly function after it is instrumented by us. To
// prevent this code from being optimized out, mark that function
// non-readonly in advance.
+ // TODO: We can likely do better than dropping memory() completely here.
AttributeMask B;
- B.addAttribute(Attribute::ReadOnly)
- .addAttribute(Attribute::ReadNone)
- .addAttribute(Attribute::WriteOnly)
- .addAttribute(Attribute::ArgMemOnly)
- .addAttribute(Attribute::Speculatable);
+ B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);
Call->removeFnAttrs(B);
if (Function *Func = Call->getCalledFunction()) {
@@ -3672,10 +4083,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
unsigned ArgOffset = 0;
LLVM_DEBUG(dbgs() << " CallSite: " << CB << "\n");
- for (auto ArgIt = CB.arg_begin(), End = CB.arg_end(); ArgIt != End;
- ++ArgIt) {
- Value *A = *ArgIt;
- unsigned i = ArgIt - CB.arg_begin();
+ for (const auto &[i, A] : llvm::enumerate(CB.args())) {
if (!A->getType()->isSized()) {
LLVM_DEBUG(dbgs() << "Arg " << i << " is not sized: " << CB << "\n");
continue;
@@ -3708,7 +4116,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (ArgOffset + Size > kParamTLSSize)
break;
const MaybeAlign ParamAlignment(CB.getParamAlign(i));
- MaybeAlign Alignment = llvm::None;
+ MaybeAlign Alignment = std::nullopt;
if (ParamAlignment)
Alignment = std::min(*ParamAlignment, kShadowTLSAlignment);
Value *AShadowPtr, *AOriginPtr;
@@ -3794,8 +4202,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
setOrigin(&CB, getCleanOrigin());
return;
}
- // FIXME: NextInsn is likely in a basic block that has not been visited yet.
- // Anything inserted there will be instrumented by MSan later!
+ // FIXME: NextInsn is likely in a basic block that has not been visited
+ // yet. Anything inserted there will be instrumented by MSan later!
NextInsn = NormalDest->getFirstInsertionPt();
assert(NextInsn != NormalDest->end() &&
"Could not find insertion point for retval shadow load");
@@ -3823,12 +4231,13 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void visitReturnInst(ReturnInst &I) {
IRBuilder<> IRB(&I);
Value *RetVal = I.getReturnValue();
- if (!RetVal) return;
+ if (!RetVal)
+ return;
// Don't emit the epilogue for musttail call returns.
- if (isAMustTailRetVal(RetVal)) return;
+ if (isAMustTailRetVal(RetVal))
+ return;
Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
- bool HasNoUndef =
- F.hasRetAttribute(Attribute::NoUndef);
+ bool HasNoUndef = F.hasRetAttribute(Attribute::NoUndef);
bool StoreShadow = !(MS.EagerChecks && HasNoUndef);
// FIXME: Consider using SpecialCaseList to specify a list of functions that
// must always return fully initialized values. For now, we hardcode "main".
@@ -3863,21 +4272,20 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
"_msphi_s"));
if (MS.TrackOrigins)
- setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
- "_msphi_o"));
+ setOrigin(
+ &I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(), "_msphi_o"));
+ }
+
+ Value *getLocalVarIdptr(AllocaInst &I) {
+ ConstantInt *IntConst =
+ ConstantInt::get(Type::getInt32Ty((*F.getParent()).getContext()), 0);
+ return new GlobalVariable(*F.getParent(), IntConst->getType(),
+ /*isConstant=*/false, GlobalValue::PrivateLinkage,
+ IntConst);
}
Value *getLocalVarDescription(AllocaInst &I) {
- SmallString<2048> StackDescriptionStorage;
- raw_svector_ostream StackDescription(StackDescriptionStorage);
- // We create a string with a description of the stack allocation and
- // pass it into __msan_set_alloca_origin.
- // It will be printed by the run-time if stack-originated UMR is found.
- // The first 4 bytes of the string are set to '----' and will be replaced
- // by __msan_va_arg_overflow_size_tls at the first call.
- StackDescription << "----" << I.getName() << "@" << F.getName();
- return createPrivateNonConstGlobalForString(*F.getParent(),
- StackDescription.str());
+ return createPrivateConstGlobalForString(*F.getParent(), I.getName());
}
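
A toy model of the id-pointer scheme introduced here (hypothetical, not the MSan runtime): each alloca gets one fresh private global, and that global's *address* is a process-unique key to which the runtime can lazily bind an id, and optionally a description when stack names are printed.

#include <cassert>
#include <cstdint>
#include <map>

static std::map<const void *, uint32_t> IdForSite;

static uint32_t idForAllocaSite(const void *IdPtr) {
  auto It = IdForSite.find(IdPtr);
  if (It == IdForSite.end())
    It = IdForSite.emplace(IdPtr, (uint32_t)IdForSite.size() + 1).first;
  return It->second;
}

int main() {
  static uint32_t VarA = 0, VarB = 0; // stand-ins for the private globals
  assert(idForAllocaSite(&VarA) == idForAllocaSite(&VarA)); // stable id
  assert(idForAllocaSite(&VarA) != idForAllocaSite(&VarB)); // unique per site
}
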
void poisonAllocaUserspace(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
@@ -3894,11 +4302,18 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
if (PoisonStack && MS.TrackOrigins) {
- Value *Descr = getLocalVarDescription(I);
- IRB.CreateCall(MS.MsanSetAllocaOrigin4Fn,
- {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len,
- IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()),
- IRB.CreatePointerCast(&F, MS.IntptrTy)});
+ Value *Idptr = getLocalVarIdptr(I);
+ if (ClPrintStackNames) {
+ Value *Descr = getLocalVarDescription(I);
+ IRB.CreateCall(MS.MsanSetAllocaOriginWithDescriptionFn,
+ {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len,
+ IRB.CreatePointerCast(Idptr, IRB.getInt8PtrTy()),
+ IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy())});
+ } else {
+ IRB.CreateCall(MS.MsanSetAllocaOriginNoDescriptionFn,
+ {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len,
+ IRB.CreatePointerCast(Idptr, IRB.getInt8PtrTy())});
+ }
}
}
@@ -3917,12 +4332,13 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void instrumentAlloca(AllocaInst &I, Instruction *InsPoint = nullptr) {
if (!InsPoint)
InsPoint = &I;
- IRBuilder<> IRB(InsPoint->getNextNode());
+ NextNodeIRBuilder IRB(InsPoint);
const DataLayout &DL = F.getParent()->getDataLayout();
uint64_t TypeSize = DL.getTypeAllocSize(I.getAllocatedType());
Value *Len = ConstantInt::get(MS.IntptrTy, TypeSize);
if (I.isArrayAllocation())
- Len = IRB.CreateMul(Len, I.getArraySize());
+ Len = IRB.CreateMul(Len,
+ IRB.CreateZExtOrTrunc(I.getArraySize(), MS.IntptrTy));
if (MS.CompileKernel)
poisonAllocaKmsan(I, IRB, Len);
@@ -3938,7 +4354,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
AllocaSet.insert(&I);
}
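
The ZExtOrTrunc added above matters because the array-size operand of an alloca need not be pointer-sized; both factors must be in the pointer-sized type before CreateMul. A scalar model of the fixed computation (illustrative):

#include <cassert>
#include <cstdint>

static uint64_t allocaLen(uint64_t TypeSize, uint32_t ArraySize32) {
  uint64_t N = (uint64_t)ArraySize32; // zext i32 -> i64, as in the patch
  return TypeSize * N;
}

int main() { assert(allocaLen(16, 3) == 48); }
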
- void visitSelectInst(SelectInst& I) {
+ void visitSelectInst(SelectInst &I) {
IRBuilder<> IRB(&I);
// a = select b, c, d
Value *B = I.getCondition();
@@ -3977,9 +4393,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (B->getType()->isVectorTy()) {
Type *FlatTy = getShadowTyNoVec(B->getType());
B = IRB.CreateICmpNE(IRB.CreateBitCast(B, FlatTy),
- ConstantInt::getNullValue(FlatTy));
+ ConstantInt::getNullValue(FlatTy));
Sb = IRB.CreateICmpNE(IRB.CreateBitCast(Sb, FlatTy),
- ConstantInt::getNullValue(FlatTy));
+ ConstantInt::getNullValue(FlatTy));
}
// a = select b, c, d
// Oa = Sb ? Ob : (b ? Oc : Od)
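
The origin formula in the comment above, on scalars (illustrative C++): if the condition itself is poisoned its origin wins; otherwise the origin of whichever operand the concrete condition selects is taken.

#include <cassert>
#include <cstdint>

static uint32_t selectOrigin(bool B, bool Sb, uint32_t Ob, uint32_t Oc,
                             uint32_t Od) {
  return Sb ? Ob : (B ? Oc : Od); // Oa = Sb ? Ob : (b ? Oc : Od)
}

int main() {
  assert(selectOrigin(true, false, 1, 2, 3) == 2);
  assert(selectOrigin(false, false, 1, 2, 3) == 3);
  assert(selectOrigin(false, true, 1, 2, 3) == 1); // poisoned condition
}
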
@@ -4007,9 +4423,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
setOrigin(&I, getCleanOrigin());
}
- void visitGetElementPtrInst(GetElementPtrInst &I) {
- handleShadowOr(I);
- }
+ void visitGetElementPtrInst(GetElementPtrInst &I) { handleShadowOr(I); }
void visitExtractValueInst(ExtractValueInst &I) {
IRBuilder<> IRB(&I);
@@ -4177,7 +4591,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
struct VarArgAMD64Helper : public VarArgHelper {
// An unfortunate workaround for asymmetric lowering of va_arg stuff.
// See a comment in visitCallBase for more details.
- static const unsigned AMD64GpEndOffset = 48; // AMD64 ABI Draft 0.99.6 p3.5.7
+ static const unsigned AMD64GpEndOffset = 48; // AMD64 ABI Draft 0.99.6 p3.5.7
static const unsigned AMD64FpEndOffsetSSE = 176;
// If SSE is disabled, fp_offset in va_list is zero.
static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;
@@ -4190,7 +4604,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
Value *VAArgTLSOriginCopy = nullptr;
Value *VAArgOverflowSize = nullptr;
- SmallVector<CallInst*, 16> VAStartInstrumentationList;
+ SmallVector<CallInst *, 16> VAStartInstrumentationList;
enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
@@ -4208,7 +4622,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
}
}
- ArgKind classifyArgument(Value* arg) {
+ ArgKind classifyArgument(Value *arg) {
// A very rough approximation of X86_64 argument classification rules.
Type *T = arg->getType();
if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
@@ -4233,10 +4647,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
unsigned FpOffset = AMD64GpEndOffset;
unsigned OverflowOffset = AMD64FpEndOffset;
const DataLayout &DL = F.getParent()->getDataLayout();
- for (auto ArgIt = CB.arg_begin(), End = CB.arg_end(); ArgIt != End;
- ++ArgIt) {
- Value *A = *ArgIt;
- unsigned ArgNo = CB.getArgOperandNo(ArgIt);
+ for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
if (IsByVal) {
@@ -4274,32 +4685,30 @@ struct VarArgAMD64Helper : public VarArgHelper {
AK = AK_Memory;
Value *ShadowBase, *OriginBase = nullptr;
switch (AK) {
- case AK_GeneralPurpose:
- ShadowBase =
- getShadowPtrForVAArgument(A->getType(), IRB, GpOffset, 8);
- if (MS.TrackOrigins)
- OriginBase =
- getOriginPtrForVAArgument(A->getType(), IRB, GpOffset);
- GpOffset += 8;
- break;
- case AK_FloatingPoint:
- ShadowBase =
- getShadowPtrForVAArgument(A->getType(), IRB, FpOffset, 16);
- if (MS.TrackOrigins)
- OriginBase =
- getOriginPtrForVAArgument(A->getType(), IRB, FpOffset);
- FpOffset += 16;
- break;
- case AK_Memory:
- if (IsFixed)
- continue;
- uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
- ShadowBase =
- getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset, 8);
- if (MS.TrackOrigins)
- OriginBase =
- getOriginPtrForVAArgument(A->getType(), IRB, OverflowOffset);
- OverflowOffset += alignTo(ArgSize, 8);
+ case AK_GeneralPurpose:
+ ShadowBase =
+ getShadowPtrForVAArgument(A->getType(), IRB, GpOffset, 8);
+ if (MS.TrackOrigins)
+ OriginBase = getOriginPtrForVAArgument(A->getType(), IRB, GpOffset);
+ GpOffset += 8;
+ break;
+ case AK_FloatingPoint:
+ ShadowBase =
+ getShadowPtrForVAArgument(A->getType(), IRB, FpOffset, 16);
+ if (MS.TrackOrigins)
+ OriginBase = getOriginPtrForVAArgument(A->getType(), IRB, FpOffset);
+ FpOffset += 16;
+ break;
+ case AK_Memory:
+ if (IsFixed)
+ continue;
+ uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
+ ShadowBase =
+ getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset, 8);
+ if (MS.TrackOrigins)
+ OriginBase =
+ getOriginPtrForVAArgument(A->getType(), IRB, OverflowOffset);
+ OverflowOffset += alignTo(ArgSize, 8);
}
// Take fixed arguments into account for GpOffset and FpOffset,
// but don't actually store shadows for them.
@@ -4319,7 +4728,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
}
}
Constant *OverflowSize =
- ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
+ ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
}
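
The offsets used above mirror the x86-64 register save area: general-purpose slots in [0, 48), FP/SSE slots in [48, 176), and memory-passed arguments spilled past 176, 8-byte aligned. A compact model of the cursor logic (illustrative C++; ignores register exhaustion and the TLS size cap):

#include <cassert>
#include <cstdint>

struct VaTlsCursor {
  unsigned Gp = 0, Fp = 48, Overflow = 176;
  unsigned place(bool IsFp, bool IsMem, unsigned Size) {
    if (IsMem) { unsigned O = Overflow; Overflow += (Size + 7) & ~7u; return O; }
    if (IsFp) { unsigned O = Fp; Fp += 16; return O; }
    unsigned O = Gp; Gp += 8; return O;
  }
};

int main() {
  VaTlsCursor C;
  assert(C.place(false, false, 8) == 0);   // 1st int arg -> GP slot 0
  assert(C.place(true, false, 8) == 48);   // 1st double -> FP area
  assert(C.place(false, true, 24) == 176); // by-memory aggregate -> overflow
}
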
@@ -4371,7 +4780,8 @@ struct VarArgAMD64Helper : public VarArgHelper {
}
void visitVACopyInst(VACopyInst &I) override {
- if (F.getCallingConv() == CallingConv::Win64) return;
+ if (F.getCallingConv() == CallingConv::Win64)
+ return;
unpoisonVAListTagForInst(I);
}
@@ -4384,9 +4794,8 @@ struct VarArgAMD64Helper : public VarArgHelper {
IRBuilder<> IRB(MSV.FnPrologueEnd);
VAArgOverflowSize =
IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
- Value *CopySize =
- IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
- VAArgOverflowSize);
+ Value *CopySize = IRB.CreateAdd(
+ ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), VAArgOverflowSize);
VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
if (MS.TrackOrigins) {
@@ -4400,7 +4809,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
// Copy va_list shadow from the backup copy of the TLS contents.
for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
CallInst *OrigInst = VAStartInstrumentationList[i];
- IRBuilder<> IRB(OrigInst->getNextNode());
+ NextNodeIRBuilder IRB(OrigInst);
Value *VAListTag = OrigInst->getArgOperand(0);
Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
@@ -4453,24 +4862,23 @@ struct VarArgMIPS64Helper : public VarArgHelper {
Value *VAArgTLSCopy = nullptr;
Value *VAArgSize = nullptr;
- SmallVector<CallInst*, 16> VAStartInstrumentationList;
+ SmallVector<CallInst *, 16> VAStartInstrumentationList;
VarArgMIPS64Helper(Function &F, MemorySanitizer &MS,
- MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
+ MemorySanitizerVisitor &MSV)
+ : F(F), MS(MS), MSV(MSV) {}
void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
unsigned VAArgOffset = 0;
const DataLayout &DL = F.getParent()->getDataLayout();
- for (auto ArgIt = CB.arg_begin() + CB.getFunctionType()->getNumParams(),
- End = CB.arg_end();
- ArgIt != End; ++ArgIt) {
+ for (Value *A :
+ llvm::drop_begin(CB.args(), CB.getFunctionType()->getNumParams())) {
Triple TargetTriple(F.getParent()->getTargetTriple());
- Value *A = *ArgIt;
Value *Base;
uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
if (TargetTriple.getArch() == Triple::mips64) {
- // Adjusting the shadow for argument with size < 8 to match the placement
- // of bits in big endian system
+        // Adjust the shadow for arguments with size < 8 to match the
+        // placement of bits in a big-endian system
if (ArgSize < 8)
VAArgOffset += (8 - ArgSize);
}
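
A scalar model of that big-endian adjustment (illustrative C++): a sub-8-byte argument occupies the *high* end of its 8-byte slot on mips64, so the shadow offset is bumped by (8 - size) to line up with the value's bytes.

#include <cassert>

static unsigned shadowOffsetBE(unsigned SlotOffset, unsigned ArgSize) {
  return ArgSize < 8 ? SlotOffset + (8 - ArgSize) : SlotOffset;
}

int main() {
  assert(shadowOffsetBE(0, 4) == 4); // an i32 sits in bytes 4..7 of its slot
  assert(shadowOffsetBE(8, 8) == 8); // full slots need no adjustment
}
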
@@ -4529,8 +4937,8 @@ struct VarArgMIPS64Helper : public VarArgHelper {
"finalizeInstrumentation called twice");
IRBuilder<> IRB(MSV.FnPrologueEnd);
VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
- Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
- VAArgSize);
+ Value *CopySize =
+ IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0), VAArgSize);
if (!VAStartInstrumentationList.empty()) {
// If there is a va_start in this function, make a backup copy of
@@ -4543,7 +4951,7 @@ struct VarArgMIPS64Helper : public VarArgHelper {
// Copy va_list shadow from the backup copy of the TLS contents.
for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
CallInst *OrigInst = VAStartInstrumentationList[i];
- IRBuilder<> IRB(OrigInst->getNextNode());
+ NextNodeIRBuilder IRB(OrigInst);
Value *VAListTag = OrigInst->getArgOperand(0);
Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
Value *RegSaveAreaPtrPtr =
@@ -4571,8 +4979,8 @@ struct VarArgAArch64Helper : public VarArgHelper {
static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
// Make VR space aligned to 16 bytes.
static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
- static const unsigned AArch64VrEndOffset = AArch64VrBegOffset
- + kAArch64VrArgSize;
+ static const unsigned AArch64VrEndOffset =
+ AArch64VrBegOffset + kAArch64VrArgSize;
static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;
Function &F;
@@ -4581,19 +4989,20 @@ struct VarArgAArch64Helper : public VarArgHelper {
Value *VAArgTLSCopy = nullptr;
Value *VAArgOverflowSize = nullptr;
- SmallVector<CallInst*, 16> VAStartInstrumentationList;
+ SmallVector<CallInst *, 16> VAStartInstrumentationList;
enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
- MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
+ MemorySanitizerVisitor &MSV)
+ : F(F), MS(MS), MSV(MSV) {}
- ArgKind classifyArgument(Value* arg) {
+ ArgKind classifyArgument(Value *arg) {
Type *T = arg->getType();
if (T->isFPOrFPVectorTy())
return AK_FloatingPoint;
- if ((T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
- || (T->isPointerTy()))
+ if ((T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64) ||
+ (T->isPointerTy()))
return AK_GeneralPurpose;
return AK_Memory;
}
@@ -4613,10 +5022,7 @@ struct VarArgAArch64Helper : public VarArgHelper {
unsigned OverflowOffset = AArch64VAEndOffset;
const DataLayout &DL = F.getParent()->getDataLayout();
- for (auto ArgIt = CB.arg_begin(), End = CB.arg_end(); ArgIt != End;
- ++ArgIt) {
- Value *A = *ArgIt;
- unsigned ArgNo = CB.getArgOperandNo(ArgIt);
+ for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
ArgKind AK = classifyArgument(A);
if (AK == AK_GeneralPurpose && GrOffset >= AArch64GrEndOffset)
@@ -4625,24 +5031,24 @@ struct VarArgAArch64Helper : public VarArgHelper {
AK = AK_Memory;
Value *Base;
switch (AK) {
- case AK_GeneralPurpose:
- Base = getShadowPtrForVAArgument(A->getType(), IRB, GrOffset, 8);
- GrOffset += 8;
- break;
- case AK_FloatingPoint:
- Base = getShadowPtrForVAArgument(A->getType(), IRB, VrOffset, 8);
- VrOffset += 16;
- break;
- case AK_Memory:
- // Don't count fixed arguments in the overflow area - va_start will
- // skip right over them.
- if (IsFixed)
- continue;
- uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
- Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset,
- alignTo(ArgSize, 8));
- OverflowOffset += alignTo(ArgSize, 8);
- break;
+ case AK_GeneralPurpose:
+ Base = getShadowPtrForVAArgument(A->getType(), IRB, GrOffset, 8);
+ GrOffset += 8;
+ break;
+ case AK_FloatingPoint:
+ Base = getShadowPtrForVAArgument(A->getType(), IRB, VrOffset, 8);
+ VrOffset += 16;
+ break;
+ case AK_Memory:
+ // Don't count fixed arguments in the overflow area - va_start will
+ // skip right over them.
+ if (IsFixed)
+ continue;
+ uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
+ Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset,
+ alignTo(ArgSize, 8));
+ OverflowOffset += alignTo(ArgSize, 8);
+ break;
}
// Count Gp/Vr fixed arguments to their respective offsets, but don't
// bother to actually store a shadow.
@@ -4653,7 +5059,7 @@ struct VarArgAArch64Helper : public VarArgHelper {
IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
}
Constant *OverflowSize =
- ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
+ ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
}
@@ -4694,9 +5100,8 @@ struct VarArgAArch64Helper : public VarArgHelper {
}
// Retrieve a va_list field of 'void*' size.
- Value* getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
- Value *SaveAreaPtrPtr =
- IRB.CreateIntToPtr(
+ Value *getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
+ Value *SaveAreaPtrPtr = IRB.CreateIntToPtr(
IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
ConstantInt::get(MS.IntptrTy, offset)),
Type::getInt64PtrTy(*MS.C));
@@ -4704,9 +5109,8 @@ struct VarArgAArch64Helper : public VarArgHelper {
}
// Retrieve a va_list field of 'int' size.
- Value* getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
- Value *SaveAreaPtr =
- IRB.CreateIntToPtr(
+ Value *getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
+ Value *SaveAreaPtr = IRB.CreateIntToPtr(
IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
ConstantInt::get(MS.IntptrTy, offset)),
Type::getInt32PtrTy(*MS.C));
@@ -4723,9 +5127,8 @@ struct VarArgAArch64Helper : public VarArgHelper {
IRBuilder<> IRB(MSV.FnPrologueEnd);
VAArgOverflowSize =
IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
- Value *CopySize =
- IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset),
- VAArgOverflowSize);
+ Value *CopySize = IRB.CreateAdd(
+ ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset), VAArgOverflowSize);
VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
}
@@ -4737,7 +5140,7 @@ struct VarArgAArch64Helper : public VarArgHelper {
// the TLS contents.
for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
CallInst *OrigInst = VAStartInstrumentationList[i];
- IRBuilder<> IRB(OrigInst->getNextNode());
+ NextNodeIRBuilder IRB(OrigInst);
Value *VAListTag = OrigInst->getArgOperand(0);
@@ -4774,7 +5177,7 @@ struct VarArgAArch64Helper : public VarArgHelper {
// '0 - ((8 - named_gr) * 8)', the idea is to just propagate the variadic
// argument by ignoring the bytes of shadow from named arguments.
Value *GrRegSaveAreaShadowPtrOff =
- IRB.CreateAdd(GrArgSize, GrOffSaveArea);
+ IRB.CreateAdd(GrArgSize, GrOffSaveArea);
Value *GrRegSaveAreaShadowPtr =
MSV.getShadowOriginPtr(GrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
@@ -4798,10 +5201,10 @@ struct VarArgAArch64Helper : public VarArgHelper {
.first;
Value *VrSrcPtr = IRB.CreateInBoundsGEP(
- IRB.getInt8Ty(),
- IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
- IRB.getInt32(AArch64VrBegOffset)),
- VrRegSaveAreaShadowPtrOff);
+ IRB.getInt8Ty(),
+ IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
+ IRB.getInt32(AArch64VrBegOffset)),
+ VrRegSaveAreaShadowPtrOff);
Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, Align(8), VrSrcPtr, Align(8),
@@ -4813,9 +5216,8 @@ struct VarArgAArch64Helper : public VarArgHelper {
Align(16), /*isStore*/ true)
.first;
- Value *StackSrcPtr =
- IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
- IRB.getInt32(AArch64VAEndOffset));
+ Value *StackSrcPtr = IRB.CreateInBoundsGEP(
+ IRB.getInt8Ty(), VAArgTLSCopy, IRB.getInt32(AArch64VAEndOffset));
IRB.CreateMemCpy(StackSaveAreaShadowPtr, Align(16), StackSrcPtr,
Align(16), VAArgOverflowSize);
@@ -4831,10 +5233,11 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
Value *VAArgTLSCopy = nullptr;
Value *VAArgSize = nullptr;
- SmallVector<CallInst*, 16> VAStartInstrumentationList;
+ SmallVector<CallInst *, 16> VAStartInstrumentationList;
VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
- MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
+ MemorySanitizerVisitor &MSV)
+ : F(F), MS(MS), MSV(MSV) {}
void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
// For PowerPC, we need to deal with alignment of stack arguments -
@@ -4854,10 +5257,7 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
VAArgBase = 32;
unsigned VAArgOffset = VAArgBase;
const DataLayout &DL = F.getParent()->getDataLayout();
- for (auto ArgIt = CB.arg_begin(), End = CB.arg_end(); ArgIt != End;
- ++ArgIt) {
- Value *A = *ArgIt;
- unsigned ArgNo = CB.getArgOperandNo(ArgIt);
+ for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
if (IsByVal) {
@@ -4918,8 +5318,8 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
VAArgBase = VAArgOffset;
}
- Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(),
- VAArgOffset - VAArgBase);
+ Constant *TotalVAArgSize =
+ ConstantInt::get(IRB.getInt64Ty(), VAArgOffset - VAArgBase);
   // Here we use VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creating a
   // new class member; it holds the total size of all VarArgs.
IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
@@ -4967,8 +5367,8 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
"finalizeInstrumentation called twice");
IRBuilder<> IRB(MSV.FnPrologueEnd);
VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
- Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
- VAArgSize);
+ Value *CopySize =
+ IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0), VAArgSize);
if (!VAStartInstrumentationList.empty()) {
// If there is a va_start in this function, make a backup copy of
@@ -4981,7 +5381,7 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
// Copy va_list shadow from the backup copy of the TLS contents.
for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
CallInst *OrigInst = VAStartInstrumentationList[i];
- IRBuilder<> IRB(OrigInst->getNextNode());
+ NextNodeIRBuilder IRB(OrigInst);
Value *VAListTag = OrigInst->getArgOperand(0);
Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
Value *RegSaveAreaPtrPtr =
@@ -5082,10 +5482,7 @@ struct VarArgSystemZHelper : public VarArgHelper {
unsigned VrIndex = 0;
unsigned OverflowOffset = SystemZOverflowOffset;
const DataLayout &DL = F.getParent()->getDataLayout();
- for (auto ArgIt = CB.arg_begin(), End = CB.arg_end(); ArgIt != End;
- ++ArgIt) {
- Value *A = *ArgIt;
- unsigned ArgNo = CB.getArgOperandNo(ArgIt);
+ for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
// SystemZABIInfo does not produce ByVal parameters.
assert(!CB.paramHasAttr(ArgNo, Attribute::ByVal));
@@ -5304,7 +5701,7 @@ struct VarArgSystemZHelper : public VarArgHelper {
for (size_t VaStartNo = 0, VaStartNum = VAStartInstrumentationList.size();
VaStartNo < VaStartNum; VaStartNo++) {
CallInst *OrigInst = VAStartInstrumentationList[VaStartNo];
- IRBuilder<> IRB(OrigInst->getNextNode());
+ NextNodeIRBuilder IRB(OrigInst);
Value *VAListTag = OrigInst->getArgOperand(0);
copyRegSaveArea(IRB, VAListTag);
copyOverflowArea(IRB, VAListTag);
@@ -5357,13 +5754,9 @@ bool MemorySanitizer::sanitizeFunction(Function &F, TargetLibraryInfo &TLI) {
MemorySanitizerVisitor Visitor(F, *this, TLI);
- // Clear out readonly/readnone attributes.
+ // Clear out memory attributes.
AttributeMask B;
- B.addAttribute(Attribute::ReadOnly)
- .addAttribute(Attribute::ReadNone)
- .addAttribute(Attribute::WriteOnly)
- .addAttribute(Attribute::ArgMemOnly)
- .addAttribute(Attribute::Speculatable);
+ B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);
F.removeFnAttrs(B);
return Visitor.runOnFunction();