summaryrefslogtreecommitdiff
path: root/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
diff options
context:
space:
mode:
author    Dimitry Andric <dim@FreeBSD.org>  2020-07-26 19:36:28 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2020-07-26 19:36:28 +0000
commit cfca06d7963fa0909f90483b42a6d7d194d01e08 (patch)
tree   209fb2a2d68f8f277793fc8df46c753d31bc853b /llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
parent 706b4fc47bbc608932d3b491ae19a3b9cde9497b (diff)
Notes
Diffstat (limited to 'llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp')
-rw-r--r--  llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp | 82
1 file changed, 65 insertions(+), 17 deletions(-)
diff --git a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index 9b7edad3444be..c911b37afac7e 100644
--- a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -68,6 +68,14 @@ static cl::opt<bool> ClInstrumentAtomics(
static cl::opt<bool> ClInstrumentMemIntrinsics(
"tsan-instrument-memintrinsics", cl::init(true),
cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);
+static cl::opt<bool> ClDistinguishVolatile(
+ "tsan-distinguish-volatile", cl::init(false),
+ cl::desc("Emit special instrumentation for accesses to volatiles"),
+ cl::Hidden);
+static cl::opt<bool> ClInstrumentReadBeforeWrite(
+ "tsan-instrument-read-before-write", cl::init(false),
+ cl::desc("Do not eliminate read instrumentation for read-before-writes"),
+ cl::Hidden);
STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
@@ -118,6 +126,10 @@ private:
FunctionCallee TsanWrite[kNumberOfAccessSizes];
FunctionCallee TsanUnalignedRead[kNumberOfAccessSizes];
FunctionCallee TsanUnalignedWrite[kNumberOfAccessSizes];
+ FunctionCallee TsanVolatileRead[kNumberOfAccessSizes];
+ FunctionCallee TsanVolatileWrite[kNumberOfAccessSizes];
+ FunctionCallee TsanUnalignedVolatileRead[kNumberOfAccessSizes];
+ FunctionCallee TsanUnalignedVolatileWrite[kNumberOfAccessSizes];
FunctionCallee TsanAtomicLoad[kNumberOfAccessSizes];
FunctionCallee TsanAtomicStore[kNumberOfAccessSizes];
FunctionCallee TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1]
@@ -131,7 +143,9 @@ private:
};
struct ThreadSanitizerLegacyPass : FunctionPass {
- ThreadSanitizerLegacyPass() : FunctionPass(ID) {}
+ ThreadSanitizerLegacyPass() : FunctionPass(ID) {
+ initializeThreadSanitizerLegacyPassPass(*PassRegistry::getPassRegistry());
+ }
StringRef getPassName() const override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnFunction(Function &F) override;
@@ -236,6 +250,24 @@ void ThreadSanitizer::initialize(Module &M) {
TsanUnalignedWrite[i] = M.getOrInsertFunction(
UnalignedWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+ SmallString<64> VolatileReadName("__tsan_volatile_read" + ByteSizeStr);
+ TsanVolatileRead[i] = M.getOrInsertFunction(
+ VolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+
+ SmallString<64> VolatileWriteName("__tsan_volatile_write" + ByteSizeStr);
+ TsanVolatileWrite[i] = M.getOrInsertFunction(
+ VolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+
+ SmallString<64> UnalignedVolatileReadName("__tsan_unaligned_volatile_read" +
+ ByteSizeStr);
+ TsanUnalignedVolatileRead[i] = M.getOrInsertFunction(
+ UnalignedVolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+
+ SmallString<64> UnalignedVolatileWriteName(
+ "__tsan_unaligned_volatile_write" + ByteSizeStr);
+ TsanUnalignedVolatileWrite[i] = M.getOrInsertFunction(
+ UnalignedVolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+
Type *Ty = Type::getIntNTy(M.getContext(), BitSize);
Type *PtrTy = Ty->getPointerTo();
SmallString<32> AtomicLoadName("__tsan_atomic" + BitSizeStr + "_load");
@@ -246,28 +278,28 @@ void ThreadSanitizer::initialize(Module &M) {
TsanAtomicStore[i] = M.getOrInsertFunction(
AtomicStoreName, Attr, IRB.getVoidTy(), PtrTy, Ty, OrdTy);
- for (int op = AtomicRMWInst::FIRST_BINOP;
- op <= AtomicRMWInst::LAST_BINOP; ++op) {
- TsanAtomicRMW[op][i] = nullptr;
+ for (unsigned Op = AtomicRMWInst::FIRST_BINOP;
+ Op <= AtomicRMWInst::LAST_BINOP; ++Op) {
+ TsanAtomicRMW[Op][i] = nullptr;
const char *NamePart = nullptr;
- if (op == AtomicRMWInst::Xchg)
+ if (Op == AtomicRMWInst::Xchg)
NamePart = "_exchange";
- else if (op == AtomicRMWInst::Add)
+ else if (Op == AtomicRMWInst::Add)
NamePart = "_fetch_add";
- else if (op == AtomicRMWInst::Sub)
+ else if (Op == AtomicRMWInst::Sub)
NamePart = "_fetch_sub";
- else if (op == AtomicRMWInst::And)
+ else if (Op == AtomicRMWInst::And)
NamePart = "_fetch_and";
- else if (op == AtomicRMWInst::Or)
+ else if (Op == AtomicRMWInst::Or)
NamePart = "_fetch_or";
- else if (op == AtomicRMWInst::Xor)
+ else if (Op == AtomicRMWInst::Xor)
NamePart = "_fetch_xor";
- else if (op == AtomicRMWInst::Nand)
+ else if (Op == AtomicRMWInst::Nand)
NamePart = "_fetch_nand";
else
continue;
SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
- TsanAtomicRMW[op][i] =
+ TsanAtomicRMW[Op][i] =
M.getOrInsertFunction(RMWName, Attr, Ty, PtrTy, Ty, OrdTy);
}
@@ -385,7 +417,7 @@ void ThreadSanitizer::chooseInstructionsToInstrument(
Value *Addr = Load->getPointerOperand();
if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
continue;
- if (WriteTargets.count(Addr)) {
+ if (!ClInstrumentReadBeforeWrite && WriteTargets.count(Addr)) {
// We will write to this temp, so no reason to analyze the read.
NumOmittedReadsBeforeWrite++;
continue;
@@ -441,6 +473,11 @@ bool ThreadSanitizer::sanitizeFunction(Function &F,
// the module constructor.
if (F.getName() == kTsanModuleCtorName)
return false;
+ // Naked functions can not have prologue/epilogue
+ // (__tsan_func_entry/__tsan_func_exit) generated, so don't instrument them at
+ // all.
+ if (F.hasFnAttribute(Attribute::Naked))
+ return false;
initialize(*F.getParent());
SmallVector<Instruction*, 8> AllLoadsAndStores;
SmallVector<Instruction*, 8> LocalLoadsAndStores;
@@ -560,13 +597,24 @@ bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I,
const unsigned Alignment = IsWrite
? cast<StoreInst>(I)->getAlignment()
: cast<LoadInst>(I)->getAlignment();
+ const bool IsVolatile =
+ ClDistinguishVolatile && (IsWrite ? cast<StoreInst>(I)->isVolatile()
+ : cast<LoadInst>(I)->isVolatile());
Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
const uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
FunctionCallee OnAccessFunc = nullptr;
- if (Alignment == 0 || Alignment >= 8 || (Alignment % (TypeSize / 8)) == 0)
- OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
- else
- OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
+ if (Alignment == 0 || Alignment >= 8 || (Alignment % (TypeSize / 8)) == 0) {
+ if (IsVolatile)
+ OnAccessFunc = IsWrite ? TsanVolatileWrite[Idx] : TsanVolatileRead[Idx];
+ else
+ OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
+ } else {
+ if (IsVolatile)
+ OnAccessFunc = IsWrite ? TsanUnalignedVolatileWrite[Idx]
+ : TsanUnalignedVolatileRead[Idx];
+ else
+ OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
+ }
IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
if (IsWrite) NumInstrumentedWrites++;
else NumInstrumentedReads++;