Diffstat (limited to 'lib/tsan/rtl')
-rw-r--r--  lib/tsan/rtl/Makefile.old                           4
-rw-r--r--  lib/tsan/rtl/tsan_clock.cc                        454
-rw-r--r--  lib/tsan/rtl/tsan_clock.h                          95
-rw-r--r--  lib/tsan/rtl/tsan_defs.h                           30
-rw-r--r--  lib/tsan/rtl/tsan_dense_alloc.h                   137
-rw-r--r--  lib/tsan/rtl/tsan_fd.cc                            26
-rw-r--r--  lib/tsan/rtl/tsan_fd.h                              4
-rw-r--r--  lib/tsan/rtl/tsan_flags.cc                         82
-rw-r--r--  lib/tsan/rtl/tsan_flags.h                          24
-rw-r--r--  lib/tsan/rtl/tsan_ignoreset.cc                     47
-rw-r--r--  lib/tsan/rtl/tsan_ignoreset.h                      38
-rw-r--r--  lib/tsan/rtl/tsan_interceptors.cc                1140
-rw-r--r--  lib/tsan/rtl/tsan_interface_ann.cc                 35
-rw-r--r--  lib/tsan/rtl/tsan_interface_atomic.cc             470
-rw-r--r--  lib/tsan/rtl/tsan_interface_atomic.h              205
-rw-r--r--  lib/tsan/rtl/tsan_interface_java.cc               166
-rw-r--r--  lib/tsan/rtl/tsan_interface_java.h                  7
-rw-r--r--  lib/tsan/rtl/tsan_md5.cc                           14
-rw-r--r--  lib/tsan/rtl/tsan_mman.cc                         174
-rw-r--r--  lib/tsan/rtl/tsan_mman.h                           10
-rw-r--r--  lib/tsan/rtl/tsan_mutex.cc                         53
-rw-r--r--  lib/tsan/rtl/tsan_mutex.h                          10
-rw-r--r--  lib/tsan/rtl/tsan_mutexset.h                       11
-rw-r--r--  lib/tsan/rtl/tsan_platform.h                      320
-rw-r--r--  lib/tsan/rtl/tsan_platform_linux.cc               336
-rw-r--r--  lib/tsan/rtl/tsan_platform_mac.cc                  57
-rw-r--r--  lib/tsan/rtl/tsan_platform_windows.cc              12
-rw-r--r--  lib/tsan/rtl/tsan_report.cc                       182
-rw-r--r--  lib/tsan/rtl/tsan_report.h                         40
-rw-r--r--  lib/tsan/rtl/tsan_rtl.cc                          522
-rw-r--r--  lib/tsan/rtl/tsan_rtl.h                           322
-rw-r--r--  lib/tsan/rtl/tsan_rtl_amd64.S                     271
-rw-r--r--  lib/tsan/rtl/tsan_rtl_mutex.cc                    297
-rw-r--r--  lib/tsan/rtl/tsan_rtl_report.cc                   454
-rw-r--r--  lib/tsan/rtl/tsan_rtl_thread.cc                   123
-rw-r--r--  lib/tsan/rtl/tsan_stack_trace.cc                   46
-rw-r--r--  lib/tsan/rtl/tsan_stack_trace.h                    39
-rw-r--r--  lib/tsan/rtl/tsan_stat.cc                         354
-rw-r--r--  lib/tsan/rtl/tsan_stat.h                          355
-rw-r--r--  lib/tsan/rtl/tsan_suppressions.cc                 100
-rw-r--r--  lib/tsan/rtl/tsan_suppressions.h                    1
-rw-r--r--  lib/tsan/rtl/tsan_symbolize.cc                    104
-rw-r--r--  lib/tsan/rtl/tsan_symbolize.h                       8
-rw-r--r--  lib/tsan/rtl/tsan_symbolize_addr2line_linux.cc    193
-rw-r--r--  lib/tsan/rtl/tsan_sync.cc                         419
-rw-r--r--  lib/tsan/rtl/tsan_sync.h                          100
-rw-r--r--  lib/tsan/rtl/tsan_trace.h                          22
-rw-r--r--  lib/tsan/rtl/tsan_update_shadow_word_inl.h         13
-rw-r--r--  lib/tsan/rtl/tsan_vector.h                         22
49 files changed, 4259 insertions, 3689 deletions
diff --git a/lib/tsan/rtl/Makefile.old b/lib/tsan/rtl/Makefile.old
index 33944ffe96754..79c761ce3f4e2 100644
--- a/lib/tsan/rtl/Makefile.old
+++ b/lib/tsan/rtl/Makefile.old
@@ -1,4 +1,4 @@
-CXXFLAGS = -fPIE -g -Wall -Werror -fno-builtin -DTSAN_DEBUG=$(DEBUG) -DSANITIZER_DEBUG=$(DEBUG)
+CXXFLAGS = -std=c++11 -fPIE -g -Wall -Werror -fno-builtin -msse3 -DTSAN_DEBUG=$(DEBUG) -DSANITIZER_DEBUG=$(DEBUG)
CLANG=clang
ifeq ($(DEBUG), 0)
CXXFLAGS += -O3
@@ -49,6 +49,8 @@ LIBTSAN_OBJ=$(patsubst %.cc,%.o,$(LIBTSAN_SRC)) \
$(CXX) $(CXXFLAGS) $(INCLUDES) -c $< -o $@
%.o: $(COMMON)/%.cc Makefile.old $(LIBTSAN_HEADERS)
$(CXX) $(CXXFLAGS) $(INCLUDES) -c $< -o $@
+%.o: %.S
+ $(CXX) $(INCLUDES) -o $@ -c $<
libtsan.a: $(LIBTSAN_OBJ)
ar ru $@ $(LIBTSAN_OBJ)
diff --git a/lib/tsan/rtl/tsan_clock.cc b/lib/tsan/rtl/tsan_clock.cc
index f8745ec3200c9..f2b39a182b39e 100644
--- a/lib/tsan/rtl/tsan_clock.cc
+++ b/lib/tsan/rtl/tsan_clock.cc
@@ -12,100 +12,418 @@
//===----------------------------------------------------------------------===//
#include "tsan_clock.h"
#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
-// It's possible to optimize clock operations for some important cases
-// so that they are O(1). The cases include singletons, once's, local mutexes.
-// First, SyncClock must be re-implemented to allow indexing by tid.
-// It must not necessarily be a full vector clock, though. For example it may
-// be a multi-level table.
-// Then, each slot in SyncClock must contain a dirty bit (it's united with
-// the clock value, so no space increase). The acquire algorithm looks
-// as follows:
-// void acquire(thr, tid, thr_clock, sync_clock) {
-// if (!sync_clock[tid].dirty)
-// return; // No new info to acquire.
-// // This handles constant reads of singleton pointers and
-// // stop-flags.
-// acquire_impl(thr_clock, sync_clock); // As usual, O(N).
-// sync_clock[tid].dirty = false;
-// sync_clock.dirty_count--;
-// }
-// The release operation looks as follows:
-// void release(thr, tid, thr_clock, sync_clock) {
-// // thr->sync_cache is a simple fixed-size hash-based cache that holds
-// // several previous sync_clock's.
-// if (thr->sync_cache[sync_clock] >= thr->last_acquire_epoch) {
-// // The thread did no acquire operations since last release on this clock.
-// // So update only the thread's slot (other slots can't possibly change).
-// sync_clock[tid].clock = thr->epoch;
-// if (sync_clock.dirty_count == sync_clock.cnt
-// || (sync_clock.dirty_count == sync_clock.cnt - 1
-// && sync_clock[tid].dirty == false))
-// // All dirty flags are set, bail out.
-// return;
-// set all dirty bits, but preserve the thread's bit. // O(N)
-// update sync_clock.dirty_count;
-// return;
+// SyncClock and ThreadClock implement vector clocks for sync variables
+// (mutexes, atomic variables, file descriptors, etc) and threads, respectively.
+// ThreadClock contains a fixed-size vector clock for the maximum number of
+// threads.
+// SyncClock contains a growable vector clock for the currently necessary
+// number of threads.
+// Together they implement a very simple model of operations, namely:
+//
+// void ThreadClock::acquire(const SyncClock *src) {
+// for (int i = 0; i < kMaxThreads; i++)
+// clock[i] = max(clock[i], src->clock[i]);
+// }
+//
+// void ThreadClock::release(SyncClock *dst) const {
+// for (int i = 0; i < kMaxThreads; i++)
+// dst->clock[i] = max(dst->clock[i], clock[i]);
+// }
+//
+// void ThreadClock::ReleaseStore(SyncClock *dst) const {
+// for (int i = 0; i < kMaxThreads; i++)
+// dst->clock[i] = clock[i];
// }
-// release_impl(thr_clock, sync_clock); // As usual, O(N).
-// set all dirty bits, but preserve the thread's bit.
-// // The previous step is combined with release_impl(), so that
-// // we scan the arrays only once.
-// update sync_clock.dirty_count;
-// }
+//
+// void ThreadClock::acq_rel(SyncClock *dst) {
+// acquire(dst);
+// release(dst);
+// }
+//
+// Conformance to this model is extensively verified in tsan_clock_test.cc.
+// However, the implementation is significantly more complex. The complexity
+// allows implementing important classes of use cases in O(1) instead of O(N).
+//
+// The use cases are:
+// 1. Singleton/once atomic that has a single release-store operation followed
+// by zillions of acquire-loads (the acquire-load is O(1)).
+// 2. Thread-local mutex (both lock and unlock can be O(1)).
+// 3. Leaf mutex (unlock is O(1)).
+// 4. A mutex shared by 2 threads (both lock and unlock can be O(1)).
+// 5. An atomic with a single writer (writes can be O(1)).
+// The implementation dynamically adapts to the workload. So if an atomic is
+// in a read-only phase, these reads will be O(1); if it later switches to a
+// read/write phase, the implementation will correctly handle that by
+// switching to O(N).
+//
+// Thread-safety note: all const operations on SyncClock's are conducted under
+// a shared lock; all non-const operations on SyncClock's are conducted under
+// an exclusive lock; ThreadClock's are private to respective threads and so
+// do not need any protection.
+//
+// Description of ThreadClock state:
+// clk_ - fixed size vector clock.
+// nclk_ - effective size of the vector clock (the rest is zeros).
+// tid_ - index of the thread associated with the clock ("current thread").
+// last_acquire_ - current thread time when it acquired something from
+// other threads.
+//
+// Description of SyncClock state:
+// clk_ - variable size vector clock, low kClkBits hold timestamp,
+// the remaining bits hold "acquired" flag (the actual value is thread's
+// reused counter);
+// if acquired == thr->reused_, then the respective thread has already
+// acquired this clock (except possibly dirty_tids_).
+// dirty_tids_ - holds up to two indices in the vector clock that other threads
+// need to acquire regardless of "acquired" flag value;
+// release_store_tid_ - denotes that the clock state is a result of
+// release-store operation by the thread with release_store_tid_ index.
+// release_store_reused_ - reuse count of release_store_tid_.
+
+// We don't have ThreadState in these methods, so this is an ugly hack that
+// works only in C++.
+#ifndef SANITIZER_GO
+# define CPP_STAT_INC(typ) StatInc(cur_thread(), typ)
+#else
+# define CPP_STAT_INC(typ) (void)0
+#endif
namespace __tsan {
-ThreadClock::ThreadClock() {
- nclk_ = 0;
- for (uptr i = 0; i < (uptr)kMaxTidInClock; i++)
- clk_[i] = 0;
+const unsigned kInvalidTid = (unsigned)-1;
+
+ThreadClock::ThreadClock(unsigned tid, unsigned reused)
+ : tid_(tid)
+ , reused_(reused + 1) { // 0 has special meaning
+ CHECK_LT(tid, kMaxTidInClock);
+ CHECK_EQ(reused_, ((u64)reused_ << kClkBits) >> kClkBits);
+ nclk_ = tid_ + 1;
+ last_acquire_ = 0;
+ internal_memset(clk_, 0, sizeof(clk_));
+ clk_[tid_].reused = reused_;
}
-void ThreadClock::acquire(const SyncClock *src) {
+void ThreadClock::acquire(ClockCache *c, const SyncClock *src) {
DCHECK(nclk_ <= kMaxTid);
- DCHECK(src->clk_.Size() <= kMaxTid);
+ DCHECK(src->size_ <= kMaxTid);
+ CPP_STAT_INC(StatClockAcquire);
- const uptr nclk = src->clk_.Size();
- if (nclk == 0)
+ // Check if it's empty -> no need to do anything.
+ const uptr nclk = src->size_;
+ if (nclk == 0) {
+ CPP_STAT_INC(StatClockAcquireEmpty);
return;
+ }
+
+ // Check if we've already acquired src after the last release operation on src
+ bool acquired = false;
+ if (nclk > tid_) {
+ CPP_STAT_INC(StatClockAcquireLarge);
+ if (src->elem(tid_).reused == reused_) {
+ CPP_STAT_INC(StatClockAcquireRepeat);
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ unsigned tid = src->dirty_tids_[i];
+ if (tid != kInvalidTid) {
+ u64 epoch = src->elem(tid).epoch;
+ if (clk_[tid].epoch < epoch) {
+ clk_[tid].epoch = epoch;
+ acquired = true;
+ }
+ }
+ }
+ if (acquired) {
+ CPP_STAT_INC(StatClockAcquiredSomething);
+ last_acquire_ = clk_[tid_].epoch;
+ }
+ return;
+ }
+ }
+
+ // O(N) acquire.
+ CPP_STAT_INC(StatClockAcquireFull);
nclk_ = max(nclk_, nclk);
for (uptr i = 0; i < nclk; i++) {
- if (clk_[i] < src->clk_[i])
- clk_[i] = src->clk_[i];
+ u64 epoch = src->elem(i).epoch;
+ if (clk_[i].epoch < epoch) {
+ clk_[i].epoch = epoch;
+ acquired = true;
+ }
+ }
+
+ // Remember that this thread has acquired this clock.
+ if (nclk > tid_)
+ src->elem(tid_).reused = reused_;
+
+ if (acquired) {
+ CPP_STAT_INC(StatClockAcquiredSomething);
+ last_acquire_ = clk_[tid_].epoch;
}
}
-void ThreadClock::release(SyncClock *dst) const {
- DCHECK(nclk_ <= kMaxTid);
- DCHECK(dst->clk_.Size() <= kMaxTid);
+void ThreadClock::release(ClockCache *c, SyncClock *dst) const {
+ DCHECK_LE(nclk_, kMaxTid);
+ DCHECK_LE(dst->size_, kMaxTid);
+
+ if (dst->size_ == 0) {
+ // ReleaseStore will correctly set release_store_tid_,
+ // which can be important for future operations.
+ ReleaseStore(c, dst);
+ return;
+ }
+
+ CPP_STAT_INC(StatClockRelease);
+ // Check if we need to resize dst.
+ if (dst->size_ < nclk_)
+ dst->Resize(c, nclk_);
- if (dst->clk_.Size() < nclk_)
- dst->clk_.Resize(nclk_);
+ // Check if we had not acquired anything from other threads
+ // since the last release on dst. If so, we need to update
+ // only dst->elem(tid_).
+ if (dst->elem(tid_).epoch > last_acquire_) {
+ UpdateCurrentThread(dst);
+ if (dst->release_store_tid_ != tid_ ||
+ dst->release_store_reused_ != reused_)
+ dst->release_store_tid_ = kInvalidTid;
+ return;
+ }
+
+ // O(N) release.
+ CPP_STAT_INC(StatClockReleaseFull);
+ // First, remember whether we've acquired dst.
+ bool acquired = IsAlreadyAcquired(dst);
+ if (acquired)
+ CPP_STAT_INC(StatClockReleaseAcquired);
+ // Update dst->clk_.
for (uptr i = 0; i < nclk_; i++) {
- if (dst->clk_[i] < clk_[i])
- dst->clk_[i] = clk_[i];
+ ClockElem &ce = dst->elem(i);
+ ce.epoch = max(ce.epoch, clk_[i].epoch);
+ ce.reused = 0;
}
+ // Clear 'acquired' flag in the remaining elements.
+ if (nclk_ < dst->size_)
+ CPP_STAT_INC(StatClockReleaseClearTail);
+ for (uptr i = nclk_; i < dst->size_; i++)
+ dst->elem(i).reused = 0;
+ for (unsigned i = 0; i < kDirtyTids; i++)
+ dst->dirty_tids_[i] = kInvalidTid;
+ dst->release_store_tid_ = kInvalidTid;
+ dst->release_store_reused_ = 0;
+ // If we've acquired dst, remember this fact,
+ // so that we don't need to acquire it on next acquire.
+ if (acquired)
+ dst->elem(tid_).reused = reused_;
}
-void ThreadClock::ReleaseStore(SyncClock *dst) const {
+void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) const {
DCHECK(nclk_ <= kMaxTid);
- DCHECK(dst->clk_.Size() <= kMaxTid);
+ DCHECK(dst->size_ <= kMaxTid);
+ CPP_STAT_INC(StatClockStore);
- if (dst->clk_.Size() < nclk_)
- dst->clk_.Resize(nclk_);
- for (uptr i = 0; i < nclk_; i++)
- dst->clk_[i] = clk_[i];
- for (uptr i = nclk_; i < dst->clk_.Size(); i++)
- dst->clk_[i] = 0;
+ // Check if we need to resize dst.
+ if (dst->size_ < nclk_)
+ dst->Resize(c, nclk_);
+
+ if (dst->release_store_tid_ == tid_ &&
+ dst->release_store_reused_ == reused_ &&
+ dst->elem(tid_).epoch > last_acquire_) {
+ CPP_STAT_INC(StatClockStoreFast);
+ UpdateCurrentThread(dst);
+ return;
+ }
+
+ // O(N) release-store.
+ CPP_STAT_INC(StatClockStoreFull);
+ for (uptr i = 0; i < nclk_; i++) {
+ ClockElem &ce = dst->elem(i);
+ ce.epoch = clk_[i].epoch;
+ ce.reused = 0;
+ }
+ // Clear the tail of dst->clk_.
+ if (nclk_ < dst->size_) {
+ for (uptr i = nclk_; i < dst->size_; i++) {
+ ClockElem &ce = dst->elem(i);
+ ce.epoch = 0;
+ ce.reused = 0;
+ }
+ CPP_STAT_INC(StatClockStoreTail);
+ }
+ for (unsigned i = 0; i < kDirtyTids; i++)
+ dst->dirty_tids_[i] = kInvalidTid;
+ dst->release_store_tid_ = tid_;
+ dst->release_store_reused_ = reused_;
+ // Remember that we don't need to acquire it in the future.
+ dst->elem(tid_).reused = reused_;
}
-void ThreadClock::acq_rel(SyncClock *dst) {
- acquire(dst);
- release(dst);
+void ThreadClock::acq_rel(ClockCache *c, SyncClock *dst) {
+ CPP_STAT_INC(StatClockAcquireRelease);
+ acquire(c, dst);
+ ReleaseStore(c, dst);
+}
+
+// Updates only a single element related to the current thread in dst->clk_.
+void ThreadClock::UpdateCurrentThread(SyncClock *dst) const {
+ // Update the thread's time, but preserve the 'acquired' flag.
+ dst->elem(tid_).epoch = clk_[tid_].epoch;
+
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ if (dst->dirty_tids_[i] == tid_) {
+ CPP_STAT_INC(StatClockReleaseFast1);
+ return;
+ }
+ if (dst->dirty_tids_[i] == kInvalidTid) {
+ CPP_STAT_INC(StatClockReleaseFast2);
+ dst->dirty_tids_[i] = tid_;
+ return;
+ }
+ }
+ // Reset all 'acquired' flags, O(N).
+ CPP_STAT_INC(StatClockReleaseSlow);
+ for (uptr i = 0; i < dst->size_; i++)
+ dst->elem(i).reused = 0;
+ for (unsigned i = 0; i < kDirtyTids; i++)
+ dst->dirty_tids_[i] = kInvalidTid;
+}
+
+// Checks whether the current thread has already acquired src.
+bool ThreadClock::IsAlreadyAcquired(const SyncClock *src) const {
+ if (src->elem(tid_).reused != reused_)
+ return false;
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ unsigned tid = src->dirty_tids_[i];
+ if (tid != kInvalidTid) {
+ if (clk_[tid].epoch < src->elem(tid).epoch)
+ return false;
+ }
+ }
+ return true;
+}
+
+void SyncClock::Resize(ClockCache *c, uptr nclk) {
+ CPP_STAT_INC(StatClockReleaseResize);
+ if (RoundUpTo(nclk, ClockBlock::kClockCount) <=
+ RoundUpTo(size_, ClockBlock::kClockCount)) {
+ // Growing within the same block.
+ // Memory is already allocated, just increase the size.
+ size_ = nclk;
+ return;
+ }
+ if (nclk <= ClockBlock::kClockCount) {
+ // Grow from 0 to one-level table.
+ CHECK_EQ(size_, 0);
+ CHECK_EQ(tab_, 0);
+ CHECK_EQ(tab_idx_, 0);
+ size_ = nclk;
+ tab_idx_ = ctx->clock_alloc.Alloc(c);
+ tab_ = ctx->clock_alloc.Map(tab_idx_);
+ internal_memset(tab_, 0, sizeof(*tab_));
+ return;
+ }
+ // Growing two-level table.
+ if (size_ == 0) {
+ // Allocate first level table.
+ tab_idx_ = ctx->clock_alloc.Alloc(c);
+ tab_ = ctx->clock_alloc.Map(tab_idx_);
+ internal_memset(tab_, 0, sizeof(*tab_));
+ } else if (size_ <= ClockBlock::kClockCount) {
+ // Transform one-level table to two-level table.
+ u32 old = tab_idx_;
+ tab_idx_ = ctx->clock_alloc.Alloc(c);
+ tab_ = ctx->clock_alloc.Map(tab_idx_);
+ internal_memset(tab_, 0, sizeof(*tab_));
+ tab_->table[0] = old;
+ }
+ // At this point we have first level table allocated.
+ // Add second level tables as necessary.
+ for (uptr i = RoundUpTo(size_, ClockBlock::kClockCount);
+ i < nclk; i += ClockBlock::kClockCount) {
+ u32 idx = ctx->clock_alloc.Alloc(c);
+ ClockBlock *cb = ctx->clock_alloc.Map(idx);
+ internal_memset(cb, 0, sizeof(*cb));
+ CHECK_EQ(tab_->table[i/ClockBlock::kClockCount], 0);
+ tab_->table[i/ClockBlock::kClockCount] = idx;
+ }
+ size_ = nclk;
+}
+
+// Sets a single element in the vector clock.
+// This function is called only from weird places like AcquireGlobal.
+void ThreadClock::set(unsigned tid, u64 v) {
+ DCHECK_LT(tid, kMaxTid);
+ DCHECK_GE(v, clk_[tid].epoch);
+ clk_[tid].epoch = v;
+ if (nclk_ <= tid)
+ nclk_ = tid + 1;
+ last_acquire_ = clk_[tid_].epoch;
+}
+
+void ThreadClock::DebugDump(int(*printf)(const char *s, ...)) {
+ printf("clock=[");
+ for (uptr i = 0; i < nclk_; i++)
+ printf("%s%llu", i == 0 ? "" : ",", clk_[i].epoch);
+ printf("] reused=[");
+ for (uptr i = 0; i < nclk_; i++)
+ printf("%s%llu", i == 0 ? "" : ",", clk_[i].reused);
+ printf("] tid=%u/%u last_acq=%llu",
+ tid_, reused_, last_acquire_);
}
SyncClock::SyncClock()
- : clk_(MBlockClock) {
+ : release_store_tid_(kInvalidTid)
+ , release_store_reused_()
+ , tab_()
+ , tab_idx_()
+ , size_() {
+ for (uptr i = 0; i < kDirtyTids; i++)
+ dirty_tids_[i] = kInvalidTid;
+}
+
+SyncClock::~SyncClock() {
+ // Reset must be called before dtor.
+ CHECK_EQ(size_, 0);
+ CHECK_EQ(tab_, 0);
+ CHECK_EQ(tab_idx_, 0);
+}
+
+void SyncClock::Reset(ClockCache *c) {
+ if (size_ == 0) {
+ // nothing
+ } else if (size_ <= ClockBlock::kClockCount) {
+ // One-level table.
+ ctx->clock_alloc.Free(c, tab_idx_);
+ } else {
+ // Two-level table.
+ for (uptr i = 0; i < size_; i += ClockBlock::kClockCount)
+ ctx->clock_alloc.Free(c, tab_->table[i / ClockBlock::kClockCount]);
+ ctx->clock_alloc.Free(c, tab_idx_);
+ }
+ tab_ = 0;
+ tab_idx_ = 0;
+ size_ = 0;
+ release_store_tid_ = kInvalidTid;
+ release_store_reused_ = 0;
+ for (uptr i = 0; i < kDirtyTids; i++)
+ dirty_tids_[i] = kInvalidTid;
+}
+
+ClockElem &SyncClock::elem(unsigned tid) const {
+ DCHECK_LT(tid, size_);
+ if (size_ <= ClockBlock::kClockCount)
+ return tab_->clock[tid];
+ u32 idx = tab_->table[tid / ClockBlock::kClockCount];
+ ClockBlock *cb = ctx->clock_alloc.Map(idx);
+ return cb->clock[tid % ClockBlock::kClockCount];
+}
+
+void SyncClock::DebugDump(int(*printf)(const char *s, ...)) {
+ printf("clock=[");
+ for (uptr i = 0; i < size_; i++)
+ printf("%s%llu", i == 0 ? "" : ",", elem(i).epoch);
+ printf("] reused=[");
+ for (uptr i = 0; i < size_; i++)
+ printf("%s%llu", i == 0 ? "" : ",", elem(i).reused);
+ printf("] release_store_tid=%d/%d dirty_tids=%d/%d",
+ release_store_tid_, release_store_reused_,
+ dirty_tids_[0], dirty_tids_[1]);
}
} // namespace __tsan
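
A minimal standalone sketch of the naive O(N) model that the optimized
SyncClock/ThreadClock above must stay equivalent to (and that
tsan_clock_test.cc checks against). The Simple* names are hypothetical and
exist only for illustration; they are not part of the runtime.

#include <algorithm>
#include <cstdint>
#include <vector>

struct SimpleSyncClock {
  std::vector<uint64_t> clock;  // one epoch per thread id
};

struct SimpleThreadClock {
  unsigned tid;
  std::vector<uint64_t> clock;  // fixed-size in the real implementation

  SimpleThreadClock(unsigned tid, size_t nthreads) : tid(tid), clock(nthreads) {}

  // acquire: pull other threads' epochs into our clock, O(N).
  void acquire(const SimpleSyncClock &src) {
    if (clock.size() < src.clock.size()) clock.resize(src.clock.size());
    for (size_t i = 0; i < src.clock.size(); i++)
      clock[i] = std::max(clock[i], src.clock[i]);
  }

  // release: merge our clock into the sync variable's clock, O(N).
  void release(SimpleSyncClock *dst) const {
    if (dst->clock.size() < clock.size()) dst->clock.resize(clock.size());
    for (size_t i = 0; i < clock.size(); i++)
      dst->clock[i] = std::max(dst->clock[i], clock[i]);
  }

  // release-store: overwrite the sync clock with our clock.
  void ReleaseStore(SimpleSyncClock *dst) const { dst->clock = clock; }

  // acq_rel: acquire followed by release.
  void acq_rel(SimpleSyncClock *dst) { acquire(*dst); release(dst); }

  void tick() { clock[tid]++; }  // advance our own epoch on each event
};

The fast paths above (dirty_tids_, the reused/"acquired" flag,
release_store_tid_) only skip work when the result is provably identical to
what this naive version would compute.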
diff --git a/lib/tsan/rtl/tsan_clock.h b/lib/tsan/rtl/tsan_clock.h
index 0ee93749b881d..4e352cb81d110 100644
--- a/lib/tsan/rtl/tsan_clock.h
+++ b/lib/tsan/rtl/tsan_clock.h
@@ -14,65 +14,114 @@
#define TSAN_CLOCK_H
#include "tsan_defs.h"
-#include "tsan_vector.h"
+#include "tsan_dense_alloc.h"
namespace __tsan {
+struct ClockElem {
+ u64 epoch : kClkBits;
+ u64 reused : 64 - kClkBits;
+};
+
+struct ClockBlock {
+ static const uptr kSize = 512;
+ static const uptr kTableSize = kSize / sizeof(u32);
+ static const uptr kClockCount = kSize / sizeof(ClockElem);
+
+ union {
+ u32 table[kTableSize];
+ ClockElem clock[kClockCount];
+ };
+
+ ClockBlock() {
+ }
+};
+
+typedef DenseSlabAlloc<ClockBlock, 1<<16, 1<<10> ClockAlloc;
+typedef DenseSlabAllocCache ClockCache;
+
// The clock that lives in sync variables (mutexes, atomics, etc).
class SyncClock {
public:
SyncClock();
+ ~SyncClock();
uptr size() const {
- return clk_.Size();
+ return size_;
}
- void Reset() {
- clk_.Reset();
+ u64 get(unsigned tid) const {
+ return elem(tid).epoch;
}
+ void Resize(ClockCache *c, uptr nclk);
+ void Reset(ClockCache *c);
+
+ void DebugDump(int(*printf)(const char *s, ...));
+
private:
- Vector<u64> clk_;
friend struct ThreadClock;
+ static const uptr kDirtyTids = 2;
+
+ unsigned release_store_tid_;
+ unsigned release_store_reused_;
+ unsigned dirty_tids_[kDirtyTids];
+ // tab_ contains an indirect pointer to a 512b block using DenseSlabAlloc.
+ // If size_ <= 64, then tab_ points to an array with 64 ClockElem's.
+ // Otherwise, tab_ points to an array with 128 u32 elements,
+ // each pointing to the second-level 512b block with 64 ClockElem's.
+ ClockBlock *tab_;
+ u32 tab_idx_;
+ u32 size_;
+
+ ClockElem &elem(unsigned tid) const;
};
// The clock that lives in threads.
struct ThreadClock {
public:
- ThreadClock();
+ typedef DenseSlabAllocCache Cache;
+
+ explicit ThreadClock(unsigned tid, unsigned reused = 0);
u64 get(unsigned tid) const {
DCHECK_LT(tid, kMaxTidInClock);
- return clk_[tid];
+ return clk_[tid].epoch;
}
- void set(unsigned tid, u64 v) {
- DCHECK_LT(tid, kMaxTid);
- DCHECK_GE(v, clk_[tid]);
- clk_[tid] = v;
- if (nclk_ <= tid)
- nclk_ = tid + 1;
+ void set(unsigned tid, u64 v);
+
+ void set(u64 v) {
+ DCHECK_GE(v, clk_[tid_].epoch);
+ clk_[tid_].epoch = v;
}
- void tick(unsigned tid) {
- DCHECK_LT(tid, kMaxTid);
- clk_[tid]++;
- if (nclk_ <= tid)
- nclk_ = tid + 1;
+ void tick() {
+ clk_[tid_].epoch++;
}
uptr size() const {
return nclk_;
}
- void acquire(const SyncClock *src);
- void release(SyncClock *dst) const;
- void acq_rel(SyncClock *dst);
- void ReleaseStore(SyncClock *dst) const;
+ void acquire(ClockCache *c, const SyncClock *src);
+ void release(ClockCache *c, SyncClock *dst) const;
+ void acq_rel(ClockCache *c, SyncClock *dst);
+ void ReleaseStore(ClockCache *c, SyncClock *dst) const;
+
+ void DebugReset();
+ void DebugDump(int(*printf)(const char *s, ...));
private:
+ static const uptr kDirtyTids = SyncClock::kDirtyTids;
+ const unsigned tid_;
+ const unsigned reused_;
+ u64 last_acquire_;
uptr nclk_;
- u64 clk_[kMaxTidInClock];
+ ClockElem clk_[kMaxTidInClock];
+
+ bool IsAlreadyAcquired(const SyncClock *src) const;
+ void UpdateCurrentThread(SyncClock *dst) const;
};
} // namespace __tsan
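
A hedged sketch of the storage scheme behind SyncClock::elem(): a clock of up
to 64 elements lives directly in one 512-byte ClockBlock, while larger clocks
use a first-level block of u32 indices, each naming a second-level block of 64
elements. The Map() declaration stands in for ctx->clock_alloc.Map() and is an
assumption of this sketch, not the real API surface.

#include <cstddef>
#include <cstdint>

struct Elem { uint64_t epoch : 42; uint64_t reused : 22; };  // kClkBits == 42

constexpr size_t kSize = 512;                            // bytes per block
constexpr size_t kClockCount = kSize / sizeof(Elem);     // 64 elements per block
constexpr size_t kTableSize = kSize / sizeof(uint32_t);  // 128 index slots

union Block {
  uint32_t table[kTableSize];  // first level: indices of second-level blocks
  Elem clock[kClockCount];     // one-level case: the elements themselves
};

Block *Map(uint32_t idx);  // assumed: resolves a dense-alloc index to a block

// Mirrors SyncClock::elem(): one indirection for small clocks, two otherwise.
Elem &elem(Block *tab, size_t size, unsigned tid) {
  if (size <= kClockCount)
    return tab->clock[tid];
  uint32_t idx = tab->table[tid / kClockCount];
  return Map(idx)->clock[tid % kClockCount];
}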
diff --git a/lib/tsan/rtl/tsan_defs.h b/lib/tsan/rtl/tsan_defs.h
index 1e53a8e02f0aa..7ed3796b50126 100644
--- a/lib/tsan/rtl/tsan_defs.h
+++ b/lib/tsan/rtl/tsan_defs.h
@@ -24,7 +24,7 @@
namespace __tsan {
-#ifdef TSAN_GO
+#ifdef SANITIZER_GO
const bool kGoMode = true;
const bool kCppMode = false;
const char *const kTsanOptionsEnv = "GORACE";
@@ -41,8 +41,8 @@ const int kTidBits = 13;
const unsigned kMaxTid = 1 << kTidBits;
const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit.
const int kClkBits = 42;
+const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1;
const uptr kShadowStackSize = 64 * 1024;
-const uptr kTraceStackSize = 256;
#ifdef TSAN_SHADOW_COUNT
# if TSAN_SHADOW_COUNT == 2 \
@@ -53,6 +53,7 @@ const uptr kShadowCnt = TSAN_SHADOW_COUNT;
# endif
#else
// Count of shadow values in a shadow cell.
+#define TSAN_SHADOW_COUNT 4
const uptr kShadowCnt = 4;
#endif
@@ -65,6 +66,19 @@ const uptr kShadowSize = 8;
// Shadow memory is kShadowMultiplier times larger than user memory.
const uptr kShadowMultiplier = kShadowSize * kShadowCnt / kShadowCell;
+// That many user bytes are mapped onto a single meta shadow cell.
+// Must be less than or equal to the minimal memory allocator alignment.
+const uptr kMetaShadowCell = 8;
+
+// Size of a single meta shadow value (u32).
+const uptr kMetaShadowSize = 4;
+
+#if defined(TSAN_NO_HISTORY) && TSAN_NO_HISTORY
+const bool kCollectHistory = false;
+#else
+const bool kCollectHistory = true;
+#endif
+
#if defined(TSAN_COLLECT_STATS) && TSAN_COLLECT_STATS
const bool kCollectStats = true;
#else
@@ -154,12 +168,20 @@ struct MD5Hash {
MD5Hash md5_hash(const void *data, uptr size);
struct ThreadState;
+class ThreadContext;
struct Context;
struct ReportStack;
class ReportDesc;
class RegionAlloc;
-class StackTrace;
-struct MBlock;
+
+// Descriptor of user's memory block.
+struct MBlock {
+ u64 siz;
+ u32 stk;
+ u16 tid;
+};
+
+COMPILER_CHECK(sizeof(MBlock) == 16);
} // namespace __tsan
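
A small compile-time sanity sketch (not tsan code) of the size arithmetic in
tsan_defs.h above; LocalMBlock is a hypothetical stand-in for MBlock, and
kShadowCell = 8 is taken from the unchanged part of the header.

#include <cstdint>

constexpr uint64_t kClkBits = 42;  // timestamp bits per clock element
constexpr uint64_t kMaxTidReuse = (1ull << (64 - kClkBits)) - 1;
static_assert(kMaxTidReuse == (1ull << 22) - 1, "22-bit thread reuse counter");

constexpr uint64_t kShadowCell = 8;  // user bytes covered by one shadow cell
constexpr uint64_t kShadowSize = 8;  // bytes per shadow value
constexpr uint64_t kShadowCnt = 4;   // shadow values per cell
// Shadow memory is kShadowSize * kShadowCnt / kShadowCell = 4x user memory.
static_assert(kShadowSize * kShadowCnt / kShadowCell == 4, "shadow multiplier");

struct LocalMBlock {  // mirrors MBlock: u64 siz, u32 stk, u16 tid
  uint64_t siz;
  uint32_t stk;
  uint16_t tid;
};
// 8 + 4 + 2 bytes, padded to 16 -- the same invariant COMPILER_CHECK enforces.
static_assert(sizeof(LocalMBlock) == 16, "MBlock must stay 16 bytes");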
diff --git a/lib/tsan/rtl/tsan_dense_alloc.h b/lib/tsan/rtl/tsan_dense_alloc.h
new file mode 100644
index 0000000000000..a1cf84b8f1661
--- /dev/null
+++ b/lib/tsan/rtl/tsan_dense_alloc.h
@@ -0,0 +1,137 @@
+//===-- tsan_dense_alloc.h --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// A DenseSlabAlloc is a freelist-based allocator of fixed-size objects.
+// DenseSlabAllocCache is a thread-local cache for DenseSlabAlloc.
+// The only difference from traditional slab allocators is that DenseSlabAlloc
+// allocates/frees indices of objects and provides functionality to map
+// an index onto the real pointer. The index is u32, that is, 2 times smaller
+// than uptr (hence the Dense prefix).
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_DENSE_ALLOC_H
+#define TSAN_DENSE_ALLOC_H
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "tsan_defs.h"
+#include "tsan_mutex.h"
+
+namespace __tsan {
+
+class DenseSlabAllocCache {
+ static const uptr kSize = 128;
+ typedef u32 IndexT;
+ uptr pos;
+ IndexT cache[kSize];
+ template<typename T, uptr kL1Size, uptr kL2Size> friend class DenseSlabAlloc;
+};
+
+template<typename T, uptr kL1Size, uptr kL2Size>
+class DenseSlabAlloc {
+ public:
+ typedef DenseSlabAllocCache Cache;
+ typedef typename Cache::IndexT IndexT;
+
+ DenseSlabAlloc() {
+ // Check that kL1Size and kL2Size are sane.
+ CHECK_EQ(kL1Size & (kL1Size - 1), 0);
+ CHECK_EQ(kL2Size & (kL2Size - 1), 0);
+ CHECK_GE(1ull << (sizeof(IndexT) * 8), kL1Size * kL2Size);
+ // Check that it makes sense to use the dense alloc.
+ CHECK_GE(sizeof(T), sizeof(IndexT));
+ internal_memset(map_, 0, sizeof(map_));
+ freelist_ = 0;
+ fillpos_ = 0;
+ }
+
+ ~DenseSlabAlloc() {
+ for (uptr i = 0; i < kL1Size; i++) {
+ if (map_[i] != 0)
+ UnmapOrDie(map_[i], kL2Size * sizeof(T));
+ }
+ }
+
+ IndexT Alloc(Cache *c) {
+ if (c->pos == 0)
+ Refill(c);
+ return c->cache[--c->pos];
+ }
+
+ void Free(Cache *c, IndexT idx) {
+ DCHECK_NE(idx, 0);
+ if (c->pos == Cache::kSize)
+ Drain(c);
+ c->cache[c->pos++] = idx;
+ }
+
+ T *Map(IndexT idx) {
+ DCHECK_NE(idx, 0);
+ DCHECK_LE(idx, kL1Size * kL2Size);
+ return &map_[idx / kL2Size][idx % kL2Size];
+ }
+
+ void FlushCache(Cache *c) {
+ SpinMutexLock lock(&mtx_);
+ while (c->pos) {
+ IndexT idx = c->cache[--c->pos];
+ *(IndexT*)Map(idx) = freelist_;
+ freelist_ = idx;
+ }
+ }
+
+ void InitCache(Cache *c) {
+ c->pos = 0;
+ internal_memset(c->cache, 0, sizeof(c->cache));
+ }
+
+ private:
+ T *map_[kL1Size];
+ SpinMutex mtx_;
+ IndexT freelist_;
+ uptr fillpos_;
+
+ void Refill(Cache *c) {
+ SpinMutexLock lock(&mtx_);
+ if (freelist_ == 0) {
+ if (fillpos_ == kL1Size) {
+ Printf("ThreadSanitizer: DenseSlabAllocator overflow. Dying.\n");
+ Die();
+ }
+ T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), "DenseSlabAllocator");
+ // Reserve 0 as invalid index.
+ IndexT start = fillpos_ == 0 ? 1 : 0;
+ for (IndexT i = start; i < kL2Size; i++) {
+ new(batch + i) T();
+ *(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size;
+ }
+ *(IndexT*)(batch + kL2Size - 1) = 0;
+ freelist_ = fillpos_ * kL2Size + start;
+ map_[fillpos_++] = batch;
+ }
+ for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) {
+ IndexT idx = freelist_;
+ c->cache[c->pos++] = idx;
+ freelist_ = *(IndexT*)Map(idx);
+ }
+ }
+
+ void Drain(Cache *c) {
+ SpinMutexLock lock(&mtx_);
+ for (uptr i = 0; i < Cache::kSize / 2; i++) {
+ IndexT idx = c->cache[--c->pos];
+ *(IndexT*)Map(idx) = freelist_;
+ freelist_ = idx;
+ }
+ }
+};
+
+} // namespace __tsan
+
+#endif // TSAN_DENSE_ALLOC_H
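
A hypothetical usage sketch of the allocator declared above. Node is an
illustrative payload type; the template parameters and the call sequence
follow the API in this header, but nothing like this Example() function
exists in the runtime.

#include "tsan_dense_alloc.h"

namespace __tsan {

struct Node {  // payload must be at least sizeof(IndexT) == 4 bytes
  u32 link;
  u32 value;
};

void Example() {
  typedef DenseSlabAlloc<Node, 1 << 10, 1 << 10> NodeAlloc;  // up to 2^20 objects
  static NodeAlloc alloc;          // slabs are unmapped in the destructor
  NodeAlloc::Cache cache;
  alloc.InitCache(&cache);         // per-thread cache of free indices

  u32 idx = alloc.Alloc(&cache);   // O(1) from the cache; refills take the mutex
  Node *n = alloc.Map(idx);        // index -> pointer (index 0 is reserved as invalid)
  n->value = 42;
  alloc.Free(&cache, idx);         // returns the index to the cache
  alloc.FlushCache(&cache);        // pushes cached indices back to the global freelist
}

}  // namespace __tsan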
diff --git a/lib/tsan/rtl/tsan_fd.cc b/lib/tsan/rtl/tsan_fd.cc
index 86db119fc9181..d18502f005403 100644
--- a/lib/tsan/rtl/tsan_fd.cc
+++ b/lib/tsan/rtl/tsan_fd.cc
@@ -44,11 +44,12 @@ static FdContext fdctx;
static bool bogusfd(int fd) {
// Apparently a bogus fd value.
- return fd < 0 || fd >= (1 << 30);
+ return fd < 0 || fd >= kTableSize;
}
-static FdSync *allocsync() {
- FdSync *s = (FdSync*)internal_alloc(MBlockFD, sizeof(FdSync));
+static FdSync *allocsync(ThreadState *thr, uptr pc) {
+ FdSync *s = (FdSync*)user_alloc(thr, pc, sizeof(FdSync), kDefaultAlignment,
+ false);
atomic_store(&s->rc, 1, memory_order_relaxed);
return s;
}
@@ -65,10 +66,7 @@ static void unref(ThreadState *thr, uptr pc, FdSync *s) {
CHECK_NE(s, &fdctx.globsync);
CHECK_NE(s, &fdctx.filesync);
CHECK_NE(s, &fdctx.socksync);
- SyncVar *v = CTX()->synctab.GetAndRemove(thr, pc, (uptr)s);
- if (v)
- DestroyAndFree(v);
- internal_free(s);
+ user_free(thr, pc, s, false);
}
}
}
@@ -81,13 +79,13 @@ static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
if (l1 == 0) {
uptr size = kTableSizeL2 * sizeof(FdDesc);
// We need this to reside in user memory to properly catch races on it.
- void *p = user_alloc(thr, pc, size);
+ void *p = user_alloc(thr, pc, size, kDefaultAlignment, false);
internal_memset(p, 0, size);
MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
l1 = (uptr)p;
else
- user_free(thr, pc, p);
+ user_free(thr, pc, p, false);
}
return &((FdDesc*)l1)[fd % kTableSizeL2]; // NOLINT
}
@@ -219,7 +217,7 @@ void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd) {
void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) {
DPrintf("#%d: FdCreatePipe(%d, %d)\n", thr->tid, rfd, wfd);
- FdSync *s = allocsync();
+ FdSync *s = allocsync(thr, pc);
init(thr, pc, rfd, ref(s));
init(thr, pc, wfd, ref(s));
unref(thr, pc, s);
@@ -229,7 +227,7 @@ void FdEventCreate(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdEventCreate(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
- init(thr, pc, fd, allocsync());
+ init(thr, pc, fd, allocsync(thr, pc));
}
void FdSignalCreate(ThreadState *thr, uptr pc, int fd) {
@@ -250,7 +248,7 @@ void FdPollCreate(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdPollCreate(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
- init(thr, pc, fd, allocsync());
+ init(thr, pc, fd, allocsync(thr, pc));
}
void FdSocketCreate(ThreadState *thr, uptr pc, int fd) {
@@ -285,13 +283,13 @@ void FdSocketConnect(ThreadState *thr, uptr pc, int fd) {
init(thr, pc, fd, &fdctx.socksync);
}
-uptr File2addr(char *path) {
+uptr File2addr(const char *path) {
(void)path;
static u64 addr;
return (uptr)&addr;
}
-uptr Dir2addr(char *path) {
+uptr Dir2addr(const char *path) {
(void)path;
static u64 addr;
return (uptr)&addr;
diff --git a/lib/tsan/rtl/tsan_fd.h b/lib/tsan/rtl/tsan_fd.h
index 979198e2e74d4..75c616dae0fde 100644
--- a/lib/tsan/rtl/tsan_fd.h
+++ b/lib/tsan/rtl/tsan_fd.h
@@ -57,8 +57,8 @@ void FdSocketConnect(ThreadState *thr, uptr pc, int fd);
bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack);
void FdOnFork(ThreadState *thr, uptr pc);
-uptr File2addr(char *path);
-uptr Dir2addr(char *path);
+uptr File2addr(const char *path);
+uptr Dir2addr(const char *path);
} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_flags.cc b/lib/tsan/rtl/tsan_flags.cc
index c6f24bf49abd8..5dc331f594697 100644
--- a/lib/tsan/rtl/tsan_flags.cc
+++ b/lib/tsan/rtl/tsan_flags.cc
@@ -20,46 +20,46 @@
namespace __tsan {
Flags *flags() {
- return &CTX()->flags;
+ return &ctx->flags;
}
// Can be overriden in frontend.
#ifdef TSAN_EXTERNAL_HOOKS
-void OverrideFlags(Flags *f);
extern "C" const char* __tsan_default_options();
#else
-void WEAK OverrideFlags(Flags *f) {
- (void)f;
-}
-extern "C" const char *WEAK __tsan_default_options() {
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+const char *WEAK __tsan_default_options() {
return "";
}
#endif
static void ParseFlags(Flags *f, const char *env) {
- ParseFlag(env, &f->enable_annotations, "enable_annotations");
- ParseFlag(env, &f->suppress_equal_stacks, "suppress_equal_stacks");
- ParseFlag(env, &f->suppress_equal_addresses, "suppress_equal_addresses");
- ParseFlag(env, &f->suppress_java, "suppress_java");
- ParseFlag(env, &f->report_bugs, "report_bugs");
- ParseFlag(env, &f->report_thread_leaks, "report_thread_leaks");
- ParseFlag(env, &f->report_destroy_locked, "report_destroy_locked");
- ParseFlag(env, &f->report_signal_unsafe, "report_signal_unsafe");
- ParseFlag(env, &f->report_atomic_races, "report_atomic_races");
- ParseFlag(env, &f->force_seq_cst_atomics, "force_seq_cst_atomics");
- ParseFlag(env, &f->suppressions, "suppressions");
- ParseFlag(env, &f->print_suppressions, "print_suppressions");
- ParseFlag(env, &f->print_benign, "print_benign");
- ParseFlag(env, &f->exitcode, "exitcode");
- ParseFlag(env, &f->halt_on_error, "halt_on_error");
- ParseFlag(env, &f->atexit_sleep_ms, "atexit_sleep_ms");
- ParseFlag(env, &f->profile_memory, "profile_memory");
- ParseFlag(env, &f->flush_memory_ms, "flush_memory_ms");
- ParseFlag(env, &f->flush_symbolizer_ms, "flush_symbolizer_ms");
- ParseFlag(env, &f->memory_limit_mb, "memory_limit_mb");
- ParseFlag(env, &f->stop_on_start, "stop_on_start");
- ParseFlag(env, &f->history_size, "history_size");
- ParseFlag(env, &f->io_sync, "io_sync");
+ ParseFlag(env, &f->enable_annotations, "enable_annotations", "");
+ ParseFlag(env, &f->suppress_equal_stacks, "suppress_equal_stacks", "");
+ ParseFlag(env, &f->suppress_equal_addresses, "suppress_equal_addresses", "");
+ ParseFlag(env, &f->report_bugs, "report_bugs", "");
+ ParseFlag(env, &f->report_thread_leaks, "report_thread_leaks", "");
+ ParseFlag(env, &f->report_destroy_locked, "report_destroy_locked", "");
+ ParseFlag(env, &f->report_mutex_bugs, "report_mutex_bugs", "");
+ ParseFlag(env, &f->report_signal_unsafe, "report_signal_unsafe", "");
+ ParseFlag(env, &f->report_atomic_races, "report_atomic_races", "");
+ ParseFlag(env, &f->force_seq_cst_atomics, "force_seq_cst_atomics", "");
+ ParseFlag(env, &f->print_benign, "print_benign", "");
+ ParseFlag(env, &f->exitcode, "exitcode", "");
+ ParseFlag(env, &f->halt_on_error, "halt_on_error", "");
+ ParseFlag(env, &f->atexit_sleep_ms, "atexit_sleep_ms", "");
+ ParseFlag(env, &f->profile_memory, "profile_memory", "");
+ ParseFlag(env, &f->flush_memory_ms, "flush_memory_ms", "");
+ ParseFlag(env, &f->flush_symbolizer_ms, "flush_symbolizer_ms", "");
+ ParseFlag(env, &f->memory_limit_mb, "memory_limit_mb", "");
+ ParseFlag(env, &f->stop_on_start, "stop_on_start", "");
+ ParseFlag(env, &f->running_on_valgrind, "running_on_valgrind", "");
+ ParseFlag(env, &f->history_size, "history_size", "");
+ ParseFlag(env, &f->io_sync, "io_sync", "");
+ ParseFlag(env, &f->die_after_fork, "die_after_fork", "");
+
+ // DDFlags
+ ParseFlag(env, &f->second_deadlock_stack, "second_deadlock_stack", "");
}
void InitializeFlags(Flags *f, const char *env) {
@@ -69,15 +69,13 @@ void InitializeFlags(Flags *f, const char *env) {
f->enable_annotations = true;
f->suppress_equal_stacks = true;
f->suppress_equal_addresses = true;
- f->suppress_java = false;
f->report_bugs = true;
f->report_thread_leaks = true;
f->report_destroy_locked = true;
+ f->report_mutex_bugs = true;
f->report_signal_unsafe = true;
f->report_atomic_races = true;
f->force_seq_cst_atomics = false;
- f->suppressions = "";
- f->print_suppressions = false;
f->print_benign = false;
f->exitcode = 66;
f->halt_on_error = false;
@@ -90,19 +88,25 @@ void InitializeFlags(Flags *f, const char *env) {
f->running_on_valgrind = false;
f->history_size = kGoMode ? 1 : 2; // There are a lot of goroutines in Go.
f->io_sync = 1;
+ f->die_after_fork = true;
+
+ // DDFlags
+ f->second_deadlock_stack = false;
CommonFlags *cf = common_flags();
- SetCommonFlagDefaults();
- *static_cast<CommonFlags*>(f) = *cf;
+ SetCommonFlagsDefaults(cf);
+ // Override some common flags defaults.
+ cf->allow_addr2line = true;
+ cf->detect_deadlocks = true;
+ cf->print_suppressions = false;
+ cf->stack_trace_format = " #%n %f %S %M";
// Let a frontend override.
- OverrideFlags(f);
ParseFlags(f, __tsan_default_options());
- ParseCommonFlagsFromString(__tsan_default_options());
+ ParseCommonFlagsFromString(cf, __tsan_default_options());
// Override from command line.
ParseFlags(f, env);
- ParseCommonFlagsFromString(env);
- *static_cast<CommonFlags*>(f) = *cf;
+ ParseCommonFlagsFromString(cf, env);
// Sanity check.
if (!f->report_bugs) {
@@ -111,6 +115,8 @@ void InitializeFlags(Flags *f, const char *env) {
f->report_signal_unsafe = false;
}
+ if (cf->help) PrintFlagDescriptions();
+
if (f->history_size < 0 || f->history_size > 7) {
Printf("ThreadSanitizer: incorrect value for history_size"
" (must be [0..7])\n");
diff --git a/lib/tsan/rtl/tsan_flags.h b/lib/tsan/rtl/tsan_flags.h
index 3916df3cc9e13..621ca139236f9 100644
--- a/lib/tsan/rtl/tsan_flags.h
+++ b/lib/tsan/rtl/tsan_flags.h
@@ -14,34 +14,28 @@
#ifndef TSAN_FLAGS_H
#define TSAN_FLAGS_H
-// ----------- ATTENTION -------------
-// ThreadSanitizer user may provide its implementation of weak
-// symbol __tsan::OverrideFlags(__tsan::Flags). Therefore, this
-// header may be included in the user code, and shouldn't include
-// other headers from TSan or common sanitizer runtime.
-
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
namespace __tsan {
-struct Flags : CommonFlags {
+struct Flags : DDFlags {
// Enable dynamic annotations, otherwise they are no-ops.
bool enable_annotations;
- // Supress a race report if we've already output another race report
+ // Suppress a race report if we've already output another race report
// with the same stack.
bool suppress_equal_stacks;
- // Supress a race report if we've already output another race report
+ // Suppress a race report if we've already output another race report
// on the same address.
bool suppress_equal_addresses;
- // Suppress weird race reports that can be seen if JVM is embed
- // into the process.
- bool suppress_java;
// Turns off bug reporting entirely (useful for benchmarking).
bool report_bugs;
// Report thread leaks at exit?
bool report_thread_leaks;
// Report destruction of a locked mutex?
bool report_destroy_locked;
+ // Report incorrect usages of mutexes and mutex annotations?
+ bool report_mutex_bugs;
// Report violations of async signal-safety
// (e.g. malloc() call from a signal handler).
bool report_signal_unsafe;
@@ -50,10 +44,6 @@ struct Flags : CommonFlags {
// If set, all atomics are effectively sequentially consistent (seq_cst),
// regardless of what user actually specified.
bool force_seq_cst_atomics;
- // Suppressions filename.
- const char *suppressions;
- // Print matched suppressions at exit.
- bool print_suppressions;
// Print matched "benign" races at exit.
bool print_benign;
// Override exit status if something was reported.
@@ -87,6 +77,8 @@ struct Flags : CommonFlags {
// 1 - reasonable level of synchronization (write->read)
// 2 - global synchronization of all IO operations
int io_sync;
+ // Die after multi-threaded fork if the child creates new threads.
+ bool die_after_fork;
};
Flags *flags();
diff --git a/lib/tsan/rtl/tsan_ignoreset.cc b/lib/tsan/rtl/tsan_ignoreset.cc
new file mode 100644
index 0000000000000..cdb90d2299809
--- /dev/null
+++ b/lib/tsan/rtl/tsan_ignoreset.cc
@@ -0,0 +1,47 @@
+//===-- tsan_ignoreset.cc -------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_ignoreset.h"
+
+namespace __tsan {
+
+const uptr IgnoreSet::kMaxSize;
+
+IgnoreSet::IgnoreSet()
+ : size_() {
+}
+
+void IgnoreSet::Add(u32 stack_id) {
+ if (size_ == kMaxSize)
+ return;
+ for (uptr i = 0; i < size_; i++) {
+ if (stacks_[i] == stack_id)
+ return;
+ }
+ stacks_[size_++] = stack_id;
+}
+
+void IgnoreSet::Reset() {
+ size_ = 0;
+}
+
+uptr IgnoreSet::Size() const {
+ return size_;
+}
+
+u32 IgnoreSet::At(uptr i) const {
+ CHECK_LT(i, size_);
+ CHECK_LE(size_, kMaxSize);
+ return stacks_[i];
+}
+
+} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_ignoreset.h b/lib/tsan/rtl/tsan_ignoreset.h
new file mode 100644
index 0000000000000..e747d819c7583
--- /dev/null
+++ b/lib/tsan/rtl/tsan_ignoreset.h
@@ -0,0 +1,38 @@
+//===-- tsan_ignoreset.h ----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// IgnoreSet holds a set of stack traces where ignores were enabled.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_IGNORESET_H
+#define TSAN_IGNORESET_H
+
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+class IgnoreSet {
+ public:
+ static const uptr kMaxSize = 16;
+
+ IgnoreSet();
+ void Add(u32 stack_id);
+ void Reset();
+ uptr Size() const;
+ u32 At(uptr i) const;
+
+ private:
+ uptr size_;
+ u32 stacks_[kMaxSize];
+};
+
+} // namespace __tsan
+
+#endif // TSAN_IGNORESET_H
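
A hypothetical usage sketch: IgnoreSet records up to kMaxSize distinct stack
ids (as produced by the stack depot) at the points where ignores were begun,
so a report can later show where an ignored access originated. The Example()
function below is illustrative only.

#include "tsan_ignoreset.h"

namespace __tsan {

void Example(u32 stack_id_at_ignore_begin) {
  IgnoreSet set;
  set.Add(stack_id_at_ignore_begin);  // stored once
  set.Add(stack_id_at_ignore_begin);  // duplicate: silently dropped
  for (uptr i = 0; i < set.Size(); i++)
    (void)set.At(i);                  // iterate the recorded stack ids
  set.Reset();                        // clear before reuse
}

}  // namespace __tsan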
diff --git a/lib/tsan/rtl/tsan_interceptors.cc b/lib/tsan/rtl/tsan_interceptors.cc
index ef38f7987cde3..5bede0ec7d0cf 100644
--- a/lib/tsan/rtl/tsan_interceptors.cc
+++ b/lib/tsan/rtl/tsan_interceptors.cc
@@ -29,7 +29,17 @@
using namespace __tsan; // NOLINT
-const int kSigCount = 64;
+#if SANITIZER_FREEBSD
+#define __errno_location __error
+#define __libc_malloc __malloc
+#define __libc_realloc __realloc
+#define __libc_calloc __calloc
+#define __libc_free __free
+#define stdout __stdoutp
+#define stderr __stderrp
+#endif
+
+const int kSigCount = 65;
struct my_siginfo_t {
// The size is determined by looking at sizeof of real siginfo_t on linux.
@@ -47,12 +57,13 @@ DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
extern "C" int pthread_setspecific(unsigned key, const void *v);
-extern "C" int pthread_mutexattr_gettype(void *a, int *type);
+DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
extern "C" int pthread_yield();
extern "C" int pthread_sigmask(int how, const __sanitizer_sigset_t *set,
__sanitizer_sigset_t *oldset);
// REAL(sigfillset) defined in common interceptors.
DECLARE_REAL(int, sigfillset, __sanitizer_sigset_t *set)
+DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
extern "C" void *pthread_self();
extern "C" void _exit(int status);
extern "C" int *__errno_location();
@@ -61,7 +72,10 @@ extern "C" void *__libc_malloc(uptr size);
extern "C" void *__libc_calloc(uptr size, uptr n);
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
+#if !SANITIZER_FREEBSD
extern "C" int mallopt(int param, int value);
+#endif
+extern __sanitizer_FILE *stdout, *stderr;
const int PTHREAD_MUTEX_RECURSIVE = 1;
const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
const int EINVAL = 22;
@@ -73,6 +87,7 @@ const int SIGABRT = 6;
const int SIGFPE = 8;
const int SIGSEGV = 11;
const int SIGPIPE = 13;
+const int SIGTERM = 15;
const int SIGBUS = 7;
const int SIGSYS = 31;
void *const MAP_FAILED = (void*)-1;
@@ -95,9 +110,14 @@ struct sigaction_t {
sighandler_t sa_handler;
void (*sa_sigaction)(int sig, my_siginfo_t *siginfo, void *uctx);
};
+#if SANITIZER_FREEBSD
+ int sa_flags;
+ __sanitizer_sigset_t sa_mask;
+#else
__sanitizer_sigset_t sa_mask;
int sa_flags;
void (*sa_restorer)();
+#endif
};
const sighandler_t SIG_DFL = (sighandler_t)0;
@@ -121,9 +141,9 @@ struct SignalDesc {
};
struct SignalContext {
- int in_blocking_func;
int int_signal_send;
- int pending_signal_count;
+ atomic_uintptr_t in_blocking_func;
+ atomic_uintptr_t have_pending_signals;
SignalDesc pending_signals[kSigCount];
};
@@ -135,7 +155,7 @@ static LibIgnore *libignore() {
}
void InitializeLibIgnore() {
- libignore()->Init(*GetSuppressionContext());
+ libignore()->Init(*SuppressionContext::Get());
libignore()->OnLibraryLoaded(0);
}
@@ -143,8 +163,7 @@ void InitializeLibIgnore() {
static SignalContext *SigCtx(ThreadState *thr) {
SignalContext *ctx = (SignalContext*)thr->signal_ctx;
- if (ctx == 0 && thr->is_alive) {
- ScopedInRtl in_rtl;
+ if (ctx == 0 && !thr->is_dead) {
ctx = (SignalContext*)MmapOrDie(sizeof(*ctx), "SignalContext");
MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx));
thr->signal_ctx = ctx;
@@ -160,78 +179,92 @@ class ScopedInterceptor {
~ScopedInterceptor();
private:
ThreadState *const thr_;
- const int in_rtl_;
+ const uptr pc_;
bool in_ignored_lib_;
};
ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
uptr pc)
: thr_(thr)
- , in_rtl_(thr->in_rtl)
+ , pc_(pc)
, in_ignored_lib_(false) {
- if (thr_->in_rtl == 0) {
+ if (!thr_->ignore_interceptors) {
Initialize(thr);
FuncEntry(thr, pc);
- thr_->in_rtl++;
- DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
- } else {
- thr_->in_rtl++;
}
+ DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
if (!thr_->in_ignored_lib && libignore()->IsIgnored(pc)) {
in_ignored_lib_ = true;
thr_->in_ignored_lib = true;
- ThreadIgnoreBegin(thr_);
+ ThreadIgnoreBegin(thr_, pc_);
}
}
ScopedInterceptor::~ScopedInterceptor() {
if (in_ignored_lib_) {
thr_->in_ignored_lib = false;
- ThreadIgnoreEnd(thr_);
+ ThreadIgnoreEnd(thr_, pc_);
}
- thr_->in_rtl--;
- if (thr_->in_rtl == 0) {
- FuncExit(thr_);
+ if (!thr_->ignore_interceptors) {
ProcessPendingSignals(thr_);
+ FuncExit(thr_);
+ CheckNoLocks(thr_);
}
- CHECK_EQ(in_rtl_, thr_->in_rtl);
}
#define SCOPED_INTERCEPTOR_RAW(func, ...) \
ThreadState *thr = cur_thread(); \
- StatInc(thr, StatInterceptor); \
- StatInc(thr, StatInt_##func); \
const uptr caller_pc = GET_CALLER_PC(); \
ScopedInterceptor si(thr, #func, caller_pc); \
- const uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
+ const uptr pc = StackTrace::GetCurrentPc(); \
(void)pc; \
/**/
#define SCOPED_TSAN_INTERCEPTOR(func, ...) \
SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
if (REAL(func) == 0) { \
- Printf("FATAL: ThreadSanitizer: failed to intercept %s\n", #func); \
+ Report("FATAL: ThreadSanitizer: failed to intercept %s\n", #func); \
Die(); \
- } \
- if (thr->in_rtl > 1 || thr->in_ignored_lib) \
+ } \
+ if (thr->ignore_interceptors || thr->in_ignored_lib) \
return REAL(func)(__VA_ARGS__); \
/**/
#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__)
#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
+#if SANITIZER_FREEBSD
+# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
+#else
+# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
+#endif
#define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))
struct BlockingCall {
explicit BlockingCall(ThreadState *thr)
- : ctx(SigCtx(thr)) {
- ctx->in_blocking_func++;
+ : thr(thr)
+ , ctx(SigCtx(thr)) {
+ for (;;) {
+ atomic_store(&ctx->in_blocking_func, 1, memory_order_relaxed);
+ if (atomic_load(&ctx->have_pending_signals, memory_order_relaxed) == 0)
+ break;
+ atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
+ ProcessPendingSignals(thr);
+ }
+ // When we are in a "blocking call", we process signals asynchronously
+ // (right when they arrive). In this context we do not expect to be
+ // executing any user/runtime code. The known interceptor sequence when
+ // this is not true is: pthread_join -> munmap(stack). It's fine
+ // to ignore munmap in this case -- we handle stack shadow separately.
+ thr->ignore_interceptors++;
}
~BlockingCall() {
- ctx->in_blocking_func--;
+ thr->ignore_interceptors--;
+ atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
}
+ ThreadState *thr;
SignalContext *ctx;
};
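
An illustrative model (using std::atomic in place of the sanitizer atomics) of
the protocol BlockingCall implements above: while in_blocking_func is set, the
signal handler runs signals immediately instead of deferring them, so before
blocking the thread raises the flag and then re-checks for signals that were
deferred just before the flag went up. The Sig type and the
ProcessPendingSignals declaration are assumptions of this sketch.

#include <atomic>

struct Sig {
  std::atomic<unsigned long> in_blocking_func{0};
  std::atomic<unsigned long> have_pending_signals{0};
};

void ProcessPendingSignals(Sig *s);  // assumed helper: drains deferred signals

void EnterBlockingCall(Sig *s) {
  for (;;) {
    s->in_blocking_func.store(1, std::memory_order_relaxed);
    if (s->have_pending_signals.load(std::memory_order_relaxed) == 0)
      break;  // nothing was deferred; new signals will now be handled inline
    // A signal was deferred before the flag went up: drop the flag,
    // process it synchronously, and try again.
    s->in_blocking_func.store(0, std::memory_order_relaxed);
    ProcessPendingSignals(s);
  }
}

void LeaveBlockingCall(Sig *s) {
  s->in_blocking_func.store(0, std::memory_order_relaxed);
}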
@@ -256,116 +289,78 @@ TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
return res;
}
-TSAN_INTERCEPTOR(void*, dlopen, const char *filename, int flag) {
- SCOPED_INTERCEPTOR_RAW(dlopen, filename, flag);
- // dlopen will execute global constructors, so it must be not in rtl.
- CHECK_EQ(thr->in_rtl, 1);
- thr->in_rtl = 0;
- void *res = REAL(dlopen)(filename, flag);
- thr->in_rtl = 1;
- libignore()->OnLibraryLoaded(filename);
- return res;
-}
+// The sole reason tsan wraps atexit callbacks is to establish synchronization
+// between callback setup and callback execution.
+struct AtExitCtx {
+ void (*f)();
+ void *arg;
+};
-TSAN_INTERCEPTOR(int, dlclose, void *handle) {
- SCOPED_INTERCEPTOR_RAW(dlclose, handle);
- // dlclose will execute global destructors, so it must be not in rtl.
- CHECK_EQ(thr->in_rtl, 1);
- thr->in_rtl = 0;
- int res = REAL(dlclose)(handle);
- thr->in_rtl = 1;
- libignore()->OnLibraryUnloaded();
- return res;
+static void at_exit_wrapper(void *arg) {
+ ThreadState *thr = cur_thread();
+ uptr pc = 0;
+ Acquire(thr, pc, (uptr)arg);
+ AtExitCtx *ctx = (AtExitCtx*)arg;
+ ((void(*)(void *arg))ctx->f)(ctx->arg);
+ __libc_free(ctx);
}
-class AtExitContext {
- public:
- AtExitContext()
- : mtx_(MutexTypeAtExit, StatMtxAtExit)
- , pos_() {
- }
-
- typedef void(*atexit_t)();
-
- int atexit(ThreadState *thr, uptr pc, bool is_on_exit,
- atexit_t f, void *arg) {
- Lock l(&mtx_);
- if (pos_ == kMaxAtExit)
- return 1;
- Release(thr, pc, (uptr)this);
- stack_[pos_] = f;
- args_[pos_] = arg;
- is_on_exits_[pos_] = is_on_exit;
- pos_++;
- return 0;
- }
-
- void exit(ThreadState *thr, uptr pc) {
- CHECK_EQ(thr->in_rtl, 0);
- for (;;) {
- atexit_t f = 0;
- void *arg = 0;
- bool is_on_exit = false;
- {
- Lock l(&mtx_);
- if (pos_) {
- pos_--;
- f = stack_[pos_];
- arg = args_[pos_];
- is_on_exit = is_on_exits_[pos_];
- ScopedInRtl in_rtl;
- Acquire(thr, pc, (uptr)this);
- }
- }
- if (f == 0)
- break;
- DPrintf("#%d: executing atexit func %p\n", thr->tid, f);
- CHECK_EQ(thr->in_rtl, 0);
- if (is_on_exit)
- ((void(*)(int status, void *arg))f)(0, arg);
- else
- ((void(*)(void *arg, void *dso))f)(arg, 0);
- }
- }
-
- private:
- static const int kMaxAtExit = 128;
- Mutex mtx_;
- atexit_t stack_[kMaxAtExit];
- void *args_[kMaxAtExit];
- bool is_on_exits_[kMaxAtExit];
- int pos_;
-};
-
-static AtExitContext *atexit_ctx;
+static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
+ void *arg, void *dso);
TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
if (cur_thread()->in_symbolizer)
return 0;
- SCOPED_TSAN_INTERCEPTOR(atexit, f);
- return atexit_ctx->atexit(thr, pc, false, (void(*)())f, 0);
+ // We want to setup the atexit callback even if we are in ignored lib
+ // or after fork.
+ SCOPED_INTERCEPTOR_RAW(atexit, f);
+ return setup_at_exit_wrapper(thr, pc, (void(*)())f, 0, 0);
}
-TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
+TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
if (cur_thread()->in_symbolizer)
return 0;
- SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
- return atexit_ctx->atexit(thr, pc, true, (void(*)())f, arg);
+ SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
+ return setup_at_exit_wrapper(thr, pc, (void(*)())f, arg, dso);
}
-TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
+static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
+ void *arg, void *dso) {
+ AtExitCtx *ctx = (AtExitCtx*)__libc_malloc(sizeof(AtExitCtx));
+ ctx->f = f;
+ ctx->arg = arg;
+ Release(thr, pc, (uptr)ctx);
+ // Memory allocation in __cxa_atexit will race with free during exit,
+ // because we do not see synchronization around atexit callback list.
+ ThreadIgnoreBegin(thr, pc);
+ int res = REAL(__cxa_atexit)(at_exit_wrapper, ctx, dso);
+ ThreadIgnoreEnd(thr, pc);
+ return res;
+}
+
+static void on_exit_wrapper(int status, void *arg) {
+ ThreadState *thr = cur_thread();
+ uptr pc = 0;
+ Acquire(thr, pc, (uptr)arg);
+ AtExitCtx *ctx = (AtExitCtx*)arg;
+ ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
+ __libc_free(ctx);
+}
+
+TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
if (cur_thread()->in_symbolizer)
return 0;
- SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
- if (dso) {
- // Memory allocation in __cxa_atexit will race with free during exit,
- // because we do not see synchronization around atexit callback list.
- ThreadIgnoreBegin(thr);
- int res = REAL(__cxa_atexit)(f, arg, dso);
- ThreadIgnoreEnd(thr);
- return res;
- }
- return atexit_ctx->atexit(thr, pc, false, (void(*)())f, arg);
+ SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
+ AtExitCtx *ctx = (AtExitCtx*)__libc_malloc(sizeof(AtExitCtx));
+ ctx->f = (void(*)())f;
+ ctx->arg = arg;
+ Release(thr, pc, (uptr)ctx);
+ // Memory allocation in __cxa_atexit will race with free during exit,
+ // because we do not see synchronization around atexit callback list.
+ ThreadIgnoreBegin(thr, pc);
+ int res = REAL(on_exit)(on_exit_wrapper, ctx);
+ ThreadIgnoreEnd(thr, pc);
+ return res;
}
// Cleanup old bufs.
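
An illustrative model (plain std::atomic, not tsan runtime calls) of the
happens-before edge the atexit/on_exit wrappers above create: the registering
thread does Release(thr, pc, ctx) after filling AtExitCtx, and the wrapper
does Acquire(thr, pc, ctx) before invoking the user callback, so everything
written before registration is ordered before the callback body. The Ctx type
and function names below are hypothetical.

#include <atomic>

struct Ctx {
  void (*f)();
  std::atomic<bool> published{false};
};

void register_callback(Ctx *ctx, void (*f)()) {
  ctx->f = f;                                             // callback setup
  ctx->published.store(true, std::memory_order_release);  // models Release(thr, pc, ctx)
}

void run_callback(Ctx *ctx) {
  while (!ctx->published.load(std::memory_order_acquire)) {}  // models Acquire(thr, pc, ctx)
  ctx->f();  // the callback observes all writes made before registration
}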
@@ -391,10 +386,21 @@ static void SetJmp(ThreadState *thr, uptr sp, uptr mangled_sp) {
buf->sp = sp;
buf->mangled_sp = mangled_sp;
buf->shadow_stack_pos = thr->shadow_stack_pos;
+ SignalContext *sctx = SigCtx(thr);
+ buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
+ buf->in_blocking_func = sctx ?
+ atomic_load(&sctx->in_blocking_func, memory_order_relaxed) :
+ false;
+ buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
+ memory_order_relaxed);
}
static void LongJmp(ThreadState *thr, uptr *env) {
+#if SANITIZER_FREEBSD
+ uptr mangled_sp = env[2];
+#else
uptr mangled_sp = env[6];
+#endif // SANITIZER_FREEBSD
// Find the saved buf by mangled_sp.
for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
JmpBuf *buf = &thr->jmp_bufs[i];
@@ -403,6 +409,14 @@ static void LongJmp(ThreadState *thr, uptr *env) {
// Unwind the stack.
while (thr->shadow_stack_pos > buf->shadow_stack_pos)
FuncExit(thr);
+ SignalContext *sctx = SigCtx(thr);
+ if (sctx) {
+ sctx->int_signal_send = buf->int_signal_send;
+ atomic_store(&sctx->in_blocking_func, buf->in_blocking_func,
+ memory_order_relaxed);
+ }
+ atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
+ memory_order_relaxed);
JmpBufGarbageCollect(thr, buf->sp - 1); // do not collect buf->sp
return;
}
@@ -413,7 +427,6 @@ static void LongJmp(ThreadState *thr, uptr *env) {
// FIXME: put everything below into a common extern "C" block?
extern "C" void __tsan_setjmp(uptr sp, uptr mangled_sp) {
- ScopedInRtl in_rtl;
SetJmp(cur_thread(), sp, mangled_sp);
}
@@ -540,7 +553,7 @@ TSAN_INTERCEPTOR(void, cfree, void *p) {
TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
- return user_alloc_usable_size(thr, pc, p);
+ return user_alloc_usable_size(p);
}
#define OPERATOR_NEW_BODY(mangled_name) \
@@ -587,21 +600,21 @@ void *operator new[](__sanitizer::uptr size, std::nothrow_t const&) {
user_free(thr, pc, ptr);
SANITIZER_INTERFACE_ATTRIBUTE
-void operator delete(void *ptr);
-void operator delete(void *ptr) {
+void operator delete(void *ptr) throw();
+void operator delete(void *ptr) throw() {
OPERATOR_DELETE_BODY(_ZdlPv);
}
SANITIZER_INTERFACE_ATTRIBUTE
-void operator delete[](void *ptr);
-void operator delete[](void *ptr) {
- OPERATOR_DELETE_BODY(_ZdlPvRKSt9nothrow_t);
+void operator delete[](void *ptr) throw();
+void operator delete[](void *ptr) throw() {
+ OPERATOR_DELETE_BODY(_ZdaPv);
}
SANITIZER_INTERFACE_ATTRIBUTE
void operator delete(void *ptr, std::nothrow_t const&);
void operator delete(void *ptr, std::nothrow_t const&) {
- OPERATOR_DELETE_BODY(_ZdaPv);
+ OPERATOR_DELETE_BODY(_ZdlPvRKSt9nothrow_t);
}
SANITIZER_INTERFACE_ATTRIBUTE
@@ -635,7 +648,8 @@ TSAN_INTERCEPTOR(int, memcmp, const void *s1, const void *s2, uptr n) {
int res = 0;
uptr len = 0;
for (; len < n; len++) {
- if ((res = ((unsigned char*)s1)[len] - ((unsigned char*)s2)[len]))
+ if ((res = ((const unsigned char *)s1)[len] -
+ ((const unsigned char *)s2)[len]))
break;
}
MemoryAccessRange(thr, pc, (uptr)s1, len < n ? len + 1 : n, false);
@@ -643,20 +657,6 @@ TSAN_INTERCEPTOR(int, memcmp, const void *s1, const void *s2, uptr n) {
return res;
}
-TSAN_INTERCEPTOR(void*, memchr, void *s, int c, uptr n) {
- SCOPED_TSAN_INTERCEPTOR(memchr, s, c, n);
- void *res = REAL(memchr)(s, c, n);
- uptr len = res ? (char*)res - (char*)s + 1 : n;
- MemoryAccessRange(thr, pc, (uptr)s, len, false);
- return res;
-}
-
-TSAN_INTERCEPTOR(void*, memrchr, char *s, int c, uptr n) {
- SCOPED_TSAN_INTERCEPTOR(memrchr, s, c, n);
- MemoryAccessRange(thr, pc, (uptr)s, n, false);
- return REAL(memrchr)(s, c, n);
-}
-
TSAN_INTERCEPTOR(void*, memmove, void *dst, void *src, uptr n) {
SCOPED_TSAN_INTERCEPTOR(memmove, dst, src, n);
MemoryAccessRange(thr, pc, (uptr)dst, n, true);
@@ -746,6 +746,7 @@ TSAN_INTERCEPTOR(void*, mmap, void *addr, long_t sz, int prot,
return res;
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(void*, mmap64, void *addr, long_t sz, int prot,
int flags, int fd, u64 off) {
SCOPED_TSAN_INTERCEPTOR(mmap64, addr, sz, prot, flags, fd, off);
@@ -759,6 +760,10 @@ TSAN_INTERCEPTOR(void*, mmap64, void *addr, long_t sz, int prot,
}
return res;
}
+#define TSAN_MAYBE_INTERCEPT_MMAP64 TSAN_INTERCEPT(mmap64)
+#else
+#define TSAN_MAYBE_INTERCEPT_MMAP64
+#endif
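For illustration only, here is a minimal standalone sketch of the conditional-registration pattern the TSAN_MAYBE_INTERCEPT_* macros introduce above: the platform-specific interceptor compiles away on FreeBSD while the initialization list stays flat. The DEMO_* names are hypothetical and not part of the sanitizer's real interception machinery.

#include <cstdio>

// Stand-in for the platform switch used by the runtime.
#define DEMO_FREEBSD 0

#if !DEMO_FREEBSD
static void intercept_mmap64() { std::puts("mmap64 interceptor registered"); }
// On platforms that have mmap64, the macro expands to the registration call.
# define DEMO_MAYBE_INTERCEPT_MMAP64 intercept_mmap64()
#else
// On platforms without mmap64, the macro expands to nothing,
// so the initialization list below needs no #ifdefs of its own.
# define DEMO_MAYBE_INTERCEPT_MMAP64
#endif

int main() {
  // Flat initialization list, mirroring InitializeInterceptors().
  DEMO_MAYBE_INTERCEPT_MMAP64;
  return 0;
}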
TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
@@ -767,21 +772,36 @@ TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
return res;
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
return user_alloc(thr, pc, sz, align);
}
+#define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
+#else
+#define TSAN_MAYBE_INTERCEPT_MEMALIGN
+#endif
+
+TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
+ SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
+ return user_alloc(thr, pc, sz, align);
+}
TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
SCOPED_INTERCEPTOR_RAW(valloc, sz);
return user_alloc(thr, pc, sz, GetPageSizeCached());
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
sz = RoundUp(sz, GetPageSizeCached());
return user_alloc(thr, pc, sz, GetPageSizeCached());
}
+#define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
+#else
+#define TSAN_MAYBE_INTERCEPT_PVALLOC
+#endif
TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
@@ -827,7 +847,6 @@ static void thread_finalize(void *v) {
return;
}
{
- ScopedInRtl in_rtl;
ThreadState *thr = cur_thread();
ThreadFinish(thr);
SignalContext *sctx = thr->signal_ctx;
@@ -852,17 +871,19 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
int tid = 0;
{
ThreadState *thr = cur_thread();
- ScopedInRtl in_rtl;
+ // Thread-local state is not initialized yet.
+ ScopedIgnoreInterceptors ignore;
+ ThreadIgnoreBegin(thr, 0);
if (pthread_setspecific(g_thread_finalize_key,
(void *)kPthreadDestructorIterations)) {
Printf("ThreadSanitizer: failed to set thread key\n");
Die();
}
+ ThreadIgnoreEnd(thr, 0);
while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
pthread_yield();
atomic_store(&p->tid, 0, memory_order_release);
ThreadStart(thr, tid, GetTid());
- CHECK_EQ(thr->in_rtl, 1);
}
void *res = callback(param);
// Prevent the callback from being tail called,
@@ -875,6 +896,17 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
TSAN_INTERCEPTOR(int, pthread_create,
void *th, void *attr, void *(*callback)(void*), void * param) {
SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);
+ if (ctx->after_multithreaded_fork) {
+ if (flags()->die_after_fork) {
+ Report("ThreadSanitizer: starting new threads after multi-threaded "
+ "fork is not supported. Dying (set die_after_fork=0 to override)\n");
+ Die();
+ } else {
+ VPrintf(1, "ThreadSanitizer: starting new threads after multi-threaded "
+ "fork is not supported (pid %d). Continuing because of "
+ "die_after_fork=0, but you are on your own\n", internal_getpid());
+ }
+ }
__sanitizer_pthread_attr_t myattr;
if (attr == 0) {
pthread_attr_init(&myattr);
@@ -882,13 +914,20 @@ TSAN_INTERCEPTOR(int, pthread_create,
}
int detached = 0;
REAL(pthread_attr_getdetachstate)(attr, &detached);
- AdjustStackSizeLinux(attr);
+ AdjustStackSize(attr);
ThreadParam p;
p.callback = callback;
p.param = param;
atomic_store(&p.tid, 0, memory_order_relaxed);
- int res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
+ int res = -1;
+ {
+ // Otherwise we see false positives in pthread stack manipulation.
+ ScopedIgnoreInterceptors ignore;
+ ThreadIgnoreBegin(thr, pc);
+ res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
+ ThreadIgnoreEnd(thr, pc);
+ }
if (res == 0) {
int tid = ThreadCreate(thr, pc, *(uptr*)th, detached);
CHECK_NE(tid, 0);
@@ -904,7 +943,9 @@ TSAN_INTERCEPTOR(int, pthread_create,
TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
int tid = ThreadTid(thr, pc, (uptr)th);
+ ThreadIgnoreBegin(thr, pc);
int res = BLOCK_REAL(pthread_join)(th, ret);
+ ThreadIgnoreEnd(thr, pc);
if (res == 0) {
ThreadJoin(thr, pc, tid);
}
@@ -921,6 +962,122 @@ TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
return res;
}
+// Problem:
+// NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2).
+// pthread_cond_t has a different size in the different versions.
+// If we call the new REAL functions for an old pthread_cond_t, they will
+// corrupt memory after pthread_cond_t (the old cond is smaller).
+// If we call the old REAL functions for a new pthread_cond_t, we will lose
+// some functionality (e.g. the old functions do not support waiting against
+// CLOCK_REALTIME).
+// Proper handling would require 2 versions of the interceptors as well.
+// But this is messy, and in particular requires linker scripts when the
+// sanitizer runtime is linked into a shared library.
+// Instead we assume there are no dynamic libraries built against the old
+// pthread (2.2.5 dates back to 2002), and provide the legacy_pthread_cond
+// flag that allows working with old libraries (but this mode does not
+// support some features, e.g. pthread_condattr_getpshared).
+static void *init_cond(void *c, bool force = false) {
+ // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
+ // So we allocate additional memory on the side large enough to hold
+  // any pthread_cond_t object. We always call the new REAL functions, but
+  // pass the aux object to them.
+  // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes
+  // the first word of pthread_cond_t to zero.
+  // This is all relevant only for Linux.
+ if (!common_flags()->legacy_pthread_cond)
+ return c;
+ atomic_uintptr_t *p = (atomic_uintptr_t*)c;
+ uptr cond = atomic_load(p, memory_order_acquire);
+ if (!force && cond != 0)
+ return (void*)cond;
+ void *newcond = WRAP(malloc)(pthread_cond_t_sz);
+ internal_memset(newcond, 0, pthread_cond_t_sz);
+ if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond,
+ memory_order_acq_rel))
+ return newcond;
+ WRAP(free)(newcond);
+ return (void*)cond;
+}
+
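The init_cond helper above lazily publishes a side allocation through the first word of the user's pthread_cond_t. A rough standalone sketch of that publish-or-reuse pattern with std::atomic follows; it is illustrative only (the real code uses the runtime's own allocator and atomics, and relies on PTHREAD_COND_INITIALIZER zeroing the first word), and BigCond/get_or_create are hypothetical names.

#include <atomic>
#include <cstdint>
#include <cstdlib>

struct BigCond { unsigned char storage[128]; };  // stand-in for the aux object

// The first word of the user object doubles as a pointer slot.
static BigCond *get_or_create(void *user_obj) {
  auto *slot = reinterpret_cast<std::atomic<std::uintptr_t> *>(user_obj);
  std::uintptr_t cur = slot->load(std::memory_order_acquire);
  if (cur != 0)
    return reinterpret_cast<BigCond *>(cur);        // already published
  auto *fresh = static_cast<BigCond *>(std::calloc(1, sizeof(BigCond)));
  std::uintptr_t expected = 0;
  if (slot->compare_exchange_strong(expected,
                                    reinterpret_cast<std::uintptr_t>(fresh),
                                    std::memory_order_acq_rel))
    return fresh;                                    // we won the race
  std::free(fresh);                                  // somebody else won; reuse theirs
  return reinterpret_cast<BigCond *>(expected);
}

The CAS makes the allocation race-free: concurrent initializers agree on a single aux object and the losers free their copy, which is why init_cond can be called from every cond interceptor without extra locking.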
+struct CondMutexUnlockCtx {
+ ThreadState *thr;
+ uptr pc;
+ void *m;
+};
+
+static void cond_mutex_unlock(CondMutexUnlockCtx *arg) {
+ MutexLock(arg->thr, arg->pc, (uptr)arg->m);
+}
+
+INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
+ void *cond = init_cond(c, true);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
+ return REAL(pthread_cond_init)(cond, a);
+}
+
+INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
+ MutexUnlock(thr, pc, (uptr)m);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+ CondMutexUnlockCtx arg = {thr, pc, m};
+ // This ensures that we handle mutex lock even in case of pthread_cancel.
+ // See test/tsan/cond_cancel.cc.
+ int res = call_pthread_cancel_with_cleanup(
+ (int(*)(void *c, void *m, void *abstime))REAL(pthread_cond_wait),
+ cond, m, 0, (void(*)(void *arg))cond_mutex_unlock, &arg);
+ if (res == errno_EOWNERDEAD)
+ MutexRepair(thr, pc, (uptr)m);
+ MutexLock(thr, pc, (uptr)m);
+ return res;
+}
+
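The interceptor above routes the wait through call_pthread_cancel_with_cleanup so the MutexLock bookkeeping still runs if the thread is cancelled inside the wait (see test/tsan/cond_cancel.cc). The plain-pthread idiom it mirrors is a cleanup handler wrapped around the cancellation point; a small sketch with standard pthread APIs only (no TSan internals, hypothetical names):

#include <pthread.h>

static pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static bool ready = false;

static void on_cancel(void *arg) {
  // Runs if the thread is cancelled while blocked in pthread_cond_wait.
  // POSIX re-acquires the mutex before cleanup handlers run, so any
  // per-thread bookkeeping (the analogue of TSan's MutexLock) and the
  // unlock can be performed here.
  pthread_mutex_unlock(static_cast<pthread_mutex_t *>(arg));
}

static void *waiter(void *) {
  pthread_mutex_lock(&mu);
  pthread_cleanup_push(on_cancel, &mu);
  while (!ready)
    pthread_cond_wait(&cv, &mu);   // cancellation point
  pthread_cleanup_pop(0);          // 0: do not run the handler on normal exit
  pthread_mutex_unlock(&mu);
  return nullptr;
}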
+INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
+ MutexUnlock(thr, pc, (uptr)m);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+ CondMutexUnlockCtx arg = {thr, pc, m};
+ // This ensures that we handle mutex lock even in case of pthread_cancel.
+ // See test/tsan/cond_cancel.cc.
+ int res = call_pthread_cancel_with_cleanup(
+ REAL(pthread_cond_timedwait), cond, m, abstime,
+ (void(*)(void *arg))cond_mutex_unlock, &arg);
+ if (res == errno_EOWNERDEAD)
+ MutexRepair(thr, pc, (uptr)m);
+ MutexLock(thr, pc, (uptr)m);
+ return res;
+}
+
+INTERCEPTOR(int, pthread_cond_signal, void *c) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+ return REAL(pthread_cond_signal)(cond);
+}
+
+INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+ return REAL(pthread_cond_broadcast)(cond);
+}
+
+INTERCEPTOR(int, pthread_cond_destroy, void *c) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
+ int res = REAL(pthread_cond_destroy)(cond);
+ if (common_flags()->legacy_pthread_cond) {
+ // Free our aux cond and zero the pointer to not leave dangling pointers.
+ WRAP(free)(cond);
+ atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed);
+ }
+ return res;
+}
+
TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
int res = REAL(pthread_mutex_init)(m, a);
@@ -928,7 +1085,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
bool recursive = false;
if (a) {
int type = 0;
- if (pthread_mutexattr_gettype(a, &type) == 0)
+ if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
recursive = (type == PTHREAD_MUTEX_RECURSIVE
|| type == PTHREAD_MUTEX_RECURSIVE_NP);
}
@@ -952,7 +1109,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
if (res == EOWNERDEAD)
MutexRepair(thr, pc, (uptr)m);
if (res == 0 || res == EOWNERDEAD)
- MutexLock(thr, pc, (uptr)m);
+ MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
return res;
}
@@ -996,7 +1153,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
int res = REAL(pthread_spin_trylock)(m);
if (res == 0) {
- MutexLock(thr, pc, (uptr)m);
+ MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
}
return res;
}
@@ -1039,7 +1196,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
int res = REAL(pthread_rwlock_tryrdlock)(m);
if (res == 0) {
- MutexReadLock(thr, pc, (uptr)m);
+ MutexReadLock(thr, pc, (uptr)m, /*try_lock=*/true);
}
return res;
}
@@ -1066,7 +1223,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
int res = REAL(pthread_rwlock_trywrlock)(m);
if (res == 0) {
- MutexLock(thr, pc, (uptr)m);
+ MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
}
return res;
}
@@ -1087,23 +1244,6 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
return res;
}
-TSAN_INTERCEPTOR(int, pthread_cond_destroy, void *c) {
- SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, c);
- MemoryWrite(thr, pc, (uptr)c, kSizeLog1);
- int res = REAL(pthread_cond_destroy)(c);
- return res;
-}
-
-TSAN_INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m,
- void *abstime) {
- SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, c, m, abstime);
- MutexUnlock(thr, pc, (uptr)m);
- MemoryRead(thr, pc, (uptr)c, kSizeLog1);
- int res = REAL(pthread_cond_timedwait)(c, m, abstime);
- MutexLock(thr, pc, (uptr)m);
- return res;
-}
-
TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
@@ -1132,19 +1272,13 @@ TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
- // Using SCOPED_INTERCEPTOR_RAW, because if we are called from an ignored lib,
- // the user callback must be executed with thr->in_rtl == 0.
if (o == 0 || f == 0)
return EINVAL;
atomic_uint32_t *a = static_cast<atomic_uint32_t*>(o);
u32 v = atomic_load(a, memory_order_acquire);
if (v == 0 && atomic_compare_exchange_strong(a, &v, 1,
memory_order_relaxed)) {
- const int old_in_rtl = thr->in_rtl;
- thr->in_rtl = 0;
(*f)();
- CHECK_EQ(thr->in_rtl, 0);
- thr->in_rtl = old_in_rtl;
if (!thr->in_ignored_lib)
Release(thr, pc, (uptr)o);
atomic_store(a, 2, memory_order_release);
@@ -1214,73 +1348,135 @@ TSAN_INTERCEPTOR(int, sem_getvalue, void *s, int *sval) {
return res;
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, __xstat, int version, const char *path, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__xstat, version, path, buf);
return REAL(__xstat)(version, path, buf);
}
+#define TSAN_MAYBE_INTERCEPT___XSTAT TSAN_INTERCEPT(__xstat)
+#else
+#define TSAN_MAYBE_INTERCEPT___XSTAT
+#endif
TSAN_INTERCEPTOR(int, stat, const char *path, void *buf) {
+#if SANITIZER_FREEBSD
+ SCOPED_TSAN_INTERCEPTOR(stat, path, buf);
+ return REAL(stat)(path, buf);
+#else
SCOPED_TSAN_INTERCEPTOR(__xstat, 0, path, buf);
return REAL(__xstat)(0, path, buf);
+#endif
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, __xstat64, int version, const char *path, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__xstat64, version, path, buf);
return REAL(__xstat64)(version, path, buf);
}
+#define TSAN_MAYBE_INTERCEPT___XSTAT64 TSAN_INTERCEPT(__xstat64)
+#else
+#define TSAN_MAYBE_INTERCEPT___XSTAT64
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, stat64, const char *path, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__xstat64, 0, path, buf);
return REAL(__xstat64)(0, path, buf);
}
+#define TSAN_MAYBE_INTERCEPT_STAT64 TSAN_INTERCEPT(stat64)
+#else
+#define TSAN_MAYBE_INTERCEPT_STAT64
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, __lxstat, int version, const char *path, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__lxstat, version, path, buf);
return REAL(__lxstat)(version, path, buf);
}
+#define TSAN_MAYBE_INTERCEPT___LXSTAT TSAN_INTERCEPT(__lxstat)
+#else
+#define TSAN_MAYBE_INTERCEPT___LXSTAT
+#endif
TSAN_INTERCEPTOR(int, lstat, const char *path, void *buf) {
+#if SANITIZER_FREEBSD
+ SCOPED_TSAN_INTERCEPTOR(lstat, path, buf);
+ return REAL(lstat)(path, buf);
+#else
SCOPED_TSAN_INTERCEPTOR(__lxstat, 0, path, buf);
return REAL(__lxstat)(0, path, buf);
+#endif
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, __lxstat64, int version, const char *path, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__lxstat64, version, path, buf);
return REAL(__lxstat64)(version, path, buf);
}
+#define TSAN_MAYBE_INTERCEPT___LXSTAT64 TSAN_INTERCEPT(__lxstat64)
+#else
+#define TSAN_MAYBE_INTERCEPT___LXSTAT64
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, lstat64, const char *path, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__lxstat64, 0, path, buf);
return REAL(__lxstat64)(0, path, buf);
}
+#define TSAN_MAYBE_INTERCEPT_LSTAT64 TSAN_INTERCEPT(lstat64)
+#else
+#define TSAN_MAYBE_INTERCEPT_LSTAT64
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
return REAL(__fxstat)(version, fd, buf);
}
+#define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat)
+#else
+#define TSAN_MAYBE_INTERCEPT___FXSTAT
+#endif
TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
+#if SANITIZER_FREEBSD
+ SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(fstat)(fd, buf);
+#else
SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
return REAL(__fxstat)(0, fd, buf);
+#endif
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
return REAL(__fxstat64)(version, fd, buf);
}
+#define TSAN_MAYBE_INTERCEPT___FXSTAT64 TSAN_INTERCEPT(__fxstat64)
+#else
+#define TSAN_MAYBE_INTERCEPT___FXSTAT64
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
return REAL(__fxstat64)(0, fd, buf);
}
+#define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
+#else
+#define TSAN_MAYBE_INTERCEPT_FSTAT64
+#endif
TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) {
SCOPED_TSAN_INTERCEPTOR(open, name, flags, mode);
@@ -1290,6 +1486,7 @@ TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) {
return fd;
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, open64, const char *name, int flags, int mode) {
SCOPED_TSAN_INTERCEPTOR(open64, name, flags, mode);
int fd = REAL(open64)(name, flags, mode);
@@ -1297,6 +1494,10 @@ TSAN_INTERCEPTOR(int, open64, const char *name, int flags, int mode) {
FdFileCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64)
+#else
+#define TSAN_MAYBE_INTERCEPT_OPEN64
+#endif
TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
@@ -1306,6 +1507,7 @@ TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
return fd;
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
int fd = REAL(creat64)(name, mode);
@@ -1313,6 +1515,10 @@ TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
FdFileCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64)
+#else
+#define TSAN_MAYBE_INTERCEPT_CREAT64
+#endif
TSAN_INTERCEPTOR(int, dup, int oldfd) {
SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
@@ -1338,6 +1544,7 @@ TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
return newfd2;
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
int fd = REAL(eventfd)(initval, flags);
@@ -1345,7 +1552,12 @@ TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
FdEventCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd)
+#else
+#define TSAN_MAYBE_INTERCEPT_EVENTFD
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
SCOPED_TSAN_INTERCEPTOR(signalfd, fd, mask, flags);
if (fd >= 0)
@@ -1355,7 +1567,12 @@ TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
FdSignalCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd)
+#else
+#define TSAN_MAYBE_INTERCEPT_SIGNALFD
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, inotify_init, int fake) {
SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
int fd = REAL(inotify_init)(fake);
@@ -1363,7 +1580,12 @@ TSAN_INTERCEPTOR(int, inotify_init, int fake) {
FdInotifyCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init)
+#else
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
int fd = REAL(inotify_init1)(flags);
@@ -1371,6 +1593,10 @@ TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
FdInotifyCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1)
+#else
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1
+#endif
TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
@@ -1413,6 +1639,7 @@ TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
return res;
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, epoll_create, int size) {
SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
int fd = REAL(epoll_create)(size);
@@ -1420,7 +1647,12 @@ TSAN_INTERCEPTOR(int, epoll_create, int size) {
FdPollCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE TSAN_INTERCEPT(epoll_create)
+#else
+#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
int fd = REAL(epoll_create1)(flags);
@@ -1428,6 +1660,10 @@ TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
FdPollCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE1 TSAN_INTERCEPT(epoll_create1)
+#else
+#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE1
+#endif
TSAN_INTERCEPTOR(int, close, int fd) {
SCOPED_TSAN_INTERCEPTOR(close, fd);
@@ -1436,14 +1672,20 @@ TSAN_INTERCEPTOR(int, close, int fd) {
return REAL(close)(fd);
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, __close, int fd) {
SCOPED_TSAN_INTERCEPTOR(__close, fd);
if (fd >= 0)
FdClose(thr, pc, fd);
return REAL(__close)(fd);
}
+#define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
+#else
+#define TSAN_MAYBE_INTERCEPT___CLOSE
+#endif
// glibc guts
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr);
int fds[64];
@@ -1454,6 +1696,10 @@ TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
}
REAL(__res_iclose)(state, free_addr);
}
+#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
+#else
+#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE
+#endif
TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
@@ -1509,10 +1755,9 @@ TSAN_INTERCEPTOR(int, unlink, char *path) {
return res;
}
-TSAN_INTERCEPTOR(void*, fopen, char *path, char *mode) {
- SCOPED_TSAN_INTERCEPTOR(fopen, path, mode);
- void *res = REAL(fopen)(path, mode);
- Acquire(thr, pc, File2addr(path));
+TSAN_INTERCEPTOR(void*, tmpfile, int fake) {
+ SCOPED_TSAN_INTERCEPTOR(tmpfile, fake);
+ void *res = REAL(tmpfile)(fake);
if (res) {
int fd = fileno_unlocked(res);
if (fd >= 0)
@@ -1521,15 +1766,10 @@ TSAN_INTERCEPTOR(void*, fopen, char *path, char *mode) {
return res;
}
-TSAN_INTERCEPTOR(void*, freopen, char *path, char *mode, void *stream) {
- SCOPED_TSAN_INTERCEPTOR(freopen, path, mode, stream);
- if (stream) {
- int fd = fileno_unlocked(stream);
- if (fd >= 0)
- FdClose(thr, pc, fd);
- }
- void *res = REAL(freopen)(path, mode, stream);
- Acquire(thr, pc, File2addr(path));
+#if !SANITIZER_FREEBSD
+TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
+ SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake);
+ void *res = REAL(tmpfile64)(fake);
if (res) {
int fd = fileno_unlocked(res);
if (fd >= 0)
@@ -1537,19 +1777,10 @@ TSAN_INTERCEPTOR(void*, freopen, char *path, char *mode, void *stream) {
}
return res;
}
-
-TSAN_INTERCEPTOR(int, fclose, void *stream) {
- // libc file streams can call user-supplied functions, see fopencookie.
- {
- SCOPED_TSAN_INTERCEPTOR(fclose, stream);
- if (stream) {
- int fd = fileno_unlocked(stream);
- if (fd >= 0)
- FdClose(thr, pc, fd);
- }
- }
- return REAL(fclose)(stream);
-}
+#define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64)
+#else
+#define TSAN_MAYBE_INTERCEPT_TMPFILE64
+#endif
TSAN_INTERCEPTOR(uptr, fread, void *ptr, uptr size, uptr nmemb, void *f) {
// libc file streams can call user-supplied functions, see fopencookie.
@@ -1569,17 +1800,16 @@ TSAN_INTERCEPTOR(uptr, fwrite, const void *p, uptr size, uptr nmemb, void *f) {
return REAL(fwrite)(p, size, nmemb, f);
}
-TSAN_INTERCEPTOR(int, fflush, void *stream) {
- // libc file streams can call user-supplied functions, see fopencookie.
- {
- SCOPED_TSAN_INTERCEPTOR(fflush, stream);
- }
- return REAL(fflush)(stream);
+static void FlushStreams() {
+ // Flushing all the streams here may freeze the process if a child thread is
+ // performing file stream operations at the same time.
+ REAL(fflush)(stdout);
+ REAL(fflush)(stderr);
}
TSAN_INTERCEPTOR(void, abort, int fake) {
SCOPED_TSAN_INTERCEPTOR(abort, fake);
- REAL(fflush)(0);
+ FlushStreams();
REAL(abort)(fake);
}
@@ -1604,6 +1834,7 @@ TSAN_INTERCEPTOR(void*, opendir, char *path) {
return res;
}
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
if (epfd >= 0)
@@ -1615,7 +1846,12 @@ TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
int res = REAL(epoll_ctl)(epfd, op, fd, ev);
return res;
}
+#define TSAN_MAYBE_INTERCEPT_EPOLL_CTL TSAN_INTERCEPT(epoll_ctl)
+#else
+#define TSAN_MAYBE_INTERCEPT_EPOLL_CTL
+#endif
+#if !SANITIZER_FREEBSD
TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
if (epfd >= 0)
@@ -1625,31 +1861,119 @@ TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
FdAcquire(thr, pc, epfd);
return res;
}
+#define TSAN_MAYBE_INTERCEPT_EPOLL_WAIT TSAN_INTERCEPT(epoll_wait)
+#else
+#define TSAN_MAYBE_INTERCEPT_EPOLL_WAIT
+#endif
+
+namespace __tsan {
+
+static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
+ bool sigact, int sig, my_siginfo_t *info, void *uctx) {
+ if (acquire)
+ Acquire(thr, 0, (uptr)&sigactions[sig]);
+ // Ensure that the handler does not spoil errno.
+ const int saved_errno = errno;
+ errno = 99;
+ // Need to remember pc before the call, because the handler can reset it.
+ uptr pc = sigact ?
+ (uptr)sigactions[sig].sa_sigaction :
+ (uptr)sigactions[sig].sa_handler;
+ pc += 1; // return address is expected, OutputReport() will undo this
+ if (sigact)
+ sigactions[sig].sa_sigaction(sig, info, uctx);
+ else
+ sigactions[sig].sa_handler(sig);
+  // We do not detect errno spoiling for SIGTERM,
+  // because some SIGTERM handlers do spoil errno but reraise SIGTERM,
+  // and tsan would report a false positive in such a case.
+  // It's difficult to detect this situation (the reraise) properly,
+  // because in the async signal processing case (when the handler is called
+  // directly from rtl_generic_sighandler) we have not yet received the
+  // reraised signal; and it looks too fragile to intercept all ways to
+  // reraise a signal.
+ if (flags()->report_bugs && !sync && sig != SIGTERM && errno != 99) {
+ VarSizeStackTrace stack;
+ ObtainCurrentStack(thr, pc, &stack);
+ ThreadRegistryLock l(ctx->thread_registry);
+ ScopedReport rep(ReportTypeErrnoInSignal);
+ if (!IsFiredSuppression(ctx, rep, stack)) {
+ rep.AddStack(stack, true);
+ OutputReport(thr, rep);
+ }
+ }
+ errno = saved_errno;
+}
+
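CallUserSignalHandler above checks for errno spoiling by planting a sentinel value before invoking the handler and seeing whether the handler left it behind. Stripped of the reporting machinery, the check looks roughly like the sketch below (illustrative only; 99 mirrors the sentinel used above, and call_and_check is a hypothetical name):

#include <cerrno>
#include <cstdio>

// Call a signal handler and report if it clobbered errno without restoring it.
static void call_and_check(void (*handler)(int), int sig) {
  const int saved_errno = errno;   // what the interrupted code expects to see
  errno = 99;                      // sentinel: a failing libc call inside the
                                   // handler is unlikely to leave exactly 99
  handler(sig);
  if (errno != 99)
    std::fprintf(stderr, "signal handler spoiled errno (now %d)\n", errno);
  errno = saved_errno;             // always restore for the interrupted code
}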
+void ProcessPendingSignals(ThreadState *thr) {
+ SignalContext *sctx = SigCtx(thr);
+ if (sctx == 0 ||
+ atomic_load(&sctx->have_pending_signals, memory_order_relaxed) == 0)
+ return;
+ atomic_store(&sctx->have_pending_signals, 0, memory_order_relaxed);
+ atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
+ // These are too big for stack.
+ static THREADLOCAL __sanitizer_sigset_t emptyset, oldset;
+ REAL(sigfillset)(&emptyset);
+ pthread_sigmask(SIG_SETMASK, &emptyset, &oldset);
+ for (int sig = 0; sig < kSigCount; sig++) {
+ SignalDesc *signal = &sctx->pending_signals[sig];
+ if (signal->armed) {
+ signal->armed = false;
+ if (sigactions[sig].sa_handler != SIG_DFL
+ && sigactions[sig].sa_handler != SIG_IGN) {
+ CallUserSignalHandler(thr, false, true, signal->sigaction,
+ sig, &signal->siginfo, &signal->ctx);
+ }
+ }
+ }
+ pthread_sigmask(SIG_SETMASK, &oldset, 0);
+ atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
+}
+
+} // namespace __tsan
+
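ProcessPendingSignals above drains signals that rtl_generic_sighandler merely recorded because they arrived at an unsafe point. Reduced to plain C++/POSIX, the defer-and-replay shape looks roughly like this; it is a sketch under the assumption that NSIG and pthread_sigmask are available (POSIX/glibc), with one pending slot per signal and no TSan state:

#include <csignal>
#include <cstdio>
#include <pthread.h>

static volatile sig_atomic_t pending[NSIG];      // per-signal "armed" flags
static volatile sig_atomic_t have_pending = 0;   // cheap global check

extern "C" void record_signal(int sig) {
  // Async-signal-safe: only set flags, defer the real work.
  pending[sig] = 1;
  have_pending = 1;
}

static void process_pending_signals() {
  if (!have_pending)
    return;                                       // fast path, checked frequently
  have_pending = 0;
  sigset_t all, old;
  sigfillset(&all);
  pthread_sigmask(SIG_SETMASK, &all, &old);       // block delivery while replaying
  for (int sig = 1; sig < NSIG; sig++) {
    if (pending[sig]) {
      pending[sig] = 0;
      std::printf("replaying deferred signal %d\n", sig);  // real handler call goes here
    }
  }
  pthread_sigmask(SIG_SETMASK, &old, nullptr);
}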
+static bool is_sync_signal(SignalContext *sctx, int sig) {
+ return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
+ sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS ||
+      // If we are sending a signal to ourselves, we must process it now.
+ (sctx && sig == sctx->int_signal_send);
+}
void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
my_siginfo_t *info, void *ctx) {
ThreadState *thr = cur_thread();
SignalContext *sctx = SigCtx(thr);
+ if (sig < 0 || sig >= kSigCount) {
+ VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
+ return;
+ }
// Don't mess with synchronous signals.
- if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
- sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS ||
- // If we are sending signal to ourselves, we must process it now.
- (sctx && sig == sctx->int_signal_send) ||
+ const bool sync = is_sync_signal(sctx, sig);
+ if (sync ||
// If we are in blocking function, we can safely process it now
// (but check if we are in a recursive interceptor,
// i.e. pthread_join()->munmap()).
- (sctx && sctx->in_blocking_func == 1 && thr->in_rtl == 1)) {
- int in_rtl = thr->in_rtl;
- thr->in_rtl = 0;
- CHECK_EQ(thr->in_signal_handler, false);
- thr->in_signal_handler = true;
- if (sigact)
- sigactions[sig].sa_sigaction(sig, info, ctx);
- else
- sigactions[sig].sa_handler(sig);
- CHECK_EQ(thr->in_signal_handler, true);
- thr->in_signal_handler = false;
- thr->in_rtl = in_rtl;
+ (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed))) {
+ atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
+ if (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed)) {
+      // We ignore interceptors in blocking functions,
+      // temporarily enable them again while we are calling the user function.
+ int const i = thr->ignore_interceptors;
+ thr->ignore_interceptors = 0;
+ atomic_store(&sctx->in_blocking_func, 0, memory_order_relaxed);
+ CallUserSignalHandler(thr, sync, true, sigact, sig, info, ctx);
+ thr->ignore_interceptors = i;
+ atomic_store(&sctx->in_blocking_func, 1, memory_order_relaxed);
+ } else {
+      // Be very conservative about when we do acquire in this case.
+      // It's unsafe to do an acquire in async handlers, because ThreadState
+      // can be in an inconsistent state.
+ // SIGSYS looks relatively safe -- it's synchronous and can actually
+ // need some global state.
+ bool acq = (sig == SIGSYS);
+ CallUserSignalHandler(thr, sync, acq, sigact, sig, info, ctx);
+ }
+ atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
return;
}
@@ -1663,7 +1987,7 @@ void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
internal_memcpy(&signal->siginfo, info, sizeof(*info));
if (ctx)
internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
- sctx->pending_signal_count++;
+ atomic_store(&sctx->have_pending_signals, 1, memory_order_relaxed);
}
}
@@ -1691,6 +2015,7 @@ TSAN_INTERCEPTOR(int, sigaction, int sig, sigaction_t *act, sigaction_t *old) {
else
newact.sa_handler = rtl_sighandler;
}
+ ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
int res = REAL(sigaction)(sig, &newact, 0);
return res;
}
@@ -1768,61 +2093,52 @@ TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
// We miss atomic synchronization in getaddrinfo,
// and can report false race between malloc and free
// inside of getaddrinfo. So ignore memory accesses.
- ThreadIgnoreBegin(thr);
- // getaddrinfo calls fopen, which can be intercepted by user.
- thr->in_rtl--;
- CHECK_EQ(thr->in_rtl, 0);
+ ThreadIgnoreBegin(thr, pc);
int res = REAL(getaddrinfo)(node, service, hints, rv);
- thr->in_rtl++;
- ThreadIgnoreEnd(thr);
+ ThreadIgnoreEnd(thr, pc);
return res;
}
-// Linux kernel has a bug that leads to kernel deadlock if a process
-// maps TBs of memory and then calls mlock().
-static void MlockIsUnsupported() {
- static atomic_uint8_t printed;
- if (atomic_exchange(&printed, 1, memory_order_relaxed))
- return;
- if (flags()->verbosity > 0)
- Printf("INFO: ThreadSanitizer ignores mlock/mlockall/munlock/munlockall\n");
-}
-
-TSAN_INTERCEPTOR(int, mlock, const void *addr, uptr len) {
- MlockIsUnsupported();
- return 0;
-}
-
-TSAN_INTERCEPTOR(int, munlock, const void *addr, uptr len) {
- MlockIsUnsupported();
- return 0;
-}
-
-TSAN_INTERCEPTOR(int, mlockall, int flags) {
- MlockIsUnsupported();
- return 0;
-}
-
-TSAN_INTERCEPTOR(int, munlockall, void) {
- MlockIsUnsupported();
- return 0;
-}
-
TSAN_INTERCEPTOR(int, fork, int fake) {
+ if (cur_thread()->in_symbolizer)
+ return REAL(fork)(fake);
SCOPED_INTERCEPTOR_RAW(fork, fake);
+ ForkBefore(thr, pc);
int pid = REAL(fork)(fake);
if (pid == 0) {
// child
+ ForkChildAfter(thr, pc);
FdOnFork(thr, pc);
} else if (pid > 0) {
// parent
+ ForkParentAfter(thr, pc);
+ } else {
+ // error
+ ForkParentAfter(thr, pc);
}
return pid;
}
+TSAN_INTERCEPTOR(int, vfork, int fake) {
+  // Some programs (e.g. openjdk) call close for all file descriptors
+  // in the child process. Under tsan this leads to false positives: the
+  // address space is shared, so the parent process also thinks that
+  // the descriptors are closed (while they actually are not), and the
+  // missed synchronization then produces false races.
+  // Strictly speaking this is undefined behavior, because the vfork child is
+  // not allowed to call any functions other than exec/exit. But this is what
+  // openjdk does, so we want to handle it.
+  // We could disable interceptors in the child process. But it's not possible
+  // to simply intercept and wrap vfork, because the vfork child is not allowed
+  // to return from the function that calls vfork, and that's exactly what
+  // we would do. So this would require some assembly trickery as well.
+  // Instead we simply turn vfork into fork.
+ return WRAP(fork)(fake);
+}
+
static int OnExit(ThreadState *thr) {
int status = Finalize(thr);
- REAL(fflush)(0);
+ FlushStreams();
return status;
}
@@ -1832,20 +2148,31 @@ struct TsanInterceptorContext {
const uptr pc;
};
+static void HandleRecvmsg(ThreadState *thr, uptr pc,
+ __sanitizer_msghdr *msg) {
+ int fds[64];
+ int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds));
+ for (int i = 0; i < cnt; i++)
+ FdEventCreate(thr, pc, fds[i]);
+}
+
#include "sanitizer_common/sanitizer_platform_interceptors.h"
-// Causes interceptor recursion (getpwuid_r() calls fopen())
-#undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
-#undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
// Causes interceptor recursion (getaddrinfo() and fopen())
#undef SANITIZER_INTERCEPT_GETADDRINFO
-#undef SANITIZER_INTERCEPT_GETNAMEINFO
-// Causes interceptor recursion (glob64() calls lstat64())
-#undef SANITIZER_INTERCEPT_GLOB
+// These interceptors do not seem to be strictly necessary for tsan.
+// But we see cases where the interceptors consume 70% of execution time.
+// Memory blocks passed to fgetgrent_r are "written to" by tsan several times.
+// First, there is some recursion (getgrnam_r calls fgetgrent_r), and each
+// function "writes to" the buffer. Then, the same memory is "written to"
+// twice, first as buf and then as pwbufp (both of them refer to the same
+// addresses).
+#undef SANITIZER_INTERCEPT_GETPWENT
+#undef SANITIZER_INTERCEPT_GETPWENT_R
+#undef SANITIZER_INTERCEPT_FGETPWENT
+#undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
+#undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
-#define COMMON_INTERCEPTOR_UNPOISON_PARAM(ctx, count) \
- do { \
- } while (false)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr, \
@@ -1863,6 +2190,31 @@ struct TsanInterceptorContext {
ctx = (void *)&_ctx; \
(void) ctx;
+#define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
+ SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
+ TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
+ ctx = (void *)&_ctx; \
+ (void) ctx;
+
+#define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
+ Acquire(thr, pc, File2addr(path)); \
+ if (file) { \
+ int fd = fileno_unlocked(file); \
+ if (fd >= 0) FdFileCreate(thr, pc, fd); \
+ }
+
+#define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
+ if (file) { \
+ int fd = fileno_unlocked(file); \
+ if (fd >= 0) FdClose(thr, pc, fd); \
+ }
+
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res) \
+ libignore()->OnLibraryLoaded(filename)
+
+#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
+ libignore()->OnLibraryUnloaded()
+
#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)
@@ -1879,7 +2231,7 @@ struct TsanInterceptorContext {
ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)
#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
- CTX()->thread_registry->SetThreadNameByUserId(thread, name)
+ __tsan::ctx->thread_registry->SetThreadNameByUserId(thread, name)
#define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)
@@ -1898,10 +2250,16 @@ struct TsanInterceptorContext {
MutexRepair(((TsanInterceptorContext *)ctx)->thr, \
((TsanInterceptorContext *)ctx)->pc, (uptr)m)
+#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
+ HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, msg)
+
#include "sanitizer_common/sanitizer_common_interceptors.inc"
#define TSAN_SYSCALL() \
ThreadState *thr = cur_thread(); \
+ if (thr->ignore_interceptors) \
+ return; \
ScopedSyscall scoped_syscall(thr) \
/**/
@@ -1910,15 +2268,11 @@ struct ScopedSyscall {
explicit ScopedSyscall(ThreadState *thr)
: thr(thr) {
- if (thr->in_rtl == 0)
- Initialize(thr);
- thr->in_rtl++;
+ Initialize(thr);
}
~ScopedSyscall() {
- thr->in_rtl--;
- if (thr->in_rtl == 0)
- ProcessPendingSignals(thr);
+ ProcessPendingSignals(thr);
}
};
@@ -1927,131 +2281,131 @@ static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
MemoryAccessRange(thr, pc, p, s, write);
}
+static void syscall_acquire(uptr pc, uptr addr) {
+ TSAN_SYSCALL();
+ Acquire(thr, pc, addr);
+ DPrintf("syscall_acquire(%p)\n", addr);
+}
+
+static void syscall_release(uptr pc, uptr addr) {
+ TSAN_SYSCALL();
+ DPrintf("syscall_release(%p)\n", addr);
+ Release(thr, pc, addr);
+}
+
static void syscall_fd_close(uptr pc, int fd) {
TSAN_SYSCALL();
- if (fd >= 0)
- FdClose(thr, pc, fd);
+ FdClose(thr, pc, fd);
+}
+
+static USED void syscall_fd_acquire(uptr pc, int fd) {
+ TSAN_SYSCALL();
+ FdAcquire(thr, pc, fd);
+ DPrintf("syscall_fd_acquire(%p)\n", fd);
+}
+
+static USED void syscall_fd_release(uptr pc, int fd) {
+ TSAN_SYSCALL();
+ DPrintf("syscall_fd_release(%p)\n", fd);
+ FdRelease(thr, pc, fd);
}
static void syscall_pre_fork(uptr pc) {
TSAN_SYSCALL();
+ ForkBefore(thr, pc);
}
-static void syscall_post_fork(uptr pc, int res) {
+static void syscall_post_fork(uptr pc, int pid) {
TSAN_SYSCALL();
- if (res == 0) {
+ if (pid == 0) {
// child
+ ForkChildAfter(thr, pc);
FdOnFork(thr, pc);
- } else if (res > 0) {
+ } else if (pid > 0) {
// parent
+ ForkParentAfter(thr, pc);
+ } else {
+ // error
+ ForkParentAfter(thr, pc);
}
}
#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \
syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false)
+
#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), true)
+
#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
do { \
(void)(p); \
(void)(s); \
} while (false)
+
#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
do { \
(void)(p); \
(void)(s); \
} while (false)
+
+#define COMMON_SYSCALL_ACQUIRE(addr) \
+ syscall_acquire(GET_CALLER_PC(), (uptr)(addr))
+
+#define COMMON_SYSCALL_RELEASE(addr) \
+ syscall_release(GET_CALLER_PC(), (uptr)(addr))
+
#define COMMON_SYSCALL_FD_CLOSE(fd) syscall_fd_close(GET_CALLER_PC(), fd)
+
+#define COMMON_SYSCALL_FD_ACQUIRE(fd) syscall_fd_acquire(GET_CALLER_PC(), fd)
+
+#define COMMON_SYSCALL_FD_RELEASE(fd) syscall_fd_release(GET_CALLER_PC(), fd)
+
#define COMMON_SYSCALL_PRE_FORK() \
syscall_pre_fork(GET_CALLER_PC())
+
#define COMMON_SYSCALL_POST_FORK(res) \
syscall_post_fork(GET_CALLER_PC(), res)
+
#include "sanitizer_common/sanitizer_common_syscalls.inc"
namespace __tsan {
static void finalize(void *arg) {
ThreadState *thr = cur_thread();
- uptr pc = 0;
- atexit_ctx->exit(thr, pc);
int status = Finalize(thr);
- REAL(fflush)(0);
+ // Make sure the output is not lost.
+ FlushStreams();
if (status)
REAL(_exit)(status);
}
-void ProcessPendingSignals(ThreadState *thr) {
- CHECK_EQ(thr->in_rtl, 0);
- SignalContext *sctx = SigCtx(thr);
- if (sctx == 0 || sctx->pending_signal_count == 0 || thr->in_signal_handler)
- return;
- Context *ctx = CTX();
- thr->in_signal_handler = true;
- sctx->pending_signal_count = 0;
- // These are too big for stack.
- static THREADLOCAL __sanitizer_sigset_t emptyset, oldset;
- REAL(sigfillset)(&emptyset);
- pthread_sigmask(SIG_SETMASK, &emptyset, &oldset);
- for (int sig = 0; sig < kSigCount; sig++) {
- SignalDesc *signal = &sctx->pending_signals[sig];
- if (signal->armed) {
- signal->armed = false;
- if (sigactions[sig].sa_handler != SIG_DFL
- && sigactions[sig].sa_handler != SIG_IGN) {
- // Insure that the handler does not spoil errno.
- const int saved_errno = errno;
- errno = 0;
- if (signal->sigaction)
- sigactions[sig].sa_sigaction(sig, &signal->siginfo, &signal->ctx);
- else
- sigactions[sig].sa_handler(sig);
- if (flags()->report_bugs && errno != 0) {
- ScopedInRtl in_rtl;
- __tsan::StackTrace stack;
- uptr pc = signal->sigaction ?
- (uptr)sigactions[sig].sa_sigaction :
- (uptr)sigactions[sig].sa_handler;
- pc += 1; // return address is expected, OutputReport() will undo this
- stack.Init(&pc, 1);
- ThreadRegistryLock l(ctx->thread_registry);
- ScopedReport rep(ReportTypeErrnoInSignal);
- if (!IsFiredSuppression(ctx, rep, stack)) {
- rep.AddStack(&stack);
- OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
- }
- }
- errno = saved_errno;
- }
- }
- }
- pthread_sigmask(SIG_SETMASK, &oldset, 0);
- CHECK_EQ(thr->in_signal_handler, true);
- thr->in_signal_handler = false;
-}
-
static void unreachable() {
- Printf("FATAL: ThreadSanitizer: unreachable called\n");
+ Report("FATAL: ThreadSanitizer: unreachable called\n");
Die();
}
void InitializeInterceptors() {
- CHECK_GT(cur_thread()->in_rtl, 0);
-
// We need to setup it early, because functions like dlsym() can call it.
REAL(memset) = internal_memset;
REAL(memcpy) = internal_memcpy;
REAL(memcmp) = internal_memcmp;
// Instruct libc malloc to consume less memory.
+#if !SANITIZER_FREEBSD
mallopt(1, 0); // M_MXFAST
mallopt(-3, 32*1024); // M_MMAP_THRESHOLD
+#endif
- SANITIZER_COMMON_INTERCEPTORS_INIT;
+ InitializeCommonInterceptors();
+
+  // We cannot use TSAN_INTERCEPT to get the setjmp address,
+  // because it uses &setjmp, and setjmp is not present in some versions of libc.
+ using __interception::GetRealFunctionAddress;
+ GetRealFunctionAddress("setjmp", (uptr*)&REAL(setjmp), 0, 0);
+ GetRealFunctionAddress("_setjmp", (uptr*)&REAL(_setjmp), 0, 0);
+ GetRealFunctionAddress("sigsetjmp", (uptr*)&REAL(sigsetjmp), 0, 0);
+ GetRealFunctionAddress("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
- TSAN_INTERCEPT(setjmp);
- TSAN_INTERCEPT(_setjmp);
- TSAN_INTERCEPT(sigsetjmp);
- TSAN_INTERCEPT(__sigsetjmp);
TSAN_INTERCEPT(longjmp);
TSAN_INTERCEPT(siglongjmp);
@@ -2062,18 +2416,16 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(free);
TSAN_INTERCEPT(cfree);
TSAN_INTERCEPT(mmap);
- TSAN_INTERCEPT(mmap64);
+ TSAN_MAYBE_INTERCEPT_MMAP64;
TSAN_INTERCEPT(munmap);
- TSAN_INTERCEPT(memalign);
+ TSAN_MAYBE_INTERCEPT_MEMALIGN;
TSAN_INTERCEPT(valloc);
- TSAN_INTERCEPT(pvalloc);
+ TSAN_MAYBE_INTERCEPT_PVALLOC;
TSAN_INTERCEPT(posix_memalign);
TSAN_INTERCEPT(strlen);
TSAN_INTERCEPT(memset);
TSAN_INTERCEPT(memcpy);
- TSAN_INTERCEPT(memchr);
- TSAN_INTERCEPT(memrchr);
TSAN_INTERCEPT(memmove);
TSAN_INTERCEPT(memcmp);
TSAN_INTERCEPT(strchr);
@@ -2088,6 +2440,13 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(pthread_join);
TSAN_INTERCEPT(pthread_detach);
+ TSAN_INTERCEPT_VER(pthread_cond_init, "GLIBC_2.3.2");
+ TSAN_INTERCEPT_VER(pthread_cond_signal, "GLIBC_2.3.2");
+ TSAN_INTERCEPT_VER(pthread_cond_broadcast, "GLIBC_2.3.2");
+ TSAN_INTERCEPT_VER(pthread_cond_wait, "GLIBC_2.3.2");
+ TSAN_INTERCEPT_VER(pthread_cond_timedwait, "GLIBC_2.3.2");
+ TSAN_INTERCEPT_VER(pthread_cond_destroy, "GLIBC_2.3.2");
+
TSAN_INTERCEPT(pthread_mutex_init);
TSAN_INTERCEPT(pthread_mutex_destroy);
TSAN_INTERCEPT(pthread_mutex_trylock);
@@ -2109,9 +2468,6 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
TSAN_INTERCEPT(pthread_rwlock_unlock);
- INTERCEPT_FUNCTION_VER(pthread_cond_destroy, "GLIBC_2.3.2");
- INTERCEPT_FUNCTION_VER(pthread_cond_timedwait, "GLIBC_2.3.2");
-
TSAN_INTERCEPT(pthread_barrier_init);
TSAN_INTERCEPT(pthread_barrier_destroy);
TSAN_INTERCEPT(pthread_barrier_wait);
@@ -2127,38 +2483,38 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(sem_getvalue);
TSAN_INTERCEPT(stat);
- TSAN_INTERCEPT(__xstat);
- TSAN_INTERCEPT(stat64);
- TSAN_INTERCEPT(__xstat64);
+ TSAN_MAYBE_INTERCEPT___XSTAT;
+ TSAN_MAYBE_INTERCEPT_STAT64;
+ TSAN_MAYBE_INTERCEPT___XSTAT64;
TSAN_INTERCEPT(lstat);
- TSAN_INTERCEPT(__lxstat);
- TSAN_INTERCEPT(lstat64);
- TSAN_INTERCEPT(__lxstat64);
+ TSAN_MAYBE_INTERCEPT___LXSTAT;
+ TSAN_MAYBE_INTERCEPT_LSTAT64;
+ TSAN_MAYBE_INTERCEPT___LXSTAT64;
TSAN_INTERCEPT(fstat);
- TSAN_INTERCEPT(__fxstat);
- TSAN_INTERCEPT(fstat64);
- TSAN_INTERCEPT(__fxstat64);
+ TSAN_MAYBE_INTERCEPT___FXSTAT;
+ TSAN_MAYBE_INTERCEPT_FSTAT64;
+ TSAN_MAYBE_INTERCEPT___FXSTAT64;
TSAN_INTERCEPT(open);
- TSAN_INTERCEPT(open64);
+ TSAN_MAYBE_INTERCEPT_OPEN64;
TSAN_INTERCEPT(creat);
- TSAN_INTERCEPT(creat64);
+ TSAN_MAYBE_INTERCEPT_CREAT64;
TSAN_INTERCEPT(dup);
TSAN_INTERCEPT(dup2);
TSAN_INTERCEPT(dup3);
- TSAN_INTERCEPT(eventfd);
- TSAN_INTERCEPT(signalfd);
- TSAN_INTERCEPT(inotify_init);
- TSAN_INTERCEPT(inotify_init1);
+ TSAN_MAYBE_INTERCEPT_EVENTFD;
+ TSAN_MAYBE_INTERCEPT_SIGNALFD;
+ TSAN_MAYBE_INTERCEPT_INOTIFY_INIT;
+ TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1;
TSAN_INTERCEPT(socket);
TSAN_INTERCEPT(socketpair);
TSAN_INTERCEPT(connect);
TSAN_INTERCEPT(bind);
TSAN_INTERCEPT(listen);
- TSAN_INTERCEPT(epoll_create);
- TSAN_INTERCEPT(epoll_create1);
+ TSAN_MAYBE_INTERCEPT_EPOLL_CREATE;
+ TSAN_MAYBE_INTERCEPT_EPOLL_CREATE1;
TSAN_INTERCEPT(close);
- TSAN_INTERCEPT(__close);
- TSAN_INTERCEPT(__res_iclose);
+ TSAN_MAYBE_INTERCEPT___CLOSE;
+ TSAN_MAYBE_INTERCEPT___RES_ICLOSE;
TSAN_INTERCEPT(pipe);
TSAN_INTERCEPT(pipe2);
@@ -2167,19 +2523,17 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(recv);
TSAN_INTERCEPT(unlink);
- TSAN_INTERCEPT(fopen);
- TSAN_INTERCEPT(freopen);
- TSAN_INTERCEPT(fclose);
+ TSAN_INTERCEPT(tmpfile);
+ TSAN_MAYBE_INTERCEPT_TMPFILE64;
TSAN_INTERCEPT(fread);
TSAN_INTERCEPT(fwrite);
- TSAN_INTERCEPT(fflush);
TSAN_INTERCEPT(abort);
TSAN_INTERCEPT(puts);
TSAN_INTERCEPT(rmdir);
TSAN_INTERCEPT(opendir);
- TSAN_INTERCEPT(epoll_ctl);
- TSAN_INTERCEPT(epoll_wait);
+ TSAN_MAYBE_INTERCEPT_EPOLL_CTL;
+ TSAN_MAYBE_INTERCEPT_EPOLL_WAIT;
TSAN_INTERCEPT(sigaction);
TSAN_INTERCEPT(signal);
@@ -2193,14 +2547,8 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(gettimeofday);
TSAN_INTERCEPT(getaddrinfo);
- TSAN_INTERCEPT(mlock);
- TSAN_INTERCEPT(munlock);
- TSAN_INTERCEPT(mlockall);
- TSAN_INTERCEPT(munlockall);
-
TSAN_INTERCEPT(fork);
- TSAN_INTERCEPT(dlopen);
- TSAN_INTERCEPT(dlclose);
+ TSAN_INTERCEPT(vfork);
TSAN_INTERCEPT(on_exit);
TSAN_INTERCEPT(__cxa_atexit);
TSAN_INTERCEPT(_exit);
@@ -2208,9 +2556,6 @@ void InitializeInterceptors() {
// Need to setup it, because interceptors check that the function is resolved.
// But atexit is emitted directly into the module, so can't be resolved.
REAL(atexit) = (int(*)(void(*)()))unreachable;
- atexit_ctx = new(internal_alloc(MBlockAtExit, sizeof(AtExitContext)))
- AtExitContext();
-
if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
Printf("ThreadSanitizer: failed to setup atexit callback\n");
Die();
@@ -2224,16 +2569,19 @@ void InitializeInterceptors() {
FdInit();
}
-void internal_start_thread(void(*func)(void *arg), void *arg) {
- // Start the thread with signals blocked, otherwise it can steal users
- // signals.
- __sanitizer_kernel_sigset_t set, old;
+void *internal_start_thread(void(*func)(void *arg), void *arg) {
+ // Start the thread with signals blocked, otherwise it can steal user signals.
+ __sanitizer_sigset_t set, old;
internal_sigfillset(&set);
internal_sigprocmask(SIG_SETMASK, &set, &old);
void *th;
REAL(pthread_create)(&th, 0, (void*(*)(void *arg))func, arg);
- REAL(pthread_detach)(th);
internal_sigprocmask(SIG_SETMASK, &old, 0);
+ return th;
+}
+
+void internal_join_thread(void *th) {
+ REAL(pthread_join)(th, 0);
}
} // namespace __tsan
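internal_start_thread above blocks every signal around pthread_create so the background thread inherits a full mask and cannot steal signals destined for user threads. The same idiom in plain pthread terms, as a sketch with hypothetical names that are not part of the runtime:

#include <pthread.h>
#include <signal.h>

// Start a helper thread that will never receive asynchronous signals:
// a new thread inherits the signal mask of its creator, so we block
// everything for the duration of pthread_create and then restore.
static pthread_t start_masked_thread(void *(*fn)(void *), void *arg) {
  sigset_t all, old;
  sigfillset(&all);
  pthread_sigmask(SIG_SETMASK, &all, &old);
  pthread_t th;
  pthread_create(&th, nullptr, fn, arg);
  pthread_sigmask(SIG_SETMASK, &old, nullptr);  // creator keeps its own mask
  return th;
}

static void join_masked_thread(pthread_t th) {
  pthread_join(th, nullptr);
}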
diff --git a/lib/tsan/rtl/tsan_interface_ann.cc b/lib/tsan/rtl/tsan_interface_ann.cc
index cacbc0281d0fe..fd3c846678f5f 100644
--- a/lib/tsan/rtl/tsan_interface_ann.cc
+++ b/lib/tsan/rtl/tsan_interface_ann.cc
@@ -33,33 +33,28 @@ class ScopedAnnotation {
public:
ScopedAnnotation(ThreadState *thr, const char *aname, const char *f, int l,
uptr pc)
- : thr_(thr)
- , in_rtl_(thr->in_rtl) {
- CHECK_EQ(thr_->in_rtl, 0);
+ : thr_(thr) {
FuncEntry(thr_, pc);
- thr_->in_rtl++;
DPrintf("#%d: annotation %s() %s:%d\n", thr_->tid, aname, f, l);
}
~ScopedAnnotation() {
- thr_->in_rtl--;
- CHECK_EQ(in_rtl_, thr_->in_rtl);
FuncExit(thr_);
+ CheckNoLocks(thr_);
}
private:
ThreadState *const thr_;
- const int in_rtl_;
};
#define SCOPED_ANNOTATION(typ) \
if (!flags()->enable_annotations) \
return; \
ThreadState *thr = cur_thread(); \
- const uptr pc = (uptr)__builtin_return_address(0); \
+ const uptr caller_pc = (uptr)__builtin_return_address(0); \
StatInc(thr, StatAnnotation); \
StatInc(thr, Stat##typ); \
- ScopedAnnotation sa(thr, __FUNCTION__, f, l, \
- (uptr)__builtin_return_address(0)); \
+ ScopedAnnotation sa(thr, __func__, f, l, caller_pc); \
+ const uptr pc = StackTrace::GetCurrentPc(); \
(void)pc; \
/**/
@@ -131,8 +126,6 @@ static ExpectRace *FindRace(ExpectRace *list, uptr addr, uptr size) {
static bool CheckContains(ExpectRace *list, uptr addr, uptr size) {
ExpectRace *race = FindRace(list, addr, size);
- if (race == 0 && AlternativeAddress(addr))
- race = FindRace(list, AlternativeAddress(addr), size);
if (race == 0)
return false;
DPrintf("Hit expected/benign race: %s addr=%zx:%d %s:%d\n",
@@ -311,7 +304,7 @@ void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) {
while (dyn_ann_ctx->expect.next != &dyn_ann_ctx->expect) {
ExpectRace *race = dyn_ann_ctx->expect.next;
if (race->hitcount == 0) {
- CTX()->nmissed_expected++;
+ ctx->nmissed_expected++;
ReportMissedExpectedRace(race);
}
race->prev->next = race->next;
@@ -383,32 +376,32 @@ void INTERFACE_ATTRIBUTE AnnotateBenignRace(
void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsBegin(char *f, int l) {
SCOPED_ANNOTATION(AnnotateIgnoreReadsBegin);
- ThreadIgnoreBegin(thr);
+ ThreadIgnoreBegin(thr, pc);
}
void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsEnd(char *f, int l) {
SCOPED_ANNOTATION(AnnotateIgnoreReadsEnd);
- ThreadIgnoreEnd(thr);
+ ThreadIgnoreEnd(thr, pc);
}
void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesBegin(char *f, int l) {
SCOPED_ANNOTATION(AnnotateIgnoreWritesBegin);
- ThreadIgnoreBegin(thr);
+ ThreadIgnoreBegin(thr, pc);
}
void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesEnd(char *f, int l) {
SCOPED_ANNOTATION(AnnotateIgnoreWritesEnd);
- ThreadIgnoreEnd(thr);
+ ThreadIgnoreEnd(thr, pc);
}
void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncBegin(char *f, int l) {
SCOPED_ANNOTATION(AnnotateIgnoreSyncBegin);
- ThreadIgnoreSyncBegin(thr);
+ ThreadIgnoreSyncBegin(thr, pc);
}
void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncEnd(char *f, int l) {
SCOPED_ANNOTATION(AnnotateIgnoreSyncEnd);
- ThreadIgnoreSyncEnd(thr);
+ ThreadIgnoreSyncEnd(thr, pc);
}
void INTERFACE_ATTRIBUTE AnnotatePublishMemoryRange(
@@ -441,7 +434,7 @@ void INTERFACE_ATTRIBUTE WTFAnnotateHappensAfter(char *f, int l, uptr addr) {
void INTERFACE_ATTRIBUTE WTFAnnotateBenignRaceSized(
char *f, int l, uptr mem, uptr sz, char *desc) {
SCOPED_ANNOTATION(AnnotateBenignRaceSized);
- BenignRaceImpl(f, l, mem, 1, desc);
+ BenignRaceImpl(f, l, mem, sz, desc);
}
int INTERFACE_ATTRIBUTE RunningOnValgrind() {
@@ -461,4 +454,6 @@ const char INTERFACE_ATTRIBUTE* ThreadSanitizerQuery(const char *query) {
void INTERFACE_ATTRIBUTE
AnnotateMemoryIsInitialized(char *f, int l, uptr mem, uptr sz) {}
+void INTERFACE_ATTRIBUTE
+AnnotateMemoryIsUninitialized(char *f, int l, uptr mem, uptr sz) {}
} // extern "C"
diff --git a/lib/tsan/rtl/tsan_interface_atomic.cc b/lib/tsan/rtl/tsan_interface_atomic.cc
index d9f8cdf5b1060..9b699511674a9 100644
--- a/lib/tsan/rtl/tsan_interface_atomic.cc
+++ b/lib/tsan/rtl/tsan_interface_atomic.cc
@@ -12,7 +12,7 @@
//===----------------------------------------------------------------------===//
// ThreadSanitizer atomic operations are based on C++11/C1x standards.
-// For background see C++11 standard. A slightly older, publically
+// For background see C++11 standard. A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
@@ -21,72 +21,40 @@
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
-#include "tsan_interface_atomic.h"
+#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"
using namespace __tsan; // NOLINT
-#define SCOPED_ATOMIC(func, ...) \
- const uptr callpc = (uptr)__builtin_return_address(0); \
- uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
- mo = ConvertOrder(mo); \
- mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
- ThreadState *const thr = cur_thread(); \
- AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
- ScopedAtomic sa(thr, callpc, a, mo, __FUNCTION__); \
- return Atomic##func(thr, pc, __VA_ARGS__); \
-/**/
-
-// Some shortcuts.
-typedef __tsan_memory_order morder;
-typedef __tsan_atomic8 a8;
-typedef __tsan_atomic16 a16;
-typedef __tsan_atomic32 a32;
-typedef __tsan_atomic64 a64;
-typedef __tsan_atomic128 a128;
-const morder mo_relaxed = __tsan_memory_order_relaxed;
-const morder mo_consume = __tsan_memory_order_consume;
-const morder mo_acquire = __tsan_memory_order_acquire;
-const morder mo_release = __tsan_memory_order_release;
-const morder mo_acq_rel = __tsan_memory_order_acq_rel;
-const morder mo_seq_cst = __tsan_memory_order_seq_cst;
+// These should match declarations from public tsan_interface_atomic.h header.
+typedef unsigned char a8;
+typedef unsigned short a16; // NOLINT
+typedef unsigned int a32;
+typedef unsigned long long a64; // NOLINT
+#if !defined(SANITIZER_GO) && (defined(__SIZEOF_INT128__) \
+ || (__clang_major__ * 100 + __clang_minor__ >= 302))
+__extension__ typedef __int128 a128;
+# define __TSAN_HAS_INT128 1
+#else
+# define __TSAN_HAS_INT128 0
+#endif
-class ScopedAtomic {
- public:
- ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
- morder mo, const char *func)
- : thr_(thr) {
- CHECK_EQ(thr_->in_rtl, 0);
- ProcessPendingSignals(thr);
- FuncEntry(thr_, pc);
- DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
- thr_->in_rtl++;
- }
- ~ScopedAtomic() {
- thr_->in_rtl--;
- CHECK_EQ(thr_->in_rtl, 0);
- FuncExit(thr_);
- }
- private:
- ThreadState *thr_;
-};
+#ifndef SANITIZER_GO
+// Protects emulation of 128-bit atomic operations.
+static StaticSpinMutex mutex128;
+#endif
-static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
- StatInc(thr, StatAtomic);
- StatInc(thr, t);
- StatInc(thr, size == 1 ? StatAtomic1
- : size == 2 ? StatAtomic2
- : size == 4 ? StatAtomic4
- : size == 8 ? StatAtomic8
- : StatAtomic16);
- StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
- : mo == mo_consume ? StatAtomicConsume
- : mo == mo_acquire ? StatAtomicAcquire
- : mo == mo_release ? StatAtomicRelease
- : mo == mo_acq_rel ? StatAtomicAcq_Rel
- : StatAtomicSeq_Cst);
-}
+// Part of ABI, do not change.
+// http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
+typedef enum {
+ mo_relaxed,
+ mo_consume,
+ mo_acquire,
+ mo_release,
+ mo_acq_rel,
+ mo_seq_cst
+} morder;
static bool IsLoadOrder(morder mo) {
return mo == mo_relaxed || mo == mo_consume
@@ -110,27 +78,6 @@ static bool IsAcqRelOrder(morder mo) {
return mo == mo_acq_rel || mo == mo_seq_cst;
}
-static morder ConvertOrder(morder mo) {
- if (mo > (morder)100500) {
- mo = morder(mo - 100500);
- if (mo == morder(1 << 0))
- mo = mo_relaxed;
- else if (mo == morder(1 << 1))
- mo = mo_consume;
- else if (mo == morder(1 << 2))
- mo = mo_acquire;
- else if (mo == morder(1 << 3))
- mo = mo_release;
- else if (mo == morder(1 << 4))
- mo = mo_acq_rel;
- else if (mo == morder(1 << 5))
- mo = mo_seq_cst;
- }
- CHECK_GE(mo, mo_relaxed);
- CHECK_LE(mo, mo_seq_cst);
- return mo;
-}
-
template<typename T> T func_xchg(volatile T *v, T op) {
T res = __sync_lock_test_and_set(v, op);
// __sync_lock_test_and_set does not contain full barrier.
@@ -178,50 +125,58 @@ template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
-#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
+#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !defined(SANITIZER_GO)
a128 func_xchg(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = op;
return cmp;
}
a128 func_add(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = cmp + op;
return cmp;
}
a128 func_sub(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = cmp - op;
return cmp;
}
a128 func_and(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = cmp & op;
return cmp;
}
a128 func_or(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = cmp | op;
return cmp;
}
a128 func_xor(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = cmp ^ op;
return cmp;
}
a128 func_nand(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = ~(cmp & op);
return cmp;
}
a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
+ SpinMutexLock lock(&mutex128);
a128 cur = *v;
if (cur == cmp)
*v = xch;
@@ -243,26 +198,80 @@ static int SizeLog() {
  // For 16-byte atomics we also use 8-byte memory access,
  // this leads to false negatives only in very obscure cases.
}
+#ifndef SANITIZER_GO
+static atomic_uint8_t *to_atomic(const volatile a8 *a) {
+ return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
+}
+
+static atomic_uint16_t *to_atomic(const volatile a16 *a) {
+ return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
+}
+#endif
+
+static atomic_uint32_t *to_atomic(const volatile a32 *a) {
+ return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
+}
+
+static atomic_uint64_t *to_atomic(const volatile a64 *a) {
+ return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
+}
+
+static memory_order to_mo(morder mo) {
+ switch (mo) {
+ case mo_relaxed: return memory_order_relaxed;
+ case mo_consume: return memory_order_consume;
+ case mo_acquire: return memory_order_acquire;
+ case mo_release: return memory_order_release;
+ case mo_acq_rel: return memory_order_acq_rel;
+ case mo_seq_cst: return memory_order_seq_cst;
+ }
+ CHECK(0);
+ return memory_order_seq_cst;
+}
+
+template<typename T>
+static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
+ return atomic_load(to_atomic(a), to_mo(mo));
+}
+
+#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
+static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
+ SpinMutexLock lock(&mutex128);
+ return *a;
+}
+#endif
+
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
morder mo) {
CHECK(IsLoadOrder(mo));
// This fast-path is critical for performance.
// Assume the access is atomic.
- if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a)) {
+ if (!IsAcquireOrder(mo)) {
MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
- return *a; // as if atomic
+ return NoTsanAtomicLoad(a, mo);
}
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, false);
AcquireImpl(thr, pc, &s->clock);
- T v = *a;
+ T v = NoTsanAtomicLoad(a, mo);
s->mtx.ReadUnlock();
- __sync_synchronize();
MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
return v;
}
template<typename T>
+static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
+ atomic_store(to_atomic(a), v, to_mo(mo));
+}
+
+#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
+static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
+ SpinMutexLock lock(&mutex128);
+ *a = v;
+}
+#endif
+
+template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
morder mo) {
CHECK(IsStoreOrder(mo));
@@ -271,21 +280,18 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
// Assume the access is atomic.
// Strictly saying even relaxed store cuts off release sequence,
// so must reset the clock.
- if (!IsReleaseOrder(mo) && sizeof(T) <= sizeof(a)) {
- *a = v; // as if atomic
+ if (!IsReleaseOrder(mo)) {
+ NoTsanAtomicStore(a, v, mo);
return;
}
__sync_synchronize();
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
ReleaseImpl(thr, pc, &s->clock);
- *a = v;
+ NoTsanAtomicStore(a, v, mo);
s->mtx.Unlock();
-  // Trailing memory barrier to provide sequential consistency
- // for Dekker-like store-load synchronization.
- __sync_synchronize();
}
template<typename T, T (*F)(volatile T *v, T op)>
@@ -293,7 +299,7 @@ static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
SyncVar *s = 0;
if (mo != mo_relaxed) {
- s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+ s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -311,6 +317,41 @@ static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
}
template<typename T>
+static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
+ return func_xchg(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
+ return func_add(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
+ return func_sub(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
+ return func_and(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
+ return func_or(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
+ return func_xor(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
+ return func_nand(a, v);
+}
+
+template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
morder mo) {
return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
@@ -353,13 +394,37 @@ static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
}
template<typename T>
+static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
+ return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
+}
+
+#if __TSAN_HAS_INT128
+static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
+ morder mo, morder fmo) {
+ a128 old = *c;
+ a128 cur = func_cas(a, old, v);
+ if (cur == old)
+ return true;
+ *c = cur;
+ return false;
+}
+#endif
+
+template<typename T>
+static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
+ NoTsanAtomicCAS(a, &c, v, mo, fmo);
+ return c;
+}
+
+template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
volatile T *a, T *c, T v, morder mo, morder fmo) {
(void)fmo; // Unused because llvm does not pass it yet.
MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
SyncVar *s = 0;
+ bool write_lock = mo != mo_acquire && mo != mo_consume;
if (mo != mo_relaxed) {
- s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+ s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -372,8 +437,12 @@ static bool AtomicCAS(ThreadState *thr, uptr pc,
}
T cc = *c;
T pr = func_cas(a, cc, v);
- if (s)
- s->mtx.Unlock();
+ if (s) {
+ if (write_lock)
+ s->mtx.Unlock();
+ else
+ s->mtx.ReadUnlock();
+ }
if (pr == cc)
return true;
*c = pr;
@@ -387,293 +456,498 @@ static T AtomicCAS(ThreadState *thr, uptr pc,
return c;
}
+#ifndef SANITIZER_GO
+static void NoTsanAtomicFence(morder mo) {
+ __sync_synchronize();
+}
+
static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
// FIXME(dvyukov): not implemented.
__sync_synchronize();
}
+#endif
+
+// Interface functions follow.
+#ifndef SANITIZER_GO
+
+// C/C++
+
+#define SCOPED_ATOMIC(func, ...) \
+ const uptr callpc = (uptr)__builtin_return_address(0); \
+ uptr pc = StackTrace::GetCurrentPc(); \
+ mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
+ ThreadState *const thr = cur_thread(); \
+ if (thr->ignore_interceptors) \
+ return NoTsanAtomic##func(__VA_ARGS__); \
+ AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
+ ScopedAtomic sa(thr, callpc, a, mo, __func__); \
+ return Atomic##func(thr, pc, __VA_ARGS__); \
+/**/
+
+class ScopedAtomic {
+ public:
+ ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
+ morder mo, const char *func)
+ : thr_(thr) {
+ FuncEntry(thr_, pc);
+ DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
+ }
+ ~ScopedAtomic() {
+ ProcessPendingSignals(thr_);
+ FuncExit(thr_);
+ }
+ private:
+ ThreadState *thr_;
+};
+
+static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
+ StatInc(thr, StatAtomic);
+ StatInc(thr, t);
+ StatInc(thr, size == 1 ? StatAtomic1
+ : size == 2 ? StatAtomic2
+ : size == 4 ? StatAtomic4
+ : size == 8 ? StatAtomic8
+ : StatAtomic16);
+ StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
+ : mo == mo_consume ? StatAtomicConsume
+ : mo == mo_acquire ? StatAtomicAcquire
+ : mo == mo_release ? StatAtomicRelease
+ : mo == mo_acq_rel ? StatAtomicAcq_Rel
+ : StatAtomicSeq_Cst);
+}
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
SCOPED_ATOMIC(Load, a, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
SCOPED_ATOMIC(Load, a, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
SCOPED_ATOMIC(Load, a, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
SCOPED_ATOMIC(Load, a, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
SCOPED_ATOMIC(Load, a, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(Store, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(Store, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(Store, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(Store, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(Store, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(Exchange, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(Exchange, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(Exchange, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(Exchange, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchSub, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(FetchSub, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(FetchSub, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchOr, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(FetchOr, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(FetchOr, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchXor, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(FetchXor, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(FetchXor, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchNand, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(FetchNand, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(FetchNand, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) {
char* a = 0;
SCOPED_ATOMIC(Fence, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}
+} // extern "C"
+
+#else // #ifndef SANITIZER_GO
+
+// Go
+
+#define ATOMIC(func, ...) \
+ if (thr->ignore_sync) { \
+ NoTsanAtomic##func(__VA_ARGS__); \
+ } else { \
+ FuncEntry(thr, cpc); \
+ Atomic##func(thr, pc, __VA_ARGS__); \
+ FuncExit(thr); \
+ } \
+/**/
+
+#define ATOMIC_RET(func, ret, ...) \
+ if (thr->ignore_sync) { \
+ (ret) = NoTsanAtomic##func(__VA_ARGS__); \
+ } else { \
+ FuncEntry(thr, cpc); \
+ (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
+ FuncExit(thr); \
+ } \
+/**/
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_compare_exchange(
+ ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ a32 cur = 0;
+ a32 cmp = *(a32*)(a+8);
+ ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
+ *(bool*)(a+16) = (cur == cmp);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_compare_exchange(
+ ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ a64 cur = 0;
+ a64 cmp = *(a64*)(a+8);
+ ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
+ *(bool*)(a+24) = (cur == cmp);
+}
+} // extern "C"
+#endif // #ifndef SANITIZER_GO
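
For orientation, a minimal caller-side sketch (not part of the patch): under -fsanitize=thread the compiler lowers C/C++ atomics to the __tsan_atomic* entry points defined above, and the numeric memory-order values follow the ABI enum (mo_relaxed = 0 ... mo_seq_cst = 5). The declarations below are assumed to match the public tsan interface header.

typedef unsigned int a32;  // same layout as the a32 typedef above
extern "C" a32 __tsan_atomic32_load(const volatile a32 *a, int mo);
extern "C" a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, int mo);

static volatile a32 g_counter;

unsigned bump() {
  // Roughly what std::atomic<unsigned>::fetch_add(1, memory_order_acq_rel)
  // turns into; 4 == mo_acq_rel in the enum above.
  return __tsan_atomic32_fetch_add(&g_counter, 1, 4);
}

unsigned peek() {
  // memory_order_acquire load; 2 == mo_acquire in the enum above.
  return __tsan_atomic32_load(&g_counter, 2);
}
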
diff --git a/lib/tsan/rtl/tsan_interface_atomic.h b/lib/tsan/rtl/tsan_interface_atomic.h
deleted file mode 100644
index 5352d56679f77..0000000000000
--- a/lib/tsan/rtl/tsan_interface_atomic.h
+++ /dev/null
@@ -1,205 +0,0 @@
-//===-- tsan_interface_atomic.h ---------------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-//===----------------------------------------------------------------------===//
-#ifndef TSAN_INTERFACE_ATOMIC_H
-#define TSAN_INTERFACE_ATOMIC_H
-
-#ifndef INTERFACE_ATTRIBUTE
-# define INTERFACE_ATTRIBUTE __attribute__((visibility("default")))
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef char __tsan_atomic8;
-typedef short __tsan_atomic16; // NOLINT
-typedef int __tsan_atomic32;
-typedef long __tsan_atomic64; // NOLINT
-
-#if defined(__SIZEOF_INT128__) \
- || (__clang_major__ * 100 + __clang_minor__ >= 302)
-__extension__ typedef __int128 __tsan_atomic128;
-#define __TSAN_HAS_INT128 1
-#else
-typedef char __tsan_atomic128;
-#define __TSAN_HAS_INT128 0
-#endif
-
-// Part of ABI, do not change.
-// http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
-typedef enum {
- __tsan_memory_order_relaxed,
- __tsan_memory_order_consume,
- __tsan_memory_order_acquire,
- __tsan_memory_order_release,
- __tsan_memory_order_acq_rel,
- __tsan_memory_order_seq_cst
-} __tsan_memory_order;
-
-__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-
-void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-
-__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-
-__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-
-__tsan_atomic8 __tsan_atomic8_fetch_sub(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic16 __tsan_atomic16_fetch_sub(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic32 __tsan_atomic32_fetch_sub(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic64 __tsan_atomic64_fetch_sub(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic128 __tsan_atomic128_fetch_sub(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-
-__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-
-__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-
-__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-
-__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-
-int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
- __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
-int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
- __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
-int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
- __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
-int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
- __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
-int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
- __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
-
-int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
- __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
-int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
- __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
-int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
- __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
-int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
- __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
-int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
- __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
-
-__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
- volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
- volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
- volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
- volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
-__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
- volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
-
-void __tsan_atomic_thread_fence(__tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-void __tsan_atomic_signal_fence(__tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-
-#ifdef __cplusplus
-} // extern "C"
-#endif
-
-#undef INTERFACE_ATTRIBUTE
-
-#endif // #ifndef TSAN_INTERFACE_ATOMIC_H
diff --git a/lib/tsan/rtl/tsan_interface_java.cc b/lib/tsan/rtl/tsan_interface_java.cc
index 53f14cf07b596..8615349f657fb 100644
--- a/lib/tsan/rtl/tsan_interface_java.cc
+++ b/lib/tsan/rtl/tsan_interface_java.cc
@@ -22,54 +22,17 @@
using namespace __tsan; // NOLINT
-namespace __tsan {
-
-const uptr kHeapShadow = 0x300000000000ull;
-const uptr kHeapAlignment = 8;
+const jptr kHeapAlignment = 8;
-struct BlockDesc {
- bool begin;
- Mutex mtx;
- SyncVar *head;
-
- BlockDesc()
- : mtx(MutexTypeJavaMBlock, StatMtxJavaMBlock)
- , head() {
- CHECK_EQ(begin, false);
- begin = true;
- }
-
- ~BlockDesc() {
- CHECK_EQ(begin, true);
- begin = false;
- ThreadState *thr = cur_thread();
- SyncVar *s = head;
- while (s) {
- SyncVar *s1 = s->next;
- StatInc(thr, StatSyncDestroyed);
- s->mtx.Lock();
- s->mtx.Unlock();
- thr->mset.Remove(s->GetId());
- DestroyAndFree(s);
- s = s1;
- }
- }
-};
+namespace __tsan {
struct JavaContext {
const uptr heap_begin;
const uptr heap_size;
- BlockDesc *heap_shadow;
JavaContext(jptr heap_begin, jptr heap_size)
: heap_begin(heap_begin)
, heap_size(heap_size) {
- uptr size = heap_size / kHeapAlignment * sizeof(BlockDesc);
- heap_shadow = (BlockDesc*)MmapFixedNoReserve(kHeapShadow, size);
- if ((uptr)heap_shadow != kHeapShadow) {
- Printf("ThreadSanitizer: failed to mmap Java heap shadow\n");
- Die();
- }
}
};
@@ -79,13 +42,9 @@ class ScopedJavaFunc {
: thr_(thr) {
Initialize(thr_);
FuncEntry(thr, pc);
- CHECK_EQ(thr_->in_rtl, 0);
- thr_->in_rtl++;
}
~ScopedJavaFunc() {
- thr_->in_rtl--;
- CHECK_EQ(thr_->in_rtl, 0);
FuncExit(thr_);
// FIXME(dvyukov): process pending signals.
}
@@ -97,69 +56,12 @@ class ScopedJavaFunc {
static u64 jctx_buf[sizeof(JavaContext) / sizeof(u64) + 1];
static JavaContext *jctx;
-static BlockDesc *getblock(uptr addr) {
- uptr i = (addr - jctx->heap_begin) / kHeapAlignment;
- return &jctx->heap_shadow[i];
-}
-
-static uptr USED getmem(BlockDesc *b) {
- uptr i = b - jctx->heap_shadow;
- uptr p = jctx->heap_begin + i * kHeapAlignment;
- CHECK_GE(p, jctx->heap_begin);
- CHECK_LT(p, jctx->heap_begin + jctx->heap_size);
- return p;
-}
-
-static BlockDesc *getblockbegin(uptr addr) {
- for (BlockDesc *b = getblock(addr);; b--) {
- CHECK_GE(b, jctx->heap_shadow);
- if (b->begin)
- return b;
- }
- return 0;
-}
-
-SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
- bool write_lock, bool create) {
- if (jctx == 0 || addr < jctx->heap_begin
- || addr >= jctx->heap_begin + jctx->heap_size)
- return 0;
- BlockDesc *b = getblockbegin(addr);
- DPrintf("#%d: GetJavaSync %p->%p\n", thr->tid, addr, b);
- Lock l(&b->mtx);
- SyncVar *s = b->head;
- for (; s; s = s->next) {
- if (s->addr == addr) {
- DPrintf("#%d: found existing sync for %p\n", thr->tid, addr);
- break;
- }
- }
- if (s == 0 && create) {
- DPrintf("#%d: creating new sync for %p\n", thr->tid, addr);
- s = CTX()->synctab.Create(thr, pc, addr);
- s->next = b->head;
- b->head = s;
- }
- if (s) {
- if (write_lock)
- s->mtx.Lock();
- else
- s->mtx.ReadLock();
- }
- return s;
-}
-
-SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr) {
- // We do not destroy Java mutexes other than in __tsan_java_free().
- return 0;
-}
-
} // namespace __tsan
#define SCOPED_JAVA_FUNC(func) \
ThreadState *thr = cur_thread(); \
const uptr caller_pc = GET_CALLER_PC(); \
- const uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
+ const uptr pc = StackTrace::GetCurrentPc(); \
(void)pc; \
ScopedJavaFunc scoped(thr, caller_pc); \
/**/
@@ -196,8 +98,7 @@ void __tsan_java_alloc(jptr ptr, jptr size) {
CHECK_GE(ptr, jctx->heap_begin);
CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
- BlockDesc *b = getblock(ptr);
- new(b) BlockDesc();
+ OnUserAlloc(thr, pc, ptr, size, false);
}
void __tsan_java_free(jptr ptr, jptr size) {
@@ -210,12 +111,7 @@ void __tsan_java_free(jptr ptr, jptr size) {
CHECK_GE(ptr, jctx->heap_begin);
CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
- BlockDesc *beg = getblock(ptr);
- BlockDesc *end = getblock(ptr + size);
- for (BlockDesc *b = beg; b != end; b++) {
- if (b->begin)
- b->~BlockDesc();
- }
+ ctx->metamap.FreeRange(thr, pc, ptr, size);
}
void __tsan_java_move(jptr src, jptr dst, jptr size) {
@@ -230,42 +126,36 @@ void __tsan_java_move(jptr src, jptr dst, jptr size) {
CHECK_LE(src + size, jctx->heap_begin + jctx->heap_size);
CHECK_GE(dst, jctx->heap_begin);
CHECK_LE(dst + size, jctx->heap_begin + jctx->heap_size);
- CHECK(dst >= src + size || src >= dst + size);
+ CHECK_NE(dst, src);
+ CHECK_NE(size, 0);
// Assuming it's not running concurrently with threads that do
// memory accesses and mutex operations (stop-the-world phase).
- { // NOLINT
- BlockDesc *s = getblock(src);
- BlockDesc *d = getblock(dst);
- BlockDesc *send = getblock(src + size);
- for (; s != send; s++, d++) {
- CHECK_EQ(d->begin, false);
- if (s->begin) {
- DPrintf("#%d: moving block %p->%p\n", thr->tid, getmem(s), getmem(d));
- new(d) BlockDesc;
- d->head = s->head;
- for (SyncVar *sync = d->head; sync; sync = sync->next) {
- uptr newaddr = sync->addr - src + dst;
- DPrintf("#%d: moving sync %p->%p\n", thr->tid, sync->addr, newaddr);
- sync->addr = newaddr;
- }
- s->head = 0;
- s->~BlockDesc();
- }
- }
+ ctx->metamap.MoveMemory(src, dst, size);
+
+ // Move shadow.
+ u64 *s = (u64*)MemToShadow(src);
+ u64 *d = (u64*)MemToShadow(dst);
+ u64 *send = (u64*)MemToShadow(src + size);
+ uptr inc = 1;
+ if (dst > src) {
+ s = (u64*)MemToShadow(src + size) - 1;
+ d = (u64*)MemToShadow(dst + size) - 1;
+ send = (u64*)MemToShadow(src) - 1;
+ inc = -1;
}
-
- { // NOLINT
- u64 *s = (u64*)MemToShadow(src);
- u64 *d = (u64*)MemToShadow(dst);
- u64 *send = (u64*)MemToShadow(src + size);
- for (; s != send; s++, d++) {
- *d = *s;
- *s = 0;
- }
+ for (; s != send; s += inc, d += inc) {
+ *d = *s;
+ *s = 0;
}
}
+void __tsan_java_finalize() {
+ SCOPED_JAVA_FUNC(__tsan_java_finalize);
+ DPrintf("#%d: java_mutex_finalize()\n", thr->tid);
+ AcquireGlobal(thr, 0);
+}
+
void __tsan_java_mutex_lock(jptr addr) {
SCOPED_JAVA_FUNC(__tsan_java_mutex_lock);
DPrintf("#%d: java_mutex_lock(%p)\n", thr->tid, addr);
diff --git a/lib/tsan/rtl/tsan_interface_java.h b/lib/tsan/rtl/tsan_interface_java.h
index 9ac78e074bbe0..1f793df712de3 100644
--- a/lib/tsan/rtl/tsan_interface_java.h
+++ b/lib/tsan/rtl/tsan_interface_java.h
@@ -50,8 +50,13 @@ void __tsan_java_alloc(jptr ptr, jptr size) INTERFACE_ATTRIBUTE;
void __tsan_java_free(jptr ptr, jptr size) INTERFACE_ATTRIBUTE;
// Callback for memory move by GC.
// Can be aggregated for several objects (preferably).
-// The ranges must not overlap.
+// The ranges can overlap.
void __tsan_java_move(jptr src, jptr dst, jptr size) INTERFACE_ATTRIBUTE;
+// This function must be called on the finalizer thread
+// before executing a batch of finalizers.
+// It ensures necessary synchronization between
+// java object creation and finalization.
+void __tsan_java_finalize() INTERFACE_ATTRIBUTE;
// Mutex lock.
// Addr is any unique address associated with the mutex.
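
A hedged sketch of the VM-side calling sequence the updated Java interface implies (not part of the patch; the function names below are hypothetical, and it assumes tsan_interface_java.h is included):

// During a stop-the-world compaction phase: source and destination
// ranges may now overlap, per the relaxed contract above.
void vm_move_object(jptr old_addr, jptr new_addr, jptr size) {
  __tsan_java_move(old_addr, new_addr, size);
}

// On the finalizer thread, before executing a batch of finalizers,
// so that object construction is synchronized with finalization.
void vm_before_finalizer_batch() {
  __tsan_java_finalize();
}
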
diff --git a/lib/tsan/rtl/tsan_md5.cc b/lib/tsan/rtl/tsan_md5.cc
index 66e8240431537..51279c10d17aa 100644
--- a/lib/tsan/rtl/tsan_md5.cc
+++ b/lib/tsan/rtl/tsan_md5.cc
@@ -25,7 +25,7 @@ namespace __tsan {
(a) += (b);
#define SET(n) \
- (*(MD5_u32plus *)&ptr[(n) * 4])
+ (*(const MD5_u32plus *)&ptr[(n) * 4])
#define GET(n) \
SET(n)
@@ -39,13 +39,11 @@ typedef struct {
MD5_u32plus block[16];
} MD5_CTX;
-static void *body(MD5_CTX *ctx, void *data, ulong_t size) {
- unsigned char *ptr;
+static const void *body(MD5_CTX *ctx, const void *data, ulong_t size) {
+ const unsigned char *ptr = (const unsigned char *)data;
MD5_u32plus a, b, c, d;
MD5_u32plus saved_a, saved_b, saved_c, saved_d;
- ptr = (unsigned char*)data;
-
a = ctx->a;
b = ctx->b;
c = ctx->c;
@@ -151,7 +149,7 @@ void MD5_Init(MD5_CTX *ctx) {
ctx->hi = 0;
}
-void MD5_Update(MD5_CTX *ctx, void *data, ulong_t size) {
+void MD5_Update(MD5_CTX *ctx, const void *data, ulong_t size) {
MD5_u32plus saved_lo;
ulong_t used, free;
@@ -171,7 +169,7 @@ void MD5_Update(MD5_CTX *ctx, void *data, ulong_t size) {
}
internal_memcpy(&ctx->buffer[used], data, free);
- data = (unsigned char *)data + free;
+ data = (const unsigned char *)data + free;
size -= free;
body(ctx, ctx->buffer, 64);
}
@@ -238,7 +236,7 @@ MD5Hash md5_hash(const void *data, uptr size) {
MD5Hash res;
MD5_CTX ctx;
MD5_Init(&ctx);
- MD5_Update(&ctx, (void*)data, size);
+ MD5_Update(&ctx, data, size);
MD5_Final((unsigned char*)&res.hash[0], &ctx);
return res;
}
diff --git a/lib/tsan/rtl/tsan_mman.cc b/lib/tsan/rtl/tsan_mman.cc
index 8547f714be13f..285bdb34d91df 100644
--- a/lib/tsan/rtl/tsan_mman.cc
+++ b/lib/tsan/rtl/tsan_mman.cc
@@ -10,6 +10,7 @@
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
@@ -18,43 +19,17 @@
#include "tsan_flags.h"
// May be overriden by front-end.
-extern "C" void WEAK __tsan_malloc_hook(void *ptr, uptr size) {
+extern "C" void WEAK __sanitizer_malloc_hook(void *ptr, uptr size) {
(void)ptr;
(void)size;
}
-extern "C" void WEAK __tsan_free_hook(void *ptr) {
+extern "C" void WEAK __sanitizer_free_hook(void *ptr) {
(void)ptr;
}
namespace __tsan {
-COMPILER_CHECK(sizeof(MBlock) == 16);
-
-void MBlock::Lock() {
- atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
- uptr v = atomic_load(a, memory_order_relaxed);
- for (int iter = 0;; iter++) {
- if (v & 1) {
- if (iter < 10)
- proc_yield(20);
- else
- internal_sched_yield();
- v = atomic_load(a, memory_order_relaxed);
- continue;
- }
- if (atomic_compare_exchange_weak(a, &v, v | 1, memory_order_acquire))
- break;
- }
-}
-
-void MBlock::Unlock() {
- atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
- uptr v = atomic_load(a, memory_order_relaxed);
- DCHECK(v & 1);
- atomic_store(a, v & ~1, memory_order_relaxed);
-}
-
struct MapUnmapCallback {
void OnMap(uptr p, uptr size) const { }
void OnUnmap(uptr p, uptr size) const {
@@ -88,66 +63,58 @@ void AllocatorPrintStats() {
}
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
- if (!thr->in_signal_handler || !flags()->report_signal_unsafe)
+ if (atomic_load(&thr->in_signal_handler, memory_order_relaxed) == 0 ||
+ !flags()->report_signal_unsafe)
return;
- Context *ctx = CTX();
- StackTrace stack;
- stack.ObtainCurrent(thr, pc);
+ VarSizeStackTrace stack;
+ ObtainCurrentStack(thr, pc, &stack);
ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(ReportTypeSignalUnsafe);
if (!IsFiredSuppression(ctx, rep, stack)) {
- rep.AddStack(&stack);
- OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
+ rep.AddStack(stack, true);
+ OutputReport(thr, rep);
}
}
-void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
- CHECK_GT(thr->in_rtl, 0);
+void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
return AllocatorReturnNull();
void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
if (p == 0)
return 0;
- MBlock *b = new(allocator()->GetMetaData(p)) MBlock;
- b->Init(sz, thr->tid, CurrentStackId(thr, pc));
- if (CTX() && CTX()->initialized) {
- if (thr->ignore_reads_and_writes == 0)
- MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
- else
- MemoryResetRange(thr, pc, (uptr)p, sz);
- }
- DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
- SignalUnsafeCall(thr, pc);
+ if (ctx && ctx->initialized)
+ OnUserAlloc(thr, pc, (uptr)p, sz, true);
+ if (signal)
+ SignalUnsafeCall(thr, pc);
return p;
}
-void user_free(ThreadState *thr, uptr pc, void *p) {
- CHECK_GT(thr->in_rtl, 0);
- CHECK_NE(p, (void*)0);
- DPrintf("#%d: free(%p)\n", thr->tid, p);
- MBlock *b = (MBlock*)allocator()->GetMetaData(p);
- if (b->ListHead()) {
- MBlock::ScopedLock l(b);
- for (SyncVar *s = b->ListHead(); s;) {
- SyncVar *res = s;
- s = s->next;
- StatInc(thr, StatSyncDestroyed);
- res->mtx.Lock();
- res->mtx.Unlock();
- DestroyAndFree(res);
- }
- b->ListReset();
- }
- if (CTX() && CTX()->initialized && thr->in_rtl == 1) {
- if (thr->ignore_reads_and_writes == 0)
- MemoryRangeFreed(thr, pc, (uptr)p, b->Size());
- }
+void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
+ if (ctx && ctx->initialized)
+ OnUserFree(thr, pc, (uptr)p, true);
allocator()->Deallocate(&thr->alloc_cache, p);
- SignalUnsafeCall(thr, pc);
+ if (signal)
+ SignalUnsafeCall(thr, pc);
+}
+
+void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
+ DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
+ ctx->metamap.AllocBlock(thr, pc, p, sz);
+ if (write && thr->ignore_reads_and_writes == 0)
+ MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
+ else
+ MemoryResetRange(thr, pc, (uptr)p, sz);
+}
+
+void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
+ CHECK_NE(p, (void*)0);
+ uptr sz = ctx->metamap.FreeBlock(thr, pc, p);
+ DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
+ if (write && thr->ignore_reads_and_writes == 0)
+ MemoryRangeFreed(thr, pc, (uptr)p, sz);
}
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
- CHECK_GT(thr->in_rtl, 0);
void *p2 = 0;
// FIXME: Handle "shrinking" more efficiently,
// it seems that some software actually does this.
@@ -156,9 +123,8 @@ void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
if (p2 == 0)
return 0;
if (p) {
- MBlock *b = user_mblock(thr, p);
- CHECK_NE(b, 0);
- internal_memcpy(p2, p, min(b->Size(), sz));
+ uptr oldsz = user_alloc_usable_size(p);
+ internal_memcpy(p2, p, min(oldsz, sz));
}
}
if (p)
@@ -166,43 +132,29 @@ void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
return p2;
}
-uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p) {
- CHECK_GT(thr->in_rtl, 0);
+uptr user_alloc_usable_size(const void *p) {
if (p == 0)
return 0;
- MBlock *b = (MBlock*)allocator()->GetMetaData(p);
- return b ? b->Size() : 0;
-}
-
-MBlock *user_mblock(ThreadState *thr, void *p) {
- CHECK_NE(p, 0);
- Allocator *a = allocator();
- void *b = a->GetBlockBegin(p);
- if (b == 0)
- return 0;
- return (MBlock*)a->GetMetaData(b);
+ MBlock *b = ctx->metamap.GetBlock((uptr)p);
+ return b ? b->siz : 0;
}
void invoke_malloc_hook(void *ptr, uptr size) {
- Context *ctx = CTX();
ThreadState *thr = cur_thread();
- if (ctx == 0 || !ctx->initialized || thr->in_rtl)
+ if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
return;
- __tsan_malloc_hook(ptr, size);
+ __sanitizer_malloc_hook(ptr, size);
}
void invoke_free_hook(void *ptr) {
- Context *ctx = CTX();
ThreadState *thr = cur_thread();
- if (ctx == 0 || !ctx->initialized || thr->in_rtl)
+ if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
return;
- __tsan_free_hook(ptr);
+ __sanitizer_free_hook(ptr);
}
void *internal_alloc(MBlockType typ, uptr sz) {
ThreadState *thr = cur_thread();
- CHECK_GT(thr->in_rtl, 0);
- CHECK_LE(sz, InternalSizeClassMap::kMaxSize);
if (thr->nomalloc) {
thr->nomalloc = 0; // CHECK calls internal_malloc().
CHECK(0);
@@ -212,7 +164,6 @@ void *internal_alloc(MBlockType typ, uptr sz) {
void internal_free(void *p) {
ThreadState *thr = cur_thread();
- CHECK_GT(thr->in_rtl, 0);
if (thr->nomalloc) {
thr->nomalloc = 0; // CHECK calls internal_malloc().
CHECK(0);
@@ -225,51 +176,42 @@ void internal_free(void *p) {
using namespace __tsan;
extern "C" {
-uptr __tsan_get_current_allocated_bytes() {
- u64 stats[AllocatorStatCount];
+uptr __sanitizer_get_current_allocated_bytes() {
+ uptr stats[AllocatorStatCount];
allocator()->GetStats(stats);
- u64 m = stats[AllocatorStatMalloced];
- u64 f = stats[AllocatorStatFreed];
- return m >= f ? m - f : 1;
+ return stats[AllocatorStatAllocated];
}
-uptr __tsan_get_heap_size() {
- u64 stats[AllocatorStatCount];
+uptr __sanitizer_get_heap_size() {
+ uptr stats[AllocatorStatCount];
allocator()->GetStats(stats);
- u64 m = stats[AllocatorStatMmapped];
- u64 f = stats[AllocatorStatUnmapped];
- return m >= f ? m - f : 1;
+ return stats[AllocatorStatMapped];
}
-uptr __tsan_get_free_bytes() {
+uptr __sanitizer_get_free_bytes() {
return 1;
}
-uptr __tsan_get_unmapped_bytes() {
+uptr __sanitizer_get_unmapped_bytes() {
return 1;
}
-uptr __tsan_get_estimated_allocated_size(uptr size) {
+uptr __sanitizer_get_estimated_allocated_size(uptr size) {
return size;
}
-bool __tsan_get_ownership(void *p) {
+int __sanitizer_get_ownership(const void *p) {
return allocator()->GetBlockBegin(p) != 0;
}
-uptr __tsan_get_allocated_size(void *p) {
- if (p == 0)
- return 0;
- p = allocator()->GetBlockBegin(p);
- if (p == 0)
- return 0;
- MBlock *b = (MBlock*)allocator()->GetMetaData(p);
- return b->Size();
+uptr __sanitizer_get_allocated_size(const void *p) {
+ return user_alloc_usable_size(p);
}
void __tsan_on_thread_idle() {
ThreadState *thr = cur_thread();
allocator()->SwallowCache(&thr->alloc_cache);
internal_allocator()->SwallowCache(&thr->internal_alloc_cache);
+ ctx->metamap.OnThreadIdle(thr);
}
} // extern "C"
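
The malloc/free hooks are now the weak __sanitizer_* symbols, so a front-end can observe allocations by providing strong definitions. A hypothetical override, sketched under the assumption that uptr is unsigned long on the supported 64-bit targets:

typedef unsigned long uptr;  // mirrors the sanitizer uptr on 64-bit targets

static uptr g_total_allocated;  // plain counter, illustration only

extern "C" void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  g_total_allocated += size;  // must not re-enter malloc here
}

extern "C" void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
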
diff --git a/lib/tsan/rtl/tsan_mman.h b/lib/tsan/rtl/tsan_mman.h
index 19d555437f3ec..7d41fa864a263 100644
--- a/lib/tsan/rtl/tsan_mman.h
+++ b/lib/tsan/rtl/tsan_mman.h
@@ -26,15 +26,12 @@ void AllocatorPrintStats();
// For user allocations.
void *user_alloc(ThreadState *thr, uptr pc, uptr sz,
- uptr align = kDefaultAlignment);
+ uptr align = kDefaultAlignment, bool signal = true);
// Does not accept NULL.
-void user_free(ThreadState *thr, uptr pc, void *p);
+void user_free(ThreadState *thr, uptr pc, void *p, bool signal = true);
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz);
void *user_alloc_aligned(ThreadState *thr, uptr pc, uptr sz, uptr align);
-uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p);
-// Given the pointer p into a valid allocated block,
-// returns the descriptor of the block.
-MBlock *user_mblock(ThreadState *thr, void *p);
+uptr user_alloc_usable_size(const void *p);
// Invoking malloc/free hooks that may be installed by the user.
void invoke_malloc_hook(void *ptr, uptr size);
@@ -62,7 +59,6 @@ enum MBlockType {
MBlockSuppression,
MBlockExpectRace,
MBlockSignal,
- MBlockFD,
MBlockJmpBuf,
// This must be the last.
diff --git a/lib/tsan/rtl/tsan_mutex.cc b/lib/tsan/rtl/tsan_mutex.cc
index a92fd90fd9c15..9ea9bae21b505 100644
--- a/lib/tsan/rtl/tsan_mutex.cc
+++ b/lib/tsan/rtl/tsan_mutex.cc
@@ -25,28 +25,29 @@ namespace __tsan {
// The table fixes what mutexes can be locked under what mutexes.
// E.g. if the row for MutexTypeThreads contains MutexTypeReport,
// then Report mutex can be locked while under Threads mutex.
// The leaf mutexes can be locked under any other mutexes.
// Recursive locking is not supported.
-#if TSAN_DEBUG && !TSAN_GO
+#if TSAN_DEBUG && !SANITIZER_GO
const MutexType MutexTypeLeaf = (MutexType)-1;
static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
/*0 MutexTypeInvalid*/ {},
/*1 MutexTypeTrace*/ {MutexTypeLeaf},
/*2 MutexTypeThreads*/ {MutexTypeReport},
- /*3 MutexTypeReport*/ {MutexTypeSyncTab, MutexTypeSyncVar,
+ /*3 MutexTypeReport*/ {MutexTypeSyncVar,
MutexTypeMBlock, MutexTypeJavaMBlock},
- /*4 MutexTypeSyncVar*/ {},
- /*5 MutexTypeSyncTab*/ {MutexTypeSyncVar},
+ /*4 MutexTypeSyncVar*/ {MutexTypeDDetector},
+ /*5 MutexTypeSyncTab*/ {}, // unused
/*6 MutexTypeSlab*/ {MutexTypeLeaf},
/*7 MutexTypeAnnotations*/ {},
- /*8 MutexTypeAtExit*/ {MutexTypeSyncTab},
+ /*8 MutexTypeAtExit*/ {MutexTypeSyncVar},
/*9 MutexTypeMBlock*/ {MutexTypeSyncVar},
/*10 MutexTypeJavaMBlock*/ {MutexTypeSyncVar},
+ /*11 MutexTypeDDetector*/ {},
};
static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
#endif
void InitializeMutex() {
-#if TSAN_DEBUG && !TSAN_GO
+#if TSAN_DEBUG && !SANITIZER_GO
// Build the "can lock" adjacency matrix.
// If [i][j]==true, then one can lock mutex j while under mutex i.
const int N = MutexTypeCount;
@@ -123,12 +124,12 @@ void InitializeMutex() {
#endif
}
-DeadlockDetector::DeadlockDetector() {
+InternalDeadlockDetector::InternalDeadlockDetector() {
// Rely on zero initialization because some mutexes can be locked before ctor.
}
-#if TSAN_DEBUG && !TSAN_GO
-void DeadlockDetector::Lock(MutexType t) {
+#if TSAN_DEBUG && !SANITIZER_GO
+void InternalDeadlockDetector::Lock(MutexType t) {
// Printf("LOCK %d @%zu\n", t, seq_ + 1);
CHECK_GT(t, MutexTypeInvalid);
CHECK_LT(t, MutexTypeCount);
@@ -155,13 +156,25 @@ void DeadlockDetector::Lock(MutexType t) {
}
}
-void DeadlockDetector::Unlock(MutexType t) {
+void InternalDeadlockDetector::Unlock(MutexType t) {
// Printf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]);
CHECK(locked_[t]);
locked_[t] = 0;
}
+
+void InternalDeadlockDetector::CheckNoLocks() {
+ for (int i = 0; i != MutexTypeCount; i++) {
+ CHECK_EQ(locked_[i], 0);
+ }
+}
#endif
+void CheckNoLocks(ThreadState *thr) {
+#if TSAN_DEBUG && !SANITIZER_GO
+ thr->internal_deadlock_detector.CheckNoLocks();
+#endif
+}
+
const uptr kUnlocked = 0;
const uptr kWriteLock = 1;
const uptr kReadLock = 2;
@@ -209,8 +222,8 @@ Mutex::~Mutex() {
}
void Mutex::Lock() {
-#if TSAN_DEBUG && !TSAN_GO
- cur_thread()->deadlock_detector.Lock(type_);
+#if TSAN_DEBUG && !SANITIZER_GO
+ cur_thread()->internal_deadlock_detector.Lock(type_);
#endif
uptr cmp = kUnlocked;
if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
@@ -221,7 +234,7 @@ void Mutex::Lock() {
cmp = kUnlocked;
if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
memory_order_acquire)) {
-#if TSAN_COLLECT_STATS
+#if TSAN_COLLECT_STATS && !SANITIZER_GO
StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
return;
@@ -234,14 +247,14 @@ void Mutex::Unlock() {
uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
(void)prev;
DCHECK_NE(prev & kWriteLock, 0);
-#if TSAN_DEBUG && !TSAN_GO
- cur_thread()->deadlock_detector.Unlock(type_);
+#if TSAN_DEBUG && !SANITIZER_GO
+ cur_thread()->internal_deadlock_detector.Unlock(type_);
#endif
}
void Mutex::ReadLock() {
-#if TSAN_DEBUG && !TSAN_GO
- cur_thread()->deadlock_detector.Lock(type_);
+#if TSAN_DEBUG && !SANITIZER_GO
+ cur_thread()->internal_deadlock_detector.Lock(type_);
#endif
uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
if ((prev & kWriteLock) == 0)
@@ -249,7 +262,7 @@ void Mutex::ReadLock() {
for (Backoff backoff; backoff.Do();) {
prev = atomic_load(&state_, memory_order_acquire);
if ((prev & kWriteLock) == 0) {
-#if TSAN_COLLECT_STATS
+#if TSAN_COLLECT_STATS && !SANITIZER_GO
StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
return;
@@ -262,8 +275,8 @@ void Mutex::ReadUnlock() {
(void)prev;
DCHECK_EQ(prev & kWriteLock, 0);
DCHECK_GT(prev & ~kWriteLock, 0);
-#if TSAN_DEBUG && !TSAN_GO
- cur_thread()->deadlock_detector.Unlock(type_);
+#if TSAN_DEBUG && !SANITIZER_GO
+ cur_thread()->internal_deadlock_detector.Unlock(type_);
#endif
}
diff --git a/lib/tsan/rtl/tsan_mutex.h b/lib/tsan/rtl/tsan_mutex.h
index a2b489107a985..7bb1c48fcac81 100644
--- a/lib/tsan/rtl/tsan_mutex.h
+++ b/lib/tsan/rtl/tsan_mutex.h
@@ -31,6 +31,7 @@ enum MutexType {
MutexTypeAtExit,
MutexTypeMBlock,
MutexTypeJavaMBlock,
+ MutexTypeDDetector,
// This must be the last.
MutexTypeCount
@@ -65,11 +66,12 @@ class Mutex {
typedef GenericScopedLock<Mutex> Lock;
typedef GenericScopedReadLock<Mutex> ReadLock;
-class DeadlockDetector {
+class InternalDeadlockDetector {
public:
- DeadlockDetector();
+ InternalDeadlockDetector();
void Lock(MutexType t);
void Unlock(MutexType t);
+ void CheckNoLocks();
private:
u64 seq_;
u64 locked_[MutexTypeCount];
@@ -77,6 +79,10 @@ class DeadlockDetector {
void InitializeMutex();
+// Checks that the current thread does not hold any runtime locks
+// (e.g. when returning from an interceptor).
+void CheckNoLocks(ThreadState *thr);
+
} // namespace __tsan
#endif // TSAN_MUTEX_H
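
A brief sketch of where the new CheckNoLocks() helper is meant to be called (not part of the patch; the wrapper name is hypothetical):

// Typical epilogue when the runtime hands control back to user code,
// e.g. at the end of an interceptor. In TSAN_DEBUG && !SANITIZER_GO builds
// this asserts that no internal runtime mutex is still held.
static void RuntimeEpilogue(ThreadState *thr) {
  ProcessPendingSignals(thr);
  CheckNoLocks(thr);
}
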
diff --git a/lib/tsan/rtl/tsan_mutexset.h b/lib/tsan/rtl/tsan_mutexset.h
index eebfd4d70a142..68f0ec26fa256 100644
--- a/lib/tsan/rtl/tsan_mutexset.h
+++ b/lib/tsan/rtl/tsan_mutexset.h
@@ -38,19 +38,24 @@ class MutexSet {
uptr Size() const;
Desc Get(uptr i) const;
+ void operator=(const MutexSet &other) {
+ internal_memcpy(this, &other, sizeof(*this));
+ }
+
private:
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
uptr size_;
Desc descs_[kMaxSize];
#endif
void RemovePos(uptr i);
+ MutexSet(const MutexSet&);
};
// Go does not have mutexes, so do not spend memory and time.
// (Go sync.Mutex is actually a semaphore -- can be unlocked
// in different goroutine).
-#ifdef TSAN_GO
+#ifdef SANITIZER_GO
MutexSet::MutexSet() {}
void MutexSet::Add(u64 id, bool write, u64 epoch) {}
void MutexSet::Del(u64 id, bool write) {}
@@ -62,4 +67,4 @@ MutexSet::Desc MutexSet::Get(uptr i) const { return Desc(); }
} // namespace __tsan
-#endif // TSAN_REPORT_H
+#endif // TSAN_MUTEXSET_H
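
The tsan_platform.h diff below replaces the old C/C++ address-space layout and shadow mapping. As a worked example of the new MemToShadow() formula (a sketch, assuming kShadowCell = 8 and kShadowCnt = 4 from tsan_defs.h):

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kAppMemMsk  = 0x7c0000000000ull;
  const uint64_t kAppMemXor  = 0x020000000000ull;
  const uint64_t kShadowCell = 8;   // app bytes per shadow cell (assumed)
  const uint64_t kShadowCnt  = 4;   // shadow cells per app cell (assumed)

  uint64_t x = 0x7d0000001234ull;   // an address inside the heap range
  uint64_t s = ((x & ~(kAppMemMsk | (kShadowCell - 1))) ^ kAppMemXor)
               * kShadowCnt;
  // 0x7d0000001234 -> 0x010000001230 -> 0x030000001230 -> 0x0c00000048c0,
  // which lies inside the shadow range [0x020000000000, 0x100000000000).
  assert(s == 0x0c00000048c0ull);
  return 0;
}
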
diff --git a/lib/tsan/rtl/tsan_platform.h b/lib/tsan/rtl/tsan_platform.h
index 32e22ba604341..270a7519dd0ae 100644
--- a/lib/tsan/rtl/tsan_platform.h
+++ b/lib/tsan/rtl/tsan_platform.h
@@ -12,161 +12,259 @@
// Platform-specific code.
//===----------------------------------------------------------------------===//
+#ifndef TSAN_PLATFORM_H
+#define TSAN_PLATFORM_H
+
+#if !defined(__LP64__) && !defined(_WIN64)
+# error "Only 64-bit is supported"
+#endif
+
+#include "tsan_defs.h"
+#include "tsan_trace.h"
+
+namespace __tsan {
+
+#if !defined(SANITIZER_GO)
+
/*
-C++ linux memory layout:
-0000 0000 0000 - 03c0 0000 0000: protected
-03c0 0000 0000 - 1000 0000 0000: shadow
-1000 0000 0000 - 6000 0000 0000: protected
-6000 0000 0000 - 6200 0000 0000: traces
-6200 0000 0000 - 7d00 0000 0000: -
-7d00 0000 0000 - 7e00 0000 0000: heap
-7e00 0000 0000 - 7fff ffff ffff: modules and main thread stack
-
-C++ COMPAT linux memory layout:
-0000 0000 0000 - 0400 0000 0000: protected
-0400 0000 0000 - 1000 0000 0000: shadow
-1000 0000 0000 - 2900 0000 0000: protected
-2900 0000 0000 - 2c00 0000 0000: modules
-2c00 0000 0000 - 6000 0000 0000: -
+C/C++ on linux and freebsd
+0000 0000 1000 - 0100 0000 0000: main binary and/or MAP_32BIT mappings
+0100 0000 0000 - 0200 0000 0000: -
+0200 0000 0000 - 1000 0000 0000: shadow
+1000 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
+4000 0000 0000 - 6000 0000 0000: -
6000 0000 0000 - 6200 0000 0000: traces
6200 0000 0000 - 7d00 0000 0000: -
7d00 0000 0000 - 7e00 0000 0000: heap
-7e00 0000 0000 - 7f00 0000 0000: -
-7f00 0000 0000 - 7fff ffff ffff: main thread stack
+7e00 0000 0000 - 7e80 0000 0000: -
+7e80 0000 0000 - 8000 0000 0000: modules and main thread stack
+*/
-Go linux and darwin memory layout:
-0000 0000 0000 - 0000 1000 0000: executable
-0000 1000 0000 - 00f8 0000 0000: -
-00c0 0000 0000 - 00e0 0000 0000: heap
-00e0 0000 0000 - 1000 0000 0000: -
-1000 0000 0000 - 1380 0000 0000: shadow
-1460 0000 0000 - 6000 0000 0000: -
-6000 0000 0000 - 6200 0000 0000: traces
-6200 0000 0000 - 7fff ffff ffff: -
+const uptr kMetaShadowBeg = 0x300000000000ull;
+const uptr kMetaShadowEnd = 0x400000000000ull;
+const uptr kTraceMemBeg = 0x600000000000ull;
+const uptr kTraceMemEnd = 0x620000000000ull;
+const uptr kShadowBeg = 0x020000000000ull;
+const uptr kShadowEnd = 0x100000000000ull;
+const uptr kHeapMemBeg = 0x7d0000000000ull;
+const uptr kHeapMemEnd = 0x7e0000000000ull;
+const uptr kLoAppMemBeg = 0x000000001000ull;
+const uptr kLoAppMemEnd = 0x010000000000ull;
+const uptr kHiAppMemBeg = 0x7e8000000000ull;
+const uptr kHiAppMemEnd = 0x800000000000ull;
+const uptr kAppMemMsk = 0x7c0000000000ull;
+const uptr kAppMemXor = 0x020000000000ull;
-Go windows memory layout:
-0000 0000 0000 - 0000 1000 0000: executable
-0000 1000 0000 - 00f8 0000 0000: -
+ALWAYS_INLINE
+bool IsAppMem(uptr mem) {
+ return (mem >= kHeapMemBeg && mem < kHeapMemEnd) ||
+ (mem >= kLoAppMemBeg && mem < kLoAppMemEnd) ||
+ (mem >= kHiAppMemBeg && mem < kHiAppMemEnd);
+}
+
+ALWAYS_INLINE
+bool IsShadowMem(uptr mem) {
+ return mem >= kShadowBeg && mem <= kShadowEnd;
+}
+
+ALWAYS_INLINE
+bool IsMetaMem(uptr mem) {
+ return mem >= kMetaShadowBeg && mem <= kMetaShadowEnd;
+}
+
+ALWAYS_INLINE
+uptr MemToShadow(uptr x) {
+ DCHECK(IsAppMem(x));
+ return (((x) & ~(kAppMemMsk | (kShadowCell - 1)))
+ ^ kAppMemXor) * kShadowCnt;
+}
+
+ALWAYS_INLINE
+u32 *MemToMeta(uptr x) {
+ DCHECK(IsAppMem(x));
+ return (u32*)(((((x) & ~(kAppMemMsk | (kMetaShadowCell - 1)))
+ ^ kAppMemXor) / kMetaShadowCell * kMetaShadowSize) | kMetaShadowBeg);
+}
+
+ALWAYS_INLINE
+uptr ShadowToMem(uptr s) {
+ CHECK(IsShadowMem(s));
+ if (s >= MemToShadow(kLoAppMemBeg) && s <= MemToShadow(kLoAppMemEnd - 1))
+ return (s / kShadowCnt) ^ kAppMemXor;
+ else
+ return ((s / kShadowCnt) ^ kAppMemXor) | kAppMemMsk;
+}
+
+static USED uptr UserRegions[] = {
+ kLoAppMemBeg, kLoAppMemEnd,
+ kHiAppMemBeg, kHiAppMemEnd,
+ kHeapMemBeg, kHeapMemEnd,
+};
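// A minimal standalone sketch (not part of this patch) that round-trips one
// heap address through the C/C++ mapping above. kShadowCell == 8 and
// kShadowCnt == 4 are assumed from tsan_defs.h; the constants are copied
// locally so the snippet compiles on its own.
#include <cassert>
#include <cstdint>
int main() {
  const uint64_t kAppMemMsk = 0x7c0000000000ull;
  const uint64_t kAppMemXor = 0x020000000000ull;
  const uint64_t kShadowCell = 8, kShadowCnt = 4;
  uint64_t x = 0x7d0000001234ull;  // an address in the heap range
  // MemToShadow:
  uint64_t s = ((x & ~(kAppMemMsk | (kShadowCell - 1))) ^ kAppMemXor) * kShadowCnt;
  assert(s == 0x0c00000048c0ull);  // lands inside [kShadowBeg, kShadowEnd)
  // ShadowToMem, high-range branch (s is above MemToShadow(kLoAppMemEnd - 1)):
  uint64_t back = ((s / kShadowCnt) ^ kAppMemXor) | kAppMemMsk;
  assert(back == (x & ~(kShadowCell - 1)));  // back to the start of the 8-byte cell
  return 0;
}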
+
+#elif defined(SANITIZER_GO) && !SANITIZER_WINDOWS
+
+/* Go on linux, darwin and freebsd
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00c0 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
-00e0 0000 0000 - 0100 0000 0000: -
-0100 0000 0000 - 0560 0000 0000: shadow
-0560 0000 0000 - 0760 0000 0000: traces
-0760 0000 0000 - 07ff ffff ffff: -
+00e0 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 2380 0000 0000: shadow
+2380 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
+4000 0000 0000 - 6000 0000 0000: -
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 8000 0000 0000: -
*/
-#ifndef TSAN_PLATFORM_H
-#define TSAN_PLATFORM_H
+const uptr kMetaShadowBeg = 0x300000000000ull;
+const uptr kMetaShadowEnd = 0x400000000000ull;
+const uptr kTraceMemBeg = 0x600000000000ull;
+const uptr kTraceMemEnd = 0x620000000000ull;
+const uptr kShadowBeg = 0x200000000000ull;
+const uptr kShadowEnd = 0x238000000000ull;
+const uptr kAppMemBeg = 0x000000001000ull;
+const uptr kAppMemEnd = 0x00e000000000ull;
-#include "tsan_defs.h"
-#include "tsan_trace.h"
+ALWAYS_INLINE
+bool IsAppMem(uptr mem) {
+ return mem >= kAppMemBeg && mem < kAppMemEnd;
+}
-#if defined(__LP64__) || defined(_WIN64)
-namespace __tsan {
+ALWAYS_INLINE
+bool IsShadowMem(uptr mem) {
+ return mem >= kShadowBeg && mem <= kShadowEnd;
+}
-#if defined(TSAN_GO)
-static const uptr kLinuxAppMemBeg = 0x000000000000ULL;
-static const uptr kLinuxAppMemEnd = 0x04dfffffffffULL;
-# if SANITIZER_WINDOWS
-static const uptr kLinuxShadowMsk = 0x010000000000ULL;
-# else
-static const uptr kLinuxShadowMsk = 0x200000000000ULL;
-# endif
-// TSAN_COMPAT_SHADOW is intended for COMPAT virtual memory layout,
-// when memory addresses are of the 0x2axxxxxxxxxx form.
-// The option is enabled with 'setarch x86_64 -L'.
-#elif defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
-static const uptr kLinuxAppMemBeg = 0x290000000000ULL;
-static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL;
-#else
-static const uptr kLinuxAppMemBeg = 0x7cf000000000ULL;
-static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL;
-#endif
+ALWAYS_INLINE
+bool IsMetaMem(uptr mem) {
+ return mem >= kMetaShadowBeg && mem <= kMetaShadowEnd;
+}
-static const uptr kLinuxAppMemMsk = 0x7c0000000000ULL;
+ALWAYS_INLINE
+uptr MemToShadow(uptr x) {
+ DCHECK(IsAppMem(x));
+ return ((x & ~(kShadowCell - 1)) * kShadowCnt) | kShadowBeg;
+}
-#if SANITIZER_WINDOWS
-const uptr kTraceMemBegin = 0x056000000000ULL;
-#else
-const uptr kTraceMemBegin = 0x600000000000ULL;
-#endif
-const uptr kTraceMemSize = 0x020000000000ULL;
+ALWAYS_INLINE
+u32 *MemToMeta(uptr x) {
+ DCHECK(IsAppMem(x));
+ return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
+ kMetaShadowCell * kMetaShadowSize) | kMetaShadowBeg);
+}
-// This has to be a macro to allow constant initialization of constants below.
-#ifndef TSAN_GO
-#define MemToShadow(addr) \
- (((addr) & ~(kLinuxAppMemMsk | (kShadowCell - 1))) * kShadowCnt)
-#else
-#define MemToShadow(addr) \
- ((((addr) & ~(kShadowCell - 1)) * kShadowCnt) | kLinuxShadowMsk)
-#endif
+ALWAYS_INLINE
+uptr ShadowToMem(uptr s) {
+ CHECK(IsShadowMem(s));
+ return (s & ~kShadowBeg) / kShadowCnt;
+}
+
+static USED uptr UserRegions[] = {
+ kAppMemBeg, kAppMemEnd,
+};
-static const uptr kLinuxShadowBeg = MemToShadow(kLinuxAppMemBeg);
-static const uptr kLinuxShadowEnd =
- MemToShadow(kLinuxAppMemEnd) | 0xff;
+#elif defined(SANITIZER_GO) && SANITIZER_WINDOWS
-static inline bool IsAppMem(uptr mem) {
- return mem >= kLinuxAppMemBeg && mem <= kLinuxAppMemEnd;
+/* Go on windows
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00f8 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 0100 0000 0000: -
+0100 0000 0000 - 0380 0000 0000: shadow
+0380 0000 0000 - 0560 0000 0000: -
+0560 0000 0000 - 0760 0000 0000: traces
+0760 0000 0000 - 07d0 0000 0000: metainfo (memory blocks and sync objects)
+07d0 0000 0000 - 8000 0000 0000: -
+*/
+
+const uptr kMetaShadowBeg = 0x076000000000ull;
+const uptr kMetaShadowEnd = 0x07d000000000ull;
+const uptr kTraceMemBeg = 0x056000000000ull;
+const uptr kTraceMemEnd = 0x076000000000ull;
+const uptr kShadowBeg = 0x010000000000ull;
+const uptr kShadowEnd = 0x038000000000ull;
+const uptr kAppMemBeg = 0x000000001000ull;
+const uptr kAppMemEnd = 0x00e000000000ull;
+
+ALWAYS_INLINE
+bool IsAppMem(uptr mem) {
+ return mem >= kAppMemBeg && mem < kAppMemEnd;
}
-static inline bool IsShadowMem(uptr mem) {
- return mem >= kLinuxShadowBeg && mem <= kLinuxShadowEnd;
+ALWAYS_INLINE
+bool IsShadowMem(uptr mem) {
+ return mem >= kShadowBeg && mem <= kShadowEnd;
}
-static inline uptr ShadowToMem(uptr shadow) {
- CHECK(IsShadowMem(shadow));
-#ifdef TSAN_GO
- return (shadow & ~kLinuxShadowMsk) / kShadowCnt;
-#else
- return (shadow / kShadowCnt) | kLinuxAppMemMsk;
-#endif
+ALWAYS_INLINE
+bool IsMetaMem(uptr mem) {
+ return mem >= kMetaShadowBeg && mem <= kMetaShadowEnd;
}
-// For COMPAT mapping returns an alternative address
-// that mapped to the same shadow address.
-// COMPAT mapping is not quite one-to-one.
-static inline uptr AlternativeAddress(uptr addr) {
-#if defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
- return (addr & ~kLinuxAppMemMsk) | 0x280000000000ULL;
-#else
- return 0;
-#endif
+ALWAYS_INLINE
+uptr MemToShadow(uptr x) {
+ DCHECK(IsAppMem(x));
+ return ((x & ~(kShadowCell - 1)) * kShadowCnt) | kShadowBeg;
}
-void FlushShadowMemory();
-void WriteMemoryProfile(char *buf, uptr buf_size);
-uptr GetRSS();
+ALWAYS_INLINE
+u32 *MemToMeta(uptr x) {
+ DCHECK(IsAppMem(x));
+ return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
+      kMetaShadowCell * kMetaShadowSize) | kMetaShadowBeg);
+}
+
+ALWAYS_INLINE
+uptr ShadowToMem(uptr s) {
+ CHECK(IsShadowMem(s));
+ // FIXME(dvyukov): this is most likely wrong as the mapping is not bijection.
+  return (s & ~kShadowBeg) / kShadowCnt;
+}
-const char *InitializePlatform();
-void FinalizePlatform();
+static USED uptr UserRegions[] = {
+ kAppMemBeg, kAppMemEnd,
+};
+
+#else
+# error "Unknown platform"
+#endif
// The additional page is to catch shadow stack overflow as paging fault.
-const uptr kTotalTraceSize = (kTraceSize * sizeof(Event) + sizeof(Trace) + 4096
- + 4095) & ~4095;
+// Windows wants 64K alignment for mmaps.
+const uptr kTotalTraceSize = (kTraceSize * sizeof(Event) + sizeof(Trace)
+ + (64 << 10) + (64 << 10) - 1) & ~((64 << 10) - 1);
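// Worked example of the rounding above (not from the patch): for a raw
// trace+header size of 100000 bytes,
//   (100000 + (64 << 10) + (64 << 10) - 1) & ~((64 << 10) - 1) == 196608 == 3 * 64K,
// i.e. the size rounded up to a 64K boundary plus one extra 64K guard page.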
uptr ALWAYS_INLINE GetThreadTrace(int tid) {
- uptr p = kTraceMemBegin + (uptr)tid * kTotalTraceSize;
- DCHECK_LT(p, kTraceMemBegin + kTraceMemSize);
+ uptr p = kTraceMemBeg + (uptr)tid * kTotalTraceSize;
+ DCHECK_LT(p, kTraceMemEnd);
return p;
}
uptr ALWAYS_INLINE GetThreadTraceHeader(int tid) {
- uptr p = kTraceMemBegin + (uptr)tid * kTotalTraceSize
+ uptr p = kTraceMemBeg + (uptr)tid * kTotalTraceSize
+ kTraceSize * sizeof(Event);
- DCHECK_LT(p, kTraceMemBegin + kTraceMemSize);
+ DCHECK_LT(p, kTraceMemEnd);
return p;
}
-void internal_start_thread(void(*func)(void*), void *arg);
+void InitializePlatform();
+void FlushShadowMemory();
+void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive);
+
+void *internal_start_thread(void(*func)(void*), void *arg);
+void internal_join_thread(void *th);
// Says whether the addr relates to a global var.
// Guesses with high probability, may yield both false positives and negatives.
bool IsGlobalVar(uptr addr);
int ExtractResolvFDs(void *state, int *fds, int nfd);
+int ExtractRecvmsgFDs(void *msg, int *fds, int nfd);
-} // namespace __tsan
+int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
+ void *abstime), void *c, void *m, void *abstime,
+ void(*cleanup)(void *arg), void *arg);
-#else // defined(__LP64__) || defined(_WIN64)
-# error "Only 64-bit is supported"
-#endif
+} // namespace __tsan
#endif // TSAN_PLATFORM_H
diff --git a/lib/tsan/rtl/tsan_platform_linux.cc b/lib/tsan/rtl/tsan_platform_linux.cc
index 906b5dc73be4a..4dcfa558529c5 100644
--- a/lib/tsan/rtl/tsan_platform_linux.cc
+++ b/lib/tsan/rtl/tsan_platform_linux.cc
@@ -9,17 +9,18 @@
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
-// Linux-specific code.
+// Linux- and FreeBSD-specific code.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_LINUX
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_flags.h"
@@ -32,8 +33,8 @@
#include <string.h>
#include <stdarg.h>
#include <sys/mman.h>
-#include <sys/prctl.h>
#include <sys/syscall.h>
+#include <sys/socket.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/resource.h>
@@ -42,9 +43,10 @@
#include <errno.h>
#include <sched.h>
#include <dlfcn.h>
+#if SANITIZER_LINUX
#define __need_res_state
#include <resolv.h>
-#include <malloc.h>
+#endif
#ifdef sa_handler
# undef sa_handler
@@ -54,87 +56,84 @@
# undef sa_sigaction
#endif
-extern "C" struct mallinfo __libc_mallinfo();
+#if SANITIZER_FREEBSD
+extern "C" void *__libc_stack_end;
+void *__libc_stack_end = 0;
+#endif
namespace __tsan {
-const uptr kPageSize = 4096;
+static uptr g_data_start;
+static uptr g_data_end;
-#ifndef TSAN_GO
-ScopedInRtl::ScopedInRtl()
- : thr_(cur_thread()) {
- in_rtl_ = thr_->in_rtl;
- thr_->in_rtl++;
- errno_ = errno;
-}
+const uptr kPageSize = 4096;
-ScopedInRtl::~ScopedInRtl() {
- thr_->in_rtl--;
- errno = errno_;
- CHECK_EQ(in_rtl_, thr_->in_rtl);
-}
+enum {
+ MemTotal = 0,
+ MemShadow = 1,
+ MemMeta = 2,
+ MemFile = 3,
+ MemMmap = 4,
+ MemTrace = 5,
+ MemHeap = 6,
+ MemOther = 7,
+ MemCount = 8,
+};
+
+void FillProfileCallback(uptr p, uptr rss, bool file,
+ uptr *mem, uptr stats_size) {
+ mem[MemTotal] += rss;
+ if (p >= kShadowBeg && p < kShadowEnd)
+ mem[MemShadow] += rss;
+ else if (p >= kMetaShadowBeg && p < kMetaShadowEnd)
+ mem[MemMeta] += rss;
+#ifndef SANITIZER_GO
+ else if (p >= kHeapMemBeg && p < kHeapMemEnd)
+ mem[MemHeap] += rss;
+ else if (p >= kLoAppMemBeg && p < kLoAppMemEnd)
+ mem[file ? MemFile : MemMmap] += rss;
+ else if (p >= kHiAppMemBeg && p < kHiAppMemEnd)
+ mem[file ? MemFile : MemMmap] += rss;
#else
-ScopedInRtl::ScopedInRtl() {
-}
-
-ScopedInRtl::~ScopedInRtl() {
-}
+ else if (p >= kAppMemBeg && p < kAppMemEnd)
+ mem[file ? MemFile : MemMmap] += rss;
#endif
-
-void FillProfileCallback(uptr start, uptr rss, bool file,
- uptr *mem, uptr stats_size) {
- CHECK_EQ(7, stats_size);
- mem[6] += rss; // total
- start >>= 40;
- if (start < 0x10) // shadow
- mem[0] += rss;
- else if (start >= 0x20 && start < 0x30) // compat modules
- mem[file ? 1 : 2] += rss;
- else if (start >= 0x7e) // modules
- mem[file ? 1 : 2] += rss;
- else if (start >= 0x60 && start < 0x62) // traces
- mem[3] += rss;
- else if (start >= 0x7d && start < 0x7e) // heap
- mem[4] += rss;
- else // other
- mem[5] += rss;
-}
-
-void WriteMemoryProfile(char *buf, uptr buf_size) {
- uptr mem[7] = {};
- __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
- char *buf_pos = buf;
- char *buf_end = buf + buf_size;
- buf_pos += internal_snprintf(buf_pos, buf_end - buf_pos,
- "RSS %zd MB: shadow:%zd file:%zd mmap:%zd trace:%zd heap:%zd other:%zd\n",
- mem[6] >> 20, mem[0] >> 20, mem[1] >> 20, mem[2] >> 20,
- mem[3] >> 20, mem[4] >> 20, mem[5] >> 20);
- struct mallinfo mi = __libc_mallinfo();
- buf_pos += internal_snprintf(buf_pos, buf_end - buf_pos,
- "mallinfo: arena=%d mmap=%d fordblks=%d keepcost=%d\n",
- mi.arena >> 20, mi.hblkhd >> 20, mi.fordblks >> 20, mi.keepcost >> 20);
+ else if (p >= kTraceMemBeg && p < kTraceMemEnd)
+ mem[MemTrace] += rss;
+ else
+ mem[MemOther] += rss;
}
-uptr GetRSS() {
- uptr mem[7] = {};
+void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
+ uptr mem[MemCount] = {};
__sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
- return mem[6];
+ StackDepotStats *stacks = StackDepotGetStats();
+ internal_snprintf(buf, buf_size,
+ "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
+ " trace:%zd heap:%zd other:%zd stacks=%zd[%zd] nthr=%zd/%zd\n",
+ mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20,
+ mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20,
+ mem[MemHeap] >> 20, mem[MemOther] >> 20,
+ stacks->allocated >> 20, stacks->n_uniq_ids,
+ nlive, nthread);
}
-
+#if SANITIZER_LINUX
void FlushShadowMemoryCallback(
const SuspendedThreadsList &suspended_threads_list,
void *argument) {
- FlushUnneededShadowMemory(kLinuxShadowBeg, kLinuxShadowEnd - kLinuxShadowBeg);
+ FlushUnneededShadowMemory(kShadowBeg, kShadowEnd - kShadowBeg);
}
+#endif
void FlushShadowMemory() {
+#if SANITIZER_LINUX
StopTheWorld(FlushShadowMemoryCallback, 0);
+#endif
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
static void ProtectRange(uptr beg, uptr end) {
- ScopedInRtl in_rtl;
CHECK_LE(beg, end);
if (beg == end)
return;
@@ -144,9 +143,7 @@ static void ProtectRange(uptr beg, uptr end) {
Die();
}
}
-#endif
-#ifndef TSAN_GO
// Mark shadow for .rodata sections with the special kShadowRodata marker.
// Accesses to .rodata can't race, so this saves time, memory and trace space.
static void MapRodata() {
@@ -160,17 +157,19 @@ static void MapRodata() {
#endif
if (tmpdir == 0)
return;
- char filename[256];
- internal_snprintf(filename, sizeof(filename), "%s/tsan.rodata.%d",
+ char name[256];
+ internal_snprintf(name, sizeof(name), "%s/tsan.rodata.%d",
tmpdir, (int)internal_getpid());
- uptr openrv = internal_open(filename, O_RDWR | O_CREAT | O_EXCL, 0600);
+ uptr openrv = internal_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
if (internal_iserror(openrv))
return;
+ internal_unlink(name); // Unlink it now, so that we can reuse the buffer.
fd_t fd = openrv;
// Fill the file with kShadowRodata.
const uptr kMarkerSize = 512 * 1024 / sizeof(u64);
InternalScopedBuffer<u64> marker(kMarkerSize);
- for (u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++)
+ // volatile to prevent insertion of memset
+ for (volatile u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++)
*p = kShadowRodata;
internal_write(fd, marker.data(), marker.size());
// Map the file into memory.
@@ -178,13 +177,12 @@ static void MapRodata() {
MAP_PRIVATE | MAP_ANONYMOUS, fd, 0);
if (internal_iserror(page)) {
internal_close(fd);
- internal_unlink(filename);
return;
}
// Map the file into shadow of .rodata sections.
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
uptr start, end, offset, prot;
- char name[128];
+ // Reusing the buffer 'name'.
while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name), &prot)) {
if (name[0] != 0 && name[0] != '['
&& (prot & MemoryMappingLayout::kProtectionRead)
@@ -201,66 +199,63 @@ static void MapRodata() {
}
}
internal_close(fd);
- internal_unlink(filename);
}
void InitializeShadowMemory() {
- uptr shadow = (uptr)MmapFixedNoReserve(kLinuxShadowBeg,
- kLinuxShadowEnd - kLinuxShadowBeg);
- if (shadow != kLinuxShadowBeg) {
+ // Map memory shadow.
+ uptr shadow = (uptr)MmapFixedNoReserve(kShadowBeg,
+ kShadowEnd - kShadowBeg);
+ if (shadow != kShadowBeg) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
Printf("FATAL: Make sure to compile with -fPIE and "
- "to link with -pie (%p, %p).\n", shadow, kLinuxShadowBeg);
+ "to link with -pie (%p, %p).\n", shadow, kShadowBeg);
Die();
}
- const uptr kClosedLowBeg = 0x200000;
- const uptr kClosedLowEnd = kLinuxShadowBeg - 1;
- const uptr kClosedMidBeg = kLinuxShadowEnd + 1;
- const uptr kClosedMidEnd = min(kLinuxAppMemBeg, kTraceMemBegin);
- ProtectRange(kClosedLowBeg, kClosedLowEnd);
- ProtectRange(kClosedMidBeg, kClosedMidEnd);
- DPrintf("kClosedLow %zx-%zx (%zuGB)\n",
- kClosedLowBeg, kClosedLowEnd, (kClosedLowEnd - kClosedLowBeg) >> 30);
- DPrintf("kLinuxShadow %zx-%zx (%zuGB)\n",
- kLinuxShadowBeg, kLinuxShadowEnd,
- (kLinuxShadowEnd - kLinuxShadowBeg) >> 30);
- DPrintf("kClosedMid %zx-%zx (%zuGB)\n",
- kClosedMidBeg, kClosedMidEnd, (kClosedMidEnd - kClosedMidBeg) >> 30);
- DPrintf("kLinuxAppMem %zx-%zx (%zuGB)\n",
- kLinuxAppMemBeg, kLinuxAppMemEnd,
- (kLinuxAppMemEnd - kLinuxAppMemBeg) >> 30);
- DPrintf("stack %zx\n", (uptr)&shadow);
-
- MapRodata();
-}
+ // This memory range is used for thread stacks and large user mmaps.
+  // Frequently a thread uses only a small part of its stack, and similarly
+  // a program uses only a small part of a large mmap. On some programs
+  // we see a 20% memory usage reduction without huge pages for this range.
+#ifdef MADV_NOHUGEPAGE
+ madvise((void*)MemToShadow(0x7f0000000000ULL),
+ 0x10000000000ULL * kShadowMultiplier, MADV_NOHUGEPAGE);
#endif
-
-static uptr g_data_start;
-static uptr g_data_end;
-
-#ifndef TSAN_GO
-static void CheckPIE() {
- // Ensure that the binary is indeed compiled with -pie.
- MemoryMappingLayout proc_maps(true);
- uptr start, end;
- if (proc_maps.Next(&start, &end,
- /*offset*/0, /*filename*/0, /*filename_size*/0,
- /*protection*/0)) {
- if ((u64)start < kLinuxAppMemBeg) {
- Printf("FATAL: ThreadSanitizer can not mmap the shadow memory ("
- "something is mapped at 0x%zx < 0x%zx)\n",
- start, kLinuxAppMemBeg);
- Printf("FATAL: Make sure to compile with -fPIE"
- " and to link with -pie.\n");
- Die();
- }
+ DPrintf("memory shadow: %zx-%zx (%zuGB)\n",
+ kShadowBeg, kShadowEnd,
+ (kShadowEnd - kShadowBeg) >> 30);
+
+ // Map meta shadow.
+ uptr meta_size = kMetaShadowEnd - kMetaShadowBeg;
+ uptr meta = (uptr)MmapFixedNoReserve(kMetaShadowBeg, meta_size);
+ if (meta != kMetaShadowBeg) {
+ Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
+ Printf("FATAL: Make sure to compile with -fPIE and "
+ "to link with -pie (%p, %p).\n", meta, kMetaShadowBeg);
+ Die();
}
+ DPrintf("meta shadow: %zx-%zx (%zuGB)\n",
+ meta, meta + meta_size, meta_size >> 30);
+
+ MapRodata();
}
static void InitDataSeg() {
MemoryMappingLayout proc_maps(true);
uptr start, end, offset;
char name[128];
+#if SANITIZER_FREEBSD
+ // On FreeBSD BSS is usually the last block allocated within the
+ // low range and heap is the last block allocated within the range
+ // 0x800000000-0x8ffffffff.
+ while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name),
+ /*protection*/ 0)) {
+ DPrintf("%p-%p %p %s\n", start, end, offset, name);
+ if ((start & 0xffff00000000ULL) == 0 && (end & 0xffff00000000ULL) == 0 &&
+ name[0] == '\0') {
+ g_data_start = start;
+ g_data_end = end;
+ }
+ }
+#else
bool prev_is_data = false;
while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name),
/*protection*/ 0)) {
@@ -276,34 +271,42 @@ static void InitDataSeg() {
g_data_end = end;
prev_is_data = is_data;
}
+#endif
DPrintf("guessed data_start=%p data_end=%p\n", g_data_start, g_data_end);
CHECK_LT(g_data_start, g_data_end);
CHECK_GE((uptr)&g_data_start, g_data_start);
CHECK_LT((uptr)&g_data_start, g_data_end);
}
-#endif // #ifndef TSAN_GO
-
-static rlim_t getlim(int res) {
- rlimit rlim;
- CHECK_EQ(0, getrlimit(res, &rlim));
- return rlim.rlim_cur;
-}
+static void CheckAndProtect() {
+ // Ensure that the binary is indeed compiled with -pie.
+ MemoryMappingLayout proc_maps(true);
+ uptr p, end;
+ while (proc_maps.Next(&p, &end, 0, 0, 0, 0)) {
+ if (IsAppMem(p))
+ continue;
+ if (p >= kHeapMemEnd &&
+ p < kHeapMemEnd + PrimaryAllocator::AdditionalSize())
+ continue;
+ if (p >= 0xf000000000000000ull) // vdso
+ break;
+ Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n", p, end);
+ Die();
+ }
-static void setlim(int res, rlim_t lim) {
- // The following magic is to prevent clang from replacing it with memset.
- volatile rlimit rlim;
- rlim.rlim_cur = lim;
- rlim.rlim_max = lim;
- setrlimit(res, (rlimit*)&rlim);
+ ProtectRange(kLoAppMemEnd, kShadowBeg);
+ ProtectRange(kShadowEnd, kMetaShadowBeg);
+ ProtectRange(kMetaShadowEnd, kTraceMemBeg);
+ // Memory for traces is mapped lazily in MapThreadTrace.
+ // Protect the whole range for now, so that user does not map something here.
+ ProtectRange(kTraceMemBeg, kTraceMemEnd);
+ ProtectRange(kTraceMemEnd, kHeapMemBeg);
+ ProtectRange(kHeapMemEnd + PrimaryAllocator::AdditionalSize(), kHiAppMemBeg);
}
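// The ProtectRange calls above reserve the unused gaps from the address-space
// diagram at the top of tsan_platform.h (plus the not-yet-mapped trace range),
// so that later mmaps cannot land in ranges the runtime relies on being empty.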
+#endif // #ifndef SANITIZER_GO
-const char *InitializePlatform() {
- void *p = 0;
- if (sizeof(p) == 8) {
- // Disable core dumps, dumping of 16TB usually takes a bit long.
- setlim(RLIMIT_CORE, 0);
- }
+void InitializePlatform() {
+ DisableCoreDumperIfNecessary();
// Go maps shadow memory lazily and works fine with limited address space.
// Unlimited stack is not a problem as well, because the executable
@@ -313,41 +316,44 @@ const char *InitializePlatform() {
// TSan doesn't play well with unlimited stack size (as stack
// overlaps with shadow memory). If we detect unlimited stack size,
// we re-exec the program with limited stack size as a best effort.
- if (getlim(RLIMIT_STACK) == (rlim_t)-1) {
+ if (StackSizeIsUnlimited()) {
const uptr kMaxStackSize = 32 * 1024 * 1024;
- Report("WARNING: Program is run with unlimited stack size, which "
- "wouldn't work with ThreadSanitizer.\n");
- Report("Re-execing with stack size limited to %zd bytes.\n",
- kMaxStackSize);
+ VReport(1, "Program is run with unlimited stack size, which wouldn't "
+ "work with ThreadSanitizer.\n"
+ "Re-execing with stack size limited to %zd bytes.\n",
+ kMaxStackSize);
SetStackSizeLimitInBytes(kMaxStackSize);
reexec = true;
}
- if (getlim(RLIMIT_AS) != (rlim_t)-1) {
+ if (!AddressSpaceIsUnlimited()) {
Report("WARNING: Program is run with limited virtual address space,"
" which wouldn't work with ThreadSanitizer.\n");
Report("Re-execing with unlimited virtual address space.\n");
- setlim(RLIMIT_AS, -1);
+ SetAddressSpaceUnlimited();
reexec = true;
}
if (reexec)
ReExec();
}
-#ifndef TSAN_GO
- CheckPIE();
+#ifndef SANITIZER_GO
+ CheckAndProtect();
InitTlsSize();
InitDataSeg();
#endif
- return GetEnv(kTsanOptionsEnv);
}
bool IsGlobalVar(uptr addr) {
return g_data_start && addr >= g_data_start && addr < g_data_end;
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
+// Extract file descriptors passed to glibc internal __res_iclose function.
+// This is required to properly "close" the fds, because we do not see internal
+// closes within glibc. The code is a pure hack.
int ExtractResolvFDs(void *state, int *fds, int nfd) {
+#if SANITIZER_LINUX
int cnt = 0;
__res_state *statp = (__res_state*)state;
for (int i = 0; i < MAXNS && cnt < nfd; i++) {
@@ -355,10 +361,44 @@ int ExtractResolvFDs(void *state, int *fds, int nfd) {
fds[cnt++] = statp->_u._ext.nssocks[i];
}
return cnt;
-}
+#else
+ return 0;
#endif
+}
+
+// Extract file descriptors passed via UNIX domain sockets.
+// This is required to properly handle "open" of these fds.
+// see 'man recvmsg' and 'man 3 cmsg'.
+int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) {
+ int res = 0;
+ msghdr *msg = (msghdr*)msgp;
+ struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);
+ for (; cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+ if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS)
+ continue;
+ int n = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(fds[0]);
+ for (int i = 0; i < n; i++) {
+ fds[res++] = ((int*)CMSG_DATA(cmsg))[i];
+ if (res == nfd)
+ return res;
+ }
+ }
+ return res;
+}
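// A hedged sketch (not part of this patch) of how an interceptor for recvmsg
// might consume this helper after a successful call, so that descriptors
// received over a UNIX domain socket enter tsan's fd tracking; FdEventCreate
// and ARRAY_SIZE are assumed from tsan_fd.h / sanitizer_common:
//   int fds[10];
//   int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds));
//   for (int i = 0; i < cnt; i++)
//     FdEventCreate(thr, pc, fds[i]);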
+int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
+ void *abstime), void *c, void *m, void *abstime,
+ void(*cleanup)(void *arg), void *arg) {
+  // pthread_cleanup_push/pop are a hardcore macro mess.
+  // We can't intercept or call them without including pthread.h.
+ int res;
+ pthread_cleanup_push(cleanup, arg);
+ res = fn(c, m, abstime);
+ pthread_cleanup_pop(0);
+ return res;
+}
+#endif
} // namespace __tsan
-#endif // SANITIZER_LINUX
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
diff --git a/lib/tsan/rtl/tsan_platform_mac.cc b/lib/tsan/rtl/tsan_platform_mac.cc
index 99d4533a4fa26..15b9f9d2cb190 100644
--- a/lib/tsan/rtl/tsan_platform_mac.cc
+++ b/lib/tsan/rtl/tsan_platform_mac.cc
@@ -40,12 +40,6 @@
namespace __tsan {
-ScopedInRtl::ScopedInRtl() {
-}
-
-ScopedInRtl::~ScopedInRtl() {
-}
-
uptr GetShadowMemoryConsumption() {
return 0;
}
@@ -53,42 +47,45 @@ uptr GetShadowMemoryConsumption() {
void FlushShadowMemory() {
}
-#ifndef TSAN_GO
+void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
+}
+
+#ifndef SANITIZER_GO
void InitializeShadowMemory() {
- uptr shadow = (uptr)MmapFixedNoReserve(kLinuxShadowBeg,
- kLinuxShadowEnd - kLinuxShadowBeg);
- if (shadow != kLinuxShadowBeg) {
+ uptr shadow = (uptr)MmapFixedNoReserve(kShadowBeg,
+ kShadowEnd - kShadowBeg);
+ if (shadow != kShadowBeg) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
Printf("FATAL: Make sure to compile with -fPIE and "
"to link with -pie.\n");
Die();
}
- DPrintf("kLinuxShadow %zx-%zx (%zuGB)\n",
- kLinuxShadowBeg, kLinuxShadowEnd,
- (kLinuxShadowEnd - kLinuxShadowBeg) >> 30);
- DPrintf("kLinuxAppMem %zx-%zx (%zuGB)\n",
- kLinuxAppMemBeg, kLinuxAppMemEnd,
- (kLinuxAppMemEnd - kLinuxAppMemBeg) >> 30);
+ DPrintf("kShadow %zx-%zx (%zuGB)\n",
+ kShadowBeg, kShadowEnd,
+ (kShadowEnd - kShadowBeg) >> 30);
+ DPrintf("kAppMem %zx-%zx (%zuGB)\n",
+ kAppMemBeg, kAppMemEnd,
+ (kAppMemEnd - kAppMemBeg) >> 30);
}
#endif
-const char *InitializePlatform() {
- void *p = 0;
- if (sizeof(p) == 8) {
- // Disable core dumps, dumping of 16TB usually takes a bit long.
- // The following magic is to prevent clang from replacing it with memset.
- volatile rlimit lim;
- lim.rlim_cur = 0;
- lim.rlim_max = 0;
- setrlimit(RLIMIT_CORE, (rlimit*)&lim);
- }
-
- return GetEnv(kTsanOptionsEnv);
+void InitializePlatform() {
+ DisableCoreDumperIfNecessary();
}
-void FinalizePlatform() {
- fflush(0);
+#ifndef SANITIZER_GO
+int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
+ void *abstime), void *c, void *m, void *abstime,
+ void(*cleanup)(void *arg), void *arg) {
+  // pthread_cleanup_push/pop are a hardcore macro mess.
+  // We can't intercept or call them without including pthread.h.
+ int res;
+ pthread_cleanup_push(cleanup, arg);
+ res = fn(c, m, abstime);
+ pthread_cleanup_pop(0);
+ return res;
}
+#endif
} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_platform_windows.cc b/lib/tsan/rtl/tsan_platform_windows.cc
index 711db72ce6847..cfbe77da2c075 100644
--- a/lib/tsan/rtl/tsan_platform_windows.cc
+++ b/lib/tsan/rtl/tsan_platform_windows.cc
@@ -21,12 +21,6 @@
namespace __tsan {
-ScopedInRtl::ScopedInRtl() {
-}
-
-ScopedInRtl::~ScopedInRtl() {
-}
-
uptr GetShadowMemoryConsumption() {
return 0;
}
@@ -34,12 +28,10 @@ uptr GetShadowMemoryConsumption() {
void FlushShadowMemory() {
}
-const char *InitializePlatform() {
- return GetEnv(kTsanOptionsEnv);
+void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
}
-void FinalizePlatform() {
- fflush(0);
+void InitializePlatform() {
}
} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_report.cc b/lib/tsan/rtl/tsan_report.cc
index 66307ae09dcd1..c22f12a1bfa55 100644
--- a/lib/tsan/rtl/tsan_report.cc
+++ b/lib/tsan/rtl/tsan_report.cc
@@ -13,13 +13,31 @@
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
+#include "sanitizer_common/sanitizer_stacktrace_printer.h"
namespace __tsan {
-class Decorator: private __sanitizer::AnsiColorDecorator {
+ReportStack::ReportStack() : frames(nullptr), suppressable(false) {}
+
+ReportStack *ReportStack::New() {
+ void *mem = internal_alloc(MBlockReportStack, sizeof(ReportStack));
+ return new(mem) ReportStack();
+}
+
+ReportLocation::ReportLocation(ReportLocationType type)
+ : type(type), global(), heap_chunk_start(0), heap_chunk_size(0), tid(0),
+ fd(0), suppressable(false), stack(nullptr) {}
+
+ReportLocation *ReportLocation::New(ReportLocationType type) {
+ void *mem = internal_alloc(MBlockReportStack, sizeof(ReportLocation));
+ return new(mem) ReportLocation(type);
+}
+
+class Decorator: public __sanitizer::SanitizerCommonDecorator {
public:
- Decorator() : __sanitizer::AnsiColorDecorator(PrintsToTtyCached()) { }
+ Decorator() : SanitizerCommonDecorator() { }
const char *Warning() { return Red(); }
const char *EndWarning() { return Default(); }
const char *Access() { return Blue(); }
@@ -40,6 +58,7 @@ ReportDesc::ReportDesc()
, locs(MBlockReportLoc)
, mutexes(MBlockReportMutex)
, threads(MBlockReportThread)
+ , unique_tids(MBlockReportThread)
, sleep()
, count() {
}
@@ -52,7 +71,7 @@ ReportDesc::~ReportDesc() {
// FIXME(dvyukov): it must be leaking a lot of memory.
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
const int kThreadBufSize = 32;
const char *thread_name(char *buf, int tid) {
@@ -69,30 +88,40 @@ static const char *ReportTypeString(ReportType typ) {
return "data race on vptr (ctor/dtor vs virtual call)";
if (typ == ReportTypeUseAfterFree)
return "heap-use-after-free";
+ if (typ == ReportTypeVptrUseAfterFree)
+ return "heap-use-after-free (virtual call vs free)";
if (typ == ReportTypeThreadLeak)
return "thread leak";
if (typ == ReportTypeMutexDestroyLocked)
return "destroy of a locked mutex";
+ if (typ == ReportTypeMutexDoubleLock)
+ return "double lock of a mutex";
+ if (typ == ReportTypeMutexBadUnlock)
+ return "unlock of an unlocked mutex (or by a wrong thread)";
+ if (typ == ReportTypeMutexBadReadLock)
+ return "read lock of a write locked mutex";
+ if (typ == ReportTypeMutexBadReadUnlock)
+ return "read unlock of a write locked mutex";
if (typ == ReportTypeSignalUnsafe)
return "signal-unsafe call inside of a signal";
if (typ == ReportTypeErrnoInSignal)
return "signal handler spoils errno";
+ if (typ == ReportTypeDeadlock)
+ return "lock-order-inversion (potential deadlock)";
return "";
}
void PrintStack(const ReportStack *ent) {
- if (ent == 0) {
+ if (ent == 0 || ent->frames == 0) {
Printf(" [failed to restore the stack]\n\n");
return;
}
- for (int i = 0; ent; ent = ent->next, i++) {
- Printf(" #%d %s %s:%d", i, ent->func, ent->file, ent->line);
- if (ent->col)
- Printf(":%d", ent->col);
- if (ent->module && ent->offset)
- Printf(" (%s+%p)\n", ent->module, (void*)ent->offset);
- else
- Printf(" (%p)\n", (void*)ent->pc);
+ SymbolizedStack *frame = ent->frames;
+ for (int i = 0; frame && frame->info.address; frame = frame->next, i++) {
+ InternalScopedString res(2 * GetPageSizeCached());
+ RenderFrame(&res, common_flags()->stack_trace_format, i, frame->info,
+ common_flags()->strip_path_prefix, "__interceptor_");
+ Printf("%s\n", res.data());
}
Printf("\n");
}
@@ -134,12 +163,15 @@ static void PrintLocation(const ReportLocation *loc) {
bool print_stack = false;
Printf("%s", d.Location());
if (loc->type == ReportLocationGlobal) {
+ const DataInfo &global = loc->global;
Printf(" Location is global '%s' of size %zu at %p (%s+%p)\n\n",
- loc->name, loc->size, loc->addr, loc->module, loc->offset);
+ global.name, global.size, global.start,
+ StripModuleName(global.module), global.module_offset);
} else if (loc->type == ReportLocationHeap) {
char thrbuf[kThreadBufSize];
Printf(" Location is heap block of size %zu at %p allocated by %s:\n",
- loc->size, loc->addr, thread_name(thrbuf, loc->tid));
+ loc->heap_chunk_size, loc->heap_chunk_start,
+ thread_name(thrbuf, loc->tid));
print_stack = true;
} else if (loc->type == ReportLocationStack) {
Printf(" Location is stack of %s.\n\n", thread_name(thrbuf, loc->tid));
@@ -155,6 +187,17 @@ static void PrintLocation(const ReportLocation *loc) {
PrintStack(loc->stack);
}
+static void PrintMutexShort(const ReportMutex *rm, const char *after) {
+ Decorator d;
+ Printf("%sM%zd%s%s", d.Mutex(), rm->id, d.EndMutex(), after);
+}
+
+static void PrintMutexShortWithAddress(const ReportMutex *rm,
+ const char *after) {
+ Decorator d;
+ Printf("%sM%zd (%p)%s%s", d.Mutex(), rm->id, rm->addr, d.EndMutex(), after);
+}
+
static void PrintMutex(const ReportMutex *rm) {
Decorator d;
if (rm->destroyed) {
@@ -163,7 +206,7 @@ static void PrintMutex(const ReportMutex *rm) {
Printf("%s", d.EndMutex());
} else {
Printf("%s", d.Mutex());
- Printf(" Mutex M%llu created at:\n", rm->id);
+ Printf(" Mutex M%llu (%p) created at:\n", rm->id, rm->addr);
Printf("%s", d.EndMutex());
PrintStack(rm->stack);
}
@@ -208,10 +251,20 @@ static ReportStack *ChooseSummaryStack(const ReportDesc *rep) {
return 0;
}
-ReportStack *SkipTsanInternalFrames(ReportStack *ent) {
- while (FrameIsInternal(ent) && ent->next)
- ent = ent->next;
- return ent;
+static bool FrameIsInternal(const SymbolizedStack *frame) {
+ if (frame == 0)
+ return false;
+ const char *file = frame->info.file;
+ return file != 0 &&
+ (internal_strstr(file, "tsan_interceptors.cc") ||
+ internal_strstr(file, "sanitizer_common_interceptors.inc") ||
+ internal_strstr(file, "tsan_interface_"));
+}
+
+static SymbolizedStack *SkipTsanInternalFrames(SymbolizedStack *frames) {
+ while (FrameIsInternal(frames) && frames->next)
+ frames = frames->next;
+ return frames;
}
void PrintReport(const ReportDesc *rep) {
@@ -223,10 +276,42 @@ void PrintReport(const ReportDesc *rep) {
(int)internal_getpid());
Printf("%s", d.EndWarning());
- for (uptr i = 0; i < rep->stacks.Size(); i++) {
- if (i)
- Printf(" and:\n");
- PrintStack(rep->stacks[i]);
+ if (rep->typ == ReportTypeDeadlock) {
+ char thrbuf[kThreadBufSize];
+ Printf(" Cycle in lock order graph: ");
+ for (uptr i = 0; i < rep->mutexes.Size(); i++)
+ PrintMutexShortWithAddress(rep->mutexes[i], " => ");
+ PrintMutexShort(rep->mutexes[0], "\n\n");
+ CHECK_GT(rep->mutexes.Size(), 0U);
+ CHECK_EQ(rep->mutexes.Size() * (flags()->second_deadlock_stack ? 2 : 1),
+ rep->stacks.Size());
+ for (uptr i = 0; i < rep->mutexes.Size(); i++) {
+ Printf(" Mutex ");
+ PrintMutexShort(rep->mutexes[(i + 1) % rep->mutexes.Size()],
+ " acquired here while holding mutex ");
+ PrintMutexShort(rep->mutexes[i], " in ");
+ Printf("%s", d.ThreadDescription());
+ Printf("%s:\n", thread_name(thrbuf, rep->unique_tids[i]));
+ Printf("%s", d.EndThreadDescription());
+ if (flags()->second_deadlock_stack) {
+ PrintStack(rep->stacks[2*i]);
+ Printf(" Mutex ");
+ PrintMutexShort(rep->mutexes[i],
+ " previously acquired by the same thread here:\n");
+ PrintStack(rep->stacks[2*i+1]);
+ } else {
+ PrintStack(rep->stacks[i]);
+ if (i == 0)
+ Printf(" Hint: use TSAN_OPTIONS=second_deadlock_stack=1 "
+ "to get more informative warning message\n\n");
+ }
+ }
+ } else {
+ for (uptr i = 0; i < rep->stacks.Size(); i++) {
+ if (i)
+ Printf(" and:\n");
+ PrintStack(rep->stacks[i]);
+ }
}
for (uptr i = 0; i < rep->mops.Size(); i++)
@@ -238,8 +323,10 @@ void PrintReport(const ReportDesc *rep) {
for (uptr i = 0; i < rep->locs.Size(); i++)
PrintLocation(rep->locs[i]);
- for (uptr i = 0; i < rep->mutexes.Size(); i++)
- PrintMutex(rep->mutexes[i]);
+ if (rep->typ != ReportTypeDeadlock) {
+ for (uptr i = 0; i < rep->mutexes.Size(); i++)
+ PrintMutex(rep->mutexes[i]);
+ }
for (uptr i = 0; i < rep->threads.Size(); i++)
PrintThread(rep->threads[i]);
@@ -247,24 +334,30 @@ void PrintReport(const ReportDesc *rep) {
if (rep->typ == ReportTypeThreadLeak && rep->count > 1)
Printf(" And %d more similar thread leaks.\n\n", rep->count - 1);
- if (ReportStack *ent = SkipTsanInternalFrames(ChooseSummaryStack(rep)))
- ReportErrorSummary(rep_typ_str, ent->file, ent->line, ent->func);
+ if (ReportStack *stack = ChooseSummaryStack(rep)) {
+ if (SymbolizedStack *frame = SkipTsanInternalFrames(stack->frames)) {
+ const AddressInfo &info = frame->info;
+ ReportErrorSummary(rep_typ_str, info.file, info.line, info.function);
+ }
+ }
Printf("==================\n");
}
-#else // #ifndef TSAN_GO
+#else // #ifndef SANITIZER_GO
const int kMainThreadId = 1;
void PrintStack(const ReportStack *ent) {
- if (ent == 0) {
+ if (ent == 0 || ent->frames == 0) {
Printf(" [failed to restore the stack]\n");
return;
}
- for (int i = 0; ent; ent = ent->next, i++) {
- Printf(" %s()\n %s:%d +0x%zx\n",
- ent->func, ent->file, ent->line, (void*)ent->offset);
+ SymbolizedStack *frame = ent->frames;
+ for (int i = 0; frame; frame = frame->next, i++) {
+ const AddressInfo &info = frame->info;
+ Printf(" %s()\n %s:%d +0x%zx\n", info.function, info.file, info.line,
+ (void *)info.module_offset);
}
}
@@ -291,11 +384,26 @@ static void PrintThread(const ReportThread *rt) {
void PrintReport(const ReportDesc *rep) {
Printf("==================\n");
- Printf("WARNING: DATA RACE");
- for (uptr i = 0; i < rep->mops.Size(); i++)
- PrintMop(rep->mops[i], i == 0);
- for (uptr i = 0; i < rep->threads.Size(); i++)
- PrintThread(rep->threads[i]);
+ if (rep->typ == ReportTypeRace) {
+ Printf("WARNING: DATA RACE");
+ for (uptr i = 0; i < rep->mops.Size(); i++)
+ PrintMop(rep->mops[i], i == 0);
+ for (uptr i = 0; i < rep->threads.Size(); i++)
+ PrintThread(rep->threads[i]);
+ } else if (rep->typ == ReportTypeDeadlock) {
+ Printf("WARNING: DEADLOCK\n");
+ for (uptr i = 0; i < rep->mutexes.Size(); i++) {
+ Printf("Goroutine %d lock mutex %d while holding mutex %d:\n",
+ 999, rep->mutexes[i]->id,
+ rep->mutexes[(i+1) % rep->mutexes.Size()]->id);
+ PrintStack(rep->stacks[2*i]);
+ Printf("\n");
+ Printf("Mutex %d was previously locked here:\n",
+ rep->mutexes[(i+1) % rep->mutexes.Size()]->id);
+ PrintStack(rep->stacks[2*i + 1]);
+ Printf("\n");
+ }
+ }
Printf("==================\n");
}
diff --git a/lib/tsan/rtl/tsan_report.h b/lib/tsan/rtl/tsan_report.h
index b2ce0dd66a272..3e344a048e435 100644
--- a/lib/tsan/rtl/tsan_report.h
+++ b/lib/tsan/rtl/tsan_report.h
@@ -13,6 +13,7 @@
#ifndef TSAN_REPORT_H
#define TSAN_REPORT_H
+#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_vector.h"
@@ -22,21 +23,25 @@ enum ReportType {
ReportTypeRace,
ReportTypeVptrRace,
ReportTypeUseAfterFree,
+ ReportTypeVptrUseAfterFree,
ReportTypeThreadLeak,
ReportTypeMutexDestroyLocked,
+ ReportTypeMutexDoubleLock,
+ ReportTypeMutexBadUnlock,
+ ReportTypeMutexBadReadLock,
+ ReportTypeMutexBadReadUnlock,
ReportTypeSignalUnsafe,
- ReportTypeErrnoInSignal
+ ReportTypeErrnoInSignal,
+ ReportTypeDeadlock
};
struct ReportStack {
- ReportStack *next;
- char *module;
- uptr offset;
- uptr pc;
- char *func;
- char *file;
- int line;
- int col;
+ SymbolizedStack *frames;
+ bool suppressable;
+ static ReportStack *New();
+
+ private:
+ ReportStack();
};
struct ReportMopMutex {
@@ -66,16 +71,17 @@ enum ReportLocationType {
struct ReportLocation {
ReportLocationType type;
- uptr addr;
- uptr size;
- char *module;
- uptr offset;
+ DataInfo global;
+ uptr heap_chunk_start;
+ uptr heap_chunk_size;
int tid;
int fd;
- char *name;
- char *file;
- int line;
+ bool suppressable;
ReportStack *stack;
+
+ static ReportLocation *New(ReportLocationType type);
+ private:
+ explicit ReportLocation(ReportLocationType type);
};
struct ReportThread {
@@ -89,6 +95,7 @@ struct ReportThread {
struct ReportMutex {
u64 id;
+ uptr addr;
bool destroyed;
ReportStack *stack;
};
@@ -101,6 +108,7 @@ class ReportDesc {
Vector<ReportLocation*> locs;
Vector<ReportMutex*> mutexes;
Vector<ReportThread*> threads;
+ Vector<int> unique_tids;
ReportStack *sleep;
int count;
diff --git a/lib/tsan/rtl/tsan_rtl.cc b/lib/tsan/rtl/tsan_rtl.cc
index 1f66d29a741c9..7cb7008e29801 100644
--- a/lib/tsan/rtl/tsan_rtl.cc
+++ b/lib/tsan/rtl/tsan_rtl.cc
@@ -25,6 +25,16 @@
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
+#ifdef __SSE3__
+// <emmintrin.h> transitively includes <stdlib.h>,
+// and it's prohibited to include std headers into tsan runtime.
+// So we do this dirty trick.
+#define _MM_MALLOC_H_INCLUDED
+#define __MM_MALLOC_H
+#include <emmintrin.h>
+typedef __m128i m128;
+#endif
+
volatile int __tsan_resumed = 0;
extern "C" void __tsan_resume() {
@@ -33,25 +43,25 @@ extern "C" void __tsan_resume() {
namespace __tsan {
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
+Context *ctx;
// Can be overriden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
+void OnInitialize();
#else
+SANITIZER_INTERFACE_ATTRIBUTE
bool WEAK OnFinalize(bool failed) {
return failed;
}
+SANITIZER_INTERFACE_ATTRIBUTE
+void WEAK OnInitialize() {}
#endif
-static Context *ctx;
-Context *CTX() {
- return ctx;
-}
-
static char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadContextBase *CreateThreadContext(u32 tid) {
@@ -63,7 +73,7 @@ static ThreadContextBase *CreateThreadContext(u32 tid) {
return new(mem) ThreadContext(tid);
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
@@ -75,7 +85,7 @@ Context::Context()
, nreported()
, nmissed_expected()
, thread_registry(new(thread_registry_placeholder) ThreadRegistry(
- CreateThreadContext, kMaxTid, kThreadQuarantineSize))
+ CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse))
, racy_stacks(MBlockRacyStacks)
, racy_addresses(MBlockRacyAddresses)
, fired_suppressions(8) {
@@ -83,14 +93,16 @@ Context::Context()
// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
+ unsigned reuse_count,
uptr stk_addr, uptr stk_size,
uptr tls_addr, uptr tls_size)
: fast_state(tid, epoch)
// Do not touch these, rely on zero initialization,
// they may be accessed before the ctor.
// , ignore_reads_and_writes()
- // , in_rtl()
-#ifndef TSAN_GO
+ // , ignore_interceptors()
+ , clock(tid, reuse_count)
+#ifndef SANITIZER_GO
, jmp_bufs(MBlockJmpBuf)
#endif
, tid(tid)
@@ -98,7 +110,11 @@ ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
, stk_addr(stk_addr)
, stk_size(stk_size)
, tls_addr(tls_addr)
- , tls_size(tls_size) {
+ , tls_size(tls_size)
+#ifndef SANITIZER_GO
+ , last_sleep_clock(tid)
+#endif
+{
}
static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
@@ -106,62 +122,67 @@ static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
uptr n_running_threads;
ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
InternalScopedBuffer<char> buf(4096);
- internal_snprintf(buf.data(), buf.size(), "%d: nthr=%d nlive=%d\n",
- i, n_threads, n_running_threads);
- internal_write(fd, buf.data(), internal_strlen(buf.data()));
- WriteMemoryProfile(buf.data(), buf.size());
+ WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads);
internal_write(fd, buf.data(), internal_strlen(buf.data()));
}
static void BackgroundThread(void *arg) {
- ScopedInRtl in_rtl;
- Context *ctx = CTX();
+#ifndef SANITIZER_GO
+ // This is a non-initialized non-user thread, nothing to see here.
+ // We don't use ScopedIgnoreInterceptors, because we want ignores to be
+ // enabled even when the thread function exits (e.g. during pthread thread
+ // shutdown code).
+ cur_thread()->ignore_interceptors++;
+#endif
const u64 kMs2Ns = 1000 * 1000;
fd_t mprof_fd = kInvalidFd;
if (flags()->profile_memory && flags()->profile_memory[0]) {
- InternalScopedBuffer<char> filename(4096);
- internal_snprintf(filename.data(), filename.size(), "%s.%d",
- flags()->profile_memory, (int)internal_getpid());
- uptr openrv = OpenFile(filename.data(), true);
- if (internal_iserror(openrv)) {
- Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
- &filename[0]);
+ if (internal_strcmp(flags()->profile_memory, "stdout") == 0) {
+ mprof_fd = 1;
+ } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
+ mprof_fd = 2;
} else {
- mprof_fd = openrv;
+ InternalScopedString filename(kMaxPathLength);
+ filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid());
+ uptr openrv = OpenFile(filename.data(), true);
+ if (internal_iserror(openrv)) {
+ Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
+ &filename[0]);
+ } else {
+ mprof_fd = openrv;
+ }
}
}
u64 last_flush = NanoTime();
uptr last_rss = 0;
- for (int i = 0; ; i++) {
- SleepForSeconds(1);
+ for (int i = 0;
+ atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
+ i++) {
+ SleepForMillis(100);
u64 now = NanoTime();
// Flush memory if requested.
if (flags()->flush_memory_ms > 0) {
if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
- if (flags()->verbosity > 0)
- Printf("ThreadSanitizer: periodic memory flush\n");
+ VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
FlushShadowMemory();
last_flush = NanoTime();
}
}
+ // GetRSS can be expensive on huge programs, so don't do it every 100ms.
if (flags()->memory_limit_mb > 0) {
uptr rss = GetRSS();
uptr limit = uptr(flags()->memory_limit_mb) << 20;
- if (flags()->verbosity > 0) {
- Printf("ThreadSanitizer: memory flush check"
- " RSS=%llu LAST=%llu LIMIT=%llu\n",
- (u64)rss>>20, (u64)last_rss>>20, (u64)limit>>20);
- }
+ VPrintf(1, "ThreadSanitizer: memory flush check"
+ " RSS=%llu LAST=%llu LIMIT=%llu\n",
+ (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
if (2 * rss > limit + last_rss) {
- if (flags()->verbosity > 0)
- Printf("ThreadSanitizer: flushing memory due to RSS\n");
+ VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
FlushShadowMemory();
rss = GetRSS();
- if (flags()->verbosity > 0)
- Printf("ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
+ VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
}
last_rss = rss;
}
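// The trigger above, 2*rss > limit + last_rss, is equivalent to
// rss - last_rss > limit - rss: flush once the growth since the last flush
// exceeds the remaining headroom below the configured memory_limit_mb.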
@@ -170,7 +191,7 @@ static void BackgroundThread(void *arg) {
if (mprof_fd != kInvalidFd)
MemoryProfiler(ctx, mprof_fd, i);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
// Flush symbolizer cache if requested.
if (flags()->flush_symbolizer_ms > 0) {
u64 last = atomic_load(&ctx->last_symbolize_time_ns,
@@ -186,6 +207,18 @@ static void BackgroundThread(void *arg) {
}
}
+static void StartBackgroundThread() {
+ ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
+}
+
+#ifndef SANITIZER_GO
+static void StopBackgroundThread() {
+ atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
+ internal_join_thread(ctx->background_thread);
+ ctx->background_thread = 0;
+}
+#endif
+
void DontNeedShadowFor(uptr addr, uptr size) {
uptr shadow_beg = MemToShadow(addr);
uptr shadow_end = MemToShadow(addr + size);
@@ -193,13 +226,43 @@ void DontNeedShadowFor(uptr addr, uptr size) {
}
void MapShadow(uptr addr, uptr size) {
+ // Global data is not 64K aligned, but there are no adjacent mappings,
+ // so we can get away with unaligned mapping.
+ // CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
+
+ // Meta shadow is 2:1, so tread carefully.
+ static bool data_mapped = false;
+ static uptr mapped_meta_end = 0;
+ uptr meta_begin = (uptr)MemToMeta(addr);
+ uptr meta_end = (uptr)MemToMeta(addr + size);
+ meta_begin = RoundDownTo(meta_begin, 64 << 10);
+ meta_end = RoundUpTo(meta_end, 64 << 10);
+ if (!data_mapped) {
+ // First call maps data+bss.
+ data_mapped = true;
+ MmapFixedNoReserve(meta_begin, meta_end - meta_begin);
+ } else {
+    // Mapping continuous heap.
+ // Windows wants 64K alignment.
+ meta_begin = RoundDownTo(meta_begin, 64 << 10);
+ meta_end = RoundUpTo(meta_end, 64 << 10);
+ if (meta_end <= mapped_meta_end)
+ return;
+ if (meta_begin < mapped_meta_end)
+ meta_begin = mapped_meta_end;
+ MmapFixedNoReserve(meta_begin, meta_end - meta_begin);
+ mapped_meta_end = meta_end;
+ }
+ VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n",
+ addr, addr+size, meta_begin, meta_end);
}
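// Assuming kMetaShadowCell == 8 and kMetaShadowSize == 4 from tsan_defs.h,
// meta shadow costs half of the mapped application range (roughly 512K of
// meta per 1M of heap), which is why heap growth is tracked 64K-granularly
// via mapped_meta_end instead of remapping the whole region each time.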
void MapThreadTrace(uptr addr, uptr size) {
DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
- CHECK_GE(addr, kTraceMemBegin);
- CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
+ CHECK_GE(addr, kTraceMemBeg);
+ CHECK_LE(addr + size, kTraceMemEnd);
+ CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
uptr addr1 = (uptr)MmapFixedNoReserve(addr, size);
if (addr1 != addr) {
Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p->%p)\n",
@@ -208,57 +271,75 @@ void MapThreadTrace(uptr addr, uptr size) {
}
}
+static void CheckShadowMapping() {
+ for (uptr i = 0; i < ARRAY_SIZE(UserRegions); i += 2) {
+ const uptr beg = UserRegions[i];
+ const uptr end = UserRegions[i + 1];
+ VPrintf(3, "checking shadow region %p-%p\n", beg, end);
+ for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
+ for (int x = -1; x <= 1; x++) {
+ const uptr p = p0 + x;
+ if (p < beg || p >= end)
+ continue;
+ const uptr s = MemToShadow(p);
+ VPrintf(3, " checking pointer %p -> %p\n", p, s);
+ CHECK(IsAppMem(p));
+ CHECK(IsShadowMem(s));
+ CHECK_EQ(p & ~(kShadowCell - 1), ShadowToMem(s));
+ const uptr m = (uptr)MemToMeta(p);
+ CHECK(IsMetaMem(m));
+ }
+ }
+ }
+}
+
void Initialize(ThreadState *thr) {
// Thread safe because done before all threads exist.
static bool is_initialized = false;
if (is_initialized)
return;
is_initialized = true;
+ // We are not ready to handle interceptors yet.
+ ScopedIgnoreInterceptors ignore;
SanitizerToolName = "ThreadSanitizer";
// Install tool-specific callbacks in sanitizer_common.
SetCheckFailedCallback(TsanCheckFailed);
- ScopedInRtl in_rtl;
-#ifndef TSAN_GO
+ ctx = new(ctx_placeholder) Context;
+ const char *options = GetEnv(kTsanOptionsEnv);
+ InitializeFlags(&ctx->flags, options);
+#ifndef SANITIZER_GO
InitializeAllocator();
#endif
InitializeInterceptors();
- const char *env = InitializePlatform();
+ CheckShadowMapping();
+ InitializePlatform();
InitializeMutex();
InitializeDynamicAnnotations();
- ctx = new(ctx_placeholder) Context;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
InitializeShadowMemory();
#endif
- InitializeFlags(&ctx->flags, env);
// Setup correct file descriptor for error reports.
- __sanitizer_set_report_path(flags()->log_path);
+ __sanitizer_set_report_path(common_flags()->log_path);
InitializeSuppressions();
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
InitializeLibIgnore();
- // Initialize external symbolizer before internal threads are started.
- const char *external_symbolizer = flags()->external_symbolizer_path;
- bool external_symbolizer_started =
- Symbolizer::Init(external_symbolizer)->IsExternalAvailable();
- if (external_symbolizer != 0 && external_symbolizer[0] != '\0' &&
- !external_symbolizer_started) {
- Printf("Failed to start external symbolizer: '%s'\n",
- external_symbolizer);
- Die();
- }
- Symbolizer::Get()->AddHooks(EnterSymbolizer, ExitSymbolizer);
+ Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
+#endif
+ StartBackgroundThread();
+#ifndef SANITIZER_GO
+ SetSandboxingCallback(StopBackgroundThread);
#endif
- internal_start_thread(&BackgroundThread, 0);
+ if (common_flags()->detect_deadlocks)
+ ctx->dd = DDetector::Create(flags());
- if (ctx->flags.verbosity)
- Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
- (int)internal_getpid());
+ VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
+ (int)internal_getpid());
// Initialize thread 0.
int tid = ThreadCreate(thr, 0, 0, true);
CHECK_EQ(tid, 0);
ThreadStart(thr, tid, internal_getpid());
- CHECK_EQ(thr->in_rtl, 1);
ctx->initialized = true;
if (flags()->stop_on_start) {
@@ -267,11 +348,11 @@ void Initialize(ThreadState *thr) {
(int)internal_getpid());
while (__tsan_resumed == 0) {}
}
+
+ OnInitialize();
}
int Finalize(ThreadState *thr) {
- ScopedInRtl in_rtl;
- Context *ctx = __tsan::ctx;
bool failed = false;
if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
@@ -283,8 +364,8 @@ int Finalize(ThreadState *thr) {
CommonSanitizerReportMutex.Unlock();
ctx->report_mtx.Unlock();
-#ifndef TSAN_GO
- if (ctx->flags.verbosity)
+#ifndef SANITIZER_GO
+ if (common_flags()->verbosity)
AllocatorPrintStats();
#endif
@@ -292,7 +373,7 @@ int Finalize(ThreadState *thr) {
if (ctx->nreported) {
failed = true;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
Printf("Found %d data race(s)\n", ctx->nreported);
@@ -305,9 +386,9 @@ int Finalize(ThreadState *thr) {
ctx->nmissed_expected);
}
- if (flags()->print_suppressions)
+ if (common_flags()->print_suppressions)
PrintMatchedSuppressions();
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
if (flags()->print_benign)
PrintMatchedBenignRaces();
#endif
@@ -319,31 +400,82 @@ int Finalize(ThreadState *thr) {
return failed ? flags()->exitcode : 0;
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
+void ForkBefore(ThreadState *thr, uptr pc) {
+ ctx->thread_registry->Lock();
+ ctx->report_mtx.Lock();
+}
+
+void ForkParentAfter(ThreadState *thr, uptr pc) {
+ ctx->report_mtx.Unlock();
+ ctx->thread_registry->Unlock();
+}
+
+void ForkChildAfter(ThreadState *thr, uptr pc) {
+ ctx->report_mtx.Unlock();
+ ctx->thread_registry->Unlock();
+
+ uptr nthread = 0;
+ ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */);
+ VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
+ " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
+ if (nthread == 1) {
+ internal_start_thread(&BackgroundThread, 0);
+ } else {
+ // We've just forked a multi-threaded process. We cannot reasonably function
+ // after that (some mutexes may be locked before fork). So just enable
+ // ignores for everything in the hope that we will exec soon.
+ ctx->after_multithreaded_fork = true;
+ thr->ignore_interceptors++;
+ ThreadIgnoreBegin(thr, pc);
+ ThreadIgnoreSyncBegin(thr, pc);
+ }
+}
+#endif
+
+#ifdef SANITIZER_GO
+NOINLINE
+void GrowShadowStack(ThreadState *thr) {
+ const int sz = thr->shadow_stack_end - thr->shadow_stack;
+ const int newsz = 2 * sz;
+ uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
+ newsz * sizeof(uptr));
+ internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
+ internal_free(thr->shadow_stack);
+ thr->shadow_stack = newstack;
+ thr->shadow_stack_pos = newstack + sz;
+ thr->shadow_stack_end = newstack + newsz;
+}
+#endif
+
u32 CurrentStackId(ThreadState *thr, uptr pc) {
if (thr->shadow_stack_pos == 0) // May happen during bootstrap.
return 0;
- if (pc) {
+ if (pc != 0) {
+#ifndef SANITIZER_GO
+ DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
+#else
+ if (thr->shadow_stack_pos == thr->shadow_stack_end)
+ GrowShadowStack(thr);
+#endif
thr->shadow_stack_pos[0] = pc;
thr->shadow_stack_pos++;
}
- u32 id = StackDepotPut(thr->shadow_stack,
- thr->shadow_stack_pos - thr->shadow_stack);
- if (pc)
+ u32 id = StackDepotPut(
+ StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
+ if (pc != 0)
thr->shadow_stack_pos--;
return id;
}
-#endif
void TraceSwitch(ThreadState *thr) {
thr->nomalloc++;
- ScopedInRtl in_rtl;
Trace *thr_trace = ThreadTrace(thr->tid);
Lock l(&thr_trace->mtx);
unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
TraceHeader *hdr = &thr_trace->headers[trace];
hdr->epoch0 = thr->fast_state.epoch();
- hdr->stack0.ObtainCurrent(thr, 0);
+ ObtainCurrentStack(thr, 0, &hdr->stack0);
hdr->mset0 = thr->mset;
thr->nomalloc--;
}
@@ -366,7 +498,7 @@ uptr TraceParts() {
return TraceSize() / kTracePartSize;
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
extern "C" void __tsan_trace_switch() {
TraceSwitch(cur_thread());
}
@@ -393,28 +525,25 @@ void StoreIfNotYetStored(u64 *sp, u64 *s) {
*s = 0;
}
-static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
+ALWAYS_INLINE
+void HandleRace(ThreadState *thr, u64 *shadow_mem,
Shadow cur, Shadow old) {
thr->racy_state[0] = cur.raw();
thr->racy_state[1] = old.raw();
thr->racy_shadow_addr = shadow_mem;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
HACKY_CALL(__tsan_report_race);
#else
ReportRace(thr);
#endif
}
-static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
- return old.epoch() >= thr->fast_synch_epoch;
-}
-
static inline bool HappensBefore(Shadow old, ThreadState *thr) {
return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}
-ALWAYS_INLINE USED
-void MemoryAccessImpl(ThreadState *thr, uptr addr,
+ALWAYS_INLINE
+void MemoryAccessImpl1(ThreadState *thr, uptr addr,
int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
u64 *shadow_mem, Shadow cur) {
StatInc(thr, StatMop);
@@ -492,13 +621,13 @@ void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
while (size) {
int size1 = 1;
int kAccessSizeLog = kSizeLog1;
- if (size >= 8 && (addr & ~7) == ((addr + 8) & ~7)) {
+ if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
size1 = 8;
kAccessSizeLog = kSizeLog8;
- } else if (size >= 4 && (addr & ~7) == ((addr + 4) & ~7)) {
+ } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
size1 = 4;
kAccessSizeLog = kSizeLog4;
- } else if (size >= 2 && (addr & ~7) == ((addr + 2) & ~7)) {
+ } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
size1 = 2;
kAccessSizeLog = kSizeLog2;
}
@@ -508,6 +637,92 @@ void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
}
}
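
The change above replaces addr + 8/4/2 with addr + 7/3/1 in the same-cell checks: an N-byte access fits in a single 8-byte shadow cell exactly when its first and last byte, addr and addr + N - 1, share the same aligned base, and the previous form could never hold for the 8-byte case. A tiny self-contained check of the corrected predicate (illustration only, not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  uint64_t addr = 0x1000;                            // 8-byte aligned
  assert((addr & ~7ull) == ((addr + 7) & ~7ull));    // 8-byte access fits in one cell
  assert((addr & ~7ull) != ((addr + 8) & ~7ull));    // the old check rejected it
  uint64_t addr2 = 0x1001;                           // misaligned start
  assert((addr2 & ~7ull) != ((addr2 + 7) & ~7ull));  // an 8-byte access would span two cells
  return 0;
}
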
+ALWAYS_INLINE
+bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
+ Shadow cur(a);
+ for (uptr i = 0; i < kShadowCnt; i++) {
+ Shadow old(LoadShadow(&s[i]));
+ if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
+ old.TidWithIgnore() == cur.TidWithIgnore() &&
+ old.epoch() > sync_epoch &&
+ old.IsAtomic() == cur.IsAtomic() &&
+ old.IsRead() <= cur.IsRead())
+ return true;
+ }
+ return false;
+}
+
+#if defined(__SSE3__) && TSAN_SHADOW_COUNT == 4
+#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
+ _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
+ (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
+ALWAYS_INLINE
+bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
+ // This is an optimized version of ContainsSameAccessSlow.
+ // load current access into access[0:63]
+ const m128 access = _mm_cvtsi64_si128(a);
+ // duplicate high part of access in addr0:
+ // addr0[0:31] = access[32:63]
+ // addr0[32:63] = access[32:63]
+ // addr0[64:95] = access[32:63]
+ // addr0[96:127] = access[32:63]
+ const m128 addr0 = SHUF(access, access, 1, 1, 1, 1);
+ // load 4 shadow slots
+ const m128 shadow0 = _mm_load_si128((__m128i*)s);
+ const m128 shadow1 = _mm_load_si128((__m128i*)s + 1);
+ // load high parts of 4 shadow slots into addr_vect:
+ // addr_vect[0:31] = shadow0[32:63]
+ // addr_vect[32:63] = shadow0[96:127]
+ // addr_vect[64:95] = shadow1[32:63]
+ // addr_vect[96:127] = shadow1[96:127]
+ m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3);
+ if (!is_write) {
+ // set IsRead bit in addr_vect
+ const m128 rw_mask1 = _mm_cvtsi64_si128(1<<15);
+ const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
+ addr_vect = _mm_or_si128(addr_vect, rw_mask);
+ }
+ // addr0 == addr_vect?
+ const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect);
+ // epoch1[0:63] = sync_epoch
+ const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch);
+ // epoch[0:31] = sync_epoch[0:31]
+ // epoch[32:63] = sync_epoch[0:31]
+ // epoch[64:95] = sync_epoch[0:31]
+ // epoch[96:127] = sync_epoch[0:31]
+ const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0);
+ // load low parts of shadow cell epochs into epoch_vect:
+ // epoch_vect[0:31] = shadow0[0:31]
+ // epoch_vect[32:63] = shadow0[64:95]
+ // epoch_vect[64:95] = shadow1[0:31]
+ // epoch_vect[96:127] = shadow1[64:95]
+ const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
+  // epoch_vect > sync_epoch? (_mm_cmpgt_epi32 is a strict greater-than compare)
+ const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch);
+ // addr_res & epoch_res
+ const m128 res = _mm_and_si128(addr_res, epoch_res);
+ // mask[0] = res[7]
+ // mask[1] = res[15]
+ // ...
+ // mask[15] = res[127]
+ const int mask = _mm_movemask_epi8(res);
+ return mask != 0;
+}
+#endif
+
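
The SHUF macro is the core of the vectorized fast path above: SHUF(v0, v1, i0, i1, i2, i3) selects 32-bit lanes and yields {v0[i0], v0[i1], v1[i2], v1[i3]}, which is how the high (address/flags) and low (epoch) halves of the four 64-bit shadow words are gathered into single registers. A small standalone illustration of the lane selection, assuming an SSE2-capable x86_64 target (this snippet is not part of the patch):

#include <emmintrin.h>
#include <stdio.h>

#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
    _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
    (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))

int main() {
  // _mm_set_epi32 takes lanes from high to low, so v0 = {10, 11, 12, 13}
  // when read as lanes 0..3.
  __m128i v0 = _mm_set_epi32(13, 12, 11, 10);
  __m128i v1 = _mm_set_epi32(23, 22, 21, 20);
  __m128i r = SHUF(v0, v1, 1, 3, 1, 3);  // same selector as the addr_vect gather
  int out[4];
  _mm_storeu_si128((__m128i*)out, r);
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  // prints: 11 13 21 23
  return 0;
}
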
+ALWAYS_INLINE
+bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
+#if defined(__SSE3__) && TSAN_SHADOW_COUNT == 4
+ bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
+ // NOTE: this check can fail if the shadow is concurrently mutated
+ // by other threads.
+ DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
+ return res;
+#else
+ return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
+#endif
+}
+
ALWAYS_INLINE USED
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
@@ -529,7 +744,7 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
}
#endif
- if (*shadow_mem == kShadowRodata) {
+ if (kCppMode && *shadow_mem == kShadowRodata) {
// Access to .rodata section, no races here.
// Measurements show that it can be 10-20% of all memory accesses.
StatInc(thr, StatMop);
@@ -540,20 +755,54 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
}
FastState fast_state = thr->fast_state;
- if (fast_state.GetIgnoreBit())
+ if (fast_state.GetIgnoreBit()) {
+ StatInc(thr, StatMop);
+ StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
+ StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
+ StatInc(thr, StatMopIgnored);
return;
- fast_state.IncrementEpoch();
- thr->fast_state = fast_state;
+ }
+
Shadow cur(fast_state);
cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
cur.SetWrite(kAccessIsWrite);
cur.SetAtomic(kIsAtomic);
- // We must not store to the trace if we do not store to the shadow.
- // That is, this call must be moved somewhere below.
- TraceAddEvent(thr, fast_state, EventTypeMop, pc);
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
+ thr->fast_synch_epoch, kAccessIsWrite))) {
+ StatInc(thr, StatMop);
+ StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
+ StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
+ StatInc(thr, StatMopSame);
+ return;
+ }
- MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
+ if (kCollectHistory) {
+ fast_state.IncrementEpoch();
+ thr->fast_state = fast_state;
+ TraceAddEvent(thr, fast_state, EventTypeMop, pc);
+ cur.IncrementEpoch();
+ }
+
+ MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
+ shadow_mem, cur);
+}
+
+// Called by MemoryAccessRange in tsan_rtl_thread.cc
+ALWAYS_INLINE USED
+void MemoryAccessImpl(ThreadState *thr, uptr addr,
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
+ u64 *shadow_mem, Shadow cur) {
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
+ thr->fast_synch_epoch, kAccessIsWrite))) {
+ StatInc(thr, StatMop);
+ StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
+ StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
+ StatInc(thr, StatMopSame);
+ return;
+ }
+
+ MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
shadow_mem, cur);
}
@@ -582,7 +831,7 @@ static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
// UnmapOrDie/MmapFixedNoReserve does not work on Windows,
// so we do it only for C/C++.
- if (kGoMode || size < 64*1024) {
+ if (kGoMode || size < common_flags()->clear_shadow_mmap_threshold) {
u64 *p = (u64*)MemToShadow(addr);
CHECK(IsShadowMem((uptr)p));
CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
@@ -632,8 +881,10 @@ void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
thr->is_freeing = true;
MemoryAccessRange(thr, pc, addr, size, true);
thr->is_freeing = false;
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+ }
Shadow s(thr->fast_state);
s.ClearIgnoreBit();
s.MarkAsFreed();
@@ -643,8 +894,10 @@ void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
}
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+ }
Shadow s(thr->fast_state);
s.ClearIgnoreBit();
s.SetWrite(true);
@@ -654,29 +907,21 @@ void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
ALWAYS_INLINE USED
void FuncEntry(ThreadState *thr, uptr pc) {
- DCHECK_EQ(thr->in_rtl, 0);
StatInc(thr, StatFuncEnter);
DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
+ }
// Shadow stack maintenance can be replaced with
// stack unwinding during trace switch (which presumably must be faster).
DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
- if (thr->shadow_stack_pos == thr->shadow_stack_end) {
- const int sz = thr->shadow_stack_end - thr->shadow_stack;
- const int newsz = 2 * sz;
- uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
- newsz * sizeof(uptr));
- internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
- internal_free(thr->shadow_stack);
- thr->shadow_stack = newstack;
- thr->shadow_stack_pos = newstack + sz;
- thr->shadow_stack_end = newstack + newsz;
- }
+ if (thr->shadow_stack_pos == thr->shadow_stack_end)
+ GrowShadowStack(thr);
#endif
thr->shadow_stack_pos[0] = pc;
thr->shadow_stack_pos++;
@@ -684,44 +929,61 @@ void FuncEntry(ThreadState *thr, uptr pc) {
ALWAYS_INLINE USED
void FuncExit(ThreadState *thr) {
- DCHECK_EQ(thr->in_rtl, 0);
StatInc(thr, StatFuncExit);
DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
+ }
DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
thr->shadow_stack_pos--;
}
-void ThreadIgnoreBegin(ThreadState *thr) {
+void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
thr->ignore_reads_and_writes++;
CHECK_GT(thr->ignore_reads_and_writes, 0);
thr->fast_state.SetIgnoreBit();
+#ifndef SANITIZER_GO
+ if (!ctx->after_multithreaded_fork)
+ thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
+#endif
}
-void ThreadIgnoreEnd(ThreadState *thr) {
+void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
thr->ignore_reads_and_writes--;
CHECK_GE(thr->ignore_reads_and_writes, 0);
- if (thr->ignore_reads_and_writes == 0)
+ if (thr->ignore_reads_and_writes == 0) {
thr->fast_state.ClearIgnoreBit();
+#ifndef SANITIZER_GO
+ thr->mop_ignore_set.Reset();
+#endif
+ }
}
-void ThreadIgnoreSyncBegin(ThreadState *thr) {
+void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
thr->ignore_sync++;
CHECK_GT(thr->ignore_sync, 0);
+#ifndef SANITIZER_GO
+ if (!ctx->after_multithreaded_fork)
+ thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
+#endif
}
-void ThreadIgnoreSyncEnd(ThreadState *thr) {
+void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
thr->ignore_sync--;
CHECK_GE(thr->ignore_sync, 0);
+#ifndef SANITIZER_GO
+ if (thr->ignore_sync == 0)
+ thr->sync_ignore_set.Reset();
+#endif
}
bool MD5Hash::operator==(const MD5Hash &other) const {
@@ -752,7 +1014,7 @@ void build_consistency_shadow8() {}
} // namespace __tsan
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif
diff --git a/lib/tsan/rtl/tsan_rtl.h b/lib/tsan/rtl/tsan_rtl.h
index 4ee667543a6ea..8d886875159b3 100644
--- a/lib/tsan/rtl/tsan_rtl.h
+++ b/lib/tsan/rtl/tsan_rtl.h
@@ -28,7 +28,9 @@
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
@@ -41,6 +43,8 @@
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_mutexset.h"
+#include "tsan_ignoreset.h"
+#include "tsan_stack_trace.h"
#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
@@ -48,87 +52,9 @@
namespace __tsan {
-// Descriptor of user's memory block.
-struct MBlock {
- /*
- u64 mtx : 1; // must be first
- u64 lst : 44;
- u64 stk : 31; // on word boundary
- u64 tid : kTidBits;
- u64 siz : 128 - 1 - 31 - 44 - kTidBits; // 39
- */
- u64 raw[2];
-
- void Init(uptr siz, u32 tid, u32 stk) {
- raw[0] = raw[1] = 0;
- raw[1] |= (u64)siz << ((1 + 44 + 31 + kTidBits) % 64);
- raw[1] |= (u64)tid << ((1 + 44 + 31) % 64);
- raw[0] |= (u64)stk << (1 + 44);
- raw[1] |= (u64)stk >> (64 - 44 - 1);
- DCHECK_EQ(Size(), siz);
- DCHECK_EQ(Tid(), tid);
- DCHECK_EQ(StackId(), stk);
- }
-
- u32 Tid() const {
- return GetLsb(raw[1] >> ((1 + 44 + 31) % 64), kTidBits);
- }
-
- uptr Size() const {
- return raw[1] >> ((1 + 31 + 44 + kTidBits) % 64);
- }
-
- u32 StackId() const {
- return (raw[0] >> (1 + 44)) | GetLsb(raw[1] << (64 - 44 - 1), 31);
- }
-
- SyncVar *ListHead() const {
- return (SyncVar*)(GetLsb(raw[0] >> 1, 44) << 3);
- }
-
- void ListPush(SyncVar *v) {
- SyncVar *lst = ListHead();
- v->next = lst;
- u64 x = (u64)v ^ (u64)lst;
- x = (x >> 3) << 1;
- raw[0] ^= x;
- DCHECK_EQ(ListHead(), v);
- }
-
- SyncVar *ListPop() {
- SyncVar *lst = ListHead();
- SyncVar *nxt = lst->next;
- lst->next = 0;
- u64 x = (u64)lst ^ (u64)nxt;
- x = (x >> 3) << 1;
- raw[0] ^= x;
- DCHECK_EQ(ListHead(), nxt);
- return lst;
- }
-
- void ListReset() {
- SyncVar *lst = ListHead();
- u64 x = (u64)lst;
- x = (x >> 3) << 1;
- raw[0] ^= x;
- DCHECK_EQ(ListHead(), 0);
- }
-
- void Lock();
- void Unlock();
- typedef GenericScopedLock<MBlock> ScopedLock;
-};
-
-#ifndef TSAN_GO
-#if defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
-const uptr kAllocatorSpace = 0x7d0000000000ULL;
-#else
-const uptr kAllocatorSpace = 0x7d0000000000ULL;
-#endif
-const uptr kAllocatorSize = 0x10000000000ULL; // 1T.
-
+#ifndef SANITIZER_GO
struct MapUnmapCallback;
-typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(MBlock),
+typedef SizeClassAllocator64<kHeapMemBeg, kHeapMemEnd - kHeapMemBeg, 0,
DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
@@ -145,14 +71,14 @@ const u64 kShadowRodata = (u64)-1; // .rodata shadow marker
// FastState (from most significant bit):
// ignore : 1
// tid : kTidBits
-// epoch : kClkBits
// unused : -
// history_size : 3
+// epoch : kClkBits
class FastState {
public:
FastState(u64 tid, u64 epoch) {
x_ = tid << kTidShift;
- x_ |= epoch << kClkShift;
+ x_ |= epoch;
DCHECK_EQ(tid, this->tid());
DCHECK_EQ(epoch, this->epoch());
DCHECK_EQ(GetIgnoreBit(), false);
@@ -177,13 +103,13 @@ class FastState {
}
u64 epoch() const {
- u64 res = (x_ << (kTidBits + 1)) >> (64 - kClkBits);
+ u64 res = x_ & ((1ull << kClkBits) - 1);
return res;
}
void IncrementEpoch() {
u64 old_epoch = epoch();
- x_ += 1 << kClkShift;
+ x_ += 1;
DCHECK_EQ(old_epoch + 1, epoch());
(void)old_epoch;
}
@@ -195,17 +121,19 @@ class FastState {
void SetHistorySize(int hs) {
CHECK_GE(hs, 0);
CHECK_LE(hs, 7);
- x_ = (x_ & ~7) | hs;
+ x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
}
+ ALWAYS_INLINE
int GetHistorySize() const {
- return (int)(x_ & 7);
+ return (int)((x_ >> kHistoryShift) & kHistoryMask);
}
void ClearHistorySize() {
- x_ &= ~7;
+ SetHistorySize(0);
}
+ ALWAYS_INLINE
u64 GetTracePos() const {
const int hs = GetHistorySize();
// When hs == 0, the trace consists of 2 parts.
@@ -216,20 +144,21 @@ class FastState {
private:
friend class Shadow;
static const int kTidShift = 64 - kTidBits - 1;
- static const int kClkShift = kTidShift - kClkBits;
static const u64 kIgnoreBit = 1ull << 63;
static const u64 kFreedBit = 1ull << 63;
+ static const u64 kHistoryShift = kClkBits;
+ static const u64 kHistoryMask = 7;
u64 x_;
};
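
With the layout change above, the epoch now occupies the low kClkBits bits, so epoch() is a single mask and IncrementEpoch() is a plain x_ += 1 with no shift. A self-contained sketch of the packing, with kClkBits and kTidBits values assumed purely for illustration (they are not taken from this patch):

#include <cassert>
#include <cstdint>

int main() {
  const int kClkBits = 42;                      // assumed width, for illustration
  const int kTidBits = 13;                      // assumed width, for illustration
  const int kTidShift = 64 - kTidBits - 1;      // as in FastState
  const uint64_t kClkMask = (1ull << kClkBits) - 1;

  uint64_t tid = 5, epoch = 100;
  uint64_t x = (tid << kTidShift) | epoch;      // FastState(tid, epoch)

  assert(((x >> kTidShift) & ((1ull << kTidBits) - 1)) == tid);  // tid()
  assert((x & kClkMask) == epoch);              // epoch() is now just a mask
  x += 1;                                       // IncrementEpoch()
  assert((x & kClkMask) == epoch + 1);
  return 0;
}
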
// Shadow (from most significant bit):
// freed : 1
// tid : kTidBits
-// epoch : kClkBits
// is_atomic : 1
// is_read : 1
// size_log : 2
// addr0 : 3
+// epoch : kClkBits
class Shadow : public FastState {
public:
explicit Shadow(u64 x)
@@ -242,10 +171,10 @@ class Shadow : public FastState {
}
void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
- DCHECK_EQ(x_ & 31, 0);
+ DCHECK_EQ((x_ >> kClkBits) & 31, 0);
DCHECK_LE(addr0, 7);
DCHECK_LE(kAccessSizeLog, 3);
- x_ |= (kAccessSizeLog << 3) | addr0;
+ x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
DCHECK_EQ(kAccessSizeLog, size_log());
DCHECK_EQ(addr0, this->addr0());
}
@@ -278,47 +207,34 @@ class Shadow : public FastState {
return shifted_xor == 0;
}
- static inline bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
- u64 masked_xor = (s1.x_ ^ s2.x_) & 31;
+ static ALWAYS_INLINE
+ bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
+ u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
return masked_xor == 0;
}
- static inline bool TwoRangesIntersect(Shadow s1, Shadow s2,
+ static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
unsigned kS2AccessSize) {
bool res = false;
u64 diff = s1.addr0() - s2.addr0();
if ((s64)diff < 0) { // s1.addr0 < s2.addr0 // NOLINT
// if (s1.addr0() + size1) > s2.addr0()) return true;
- if (s1.size() > -diff) res = true;
+ if (s1.size() > -diff)
+ res = true;
} else {
// if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
- if (kS2AccessSize > diff) res = true;
+ if (kS2AccessSize > diff)
+ res = true;
}
- DCHECK_EQ(res, TwoRangesIntersectSLOW(s1, s2));
- DCHECK_EQ(res, TwoRangesIntersectSLOW(s2, s1));
+ DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
+ DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
return res;
}
- // The idea behind the offset is as follows.
- // Consider that we have 8 bool's contained within a single 8-byte block
- // (mapped to a single shadow "cell"). Now consider that we write to the bools
- // from a single thread (which we consider the common case).
- // W/o offsetting each access will have to scan 4 shadow values at average
- // to find the corresponding shadow value for the bool.
- // With offsetting we start scanning shadow with the offset so that
- // each access hits necessary shadow straight off (at least in an expected
- // optimistic case).
- // This logic works seamlessly for any layout of user data. For example,
- // if user data is {int, short, char, char}, then accesses to the int are
- // offsetted to 0, short - 4, 1st char - 6, 2nd char - 7. Hopefully, accesses
- // from a single thread won't need to scan all 8 shadow values.
- unsigned ComputeSearchOffset() {
- return x_ & 7;
- }
- u64 addr0() const { return x_ & 7; }
- u64 size() const { return 1ull << size_log(); }
- bool IsWrite() const { return !IsRead(); }
- bool IsRead() const { return x_ & kReadBit; }
+ u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
+ u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
+ bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
+ bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }
// The idea behind the freed bit is as follows.
   // When the memory is freed (or otherwise inaccessible) we write to the shadow
@@ -343,15 +259,14 @@ class Shadow : public FastState {
return res;
}
- bool IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
- // analyzes 5-th bit (is_read) and 6-th bit (is_atomic)
- bool v = x_ & u64(((kIsWrite ^ 1) << kReadShift)
- | (kIsAtomic << kAtomicShift));
+ bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
+ bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift)
+ | (u64(kIsAtomic) << kAtomicShift));
DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
return v;
}
- bool IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
+ bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
bool v = ((x_ >> kReadShift) & 3)
<= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
@@ -359,7 +274,7 @@ class Shadow : public FastState {
return v;
}
- bool IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
+ bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
bool v = ((x_ >> kReadShift) & 3)
>= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
@@ -368,14 +283,14 @@ class Shadow : public FastState {
}
private:
- static const u64 kReadShift = 5;
+ static const u64 kReadShift = 5 + kClkBits;
static const u64 kReadBit = 1ull << kReadShift;
- static const u64 kAtomicShift = 6;
+ static const u64 kAtomicShift = 6 + kClkBits;
static const u64 kAtomicBit = 1ull << kAtomicShift;
- u64 size_log() const { return (x_ >> 3) & 3; }
+ u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }
- static bool TwoRangesIntersectSLOW(const Shadow s1, const Shadow s2) {
+ static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
if (s1.addr0() == s2.addr0()) return true;
if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
return true;
@@ -390,6 +305,9 @@ struct SignalContext;
struct JmpBuf {
uptr sp;
uptr mangled_sp;
+ int int_signal_send;
+ bool in_blocking_func;
+ uptr in_signal_handler;
uptr *shadow_stack_pos;
};
@@ -413,6 +331,11 @@ struct ThreadState {
// for better performance.
int ignore_reads_and_writes;
int ignore_sync;
+ // Go does not support ignores.
+#ifndef SANITIZER_GO
+ IgnoreSet mop_ignore_set;
+ IgnoreSet sync_ignore_set;
+#endif
// C/C++ uses fixed size shadow stack embed into Trace.
// Go uses malloc-allocated shadow stack with dynamic size.
uptr *shadow_stack;
@@ -422,31 +345,38 @@ struct ThreadState {
u64 racy_state[2];
MutexSet mset;
ThreadClock clock;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
AllocatorCache alloc_cache;
InternalAllocatorCache internal_alloc_cache;
Vector<JmpBuf> jmp_bufs;
+ int ignore_interceptors;
#endif
u64 stat[StatCnt];
const int tid;
const int unique_id;
- int in_rtl;
bool in_symbolizer;
bool in_ignored_lib;
- bool is_alive;
+ bool is_dead;
bool is_freeing;
bool is_vptr_access;
const uptr stk_addr;
const uptr stk_size;
const uptr tls_addr;
const uptr tls_size;
+ ThreadContext *tctx;
- DeadlockDetector deadlock_detector;
+ InternalDeadlockDetector internal_deadlock_detector;
+ DDPhysicalThread *dd_pt;
+ DDLogicalThread *dd_lt;
- bool in_signal_handler;
+ atomic_uintptr_t in_signal_handler;
SignalContext *signal_ctx;
-#ifndef TSAN_GO
+ DenseSlabAllocCache block_cache;
+ DenseSlabAllocCache sync_cache;
+ DenseSlabAllocCache clock_cache;
+
+#ifndef SANITIZER_GO
u32 last_sleep_stack_id;
ThreadClock last_sleep_clock;
#endif
@@ -456,13 +386,13 @@ struct ThreadState {
int nomalloc;
explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
+ unsigned reuse_count,
uptr stk_addr, uptr stk_size,
uptr tls_addr, uptr tls_size);
};
-Context *CTX();
-
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
+__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
INLINE ThreadState *cur_thread() {
return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
@@ -474,11 +404,7 @@ class ThreadContext : public ThreadContextBase {
explicit ThreadContext(int tid);
~ThreadContext();
ThreadState *thr;
-#ifdef TSAN_GO
- StackTrace creation_stack;
-#else
u32 creation_stack_id;
-#endif
SyncClock sync;
// Epoch at which the thread had started.
// If we see an event from the thread stamped by an older epoch,
@@ -493,6 +419,7 @@ class ThreadContext : public ThreadContextBase {
void OnStarted(void *arg);
void OnCreated(void *arg);
void OnReset();
+ void OnDetached(void *arg);
};
struct RacyStacks {
@@ -521,20 +448,27 @@ struct Context {
Context();
bool initialized;
+ bool after_multithreaded_fork;
- SyncTab synctab;
+ MetaMap metamap;
Mutex report_mtx;
int nreported;
int nmissed_expected;
atomic_uint64_t last_symbolize_time_ns;
+ void *background_thread;
+ atomic_uint32_t stop_background_thread;
+
ThreadRegistry *thread_registry;
Vector<RacyStacks> racy_stacks;
Vector<RacyAddress> racy_addresses;
// Number of fired suppressions may be large enough.
InternalMmapVector<FiredSuppression> fired_suppressions;
+ DDetector *dd;
+
+ ClockAlloc clock_alloc;
Flags flags;
@@ -543,14 +477,20 @@ struct Context {
u64 int_alloc_siz[MBlockTypeCount];
};
-class ScopedInRtl {
- public:
- ScopedInRtl();
- ~ScopedInRtl();
- private:
- ThreadState*thr_;
- int in_rtl_;
- int errno_;
+extern Context *ctx; // The one and the only global runtime context.
+
+struct ScopedIgnoreInterceptors {
+ ScopedIgnoreInterceptors() {
+#ifndef SANITIZER_GO
+ cur_thread()->ignore_interceptors++;
+#endif
+ }
+
+ ~ScopedIgnoreInterceptors() {
+#ifndef SANITIZER_GO
+ cur_thread()->ignore_interceptors--;
+#endif
+ }
};
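
ScopedIgnoreInterceptors is an RAII guard: while one is alive on the current thread, intercepted libc calls are passed straight through to the real functions instead of being processed by the runtime. A hedged usage sketch (the function name is hypothetical, not part of the patch):

void SomeRuntimeHelper() {
  // Shield this scope from interceptor re-entry, e.g. while symbolizing,
  // the same way ScopedReport does via its ignore_interceptors_ member.
  ScopedIgnoreInterceptors ignore;
  // ... call code that may hit intercepted functions ...
}
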
class ScopedReport {
@@ -558,11 +498,14 @@ class ScopedReport {
explicit ScopedReport(ReportType typ);
~ScopedReport();
- void AddStack(const StackTrace *stack);
- void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack,
+ void AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
const MutexSet *mset);
- void AddThread(const ThreadContext *tctx);
+ void AddStack(StackTrace stack, bool suppressable = false);
+ void AddThread(const ThreadContext *tctx, bool suppressable = false);
+ void AddThread(int unique_tid, bool suppressable = false);
+ void AddUniqueTid(int unique_tid);
void AddMutex(const SyncVar *s);
+ u64 AddMutex(u64 id);
void AddLocation(uptr addr, uptr size);
void AddSleep(u32 stack_id);
void SetCount(int count);
@@ -570,16 +513,31 @@ class ScopedReport {
const ReportDesc *GetReport() const;
private:
- Context *ctx_;
ReportDesc *rep_;
+ // Symbolizer makes lots of intercepted calls. If we try to process them,
+ // at best it will cause deadlocks on internal mutexes.
+ ScopedIgnoreInterceptors ignore_interceptors_;
- void AddMutex(u64 id);
+ void AddDeadMutex(u64 id);
ScopedReport(const ScopedReport&);
void operator = (const ScopedReport&);
};
-void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset);
+void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
+ MutexSet *mset);
+
+template<typename StackTraceTy>
+void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack) {
+ uptr size = thr->shadow_stack_pos - thr->shadow_stack;
+ uptr start = 0;
+ if (size + !!toppc > kStackTraceMax) {
+ start = size + !!toppc - kStackTraceMax;
+ size = kStackTraceMax - !!toppc;
+ }
+ stack->Init(&thr->shadow_stack[start], size, toppc);
+}
+
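
ObtainCurrentStack caps the copied stack at kStackTraceMax entries, keeping the innermost frames and reserving one slot for the optional top pc. A small worked example of the truncation arithmetic, with values invented for illustration (not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kStackTraceMax = 256;   // assumed cap, for illustration
  uint64_t size = 300;                   // frames currently on the shadow stack
  uint64_t toppc = 0x1234;               // non-zero toppc needs one extra slot
  uint64_t start = 0;
  if (size + !!toppc > kStackTraceMax) {
    start = size + !!toppc - kStackTraceMax;   // drop the outermost frames
    size = kStackTraceMax - !!toppc;
  }
  assert(start == 45 && size == 255);    // 255 copied frames + toppc = 256 entries
  return 0;
}
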
void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
@@ -600,19 +558,16 @@ void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();
+void ForkBefore(ThreadState *thr, uptr pc);
+void ForkParentAfter(ThreadState *thr, uptr pc);
+void ForkChildAfter(ThreadState *thr, uptr pc);
+
void ReportRace(ThreadState *thr);
-bool OutputReport(Context *ctx,
- const ScopedReport &srep,
- const ReportStack *suppress_stack1 = 0,
- const ReportStack *suppress_stack2 = 0,
- const ReportLocation *suppress_loc = 0);
-bool IsFiredSuppression(Context *ctx,
- const ScopedReport &srep,
- const StackTrace &trace);
+bool OutputReport(ThreadState *thr, const ScopedReport &srep);
+bool IsFiredSuppression(Context *ctx, const ScopedReport &srep,
+ StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces();
-bool FrameIsInternal(const ReportStack *frame);
-ReportStack *SkipTsanInternalFrames(ReportStack *ent);
#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
@@ -627,15 +582,15 @@ ReportStack *SkipTsanInternalFrames(ReportStack *ent);
#endif
u32 CurrentStackId(ThreadState *thr, uptr pc);
+ReportStack *SymbolizeStackId(u32 stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
-void PrintCurrentStackSlow(); // uses libunwind
+void PrintCurrentStackSlow(uptr pc); // uses libunwind
void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);
-SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
- bool write_lock, bool create);
-SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr);
+void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
+void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
@@ -678,10 +633,10 @@ void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
-void ThreadIgnoreBegin(ThreadState *thr);
-void ThreadIgnoreEnd(ThreadState *thr);
-void ThreadIgnoreSyncBegin(ThreadState *thr);
-void ThreadIgnoreSyncEnd(ThreadState *thr);
+void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
+void ThreadIgnoreEnd(ThreadState *thr, uptr pc);
+void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
+void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc);
void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);
@@ -700,14 +655,21 @@ void ProcessPendingSignals(ThreadState *thr);
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
bool rw, bool recursive, bool linker_init);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
-void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1);
+void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1,
+ bool try_lock = false);
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false);
-void MutexReadLock(ThreadState *thr, uptr pc, uptr addr);
+void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool try_lock = false);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr); // call on EOWNERDEAD
void Acquire(ThreadState *thr, uptr pc, uptr addr);
+// AcquireGlobal synchronizes the current thread with all other threads.
+// In terms of happens-before relation, it draws a HB edge from all threads
+// (where they happen to execute right now) to the current thread. We use it to
+// handle Go finalizers. Namely, finalizer goroutine executes AcquireGlobal
+// right before executing finalizers. This provides a coarse, but simple
+// approximation of the actual required synchronization.
void AcquireGlobal(ThreadState *thr, uptr pc);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
@@ -728,11 +690,11 @@ void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
// so we create a reserve stack frame for it (1024b must be enough).
#define HACKY_CALL(f) \
__asm__ __volatile__("sub $1024, %%rsp;" \
- ".cfi_adjust_cfa_offset 1024;" \
+ CFI_INL_ADJUST_CFA_OFFSET(1024) \
".hidden " #f "_thunk;" \
"call " #f "_thunk;" \
"add $1024, %%rsp;" \
- ".cfi_adjust_cfa_offset -1024;" \
+ CFI_INL_ADJUST_CFA_OFFSET(-1024) \
::: "memory", "cc");
#else
#define HACKY_CALL(f) f()
@@ -747,13 +709,15 @@ Trace *ThreadTrace(int tid);
extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
EventType typ, u64 addr) {
+ if (!kCollectHistory)
+ return;
DCHECK_GE((int)typ, 0);
DCHECK_LE((int)typ, 7);
DCHECK_EQ(GetLsb(addr, 61), addr);
StatInc(thr, StatEvents);
u64 pos = fs.GetTracePos();
if (UNLIKELY((pos % kTracePartSize) == 0)) {
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
HACKY_CALL(__tsan_trace_switch);
#else
TraceSwitch(thr);
diff --git a/lib/tsan/rtl/tsan_rtl_amd64.S b/lib/tsan/rtl/tsan_rtl_amd64.S
index 11c75c72dbe5b..8db62f9013a3d 100644
--- a/lib/tsan/rtl/tsan_rtl_amd64.S
+++ b/lib/tsan/rtl/tsan_rtl_amd64.S
@@ -1,43 +1,44 @@
+#include "sanitizer_common/sanitizer_asm.h"
.section .text
.hidden __tsan_trace_switch
.globl __tsan_trace_switch_thunk
__tsan_trace_switch_thunk:
- .cfi_startproc
+ CFI_STARTPROC
# Save scratch registers.
push %rax
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset %rax, 0
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rax, 0)
push %rcx
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset %rcx, 0
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rcx, 0)
push %rdx
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset %rdx, 0
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdx, 0)
push %rsi
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset %rsi, 0
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rsi, 0)
push %rdi
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset %rdi, 0
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdi, 0)
push %r8
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset %r8, 0
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r8, 0)
push %r9
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset %r9, 0
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r9, 0)
push %r10
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset %r10, 0
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r10, 0)
push %r11
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset %r11, 0
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r11, 0)
# Align stack frame.
push %rbx # non-scratch
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset %rbx, 0
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rbx, 0)
mov %rsp, %rbx # save current rsp
- .cfi_def_cfa_register %rbx
+ CFI_DEF_CFA_REGISTER(%rbx)
shr $4, %rsp # clear 4 lsb, align to 16
shl $4, %rsp
@@ -45,79 +46,79 @@ __tsan_trace_switch_thunk:
# Unalign stack frame back.
mov %rbx, %rsp # restore the original rsp
- .cfi_def_cfa_register %rsp
+ CFI_DEF_CFA_REGISTER(%rsp)
pop %rbx
- .cfi_adjust_cfa_offset -8
+ CFI_ADJUST_CFA_OFFSET(-8)
# Restore scratch registers.
pop %r11
- .cfi_adjust_cfa_offset -8
+ CFI_ADJUST_CFA_OFFSET(-8)
pop %r10
- .cfi_adjust_cfa_offset -8
+ CFI_ADJUST_CFA_OFFSET(-8)
pop %r9
- .cfi_adjust_cfa_offset -8
+ CFI_ADJUST_CFA_OFFSET(-8)
pop %r8
- .cfi_adjust_cfa_offset -8
+ CFI_ADJUST_CFA_OFFSET(-8)
pop %rdi
- .cfi_adjust_cfa_offset -8
+ CFI_ADJUST_CFA_OFFSET(-8)
pop %rsi
- .cfi_adjust_cfa_offset -8
+ CFI_ADJUST_CFA_OFFSET(-8)
pop %rdx
- .cfi_adjust_cfa_offset -8
+ CFI_ADJUST_CFA_OFFSET(-8)
pop %rcx
- .cfi_adjust_cfa_offset -8
+ CFI_ADJUST_CFA_OFFSET(-8)
pop %rax
- .cfi_adjust_cfa_offset -8
- .cfi_restore %rax
- .cfi_restore %rbx
- .cfi_restore %rcx
- .cfi_restore %rdx
- .cfi_restore %rsi
- .cfi_restore %rdi
- .cfi_restore %r8
- .cfi_restore %r9
- .cfi_restore %r10
- .cfi_restore %r11
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rax)
+ CFI_RESTORE(%rbx)
+ CFI_RESTORE(%rcx)
+ CFI_RESTORE(%rdx)
+ CFI_RESTORE(%rsi)
+ CFI_RESTORE(%rdi)
+ CFI_RESTORE(%r8)
+ CFI_RESTORE(%r9)
+ CFI_RESTORE(%r10)
+ CFI_RESTORE(%r11)
ret
- .cfi_endproc
+ CFI_ENDPROC
.hidden __tsan_report_race
.globl __tsan_report_race_thunk
__tsan_report_race_thunk:
- .cfi_startproc
+ CFI_STARTPROC
# Save scratch registers.
push %rax
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset %rax, 0
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rax, 0)
push %rcx
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset %rcx, 0
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rcx, 0)
push %rdx
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset %rdx, 0
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdx, 0)
push %rsi
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset %rsi, 0
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rsi, 0)
push %rdi
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset %rdi, 0
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdi, 0)
push %r8
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset %r8, 0
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r8, 0)
push %r9
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset %r9, 0
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r9, 0)
push %r10
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset %r10, 0
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r10, 0)
push %r11
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset %r11, 0
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r11, 0)
# Align stack frame.
push %rbx # non-scratch
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset %rbx, 0
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rbx, 0)
mov %rsp, %rbx # save current rsp
- .cfi_def_cfa_register %rbx
+ CFI_DEF_CFA_REGISTER(%rbx)
shr $4, %rsp # clear 4 lsb, align to 16
shl $4, %rsp
@@ -125,179 +126,199 @@ __tsan_report_race_thunk:
# Unalign stack frame back.
mov %rbx, %rsp # restore the original rsp
- .cfi_def_cfa_register %rsp
+ CFI_DEF_CFA_REGISTER(%rsp)
pop %rbx
- .cfi_adjust_cfa_offset -8
+ CFI_ADJUST_CFA_OFFSET(-8)
# Restore scratch registers.
pop %r11
- .cfi_adjust_cfa_offset -8
+ CFI_ADJUST_CFA_OFFSET(-8)
pop %r10
- .cfi_adjust_cfa_offset -8
+ CFI_ADJUST_CFA_OFFSET(-8)
pop %r9
- .cfi_adjust_cfa_offset -8
+ CFI_ADJUST_CFA_OFFSET(-8)
pop %r8
- .cfi_adjust_cfa_offset -8
+ CFI_ADJUST_CFA_OFFSET(-8)
pop %rdi
- .cfi_adjust_cfa_offset -8
+ CFI_ADJUST_CFA_OFFSET(-8)
pop %rsi
- .cfi_adjust_cfa_offset -8
+ CFI_ADJUST_CFA_OFFSET(-8)
pop %rdx
- .cfi_adjust_cfa_offset -8
+ CFI_ADJUST_CFA_OFFSET(-8)
pop %rcx
- .cfi_adjust_cfa_offset -8
+ CFI_ADJUST_CFA_OFFSET(-8)
pop %rax
- .cfi_adjust_cfa_offset -8
- .cfi_restore %rax
- .cfi_restore %rbx
- .cfi_restore %rcx
- .cfi_restore %rdx
- .cfi_restore %rsi
- .cfi_restore %rdi
- .cfi_restore %r8
- .cfi_restore %r9
- .cfi_restore %r10
- .cfi_restore %r11
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rax)
+ CFI_RESTORE(%rbx)
+ CFI_RESTORE(%rcx)
+ CFI_RESTORE(%rdx)
+ CFI_RESTORE(%rsi)
+ CFI_RESTORE(%rdi)
+ CFI_RESTORE(%r8)
+ CFI_RESTORE(%r9)
+ CFI_RESTORE(%r10)
+ CFI_RESTORE(%r11)
ret
- .cfi_endproc
+ CFI_ENDPROC
.hidden __tsan_setjmp
.comm _ZN14__interception11real_setjmpE,8,8
.globl setjmp
.type setjmp, @function
setjmp:
- .cfi_startproc
+ CFI_STARTPROC
// save env parameter
push %rdi
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset %rdi, 0
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdi, 0)
// obtain %rsp
+#if defined(__FreeBSD__)
+ lea 8(%rsp), %rdi
+ mov %rdi, %rsi
+#else
lea 16(%rsp), %rdi
mov %rdi, %rsi
xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
rol $0x11, %rsi
+#endif
// call tsan interceptor
call __tsan_setjmp
// restore env parameter
pop %rdi
- .cfi_adjust_cfa_offset -8
- .cfi_restore %rdi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rdi)
// tail jump to libc setjmp
movl $0, %eax
movq _ZN14__interception11real_setjmpE@GOTPCREL(%rip), %rdx
jmp *(%rdx)
- .cfi_endproc
+ CFI_ENDPROC
.size setjmp, .-setjmp
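
The "magic mangling of rsp" above mirrors glibc's pointer guarding in setjmp on x86_64: the value is XORed with a per-thread guard word (read from %fs:0x30 in the thread control block) and rotated left by 0x11 before use, so the interceptor applies the same transform to the stack pointer it hands to __tsan_setjmp. A hedged C++ sketch of that transform (naming the %fs:0x30 slot as glibc's pointer guard is an assumption of this note, not something stated in the patch):

#include <cstdint>

static inline uint64_t RotateLeft(uint64_t v, unsigned n) {
  return (v << n) | (v >> (64 - n));
}

// Mirrors the assembly above: xor %fs:0x30, %rsi ; rol $0x11, %rsi
static inline uint64_t MangleStackPointer(uint64_t sp, uint64_t pointer_guard) {
  return RotateLeft(sp ^ pointer_guard, 0x11);
}
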
.comm _ZN14__interception12real__setjmpE,8,8
.globl _setjmp
.type _setjmp, @function
_setjmp:
- .cfi_startproc
+ CFI_STARTPROC
// save env parameter
push %rdi
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset %rdi, 0
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdi, 0)
// obtain %rsp
+#if defined(__FreeBSD__)
+ lea 8(%rsp), %rdi
+ mov %rdi, %rsi
+#else
lea 16(%rsp), %rdi
mov %rdi, %rsi
xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
rol $0x11, %rsi
+#endif
// call tsan interceptor
call __tsan_setjmp
// restore env parameter
pop %rdi
- .cfi_adjust_cfa_offset -8
- .cfi_restore %rdi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rdi)
// tail jump to libc setjmp
movl $0, %eax
movq _ZN14__interception12real__setjmpE@GOTPCREL(%rip), %rdx
jmp *(%rdx)
- .cfi_endproc
+ CFI_ENDPROC
.size _setjmp, .-_setjmp
.comm _ZN14__interception14real_sigsetjmpE,8,8
.globl sigsetjmp
.type sigsetjmp, @function
sigsetjmp:
- .cfi_startproc
+ CFI_STARTPROC
// save env parameter
push %rdi
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset %rdi, 0
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdi, 0)
// save savesigs parameter
push %rsi
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset %rsi, 0
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rsi, 0)
// align stack frame
sub $8, %rsp
- .cfi_adjust_cfa_offset 8
+ CFI_ADJUST_CFA_OFFSET(8)
// obtain %rsp
+#if defined(__FreeBSD__)
+ lea 24(%rsp), %rdi
+ mov %rdi, %rsi
+#else
lea 32(%rsp), %rdi
mov %rdi, %rsi
xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
rol $0x11, %rsi
+#endif
// call tsan interceptor
call __tsan_setjmp
// unalign stack frame
add $8, %rsp
- .cfi_adjust_cfa_offset -8
+ CFI_ADJUST_CFA_OFFSET(-8)
// restore savesigs parameter
pop %rsi
- .cfi_adjust_cfa_offset -8
- .cfi_restore %rsi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rsi)
// restore env parameter
pop %rdi
- .cfi_adjust_cfa_offset -8
- .cfi_restore %rdi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rdi)
// tail jump to libc sigsetjmp
movl $0, %eax
movq _ZN14__interception14real_sigsetjmpE@GOTPCREL(%rip), %rdx
jmp *(%rdx)
- .cfi_endproc
+ CFI_ENDPROC
.size sigsetjmp, .-sigsetjmp
.comm _ZN14__interception16real___sigsetjmpE,8,8
.globl __sigsetjmp
.type __sigsetjmp, @function
__sigsetjmp:
- .cfi_startproc
+ CFI_STARTPROC
// save env parameter
push %rdi
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset %rdi, 0
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdi, 0)
// save savesigs parameter
push %rsi
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset %rsi, 0
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rsi, 0)
// align stack frame
sub $8, %rsp
- .cfi_adjust_cfa_offset 8
+ CFI_ADJUST_CFA_OFFSET(8)
// obtain %rsp
+#if defined(__FreeBSD__)
+ lea 24(%rsp), %rdi
+ mov %rdi, %rsi
+#else
lea 32(%rsp), %rdi
mov %rdi, %rsi
xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
rol $0x11, %rsi
+#endif
// call tsan interceptor
call __tsan_setjmp
// unalign stack frame
add $8, %rsp
- .cfi_adjust_cfa_offset -8
+ CFI_ADJUST_CFA_OFFSET(-8)
// restore savesigs parameter
pop %rsi
- .cfi_adjust_cfa_offset -8
- .cfi_restore %rsi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rsi)
// restore env parameter
pop %rdi
- .cfi_adjust_cfa_offset -8
- .cfi_restore %rdi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rdi)
// tail jump to libc sigsetjmp
movl $0, %eax
movq _ZN14__interception16real___sigsetjmpE@GOTPCREL(%rip), %rdx
jmp *(%rdx)
- .cfi_endproc
+ CFI_ENDPROC
.size __sigsetjmp, .-__sigsetjmp
-#ifdef __linux__
+#if defined(__FreeBSD__) || defined(__linux__)
/* We do not need executable stack. */
.section .note.GNU-stack,"",@progbits
#endif
diff --git a/lib/tsan/rtl/tsan_rtl_mutex.cc b/lib/tsan/rtl/tsan_rtl_mutex.cc
index 409391678f013..ddf2b69bdbbcc 100644
--- a/lib/tsan/rtl/tsan_rtl_mutex.cc
+++ b/lib/tsan/rtl/tsan_rtl_mutex.cc
@@ -11,6 +11,9 @@
//
//===----------------------------------------------------------------------===//
+#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
+#include <sanitizer_common/sanitizer_stackdepot.h>
+
#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
@@ -20,10 +23,51 @@
namespace __tsan {
+void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
+
+struct Callback : DDCallback {
+ ThreadState *thr;
+ uptr pc;
+
+ Callback(ThreadState *thr, uptr pc)
+ : thr(thr)
+ , pc(pc) {
+ DDCallback::pt = thr->dd_pt;
+ DDCallback::lt = thr->dd_lt;
+ }
+
+ virtual u32 Unwind() {
+ return CurrentStackId(thr, pc);
+ }
+ virtual int UniqueTid() {
+ return thr->unique_id;
+ }
+};
+
+void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexInit(&cb, &s->dd);
+ s->dd.ctx = s->GetId();
+}
+
+static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
+ uptr addr, u64 mid) {
+ // In Go, these misuses are either impossible, or detected by std lib,
+ // or false positives (e.g. unlock in a different thread).
+ if (kGoMode)
+ return;
+ ThreadRegistryLock l(ctx->thread_registry);
+ ScopedReport rep(typ);
+ rep.AddMutex(mid);
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ rep.AddStack(trace, true);
+ rep.AddLocation(addr, 1);
+ OutputReport(thr, rep);
+}
+
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
bool rw, bool recursive, bool linker_init) {
- Context *ctx = CTX();
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
StatInc(thr, StatMutexCreate);
if (!linker_init && IsAppMem(addr)) {
@@ -32,71 +76,92 @@ void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
MemoryWrite(thr, pc, addr, kSizeLog1);
thr->is_freeing = false;
}
- SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
s->is_rw = rw;
s->is_recursive = recursive;
s->is_linker_init = linker_init;
+ if (kCppMode && s->creation_stack_id == 0)
+ s->creation_stack_id = CurrentStackId(thr, pc);
s->mtx.Unlock();
}
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
- Context *ctx = CTX();
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
StatInc(thr, StatMutexDestroy);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
// Global mutexes not marked as LINKER_INITIALIZED
// cause tons of not interesting reports, so just ignore it.
if (IsGlobalVar(addr))
return;
#endif
- SyncVar *s = ctx->synctab.GetAndRemove(thr, pc, addr);
- if (s == 0)
- return;
if (IsAppMem(addr)) {
CHECK(!thr->is_freeing);
thr->is_freeing = true;
MemoryWrite(thr, pc, addr, kSizeLog1);
thr->is_freeing = false;
}
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
+ if (s == 0)
+ return;
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexDestroy(&cb, &s->dd);
+ ctx->dd->MutexInit(&cb, &s->dd);
+ }
+ bool unlock_locked = false;
if (flags()->report_destroy_locked
&& s->owner_tid != SyncVar::kInvalidTid
&& !s->is_broken) {
s->is_broken = true;
+ unlock_locked = true;
+ }
+ u64 mid = s->GetId();
+ u32 last_lock = s->last_lock;
+ if (!unlock_locked)
+ s->Reset(thr); // must not reset it before the report is printed
+ s->mtx.Unlock();
+ if (unlock_locked) {
ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(ReportTypeMutexDestroyLocked);
- rep.AddMutex(s);
- StackTrace trace;
- trace.ObtainCurrent(thr, pc);
- rep.AddStack(&trace);
- FastState last(s->last_lock);
+ rep.AddMutex(mid);
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ rep.AddStack(trace);
+ FastState last(last_lock);
RestoreStack(last.tid(), last.epoch(), &trace, 0);
- rep.AddStack(&trace);
- rep.AddLocation(s->addr, 1);
- OutputReport(ctx, rep);
+ rep.AddStack(trace, true);
+ rep.AddLocation(addr, 1);
+ OutputReport(thr, rep);
+ }
+ if (unlock_locked) {
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
+ if (s != 0) {
+ s->Reset(thr);
+ s->mtx.Unlock();
+ }
}
- thr->mset.Remove(s->GetId());
- DestroyAndFree(s);
+ thr->mset.Remove(mid);
+ // s will be destroyed and freed in MetaMap::FreeBlock.
}
-void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) {
- CHECK_GT(thr->in_rtl, 0);
+void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
CHECK_GT(rec, 0);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
+ bool report_double_lock = false;
if (s->owner_tid == SyncVar::kInvalidTid) {
CHECK_EQ(s->recursion, 0);
s->owner_tid = thr->tid;
s->last_lock = thr->fast_state.raw();
} else if (s->owner_tid == thr->tid) {
CHECK_GT(s->recursion, 0);
- } else {
- Printf("ThreadSanitizer WARNING: double lock of mutex %p\n", addr);
- PrintCurrentStack(thr, pc);
+ } else if (flags()->report_mutex_bugs && !s->is_broken) {
+ s->is_broken = true;
+ report_double_lock = true;
}
if (s->recursion == 0) {
StatInc(thr, StatMutexLock);
@@ -107,30 +172,36 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) {
}
s->recursion += rec;
thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
+ if (common_flags()->detect_deadlocks && (s->recursion - rec) == 0) {
+ Callback cb(thr, pc);
+ if (!try_lock)
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
+ ctx->dd->MutexAfterLock(&cb, &s->dd, true, try_lock);
+ }
+ u64 mid = s->GetId();
s->mtx.Unlock();
+ // Can't touch s after this point.
+ if (report_double_lock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
}
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
int rec = 0;
- if (s->recursion == 0) {
- if (!s->is_broken) {
+ bool report_bad_unlock = false;
+ if (kCppMode && (s->recursion == 0 || s->owner_tid != thr->tid)) {
+ if (flags()->report_mutex_bugs && !s->is_broken) {
s->is_broken = true;
- Printf("ThreadSanitizer WARNING: unlock of unlocked mutex %p\n", addr);
- PrintCurrentStack(thr, pc);
- }
- } else if (s->owner_tid != thr->tid) {
- if (!s->is_broken) {
- s->is_broken = true;
- Printf("ThreadSanitizer WARNING: mutex %p is unlocked by wrong thread\n",
- addr);
- PrintCurrentStack(thr, pc);
+ report_bad_unlock = true;
}
} else {
rec = all ? s->recursion : 1;
@@ -144,56 +215,97 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
}
}
thr->mset.Del(s->GetId(), true);
+ if (common_flags()->detect_deadlocks && s->recursion == 0 &&
+ !report_bad_unlock) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
+ }
+ u64 mid = s->GetId();
s->mtx.Unlock();
+ // Can't touch s after this point.
+ if (report_bad_unlock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
+ if (common_flags()->detect_deadlocks && !report_bad_unlock) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
return rec;
}
-void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) {
- CHECK_GT(thr->in_rtl, 0);
+void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool trylock) {
DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
StatInc(thr, StatMutexReadLock);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
+ bool report_bad_lock = false;
if (s->owner_tid != SyncVar::kInvalidTid) {
- Printf("ThreadSanitizer WARNING: read lock of a write locked mutex %p\n",
- addr);
- PrintCurrentStack(thr, pc);
+ if (flags()->report_mutex_bugs && !s->is_broken) {
+ s->is_broken = true;
+ report_bad_lock = true;
+ }
}
AcquireImpl(thr, pc, &s->clock);
s->last_lock = thr->fast_state.raw();
thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
+ if (common_flags()->detect_deadlocks && s->recursion == 0) {
+ Callback cb(thr, pc);
+ if (!trylock)
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
+ ctx->dd->MutexAfterLock(&cb, &s->dd, false, trylock);
+ }
+ u64 mid = s->GetId();
s->mtx.ReadUnlock();
+ // Can't touch s after this point.
+ if (report_bad_lock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
}
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
StatInc(thr, StatMutexReadUnlock);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
+ bool report_bad_unlock = false;
if (s->owner_tid != SyncVar::kInvalidTid) {
- Printf("ThreadSanitizer WARNING: read unlock of a write locked mutex %p\n",
- addr);
- PrintCurrentStack(thr, pc);
+ if (flags()->report_mutex_bugs && !s->is_broken) {
+ s->is_broken = true;
+ report_bad_unlock = true;
+ }
}
ReleaseImpl(thr, pc, &s->read_clock);
+ if (common_flags()->detect_deadlocks && s->recursion == 0) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
+ }
+ u64 mid = s->GetId();
s->mtx.Unlock();
- thr->mset.Del(s->GetId(), false);
+ // Can't touch s after this point.
+ thr->mset.Del(mid, false);
+ if (report_bad_unlock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
}
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
bool write = true;
+ bool report_bad_unlock = false;
if (s->owner_tid == SyncVar::kInvalidTid) {
// Seems to be read unlock.
write = false;
@@ -216,30 +328,37 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
}
} else if (!s->is_broken) {
s->is_broken = true;
- Printf("ThreadSanitizer WARNING: mutex %p is unlock by wrong thread\n",
- addr);
- PrintCurrentStack(thr, pc);
+ report_bad_unlock = true;
}
thr->mset.Del(s->GetId(), write);
+ if (common_flags()->detect_deadlocks && s->recursion == 0) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
+ }
+ u64 mid = s->GetId();
s->mtx.Unlock();
+ // Can't touch s after this point.
+ if (report_bad_unlock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
}
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
- Context *ctx = CTX();
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
- SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
s->owner_tid = SyncVar::kInvalidTid;
s->recursion = 0;
s->mtx.Unlock();
}
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
AcquireImpl(thr, pc, &s->clock);
s->mtx.ReadUnlock();
}
@@ -257,17 +376,16 @@ void AcquireGlobal(ThreadState *thr, uptr pc) {
DPrintf("#%d: AcquireGlobal\n", thr->tid);
if (thr->ignore_sync)
return;
- ThreadRegistryLock l(CTX()->thread_registry);
- CTX()->thread_registry->RunCallbackForEachThreadLocked(
+ ThreadRegistryLock l(ctx->thread_registry);
+ ctx->thread_registry->RunCallbackForEachThreadLocked(
UpdateClockCallback, thr);
}
void Release(ThreadState *thr, uptr pc, uptr addr) {
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: Release %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -276,11 +394,10 @@ void Release(ThreadState *thr, uptr pc, uptr addr) {
}
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -288,7 +405,7 @@ void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
s->mtx.Unlock();
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
@@ -303,8 +420,8 @@ void AfterSleep(ThreadState *thr, uptr pc) {
if (thr->ignore_sync)
return;
thr->last_sleep_stack_id = CurrentStackId(thr, pc);
- ThreadRegistryLock l(CTX()->thread_registry);
- CTX()->thread_registry->RunCallbackForEachThreadLocked(
+ ThreadRegistryLock l(ctx->thread_registry);
+ ctx->thread_registry->RunCallbackForEachThreadLocked(
UpdateSleepClockCallback, thr);
}
#endif
@@ -312,37 +429,63 @@ void AfterSleep(ThreadState *thr, uptr pc) {
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
if (thr->ignore_sync)
return;
- thr->clock.set(thr->tid, thr->fast_state.epoch());
- thr->clock.acquire(c);
+ thr->clock.set(thr->fast_state.epoch());
+ thr->clock.acquire(&thr->clock_cache, c);
StatInc(thr, StatSyncAcquire);
}
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
if (thr->ignore_sync)
return;
- thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.release(c);
+ thr->clock.release(&thr->clock_cache, c);
StatInc(thr, StatSyncRelease);
}
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
if (thr->ignore_sync)
return;
- thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.ReleaseStore(c);
+ thr->clock.ReleaseStore(&thr->clock_cache, c);
StatInc(thr, StatSyncRelease);
}
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
if (thr->ignore_sync)
return;
- thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.acq_rel(c);
+ thr->clock.acq_rel(&thr->clock_cache, c);
StatInc(thr, StatSyncAcquire);
StatInc(thr, StatSyncRelease);
}
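All four *Impl helpers follow the same vector-clock discipline: record the thread's own epoch, then merge from (acquire) or publish into (release) the SyncClock, now going through the per-thread clock_cache. A small worked illustration with invented epoch values, not runtime code:

    // Two threads T0, T1 and one SyncClock c.
    // T0 clock = [7, 0]; T0 releases into c   -> c becomes [7, 0]
    // T1 clock = [0, 3]; T1 acquires from c   -> T1 clock becomes [7, 3]
    // Every T0 access at epoch <= 7 now happens-before T1's later accesses,
    // so those pairs are no longer reported as races.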
+void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
+ if (r == 0)
+ return;
+ ThreadRegistryLock l(ctx->thread_registry);
+ ScopedReport rep(ReportTypeDeadlock);
+ for (int i = 0; i < r->n; i++) {
+ rep.AddMutex(r->loop[i].mtx_ctx0);
+ rep.AddUniqueTid((int)r->loop[i].thr_ctx);
+ rep.AddThread((int)r->loop[i].thr_ctx);
+ }
+ uptr dummy_pc = 0x42;
+ for (int i = 0; i < r->n; i++) {
+ for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
+ u32 stk = r->loop[i].stk[j];
+ if (stk) {
+ rep.AddStack(StackDepotGet(stk), true);
+ } else {
+ // Sometimes we fail to extract the stack trace (FIXME: investigate),
+ // but we should still produce some stack trace in the report.
+ rep.AddStack(StackTrace(&dummy_pc, 1), true);
+ }
+ }
+ }
+ OutputReport(thr, rep);
+}
+
} // namespace __tsan
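ReportDeadlock above converts a DDReport lock-order cycle into an ordinary TSan report. A hedged illustration of what r->loop encodes for a simple two-thread deadlock (field names come from the hunk; the scenario is invented):

    // n == 2:
    //   loop[0]: thr_ctx = A, mtx_ctx0 = M1, stk = {A acquired M1, A requests M2}
    //   loop[1]: thr_ctx = B, mtx_ctx0 = M2, stk = {B acquired M2, B requests M1}
    // Each edge contributes one mutex, one thread and up to two stacks;
    // flags()->second_deadlock_stack decides whether both stacks are kept.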
diff --git a/lib/tsan/rtl/tsan_rtl_report.cc b/lib/tsan/rtl/tsan_rtl_report.cc
index 4fed43faf25f4..0481b23b7be01 100644
--- a/lib/tsan/rtl/tsan_rtl_report.cc
+++ b/lib/tsan/rtl/tsan_rtl_report.cc
@@ -30,15 +30,18 @@ namespace __tsan {
using namespace __sanitizer; // NOLINT
-static ReportStack *SymbolizeStack(const StackTrace& trace);
+static ReportStack *SymbolizeStack(StackTrace trace);
void TsanCheckFailed(const char *file, int line, const char *cond,
u64 v1, u64 v2) {
- ScopedInRtl in_rtl;
+ // There is a high probability that interceptors will check-fail as well;
+ // on the other hand, there is no sense in processing interceptors
+ // since we are going to die soon.
+ ScopedIgnoreInterceptors ignore;
Printf("FATAL: ThreadSanitizer CHECK failed: "
"%s:%d \"%s\" (0x%zx, 0x%zx)\n",
file, line, cond, (uptr)v1, (uptr)v2);
- PrintCurrentStackSlow();
+ PrintCurrentStackSlow(StackTrace::GetCurrentPc());
Die();
}
@@ -53,108 +56,113 @@ bool WEAK OnReport(const ReportDesc *rep, bool suppressed) {
}
#endif
-static void StackStripMain(ReportStack *stack) {
- ReportStack *last_frame = 0;
- ReportStack *last_frame2 = 0;
- const char *prefix = "__interceptor_";
- uptr prefix_len = internal_strlen(prefix);
- const char *path_prefix = flags()->strip_path_prefix;
- uptr path_prefix_len = internal_strlen(path_prefix);
- char *pos;
- for (ReportStack *ent = stack; ent; ent = ent->next) {
- if (ent->func && 0 == internal_strncmp(ent->func, prefix, prefix_len))
- ent->func += prefix_len;
- if (ent->file && (pos = internal_strstr(ent->file, path_prefix)))
- ent->file = pos + path_prefix_len;
- if (ent->file && ent->file[0] == '.' && ent->file[1] == '/')
- ent->file += 2;
+static void StackStripMain(SymbolizedStack *frames) {
+ SymbolizedStack *last_frame = nullptr;
+ SymbolizedStack *last_frame2 = nullptr;
+ for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
last_frame2 = last_frame;
- last_frame = ent;
+ last_frame = cur;
}
if (last_frame2 == 0)
return;
- const char *last = last_frame->func;
-#ifndef TSAN_GO
- const char *last2 = last_frame2->func;
+#ifndef SANITIZER_GO
+ const char *last = last_frame->info.function;
+ const char *last2 = last_frame2->info.function;
// Strip frame above 'main'
if (last2 && 0 == internal_strcmp(last2, "main")) {
- last_frame2->next = 0;
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
// Strip our internal thread start routine.
} else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
- last_frame2->next = 0;
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
// Strip global ctors init.
} else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
- last_frame2->next = 0;
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
// If both are 0, then we probably just failed to symbolize.
} else if (last || last2) {
// Ensure that we recovered stack completely. Trimmed stack
// can actually happen if we do not instrument some code,
// so it's only a debug print. However we must try hard to not miss it
// due to our fault.
- DPrintf("Bottom stack frame of stack %zx is missed\n", stack->pc);
+ DPrintf("Bottom stack frame of stack %zx is missed\n", stack->info.address);
}
#else
// The last frame always points into the runtime (gosched0, goexit0, runtime.main).
- last_frame2->next = 0;
- (void)last;
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
#endif
}
-static ReportStack *SymbolizeStack(const StackTrace& trace) {
- if (trace.IsEmpty())
+ReportStack *SymbolizeStackId(u32 stack_id) {
+ if (stack_id == 0)
return 0;
- ReportStack *stack = 0;
- for (uptr si = 0; si < trace.Size(); si++) {
- const uptr pc = trace.Get(si);
-#ifndef TSAN_GO
- // We obtain the return address, that is, address of the next instruction,
- // so offset it by 1 byte.
- const uptr pc1 = __sanitizer::StackTrace::GetPreviousInstructionPc(pc);
+ StackTrace stack = StackDepotGet(stack_id);
+ if (stack.trace == nullptr)
+ return nullptr;
+ return SymbolizeStack(stack);
+}
+
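SymbolizeStackId is a small wrapper around the stack-depot round trip that older call sites spelled out by hand. Roughly, with the storing side shown only for context (the StackDepotPut call is an assumption about the producer, not part of this hunk):

    u32 id = StackDepotPut(trace);        // producer: store the pcs, keep a 32-bit id
    StackTrace st = StackDepotGet(id);    // consumer: recover the pcs for a report
    ReportStack *rs = st.trace ? SymbolizeStack(st) : nullptr;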
+static ReportStack *SymbolizeStack(StackTrace trace) {
+ if (trace.size == 0)
+ return 0;
+ SymbolizedStack *top = nullptr;
+ for (uptr si = 0; si < trace.size; si++) {
+ const uptr pc = trace.trace[si];
+ uptr pc1 = pc;
+#ifndef SANITIZER_GO
+ // We obtain the return address, but we're interested in the previous
+ // instruction.
+ if ((pc & kExternalPCBit) == 0)
+ pc1 = StackTrace::GetPreviousInstructionPc(pc);
#else
// FIXME(dvyukov): Go sometimes uses address of a function as top pc.
- uptr pc1 = pc;
- if (si != trace.Size() - 1)
+ if (si != trace.size - 1)
pc1 -= 1;
#endif
- ReportStack *ent = SymbolizeCode(pc1);
+ SymbolizedStack *ent = SymbolizeCode(pc1);
CHECK_NE(ent, 0);
- ReportStack *last = ent;
+ SymbolizedStack *last = ent;
while (last->next) {
- last->pc = pc; // restore original pc for report
+ last->info.address = pc; // restore original pc for report
last = last->next;
}
- last->pc = pc; // restore original pc for report
- last->next = stack;
- stack = ent;
+ last->info.address = pc; // restore original pc for report
+ last->next = top;
+ top = ent;
}
- StackStripMain(stack);
+ StackStripMain(top);
+
+ ReportStack *stack = ReportStack::New();
+ stack->frames = top;
return stack;
}
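The pc1 adjustment in SymbolizeStack exists because the unwinder records return addresses. A short illustration with invented addresses:

    //   0x401000  call Foo         <- the frame should be attributed to this line
    //   0x401005  mov  rax, rbx    <- the recorded return address points here
    // Symbolizing the previous instruction keeps the report on the call site;
    // pcs tagged with kExternalPCBit are synthetic ids rather than code
    // addresses and are passed through unchanged.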
ScopedReport::ScopedReport(ReportType typ) {
- ctx_ = CTX();
- ctx_->thread_registry->CheckLocked();
+ ctx->thread_registry->CheckLocked();
void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
rep_ = new(mem) ReportDesc;
rep_->typ = typ;
- ctx_->report_mtx.Lock();
+ ctx->report_mtx.Lock();
CommonSanitizerReportMutex.Lock();
}
ScopedReport::~ScopedReport() {
CommonSanitizerReportMutex.Unlock();
- ctx_->report_mtx.Unlock();
+ ctx->report_mtx.Unlock();
DestroyAndFree(rep_);
}
-void ScopedReport::AddStack(const StackTrace *stack) {
+void ScopedReport::AddStack(StackTrace stack, bool suppressable) {
ReportStack **rs = rep_->stacks.PushBack();
- *rs = SymbolizeStack(*stack);
+ *rs = SymbolizeStack(stack);
+ (*rs)->suppressable = suppressable;
}
-void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
- const StackTrace *stack, const MutexSet *mset) {
+void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
+ const MutexSet *mset) {
void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
ReportMop *mop = new(mem) ReportMop;
rep_->mops.PushBack(mop);
@@ -163,30 +171,22 @@ void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
mop->size = s.size();
mop->write = s.IsWrite();
mop->atomic = s.IsAtomic();
- mop->stack = SymbolizeStack(*stack);
+ mop->stack = SymbolizeStack(stack);
+ if (mop->stack)
+ mop->stack->suppressable = true;
for (uptr i = 0; i < mset->Size(); i++) {
MutexSet::Desc d = mset->Get(i);
- u64 uid = 0;
- uptr addr = SyncVar::SplitId(d.id, &uid);
- SyncVar *s = ctx_->synctab.GetIfExistsAndLock(addr, false);
- // Check that the mutex is still alive.
- // Another mutex can be created at the same address,
- // so check uid as well.
- if (s && s->CheckId(uid)) {
- ReportMopMutex mtx = {s->uid, d.write};
- mop->mset.PushBack(mtx);
- AddMutex(s);
- } else {
- ReportMopMutex mtx = {d.id, d.write};
- mop->mset.PushBack(mtx);
- AddMutex(d.id);
- }
- if (s)
- s->mtx.ReadUnlock();
+ u64 mid = this->AddMutex(d.id);
+ ReportMopMutex mtx = {mid, d.write};
+ mop->mset.PushBack(mtx);
}
}
-void ScopedReport::AddThread(const ThreadContext *tctx) {
+void ScopedReport::AddUniqueTid(int unique_tid) {
+ rep_->unique_tids.PushBack(unique_tid);
+}
+
+void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) {
for (uptr i = 0; i < rep_->threads.Size(); i++) {
if ((u32)rep_->threads[i]->id == tctx->tid)
return;
@@ -197,25 +197,16 @@ void ScopedReport::AddThread(const ThreadContext *tctx) {
rt->id = tctx->tid;
rt->pid = tctx->os_id;
rt->running = (tctx->status == ThreadStatusRunning);
- rt->name = tctx->name ? internal_strdup(tctx->name) : 0;
+ rt->name = internal_strdup(tctx->name);
rt->parent_tid = tctx->parent_tid;
rt->stack = 0;
-#ifdef TSAN_GO
- rt->stack = SymbolizeStack(tctx->creation_stack);
-#else
- uptr ssz = 0;
- const uptr *stack = StackDepotGet(tctx->creation_stack_id, &ssz);
- if (stack) {
- StackTrace trace;
- trace.Init(stack, ssz);
- rt->stack = SymbolizeStack(trace);
- }
-#endif
+ rt->stack = SymbolizeStackId(tctx->creation_stack_id);
+ if (rt->stack)
+ rt->stack->suppressable = suppressable;
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
static ThreadContext *FindThreadByUidLocked(int unique_id) {
- Context *ctx = CTX();
ctx->thread_registry->CheckLocked();
for (unsigned i = 0; i < kMaxTid; i++) {
ThreadContext *tctx = static_cast<ThreadContext*>(
@@ -228,7 +219,6 @@ static ThreadContext *FindThreadByUidLocked(int unique_id) {
}
static ThreadContext *FindThreadByTidLocked(int tid) {
- Context *ctx = CTX();
ctx->thread_registry->CheckLocked();
return static_cast<ThreadContext*>(
ctx->thread_registry->GetThreadLocked(tid));
@@ -246,7 +236,6 @@ static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
}
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
- Context *ctx = CTX();
ctx->thread_registry->CheckLocked();
ThreadContext *tctx = static_cast<ThreadContext*>(
ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
@@ -260,6 +249,12 @@ ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
}
#endif
+void ScopedReport::AddThread(int unique_tid, bool suppressable) {
+#ifndef SANITIZER_GO
+ AddThread(FindThreadByUidLocked(unique_tid), suppressable);
+#endif
+}
+
void ScopedReport::AddMutex(const SyncVar *s) {
for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
if (rep_->mutexes[i]->id == s->uid)
@@ -269,20 +264,31 @@ void ScopedReport::AddMutex(const SyncVar *s) {
ReportMutex *rm = new(mem) ReportMutex();
rep_->mutexes.PushBack(rm);
rm->id = s->uid;
+ rm->addr = s->addr;
rm->destroyed = false;
- rm->stack = 0;
-#ifndef TSAN_GO
- uptr ssz = 0;
- const uptr *stack = StackDepotGet(s->creation_stack_id, &ssz);
- if (stack) {
- StackTrace trace;
- trace.Init(stack, ssz);
- rm->stack = SymbolizeStack(trace);
- }
-#endif
+ rm->stack = SymbolizeStackId(s->creation_stack_id);
}
-void ScopedReport::AddMutex(u64 id) {
+u64 ScopedReport::AddMutex(u64 id) {
+ u64 uid = 0;
+ u64 mid = id;
+ uptr addr = SyncVar::SplitId(id, &uid);
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
+ // Check that the mutex is still alive.
+ // Another mutex can be created at the same address,
+ // so check uid as well.
+ if (s && s->CheckId(uid)) {
+ mid = s->uid;
+ AddMutex(s);
+ } else {
+ AddDeadMutex(id);
+ }
+ if (s)
+ s->mtx.Unlock();
+ return mid;
+}
+
+void ScopedReport::AddDeadMutex(u64 id) {
for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
if (rep_->mutexes[i]->id == id)
return;
@@ -291,6 +297,7 @@ void ScopedReport::AddMutex(u64 id) {
ReportMutex *rm = new(mem) ReportMutex();
rep_->mutexes.PushBack(rm);
rm->id = id;
+ rm->addr = 0;
rm->destroyed = true;
rm->stack = 0;
}
@@ -298,82 +305,59 @@ void ScopedReport::AddMutex(u64 id) {
void ScopedReport::AddLocation(uptr addr, uptr size) {
if (addr == 0)
return;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
int fd = -1;
int creat_tid = -1;
u32 creat_stack = 0;
- if (FdLocation(addr, &fd, &creat_tid, &creat_stack)
- || FdLocation(AlternativeAddress(addr), &fd, &creat_tid, &creat_stack)) {
- void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
- ReportLocation *loc = new(mem) ReportLocation();
- rep_->locs.PushBack(loc);
- loc->type = ReportLocationFD;
+ if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
+ ReportLocation *loc = ReportLocation::New(ReportLocationFD);
loc->fd = fd;
loc->tid = creat_tid;
- uptr ssz = 0;
- const uptr *stack = StackDepotGet(creat_stack, &ssz);
- if (stack) {
- StackTrace trace;
- trace.Init(stack, ssz);
- loc->stack = SymbolizeStack(trace);
- }
+ loc->stack = SymbolizeStackId(creat_stack);
+ rep_->locs.PushBack(loc);
ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
if (tctx)
AddThread(tctx);
return;
}
MBlock *b = 0;
- if (allocator()->PointerIsMine((void*)addr)
- && (b = user_mblock(0, (void*)addr))) {
- ThreadContext *tctx = FindThreadByTidLocked(b->Tid());
- void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
- ReportLocation *loc = new(mem) ReportLocation();
+ Allocator *a = allocator();
+ if (a->PointerIsMine((void*)addr)) {
+ void *block_begin = a->GetBlockBegin((void*)addr);
+ if (block_begin)
+ b = ctx->metamap.GetBlock((uptr)block_begin);
+ }
+ if (b != 0) {
+ ThreadContext *tctx = FindThreadByTidLocked(b->tid);
+ ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
+ loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
+ loc->heap_chunk_size = b->siz;
+ loc->tid = tctx ? tctx->tid : b->tid;
+ loc->stack = SymbolizeStackId(b->stk);
rep_->locs.PushBack(loc);
- loc->type = ReportLocationHeap;
- loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr);
- loc->size = b->Size();
- loc->tid = tctx ? tctx->tid : b->Tid();
- loc->name = 0;
- loc->file = 0;
- loc->line = 0;
- loc->stack = 0;
- uptr ssz = 0;
- const uptr *stack = StackDepotGet(b->StackId(), &ssz);
- if (stack) {
- StackTrace trace;
- trace.Init(stack, ssz);
- loc->stack = SymbolizeStack(trace);
- }
if (tctx)
AddThread(tctx);
return;
}
bool is_stack = false;
if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
- void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
- ReportLocation *loc = new(mem) ReportLocation();
- rep_->locs.PushBack(loc);
- loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
+ ReportLocation *loc =
+ ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
loc->tid = tctx->tid;
+ rep_->locs.PushBack(loc);
AddThread(tctx);
}
- ReportLocation *loc = SymbolizeData(addr);
- if (loc) {
+ if (ReportLocation *loc = SymbolizeData(addr)) {
+ loc->suppressable = true;
rep_->locs.PushBack(loc);
return;
}
#endif
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
void ScopedReport::AddSleep(u32 stack_id) {
- uptr ssz = 0;
- const uptr *stack = StackDepotGet(stack_id, &ssz);
- if (stack) {
- StackTrace trace;
- trace.Init(stack, ssz);
- rep_->sleep = SymbolizeStack(trace);
- }
+ rep_->sleep = SymbolizeStackId(stack_id);
}
#endif
@@ -385,11 +369,11 @@ const ReportDesc *ScopedReport::GetReport() const {
return rep_;
}
-void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
+void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
+ MutexSet *mset) {
// This function restores stack trace and mutex set for the thread/epoch.
// It does so by getting stack trace and mutex set at the beginning of
// trace part, and then replaying the trace till the given epoch.
- Context *ctx = CTX();
ctx->thread_registry->CheckLocked();
ThreadContext *tctx = static_cast<ThreadContext*>(
ctx->thread_registry->GetThreadLocked(tid));
@@ -411,13 +395,13 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
InternalScopedBuffer<uptr> stack(kShadowStackSize);
- for (uptr i = 0; i < hdr->stack0.Size(); i++) {
- stack[i] = hdr->stack0.Get(i);
+ for (uptr i = 0; i < hdr->stack0.size; i++) {
+ stack[i] = hdr->stack0.trace[i];
DPrintf2(" #%02lu: pc=%zx\n", i, stack[i]);
}
if (mset)
*mset = hdr->mset0;
- uptr pos = hdr->stack0.Size();
+ uptr pos = hdr->stack0.size;
Event *events = (Event*)GetThreadTrace(tid);
for (uptr i = ebegin; i <= eend; i++) {
Event ev = events[i];
@@ -452,14 +436,13 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
stk->Init(stack.data(), pos);
}
-static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
- uptr addr_min, uptr addr_max) {
- Context *ctx = CTX();
+static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
+ uptr addr_min, uptr addr_max) {
bool equal_stack = false;
RacyStacks hash;
if (flags()->suppress_equal_stacks) {
- hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
- hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
+ hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
+ hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
if (hash == ctx->racy_stacks[i]) {
DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
@@ -492,13 +475,12 @@ static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
return false;
}
-static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
- uptr addr_min, uptr addr_max) {
- Context *ctx = CTX();
+static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
+ uptr addr_min, uptr addr_max) {
if (flags()->suppress_equal_stacks) {
RacyStacks hash;
- hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
- hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
+ hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
+ hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
ctx->racy_stacks.PushBack(hash);
}
if (flags()->suppress_equal_addresses) {
@@ -507,25 +489,31 @@ static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
}
}
-bool OutputReport(Context *ctx,
- const ScopedReport &srep,
- const ReportStack *suppress_stack1,
- const ReportStack *suppress_stack2,
- const ReportLocation *suppress_loc) {
+bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
atomic_store(&ctx->last_symbolize_time_ns, NanoTime(), memory_order_relaxed);
const ReportDesc *rep = srep.GetReport();
Suppression *supp = 0;
- uptr suppress_pc = IsSuppressed(rep->typ, suppress_stack1, &supp);
- if (suppress_pc == 0)
- suppress_pc = IsSuppressed(rep->typ, suppress_stack2, &supp);
- if (suppress_pc == 0)
- suppress_pc = IsSuppressed(rep->typ, suppress_loc, &supp);
+ uptr suppress_pc = 0;
+ for (uptr i = 0; suppress_pc == 0 && i < rep->mops.Size(); i++)
+ suppress_pc = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
+ for (uptr i = 0; suppress_pc == 0 && i < rep->stacks.Size(); i++)
+ suppress_pc = IsSuppressed(rep->typ, rep->stacks[i], &supp);
+ for (uptr i = 0; suppress_pc == 0 && i < rep->threads.Size(); i++)
+ suppress_pc = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
+ for (uptr i = 0; suppress_pc == 0 && i < rep->locs.Size(); i++)
+ suppress_pc = IsSuppressed(rep->typ, rep->locs[i], &supp);
if (suppress_pc != 0) {
FiredSuppression s = {srep.GetReport()->typ, suppress_pc, supp};
ctx->fired_suppressions.push_back(s);
}
- if (OnReport(rep, suppress_pc != 0))
- return false;
+ {
+ bool old_is_freeing = thr->is_freeing;
+ thr->is_freeing = false;
+ bool suppressed = OnReport(rep, suppress_pc != 0);
+ thr->is_freeing = old_is_freeing;
+ if (suppressed)
+ return false;
+ }
PrintReport(rep);
ctx->nreported++;
if (flags()->halt_on_error)
@@ -533,15 +521,14 @@ bool OutputReport(Context *ctx,
return true;
}
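OutputReport now scans every memory-access stack, extra stack, thread stack and location in the report when matching suppressions, rather than the two stacks that used to be passed in explicitly. For reference, a hypothetical suppressions file that would feed this path (loaded via TSAN_OPTIONS=suppressions=<file>; all symbol names here are invented):

    # data races whose stacks mention this function
    race:ThirdPartyQueue::Push
    # thread-leak reports from a known helper thread
    thread:worker_pool_main
    # lock-order-inversion reports from legacy locking code
    deadlock:LegacyLockManager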
-bool IsFiredSuppression(Context *ctx,
- const ScopedReport &srep,
- const StackTrace &trace) {
+bool IsFiredSuppression(Context *ctx, const ScopedReport &srep,
+ StackTrace trace) {
for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
continue;
- for (uptr j = 0; j < trace.Size(); j++) {
+ for (uptr j = 0; j < trace.size; j++) {
FiredSuppression *s = &ctx->fired_suppressions[k];
- if (trace.Get(j) == s->pc) {
+ if (trace.trace[j] == s->pc) {
if (s->supp)
s->supp->hit_count++;
return true;
@@ -567,48 +554,6 @@ static bool IsFiredSuppression(Context *ctx,
return false;
}
-bool FrameIsInternal(const ReportStack *frame) {
- return frame != 0 && frame->file != 0
- && (internal_strstr(frame->file, "tsan_interceptors.cc") ||
- internal_strstr(frame->file, "sanitizer_common_interceptors.inc") ||
- internal_strstr(frame->file, "tsan_interface_"));
-}
-
-// On programs that use Java we see weird reports like:
-// WARNING: ThreadSanitizer: data race (pid=22512)
-// Read of size 8 at 0x7d2b00084318 by thread 100:
-// #0 memcpy tsan_interceptors.cc:406 (foo+0x00000d8dfae3)
-// #1 <null> <null>:0 (0x7f7ad9b40193)
-// Previous write of size 8 at 0x7d2b00084318 by thread 105:
-// #0 strncpy tsan_interceptors.cc:501 (foo+0x00000d8e0919)
-// #1 <null> <null>:0 (0x7f7ad9b42707)
-static bool IsJavaNonsense(const ReportDesc *rep) {
-#ifndef TSAN_GO
- for (uptr i = 0; i < rep->mops.Size(); i++) {
- ReportMop *mop = rep->mops[i];
- ReportStack *frame = mop->stack;
- if (frame == 0
- || (frame->func == 0 && frame->file == 0 && frame->line == 0
- && frame->module == 0)) {
- return true;
- }
- if (FrameIsInternal(frame)) {
- frame = frame->next;
- if (frame == 0
- || (frame->func == 0 && frame->file == 0 && frame->line == 0
- && frame->module == 0)) {
- if (frame) {
- FiredSuppression supp = {rep->typ, frame->pc, 0};
- CTX()->fired_suppressions.push_back(supp);
- }
- return true;
- }
- }
- }
-#endif
- return false;
-}
-
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
Shadow s0(thr->racy_state[0]);
Shadow s1(thr->racy_state[1]);
@@ -623,10 +568,14 @@ static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
}
void ReportRace(ThreadState *thr) {
+ CheckNoLocks(thr);
+
+ // Symbolizer makes lots of intercepted calls. If we try to process them,
+ // at best it will cause deadlocks on internal mutexes.
+ ScopedIgnoreInterceptors ignore;
+
if (!flags()->report_bugs)
return;
- ScopedInRtl in_rtl;
-
if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
return;
@@ -651,11 +600,12 @@ void ReportRace(ThreadState *thr) {
return;
}
- Context *ctx = CTX();
ThreadRegistryLock l0(ctx->thread_registry);
ReportType typ = ReportTypeRace;
- if (thr->is_vptr_access)
+ if (thr->is_vptr_access && freed)
+ typ = ReportTypeVptrUseAfterFree;
+ else if (thr->is_vptr_access)
typ = ReportTypeVptrRace;
else if (freed)
typ = ReportTypeUseAfterFree;
@@ -663,9 +613,9 @@ void ReportRace(ThreadState *thr) {
if (IsFiredSuppression(ctx, rep, addr))
return;
const uptr kMop = 2;
- StackTrace traces[kMop];
+ VarSizeStackTrace traces[kMop];
const uptr toppc = TraceTopPC(thr);
- traces[0].ObtainCurrent(thr, toppc);
+ ObtainCurrentStack(thr, toppc, &traces[0]);
if (IsFiredSuppression(ctx, rep, traces[0]))
return;
InternalScopedBuffer<MutexSet> mset2(1);
@@ -680,13 +630,10 @@ void ReportRace(ThreadState *thr) {
for (uptr i = 0; i < kMop; i++) {
Shadow s(thr->racy_state[i]);
- rep.AddMemoryAccess(addr, s, &traces[i],
+ rep.AddMemoryAccess(addr, s, traces[i],
i == 0 ? &thr->mset : mset2.data());
}
- if (flags()->suppress_java && IsJavaNonsense(rep.GetReport()))
- return;
-
for (uptr i = 0; i < kMop; i++) {
FastState s(thr->racy_state[i]);
ThreadContext *tctx = static_cast<ThreadContext*>(
@@ -698,7 +645,7 @@ void ReportRace(ThreadState *thr) {
rep.AddLocation(addr_min, addr_max - addr_min);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
{ // NOLINT
Shadow s(thr->racy_state[1]);
if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
@@ -706,37 +653,40 @@ void ReportRace(ThreadState *thr) {
}
#endif
- ReportLocation *suppress_loc = rep.GetReport()->locs.Size() ?
- rep.GetReport()->locs[0] : 0;
- if (!OutputReport(ctx, rep, rep.GetReport()->mops[0]->stack,
- rep.GetReport()->mops[1]->stack,
- suppress_loc))
+ if (!OutputReport(thr, rep))
return;
AddRacyStacks(thr, traces, addr_min, addr_max);
}
void PrintCurrentStack(ThreadState *thr, uptr pc) {
- StackTrace trace;
- trace.ObtainCurrent(thr, pc);
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
PrintStack(SymbolizeStack(trace));
}
-void PrintCurrentStackSlow() {
-#ifndef TSAN_GO
- __sanitizer::StackTrace *ptrace = new(internal_alloc(MBlockStackTrace,
- sizeof(__sanitizer::StackTrace))) __sanitizer::StackTrace;
- ptrace->Unwind(kStackTraceMax, __sanitizer::StackTrace::GetCurrentPc(),
- 0, 0, 0, false);
+void PrintCurrentStackSlow(uptr pc) {
+#ifndef SANITIZER_GO
+ BufferedStackTrace *ptrace =
+ new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
+ BufferedStackTrace();
+ ptrace->Unwind(kStackTraceMax, pc, 0, 0, 0, 0, false);
for (uptr i = 0; i < ptrace->size / 2; i++) {
- uptr tmp = ptrace->trace[i];
- ptrace->trace[i] = ptrace->trace[ptrace->size - i - 1];
- ptrace->trace[ptrace->size - i - 1] = tmp;
+ uptr tmp = ptrace->trace_buffer[i];
+ ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
+ ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
}
- StackTrace trace;
- trace.Init(ptrace->trace, ptrace->size);
- PrintStack(SymbolizeStack(trace));
+ PrintStack(SymbolizeStack(*ptrace));
#endif
}
} // namespace __tsan
+
+using namespace __tsan;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_print_stack_trace() {
+ PrintCurrentStackSlow(StackTrace::GetCurrentPc());
+}
+} // extern "C"
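The new extern "C" entry point makes the slow unwinder callable from instrumented programs. A hedged usage sketch; the declaration normally comes from the sanitizer interface headers:

    extern "C" void __sanitizer_print_stack_trace();

    void DumpHere() {
      // Prints the current call stack through the runtime's slow unwinder.
      __sanitizer_print_stack_trace();
    }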
diff --git a/lib/tsan/rtl/tsan_rtl_thread.cc b/lib/tsan/rtl/tsan_rtl_thread.cc
index 4e451b042947b..7b7b27c024f61 100644
--- a/lib/tsan/rtl/tsan_rtl_thread.cc
+++ b/lib/tsan/rtl/tsan_rtl_thread.cc
@@ -30,19 +30,19 @@ ThreadContext::ThreadContext(int tid)
, epoch1() {
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
ThreadContext::~ThreadContext() {
}
#endif
void ThreadContext::OnDead() {
- sync.Reset();
+ CHECK_EQ(sync.size(), 0);
}
void ThreadContext::OnJoined(void *arg) {
ThreadState *caller_thr = static_cast<ThreadState *>(arg);
AcquireImpl(caller_thr, 0, &sync);
- sync.Reset();
+ sync.Reset(&caller_thr->clock_cache);
}
struct OnCreatedArgs {
@@ -59,21 +59,22 @@ void ThreadContext::OnCreated(void *arg) {
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
ReleaseImpl(args->thr, 0, &sync);
-#ifdef TSAN_GO
- creation_stack.ObtainCurrent(args->thr, args->pc);
-#else
creation_stack_id = CurrentStackId(args->thr, args->pc);
-#endif
if (reuse_count == 0)
StatInc(args->thr, StatThreadMaxTid);
}
void ThreadContext::OnReset() {
- sync.Reset();
+ CHECK_EQ(sync.size(), 0);
FlushUnneededShadowMemory(GetThreadTrace(tid), TraceSize() * sizeof(Event));
//!!! FlushUnneededShadowMemory(GetThreadTraceHeader(tid), sizeof(Trace));
}
+void ThreadContext::OnDetached(void *arg) {
+ ThreadState *thr1 = static_cast<ThreadState*>(arg);
+ sync.Reset(&thr1->clock_cache);
+}
+
struct OnStartedArgs {
ThreadState *thr;
uptr stk_addr;
@@ -89,9 +90,9 @@ void ThreadContext::OnStarted(void *arg) {
// from different threads.
epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
epoch1 = (u64)-1;
- new(thr) ThreadState(CTX(), tid, unique_id,
- epoch0, args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
-#ifndef TSAN_GO
+ new(thr) ThreadState(ctx, tid, unique_id, epoch0, reuse_count,
+ args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
+#ifndef SANITIZER_GO
thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0];
thr->shadow_stack_pos = thr->shadow_stack;
thr->shadow_stack_end = thr->shadow_stack + kShadowStackSize;
@@ -103,9 +104,13 @@ void ThreadContext::OnStarted(void *arg) {
thr->shadow_stack_pos = thr->shadow_stack;
thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
#endif
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
AllocatorThreadStart(thr);
#endif
+ if (common_flags()->detect_deadlocks) {
+ thr->dd_pt = ctx->dd->CreatePhysicalThread();
+ thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
+ }
thr->fast_synch_epoch = epoch0;
AcquireImpl(thr, 0, &sync);
thr->fast_state.SetHistorySize(flags()->history_size);
@@ -113,12 +118,11 @@ void ThreadContext::OnStarted(void *arg) {
Trace *thr_trace = ThreadTrace(thr->tid);
thr_trace->headers[trace].epoch0 = epoch0;
StatInc(thr, StatSyncAcquire);
- sync.Reset();
+ sync.Reset(&thr->clock_cache);
DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
"tls_addr=%zx tls_size=%zx\n",
tid, (uptr)epoch0, args->stk_addr, args->stk_size,
args->tls_addr, args->tls_size);
- thr->is_alive = true;
}
void ThreadContext::OnFinished() {
@@ -130,15 +134,21 @@ void ThreadContext::OnFinished() {
}
epoch1 = thr->fast_state.epoch();
-#ifndef TSAN_GO
+ if (common_flags()->detect_deadlocks) {
+ ctx->dd->DestroyPhysicalThread(thr->dd_pt);
+ ctx->dd->DestroyLogicalThread(thr->dd_lt);
+ }
+ ctx->clock_alloc.FlushCache(&thr->clock_cache);
+ ctx->metamap.OnThreadIdle(thr);
+#ifndef SANITIZER_GO
AllocatorThreadFinish(thr);
#endif
thr->~ThreadState();
- StatAggregate(CTX()->stat, thr->stat);
+ StatAggregate(ctx->stat, thr->stat);
thr = 0;
}
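OnStarted and OnFinished are now symmetric with respect to the per-thread state introduced in this change; pairing the relevant lines from the two hunks:

    // start:   thr->dd_pt = ctx->dd->CreatePhysicalThread();
    //          thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
    // finish:  ctx->dd->DestroyPhysicalThread(thr->dd_pt);
    //          ctx->dd->DestroyLogicalThread(thr->dd_lt);
    //          ctx->clock_alloc.FlushCache(&thr->clock_cache);  // return slab blocks
    //          ctx->metamap.OnThreadIdle(thr);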
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
struct ThreadLeak {
ThreadContext *tctx;
int count;
@@ -160,48 +170,62 @@ static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
}
#endif
-static void ThreadCheckIgnore(ThreadState *thr) {
- if (thr->ignore_reads_and_writes) {
- Printf("ThreadSanitizer: thread T%d finished with ignores enabled.\n",
- thr->tid);
+#ifndef SANITIZER_GO
+static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
+ if (tctx->tid == 0) {
+ Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
+ } else {
+ Printf("ThreadSanitizer: thread T%d %s finished with ignores enabled,"
+ " created at:\n", tctx->tid, tctx->name);
+ PrintStack(SymbolizeStackId(tctx->creation_stack_id));
}
- if (thr->ignore_sync) {
- Printf("ThreadSanitizer: thread T%d finished with sync ignores enabled.\n",
- thr->tid);
+ Printf(" One of the following ignores was not ended"
+ " (in order of probability)\n");
+ for (uptr i = 0; i < set->Size(); i++) {
+ Printf(" Ignore was enabled at:\n");
+ PrintStack(SymbolizeStackId(set->At(i)));
}
+ Die();
}
+static void ThreadCheckIgnore(ThreadState *thr) {
+ if (ctx->after_multithreaded_fork)
+ return;
+ if (thr->ignore_reads_and_writes)
+ ReportIgnoresEnabled(thr->tctx, &thr->mop_ignore_set);
+ if (thr->ignore_sync)
+ ReportIgnoresEnabled(thr->tctx, &thr->sync_ignore_set);
+}
+#else
+static void ThreadCheckIgnore(ThreadState *thr) {}
+#endif
+
void ThreadFinalize(ThreadState *thr) {
- CHECK_GT(thr->in_rtl, 0);
ThreadCheckIgnore(thr);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
if (!flags()->report_thread_leaks)
return;
- ThreadRegistryLock l(CTX()->thread_registry);
+ ThreadRegistryLock l(ctx->thread_registry);
Vector<ThreadLeak> leaks(MBlockScopedBuf);
- CTX()->thread_registry->RunCallbackForEachThreadLocked(
+ ctx->thread_registry->RunCallbackForEachThreadLocked(
MaybeReportThreadLeak, &leaks);
for (uptr i = 0; i < leaks.Size(); i++) {
ScopedReport rep(ReportTypeThreadLeak);
- rep.AddThread(leaks[i].tctx);
+ rep.AddThread(leaks[i].tctx, true);
rep.SetCount(leaks[i].count);
- OutputReport(CTX(), rep);
+ OutputReport(thr, rep);
}
#endif
}
int ThreadCount(ThreadState *thr) {
- CHECK_GT(thr->in_rtl, 0);
- Context *ctx = CTX();
uptr result;
ctx->thread_registry->GetNumberOfThreads(0, 0, &result);
return (int)result;
}
int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
- CHECK_GT(thr->in_rtl, 0);
StatInc(thr, StatThreadCreate);
- Context *ctx = CTX();
OnCreatedArgs args = { thr, pc };
int tid = ctx->thread_registry->CreateThread(uid, detached, thr->tid, &args);
DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", thr->tid, tid, uid);
@@ -210,7 +234,6 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
}
void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
- CHECK_GT(thr->in_rtl, 0);
uptr stk_addr = 0;
uptr stk_size = 0;
uptr tls_addr = 0;
@@ -236,20 +259,31 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
}
}
+ ThreadRegistry *tr = ctx->thread_registry;
OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
- CTX()->thread_registry->StartThread(tid, os_id, &args);
+ tr->StartThread(tid, os_id, &args);
+
+ tr->Lock();
+ thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
+ tr->Unlock();
+
+#ifndef SANITIZER_GO
+ if (ctx->after_multithreaded_fork) {
+ thr->ignore_interceptors++;
+ ThreadIgnoreBegin(thr, 0);
+ ThreadIgnoreSyncBegin(thr, 0);
+ }
+#endif
}
void ThreadFinish(ThreadState *thr) {
- CHECK_GT(thr->in_rtl, 0);
ThreadCheckIgnore(thr);
StatInc(thr, StatThreadFinish);
if (thr->stk_addr && thr->stk_size)
DontNeedShadowFor(thr->stk_addr, thr->stk_size);
if (thr->tls_addr && thr->tls_size)
DontNeedShadowFor(thr->tls_addr, thr->tls_size);
- thr->is_alive = false;
- Context *ctx = CTX();
+ thr->is_dead = true;
ctx->thread_registry->FinishThread(thr->tid);
}
@@ -263,33 +297,26 @@ static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
}
int ThreadTid(ThreadState *thr, uptr pc, uptr uid) {
- CHECK_GT(thr->in_rtl, 0);
- Context *ctx = CTX();
int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid);
DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res);
return res;
}
void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
- CHECK_GT(thr->in_rtl, 0);
CHECK_GT(tid, 0);
CHECK_LT(tid, kMaxTid);
DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
- Context *ctx = CTX();
ctx->thread_registry->JoinThread(tid, thr);
}
void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
- CHECK_GT(thr->in_rtl, 0);
CHECK_GT(tid, 0);
CHECK_LT(tid, kMaxTid);
- Context *ctx = CTX();
- ctx->thread_registry->DetachThread(tid);
+ ctx->thread_registry->DetachThread(tid, thr);
}
void ThreadSetName(ThreadState *thr, const char *name) {
- CHECK_GT(thr->in_rtl, 0);
- CTX()->thread_registry->SetThreadName(thr->tid, name);
+ ctx->thread_registry->SetThreadName(thr->tid, name);
}
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
diff --git a/lib/tsan/rtl/tsan_stack_trace.cc b/lib/tsan/rtl/tsan_stack_trace.cc
new file mode 100644
index 0000000000000..ceca3f8e8738d
--- /dev/null
+++ b/lib/tsan/rtl/tsan_stack_trace.cc
@@ -0,0 +1,46 @@
+//===-- tsan_stack_trace.cc -----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_stack_trace.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+
+namespace __tsan {
+
+VarSizeStackTrace::VarSizeStackTrace()
+ : StackTrace(nullptr, 0), trace_buffer(nullptr) {}
+
+VarSizeStackTrace::~VarSizeStackTrace() {
+ ResizeBuffer(0);
+}
+
+void VarSizeStackTrace::ResizeBuffer(uptr new_size) {
+ if (trace_buffer) {
+ internal_free(trace_buffer);
+ }
+ trace_buffer =
+ (new_size > 0)
+ ? (uptr *)internal_alloc(MBlockStackTrace,
+ new_size * sizeof(trace_buffer[0]))
+ : nullptr;
+ trace = trace_buffer;
+ size = new_size;
+}
+
+void VarSizeStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) {
+ ResizeBuffer(cnt + !!extra_top_pc);
+ internal_memcpy(trace_buffer, pcs, cnt * sizeof(trace_buffer[0]));
+ if (extra_top_pc)
+ trace_buffer[cnt] = extra_top_pc;
+}
+
+} // namespace __tsan
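VarSizeStackTrace owns its buffer and exposes it through the plain StackTrace view (trace/size). A hedged usage sketch with invented pc values:

    VarSizeStackTrace vst;
    uptr pcs[2] = {0x401234, 0x40f00d};
    vst.Init(pcs, 2, /*extra_top_pc=*/0x401999);
    // vst.size == 3 and vst.trace[2] == 0x401999; the buffer is freed by the
    // destructor via ResizeBuffer(0).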
diff --git a/lib/tsan/rtl/tsan_stack_trace.h b/lib/tsan/rtl/tsan_stack_trace.h
new file mode 100644
index 0000000000000..5bf89bb7584f8
--- /dev/null
+++ b/lib/tsan/rtl/tsan_stack_trace.h
@@ -0,0 +1,39 @@
+//===-- tsan_stack_trace.h --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_STACK_TRACE_H
+#define TSAN_STACK_TRACE_H
+
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+// StackTrace which calls malloc/free to allocate the buffer for
+// addresses in stack traces.
+struct VarSizeStackTrace : public StackTrace {
+ uptr *trace_buffer; // Owned.
+
+ VarSizeStackTrace();
+ ~VarSizeStackTrace();
+ void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0);
+
+ private:
+ void ResizeBuffer(uptr new_size);
+
+ VarSizeStackTrace(const VarSizeStackTrace &);
+ void operator=(const VarSizeStackTrace &);
+};
+
+} // namespace __tsan
+
+#endif // TSAN_STACK_TRACE_H
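The private, undefined copy constructor and assignment operator make the type non-copyable; under C++11 the same intent is usually written with deleted members (a stylistic aside, not a change proposed by this diff):

    VarSizeStackTrace(const VarSizeStackTrace &) = delete;
    void operator=(const VarSizeStackTrace &) = delete;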
diff --git a/lib/tsan/rtl/tsan_stat.cc b/lib/tsan/rtl/tsan_stat.cc
index f9dbf121cef4f..350a2ba482530 100644
--- a/lib/tsan/rtl/tsan_stat.cc
+++ b/lib/tsan/rtl/tsan_stat.cc
@@ -37,6 +37,7 @@ void StatOutput(u64 *stat) {
name[StatMop4] = " size 4 ";
name[StatMop8] = " size 8 ";
name[StatMopSame] = " Including same ";
+ name[StatMopIgnored] = " Including ignored ";
name[StatMopRange] = " Including range ";
name[StatMopRodata] = " Including .rodata ";
name[StatMopRangeRodata] = " Including .rodata range ";
@@ -74,6 +75,28 @@ void StatOutput(u64 *stat) {
name[StatSyncAcquire] = " acquired ";
name[StatSyncRelease] = " released ";
+ name[StatClockAcquire] = "Clock acquire ";
+ name[StatClockAcquireEmpty] = " empty clock ";
+ name[StatClockAcquireFastRelease] = " fast from release-store ";
+ name[StatClockAcquireLarge] = " contains my tid ";
+ name[StatClockAcquireRepeat] = " repeated (fast) ";
+ name[StatClockAcquireFull] = " full (slow) ";
+ name[StatClockAcquiredSomething] = " acquired something ";
+ name[StatClockRelease] = "Clock release ";
+ name[StatClockReleaseResize] = " resize ";
+ name[StatClockReleaseFast1] = " fast1 ";
+ name[StatClockReleaseFast2] = " fast2 ";
+ name[StatClockReleaseSlow] = " dirty overflow (slow) ";
+ name[StatClockReleaseFull] = " full (slow) ";
+ name[StatClockReleaseAcquired] = " was acquired ";
+ name[StatClockReleaseClearTail] = " clear tail ";
+ name[StatClockStore] = "Clock release store ";
+ name[StatClockStoreResize] = " resize ";
+ name[StatClockStoreFast] = " fast ";
+ name[StatClockStoreFull] = " slow ";
+ name[StatClockStoreTail] = " clear tail ";
+ name[StatClockAcquireRelease] = "Clock acquire-release ";
+
name[StatAtomic] = "Atomic operations ";
name[StatAtomicLoad] = " Including load ";
name[StatAtomicStore] = " store ";
@@ -98,334 +121,6 @@ void StatOutput(u64 *stat) {
name[StatAtomic8] = " size 8 ";
name[StatAtomic16] = " size 16 ";
- name[StatInterceptor] = "Interceptors ";
- name[StatInt_longjmp] = " longjmp ";
- name[StatInt_siglongjmp] = " siglongjmp ";
- name[StatInt_malloc] = " malloc ";
- name[StatInt___libc_memalign] = " __libc_memalign ";
- name[StatInt_calloc] = " calloc ";
- name[StatInt_realloc] = " realloc ";
- name[StatInt_free] = " free ";
- name[StatInt_cfree] = " cfree ";
- name[StatInt_malloc_usable_size] = " malloc_usable_size ";
- name[StatInt_mmap] = " mmap ";
- name[StatInt_mmap64] = " mmap64 ";
- name[StatInt_munmap] = " munmap ";
- name[StatInt_memalign] = " memalign ";
- name[StatInt_valloc] = " valloc ";
- name[StatInt_pvalloc] = " pvalloc ";
- name[StatInt_posix_memalign] = " posix_memalign ";
- name[StatInt__Znwm] = " _Znwm ";
- name[StatInt__ZnwmRKSt9nothrow_t] = " _ZnwmRKSt9nothrow_t ";
- name[StatInt__Znam] = " _Znam ";
- name[StatInt__ZnamRKSt9nothrow_t] = " _ZnamRKSt9nothrow_t ";
- name[StatInt__ZdlPv] = " _ZdlPv ";
- name[StatInt__ZdlPvRKSt9nothrow_t] = " _ZdlPvRKSt9nothrow_t ";
- name[StatInt__ZdaPv] = " _ZdaPv ";
- name[StatInt__ZdaPvRKSt9nothrow_t] = " _ZdaPvRKSt9nothrow_t ";
- name[StatInt_strlen] = " strlen ";
- name[StatInt_memset] = " memset ";
- name[StatInt_memcpy] = " memcpy ";
- name[StatInt_strcmp] = " strcmp ";
- name[StatInt_memchr] = " memchr ";
- name[StatInt_memrchr] = " memrchr ";
- name[StatInt_memmove] = " memmove ";
- name[StatInt_memcmp] = " memcmp ";
- name[StatInt_strchr] = " strchr ";
- name[StatInt_strchrnul] = " strchrnul ";
- name[StatInt_strrchr] = " strrchr ";
- name[StatInt_strncmp] = " strncmp ";
- name[StatInt_strcpy] = " strcpy ";
- name[StatInt_strncpy] = " strncpy ";
- name[StatInt_strstr] = " strstr ";
- name[StatInt_strdup] = " strdup ";
- name[StatInt_strcasecmp] = " strcasecmp ";
- name[StatInt_strncasecmp] = " strncasecmp ";
- name[StatInt_atexit] = " atexit ";
- name[StatInt__exit] = " _exit ";
- name[StatInt___cxa_guard_acquire] = " __cxa_guard_acquire ";
- name[StatInt___cxa_guard_release] = " __cxa_guard_release ";
- name[StatInt___cxa_guard_abort] = " __cxa_guard_abort ";
- name[StatInt_pthread_create] = " pthread_create ";
- name[StatInt_pthread_join] = " pthread_join ";
- name[StatInt_pthread_detach] = " pthread_detach ";
- name[StatInt_pthread_mutex_init] = " pthread_mutex_init ";
- name[StatInt_pthread_mutex_destroy] = " pthread_mutex_destroy ";
- name[StatInt_pthread_mutex_lock] = " pthread_mutex_lock ";
- name[StatInt_pthread_mutex_trylock] = " pthread_mutex_trylock ";
- name[StatInt_pthread_mutex_timedlock] = " pthread_mutex_timedlock ";
- name[StatInt_pthread_mutex_unlock] = " pthread_mutex_unlock ";
- name[StatInt_pthread_spin_init] = " pthread_spin_init ";
- name[StatInt_pthread_spin_destroy] = " pthread_spin_destroy ";
- name[StatInt_pthread_spin_lock] = " pthread_spin_lock ";
- name[StatInt_pthread_spin_trylock] = " pthread_spin_trylock ";
- name[StatInt_pthread_spin_unlock] = " pthread_spin_unlock ";
- name[StatInt_pthread_rwlock_init] = " pthread_rwlock_init ";
- name[StatInt_pthread_rwlock_destroy] = " pthread_rwlock_destroy ";
- name[StatInt_pthread_rwlock_rdlock] = " pthread_rwlock_rdlock ";
- name[StatInt_pthread_rwlock_tryrdlock] = " pthread_rwlock_tryrdlock ";
- name[StatInt_pthread_rwlock_timedrdlock]
- = " pthread_rwlock_timedrdlock ";
- name[StatInt_pthread_rwlock_wrlock] = " pthread_rwlock_wrlock ";
- name[StatInt_pthread_rwlock_trywrlock] = " pthread_rwlock_trywrlock ";
- name[StatInt_pthread_rwlock_timedwrlock]
- = " pthread_rwlock_timedwrlock ";
- name[StatInt_pthread_rwlock_unlock] = " pthread_rwlock_unlock ";
- name[StatInt_pthread_cond_init] = " pthread_cond_init ";
- name[StatInt_pthread_cond_destroy] = " pthread_cond_destroy ";
- name[StatInt_pthread_cond_signal] = " pthread_cond_signal ";
- name[StatInt_pthread_cond_broadcast] = " pthread_cond_broadcast ";
- name[StatInt_pthread_cond_wait] = " pthread_cond_wait ";
- name[StatInt_pthread_cond_timedwait] = " pthread_cond_timedwait ";
- name[StatInt_pthread_barrier_init] = " pthread_barrier_init ";
- name[StatInt_pthread_barrier_destroy] = " pthread_barrier_destroy ";
- name[StatInt_pthread_barrier_wait] = " pthread_barrier_wait ";
- name[StatInt_pthread_once] = " pthread_once ";
- name[StatInt_pthread_getschedparam] = " pthread_getschedparam ";
- name[StatInt_pthread_setname_np] = " pthread_setname_np ";
- name[StatInt_sem_init] = " sem_init ";
- name[StatInt_sem_destroy] = " sem_destroy ";
- name[StatInt_sem_wait] = " sem_wait ";
- name[StatInt_sem_trywait] = " sem_trywait ";
- name[StatInt_sem_timedwait] = " sem_timedwait ";
- name[StatInt_sem_post] = " sem_post ";
- name[StatInt_sem_getvalue] = " sem_getvalue ";
- name[StatInt_stat] = " stat ";
- name[StatInt___xstat] = " __xstat ";
- name[StatInt_stat64] = " stat64 ";
- name[StatInt___xstat64] = " __xstat64 ";
- name[StatInt_lstat] = " lstat ";
- name[StatInt___lxstat] = " __lxstat ";
- name[StatInt_lstat64] = " lstat64 ";
- name[StatInt___lxstat64] = " __lxstat64 ";
- name[StatInt_fstat] = " fstat ";
- name[StatInt___fxstat] = " __fxstat ";
- name[StatInt_fstat64] = " fstat64 ";
- name[StatInt___fxstat64] = " __fxstat64 ";
- name[StatInt_open] = " open ";
- name[StatInt_open64] = " open64 ";
- name[StatInt_creat] = " creat ";
- name[StatInt_creat64] = " creat64 ";
- name[StatInt_dup] = " dup ";
- name[StatInt_dup2] = " dup2 ";
- name[StatInt_dup3] = " dup3 ";
- name[StatInt_eventfd] = " eventfd ";
- name[StatInt_signalfd] = " signalfd ";
- name[StatInt_inotify_init] = " inotify_init ";
- name[StatInt_inotify_init1] = " inotify_init1 ";
- name[StatInt_socket] = " socket ";
- name[StatInt_socketpair] = " socketpair ";
- name[StatInt_connect] = " connect ";
- name[StatInt_bind] = " bind ";
- name[StatInt_listen] = " listen ";
- name[StatInt_accept] = " accept ";
- name[StatInt_accept4] = " accept4 ";
- name[StatInt_epoll_create] = " epoll_create ";
- name[StatInt_epoll_create1] = " epoll_create1 ";
- name[StatInt_close] = " close ";
- name[StatInt___close] = " __close ";
- name[StatInt___res_iclose] = " __res_iclose ";
- name[StatInt_pipe] = " pipe ";
- name[StatInt_pipe2] = " pipe2 ";
- name[StatInt_read] = " read ";
- name[StatInt_prctl] = " prctl ";
- name[StatInt_pread] = " pread ";
- name[StatInt_pread64] = " pread64 ";
- name[StatInt_readv] = " readv ";
- name[StatInt_preadv] = " preadv ";
- name[StatInt_preadv64] = " preadv64 ";
- name[StatInt_write] = " write ";
- name[StatInt_pwrite] = " pwrite ";
- name[StatInt_pwrite64] = " pwrite64 ";
- name[StatInt_writev] = " writev ";
- name[StatInt_pwritev] = " pwritev ";
- name[StatInt_pwritev64] = " pwritev64 ";
- name[StatInt_send] = " send ";
- name[StatInt_sendmsg] = " sendmsg ";
- name[StatInt_recv] = " recv ";
- name[StatInt_recvmsg] = " recvmsg ";
- name[StatInt_unlink] = " unlink ";
- name[StatInt_fopen] = " fopen ";
- name[StatInt_freopen] = " freopen ";
- name[StatInt_fclose] = " fclose ";
- name[StatInt_fread] = " fread ";
- name[StatInt_fwrite] = " fwrite ";
- name[StatInt_fflush] = " fflush ";
- name[StatInt_abort] = " abort ";
- name[StatInt_puts] = " puts ";
- name[StatInt_rmdir] = " rmdir ";
- name[StatInt_opendir] = " opendir ";
- name[StatInt_epoll_ctl] = " epoll_ctl ";
- name[StatInt_epoll_wait] = " epoll_wait ";
- name[StatInt_poll] = " poll ";
- name[StatInt_ppoll] = " ppoll ";
- name[StatInt_sigaction] = " sigaction ";
- name[StatInt_signal] = " signal ";
- name[StatInt_sigsuspend] = " sigsuspend ";
- name[StatInt_raise] = " raise ";
- name[StatInt_kill] = " kill ";
- name[StatInt_pthread_kill] = " pthread_kill ";
- name[StatInt_sleep] = " sleep ";
- name[StatInt_usleep] = " usleep ";
- name[StatInt_nanosleep] = " nanosleep ";
- name[StatInt_gettimeofday] = " gettimeofday ";
- name[StatInt_fork] = " fork ";
- name[StatInt_vscanf] = " vscanf ";
- name[StatInt_vsscanf] = " vsscanf ";
- name[StatInt_vfscanf] = " vfscanf ";
- name[StatInt_scanf] = " scanf ";
- name[StatInt_sscanf] = " sscanf ";
- name[StatInt_fscanf] = " fscanf ";
- name[StatInt___isoc99_vscanf] = " vscanf ";
- name[StatInt___isoc99_vsscanf] = " vsscanf ";
- name[StatInt___isoc99_vfscanf] = " vfscanf ";
- name[StatInt___isoc99_scanf] = " scanf ";
- name[StatInt___isoc99_sscanf] = " sscanf ";
- name[StatInt___isoc99_fscanf] = " fscanf ";
- name[StatInt_on_exit] = " on_exit ";
- name[StatInt___cxa_atexit] = " __cxa_atexit ";
- name[StatInt_localtime] = " localtime ";
- name[StatInt_localtime_r] = " localtime_r ";
- name[StatInt_gmtime] = " gmtime ";
- name[StatInt_gmtime_r] = " gmtime_r ";
- name[StatInt_ctime] = " ctime ";
- name[StatInt_ctime_r] = " ctime_r ";
- name[StatInt_asctime] = " asctime ";
- name[StatInt_asctime_r] = " asctime_r ";
- name[StatInt_strptime] = " strptime ";
- name[StatInt_frexp] = " frexp ";
- name[StatInt_frexpf] = " frexpf ";
- name[StatInt_frexpl] = " frexpl ";
- name[StatInt_getpwnam] = " getpwnam ";
- name[StatInt_getpwuid] = " getpwuid ";
- name[StatInt_getgrnam] = " getgrnam ";
- name[StatInt_getgrgid] = " getgrgid ";
- name[StatInt_getpwnam_r] = " getpwnam_r ";
- name[StatInt_getpwuid_r] = " getpwuid_r ";
- name[StatInt_getgrnam_r] = " getgrnam_r ";
- name[StatInt_getgrgid_r] = " getgrgid_r ";
- name[StatInt_clock_getres] = " clock_getres ";
- name[StatInt_clock_gettime] = " clock_gettime ";
- name[StatInt_clock_settime] = " clock_settime ";
- name[StatInt_getitimer] = " getitimer ";
- name[StatInt_setitimer] = " setitimer ";
- name[StatInt_time] = " time ";
- name[StatInt_glob] = " glob ";
- name[StatInt_glob64] = " glob64 ";
- name[StatInt_wait] = " wait ";
- name[StatInt_waitid] = " waitid ";
- name[StatInt_waitpid] = " waitpid ";
- name[StatInt_wait3] = " wait3 ";
- name[StatInt_wait4] = " wait4 ";
- name[StatInt_inet_ntop] = " inet_ntop ";
- name[StatInt_inet_pton] = " inet_pton ";
- name[StatInt_inet_aton] = " inet_aton ";
- name[StatInt_getaddrinfo] = " getaddrinfo ";
- name[StatInt_getnameinfo] = " getnameinfo ";
- name[StatInt_getsockname] = " getsockname ";
- name[StatInt_gethostent] = " gethostent ";
- name[StatInt_gethostbyname] = " gethostbyname ";
- name[StatInt_gethostbyname2] = " gethostbyname2 ";
- name[StatInt_gethostbyaddr] = " gethostbyaddr ";
- name[StatInt_gethostent_r] = " gethostent_r ";
- name[StatInt_gethostbyname_r] = " gethostbyname_r ";
- name[StatInt_gethostbyname2_r] = " gethostbyname2_r ";
- name[StatInt_gethostbyaddr_r] = " gethostbyaddr_r ";
- name[StatInt_getsockopt] = " getsockopt ";
- name[StatInt_modf] = " modf ";
- name[StatInt_modff] = " modff ";
- name[StatInt_modfl] = " modfl ";
- name[StatInt_getpeername] = " getpeername ";
- name[StatInt_ioctl] = " ioctl ";
- name[StatInt_sysinfo] = " sysinfo ";
- name[StatInt_readdir] = " readdir ";
- name[StatInt_readdir64] = " readdir64 ";
- name[StatInt_readdir_r] = " readdir_r ";
- name[StatInt_readdir64_r] = " readdir64_r ";
- name[StatInt_ptrace] = " ptrace ";
- name[StatInt_setlocale] = " setlocale ";
- name[StatInt_getcwd] = " getcwd ";
- name[StatInt_get_current_dir_name] = " get_current_dir_name ";
- name[StatInt_strtoimax] = " strtoimax ";
- name[StatInt_strtoumax] = " strtoumax ";
- name[StatInt_mbstowcs] = " mbstowcs ";
- name[StatInt_mbsrtowcs] = " mbsrtowcs ";
- name[StatInt_mbsnrtowcs] = " mbsnrtowcs ";
- name[StatInt_wcstombs] = " wcstombs ";
- name[StatInt_wcsrtombs] = " wcsrtombs ";
- name[StatInt_wcsnrtombs] = " wcsnrtombs ";
- name[StatInt_tcgetattr] = " tcgetattr ";
- name[StatInt_realpath] = " realpath ";
- name[StatInt_canonicalize_file_name] = " canonicalize_file_name ";
- name[StatInt_confstr] = " confstr ";
- name[StatInt_sched_getaffinity] = " sched_getaffinity ";
- name[StatInt_strerror] = " strerror ";
- name[StatInt_strerror_r] = " strerror_r ";
- name[StatInt_scandir] = " scandir ";
- name[StatInt_scandir64] = " scandir64 ";
- name[StatInt_getgroups] = " getgroups ";
- name[StatInt_wordexp] = " wordexp ";
- name[StatInt_sigwait] = " sigwait ";
- name[StatInt_sigwaitinfo] = " sigwaitinfo ";
- name[StatInt_sigtimedwait] = " sigtimedwait ";
- name[StatInt_sigemptyset] = " sigemptyset ";
- name[StatInt_sigfillset] = " sigfillset ";
- name[StatInt_sigpending] = " sigpending ";
- name[StatInt_sigprocmask] = " sigprocmask ";
- name[StatInt_backtrace] = " backtrace ";
- name[StatInt_backtrace_symbols] = " backtrace_symbols ";
- name[StatInt_dlopen] = " dlopen ";
- name[StatInt_dlclose] = " dlclose ";
- name[StatInt_getmntent] = " getmntent ";
- name[StatInt_getmntent_r] = " getmntent_r ";
- name[StatInt_statfs] = " statfs ";
- name[StatInt_statfs64] = " statfs64 ";
- name[StatInt_fstatfs] = " fstatfs ";
- name[StatInt_fstatfs64] = " fstatfs64 ";
- name[StatInt_statvfs] = " statvfs ";
- name[StatInt_statvfs64] = " statvfs64 ";
- name[StatInt_fstatvfs] = " fstatvfs ";
- name[StatInt_fstatvfs64] = " fstatvfs64 ";
- name[StatInt_initgroups] = " initgroups ";
- name[StatInt_ether_ntoa] = " ether_ntoa ";
- name[StatInt_ether_aton] = " ether_aton ";
- name[StatInt_ether_ntoa_r] = " ether_ntoa_r ";
- name[StatInt_ether_aton_r] = " ether_aton_r ";
- name[StatInt_ether_ntohost] = " ether_ntohost ";
- name[StatInt_ether_hostton] = " ether_hostton ";
- name[StatInt_ether_line] = " ether_line ";
- name[StatInt_shmctl] = " shmctl ";
- name[StatInt_random_r] = " random_r ";
- name[StatInt_tmpnam] = " tmpnam ";
- name[StatInt_tmpnam_r] = " tmpnam_r ";
- name[StatInt_tempnam] = " tempnam ";
- name[StatInt_sincos] = " sincos ";
- name[StatInt_sincosf] = " sincosf ";
- name[StatInt_sincosl] = " sincosl ";
- name[StatInt_remquo] = " remquo ";
- name[StatInt_remquof] = " remquof ";
- name[StatInt_remquol] = " remquol ";
- name[StatInt_lgamma] = " lgamma ";
- name[StatInt_lgammaf] = " lgammaf ";
- name[StatInt_lgammal] = " lgammal ";
- name[StatInt_lgamma_r] = " lgamma_r ";
- name[StatInt_lgammaf_r] = " lgammaf_r ";
- name[StatInt_lgammal_r] = " lgammal_r ";
- name[StatInt_drand48_r] = " drand48_r ";
- name[StatInt_lrand48_r] = " lrand48_r ";
- name[StatInt_getline] = " getline ";
- name[StatInt_getdelim] = " getdelim ";
-
- name[StatInt_pthread_attr_getdetachstate] = " pthread_addr_getdetachstate "; // NOLINT
- name[StatInt_pthread_attr_getguardsize] = " pthread_addr_getguardsize "; // NOLINT
- name[StatInt_pthread_attr_getschedparam] = " pthread_addr_getschedparam "; // NOLINT
- name[StatInt_pthread_attr_getschedpolicy] = " pthread_addr_getschedpolicy "; // NOLINT
- name[StatInt_pthread_attr_getinheritsched] = " pthread_addr_getinheritsched "; // NOLINT
- name[StatInt_pthread_attr_getscope] = " pthread_addr_getscope "; // NOLINT
- name[StatInt_pthread_attr_getstacksize] = " pthread_addr_getstacksize "; // NOLINT
- name[StatInt_pthread_attr_getstack] = " pthread_addr_getstack "; // NOLINT
- name[StatInt_pthread_attr_getaffinity_np] = " pthread_addr_getaffinity_np "; // NOLINT
-
name[StatAnnotation] = "Dynamic annotations ";
name[StatAnnotateHappensBefore] = " HappensBefore ";
name[StatAnnotateHappensAfter] = " HappensAfter ";
@@ -473,11 +168,12 @@ void StatOutput(u64 *stat) {
name[StatMtxAnnotations] = " Annotations ";
name[StatMtxMBlock] = " MBlock ";
name[StatMtxJavaMBlock] = " JavaMBlock ";
+ name[StatMtxDeadlockDetector] = " DeadlockDetector ";
name[StatMtxFD] = " FD ";
Printf("Statistics:\n");
for (int i = 0; i < StatCnt; i++)
- Printf("%s: %zu\n", name[i], (uptr)stat[i]);
+ Printf("%s: %16zu\n", name[i], (uptr)stat[i]);
}
} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_stat.h b/lib/tsan/rtl/tsan_stat.h
index a44edfcd65c6f..0bd949ed15633 100644
--- a/lib/tsan/rtl/tsan_stat.h
+++ b/lib/tsan/rtl/tsan_stat.h
@@ -26,6 +26,7 @@ enum StatType {
StatMop4,
StatMop8,
StatMopSame,
+ StatMopIgnored,
StatMopRange,
StatMopRodata,
StatMopRangeRodata,
@@ -69,6 +70,32 @@ enum StatType {
StatSyncAcquire,
StatSyncRelease,
+ // Clocks - acquire.
+ StatClockAcquire,
+ StatClockAcquireEmpty,
+ StatClockAcquireFastRelease,
+ StatClockAcquireLarge,
+ StatClockAcquireRepeat,
+ StatClockAcquireFull,
+ StatClockAcquiredSomething,
+ // Clocks - release.
+ StatClockRelease,
+ StatClockReleaseResize,
+ StatClockReleaseFast1,
+ StatClockReleaseFast2,
+ StatClockReleaseSlow,
+ StatClockReleaseFull,
+ StatClockReleaseAcquired,
+ StatClockReleaseClearTail,
+ // Clocks - release store.
+ StatClockStore,
+ StatClockStoreResize,
+ StatClockStoreFast,
+ StatClockStoreFull,
+ StatClockStoreTail,
+ // Clocks - acquire-release.
+ StatClockAcquireRelease,
+
// Atomics.
StatAtomic,
StatAtomicLoad,
@@ -94,333 +121,6 @@ enum StatType {
StatAtomic8,
StatAtomic16,
- // Interceptors.
- StatInterceptor,
- StatInt_longjmp,
- StatInt_siglongjmp,
- StatInt_malloc,
- StatInt___libc_memalign,
- StatInt_calloc,
- StatInt_realloc,
- StatInt_free,
- StatInt_cfree,
- StatInt_malloc_usable_size,
- StatInt_mmap,
- StatInt_mmap64,
- StatInt_munmap,
- StatInt_memalign,
- StatInt_valloc,
- StatInt_pvalloc,
- StatInt_posix_memalign,
- StatInt__Znwm,
- StatInt__ZnwmRKSt9nothrow_t,
- StatInt__Znam,
- StatInt__ZnamRKSt9nothrow_t,
- StatInt__ZdlPv,
- StatInt__ZdlPvRKSt9nothrow_t,
- StatInt__ZdaPv,
- StatInt__ZdaPvRKSt9nothrow_t,
- StatInt_strlen,
- StatInt_memset,
- StatInt_memcpy,
- StatInt_strcmp,
- StatInt_memchr,
- StatInt_memrchr,
- StatInt_memmove,
- StatInt_memcmp,
- StatInt_strchr,
- StatInt_strchrnul,
- StatInt_strrchr,
- StatInt_strncmp,
- StatInt_strcpy,
- StatInt_strncpy,
- StatInt_strcasecmp,
- StatInt_strncasecmp,
- StatInt_strstr,
- StatInt_strdup,
- StatInt_atexit,
- StatInt__exit,
- StatInt___cxa_guard_acquire,
- StatInt___cxa_guard_release,
- StatInt___cxa_guard_abort,
- StatInt_pthread_create,
- StatInt_pthread_join,
- StatInt_pthread_detach,
- StatInt_pthread_mutex_init,
- StatInt_pthread_mutex_destroy,
- StatInt_pthread_mutex_lock,
- StatInt_pthread_mutex_trylock,
- StatInt_pthread_mutex_timedlock,
- StatInt_pthread_mutex_unlock,
- StatInt_pthread_spin_init,
- StatInt_pthread_spin_destroy,
- StatInt_pthread_spin_lock,
- StatInt_pthread_spin_trylock,
- StatInt_pthread_spin_unlock,
- StatInt_pthread_rwlock_init,
- StatInt_pthread_rwlock_destroy,
- StatInt_pthread_rwlock_rdlock,
- StatInt_pthread_rwlock_tryrdlock,
- StatInt_pthread_rwlock_timedrdlock,
- StatInt_pthread_rwlock_wrlock,
- StatInt_pthread_rwlock_trywrlock,
- StatInt_pthread_rwlock_timedwrlock,
- StatInt_pthread_rwlock_unlock,
- StatInt_pthread_cond_init,
- StatInt_pthread_cond_destroy,
- StatInt_pthread_cond_signal,
- StatInt_pthread_cond_broadcast,
- StatInt_pthread_cond_wait,
- StatInt_pthread_cond_timedwait,
- StatInt_pthread_barrier_init,
- StatInt_pthread_barrier_destroy,
- StatInt_pthread_barrier_wait,
- StatInt_pthread_once,
- StatInt_pthread_getschedparam,
- StatInt_pthread_setname_np,
- StatInt_sem_init,
- StatInt_sem_destroy,
- StatInt_sem_wait,
- StatInt_sem_trywait,
- StatInt_sem_timedwait,
- StatInt_sem_post,
- StatInt_sem_getvalue,
- StatInt_stat,
- StatInt___xstat,
- StatInt_stat64,
- StatInt___xstat64,
- StatInt_lstat,
- StatInt___lxstat,
- StatInt_lstat64,
- StatInt___lxstat64,
- StatInt_fstat,
- StatInt___fxstat,
- StatInt_fstat64,
- StatInt___fxstat64,
- StatInt_open,
- StatInt_open64,
- StatInt_creat,
- StatInt_creat64,
- StatInt_dup,
- StatInt_dup2,
- StatInt_dup3,
- StatInt_eventfd,
- StatInt_signalfd,
- StatInt_inotify_init,
- StatInt_inotify_init1,
- StatInt_socket,
- StatInt_socketpair,
- StatInt_connect,
- StatInt_bind,
- StatInt_listen,
- StatInt_accept,
- StatInt_accept4,
- StatInt_epoll_create,
- StatInt_epoll_create1,
- StatInt_close,
- StatInt___close,
- StatInt___res_iclose,
- StatInt_pipe,
- StatInt_pipe2,
- StatInt_read,
- StatInt_prctl,
- StatInt_pread,
- StatInt_pread64,
- StatInt_readv,
- StatInt_preadv,
- StatInt_preadv64,
- StatInt_write,
- StatInt_pwrite,
- StatInt_pwrite64,
- StatInt_writev,
- StatInt_pwritev,
- StatInt_pwritev64,
- StatInt_send,
- StatInt_sendmsg,
- StatInt_recv,
- StatInt_recvmsg,
- StatInt_unlink,
- StatInt_fopen,
- StatInt_freopen,
- StatInt_fclose,
- StatInt_fread,
- StatInt_fwrite,
- StatInt_fflush,
- StatInt_abort,
- StatInt_puts,
- StatInt_rmdir,
- StatInt_opendir,
- StatInt_epoll_ctl,
- StatInt_epoll_wait,
- StatInt_poll,
- StatInt_ppoll,
- StatInt_sigaction,
- StatInt_signal,
- StatInt_sigsuspend,
- StatInt_raise,
- StatInt_kill,
- StatInt_pthread_kill,
- StatInt_sleep,
- StatInt_usleep,
- StatInt_nanosleep,
- StatInt_gettimeofday,
- StatInt_fork,
- StatInt_vscanf,
- StatInt_vsscanf,
- StatInt_vfscanf,
- StatInt_scanf,
- StatInt_sscanf,
- StatInt_fscanf,
- StatInt___isoc99_vscanf,
- StatInt___isoc99_vsscanf,
- StatInt___isoc99_vfscanf,
- StatInt___isoc99_scanf,
- StatInt___isoc99_sscanf,
- StatInt___isoc99_fscanf,
- StatInt_on_exit,
- StatInt___cxa_atexit,
- StatInt_localtime,
- StatInt_localtime_r,
- StatInt_gmtime,
- StatInt_gmtime_r,
- StatInt_ctime,
- StatInt_ctime_r,
- StatInt_asctime,
- StatInt_asctime_r,
- StatInt_strptime,
- StatInt_frexp,
- StatInt_frexpf,
- StatInt_frexpl,
- StatInt_getpwnam,
- StatInt_getpwuid,
- StatInt_getgrnam,
- StatInt_getgrgid,
- StatInt_getpwnam_r,
- StatInt_getpwuid_r,
- StatInt_getgrnam_r,
- StatInt_getgrgid_r,
- StatInt_clock_getres,
- StatInt_clock_gettime,
- StatInt_clock_settime,
- StatInt_getitimer,
- StatInt_setitimer,
- StatInt_time,
- StatInt_glob,
- StatInt_glob64,
- StatInt_wait,
- StatInt_waitid,
- StatInt_waitpid,
- StatInt_wait3,
- StatInt_wait4,
- StatInt_inet_ntop,
- StatInt_inet_pton,
- StatInt_inet_aton,
- StatInt_getaddrinfo,
- StatInt_getnameinfo,
- StatInt_getsockname,
- StatInt_gethostent,
- StatInt_gethostbyname,
- StatInt_gethostbyname2,
- StatInt_gethostbyaddr,
- StatInt_gethostent_r,
- StatInt_gethostbyname_r,
- StatInt_gethostbyname2_r,
- StatInt_gethostbyaddr_r,
- StatInt_getsockopt,
- StatInt_modf,
- StatInt_modff,
- StatInt_modfl,
- StatInt_getpeername,
- StatInt_ioctl,
- StatInt_sysinfo,
- StatInt_readdir,
- StatInt_readdir64,
- StatInt_readdir_r,
- StatInt_readdir64_r,
- StatInt_ptrace,
- StatInt_setlocale,
- StatInt_getcwd,
- StatInt_get_current_dir_name,
- StatInt_strtoimax,
- StatInt_strtoumax,
- StatInt_mbstowcs,
- StatInt_mbsrtowcs,
- StatInt_mbsnrtowcs,
- StatInt_wcstombs,
- StatInt_wcsrtombs,
- StatInt_wcsnrtombs,
- StatInt_tcgetattr,
- StatInt_realpath,
- StatInt_canonicalize_file_name,
- StatInt_confstr,
- StatInt_sched_getaffinity,
- StatInt_strerror,
- StatInt_strerror_r,
- StatInt_scandir,
- StatInt_scandir64,
- StatInt_getgroups,
- StatInt_wordexp,
- StatInt_sigwait,
- StatInt_sigwaitinfo,
- StatInt_sigtimedwait,
- StatInt_sigemptyset,
- StatInt_sigfillset,
- StatInt_sigpending,
- StatInt_sigprocmask,
- StatInt_backtrace,
- StatInt_backtrace_symbols,
- StatInt_dlopen,
- StatInt_dlclose,
- StatInt_getmntent,
- StatInt_getmntent_r,
- StatInt_statfs,
- StatInt_statfs64,
- StatInt_fstatfs,
- StatInt_fstatfs64,
- StatInt_statvfs,
- StatInt_statvfs64,
- StatInt_fstatvfs,
- StatInt_fstatvfs64,
- StatInt_initgroups,
- StatInt_ether_ntoa,
- StatInt_ether_aton,
- StatInt_ether_ntoa_r,
- StatInt_ether_aton_r,
- StatInt_ether_ntohost,
- StatInt_ether_hostton,
- StatInt_ether_line,
- StatInt_shmctl,
- StatInt_random_r,
- StatInt_tmpnam,
- StatInt_tmpnam_r,
- StatInt_tempnam,
- StatInt_sincos,
- StatInt_sincosf,
- StatInt_sincosl,
- StatInt_remquo,
- StatInt_remquof,
- StatInt_remquol,
- StatInt_lgamma,
- StatInt_lgammaf,
- StatInt_lgammal,
- StatInt_lgamma_r,
- StatInt_lgammaf_r,
- StatInt_lgammal_r,
- StatInt_drand48_r,
- StatInt_lrand48_r,
- StatInt_getline,
- StatInt_getdelim,
-
- StatInt_pthread_attr_getdetachstate,
- StatInt_pthread_attr_getguardsize,
- StatInt_pthread_attr_getschedparam,
- StatInt_pthread_attr_getschedpolicy,
- StatInt_pthread_attr_getinheritsched,
- StatInt_pthread_attr_getscope,
- StatInt_pthread_attr_getstacksize,
- StatInt_pthread_attr_getstack,
- StatInt_pthread_attr_getaffinity_np,
-
// Dynamic annotations.
StatAnnotation,
StatAnnotateHappensBefore,
@@ -470,6 +170,7 @@ enum StatType {
StatMtxAtExit,
StatMtxMBlock,
StatMtxJavaMBlock,
+ StatMtxDeadlockDetector,
StatMtxFD,
// This must be the last.
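[Editor's note] The StatType values above are plain indices into the u64 array that StatOutput() walks, and the trailing StatCnt entry ("This must be the last") doubles as the array size, which is why new counters such as StatMopIgnored or the StatClock* group can be inserted anywhere before it. A minimal standalone sketch of that idiom; the names and counters below are illustrative, not the runtime's:

#include <cstdint>
#include <cstdio>

// Hypothetical miniature of the enum-indexed counter idiom.
enum StatType {
  StatMop,
  StatSyncAcquire,
  StatSyncRelease,
  StatCnt  // must stay last: doubles as the size of the counter array
};

static uint64_t stat[StatCnt];

static void StatInc(StatType typ, uint64_t n = 1) {
  stat[typ] += n;
}

static void StatOutput() {
  static const char *name[StatCnt] = {
      "Memory accesses", "Sync acquires", "Sync releases"};
  for (int i = 0; i < StatCnt; i++)
    std::printf("%s: %16llu\n", name[i], (unsigned long long)stat[i]);
}

int main() {
  StatInc(StatMop, 3);
  StatInc(StatSyncAcquire);
  StatOutput();
  return 0;
}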
diff --git a/lib/tsan/rtl/tsan_suppressions.cc b/lib/tsan/rtl/tsan_suppressions.cc
index 4b0ac04c48a6f..299fc80fd262e 100644
--- a/lib/tsan/rtl/tsan_suppressions.cc
+++ b/lib/tsan/rtl/tsan_suppressions.cc
@@ -33,7 +33,7 @@ static const char *const std_suppressions =
"race:std::_Sp_counted_ptr_inplace<std::thread::_Impl\n";
// Can be overriden in frontend.
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
extern "C" const char *WEAK __tsan_default_suppressions() {
return 0;
}
@@ -41,55 +41,16 @@ extern "C" const char *WEAK __tsan_default_suppressions() {
namespace __tsan {
-static SuppressionContext* g_ctx;
-
-static char *ReadFile(const char *filename) {
- if (filename == 0 || filename[0] == 0)
- return 0;
- InternalScopedBuffer<char> tmp(4*1024);
- if (filename[0] == '/' || GetPwd() == 0)
- internal_snprintf(tmp.data(), tmp.size(), "%s", filename);
- else
- internal_snprintf(tmp.data(), tmp.size(), "%s/%s", GetPwd(), filename);
- uptr openrv = OpenFile(tmp.data(), false);
- if (internal_iserror(openrv)) {
- Printf("ThreadSanitizer: failed to open suppressions file '%s'\n",
- tmp.data());
- Die();
- }
- fd_t fd = openrv;
- const uptr fsize = internal_filesize(fd);
- if (fsize == (uptr)-1) {
- Printf("ThreadSanitizer: failed to stat suppressions file '%s'\n",
- tmp.data());
- Die();
- }
- char *buf = (char*)internal_alloc(MBlockSuppression, fsize + 1);
- if (fsize != internal_read(fd, buf, fsize)) {
- Printf("ThreadSanitizer: failed to read suppressions file '%s'\n",
- tmp.data());
- Die();
- }
- internal_close(fd);
- buf[fsize] = 0;
- return buf;
-}
+static bool suppressions_inited = false;
void InitializeSuppressions() {
- ALIGNED(64) static char placeholder_[sizeof(SuppressionContext)];
- g_ctx = new(placeholder_) SuppressionContext;
- const char *supp = ReadFile(flags()->suppressions);
- g_ctx->Parse(supp);
-#ifndef TSAN_GO
- supp = __tsan_default_suppressions();
- g_ctx->Parse(supp);
- g_ctx->Parse(std_suppressions);
+ CHECK(!suppressions_inited);
+ SuppressionContext::InitIfNecessary();
+#ifndef SANITIZER_GO
+ SuppressionContext::Get()->Parse(__tsan_default_suppressions());
+ SuppressionContext::Get()->Parse(std_suppressions);
#endif
-}
-
-SuppressionContext *GetSuppressionContext() {
- CHECK_NE(g_ctx, 0);
- return g_ctx;
+ suppressions_inited = true;
}
SuppressionType conv(ReportType typ) {
@@ -99,62 +60,75 @@ SuppressionType conv(ReportType typ) {
return SuppressionRace;
else if (typ == ReportTypeUseAfterFree)
return SuppressionRace;
+ else if (typ == ReportTypeVptrUseAfterFree)
+ return SuppressionRace;
else if (typ == ReportTypeThreadLeak)
return SuppressionThread;
else if (typ == ReportTypeMutexDestroyLocked)
return SuppressionMutex;
+ else if (typ == ReportTypeMutexDoubleLock)
+ return SuppressionMutex;
+ else if (typ == ReportTypeMutexBadUnlock)
+ return SuppressionMutex;
+ else if (typ == ReportTypeMutexBadReadLock)
+ return SuppressionMutex;
+ else if (typ == ReportTypeMutexBadReadUnlock)
+ return SuppressionMutex;
else if (typ == ReportTypeSignalUnsafe)
return SuppressionSignal;
else if (typ == ReportTypeErrnoInSignal)
return SuppressionNone;
+ else if (typ == ReportTypeDeadlock)
+ return SuppressionDeadlock;
Printf("ThreadSanitizer: unknown report type %d\n", typ),
Die();
}
uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp) {
- CHECK(g_ctx);
- if (!g_ctx->SuppressionCount() || stack == 0) return 0;
+ if (!SuppressionContext::Get()->SuppressionCount() || stack == 0 ||
+ !stack->suppressable)
+ return 0;
SuppressionType stype = conv(typ);
if (stype == SuppressionNone)
return 0;
Suppression *s;
- for (const ReportStack *frame = stack; frame; frame = frame->next) {
- if (g_ctx->Match(frame->func, stype, &s) ||
- g_ctx->Match(frame->file, stype, &s) ||
- g_ctx->Match(frame->module, stype, &s)) {
+ for (const SymbolizedStack *frame = stack->frames; frame;
+ frame = frame->next) {
+ const AddressInfo &info = frame->info;
+ if (SuppressionContext::Get()->Match(info.function, stype, &s) ||
+ SuppressionContext::Get()->Match(info.file, stype, &s) ||
+ SuppressionContext::Get()->Match(info.module, stype, &s)) {
DPrintf("ThreadSanitizer: matched suppression '%s'\n", s->templ);
s->hit_count++;
*sp = s;
- return frame->pc;
+ return info.address;
}
}
return 0;
}
uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp) {
- CHECK(g_ctx);
- if (!g_ctx->SuppressionCount() || loc == 0 ||
- loc->type != ReportLocationGlobal)
+ if (!SuppressionContext::Get()->SuppressionCount() || loc == 0 ||
+ loc->type != ReportLocationGlobal || !loc->suppressable)
return 0;
SuppressionType stype = conv(typ);
if (stype == SuppressionNone)
return 0;
Suppression *s;
- if (g_ctx->Match(loc->name, stype, &s) ||
- g_ctx->Match(loc->file, stype, &s) ||
- g_ctx->Match(loc->module, stype, &s)) {
+ const DataInfo &global = loc->global;
+ if (SuppressionContext::Get()->Match(global.name, stype, &s) ||
+ SuppressionContext::Get()->Match(global.module, stype, &s)) {
DPrintf("ThreadSanitizer: matched suppression '%s'\n", s->templ);
s->hit_count++;
*sp = s;
- return loc->addr;
+ return global.start;
}
return 0;
}
void PrintMatchedSuppressions() {
- CHECK(g_ctx);
InternalMmapVector<Suppression *> matched(1);
- g_ctx->GetMatched(&matched);
+ SuppressionContext::Get()->GetMatched(&matched);
if (!matched.size())
return;
int hit_count = 0;
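[Editor's note] The reworked IsSuppressed() above walks the symbolized frames of a report and asks the shared SuppressionContext to match each frame's function, file and module against the suppressions for the converted report type. A standalone sketch of that matching shape; the types and the plain substring matching below are simplifying assumptions, not the real SuppressionContext API:

#include <cstdio>
#include <string>
#include <vector>

// Simplified stand-ins for the runtime's types (illustration only).
struct Frame {
  std::string function, file, module;
};

struct Suppression {
  std::string type;   // e.g. "race", "thread", "mutex", "deadlock"
  std::string templ;  // pattern matched against frame attributes
  int hit_count = 0;
};

// Walk the symbolized frames top to bottom and return the first suppression
// of the right type whose template matches the function, file or module.
Suppression *Match(const std::string &type, const std::vector<Frame> &stack,
                   std::vector<Suppression> &supps) {
  for (const Frame &f : stack) {
    for (Suppression &s : supps) {
      if (s.type != type)
        continue;
      if (f.function.find(s.templ) != std::string::npos ||
          f.file.find(s.templ) != std::string::npos ||
          f.module.find(s.templ) != std::string::npos) {
        s.hit_count++;
        return &s;
      }
    }
  }
  return nullptr;
}

int main() {
  std::vector<Suppression> supps = {{"race", "third_party/"}};
  std::vector<Frame> stack = {{"Foo::Bar", "third_party/foo.cc", "libfoo.so"}};
  if (Suppression *s = Match("race", stack, supps))
    std::printf("matched suppression '%s' (%d hits)\n", s->templ.c_str(),
                s->hit_count);
  return 0;
}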
diff --git a/lib/tsan/rtl/tsan_suppressions.h b/lib/tsan/rtl/tsan_suppressions.h
index fe7db588f50f6..c618b3db4c2da 100644
--- a/lib/tsan/rtl/tsan_suppressions.h
+++ b/lib/tsan/rtl/tsan_suppressions.h
@@ -22,7 +22,6 @@ void InitializeSuppressions();
void PrintMatchedSuppressions();
uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp);
uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp);
-SuppressionContext *GetSuppressionContext();
} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_symbolize.cc b/lib/tsan/rtl/tsan_symbolize.cc
index 75acf1d8438b7..3beb44f7ad3b8 100644
--- a/lib/tsan/rtl/tsan_symbolize.cc
+++ b/lib/tsan/rtl/tsan_symbolize.cc
@@ -26,50 +26,16 @@ void EnterSymbolizer() {
ThreadState *thr = cur_thread();
CHECK(!thr->in_symbolizer);
thr->in_symbolizer = true;
+ thr->ignore_interceptors++;
}
void ExitSymbolizer() {
ThreadState *thr = cur_thread();
CHECK(thr->in_symbolizer);
thr->in_symbolizer = false;
+ thr->ignore_interceptors--;
}
-ReportStack *NewReportStackEntry(uptr addr) {
- ReportStack *ent = (ReportStack*)internal_alloc(MBlockReportStack,
- sizeof(ReportStack));
- internal_memset(ent, 0, sizeof(*ent));
- ent->pc = addr;
- return ent;
-}
-
-static ReportStack *NewReportStackEntry(const AddressInfo &info) {
- ReportStack *ent = NewReportStackEntry(info.address);
- ent->module = StripModuleName(info.module);
- ent->offset = info.module_offset;
- if (info.function)
- ent->func = internal_strdup(info.function);
- if (info.file)
- ent->file = internal_strdup(info.file);
- ent->line = info.line;
- ent->col = info.column;
- return ent;
-}
-
-// Denotes fake PC values that come from JIT/JAVA/etc.
-// For such PC values __tsan_symbolize_external() will be called.
-const uptr kExternalPCBit = 1ULL << 60;
-
// May be overriden by JIT/JAVA/etc,
// whatever produces PCs marked with kExternalPCBit.
extern "C" bool __tsan_symbolize_external(uptr pc,
@@ -85,7 +51,7 @@ bool __tsan_symbolize_external(uptr pc,
return false;
}
-ReportStack *SymbolizeCode(uptr addr) {
+SymbolizedStack *SymbolizeCode(uptr addr) {
// Check if PC comes from non-native land.
if (addr & kExternalPCBit) {
// Declare static to not consume too much stack space.
@@ -93,66 +59,30 @@ ReportStack *SymbolizeCode(uptr addr) {
static char func_buf[1024];
static char file_buf[1024];
int line, col;
- if (!__tsan_symbolize_external(addr, func_buf, sizeof(func_buf),
- file_buf, sizeof(file_buf), &line, &col))
- return NewReportStackEntry(addr);
- ReportStack *ent = NewReportStackEntry(addr);
- ent->module = 0;
- ent->offset = 0;
- ent->func = internal_strdup(func_buf);
- ent->file = internal_strdup(file_buf);
- ent->line = line;
- ent->col = col;
- return ent;
- }
- if (!Symbolizer::Get()->IsAvailable())
- return SymbolizeCodeAddr2Line(addr);
- static const uptr kMaxAddrFrames = 16;
- InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
- for (uptr i = 0; i < kMaxAddrFrames; i++)
- new(&addr_frames[i]) AddressInfo();
- uptr addr_frames_num = Symbolizer::Get()->SymbolizeCode(
- addr, addr_frames.data(), kMaxAddrFrames);
- if (addr_frames_num == 0)
- return NewReportStackEntry(addr);
- ReportStack *top = 0;
- ReportStack *bottom = 0;
- for (uptr i = 0; i < addr_frames_num; i++) {
- ReportStack *cur_entry = NewReportStackEntry(addr_frames[i]);
- CHECK(cur_entry);
- addr_frames[i].Clear();
- if (i == 0)
- top = cur_entry;
- else
- bottom->next = cur_entry;
- bottom = cur_entry;
+ SymbolizedStack *frame = SymbolizedStack::New(addr);
+ if (__tsan_symbolize_external(addr, func_buf, sizeof(func_buf), file_buf,
+ sizeof(file_buf), &line, &col)) {
+ frame->info.function = internal_strdup(func_buf);
+ frame->info.file = internal_strdup(file_buf);
+ frame->info.line = line;
+ frame->info.column = col;
+ }
+ return frame;
}
- return top;
+ return Symbolizer::GetOrInit()->SymbolizePC(addr);
}
ReportLocation *SymbolizeData(uptr addr) {
- if (!Symbolizer::Get()->IsAvailable())
- return 0;
DataInfo info;
- if (!Symbolizer::Get()->SymbolizeData(addr, &info))
+ if (!Symbolizer::GetOrInit()->SymbolizeData(addr, &info))
return 0;
- ReportLocation *ent = (ReportLocation*)internal_alloc(MBlockReportStack,
- sizeof(ReportLocation));
- internal_memset(ent, 0, sizeof(*ent));
- ent->type = ReportLocationGlobal;
- ent->module = StripModuleName(info.module);
- ent->offset = info.module_offset;
- if (info.name)
- ent->name = internal_strdup(info.name);
- ent->addr = info.start;
- ent->size = info.size;
+ ReportLocation *ent = ReportLocation::New(ReportLocationGlobal);
+ ent->global = info;
return ent;
}
void SymbolizeFlush() {
- if (!Symbolizer::Get()->IsAvailable())
- return;
- Symbolizer::Get()->Flush();
+ Symbolizer::GetOrInit()->Flush();
}
} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_symbolize.h b/lib/tsan/rtl/tsan_symbolize.h
index 6282e45b40d5b..b59b6cfb83418 100644
--- a/lib/tsan/rtl/tsan_symbolize.h
+++ b/lib/tsan/rtl/tsan_symbolize.h
@@ -18,14 +18,16 @@
namespace __tsan {
+// Denotes fake PC values that come from JIT/JAVA/etc.
+// For such PC values __tsan_symbolize_external() will be called.
+const uptr kExternalPCBit = 1ULL << 60;
+
void EnterSymbolizer();
void ExitSymbolizer();
-ReportStack *SymbolizeCode(uptr addr);
+SymbolizedStack *SymbolizeCode(uptr addr);
ReportLocation *SymbolizeData(uptr addr);
void SymbolizeFlush();
-ReportStack *SymbolizeCodeAddr2Line(uptr addr);
-
ReportStack *NewReportStackEntry(uptr addr);
} // namespace __tsan
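[Editor's note] PCs with kExternalPCBit set bypass the regular symbolizer and are handed to the weak __tsan_symbolize_external() hook, so a JIT or VM can name its generated code. A hedged sketch of such an override, meant to be linked into a program built with -fsanitize=thread; the parameter types use uintptr_t in place of the runtime's uptr, and the lookup itself is a placeholder:

#include <cstdint>
#include <cstring>

// Sketch of a JIT/VM providing symbolization for PCs tagged with
// kExternalPCBit (bit 60). The signature mirrors the call site in
// SymbolizeCode() above; a real implementation would look `pc` up in the
// JIT's own method tables instead of returning fixed strings.
extern "C" bool __tsan_symbolize_external(uintptr_t pc,
                                          char *func_buf, uintptr_t func_siz,
                                          char *file_buf, uintptr_t file_siz,
                                          int *line, int *col) {
  if ((pc & (1ULL << 60)) == 0)
    return false;  // not one of our fake PCs; let TSan symbolize it
  std::strncpy(func_buf, "JittedMethod", func_siz - 1);
  func_buf[func_siz - 1] = '\0';
  std::strncpy(file_buf, "generated.code", file_siz - 1);
  file_buf[file_siz - 1] = '\0';
  *line = 1;
  *col = 0;
  return true;
}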
diff --git a/lib/tsan/rtl/tsan_symbolize_addr2line_linux.cc b/lib/tsan/rtl/tsan_symbolize_addr2line_linux.cc
deleted file mode 100644
index b186d3b38ec9c..0000000000000
--- a/lib/tsan/rtl/tsan_symbolize_addr2line_linux.cc
+++ /dev/null
@@ -1,193 +0,0 @@
-//===-- tsan_symbolize_addr2line.cc ---------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-//===----------------------------------------------------------------------===//
-#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_libc.h"
-#include "tsan_symbolize.h"
-#include "tsan_mman.h"
-#include "tsan_rtl.h"
-#include "tsan_platform.h"
-
-#include <unistd.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <errno.h>
-#include <link.h>
-#include <linux/limits.h>
-#include <sys/types.h>
-
-namespace __tsan {
-
-struct ModuleDesc {
- const char *fullname;
- const char *name;
- uptr base;
- int inp_fd;
- int out_fd;
-};
-
-struct SectionDesc {
- SectionDesc *next;
- ModuleDesc *module;
- uptr base;
- uptr end;
-};
-
-struct DlIteratePhdrCtx {
- SectionDesc *sections;
- bool is_first;
-};
-
-static void NOINLINE InitModule(ModuleDesc *m) {
- int outfd[2] = {};
- if (pipe(&outfd[0])) {
- Printf("ThreadSanitizer: outfd pipe() failed (%d)\n", errno);
- Die();
- }
- int infd[2] = {};
- if (pipe(&infd[0])) {
- Printf("ThreadSanitizer: infd pipe() failed (%d)\n", errno);
- Die();
- }
- int pid = fork();
- if (pid == 0) {
- internal_close(STDOUT_FILENO);
- internal_close(STDIN_FILENO);
- internal_dup2(outfd[0], STDIN_FILENO);
- internal_dup2(infd[1], STDOUT_FILENO);
- internal_close(outfd[0]);
- internal_close(outfd[1]);
- internal_close(infd[0]);
- internal_close(infd[1]);
- for (int fd = getdtablesize(); fd > 2; fd--)
- internal_close(fd);
- execl("/usr/bin/addr2line", "/usr/bin/addr2line", "-Cfe", m->fullname, 0);
- _exit(0);
- } else if (pid < 0) {
- Printf("ThreadSanitizer: failed to fork symbolizer\n");
- Die();
- }
- internal_close(outfd[0]);
- internal_close(infd[1]);
- m->inp_fd = infd[0];
- m->out_fd = outfd[1];
-}
-
-static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
- DlIteratePhdrCtx *ctx = (DlIteratePhdrCtx*)arg;
- InternalScopedBuffer<char> tmp(128);
- if (ctx->is_first) {
- internal_snprintf(tmp.data(), tmp.size(), "/proc/%d/exe",
- (int)internal_getpid());
- info->dlpi_name = tmp.data();
- }
- ctx->is_first = false;
- if (info->dlpi_name == 0 || info->dlpi_name[0] == 0)
- return 0;
- ModuleDesc *m = (ModuleDesc*)internal_alloc(MBlockReportStack,
- sizeof(ModuleDesc));
- m->fullname = internal_strdup(info->dlpi_name);
- m->name = internal_strrchr(m->fullname, '/');
- if (m->name)
- m->name += 1;
- else
- m->name = m->fullname;
- m->base = (uptr)info->dlpi_addr;
- m->inp_fd = -1;
- m->out_fd = -1;
- DPrintf2("Module %s %zx\n", m->name, m->base);
- for (int i = 0; i < info->dlpi_phnum; i++) {
- const Elf64_Phdr *s = &info->dlpi_phdr[i];
- DPrintf2(" Section p_type=%zx p_offset=%zx p_vaddr=%zx p_paddr=%zx"
- " p_filesz=%zx p_memsz=%zx p_flags=%zx p_align=%zx\n",
- (uptr)s->p_type, (uptr)s->p_offset, (uptr)s->p_vaddr,
- (uptr)s->p_paddr, (uptr)s->p_filesz, (uptr)s->p_memsz,
- (uptr)s->p_flags, (uptr)s->p_align);
- if (s->p_type != PT_LOAD)
- continue;
- SectionDesc *sec = (SectionDesc*)internal_alloc(MBlockReportStack,
- sizeof(SectionDesc));
- sec->module = m;
- sec->base = info->dlpi_addr + s->p_vaddr;
- sec->end = sec->base + s->p_memsz;
- sec->next = ctx->sections;
- ctx->sections = sec;
- DPrintf2(" Section %zx-%zx\n", sec->base, sec->end);
- }
- return 0;
-}
-
-static SectionDesc *InitSections() {
- DlIteratePhdrCtx ctx = {0, true};
- dl_iterate_phdr(dl_iterate_phdr_cb, &ctx);
- return ctx.sections;
-}
-
-static SectionDesc *GetSectionDesc(uptr addr) {
- static SectionDesc *sections = 0;
- if (sections == 0)
- sections = InitSections();
- for (SectionDesc *s = sections; s; s = s->next) {
- if (addr >= s->base && addr < s->end) {
- if (s->module->inp_fd == -1)
- InitModule(s->module);
- return s;
- }
- }
- return 0;
-}
-
-ReportStack *SymbolizeCodeAddr2Line(uptr addr) {
- SectionDesc *s = GetSectionDesc(addr);
- if (s == 0)
- return NewReportStackEntry(addr);
- ModuleDesc *m = s->module;
- uptr offset = addr - m->base;
- char addrstr[32];
- internal_snprintf(addrstr, sizeof(addrstr), "%p\n", (void*)offset);
- if (0 >= internal_write(m->out_fd, addrstr, internal_strlen(addrstr))) {
- Printf("ThreadSanitizer: can't write from symbolizer (%d, %d)\n",
- m->out_fd, errno);
- Die();
- }
- InternalScopedBuffer<char> func(1024);
- ssize_t len = internal_read(m->inp_fd, func.data(), func.size() - 1);
- if (len <= 0) {
- Printf("ThreadSanitizer: can't read from symbolizer (%d, %d)\n",
- m->inp_fd, errno);
- Die();
- }
- func.data()[len] = 0;
- ReportStack *res = NewReportStackEntry(addr);
- res->module = internal_strdup(m->name);
- res->offset = offset;
- char *pos = (char*)internal_strchr(func.data(), '\n');
- if (pos && func[0] != '?') {
- res->func = (char*)internal_alloc(MBlockReportStack, pos - func.data() + 1);
- internal_memcpy(res->func, func.data(), pos - func.data());
- res->func[pos - func.data()] = 0;
- char *pos2 = (char*)internal_strchr(pos, ':');
- if (pos2) {
- res->file = (char*)internal_alloc(MBlockReportStack, pos2 - pos - 1 + 1);
- internal_memcpy(res->file, pos + 1, pos2 - pos - 1);
- res->file[pos2 - pos - 1] = 0;
- res->line = atoi(pos2 + 1);
- }
- }
- return res;
-}
-
-ReportStack *SymbolizeDataAddr2Line(uptr addr) {
- return 0;
-}
-
-} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_sync.cc b/lib/tsan/rtl/tsan_sync.cc
index f8f3c40fab047..1041073bed510 100644
--- a/lib/tsan/rtl/tsan_sync.cc
+++ b/lib/tsan/rtl/tsan_sync.cc
@@ -17,290 +17,209 @@
namespace __tsan {
-SyncVar::SyncVar(uptr addr, u64 uid)
- : mtx(MutexTypeSyncVar, StatMtxSyncVar)
- , addr(addr)
- , uid(uid)
- , owner_tid(kInvalidTid)
- , last_lock()
- , recursion()
- , is_rw()
- , is_recursive()
- , is_broken()
- , is_linker_init() {
+void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);
+
+SyncVar::SyncVar()
+ : mtx(MutexTypeSyncVar, StatMtxSyncVar) {
+ Reset(0);
+}
+
+void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
+ this->addr = addr;
+ this->uid = uid;
+ this->next = 0;
+
+ creation_stack_id = 0;
+ if (kCppMode) // Go does not use them
+ creation_stack_id = CurrentStackId(thr, pc);
+ if (common_flags()->detect_deadlocks)
+ DDMutexInit(thr, pc, this);
+}
+
+void SyncVar::Reset(ThreadState *thr) {
+ uid = 0;
+ creation_stack_id = 0;
+ owner_tid = kInvalidTid;
+ last_lock = 0;
+ recursion = 0;
+ is_rw = 0;
+ is_recursive = 0;
+ is_broken = 0;
+ is_linker_init = 0;
+
+ if (thr == 0) {
+ CHECK_EQ(clock.size(), 0);
+ CHECK_EQ(read_clock.size(), 0);
+ } else {
+ clock.Reset(&thr->clock_cache);
+ read_clock.Reset(&thr->clock_cache);
+ }
}
-SyncTab::Part::Part()
- : mtx(MutexTypeSyncTab, StatMtxSyncTab)
- , val() {
+MetaMap::MetaMap() {
+ atomic_store(&uid_gen_, 0, memory_order_relaxed);
}
-SyncTab::SyncTab() {
+void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+ u32 idx = block_alloc_.Alloc(&thr->block_cache);
+ MBlock *b = block_alloc_.Map(idx);
+ b->siz = sz;
+ b->tid = thr->tid;
+ b->stk = CurrentStackId(thr, pc);
+ u32 *meta = MemToMeta(p);
+ DCHECK_EQ(*meta, 0);
+ *meta = idx | kFlagBlock;
}
-SyncTab::~SyncTab() {
- for (int i = 0; i < kPartCount; i++) {
- while (tab_[i].val) {
- SyncVar *tmp = tab_[i].val;
- tab_[i].val = tmp->next;
- DestroyAndFree(tmp);
+uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) {
+ MBlock* b = GetBlock(p);
+ if (b == 0)
+ return 0;
+ uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
+ FreeRange(thr, pc, p, sz);
+ return sz;
+}
+
+void MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+ u32 *meta = MemToMeta(p);
+ u32 *end = MemToMeta(p + sz);
+ if (end == meta)
+ end++;
+ for (; meta < end; meta++) {
+ u32 idx = *meta;
+ *meta = 0;
+ for (;;) {
+ if (idx == 0)
+ break;
+ if (idx & kFlagBlock) {
+ block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask);
+ break;
+ } else if (idx & kFlagSync) {
+ DCHECK(idx & kFlagSync);
+ SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
+ u32 next = s->next;
+ s->Reset(thr);
+ sync_alloc_.Free(&thr->sync_cache, idx & ~kFlagMask);
+ idx = next;
+ } else {
+ CHECK(0);
+ }
}
}
}
-SyncVar* SyncTab::GetOrCreateAndLock(ThreadState *thr, uptr pc,
- uptr addr, bool write_lock) {
- return GetAndLock(thr, pc, addr, write_lock, true);
+MBlock* MetaMap::GetBlock(uptr p) {
+ u32 *meta = MemToMeta(p);
+ u32 idx = *meta;
+ for (;;) {
+ if (idx == 0)
+ return 0;
+ if (idx & kFlagBlock)
+ return block_alloc_.Map(idx & ~kFlagMask);
+ DCHECK(idx & kFlagSync);
+ SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
+ idx = s->next;
+ }
}
-SyncVar* SyncTab::GetIfExistsAndLock(uptr addr, bool write_lock) {
- return GetAndLock(0, 0, addr, write_lock, false);
+SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
+ uptr addr, bool write_lock) {
+ return GetAndLock(thr, pc, addr, write_lock, true);
}
-SyncVar* SyncTab::Create(ThreadState *thr, uptr pc, uptr addr) {
- StatInc(thr, StatSyncCreated);
- void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
- const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
- SyncVar *res = new(mem) SyncVar(addr, uid);
-#ifndef TSAN_GO
- res->creation_stack_id = CurrentStackId(thr, pc);
-#endif
- return res;
+SyncVar* MetaMap::GetIfExistsAndLock(uptr addr) {
+ return GetAndLock(0, 0, addr, true, false);
}
-SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
+SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
uptr addr, bool write_lock, bool create) {
-#ifndef TSAN_GO
- { // NOLINT
- SyncVar *res = GetJavaSync(thr, pc, addr, write_lock, create);
- if (res)
- return res;
- }
-
- // Here we ask only PrimaryAllocator, because
- // SecondaryAllocator::PointerIsMine() is slow and we have fallback on
- // the hashmap anyway.
- if (PrimaryAllocator::PointerIsMine((void*)addr)) {
- MBlock *b = user_mblock(thr, (void*)addr);
- CHECK_NE(b, 0);
- MBlock::ScopedLock l(b);
- SyncVar *res = 0;
- for (res = b->ListHead(); res; res = res->next) {
- if (res->addr == addr)
+ u32 *meta = MemToMeta(addr);
+ u32 idx0 = *meta;
+ u32 myidx = 0;
+ SyncVar *mys = 0;
+ for (;;) {
+ u32 idx = idx0;
+ for (;;) {
+ if (idx == 0)
break;
- }
- if (res == 0) {
- if (!create)
- return 0;
- res = Create(thr, pc, addr);
- b->ListPush(res);
- }
- if (write_lock)
- res->mtx.Lock();
- else
- res->mtx.ReadLock();
- return res;
- }
-#endif
-
- Part *p = &tab_[PartIdx(addr)];
- {
- ReadLock l(&p->mtx);
- for (SyncVar *res = p->val; res; res = res->next) {
- if (res->addr == addr) {
+ if (idx & kFlagBlock)
+ break;
+ DCHECK(idx & kFlagSync);
+ SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
+ if (s->addr == addr) {
+ if (myidx != 0) {
+ mys->Reset(thr);
+ sync_alloc_.Free(&thr->sync_cache, myidx);
+ }
if (write_lock)
- res->mtx.Lock();
+ s->mtx.Lock();
else
- res->mtx.ReadLock();
- return res;
+ s->mtx.ReadLock();
+ return s;
}
+ idx = s->next;
}
- }
- if (!create)
- return 0;
- {
- Lock l(&p->mtx);
- SyncVar *res = p->val;
- for (; res; res = res->next) {
- if (res->addr == addr)
- break;
- }
- if (res == 0) {
- res = Create(thr, pc, addr);
- res->next = p->val;
- p->val = res;
+ if (!create)
+ return 0;
+ if (*meta != idx0) {
+ idx0 = *meta;
+ continue;
}
- if (write_lock)
- res->mtx.Lock();
- else
- res->mtx.ReadLock();
- return res;
- }
-}
-SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
-#ifndef TSAN_GO
- { // NOLINT
- SyncVar *res = GetAndRemoveJavaSync(thr, pc, addr);
- if (res)
- return res;
- }
- if (PrimaryAllocator::PointerIsMine((void*)addr)) {
- MBlock *b = user_mblock(thr, (void*)addr);
- CHECK_NE(b, 0);
- SyncVar *res = 0;
- {
- MBlock::ScopedLock l(b);
- res = b->ListHead();
- if (res) {
- if (res->addr == addr) {
- if (res->is_linker_init)
- return 0;
- b->ListPop();
- } else {
- SyncVar **prev = &res->next;
- res = *prev;
- while (res) {
- if (res->addr == addr) {
- if (res->is_linker_init)
- return 0;
- *prev = res->next;
- break;
- }
- prev = &res->next;
- res = *prev;
- }
- }
- if (res) {
- StatInc(thr, StatSyncDestroyed);
- res->mtx.Lock();
- res->mtx.Unlock();
- }
- }
+ if (myidx == 0) {
+ const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
+ myidx = sync_alloc_.Alloc(&thr->sync_cache);
+ mys = sync_alloc_.Map(myidx);
+ mys->Init(thr, pc, addr, uid);
}
- return res;
- }
-#endif
-
- Part *p = &tab_[PartIdx(addr)];
- SyncVar *res = 0;
- {
- Lock l(&p->mtx);
- SyncVar **prev = &p->val;
- res = *prev;
- while (res) {
- if (res->addr == addr) {
- if (res->is_linker_init)
- return 0;
- *prev = res->next;
- break;
- }
- prev = &res->next;
- res = *prev;
+ mys->next = idx0;
+ if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
+ myidx | kFlagSync, memory_order_release)) {
+ if (write_lock)
+ mys->mtx.Lock();
+ else
+ mys->mtx.ReadLock();
+ return mys;
}
}
- if (res) {
- StatInc(thr, StatSyncDestroyed);
- res->mtx.Lock();
- res->mtx.Unlock();
- }
- return res;
-}
-
-int SyncTab::PartIdx(uptr addr) {
- return (addr >> 3) % kPartCount;
-}
-
-StackTrace::StackTrace()
- : n_()
- , s_()
- , c_() {
}
-StackTrace::StackTrace(uptr *buf, uptr cnt)
- : n_()
- , s_(buf)
- , c_(cnt) {
- CHECK_NE(buf, 0);
- CHECK_NE(cnt, 0);
-}
-
-StackTrace::~StackTrace() {
- Reset();
-}
-
-void StackTrace::Reset() {
- if (s_ && !c_) {
- CHECK_NE(n_, 0);
- internal_free(s_);
- s_ = 0;
- }
- n_ = 0;
-}
-
-void StackTrace::Init(const uptr *pcs, uptr cnt) {
- Reset();
- if (cnt == 0)
- return;
- if (c_) {
- CHECK_NE(s_, 0);
- CHECK_LE(cnt, c_);
- } else {
- s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
+void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
+ // src and dst can overlap,
+ // there are no concurrent accesses to the regions (e.g. stop-the-world).
+ CHECK_NE(src, dst);
+ CHECK_NE(sz, 0);
+ uptr diff = dst - src;
+ u32 *src_meta = MemToMeta(src);
+ u32 *dst_meta = MemToMeta(dst);
+ u32 *src_meta_end = MemToMeta(src + sz);
+ uptr inc = 1;
+ if (dst > src) {
+ src_meta = MemToMeta(src + sz) - 1;
+ dst_meta = MemToMeta(dst + sz) - 1;
+ src_meta_end = MemToMeta(src) - 1;
+ inc = -1;
}
- n_ = cnt;
- internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
-}
-
-void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
- Reset();
- n_ = thr->shadow_stack_pos - thr->shadow_stack;
- if (n_ + !!toppc == 0)
- return;
- uptr start = 0;
- if (c_) {
- CHECK_NE(s_, 0);
- if (n_ + !!toppc > c_) {
- start = n_ - c_ + !!toppc;
- n_ = c_ - !!toppc;
- }
- } else {
- // Cap potentially huge stacks.
- if (n_ + !!toppc > kTraceStackSize) {
- start = n_ - kTraceStackSize + !!toppc;
- n_ = kTraceStackSize - !!toppc;
+ for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
+ CHECK_EQ(*dst_meta, 0);
+ u32 idx = *src_meta;
+ *src_meta = 0;
+ *dst_meta = idx;
+ // Patch the addresses in sync objects.
+ while (idx != 0) {
+ if (idx & kFlagBlock)
+ break;
+ CHECK(idx & kFlagSync);
+ SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
+ s->addr += diff;
+ idx = s->next;
}
- s_ = (uptr*)internal_alloc(MBlockStackTrace,
- (n_ + !!toppc) * sizeof(s_[0]));
- }
- for (uptr i = 0; i < n_; i++)
- s_[i] = thr->shadow_stack[start + i];
- if (toppc) {
- s_[n_] = toppc;
- n_++;
}
}
-void StackTrace::CopyFrom(const StackTrace& other) {
- Reset();
- Init(other.Begin(), other.Size());
-}
-
-bool StackTrace::IsEmpty() const {
- return n_ == 0;
-}
-
-uptr StackTrace::Size() const {
- return n_;
-}
-
-uptr StackTrace::Get(uptr i) const {
- CHECK_LT(i, n_);
- return s_[i];
-}
-
-const uptr *StackTrace::Begin() const {
- return s_;
+void MetaMap::OnThreadIdle(ThreadState *thr) {
+ block_alloc_.FlushCache(&thr->block_cache);
+ sync_alloc_.FlushCache(&thr->sync_cache);
}
} // namespace __tsan
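[Editor's note] The new MetaMap::GetAndLock() above resolves first-use races without a table lock: it searches the per-cell chain, allocates a candidate SyncVar if nothing matched, links it to the head it observed and publishes it with a compare-exchange, retrying (and eventually recycling the candidate) if another thread changed the cell first. A minimal pointer-based sketch of that publish-with-CAS pattern, assuming ordinary heap nodes; the runtime instead stores tagged 32-bit slab indices in the meta shadow cell:

#include <atomic>
#include <cstdio>

struct Node {
  unsigned long addr;
  Node *next;
};

// Find the node for `addr`, or create and publish one, lock-free.
Node *GetOrCreate(std::atomic<Node*> *head, unsigned long addr) {
  Node *fresh = nullptr;
  Node *h = head->load(std::memory_order_acquire);
  for (;;) {
    // First look for an existing node in the currently observed chain.
    for (Node *n = h; n; n = n->next) {
      if (n->addr == addr) {
        delete fresh;  // lost the race to a thread that created it first
        return n;
      }
    }
    if (!fresh)
      fresh = new Node{addr, nullptr};
    fresh->next = h;
    // Publish; on failure `h` is reloaded and the search repeats.
    if (head->compare_exchange_strong(h, fresh, std::memory_order_release,
                                      std::memory_order_acquire))
      return fresh;
  }
}

int main() {
  std::atomic<Node*> head{nullptr};
  Node *a = GetOrCreate(&head, 0x1000);
  Node *b = GetOrCreate(&head, 0x1000);
  std::printf("same node: %d\n", a == b);
  return 0;
}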
diff --git a/lib/tsan/rtl/tsan_sync.h b/lib/tsan/rtl/tsan_sync.h
index 823af543f590b..574810d8a542d 100644
--- a/lib/tsan/rtl/tsan_sync.h
+++ b/lib/tsan/rtl/tsan_sync.h
@@ -15,50 +15,22 @@
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
-#include "tsan_clock.h"
+#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "tsan_defs.h"
+#include "tsan_clock.h"
#include "tsan_mutex.h"
+#include "tsan_dense_alloc.h"
namespace __tsan {
-class SlabCache;
-
-class StackTrace {
- public:
- StackTrace();
- // Initialized the object in "static mode",
- // in this mode it never calls malloc/free but uses the provided buffer.
- StackTrace(uptr *buf, uptr cnt);
- ~StackTrace();
- void Reset();
-
- void Init(const uptr *pcs, uptr cnt);
- void ObtainCurrent(ThreadState *thr, uptr toppc);
- bool IsEmpty() const;
- uptr Size() const;
- uptr Get(uptr i) const;
- const uptr *Begin() const;
- void CopyFrom(const StackTrace& other);
-
- private:
- uptr n_;
- uptr *s_;
- const uptr c_;
-
- StackTrace(const StackTrace&);
- void operator = (const StackTrace&);
-};
-
struct SyncVar {
- explicit SyncVar(uptr addr, u64 uid);
+ SyncVar();
static const int kInvalidTid = -1;
+ uptr addr; // overwritten by DenseSlabAlloc freelist
Mutex mtx;
- uptr addr;
- const u64 uid; // Globally unique id.
- SyncClock clock;
- SyncClock read_clock; // Used for rw mutexes only.
+ u64 uid; // Globally unique id.
u32 creation_stack_id;
int owner_tid; // Set only by exclusive owners.
u64 last_lock;
@@ -67,9 +39,16 @@ struct SyncVar {
bool is_recursive;
bool is_broken;
bool is_linker_init;
- SyncVar *next; // In SyncTab hashtable.
+ u32 next; // in MetaMap
+ DDMutex dd;
+ SyncClock read_clock; // Used for rw mutexes only.
+ // The clock is placed last, so that it is situated on a different cache line
+ // with the mtx. This reduces contention for hot sync objects.
+ SyncClock clock;
+
+ void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid);
+ void Reset(ThreadState *thr);
- uptr GetMemoryConsumption();
u64 GetId() const {
// 47 lsb is addr, then 14 bits is low part of uid, then 3 zero bits.
return GetLsb((u64)addr | (uid << 47), 61);
@@ -84,42 +63,39 @@ struct SyncVar {
}
};
-class SyncTab {
+/* MetaMap allows to map arbitrary user pointers onto various descriptors.
+ Currently it maps pointers to heap block descriptors and sync var descs.
+ It uses 1/2 direct shadow, see tsan_platform.h.
+*/
+class MetaMap {
public:
- SyncTab();
- ~SyncTab();
+ MetaMap();
+
+ void AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz);
+ uptr FreeBlock(ThreadState *thr, uptr pc, uptr p);
+ void FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz);
+ MBlock* GetBlock(uptr p);
SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc,
uptr addr, bool write_lock);
- SyncVar* GetIfExistsAndLock(uptr addr, bool write_lock);
-
- // If the SyncVar does not exist, returns 0.
- SyncVar* GetAndRemove(ThreadState *thr, uptr pc, uptr addr);
+ SyncVar* GetIfExistsAndLock(uptr addr);
- SyncVar* Create(ThreadState *thr, uptr pc, uptr addr);
+ void MoveMemory(uptr src, uptr dst, uptr sz);
- uptr GetMemoryConsumption(uptr *nsync);
+ void OnThreadIdle(ThreadState *thr);
private:
- struct Part {
- Mutex mtx;
- SyncVar *val;
- char pad[kCacheLineSize - sizeof(Mutex) - sizeof(SyncVar*)]; // NOLINT
- Part();
- };
-
- // FIXME: Implement something more sane.
- static const int kPartCount = 1009;
- Part tab_[kPartCount];
+ static const u32 kFlagMask = 3 << 30;
+ static const u32 kFlagBlock = 1 << 30;
+ static const u32 kFlagSync = 2 << 30;
+ typedef DenseSlabAlloc<MBlock, 1<<16, 1<<12> BlockAlloc;
+ typedef DenseSlabAlloc<SyncVar, 1<<16, 1<<10> SyncAlloc;
+ BlockAlloc block_alloc_;
+ SyncAlloc sync_alloc_;
atomic_uint64_t uid_gen_;
- int PartIdx(uptr addr);
-
- SyncVar* GetAndLock(ThreadState *thr, uptr pc,
- uptr addr, bool write_lock, bool create);
-
- SyncTab(const SyncTab&); // Not implemented.
- void operator = (const SyncTab&); // Not implemented.
+ SyncVar* GetAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock,
+ bool create);
};
} // namespace __tsan
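[Editor's note] Each meta shadow cell holds a single u32 whose top two bits (kFlagBlock / kFlagSync) say whether the low 30 bits index a heap block descriptor or the head of a SyncVar chain, with chains followed through SyncVar::next. A small sketch of that encoding, reusing the constants from the header above; the helper functions are illustrative, not runtime code:

#include <cassert>
#include <cstdint>
#include <cstdio>

const uint32_t kFlagMask  = 3u << 30;
const uint32_t kFlagBlock = 1u << 30;
const uint32_t kFlagSync  = 2u << 30;

uint32_t EncodeBlock(uint32_t slab_idx) {
  assert((slab_idx & kFlagMask) == 0);  // index must fit in 30 bits
  return slab_idx | kFlagBlock;
}

uint32_t EncodeSync(uint32_t slab_idx) {
  assert((slab_idx & kFlagMask) == 0);
  return slab_idx | kFlagSync;
}

int main() {
  uint32_t cell = EncodeSync(42);
  if (cell & kFlagSync)
    std::printf("sync descriptor at slab index %u\n", cell & ~kFlagMask);
  else if (cell & kFlagBlock)
    std::printf("block descriptor at slab index %u\n", cell & ~kFlagMask);
  return 0;
}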
diff --git a/lib/tsan/rtl/tsan_trace.h b/lib/tsan/rtl/tsan_trace.h
index 5ed0356e2bf5e..1da8752f6fa7e 100644
--- a/lib/tsan/rtl/tsan_trace.h
+++ b/lib/tsan/rtl/tsan_trace.h
@@ -15,7 +15,7 @@
#include "tsan_defs.h"
#include "tsan_mutex.h"
-#include "tsan_sync.h"
+#include "tsan_stack_trace.h"
#include "tsan_mutexset.h"
namespace __tsan {
@@ -42,27 +42,21 @@ enum EventType {
typedef u64 Event;
struct TraceHeader {
- StackTrace stack0; // Start stack for the trace.
+#ifndef SANITIZER_GO
+ BufferedStackTrace stack0; // Start stack for the trace.
+#else
+ VarSizeStackTrace stack0;
+#endif
u64 epoch0; // Start epoch for the trace.
MutexSet mset0;
-#ifndef TSAN_GO
- uptr stack0buf[kTraceStackSize];
-#endif
- TraceHeader()
-#ifndef TSAN_GO
- : stack0(stack0buf, kTraceStackSize)
-#else
- : stack0()
-#endif
- , epoch0() {
- }
+ TraceHeader() : stack0(), epoch0() {}
};
struct Trace {
TraceHeader headers[kTraceParts];
Mutex mtx;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
// Must be last to catch overflow as paging fault.
// Go shadow stack is dynamically allocated.
uptr shadow_stack[kShadowStackSize];
diff --git a/lib/tsan/rtl/tsan_update_shadow_word_inl.h b/lib/tsan/rtl/tsan_update_shadow_word_inl.h
index a11c9bc52509f..c80e0a88d0eb7 100644
--- a/lib/tsan/rtl/tsan_update_shadow_word_inl.h
+++ b/lib/tsan/rtl/tsan_update_shadow_word_inl.h
@@ -16,8 +16,7 @@
do {
StatInc(thr, StatShadowProcessed);
const unsigned kAccessSize = 1 << kAccessSizeLog;
- unsigned off = cur.ComputeSearchOffset();
- u64 *sp = &shadow_mem[(idx + off) % kShadowCnt];
+ u64 *sp = &shadow_mem[idx];
old = LoadShadow(sp);
if (old.IsZero()) {
StatInc(thr, StatShadowZero);
@@ -33,16 +32,6 @@ do {
// same thread?
if (Shadow::TidsAreEqual(old, cur)) {
StatInc(thr, StatShadowSameThread);
- if (OldIsInSameSynchEpoch(old, thr)) {
- if (old.IsRWNotWeaker(kAccessIsWrite, kIsAtomic)) {
- // found a slot that holds effectively the same info
- // (that is, same tid, same sync epoch and same size)
- StatInc(thr, StatMopSame);
- return;
- }
- StoreIfNotYetStored(sp, &store_word);
- break;
- }
if (old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic))
StoreIfNotYetStored(sp, &store_word);
break;
diff --git a/lib/tsan/rtl/tsan_vector.h b/lib/tsan/rtl/tsan_vector.h
index fa236b1f1e44d..a7fb3fa58d543 100644
--- a/lib/tsan/rtl/tsan_vector.h
+++ b/lib/tsan/rtl/tsan_vector.h
@@ -58,10 +58,18 @@ class Vector {
return begin_[i];
}
- T *PushBack(T v = T()) {
+ T *PushBack() {
EnsureSize(Size() + 1);
- end_[-1] = v;
- return &end_[-1];
+ T *p = &end_[-1];
+ internal_memset(p, 0, sizeof(*p));
+ return p;
+ }
+
+ T *PushBack(const T& v) {
+ EnsureSize(Size() + 1);
+ T *p = &end_[-1];
+ internal_memcpy(p, &v, sizeof(*p));
+ return p;
}
void PopBack() {
@@ -70,11 +78,15 @@ class Vector {
}
void Resize(uptr size) {
+ if (size == 0) {
+ end_ = begin_;
+ return;
+ }
uptr old_size = Size();
EnsureSize(size);
if (old_size < size) {
for (uptr i = old_size; i < size; i++)
- begin_[i] = T();
+ internal_memset(&begin_[i], 0, sizeof(begin_[i]));
}
}
@@ -92,7 +104,7 @@ class Vector {
return;
}
uptr cap0 = last_ - begin_;
- uptr cap = 2 * cap0;
+ uptr cap = cap0 * 5 / 4; // 25% growth
if (cap == 0)
cap = 16;
if (cap < size)
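[Editor's note] Growing capacity by 25% instead of doubling costs a few extra reallocations but keeps peak over-allocation much lower. A quick sketch of the capacity sequence this rule produces when elements are appended one at a time, assuming EnsureSize() falls back to the requested size when the 25% step is still too small:

#include <cstdio>

int main() {
  // Reproduce the growth rule: 25% growth in integer math, minimum 16,
  // never below the requested size.
  unsigned long cap = 0;
  for (unsigned long size = 1; size <= 100; size++) {
    if (size <= cap)
      continue;
    unsigned long next = cap * 5 / 4;  // 25% growth
    if (next == 0)
      next = 16;
    if (next < size)
      next = size;
    std::printf("grow %lu -> %lu (for size %lu)\n", cap, next, size);
    cap = next;
  }
  return 0;
}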