Diffstat (limited to 'lib/tsan/rtl')
-rw-r--r--  lib/tsan/rtl/tsan_debugging.cc          162
-rw-r--r--  lib/tsan/rtl/tsan_defs.h                7
-rw-r--r--  lib/tsan/rtl/tsan_flags.cc              3
-rw-r--r--  lib/tsan/rtl/tsan_flags.inc             4
-rw-r--r--  lib/tsan/rtl/tsan_interceptors.cc       524
-rw-r--r--  lib/tsan/rtl/tsan_interceptors.h        10
-rw-r--r--  lib/tsan/rtl/tsan_interceptors_mac.cc   242
-rw-r--r--  lib/tsan/rtl/tsan_interface.h           290
-rw-r--r--  lib/tsan/rtl/tsan_interface_atomic.cc   25
-rw-r--r--  lib/tsan/rtl/tsan_interface_java.cc     2
-rw-r--r--  lib/tsan/rtl/tsan_libdispatch_mac.cc    511
-rw-r--r--  lib/tsan/rtl/tsan_malloc_mac.cc         39
-rw-r--r--  lib/tsan/rtl/tsan_mman.cc               88
-rw-r--r--  lib/tsan/rtl/tsan_mman.h                5
-rw-r--r--  lib/tsan/rtl/tsan_mutex.cc              1
-rw-r--r--  lib/tsan/rtl/tsan_mutex.h               1
-rw-r--r--  lib/tsan/rtl/tsan_new_delete.cc         8
-rw-r--r--  lib/tsan/rtl/tsan_platform.h            11
-rw-r--r--  lib/tsan/rtl/tsan_platform_linux.cc     130
-rw-r--r--  lib/tsan/rtl/tsan_platform_mac.cc       26
-rw-r--r--  lib/tsan/rtl/tsan_platform_posix.cc     2
-rw-r--r--  lib/tsan/rtl/tsan_preinit.cc            27
-rw-r--r--  lib/tsan/rtl/tsan_report.cc             35
-rw-r--r--  lib/tsan/rtl/tsan_report.h              3
-rw-r--r--  lib/tsan/rtl/tsan_rtl.cc                12
-rw-r--r--  lib/tsan/rtl/tsan_rtl.h                 57
-rw-r--r--  lib/tsan/rtl/tsan_rtl_mutex.cc          59
-rw-r--r--  lib/tsan/rtl/tsan_rtl_proc.cc           61
-rw-r--r--  lib/tsan/rtl/tsan_rtl_report.cc         27
-rw-r--r--  lib/tsan/rtl/tsan_rtl_thread.cc         22
-rw-r--r--  lib/tsan/rtl/tsan_stat.cc               1
-rw-r--r--  lib/tsan/rtl/tsan_stat.h                1
-rw-r--r--  lib/tsan/rtl/tsan_suppressions.cc       4
-rw-r--r--  lib/tsan/rtl/tsan_sync.cc               67
-rw-r--r--  lib/tsan/rtl/tsan_sync.h                12
35 files changed, 1849 insertions, 630 deletions
diff --git a/lib/tsan/rtl/tsan_debugging.cc b/lib/tsan/rtl/tsan_debugging.cc
new file mode 100644
index 000000000000..ac24c89be7da
--- /dev/null
+++ b/lib/tsan/rtl/tsan_debugging.cc
@@ -0,0 +1,162 @@
+//===-- tsan_debugging.cc -------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// TSan debugging API implementation.
+//===----------------------------------------------------------------------===//
+#include "tsan_interface.h"
+#include "tsan_report.h"
+#include "tsan_rtl.h"
+
+using namespace __tsan;
+
+static const char *ReportTypeDescription(ReportType typ) {
+ if (typ == ReportTypeRace) return "data-race";
+ if (typ == ReportTypeVptrRace) return "data-race-vptr";
+ if (typ == ReportTypeUseAfterFree) return "heap-use-after-free";
+ if (typ == ReportTypeVptrUseAfterFree) return "heap-use-after-free-vptr";
+ if (typ == ReportTypeThreadLeak) return "thread-leak";
+ if (typ == ReportTypeMutexDestroyLocked) return "locked-mutex-destroy";
+ if (typ == ReportTypeMutexDoubleLock) return "mutex-double-lock";
+ if (typ == ReportTypeMutexInvalidAccess) return "mutex-invalid-access";
+ if (typ == ReportTypeMutexBadUnlock) return "mutex-bad-unlock";
+ if (typ == ReportTypeMutexBadReadLock) return "mutex-bad-read-lock";
+ if (typ == ReportTypeMutexBadReadUnlock) return "mutex-bad-read-unlock";
+ if (typ == ReportTypeSignalUnsafe) return "signal-unsafe-call";
+ if (typ == ReportTypeErrnoInSignal) return "errno-in-signal-handler";
+ if (typ == ReportTypeDeadlock) return "lock-order-inversion";
+ return "";
+}
+
+static const char *ReportLocationTypeDescription(ReportLocationType typ) {
+ if (typ == ReportLocationGlobal) return "global";
+ if (typ == ReportLocationHeap) return "heap";
+ if (typ == ReportLocationStack) return "stack";
+ if (typ == ReportLocationTLS) return "tls";
+ if (typ == ReportLocationFD) return "fd";
+ return "";
+}
+
+static void CopyTrace(SymbolizedStack *first_frame, void **trace,
+ uptr trace_size) {
+ uptr i = 0;
+ for (SymbolizedStack *frame = first_frame; frame != nullptr;
+ frame = frame->next) {
+ trace[i++] = (void *)frame->info.address;
+ if (i >= trace_size) break;
+ }
+}
+
+// Meant to be called by the debugger.
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_get_current_report() {
+ return const_cast<ReportDesc*>(cur_thread()->current_report);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_data(void *report, const char **description, int *count,
+ int *stack_count, int *mop_count, int *loc_count,
+ int *mutex_count, int *thread_count,
+ int *unique_tid_count, void **sleep_trace,
+ uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ *description = ReportTypeDescription(rep->typ);
+ *count = rep->count;
+ *stack_count = rep->stacks.Size();
+ *mop_count = rep->mops.Size();
+ *loc_count = rep->locs.Size();
+ *mutex_count = rep->mutexes.Size();
+ *thread_count = rep->threads.Size();
+ *unique_tid_count = rep->unique_tids.Size();
+ if (rep->sleep) CopyTrace(rep->sleep->frames, sleep_trace, trace_size);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_stack(void *report, uptr idx, void **trace,
+ uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->stacks.Size());
+ ReportStack *stack = rep->stacks[idx];
+ if (stack) CopyTrace(stack->frames, trace, trace_size);
+ return stack ? 1 : 0;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_mop(void *report, uptr idx, int *tid, void **addr,
+ int *size, int *write, int *atomic, void **trace,
+ uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->mops.Size());
+ ReportMop *mop = rep->mops[idx];
+ *tid = mop->tid;
+ *addr = (void *)mop->addr;
+ *size = mop->size;
+ *write = mop->write ? 1 : 0;
+ *atomic = mop->atomic ? 1 : 0;
+ if (mop->stack) CopyTrace(mop->stack->frames, trace, trace_size);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_loc(void *report, uptr idx, const char **type,
+ void **addr, uptr *start, uptr *size, int *tid,
+ int *fd, int *suppressable, void **trace,
+ uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->locs.Size());
+ ReportLocation *loc = rep->locs[idx];
+ *type = ReportLocationTypeDescription(loc->type);
+ *addr = (void *)loc->global.start;
+ *start = loc->heap_chunk_start;
+ *size = loc->heap_chunk_size;
+ *tid = loc->tid;
+ *fd = loc->fd;
+ *suppressable = loc->suppressable;
+ if (loc->stack) CopyTrace(loc->stack->frames, trace, trace_size);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_mutex(void *report, uptr idx, uptr *mutex_id, void **addr,
+ int *destroyed, void **trace, uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->mutexes.Size());
+ ReportMutex *mutex = rep->mutexes[idx];
+ *mutex_id = mutex->id;
+ *addr = (void *)mutex->addr;
+ *destroyed = mutex->destroyed;
+ if (mutex->stack) CopyTrace(mutex->stack->frames, trace, trace_size);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_thread(void *report, uptr idx, int *tid, uptr *os_id,
+ int *running, const char **name, int *parent_tid,
+ void **trace, uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->threads.Size());
+ ReportThread *thread = rep->threads[idx];
+ *tid = thread->id;
+ *os_id = thread->os_id;
+ *running = thread->running;
+ *name = thread->name;
+ *parent_tid = thread->parent_tid;
+ if (thread->stack) CopyTrace(thread->stack->frames, trace, trace_size);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_unique_tid(void *report, uptr idx, int *tid) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->unique_tids.Size());
+ *tid = rep->unique_tids[idx];
+ return 1;
+}
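
For orientation, here is a minimal sketch (not part of the patch) of a client of the debugging API above. It assumes a program built with -fsanitize=thread and uses __tsan_on_report, the user-overridable hook declared later in this change in tsan_interface.h; the local uptr typedef is an assumption standing in for the runtime's own type.

// Hypothetical consumer of the debugging API above -- a sketch, not part of
// this patch. The runtime calls __tsan_on_report() as each report is produced.
#include <cstdio>

typedef unsigned long uptr;  // assumption: matches the runtime's uptr on LP64

extern "C" {
int __tsan_get_report_data(void *report, const char **description, int *count,
                           int *stack_count, int *mop_count, int *loc_count,
                           int *mutex_count, int *thread_count,
                           int *unique_tid_count, void **sleep_trace,
                           uptr trace_size);
int __tsan_get_report_mop(void *report, uptr idx, int *tid, void **addr,
                          int *size, int *write, int *atomic, void **trace,
                          uptr trace_size);

void __tsan_on_report(void *report) {
  const char *desc;
  int count, stacks, mops, locs, mutexes, threads, unique_tids;
  void *sleep_trace[16] = {};
  __tsan_get_report_data(report, &desc, &count, &stacks, &mops, &locs,
                         &mutexes, &threads, &unique_tids, sleep_trace, 16);
  fprintf(stderr, "tsan report: %s (%d memory operation(s))\n", desc, mops);
  for (int i = 0; i < mops; i++) {
    int tid, size, write, atomic;
    void *addr, *trace[16] = {};
    __tsan_get_report_mop(report, i, &tid, &addr, &size, &write, &atomic,
                          trace, 16);
    fprintf(stderr, "  mop #%d: tid=%d addr=%p size=%d %s%s\n", i, tid, addr,
            size, write ? "write" : "read", atomic ? " (atomic)" : "");
  }
}
}  // extern "C"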
diff --git a/lib/tsan/rtl/tsan_defs.h b/lib/tsan/rtl/tsan_defs.h
index 9c7b329dcf00..cdc23d0a7e49 100644
--- a/lib/tsan/rtl/tsan_defs.h
+++ b/lib/tsan/rtl/tsan_defs.h
@@ -29,7 +29,11 @@
#endif
#ifndef TSAN_CONTAINS_UBSAN
-# define TSAN_CONTAINS_UBSAN (CAN_SANITIZE_UB && !defined(SANITIZER_GO))
+# if CAN_SANITIZE_UB && !defined(SANITIZER_GO)
+# define TSAN_CONTAINS_UBSAN 1
+# else
+# define TSAN_CONTAINS_UBSAN 0
+# endif
#endif
namespace __tsan {
@@ -145,6 +149,7 @@ struct MD5Hash {
MD5Hash md5_hash(const void *data, uptr size);
+struct Processor;
struct ThreadState;
class ThreadContext;
struct Context;
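
The Processor forward declaration above supports the Processor/ThreadState split used throughout this patch. As a condensed sketch (assuming tsan_rtl.h; the function name is illustrative only), this is the wiring sequence that __tsan_thread_start_func and DestroyThreadState in tsan_interceptors.cc perform:

// Condensed sketch of the Processor lifecycle introduced by this patch; the
// calls mirror __tsan_thread_start_func / DestroyThreadState below.
#include "tsan_rtl.h"

namespace __tsan {

void ExampleThreadLifetime(ThreadState *thr, int tid, int os_tid) {
  Processor *proc = ProcCreate();  // allocate a Processor for this thread
  ProcWire(proc, thr);             // attach it; thr->proc() is now valid
  ThreadStart(thr, tid, os_tid);
  // ... thread body runs ...
  ThreadFinish(thr);
  ProcUnwire(proc, thr);           // detach before destroying
  ProcDestroy(proc);
}

}  // namespace __tsan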
diff --git a/lib/tsan/rtl/tsan_flags.cc b/lib/tsan/rtl/tsan_flags.cc
index 761523171c77..93f598616f3a 100644
--- a/lib/tsan/rtl/tsan_flags.cc
+++ b/lib/tsan/rtl/tsan_flags.cc
@@ -71,6 +71,7 @@ void InitializeFlags(Flags *f, const char *env) {
cf.print_suppressions = false;
cf.stack_trace_format = " #%n %f %S %M";
cf.exitcode = 66;
+ cf.intercept_tls_get_addr = true;
OverrideCommonFlags(cf);
}
@@ -108,7 +109,7 @@ void InitializeFlags(Flags *f, const char *env) {
f->report_signal_unsafe = false;
}
- SetVerbosity(common_flags()->verbosity);
+ InitializeCommonFlags();
if (Verbosity()) ReportUnrecognizedFlags();
diff --git a/lib/tsan/rtl/tsan_flags.inc b/lib/tsan/rtl/tsan_flags.inc
index ab9ca9924936..4fb443612c42 100644
--- a/lib/tsan/rtl/tsan_flags.inc
+++ b/lib/tsan/rtl/tsan_flags.inc
@@ -76,3 +76,7 @@ TSAN_FLAG(int, io_sync, 1,
TSAN_FLAG(bool, die_after_fork, true,
"Die after multi-threaded fork if the child creates new threads.")
TSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
+TSAN_FLAG(bool, ignore_interceptors_accesses, false,
+ "Ignore reads and writes from all interceptors.")
+TSAN_FLAG(bool, shared_ptr_interceptor, true,
+ "Track atomic reference counting in libc++ shared_ptr and weak_ptr.")
diff --git a/lib/tsan/rtl/tsan_interceptors.cc b/lib/tsan/rtl/tsan_interceptors.cc
index 7c835c6dc7df..fb6227651d21 100644
--- a/lib/tsan/rtl/tsan_interceptors.cc
+++ b/lib/tsan/rtl/tsan_interceptors.cc
@@ -19,6 +19,7 @@
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "interception/interception.h"
#include "tsan_interceptors.h"
#include "tsan_interface.h"
@@ -40,20 +41,8 @@ using namespace __tsan; // NOLINT
#define stderr __stderrp
#endif
-#if SANITIZER_FREEBSD
-#define __libc_realloc __realloc
-#define __libc_calloc __calloc
-#elif SANITIZER_MAC
-#define __libc_malloc REAL(malloc)
-#define __libc_realloc REAL(realloc)
-#define __libc_calloc REAL(calloc)
-#define __libc_free REAL(free)
-#elif SANITIZER_ANDROID
+#if SANITIZER_ANDROID
#define __errno_location __errno
-#define __libc_malloc REAL(malloc)
-#define __libc_realloc REAL(realloc)
-#define __libc_calloc REAL(calloc)
-#define __libc_free REAL(free)
#define mallopt(a, b)
#endif
@@ -86,11 +75,9 @@ struct ucontext_t {
};
#endif
-#if defined(__x86_64__) || defined(__mips__) \
- || (defined(__powerpc64__) && defined(__BIG_ENDIAN__))
+#if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1
#define PTHREAD_ABI_BASE "GLIBC_2.3.2"
-#elif defined(__aarch64__) || (defined(__powerpc64__) \
- && defined(__LITTLE_ENDIAN__))
+#elif defined(__aarch64__) || SANITIZER_PPC64V2
#define PTHREAD_ABI_BASE "GLIBC_2.17"
#endif
@@ -103,8 +90,6 @@ extern "C" int pthread_setspecific(unsigned key, const void *v);
DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
extern "C" int pthread_sigmask(int how, const __sanitizer_sigset_t *set,
__sanitizer_sigset_t *oldset);
-// REAL(sigfillset) defined in common interceptors.
-DECLARE_REAL(int, sigfillset, __sanitizer_sigset_t *set)
DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
@@ -112,21 +97,22 @@ extern "C" void *pthread_self();
extern "C" void _exit(int status);
extern "C" int *__errno_location();
extern "C" int fileno_unlocked(void *stream);
-#if !SANITIZER_ANDROID
-extern "C" void *__libc_calloc(uptr size, uptr n);
-extern "C" void *__libc_realloc(void *ptr, uptr size);
-#endif
extern "C" int dirfd(void *dirp);
#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID
extern "C" int mallopt(int param, int value);
#endif
extern __sanitizer_FILE *stdout, *stderr;
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC
const int PTHREAD_MUTEX_RECURSIVE = 1;
const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
+#else
+const int PTHREAD_MUTEX_RECURSIVE = 2;
+const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
+#endif
const int EINVAL = 22;
const int EBUSY = 16;
const int EOWNERDEAD = 130;
-#if !SANITIZER_MAC
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC
const int EPOLL_CTL_ADD = 1;
#endif
const int SIGILL = 4;
@@ -135,7 +121,7 @@ const int SIGFPE = 8;
const int SIGSEGV = 11;
const int SIGPIPE = 13;
const int SIGTERM = 15;
-#if defined(__mips__) || SANITIZER_MAC
+#if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_MAC
const int SIGBUS = 10;
const int SIGSYS = 12;
#else
@@ -165,7 +151,7 @@ struct sigaction_t {
u32 sa_flags;
union {
sighandler_t sa_handler;
- sigactionhandler_t sa_sgiaction;
+ sigactionhandler_t sa_sigaction;
};
__sanitizer_sigset_t sa_mask;
void (*sa_restorer)();
@@ -271,19 +257,24 @@ ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
: thr_(thr)
, pc_(pc)
, in_ignored_lib_(false) {
- if (!thr_->ignore_interceptors) {
- Initialize(thr);
+ Initialize(thr);
+ if (!thr_->is_inited)
+ return;
+ if (!thr_->ignore_interceptors)
FuncEntry(thr, pc);
- }
DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
if (!thr_->in_ignored_lib && libignore()->IsIgnored(pc)) {
in_ignored_lib_ = true;
thr_->in_ignored_lib = true;
ThreadIgnoreBegin(thr_, pc_);
}
+ if (flags()->ignore_interceptors_accesses) ThreadIgnoreBegin(thr_, pc_);
}
ScopedInterceptor::~ScopedInterceptor() {
+ if (!thr_->is_inited)
+ return;
+ if (flags()->ignore_interceptors_accesses) ThreadIgnoreEnd(thr_, pc_);
if (in_ignored_lib_) {
thr_->in_ignored_lib = false;
ThreadIgnoreEnd(thr_, pc_);
@@ -296,6 +287,7 @@ ScopedInterceptor::~ScopedInterceptor() {
}
void ScopedInterceptor::UserCallbackStart() {
+ if (flags()->ignore_interceptors_accesses) ThreadIgnoreEnd(thr_, pc_);
if (in_ignored_lib_) {
thr_->in_ignored_lib = false;
ThreadIgnoreEnd(thr_, pc_);
@@ -307,6 +299,7 @@ void ScopedInterceptor::UserCallbackEnd() {
thr_->in_ignored_lib = true;
ThreadIgnoreBegin(thr_, pc_);
}
+ if (flags()->ignore_interceptors_accesses) ThreadIgnoreBegin(thr_, pc_);
}
#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
@@ -387,7 +380,7 @@ static void at_exit_wrapper(void *arg) {
Acquire(thr, pc, (uptr)arg);
AtExitCtx *ctx = (AtExitCtx*)arg;
((void(*)(void *arg))ctx->f)(ctx->arg);
- __libc_free(ctx);
+ InternalFree(ctx);
}
static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
@@ -413,7 +406,7 @@ TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
void *arg, void *dso) {
- AtExitCtx *ctx = (AtExitCtx*)__libc_malloc(sizeof(AtExitCtx));
+ AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
ctx->f = f;
ctx->arg = arg;
Release(thr, pc, (uptr)ctx);
@@ -432,14 +425,14 @@ static void on_exit_wrapper(int status, void *arg) {
Acquire(thr, pc, (uptr)arg);
AtExitCtx *ctx = (AtExitCtx*)arg;
((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
- __libc_free(ctx);
+ InternalFree(ctx);
}
TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
if (cur_thread()->in_symbolizer)
return 0;
SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
- AtExitCtx *ctx = (AtExitCtx*)__libc_malloc(sizeof(AtExitCtx));
+ AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
ctx->f = (void(*)())f;
ctx->arg = arg;
Release(thr, pc, (uptr)ctx);
@@ -571,8 +564,11 @@ DEFINE_REAL(int, __sigsetjmp, void *env)
#endif // SANITIZER_MAC
TSAN_INTERCEPTOR(void, longjmp, uptr *env, int val) {
+ // Note: if we call REAL(longjmp) in the context of ScopedInterceptor,
+ // bad things will happen. We will jump over the ScopedInterceptor dtor
+ // and may leave thr->in_ignored_lib set.
{
- SCOPED_TSAN_INTERCEPTOR(longjmp, env, val);
+ SCOPED_INTERCEPTOR_RAW(longjmp, env, val);
}
LongJmp(cur_thread(), env);
REAL(longjmp)(env, val);
@@ -580,7 +576,7 @@ TSAN_INTERCEPTOR(void, longjmp, uptr *env, int val) {
TSAN_INTERCEPTOR(void, siglongjmp, uptr *env, int val) {
{
- SCOPED_TSAN_INTERCEPTOR(siglongjmp, env, val);
+ SCOPED_INTERCEPTOR_RAW(siglongjmp, env, val);
}
LongJmp(cur_thread(), env);
REAL(siglongjmp)(env, val);
@@ -589,7 +585,7 @@ TSAN_INTERCEPTOR(void, siglongjmp, uptr *env, int val) {
#if !SANITIZER_MAC
TSAN_INTERCEPTOR(void*, malloc, uptr size) {
if (cur_thread()->in_symbolizer)
- return __libc_malloc(size);
+ return InternalAlloc(size);
void *p = 0;
{
SCOPED_INTERCEPTOR_RAW(malloc, size);
@@ -606,7 +602,7 @@ TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
if (cur_thread()->in_symbolizer)
- return __libc_calloc(size, n);
+ return InternalCalloc(size, n);
void *p = 0;
{
SCOPED_INTERCEPTOR_RAW(calloc, size, n);
@@ -618,7 +614,7 @@ TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
if (cur_thread()->in_symbolizer)
- return __libc_realloc(p, size);
+ return InternalRealloc(p, size);
if (p)
invoke_free_hook(p);
{
@@ -633,7 +629,7 @@ TSAN_INTERCEPTOR(void, free, void *p) {
if (p == 0)
return;
if (cur_thread()->in_symbolizer)
- return __libc_free(p);
+ return InternalFree(p);
invoke_free_hook(p);
SCOPED_INTERCEPTOR_RAW(free, p);
user_free(thr, pc, p);
@@ -643,7 +639,7 @@ TSAN_INTERCEPTOR(void, cfree, void *p) {
if (p == 0)
return;
if (cur_thread()->in_symbolizer)
- return __libc_free(p);
+ return InternalFree(p);
invoke_free_hook(p);
SCOPED_INTERCEPTOR_RAW(cfree, p);
user_free(thr, pc, p);
@@ -655,69 +651,6 @@ TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
}
#endif
-TSAN_INTERCEPTOR(uptr, strlen, const char *s) {
- SCOPED_TSAN_INTERCEPTOR(strlen, s);
- uptr len = internal_strlen(s);
- MemoryAccessRange(thr, pc, (uptr)s, len + 1, false);
- return len;
-}
-
-TSAN_INTERCEPTOR(void*, memset, void *dst, int v, uptr size) {
- // On FreeBSD we get here from libthr internals on thread initialization.
- if (!COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) {
- SCOPED_TSAN_INTERCEPTOR(memset, dst, v, size);
- MemoryAccessRange(thr, pc, (uptr)dst, size, true);
- }
- return internal_memset(dst, v, size);
-}
-
-TSAN_INTERCEPTOR(void*, memcpy, void *dst, const void *src, uptr size) {
- // On FreeBSD we get here from libthr internals on thread initialization.
- if (!COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) {
- SCOPED_TSAN_INTERCEPTOR(memcpy, dst, src, size);
- MemoryAccessRange(thr, pc, (uptr)dst, size, true);
- MemoryAccessRange(thr, pc, (uptr)src, size, false);
- }
- // On OS X, calling internal_memcpy here will cause memory corruptions,
- // because memcpy and memmove are actually aliases of the same implementation.
- // We need to use internal_memmove here.
- return internal_memmove(dst, src, size);
-}
-
-TSAN_INTERCEPTOR(void*, memmove, void *dst, void *src, uptr n) {
- if (!COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) {
- SCOPED_TSAN_INTERCEPTOR(memmove, dst, src, n);
- MemoryAccessRange(thr, pc, (uptr)dst, n, true);
- MemoryAccessRange(thr, pc, (uptr)src, n, false);
- }
- return REAL(memmove)(dst, src, n);
-}
-
-TSAN_INTERCEPTOR(char*, strchr, char *s, int c) {
- SCOPED_TSAN_INTERCEPTOR(strchr, s, c);
- char *res = REAL(strchr)(s, c);
- uptr len = internal_strlen(s);
- uptr n = res ? (char*)res - (char*)s + 1 : len + 1;
- READ_STRING_OF_LEN(thr, pc, s, len, n);
- return res;
-}
-
-#if !SANITIZER_MAC
-TSAN_INTERCEPTOR(char*, strchrnul, char *s, int c) {
- SCOPED_TSAN_INTERCEPTOR(strchrnul, s, c);
- char *res = REAL(strchrnul)(s, c);
- uptr len = (char*)res - (char*)s + 1;
- READ_STRING(thr, pc, s, len);
- return res;
-}
-#endif
-
-TSAN_INTERCEPTOR(char*, strrchr, char *s, int c) {
- SCOPED_TSAN_INTERCEPTOR(strrchr, s, c);
- MemoryAccessRange(thr, pc, (uptr)s, internal_strlen(s) + 1, false);
- return REAL(strrchr)(s, c);
-}
-
TSAN_INTERCEPTOR(char*, strcpy, char *dst, const char *src) { // NOLINT
SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src); // NOLINT
uptr srclen = internal_strlen(src);
@@ -763,7 +696,11 @@ TSAN_INTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags,
if (res != MAP_FAILED) {
if (fd > 0)
FdAccess(thr, pc, fd);
- MemoryRangeImitateWrite(thr, pc, (uptr)res, sz);
+
+ if (thr->ignore_reads_and_writes == 0)
+ MemoryRangeImitateWrite(thr, pc, (uptr)res, sz);
+ else
+ MemoryResetRange(thr, pc, (uptr)res, sz);
}
return res;
}
@@ -778,7 +715,11 @@ TSAN_INTERCEPTOR(void *, mmap64, void *addr, SIZE_T sz, int prot, int flags,
if (res != MAP_FAILED) {
if (fd > 0)
FdAccess(thr, pc, fd);
- MemoryRangeImitateWrite(thr, pc, (uptr)res, sz);
+
+ if (thr->ignore_reads_and_writes == 0)
+ MemoryRangeImitateWrite(thr, pc, (uptr)res, sz);
+ else
+ MemoryResetRange(thr, pc, (uptr)res, sz);
}
return res;
}
@@ -792,7 +733,8 @@ TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
if (sz != 0) {
// If sz == 0, munmap will return EINVAL and don't unmap any memory.
DontNeedShadowFor((uptr)addr, sz);
- ctx->metamap.ResetRange(thr, pc, (uptr)addr, (uptr)sz);
+ ScopedGlobalProcessor sgp;
+ ctx->metamap.ResetRange(thr->proc(), (uptr)addr, (uptr)sz);
}
int res = REAL(munmap)(addr, sz);
return res;
@@ -887,12 +829,16 @@ STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
namespace __tsan {
void DestroyThreadState() {
ThreadState *thr = cur_thread();
+ Processor *proc = thr->proc();
ThreadFinish(thr);
+ ProcUnwire(proc, thr);
+ ProcDestroy(proc);
ThreadSignalContext *sctx = thr->signal_ctx;
if (sctx) {
thr->signal_ctx = 0;
UnmapOrDie(sctx, sizeof(*sctx));
}
+ DTLS_Destroy();
cur_thread_finalize();
}
} // namespace __tsan
@@ -938,6 +884,8 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
#endif
while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
internal_sched_yield();
+ Processor *proc = ProcCreate();
+ ProcWire(proc, thr);
ThreadStart(thr, tid, GetTid());
atomic_store(&p->tid, 0, memory_order_release);
}
@@ -1095,12 +1043,12 @@ INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
return REAL(pthread_cond_init)(cond, a);
}
-INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
- void *cond = init_cond(c);
- SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
+static int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si,
+ int (*fn)(void *c, void *m, void *abstime), void *c,
+ void *m, void *t) {
MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
MutexUnlock(thr, pc, (uptr)m);
- CondMutexUnlockCtx arg = {&si, thr, pc, m};
+ CondMutexUnlockCtx arg = {si, thr, pc, m};
int res = 0;
// This ensures that we handle mutex lock even in case of pthread_cancel.
// See test/tsan/cond_cancel.cc.
@@ -1108,35 +1056,37 @@ INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
// Enable signal delivery while the thread is blocked.
BlockingCall bc(thr);
res = call_pthread_cancel_with_cleanup(
- (int(*)(void *c, void *m, void *abstime))REAL(pthread_cond_wait),
- cond, m, 0, (void(*)(void *arg))cond_mutex_unlock, &arg);
+ fn, c, m, t, (void (*)(void *arg))cond_mutex_unlock, &arg);
}
- if (res == errno_EOWNERDEAD)
- MutexRepair(thr, pc, (uptr)m);
+ if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
MutexLock(thr, pc, (uptr)m);
return res;
}
+INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
+ return cond_wait(thr, pc, &si, (int (*)(void *c, void *m, void *abstime))REAL(
+ pthread_cond_wait),
+ cond, m, 0);
+}
+
INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
void *cond = init_cond(c);
SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
- MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
- MutexUnlock(thr, pc, (uptr)m);
- CondMutexUnlockCtx arg = {&si, thr, pc, m};
- int res = 0;
- // This ensures that we handle mutex lock even in case of pthread_cancel.
- // See test/tsan/cond_cancel.cc.
- {
- BlockingCall bc(thr);
- res = call_pthread_cancel_with_cleanup(
- REAL(pthread_cond_timedwait), cond, m, abstime,
- (void(*)(void *arg))cond_mutex_unlock, &arg);
- }
- if (res == errno_EOWNERDEAD)
- MutexRepair(thr, pc, (uptr)m);
- MutexLock(thr, pc, (uptr)m);
- return res;
+ return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait), cond, m,
+ abstime);
+}
+
+#if SANITIZER_MAC
+INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
+ void *reltime) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
+ return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait_relative_np), cond,
+ m, reltime);
}
+#endif
INTERCEPTOR(int, pthread_cond_signal, void *c) {
void *cond = init_cond(c);
@@ -1395,96 +1345,6 @@ TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
}
#if SANITIZER_LINUX && !SANITIZER_ANDROID
-TSAN_INTERCEPTOR(int, __xstat, int version, const char *path, void *buf) {
- SCOPED_TSAN_INTERCEPTOR(__xstat, version, path, buf);
- READ_STRING(thr, pc, path, 0);
- return REAL(__xstat)(version, path, buf);
-}
-#define TSAN_MAYBE_INTERCEPT___XSTAT TSAN_INTERCEPT(__xstat)
-#else
-#define TSAN_MAYBE_INTERCEPT___XSTAT
-#endif
-
-TSAN_INTERCEPTOR(int, stat, const char *path, void *buf) {
-#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID
- SCOPED_TSAN_INTERCEPTOR(stat, path, buf);
- READ_STRING(thr, pc, path, 0);
- return REAL(stat)(path, buf);
-#else
- SCOPED_TSAN_INTERCEPTOR(__xstat, 0, path, buf);
- READ_STRING(thr, pc, path, 0);
- return REAL(__xstat)(0, path, buf);
-#endif
-}
-
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
-TSAN_INTERCEPTOR(int, __xstat64, int version, const char *path, void *buf) {
- SCOPED_TSAN_INTERCEPTOR(__xstat64, version, path, buf);
- READ_STRING(thr, pc, path, 0);
- return REAL(__xstat64)(version, path, buf);
-}
-#define TSAN_MAYBE_INTERCEPT___XSTAT64 TSAN_INTERCEPT(__xstat64)
-#else
-#define TSAN_MAYBE_INTERCEPT___XSTAT64
-#endif
-
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
-TSAN_INTERCEPTOR(int, stat64, const char *path, void *buf) {
- SCOPED_TSAN_INTERCEPTOR(__xstat64, 0, path, buf);
- READ_STRING(thr, pc, path, 0);
- return REAL(__xstat64)(0, path, buf);
-}
-#define TSAN_MAYBE_INTERCEPT_STAT64 TSAN_INTERCEPT(stat64)
-#else
-#define TSAN_MAYBE_INTERCEPT_STAT64
-#endif
-
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
-TSAN_INTERCEPTOR(int, __lxstat, int version, const char *path, void *buf) {
- SCOPED_TSAN_INTERCEPTOR(__lxstat, version, path, buf);
- READ_STRING(thr, pc, path, 0);
- return REAL(__lxstat)(version, path, buf);
-}
-#define TSAN_MAYBE_INTERCEPT___LXSTAT TSAN_INTERCEPT(__lxstat)
-#else
-#define TSAN_MAYBE_INTERCEPT___LXSTAT
-#endif
-
-TSAN_INTERCEPTOR(int, lstat, const char *path, void *buf) {
-#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID
- SCOPED_TSAN_INTERCEPTOR(lstat, path, buf);
- READ_STRING(thr, pc, path, 0);
- return REAL(lstat)(path, buf);
-#else
- SCOPED_TSAN_INTERCEPTOR(__lxstat, 0, path, buf);
- READ_STRING(thr, pc, path, 0);
- return REAL(__lxstat)(0, path, buf);
-#endif
-}
-
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
-TSAN_INTERCEPTOR(int, __lxstat64, int version, const char *path, void *buf) {
- SCOPED_TSAN_INTERCEPTOR(__lxstat64, version, path, buf);
- READ_STRING(thr, pc, path, 0);
- return REAL(__lxstat64)(version, path, buf);
-}
-#define TSAN_MAYBE_INTERCEPT___LXSTAT64 TSAN_INTERCEPT(__lxstat64)
-#else
-#define TSAN_MAYBE_INTERCEPT___LXSTAT64
-#endif
-
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
-TSAN_INTERCEPTOR(int, lstat64, const char *path, void *buf) {
- SCOPED_TSAN_INTERCEPTOR(__lxstat64, 0, path, buf);
- READ_STRING(thr, pc, path, 0);
- return REAL(__lxstat64)(0, path, buf);
-}
-#define TSAN_MAYBE_INTERCEPT_LSTAT64 TSAN_INTERCEPT(lstat64)
-#else
-#define TSAN_MAYBE_INTERCEPT_LSTAT64
-#endif
-
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
if (fd > 0)
@@ -1701,32 +1561,6 @@ TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
return res;
}
-#if SANITIZER_LINUX
-TSAN_INTERCEPTOR(int, epoll_create, int size) {
- SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
- int fd = REAL(epoll_create)(size);
- if (fd >= 0)
- FdPollCreate(thr, pc, fd);
- return fd;
-}
-#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE TSAN_INTERCEPT(epoll_create)
-#else
-#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE
-#endif
-
-#if SANITIZER_LINUX
-TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
- SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
- int fd = REAL(epoll_create1)(flags);
- if (fd >= 0)
- FdPollCreate(thr, pc, fd);
- return fd;
-}
-#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE1 TSAN_INTERCEPT(epoll_create1)
-#else
-#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE1
-#endif
-
TSAN_INTERCEPTOR(int, close, int fd) {
SCOPED_TSAN_INTERCEPTOR(close, fd);
if (fd >= 0)
@@ -1781,37 +1615,6 @@ TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
}
#endif
-TSAN_INTERCEPTOR(long_t, send, int fd, void *buf, long_t len, int flags) {
- SCOPED_TSAN_INTERCEPTOR(send, fd, buf, len, flags);
- if (fd >= 0) {
- FdAccess(thr, pc, fd);
- FdRelease(thr, pc, fd);
- }
- int res = REAL(send)(fd, buf, len, flags);
- return res;
-}
-
-TSAN_INTERCEPTOR(long_t, sendmsg, int fd, void *msg, int flags) {
- SCOPED_TSAN_INTERCEPTOR(sendmsg, fd, msg, flags);
- if (fd >= 0) {
- FdAccess(thr, pc, fd);
- FdRelease(thr, pc, fd);
- }
- int res = REAL(sendmsg)(fd, msg, flags);
- return res;
-}
-
-TSAN_INTERCEPTOR(long_t, recv, int fd, void *buf, long_t len, int flags) {
- SCOPED_TSAN_INTERCEPTOR(recv, fd, buf, len, flags);
- if (fd >= 0)
- FdAccess(thr, pc, fd);
- int res = REAL(recv)(fd, buf, len, flags);
- if (res >= 0 && fd >= 0) {
- FdAcquire(thr, pc, fd);
- }
- return res;
-}
-
TSAN_INTERCEPTOR(int, unlink, char *path) {
SCOPED_TSAN_INTERCEPTOR(unlink, path);
Release(thr, pc, File2addr(path));
@@ -1900,6 +1703,22 @@ TSAN_INTERCEPTOR(int, closedir, void *dirp) {
}
#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, epoll_create, int size) {
+ SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
+ int fd = REAL(epoll_create)(size);
+ if (fd >= 0)
+ FdPollCreate(thr, pc, fd);
+ return fd;
+}
+
+TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
+ int fd = REAL(epoll_create1)(flags);
+ if (fd >= 0)
+ FdPollCreate(thr, pc, fd);
+ return fd;
+}
+
TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
if (epfd >= 0)
@@ -1911,12 +1730,7 @@ TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
int res = REAL(epoll_ctl)(epfd, op, fd, ev);
return res;
}
-#define TSAN_MAYBE_INTERCEPT_EPOLL_CTL TSAN_INTERCEPT(epoll_ctl)
-#else
-#define TSAN_MAYBE_INTERCEPT_EPOLL_CTL
-#endif
-#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
if (epfd >= 0)
@@ -1926,9 +1740,26 @@ TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
FdAcquire(thr, pc, epfd);
return res;
}
-#define TSAN_MAYBE_INTERCEPT_EPOLL_WAIT TSAN_INTERCEPT(epoll_wait)
+
+TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout,
+ void *sigmask) {
+ SCOPED_TSAN_INTERCEPTOR(epoll_pwait, epfd, ev, cnt, timeout, sigmask);
+ if (epfd >= 0)
+ FdAccess(thr, pc, epfd);
+ int res = BLOCK_REAL(epoll_pwait)(epfd, ev, cnt, timeout, sigmask);
+ if (res > 0 && epfd >= 0)
+ FdAcquire(thr, pc, epfd);
+ return res;
+}
+
+#define TSAN_MAYBE_INTERCEPT_EPOLL \
+ TSAN_INTERCEPT(epoll_create); \
+ TSAN_INTERCEPT(epoll_create1); \
+ TSAN_INTERCEPT(epoll_ctl); \
+ TSAN_INTERCEPT(epoll_wait); \
+ TSAN_INTERCEPT(epoll_pwait)
#else
-#define TSAN_MAYBE_INTERCEPT_EPOLL_WAIT
+#define TSAN_MAYBE_INTERCEPT_EPOLL
#endif
namespace __tsan {
@@ -1937,6 +1768,19 @@ static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
bool sigact, int sig, my_siginfo_t *info, void *uctx) {
if (acquire)
Acquire(thr, 0, (uptr)&sigactions[sig]);
+ // Signals are generally asynchronous, so if we receive a signal while
+ // ignores are enabled we should disable ignores. This is critical for sync
+ // and interceptors, because otherwise we can miss synchronization and report
+ // false races.
+ int ignore_reads_and_writes = thr->ignore_reads_and_writes;
+ int ignore_interceptors = thr->ignore_interceptors;
+ int ignore_sync = thr->ignore_sync;
+ if (!ctx->after_multithreaded_fork) {
+ thr->ignore_reads_and_writes = 0;
+ thr->fast_state.ClearIgnoreBit();
+ thr->ignore_interceptors = 0;
+ thr->ignore_sync = 0;
+ }
// Ensure that the handler does not spoil errno.
const int saved_errno = errno;
errno = 99;
@@ -1952,6 +1796,13 @@ static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
else
((sighandler_t)pc)(sig);
}
+ if (!ctx->after_multithreaded_fork) {
+ thr->ignore_reads_and_writes = ignore_reads_and_writes;
+ if (ignore_reads_and_writes)
+ thr->fast_state.SetIgnoreBit();
+ thr->ignore_interceptors = ignore_interceptors;
+ thr->ignore_sync = ignore_sync;
+ }
// We do not detect errno spoiling for SIGTERM,
// because some SIGTERM handlers do spoil errno but reraise SIGTERM,
// tsan reports false positive in such case.
@@ -1981,7 +1832,7 @@ void ProcessPendingSignals(ThreadState *thr) {
return;
atomic_store(&sctx->have_pending_signals, 0, memory_order_relaxed);
atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
- CHECK_EQ(0, REAL(sigfillset)(&sctx->emptyset));
+ internal_sigfillset(&sctx->emptyset);
CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &sctx->emptyset, &sctx->oldset));
for (int sig = 0; sig < kSigCount; sig++) {
SignalDesc *signal = &sctx->pending_signals[sig];
@@ -2021,13 +1872,8 @@ void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
(sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed))) {
atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
if (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed)) {
- // We ignore interceptors in blocking functions,
- // temporary enbled them again while we are calling user function.
- int const i = thr->ignore_interceptors;
- thr->ignore_interceptors = 0;
atomic_store(&sctx->in_blocking_func, 0, memory_order_relaxed);
CallUserSignalHandler(thr, sync, true, sigact, sig, info, ctx);
- thr->ignore_interceptors = i;
atomic_store(&sctx->in_blocking_func, 1, memory_order_relaxed);
} else {
// Be very conservative with when we do acquire in this case.
@@ -2065,7 +1911,10 @@ static void rtl_sigaction(int sig, my_siginfo_t *info, void *ctx) {
}
TSAN_INTERCEPTOR(int, sigaction, int sig, sigaction_t *act, sigaction_t *old) {
- SCOPED_TSAN_INTERCEPTOR(sigaction, sig, act, old);
+ // Note: if we call REAL(sigaction) directly for any reason without proxying
+ // the signal handler through rtl_sigaction, very bad things will happen.
+ // The handler will run synchronously and corrupt tsan per-thread state.
+ SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
if (old)
internal_memcpy(old, &sigactions[sig], sizeof(*old));
if (act == 0)
@@ -2085,7 +1934,7 @@ TSAN_INTERCEPTOR(int, sigaction, int sig, sigaction_t *act, sigaction_t *old) {
#endif
sigaction_t newact;
internal_memcpy(&newact, act, sizeof(newact));
- REAL(sigfillset)(&newact.sa_mask);
+ internal_sigfillset(&newact.sa_mask);
if (act->sa_handler != SIG_IGN && act->sa_handler != SIG_DFL) {
if (newact.sa_flags & SA_SIGINFO)
newact.sa_sigaction = rtl_sigaction;
@@ -2100,7 +1949,7 @@ TSAN_INTERCEPTOR(int, sigaction, int sig, sigaction_t *act, sigaction_t *old) {
TSAN_INTERCEPTOR(sighandler_t, signal, int sig, sighandler_t h) {
sigaction_t act;
act.sa_handler = h;
- REAL(memset)(&act.sa_mask, -1, sizeof(act.sa_mask));
+ internal_memset(&act.sa_mask, -1, sizeof(act.sa_mask));
act.sa_flags = 0;
sigaction_t old;
int res = sigaction(sig, &act, &old);
@@ -2181,7 +2030,13 @@ TSAN_INTERCEPTOR(int, fork, int fake) {
return REAL(fork)(fake);
SCOPED_INTERCEPTOR_RAW(fork, fake);
ForkBefore(thr, pc);
- int pid = REAL(fork)(fake);
+ int pid;
+ {
+ // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
+ // we'll assert in CheckNoLocks() unless we ignore interceptors.
+ ScopedIgnoreInterceptors ignore;
+ pid = REAL(fork)(fake);
+ }
if (pid == 0) {
// child
ForkChildAfter(thr, pc);
@@ -2296,18 +2151,15 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
#undef SANITIZER_INTERCEPT_FGETPWENT
#undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
#undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
-// __tls_get_addr can be called with mis-aligned stack due to:
-// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
-// There are two potential issues:
-// 1. Sanitizer code contains a MOVDQA spill (it does not seem to be the case
-// right now). or 2. ProcessPendingSignal calls user handler which contains
-// MOVDQA spill (this happens right now).
-// Since the interceptor only initializes memory for msan, the simplest solution
-// is to disable the interceptor in tsan (other sanitizers do not call
-// signal handlers from COMMON_INTERCEPTOR_ENTER).
+// We define our own.
+#if SANITIZER_INTERCEPT_TLS_GET_ADDR
+#define NEED_TLS_GET_ADDR
+#endif
#undef SANITIZER_INTERCEPT_TLS_GET_ADDR
#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
+#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
+ INTERCEPT_FUNCTION_VER(name, ver)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr, \
@@ -2394,6 +2246,10 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
MutexRepair(((TsanInterceptorContext *)ctx)->thr, \
((TsanInterceptorContext *)ctx)->pc, (uptr)m)
+#define COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m) \
+ MutexInvalidAccess(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
+
#if !SANITIZER_MAC
#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
@@ -2408,6 +2264,12 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
*begin = *end = 0; \
}
+#define COMMON_INTERCEPTOR_USER_CALLBACK_START() \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START()
+
+#define COMMON_INTERCEPTOR_USER_CALLBACK_END() \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END()
+
#include "sanitizer_common/sanitizer_common_interceptors.inc"
#define TSAN_SYSCALL() \
@@ -2430,7 +2292,7 @@ struct ScopedSyscall {
}
};
-#if !SANITIZER_MAC
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC
static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
TSAN_SYSCALL();
MemoryAccessRange(thr, pc, p, s, write);
@@ -2524,6 +2386,31 @@ static void syscall_post_fork(uptr pc, int pid) {
#include "sanitizer_common/sanitizer_common_syscalls.inc"
+#ifdef NEED_TLS_GET_ADDR
+// Define our own interceptor instead of sanitizer_common's for three reasons:
+// 1. It must not process pending signals.
+// Signal handlers may contain MOVDQA instruction (see below).
+// 2. It must be as simple as possible to not contain MOVDQA.
+// 3. Sanitizer_common version uses COMMON_INTERCEPTOR_INITIALIZE_RANGE which
+// is empty for tsan (meant only for msan).
+// Note: __tls_get_addr can be called with mis-aligned stack due to:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
+// So the interceptor must work with a mis-aligned stack; in particular, it
+// must not execute MOVDQA with stack addresses.
+TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) {
+ void *res = REAL(__tls_get_addr)(arg);
+ ThreadState *thr = cur_thread();
+ if (!thr)
+ return res;
+ DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr, thr->tls_size);
+ if (!dtv)
+ return res;
+ // New DTLS block has been allocated.
+ MemoryResetRange(thr, 0, dtv->beg, dtv->size);
+ return res;
+}
+#endif
+
namespace __tsan {
static void finalize(void *arg) {
@@ -2584,13 +2471,6 @@ void InitializeInterceptors() {
TSAN_MAYBE_INTERCEPT_PVALLOC;
TSAN_INTERCEPT(posix_memalign);
- TSAN_INTERCEPT(strlen);
- TSAN_INTERCEPT(memset);
- TSAN_INTERCEPT(memcpy);
- TSAN_INTERCEPT(memmove);
- TSAN_INTERCEPT(strchr);
- TSAN_INTERCEPT(strchrnul);
- TSAN_INTERCEPT(strrchr);
TSAN_INTERCEPT(strcpy); // NOLINT
TSAN_INTERCEPT(strncpy);
TSAN_INTERCEPT(strdup);
@@ -2633,14 +2513,6 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(pthread_once);
- TSAN_INTERCEPT(stat);
- TSAN_MAYBE_INTERCEPT___XSTAT;
- TSAN_MAYBE_INTERCEPT_STAT64;
- TSAN_MAYBE_INTERCEPT___XSTAT64;
- TSAN_INTERCEPT(lstat);
- TSAN_MAYBE_INTERCEPT___LXSTAT;
- TSAN_MAYBE_INTERCEPT_LSTAT64;
- TSAN_MAYBE_INTERCEPT___LXSTAT64;
TSAN_INTERCEPT(fstat);
TSAN_MAYBE_INTERCEPT___FXSTAT;
TSAN_MAYBE_INTERCEPT_FSTAT64;
@@ -2661,18 +2533,13 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(connect);
TSAN_INTERCEPT(bind);
TSAN_INTERCEPT(listen);
- TSAN_MAYBE_INTERCEPT_EPOLL_CREATE;
- TSAN_MAYBE_INTERCEPT_EPOLL_CREATE1;
+ TSAN_MAYBE_INTERCEPT_EPOLL;
TSAN_INTERCEPT(close);
TSAN_MAYBE_INTERCEPT___CLOSE;
TSAN_MAYBE_INTERCEPT___RES_ICLOSE;
TSAN_INTERCEPT(pipe);
TSAN_INTERCEPT(pipe2);
- TSAN_INTERCEPT(send);
- TSAN_INTERCEPT(sendmsg);
- TSAN_INTERCEPT(recv);
-
TSAN_INTERCEPT(unlink);
TSAN_INTERCEPT(tmpfile);
TSAN_MAYBE_INTERCEPT_TMPFILE64;
@@ -2683,9 +2550,6 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(rmdir);
TSAN_INTERCEPT(closedir);
- TSAN_MAYBE_INTERCEPT_EPOLL_CTL;
- TSAN_MAYBE_INTERCEPT_EPOLL_WAIT;
-
TSAN_INTERCEPT(sigaction);
TSAN_INTERCEPT(signal);
TSAN_INTERCEPT(sigsuspend);
@@ -2707,6 +2571,10 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(__cxa_atexit);
TSAN_INTERCEPT(_exit);
+#ifdef NEED_TLS_GET_ADDR
+ TSAN_INTERCEPT(__tls_get_addr);
+#endif
+
#if !SANITIZER_MAC && !SANITIZER_ANDROID
// Need to setup it, because interceptors check that the function is resolved.
// But atexit is emitted directly into the module, so can't be resolved.
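
The epoll changes above consolidate the interceptors under one macro and add epoll_pwait, which (like epoll_wait) acquires on the epoll fd after a successful wait. A self-contained sketch of the ordering this is designed to capture -- the producer's plain write should be seen as happening before the consumer's read once the wakeup is observed (assuming the pre-existing release performed when the fd is armed and written):

// Sketch: the happens-before edge the epoll interceptors model. Not part of
// the patch; compile with -fsanitize=thread on Linux.
#include <sys/epoll.h>
#include <unistd.h>
#include <pthread.h>
#include <cstdio>

static int pipefd[2], epfd;
static int data;  // plain, non-atomic shared variable

static void *producer(void *) {
  data = 42;  // written before the consumer is woken
  struct epoll_event ev = {};
  ev.events = EPOLLIN;
  epoll_ctl(epfd, EPOLL_CTL_ADD, pipefd[0], &ev);
  write(pipefd[1], "x", 1);  // wake the consumer
  return nullptr;
}

int main() {
  if (pipe(pipefd)) return 1;
  epfd = epoll_create1(0);
  pthread_t t;
  pthread_create(&t, nullptr, producer, nullptr);
  struct epoll_event out;
  if (epoll_pwait(epfd, &out, 1, -1, nullptr) > 0)  // acquires on success
    printf("data = %d\n", data);  // ordered after the producer's write
  pthread_join(t, nullptr);
  return 0;
}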
diff --git a/lib/tsan/rtl/tsan_interceptors.h b/lib/tsan/rtl/tsan_interceptors.h
index d831620cfafe..a0f9a0753a63 100644
--- a/lib/tsan/rtl/tsan_interceptors.h
+++ b/lib/tsan/rtl/tsan_interceptors.h
@@ -34,7 +34,7 @@ class ScopedInterceptor {
Report("FATAL: ThreadSanitizer: failed to intercept %s\n", #func); \
Die(); \
} \
- if (thr->ignore_interceptors || thr->in_ignored_lib) \
+ if (!thr->is_inited || thr->ignore_interceptors || thr->in_ignored_lib) \
return REAL(func)(__VA_ARGS__); \
/**/
@@ -46,12 +46,4 @@ class ScopedInterceptor {
#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__)
-#if SANITIZER_FREEBSD
-#define __libc_free __free
-#define __libc_malloc __malloc
-#endif
-
-extern "C" void __libc_free(void *ptr);
-extern "C" void *__libc_malloc(uptr size);
-
#endif // TSAN_INTERCEPTORS_H
diff --git a/lib/tsan/rtl/tsan_interceptors_mac.cc b/lib/tsan/rtl/tsan_interceptors_mac.cc
index 2bf7ad9861c4..593963825c58 100644
--- a/lib/tsan/rtl/tsan_interceptors_mac.cc
+++ b/lib/tsan/rtl/tsan_interceptors_mac.cc
@@ -17,11 +17,161 @@
#include "interception/interception.h"
#include "tsan_interceptors.h"
+#include "tsan_interface.h"
+#include "tsan_interface_ann.h"
#include <libkern/OSAtomic.h>
+#include <xpc/xpc.h>
+
+typedef long long_t; // NOLINT
namespace __tsan {
+// The non-barrier versions of OSAtomic* functions are semantically mo_relaxed,
+// but the two variants (e.g. OSAtomicAdd32 and OSAtomicAdd32Barrier) are
+// actually aliases of each other, and we cannot have different interceptors for
+// them, because they're actually the same function. Thus, we have to stay
+// conservative and treat the non-barrier versions as mo_acq_rel.
+static const morder kMacOrderBarrier = mo_acq_rel;
+static const morder kMacOrderNonBarrier = mo_acq_rel;
+
+#define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
+ TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \
+ return tsan_atomic_f((volatile tsan_t *)ptr, x, mo); \
+ }
+
+#define OSATOMIC_INTERCEPTOR_PLUS_X(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
+ TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \
+ return tsan_atomic_f((volatile tsan_t *)ptr, x, mo) + x; \
+ }
+
+#define OSATOMIC_INTERCEPTOR_PLUS_1(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
+ TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, ptr); \
+ return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) + 1; \
+ }
+
+#define OSATOMIC_INTERCEPTOR_MINUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \
+ mo) \
+ TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, ptr); \
+ return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) - 1; \
+ }
+
+#define OSATOMIC_INTERCEPTORS_ARITHMETIC(f, tsan_atomic_f, m) \
+ m(int32_t, int32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \
+ kMacOrderNonBarrier) \
+ m(int32_t, int32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f, \
+ kMacOrderBarrier) \
+ m(int64_t, int64_t, a64, f##64, __tsan_atomic64_##tsan_atomic_f, \
+ kMacOrderNonBarrier) \
+ m(int64_t, int64_t, a64, f##64##Barrier, __tsan_atomic64_##tsan_atomic_f, \
+ kMacOrderBarrier)
+
+#define OSATOMIC_INTERCEPTORS_BITWISE(f, tsan_atomic_f, m, m_orig) \
+ m(int32_t, uint32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \
+ kMacOrderNonBarrier) \
+ m(int32_t, uint32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f, \
+ kMacOrderBarrier) \
+ m_orig(int32_t, uint32_t, a32, f##32##Orig, __tsan_atomic32_##tsan_atomic_f, \
+ kMacOrderNonBarrier) \
+ m_orig(int32_t, uint32_t, a32, f##32##OrigBarrier, \
+ __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier)
+
+OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicAdd, fetch_add,
+ OSATOMIC_INTERCEPTOR_PLUS_X)
+OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicIncrement, fetch_add,
+ OSATOMIC_INTERCEPTOR_PLUS_1)
+OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicDecrement, fetch_sub,
+ OSATOMIC_INTERCEPTOR_MINUS_1)
+OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicOr, fetch_or, OSATOMIC_INTERCEPTOR_PLUS_X,
+ OSATOMIC_INTERCEPTOR)
+OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicAnd, fetch_and,
+ OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR)
+OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicXor, fetch_xor,
+ OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR)
+
+#define OSATOMIC_INTERCEPTORS_CAS(f, tsan_atomic_f, tsan_t, t) \
+ TSAN_INTERCEPTOR(bool, f, t old_value, t new_value, t volatile *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, old_value, new_value, ptr); \
+ return tsan_atomic_f##_compare_exchange_strong( \
+ (tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \
+ kMacOrderNonBarrier, kMacOrderNonBarrier); \
+ } \
+ \
+ TSAN_INTERCEPTOR(bool, f##Barrier, t old_value, t new_value, \
+ t volatile *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f##Barrier, old_value, new_value, ptr); \
+ return tsan_atomic_f##_compare_exchange_strong( \
+ (tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \
+ kMacOrderBarrier, kMacOrderNonBarrier); \
+ }
+
+OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapInt, __tsan_atomic32, a32, int)
+OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapLong, __tsan_atomic64, a64,
+ long_t)
+OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapPtr, __tsan_atomic64, a64,
+ void *)
+OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap32, __tsan_atomic32, a32,
+ int32_t)
+OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap64, __tsan_atomic64, a64,
+ int64_t)
+
+#define OSATOMIC_INTERCEPTOR_BITOP(f, op, m, mo) \
+ TSAN_INTERCEPTOR(bool, f, uint32_t n, volatile void *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, n, ptr); \
+ char *byte_ptr = ((char *)ptr) + (n >> 3); \
+ char bit_index = n & 7; \
+ char mask = m; \
+ char orig_byte = op((a8 *)byte_ptr, mask, mo); \
+ return orig_byte & mask; \
+ }
+
+#define OSATOMIC_INTERCEPTORS_BITOP(f, op, m) \
+ OSATOMIC_INTERCEPTOR_BITOP(f, op, m, kMacOrderNonBarrier) \
+ OSATOMIC_INTERCEPTOR_BITOP(f##Barrier, op, m, kMacOrderBarrier)
+
+OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndSet, __tsan_atomic8_fetch_or,
+ 0x80u >> bit_index)
+OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndClear, __tsan_atomic8_fetch_and,
+ ~(0x80u >> bit_index))
+
+TSAN_INTERCEPTOR(void, OSAtomicEnqueue, OSQueueHead *list, void *item,
+ size_t offset) {
+ SCOPED_TSAN_INTERCEPTOR(OSAtomicEnqueue, list, item, offset);
+ __tsan_release(item);
+ REAL(OSAtomicEnqueue)(list, item, offset);
+}
+
+TSAN_INTERCEPTOR(void *, OSAtomicDequeue, OSQueueHead *list, size_t offset) {
+ SCOPED_TSAN_INTERCEPTOR(OSAtomicDequeue, list, offset);
+ void *item = REAL(OSAtomicDequeue)(list, offset);
+ if (item) __tsan_acquire(item);
+ return item;
+}
+
+// OSAtomicFifoEnqueue and OSAtomicFifoDequeue are only on OS X.
+#if !SANITIZER_IOS
+
+TSAN_INTERCEPTOR(void, OSAtomicFifoEnqueue, OSFifoQueueHead *list, void *item,
+ size_t offset) {
+ SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoEnqueue, list, item, offset);
+ __tsan_release(item);
+ REAL(OSAtomicFifoEnqueue)(list, item, offset);
+}
+
+TSAN_INTERCEPTOR(void *, OSAtomicFifoDequeue, OSFifoQueueHead *list,
+ size_t offset) {
+ SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoDequeue, list, offset);
+ void *item = REAL(OSAtomicFifoDequeue)(list, offset);
+ if (item) __tsan_acquire(item);
+ return item;
+}
+
+#endif
+
TSAN_INTERCEPTOR(void, OSSpinLockLock, volatile OSSpinLock *lock) {
CHECK(!cur_thread()->is_dead);
if (!cur_thread()->is_inited) {
@@ -86,6 +236,98 @@ TSAN_INTERCEPTOR(void, os_lock_unlock, void *lock) {
REAL(os_lock_unlock)(lock);
}
+TSAN_INTERCEPTOR(void, xpc_connection_set_event_handler,
+ xpc_connection_t connection, xpc_handler_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(xpc_connection_set_event_handler, connection,
+ handler);
+ Release(thr, pc, (uptr)connection);
+ xpc_handler_t new_handler = ^(xpc_object_t object) {
+ {
+ SCOPED_INTERCEPTOR_RAW(xpc_connection_set_event_handler);
+ Acquire(thr, pc, (uptr)connection);
+ }
+ handler(object);
+ };
+ REAL(xpc_connection_set_event_handler)(connection, new_handler);
+}
+
+TSAN_INTERCEPTOR(void, xpc_connection_send_barrier, xpc_connection_t connection,
+ dispatch_block_t barrier) {
+ SCOPED_TSAN_INTERCEPTOR(xpc_connection_send_barrier, connection, barrier);
+ Release(thr, pc, (uptr)connection);
+ dispatch_block_t new_barrier = ^() {
+ {
+ SCOPED_INTERCEPTOR_RAW(xpc_connection_send_barrier);
+ Acquire(thr, pc, (uptr)connection);
+ }
+ barrier();
+ };
+ REAL(xpc_connection_send_barrier)(connection, new_barrier);
+}
+
+TSAN_INTERCEPTOR(void, xpc_connection_send_message_with_reply,
+ xpc_connection_t connection, xpc_object_t message,
+ dispatch_queue_t replyq, xpc_handler_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(xpc_connection_send_message_with_reply, connection,
+ message, replyq, handler);
+ Release(thr, pc, (uptr)connection);
+ xpc_handler_t new_handler = ^(xpc_object_t object) {
+ {
+ SCOPED_INTERCEPTOR_RAW(xpc_connection_send_message_with_reply);
+ Acquire(thr, pc, (uptr)connection);
+ }
+ handler(object);
+ };
+ REAL(xpc_connection_send_message_with_reply)
+ (connection, message, replyq, new_handler);
+}
+
+// On macOS, libc++ is always linked dynamically, so intercepting works the
+// usual way.
+#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
+
+namespace {
+struct fake_shared_weak_count {
+ volatile a64 shared_owners;
+ volatile a64 shared_weak_owners;
+ virtual void _unused_0x0() = 0;
+ virtual void _unused_0x8() = 0;
+ virtual void on_zero_shared() = 0;
+ virtual void _unused_0x18() = 0;
+ virtual void on_zero_shared_weak() = 0;
+};
+} // namespace
+
+// This adds a libc++ interceptor for:
+// void __shared_weak_count::__release_shared() _NOEXCEPT;
+// Shared and weak pointers in C++ maintain reference counts via atomics in
+// libc++.dylib, which are TSan-invisible, and this leads to false positives in
+// destructor code. This interceptor re-implements the whole function so that
+// the mo_acq_rel semantics of the atomic decrement are visible.
+//
+// Unfortunately, this interceptor cannot simply Acquire/Release some sync
+// object and call the original function, because it would have a race between
+// the sync and the destruction of the object. Calling both under a lock will
+// not work because the destructor can invoke this interceptor again (and even
+// in a different thread, so recursive locks don't help).
+STDCXX_INTERCEPTOR(void, _ZNSt3__119__shared_weak_count16__release_sharedEv,
+ fake_shared_weak_count *o) {
+ if (!flags()->shared_ptr_interceptor)
+ return REAL(_ZNSt3__119__shared_weak_count16__release_sharedEv)(o);
+
+ SCOPED_TSAN_INTERCEPTOR(_ZNSt3__119__shared_weak_count16__release_sharedEv,
+ o);
+ if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) {
+ Acquire(thr, pc, (uptr)&o->shared_owners);
+ o->on_zero_shared();
+ if (__tsan_atomic64_fetch_add(&o->shared_weak_owners, -1, mo_release) ==
+ 0) {
+ Acquire(thr, pc, (uptr)&o->shared_weak_owners);
+ o->on_zero_shared_weak();
+ }
+ }
+}
+
} // namespace __tsan
#endif // SANITIZER_MAC
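
To see what the __release_shared interceptor above addresses, consider this sketch (not part of the patch): the last two owners of a std::shared_ptr release it from different threads. The reference-count decrement happens inside libc++.dylib and was invisible to TSan, so the thread that ends up running the destructor appeared unordered with the other thread's last access, producing a false positive.

// Sketch of the false-positive pattern fixed above; build on macOS with
// -fsanitize=thread. Race-free in reality: the acq_rel refcount in libc++
// orders each thread's last access before the destructor runs.
#include <memory>
#include <thread>

struct Widget { int payload = 0; };

int main() {
  auto p = std::make_shared<Widget>();
  std::thread t([q = p]() mutable {
    q->payload = 1;  // last access in this thread
    q.reset();       // atomic decrement inside libc++.dylib
  });
  p.reset();         // whichever reset is last runs ~Widget()
  t.join();
  return 0;
}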
diff --git a/lib/tsan/rtl/tsan_interface.h b/lib/tsan/rtl/tsan_interface.h
index 41e084b82ab1..fbb099d07764 100644
--- a/lib/tsan/rtl/tsan_interface.h
+++ b/lib/tsan/rtl/tsan_interface.h
@@ -25,6 +25,8 @@
extern "C" {
#endif
+#ifndef SANITIZER_GO
+
// This function should be called at the very beginning of the process,
// before any instrumented code is executed and before any call to malloc.
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_init();
@@ -75,8 +77,296 @@ void __tsan_read_range(void *addr, unsigned long size); // NOLINT
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_write_range(void *addr, unsigned long size); // NOLINT
+// The user may provide a function that will be called when TSan detects an
+// error. The argument 'report' is an opaque pointer that can be used to
+// gather additional information using other TSan report API functions.
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_on_report(void *report);
+
+// If TSan is currently reporting a detected issue on the current thread,
+// returns an opaque pointer to the current report. Otherwise returns NULL.
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_get_current_report();
+
+// Returns a report's description (issue type), number of duplicate issues
+// found, counts of array data (stack traces, memory operations, locations,
+// mutexes, threads, unique thread IDs) and a stack trace of a sleep() call (if
+// one was involved in the issue).
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_data(void *report, const char **description, int *count,
+ int *stack_count, int *mop_count, int *loc_count,
+ int *mutex_count, int *thread_count,
+ int *unique_tid_count, void **sleep_trace,
+ uptr trace_size);
+
+// Returns information about stack traces included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_stack(void *report, uptr idx, void **trace,
+ uptr trace_size);
+
+// Returns information about memory operations included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_mop(void *report, uptr idx, int *tid, void **addr,
+ int *size, int *write, int *atomic, void **trace,
+ uptr trace_size);
+
+// Returns information about locations included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_loc(void *report, uptr idx, const char **type,
+ void **addr, uptr *start, uptr *size, int *tid,
+ int *fd, int *suppressable, void **trace,
+ uptr trace_size);
+
+// Returns information about mutexes included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_mutex(void *report, uptr idx, uptr *mutex_id, void **addr,
+ int *destroyed, void **trace, uptr trace_size);
+
+// Returns information about threads included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_thread(void *report, uptr idx, int *tid, uptr *os_id,
+ int *running, const char **name, int *parent_tid,
+ void **trace, uptr trace_size);
+
+// Returns information about unique thread IDs included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_unique_tid(void *report, uptr idx, int *tid);
+
+#endif // SANITIZER_GO
+
#ifdef __cplusplus
} // extern "C"
#endif
+namespace __tsan {
+
+// These should match declarations from public tsan_interface_atomic.h header.
+typedef unsigned char a8;
+typedef unsigned short a16; // NOLINT
+typedef unsigned int a32;
+typedef unsigned long long a64; // NOLINT
+#if !defined(SANITIZER_GO) && (defined(__SIZEOF_INT128__) \
+ || (__clang_major__ * 100 + __clang_minor__ >= 302)) && !defined(__mips64)
+__extension__ typedef __int128 a128;
+# define __TSAN_HAS_INT128 1
+#else
+# define __TSAN_HAS_INT128 0
+#endif
+
+// Part of ABI, do not change.
+// http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
+typedef enum {
+ mo_relaxed,
+ mo_consume,
+ mo_acquire,
+ mo_release,
+ mo_acq_rel,
+ mo_seq_cst
+} morder;
+
+struct ThreadState;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_load(const volatile a8 *a, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_load(const volatile a16 *a, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_load(const volatile a32 *a, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_load(const volatile a64 *a, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_load(const volatile a128 *a, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
+ morder mo, morder fmo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
+ morder mo, morder fmo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, morder mo,
+ morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
+ morder mo, morder fmo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
+ morder mo, morder fmo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, morder mo,
+ morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
+ morder mo, morder fmo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
+ morder mo, morder fmo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic_thread_fence(morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic_signal_fence(morder mo);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
+ u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
+ u8 *a);
+} // extern "C"
+
+} // namespace __tsan
+
#endif // TSAN_INTERFACE_H
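
As a usage sketch of the debugging API above (illustrative only: the extern
declaration mirrors the header, uptr is written as unsigned long as on LP64
targets, and the fprintf body stands in for real processing):

    #include <stdio.h>

    extern "C" int __tsan_get_report_data(
        void *report, const char **description, int *count, int *stack_count,
        int *mop_count, int *loc_count, int *mutex_count, int *thread_count,
        int *unique_tid_count, void **sleep_trace, unsigned long trace_size);

    // TSan calls this hook (if the program defines it) for every report.
    extern "C" void __tsan_on_report(void *report) {
      const char *description;
      int count, stack_count, mop_count, loc_count, mutex_count, thread_count,
          unique_tid_count;
      void *sleep_trace[16] = {};
      __tsan_get_report_data(report, &description, &count, &stack_count,
                             &mop_count, &loc_count, &mutex_count,
                             &thread_count, &unique_tid_count, sleep_trace, 16);
      fprintf(stderr, "TSan: '%s' (%d memory op(s), %d thread(s))\n",
              description, mop_count, thread_count);
    }
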
diff --git a/lib/tsan/rtl/tsan_interface_atomic.cc b/lib/tsan/rtl/tsan_interface_atomic.cc
index 27031991438c..dc0873f7948b 100644
--- a/lib/tsan/rtl/tsan_interface_atomic.cc
+++ b/lib/tsan/rtl/tsan_interface_atomic.cc
@@ -23,39 +23,16 @@
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
+#include "tsan_interface.h"
#include "tsan_rtl.h"
using namespace __tsan; // NOLINT
-// These should match declarations from public tsan_interface_atomic.h header.
-typedef unsigned char a8;
-typedef unsigned short a16; // NOLINT
-typedef unsigned int a32;
-typedef unsigned long long a64; // NOLINT
-#if !defined(SANITIZER_GO) && (defined(__SIZEOF_INT128__) \
- || (__clang_major__ * 100 + __clang_minor__ >= 302)) && !defined(__mips64)
-__extension__ typedef __int128 a128;
-# define __TSAN_HAS_INT128 1
-#else
-# define __TSAN_HAS_INT128 0
-#endif
-
#if !defined(SANITIZER_GO) && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif
-// Part of ABI, do not change.
-// http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
-typedef enum {
- mo_relaxed,
- mo_consume,
- mo_acquire,
- mo_release,
- mo_acq_rel,
- mo_seq_cst
-} morder;
-
static bool IsLoadOrder(morder mo) {
return mo == mo_relaxed || mo == mo_consume
|| mo == mo_acquire || mo == mo_seq_cst;
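
For orientation, under -fsanitize=thread the compiler lowers C/C++ atomics to
these entry points. A hand-written sketch of that lowering (illustrative, not
actual compiler output; the typedefs repeat the header's):

    extern "C" {
    typedef unsigned int a32;
    typedef enum { mo_relaxed, mo_consume, mo_acquire, mo_release, mo_acq_rel,
                   mo_seq_cst } morder;
    a32 __tsan_atomic32_load(const volatile a32 *a, morder mo);
    a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo);
    }

    unsigned int load_acquire(const volatile unsigned int *p) {
      return __tsan_atomic32_load(p, mo_acquire);          // acquire load
    }
    unsigned int increment(volatile unsigned int *p) {
      return __tsan_atomic32_fetch_add(p, 1, mo_seq_cst);  // seq_cst RMW
    }
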
diff --git a/lib/tsan/rtl/tsan_interface_java.cc b/lib/tsan/rtl/tsan_interface_java.cc
index 0aea63d11671..95be85994c64 100644
--- a/lib/tsan/rtl/tsan_interface_java.cc
+++ b/lib/tsan/rtl/tsan_interface_java.cc
@@ -111,7 +111,7 @@ void __tsan_java_free(jptr ptr, jptr size) {
CHECK_GE(ptr, jctx->heap_begin);
CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
- ctx->metamap.FreeRange(thr, pc, ptr, size);
+ ctx->metamap.FreeRange(thr->proc(), ptr, size);
}
void __tsan_java_move(jptr src, jptr dst, jptr size) {
diff --git a/lib/tsan/rtl/tsan_libdispatch_mac.cc b/lib/tsan/rtl/tsan_libdispatch_mac.cc
index 617dc91b33d0..529cedba4d2b 100644
--- a/lib/tsan/rtl/tsan_libdispatch_mac.cc
+++ b/lib/tsan/rtl/tsan_libdispatch_mac.cc
@@ -33,8 +33,10 @@ typedef struct {
dispatch_queue_t queue;
void *orig_context;
dispatch_function_t orig_work;
- uptr object_to_acquire;
- dispatch_object_t object_to_release;
+ bool free_context_in_callback;
+ bool submitted_synchronously;
+ bool is_barrier_block;
+ uptr non_queue_sync_object;
} tsan_block_context_t;
// The offsets of different fields of the dispatch_queue_t structure, exported
@@ -66,6 +68,15 @@ static bool IsQueueSerial(dispatch_queue_t q) {
return width == 1;
}
+static dispatch_queue_t GetTargetQueueFromSource(dispatch_source_t source) {
+ CHECK_EQ(dispatch_queue_offsets.dqo_target_queue_size, 8);
+ dispatch_queue_t target_queue =
+ *(dispatch_queue_t *)(((uptr)source) +
+ dispatch_queue_offsets.dqo_target_queue);
+ CHECK_NE(target_queue, 0);
+ return target_queue;
+}
+
static tsan_block_context_t *AllocContext(ThreadState *thr, uptr pc,
dispatch_queue_t queue,
void *orig_context,
@@ -75,30 +86,40 @@ static tsan_block_context_t *AllocContext(ThreadState *thr, uptr pc,
new_context->queue = queue;
new_context->orig_context = orig_context;
new_context->orig_work = orig_work;
- new_context->object_to_acquire = (uptr)new_context;
- new_context->object_to_release = nullptr;
+ new_context->free_context_in_callback = true;
+ new_context->submitted_synchronously = false;
+ new_context->is_barrier_block = false;
return new_context;
}
-static void dispatch_callback_wrap_acquire(void *param) {
- SCOPED_INTERCEPTOR_RAW(dispatch_async_f_callback_wrap);
+static void dispatch_callback_wrap(void *param) {
+ SCOPED_INTERCEPTOR_RAW(dispatch_callback_wrap);
tsan_block_context_t *context = (tsan_block_context_t *)param;
- Acquire(thr, pc, context->object_to_acquire);
+ bool is_queue_serial = context->queue && IsQueueSerial(context->queue);
+ uptr sync_ptr = (uptr)context->queue ?: context->non_queue_sync_object;
- // Extra retain/release is required for dispatch groups. We use the group
- // itself to synchronize, but in a notification (dispatch_group_notify
- // callback), it may be disposed already. To solve this, we retain the group
- // and release it here.
- if (context->object_to_release) dispatch_release(context->object_to_release);
+ uptr serial_sync = (uptr)sync_ptr;
+ uptr concurrent_sync = ((uptr)sync_ptr) + sizeof(uptr);
+ uptr submit_sync = (uptr)context;
+ bool serial_task = context->is_barrier_block || is_queue_serial;
+
+ Acquire(thr, pc, submit_sync);
+ Acquire(thr, pc, serial_sync);
+ if (serial_task) Acquire(thr, pc, concurrent_sync);
- // In serial queues, work items can be executed on different threads, we need
- // to explicitly synchronize on the queue itself.
- if (IsQueueSerial(context->queue)) Acquire(thr, pc, (uptr)context->queue);
SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
context->orig_work(context->orig_context);
SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
- if (IsQueueSerial(context->queue)) Release(thr, pc, (uptr)context->queue);
- user_free(thr, pc, context);
+
+ Release(thr, pc, serial_task ? serial_sync : concurrent_sync);
+ if (context->submitted_synchronously) Release(thr, pc, submit_sync);
+
+ if (context->free_context_in_callback) user_free(thr, pc, context);
+}
+
+static void invoke_block(void *param) {
+ dispatch_block_t block = (dispatch_block_t)param;
+ block();
}
static void invoke_and_release_block(void *param) {
@@ -107,44 +128,97 @@ static void invoke_and_release_block(void *param) {
Block_release(block);
}
-#define DISPATCH_INTERCEPT_B(name) \
+#define DISPATCH_INTERCEPT_B(name, barrier) \
TSAN_INTERCEPTOR(void, name, dispatch_queue_t q, dispatch_block_t block) { \
SCOPED_TSAN_INTERCEPTOR(name, q, block); \
- SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \
dispatch_block_t heap_block = Block_copy(block); \
- SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \
tsan_block_context_t *new_context = \
AllocContext(thr, pc, q, heap_block, &invoke_and_release_block); \
+ new_context->is_barrier_block = barrier; \
Release(thr, pc, (uptr)new_context); \
- SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \
- REAL(name##_f)(q, new_context, dispatch_callback_wrap_acquire); \
- SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \
+ REAL(name##_f)(q, new_context, dispatch_callback_wrap); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \
+ }
+
+#define DISPATCH_INTERCEPT_SYNC_B(name, barrier) \
+ TSAN_INTERCEPTOR(void, name, dispatch_queue_t q, dispatch_block_t block) { \
+ SCOPED_TSAN_INTERCEPTOR(name, q, block); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \
+ dispatch_block_t heap_block = Block_copy(block); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \
+ tsan_block_context_t new_context = { \
+ q, heap_block, &invoke_and_release_block, false, true, barrier, 0}; \
+ Release(thr, pc, (uptr)&new_context); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \
+ REAL(name##_f)(q, &new_context, dispatch_callback_wrap); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \
+ Acquire(thr, pc, (uptr)&new_context); \
}
-#define DISPATCH_INTERCEPT_F(name) \
+#define DISPATCH_INTERCEPT_F(name, barrier) \
TSAN_INTERCEPTOR(void, name, dispatch_queue_t q, void *context, \
dispatch_function_t work) { \
SCOPED_TSAN_INTERCEPTOR(name, q, context, work); \
tsan_block_context_t *new_context = \
AllocContext(thr, pc, q, context, work); \
+ new_context->is_barrier_block = barrier; \
Release(thr, pc, (uptr)new_context); \
- SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \
- REAL(name)(q, new_context, dispatch_callback_wrap_acquire); \
- SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \
+ REAL(name)(q, new_context, dispatch_callback_wrap); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \
+ }
+
+#define DISPATCH_INTERCEPT_SYNC_F(name, barrier) \
+ TSAN_INTERCEPTOR(void, name, dispatch_queue_t q, void *context, \
+ dispatch_function_t work) { \
+ SCOPED_TSAN_INTERCEPTOR(name, q, context, work); \
+ tsan_block_context_t new_context = { \
+ q, context, work, false, true, barrier, 0}; \
+ Release(thr, pc, (uptr)&new_context); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \
+ REAL(name)(q, &new_context, dispatch_callback_wrap); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \
+ Acquire(thr, pc, (uptr)&new_context); \
}
// We wrap dispatch_async, dispatch_sync and friends where we allocate a new
// context, which is used to synchronize (we release the context before
// submitting, and the callback acquires it before executing the original
// callback).
-DISPATCH_INTERCEPT_B(dispatch_async)
-DISPATCH_INTERCEPT_B(dispatch_barrier_async)
-DISPATCH_INTERCEPT_F(dispatch_async_f)
-DISPATCH_INTERCEPT_F(dispatch_barrier_async_f)
-DISPATCH_INTERCEPT_B(dispatch_sync)
-DISPATCH_INTERCEPT_B(dispatch_barrier_sync)
-DISPATCH_INTERCEPT_F(dispatch_sync_f)
-DISPATCH_INTERCEPT_F(dispatch_barrier_sync_f)
+DISPATCH_INTERCEPT_B(dispatch_async, false)
+DISPATCH_INTERCEPT_B(dispatch_barrier_async, true)
+DISPATCH_INTERCEPT_F(dispatch_async_f, false)
+DISPATCH_INTERCEPT_F(dispatch_barrier_async_f, true)
+DISPATCH_INTERCEPT_SYNC_B(dispatch_sync, false)
+DISPATCH_INTERCEPT_SYNC_B(dispatch_barrier_sync, true)
+DISPATCH_INTERCEPT_SYNC_F(dispatch_sync_f, false)
+DISPATCH_INTERCEPT_SYNC_F(dispatch_barrier_sync_f, true)
+
+TSAN_INTERCEPTOR(void, dispatch_after, dispatch_time_t when,
+ dispatch_queue_t queue, dispatch_block_t block) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_after, when, queue, block);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ dispatch_block_t heap_block = Block_copy(block);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+ tsan_block_context_t *new_context =
+ AllocContext(thr, pc, queue, heap_block, &invoke_and_release_block);
+ Release(thr, pc, (uptr)new_context);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ REAL(dispatch_after_f)(when, queue, new_context, dispatch_callback_wrap);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+}
+
+TSAN_INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
+ dispatch_queue_t queue, void *context,
+ dispatch_function_t work) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_after_f, when, queue, context, work);
+ WRAP(dispatch_after)(when, queue, ^(void) {
+ work(context);
+ });
+}
// GCD's dispatch_once implementation has a fast path that contains a racy read
// and it's inlined into user's code. Furthermore, this fast path doesn't
@@ -161,7 +235,7 @@ DISPATCH_INTERCEPT_F(dispatch_barrier_sync_f)
#undef dispatch_once
TSAN_INTERCEPTOR(void, dispatch_once, dispatch_once_t *predicate,
dispatch_block_t block) {
- SCOPED_TSAN_INTERCEPTOR(dispatch_once, predicate, block);
+ SCOPED_INTERCEPTOR_RAW(dispatch_once, predicate, block);
atomic_uint32_t *a = reinterpret_cast<atomic_uint32_t *>(predicate);
u32 v = atomic_load(a, memory_order_acquire);
if (v == 0 &&
@@ -183,7 +257,7 @@ TSAN_INTERCEPTOR(void, dispatch_once, dispatch_once_t *predicate,
#undef dispatch_once_f
TSAN_INTERCEPTOR(void, dispatch_once_f, dispatch_once_t *predicate,
void *context, dispatch_function_t function) {
- SCOPED_TSAN_INTERCEPTOR(dispatch_once_f, predicate, context, function);
+ SCOPED_INTERCEPTOR_RAW(dispatch_once_f, predicate, context, function);
SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
WRAP(dispatch_once)(predicate, ^(void) {
function(context);
@@ -216,6 +290,7 @@ TSAN_INTERCEPTOR(long_t, dispatch_group_wait, dispatch_group_t group,
TSAN_INTERCEPTOR(void, dispatch_group_leave, dispatch_group_t group) {
SCOPED_TSAN_INTERCEPTOR(dispatch_group_leave, group);
+  // Acquired in the group notification callback in dispatch_group_notify[_f].
Release(thr, pc, (uptr)group);
REAL(dispatch_group_leave)(group);
}
@@ -225,8 +300,10 @@ TSAN_INTERCEPTOR(void, dispatch_group_async, dispatch_group_t group,
SCOPED_TSAN_INTERCEPTOR(dispatch_group_async, group, queue, block);
dispatch_retain(group);
dispatch_group_enter(group);
+ __block dispatch_block_t block_copy = (dispatch_block_t)_Block_copy(block);
WRAP(dispatch_async)(queue, ^(void) {
- block();
+ block_copy();
+ _Block_release(block_copy);
WRAP(dispatch_group_leave)(group);
dispatch_release(group);
});
@@ -248,35 +325,355 @@ TSAN_INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
TSAN_INTERCEPTOR(void, dispatch_group_notify, dispatch_group_t group,
dispatch_queue_t q, dispatch_block_t block) {
SCOPED_TSAN_INTERCEPTOR(dispatch_group_notify, group, q, block);
+
+  // Retain the group so that it is still available in the callback (otherwise
+  // it may already be destroyed). Released in the callback.
+ dispatch_retain(group);
+
SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
- dispatch_block_t heap_block = Block_copy(block);
+ dispatch_block_t heap_block = Block_copy(^(void) {
+ {
+ SCOPED_INTERCEPTOR_RAW(dispatch_read_callback);
+ // Released when leaving the group (dispatch_group_leave).
+ Acquire(thr, pc, (uptr)group);
+ }
+ dispatch_release(group);
+ block();
+ });
SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
tsan_block_context_t *new_context =
AllocContext(thr, pc, q, heap_block, &invoke_and_release_block);
- new_context->object_to_acquire = (uptr)group;
-
- // Will be released in dispatch_callback_wrap_acquire.
- new_context->object_to_release = group;
- dispatch_retain(group);
-
- Release(thr, pc, (uptr)group);
- REAL(dispatch_group_notify_f)(group, q, new_context,
- dispatch_callback_wrap_acquire);
+ new_context->is_barrier_block = true;
+ Release(thr, pc, (uptr)new_context);
+ REAL(dispatch_group_notify_f)(group, q, new_context, dispatch_callback_wrap);
}
TSAN_INTERCEPTOR(void, dispatch_group_notify_f, dispatch_group_t group,
dispatch_queue_t q, void *context, dispatch_function_t work) {
- SCOPED_TSAN_INTERCEPTOR(dispatch_group_notify_f, group, q, context, work);
- tsan_block_context_t *new_context = AllocContext(thr, pc, q, context, work);
- new_context->object_to_acquire = (uptr)group;
+ WRAP(dispatch_group_notify)(group, q, ^(void) { work(context); });
+}
- // Will be released in dispatch_callback_wrap_acquire.
- new_context->object_to_release = group;
- dispatch_retain(group);
+TSAN_INTERCEPTOR(void, dispatch_source_set_event_handler,
+ dispatch_source_t source, dispatch_block_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_source_set_event_handler, source, handler);
+ if (handler == nullptr)
+ return REAL(dispatch_source_set_event_handler)(source, nullptr);
+ dispatch_queue_t q = GetTargetQueueFromSource(source);
+ __block tsan_block_context_t new_context = {
+ q, handler, &invoke_block, false, false, false, 0 };
+ dispatch_block_t new_handler = Block_copy(^(void) {
+ new_context.orig_context = handler; // To explicitly capture "handler".
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_source_set_event_handler)(source, new_handler);
+ Block_release(new_handler);
+}
- Release(thr, pc, (uptr)group);
- REAL(dispatch_group_notify_f)(group, q, new_context,
- dispatch_callback_wrap_acquire);
+TSAN_INTERCEPTOR(void, dispatch_source_set_event_handler_f,
+ dispatch_source_t source, dispatch_function_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_source_set_event_handler_f, source, handler);
+ if (handler == nullptr)
+ return REAL(dispatch_source_set_event_handler)(source, nullptr);
+ dispatch_block_t block = ^(void) {
+ handler(dispatch_get_context(source));
+ };
+ WRAP(dispatch_source_set_event_handler)(source, block);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_source_set_cancel_handler,
+ dispatch_source_t source, dispatch_block_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_source_set_cancel_handler, source, handler);
+ if (handler == nullptr)
+ return REAL(dispatch_source_set_cancel_handler)(source, nullptr);
+ dispatch_queue_t q = GetTargetQueueFromSource(source);
+ __block tsan_block_context_t new_context = {
+ q, handler, &invoke_block, false, false, false, 0};
+ dispatch_block_t new_handler = Block_copy(^(void) {
+ new_context.orig_context = handler; // To explicitly capture "handler".
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_source_set_cancel_handler)(source, new_handler);
+ Block_release(new_handler);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_source_set_cancel_handler_f,
+ dispatch_source_t source, dispatch_function_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_source_set_cancel_handler_f, source,
+ handler);
+ if (handler == nullptr)
+ return REAL(dispatch_source_set_cancel_handler)(source, nullptr);
+ dispatch_block_t block = ^(void) {
+ handler(dispatch_get_context(source));
+ };
+ WRAP(dispatch_source_set_cancel_handler)(source, block);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_source_set_registration_handler,
+ dispatch_source_t source, dispatch_block_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_source_set_registration_handler, source,
+ handler);
+ if (handler == nullptr)
+ return REAL(dispatch_source_set_registration_handler)(source, nullptr);
+ dispatch_queue_t q = GetTargetQueueFromSource(source);
+ __block tsan_block_context_t new_context = {
+ q, handler, &invoke_block, false, false, false, 0};
+ dispatch_block_t new_handler = Block_copy(^(void) {
+ new_context.orig_context = handler; // To explicitly capture "handler".
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_source_set_registration_handler)(source, new_handler);
+ Block_release(new_handler);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_source_set_registration_handler_f,
+ dispatch_source_t source, dispatch_function_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_source_set_registration_handler_f, source,
+ handler);
+ if (handler == nullptr)
+ return REAL(dispatch_source_set_registration_handler)(source, nullptr);
+ dispatch_block_t block = ^(void) {
+ handler(dispatch_get_context(source));
+ };
+ WRAP(dispatch_source_set_registration_handler)(source, block);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_apply, size_t iterations,
+ dispatch_queue_t queue, void (^block)(size_t)) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_apply, iterations, queue, block);
+
+ void *parent_to_child_sync = nullptr;
+ uptr parent_to_child_sync_uptr = (uptr)&parent_to_child_sync;
+ void *child_to_parent_sync = nullptr;
+ uptr child_to_parent_sync_uptr = (uptr)&child_to_parent_sync;
+
+ Release(thr, pc, parent_to_child_sync_uptr);
+ void (^new_block)(size_t) = ^(size_t iteration) {
+ SCOPED_INTERCEPTOR_RAW(dispatch_apply);
+ Acquire(thr, pc, parent_to_child_sync_uptr);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ block(iteration);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+ Release(thr, pc, child_to_parent_sync_uptr);
+ };
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ REAL(dispatch_apply)(iterations, queue, new_block);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+ Acquire(thr, pc, child_to_parent_sync_uptr);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_apply_f, size_t iterations,
+ dispatch_queue_t queue, void *context,
+ void (*work)(void *, size_t)) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_apply_f, iterations, queue, context, work);
+ void (^new_block)(size_t) = ^(size_t iteration) {
+ work(context, iteration);
+ };
+ WRAP(dispatch_apply)(iterations, queue, new_block);
+}
+
+DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
+DECLARE_REAL_AND_INTERCEPTOR(int, munmap, void *addr, long_t sz)
+
+TSAN_INTERCEPTOR(dispatch_data_t, dispatch_data_create, const void *buffer,
+ size_t size, dispatch_queue_t q, dispatch_block_t destructor) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_data_create, buffer, size, q, destructor);
+ if ((q == nullptr) || (destructor == DISPATCH_DATA_DESTRUCTOR_DEFAULT))
+ return REAL(dispatch_data_create)(buffer, size, q, destructor);
+
+ if (destructor == DISPATCH_DATA_DESTRUCTOR_FREE)
+ destructor = ^(void) { WRAP(free)((void *)buffer); };
+ else if (destructor == DISPATCH_DATA_DESTRUCTOR_MUNMAP)
+ destructor = ^(void) { WRAP(munmap)((void *)buffer, size); };
+
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ dispatch_block_t heap_block = Block_copy(destructor);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+ tsan_block_context_t *new_context =
+ AllocContext(thr, pc, q, heap_block, &invoke_and_release_block);
+ uptr submit_sync = (uptr)new_context;
+ Release(thr, pc, submit_sync);
+ return REAL(dispatch_data_create)(buffer, size, q, ^(void) {
+ dispatch_callback_wrap(new_context);
+ });
+}
+
+typedef void (^fd_handler_t)(dispatch_data_t data, int error);
+typedef void (^cleanup_handler_t)(int error);
+
+TSAN_INTERCEPTOR(void, dispatch_read, dispatch_fd_t fd, size_t length,
+ dispatch_queue_t q, fd_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_read, fd, length, q, h);
+ __block tsan_block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ fd_handler_t new_h = Block_copy(^(dispatch_data_t data, int error) {
+ new_context.orig_context = ^(void) {
+ h(data, error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_read)(fd, length, q, new_h);
+ Block_release(new_h);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_write, dispatch_fd_t fd, dispatch_data_t data,
+ dispatch_queue_t q, fd_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_write, fd, data, q, h);
+ __block tsan_block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ fd_handler_t new_h = Block_copy(^(dispatch_data_t data, int error) {
+ new_context.orig_context = ^(void) {
+ h(data, error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_write)(fd, data, q, new_h);
+ Block_release(new_h);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_io_read, dispatch_io_t channel, off_t offset,
+ size_t length, dispatch_queue_t q, dispatch_io_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_read, channel, offset, length, q, h);
+ __block tsan_block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ dispatch_io_handler_t new_h =
+ Block_copy(^(bool done, dispatch_data_t data, int error) {
+ new_context.orig_context = ^(void) {
+ h(done, data, error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_io_read)(channel, offset, length, q, new_h);
+ Block_release(new_h);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_io_write, dispatch_io_t channel, off_t offset,
+ dispatch_data_t data, dispatch_queue_t q,
+ dispatch_io_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_write, channel, offset, data, q, h);
+ __block tsan_block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ dispatch_io_handler_t new_h =
+ Block_copy(^(bool done, dispatch_data_t data, int error) {
+ new_context.orig_context = ^(void) {
+ h(done, data, error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_io_write)(channel, offset, data, q, new_h);
+ Block_release(new_h);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_io_barrier, dispatch_io_t channel,
+ dispatch_block_t barrier) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_barrier, channel, barrier);
+ __block tsan_block_context_t new_context = {
+ nullptr, nullptr, &invoke_block, false, false, false, 0};
+ new_context.non_queue_sync_object = (uptr)channel;
+ new_context.is_barrier_block = true;
+ dispatch_block_t new_block = Block_copy(^(void) {
+ new_context.orig_context = ^(void) {
+ barrier();
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_io_barrier)(channel, new_block);
+ Block_release(new_block);
+}
+
+TSAN_INTERCEPTOR(dispatch_io_t, dispatch_io_create, dispatch_io_type_t type,
+ dispatch_fd_t fd, dispatch_queue_t q, cleanup_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_create, type, fd, q, h);
+ __block dispatch_io_t new_channel = nullptr;
+ __block tsan_block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ cleanup_handler_t new_h = Block_copy(^(int error) {
+ {
+ SCOPED_INTERCEPTOR_RAW(dispatch_io_create_callback);
+ Acquire(thr, pc, (uptr)new_channel); // Release() in dispatch_io_close.
+ }
+ new_context.orig_context = ^(void) {
+ h(error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ new_channel = REAL(dispatch_io_create)(type, fd, q, new_h);
+ Block_release(new_h);
+ return new_channel;
+}
+
+TSAN_INTERCEPTOR(dispatch_io_t, dispatch_io_create_with_path,
+ dispatch_io_type_t type, const char *path, int oflag,
+ mode_t mode, dispatch_queue_t q, cleanup_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_create_with_path, type, path, oflag, mode,
+ q, h);
+ __block dispatch_io_t new_channel = nullptr;
+ __block tsan_block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ cleanup_handler_t new_h = Block_copy(^(int error) {
+ {
+ SCOPED_INTERCEPTOR_RAW(dispatch_io_create_callback);
+ Acquire(thr, pc, (uptr)new_channel); // Release() in dispatch_io_close.
+ }
+ new_context.orig_context = ^(void) {
+ h(error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ new_channel =
+ REAL(dispatch_io_create_with_path)(type, path, oflag, mode, q, new_h);
+ Block_release(new_h);
+ return new_channel;
+}
+
+TSAN_INTERCEPTOR(dispatch_io_t, dispatch_io_create_with_io,
+ dispatch_io_type_t type, dispatch_io_t io, dispatch_queue_t q,
+ cleanup_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_create_with_io, type, io, q, h);
+ __block dispatch_io_t new_channel = nullptr;
+ __block tsan_block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ cleanup_handler_t new_h = Block_copy(^(int error) {
+ {
+ SCOPED_INTERCEPTOR_RAW(dispatch_io_create_callback);
+ Acquire(thr, pc, (uptr)new_channel); // Release() in dispatch_io_close.
+ }
+ new_context.orig_context = ^(void) {
+ h(error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ new_channel = REAL(dispatch_io_create_with_io)(type, io, q, new_h);
+ Block_release(new_h);
+ return new_channel;
+}
+
+TSAN_INTERCEPTOR(void, dispatch_io_close, dispatch_io_t channel,
+ dispatch_io_close_flags_t flags) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_close, channel, flags);
+ Release(thr, pc, (uptr)channel); // Acquire() in dispatch_io_create[_*].
+ return REAL(dispatch_io_close)(channel, flags);
}
} // namespace __tsan
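
Taken together, the new context fields implement one protocol: the submitter
does Release(context) before handing work to GCD, dispatch_callback_wrap does
Acquire(context) before running it, and the synchronous variants add the
reverse Release/Acquire pair so that completion also synchronizes. A minimal
macOS program showing the user-visible effect (illustrative; compile with
clang -fsanitize=thread):

    #include <dispatch/dispatch.h>

    int main() {
      __block int data = 0;
      dispatch_queue_t q =
          dispatch_queue_create("example.queue", DISPATCH_QUEUE_CONCURRENT);
      dispatch_sync(q, ^{ data = 42; });  // write runs on a worker thread
      int r = data;  // read on the submitter: synchronized, so no race report
      dispatch_release(q);
      return r == 42 ? 0 : 1;
    }
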
diff --git a/lib/tsan/rtl/tsan_malloc_mac.cc b/lib/tsan/rtl/tsan_malloc_mac.cc
index 7fd94273c314..8d31ccbcaaca 100644
--- a/lib/tsan/rtl/tsan_malloc_mac.cc
+++ b/lib/tsan/rtl/tsan_malloc_mac.cc
@@ -27,33 +27,28 @@ using namespace __tsan;
#define COMMON_MALLOC_MEMALIGN(alignment, size) \
void *p = \
user_alloc(cur_thread(), StackTrace::GetCurrentPc(), size, alignment)
-#define COMMON_MALLOC_MALLOC(size) \
- if (cur_thread()->in_symbolizer) \
- return REAL(malloc)(size); \
- SCOPED_INTERCEPTOR_RAW(malloc, size); \
+#define COMMON_MALLOC_MALLOC(size) \
+ if (cur_thread()->in_symbolizer) return InternalAlloc(size); \
+ SCOPED_INTERCEPTOR_RAW(malloc, size); \
void *p = user_alloc(thr, pc, size)
-#define COMMON_MALLOC_REALLOC(ptr, size) \
- if (cur_thread()->in_symbolizer) \
- return REAL(realloc)(ptr, size); \
- SCOPED_INTERCEPTOR_RAW(realloc, ptr, size); \
+#define COMMON_MALLOC_REALLOC(ptr, size) \
+ if (cur_thread()->in_symbolizer) return InternalRealloc(ptr, size); \
+ SCOPED_INTERCEPTOR_RAW(realloc, ptr, size); \
void *p = user_realloc(thr, pc, ptr, size)
-#define COMMON_MALLOC_CALLOC(count, size) \
- if (cur_thread()->in_symbolizer) \
- return REAL(calloc)(count, size); \
- SCOPED_INTERCEPTOR_RAW(calloc, size, count); \
+#define COMMON_MALLOC_CALLOC(count, size) \
+ if (cur_thread()->in_symbolizer) return InternalCalloc(count, size); \
+ SCOPED_INTERCEPTOR_RAW(calloc, size, count); \
void *p = user_calloc(thr, pc, size, count)
-#define COMMON_MALLOC_VALLOC(size) \
- if (cur_thread()->in_symbolizer) \
- return REAL(valloc)(size); \
- SCOPED_INTERCEPTOR_RAW(valloc, size); \
+#define COMMON_MALLOC_VALLOC(size) \
+ if (cur_thread()->in_symbolizer) \
+ return InternalAlloc(size, nullptr, GetPageSizeCached()); \
+ SCOPED_INTERCEPTOR_RAW(valloc, size); \
void *p = user_alloc(thr, pc, size, GetPageSizeCached())
-#define COMMON_MALLOC_FREE(ptr) \
- if (cur_thread()->in_symbolizer) \
- return REAL(free)(ptr); \
- SCOPED_INTERCEPTOR_RAW(free, ptr); \
+#define COMMON_MALLOC_FREE(ptr) \
+ if (cur_thread()->in_symbolizer) return InternalFree(ptr); \
+ SCOPED_INTERCEPTOR_RAW(free, ptr); \
user_free(thr, pc, ptr)
-#define COMMON_MALLOC_SIZE(ptr) \
- uptr size = user_alloc_usable_size(ptr);
+#define COMMON_MALLOC_SIZE(ptr) uptr size = user_alloc_usable_size(ptr);
#define COMMON_MALLOC_FILL_STATS(zone, stats)
#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
(void)zone_name; \
diff --git a/lib/tsan/rtl/tsan_mman.cc b/lib/tsan/rtl/tsan_mman.cc
index 7247c6e00035..7693077f622b 100644
--- a/lib/tsan/rtl/tsan_mman.cc
+++ b/lib/tsan/rtl/tsan_mman.cc
@@ -63,18 +63,69 @@ Allocator *allocator() {
return reinterpret_cast<Allocator*>(&allocator_placeholder);
}
+struct GlobalProc {
+ Mutex mtx;
+ Processor *proc;
+
+ GlobalProc()
+ : mtx(MutexTypeGlobalProc, StatMtxGlobalProc)
+ , proc(ProcCreate()) {
+ }
+};
+
+static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
+GlobalProc *global_proc() {
+ return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
+}
+
+ScopedGlobalProcessor::ScopedGlobalProcessor() {
+ GlobalProc *gp = global_proc();
+ ThreadState *thr = cur_thread();
+ if (thr->proc())
+ return;
+ // If we don't have a proc, use the global one.
+  // There are currently only two known cases where this path is triggered:
+ // __interceptor_free
+ // __nptl_deallocate_tsd
+ // start_thread
+ // clone
+ // and:
+ // ResetRange
+ // __interceptor_munmap
+ // __deallocate_stack
+ // start_thread
+ // clone
+  // Ideally, we would destroy the thread state (and unwire the proc) when a
+  // thread actually exits (i.e. when we join/wait for it). Then we would not
+  // need the global proc.
+ gp->mtx.Lock();
+ ProcWire(gp->proc, thr);
+}
+
+ScopedGlobalProcessor::~ScopedGlobalProcessor() {
+ GlobalProc *gp = global_proc();
+ ThreadState *thr = cur_thread();
+ if (thr->proc() != gp->proc)
+ return;
+ ProcUnwire(gp->proc, thr);
+ gp->mtx.Unlock();
+}
+
void InitializeAllocator() {
allocator()->Init(common_flags()->allocator_may_return_null);
}
-void AllocatorThreadStart(ThreadState *thr) {
- allocator()->InitCache(&thr->alloc_cache);
- internal_allocator()->InitCache(&thr->internal_alloc_cache);
+void InitializeAllocatorLate() {
+ new(global_proc()) GlobalProc();
+}
+
+void AllocatorProcStart(Processor *proc) {
+ allocator()->InitCache(&proc->alloc_cache);
+ internal_allocator()->InitCache(&proc->internal_alloc_cache);
}
-void AllocatorThreadFinish(ThreadState *thr) {
- allocator()->DestroyCache(&thr->alloc_cache);
- internal_allocator()->DestroyCache(&thr->internal_alloc_cache);
+void AllocatorProcFinish(Processor *proc) {
+ allocator()->DestroyCache(&proc->alloc_cache);
+ internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}
void AllocatorPrintStats() {
@@ -98,7 +149,7 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
return allocator()->ReturnNullOrDie();
- void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
+ void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
if (p == 0)
return 0;
if (ctx && ctx->initialized)
@@ -118,9 +169,10 @@ void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
}
void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
+ ScopedGlobalProcessor sgp;
if (ctx && ctx->initialized)
OnUserFree(thr, pc, (uptr)p, true);
- allocator()->Deallocate(&thr->alloc_cache, p);
+ allocator()->Deallocate(&thr->proc()->alloc_cache, p);
if (signal)
SignalUnsafeCall(thr, pc);
}
@@ -136,7 +188,7 @@ void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
CHECK_NE(p, (void*)0);
- uptr sz = ctx->metamap.FreeBlock(thr, pc, p);
+ uptr sz = ctx->metamap.FreeBlock(thr->proc(), p);
DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
if (write && thr->ignore_reads_and_writes == 0)
MemoryRangeFreed(thr, pc, (uptr)p, sz);
@@ -164,7 +216,11 @@ uptr user_alloc_usable_size(const void *p) {
if (p == 0)
return 0;
MBlock *b = ctx->metamap.GetBlock((uptr)p);
- return b ? b->siz : 0;
+ if (!b)
+ return 0; // Not a valid pointer.
+ if (b->siz == 0)
+ return 1; // Zero-sized allocations are actually 1 byte.
+ return b->siz;
}
void invoke_malloc_hook(void *ptr, uptr size) {
@@ -172,6 +228,7 @@ void invoke_malloc_hook(void *ptr, uptr size) {
if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
return;
__sanitizer_malloc_hook(ptr, size);
+ RunMallocHooks(ptr, size);
}
void invoke_free_hook(void *ptr) {
@@ -179,6 +236,7 @@ void invoke_free_hook(void *ptr) {
if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
return;
__sanitizer_free_hook(ptr);
+ RunFreeHooks(ptr);
}
void *internal_alloc(MBlockType typ, uptr sz) {
@@ -187,7 +245,7 @@ void *internal_alloc(MBlockType typ, uptr sz) {
thr->nomalloc = 0; // CHECK calls internal_malloc().
CHECK(0);
}
- return InternalAlloc(sz, &thr->internal_alloc_cache);
+ return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}
void internal_free(void *p) {
@@ -196,7 +254,7 @@ void internal_free(void *p) {
thr->nomalloc = 0; // CHECK calls internal_malloc().
CHECK(0);
}
- InternalFree(p, &thr->internal_alloc_cache);
+ InternalFree(p, &thr->proc()->internal_alloc_cache);
}
} // namespace __tsan
@@ -238,8 +296,8 @@ uptr __sanitizer_get_allocated_size(const void *p) {
void __tsan_on_thread_idle() {
ThreadState *thr = cur_thread();
- allocator()->SwallowCache(&thr->alloc_cache);
- internal_allocator()->SwallowCache(&thr->internal_alloc_cache);
- ctx->metamap.OnThreadIdle(thr);
+ allocator()->SwallowCache(&thr->proc()->alloc_cache);
+ internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
+ ctx->metamap.OnProcIdle(thr->proc());
}
} // extern "C"
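
The GlobalProc machinery above boils down to a borrow-if-missing RAII shape:
allocator caches now live in a Processor, and a thread whose Processor is
already detached temporarily takes the shared fallback one under a mutex. A
self-contained analog (illustrative names, not runtime code):

    #include <mutex>

    struct Processor {};                    // stand-in for the runtime type
    thread_local Processor *tls_proc = nullptr;
    std::mutex global_mtx;
    Processor global_proc;

    struct ScopedGlobalProcessor {
      bool borrowed = false;
      ScopedGlobalProcessor() {
        if (tls_proc) return;       // the thread has its own proc: no-op
        global_mtx.lock();          // serialize users of the shared proc
        tls_proc = &global_proc;
        borrowed = true;
      }
      ~ScopedGlobalProcessor() {
        if (!borrowed) return;
        tls_proc = nullptr;
        global_mtx.unlock();
      }
    };
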
diff --git a/lib/tsan/rtl/tsan_mman.h b/lib/tsan/rtl/tsan_mman.h
index b419b58ca457..8cdeeb35aa6b 100644
--- a/lib/tsan/rtl/tsan_mman.h
+++ b/lib/tsan/rtl/tsan_mman.h
@@ -20,9 +20,10 @@ namespace __tsan {
const uptr kDefaultAlignment = 16;
void InitializeAllocator();
+void InitializeAllocatorLate();
void ReplaceSystemMalloc();
-void AllocatorThreadStart(ThreadState *thr);
-void AllocatorThreadFinish(ThreadState *thr);
+void AllocatorProcStart(Processor *proc);
+void AllocatorProcFinish(Processor *proc);
void AllocatorPrintStats();
// For user allocations.
diff --git a/lib/tsan/rtl/tsan_mutex.cc b/lib/tsan/rtl/tsan_mutex.cc
index 9dd24803b183..22afefce3384 100644
--- a/lib/tsan/rtl/tsan_mutex.cc
+++ b/lib/tsan/rtl/tsan_mutex.cc
@@ -43,6 +43,7 @@ static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
/*11 MutexTypeDDetector*/ {},
/*12 MutexTypeFired*/ {MutexTypeLeaf},
/*13 MutexTypeRacy*/ {MutexTypeLeaf},
+ /*14 MutexTypeGlobalProc*/ {},
};
static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
diff --git a/lib/tsan/rtl/tsan_mutex.h b/lib/tsan/rtl/tsan_mutex.h
index 27f55385c959..22ee2f33fcc7 100644
--- a/lib/tsan/rtl/tsan_mutex.h
+++ b/lib/tsan/rtl/tsan_mutex.h
@@ -34,6 +34,7 @@ enum MutexType {
MutexTypeDDetector,
MutexTypeFired,
MutexTypeRacy,
+ MutexTypeGlobalProc,
// This must be the last.
MutexTypeCount
diff --git a/lib/tsan/rtl/tsan_new_delete.cc b/lib/tsan/rtl/tsan_new_delete.cc
index ebb422cf2023..b6478bb08c57 100644
--- a/lib/tsan/rtl/tsan_new_delete.cc
+++ b/lib/tsan/rtl/tsan_new_delete.cc
@@ -23,14 +23,10 @@ struct nothrow_t {};
DECLARE_REAL(void *, malloc, uptr size)
DECLARE_REAL(void, free, void *ptr)
-#if SANITIZER_MAC || SANITIZER_ANDROID
-#define __libc_malloc REAL(malloc)
-#define __libc_free REAL(free)
-#endif
#define OPERATOR_NEW_BODY(mangled_name) \
if (cur_thread()->in_symbolizer) \
- return __libc_malloc(size); \
+ return InternalAlloc(size); \
void *p = 0; \
{ \
SCOPED_INTERCEPTOR_RAW(mangled_name, size); \
@@ -66,7 +62,7 @@ void *operator new[](__sanitizer::uptr size, std::nothrow_t const&) {
#define OPERATOR_DELETE_BODY(mangled_name) \
if (ptr == 0) return; \
if (cur_thread()->in_symbolizer) \
- return __libc_free(ptr); \
+ return InternalFree(ptr); \
invoke_free_hook(ptr); \
SCOPED_INTERCEPTOR_RAW(mangled_name, ptr); \
user_free(thr, pc, ptr);
diff --git a/lib/tsan/rtl/tsan_platform.h b/lib/tsan/rtl/tsan_platform.h
index c2b487155300..2bd6637d0ea3 100644
--- a/lib/tsan/rtl/tsan_platform.h
+++ b/lib/tsan/rtl/tsan_platform.h
@@ -297,7 +297,7 @@ struct Mapping {
static const uptr kShadowEnd = 0x050000000000ull;
static const uptr kAppMemBeg = 0x000000001000ull;
static const uptr kAppMemEnd = 0x00e000000000ull;
-}
+};
#else
# error "Unknown platform"
@@ -587,7 +587,11 @@ uptr MemToShadowImpl(uptr x) {
return (((x) & ~(Mapping::kAppMemMsk | (kShadowCell - 1)))
^ Mapping::kAppMemXor) * kShadowCnt;
#else
+# ifndef SANITIZER_WINDOWS
return ((x & ~(kShadowCell - 1)) * kShadowCnt) | Mapping::kShadowBeg;
+# else
+ return ((x & ~(kShadowCell - 1)) * kShadowCnt) + Mapping::kShadowBeg;
+# endif
#endif
}
@@ -662,7 +666,6 @@ uptr ShadowToMemImpl(uptr s) {
# ifndef SANITIZER_WINDOWS
return (s & ~Mapping::kShadowBeg) / kShadowCnt;
# else
- // FIXME(dvyukov): this is most likely wrong as the mapping is not bijection.
return (s - Mapping::kShadowBeg) / kShadowCnt;
# endif // SANITIZER_WINDOWS
#endif
@@ -754,10 +757,6 @@ void CheckAndProtect();
void InitializeShadowMemoryPlatform();
void FlushShadowMemory();
void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive);
-
-// Says whether the addr relates to a global var.
-// Guesses with high probability, may yield both false positives and negatives.
-bool IsGlobalVar(uptr addr);
int ExtractResolvFDs(void *state, int *fds, int nfd);
int ExtractRecvmsgFDs(void *msg, int *fds, int nfd);
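
The '+' vs '|' distinction in MemToShadowImpl matters because ShadowToMemImpl
inverts the Windows mapping with a subtraction: addition round-trips for every
address, while OR loses information once the scaled address overlaps the bits
of the shadow base. A small check with hypothetical constants (not the real
Windows layout):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t kShadowBeg = 0x050000000000ull, kShadowCnt = 4;
      const uint64_t x = 0x01c000000000ull;         // x * 4 overlaps the base
      uint64_t s_add = x * kShadowCnt + kShadowBeg;
      assert((s_add - kShadowBeg) / kShadowCnt == x);    // ADD round-trips
      uint64_t s_or = (x * kShadowCnt) | kShadowBeg;
      assert(((s_or & ~kShadowBeg) / kShadowCnt) != x);  // OR does not
      return 0;
    }
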
diff --git a/lib/tsan/rtl/tsan_platform_linux.cc b/lib/tsan/rtl/tsan_platform_linux.cc
index 6602561186ce..d7182fdc1a4d 100644
--- a/lib/tsan/rtl/tsan_platform_linux.cc
+++ b/lib/tsan/rtl/tsan_platform_linux.cc
@@ -18,6 +18,8 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
@@ -34,6 +36,9 @@
#include <string.h>
#include <stdarg.h>
#include <sys/mman.h>
+#if SANITIZER_LINUX
+#include <sys/personality.h>
+#endif
#include <sys/syscall.h>
#include <sys/socket.h>
#include <sys/time.h>
@@ -64,9 +69,6 @@ void *__libc_stack_end = 0;
namespace __tsan {
-static uptr g_data_start;
-static uptr g_data_end;
-
#ifdef TSAN_RUNTIME_VMA
// Runtime detected VMA size.
uptr vmaSize;
@@ -199,46 +201,6 @@ void InitializeShadowMemoryPlatform() {
MapRodata();
}
-static void InitDataSeg() {
- MemoryMappingLayout proc_maps(true);
- uptr start, end, offset;
- char name[128];
-#if SANITIZER_FREEBSD
- // On FreeBSD BSS is usually the last block allocated within the
- // low range and heap is the last block allocated within the range
- // 0x800000000-0x8ffffffff.
- while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name),
- /*protection*/ 0)) {
- DPrintf("%p-%p %p %s\n", start, end, offset, name);
- if ((start & 0xffff00000000ULL) == 0 && (end & 0xffff00000000ULL) == 0 &&
- name[0] == '\0') {
- g_data_start = start;
- g_data_end = end;
- }
- }
-#else
- bool prev_is_data = false;
- while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name),
- /*protection*/ 0)) {
- DPrintf("%p-%p %p %s\n", start, end, offset, name);
- bool is_data = offset != 0 && name[0] != 0;
- // BSS may get merged with [heap] in /proc/self/maps. This is not very
- // reliable.
- bool is_bss = offset == 0 &&
- (name[0] == 0 || internal_strcmp(name, "[heap]") == 0) && prev_is_data;
- if (g_data_start == 0 && is_data)
- g_data_start = start;
- if (is_bss)
- g_data_end = end;
- prev_is_data = is_data;
- }
-#endif
- DPrintf("guessed data_start=%p data_end=%p\n", g_data_start, g_data_end);
- CHECK_LT(g_data_start, g_data_end);
- CHECK_GE((uptr)&g_data_start, g_data_start);
- CHECK_LT((uptr)&g_data_start, g_data_end);
-}
-
#endif // #ifndef SANITIZER_GO
void InitializePlatformEarly() {
@@ -289,6 +251,20 @@ void InitializePlatform() {
SetAddressSpaceUnlimited();
reexec = true;
}
+#if SANITIZER_LINUX && defined(__aarch64__)
+  // After the patch "arm64: mm: support ARCH_MMAP_RND_BITS." was introduced
+  // into the Linux kernel, the random gap between the stack and the mapped
+  // area increased from 128M to 36G on 39-bit aarch64. As it is almost
+  // impossible to cover such a big range, we disable randomized virtual
+  // address space on aarch64.
+ int old_personality = personality(0xffffffff);
+ if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) {
+ VReport(1, "WARNING: Program is run with randomized virtual address "
+ "space, which wouldn't work with ThreadSanitizer.\n"
+ "Re-execing with fixed virtual address space.\n");
+ CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
+ reexec = true;
+ }
+#endif
if (reexec)
ReExec();
}
@@ -296,14 +272,9 @@ void InitializePlatform() {
#ifndef SANITIZER_GO
CheckAndProtect();
InitTlsSize();
- InitDataSeg();
#endif
}
-bool IsGlobalVar(uptr addr) {
- return g_data_start && addr >= g_data_start && addr < g_data_end;
-}
-
#ifndef SANITIZER_GO
// Extract file descriptors passed to glibc internal __res_iclose function.
// This is required to properly "close" the fds, because we do not see internal
@@ -361,6 +332,69 @@ int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
void ReplaceSystemMalloc() { }
#endif
+#ifndef SANITIZER_GO
+#if SANITIZER_ANDROID
+
+#if defined(__aarch64__)
+# define __get_tls() \
+ ({ void** __val; __asm__("mrs %0, tpidr_el0" : "=r"(__val)); __val; })
+#elif defined(__x86_64__)
+# define __get_tls() \
+ ({ void** __val; __asm__("mov %%fs:0, %0" : "=r"(__val)); __val; })
+#else
+#error unsupported architecture
+#endif
+
+// On Android, __thread is not supported, so we store the pointer to the
+// ThreadState object in TLS_SLOT_TSAN, the TLS slot reserved for TSan by
+// Android's bionic.
+static const int TLS_SLOT_TSAN = 8;
+// On Android, one thread can call intercepted functions after
+// DestroyThreadState(), so add a fake thread state for "dead" threads.
+static ThreadState *dead_thread_state = nullptr;
+
+ThreadState *cur_thread() {
+ ThreadState* thr = (ThreadState*)__get_tls()[TLS_SLOT_TSAN];
+ if (thr == nullptr) {
+ __sanitizer_sigset_t emptyset;
+ internal_sigfillset(&emptyset);
+ __sanitizer_sigset_t oldset;
+ CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset));
+ thr = reinterpret_cast<ThreadState*>(__get_tls()[TLS_SLOT_TSAN]);
+ if (thr == nullptr) {
+ thr = reinterpret_cast<ThreadState*>(MmapOrDie(sizeof(ThreadState),
+ "ThreadState"));
+ __get_tls()[TLS_SLOT_TSAN] = thr;
+ if (dead_thread_state == nullptr) {
+ dead_thread_state = reinterpret_cast<ThreadState*>(
+ MmapOrDie(sizeof(ThreadState), "ThreadState"));
+ dead_thread_state->fast_state.SetIgnoreBit();
+ dead_thread_state->ignore_interceptors = 1;
+ dead_thread_state->is_dead = true;
+ *const_cast<int*>(&dead_thread_state->tid) = -1;
+ CHECK_EQ(0, internal_mprotect(dead_thread_state, sizeof(ThreadState),
+ PROT_READ));
+ }
+ }
+ CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
+ }
+ return thr;
+}
+
+void cur_thread_finalize() {
+ __sanitizer_sigset_t emptyset;
+ internal_sigfillset(&emptyset);
+ __sanitizer_sigset_t oldset;
+ CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset));
+ ThreadState* thr = (ThreadState*)__get_tls()[TLS_SLOT_TSAN];
+ if (thr != dead_thread_state) {
+ __get_tls()[TLS_SLOT_TSAN] = dead_thread_state;
+ UnmapOrDie(thr, sizeof(ThreadState));
+ }
+ CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
+}
+#endif // SANITIZER_ANDROID
+#endif // ifndef SANITIZER_GO
+
} // namespace __tsan
#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
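
The aarch64 hunk above re-execs through the runtime's ReExec(); the same
personality(2) dance in standalone form (illustrative sketch, glibc/Linux
only):

    #include <sys/personality.h>
    #include <unistd.h>

    extern char **environ;

    static void MaybeDisableAslr(char **argv) {
      int old = personality(0xffffffff);          // query the current persona
      if (old != -1 && !(old & ADDR_NO_RANDOMIZE)) {
        personality(old | ADDR_NO_RANDOMIZE);     // turn off mmap/stack ASLR
        execve("/proc/self/exe", argv, environ);  // restart with a fixed layout
      }
    }

    int main(int argc, char **argv) {
      (void)argc;
      MaybeDisableAslr(argv);
      return 0;
    }
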
diff --git a/lib/tsan/rtl/tsan_platform_mac.cc b/lib/tsan/rtl/tsan_platform_mac.cc
index 31caf37dee5a..0cc02ab87a3e 100644
--- a/lib/tsan/rtl/tsan_platform_mac.cc
+++ b/lib/tsan/rtl/tsan_platform_mac.cc
@@ -67,20 +67,18 @@ static void *SignalSafeGetOrAllocate(uptr *dst, uptr size) {
// when TLVs are not accessible (early process startup, thread cleanup, ...).
// The following provides a "poor man's TLV" implementation, where we use the
// shadow memory of the pointer returned by pthread_self() to store a pointer to
-// the ThreadState object. The main thread's ThreadState pointer is stored
-// separately in a static variable, because we need to access it even before the
+// the ThreadState object. The main thread's ThreadState is stored separately
+// in a static variable, because we need to access it even before the
// shadow memory is set up.
static uptr main_thread_identity = 0;
-static ThreadState *main_thread_state = nullptr;
+ALIGNED(64) static char main_thread_state[sizeof(ThreadState)];
ThreadState *cur_thread() {
- ThreadState **fake_tls;
uptr thread_identity = (uptr)pthread_self();
if (thread_identity == main_thread_identity || main_thread_identity == 0) {
- fake_tls = &main_thread_state;
- } else {
- fake_tls = (ThreadState **)MemToShadow(thread_identity);
+ return (ThreadState *)&main_thread_state;
}
+ ThreadState **fake_tls = (ThreadState **)MemToShadow(thread_identity);
ThreadState *thr = (ThreadState *)SignalSafeGetOrAllocate(
(uptr *)fake_tls, sizeof(ThreadState));
return thr;
@@ -91,7 +89,11 @@ ThreadState *cur_thread() {
// handler will try to access the unmapped ThreadState.
void cur_thread_finalize() {
uptr thread_identity = (uptr)pthread_self();
- CHECK_NE(thread_identity, main_thread_identity);
+ if (thread_identity == main_thread_identity) {
+ // Calling dispatch_main() or xpc_main() actually invokes pthread_exit to
+ // exit the main thread. Let's keep the main thread's ThreadState.
+ return;
+ }
ThreadState **fake_tls = (ThreadState **)MemToShadow(thread_identity);
internal_munmap(*fake_tls, sizeof(ThreadState));
*fake_tls = nullptr;
@@ -131,10 +133,12 @@ static void my_pthread_introspection_hook(unsigned int event, pthread_t thread,
if (event == PTHREAD_INTROSPECTION_THREAD_CREATE) {
if (thread == pthread_self()) {
// The current thread is a newly created GCD worker thread.
+ ThreadState *thr = cur_thread();
+ Processor *proc = ProcCreate();
+ ProcWire(proc, thr);
ThreadState *parent_thread_state = nullptr; // No parent.
int tid = ThreadCreate(parent_thread_state, 0, (uptr)thread, true);
CHECK_NE(tid, 0);
- ThreadState *thr = cur_thread();
ThreadStart(thr, tid, GetTid());
}
} else if (event == PTHREAD_INTROSPECTION_THREAD_TERMINATE) {
@@ -183,10 +187,6 @@ int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
}
#endif
-bool IsGlobalVar(uptr addr) {
- return false;
-}
-
} // namespace __tsan
#endif // SANITIZER_MAC
diff --git a/lib/tsan/rtl/tsan_platform_posix.cc b/lib/tsan/rtl/tsan_platform_posix.cc
index 90476cbc5fd5..805ce1be4bde 100644
--- a/lib/tsan/rtl/tsan_platform_posix.cc
+++ b/lib/tsan/rtl/tsan_platform_posix.cc
@@ -105,7 +105,7 @@ static void ProtectRange(uptr beg, uptr end) {
CHECK_LE(beg, end);
if (beg == end)
return;
- if (beg != (uptr)MmapNoAccess(beg, end - beg)) {
+ if (beg != (uptr)MmapFixedNoAccess(beg, end - beg)) {
Printf("FATAL: ThreadSanitizer can not protect [%zx,%zx]\n", beg, end);
Printf("FATAL: Make sure you are not using unlimited stack\n");
Die();
diff --git a/lib/tsan/rtl/tsan_preinit.cc b/lib/tsan/rtl/tsan_preinit.cc
new file mode 100644
index 000000000000..a96618d7dc81
--- /dev/null
+++ b/lib/tsan/rtl/tsan_preinit.cc
@@ -0,0 +1,27 @@
+//===-- tsan_preinit.cc ---------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer.
+//
+// Call __tsan_init at the very early stage of process startup.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "tsan_interface.h"
+
+#if SANITIZER_CAN_USE_PREINIT_ARRAY
+
+// The symbol is called __local_tsan_preinit because it is not intended to be
+// exported.
+// This code is linked into the main executable when -fsanitize=thread is in
+// the link flags. It can only use exported interface functions.
+__attribute__((section(".preinit_array"), used))
+void (*__local_tsan_preinit)(void) = __tsan_init;
+
+#endif
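
For readers unfamiliar with the mechanism: function pointers placed in .preinit_array are invoked by the ELF loader for the main executable before any constructors run, which is what lets __tsan_init win the initialization race. A standalone illustration (not TSan code; write() is used because stdio may not be fully initialized that early):

#include <unistd.h>

static void early(void) {
  const char msg[] = "runs before any constructors\n";
  write(2, msg, sizeof(msg) - 1);
}

// Same mechanism as __local_tsan_preinit above; Linux/ELF main executables
// only, since the loader ignores .preinit_array in shared objects.
__attribute__((section(".preinit_array"), used))
static void (*early_ptr)(void) = early;

int main(void) { return 0; }
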
diff --git a/lib/tsan/rtl/tsan_report.cc b/lib/tsan/rtl/tsan_report.cc
index c1d2fd07c0d9..93604946ac38 100644
--- a/lib/tsan/rtl/tsan_report.cc
+++ b/lib/tsan/rtl/tsan_report.cc
@@ -96,6 +96,8 @@ static const char *ReportTypeString(ReportType typ) {
return "destroy of a locked mutex";
if (typ == ReportTypeMutexDoubleLock)
return "double lock of a mutex";
+ if (typ == ReportTypeMutexInvalidAccess)
+ return "use of an invalid mutex (e.g. uninitialized or destroyed)";
if (typ == ReportTypeMutexBadUnlock)
return "unlock of an unlocked mutex (or by a wrong thread)";
if (typ == ReportTypeMutexBadReadLock)
@@ -234,7 +236,7 @@ static void PrintThread(const ReportThread *rt) {
Printf(" '%s'", rt->name);
char thrbuf[kThreadBufSize];
Printf(" (tid=%zu, %s) created by %s",
- rt->pid, rt->running ? "running" : "finished",
+ rt->os_id, rt->running ? "running" : "finished",
thread_name(thrbuf, rt->parent_tid));
if (rt->stack)
Printf(" at:");
@@ -379,9 +381,9 @@ void PrintStack(const ReportStack *ent) {
static void PrintMop(const ReportMop *mop, bool first) {
Printf("\n");
- Printf("%s by ",
+ Printf("%s at %p by ",
(first ? (mop->write ? "Write" : "Read")
- : (mop->write ? "Previous write" : "Previous read")));
+ : (mop->write ? "Previous write" : "Previous read")), mop->addr);
if (mop->tid == kMainThreadId)
Printf("main goroutine:\n");
else
@@ -389,6 +391,31 @@ static void PrintMop(const ReportMop *mop, bool first) {
PrintStack(mop->stack);
}
+static void PrintLocation(const ReportLocation *loc) {
+ switch (loc->type) {
+ case ReportLocationHeap: {
+ Printf("\n");
+ Printf("Heap block of size %zu at %p allocated by ",
+ loc->heap_chunk_size, loc->heap_chunk_start);
+ if (loc->tid == kMainThreadId)
+ Printf("main goroutine:\n");
+ else
+ Printf("goroutine %d:\n", loc->tid);
+ PrintStack(loc->stack);
+ break;
+ }
+ case ReportLocationGlobal: {
+ Printf("\n");
+ Printf("Global var %s of size %zu at %p declared at %s:%zu\n",
+ loc->global.name, loc->global.size, loc->global.start,
+ loc->global.file, loc->global.line);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
static void PrintThread(const ReportThread *rt) {
if (rt->id == kMainThreadId)
return;
@@ -404,6 +431,8 @@ void PrintReport(const ReportDesc *rep) {
Printf("WARNING: DATA RACE");
for (uptr i = 0; i < rep->mops.Size(); i++)
PrintMop(rep->mops[i], i == 0);
+ for (uptr i = 0; i < rep->locs.Size(); i++)
+ PrintLocation(rep->locs[i]);
for (uptr i = 0; i < rep->threads.Size(); i++)
PrintThread(rep->threads[i]);
} else if (rep->typ == ReportTypeDeadlock) {
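
With PrintLocation wired into PrintReport, Go-mode race reports gain a location section. A hypothetical fragment matching the format strings above (names, sizes, and addresses invented):

Read at 0x00c000086010 by goroutine 9:
  <access stack>

Heap block of size 48 at 0x00c000086000 allocated by goroutine 7:
  <allocation stack>

Global var main.counter of size 8 at 0x000000526d80 declared at main.go:12
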
diff --git a/lib/tsan/rtl/tsan_report.h b/lib/tsan/rtl/tsan_report.h
index 3e344a048e43..d0b9d7458bf8 100644
--- a/lib/tsan/rtl/tsan_report.h
+++ b/lib/tsan/rtl/tsan_report.h
@@ -27,6 +27,7 @@ enum ReportType {
ReportTypeThreadLeak,
ReportTypeMutexDestroyLocked,
ReportTypeMutexDoubleLock,
+ ReportTypeMutexInvalidAccess,
ReportTypeMutexBadUnlock,
ReportTypeMutexBadReadLock,
ReportTypeMutexBadReadUnlock,
@@ -86,7 +87,7 @@ struct ReportLocation {
struct ReportThread {
int id;
- uptr pid;
+ uptr os_id;
bool running;
char *name;
int parent_tid;
diff --git a/lib/tsan/rtl/tsan_rtl.cc b/lib/tsan/rtl/tsan_rtl.cc
index 4df4db557a24..629871ef8f7a 100644
--- a/lib/tsan/rtl/tsan_rtl.cc
+++ b/lib/tsan/rtl/tsan_rtl.cc
@@ -321,6 +321,7 @@ void Initialize(ThreadState *thr) {
const char *options = GetEnv(kTsanOptionsEnv);
CacheBinaryName();
InitializeFlags(&ctx->flags, options);
+ AvoidCVE_2016_2143();
InitializePlatformEarly();
#ifndef SANITIZER_GO
// Re-exec ourselves if we need to set additional env or command line args.
@@ -329,6 +330,10 @@ void Initialize(ThreadState *thr) {
InitializeAllocator();
ReplaceSystemMalloc();
#endif
+ if (common_flags()->detect_deadlocks)
+ ctx->dd = DDetector::Create(flags());
+ Processor *proc = ProcCreate();
+ ProcWire(proc, thr);
InitializeInterceptors();
CheckShadowMapping();
InitializePlatform();
@@ -336,6 +341,7 @@ void Initialize(ThreadState *thr) {
InitializeDynamicAnnotations();
#ifndef SANITIZER_GO
InitializeShadowMemory();
+ InitializeAllocatorLate();
#endif
// Setup correct file descriptor for error reports.
__sanitizer_set_report_path(common_flags()->log_path);
@@ -351,8 +357,6 @@ void Initialize(ThreadState *thr) {
SetSandboxingCallback(StopBackgroundThread);
#endif
#endif
- if (common_flags()->detect_deadlocks)
- ctx->dd = DDetector::Create(flags());
VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
(int)internal_getpid());
@@ -366,6 +370,10 @@ void Initialize(ThreadState *thr) {
#endif
ctx->initialized = true;
+#ifndef SANITIZER_GO
+ Symbolizer::LateInitialize();
+#endif
+
if (flags()->stop_on_start) {
Printf("ThreadSanitizer is suspended at startup (pid %d)."
" Call __tsan_resume().\n",
diff --git a/lib/tsan/rtl/tsan_rtl.h b/lib/tsan/rtl/tsan_rtl.h
index 04104b162f98..ff69015660b6 100644
--- a/lib/tsan/rtl/tsan_rtl.h
+++ b/lib/tsan/rtl/tsan_rtl.h
@@ -325,6 +325,36 @@ struct JmpBuf {
uptr *shadow_stack_pos;
};
+// A Processor represents a physical thread, or a P for Go.
+// It is used to store internal resources like allocator caches, and it does
+// not participate in race-detection logic (it is invisible to the end user).
+// In C++ it is tied to an OS thread just like ThreadState; ideally, however,
+// it would be tied to a CPU (that way we would have fewer allocator caches).
+// In Go it is tied to a P, so there are significantly fewer Processors than
+// ThreadStates (which are tied to Gs).
+// A ThreadState must be wired with a Processor to handle events.
+struct Processor {
+ ThreadState *thr; // currently wired thread, or nullptr
+#ifndef SANITIZER_GO
+ AllocatorCache alloc_cache;
+ InternalAllocatorCache internal_alloc_cache;
+#endif
+ DenseSlabAllocCache block_cache;
+ DenseSlabAllocCache sync_cache;
+ DenseSlabAllocCache clock_cache;
+ DDPhysicalThread *dd_pt;
+};
+
+#ifndef SANITIZER_GO
+// ScopedGlobalProcessor temporarily sets up a global processor for the
+// current thread, if it does not have one. Intended for interceptors that
+// can run at thread exit, when the thread's processor is already destroyed.
+struct ScopedGlobalProcessor {
+ ScopedGlobalProcessor();
+ ~ScopedGlobalProcessor();
+};
+#endif
+
// This struct is stored in TLS.
struct ThreadState {
FastState fast_state;
@@ -360,8 +390,6 @@ struct ThreadState {
MutexSet mset;
ThreadClock clock;
#ifndef SANITIZER_GO
- AllocatorCache alloc_cache;
- InternalAllocatorCache internal_alloc_cache;
Vector<JmpBuf> jmp_bufs;
int ignore_interceptors;
#endif
@@ -385,16 +413,19 @@ struct ThreadState {
#if SANITIZER_DEBUG && !SANITIZER_GO
InternalDeadlockDetector internal_deadlock_detector;
#endif
- DDPhysicalThread *dd_pt;
DDLogicalThread *dd_lt;
+ // Current wired Processor, or nullptr. Required to handle any events.
+ Processor *proc1;
+#ifndef SANITIZER_GO
+ Processor *proc() { return proc1; }
+#else
+ Processor *proc();
+#endif
+
atomic_uintptr_t in_signal_handler;
ThreadSignalContext *signal_ctx;
- DenseSlabAllocCache block_cache;
- DenseSlabAllocCache sync_cache;
- DenseSlabAllocCache clock_cache;
-
#ifndef SANITIZER_GO
u32 last_sleep_stack_id;
ThreadClock last_sleep_clock;
@@ -404,6 +435,8 @@ struct ThreadState {
// If set, malloc must not be called.
int nomalloc;
+ const ReportDesc *current_report;
+
explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
unsigned reuse_count,
uptr stk_addr, uptr stk_size,
@@ -411,7 +444,7 @@ struct ThreadState {
};
#ifndef SANITIZER_GO
-#if SANITIZER_MAC
+#if SANITIZER_MAC || SANITIZER_ANDROID
ThreadState *cur_thread();
void cur_thread_finalize();
#else
@@ -421,7 +454,7 @@ INLINE ThreadState *cur_thread() {
return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
}
INLINE void cur_thread_finalize() { }
-#endif // SANITIZER_MAC
+#endif // SANITIZER_MAC || SANITIZER_ANDROID
#endif // SANITIZER_GO
class ThreadContext : public ThreadContextBase {
@@ -683,6 +716,11 @@ void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignals(ThreadState *thr);
+Processor *ProcCreate();
+void ProcDestroy(Processor *proc);
+void ProcWire(Processor *proc, ThreadState *thr);
+void ProcUnwire(Processor *proc, ThreadState *thr);
+
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
bool rw, bool recursive, bool linker_init);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
@@ -693,6 +731,7 @@ void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool try_lock = false);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr); // call on EOWNERDEAD
+void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr);
void Acquire(ThreadState *thr, uptr pc, uptr addr);
// AcquireGlobal synchronizes the current thread with all other threads.
diff --git a/lib/tsan/rtl/tsan_rtl_mutex.cc b/lib/tsan/rtl/tsan_rtl_mutex.cc
index 62ab7aa6b2b4..1806acf063bc 100644
--- a/lib/tsan/rtl/tsan_rtl_mutex.cc
+++ b/lib/tsan/rtl/tsan_rtl_mutex.cc
@@ -32,7 +32,7 @@ struct Callback : DDCallback {
Callback(ThreadState *thr, uptr pc)
: thr(thr)
, pc(pc) {
- DDCallback::pt = thr->dd_pt;
+ DDCallback::pt = thr->proc()->dd_pt;
DDCallback::lt = thr->dd_lt;
}
@@ -84,21 +84,14 @@ void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
StatInc(thr, StatMutexDestroy);
-#ifndef SANITIZER_GO
- // Global mutexes not marked as LINKER_INITIALIZED
- // cause tons of not interesting reports, so just ignore it.
- if (IsGlobalVar(addr))
- return;
-#endif
- if (IsAppMem(addr)) {
- CHECK(!thr->is_freeing);
- thr->is_freeing = true;
- MemoryWrite(thr, pc, addr, kSizeLog1);
- thr->is_freeing = false;
- }
- SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
if (s == 0)
return;
+ if (s->is_linker_init) {
+    // Destroy is a no-op for linker-initialized mutexes.
+ s->mtx.Unlock();
+ return;
+ }
if (common_flags()->detect_deadlocks) {
Callback cb(thr, pc);
ctx->dd->MutexDestroy(&cb, &s->dd);
@@ -114,7 +107,7 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
u64 mid = s->GetId();
u32 last_lock = s->last_lock;
if (!unlock_locked)
- s->Reset(thr); // must not reset it before the report is printed
+ s->Reset(thr->proc()); // must not reset it before the report is printed
s->mtx.Unlock();
if (unlock_locked) {
ThreadRegistryLock l(ctx->thread_registry);
@@ -128,15 +121,23 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
rep.AddStack(trace, true);
rep.AddLocation(addr, 1);
OutputReport(thr, rep);
- }
- if (unlock_locked) {
- SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
+
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
if (s != 0) {
- s->Reset(thr);
+ s->Reset(thr->proc());
s->mtx.Unlock();
}
}
thr->mset.Remove(mid);
+ // Imitate a memory write to catch unlock-destroy races.
+  // Do this outside of the sync mutex, because reporting the race may itself
+  // need to lock sync mutexes.
+ if (IsAppMem(addr)) {
+ CHECK(!thr->is_freeing);
+ thr->is_freeing = true;
+ MemoryWrite(thr, pc, addr, kSizeLog1);
+ thr->is_freeing = false;
+ }
// s will be destroyed and freed in MetaMap::FreeBlock.
}
@@ -350,11 +351,21 @@ void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
s->mtx.Unlock();
}
+void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+ u64 mid = s->GetId();
+ s->mtx.Unlock();
+ ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid);
+}
+
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, false);
+ if (!s)
+ return;
AcquireImpl(thr, pc, &s->clock);
s->mtx.ReadUnlock();
}
@@ -426,7 +437,7 @@ void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
if (thr->ignore_sync)
return;
thr->clock.set(thr->fast_state.epoch());
- thr->clock.acquire(&thr->clock_cache, c);
+ thr->clock.acquire(&thr->proc()->clock_cache, c);
StatInc(thr, StatSyncAcquire);
}
@@ -435,7 +446,7 @@ void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
return;
thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.release(&thr->clock_cache, c);
+ thr->clock.release(&thr->proc()->clock_cache, c);
StatInc(thr, StatSyncRelease);
}
@@ -444,7 +455,7 @@ void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
return;
thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.ReleaseStore(&thr->clock_cache, c);
+ thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
StatInc(thr, StatSyncRelease);
}
@@ -453,7 +464,7 @@ void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
return;
thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.acq_rel(&thr->clock_cache, c);
+ thr->clock.acq_rel(&thr->proc()->clock_cache, c);
StatInc(thr, StatSyncAcquire);
StatInc(thr, StatSyncRelease);
}
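
The new MutexInvalidAccess path surfaces as the "use of an invalid mutex" report added to tsan_report.cc above. A deliberately erroneous trigger, assuming the pthread interceptors (only summarized in the diffstat) report when the real call fails with EINVAL; whether a destroyed mutex actually fails that way is libc-dependent:

#include <pthread.h>

int main() {
  pthread_mutex_t m;
  pthread_mutex_init(&m, nullptr);
  pthread_mutex_destroy(&m);
  // Undefined behavior by POSIX; under -fsanitize=thread this may produce
  // a "use of an invalid mutex" (mutex-invalid-access) report.
  pthread_mutex_lock(&m);
  return 0;
}
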
diff --git a/lib/tsan/rtl/tsan_rtl_proc.cc b/lib/tsan/rtl/tsan_rtl_proc.cc
new file mode 100644
index 000000000000..0c838a1388f5
--- /dev/null
+++ b/lib/tsan/rtl/tsan_rtl_proc.cc
@@ -0,0 +1,61 @@
+//===-- tsan_rtl_proc.cc ------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "tsan_flags.h"
+
+namespace __tsan {
+
+Processor *ProcCreate() {
+ void *mem = InternalAlloc(sizeof(Processor));
+ internal_memset(mem, 0, sizeof(Processor));
+ Processor *proc = new(mem) Processor;
+ proc->thr = nullptr;
+#ifndef SANITIZER_GO
+ AllocatorProcStart(proc);
+#endif
+ if (common_flags()->detect_deadlocks)
+ proc->dd_pt = ctx->dd->CreatePhysicalThread();
+ return proc;
+}
+
+void ProcDestroy(Processor *proc) {
+ CHECK_EQ(proc->thr, nullptr);
+#ifndef SANITIZER_GO
+ AllocatorProcFinish(proc);
+#endif
+ ctx->clock_alloc.FlushCache(&proc->clock_cache);
+ ctx->metamap.OnProcIdle(proc);
+ if (common_flags()->detect_deadlocks)
+ ctx->dd->DestroyPhysicalThread(proc->dd_pt);
+ proc->~Processor();
+ InternalFree(proc);
+}
+
+void ProcWire(Processor *proc, ThreadState *thr) {
+ CHECK_EQ(thr->proc1, nullptr);
+ CHECK_EQ(proc->thr, nullptr);
+ thr->proc1 = proc;
+ proc->thr = thr;
+}
+
+void ProcUnwire(Processor *proc, ThreadState *thr) {
+ CHECK_EQ(thr->proc1, proc);
+ CHECK_EQ(proc->thr, thr);
+ thr->proc1 = nullptr;
+ proc->thr = nullptr;
+}
+
+} // namespace __tsan
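
Pieced together from the CHECK_EQs, the intended wiring protocol is: a Processor is created per physical thread (or per P in Go), wired to at most one ThreadState at a time, and destroyed only while unwired. Schematically (these are the runtime's own entry points from tsan_rtl.h; this is not a standalone program):

Processor *proc = ProcCreate();   // fresh proc, proc->thr == nullptr
ProcWire(proc, thr);              // requires thr->proc1 == nullptr
// ... the wired thread may now allocate, sync, and free;
//     allocator/sync/clock caches all live in proc ...
ProcUnwire(proc, thr);            // detaches both sides
ProcDestroy(proc);                // flushes caches; CHECKs proc is unwired
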
diff --git a/lib/tsan/rtl/tsan_rtl_report.cc b/lib/tsan/rtl/tsan_rtl_report.cc
index 5aff6ca56adf..810119b93a7a 100644
--- a/lib/tsan/rtl/tsan_rtl_report.cc
+++ b/lib/tsan/rtl/tsan_rtl_report.cc
@@ -56,6 +56,11 @@ bool OnReport(const ReportDesc *rep, bool suppressed) {
}
#endif
+SANITIZER_WEAK_DEFAULT_IMPL
+void __tsan_on_report(const ReportDesc *rep) {
+ (void)rep;
+}
+
static void StackStripMain(SymbolizedStack *frames) {
SymbolizedStack *last_frame = nullptr;
SymbolizedStack *last_frame2 = nullptr;
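
Because __tsan_on_report is defined with SANITIZER_WEAK_DEFAULT_IMPL, a program or test can supply a strong definition to observe each report as it is produced (it is called from OutputReport below, right after PrintReport). A hypothetical override; the pointer is best treated as an opaque handle and inspected via the __tsan_get_report_* debugging API from the new tsan_debugging.cc:

#include <stdio.h>

// Strong definition overrides the runtime's weak default. Internally the
// argument is a ReportDesc*; user code should treat it as opaque.
extern "C" void __tsan_on_report(void *report) {
  fprintf(stderr, "tsan produced a report: %p\n", report);
}
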
@@ -189,7 +194,7 @@ void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) {
ReportThread *rt = new(mem) ReportThread;
rep_->threads.PushBack(rt);
rt->id = tctx->tid;
- rt->pid = tctx->os_id;
+ rt->os_id = tctx->os_id;
rt->running = (tctx->status == ThreadStatusRunning);
rt->name = internal_strdup(tctx->name);
rt->parent_tid = tctx->parent_tid;
@@ -268,7 +273,7 @@ u64 ScopedReport::AddMutex(u64 id) {
u64 uid = 0;
u64 mid = id;
uptr addr = SyncVar::SplitId(id, &uid);
- SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
// Check that the mutex is still alive.
// Another mutex can be created at the same address,
// so check uid as well.
@@ -342,12 +347,12 @@ void ScopedReport::AddLocation(uptr addr, uptr size) {
rep_->locs.PushBack(loc);
AddThread(tctx);
}
+#endif
if (ReportLocation *loc = SymbolizeData(addr)) {
loc->suppressable = true;
rep_->locs.PushBack(loc);
return;
}
-#endif
}
#ifndef SANITIZER_GO
@@ -492,6 +497,8 @@ bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
return false;
atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
const ReportDesc *rep = srep.GetReport();
+ CHECK_EQ(thr->current_report, nullptr);
+ thr->current_report = rep;
Suppression *supp = 0;
uptr pc_or_addr = 0;
for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
@@ -512,13 +519,17 @@ bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
thr->is_freeing = false;
bool suppressed = OnReport(rep, pc_or_addr != 0);
thr->is_freeing = old_is_freeing;
- if (suppressed)
+ if (suppressed) {
+ thr->current_report = nullptr;
return false;
+ }
}
PrintReport(rep);
+ __tsan_on_report(rep);
ctx->nreported++;
if (flags()->halt_on_error)
Die();
+ thr->current_report = nullptr;
return true;
}
@@ -669,6 +680,14 @@ void PrintCurrentStack(ThreadState *thr, uptr pc) {
PrintStack(SymbolizeStack(trace));
}
+// Always inline PrintCurrentStackSlow, because LocatePcInTrace assumes
+// __sanitizer_print_stack_trace exists in the actual unwound stack, but a
+// tail-call to PrintCurrentStackSlow breaks this assumption because
+// __sanitizer_print_stack_trace disappears after the tail-call.
+// However, this solution is not reliable enough; see dvyukov's comment
+// http://reviews.llvm.org/D19148#406208
+// Also see PR27280 comments 2 and 3 for breaking examples and analysis.
+ALWAYS_INLINE
void PrintCurrentStackSlow(uptr pc) {
#ifndef SANITIZER_GO
BufferedStackTrace *ptrace =
diff --git a/lib/tsan/rtl/tsan_rtl_thread.cc b/lib/tsan/rtl/tsan_rtl_thread.cc
index dcae255f7643..ab8f3c38a960 100644
--- a/lib/tsan/rtl/tsan_rtl_thread.cc
+++ b/lib/tsan/rtl/tsan_rtl_thread.cc
@@ -42,7 +42,7 @@ void ThreadContext::OnDead() {
void ThreadContext::OnJoined(void *arg) {
ThreadState *caller_thr = static_cast<ThreadState *>(arg);
AcquireImpl(caller_thr, 0, &sync);
- sync.Reset(&caller_thr->clock_cache);
+ sync.Reset(&caller_thr->proc()->clock_cache);
}
struct OnCreatedArgs {
@@ -74,7 +74,7 @@ void ThreadContext::OnReset() {
void ThreadContext::OnDetached(void *arg) {
ThreadState *thr1 = static_cast<ThreadState*>(arg);
- sync.Reset(&thr1->clock_cache);
+ sync.Reset(&thr1->proc()->clock_cache);
}
struct OnStartedArgs {
@@ -106,13 +106,8 @@ void ThreadContext::OnStarted(void *arg) {
thr->shadow_stack_pos = thr->shadow_stack;
thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
#endif
-#ifndef SANITIZER_GO
- AllocatorThreadStart(thr);
-#endif
- if (common_flags()->detect_deadlocks) {
- thr->dd_pt = ctx->dd->CreatePhysicalThread();
+ if (common_flags()->detect_deadlocks)
thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
- }
thr->fast_state.SetHistorySize(flags()->history_size);
// Commit switch to the new part of the trace.
// TraceAddEvent will reset stack0/mset0 in the new part for us.
@@ -121,7 +116,7 @@ void ThreadContext::OnStarted(void *arg) {
thr->fast_synch_epoch = epoch0;
AcquireImpl(thr, 0, &sync);
StatInc(thr, StatSyncAcquire);
- sync.Reset(&thr->clock_cache);
+ sync.Reset(&thr->proc()->clock_cache);
thr->is_inited = true;
DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
"tls_addr=%zx tls_size=%zx\n",
@@ -138,15 +133,8 @@ void ThreadContext::OnFinished() {
}
epoch1 = thr->fast_state.epoch();
- if (common_flags()->detect_deadlocks) {
- ctx->dd->DestroyPhysicalThread(thr->dd_pt);
+ if (common_flags()->detect_deadlocks)
ctx->dd->DestroyLogicalThread(thr->dd_lt);
- }
- ctx->clock_alloc.FlushCache(&thr->clock_cache);
- ctx->metamap.OnThreadIdle(thr);
-#ifndef SANITIZER_GO
- AllocatorThreadFinish(thr);
-#endif
thr->~ThreadState();
#if TSAN_COLLECT_STATS
StatAggregate(ctx->stat, thr->stat);
diff --git a/lib/tsan/rtl/tsan_stat.cc b/lib/tsan/rtl/tsan_stat.cc
index a5cca9679582..d1d6ed24d991 100644
--- a/lib/tsan/rtl/tsan_stat.cc
+++ b/lib/tsan/rtl/tsan_stat.cc
@@ -168,6 +168,7 @@ void StatOutput(u64 *stat) {
name[StatMtxFired] = " FiredSuppressions ";
name[StatMtxRacy] = " RacyStacks ";
name[StatMtxFD] = " FD ";
+ name[StatMtxGlobalProc] = " GlobalProc ";
Printf("Statistics:\n");
for (int i = 0; i < StatCnt; i++)
diff --git a/lib/tsan/rtl/tsan_stat.h b/lib/tsan/rtl/tsan_stat.h
index 8ea32048e147..8447dd84fc17 100644
--- a/lib/tsan/rtl/tsan_stat.h
+++ b/lib/tsan/rtl/tsan_stat.h
@@ -173,6 +173,7 @@ enum StatType {
StatMtxFired,
StatMtxRacy,
StatMtxFD,
+ StatMtxGlobalProc,
// This must be the last.
StatCnt
diff --git a/lib/tsan/rtl/tsan_suppressions.cc b/lib/tsan/rtl/tsan_suppressions.cc
index b992d78f8cb5..aea3cb978cb1 100644
--- a/lib/tsan/rtl/tsan_suppressions.cc
+++ b/lib/tsan/rtl/tsan_suppressions.cc
@@ -80,6 +80,8 @@ static const char *conv(ReportType typ) {
return kSuppressionMutex;
else if (typ == ReportTypeMutexDoubleLock)
return kSuppressionMutex;
+ else if (typ == ReportTypeMutexInvalidAccess)
+ return kSuppressionMutex;
else if (typ == ReportTypeMutexBadUnlock)
return kSuppressionMutex;
else if (typ == ReportTypeMutexBadReadLock)
@@ -92,7 +94,7 @@ static const char *conv(ReportType typ) {
return kSuppressionNone;
else if (typ == ReportTypeDeadlock)
return kSuppressionDeadlock;
- Printf("ThreadSanitizer: unknown report type %d\n", typ),
+ Printf("ThreadSanitizer: unknown report type %d\n", typ);
Die();
}
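
Since the new report type converts to kSuppressionMutex, it is silenced by the same "mutex:" entries as the other mutex misuse reports. A hypothetical suppressions file (the pattern name is invented):

# passed via TSAN_OPTIONS=suppressions=tsan.supp
mutex:ThirdPartyLockWrapper
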
diff --git a/lib/tsan/rtl/tsan_sync.cc b/lib/tsan/rtl/tsan_sync.cc
index 4202d30a2e17..58b26802b0e7 100644
--- a/lib/tsan/rtl/tsan_sync.cc
+++ b/lib/tsan/rtl/tsan_sync.cc
@@ -36,7 +36,7 @@ void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
DDMutexInit(thr, pc, this);
}
-void SyncVar::Reset(ThreadState *thr) {
+void SyncVar::Reset(Processor *proc) {
uid = 0;
creation_stack_id = 0;
owner_tid = kInvalidTid;
@@ -47,12 +47,12 @@ void SyncVar::Reset(ThreadState *thr) {
is_broken = 0;
is_linker_init = 0;
- if (thr == 0) {
+ if (proc == 0) {
CHECK_EQ(clock.size(), 0);
CHECK_EQ(read_clock.size(), 0);
} else {
- clock.Reset(&thr->clock_cache);
- read_clock.Reset(&thr->clock_cache);
+ clock.Reset(&proc->clock_cache);
+ read_clock.Reset(&proc->clock_cache);
}
}
@@ -61,7 +61,7 @@ MetaMap::MetaMap() {
}
void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
- u32 idx = block_alloc_.Alloc(&thr->block_cache);
+ u32 idx = block_alloc_.Alloc(&thr->proc()->block_cache);
MBlock *b = block_alloc_.Map(idx);
b->siz = sz;
b->tid = thr->tid;
@@ -71,16 +71,16 @@ void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
*meta = idx | kFlagBlock;
}
-uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) {
+uptr MetaMap::FreeBlock(Processor *proc, uptr p) {
MBlock* b = GetBlock(p);
if (b == 0)
return 0;
uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
- FreeRange(thr, pc, p, sz);
+ FreeRange(proc, p, sz);
return sz;
}
-bool MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
bool has_something = false;
u32 *meta = MemToMeta(p);
u32 *end = MemToMeta(p + sz);
@@ -96,14 +96,14 @@ bool MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
has_something = true;
while (idx != 0) {
if (idx & kFlagBlock) {
- block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask);
+ block_alloc_.Free(&proc->block_cache, idx & ~kFlagMask);
break;
} else if (idx & kFlagSync) {
DCHECK(idx & kFlagSync);
SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
u32 next = s->next;
- s->Reset(thr);
- sync_alloc_.Free(&thr->sync_cache, idx & ~kFlagMask);
+ s->Reset(proc);
+ sync_alloc_.Free(&proc->sync_cache, idx & ~kFlagMask);
idx = next;
} else {
CHECK(0);
@@ -119,24 +119,30 @@ bool MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
// which can be huge. The function probes pages one-by-one until it finds a page
// without meta objects; at that point it stops freeing meta objects. Because
// thread stacks grow top-down, we do the same starting from end as well.
-void MetaMap::ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
+ if (kGoMode) {
+    // UnmapOrDie/MmapFixedNoReserve do not work on Windows,
+ // so we do the optimization only for C/C++.
+ FreeRange(proc, p, sz);
+ return;
+ }
const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
if (sz <= 4 * kPageSize) {
// If the range is small, just do the normal free procedure.
- FreeRange(thr, pc, p, sz);
+ FreeRange(proc, p, sz);
return;
}
// First, round both ends of the range to page size.
uptr diff = RoundUp(p, kPageSize) - p;
if (diff != 0) {
- FreeRange(thr, pc, p, diff);
+ FreeRange(proc, p, diff);
p += diff;
sz -= diff;
}
diff = p + sz - RoundDown(p + sz, kPageSize);
if (diff != 0) {
- FreeRange(thr, pc, p + sz - diff, diff);
+ FreeRange(proc, p + sz - diff, diff);
sz -= diff;
}
// Now we must have a non-empty page-aligned range.
@@ -146,18 +152,21 @@ void MetaMap::ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
const uptr p0 = p;
const uptr sz0 = sz;
// Probe start of the range.
- while (sz > 0) {
- bool has_something = FreeRange(thr, pc, p, kPageSize);
+ for (uptr checked = 0; sz > 0; checked += kPageSize) {
+ bool has_something = FreeRange(proc, p, kPageSize);
p += kPageSize;
sz -= kPageSize;
- if (!has_something)
+ if (!has_something && checked > (128 << 10))
break;
}
// Probe end of the range.
- while (sz > 0) {
- bool has_something = FreeRange(thr, pc, p - kPageSize, kPageSize);
+ for (uptr checked = 0; sz > 0; checked += kPageSize) {
+ bool has_something = FreeRange(proc, p + sz - kPageSize, kPageSize);
sz -= kPageSize;
- if (!has_something)
+    // Stacks grow down, so sync objects are most likely at the end of the
+    // region (if it is a stack). The very end of the stack is TLS and tsan
+    // increases TLS by at least 256K, so check at least 512K.
+ if (!has_something && checked > (512 << 10))
break;
}
// Finally, page out the whole range (including the parts that we've just
@@ -189,8 +198,8 @@ SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
return GetAndLock(thr, pc, addr, write_lock, true);
}
-SyncVar* MetaMap::GetIfExistsAndLock(uptr addr) {
- return GetAndLock(0, 0, addr, true, false);
+SyncVar* MetaMap::GetIfExistsAndLock(uptr addr, bool write_lock) {
+ return GetAndLock(0, 0, addr, write_lock, false);
}
SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
@@ -210,8 +219,8 @@ SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
if (s->addr == addr) {
if (myidx != 0) {
- mys->Reset(thr);
- sync_alloc_.Free(&thr->sync_cache, myidx);
+ mys->Reset(thr->proc());
+ sync_alloc_.Free(&thr->proc()->sync_cache, myidx);
}
if (write_lock)
s->mtx.Lock();
@@ -230,7 +239,7 @@ SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
if (myidx == 0) {
const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
- myidx = sync_alloc_.Alloc(&thr->sync_cache);
+ myidx = sync_alloc_.Alloc(&thr->proc()->sync_cache);
mys = sync_alloc_.Map(myidx);
mys->Init(thr, pc, addr, uid);
}
@@ -279,9 +288,9 @@ void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
}
}
-void MetaMap::OnThreadIdle(ThreadState *thr) {
- block_alloc_.FlushCache(&thr->block_cache);
- sync_alloc_.FlushCache(&thr->sync_cache);
+void MetaMap::OnProcIdle(Processor *proc) {
+ block_alloc_.FlushCache(&proc->block_cache);
+ sync_alloc_.FlushCache(&proc->sync_cache);
}
} // namespace __tsan
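
The reworked probing above scans a minimum budget before giving up: at least 128K from the bottom of the range, and at least 512K from the top, where a stack's TLS would sit. The same probe-and-stop shape in standalone form (page scanning is a stand-in for FreeRange; the threshold mirrors the diff):

#include <stddef.h>

// Stand-in for FreeRange(): returns true if the page held any meta objects.
static bool ProbePage(const unsigned char *page, size_t page_size) {
  for (size_t i = 0; i < page_size; i++)
    if (page[i] != 0) return true;
  return false;
}

// Probe from the start, but keep scanning for at least min_scan bytes even
// after an empty page, mirroring "if (!has_something && checked > (128 << 10))".
static size_t ProbeFront(const unsigned char *p, size_t sz, size_t page_size,
                         size_t min_scan) {
  size_t checked = 0;
  while (sz >= page_size) {
    bool has_something = ProbePage(p, page_size);
    p += page_size;
    sz -= page_size;
    checked += page_size;
    if (!has_something && checked > min_scan)
      break;
  }
  return checked;  // bytes actually probed before stopping
}
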
diff --git a/lib/tsan/rtl/tsan_sync.h b/lib/tsan/rtl/tsan_sync.h
index f07ea3b9776b..2bc2f41fbe26 100644
--- a/lib/tsan/rtl/tsan_sync.h
+++ b/lib/tsan/rtl/tsan_sync.h
@@ -47,7 +47,7 @@ struct SyncVar {
SyncClock clock;
void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid);
- void Reset(ThreadState *thr);
+ void Reset(Processor *proc);
u64 GetId() const {
// 47 lsb is addr, then 14 bits is low part of uid, then 3 zero bits.
@@ -72,18 +72,18 @@ class MetaMap {
MetaMap();
void AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz);
- uptr FreeBlock(ThreadState *thr, uptr pc, uptr p);
- bool FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz);
- void ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz);
+ uptr FreeBlock(Processor *proc, uptr p);
+ bool FreeRange(Processor *proc, uptr p, uptr sz);
+ void ResetRange(Processor *proc, uptr p, uptr sz);
MBlock* GetBlock(uptr p);
SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc,
uptr addr, bool write_lock);
- SyncVar* GetIfExistsAndLock(uptr addr);
+ SyncVar* GetIfExistsAndLock(uptr addr, bool write_lock);
void MoveMemory(uptr src, uptr dst, uptr sz);
- void OnThreadIdle(ThreadState *thr);
+ void OnProcIdle(Processor *proc);
private:
static const u32 kFlagMask = 3u << 30;