Diffstat (limited to 'lib/tsan/rtl')
37 files changed, 2563 insertions, 692 deletions
diff --git a/lib/tsan/rtl/Makefile.old b/lib/tsan/rtl/Makefile.old deleted file mode 100644 index ee7095e88d96..000000000000 --- a/lib/tsan/rtl/Makefile.old +++ /dev/null @@ -1,63 +0,0 @@ -CXXFLAGS = -std=c++11 -fPIE -g -Wall -Werror -fno-builtin -msse3 -DSANITIZER_DEBUG=$(DEBUG) -DTSAN_CONTAINS_UBSAN=0 -CLANG=clang -ifeq ($(DEBUG), 0) - CXXFLAGS += -O3 -endif - -# For interception. FIXME: move interception one level higher. -INTERCEPTION=../../interception -COMMON=../../sanitizer_common -INCLUDES= -I../.. -I../../../include -EXTRA_CXXFLAGS=-fno-exceptions -fno-rtti -NO_SYSROOT=--sysroot=. -CXXFLAGS+=$(EXTRA_CXXFLAGS) -CXXFLAGS+=$(CFLAGS) -ifeq ($(DEBUG), 0) - CXXFLAGS+=-fomit-frame-pointer -ifeq ($(CXX), g++) - CXXFLAGS+=-Wno-maybe-uninitialized - CXXFLAGS+=-Wframe-larger-than=512 -endif # CXX=g++ -endif # DEBUG=0 - -ifeq ($(CXX), $(CLANG)++) - # Global constructors are banned. - CXXFLAGS+=-Wglobal-constructors -endif - - - -all: libtsan.a - -LIBTSAN_HEADERS=$(wildcard *.h) \ - $(wildcard $(INTERCEPTION)/*.h) \ - $(wildcard $(COMMON)/*.h) -LIBTSAN_SRC=$(wildcard *.cc) -LIBTSAN_ASM_SRC=$(wildcard *.S) -INTERCEPTION_SRC=$(wildcard $(INTERCEPTION)/*.cc) -COMMON_SRC=$(wildcard $(COMMON)/*.cc) - -LIBTSAN_OBJ=$(patsubst %.cc,%.o,$(LIBTSAN_SRC)) \ - $(patsubst %.S,%.o,$(LIBTSAN_ASM_SRC)) \ - $(patsubst $(INTERCEPTION)/%.cc,%.o,$(INTERCEPTION_SRC)) \ - $(patsubst $(COMMON)/%.cc,%.o,$(COMMON_SRC)) - -%_linux.o: %_linux.cc Makefile.old $(LIBTSAN_HEADERS) - $(CXX) $(CXXFLAGS) $(INCLUDES) -c $< -%.o: %.cc Makefile.old $(LIBTSAN_HEADERS) - $(CXX) $(CXXFLAGS) $(INCLUDES) $(NO_SYSROOT) -c $< -%.o: $(INTERCEPTION)/%.cc Makefile.old $(LIBTSAN_HEADERS) - $(CXX) $(CXXFLAGS) $(INCLUDES) -c $< -o $@ -%.o: $(COMMON)/%.cc Makefile.old $(LIBTSAN_HEADERS) - $(CXX) $(CXXFLAGS) $(INCLUDES) -c $< -o $@ -%.o: %.S - $(CXX) $(INCLUDES) -o $@ -c $< - -libtsan.a: $(LIBTSAN_OBJ) - ar ru $@ $(LIBTSAN_OBJ) - -libtsan_dummy.a: tsan_dummy_rtl.o - ar ru $@ $< - -clean: - rm -f *.o *.a diff --git a/lib/tsan/rtl/tsan_clock.cc b/lib/tsan/rtl/tsan_clock.cc index 59e3de435f1b..1e2050d1f203 100644 --- a/lib/tsan/rtl/tsan_clock.cc +++ b/lib/tsan/rtl/tsan_clock.cc @@ -90,8 +90,6 @@ namespace __tsan { -const unsigned kInvalidTid = (unsigned)-1; - ThreadClock::ThreadClock(unsigned tid, unsigned reused) : tid_(tid) , reused_(reused + 1) { // 0 has special meaning diff --git a/lib/tsan/rtl/tsan_defs.h b/lib/tsan/rtl/tsan_defs.h index d869d95e0878..9c7b329dcf00 100644 --- a/lib/tsan/rtl/tsan_defs.h +++ b/lib/tsan/rtl/tsan_defs.h @@ -38,13 +38,10 @@ namespace __tsan { const bool kGoMode = true; const bool kCppMode = false; const char *const kTsanOptionsEnv = "GORACE"; -// Go linker does not support weak symbols. -#define CPP_WEAK #else const bool kGoMode = false; const bool kCppMode = true; const char *const kTsanOptionsEnv = "TSAN_OPTIONS"; -#define CPP_WEAK WEAK #endif const int kTidBits = 13; @@ -83,6 +80,8 @@ const bool kCollectHistory = false; const bool kCollectHistory = true; #endif +const unsigned kInvalidTid = (unsigned)-1; + // The following "build consistency" machinery ensures that all source files // are built in the same configuration. Inconsistent builds lead to // hard to debug crashes. diff --git a/lib/tsan/rtl/tsan_dense_alloc.h b/lib/tsan/rtl/tsan_dense_alloc.h index a1cf84b8f166..e9815c90a953 100644 --- a/lib/tsan/rtl/tsan_dense_alloc.h +++ b/lib/tsan/rtl/tsan_dense_alloc.h @@ -108,7 +108,7 @@ class DenseSlabAlloc { // Reserve 0 as invalid index. IndexT start = fillpos_ == 0 ? 
1 : 0; for (IndexT i = start; i < kL2Size; i++) { - new(batch + i) T(); + new(batch + i) T; *(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size; } *(IndexT*)(batch + kL2Size - 1) = 0; diff --git a/lib/tsan/rtl/tsan_flags.cc b/lib/tsan/rtl/tsan_flags.cc index 5de227a42dee..761523171c77 100644 --- a/lib/tsan/rtl/tsan_flags.cc +++ b/lib/tsan/rtl/tsan_flags.cc @@ -29,8 +29,8 @@ Flags *flags() { #ifdef TSAN_EXTERNAL_HOOKS extern "C" const char* __tsan_default_options(); #else -extern "C" SANITIZER_INTERFACE_ATTRIBUTE -const char *WEAK __tsan_default_options() { +SANITIZER_WEAK_DEFAULT_IMPL +const char *__tsan_default_options() { return ""; } #endif @@ -61,11 +61,16 @@ void InitializeFlags(Flags *f, const char *env) { CommonFlags cf; cf.CopyFrom(*common_flags()); cf.allow_addr2line = true; -#ifndef SANITIZER_GO - cf.detect_deadlocks = true; -#endif + if (kGoMode) { + // Does not work as expected for Go: runtime handles SIGABRT and crashes. + cf.abort_on_error = false; + // Go does not have mutexes. + } else { + cf.detect_deadlocks = true; + } cf.print_suppressions = false; cf.stack_trace_format = " #%n %f %S %M"; + cf.exitcode = 66; OverrideCommonFlags(cf); } diff --git a/lib/tsan/rtl/tsan_flags.inc b/lib/tsan/rtl/tsan_flags.inc index e4994685fa0d..ab9ca9924936 100644 --- a/lib/tsan/rtl/tsan_flags.inc +++ b/lib/tsan/rtl/tsan_flags.inc @@ -45,7 +45,6 @@ TSAN_FLAG( "If set, all atomics are effectively sequentially consistent (seq_cst), " "regardless of what user actually specified.") TSAN_FLAG(bool, print_benign, false, "Print matched \"benign\" races at exit.") -TSAN_FLAG(int, exitcode, 66, "Override exit status if something was reported.") TSAN_FLAG(bool, halt_on_error, false, "Exit after first reported error.") TSAN_FLAG(int, atexit_sleep_ms, 1000, "Sleep in main thread before exiting for that many ms " diff --git a/lib/tsan/rtl/tsan_interceptors.cc b/lib/tsan/rtl/tsan_interceptors.cc index b1a7ae6de328..62c96cb42047 100644 --- a/lib/tsan/rtl/tsan_interceptors.cc +++ b/lib/tsan/rtl/tsan_interceptors.cc @@ -28,16 +28,42 @@ #include "tsan_mman.h" #include "tsan_fd.h" +#if SANITIZER_POSIX +#include "sanitizer_common/sanitizer_posix.h" +#endif + using namespace __tsan; // NOLINT -#if SANITIZER_FREEBSD +#if SANITIZER_FREEBSD || SANITIZER_MAC #define __errno_location __error -#define __libc_realloc __realloc -#define __libc_calloc __calloc #define stdout __stdoutp #define stderr __stderrp #endif +#if SANITIZER_FREEBSD +#define __libc_realloc __realloc +#define __libc_calloc __calloc +#elif SANITIZER_MAC +#define __libc_malloc REAL(malloc) +#define __libc_realloc REAL(realloc) +#define __libc_calloc REAL(calloc) +#define __libc_free REAL(free) +#elif SANITIZER_ANDROID +#define __errno_location __errno +#define __libc_malloc REAL(malloc) +#define __libc_realloc REAL(realloc) +#define __libc_calloc REAL(calloc) +#define __libc_free REAL(free) +#define mallopt(a, b) +#endif + +#if SANITIZER_LINUX || SANITIZER_FREEBSD +#define PTHREAD_CREATE_DETACHED 1 +#elif SANITIZER_MAC +#define PTHREAD_CREATE_DETACHED 2 +#endif + + #ifdef __mips__ const int kSigCount = 129; #else @@ -60,6 +86,14 @@ struct ucontext_t { }; #endif +#if defined(__x86_64__) || defined(__mips__) \ + || (defined(__powerpc64__) && defined(__BIG_ENDIAN__)) +#define PTHREAD_ABI_BASE "GLIBC_2.3.2" +#elif defined(__aarch64__) || (defined(__powerpc64__) \ + && defined(__LITTLE_ENDIAN__)) +#define PTHREAD_ABI_BASE "GLIBC_2.17" +#endif + extern "C" int pthread_attr_init(void *attr); extern "C" int pthread_attr_destroy(void *attr); DECLARE_REAL(int, 
pthread_attr_getdetachstate, void *, void *) @@ -67,20 +101,23 @@ extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize); extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v)); extern "C" int pthread_setspecific(unsigned key, const void *v); DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *) -extern "C" int pthread_yield(); extern "C" int pthread_sigmask(int how, const __sanitizer_sigset_t *set, __sanitizer_sigset_t *oldset); // REAL(sigfillset) defined in common interceptors. DECLARE_REAL(int, sigfillset, __sanitizer_sigset_t *set) DECLARE_REAL(int, fflush, __sanitizer_FILE *fp) +DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size) +DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr) extern "C" void *pthread_self(); extern "C" void _exit(int status); extern "C" int *__errno_location(); extern "C" int fileno_unlocked(void *stream); +#if !SANITIZER_ANDROID extern "C" void *__libc_calloc(uptr size, uptr n); extern "C" void *__libc_realloc(void *ptr, uptr size); +#endif extern "C" int dirfd(void *dirp); -#if !SANITIZER_FREEBSD +#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID extern "C" int mallopt(int param, int value); #endif extern __sanitizer_FILE *stdout, *stderr; @@ -89,14 +126,16 @@ const int PTHREAD_MUTEX_RECURSIVE_NP = 1; const int EINVAL = 22; const int EBUSY = 16; const int EOWNERDEAD = 130; +#if !SANITIZER_MAC const int EPOLL_CTL_ADD = 1; +#endif const int SIGILL = 4; const int SIGABRT = 6; const int SIGFPE = 8; const int SIGSEGV = 11; const int SIGPIPE = 13; const int SIGTERM = 15; -#ifdef __mips__ +#if defined(__mips__) || SANITIZER_MAC const int SIGBUS = 10; const int SIGSYS = 12; #else @@ -104,7 +143,9 @@ const int SIGBUS = 7; const int SIGSYS = 31; #endif void *const MAP_FAILED = (void*)-1; +#if !SANITIZER_MAC const int PTHREAD_BARRIER_SERIAL_THREAD = -1; +#endif const int MAP_FIXED = 0x10; typedef long long_t; // NOLINT @@ -119,6 +160,17 @@ typedef long long_t; // NOLINT typedef void (*sighandler_t)(int sig); typedef void (*sigactionhandler_t)(int sig, my_siginfo_t *siginfo, void *uctx); +#if SANITIZER_ANDROID +struct sigaction_t { + u32 sa_flags; + union { + sighandler_t sa_handler; + sigactionhandler_t sa_sgiaction; + }; + __sanitizer_sigset_t sa_mask; + void (*sa_restorer)(); +}; +#else struct sigaction_t { #ifdef __mips__ u32 sa_flags; @@ -130,6 +182,9 @@ struct sigaction_t { #if SANITIZER_FREEBSD int sa_flags; __sanitizer_sigset_t sa_mask; +#elif SANITIZER_MAC + __sanitizer_sigset_t sa_mask; + int sa_flags; #else __sanitizer_sigset_t sa_mask; #ifndef __mips__ @@ -138,11 +193,12 @@ struct sigaction_t { void (*sa_restorer)(); #endif }; +#endif const sighandler_t SIG_DFL = (sighandler_t)0; const sighandler_t SIG_IGN = (sighandler_t)1; const sighandler_t SIG_ERR = (sighandler_t)-1; -#if SANITIZER_FREEBSD +#if SANITIZER_FREEBSD || SANITIZER_MAC const int SA_SIGINFO = 0x40; const int SIG_SETMASK = 3; #elif defined(__mips__) @@ -171,6 +227,9 @@ struct ThreadSignalContext { atomic_uintptr_t in_blocking_func; atomic_uintptr_t have_pending_signals; SignalDesc pending_signals[kSigCount]; + // emptyset and oldset are too big for stack. 
+ __sanitizer_sigset_t emptyset; + __sanitizer_sigset_t oldset; }; // The object is 64-byte aligned, because we want hot data to be located in @@ -203,7 +262,9 @@ static ThreadSignalContext *SigCtx(ThreadState *thr) { return ctx; } +#if !SANITIZER_MAC static unsigned g_thread_finalize_key; +#endif ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc) @@ -234,17 +295,20 @@ ScopedInterceptor::~ScopedInterceptor() { } } -#define SCOPED_TSAN_INTERCEPTOR(func, ...) \ - SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \ - if (REAL(func) == 0) { \ - Report("FATAL: ThreadSanitizer: failed to intercept %s\n", #func); \ - Die(); \ - } \ - if (thr->ignore_interceptors || thr->in_ignored_lib) \ - return REAL(func)(__VA_ARGS__); \ -/**/ +void ScopedInterceptor::UserCallbackStart() { + if (in_ignored_lib_) { + thr_->in_ignored_lib = false; + ThreadIgnoreEnd(thr_, pc_); + } +} + +void ScopedInterceptor::UserCallbackEnd() { + if (in_ignored_lib_) { + thr_->in_ignored_lib = true; + ThreadIgnoreBegin(thr_, pc_); + } +} -#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__) #define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func) #if SANITIZER_FREEBSD # define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func) @@ -329,6 +393,7 @@ static void at_exit_wrapper(void *arg) { static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(), void *arg, void *dso); +#if !SANITIZER_ANDROID TSAN_INTERCEPTOR(int, atexit, void (*f)()) { if (cur_thread()->in_symbolizer) return 0; @@ -337,6 +402,7 @@ TSAN_INTERCEPTOR(int, atexit, void (*f)()) { SCOPED_INTERCEPTOR_RAW(atexit, f); return setup_at_exit_wrapper(thr, pc, (void(*)())f, 0, 0); } +#endif TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) { if (cur_thread()->in_symbolizer) @@ -359,6 +425,7 @@ static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(), return res; } +#if !SANITIZER_MAC static void on_exit_wrapper(int status, void *arg) { ThreadState *thr = cur_thread(); uptr pc = 0; @@ -383,6 +450,7 @@ TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) { ThreadIgnoreEnd(thr, pc); return res; } +#endif // Cleanup old bufs. static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) { @@ -390,7 +458,7 @@ static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) { JmpBuf *buf = &thr->jmp_bufs[i]; if (buf->sp <= sp) { uptr sz = thr->jmp_bufs.Size(); - thr->jmp_bufs[i] = thr->jmp_bufs[sz - 1]; + internal_memcpy(buf, &thr->jmp_bufs[sz - 1], sizeof(*buf)); thr->jmp_bufs.PopBack(); i--; } @@ -417,11 +485,17 @@ static void SetJmp(ThreadState *thr, uptr sp, uptr mangled_sp) { } static void LongJmp(ThreadState *thr, uptr *env) { -#if SANITIZER_FREEBSD +#ifdef __powerpc__ + uptr mangled_sp = env[0]; +#elif SANITIZER_FREEBSD || SANITIZER_MAC uptr mangled_sp = env[2]; -#else +#elif defined(SANITIZER_LINUX) +# ifdef __aarch64__ + uptr mangled_sp = env[13]; +# else uptr mangled_sp = env[6]; -#endif // SANITIZER_FREEBSD +# endif +#endif // Find the saved buf by mangled_sp. for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) { JmpBuf *buf = &thr->jmp_bufs[i]; @@ -451,6 +525,11 @@ extern "C" void __tsan_setjmp(uptr sp, uptr mangled_sp) { SetJmp(cur_thread(), sp, mangled_sp); } +#if SANITIZER_MAC +TSAN_INTERCEPTOR(int, setjmp, void *env); +TSAN_INTERCEPTOR(int, _setjmp, void *env); +TSAN_INTERCEPTOR(int, sigsetjmp, void *env); +#else // SANITIZER_MAC // Not called. Merely to satisfy TSAN_INTERCEPT(). 
extern "C" SANITIZER_INTERFACE_ATTRIBUTE int __interceptor_setjmp(void *env); @@ -489,6 +568,7 @@ DEFINE_REAL(int, setjmp, void *env) DEFINE_REAL(int, _setjmp, void *env) DEFINE_REAL(int, sigsetjmp, void *env) DEFINE_REAL(int, __sigsetjmp, void *env) +#endif // SANITIZER_MAC TSAN_INTERCEPTOR(void, longjmp, uptr *env, int val) { { @@ -506,6 +586,7 @@ TSAN_INTERCEPTOR(void, siglongjmp, uptr *env, int val) { REAL(siglongjmp)(env, val); } +#if !SANITIZER_MAC TSAN_INTERCEPTOR(void*, malloc, uptr size) { if (cur_thread()->in_symbolizer) return __libc_malloc(size); @@ -572,6 +653,7 @@ TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) { SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p); return user_alloc_usable_size(p); } +#endif TSAN_INTERCEPTOR(uptr, strlen, const char *s) { SCOPED_TSAN_INTERCEPTOR(strlen, s); @@ -596,27 +678,18 @@ TSAN_INTERCEPTOR(void*, memcpy, void *dst, const void *src, uptr size) { MemoryAccessRange(thr, pc, (uptr)dst, size, true); MemoryAccessRange(thr, pc, (uptr)src, size, false); } - return internal_memcpy(dst, src, size); -} - -TSAN_INTERCEPTOR(int, memcmp, const void *s1, const void *s2, uptr n) { - SCOPED_TSAN_INTERCEPTOR(memcmp, s1, s2, n); - int res = 0; - uptr len = 0; - for (; len < n; len++) { - if ((res = ((const unsigned char *)s1)[len] - - ((const unsigned char *)s2)[len])) - break; - } - MemoryAccessRange(thr, pc, (uptr)s1, len < n ? len + 1 : n, false); - MemoryAccessRange(thr, pc, (uptr)s2, len < n ? len + 1 : n, false); - return res; + // On OS X, calling internal_memcpy here will cause memory corruptions, + // because memcpy and memmove are actually aliases of the same implementation. + // We need to use internal_memmove here. + return internal_memmove(dst, src, size); } TSAN_INTERCEPTOR(void*, memmove, void *dst, void *src, uptr n) { - SCOPED_TSAN_INTERCEPTOR(memmove, dst, src, n); - MemoryAccessRange(thr, pc, (uptr)dst, n, true); - MemoryAccessRange(thr, pc, (uptr)src, n, false); + if (!COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) { + SCOPED_TSAN_INTERCEPTOR(memmove, dst, src, n); + MemoryAccessRange(thr, pc, (uptr)dst, n, true); + MemoryAccessRange(thr, pc, (uptr)src, n, false); + } return REAL(memmove)(dst, src, n); } @@ -629,6 +702,7 @@ TSAN_INTERCEPTOR(char*, strchr, char *s, int c) { return res; } +#if !SANITIZER_MAC TSAN_INTERCEPTOR(char*, strchrnul, char *s, int c) { SCOPED_TSAN_INTERCEPTOR(strchrnul, s, c); char *res = REAL(strchrnul)(s, c); @@ -636,6 +710,7 @@ TSAN_INTERCEPTOR(char*, strchrnul, char *s, int c) { READ_STRING(thr, pc, s, len); return res; } +#endif TSAN_INTERCEPTOR(char*, strrchr, char *s, int c) { SCOPED_TSAN_INTERCEPTOR(strrchr, s, c); @@ -679,8 +754,8 @@ static bool fix_mmap_addr(void **addr, long_t sz, int flags) { return true; } -TSAN_INTERCEPTOR(void*, mmap, void *addr, long_t sz, int prot, - int flags, int fd, unsigned off) { +TSAN_INTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags, + int fd, OFF_T off) { SCOPED_TSAN_INTERCEPTOR(mmap, addr, sz, prot, flags, fd, off); if (!fix_mmap_addr(&addr, sz, flags)) return MAP_FAILED; @@ -693,9 +768,9 @@ TSAN_INTERCEPTOR(void*, mmap, void *addr, long_t sz, int prot, return res; } -#if !SANITIZER_FREEBSD -TSAN_INTERCEPTOR(void*, mmap64, void *addr, long_t sz, int prot, - int flags, int fd, u64 off) { +#if SANITIZER_LINUX +TSAN_INTERCEPTOR(void *, mmap64, void *addr, SIZE_T sz, int prot, int flags, + int fd, OFF64_T off) { SCOPED_TSAN_INTERCEPTOR(mmap64, addr, sz, prot, flags, fd, off); if (!fix_mmap_addr(&addr, sz, flags)) return MAP_FAILED; @@ -723,7 +798,7 @@ 
TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) { return res; } -#if !SANITIZER_FREEBSD +#if SANITIZER_LINUX TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) { SCOPED_INTERCEPTOR_RAW(memalign, align, sz); return user_alloc(thr, pc, sz, align); @@ -733,6 +808,7 @@ TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) { #define TSAN_MAYBE_INTERCEPT_MEMALIGN #endif +#if !SANITIZER_MAC TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) { SCOPED_INTERCEPTOR_RAW(memalign, align, sz); return user_alloc(thr, pc, sz, align); @@ -742,8 +818,9 @@ TSAN_INTERCEPTOR(void*, valloc, uptr sz) { SCOPED_INTERCEPTOR_RAW(valloc, sz); return user_alloc(thr, pc, sz, GetPageSizeCached()); } +#endif -#if !SANITIZER_FREEBSD +#if SANITIZER_LINUX TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) { SCOPED_INTERCEPTOR_RAW(pvalloc, sz); sz = RoundUp(sz, GetPageSizeCached()); @@ -754,14 +831,33 @@ TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) { #define TSAN_MAYBE_INTERCEPT_PVALLOC #endif +#if !SANITIZER_MAC TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) { SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz); *memptr = user_alloc(thr, pc, sz, align); return 0; } +#endif + +// __cxa_guard_acquire and friends need to be intercepted in a special way - +// regular interceptors will break statically-linked libstdc++. Linux +// interceptors are deliberately defined as weak functions (so that they don't +// cause link errors when the user defines them as well). So they silently +// auto-disable themselves when such a symbol is already present in the binary. If +// we link libstdc++ statically, it will bring its own __cxa_guard_acquire which +// will silently replace our interceptor. That's why on Linux we simply export +// these interceptors with INTERFACE_ATTRIBUTE. +// On OS X, we don't support static linking, so we just use a regular +// interceptor. +#if SANITIZER_MAC +#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR +#else +#define STDCXX_INTERCEPTOR(rettype, name, ...) \ + extern "C" rettype INTERFACE_ATTRIBUTE name(__VA_ARGS__) +#endif // Used in thread-safe function static initialization.
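For context, a sketch (not part of the diff) of why these guard functions are the interception point: under the Itanium C++ ABI the compiler lowers a function-local static roughly as below, so hooking __cxa_guard_acquire/release lets TSan model the happens-before edge from the initializing thread to every later reader. Widget and instance are invented names, and the lowering is approximate.

#include <new>
#include <stdint.h>

extern "C" int __cxa_guard_acquire(uint64_t *guard);
extern "C" void __cxa_guard_release(uint64_t *guard);

struct Widget { int x = 42; };

// Source:  Widget &instance() { static Widget w; return w; }
// Approximate compiler lowering:
Widget &instance() {
  alignas(Widget) static char storage[sizeof(Widget)];
  static uint64_t guard;               // zero-initialized: "not constructed yet"
  if (__cxa_guard_acquire(&guard)) {   // nonzero: this thread must initialize
    new (storage) Widget();            // run the constructor exactly once
    __cxa_guard_release(&guard);       // release-publish the constructed object
  }
  return *reinterpret_cast<Widget *>(storage);
}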
-extern "C" int INTERFACE_ATTRIBUTE __cxa_guard_acquire(atomic_uint32_t *g) { +STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) { SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g); for (;;) { u32 cmp = atomic_load(g, memory_order_acquire); @@ -777,17 +873,31 @@ extern "C" int INTERFACE_ATTRIBUTE __cxa_guard_acquire(atomic_uint32_t *g) { } } -extern "C" void INTERFACE_ATTRIBUTE __cxa_guard_release(atomic_uint32_t *g) { +STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) { SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g); Release(thr, pc, (uptr)g); atomic_store(g, 1, memory_order_release); } -extern "C" void INTERFACE_ATTRIBUTE __cxa_guard_abort(atomic_uint32_t *g) { +STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) { SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g); atomic_store(g, 0, memory_order_relaxed); } +namespace __tsan { +void DestroyThreadState() { + ThreadState *thr = cur_thread(); + ThreadFinish(thr); + ThreadSignalContext *sctx = thr->signal_ctx; + if (sctx) { + thr->signal_ctx = 0; + UnmapOrDie(sctx, sizeof(*sctx)); + } + cur_thread_finalize(); +} +} // namespace __tsan + +#if !SANITIZER_MAC static void thread_finalize(void *v) { uptr iter = (uptr)v; if (iter > 1) { @@ -797,16 +907,9 @@ static void thread_finalize(void *v) { } return; } - { - ThreadState *thr = cur_thread(); - ThreadFinish(thr); - ThreadSignalContext *sctx = thr->signal_ctx; - if (sctx) { - thr->signal_ctx = 0; - UnmapOrDie(sctx, sizeof(*sctx)); - } - } + DestroyThreadState(); } +#endif struct ThreadParam { @@ -824,6 +927,7 @@ extern "C" void *__tsan_thread_start_func(void *arg) { ThreadState *thr = cur_thread(); // Thread-local state is not initialized yet. ScopedIgnoreInterceptors ignore; +#if !SANITIZER_MAC ThreadIgnoreBegin(thr, 0); if (pthread_setspecific(g_thread_finalize_key, (void *)GetPthreadDestructorIterations())) { @@ -831,8 +935,9 @@ extern "C" void *__tsan_thread_start_func(void *arg) { Die(); } ThreadIgnoreEnd(thr, 0); +#endif while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0) - pthread_yield(); + internal_sched_yield(); ThreadStart(thr, tid, GetTid()); atomic_store(&p->tid, 0, memory_order_release); } @@ -880,7 +985,8 @@ TSAN_INTERCEPTOR(int, pthread_create, ThreadIgnoreEnd(thr, pc); } if (res == 0) { - int tid = ThreadCreate(thr, pc, *(uptr*)th, detached); + int tid = ThreadCreate(thr, pc, *(uptr*)th, + detached == PTHREAD_CREATE_DETACHED); CHECK_NE(tid, 0); // Synchronization on p.tid serves two purposes: // 1. ThreadCreate must finish before the new thread starts. @@ -891,7 +997,7 @@ TSAN_INTERCEPTOR(int, pthread_create, // before the new thread got a chance to acquire from it in ThreadStart. 
atomic_store(&p.tid, tid, memory_order_release); while (atomic_load(&p.tid, memory_order_acquire) != 0) - pthread_yield(); + internal_sched_yield(); } if (attr == &myattr) pthread_attr_destroy(&myattr); @@ -1094,6 +1200,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) { return res; } +#if !SANITIZER_MAC TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) { SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime); int res = REAL(pthread_mutex_timedlock)(m, abstime); @@ -1102,7 +1209,9 @@ TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) { } return res; } +#endif +#if !SANITIZER_MAC TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) { SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared); int res = REAL(pthread_spin_init)(m, pshared); @@ -1145,6 +1254,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) { int res = REAL(pthread_spin_unlock)(m); return res; } +#endif TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a); @@ -1182,6 +1292,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) { return res; } +#if !SANITIZER_MAC TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime); int res = REAL(pthread_rwlock_timedrdlock)(m, abstime); @@ -1190,6 +1301,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) { } return res; } +#endif TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m); @@ -1209,6 +1321,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) { return res; } +#if !SANITIZER_MAC TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime); int res = REAL(pthread_rwlock_timedwrlock)(m, abstime); @@ -1217,6 +1330,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) { } return res; } +#endif TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) { SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m); @@ -1225,6 +1339,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) { return res; } +#if !SANITIZER_MAC TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) { SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count); MemoryWrite(thr, pc, (uptr)b, kSizeLog1); @@ -1250,12 +1365,17 @@ TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) { } return res; } +#endif TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) { SCOPED_INTERCEPTOR_RAW(pthread_once, o, f); if (o == 0 || f == 0) return EINVAL; - atomic_uint32_t *a = static_cast<atomic_uint32_t*>(o); + atomic_uint32_t *a; + if (!SANITIZER_MAC) + a = static_cast<atomic_uint32_t*>(o); + else // On OS X, pthread_once_t has a header with a long-sized signature. 
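// (Darwin declares pthread_once_t as struct { long __sig; char __opaque[...]; },
// so the first long-sized word is a signature header and the mutable state
// lives in the opaque bytes after it. Skipping sizeof(long_t) below relies on
// that private layout - an implementation detail, not public API.)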
+ a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t))); u32 v = atomic_load(a, memory_order_acquire); if (v == 0 && atomic_compare_exchange_strong(a, &v, 1, memory_order_relaxed)) { @@ -1265,7 +1385,7 @@ TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) { atomic_store(a, 2, memory_order_release); } else { while (v != 2) { - pthread_yield(); + internal_sched_yield(); v = atomic_load(a, memory_order_acquire); } if (!thr->in_ignored_lib) @@ -1274,62 +1394,7 @@ TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) { return 0; } -TSAN_INTERCEPTOR(int, sem_init, void *s, int pshared, unsigned value) { - SCOPED_TSAN_INTERCEPTOR(sem_init, s, pshared, value); - int res = REAL(sem_init)(s, pshared, value); - return res; -} - -TSAN_INTERCEPTOR(int, sem_destroy, void *s) { - SCOPED_TSAN_INTERCEPTOR(sem_destroy, s); - int res = REAL(sem_destroy)(s); - return res; -} - -TSAN_INTERCEPTOR(int, sem_wait, void *s) { - SCOPED_TSAN_INTERCEPTOR(sem_wait, s); - int res = BLOCK_REAL(sem_wait)(s); - if (res == 0) { - Acquire(thr, pc, (uptr)s); - } - return res; -} - -TSAN_INTERCEPTOR(int, sem_trywait, void *s) { - SCOPED_TSAN_INTERCEPTOR(sem_trywait, s); - int res = BLOCK_REAL(sem_trywait)(s); - if (res == 0) { - Acquire(thr, pc, (uptr)s); - } - return res; -} - -TSAN_INTERCEPTOR(int, sem_timedwait, void *s, void *abstime) { - SCOPED_TSAN_INTERCEPTOR(sem_timedwait, s, abstime); - int res = BLOCK_REAL(sem_timedwait)(s, abstime); - if (res == 0) { - Acquire(thr, pc, (uptr)s); - } - return res; -} - -TSAN_INTERCEPTOR(int, sem_post, void *s) { - SCOPED_TSAN_INTERCEPTOR(sem_post, s); - Release(thr, pc, (uptr)s); - int res = REAL(sem_post)(s); - return res; -} - -TSAN_INTERCEPTOR(int, sem_getvalue, void *s, int *sval) { - SCOPED_TSAN_INTERCEPTOR(sem_getvalue, s, sval); - int res = REAL(sem_getvalue)(s, sval); - if (res == 0) { - Acquire(thr, pc, (uptr)s); - } - return res; -} - -#if !SANITIZER_FREEBSD +#if SANITIZER_LINUX && !SANITIZER_ANDROID TSAN_INTERCEPTOR(int, __xstat, int version, const char *path, void *buf) { SCOPED_TSAN_INTERCEPTOR(__xstat, version, path, buf); READ_STRING(thr, pc, path, 0); @@ -1341,7 +1406,7 @@ TSAN_INTERCEPTOR(int, __xstat, int version, const char *path, void *buf) { #endif TSAN_INTERCEPTOR(int, stat, const char *path, void *buf) { -#if SANITIZER_FREEBSD +#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID SCOPED_TSAN_INTERCEPTOR(stat, path, buf); READ_STRING(thr, pc, path, 0); return REAL(stat)(path, buf); @@ -1352,7 +1417,7 @@ TSAN_INTERCEPTOR(int, stat, const char *path, void *buf) { #endif } -#if !SANITIZER_FREEBSD +#if SANITIZER_LINUX && !SANITIZER_ANDROID TSAN_INTERCEPTOR(int, __xstat64, int version, const char *path, void *buf) { SCOPED_TSAN_INTERCEPTOR(__xstat64, version, path, buf); READ_STRING(thr, pc, path, 0); @@ -1363,7 +1428,7 @@ TSAN_INTERCEPTOR(int, __xstat64, int version, const char *path, void *buf) { #define TSAN_MAYBE_INTERCEPT___XSTAT64 #endif -#if !SANITIZER_FREEBSD +#if SANITIZER_LINUX && !SANITIZER_ANDROID TSAN_INTERCEPTOR(int, stat64, const char *path, void *buf) { SCOPED_TSAN_INTERCEPTOR(__xstat64, 0, path, buf); READ_STRING(thr, pc, path, 0); @@ -1374,7 +1439,7 @@ TSAN_INTERCEPTOR(int, stat64, const char *path, void *buf) { #define TSAN_MAYBE_INTERCEPT_STAT64 #endif -#if !SANITIZER_FREEBSD +#if SANITIZER_LINUX && !SANITIZER_ANDROID TSAN_INTERCEPTOR(int, __lxstat, int version, const char *path, void *buf) { SCOPED_TSAN_INTERCEPTOR(__lxstat, version, path, buf); READ_STRING(thr, pc, path, 0); @@ -1386,7 +1451,7 @@ 
TSAN_INTERCEPTOR(int, __lxstat, int version, const char *path, void *buf) { #endif TSAN_INTERCEPTOR(int, lstat, const char *path, void *buf) { -#if SANITIZER_FREEBSD +#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID SCOPED_TSAN_INTERCEPTOR(lstat, path, buf); READ_STRING(thr, pc, path, 0); return REAL(lstat)(path, buf); @@ -1397,7 +1462,7 @@ TSAN_INTERCEPTOR(int, lstat, const char *path, void *buf) { #endif } -#if !SANITIZER_FREEBSD +#if SANITIZER_LINUX && !SANITIZER_ANDROID TSAN_INTERCEPTOR(int, __lxstat64, int version, const char *path, void *buf) { SCOPED_TSAN_INTERCEPTOR(__lxstat64, version, path, buf); READ_STRING(thr, pc, path, 0); @@ -1408,7 +1473,7 @@ TSAN_INTERCEPTOR(int, __lxstat64, int version, const char *path, void *buf) { #define TSAN_MAYBE_INTERCEPT___LXSTAT64 #endif -#if !SANITIZER_FREEBSD +#if SANITIZER_LINUX && !SANITIZER_ANDROID TSAN_INTERCEPTOR(int, lstat64, const char *path, void *buf) { SCOPED_TSAN_INTERCEPTOR(__lxstat64, 0, path, buf); READ_STRING(thr, pc, path, 0); @@ -1419,7 +1484,7 @@ TSAN_INTERCEPTOR(int, lstat64, const char *path, void *buf) { #define TSAN_MAYBE_INTERCEPT_LSTAT64 #endif -#if !SANITIZER_FREEBSD +#if SANITIZER_LINUX && !SANITIZER_ANDROID TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) { SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf); if (fd > 0) @@ -1432,7 +1497,7 @@ TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) { #endif TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) { -#if SANITIZER_FREEBSD +#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf); if (fd > 0) FdAccess(thr, pc, fd); @@ -1445,7 +1510,7 @@ TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) { #endif } -#if !SANITIZER_FREEBSD +#if SANITIZER_LINUX && !SANITIZER_ANDROID TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) { SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf); if (fd > 0) @@ -1457,7 +1522,7 @@ TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) { #define TSAN_MAYBE_INTERCEPT___FXSTAT64 #endif -#if !SANITIZER_FREEBSD +#if SANITIZER_LINUX && !SANITIZER_ANDROID TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) { SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf); if (fd > 0) @@ -1478,7 +1543,7 @@ TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) { return fd; } -#if !SANITIZER_FREEBSD +#if SANITIZER_LINUX TSAN_INTERCEPTOR(int, open64, const char *name, int flags, int mode) { SCOPED_TSAN_INTERCEPTOR(open64, name, flags, mode); READ_STRING(thr, pc, name, 0); @@ -1501,7 +1566,7 @@ TSAN_INTERCEPTOR(int, creat, const char *name, int mode) { return fd; } -#if !SANITIZER_FREEBSD +#if SANITIZER_LINUX TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) { SCOPED_TSAN_INTERCEPTOR(creat64, name, mode); READ_STRING(thr, pc, name, 0); @@ -1531,6 +1596,7 @@ TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) { return newfd2; } +#if !SANITIZER_MAC TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) { SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags); int newfd2 = REAL(dup3)(oldfd, newfd, flags); @@ -1538,8 +1604,9 @@ TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) { FdDup(thr, pc, oldfd, newfd2, false); return newfd2; } +#endif -#if !SANITIZER_FREEBSD +#if SANITIZER_LINUX TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) { SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags); int fd = REAL(eventfd)(initval, flags); @@ -1552,7 +1619,7 @@ TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) { #define 
TSAN_MAYBE_INTERCEPT_EVENTFD #endif -#if !SANITIZER_FREEBSD +#if SANITIZER_LINUX TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) { SCOPED_TSAN_INTERCEPTOR(signalfd, fd, mask, flags); if (fd >= 0) @@ -1567,7 +1634,7 @@ TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) { #define TSAN_MAYBE_INTERCEPT_SIGNALFD #endif -#if !SANITIZER_FREEBSD +#if SANITIZER_LINUX TSAN_INTERCEPTOR(int, inotify_init, int fake) { SCOPED_TSAN_INTERCEPTOR(inotify_init, fake); int fd = REAL(inotify_init)(fake); @@ -1580,7 +1647,7 @@ TSAN_INTERCEPTOR(int, inotify_init, int fake) { #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT #endif -#if !SANITIZER_FREEBSD +#if SANITIZER_LINUX TSAN_INTERCEPTOR(int, inotify_init1, int flags) { SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags); int fd = REAL(inotify_init1)(flags); @@ -1634,7 +1701,7 @@ TSAN_INTERCEPTOR(int, listen, int fd, int backlog) { return res; } -#if !SANITIZER_FREEBSD +#if SANITIZER_LINUX TSAN_INTERCEPTOR(int, epoll_create, int size) { SCOPED_TSAN_INTERCEPTOR(epoll_create, size); int fd = REAL(epoll_create)(size); @@ -1647,7 +1714,7 @@ TSAN_INTERCEPTOR(int, epoll_create, int size) { #define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE #endif -#if !SANITIZER_FREEBSD +#if SANITIZER_LINUX TSAN_INTERCEPTOR(int, epoll_create1, int flags) { SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags); int fd = REAL(epoll_create1)(flags); @@ -1667,7 +1734,7 @@ TSAN_INTERCEPTOR(int, close, int fd) { return REAL(close)(fd); } -#if !SANITIZER_FREEBSD +#if SANITIZER_LINUX TSAN_INTERCEPTOR(int, __close, int fd) { SCOPED_TSAN_INTERCEPTOR(__close, fd); if (fd >= 0) @@ -1680,7 +1747,7 @@ TSAN_INTERCEPTOR(int, __close, int fd) { #endif // glibc guts -#if !SANITIZER_FREEBSD +#if SANITIZER_LINUX && !SANITIZER_ANDROID TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) { SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr); int fds[64]; @@ -1704,6 +1771,7 @@ TSAN_INTERCEPTOR(int, pipe, int *pipefd) { return res; } +#if !SANITIZER_MAC TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) { SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags); int res = REAL(pipe2)(pipefd, flags); @@ -1711,6 +1779,7 @@ TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) { FdPipeCreate(thr, pc, pipefd[0], pipefd[1]); return res; } +#endif TSAN_INTERCEPTOR(long_t, send, int fd, void *buf, long_t len, int flags) { SCOPED_TSAN_INTERCEPTOR(send, fd, buf, len, flags); @@ -1761,7 +1830,7 @@ TSAN_INTERCEPTOR(void*, tmpfile, int fake) { return res; } -#if !SANITIZER_FREEBSD +#if SANITIZER_LINUX TSAN_INTERCEPTOR(void*, tmpfile64, int fake) { SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake); void *res = REAL(tmpfile64)(fake); @@ -1828,7 +1897,7 @@ TSAN_INTERCEPTOR(int, closedir, void *dirp) { return REAL(closedir)(dirp); } -#if !SANITIZER_FREEBSD +#if SANITIZER_LINUX TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) { SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev); if (epfd >= 0) @@ -1845,7 +1914,7 @@ TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) { #define TSAN_MAYBE_INTERCEPT_EPOLL_CTL #endif -#if !SANITIZER_FREEBSD +#if SANITIZER_LINUX TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) { SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout); if (epfd >= 0) @@ -1895,7 +1964,7 @@ static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire, ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack); ThreadRegistryLock l(ctx->thread_registry); ScopedReport rep(ReportTypeErrnoInSignal); - if 
(!IsFiredSuppression(ctx, rep, stack)) { + if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) { rep.AddStack(stack, true); OutputReport(thr, rep); } @@ -1910,10 +1979,8 @@ void ProcessPendingSignals(ThreadState *thr) { return; atomic_store(&sctx->have_pending_signals, 0, memory_order_relaxed); atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed); - // These are too big for stack. - static THREADLOCAL __sanitizer_sigset_t emptyset, oldset; - CHECK_EQ(0, REAL(sigfillset)(&emptyset)); - CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &emptyset, &oldset)); + CHECK_EQ(0, REAL(sigfillset)(&sctx->emptyset)); + CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &sctx->emptyset, &sctx->oldset)); for (int sig = 0; sig < kSigCount; sig++) { SignalDesc *signal = &sctx->pending_signals[sig]; if (signal->armed) { @@ -1922,7 +1989,7 @@ void ProcessPendingSignals(ThreadState *thr) { &signal->siginfo, &signal->ctx); } } - CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &oldset, 0)); + CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &sctx->oldset, 0)); atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed); } @@ -2011,7 +2078,7 @@ TSAN_INTERCEPTOR(int, sigaction, int sig, sigaction_t *act, sigaction_t *old) { sigactions[sig].sa_flags = *(volatile int*)&act->sa_flags; internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask, sizeof(sigactions[sig].sa_mask)); -#if !SANITIZER_FREEBSD +#if !SANITIZER_FREEBSD && !SANITIZER_MAC sigactions[sig].sa_restorer = act->sa_restorer; #endif sigaction_t newact; @@ -2144,6 +2211,7 @@ TSAN_INTERCEPTOR(int, vfork, int fake) { return WRAP(fork)(fake); } +#if !SANITIZER_MAC && !SANITIZER_ANDROID typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size, void *data); struct dl_iterate_phdr_data { @@ -2187,6 +2255,7 @@ TSAN_INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb_t cb, void *data) { int res = REAL(dl_iterate_phdr)(dl_iterate_phdr_cb, &cbdata); return res; } +#endif static int OnExit(ThreadState *thr) { int status = Finalize(thr); @@ -2200,6 +2269,7 @@ struct TsanInterceptorContext { const uptr pc; }; +#if !SANITIZER_MAC static void HandleRecvmsg(ThreadState *thr, uptr pc, __sanitizer_msghdr *msg) { int fds[64]; @@ -2207,6 +2277,7 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc, for (int i = 0; i < cnt; i++) FdEventCreate(thr, pc, fds[i]); } +#endif #include "sanitizer_common/sanitizer_platform_interceptors.h" // Causes interceptor recursion (getaddrinfo() and fopen()) @@ -2277,6 +2348,12 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc, #define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \ libignore()->OnLibraryUnloaded() +#define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \ + Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u) + +#define COMMON_INTERCEPTOR_RELEASE(ctx, u) \ + Release(((TsanInterceptorContext *) ctx)->thr, pc, u) + #define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \ Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path)) @@ -2315,9 +2392,11 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc, MutexRepair(((TsanInterceptorContext *)ctx)->thr, \ ((TsanInterceptorContext *)ctx)->pc, (uptr)m) +#if !SANITIZER_MAC #define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \ HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \ ((TsanInterceptorContext *)ctx)->pc, msg) +#endif #define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \ if (TsanThread *t = GetCurrentThread()) { \ @@ -2349,6 +2428,7 @@ struct ScopedSyscall { } }; +#if !SANITIZER_MAC static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) { 
TSAN_SYSCALL(); MemoryAccessRange(thr, pc, p, s, write); @@ -2402,6 +2482,7 @@ static void syscall_post_fork(uptr pc, int pid) { ForkParentAfter(thr, pc); } } +#endif #define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \ syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false) @@ -2449,28 +2530,32 @@ static void finalize(void *arg) { // Make sure the output is not lost. FlushStreams(); if (status) - REAL(_exit)(status); + Die(); } +#if !SANITIZER_MAC && !SANITIZER_ANDROID static void unreachable() { Report("FATAL: ThreadSanitizer: unreachable called\n"); Die(); } +#endif void InitializeInterceptors() { +#if !SANITIZER_MAC // We need to set it up early, because functions like dlsym() can call it. REAL(memset) = internal_memset; REAL(memcpy) = internal_memcpy; - REAL(memcmp) = internal_memcmp; +#endif // Instruct libc malloc to consume less memory. -#if !SANITIZER_FREEBSD +#if SANITIZER_LINUX mallopt(1, 0); // M_MXFAST mallopt(-3, 32*1024); // M_MMAP_THRESHOLD #endif InitializeCommonInterceptors(); +#if !SANITIZER_MAC // We cannot use TSAN_INTERCEPT to get setjmp addr, // because it does &setjmp and setjmp is not present in some versions of libc. using __interception::GetRealFunctionAddress; @@ -2478,6 +2563,7 @@ void InitializeInterceptors() { GetRealFunctionAddress("_setjmp", (uptr*)&REAL(_setjmp), 0, 0); GetRealFunctionAddress("sigsetjmp", (uptr*)&REAL(sigsetjmp), 0, 0); GetRealFunctionAddress("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0); +#endif TSAN_INTERCEPT(longjmp); TSAN_INTERCEPT(siglongjmp); @@ -2500,7 +2586,6 @@ void InitializeInterceptors() { TSAN_INTERCEPT(memset); TSAN_INTERCEPT(memcpy); TSAN_INTERCEPT(memmove); - TSAN_INTERCEPT(memcmp); TSAN_INTERCEPT(strchr); TSAN_INTERCEPT(strchrnul); TSAN_INTERCEPT(strrchr); @@ -2512,12 +2597,12 @@ void InitializeInterceptors() { TSAN_INTERCEPT(pthread_join); TSAN_INTERCEPT(pthread_detach); - TSAN_INTERCEPT_VER(pthread_cond_init, "GLIBC_2.3.2"); - TSAN_INTERCEPT_VER(pthread_cond_signal, "GLIBC_2.3.2"); - TSAN_INTERCEPT_VER(pthread_cond_broadcast, "GLIBC_2.3.2"); - TSAN_INTERCEPT_VER(pthread_cond_wait, "GLIBC_2.3.2"); - TSAN_INTERCEPT_VER(pthread_cond_timedwait, "GLIBC_2.3.2"); - TSAN_INTERCEPT_VER(pthread_cond_destroy, "GLIBC_2.3.2"); + TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE); + TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE); + TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE); + TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE); + TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE); + TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE); TSAN_INTERCEPT(pthread_mutex_init); TSAN_INTERCEPT(pthread_mutex_destroy); @@ -2546,14 +2631,6 @@ void InitializeInterceptors() { TSAN_INTERCEPT(pthread_once); - TSAN_INTERCEPT(sem_init); - TSAN_INTERCEPT(sem_destroy); - TSAN_INTERCEPT(sem_wait); - TSAN_INTERCEPT(sem_trywait); - TSAN_INTERCEPT(sem_timedwait); - TSAN_INTERCEPT(sem_post); - TSAN_INTERCEPT(sem_getvalue); - TSAN_INTERCEPT(stat); TSAN_MAYBE_INTERCEPT___XSTAT; TSAN_MAYBE_INTERCEPT_STAT64; @@ -2621,25 +2698,68 @@ void InitializeInterceptors() { TSAN_INTERCEPT(fork); TSAN_INTERCEPT(vfork); +#if !SANITIZER_ANDROID TSAN_INTERCEPT(dl_iterate_phdr); +#endif TSAN_INTERCEPT(on_exit); TSAN_INTERCEPT(__cxa_atexit); TSAN_INTERCEPT(_exit); +#if !SANITIZER_MAC && !SANITIZER_ANDROID // Need to set it up, because interceptors check that the function is resolved. // But atexit is emitted directly into the module, so can't be resolved.
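// (REAL(atexit) is never actually called: the atexit interceptor forwards to
// setup_at_exit_wrapper, which registers a stateful wrapper via the
// __cxa_atexit machinery instead. The unreachable stub exists only to satisfy
// the is-resolved check mentioned above.)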
REAL(atexit) = (int(*)(void(*)()))unreachable; +#endif + if (REAL(__cxa_atexit)(&finalize, 0, 0)) { Printf("ThreadSanitizer: failed to setup atexit callback\n"); Die(); } +#if !SANITIZER_MAC if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) { Printf("ThreadSanitizer: failed to create thread key\n"); Die(); } +#endif FdInit(); } } // namespace __tsan + +// Invisible barrier for tests. +// There were several unsuccessful iterations for this functionality: +// 1. Initially it was implemented in user code using +// REAL(pthread_barrier_wait). But pthread_barrier_wait is not supported on +// MacOS. Futexes are Linux-specific, for that matter. +// 2. Then we switched to atomics+usleep(10). But usleep produced parasitic +// "as-if synchronized via sleep" messages in reports which failed some +// output tests. +// 3. Then we switched to atomics+sched_yield. But this produced tons of tsan- +// visible events, which led to "failed to restore stack trace" failures. +// Note that the no_sanitize_thread attribute does not turn off atomic +// interception, so attaching it to a function defined in user code does not +// help. +// That's why we now have what we have. +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_testonly_barrier_init(u64 *barrier, u32 count) { + if (count >= (1 << 8)) { + Printf("barrier_init: count is too large (%d)\n", count); + Die(); + } + // The low 8 bits hold the thread count; the remaining bits count the + // threads that have entered. + *barrier = count; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +void __tsan_testonly_barrier_wait(u64 *barrier) { + unsigned old = __atomic_fetch_add(barrier, 1 << 8, __ATOMIC_RELAXED); + unsigned old_epoch = (old >> 8) / (old & 0xff); + for (;;) { + unsigned cur = __atomic_load_n(barrier, __ATOMIC_RELAXED); + unsigned cur_epoch = (cur >> 8) / (cur & 0xff); + if (cur_epoch != old_epoch) + return; + internal_sched_yield(); + } +} diff --git a/lib/tsan/rtl/tsan_interceptors.h b/lib/tsan/rtl/tsan_interceptors.h index 49b79a7c5f9e..d831620cfafe 100644 --- a/lib/tsan/rtl/tsan_interceptors.h +++ b/lib/tsan/rtl/tsan_interceptors.h @@ -10,6 +10,8 @@ class ScopedInterceptor { public: ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc); ~ScopedInterceptor(); + void UserCallbackStart(); + void UserCallbackEnd(); private: ThreadState *const thr_; const uptr pc_; @@ -26,6 +28,24 @@ class ScopedInterceptor { (void)pc; \ /**/ +#define SCOPED_TSAN_INTERCEPTOR(func, ...) \ + SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \ + if (REAL(func) == 0) { \ + Report("FATAL: ThreadSanitizer: failed to intercept %s\n", #func); \ + Die(); \ + } \ + if (thr->ignore_interceptors || thr->in_ignored_lib) \ + return REAL(func)(__VA_ARGS__); \ +/**/ + +#define SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START() \ + si.UserCallbackStart(); + +#define SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END() \ + si.UserCallbackEnd(); + +#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__) + #if SANITIZER_FREEBSD #define __libc_free __free #define __libc_malloc __malloc diff --git a/lib/tsan/rtl/tsan_interceptors_mac.cc b/lib/tsan/rtl/tsan_interceptors_mac.cc new file mode 100644 index 000000000000..2bf7ad9861c4 --- /dev/null +++ b/lib/tsan/rtl/tsan_interceptors_mac.cc @@ -0,0 +1,91 @@ +//===-- tsan_interceptors_mac.cc ------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details.
+// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +// Mac-specific interceptors. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_MAC + +#include "interception/interception.h" +#include "tsan_interceptors.h" + +#include <libkern/OSAtomic.h> + +namespace __tsan { + +TSAN_INTERCEPTOR(void, OSSpinLockLock, volatile OSSpinLock *lock) { + CHECK(!cur_thread()->is_dead); + if (!cur_thread()->is_inited) { + return REAL(OSSpinLockLock)(lock); + } + SCOPED_TSAN_INTERCEPTOR(OSSpinLockLock, lock); + REAL(OSSpinLockLock)(lock); + Acquire(thr, pc, (uptr)lock); +} + +TSAN_INTERCEPTOR(bool, OSSpinLockTry, volatile OSSpinLock *lock) { + CHECK(!cur_thread()->is_dead); + if (!cur_thread()->is_inited) { + return REAL(OSSpinLockTry)(lock); + } + SCOPED_TSAN_INTERCEPTOR(OSSpinLockTry, lock); + bool result = REAL(OSSpinLockTry)(lock); + if (result) + Acquire(thr, pc, (uptr)lock); + return result; +} + +TSAN_INTERCEPTOR(void, OSSpinLockUnlock, volatile OSSpinLock *lock) { + CHECK(!cur_thread()->is_dead); + if (!cur_thread()->is_inited) { + return REAL(OSSpinLockUnlock)(lock); + } + SCOPED_TSAN_INTERCEPTOR(OSSpinLockUnlock, lock); + Release(thr, pc, (uptr)lock); + REAL(OSSpinLockUnlock)(lock); +} + +TSAN_INTERCEPTOR(void, os_lock_lock, void *lock) { + CHECK(!cur_thread()->is_dead); + if (!cur_thread()->is_inited) { + return REAL(os_lock_lock)(lock); + } + SCOPED_TSAN_INTERCEPTOR(os_lock_lock, lock); + REAL(os_lock_lock)(lock); + Acquire(thr, pc, (uptr)lock); +} + +TSAN_INTERCEPTOR(bool, os_lock_trylock, void *lock) { + CHECK(!cur_thread()->is_dead); + if (!cur_thread()->is_inited) { + return REAL(os_lock_trylock)(lock); + } + SCOPED_TSAN_INTERCEPTOR(os_lock_trylock, lock); + bool result = REAL(os_lock_trylock)(lock); + if (result) + Acquire(thr, pc, (uptr)lock); + return result; +} + +TSAN_INTERCEPTOR(void, os_lock_unlock, void *lock) { + CHECK(!cur_thread()->is_dead); + if (!cur_thread()->is_inited) { + return REAL(os_lock_unlock)(lock); + } + SCOPED_TSAN_INTERCEPTOR(os_lock_unlock, lock); + Release(thr, pc, (uptr)lock); + REAL(os_lock_unlock)(lock); +} + +} // namespace __tsan + +#endif // SANITIZER_MAC diff --git a/lib/tsan/rtl/tsan_interface_ann.cc b/lib/tsan/rtl/tsan_interface_ann.cc index fd3c846678f5..62db79661625 100644 --- a/lib/tsan/rtl/tsan_interface_ann.cc +++ b/lib/tsan/rtl/tsan_interface_ann.cc @@ -63,8 +63,8 @@ static const int kMaxDescLen = 128; struct ExpectRace { ExpectRace *next; ExpectRace *prev; - int hitcount; - int addcount; + atomic_uintptr_t hitcount; + atomic_uintptr_t addcount; uptr addr; uptr size; char *file; @@ -90,7 +90,8 @@ static void AddExpectRace(ExpectRace *list, ExpectRace *race = list->next; for (; race != list; race = race->next) { if (race->addr == addr && race->size == size) { - race->addcount++; + atomic_store_relaxed(&race->addcount, + atomic_load_relaxed(&race->addcount) + 1); return; } } @@ -100,8 +101,8 @@ static void AddExpectRace(ExpectRace *list, race->file = f; race->line = l; race->desc[0] = 0; - race->hitcount = 0; - race->addcount = 1; + atomic_store_relaxed(&race->hitcount, 0); + atomic_store_relaxed(&race->addcount, 1); if (desc) { int i = 0; for (; i < kMaxDescLen - 1 && desc[i]; i++) @@ -130,7 +131,7 @@ static bool CheckContains(ExpectRace *list, uptr addr, uptr size) { return false; DPrintf("Hit expected/benign race: %s addr=%zx:%d %s:%d\n", 
race->desc, race->addr, (int)race->size, race->file, race->line); - race->hitcount++; + atomic_fetch_add(&race->hitcount, 1, memory_order_relaxed); return true; } @@ -146,7 +147,7 @@ void InitializeDynamicAnnotations() { } bool IsExpectedReport(uptr addr, uptr size) { - Lock lock(&dyn_ann_ctx->mtx); + ReadLock lock(&dyn_ann_ctx->mtx); if (CheckContains(&dyn_ann_ctx->expect, addr, size)) return true; if (CheckContains(&dyn_ann_ctx->benign, addr, size)) @@ -155,20 +156,21 @@ bool IsExpectedReport(uptr addr, uptr size) { } static void CollectMatchedBenignRaces(Vector<ExpectRace> *matched, - int *unique_count, int *hit_count, int ExpectRace::*counter) { + int *unique_count, int *hit_count, atomic_uintptr_t ExpectRace::*counter) { ExpectRace *list = &dyn_ann_ctx->benign; for (ExpectRace *race = list->next; race != list; race = race->next) { (*unique_count)++; - if (race->*counter == 0) + const uptr cnt = atomic_load_relaxed(&(race->*counter)); + if (cnt == 0) continue; - (*hit_count) += race->*counter; + *hit_count += cnt; uptr i = 0; for (; i < matched->Size(); i++) { ExpectRace *race0 = &(*matched)[i]; if (race->line == race0->line && internal_strcmp(race->file, race0->file) == 0 && internal_strcmp(race->desc, race0->desc) == 0) { - race0->*counter += race->*counter; + atomic_fetch_add(&(race0->*counter), cnt, memory_order_relaxed); break; } } @@ -193,8 +195,8 @@ void PrintMatchedBenignRaces() { hit_count, (int)internal_getpid()); for (uptr i = 0; i < hit_matched.Size(); i++) { Printf("%d %s:%d %s\n", - hit_matched[i].hitcount, hit_matched[i].file, - hit_matched[i].line, hit_matched[i].desc); + atomic_load_relaxed(&hit_matched[i].hitcount), + hit_matched[i].file, hit_matched[i].line, hit_matched[i].desc); } } if (hit_matched.Size()) { @@ -203,8 +205,8 @@ void PrintMatchedBenignRaces() { add_count, unique_count, (int)internal_getpid()); for (uptr i = 0; i < add_matched.Size(); i++) { Printf("%d %s:%d %s\n", - add_matched[i].addcount, add_matched[i].file, - add_matched[i].line, add_matched[i].desc); + atomic_load_relaxed(&add_matched[i].addcount), + add_matched[i].file, add_matched[i].line, add_matched[i].desc); } } } @@ -303,7 +305,7 @@ void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) { Lock lock(&dyn_ann_ctx->mtx); while (dyn_ann_ctx->expect.next != &dyn_ann_ctx->expect) { ExpectRace *race = dyn_ann_ctx->expect.next; - if (race->hitcount == 0) { + if (atomic_load_relaxed(&race->hitcount) == 0) { ctx->nmissed_expected++; ReportMissedExpectedRace(race); } diff --git a/lib/tsan/rtl/tsan_libdispatch_mac.cc b/lib/tsan/rtl/tsan_libdispatch_mac.cc new file mode 100644 index 000000000000..617dc91b33d0 --- /dev/null +++ b/lib/tsan/rtl/tsan_libdispatch_mac.cc @@ -0,0 +1,284 @@ +//===-- tsan_libdispatch_mac.cc -------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +// Mac-specific libdispatch (GCD) support. 
+//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_MAC + +#include "sanitizer_common/sanitizer_common.h" +#include "interception/interception.h" +#include "tsan_interceptors.h" +#include "tsan_platform.h" +#include "tsan_rtl.h" + +#include <Block.h> +#include <dispatch/dispatch.h> +#include <pthread.h> + +typedef long long_t; // NOLINT + +namespace __tsan { + +typedef struct { + dispatch_queue_t queue; + void *orig_context; + dispatch_function_t orig_work; + uptr object_to_acquire; + dispatch_object_t object_to_release; +} tsan_block_context_t; + +// The offsets of different fields of the dispatch_queue_t structure, exported +// by libdispatch.dylib. +extern "C" struct dispatch_queue_offsets_s { + const uint16_t dqo_version; + const uint16_t dqo_label; + const uint16_t dqo_label_size; + const uint16_t dqo_flags; + const uint16_t dqo_flags_size; + const uint16_t dqo_serialnum; + const uint16_t dqo_serialnum_size; + const uint16_t dqo_width; + const uint16_t dqo_width_size; + const uint16_t dqo_running; + const uint16_t dqo_running_size; + const uint16_t dqo_suspend_cnt; + const uint16_t dqo_suspend_cnt_size; + const uint16_t dqo_target_queue; + const uint16_t dqo_target_queue_size; + const uint16_t dqo_priority; + const uint16_t dqo_priority_size; +} dispatch_queue_offsets; + +static bool IsQueueSerial(dispatch_queue_t q) { + CHECK_EQ(dispatch_queue_offsets.dqo_width_size, 2); + uptr width = *(uint16_t *)(((uptr)q) + dispatch_queue_offsets.dqo_width); + CHECK_NE(width, 0); + return width == 1; +} + +static tsan_block_context_t *AllocContext(ThreadState *thr, uptr pc, + dispatch_queue_t queue, + void *orig_context, + dispatch_function_t orig_work) { + tsan_block_context_t *new_context = + (tsan_block_context_t *)user_alloc(thr, pc, sizeof(tsan_block_context_t)); + new_context->queue = queue; + new_context->orig_context = orig_context; + new_context->orig_work = orig_work; + new_context->object_to_acquire = (uptr)new_context; + new_context->object_to_release = nullptr; + return new_context; +} + +static void dispatch_callback_wrap_acquire(void *param) { + SCOPED_INTERCEPTOR_RAW(dispatch_async_f_callback_wrap); + tsan_block_context_t *context = (tsan_block_context_t *)param; + Acquire(thr, pc, context->object_to_acquire); + + // Extra retain/release is required for dispatch groups. We use the group + // itself to synchronize, but in a notification (dispatch_group_notify + // callback), it may already have been disposed. To solve this, we retain the + // group and release it here. + if (context->object_to_release) dispatch_release(context->object_to_release); + + // In serial queues, work items can be executed on different threads, so we + // need to explicitly synchronize on the queue itself.
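A sketch of the situation this comment describes (assumed typical client code, not from the diff): consecutive items on a serial queue may run on different worker threads while mutating shared state with no user-level locking, so the only happens-before edge TSan can rely on is the one modeled on the queue object itself.

#include <dispatch/dispatch.h>

void serial_queue_example() {
  dispatch_queue_t q =
      dispatch_queue_create("example.serial", DISPATCH_QUEUE_SERIAL);
  __block int counter = 0;
  dispatch_async(q, ^{ counter++; });  // may execute on worker thread A
  dispatch_async(q, ^{ counter++; });  // may execute on worker thread B;
                                       // ordered only by the serial queue
}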
+ if (IsQueueSerial(context->queue)) Acquire(thr, pc, (uptr)context->queue); + SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); + context->orig_work(context->orig_context); + SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); + if (IsQueueSerial(context->queue)) Release(thr, pc, (uptr)context->queue); + user_free(thr, pc, context); +} + +static void invoke_and_release_block(void *param) { + dispatch_block_t block = (dispatch_block_t)param; + block(); + Block_release(block); +} + +#define DISPATCH_INTERCEPT_B(name) \ + TSAN_INTERCEPTOR(void, name, dispatch_queue_t q, dispatch_block_t block) { \ + SCOPED_TSAN_INTERCEPTOR(name, q, block); \ + SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \ + dispatch_block_t heap_block = Block_copy(block); \ + SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \ + tsan_block_context_t *new_context = \ + AllocContext(thr, pc, q, heap_block, &invoke_and_release_block); \ + Release(thr, pc, (uptr)new_context); \ + SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \ + REAL(name##_f)(q, new_context, dispatch_callback_wrap_acquire); \ + SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \ + } + +#define DISPATCH_INTERCEPT_F(name) \ + TSAN_INTERCEPTOR(void, name, dispatch_queue_t q, void *context, \ + dispatch_function_t work) { \ + SCOPED_TSAN_INTERCEPTOR(name, q, context, work); \ + tsan_block_context_t *new_context = \ + AllocContext(thr, pc, q, context, work); \ + Release(thr, pc, (uptr)new_context); \ + SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \ + REAL(name)(q, new_context, dispatch_callback_wrap_acquire); \ + SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \ + } + +// We wrap dispatch_async, dispatch_sync and friends where we allocate a new +// context, which is used to synchronize (we release the context before +// submitting, and the callback acquires it before executing the original +// callback). +DISPATCH_INTERCEPT_B(dispatch_async) +DISPATCH_INTERCEPT_B(dispatch_barrier_async) +DISPATCH_INTERCEPT_F(dispatch_async_f) +DISPATCH_INTERCEPT_F(dispatch_barrier_async_f) +DISPATCH_INTERCEPT_B(dispatch_sync) +DISPATCH_INTERCEPT_B(dispatch_barrier_sync) +DISPATCH_INTERCEPT_F(dispatch_sync_f) +DISPATCH_INTERCEPT_F(dispatch_barrier_sync_f) + +// GCD's dispatch_once implementation has a fast path that contains a racy read +// and it is inlined into the user's code. Furthermore, this fast path doesn't +// establish a proper happens-before relation between the initialization and +// code following the call to dispatch_once. We could deal with this in +// instrumented code, but there's not much we can do about it in system +// libraries. Let's disable the fast path (by never storing the value ~0 into +// the predicate), so the interceptor is always called, and let's add proper +// release and acquire semantics. Since TSan does not see its own atomic stores, +// the race on the predicate won't be reported - the only accesses to it that +// TSan sees are the loads on the fast path. Loads don't race. Secondly, +// dispatch_once is both a macro and a real function; we want to intercept the +// function, so we need to undefine the macro.
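A usage sketch (assumed client code, not from the diff) of the pattern the interceptor below covers. Because the interceptor leaves the predicate at 1 while the block runs and 2 afterwards - never ~0 - GCD's inlined fast path never short-circuits, so every call goes through the interceptor and gets proper acquire semantics:

#include <dispatch/dispatch.h>

static dispatch_once_t once_token;  // the 'predicate' the comment refers to
static int *shared_config;

int *get_config() {
  dispatch_once(&once_token, ^{
    shared_config = new int(42);    // one-time initialization
  });
  return shared_config;  // reads after the acquire modeled on &once_token
}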
+#undef dispatch_once +TSAN_INTERCEPTOR(void, dispatch_once, dispatch_once_t *predicate, + dispatch_block_t block) { + SCOPED_TSAN_INTERCEPTOR(dispatch_once, predicate, block); + atomic_uint32_t *a = reinterpret_cast<atomic_uint32_t *>(predicate); + u32 v = atomic_load(a, memory_order_acquire); + if (v == 0 && + atomic_compare_exchange_strong(a, &v, 1, memory_order_relaxed)) { + SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); + block(); + SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); + Release(thr, pc, (uptr)a); + atomic_store(a, 2, memory_order_release); + } else { + while (v != 2) { + internal_sched_yield(); + v = atomic_load(a, memory_order_acquire); + } + Acquire(thr, pc, (uptr)a); + } +} + +#undef dispatch_once_f +TSAN_INTERCEPTOR(void, dispatch_once_f, dispatch_once_t *predicate, + void *context, dispatch_function_t function) { + SCOPED_TSAN_INTERCEPTOR(dispatch_once_f, predicate, context, function); + SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); + WRAP(dispatch_once)(predicate, ^(void) { + function(context); + }); + SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); +} + +TSAN_INTERCEPTOR(long_t, dispatch_semaphore_signal, + dispatch_semaphore_t dsema) { + SCOPED_TSAN_INTERCEPTOR(dispatch_semaphore_signal, dsema); + Release(thr, pc, (uptr)dsema); + return REAL(dispatch_semaphore_signal)(dsema); +} + +TSAN_INTERCEPTOR(long_t, dispatch_semaphore_wait, dispatch_semaphore_t dsema, + dispatch_time_t timeout) { + SCOPED_TSAN_INTERCEPTOR(dispatch_semaphore_wait, dsema, timeout); + long_t result = REAL(dispatch_semaphore_wait)(dsema, timeout); + if (result == 0) Acquire(thr, pc, (uptr)dsema); + return result; +} + +TSAN_INTERCEPTOR(long_t, dispatch_group_wait, dispatch_group_t group, + dispatch_time_t timeout) { + SCOPED_TSAN_INTERCEPTOR(dispatch_group_wait, group, timeout); + long_t result = REAL(dispatch_group_wait)(group, timeout); + if (result == 0) Acquire(thr, pc, (uptr)group); + return result; +} + +TSAN_INTERCEPTOR(void, dispatch_group_leave, dispatch_group_t group) { + SCOPED_TSAN_INTERCEPTOR(dispatch_group_leave, group); + Release(thr, pc, (uptr)group); + REAL(dispatch_group_leave)(group); +} + +TSAN_INTERCEPTOR(void, dispatch_group_async, dispatch_group_t group, + dispatch_queue_t queue, dispatch_block_t block) { + SCOPED_TSAN_INTERCEPTOR(dispatch_group_async, group, queue, block); + dispatch_retain(group); + dispatch_group_enter(group); + WRAP(dispatch_async)(queue, ^(void) { + block(); + WRAP(dispatch_group_leave)(group); + dispatch_release(group); + }); +} + +TSAN_INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group, + dispatch_queue_t queue, void *context, + dispatch_function_t work) { + SCOPED_TSAN_INTERCEPTOR(dispatch_group_async_f, group, queue, context, work); + dispatch_retain(group); + dispatch_group_enter(group); + WRAP(dispatch_async)(queue, ^(void) { + work(context); + WRAP(dispatch_group_leave)(group); + dispatch_release(group); + }); +} + +TSAN_INTERCEPTOR(void, dispatch_group_notify, dispatch_group_t group, + dispatch_queue_t q, dispatch_block_t block) { + SCOPED_TSAN_INTERCEPTOR(dispatch_group_notify, group, q, block); + SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); + dispatch_block_t heap_block = Block_copy(block); + SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); + tsan_block_context_t *new_context = + AllocContext(thr, pc, q, heap_block, &invoke_and_release_block); + new_context->object_to_acquire = (uptr)group; + + // Will be released in dispatch_callback_wrap_acquire. 
+ new_context->object_to_release = group; + dispatch_retain(group); + + Release(thr, pc, (uptr)group); + REAL(dispatch_group_notify_f)(group, q, new_context, + dispatch_callback_wrap_acquire); +} + +TSAN_INTERCEPTOR(void, dispatch_group_notify_f, dispatch_group_t group, + dispatch_queue_t q, void *context, dispatch_function_t work) { + SCOPED_TSAN_INTERCEPTOR(dispatch_group_notify_f, group, q, context, work); + tsan_block_context_t *new_context = AllocContext(thr, pc, q, context, work); + new_context->object_to_acquire = (uptr)group; + + // Will be released in dispatch_callback_wrap_acquire. + new_context->object_to_release = group; + dispatch_retain(group); + + Release(thr, pc, (uptr)group); + REAL(dispatch_group_notify_f)(group, q, new_context, + dispatch_callback_wrap_acquire); +} + +} // namespace __tsan + +#endif // SANITIZER_MAC diff --git a/lib/tsan/rtl/tsan_malloc_mac.cc b/lib/tsan/rtl/tsan_malloc_mac.cc new file mode 100644 index 000000000000..7fd94273c314 --- /dev/null +++ b/lib/tsan/rtl/tsan_malloc_mac.cc @@ -0,0 +1,65 @@ +//===-- tsan_malloc_mac.cc ------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +// Mac-specific malloc interception. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_MAC + +#include "tsan_interceptors.h" +#include "tsan_stack_trace.h" + +using namespace __tsan; +#define COMMON_MALLOC_ZONE_NAME "tsan" +#define COMMON_MALLOC_ENTER() +#define COMMON_MALLOC_SANITIZER_INITIALIZED (cur_thread()->is_inited) +#define COMMON_MALLOC_FORCE_LOCK() +#define COMMON_MALLOC_FORCE_UNLOCK() +#define COMMON_MALLOC_MEMALIGN(alignment, size) \ + void *p = \ + user_alloc(cur_thread(), StackTrace::GetCurrentPc(), size, alignment) +#define COMMON_MALLOC_MALLOC(size) \ + if (cur_thread()->in_symbolizer) \ + return REAL(malloc)(size); \ + SCOPED_INTERCEPTOR_RAW(malloc, size); \ + void *p = user_alloc(thr, pc, size) +#define COMMON_MALLOC_REALLOC(ptr, size) \ + if (cur_thread()->in_symbolizer) \ + return REAL(realloc)(ptr, size); \ + SCOPED_INTERCEPTOR_RAW(realloc, ptr, size); \ + void *p = user_realloc(thr, pc, ptr, size) +#define COMMON_MALLOC_CALLOC(count, size) \ + if (cur_thread()->in_symbolizer) \ + return REAL(calloc)(count, size); \ + SCOPED_INTERCEPTOR_RAW(calloc, size, count); \ + void *p = user_calloc(thr, pc, size, count) +#define COMMON_MALLOC_VALLOC(size) \ + if (cur_thread()->in_symbolizer) \ + return REAL(valloc)(size); \ + SCOPED_INTERCEPTOR_RAW(valloc, size); \ + void *p = user_alloc(thr, pc, size, GetPageSizeCached()) +#define COMMON_MALLOC_FREE(ptr) \ + if (cur_thread()->in_symbolizer) \ + return REAL(free)(ptr); \ + SCOPED_INTERCEPTOR_RAW(free, ptr); \ + user_free(thr, pc, ptr) +#define COMMON_MALLOC_SIZE(ptr) \ + uptr size = user_alloc_usable_size(ptr); +#define COMMON_MALLOC_FILL_STATS(zone, stats) +#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \ + (void)zone_name; \ + Report("mz_realloc(%p) -- attempting to realloc unallocated memory.\n", ptr); +#define COMMON_MALLOC_NAMESPACE __tsan + +#include "sanitizer_common/sanitizer_malloc_mac.inc" + +#endif diff --git a/lib/tsan/rtl/tsan_mman.cc b/lib/tsan/rtl/tsan_mman.cc index 
12a616ff5ae8..7247c6e00035 100644 --- a/lib/tsan/rtl/tsan_mman.cc +++ b/lib/tsan/rtl/tsan_mman.cc @@ -19,12 +19,14 @@ #include "tsan_flags.h" // May be overridden by the front-end. -extern "C" void WEAK __sanitizer_malloc_hook(void *ptr, uptr size) { +SANITIZER_WEAK_DEFAULT_IMPL +void __sanitizer_malloc_hook(void *ptr, uptr size) { (void)ptr; (void)size; } -extern "C" void WEAK __sanitizer_free_hook(void *ptr) { +SANITIZER_WEAK_DEFAULT_IMPL +void __sanitizer_free_hook(void *ptr) { (void)ptr; } @@ -80,17 +82,17 @@ void AllocatorPrintStats() { } static void SignalUnsafeCall(ThreadState *thr, uptr pc) { - if (atomic_load(&thr->in_signal_handler, memory_order_relaxed) == 0 || + if (atomic_load_relaxed(&thr->in_signal_handler) == 0 || !flags()->report_signal_unsafe) return; VarSizeStackTrace stack; ObtainCurrentStack(thr, pc, &stack); + if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack)) + return; ThreadRegistryLock l(ctx->thread_registry); ScopedReport rep(ReportTypeSignalUnsafe); - if (!IsFiredSuppression(ctx, rep, stack)) { - rep.AddStack(stack, true); - OutputReport(thr, rep); - } + rep.AddStack(stack, true); + OutputReport(thr, rep); } void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) { diff --git a/lib/tsan/rtl/tsan_mman.h b/lib/tsan/rtl/tsan_mman.h index 5ff956d827f6..b419b58ca457 100644 --- a/lib/tsan/rtl/tsan_mman.h +++ b/lib/tsan/rtl/tsan_mman.h @@ -20,6 +20,7 @@ namespace __tsan { const uptr kDefaultAlignment = 16; void InitializeAllocator(); +void ReplaceSystemMalloc(); void AllocatorThreadStart(ThreadState *thr); void AllocatorThreadFinish(ThreadState *thr); void AllocatorPrintStats(); diff --git a/lib/tsan/rtl/tsan_mutex.cc b/lib/tsan/rtl/tsan_mutex.cc index dc5a462a8081..9dd24803b183 100644 --- a/lib/tsan/rtl/tsan_mutex.cc +++ b/lib/tsan/rtl/tsan_mutex.cc @@ -41,6 +41,8 @@ static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = { /*9 MutexTypeMBlock*/ {MutexTypeSyncVar}, /*10 MutexTypeJavaMBlock*/ {MutexTypeSyncVar}, /*11 MutexTypeDDetector*/ {}, + /*12 MutexTypeFired*/ {MutexTypeLeaf}, + /*13 MutexTypeRacy*/ {MutexTypeLeaf}, }; static bool CanLockAdj[MutexTypeCount][MutexTypeCount]; diff --git a/lib/tsan/rtl/tsan_mutex.h b/lib/tsan/rtl/tsan_mutex.h index 88fad57c78a0..27f55385c959 100644 --- a/lib/tsan/rtl/tsan_mutex.h +++ b/lib/tsan/rtl/tsan_mutex.h @@ -32,6 +32,8 @@ enum MutexType { MutexTypeMBlock, MutexTypeJavaMBlock, MutexTypeDDetector, + MutexTypeFired, + MutexTypeRacy, // This must be the last. MutexTypeCount diff --git a/lib/tsan/rtl/tsan_new_delete.cc b/lib/tsan/rtl/tsan_new_delete.cc index 2d9d044e42fa..ebb422cf2023 100644 --- a/lib/tsan/rtl/tsan_new_delete.cc +++ b/lib/tsan/rtl/tsan_new_delete.cc @@ -11,6 +11,7 @@ // // Interceptors for operators new and delete.
//===----------------------------------------------------------------------===// +#include "interception/interception.h" #include "sanitizer_common/sanitizer_internal_defs.h" #include "tsan_interceptors.h" @@ -20,6 +21,13 @@ namespace std { struct nothrow_t {}; } // namespace std +DECLARE_REAL(void *, malloc, uptr size) +DECLARE_REAL(void, free, void *ptr) +#if SANITIZER_MAC || SANITIZER_ANDROID +#define __libc_malloc REAL(malloc) +#define __libc_free REAL(free) +#endif + #define OPERATOR_NEW_BODY(mangled_name) \ if (cur_thread()->in_symbolizer) \ return __libc_malloc(size); \ @@ -64,14 +72,14 @@ void *operator new[](__sanitizer::uptr size, std::nothrow_t const&) { user_free(thr, pc, ptr); SANITIZER_INTERFACE_ATTRIBUTE -void operator delete(void *ptr) throw(); -void operator delete(void *ptr) throw() { +void operator delete(void *ptr) NOEXCEPT; +void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY(_ZdlPv); } SANITIZER_INTERFACE_ATTRIBUTE -void operator delete[](void *ptr) throw(); -void operator delete[](void *ptr) throw() { +void operator delete[](void *ptr) NOEXCEPT; +void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY(_ZdaPv); } diff --git a/lib/tsan/rtl/tsan_platform.h b/lib/tsan/rtl/tsan_platform.h index 135e16027132..c2b487155300 100644 --- a/lib/tsan/rtl/tsan_platform.h +++ b/lib/tsan/rtl/tsan_platform.h @@ -41,21 +41,23 @@ C/C++ on linux/x86_64 and freebsd/x86_64 7e00 0000 0000 - 7e80 0000 0000: - 7e80 0000 0000 - 8000 0000 0000: modules and main thread stack */ -const uptr kMetaShadowBeg = 0x300000000000ull; -const uptr kMetaShadowEnd = 0x400000000000ull; -const uptr kTraceMemBeg = 0x600000000000ull; -const uptr kTraceMemEnd = 0x620000000000ull; -const uptr kShadowBeg = 0x020000000000ull; -const uptr kShadowEnd = 0x100000000000ull; -const uptr kHeapMemBeg = 0x7d0000000000ull; -const uptr kHeapMemEnd = 0x7e0000000000ull; -const uptr kLoAppMemBeg = 0x000000001000ull; -const uptr kLoAppMemEnd = 0x010000000000ull; -const uptr kHiAppMemBeg = 0x7e8000000000ull; -const uptr kHiAppMemEnd = 0x800000000000ull; -const uptr kAppMemMsk = 0x7c0000000000ull; -const uptr kAppMemXor = 0x020000000000ull; -const uptr kVdsoBeg = 0xf000000000000000ull; +struct Mapping { + static const uptr kMetaShadowBeg = 0x300000000000ull; + static const uptr kMetaShadowEnd = 0x400000000000ull; + static const uptr kTraceMemBeg = 0x600000000000ull; + static const uptr kTraceMemEnd = 0x620000000000ull; + static const uptr kShadowBeg = 0x020000000000ull; + static const uptr kShadowEnd = 0x100000000000ull; + static const uptr kHeapMemBeg = 0x7d0000000000ull; + static const uptr kHeapMemEnd = 0x7e0000000000ull; + static const uptr kLoAppMemBeg = 0x000000001000ull; + static const uptr kLoAppMemEnd = 0x010000000000ull; + static const uptr kHiAppMemBeg = 0x7e8000000000ull; + static const uptr kHiAppMemEnd = 0x800000000000ull; + static const uptr kAppMemMsk = 0x7c0000000000ull; + static const uptr kAppMemXor = 0x020000000000ull; + static const uptr kVdsoBeg = 0xf000000000000000ull; +}; #elif defined(__mips64) /* C/C++ on linux/mips64 @@ -71,69 +73,181 @@ fe00 0000 00 - ff00 0000 00: heap ff00 0000 00 - ff80 0000 00: - ff80 0000 00 - ffff ffff ff: modules and main thread stack */ -const uptr kMetaShadowBeg = 0x3000000000ull; -const uptr kMetaShadowEnd = 0x4000000000ull; -const uptr kTraceMemBeg = 0x6000000000ull; -const uptr kTraceMemEnd = 0x6200000000ull; -const uptr kShadowBeg = 0x1400000000ull; -const uptr kShadowEnd = 0x2400000000ull; -const uptr kHeapMemBeg = 0xfe00000000ull; -const uptr 
kHeapMemEnd = 0xff00000000ull; -const uptr kLoAppMemBeg = 0x0100000000ull; -const uptr kLoAppMemEnd = 0x0200000000ull; -const uptr kHiAppMemBeg = 0xff80000000ull; -const uptr kHiAppMemEnd = 0xffffffffffull; -const uptr kAppMemMsk = 0xfc00000000ull; -const uptr kAppMemXor = 0x0400000000ull; -const uptr kVdsoBeg = 0xfffff00000ull; -#endif - -ALWAYS_INLINE -bool IsAppMem(uptr mem) { - return (mem >= kHeapMemBeg && mem < kHeapMemEnd) || - (mem >= kLoAppMemBeg && mem < kLoAppMemEnd) || - (mem >= kHiAppMemBeg && mem < kHiAppMemEnd); -} - -ALWAYS_INLINE -bool IsShadowMem(uptr mem) { - return mem >= kShadowBeg && mem <= kShadowEnd; -} +struct Mapping { + static const uptr kMetaShadowBeg = 0x3000000000ull; + static const uptr kMetaShadowEnd = 0x4000000000ull; + static const uptr kTraceMemBeg = 0x6000000000ull; + static const uptr kTraceMemEnd = 0x6200000000ull; + static const uptr kShadowBeg = 0x1400000000ull; + static const uptr kShadowEnd = 0x2400000000ull; + static const uptr kHeapMemBeg = 0xfe00000000ull; + static const uptr kHeapMemEnd = 0xff00000000ull; + static const uptr kLoAppMemBeg = 0x0100000000ull; + static const uptr kLoAppMemEnd = 0x0200000000ull; + static const uptr kHiAppMemBeg = 0xff80000000ull; + static const uptr kHiAppMemEnd = 0xffffffffffull; + static const uptr kAppMemMsk = 0xfc00000000ull; + static const uptr kAppMemXor = 0x0400000000ull; + static const uptr kVdsoBeg = 0xfffff00000ull; +}; +#elif defined(__aarch64__) +// AArch64 supports multiple VMA sizes, which leads to multiple address +// transformation functions. To support these multiple VMA transformations and +// mappings, the TSan runtime for AArch64 uses an external memory read +// (vmaSize) to select which mapping to use. Although slower, this makes the +// same instrumented binary run on multiple kernels.
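The per-architecture Mapping structs that follow are all consumed through the same pattern: a one-time runtime probe of the address-space size selects one of several compile-time layouts. A condensed standalone C++ sketch (hypothetical, simplified constants; the real runtime computes vmaSize as MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1 in InitializePlatformEarly, shown later in this patch):

#include <cstdio>

typedef unsigned long long uptr;

// Two per-kernel layouts, statically known; reduced to one constant each.
struct Mapping39 { static const uptr kShadowBeg = 0x0800000000ull; };
struct Mapping42 { static const uptr kShadowBeg = 0x10000000000ull; };

static uptr vmaSize;  // detected once at startup

static uptr DetectVmaSize() {
  // A current stack address lies near the top of the usable address space,
  // so the position of its most significant set bit gives the VMA size.
  uptr frame = (uptr)__builtin_frame_address(0);
  return 64 - __builtin_clzll(frame);
}

template <typename Mapping>
static uptr ShadowBegImpl() { return Mapping::kShadowBeg; }

static uptr ShadowBeg() {
  // A single runtime branch selects the statically-known layout.
  return vmaSize == 39 ? ShadowBegImpl<Mapping39>()
                       : ShadowBegImpl<Mapping42>();
}

int main() {
  vmaSize = DetectVmaSize();
  printf("vma=%llu, shadow begins at 0x%llx\n", vmaSize, ShadowBeg());
}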
-ALWAYS_INLINE -bool IsMetaMem(uptr mem) { - return mem >= kMetaShadowBeg && mem <= kMetaShadowEnd; -} +/* +C/C++ on linux/aarch64 (39-bit VMA) +0000 0010 00 - 0100 0000 00: main binary +0100 0000 00 - 0800 0000 00: - +0800 0000 00 - 2000 0000 00: shadow memory +2000 0000 00 - 3100 0000 00: - +3100 0000 00 - 3400 0000 00: metainfo +3400 0000 00 - 5500 0000 00: - +5500 0000 00 - 5600 0000 00: main binary (PIE) +5600 0000 00 - 6000 0000 00: - +6000 0000 00 - 6200 0000 00: traces +6200 0000 00 - 7d00 0000 00: - +7c00 0000 00 - 7d00 0000 00: heap +7d00 0000 00 - 7fff ffff ff: modules and main thread stack +*/ +struct Mapping39 { + static const uptr kLoAppMemBeg = 0x0000001000ull; + static const uptr kLoAppMemEnd = 0x0100000000ull; + static const uptr kShadowBeg = 0x0800000000ull; + static const uptr kShadowEnd = 0x2000000000ull; + static const uptr kMetaShadowBeg = 0x3100000000ull; + static const uptr kMetaShadowEnd = 0x3400000000ull; + static const uptr kMidAppMemBeg = 0x5500000000ull; + static const uptr kMidAppMemEnd = 0x5600000000ull; + static const uptr kMidShadowOff = 0x5000000000ull; + static const uptr kTraceMemBeg = 0x6000000000ull; + static const uptr kTraceMemEnd = 0x6200000000ull; + static const uptr kHeapMemBeg = 0x7c00000000ull; + static const uptr kHeapMemEnd = 0x7d00000000ull; + static const uptr kHiAppMemBeg = 0x7e00000000ull; + static const uptr kHiAppMemEnd = 0x7fffffffffull; + static const uptr kAppMemMsk = 0x7800000000ull; + static const uptr kAppMemXor = 0x0200000000ull; + static const uptr kVdsoBeg = 0x7f00000000ull; +}; -ALWAYS_INLINE -uptr MemToShadow(uptr x) { - DCHECK(IsAppMem(x)); - return (((x) & ~(kAppMemMsk | (kShadowCell - 1))) - ^ kAppMemXor) * kShadowCnt; -} +/* +C/C++ on linux/aarch64 (42-bit VMA) +00000 0010 00 - 01000 0000 00: main binary +01000 0000 00 - 10000 0000 00: - +10000 0000 00 - 20000 0000 00: shadow memory +20000 0000 00 - 26000 0000 00: - +26000 0000 00 - 28000 0000 00: metainfo +28000 0000 00 - 2aa00 0000 00: - +2aa00 0000 00 - 2ab00 0000 00: main binary (PIE) +2ab00 0000 00 - 36200 0000 00: - +36200 0000 00 - 36240 0000 00: traces +36240 0000 00 - 3e000 0000 00: - +3e000 0000 00 - 3f000 0000 00: heap +3f000 0000 00 - 3ffff ffff ff: modules and main thread stack +*/ +struct Mapping42 { + static const uptr kLoAppMemBeg = 0x00000001000ull; + static const uptr kLoAppMemEnd = 0x01000000000ull; + static const uptr kShadowBeg = 0x10000000000ull; + static const uptr kShadowEnd = 0x20000000000ull; + static const uptr kMetaShadowBeg = 0x26000000000ull; + static const uptr kMetaShadowEnd = 0x28000000000ull; + static const uptr kMidAppMemBeg = 0x2aa00000000ull; + static const uptr kMidAppMemEnd = 0x2ab00000000ull; + static const uptr kMidShadowOff = 0x28000000000ull; + static const uptr kTraceMemBeg = 0x36200000000ull; + static const uptr kTraceMemEnd = 0x36400000000ull; + static const uptr kHeapMemBeg = 0x3e000000000ull; + static const uptr kHeapMemEnd = 0x3f000000000ull; + static const uptr kHiAppMemBeg = 0x3f000000000ull; + static const uptr kHiAppMemEnd = 0x3ffffffffffull; + static const uptr kAppMemMsk = 0x3c000000000ull; + static const uptr kAppMemXor = 0x04000000000ull; + static const uptr kVdsoBeg = 0x37f00000000ull; +}; -ALWAYS_INLINE -u32 *MemToMeta(uptr x) { - DCHECK(IsAppMem(x)); - return (u32*)(((((x) & ~(kAppMemMsk | (kMetaShadowCell - 1))) - ^ kAppMemXor) / kMetaShadowCell * kMetaShadowSize) | kMetaShadowBeg); -} +// Indicates the runtime will define the memory regions at runtime. 
+#define TSAN_RUNTIME_VMA 1 +// Indicates that the mapping defines a mid-range memory segment. +#define TSAN_MID_APP_RANGE 1 +#elif defined(__powerpc64__) +// PPC64 supports multiple VMA sizes, which leads to multiple address +// transformation functions. To support these multiple VMA transformations and +// mappings, the TSan runtime for PPC64 uses an external memory read +// (vmaSize) to select which mapping to use. Although slower, this makes the +// same instrumented binary run on multiple kernels. -ALWAYS_INLINE -uptr ShadowToMem(uptr s) { - CHECK(IsShadowMem(s)); - if (s >= MemToShadow(kLoAppMemBeg) && s <= MemToShadow(kLoAppMemEnd - 1)) - return (s / kShadowCnt) ^ kAppMemXor; - else - return ((s / kShadowCnt) ^ kAppMemXor) | kAppMemMsk; -} +/* +C/C++ on linux/powerpc64 (44-bit VMA) +0000 0000 0100 - 0001 0000 0000: main binary +0001 0000 0000 - 0001 0000 0000: - +0001 0000 0000 - 0b00 0000 0000: shadow +0b00 0000 0000 - 0b00 0000 0000: - +0b00 0000 0000 - 0d00 0000 0000: metainfo (memory blocks and sync objects) +0d00 0000 0000 - 0d00 0000 0000: - +0d00 0000 0000 - 0f00 0000 0000: traces +0f00 0000 0000 - 0f00 0000 0000: - +0f00 0000 0000 - 0f50 0000 0000: heap +0f50 0000 0000 - 0f60 0000 0000: - +0f60 0000 0000 - 1000 0000 0000: modules and main thread stack +*/ +struct Mapping44 { + static const uptr kMetaShadowBeg = 0x0b0000000000ull; + static const uptr kMetaShadowEnd = 0x0d0000000000ull; + static const uptr kTraceMemBeg = 0x0d0000000000ull; + static const uptr kTraceMemEnd = 0x0f0000000000ull; + static const uptr kShadowBeg = 0x000100000000ull; + static const uptr kShadowEnd = 0x0b0000000000ull; + static const uptr kLoAppMemBeg = 0x000000000100ull; + static const uptr kLoAppMemEnd = 0x000100000000ull; + static const uptr kHeapMemBeg = 0x0f0000000000ull; + static const uptr kHeapMemEnd = 0x0f5000000000ull; + static const uptr kHiAppMemBeg = 0x0f6000000000ull; + static const uptr kHiAppMemEnd = 0x100000000000ull; // 44 bits + static const uptr kAppMemMsk = 0x0f0000000000ull; + static const uptr kAppMemXor = 0x002100000000ull; + static const uptr kVdsoBeg = 0x3c0000000000000ull; +}; -static USED uptr UserRegions[] = { - kLoAppMemBeg, kLoAppMemEnd, - kHiAppMemBeg, kHiAppMemEnd, - kHeapMemBeg, kHeapMemEnd, +/* +C/C++ on linux/powerpc64 (46-bit VMA) +0000 0000 1000 - 0100 0000 0000: main binary +0100 0000 0000 - 0200 0000 0000: - +0100 0000 0000 - 1000 0000 0000: shadow +1000 0000 0000 - 1000 0000 0000: - +1000 0000 0000 - 2000 0000 0000: metainfo (memory blocks and sync objects) +2000 0000 0000 - 2000 0000 0000: - +2000 0000 0000 - 2200 0000 0000: traces +2200 0000 0000 - 3d00 0000 0000: - +3d00 0000 0000 - 3e00 0000 0000: heap +3e00 0000 0000 - 3e80 0000 0000: - +3e80 0000 0000 - 4000 0000 0000: modules and main thread stack +*/ +struct Mapping46 { + static const uptr kMetaShadowBeg = 0x100000000000ull; + static const uptr kMetaShadowEnd = 0x200000000000ull; + static const uptr kTraceMemBeg = 0x200000000000ull; + static const uptr kTraceMemEnd = 0x220000000000ull; + static const uptr kShadowBeg = 0x010000000000ull; + static const uptr kShadowEnd = 0x100000000000ull; + static const uptr kHeapMemBeg = 0x3d0000000000ull; + static const uptr kHeapMemEnd = 0x3e0000000000ull; + static const uptr kLoAppMemBeg = 0x000000001000ull; + static const uptr kLoAppMemEnd = 0x010000000000ull; + static const uptr kHiAppMemBeg = 0x3e8000000000ull; + static const uptr kHiAppMemEnd = 0x400000000000ull; // 46 bits + static const uptr kAppMemMsk = 0x3c0000000000ull; + static const uptr kAppMemXor = 0x020000000000ull; +
static const uptr kVdsoBeg = 0x7800000000000000ull; }; +// Indicates the runtime will define the memory regions at runtime. +#define TSAN_RUNTIME_VMA 1 +#endif + #elif defined(SANITIZER_GO) && !SANITIZER_WINDOWS /* Go on linux, darwin and freebsd @@ -149,138 +263,495 @@ static USED uptr UserRegions[] = { 6200 0000 0000 - 8000 0000 0000: - */ -const uptr kMetaShadowBeg = 0x300000000000ull; -const uptr kMetaShadowEnd = 0x400000000000ull; -const uptr kTraceMemBeg = 0x600000000000ull; -const uptr kTraceMemEnd = 0x620000000000ull; -const uptr kShadowBeg = 0x200000000000ull; -const uptr kShadowEnd = 0x238000000000ull; -const uptr kAppMemBeg = 0x000000001000ull; -const uptr kAppMemEnd = 0x00e000000000ull; +struct Mapping { + static const uptr kMetaShadowBeg = 0x300000000000ull; + static const uptr kMetaShadowEnd = 0x400000000000ull; + static const uptr kTraceMemBeg = 0x600000000000ull; + static const uptr kTraceMemEnd = 0x620000000000ull; + static const uptr kShadowBeg = 0x200000000000ull; + static const uptr kShadowEnd = 0x238000000000ull; + static const uptr kAppMemBeg = 0x000000001000ull; + static const uptr kAppMemEnd = 0x00e000000000ull; +}; + +#elif defined(SANITIZER_GO) && SANITIZER_WINDOWS + +/* Go on windows +0000 0000 1000 - 0000 1000 0000: executable +0000 1000 0000 - 00f8 0000 0000: - +00c0 0000 0000 - 00e0 0000 0000: heap +00e0 0000 0000 - 0100 0000 0000: - +0100 0000 0000 - 0500 0000 0000: shadow +0500 0000 0000 - 0560 0000 0000: - +0560 0000 0000 - 0760 0000 0000: traces +0760 0000 0000 - 07d0 0000 0000: metainfo (memory blocks and sync objects) +07d0 0000 0000 - 8000 0000 0000: - +*/ + +struct Mapping { + static const uptr kMetaShadowBeg = 0x076000000000ull; + static const uptr kMetaShadowEnd = 0x07d000000000ull; + static const uptr kTraceMemBeg = 0x056000000000ull; + static const uptr kTraceMemEnd = 0x076000000000ull; + static const uptr kShadowBeg = 0x010000000000ull; + static const uptr kShadowEnd = 0x050000000000ull; + static const uptr kAppMemBeg = 0x000000001000ull; + static const uptr kAppMemEnd = 0x00e000000000ull; +} + +#else +# error "Unknown platform" +#endif + + +#ifdef TSAN_RUNTIME_VMA +extern uptr vmaSize; +#endif + + +enum MappingType { + MAPPING_LO_APP_BEG, + MAPPING_LO_APP_END, + MAPPING_HI_APP_BEG, + MAPPING_HI_APP_END, +#ifdef TSAN_MID_APP_RANGE + MAPPING_MID_APP_BEG, + MAPPING_MID_APP_END, +#endif + MAPPING_HEAP_BEG, + MAPPING_HEAP_END, + MAPPING_APP_BEG, + MAPPING_APP_END, + MAPPING_SHADOW_BEG, + MAPPING_SHADOW_END, + MAPPING_META_SHADOW_BEG, + MAPPING_META_SHADOW_END, + MAPPING_TRACE_BEG, + MAPPING_TRACE_END, + MAPPING_VDSO_BEG, +}; + +template<typename Mapping, int Type> +uptr MappingImpl(void) { + switch (Type) { +#ifndef SANITIZER_GO + case MAPPING_LO_APP_BEG: return Mapping::kLoAppMemBeg; + case MAPPING_LO_APP_END: return Mapping::kLoAppMemEnd; +# ifdef TSAN_MID_APP_RANGE + case MAPPING_MID_APP_BEG: return Mapping::kMidAppMemBeg; + case MAPPING_MID_APP_END: return Mapping::kMidAppMemEnd; +# endif + case MAPPING_HI_APP_BEG: return Mapping::kHiAppMemBeg; + case MAPPING_HI_APP_END: return Mapping::kHiAppMemEnd; + case MAPPING_HEAP_BEG: return Mapping::kHeapMemBeg; + case MAPPING_HEAP_END: return Mapping::kHeapMemEnd; + case MAPPING_VDSO_BEG: return Mapping::kVdsoBeg; +#else + case MAPPING_APP_BEG: return Mapping::kAppMemBeg; + case MAPPING_APP_END: return Mapping::kAppMemEnd; +#endif + case MAPPING_SHADOW_BEG: return Mapping::kShadowBeg; + case MAPPING_SHADOW_END: return Mapping::kShadowEnd; + case MAPPING_META_SHADOW_BEG: return Mapping::kMetaShadowBeg; + 
case MAPPING_META_SHADOW_END: return Mapping::kMetaShadowEnd; + case MAPPING_TRACE_BEG: return Mapping::kTraceMemBeg; + case MAPPING_TRACE_END: return Mapping::kTraceMemEnd; + } +} + +template<int Type> +uptr MappingArchImpl(void) { +#ifdef __aarch64__ + if (vmaSize == 39) + return MappingImpl<Mapping39, Type>(); + else + return MappingImpl<Mapping42, Type>(); + DCHECK(0); +#elif defined(__powerpc64__) + if (vmaSize == 44) + return MappingImpl<Mapping44, Type>(); + else + return MappingImpl<Mapping46, Type>(); + DCHECK(0); +#else + return MappingImpl<Mapping, Type>(); +#endif +} +#ifndef SANITIZER_GO ALWAYS_INLINE -bool IsAppMem(uptr mem) { - return mem >= kAppMemBeg && mem < kAppMemEnd; +uptr LoAppMemBeg(void) { + return MappingArchImpl<MAPPING_LO_APP_BEG>(); +} +ALWAYS_INLINE +uptr LoAppMemEnd(void) { + return MappingArchImpl<MAPPING_LO_APP_END>(); } +#ifdef TSAN_MID_APP_RANGE ALWAYS_INLINE -bool IsShadowMem(uptr mem) { - return mem >= kShadowBeg && mem <= kShadowEnd; +uptr MidAppMemBeg(void) { + return MappingArchImpl<MAPPING_MID_APP_BEG>(); +} +ALWAYS_INLINE +uptr MidAppMemEnd(void) { + return MappingArchImpl<MAPPING_MID_APP_END>(); } +#endif ALWAYS_INLINE -bool IsMetaMem(uptr mem) { - return mem >= kMetaShadowBeg && mem <= kMetaShadowEnd; +uptr HeapMemBeg(void) { + return MappingArchImpl<MAPPING_HEAP_BEG>(); +} +ALWAYS_INLINE +uptr HeapMemEnd(void) { + return MappingArchImpl<MAPPING_HEAP_END>(); } ALWAYS_INLINE -uptr MemToShadow(uptr x) { - DCHECK(IsAppMem(x)); - return ((x & ~(kShadowCell - 1)) * kShadowCnt) | kShadowBeg; +uptr HiAppMemBeg(void) { + return MappingArchImpl<MAPPING_HI_APP_BEG>(); +} +ALWAYS_INLINE +uptr HiAppMemEnd(void) { + return MappingArchImpl<MAPPING_HI_APP_END>(); } ALWAYS_INLINE -u32 *MemToMeta(uptr x) { - DCHECK(IsAppMem(x)); - return (u32*)(((x & ~(kMetaShadowCell - 1)) / \ - kMetaShadowCell * kMetaShadowSize) | kMetaShadowBeg); +uptr VdsoBeg(void) { + return MappingArchImpl<MAPPING_VDSO_BEG>(); } +#else + ALWAYS_INLINE -uptr ShadowToMem(uptr s) { - CHECK(IsShadowMem(s)); - return (s & ~kShadowBeg) / kShadowCnt; +uptr AppMemBeg(void) { + return MappingArchImpl<MAPPING_APP_BEG>(); +} +ALWAYS_INLINE +uptr AppMemEnd(void) { + return MappingArchImpl<MAPPING_APP_END>(); } -static USED uptr UserRegions[] = { - kAppMemBeg, kAppMemEnd, -}; +#endif -#elif defined(SANITIZER_GO) && SANITIZER_WINDOWS +static inline +bool GetUserRegion(int i, uptr *start, uptr *end) { + switch (i) { + default: + return false; +#ifndef SANITIZER_GO + case 0: + *start = LoAppMemBeg(); + *end = LoAppMemEnd(); + return true; + case 1: + *start = HiAppMemBeg(); + *end = HiAppMemEnd(); + return true; + case 2: + *start = HeapMemBeg(); + *end = HeapMemEnd(); + return true; +# ifdef TSAN_MID_APP_RANGE + case 3: + *start = MidAppMemBeg(); + *end = MidAppMemEnd(); + return true; +# endif +#else + case 0: + *start = AppMemBeg(); + *end = AppMemEnd(); + return true; +#endif + } +} -/* Go on windows -0000 0000 1000 - 0000 1000 0000: executable -0000 1000 0000 - 00f8 0000 0000: - -00c0 0000 0000 - 00e0 0000 0000: heap -00e0 0000 0000 - 0100 0000 0000: - -0100 0000 0000 - 0500 0000 0000: shadow -0500 0000 0000 - 0560 0000 0000: - -0560 0000 0000 - 0760 0000 0000: traces -0760 0000 0000 - 07d0 0000 0000: metainfo (memory blocks and sync objects) -07d0 0000 0000 - 8000 0000 0000: - -*/ +ALWAYS_INLINE +uptr ShadowBeg(void) { + return MappingArchImpl<MAPPING_SHADOW_BEG>(); +} +ALWAYS_INLINE +uptr ShadowEnd(void) { + return MappingArchImpl<MAPPING_SHADOW_END>(); +} + +ALWAYS_INLINE +uptr MetaShadowBeg(void) { 
+ return MappingArchImpl<MAPPING_META_SHADOW_BEG>(); +} +ALWAYS_INLINE +uptr MetaShadowEnd(void) { + return MappingArchImpl<MAPPING_META_SHADOW_END>(); +} + +ALWAYS_INLINE +uptr TraceMemBeg(void) { + return MappingArchImpl<MAPPING_TRACE_BEG>(); +} +ALWAYS_INLINE +uptr TraceMemEnd(void) { + return MappingArchImpl<MAPPING_TRACE_END>(); +} -const uptr kMetaShadowBeg = 0x076000000000ull; -const uptr kMetaShadowEnd = 0x07d000000000ull; -const uptr kTraceMemBeg = 0x056000000000ull; -const uptr kTraceMemEnd = 0x076000000000ull; -const uptr kShadowBeg = 0x010000000000ull; -const uptr kShadowEnd = 0x050000000000ull; -const uptr kAppMemBeg = 0x000000001000ull; -const uptr kAppMemEnd = 0x00e000000000ull; + +template<typename Mapping> +bool IsAppMemImpl(uptr mem) { +#ifndef SANITIZER_GO + return (mem >= Mapping::kHeapMemBeg && mem < Mapping::kHeapMemEnd) || +# ifdef TSAN_MID_APP_RANGE + (mem >= Mapping::kMidAppMemBeg && mem < Mapping::kMidAppMemEnd) || +# endif + (mem >= Mapping::kLoAppMemBeg && mem < Mapping::kLoAppMemEnd) || + (mem >= Mapping::kHiAppMemBeg && mem < Mapping::kHiAppMemEnd); +#else + return mem >= Mapping::kAppMemBeg && mem < Mapping::kAppMemEnd; +#endif +} ALWAYS_INLINE bool IsAppMem(uptr mem) { - return mem >= kAppMemBeg && mem < kAppMemEnd; +#ifdef __aarch64__ + if (vmaSize == 39) + return IsAppMemImpl<Mapping39>(mem); + else + return IsAppMemImpl<Mapping42>(mem); + DCHECK(0); +#elif defined(__powerpc64__) + if (vmaSize == 44) + return IsAppMemImpl<Mapping44>(mem); + else + return IsAppMemImpl<Mapping46>(mem); + DCHECK(0); +#else + return IsAppMemImpl<Mapping>(mem); +#endif +} + + +template<typename Mapping> +bool IsShadowMemImpl(uptr mem) { + return mem >= Mapping::kShadowBeg && mem <= Mapping::kShadowEnd; } ALWAYS_INLINE bool IsShadowMem(uptr mem) { - return mem >= kShadowBeg && mem <= kShadowEnd; +#ifdef __aarch64__ + if (vmaSize == 39) + return IsShadowMemImpl<Mapping39>(mem); + else + return IsShadowMemImpl<Mapping42>(mem); + DCHECK(0); +#elif defined(__powerpc64__) + if (vmaSize == 44) + return IsShadowMemImpl<Mapping44>(mem); + else + return IsShadowMemImpl<Mapping46>(mem); + DCHECK(0); +#else + return IsShadowMemImpl<Mapping>(mem); +#endif +} + + +template<typename Mapping> +bool IsMetaMemImpl(uptr mem) { + return mem >= Mapping::kMetaShadowBeg && mem <= Mapping::kMetaShadowEnd; } ALWAYS_INLINE bool IsMetaMem(uptr mem) { - return mem >= kMetaShadowBeg && mem <= kMetaShadowEnd; +#ifdef __aarch64__ + if (vmaSize == 39) + return IsMetaMemImpl<Mapping39>(mem); + else + return IsMetaMemImpl<Mapping42>(mem); + DCHECK(0); +#elif defined(__powerpc64__) + if (vmaSize == 44) + return IsMetaMemImpl<Mapping44>(mem); + else + return IsMetaMemImpl<Mapping46>(mem); + DCHECK(0); +#else + return IsMetaMemImpl<Mapping>(mem); +#endif } -ALWAYS_INLINE -uptr MemToShadow(uptr x) { + +template<typename Mapping> +uptr MemToShadowImpl(uptr x) { DCHECK(IsAppMem(x)); - return ((x & ~(kShadowCell - 1)) * kShadowCnt) + kShadowBeg; +#ifndef SANITIZER_GO + return (((x) & ~(Mapping::kAppMemMsk | (kShadowCell - 1))) + ^ Mapping::kAppMemXor) * kShadowCnt; +#else + return ((x & ~(kShadowCell - 1)) * kShadowCnt) | Mapping::kShadowBeg; +#endif } ALWAYS_INLINE -u32 *MemToMeta(uptr x) { +uptr MemToShadow(uptr x) { +#ifdef __aarch64__ + if (vmaSize == 39) + return MemToShadowImpl<Mapping39>(x); + else + return MemToShadowImpl<Mapping42>(x); + DCHECK(0); +#elif defined(__powerpc64__) + if (vmaSize == 44) + return MemToShadowImpl<Mapping44>(x); + else + return MemToShadowImpl<Mapping46>(x); + DCHECK(0); +#else + 
return MemToShadowImpl<Mapping>(x); +#endif +} + + +template<typename Mapping> +u32 *MemToMetaImpl(uptr x) { DCHECK(IsAppMem(x)); +#ifndef SANITIZER_GO + return (u32*)(((((x) & ~(Mapping::kAppMemMsk | (kMetaShadowCell - 1))) + ^ Mapping::kAppMemXor) / kMetaShadowCell * kMetaShadowSize) + | Mapping::kMetaShadowBeg); +#else return (u32*)(((x & ~(kMetaShadowCell - 1)) / \ - kMetaShadowCell * kMetaShadowSize) | kMetaShadowBeg); + kMetaShadowCell * kMetaShadowSize) | Mapping::kMetaShadowBeg); +#endif } ALWAYS_INLINE -uptr ShadowToMem(uptr s) { - CHECK(IsShadowMem(s)); - // FIXME(dvyukov): this is most likely wrong as the mapping is not bijection. - return (s - kShadowBeg) / kShadowCnt; +u32 *MemToMeta(uptr x) { +#ifdef __aarch64__ + if (vmaSize == 39) + return MemToMetaImpl<Mapping39>(x); + else + return MemToMetaImpl<Mapping42>(x); + DCHECK(0); +#elif defined(__powerpc64__) + if (vmaSize == 44) + return MemToMetaImpl<Mapping44>(x); + else + return MemToMetaImpl<Mapping46>(x); + DCHECK(0); +#else + return MemToMetaImpl<Mapping>(x); +#endif } -static USED uptr UserRegions[] = { - kAppMemBeg, kAppMemEnd, -}; +template<typename Mapping> +uptr ShadowToMemImpl(uptr s) { + DCHECK(IsShadowMem(s)); +#ifndef SANITIZER_GO + if (s >= MemToShadow(Mapping::kLoAppMemBeg) + && s <= MemToShadow(Mapping::kLoAppMemEnd - 1)) + return (s / kShadowCnt) ^ Mapping::kAppMemXor; +# ifdef TSAN_MID_APP_RANGE + if (s >= MemToShadow(Mapping::kMidAppMemBeg) + && s <= MemToShadow(Mapping::kMidAppMemEnd - 1)) + return ((s / kShadowCnt) ^ Mapping::kAppMemXor) + Mapping::kMidShadowOff; +# endif + else + return ((s / kShadowCnt) ^ Mapping::kAppMemXor) | Mapping::kAppMemMsk; #else -# error "Unknown platform" +# ifndef SANITIZER_WINDOWS + return (s & ~Mapping::kShadowBeg) / kShadowCnt; +# else + // FIXME(dvyukov): this is most likely wrong as the mapping is not bijection. + return (s - Mapping::kShadowBeg) / kShadowCnt; +# endif // SANITIZER_WINDOWS +#endif +} + +ALWAYS_INLINE +uptr ShadowToMem(uptr s) { +#ifdef __aarch64__ + if (vmaSize == 39) + return ShadowToMemImpl<Mapping39>(s); + else + return ShadowToMemImpl<Mapping42>(s); + DCHECK(0); +#elif defined(__powerpc64__) + if (vmaSize == 44) + return ShadowToMemImpl<Mapping44>(s); + else + return ShadowToMemImpl<Mapping46>(s); + DCHECK(0); +#else + return ShadowToMemImpl<Mapping>(s); #endif +} + + // The additional page is to catch shadow stack overflow as paging fault. // Windows wants 64K alignment for mmaps. 
const uptr kTotalTraceSize = (kTraceSize * sizeof(Event) + sizeof(Trace) + (64 << 10) + (64 << 10) - 1) & ~((64 << 10) - 1); -uptr ALWAYS_INLINE GetThreadTrace(int tid) { - uptr p = kTraceMemBeg + (uptr)tid * kTotalTraceSize; - DCHECK_LT(p, kTraceMemEnd); +template<typename Mapping> +uptr GetThreadTraceImpl(int tid) { + uptr p = Mapping::kTraceMemBeg + (uptr)tid * kTotalTraceSize; + DCHECK_LT(p, Mapping::kTraceMemEnd); return p; } -uptr ALWAYS_INLINE GetThreadTraceHeader(int tid) { - uptr p = kTraceMemBeg + (uptr)tid * kTotalTraceSize +ALWAYS_INLINE +uptr GetThreadTrace(int tid) { +#ifdef __aarch64__ + if (vmaSize == 39) + return GetThreadTraceImpl<Mapping39>(tid); + else + return GetThreadTraceImpl<Mapping42>(tid); + DCHECK(0); +#elif defined(__powerpc64__) + if (vmaSize == 44) + return GetThreadTraceImpl<Mapping44>(tid); + else + return GetThreadTraceImpl<Mapping46>(tid); + DCHECK(0); +#else + return GetThreadTraceImpl<Mapping>(tid); +#endif +} + + +template<typename Mapping> +uptr GetThreadTraceHeaderImpl(int tid) { + uptr p = Mapping::kTraceMemBeg + (uptr)tid * kTotalTraceSize + kTraceSize * sizeof(Event); - DCHECK_LT(p, kTraceMemEnd); + DCHECK_LT(p, Mapping::kTraceMemEnd); return p; } +ALWAYS_INLINE +uptr GetThreadTraceHeader(int tid) { +#ifdef __aarch64__ + if (vmaSize == 39) + return GetThreadTraceHeaderImpl<Mapping39>(tid); + else + return GetThreadTraceHeaderImpl<Mapping42>(tid); + DCHECK(0); +#elif defined(__powerpc64__) + if (vmaSize == 44) + return GetThreadTraceHeaderImpl<Mapping44>(tid); + else + return GetThreadTraceHeaderImpl<Mapping46>(tid); + DCHECK(0); +#else + return GetThreadTraceHeaderImpl<Mapping>(tid); +#endif +} + void InitializePlatform(); +void InitializePlatformEarly(); +void CheckAndProtect(); +void InitializeShadowMemoryPlatform(); void FlushShadowMemory(); void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive); @@ -294,6 +765,8 @@ int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m, void *abstime), void *c, void *m, void *abstime, void(*cleanup)(void *arg), void *arg); +void DestroyThreadState(); + } // namespace __tsan #endif // TSAN_PLATFORM_H diff --git a/lib/tsan/rtl/tsan_platform_linux.cc b/lib/tsan/rtl/tsan_platform_linux.cc index 1309058210ce..6602561186ce 100644 --- a/lib/tsan/rtl/tsan_platform_linux.cc +++ b/lib/tsan/rtl/tsan_platform_linux.cc @@ -67,6 +67,11 @@ namespace __tsan { static uptr g_data_start; static uptr g_data_end; +#ifdef TSAN_RUNTIME_VMA +// Runtime detected VMA size. +uptr vmaSize; +#endif + enum { MemTotal = 0, MemShadow = 1, @@ -82,29 +87,30 @@ enum { void FillProfileCallback(uptr p, uptr rss, bool file, uptr *mem, uptr stats_size) { mem[MemTotal] += rss; - if (p >= kShadowBeg && p < kShadowEnd) + if (p >= ShadowBeg() && p < ShadowEnd()) mem[MemShadow] += rss; - else if (p >= kMetaShadowBeg && p < kMetaShadowEnd) + else if (p >= MetaShadowBeg() && p < MetaShadowEnd()) mem[MemMeta] += rss; #ifndef SANITIZER_GO - else if (p >= kHeapMemBeg && p < kHeapMemEnd) + else if (p >= HeapMemBeg() && p < HeapMemEnd()) mem[MemHeap] += rss; - else if (p >= kLoAppMemBeg && p < kLoAppMemEnd) + else if (p >= LoAppMemBeg() && p < LoAppMemEnd()) mem[file ? MemFile : MemMmap] += rss; - else if (p >= kHiAppMemBeg && p < kHiAppMemEnd) + else if (p >= HiAppMemBeg() && p < HiAppMemEnd()) mem[file ? MemFile : MemMmap] += rss; #else - else if (p >= kAppMemBeg && p < kAppMemEnd) + else if (p >= AppMemBeg() && p < AppMemEnd()) mem[file ? 
MemFile : MemMmap] += rss; #endif - else if (p >= kTraceMemBeg && p < kTraceMemEnd) + else if (p >= TraceMemBeg() && p < TraceMemEnd()) mem[MemTrace] += rss; else mem[MemOther] += rss; } void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) { - uptr mem[MemCount] = {}; + uptr mem[MemCount]; + internal_memset(mem, 0, sizeof(mem[0]) * MemCount); __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7); StackDepotStats *stacks = StackDepotGetStats(); internal_snprintf(buf, buf_size, @@ -121,7 +127,7 @@ void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) { void FlushShadowMemoryCallback( const SuspendedThreadsList &suspended_threads_list, void *argument) { - FlushUnneededShadowMemory(kShadowBeg, kShadowEnd - kShadowBeg); + FlushUnneededShadowMemory(ShadowBeg(), ShadowEnd() - ShadowBeg()); } #endif @@ -132,17 +138,6 @@ void FlushShadowMemory() { } #ifndef SANITIZER_GO -static void ProtectRange(uptr beg, uptr end) { - CHECK_LE(beg, end); - if (beg == end) - return; - if (beg != (uptr)MmapNoAccess(beg, end - beg)) { - Printf("FATAL: ThreadSanitizer can not protect [%zx,%zx]\n", beg, end); - Printf("FATAL: Make sure you are not using unlimited stack\n"); - Die(); - } -} - // Mark shadow for .rodata sections with the special kShadowRodata marker. // Accesses to .rodata can't race, so this saves time, memory and trace space. static void MapRodata() { @@ -200,55 +195,7 @@ static void MapRodata() { internal_close(fd); } -void InitializeShadowMemory() { - // Map memory shadow. - uptr shadow = - (uptr)MmapFixedNoReserve(kShadowBeg, kShadowEnd - kShadowBeg, "shadow"); - if (shadow != kShadowBeg) { - Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n"); - Printf("FATAL: Make sure to compile with -fPIE and " - "to link with -pie (%p, %p).\n", shadow, kShadowBeg); - Die(); - } - // This memory range is used for thread stacks and large user mmaps. - // Frequently a thread uses only a small part of stack and similarly - // a program uses a small part of large mmap. On some programs - // we see 20% memory usage reduction without huge pages for this range. - // FIXME: don't use constants here. -#if defined(__x86_64__) - const uptr kMadviseRangeBeg = 0x7f0000000000ull; - const uptr kMadviseRangeSize = 0x010000000000ull; -#elif defined(__mips64) - const uptr kMadviseRangeBeg = 0xff00000000ull; - const uptr kMadviseRangeSize = 0x0100000000ull; -#endif - NoHugePagesInRegion(MemToShadow(kMadviseRangeBeg), - kMadviseRangeSize * kShadowMultiplier); - // Meta shadow is compressing and we don't flush it, - // so it makes sense to mark it as NOHUGEPAGE to not over-allocate memory. - // On one program it reduces memory consumption from 5GB to 2.5GB. - NoHugePagesInRegion(kMetaShadowBeg, kMetaShadowEnd - kMetaShadowBeg); - if (common_flags()->use_madv_dontdump) - DontDumpShadowMemory(kShadowBeg, kShadowEnd - kShadowBeg); - DPrintf("memory shadow: %zx-%zx (%zuGB)\n", - kShadowBeg, kShadowEnd, - (kShadowEnd - kShadowBeg) >> 30); - - // Map meta shadow. 
- uptr meta_size = kMetaShadowEnd - kMetaShadowBeg; - uptr meta = - (uptr)MmapFixedNoReserve(kMetaShadowBeg, meta_size, "meta shadow"); - if (meta != kMetaShadowBeg) { - Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n"); - Printf("FATAL: Make sure to compile with -fPIE and " - "to link with -pie (%p, %p).\n", meta, kMetaShadowBeg); - Die(); - } - if (common_flags()->use_madv_dontdump) - DontDumpShadowMemory(meta, meta_size); - DPrintf("meta shadow: %zx-%zx (%zuGB)\n", - meta, meta + meta_size, meta_size >> 30); - +void InitializeShadowMemoryPlatform() { MapRodata(); } @@ -292,32 +239,27 @@ static void InitDataSeg() { CHECK_LT((uptr)&g_data_start, g_data_end); } -static void CheckAndProtect() { - // Ensure that the binary is indeed compiled with -pie. - MemoryMappingLayout proc_maps(true); - uptr p, end; - while (proc_maps.Next(&p, &end, 0, 0, 0, 0)) { - if (IsAppMem(p)) - continue; - if (p >= kHeapMemEnd && - p < HeapEnd()) - continue; - if (p >= kVdsoBeg) // vdso - break; - Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n", p, end); +#endif // #ifndef SANITIZER_GO + +void InitializePlatformEarly() { +#ifdef TSAN_RUNTIME_VMA + vmaSize = + (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1); +#if defined(__aarch64__) + if (vmaSize != 39 && vmaSize != 42) { + Printf("FATAL: ThreadSanitizer: unsupported VMA range\n"); + Printf("FATAL: Found %d - Supported 39 and 42\n", vmaSize); Die(); } - - ProtectRange(kLoAppMemEnd, kShadowBeg); - ProtectRange(kShadowEnd, kMetaShadowBeg); - ProtectRange(kMetaShadowEnd, kTraceMemBeg); - // Memory for traces is mapped lazily in MapThreadTrace. - // Protect the whole range for now, so that user does not map something here. - ProtectRange(kTraceMemBeg, kTraceMemEnd); - ProtectRange(kTraceMemEnd, kHeapMemBeg); - ProtectRange(HeapEnd(), kHiAppMemBeg); +#elif defined(__powerpc64__) + if (vmaSize != 44 && vmaSize != 46) { + Printf("FATAL: ThreadSanitizer: unsupported VMA range\n"); + Printf("FATAL: Found %d - Supported 44 and 46\n", vmaSize); + Die(); + } +#endif +#endif } -#endif // #ifndef SANITIZER_GO void InitializePlatform() { DisableCoreDumperIfNecessary(); @@ -367,7 +309,7 @@ bool IsGlobalVar(uptr addr) { // This is required to properly "close" the fds, because we do not see internal // closes within glibc. The code is a pure hack. 
int ExtractResolvFDs(void *state, int *fds, int nfd) { -#if SANITIZER_LINUX +#if SANITIZER_LINUX && !SANITIZER_ANDROID int cnt = 0; __res_state *statp = (__res_state*)state; for (int i = 0; i < MAXNS && cnt < nfd; i++) { @@ -415,6 +357,10 @@ int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m, } #endif +#ifndef SANITIZER_GO +void ReplaceSystemMalloc() { } +#endif + } // namespace __tsan #endif // SANITIZER_LINUX || SANITIZER_FREEBSD diff --git a/lib/tsan/rtl/tsan_platform_mac.cc b/lib/tsan/rtl/tsan_platform_mac.cc index b72d9b07ef35..31caf37dee5a 100644 --- a/lib/tsan/rtl/tsan_platform_mac.cc +++ b/lib/tsan/rtl/tsan_platform_mac.cc @@ -15,8 +15,10 @@ #include "sanitizer_common/sanitizer_platform.h" #if SANITIZER_MAC +#include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_posix.h" #include "sanitizer_common/sanitizer_procmaps.h" #include "tsan_platform.h" #include "tsan_rtl.h" @@ -40,6 +42,62 @@ namespace __tsan { +#ifndef SANITIZER_GO +static void *SignalSafeGetOrAllocate(uptr *dst, uptr size) { + atomic_uintptr_t *a = (atomic_uintptr_t *)dst; + void *val = (void *)atomic_load_relaxed(a); + atomic_signal_fence(memory_order_acquire); // Turns the previous load into + // acquire wrt signals. + if (UNLIKELY(val == nullptr)) { + val = (void *)internal_mmap(nullptr, size, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANON, -1, 0); + CHECK(val); + void *cmp = nullptr; + if (!atomic_compare_exchange_strong(a, (uintptr_t *)&cmp, (uintptr_t)val, + memory_order_acq_rel)) { + internal_munmap(val, size); + val = cmp; + } + } + return val; +} + +// On OS X, accessing TLVs via __thread or manually by using pthread_key_* is +// problematic, because there are several places where interceptors are called +// when TLVs are not accessible (early process startup, thread cleanup, ...). +// The following provides a "poor man's TLV" implementation, where we use the +// shadow memory of the pointer returned by pthread_self() to store a pointer to +// the ThreadState object. The main thread's ThreadState pointer is stored +// separately in a static variable, because we need to access it even before the +// shadow memory is set up. +static uptr main_thread_identity = 0; +static ThreadState *main_thread_state = nullptr; + +ThreadState *cur_thread() { + ThreadState **fake_tls; + uptr thread_identity = (uptr)pthread_self(); + if (thread_identity == main_thread_identity || main_thread_identity == 0) { + fake_tls = &main_thread_state; + } else { + fake_tls = (ThreadState **)MemToShadow(thread_identity); + } + ThreadState *thr = (ThreadState *)SignalSafeGetOrAllocate( + (uptr *)fake_tls, sizeof(ThreadState)); + return thr; +} + +// TODO(kuba.brecka): This is not async-signal-safe. In particular, we call +// munmap first and then clear `fake_tls`; if we receive a signal in between, +// handler will try to access the unmapped ThreadState. 
+void cur_thread_finalize() { + uptr thread_identity = (uptr)pthread_self(); + CHECK_NE(thread_identity, main_thread_identity); + ThreadState **fake_tls = (ThreadState **)MemToShadow(thread_identity); + internal_munmap(*fake_tls, sizeof(ThreadState)); + *fake_tls = nullptr; +} +#endif + uptr GetShadowMemoryConsumption() { return 0; } @@ -51,28 +109,62 @@ void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) { } #ifndef SANITIZER_GO -void InitializeShadowMemory() { - uptr shadow = (uptr)MmapFixedNoReserve(kShadowBeg, - kShadowEnd - kShadowBeg); - if (shadow != kShadowBeg) { - Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n"); - Printf("FATAL: Make sure to compile with -fPIE and " - "to link with -pie.\n"); - Die(); +void InitializeShadowMemoryPlatform() { } + +// On OS X, GCD worker threads are created without a call to pthread_create. We +// need to properly register these threads with ThreadCreate and ThreadStart. +// These threads don't have a parent thread, as they are created "spuriously". +// We're using a libpthread API that notifies us about a newly created thread. +// The `thread == pthread_self()` check indicates this is actually a worker +// thread. If it's just a regular thread, this hook is called on the parent +// thread. +typedef void (*pthread_introspection_hook_t)(unsigned int event, + pthread_t thread, void *addr, + size_t size); +extern "C" pthread_introspection_hook_t pthread_introspection_hook_install( + pthread_introspection_hook_t hook); +static const uptr PTHREAD_INTROSPECTION_THREAD_CREATE = 1; +static const uptr PTHREAD_INTROSPECTION_THREAD_TERMINATE = 3; +static pthread_introspection_hook_t prev_pthread_introspection_hook; +static void my_pthread_introspection_hook(unsigned int event, pthread_t thread, + void *addr, size_t size) { + if (event == PTHREAD_INTROSPECTION_THREAD_CREATE) { + if (thread == pthread_self()) { + // The current thread is a newly created GCD worker thread. + ThreadState *parent_thread_state = nullptr; // No parent. 
+ int tid = ThreadCreate(parent_thread_state, 0, (uptr)thread, true); + CHECK_NE(tid, 0); + ThreadState *thr = cur_thread(); + ThreadStart(thr, tid, GetTid()); + } + } else if (event == PTHREAD_INTROSPECTION_THREAD_TERMINATE) { + if (thread == pthread_self()) { + ThreadState *thr = cur_thread(); + if (thr->tctx) { + DestroyThreadState(); + } + } } - if (common_flags()->use_madv_dontdump) - DontDumpShadowMemory(kShadowBeg, kShadowEnd - kShadowBeg); - DPrintf("kShadow %zx-%zx (%zuGB)\n", - kShadowBeg, kShadowEnd, - (kShadowEnd - kShadowBeg) >> 30); - DPrintf("kAppMem %zx-%zx (%zuGB)\n", - kAppMemBeg, kAppMemEnd, - (kAppMemEnd - kAppMemBeg) >> 30); + + if (prev_pthread_introspection_hook != nullptr) + prev_pthread_introspection_hook(event, thread, addr, size); } #endif +void InitializePlatformEarly() { +} + void InitializePlatform() { DisableCoreDumperIfNecessary(); +#ifndef SANITIZER_GO + CheckAndProtect(); + + CHECK_EQ(main_thread_identity, 0); + main_thread_identity = (uptr)pthread_self(); + + prev_pthread_introspection_hook = + pthread_introspection_hook_install(&my_pthread_introspection_hook); +#endif } #ifndef SANITIZER_GO @@ -91,6 +183,10 @@ int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m, } #endif +bool IsGlobalVar(uptr addr) { + return false; +} + } // namespace __tsan #endif // SANITIZER_MAC diff --git a/lib/tsan/rtl/tsan_platform_posix.cc b/lib/tsan/rtl/tsan_platform_posix.cc new file mode 100644 index 000000000000..90476cbc5fd5 --- /dev/null +++ b/lib/tsan/rtl/tsan_platform_posix.cc @@ -0,0 +1,151 @@ +//===-- tsan_platform_posix.cc --------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +// POSIX-specific code. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_POSIX + +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_procmaps.h" +#include "tsan_platform.h" +#include "tsan_rtl.h" + +namespace __tsan { + +#ifndef SANITIZER_GO +void InitializeShadowMemory() { + // Map memory shadow. + uptr shadow = + (uptr)MmapFixedNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(), + "shadow"); + if (shadow != ShadowBeg()) { + Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n"); + Printf("FATAL: Make sure to compile with -fPIE and " + "to link with -pie (%p, %p).\n", shadow, ShadowBeg()); + Die(); + } + // This memory range is used for thread stacks and large user mmaps. + // Frequently a thread uses only a small part of stack and similarly + // a program uses a small part of large mmap. On some programs + // we see 20% memory usage reduction without huge pages for this range. + // FIXME: don't use constants here. 
+#if defined(__x86_64__) + const uptr kMadviseRangeBeg = 0x7f0000000000ull; + const uptr kMadviseRangeSize = 0x010000000000ull; +#elif defined(__mips64) + const uptr kMadviseRangeBeg = 0xff00000000ull; + const uptr kMadviseRangeSize = 0x0100000000ull; +#elif defined(__aarch64__) + uptr kMadviseRangeBeg = 0; + uptr kMadviseRangeSize = 0; + if (vmaSize == 39) { + kMadviseRangeBeg = 0x7d00000000ull; + kMadviseRangeSize = 0x0300000000ull; + } else if (vmaSize == 42) { + kMadviseRangeBeg = 0x3f000000000ull; + kMadviseRangeSize = 0x01000000000ull; + } else { + DCHECK(0); + } +#elif defined(__powerpc64__) + uptr kMadviseRangeBeg = 0; + uptr kMadviseRangeSize = 0; + if (vmaSize == 44) { + kMadviseRangeBeg = 0x0f60000000ull; + kMadviseRangeSize = 0x0010000000ull; + } else if (vmaSize == 46) { + kMadviseRangeBeg = 0x3f0000000000ull; + kMadviseRangeSize = 0x010000000000ull; + } else { + DCHECK(0); + } +#endif + NoHugePagesInRegion(MemToShadow(kMadviseRangeBeg), + kMadviseRangeSize * kShadowMultiplier); + // Meta shadow is compressing and we don't flush it, + // so it makes sense to mark it as NOHUGEPAGE to not over-allocate memory. + // On one program it reduces memory consumption from 5GB to 2.5GB. + NoHugePagesInRegion(MetaShadowBeg(), MetaShadowEnd() - MetaShadowBeg()); + if (common_flags()->use_madv_dontdump) + DontDumpShadowMemory(ShadowBeg(), ShadowEnd() - ShadowBeg()); + DPrintf("memory shadow: %zx-%zx (%zuGB)\n", + ShadowBeg(), ShadowEnd(), + (ShadowEnd() - ShadowBeg()) >> 30); + + // Map meta shadow. + uptr meta_size = MetaShadowEnd() - MetaShadowBeg(); + uptr meta = + (uptr)MmapFixedNoReserve(MetaShadowBeg(), meta_size, "meta shadow"); + if (meta != MetaShadowBeg()) { + Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n"); + Printf("FATAL: Make sure to compile with -fPIE and " + "to link with -pie (%p, %p).\n", meta, MetaShadowBeg()); + Die(); + } + if (common_flags()->use_madv_dontdump) + DontDumpShadowMemory(meta, meta_size); + DPrintf("meta shadow: %zx-%zx (%zuGB)\n", + meta, meta + meta_size, meta_size >> 30); + + InitializeShadowMemoryPlatform(); +} + +static void ProtectRange(uptr beg, uptr end) { + CHECK_LE(beg, end); + if (beg == end) + return; + if (beg != (uptr)MmapNoAccess(beg, end - beg)) { + Printf("FATAL: ThreadSanitizer can not protect [%zx,%zx]\n", beg, end); + Printf("FATAL: Make sure you are not using unlimited stack\n"); + Die(); + } +} + +void CheckAndProtect() { + // Ensure that the binary is indeed compiled with -pie. + MemoryMappingLayout proc_maps(true); + uptr p, end, prot; + while (proc_maps.Next(&p, &end, 0, 0, 0, &prot)) { + if (IsAppMem(p)) + continue; + if (p >= HeapMemEnd() && + p < HeapEnd()) + continue; + if (prot == 0) // Zero page or mprotected. + continue; + if (p >= VdsoBeg()) // vdso + break; + Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n", p, end); + Die(); + } + + ProtectRange(LoAppMemEnd(), ShadowBeg()); + ProtectRange(ShadowEnd(), MetaShadowBeg()); +#ifdef TSAN_MID_APP_RANGE + ProtectRange(MetaShadowEnd(), MidAppMemBeg()); + ProtectRange(MidAppMemEnd(), TraceMemBeg()); +#else + ProtectRange(MetaShadowEnd(), TraceMemBeg()); +#endif + // Memory for traces is mapped lazily in MapThreadTrace. + // Protect the whole range for now, so that user does not map something here. 
+ ProtectRange(TraceMemBeg(), TraceMemEnd()); + ProtectRange(TraceMemEnd(), HeapMemBeg()); + ProtectRange(HeapEnd(), HiAppMemBeg()); +} +#endif + +} // namespace __tsan + +#endif // SANITIZER_POSIX diff --git a/lib/tsan/rtl/tsan_platform_windows.cc b/lib/tsan/rtl/tsan_platform_windows.cc index cfbe77da2c07..c6d5058d96fc 100644 --- a/lib/tsan/rtl/tsan_platform_windows.cc +++ b/lib/tsan/rtl/tsan_platform_windows.cc @@ -31,6 +31,9 @@ void FlushShadowMemory() { void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) { } +void InitializePlatformEarly() { +} + void InitializePlatform() { } diff --git a/lib/tsan/rtl/tsan_ppc_regs.h b/lib/tsan/rtl/tsan_ppc_regs.h new file mode 100644 index 000000000000..5b43f3ddada3 --- /dev/null +++ b/lib/tsan/rtl/tsan_ppc_regs.h @@ -0,0 +1,96 @@ +#define r0 0 +#define r1 1 +#define r2 2 +#define r3 3 +#define r4 4 +#define r5 5 +#define r6 6 +#define r7 7 +#define r8 8 +#define r9 9 +#define r10 10 +#define r11 11 +#define r12 12 +#define r13 13 +#define r14 14 +#define r15 15 +#define r16 16 +#define r17 17 +#define r18 18 +#define r19 19 +#define r20 20 +#define r21 21 +#define r22 22 +#define r23 23 +#define r24 24 +#define r25 25 +#define r26 26 +#define r27 27 +#define r28 28 +#define r29 29 +#define r30 30 +#define r31 31 +#define f0 0 +#define f1 1 +#define f2 2 +#define f3 3 +#define f4 4 +#define f5 5 +#define f6 6 +#define f7 7 +#define f8 8 +#define f9 9 +#define f10 10 +#define f11 11 +#define f12 12 +#define f13 13 +#define f14 14 +#define f15 15 +#define f16 16 +#define f17 17 +#define f18 18 +#define f19 19 +#define f20 20 +#define f21 21 +#define f22 22 +#define f23 23 +#define f24 24 +#define f25 25 +#define f26 26 +#define f27 27 +#define f28 28 +#define f29 29 +#define f30 30 +#define f31 31 +#define v0 0 +#define v1 1 +#define v2 2 +#define v3 3 +#define v4 4 +#define v5 5 +#define v6 6 +#define v7 7 +#define v8 8 +#define v9 9 +#define v10 10 +#define v11 11 +#define v12 12 +#define v13 13 +#define v14 14 +#define v15 15 +#define v16 16 +#define v17 17 +#define v18 18 +#define v19 19 +#define v20 20 +#define v21 21 +#define v22 22 +#define v23 23 +#define v24 24 +#define v25 25 +#define v26 26 +#define v27 27 +#define v28 28 +#define v29 29 +#define v30 30 +#define v31 31 diff --git a/lib/tsan/rtl/tsan_report.cc b/lib/tsan/rtl/tsan_report.cc index f4b06878a58e..c1d2fd07c0d9 100644 --- a/lib/tsan/rtl/tsan_report.cc +++ b/lib/tsan/rtl/tsan_report.cc @@ -111,6 +111,12 @@ static const char *ReportTypeString(ReportType typ) { return ""; } +#if SANITIZER_MAC +static const char *const kInterposedFunctionPrefix = "wrap_"; +#else +static const char *const kInterposedFunctionPrefix = "__interceptor_"; +#endif + void PrintStack(const ReportStack *ent) { if (ent == 0 || ent->frames == 0) { Printf(" [failed to restore the stack]\n\n"); @@ -121,7 +127,7 @@ void PrintStack(const ReportStack *ent) { InternalScopedString res(2 * GetPageSizeCached()); RenderFrame(&res, common_flags()->stack_trace_format, i, frame->info, common_flags()->symbolize_vs_style, - common_flags()->strip_path_prefix, "__interceptor_"); + common_flags()->strip_path_prefix, kInterposedFunctionPrefix); Printf("%s\n", res.data()); } Printf("\n"); @@ -165,9 +171,14 @@ static void PrintLocation(const ReportLocation *loc) { Printf("%s", d.Location()); if (loc->type == ReportLocationGlobal) { const DataInfo &global = loc->global; - Printf(" Location is global '%s' of size %zu at %p (%s+%p)\n\n", - global.name, global.size, global.start, - StripModuleName(global.module), 
global.module_offset); + if (global.size != 0) + Printf(" Location is global '%s' of size %zu at %p (%s+%p)\n\n", + global.name, global.size, global.start, + StripModuleName(global.module), global.module_offset); + else + Printf(" Location is global '%s' at %p (%s+%p)\n\n", global.name, + global.start, StripModuleName(global.module), + global.module_offset); } else if (loc->type == ReportLocationHeap) { char thrbuf[kThreadBufSize]; Printf(" Location is heap block of size %zu at %p allocated by %s:\n", @@ -256,10 +267,15 @@ static bool FrameIsInternal(const SymbolizedStack *frame) { if (frame == 0) return false; const char *file = frame->info.file; - return file != 0 && - (internal_strstr(file, "tsan_interceptors.cc") || - internal_strstr(file, "sanitizer_common_interceptors.inc") || - internal_strstr(file, "tsan_interface_")); + const char *module = frame->info.module; + if (file != 0 && + (internal_strstr(file, "tsan_interceptors.cc") || + internal_strstr(file, "sanitizer_common_interceptors.inc") || + internal_strstr(file, "tsan_interface_"))) + return true; + if (module != 0 && (internal_strstr(module, "libclang_rt.tsan_"))) + return true; + return false; } static SymbolizedStack *SkipTsanInternalFrames(SymbolizedStack *frames) { diff --git a/lib/tsan/rtl/tsan_rtl.cc b/lib/tsan/rtl/tsan_rtl.cc index 63c356b228a4..4df4db557a24 100644 --- a/lib/tsan/rtl/tsan_rtl.cc +++ b/lib/tsan/rtl/tsan_rtl.cc @@ -44,7 +44,7 @@ extern "C" void __tsan_resume() { namespace __tsan { -#ifndef SANITIZER_GO +#if !defined(SANITIZER_GO) && !SANITIZER_MAC THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64); #endif static char ctx_placeholder[sizeof(Context)] ALIGNED(64); @@ -55,12 +55,12 @@ Context *ctx; bool OnFinalize(bool failed); void OnInitialize(); #else -SANITIZER_INTERFACE_ATTRIBUTE -bool WEAK OnFinalize(bool failed) { +SANITIZER_WEAK_CXX_DEFAULT_IMPL +bool OnFinalize(bool failed) { return failed; } -SANITIZER_INTERFACE_ATTRIBUTE -void WEAK OnInitialize() {} +SANITIZER_WEAK_CXX_DEFAULT_IMPL +void OnInitialize() {} #endif static char thread_registry_placeholder[sizeof(ThreadRegistry)]; @@ -99,8 +99,10 @@ Context::Context() , nmissed_expected() , thread_registry(new(thread_registry_placeholder) ThreadRegistry( CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse)) + , racy_mtx(MutexTypeRacy, StatMtxRacy) , racy_stacks(MBlockRacyStacks) , racy_addresses(MBlockRacyAddresses) + , fired_suppressions_mtx(MutexTypeFired, StatMtxFired) , fired_suppressions(8) { } @@ -271,8 +273,8 @@ void MapShadow(uptr addr, uptr size) { void MapThreadTrace(uptr addr, uptr size, const char *name) { DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size); - CHECK_GE(addr, kTraceMemBeg); - CHECK_LE(addr + size, kTraceMemEnd); + CHECK_GE(addr, TraceMemBeg()); + CHECK_LE(addr + size, TraceMemEnd()); CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment uptr addr1 = (uptr)MmapFixedNoReserve(addr, size, name); if (addr1 != addr) { @@ -283,9 +285,8 @@ void MapThreadTrace(uptr addr, uptr size, const char *name) { } static void CheckShadowMapping() { - for (uptr i = 0; i < ARRAY_SIZE(UserRegions); i += 2) { - const uptr beg = UserRegions[i]; - const uptr end = UserRegions[i + 1]; + uptr beg, end; + for (int i = 0; GetUserRegion(i, &beg, &end); i++) { VPrintf(3, "checking shadow region %p-%p\n", beg, end); for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) { for (int x = -1; x <= 1; x++) { @@ -318,10 +319,15 @@ void Initialize(ThreadState *thr) { ctx = 
new(ctx_placeholder) Context; const char *options = GetEnv(kTsanOptionsEnv); - InitializeFlags(&ctx->flags, options); CacheBinaryName(); + InitializeFlags(&ctx->flags, options); + InitializePlatformEarly(); #ifndef SANITIZER_GO + // Re-exec ourselves if we need to set additional env or command line args. + MaybeReexec(); + InitializeAllocator(); + ReplaceSystemMalloc(); #endif InitializeInterceptors(); CheckShadowMapping(); @@ -417,7 +423,7 @@ int Finalize(ThreadState *thr) { StatOutput(ctx->stat); #endif - return failed ? flags()->exitcode : 0; + return failed ? common_flags()->exitcode : 0; } #ifndef SANITIZER_GO diff --git a/lib/tsan/rtl/tsan_rtl.h b/lib/tsan/rtl/tsan_rtl.h index a13e4b6379f0..04104b162f98 100644 --- a/lib/tsan/rtl/tsan_rtl.h +++ b/lib/tsan/rtl/tsan_rtl.h @@ -54,7 +54,7 @@ namespace __tsan { #ifndef SANITIZER_GO struct MapUnmapCallback; -#ifdef __mips64 +#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__) static const uptr kAllocatorSpace = 0; static const uptr kAllocatorSize = SANITIZER_MMAP_RANGE_SIZE; static const uptr kAllocatorRegionSizeLog = 20; @@ -66,7 +66,8 @@ typedef SizeClassAllocator32<kAllocatorSpace, kAllocatorSize, 0, CompactSizeClassMap, kAllocatorRegionSizeLog, ByteMap, MapUnmapCallback> PrimaryAllocator; #else -typedef SizeClassAllocator64<kHeapMemBeg, kHeapMemEnd - kHeapMemBeg, 0, +typedef SizeClassAllocator64<Mapping::kHeapMemBeg, + Mapping::kHeapMemEnd - Mapping::kHeapMemBeg, 0, DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator; #endif typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache; @@ -410,12 +411,18 @@ struct ThreadState { }; #ifndef SANITIZER_GO +#if SANITIZER_MAC +ThreadState *cur_thread(); +void cur_thread_finalize(); +#else __attribute__((tls_model("initial-exec"))) extern THREADLOCAL char cur_thread_placeholder[]; INLINE ThreadState *cur_thread() { return reinterpret_cast<ThreadState *>(&cur_thread_placeholder); } -#endif +INLINE void cur_thread_finalize() { } +#endif // SANITIZER_MAC +#endif // SANITIZER_GO class ThreadContext : public ThreadContextBase { public: @@ -458,7 +465,7 @@ struct RacyAddress { struct FiredSuppression { ReportType type; - uptr pc; + uptr pc_or_addr; Suppression *supp; }; @@ -480,9 +487,11 @@ struct Context { ThreadRegistry *thread_registry; + Mutex racy_mtx; Vector<RacyStacks> racy_stacks; Vector<RacyAddress> racy_addresses; // Number of fired suppressions may be large enough. + Mutex fired_suppressions_mtx; InternalMmapVector<FiredSuppression> fired_suppressions; DDetector *dd; @@ -587,8 +596,7 @@ void ForkChildAfter(ThreadState *thr, uptr pc); void ReportRace(ThreadState *thr); bool OutputReport(ThreadState *thr, const ScopedReport &srep); -bool IsFiredSuppression(Context *ctx, const ScopedReport &srep, - StackTrace trace); +bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace); bool IsExpectedReport(uptr addr, uptr size); void PrintMatchedBenignRaces(); @@ -708,7 +716,7 @@ void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c); // The trick is that the call preserves all registers and the compiler // does not treat it as a call. // If it does not work for you, use normal call. -#if !SANITIZER_DEBUG && defined(__x86_64__) +#if !SANITIZER_DEBUG && defined(__x86_64__) && !SANITIZER_MAC // The caller may not create the stack frame for itself at all, // so we create a reserve stack frame for it (1024b must be enough). 
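
The reserve frame mentioned in the comment above exists because HACKY_CALL is plain inline assembly: the compiler never sees a call, so it may have skipped creating a frame, and the thunk must not clobber anything the caller left below the stack pointer. A stripped-down sketch of the idiom, assuming x86-64 GNU inline asm (the real macro also maintains CFI annotations and hides the thunk symbol):

    // Drop rsp past any missing frame, call a thunk that saves and restores
    // every register itself, then restore rsp. Only memory and condition
    // codes are declared clobbered, so surrounding codegen is undisturbed.
    #define HACKY_CALL_SKETCH(f)                \
      __asm__ __volatile__("sub $1024, %%rsp;"  \
                           "call " #f "_thunk;" \
                           "add $1024, %%rsp;"  \
                           ::: "memory", "cc")
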
#define HACKY_CALL(f) \ @@ -754,11 +762,7 @@ void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs, #ifndef SANITIZER_GO uptr ALWAYS_INLINE HeapEnd() { -#if SANITIZER_CAN_USE_ALLOCATOR64 - return kHeapMemEnd + PrimaryAllocator::AdditionalSize(); -#else - return kHeapMemEnd; -#endif + return HeapMemEnd() + PrimaryAllocator::AdditionalSize(); } #endif diff --git a/lib/tsan/rtl/tsan_rtl_aarch64.S b/lib/tsan/rtl/tsan_rtl_aarch64.S new file mode 100644 index 000000000000..9cea3cf02800 --- /dev/null +++ b/lib/tsan/rtl/tsan_rtl_aarch64.S @@ -0,0 +1,206 @@ +#include "sanitizer_common/sanitizer_asm.h" +.section .text + +.hidden __tsan_setjmp +.comm _ZN14__interception11real_setjmpE,8,8 +.type setjmp, @function +setjmp: + CFI_STARTPROC + + // save env parameters for function call + stp x29, x30, [sp, -32]! + CFI_DEF_CFA_OFFSET (32) + CFI_OFFSET (29, -32) + CFI_OFFSET (30, -24) + + // Adjust the SP for previous frame + add x29, sp, 0 + CFI_DEF_CFA_REGISTER (29) + + // Save jmp_buf + str x19, [sp, 16] + CFI_OFFSET (19, -16) + mov x19, x0 + + // SP pointer mangling (see glibc setjmp) + adrp x2, :got:__pointer_chk_guard + ldr x2, [x2, #:got_lo12:__pointer_chk_guard] + add x0, x29, 32 + ldr x2, [x2] + eor x1, x2, x0 + + // call tsan interceptor + bl __tsan_setjmp + + // restore env parameter + mov x0, x19 + ldr x19, [sp, 16] + ldp x29, x30, [sp], 32 + CFI_RESTORE (30) + CFI_RESTORE (19) + CFI_DEF_CFA (31, 0) + + // tail jump to libc setjmp + adrp x1, :got:_ZN14__interception11real_setjmpE + ldr x1, [x1, #:got_lo12:_ZN14__interception11real_setjmpE] + ldr x1, [x1] + br x1 + + CFI_ENDPROC +.size setjmp, .-setjmp + +.comm _ZN14__interception12real__setjmpE,8,8 +.globl _setjmp +.type _setjmp, @function +_setjmp: + CFI_STARTPROC + + // save env parameters for function call + stp x29, x30, [sp, -32]! + CFI_DEF_CFA_OFFSET (32) + CFI_OFFSET (29, -32) + CFI_OFFSET (30, -24) + + // Adjust the SP for previous frame + add x29, sp, 0 + CFI_DEF_CFA_REGISTER (29) + + // Save jmp_buf + str x19, [sp, 16] + CFI_OFFSET (19, -16) + mov x19, x0 + + // SP pointer mangling (see glibc setjmp) + adrp x2, :got:__pointer_chk_guard + ldr x2, [x2, #:got_lo12:__pointer_chk_guard] + add x0, x29, 32 + ldr x2, [x2] + eor x1, x2, x0 + + // call tsan interceptor + bl __tsan_setjmp + + // Restore jmp_buf parameter + mov x0, x19 + ldr x19, [sp, 16] + ldp x29, x30, [sp], 32 + CFI_RESTORE (30) + CFI_RESTORE (19) + CFI_DEF_CFA (31, 0) + + // tail jump to libc setjmp + adrp x1, :got:_ZN14__interception12real__setjmpE + ldr x1, [x1, #:got_lo12:_ZN14__interception12real__setjmpE] + ldr x1, [x1] + br x1 + + CFI_ENDPROC +.size _setjmp, .-_setjmp + +.comm _ZN14__interception14real_sigsetjmpE,8,8 +.globl sigsetjmp +.type sigsetjmp, @function +sigsetjmp: + CFI_STARTPROC + + // save env parameters for function call + stp x29, x30, [sp, -32]! 
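
The eor x1, x2, x0 step in each of these trampolines reproduces glibc's PTR_MANGLE so that __tsan_setjmp receives the same mangled stack pointer the jmp_buf will hold. The scheme, sketched in C++ (the guard lives in __pointer_chk_guard on AArch64 and at %fs:0x30 on x86-64 glibc):

    #include <stdint.h>

    // glibc XORs security-sensitive saved pointers with a per-process guard.
    static uint64_t MangleSpAArch64(uint64_t sp, uint64_t guard) {
      return sp ^ guard;  // AArch64 stops at the XOR
    }
    static uint64_t MangleSpX8664(uint64_t sp, uint64_t guard) {
      uint64_t v = sp ^ guard;
      return (v << 17) | (v >> 47);  // x86-64 additionally does rol $0x11
    }
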
+ CFI_DEF_CFA_OFFSET (32) + CFI_OFFSET (29, -32) + CFI_OFFSET (30, -24) + + // Adjust the SP for previous frame + add x29, sp, 0 + CFI_DEF_CFA_REGISTER (29) + + // Save jmp_buf and savesigs + stp x19, x20, [sp, 16] + CFI_OFFSET (19, -16) + CFI_OFFSET (20, -8) + mov w20, w1 + mov x19, x0 + + // SP pointer mangling (see glibc setjmp) + adrp x2, :got:__pointer_chk_guard + ldr x2, [x2, #:got_lo12:__pointer_chk_guard] + add x0, x29, 32 + ldr x2, [x2] + eor x1, x2, x0 + + // call tsan interceptor + bl __tsan_setjmp + + // restore env parameter + mov w1, w20 + mov x0, x19 + ldp x19, x20, [sp, 16] + ldp x29, x30, [sp], 32 + CFI_RESTORE (30) + CFI_RESTORE (29) + CFI_RESTORE (19) + CFI_RESTORE (20) + CFI_DEF_CFA (31, 0) + + // tail jump to libc sigsetjmp + adrp x2, :got:_ZN14__interception14real_sigsetjmpE + ldr x2, [x2, #:got_lo12:_ZN14__interception14real_sigsetjmpE] + ldr x2, [x2] + br x2 + CFI_ENDPROC +.size sigsetjmp, .-sigsetjmp + +.comm _ZN14__interception16real___sigsetjmpE,8,8 +.globl __sigsetjmp +.type __sigsetjmp, @function +__sigsetjmp: + CFI_STARTPROC + + // save env parameters for function call + stp x29, x30, [sp, -32]! + CFI_DEF_CFA_OFFSET (32) + CFI_OFFSET (29, -32) + CFI_OFFSET (30, -24) + + // Adjust the SP for previous frame + add x29, sp, 0 + CFI_DEF_CFA_REGISTER (29) + + // Save jmp_buf and savesigs + stp x19, x20, [sp, 16] + CFI_OFFSET (19, -16) + CFI_OFFSET (20, -8) + mov w20, w1 + mov x19, x0 + + // SP pointer mangling (see glibc setjmp) + adrp x2, :got:__pointer_chk_guard + ldr x2, [x2, #:got_lo12:__pointer_chk_guard] + add x0, x29, 32 + ldr x2, [x2] + eor x1, x2, x0 + + // call tsan interceptor + bl __tsan_setjmp + + mov w1, w20 + mov x0, x19 + ldp x19, x20, [sp, 16] + ldp x29, x30, [sp], 32 + CFI_RESTORE (30) + CFI_RESTORE (29) + CFI_RESTORE (19) + CFI_RESTORE (20) + CFI_DEF_CFA (31, 0) + + // tail jump to libc __sigsetjmp + adrp x2, :got:_ZN14__interception16real___sigsetjmpE + ldr x2, [x2, #:got_lo12:_ZN14__interception16real___sigsetjmpE] + ldr x2, [x2] + br x2 + CFI_ENDPROC +.size __sigsetjmp, .-__sigsetjmp + +#if defined(__linux__) +/* We do not need executable stack. */ +.section .note.GNU-stack,"",@progbits +#endif diff --git a/lib/tsan/rtl/tsan_rtl_amd64.S b/lib/tsan/rtl/tsan_rtl_amd64.S index 8db62f9013a3..caa832375e52 100644 --- a/lib/tsan/rtl/tsan_rtl_amd64.S +++ b/lib/tsan/rtl/tsan_rtl_amd64.S @@ -1,9 +1,13 @@ #include "sanitizer_common/sanitizer_asm.h" +#if !defined(__APPLE__) .section .text +#else +.section __TEXT,__text +#endif -.hidden __tsan_trace_switch -.globl __tsan_trace_switch_thunk -__tsan_trace_switch_thunk: +ASM_HIDDEN(__tsan_trace_switch) +.globl ASM_TSAN_SYMBOL(__tsan_trace_switch_thunk) +ASM_TSAN_SYMBOL(__tsan_trace_switch_thunk): CFI_STARTPROC # Save scratch registers. push %rax @@ -42,7 +46,7 @@ __tsan_trace_switch_thunk: shr $4, %rsp # clear 4 lsb, align to 16 shl $4, %rsp - call __tsan_trace_switch + call ASM_TSAN_SYMBOL(__tsan_trace_switch) # Unalign stack frame back. mov %rbx, %rsp # restore the original rsp @@ -81,9 +85,9 @@ __tsan_trace_switch_thunk: ret CFI_ENDPROC -.hidden __tsan_report_race -.globl __tsan_report_race_thunk -__tsan_report_race_thunk: +ASM_HIDDEN(__tsan_report_race) +.globl ASM_TSAN_SYMBOL(__tsan_report_race_thunk) +ASM_TSAN_SYMBOL(__tsan_report_race_thunk): CFI_STARTPROC # Save scratch registers. push %rax @@ -122,7 +126,7 @@ __tsan_report_race_thunk: shr $4, %rsp # clear 4 lsb, align to 16 shl $4, %rsp - call __tsan_report_race + call ASM_TSAN_SYMBOL(__tsan_report_race) # Unalign stack frame back. 
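
The ASM_TSAN_SYMBOL and ASM_TSAN_SYMBOL_INTERCEPTOR wrappers threaded through these thunks exist because Mach-O prepends an underscore to C-visible symbols and because Darwin interception uses a wrap_ prefix rather than __interceptor_ (matching kInterposedFunctionPrefix in the tsan_report.cc hunk above). The macros plausibly reduce to something like the following; the exact spellings are an assumption, since sanitizer_asm.h is not part of this diff:

    #if defined(__APPLE__)
    # define ASM_TSAN_SYMBOL(symbol)             _##symbol
    # define ASM_TSAN_SYMBOL_INTERCEPTOR(symbol) _wrap_##symbol
    #else
    # define ASM_TSAN_SYMBOL(symbol)             symbol
    # define ASM_TSAN_SYMBOL_INTERCEPTOR(symbol) __interceptor_##symbol
    #endif
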
mov %rbx, %rsp # restore the original rsp @@ -161,11 +165,13 @@ __tsan_report_race_thunk: ret CFI_ENDPROC -.hidden __tsan_setjmp +ASM_HIDDEN(__tsan_setjmp) +#if !defined(__APPLE__) .comm _ZN14__interception11real_setjmpE,8,8 -.globl setjmp -.type setjmp, @function -setjmp: +#endif +.globl ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp) +ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp)) +ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp): CFI_STARTPROC // save env parameter push %rdi @@ -175,29 +181,38 @@ setjmp: #if defined(__FreeBSD__) lea 8(%rsp), %rdi mov %rdi, %rsi -#else +#elif defined(__APPLE__) + lea 16(%rsp), %rdi + mov %rdi, %rsi +#elif defined(__linux__) lea 16(%rsp), %rdi mov %rdi, %rsi xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp) rol $0x11, %rsi +#else +# error "Unknown platform" #endif // call tsan interceptor - call __tsan_setjmp + call ASM_TSAN_SYMBOL(__tsan_setjmp) // restore env parameter pop %rdi CFI_ADJUST_CFA_OFFSET(-8) CFI_RESTORE(%rdi) // tail jump to libc setjmp movl $0, %eax +#if !defined(__APPLE__) movq _ZN14__interception11real_setjmpE@GOTPCREL(%rip), %rdx jmp *(%rdx) +#else + jmp ASM_TSAN_SYMBOL(setjmp) +#endif CFI_ENDPROC -.size setjmp, .-setjmp +ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp)) .comm _ZN14__interception12real__setjmpE,8,8 -.globl _setjmp -.type _setjmp, @function -_setjmp: +.globl ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp) +ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp)) +ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp): CFI_STARTPROC // save env parameter push %rdi @@ -207,29 +222,38 @@ _setjmp: #if defined(__FreeBSD__) lea 8(%rsp), %rdi mov %rdi, %rsi -#else +#elif defined(__APPLE__) + lea 16(%rsp), %rdi + mov %rdi, %rsi +#elif defined(__linux__) lea 16(%rsp), %rdi mov %rdi, %rsi xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp) rol $0x11, %rsi +#else +# error "Unknown platform" #endif // call tsan interceptor - call __tsan_setjmp + call ASM_TSAN_SYMBOL(__tsan_setjmp) // restore env parameter pop %rdi CFI_ADJUST_CFA_OFFSET(-8) CFI_RESTORE(%rdi) // tail jump to libc setjmp movl $0, %eax +#if !defined(__APPLE__) movq _ZN14__interception12real__setjmpE@GOTPCREL(%rip), %rdx jmp *(%rdx) +#else + jmp ASM_TSAN_SYMBOL(_setjmp) +#endif CFI_ENDPROC -.size _setjmp, .-_setjmp +ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp)) .comm _ZN14__interception14real_sigsetjmpE,8,8 -.globl sigsetjmp -.type sigsetjmp, @function -sigsetjmp: +.globl ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp) +ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp)) +ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp): CFI_STARTPROC // save env parameter push %rdi @@ -246,14 +270,19 @@ sigsetjmp: #if defined(__FreeBSD__) lea 24(%rsp), %rdi mov %rdi, %rsi -#else +#elif defined(__APPLE__) + lea 32(%rsp), %rdi + mov %rdi, %rsi +#elif defined(__linux__) lea 32(%rsp), %rdi mov %rdi, %rsi xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp) rol $0x11, %rsi +#else +# error "Unknown platform" #endif // call tsan interceptor - call __tsan_setjmp + call ASM_TSAN_SYMBOL(__tsan_setjmp) // unalign stack frame add $8, %rsp CFI_ADJUST_CFA_OFFSET(-8) @@ -267,15 +296,20 @@ sigsetjmp: CFI_RESTORE(%rdi) // tail jump to libc sigsetjmp movl $0, %eax +#if !defined(__APPLE__) movq _ZN14__interception14real_sigsetjmpE@GOTPCREL(%rip), %rdx jmp *(%rdx) +#else + jmp ASM_TSAN_SYMBOL(sigsetjmp) +#endif CFI_ENDPROC -.size sigsetjmp, .-sigsetjmp +ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp)) +#if !defined(__APPLE__) .comm _ZN14__interception16real___sigsetjmpE,8,8 -.globl __sigsetjmp -.type __sigsetjmp, @function -__sigsetjmp: 
+.globl ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp) +ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp)) +ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp): CFI_STARTPROC // save env parameter push %rdi @@ -299,7 +333,7 @@ __sigsetjmp: rol $0x11, %rsi #endif // call tsan interceptor - call __tsan_setjmp + call ASM_TSAN_SYMBOL(__tsan_setjmp) // unalign stack frame add $8, %rsp CFI_ADJUST_CFA_OFFSET(-8) @@ -316,7 +350,8 @@ __sigsetjmp: movq _ZN14__interception16real___sigsetjmpE@GOTPCREL(%rip), %rdx jmp *(%rdx) CFI_ENDPROC -.size __sigsetjmp, .-__sigsetjmp +ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp)) +#endif // !defined(__APPLE__) #if defined(__FreeBSD__) || defined(__linux__) /* We do not need executable stack. */ diff --git a/lib/tsan/rtl/tsan_rtl_mutex.cc b/lib/tsan/rtl/tsan_rtl_mutex.cc index 09180d88a6fb..62ab7aa6b2b4 100644 --- a/lib/tsan/rtl/tsan_rtl_mutex.cc +++ b/lib/tsan/rtl/tsan_rtl_mutex.cc @@ -472,7 +472,7 @@ void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) { for (int i = 0; i < r->n; i++) { for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) { u32 stk = r->loop[i].stk[j]; - if (stk) { + if (stk && stk != 0xffffffff) { rep.AddStack(StackDepotGet(stk), true); } else { // Sometimes we fail to extract the stack trace (FIXME: investigate), diff --git a/lib/tsan/rtl/tsan_rtl_ppc64.S b/lib/tsan/rtl/tsan_rtl_ppc64.S new file mode 100644 index 000000000000..8285e21aa1ec --- /dev/null +++ b/lib/tsan/rtl/tsan_rtl_ppc64.S @@ -0,0 +1,288 @@ +#include "tsan_ppc_regs.h" + + .section .text + .hidden __tsan_setjmp + .globl _setjmp + .type _setjmp, @function + .align 4 +#if _CALL_ELF == 2 +_setjmp: +#else + .section ".opd","aw" + .align 3 +_setjmp: + .quad .L._setjmp,.TOC.@tocbase,0 + .previous +#endif +.L._setjmp: + mflr r0 + stdu r1,-48(r1) + std r2,24(r1) + std r3,32(r1) + std r0,40(r1) + // r3 is the original stack pointer. + addi r3,r1,48 + // r4 is the mangled stack pointer (see glibc) + ld r4,-28696(r13) + xor r4,r3,r4 + // Materialize a TOC in case we were called from libc. + // For big-endian, we load the TOC from the OPD. For little- + // endian, we use the .TOC. symbol to find it. + nop + bcl 20,31,0f +0: + mflr r2 +#if _CALL_ELF == 2 + addis r2,r2,.TOC.-0b@ha + addi r2,r2,.TOC.-0b@l +#else + addis r2,r2,_setjmp-0b@ha + addi r2,r2,_setjmp-0b@l + ld r2,8(r2) +#endif + // Call the interceptor. + bl __tsan_setjmp + nop + // Restore regs needed for setjmp. + ld r3,32(r1) + ld r0,40(r1) + // Emulate the real setjmp function. We do this because we can't + // perform a sibcall: The real setjmp function trashes the TOC + // pointer, and with a sibcall we have no way to restore it. + // This way we can make sure our caller's stack pointer and + // link register are saved correctly in the jmpbuf. + ld r6,-28696(r13) + addi r5,r1,48 // original stack ptr of caller + xor r5,r6,r5 + std r5,0(r3) // mangled stack ptr of caller + ld r5,24(r1) + std r5,8(r3) // caller's saved TOC pointer + xor r0,r6,r0 + std r0,16(r3) // caller's mangled return address + mfcr r0 + // Nonvolatiles. 
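
The long store sequence that follows writes glibc's ppc64 jmp_buf layout by hand, since a sibcall into the real setjmp would lose the TOC. Collecting the offsets used in these trampolines into one place (a sketch for orientation, derived from the stores themselves, not a glibc header):

    #include <stdint.h>

    // Offsets implied by the stores in _setjmp/__sigsetjmp.
    struct Ppc64JmpBufSketch {
      uint64_t sp_mangled;   //   0: caller's sp ^ guard
      uint64_t toc;          //   8: caller's TOC pointer (r2)
      uint64_t lr_mangled;   //  16: caller's return address ^ guard
      uint64_t gprs[18];     //  24: r14..r31
      uint32_t vrsave;       // 168: VRSAVE special register
      uint32_t cr;           // 172: condition register
      double fprs[18];       // 176: f14..f31
      uint8_t vrs[12 * 16];  // 320: v20..v31, 16-byte aligned
      uint32_t mask_saved;   // 512: cleared by _setjmp, set from r4 by __sigsetjmp
    };
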
+ std r14,24(r3) + stfd f14,176(r3) + stw r0,172(r3) // CR + std r15,32(r3) + stfd f15,184(r3) + std r16,40(r3) + stfd f16,192(r3) + std r17,48(r3) + stfd f17,200(r3) + std r18,56(r3) + stfd f18,208(r3) + std r19,64(r3) + stfd f19,216(r3) + std r20,72(r3) + stfd f20,224(r3) + std r21,80(r3) + stfd f21,232(r3) + std r22,88(r3) + stfd f22,240(r3) + std r23,96(r3) + stfd f23,248(r3) + std r24,104(r3) + stfd f24,256(r3) + std r25,112(r3) + stfd f25,264(r3) + std r26,120(r3) + stfd f26,272(r3) + std r27,128(r3) + stfd f27,280(r3) + std r28,136(r3) + stfd f28,288(r3) + std r29,144(r3) + stfd f29,296(r3) + std r30,152(r3) + stfd f30,304(r3) + std r31,160(r3) + stfd f31,312(r3) + addi r5,r3,320 + mfspr r0,256 + stw r0,168(r3) // VRSAVE + addi r6,r5,16 + stvx v20,0,r5 + addi r5,r5,32 + stvx v21,0,r6 + addi r6,r6,32 + stvx v22,0,r5 + addi r5,r5,32 + stvx v23,0,r6 + addi r6,r6,32 + stvx v24,0,r5 + addi r5,r5,32 + stvx v25,0,r6 + addi r6,r6,32 + stvx v26,0,r5 + addi r5,r5,32 + stvx v27,0,r6 + addi r6,r6,32 + stvx v28,0,r5 + addi r5,r5,32 + stvx v29,0,r6 + addi r6,r6,32 + stvx v30,0,r5 + stvx v31,0,r6 + // Clear the "mask-saved" slot. + li r4,0 + stw r4,512(r3) + // Restore TOC, LR, and stack and return to caller. + ld r2,24(r1) + ld r0,40(r1) + addi r1,r1,48 + li r3,0 // This is the setjmp return path + mtlr r0 + blr + .size _setjmp, .-.L._setjmp + + .globl setjmp + .type setjmp, @function + .align 4 +setjmp: + b _setjmp + .size setjmp, .-setjmp + + // sigsetjmp is like setjmp, except that the mask in r4 needs + // to be saved at offset 512 of the jump buffer. + .globl __sigsetjmp + .type __sigsetjmp, @function + .align 4 +#if _CALL_ELF == 2 +__sigsetjmp: +#else + .section ".opd","aw" + .align 3 +__sigsetjmp: + .quad .L.__sigsetjmp,.TOC.@tocbase,0 + .previous +#endif +.L.__sigsetjmp: + mflr r0 + stdu r1,-64(r1) + std r2,24(r1) + std r3,32(r1) + std r4,40(r1) + std r0,48(r1) + // r3 is the original stack pointer. + addi r3,r1,64 + // r4 is the mangled stack pointer (see glibc) + ld r4,-28696(r13) + xor r4,r3,r4 + // Materialize a TOC in case we were called from libc. + // For big-endian, we load the TOC from the OPD. For little- + // endian, we use the .TOC. symbol to find it. + nop + bcl 20,31,1f +1: + mflr r2 +#if _CALL_ELF == 2 + addis r2,r2,.TOC.-1b@ha + addi r2,r2,.TOC.-1b@l +#else + addis r2,r2,_setjmp-1b@ha + addi r2,r2,_setjmp-1b@l + ld r2,8(r2) +#endif + // Call the interceptor. + bl __tsan_setjmp + nop + // Restore regs needed for __sigsetjmp. + ld r3,32(r1) + ld r4,40(r1) + ld r0,48(r1) + // Emulate the real sigsetjmp function. We do this because we can't + // perform a sibcall: The real sigsetjmp function trashes the TOC + // pointer, and with a sibcall we have no way to restore it. + // This way we can make sure our caller's stack pointer and + // link register are saved correctly in the jmpbuf. + ld r6,-28696(r13) + addi r5,r1,64 // original stack ptr of caller + xor r5,r6,r5 + std r5,0(r3) // mangled stack ptr of caller + ld r5,24(r1) + std r5,8(r3) // caller's saved TOC pointer + xor r0,r6,r0 + std r0,16(r3) // caller's mangled return address + mfcr r0 + // Nonvolatiles. 
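
__sigsetjmp differs from _setjmp only in the savemask argument, which arrives in r4, is preserved across the __tsan_setjmp call, and lands in the mask-saved slot at offset 512. For orientation, the user-visible contract these trampolines must preserve, as a runnable example:

    #include <setjmp.h>
    #include <stdio.h>

    static sigjmp_buf env;

    int main() {
      // savemask=1 asks sigsetjmp to record the signal mask; that flag is
      // what ends up in the jump buffer's mask-saved slot.
      if (sigsetjmp(env, 1) == 0) {
        puts("first pass: direct return");
        siglongjmp(env, 1);  // reenters sigsetjmp's return with value 1
      }
      puts("second pass: returned via siglongjmp");
      return 0;
    }
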
+ std r14,24(r3) + stfd f14,176(r3) + stw r0,172(r3) // CR + std r15,32(r3) + stfd f15,184(r3) + std r16,40(r3) + stfd f16,192(r3) + std r17,48(r3) + stfd f17,200(r3) + std r18,56(r3) + stfd f18,208(r3) + std r19,64(r3) + stfd f19,216(r3) + std r20,72(r3) + stfd f20,224(r3) + std r21,80(r3) + stfd f21,232(r3) + std r22,88(r3) + stfd f22,240(r3) + std r23,96(r3) + stfd f23,248(r3) + std r24,104(r3) + stfd f24,256(r3) + std r25,112(r3) + stfd f25,264(r3) + std r26,120(r3) + stfd f26,272(r3) + std r27,128(r3) + stfd f27,280(r3) + std r28,136(r3) + stfd f28,288(r3) + std r29,144(r3) + stfd f29,296(r3) + std r30,152(r3) + stfd f30,304(r3) + std r31,160(r3) + stfd f31,312(r3) + addi r5,r3,320 + mfspr r0,256 + stw r0,168(r3) // VRSAVE + addi r6,r5,16 + stvx v20,0,r5 + addi r5,r5,32 + stvx v21,0,r6 + addi r6,r6,32 + stvx v22,0,r5 + addi r5,r5,32 + stvx v23,0,r6 + addi r6,r6,32 + stvx v24,0,r5 + addi r5,r5,32 + stvx v25,0,r6 + addi r6,r6,32 + stvx v26,0,r5 + addi r5,r5,32 + stvx v27,0,r6 + addi r6,r6,32 + stvx v28,0,r5 + addi r5,r5,32 + stvx v29,0,r6 + addi r6,r6,32 + stvx v30,0,r5 + stvx v31,0,r6 + // Save into the "mask-saved" slot. + stw r4,512(r3) + // Restore TOC, LR, and stack and return to caller. + ld r2,24(r1) + ld r0,48(r1) + addi r1,r1,64 + li r3,0 // This is the sigsetjmp return path + mtlr r0 + blr + .size __sigsetjmp, .-.L.__sigsetjmp + + .globl sigsetjmp + .type sigsetjmp, @function + .align 4 +sigsetjmp: + b __sigsetjmp + .size sigsetjmp, .-sigsetjmp diff --git a/lib/tsan/rtl/tsan_rtl_report.cc b/lib/tsan/rtl/tsan_rtl_report.cc index dc9438e6371b..5aff6ca56adf 100644 --- a/lib/tsan/rtl/tsan_rtl_report.cc +++ b/lib/tsan/rtl/tsan_rtl_report.cc @@ -49,8 +49,8 @@ void TsanCheckFailed(const char *file, int line, const char *cond, #ifdef TSAN_EXTERNAL_HOOKS bool OnReport(const ReportDesc *rep, bool suppressed); #else -SANITIZER_INTERFACE_ATTRIBUTE -bool WEAK OnReport(const ReportDesc *rep, bool suppressed) { +SANITIZER_WEAK_CXX_DEFAULT_IMPL +bool OnReport(const ReportDesc *rep, bool suppressed) { (void)rep; return suppressed; } @@ -186,7 +186,7 @@ void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) { return; } void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread)); - ReportThread *rt = new(mem) ReportThread(); + ReportThread *rt = new(mem) ReportThread; rep_->threads.PushBack(rt); rt->id = tctx->tid; rt->pid = tctx->os_id; @@ -200,16 +200,16 @@ void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) { } #ifndef SANITIZER_GO +static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) { + int unique_id = *(int *)arg; + return tctx->unique_id == (u32)unique_id; +} + static ThreadContext *FindThreadByUidLocked(int unique_id) { ctx->thread_registry->CheckLocked(); - for (unsigned i = 0; i < kMaxTid; i++) { - ThreadContext *tctx = static_cast<ThreadContext*>( - ctx->thread_registry->GetThreadLocked(i)); - if (tctx && tctx->unique_id == (u32)unique_id) { - return tctx; - } - } - return 0; + return static_cast<ThreadContext *>( + ctx->thread_registry->FindThreadContextLocked( + FindThreadByUidLockedCallback, &unique_id)); } static ThreadContext *FindThreadByTidLocked(int tid) { @@ -256,7 +256,7 @@ void ScopedReport::AddMutex(const SyncVar *s) { return; } void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex)); - ReportMutex *rm = new(mem) ReportMutex(); + ReportMutex *rm = new(mem) ReportMutex; rep_->mutexes.PushBack(rm); rm->id = s->uid; rm->addr = s->addr; @@ -289,7 +289,7 @@ void 
ScopedReport::AddDeadMutex(u64 id) { return; } void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex)); - ReportMutex *rm = new(mem) ReportMutex(); + ReportMutex *rm = new(mem) ReportMutex; rep_->mutexes.PushBack(rm); rm->id = id; rm->addr = 0; @@ -369,27 +369,20 @@ void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk, // This function restores stack trace and mutex set for the thread/epoch. // It does so by getting stack trace and mutex set at the beginning of // trace part, and then replaying the trace till the given epoch. - ctx->thread_registry->CheckLocked(); - ThreadContext *tctx = static_cast<ThreadContext*>( - ctx->thread_registry->GetThreadLocked(tid)); - if (tctx == 0) - return; - if (tctx->status != ThreadStatusRunning - && tctx->status != ThreadStatusFinished - && tctx->status != ThreadStatusDead) - return; - Trace* trace = ThreadTrace(tctx->tid); - Lock l(&trace->mtx); + Trace* trace = ThreadTrace(tid); + ReadLock l(&trace->mtx); const int partidx = (epoch / kTracePartSize) % TraceParts(); TraceHeader* hdr = &trace->headers[partidx]; - if (epoch < hdr->epoch0) + if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize) return; + CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0); const u64 epoch0 = RoundDown(epoch, TraceSize()); const u64 eend = epoch % TraceSize(); const u64 ebegin = RoundDown(eend, kTracePartSize); DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n", tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx); - InternalScopedBuffer<uptr> stack(kShadowStackSize); + Vector<uptr> stack(MBlockReportStack); + stack.Resize(hdr->stack0.size + 64); for (uptr i = 0; i < hdr->stack0.size; i++) { stack[i] = hdr->stack0.trace[i]; DPrintf2(" #%02zu: pc=%zx\n", i, stack[i]); @@ -406,6 +399,8 @@ void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk, if (typ == EventTypeMop) { stack[pos] = pc; } else if (typ == EventTypeFuncEnter) { + if (stack.Size() < pos + 2) + stack.Resize(pos + 2); stack[pos++] = pc; } else if (typ == EventTypeFuncExit) { if (pos > 0) @@ -428,50 +423,58 @@ void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk, if (pos == 0 && stack[0] == 0) return; pos++; - stk->Init(stack.data(), pos); + stk->Init(&stack[0], pos); } static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2], uptr addr_min, uptr addr_max) { bool equal_stack = false; RacyStacks hash; - if (flags()->suppress_equal_stacks) { - hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr)); - hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr)); - for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) { - if (hash == ctx->racy_stacks[i]) { - DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n"); - equal_stack = true; - break; - } - } - } bool equal_address = false; RacyAddress ra0 = {addr_min, addr_max}; - if (flags()->suppress_equal_addresses) { - for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) { - RacyAddress ra2 = ctx->racy_addresses[i]; - uptr maxbeg = max(ra0.addr_min, ra2.addr_min); - uptr minend = min(ra0.addr_max, ra2.addr_max); - if (maxbeg < minend) { - DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n"); - equal_address = true; - break; + { + ReadLock lock(&ctx->racy_mtx); + if (flags()->suppress_equal_stacks) { + hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr)); + hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr)); + for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) { + if (hash == 
ctx->racy_stacks[i]) { + VPrintf(2, + "ThreadSanitizer: suppressing report as doubled (stack)\n"); + equal_stack = true; + break; + } + } + } + if (flags()->suppress_equal_addresses) { + for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) { + RacyAddress ra2 = ctx->racy_addresses[i]; + uptr maxbeg = max(ra0.addr_min, ra2.addr_min); + uptr minend = min(ra0.addr_max, ra2.addr_max); + if (maxbeg < minend) { + VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n"); + equal_address = true; + break; + } } } } - if (equal_stack || equal_address) { - if (!equal_stack) - ctx->racy_stacks.PushBack(hash); - if (!equal_address) - ctx->racy_addresses.PushBack(ra0); - return true; + if (!equal_stack && !equal_address) + return false; + if (!equal_stack) { + Lock lock(&ctx->racy_mtx); + ctx->racy_stacks.PushBack(hash); } - return false; + if (!equal_address) { + Lock lock(&ctx->racy_mtx); + ctx->racy_addresses.PushBack(ra0); + } + return true; } static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2], uptr addr_min, uptr addr_max) { + Lock lock(&ctx->racy_mtx); if (flags()->suppress_equal_stacks) { RacyStacks hash; hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr)); @@ -485,26 +488,29 @@ static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2], } bool OutputReport(ThreadState *thr, const ScopedReport &srep) { - atomic_store(&ctx->last_symbolize_time_ns, NanoTime(), memory_order_relaxed); + if (!flags()->report_bugs) + return false; + atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime()); const ReportDesc *rep = srep.GetReport(); Suppression *supp = 0; - uptr suppress_pc = 0; - for (uptr i = 0; suppress_pc == 0 && i < rep->mops.Size(); i++) - suppress_pc = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp); - for (uptr i = 0; suppress_pc == 0 && i < rep->stacks.Size(); i++) - suppress_pc = IsSuppressed(rep->typ, rep->stacks[i], &supp); - for (uptr i = 0; suppress_pc == 0 && i < rep->threads.Size(); i++) - suppress_pc = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp); - for (uptr i = 0; suppress_pc == 0 && i < rep->locs.Size(); i++) - suppress_pc = IsSuppressed(rep->typ, rep->locs[i], &supp); - if (suppress_pc != 0) { - FiredSuppression s = {srep.GetReport()->typ, suppress_pc, supp}; + uptr pc_or_addr = 0; + for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++) + pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp); + for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++) + pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp); + for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++) + pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp); + for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++) + pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp); + if (pc_or_addr != 0) { + Lock lock(&ctx->fired_suppressions_mtx); + FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp}; ctx->fired_suppressions.push_back(s); } { bool old_is_freeing = thr->is_freeing; thr->is_freeing = false; - bool suppressed = OnReport(rep, suppress_pc != 0); + bool suppressed = OnReport(rep, pc_or_addr != 0); thr->is_freeing = old_is_freeing; if (suppressed) return false; @@ -512,20 +518,20 @@ bool OutputReport(ThreadState *thr, const ScopedReport &srep) { PrintReport(rep); ctx->nreported++; if (flags()->halt_on_error) - internal__exit(flags()->exitcode); + Die(); return true; } -bool IsFiredSuppression(Context *ctx, const ScopedReport &srep, - StackTrace trace) { +bool 
IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) { + ReadLock lock(&ctx->fired_suppressions_mtx); for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) { - if (ctx->fired_suppressions[k].type != srep.GetReport()->typ) + if (ctx->fired_suppressions[k].type != type) continue; for (uptr j = 0; j < trace.size; j++) { FiredSuppression *s = &ctx->fired_suppressions[k]; - if (trace.trace[j] == s->pc) { + if (trace.trace[j] == s->pc_or_addr) { if (s->supp) - s->supp->hit_count++; + atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed); return true; } } @@ -533,16 +539,15 @@ bool IsFiredSuppression(Context *ctx, const ScopedReport &srep, return false; } -static bool IsFiredSuppression(Context *ctx, - const ScopedReport &srep, - uptr addr) { +static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) { + ReadLock lock(&ctx->fired_suppressions_mtx); for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) { - if (ctx->fired_suppressions[k].type != srep.GetReport()->typ) + if (ctx->fired_suppressions[k].type != type) continue; FiredSuppression *s = &ctx->fired_suppressions[k]; - if (addr == s->pc) { + if (addr == s->pc_or_addr) { if (s->supp) - s->supp->hit_count++; + atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed); return true; } } @@ -595,8 +600,6 @@ void ReportRace(ThreadState *thr) { return; } - ThreadRegistryLock l0(ctx->thread_registry); - ReportType typ = ReportTypeRace; if (thr->is_vptr_access && freed) typ = ReportTypeVptrUseAfterFree; @@ -604,29 +607,35 @@ void ReportRace(ThreadState *thr) { typ = ReportTypeVptrRace; else if (freed) typ = ReportTypeUseAfterFree; - ScopedReport rep(typ); - if (IsFiredSuppression(ctx, rep, addr)) + + if (IsFiredSuppression(ctx, typ, addr)) return; + const uptr kMop = 2; VarSizeStackTrace traces[kMop]; const uptr toppc = TraceTopPC(thr); ObtainCurrentStack(thr, toppc, &traces[0]); - if (IsFiredSuppression(ctx, rep, traces[0])) + if (IsFiredSuppression(ctx, typ, traces[0])) return; - InternalScopedBuffer<MutexSet> mset2(1); - new(mset2.data()) MutexSet(); + + // MutexSet is too large to live on stack. + Vector<u64> mset_buffer(MBlockScopedBuf); + mset_buffer.Resize(sizeof(MutexSet) / sizeof(u64) + 1); + MutexSet *mset2 = new(&mset_buffer[0]) MutexSet(); + Shadow s2(thr->racy_state[1]); - RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2.data()); - if (IsFiredSuppression(ctx, rep, traces[1])) + RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2); + if (IsFiredSuppression(ctx, typ, traces[1])) return; if (HandleRacyStacks(thr, traces, addr_min, addr_max)) return; + ThreadRegistryLock l0(ctx->thread_registry); + ScopedReport rep(typ); for (uptr i = 0; i < kMop; i++) { Shadow s(thr->racy_state[i]); - rep.AddMemoryAccess(addr, s, traces[i], - i == 0 ? &thr->mset : mset2.data()); + rep.AddMemoryAccess(addr, s, traces[i], i == 0 ? &thr->mset : mset2); } for (uptr i = 0; i < kMop; i++) { diff --git a/lib/tsan/rtl/tsan_rtl_thread.cc b/lib/tsan/rtl/tsan_rtl_thread.cc index 66c78cfdd7c0..dcae255f7643 100644 --- a/lib/tsan/rtl/tsan_rtl_thread.cc +++ b/lib/tsan/rtl/tsan_rtl_thread.cc @@ -55,6 +55,8 @@ void ThreadContext::OnCreated(void *arg) { if (tid == 0) return; OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg); + if (!args->thr) // GCD workers don't have a parent thread. + return; args->thr->fast_state.IncrementEpoch(); // Can't increment epoch w/o writing to the trace as well. 
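
The invariant in the comment above, that every epoch increment must be mirrored by a trace event, is exactly what lets RestoreStack (earlier in this patch) replay a trace: the slot of an event inside the trace is a pure function of the epoch. The arithmetic RestoreStack uses, extracted as a sketch:

    #include <stdint.h>

    // Mirrors RestoreStack: eend is the slot of the epoch being restored,
    // ebegin the first slot of the trace part containing it.
    struct ReplayWindow {
      uint64_t ebegin;
      uint64_t eend;
    };

    static ReplayWindow GetReplayWindow(uint64_t epoch, uint64_t trace_size,
                                        uint64_t trace_part_size) {
      uint64_t eend = epoch % trace_size;
      uint64_t ebegin = (eend / trace_part_size) * trace_part_size;
      return {ebegin, eend};
    }
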
TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0); @@ -231,8 +233,10 @@ int ThreadCount(ThreadState *thr) { int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) { StatInc(thr, StatThreadCreate); OnCreatedArgs args = { thr, pc }; - int tid = ctx->thread_registry->CreateThread(uid, detached, thr->tid, &args); - DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", thr->tid, tid, uid); + u32 parent_tid = thr ? thr->tid : kInvalidTid; // No parent for GCD workers. + int tid = + ctx->thread_registry->CreateThread(uid, detached, parent_tid, &args); + DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent_tid, tid, uid); StatSet(thr, StatThreadMaxAlive, ctx->thread_registry->GetMaxAliveThreads()); return tid; } diff --git a/lib/tsan/rtl/tsan_stat.cc b/lib/tsan/rtl/tsan_stat.cc index 15fa43d6f8a1..a5cca9679582 100644 --- a/lib/tsan/rtl/tsan_stat.cc +++ b/lib/tsan/rtl/tsan_stat.cc @@ -164,8 +164,9 @@ void StatOutput(u64 *stat) { name[StatMtxAtExit] = " Atexit "; name[StatMtxAnnotations] = " Annotations "; name[StatMtxMBlock] = " MBlock "; - name[StatMtxJavaMBlock] = " JavaMBlock "; name[StatMtxDeadlockDetector] = " DeadlockDetector "; + name[StatMtxFired] = " FiredSuppressions "; + name[StatMtxRacy] = " RacyStacks "; name[StatMtxFD] = " FD "; Printf("Statistics:\n"); diff --git a/lib/tsan/rtl/tsan_stat.h b/lib/tsan/rtl/tsan_stat.h index 0bd949ed1563..8ea32048e147 100644 --- a/lib/tsan/rtl/tsan_stat.h +++ b/lib/tsan/rtl/tsan_stat.h @@ -169,8 +169,9 @@ enum StatType { StatMtxAnnotations, StatMtxAtExit, StatMtxMBlock, - StatMtxJavaMBlock, StatMtxDeadlockDetector, + StatMtxFired, + StatMtxRacy, StatMtxFD, // This must be the last. diff --git a/lib/tsan/rtl/tsan_suppressions.cc b/lib/tsan/rtl/tsan_suppressions.cc index e382f21f0dff..8754b61c60cd 100644 --- a/lib/tsan/rtl/tsan_suppressions.cc +++ b/lib/tsan/rtl/tsan_suppressions.cc @@ -34,7 +34,8 @@ static const char *const std_suppressions = "race:std::_Sp_counted_ptr_inplace<std::thread::_Impl\n"; // Can be overriden in frontend. 
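
Overriding in the frontend works through the weak-symbol mechanism this hunk switches to SANITIZER_WEAK_DEFAULT_IMPL: the runtime ships a weak definition that returns 0, and any strong definition linked into the program wins. A sketch of such an override (the suppression entries are invented for illustration):

    // Linking this into an instrumented binary replaces the runtime's weak
    // default; the string uses the ordinary suppressions-file syntax.
    extern "C" const char *__tsan_default_suppressions() {
      return "race:BenignCounterInVendorLib\n"
             "deadlock:ShutdownLockOrder\n";
    }
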
-extern "C" const char *WEAK __tsan_default_suppressions() { +SANITIZER_WEAK_DEFAULT_IMPL +const char *__tsan_default_suppressions() { return 0; } #endif @@ -100,8 +101,8 @@ static uptr IsSuppressed(const char *stype, const AddressInfo &info, if (suppression_ctx->Match(info.function, stype, sp) || suppression_ctx->Match(info.file, stype, sp) || suppression_ctx->Match(info.module, stype, sp)) { - DPrintf("ThreadSanitizer: matched suppression '%s'\n", (*sp)->templ); - (*sp)->hit_count++; + VPrintf(2, "ThreadSanitizer: matched suppression '%s'\n", (*sp)->templ); + atomic_fetch_add(&(*sp)->hit_count, 1, memory_order_relaxed); return info.address; } return 0; @@ -138,8 +139,8 @@ uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp) { const DataInfo &global = loc->global; if (suppression_ctx->Match(global.name, stype, &s) || suppression_ctx->Match(global.module, stype, &s)) { - DPrintf("ThreadSanitizer: matched suppression '%s'\n", s->templ); - s->hit_count++; + VPrintf(2, "ThreadSanitizer: matched suppression '%s'\n", s->templ); + atomic_fetch_add(&s->hit_count, 1, memory_order_relaxed); *sp = s; return global.start; } @@ -154,7 +155,7 @@ void PrintMatchedSuppressions() { return; int hit_count = 0; for (uptr i = 0; i < matched.size(); i++) - hit_count += matched[i]->hit_count; + hit_count += atomic_load_relaxed(&matched[i]->hit_count); Printf("ThreadSanitizer: Matched %d suppressions (pid=%d):\n", hit_count, (int)internal_getpid()); for (uptr i = 0; i < matched.size(); i++) { diff --git a/lib/tsan/rtl/tsan_symbolize.cc b/lib/tsan/rtl/tsan_symbolize.cc index a6b9bca0501d..b2423951795f 100644 --- a/lib/tsan/rtl/tsan_symbolize.cc +++ b/lib/tsan/rtl/tsan_symbolize.cc @@ -38,10 +38,10 @@ void ExitSymbolizer() { // May be overriden by JIT/JAVA/etc, // whatever produces PCs marked with kExternalPCBit. -extern "C" bool WEAK __tsan_symbolize_external(uptr pc, - char *func_buf, uptr func_siz, - char *file_buf, uptr file_siz, - int *line, int *col) { +SANITIZER_WEAK_DEFAULT_IMPL +bool __tsan_symbolize_external(uptr pc, char *func_buf, uptr func_siz, + char *file_buf, uptr file_siz, int *line, + int *col) { return false; } @@ -71,7 +71,7 @@ ReportLocation *SymbolizeData(uptr addr) { if (!Symbolizer::GetOrInit()->SymbolizeData(addr, &info)) return 0; ReportLocation *ent = ReportLocation::New(ReportLocationGlobal); - ent->global = info; + internal_memcpy(&ent->global, &info, sizeof(info)); return ent; } diff --git a/lib/tsan/rtl/tsan_sync.h b/lib/tsan/rtl/tsan_sync.h index 2d12cdff8b2f..f07ea3b9776b 100644 --- a/lib/tsan/rtl/tsan_sync.h +++ b/lib/tsan/rtl/tsan_sync.h @@ -86,9 +86,9 @@ class MetaMap { void OnThreadIdle(ThreadState *thr); private: - static const u32 kFlagMask = 3 << 30; - static const u32 kFlagBlock = 1 << 30; - static const u32 kFlagSync = 2 << 30; + static const u32 kFlagMask = 3u << 30; + static const u32 kFlagBlock = 1u << 30; + static const u32 kFlagSync = 2u << 30; typedef DenseSlabAlloc<MBlock, 1<<16, 1<<12> BlockAlloc; typedef DenseSlabAlloc<SyncVar, 1<<16, 1<<10> SyncAlloc; BlockAlloc block_alloc_; |