Diffstat (limited to 'lib/tsan/rtl')
-rw-r--r--  lib/tsan/rtl/tsan_debugging.cc         |   7
-rw-r--r--  lib/tsan/rtl/tsan_interceptors.cc      | 125
-rw-r--r--  lib/tsan/rtl/tsan_interceptors_mac.cc  |  37
-rw-r--r--  lib/tsan/rtl/tsan_interface.h          |  13
-rw-r--r--  lib/tsan/rtl/tsan_malloc_mac.cc        |  10
-rw-r--r--  lib/tsan/rtl/tsan_mman.cc              |  50
-rw-r--r--  lib/tsan/rtl/tsan_new_delete.cc        |  12
-rw-r--r--  lib/tsan/rtl/tsan_platform.h           | 105
-rw-r--r--  lib/tsan/rtl/tsan_platform_linux.cc    |  21
-rw-r--r--  lib/tsan/rtl/tsan_platform_posix.cc    |  59
-rw-r--r--  lib/tsan/rtl/tsan_rtl.cc               |  28
-rw-r--r--  lib/tsan/rtl/tsan_rtl.h                |   6
-rw-r--r--  lib/tsan/rtl/tsan_rtl_mutex.cc         |   6
-rw-r--r--  lib/tsan/rtl/tsan_rtl_report.cc        |   4
-rw-r--r--  lib/tsan/rtl/tsan_stack_trace.cc       |   5
-rw-r--r--  lib/tsan/rtl/tsan_stack_trace.h        |   4
-rw-r--r--  lib/tsan/rtl/tsan_suppressions.cc      |   2
-rw-r--r--  lib/tsan/rtl/tsan_symbolize.cc         |  41
-rw-r--r--  lib/tsan/rtl/tsan_sync.cc              |   3
19 files changed, 395 insertions, 143 deletions
diff --git a/lib/tsan/rtl/tsan_debugging.cc b/lib/tsan/rtl/tsan_debugging.cc
index a44b13632c61..9e5465d37557 100644
--- a/lib/tsan/rtl/tsan_debugging.cc
+++ b/lib/tsan/rtl/tsan_debugging.cc
@@ -83,6 +83,13 @@ int __tsan_get_report_data(void *report, const char **description, int *count,
}
SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_tag(void *report, uptr *tag) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ *tag = rep->tag;
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_report_stack(void *report, uptr idx, void **trace,
uptr trace_size) {
const ReportDesc *rep = (ReportDesc *)report;
diff --git a/lib/tsan/rtl/tsan_interceptors.cc b/lib/tsan/rtl/tsan_interceptors.cc
index d92976ad6e9e..901997b3e85d 100644
--- a/lib/tsan/rtl/tsan_interceptors.cc
+++ b/lib/tsan/rtl/tsan_interceptors.cc
@@ -569,10 +569,8 @@ TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
#define TSAN_INTERCEPTOR_SETJMP TSAN_INTERCEPTOR_SETJMP__(setjmp_symname)
#define TSAN_INTERCEPTOR_SIGSETJMP TSAN_INTERCEPTOR_SETJMP__(sigsetjmp_symname)
-#define TSAN_STRING_SETJMP_(x) # x
-#define TSAN_STRING_SETJMP__(x) TSAN_STRING_SETJMP_(x)
-#define TSAN_STRING_SETJMP TSAN_STRING_SETJMP__(setjmp_symname)
-#define TSAN_STRING_SIGSETJMP TSAN_STRING_SETJMP__(sigsetjmp_symname)
+#define TSAN_STRING_SETJMP SANITIZER_STRINGIFY(setjmp_symname)
+#define TSAN_STRING_SIGSETJMP SANITIZER_STRINGIFY(sigsetjmp_symname)
// Not called. Merely to satisfy TSAN_INTERCEPT().
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
@@ -597,7 +595,7 @@ extern "C" int TSAN_INTERCEPTOR_SIGSETJMP(void *env) {
return 0;
}
-#if !SANITIEZER_NETBSD
+#if !SANITIZER_NETBSD
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
int __interceptor___sigsetjmp(void *env);
extern "C" int __interceptor___sigsetjmp(void *env) {
@@ -609,13 +607,13 @@ extern "C" int __interceptor___sigsetjmp(void *env) {
extern "C" int setjmp_symname(void *env);
extern "C" int _setjmp(void *env);
extern "C" int sigsetjmp_symname(void *env);
-#if !SANITIEZER_NETBSD
+#if !SANITIZER_NETBSD
extern "C" int __sigsetjmp(void *env);
#endif
DEFINE_REAL(int, setjmp_symname, void *env)
DEFINE_REAL(int, _setjmp, void *env)
DEFINE_REAL(int, sigsetjmp_symname, void *env)
-#if !SANITIEZER_NETBSD
+#if !SANITIZER_NETBSD
DEFINE_REAL(int, __sigsetjmp, void *env)
#endif
#endif // SANITIZER_MAC
@@ -762,16 +760,14 @@ static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
return true;
}
-TSAN_INTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags,
- int fd, OFF_T off) {
- SCOPED_TSAN_INTERCEPTOR(mmap, addr, sz, prot, flags, fd, off);
- if (!fix_mmap_addr(&addr, sz, flags))
- return MAP_FAILED;
- void *res = REAL(mmap)(addr, sz, prot, flags, fd, off);
+template <class Mmap>
+static void *mmap_interceptor(ThreadState *thr, uptr pc, Mmap real_mmap,
+ void *addr, SIZE_T sz, int prot, int flags,
+ int fd, OFF64_T off) {
+ if (!fix_mmap_addr(&addr, sz, flags)) return MAP_FAILED;
+ void *res = real_mmap(addr, sz, prot, flags, fd, off);
if (res != MAP_FAILED) {
- if (fd > 0)
- FdAccess(thr, pc, fd);
-
+ if (fd > 0) FdAccess(thr, pc, fd);
if (thr->ignore_reads_and_writes == 0)
MemoryRangeImitateWrite(thr, pc, (uptr)res, sz);
else
@@ -780,29 +776,6 @@ TSAN_INTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags,
return res;
}
-#if SANITIZER_LINUX
-TSAN_INTERCEPTOR(void *, mmap64, void *addr, SIZE_T sz, int prot, int flags,
- int fd, OFF64_T off) {
- SCOPED_TSAN_INTERCEPTOR(mmap64, addr, sz, prot, flags, fd, off);
- if (!fix_mmap_addr(&addr, sz, flags))
- return MAP_FAILED;
- void *res = REAL(mmap64)(addr, sz, prot, flags, fd, off);
- if (res != MAP_FAILED) {
- if (fd > 0)
- FdAccess(thr, pc, fd);
-
- if (thr->ignore_reads_and_writes == 0)
- MemoryRangeImitateWrite(thr, pc, (uptr)res, sz);
- else
- MemoryResetRange(thr, pc, (uptr)res, sz);
- }
- return res;
-}
-#define TSAN_MAYBE_INTERCEPT_MMAP64 TSAN_INTERCEPT(mmap64)
-#else
-#define TSAN_MAYBE_INTERCEPT_MMAP64
-#endif
-
TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
if (sz != 0) {
@@ -932,7 +905,7 @@ void DestroyThreadState() {
}
} // namespace __tsan
-#if !SANITIZER_MAC && !SANITIZER_NETBSD
+#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
static void thread_finalize(void *v) {
uptr iter = (uptr)v;
if (iter > 1) {
@@ -963,7 +936,7 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
ThreadState *thr = cur_thread();
// Thread-local state is not initialized yet.
ScopedIgnoreInterceptors ignore;
-#if !SANITIZER_MAC && !SANITIZER_NETBSD
+#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
ThreadIgnoreBegin(thr, 0);
if (pthread_setspecific(interceptor_ctx()->finalize_key,
(void *)GetPthreadDestructorIterations())) {
@@ -1763,12 +1736,6 @@ TSAN_INTERCEPTOR(void, abort, int fake) {
REAL(abort)(fake);
}
-TSAN_INTERCEPTOR(int, puts, const char *s) {
- SCOPED_TSAN_INTERCEPTOR(puts, s);
- MemoryAccessRange(thr, pc, (uptr)s, internal_strlen(s), false);
- return REAL(puts)(s);
-}
-
TSAN_INTERCEPTOR(int, rmdir, char *path) {
SCOPED_TSAN_INTERCEPTOR(rmdir, path);
Release(thr, pc, Dir2addr(path));
@@ -2313,6 +2280,13 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
MutexInvalidAccess(((TsanInterceptorContext *)ctx)->thr, \
((TsanInterceptorContext *)ctx)->pc, (uptr)m)
+#define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, \
+ off) \
+ do { \
+ return mmap_interceptor(thr, pc, REAL(mmap), addr, sz, prot, flags, fd, \
+ off); \
+ } while (false)
+
#if !SANITIZER_MAC
#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
@@ -2426,7 +2400,7 @@ struct ScopedSyscall {
}
};
-#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC
static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
TSAN_SYSCALL();
MemoryAccessRange(thr, pc, p, s, write);
@@ -2519,6 +2493,7 @@ static void syscall_post_fork(uptr pc, int pid) {
syscall_post_fork(GET_CALLER_PC(), res)
#include "sanitizer_common/sanitizer_common_syscalls.inc"
+#include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
#ifdef NEED_TLS_GET_ADDR
// Define own interceptor instead of sanitizer_common's for three reasons:
@@ -2536,7 +2511,8 @@ TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) {
ThreadState *thr = cur_thread();
if (!thr)
return res;
- DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr, thr->tls_size);
+ DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr,
+ thr->tls_addr + thr->tls_size);
if (!dtv)
return res;
// New DTLS block has been allocated.
@@ -2556,22 +2532,33 @@ TSAN_INTERCEPTOR(void, _lwp_exit) {
#define TSAN_MAYBE_INTERCEPT__LWP_EXIT
#endif
-TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_init, void *c, void *a);
-TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_signal, void *c);
-TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_broadcast, void *c);
-TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_wait, void *c, void *m);
-TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_destroy, void *c);
-TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_init, void *m, void *a);
-TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_destroy, void *m);
-TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_trylock, void *m);
-TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_init, void *m, void *a);
-TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_destroy, void *m);
-TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_rdlock, void *m);
-TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_tryrdlock, void *m);
-TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_wrlock, void *m);
-TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_trywrlock, void *m);
-TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_unlock, void *m);
-TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(int, once, void *o, void (*f)());
+#if SANITIZER_FREEBSD
+TSAN_INTERCEPTOR(void, thr_exit, tid_t *state) {
+ SCOPED_TSAN_INTERCEPTOR(thr_exit, state);
+ DestroyThreadState();
+ REAL(thr_exit(state));
+}
+#define TSAN_MAYBE_INTERCEPT_THR_EXIT TSAN_INTERCEPT(thr_exit)
+#else
+#define TSAN_MAYBE_INTERCEPT_THR_EXIT
+#endif
+
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_init, void *c, void *a)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_signal, void *c)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_broadcast, void *c)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_wait, void *c, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_destroy, void *c)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_init, void *m, void *a)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_destroy, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_trylock, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_init, void *m, void *a)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_destroy, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_rdlock, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_tryrdlock, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_wrlock, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_trywrlock, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_unlock, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(int, once, void *o, void (*f)())
namespace __tsan {
@@ -2635,8 +2622,6 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(realloc);
TSAN_INTERCEPT(free);
TSAN_INTERCEPT(cfree);
- TSAN_INTERCEPT(mmap);
- TSAN_MAYBE_INTERCEPT_MMAP64;
TSAN_INTERCEPT(munmap);
TSAN_MAYBE_INTERCEPT_MEMALIGN;
TSAN_INTERCEPT(valloc);
@@ -2715,10 +2700,7 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(unlink);
TSAN_INTERCEPT(tmpfile);
TSAN_MAYBE_INTERCEPT_TMPFILE64;
- TSAN_INTERCEPT(fread);
- TSAN_INTERCEPT(fwrite);
TSAN_INTERCEPT(abort);
- TSAN_INTERCEPT(puts);
TSAN_INTERCEPT(rmdir);
TSAN_INTERCEPT(closedir);
@@ -2750,6 +2732,7 @@ void InitializeInterceptors() {
#endif
TSAN_MAYBE_INTERCEPT__LWP_EXIT;
+ TSAN_MAYBE_INTERCEPT_THR_EXIT;
#if !SANITIZER_MAC && !SANITIZER_ANDROID
// Need to setup it, because interceptors check that the function is resolved.
@@ -2762,7 +2745,7 @@ void InitializeInterceptors() {
Die();
}
-#if !SANITIZER_MAC && !SANITIZER_NETBSD
+#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
if (pthread_key_create(&interceptor_ctx()->finalize_key, &thread_finalize)) {
Printf("ThreadSanitizer: failed to create thread key\n");
Die();
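
For context: the dedicated mmap/mmap64 interceptors removed above are replaced by the mmap_interceptor template, which the shared interceptors in sanitizer_common now reach through COMMON_INTERCEPTOR_MMAP_IMPL. A rough sketch of that dispatch (the real wrapper lives in sanitizer_common_interceptors.inc and may differ in detail):

    INTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags,
                int fd, OFF_T off) {
      void *ctx;
      COMMON_INTERCEPTOR_ENTER(ctx, mmap, addr, sz, prot, flags, fd, off);
      // Expands to: return mmap_interceptor(thr, pc, REAL(mmap), addr, sz, ...)
      COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, off);
    }
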
diff --git a/lib/tsan/rtl/tsan_interceptors_mac.cc b/lib/tsan/rtl/tsan_interceptors_mac.cc
index 3b3a4a2815c3..b58e6b7071cf 100644
--- a/lib/tsan/rtl/tsan_interceptors_mac.cc
+++ b/lib/tsan/rtl/tsan_interceptors_mac.cc
@@ -294,6 +294,43 @@ TSAN_INTERCEPTOR(void, xpc_connection_cancel, xpc_connection_t connection) {
#endif // #if defined(__has_include) && __has_include(<xpc/xpc.h>)
+// Is the Obj-C object a tagged pointer (i.e. isn't really a valid pointer and
+// contains data in the pointer's bits instead)?
+static bool IsTaggedObjCPointer(void *obj) {
+ const uptr kPossibleTaggedBits = 0x8000000000000001ull;
+ return ((uptr)obj & kPossibleTaggedBits) != 0;
+}
+
+// Return an address on which we can synchronize (Acquire and Release) for an
+// Obj-C tagged pointer (which is not a valid pointer). Ideally this should be
+// an address derived from 'obj', but for now just return the same global address.
+// TODO(kubamracek): Return different address for different pointers.
+static uptr SyncAddressForTaggedPointer(void *obj) {
+ (void)obj;
+ static u64 addr;
+ return (uptr)&addr;
+}
+
+// Address on which we can synchronize for an Objective-C object. Supports
+// tagged pointers.
+static uptr SyncAddressForObjCObject(void *obj) {
+ if (IsTaggedObjCPointer(obj)) return SyncAddressForTaggedPointer(obj);
+ return (uptr)obj;
+}
+
+TSAN_INTERCEPTOR(int, objc_sync_enter, void *obj) {
+ SCOPED_TSAN_INTERCEPTOR(objc_sync_enter, obj);
+ int result = REAL(objc_sync_enter)(obj);
+ if (obj) Acquire(thr, pc, SyncAddressForObjCObject(obj));
+ return result;
+}
+
+TSAN_INTERCEPTOR(int, objc_sync_exit, void *obj) {
+  SCOPED_TSAN_INTERCEPTOR(objc_sync_exit, obj);
+ if (obj) Release(thr, pc, SyncAddressForObjCObject(obj));
+ return REAL(objc_sync_exit)(obj);
+}
+
// On macOS, libc++ is always linked dynamically, so intercepting works the
// usual way.
#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
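
For reference, Obj-C's @synchronized(obj) blocks lower to paired objc_sync_enter/objc_sync_exit calls, so the interceptors above give TSan a release/acquire edge on the locked object (or on a shared fallback address when the object is a tagged pointer). A minimal illustration, calling the Obj-C runtime directly from C++ (the lock object and shared counter are hypothetical):

    extern "C" int objc_sync_enter(void *obj);  // really takes an Obj-C id
    extern "C" int objc_sync_exit(void *obj);

    static int g_counter;  // shared state guarded by the @synchronized lock

    void Increment(void *lock_obj) {
      objc_sync_enter(lock_obj);  // interceptor: REAL(objc_sync_enter), then Acquire
      ++g_counter;                // ordered by the release/acquire pair, no race report
      objc_sync_exit(lock_obj);   // interceptor: Release, then REAL(objc_sync_exit)
    }
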
diff --git a/lib/tsan/rtl/tsan_interface.h b/lib/tsan/rtl/tsan_interface.h
index a80a48991cba..203f6b106a96 100644
--- a/lib/tsan/rtl/tsan_interface.h
+++ b/lib/tsan/rtl/tsan_interface.h
@@ -117,6 +117,19 @@ int __tsan_get_report_data(void *report, const char **description, int *count,
int *unique_tid_count, void **sleep_trace,
uptr trace_size);
+/// Retrieves the "tag" from a report (for external-race report types). External
+/// races can be associated with a tag which gives them more meaning. For example,
+/// tag value '1' means "Swift access race". Tag value '0' indicates a plain
+/// external race.
+///
+/// \param report opaque pointer to the current report (obtained as argument in
+/// __tsan_on_report, or from __tsan_get_current_report)
+/// \param [out] tag points to storage that will be filled with the tag value
+///
+/// \returns non-zero value on success, zero on failure
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_tag(void *report, uptr *tag);
+
// Returns information about stack traces included in the report.
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_report_stack(void *report, uptr idx, void **trace,
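
A minimal sketch of how a client might consume the new entry point from the __tsan_on_report hook (assuming a 64-bit target where uptr is unsigned long; error handling elided):

    #include <cstdio>

    // Declared by the TSan runtime (see tsan_interface.h above).
    extern "C" int __tsan_get_report_tag(void *report, unsigned long *tag);

    // Weak hook that the runtime invokes for every report.
    extern "C" void __tsan_on_report(void *report) {
      unsigned long tag = 0;
      if (__tsan_get_report_tag(report, &tag))
        std::fprintf(stderr, "external race tag: %lu\n", tag);  // 0 == plain external race
    }
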
diff --git a/lib/tsan/rtl/tsan_malloc_mac.cc b/lib/tsan/rtl/tsan_malloc_mac.cc
index 455c95df6c50..3cc30724da85 100644
--- a/lib/tsan/rtl/tsan_malloc_mac.cc
+++ b/lib/tsan/rtl/tsan_malloc_mac.cc
@@ -15,6 +15,7 @@
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_MAC
+#include "sanitizer_common/sanitizer_errno.h"
#include "tsan_interceptors.h"
#include "tsan_stack_trace.h"
@@ -39,6 +40,15 @@ using namespace __tsan;
if (cur_thread()->in_symbolizer) return InternalCalloc(count, size); \
SCOPED_INTERCEPTOR_RAW(calloc, size, count); \
void *p = user_calloc(thr, pc, size, count)
+#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \
+ if (cur_thread()->in_symbolizer) { \
+ void *p = InternalAlloc(size, nullptr, alignment); \
+ if (!p) return errno_ENOMEM; \
+ *memptr = p; \
+ return 0; \
+ } \
+ SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, alignment, size); \
+ int res = user_posix_memalign(thr, pc, memptr, alignment, size);
#define COMMON_MALLOC_VALLOC(size) \
if (cur_thread()->in_symbolizer) \
return InternalAlloc(size, nullptr, GetPageSizeCached()); \
diff --git a/lib/tsan/rtl/tsan_mman.cc b/lib/tsan/rtl/tsan_mman.cc
index 19680238bf76..b160a9736d5d 100644
--- a/lib/tsan/rtl/tsan_mman.cc
+++ b/lib/tsan/rtl/tsan_mman.cc
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
@@ -150,13 +151,24 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
OutputReport(thr, rep);
}
+static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;
+
void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
bool signal) {
- if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
- return Allocator::FailureHandler::OnBadRequest();
+ if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportAllocationSizeTooBig(sz, kMaxAllowedMallocSize, &stack);
+ }
void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
- if (UNLIKELY(p == 0))
- return 0;
+ if (UNLIKELY(!p)) {
+ SetAllocatorOutOfMemory();
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportOutOfMemory(sz, &stack);
+ }
if (ctx && ctx->initialized)
OnUserAlloc(thr, pc, (uptr)p, sz, true);
if (signal)
@@ -178,8 +190,12 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
}
void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
- if (UNLIKELY(CheckForCallocOverflow(size, n)))
- return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest());
+ if (UNLIKELY(CheckForCallocOverflow(size, n))) {
+ if (AllocatorMayReturnNull())
+ return SetErrnoOnNull(nullptr);
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportCallocOverflow(n, size, &stack);
+ }
void *p = user_alloc_internal(thr, pc, n * size);
if (p)
internal_memset(p, 0, n * size);
@@ -224,7 +240,10 @@ void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
if (UNLIKELY(!IsPowerOfTwo(align))) {
errno = errno_EINVAL;
- return Allocator::FailureHandler::OnBadRequest();
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportInvalidAllocationAlignment(align, &stack);
}
return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}
@@ -232,11 +251,14 @@ void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
uptr sz) {
if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
- Allocator::FailureHandler::OnBadRequest();
- return errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return errno_EINVAL;
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportInvalidPosixMemalignAlignment(align, &stack);
}
void *ptr = user_alloc_internal(thr, pc, sz, align);
if (UNLIKELY(!ptr))
+ // OOM error is already taken care of by user_alloc_internal.
return errno_ENOMEM;
CHECK(IsAligned((uptr)ptr, align));
*memptr = ptr;
@@ -246,7 +268,10 @@ int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
errno = errno_EINVAL;
- return Allocator::FailureHandler::OnBadRequest();
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportInvalidAlignedAllocAlignment(sz, align, &stack);
}
return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}
@@ -259,7 +284,10 @@ void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
uptr PageSize = GetPageSizeCached();
if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
errno = errno_ENOMEM;
- return Allocator::FailureHandler::OnBadRequest();
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportPvallocOverflow(sz, &stack);
}
// pvalloc(0) should allocate one page.
sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
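
The practical effect of the new error paths in user_alloc_internal and friends is governed by the standard allocator_may_return_null flag. A small illustration (the size is only an example; kMaxAllowedMallocSize is 1 << 40 as defined above):

    #include <cstdio>
    #include <cstdlib>

    int main() {
      // TSAN_OPTIONS=allocator_may_return_null=1 -> malloc returns nullptr;
      // default (0) -> fatal "allocation size too big" report with a stack trace.
      void *p = malloc(1ull << 41);  // exceeds kMaxAllowedMallocSize
      std::printf("p = %p\n", p);
      free(p);
      return 0;
    }
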
diff --git a/lib/tsan/rtl/tsan_new_delete.cc b/lib/tsan/rtl/tsan_new_delete.cc
index a1bb22690e0f..4f52d3d71eb6 100644
--- a/lib/tsan/rtl/tsan_new_delete.cc
+++ b/lib/tsan/rtl/tsan_new_delete.cc
@@ -13,8 +13,10 @@
//===----------------------------------------------------------------------===//
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "tsan_interceptors.h"
+#include "tsan_rtl.h"
using namespace __tsan; // NOLINT
@@ -34,7 +36,10 @@ DECLARE_REAL(void, free, void *ptr)
{ \
SCOPED_INTERCEPTOR_RAW(mangled_name, size); \
p = user_alloc(thr, pc, size); \
- if (!nothrow && UNLIKELY(!p)) DieOnFailure::OnOOM(); \
+ if (!nothrow && UNLIKELY(!p)) { \
+ GET_STACK_TRACE_FATAL(thr, pc); \
+ ReportOutOfMemory(size, &stack); \
+ } \
} \
invoke_malloc_hook(p, size); \
return p;
@@ -46,7 +51,10 @@ DECLARE_REAL(void, free, void *ptr)
{ \
SCOPED_INTERCEPTOR_RAW(mangled_name, size); \
p = user_memalign(thr, pc, (uptr)align, size); \
- if (!nothrow && UNLIKELY(!p)) DieOnFailure::OnOOM(); \
+ if (!nothrow && UNLIKELY(!p)) { \
+ GET_STACK_TRACE_FATAL(thr, pc); \
+ ReportOutOfMemory(size, &stack); \
+ } \
} \
invoke_malloc_hook(p, size); \
return p;
diff --git a/lib/tsan/rtl/tsan_platform.h b/lib/tsan/rtl/tsan_platform.h
index bfac70df12d9..6b3e6bac27cb 100644
--- a/lib/tsan/rtl/tsan_platform.h
+++ b/lib/tsan/rtl/tsan_platform.h
@@ -79,25 +79,27 @@ struct Mapping {
#define TSAN_MID_APP_RANGE 1
#elif defined(__mips64)
/*
-C/C++ on linux/mips64
-0100 0000 00 - 0200 0000 00: main binary
-0200 0000 00 - 1400 0000 00: -
-1400 0000 00 - 2400 0000 00: shadow
-2400 0000 00 - 3000 0000 00: -
-3000 0000 00 - 4000 0000 00: metainfo (memory blocks and sync objects)
-4000 0000 00 - 6000 0000 00: -
-6000 0000 00 - 6200 0000 00: traces
-6200 0000 00 - fe00 0000 00: -
-fe00 0000 00 - ff00 0000 00: heap
-ff00 0000 00 - ff80 0000 00: -
-ff80 0000 00 - ffff ffff ff: modules and main thread stack
+C/C++ on linux/mips64 (40-bit VMA)
+0000 0000 00 - 0100 0000 00: - (4 GB)
+0100 0000 00 - 0200 0000 00: main binary (4 GB)
+0200 0000 00 - 2000 0000 00: - (120 GB)
+2000 0000 00 - 4000 0000 00: shadow (128 GB)
+4000 0000 00 - 5000 0000 00: metainfo (memory blocks and sync objects) (64 GB)
+5000 0000 00 - aa00 0000 00: - (360 GB)
+aa00 0000 00 - ab00 0000 00: main binary (PIE) (4 GB)
+ab00 0000 00 - b000 0000 00: - (20 GB)
+b000 0000 00 - b200 0000 00: traces (8 GB)
+b200 0000 00 - fe00 0000 00: - (304 GB)
+fe00 0000 00 - ff00 0000 00: heap (4 GB)
+ff00 0000 00 - ff80 0000 00: - (2 GB)
+ff80 0000 00 - ffff ffff ff: modules and main thread stack (<2 GB)
*/
struct Mapping {
static const uptr kMetaShadowBeg = 0x4000000000ull;
static const uptr kMetaShadowEnd = 0x5000000000ull;
static const uptr kTraceMemBeg = 0xb000000000ull;
static const uptr kTraceMemEnd = 0xb200000000ull;
- static const uptr kShadowBeg = 0x2400000000ull;
+ static const uptr kShadowBeg = 0x2000000000ull;
static const uptr kShadowEnd = 0x4000000000ull;
static const uptr kHeapMemBeg = 0xfe00000000ull;
static const uptr kHeapMemEnd = 0xff00000000ull;
@@ -353,9 +355,9 @@ struct Mapping47 {
#define TSAN_RUNTIME_VMA 1
#endif
-#elif SANITIZER_GO && !SANITIZER_WINDOWS
+#elif SANITIZER_GO && !SANITIZER_WINDOWS && defined(__x86_64__)
-/* Go on linux, darwin and freebsd
+/* Go on linux, darwin and freebsd on x86_64
0000 0000 1000 - 0000 1000 0000: executable
0000 1000 0000 - 00c0 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
@@ -404,6 +406,61 @@ struct Mapping {
static const uptr kAppMemEnd = 0x00e000000000ull;
};
+#elif SANITIZER_GO && defined(__powerpc64__)
+
+/* Only Mapping46 and Mapping47 are currently supported for powerpc64 on Go. */
+
+/* Go on linux/powerpc64 (46-bit VMA)
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00c0 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 2380 0000 0000: shadow
+2380 0000 0000 - 2400 0000 0000: -
+2400 0000 0000 - 3400 0000 0000: metainfo (memory blocks and sync objects)
+3400 0000 0000 - 3600 0000 0000: -
+3600 0000 0000 - 3800 0000 0000: traces
+3800 0000 0000 - 4000 0000 0000: -
+*/
+
+struct Mapping46 {
+ static const uptr kMetaShadowBeg = 0x240000000000ull;
+ static const uptr kMetaShadowEnd = 0x340000000000ull;
+ static const uptr kTraceMemBeg = 0x360000000000ull;
+ static const uptr kTraceMemEnd = 0x380000000000ull;
+ static const uptr kShadowBeg = 0x200000000000ull;
+ static const uptr kShadowEnd = 0x238000000000ull;
+ static const uptr kAppMemBeg = 0x000000001000ull;
+ static const uptr kAppMemEnd = 0x00e000000000ull;
+};
+
+/* Go on linux/powerpc64 (47-bit VMA)
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00c0 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 3000 0000 0000: shadow
+3000 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
+4000 0000 0000 - 6000 0000 0000: -
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 8000 0000 0000: -
+*/
+
+struct Mapping47 {
+ static const uptr kMetaShadowBeg = 0x300000000000ull;
+ static const uptr kMetaShadowEnd = 0x400000000000ull;
+ static const uptr kTraceMemBeg = 0x600000000000ull;
+ static const uptr kTraceMemEnd = 0x620000000000ull;
+ static const uptr kShadowBeg = 0x200000000000ull;
+ static const uptr kShadowEnd = 0x300000000000ull;
+ static const uptr kAppMemBeg = 0x000000001000ull;
+ static const uptr kAppMemEnd = 0x00e000000000ull;
+};
+
+// Indicates the runtime will define the memory regions at runtime.
+#define TSAN_RUNTIME_VMA 1
+
#else
# error "Unknown platform"
#endif
@@ -476,7 +533,9 @@ uptr MappingArchImpl(void) {
return 0;
#elif defined(__powerpc64__)
switch (vmaSize) {
+#if !SANITIZER_GO
case 44: return MappingImpl<Mapping44, Type>();
+#endif
case 46: return MappingImpl<Mapping46, Type>();
case 47: return MappingImpl<Mapping47, Type>();
}
@@ -631,7 +690,9 @@ bool IsAppMem(uptr mem) {
return false;
#elif defined(__powerpc64__)
switch (vmaSize) {
+#if !SANITIZER_GO
case 44: return IsAppMemImpl<Mapping44>(mem);
+#endif
case 46: return IsAppMemImpl<Mapping46>(mem);
case 47: return IsAppMemImpl<Mapping47>(mem);
}
@@ -660,7 +721,9 @@ bool IsShadowMem(uptr mem) {
return false;
#elif defined(__powerpc64__)
switch (vmaSize) {
+#if !SANITIZER_GO
case 44: return IsShadowMemImpl<Mapping44>(mem);
+#endif
case 46: return IsShadowMemImpl<Mapping46>(mem);
case 47: return IsShadowMemImpl<Mapping47>(mem);
}
@@ -689,7 +752,9 @@ bool IsMetaMem(uptr mem) {
return false;
#elif defined(__powerpc64__)
switch (vmaSize) {
+#if !SANITIZER_GO
case 44: return IsMetaMemImpl<Mapping44>(mem);
+#endif
case 46: return IsMetaMemImpl<Mapping46>(mem);
case 47: return IsMetaMemImpl<Mapping47>(mem);
}
@@ -728,7 +793,9 @@ uptr MemToShadow(uptr x) {
return 0;
#elif defined(__powerpc64__)
switch (vmaSize) {
+#if !SANITIZER_GO
case 44: return MemToShadowImpl<Mapping44>(x);
+#endif
case 46: return MemToShadowImpl<Mapping46>(x);
case 47: return MemToShadowImpl<Mapping47>(x);
}
@@ -769,7 +836,9 @@ u32 *MemToMeta(uptr x) {
return 0;
#elif defined(__powerpc64__)
switch (vmaSize) {
+#if !SANITIZER_GO
case 44: return MemToMetaImpl<Mapping44>(x);
+#endif
case 46: return MemToMetaImpl<Mapping46>(x);
case 47: return MemToMetaImpl<Mapping47>(x);
}
@@ -823,7 +892,9 @@ uptr ShadowToMem(uptr s) {
return 0;
#elif defined(__powerpc64__)
switch (vmaSize) {
+#if !SANITIZER_GO
case 44: return ShadowToMemImpl<Mapping44>(s);
+#endif
case 46: return ShadowToMemImpl<Mapping46>(s);
case 47: return ShadowToMemImpl<Mapping47>(s);
}
@@ -860,7 +931,9 @@ uptr GetThreadTrace(int tid) {
return 0;
#elif defined(__powerpc64__)
switch (vmaSize) {
+#if !SANITIZER_GO
case 44: return GetThreadTraceImpl<Mapping44>(tid);
+#endif
case 46: return GetThreadTraceImpl<Mapping46>(tid);
case 47: return GetThreadTraceImpl<Mapping47>(tid);
}
@@ -892,7 +965,9 @@ uptr GetThreadTraceHeader(int tid) {
return 0;
#elif defined(__powerpc64__)
switch (vmaSize) {
+#if !SANITIZER_GO
case 44: return GetThreadTraceHeaderImpl<Mapping44>(tid);
+#endif
case 46: return GetThreadTraceHeaderImpl<Mapping46>(tid);
case 47: return GetThreadTraceHeaderImpl<Mapping47>(tid);
}
diff --git a/lib/tsan/rtl/tsan_platform_linux.cc b/lib/tsan/rtl/tsan_platform_linux.cc
index e14d5f575a2e..de989b780883 100644
--- a/lib/tsan/rtl/tsan_platform_linux.cc
+++ b/lib/tsan/rtl/tsan_platform_linux.cc
@@ -168,11 +168,11 @@ static void MapRodata() {
fd_t fd = openrv;
// Fill the file with kShadowRodata.
const uptr kMarkerSize = 512 * 1024 / sizeof(u64);
- InternalScopedBuffer<u64> marker(kMarkerSize);
+ InternalMmapVector<u64> marker(kMarkerSize);
// volatile to prevent insertion of memset
for (volatile u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++)
*p = kShadowRodata;
- internal_write(fd, marker.data(), marker.size());
+ internal_write(fd, marker.data(), marker.size() * sizeof(u64));
// Map the file into memory.
uptr page = internal_mmap(0, GetPageSizeCached(), PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, fd, 0);
@@ -191,8 +191,9 @@ static void MapRodata() {
// Assume it's .rodata
char *shadow_start = (char *)MemToShadow(segment.start);
char *shadow_end = (char *)MemToShadow(segment.end);
- for (char *p = shadow_start; p < shadow_end; p += marker.size()) {
- internal_mmap(p, Min<uptr>(marker.size(), shadow_end - p),
+ for (char *p = shadow_start; p < shadow_end;
+ p += marker.size() * sizeof(u64)) {
+ internal_mmap(p, Min<uptr>(marker.size() * sizeof(u64), shadow_end - p),
PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
}
}
@@ -213,15 +214,23 @@ void InitializePlatformEarly() {
#if defined(__aarch64__)
if (vmaSize != 39 && vmaSize != 42 && vmaSize != 48) {
Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
- Printf("FATAL: Found %d - Supported 39, 42 and 48\n", vmaSize);
+ Printf("FATAL: Found %zd - Supported 39, 42 and 48\n", vmaSize);
Die();
}
#elif defined(__powerpc64__)
+# if !SANITIZER_GO
if (vmaSize != 44 && vmaSize != 46 && vmaSize != 47) {
Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
- Printf("FATAL: Found %d - Supported 44, 46, and 47\n", vmaSize);
+ Printf("FATAL: Found %zd - Supported 44, 46, and 47\n", vmaSize);
Die();
}
+# else
+ if (vmaSize != 46 && vmaSize != 47) {
+ Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
+ Printf("FATAL: Found %zd - Supported 46, and 47\n", vmaSize);
+ Die();
+ }
+# endif
#endif
#endif
}
diff --git a/lib/tsan/rtl/tsan_platform_posix.cc b/lib/tsan/rtl/tsan_platform_posix.cc
index e4f90a811c35..c38dcc7f2c1c 100644
--- a/lib/tsan/rtl/tsan_platform_posix.cc
+++ b/lib/tsan/rtl/tsan_platform_posix.cc
@@ -16,6 +16,7 @@
#if SANITIZER_POSIX
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "tsan_platform.h"
@@ -23,16 +24,39 @@
namespace __tsan {
+static const char kShadowMemoryMappingWarning[] =
+ "FATAL: %s can not madvise shadow region [%zx, %zx] with %s (errno: %d)\n";
+static const char kShadowMemoryMappingHint[] =
+ "HINT: if %s is not supported in your environment, you may set "
+ "TSAN_OPTIONS=%s=0\n";
+
+static void NoHugePagesInShadow(uptr addr, uptr size) {
+ if (common_flags()->no_huge_pages_for_shadow)
+ if (!NoHugePagesInRegion(addr, size)) {
+ Printf(kShadowMemoryMappingWarning, SanitizerToolName, addr, addr + size,
+ "MADV_NOHUGEPAGE", errno);
+ Printf(kShadowMemoryMappingHint, "MADV_NOHUGEPAGE",
+ "no_huge_pages_for_shadow");
+ Die();
+ }
+}
+
+static void DontDumpShadow(uptr addr, uptr size) {
+ if (common_flags()->use_madv_dontdump)
+ if (!DontDumpShadowMemory(addr, size)) {
+ Printf(kShadowMemoryMappingWarning, SanitizerToolName, addr, addr + size,
+ "MADV_DONTDUMP", errno);
+ Printf(kShadowMemoryMappingHint, "MADV_DONTDUMP", "use_madv_dontdump");
+ Die();
+ }
+}
+
#if !SANITIZER_GO
void InitializeShadowMemory() {
// Map memory shadow.
- uptr shadow =
- (uptr)MmapFixedNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(),
- "shadow");
- if (shadow != ShadowBeg()) {
+ if (!MmapFixedNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(), "shadow")) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
- Printf("FATAL: Make sure to compile with -fPIE and "
- "to link with -pie (%p, %p).\n", shadow, ShadowBeg());
+ Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n");
Die();
}
// This memory range is used for thread stacks and large user mmaps.
@@ -74,30 +98,23 @@ void InitializeShadowMemory() {
DCHECK(0);
}
#endif
- NoHugePagesInRegion(MemToShadow(kMadviseRangeBeg),
+ NoHugePagesInShadow(MemToShadow(kMadviseRangeBeg),
kMadviseRangeSize * kShadowMultiplier);
- // Meta shadow is compressing and we don't flush it,
- // so it makes sense to mark it as NOHUGEPAGE to not over-allocate memory.
- // On one program it reduces memory consumption from 5GB to 2.5GB.
- NoHugePagesInRegion(MetaShadowBeg(), MetaShadowEnd() - MetaShadowBeg());
- if (common_flags()->use_madv_dontdump)
- DontDumpShadowMemory(ShadowBeg(), ShadowEnd() - ShadowBeg());
+ DontDumpShadow(ShadowBeg(), ShadowEnd() - ShadowBeg());
DPrintf("memory shadow: %zx-%zx (%zuGB)\n",
ShadowBeg(), ShadowEnd(),
(ShadowEnd() - ShadowBeg()) >> 30);
// Map meta shadow.
- uptr meta_size = MetaShadowEnd() - MetaShadowBeg();
- uptr meta =
- (uptr)MmapFixedNoReserve(MetaShadowBeg(), meta_size, "meta shadow");
- if (meta != MetaShadowBeg()) {
+ const uptr meta = MetaShadowBeg();
+ const uptr meta_size = MetaShadowEnd() - meta;
+ if (!MmapFixedNoReserve(meta, meta_size, "meta shadow")) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
- Printf("FATAL: Make sure to compile with -fPIE and "
- "to link with -pie (%p, %p).\n", meta, MetaShadowBeg());
+ Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n");
Die();
}
- if (common_flags()->use_madv_dontdump)
- DontDumpShadowMemory(meta, meta_size);
+ NoHugePagesInShadow(meta, meta_size);
+ DontDumpShadow(meta, meta_size);
DPrintf("meta shadow: %zx-%zx (%zuGB)\n",
meta, meta + meta_size, meta_size >> 30);
diff --git a/lib/tsan/rtl/tsan_rtl.cc b/lib/tsan/rtl/tsan_rtl.cc
index 17b820977c26..5841222927b3 100644
--- a/lib/tsan/rtl/tsan_rtl.cc
+++ b/lib/tsan/rtl/tsan_rtl.cc
@@ -105,8 +105,8 @@ Context::Context()
, racy_stacks()
, racy_addresses()
, fired_suppressions_mtx(MutexTypeFired, StatMtxFired)
- , fired_suppressions(8)
, clock_alloc("clock allocator") {
+ fired_suppressions.reserve(8);
}
// The objects are allocated in TLS, so one may rely on zero-initialization.
@@ -140,7 +140,7 @@ static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
uptr n_threads;
uptr n_running_threads;
ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
- InternalScopedBuffer<char> buf(4096);
+ InternalMmapVector<char> buf(4096);
WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads);
WriteToFile(fd, buf.data(), internal_strlen(buf.data()));
}
@@ -246,7 +246,8 @@ void MapShadow(uptr addr, uptr size) {
const uptr kPageSize = GetPageSizeCached();
uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
- MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow");
+ if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
+ Die();
// Meta shadow is 2:1, so tread carefully.
static bool data_mapped = false;
@@ -258,7 +259,8 @@ void MapShadow(uptr addr, uptr size) {
if (!data_mapped) {
// First call maps data+bss.
data_mapped = true;
- MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow");
+ if (!MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"))
+ Die();
} else {
// Mapping continous heap.
// Windows wants 64K alignment.
@@ -268,7 +270,8 @@ void MapShadow(uptr addr, uptr size) {
return;
if (meta_begin < mapped_meta_end)
meta_begin = mapped_meta_end;
- MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow");
+ if (!MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"))
+ Die();
mapped_meta_end = meta_end;
}
VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n",
@@ -280,10 +283,9 @@ void MapThreadTrace(uptr addr, uptr size, const char *name) {
CHECK_GE(addr, TraceMemBeg());
CHECK_LE(addr + size, TraceMemEnd());
CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
- uptr addr1 = (uptr)MmapFixedNoReserve(addr, size, name);
- if (addr1 != addr) {
- Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p->%p)\n",
- addr, size, addr1);
+ if (!MmapFixedNoReserve(addr, size, name)) {
+ Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p)\n",
+ addr, size);
Die();
}
}
@@ -354,6 +356,7 @@ void Initialize(ThreadState *thr) {
ctx = new(ctx_placeholder) Context;
const char *options = GetEnv(SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS");
CacheBinaryName();
+ CheckASLR();
InitializeFlags(&ctx->flags, options);
AvoidCVE_2016_2143();
InitializePlatformEarly();
@@ -547,6 +550,10 @@ u32 CurrentStackId(ThreadState *thr, uptr pc) {
}
void TraceSwitch(ThreadState *thr) {
+#if !SANITIZER_GO
+ if (ctx->after_multithreaded_fork)
+ return;
+#endif
thr->nomalloc++;
Trace *thr_trace = ThreadTrace(thr->tid);
Lock l(&thr_trace->mtx);
@@ -918,7 +925,8 @@ static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
u64 *p1 = p;
p = RoundDown(end, kPageSize);
UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
- MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1);
+ if (!MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1))
+ Die();
// Set the ending.
while (p < end) {
*p++ = val;
diff --git a/lib/tsan/rtl/tsan_rtl.h b/lib/tsan/rtl/tsan_rtl.h
index 7d44ccae9893..523b69aaa6bc 100644
--- a/lib/tsan/rtl/tsan_rtl.h
+++ b/lib/tsan/rtl/tsan_rtl.h
@@ -520,7 +520,9 @@ struct Context {
Context();
bool initialized;
+#if !SANITIZER_GO
bool after_multithreaded_fork;
+#endif
MetaMap metamap;
@@ -648,6 +650,10 @@ void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack,
ExtractTagFromStack(stack, tag);
}
+#define GET_STACK_TRACE_FATAL(thr, pc) \
+ VarSizeStackTrace stack; \
+ ObtainCurrentStack(thr, pc, &stack); \
+ stack.ReverseOrder();
#if TSAN_COLLECT_STATS
void StatAggregate(u64 *dst, u64 *src);
diff --git a/lib/tsan/rtl/tsan_rtl_mutex.cc b/lib/tsan/rtl/tsan_rtl_mutex.cc
index 152b965ad535..c61d02b7a766 100644
--- a/lib/tsan/rtl/tsan_rtl_mutex.cc
+++ b/lib/tsan/rtl/tsan_rtl_mutex.cc
@@ -104,7 +104,7 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
unlock_locked = true;
}
u64 mid = s->GetId();
- u32 last_lock = s->last_lock;
+ u64 last_lock = s->last_lock;
if (!unlock_locked)
s->Reset(thr->proc()); // must not reset it before the report is printed
s->mtx.Unlock();
@@ -114,7 +114,7 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
rep.AddMutex(mid);
VarSizeStackTrace trace;
ObtainCurrentStack(thr, pc, &trace);
- rep.AddStack(trace);
+ rep.AddStack(trace, true);
FastState last(last_lock);
RestoreStack(last.tid(), last.epoch(), &trace, 0);
rep.AddStack(trace, true);
@@ -361,7 +361,7 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
if (s->recursion == 0) {
StatInc(thr, StatMutexUnlock);
s->owner_tid = SyncVar::kInvalidTid;
- ReleaseImpl(thr, pc, &s->clock);
+ ReleaseStoreImpl(thr, pc, &s->clock);
} else {
StatInc(thr, StatMutexRecUnlock);
}
diff --git a/lib/tsan/rtl/tsan_rtl_report.cc b/lib/tsan/rtl/tsan_rtl_report.cc
index cc582ab50190..febb6cef2d39 100644
--- a/lib/tsan/rtl/tsan_rtl_report.cc
+++ b/lib/tsan/rtl/tsan_rtl_report.cc
@@ -649,8 +649,8 @@ void ReportRace(ThreadState *thr) {
// callback. Most likely, TraceTopPC will now return a EventTypeFuncExit
// event. Later we subtract -1 from it (in GetPreviousInstructionPc)
// and the resulting PC has kExternalPCBit set, so we pass it to
- // __tsan_symbolize_external. __tsan_symbolize_external is within its rights
- // to crash since the PC is completely bogus.
+ // __tsan_symbolize_external_ex. __tsan_symbolize_external_ex is within its
+ // rights to crash since the PC is completely bogus.
// test/tsan/double_race.cc contains a test case for this.
toppc = 0;
}
diff --git a/lib/tsan/rtl/tsan_stack_trace.cc b/lib/tsan/rtl/tsan_stack_trace.cc
index ceca3f8e8738..a0dee19e246e 100644
--- a/lib/tsan/rtl/tsan_stack_trace.cc
+++ b/lib/tsan/rtl/tsan_stack_trace.cc
@@ -43,4 +43,9 @@ void VarSizeStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) {
trace_buffer[cnt] = extra_top_pc;
}
+void VarSizeStackTrace::ReverseOrder() {
+ for (u32 i = 0; i < (size >> 1); i++)
+ Swap(trace_buffer[i], trace_buffer[size - 1 - i]);
+}
+
} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_stack_trace.h b/lib/tsan/rtl/tsan_stack_trace.h
index 5bf89bb7584f..f69b57464bcf 100644
--- a/lib/tsan/rtl/tsan_stack_trace.h
+++ b/lib/tsan/rtl/tsan_stack_trace.h
@@ -27,6 +27,10 @@ struct VarSizeStackTrace : public StackTrace {
~VarSizeStackTrace();
void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0);
+  // Reverses the current stack trace order: the top frame goes to the bottom
+  // and the last frame goes to the top.
+ void ReverseOrder();
+
private:
void ResizeBuffer(uptr new_size);
diff --git a/lib/tsan/rtl/tsan_suppressions.cc b/lib/tsan/rtl/tsan_suppressions.cc
index e39702b7d22a..be38f331ad96 100644
--- a/lib/tsan/rtl/tsan_suppressions.cc
+++ b/lib/tsan/rtl/tsan_suppressions.cc
@@ -152,7 +152,7 @@ uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp) {
}
void PrintMatchedSuppressions() {
- InternalMmapVector<Suppression *> matched(1);
+ InternalMmapVector<Suppression *> matched;
CHECK(suppression_ctx);
suppression_ctx->GetMatched(&matched);
if (!matched.size())
diff --git a/lib/tsan/rtl/tsan_symbolize.cc b/lib/tsan/rtl/tsan_symbolize.cc
index b2423951795f..27f0e01c7fbe 100644
--- a/lib/tsan/rtl/tsan_symbolize.cc
+++ b/lib/tsan/rtl/tsan_symbolize.cc
@@ -36,6 +36,7 @@ void ExitSymbolizer() {
thr->ignore_interceptors--;
}
+// Legacy API.
// May be overriden by JIT/JAVA/etc,
// whatever produces PCs marked with kExternalPCBit.
SANITIZER_WEAK_DEFAULT_IMPL
@@ -45,9 +46,49 @@ bool __tsan_symbolize_external(uptr pc, char *func_buf, uptr func_siz,
return false;
}
+// New API: call __tsan_symbolize_external_ex only when it exists.
+// Once old clients are gone, provide dummy implementation.
+SANITIZER_WEAK_DEFAULT_IMPL
+void __tsan_symbolize_external_ex(uptr pc,
+ void (*add_frame)(void *, const char *,
+ const char *, int, int),
+ void *ctx) {}
+
+struct SymbolizedStackBuilder {
+ SymbolizedStack *head;
+ SymbolizedStack *tail;
+ uptr addr;
+};
+
+static void AddFrame(void *ctx, const char *function_name, const char *file,
+ int line, int column) {
+ SymbolizedStackBuilder *ssb = (struct SymbolizedStackBuilder *)ctx;
+ if (ssb->tail) {
+ ssb->tail->next = SymbolizedStack::New(ssb->addr);
+ ssb->tail = ssb->tail->next;
+ } else {
+ ssb->head = ssb->tail = SymbolizedStack::New(ssb->addr);
+ }
+ AddressInfo *info = &ssb->tail->info;
+ if (function_name) {
+ info->function = internal_strdup(function_name);
+ }
+ if (file) {
+ info->file = internal_strdup(file);
+ }
+ info->line = line;
+ info->column = column;
+}
+
SymbolizedStack *SymbolizeCode(uptr addr) {
// Check if PC comes from non-native land.
if (addr & kExternalPCBit) {
+ SymbolizedStackBuilder ssb = {nullptr, nullptr, addr};
+ __tsan_symbolize_external_ex(addr, AddFrame, &ssb);
+ if (ssb.head)
+ return ssb.head;
+ // Legacy code: remove along with the declaration above
+ // once all clients using this API are gone.
// Declare static to not consume too much stack space.
// We symbolize reports in a single thread, so this is fine.
static char func_buf[1024];
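
A sketch of the client side of the new API: an external runtime (a JIT, for example) defines __tsan_symbolize_external_ex and reports frames through the callback supplied by the runtime. Only the entry-point signature comes from the patch; the frame table and lookup below are hypothetical (uptr spelled as unsigned long for a 64-bit target):

    struct ExternalFrame {
      unsigned long pc;
      const char *function, *file;
      int line, column;
    };

    // Hypothetical table maintained by the external runtime.
    static ExternalFrame g_frames[1024];
    static int g_frame_count;

    extern "C" void __tsan_symbolize_external_ex(
        unsigned long pc,
        void (*add_frame)(void *, const char *, const char *, int, int),
        void *ctx) {
      for (int i = 0; i < g_frame_count; i++) {
        if (g_frames[i].pc == pc) {
          // Hand one symbolized frame back to the runtime's AddFrame builder.
          add_frame(ctx, g_frames[i].function, g_frames[i].file,
                    g_frames[i].line, g_frames[i].column);
          return;
        }
      }
    }
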
diff --git a/lib/tsan/rtl/tsan_sync.cc b/lib/tsan/rtl/tsan_sync.cc
index 44ae558fa1b2..ba3953375ee9 100644
--- a/lib/tsan/rtl/tsan_sync.cc
+++ b/lib/tsan/rtl/tsan_sync.cc
@@ -176,7 +176,8 @@ void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
uptr metap = (uptr)MemToMeta(p0);
uptr metasz = sz0 / kMetaRatio;
UnmapOrDie((void*)metap, metasz);
- MmapFixedNoReserve(metap, metasz);
+ if (!MmapFixedNoReserve(metap, metasz))
+ Die();
}
MBlock* MetaMap::GetBlock(uptr p) {