author     Dimitry Andric <dim@FreeBSD.org>    2021-12-25 22:30:44 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2021-12-25 22:30:44 +0000
commit     77fc4c146f0870ffb09c1afb823ccbe742c5e6ff (patch)
tree       5c0eb39553003b9c75a901af6bc4ddabd6f2f28c /compiler-rt
parent     f65dcba83ce5035ab88a85fe17628b447eb56e1b (diff)
Diffstat (limited to 'compiler-rt')
-rw-r--r--compiler-rt/include/profile/InstrProfData.inc4
-rw-r--r--compiler-rt/include/sanitizer/dfsan_interface.h11
-rw-r--r--compiler-rt/lib/asan/asan_activation.cpp2
-rw-r--r--compiler-rt/lib/asan/asan_allocator.cpp29
-rw-r--r--compiler-rt/lib/asan/asan_debugging.cpp4
-rw-r--r--compiler-rt/lib/asan/asan_errors.cpp10
-rw-r--r--compiler-rt/lib/asan/asan_fake_stack.cpp13
-rw-r--r--compiler-rt/lib/asan/asan_flags.cpp6
-rw-r--r--compiler-rt/lib/asan/asan_globals.cpp7
-rw-r--r--compiler-rt/lib/asan/asan_interface.inc1
-rw-r--r--compiler-rt/lib/asan/asan_linux.cpp2
-rw-r--r--compiler-rt/lib/asan/asan_mac.cpp2
-rw-r--r--compiler-rt/lib/asan/asan_mapping.h194
-rw-r--r--compiler-rt/lib/asan/asan_mapping_sparc64.h9
-rw-r--r--compiler-rt/lib/asan/asan_poisoning.cpp26
-rw-r--r--compiler-rt/lib/asan/asan_poisoning.h11
-rw-r--r--compiler-rt/lib/asan/asan_premap_shadow.cpp2
-rw-r--r--compiler-rt/lib/asan/asan_rtl.cpp41
-rw-r--r--compiler-rt/lib/asan/asan_rtl_x86_64.S146
-rw-r--r--compiler-rt/lib/asan/asan_thread.cpp14
-rw-r--r--compiler-rt/lib/asan/asan_win.cpp2
-rw-r--r--compiler-rt/lib/builtins/cpu_model.c20
-rw-r--r--compiler-rt/lib/dfsan/dfsan.cpp85
-rw-r--r--compiler-rt/lib/dfsan/dfsan_allocator.cpp6
-rw-r--r--compiler-rt/lib/dfsan/done_abilist.txt6
-rw-r--r--compiler-rt/lib/hwasan/hwasan_allocator.cpp5
-rw-r--r--compiler-rt/lib/hwasan/hwasan_interceptors.cpp9
-rw-r--r--compiler-rt/lib/lsan/lsan.h23
-rw-r--r--compiler-rt/lib/lsan/lsan_allocator.cpp11
-rw-r--r--compiler-rt/lib/lsan/lsan_common.cpp478
-rw-r--r--compiler-rt/lib/lsan/lsan_common.h44
-rw-r--r--compiler-rt/lib/lsan/lsan_common_fuchsia.cpp6
-rw-r--r--compiler-rt/lib/lsan/lsan_common_linux.cpp5
-rw-r--r--compiler-rt/lib/lsan/lsan_common_mac.cpp5
-rw-r--r--compiler-rt/lib/lsan/lsan_interceptors.cpp7
-rw-r--r--compiler-rt/lib/memprof/memprof_allocator.cpp15
-rw-r--r--compiler-rt/lib/memprof/memprof_allocator.h1
-rw-r--r--compiler-rt/lib/memprof/memprof_rtl.cpp7
-rw-r--r--compiler-rt/lib/msan/msan_allocator.cpp5
-rw-r--r--compiler-rt/lib/msan/msan_interceptors.cpp3
-rw-r--r--compiler-rt/lib/profile/InstrProfiling.c2
-rw-r--r--compiler-rt/lib/profile/InstrProfilingMerge.c8
-rw-r--r--compiler-rt/lib/profile/InstrProfilingWriter.c21
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp11
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_allocator.h3
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_common.cpp8
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_common.h18
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc8
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_common_interface_posix.inc2
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp72
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_flags.inc3
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp7
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp6
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_linux.h3
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp26
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_lzw.h159
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp13
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_platform.h310
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp241
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h25
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp125
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.h1
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.cpp19
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_win.cpp175
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.cpp20
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.h7
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h7
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp18
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp57
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.h1
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp4
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp58
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp2
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.cpp18
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h7
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_win.cpp5
-rw-r--r--compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp36
-rwxr-xr-xcompiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh10
-rw-r--r--compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt14
-rw-r--r--compiler-rt/lib/sanitizer_common/weak_symbols.txt2
-rw-r--r--compiler-rt/lib/tsan/go/tsan_go.cpp2
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan.syms.extra31
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_clock.cpp (renamed from compiler-rt/lib/tsan/rtl/tsan_clock.cpp)0
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_clock.h (renamed from compiler-rt/lib/tsan/rtl/tsan_clock.h)0
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_debugging.cpp262
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_defs.h236
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_dense_alloc.h156
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_dispatch_defs.h73
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_external.cpp126
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_fd.cpp316
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_fd.h64
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_flags.cpp126
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_flags.h34
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_flags.inc84
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_ignoreset.cpp38
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_ignoreset.h36
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_ilist.h189
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_interceptors.h93
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_interceptors_libdispatch.cpp814
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_interceptors_mac.cpp521
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_interceptors_mach_vm.cpp53
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_interceptors_posix.cpp3015
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_interface.cpp106
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_interface.h424
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_interface.inc182
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_interface_ann.cpp438
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_interface_ann.h32
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_interface_atomic.cpp920
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_interface_java.cpp258
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_interface_java.h99
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_malloc_mac.cpp71
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_md5.cpp250
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_mman.cpp436
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_mman.h78
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_mutexset.cpp132
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_mutexset.h98
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_new_delete.cpp199
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_platform.h988
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_platform_linux.cpp545
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_platform_mac.cpp326
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_platform_posix.cpp147
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_platform_windows.cpp36
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_ppc_regs.h96
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_preinit.cpp26
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_report.cpp479
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_report.h127
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_rtl.cpp811
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_rtl.h796
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_rtl_aarch64.S245
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_rtl_access.cpp604
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_rtl_amd64.S446
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_rtl_mips64.S214
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_rtl_mutex.cpp555
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_rtl_ppc64.S288
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_rtl_proc.cpp60
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_rtl_report.cpp984
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_rtl_s390x.S47
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_rtl_thread.cpp349
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_shadow.h233
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_stack_trace.cpp57
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_stack_trace.h42
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_suppressions.cpp161
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_suppressions.h37
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_symbolize.cpp123
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_symbolize.h30
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_sync.cpp279
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_sync.h153
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_trace.h252
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_update_shadow_word.inc (renamed from compiler-rt/lib/tsan/rtl/tsan_update_shadow_word.inc)0
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_vector_clock.cpp126
-rw-r--r--compiler-rt/lib/tsan/rtl-old/tsan_vector_clock.h51
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_debugging.cpp2
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_defs.h52
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h9
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_fd.cpp33
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_flags.cpp6
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_flags.inc13
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_interceptors.h12
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp33
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_interface.cpp14
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_interface.inc8
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp87
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp4
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_mman.cpp40
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_mman.h2
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_mutexset.cpp54
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_mutexset.h11
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_platform.h285
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp48
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp9
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_platform_posix.cpp18
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_platform_windows.cpp3
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_report.cpp26
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_report.h5
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_rtl.cpp669
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_rtl.h337
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp891
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_rtl_amd64.S236
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp644
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_rtl_proc.cpp1
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp353
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp195
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_shadow.h299
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_sync.cpp82
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_sync.h47
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_trace.h73
-rw-r--r--compiler-rt/lib/xray/xray_allocator.h16
-rw-r--r--compiler-rt/lib/xray/xray_basic_logging.cpp4
-rw-r--r--compiler-rt/lib/xray/xray_hexagon.cpp168
-rw-r--r--compiler-rt/lib/xray/xray_interface.cpp11
-rw-r--r--compiler-rt/lib/xray/xray_trampoline_hexagon.S99
-rw-r--r--compiler-rt/lib/xray/xray_tsc.h3
192 files changed, 24118 insertions, 3298 deletions
diff --git a/compiler-rt/include/profile/InstrProfData.inc b/compiler-rt/include/profile/InstrProfData.inc
index 008b8dde5820..44719126b596 100644
--- a/compiler-rt/include/profile/InstrProfData.inc
+++ b/compiler-rt/include/profile/InstrProfData.inc
@@ -653,15 +653,17 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
/* Profile version is always of type uint64_t. Reserve the upper 8 bits in the
* version for other variants of profile. We set the lowest bit of the upper 8
- * bits (i.e. bit 56) to 1 to indicate if this is an IR-level instrumentaiton
+ * bits (i.e. bit 56) to 1 to indicate if this is an IR-level instrumentation
* generated profile, and 0 if this is a Clang FE generated profile.
* 1 in bit 57 indicates there are context-sensitive records in the profile.
+ * The 59th bit indicates whether to use debug info to correlate profiles.
*/
#define VARIANT_MASKS_ALL 0xff00000000000000ULL
#define GET_VERSION(V) ((V) & ~VARIANT_MASKS_ALL)
#define VARIANT_MASK_IR_PROF (0x1ULL << 56)
#define VARIANT_MASK_CSIR_PROF (0x1ULL << 57)
#define VARIANT_MASK_INSTR_ENTRY (0x1ULL << 58)
+#define VARIANT_MASK_DBG_CORRELATE (0x1ULL << 59)
#define INSTR_PROF_RAW_VERSION_VAR __llvm_profile_raw_version
#define INSTR_PROF_PROFILE_RUNTIME_VAR __llvm_profile_runtime
#define INSTR_PROF_PROFILE_COUNTER_BIAS_VAR __llvm_profile_counter_bias
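
[Editorial note: the variant masks above are plain bit tests on the 64-bit raw profile version word. The standalone C++ sketch below is not part of the patch; it simply applies the mask values defined in this hunk, including the new debug-info-correlation bit, to show how a reader of a raw profile header might decode them.]

#include <cstdint>
#include <cstdio>

// Mask values copied from InstrProfData.inc above.
constexpr uint64_t kVariantMasksAll = 0xff00000000000000ULL;
constexpr uint64_t kIRProf          = 0x1ULL << 56;
constexpr uint64_t kCSIRProf        = 0x1ULL << 57;
constexpr uint64_t kInstrEntry      = 0x1ULL << 58;
constexpr uint64_t kDbgCorrelate    = 0x1ULL << 59;  // added by this commit

void DescribeVersion(uint64_t raw_version) {
  std::printf("base version:               %llu\n",
              (unsigned long long)(raw_version & ~kVariantMasksAll));
  std::printf("IR-level instrumentation:   %d\n", (int)((raw_version & kIRProf) != 0));
  std::printf("context-sensitive records:  %d\n", (int)((raw_version & kCSIRProf) != 0));
  std::printf("entry instrumentation:      %d\n", (int)((raw_version & kInstrEntry) != 0));
  std::printf("debug-info correlation:     %d\n", (int)((raw_version & kDbgCorrelate) != 0));
}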
diff --git a/compiler-rt/include/sanitizer/dfsan_interface.h b/compiler-rt/include/sanitizer/dfsan_interface.h
index d6209a3ea2b2..bc0652c99a14 100644
--- a/compiler-rt/include/sanitizer/dfsan_interface.h
+++ b/compiler-rt/include/sanitizer/dfsan_interface.h
@@ -54,6 +54,10 @@ dfsan_origin dfsan_get_origin(long data);
/// Retrieves the label associated with the data at the given address.
dfsan_label dfsan_read_label(const void *addr, size_t size);
+/// Return the origin associated with the first taint byte in the size bytes
+/// from the address addr.
+dfsan_origin dfsan_read_origin_of_first_taint(const void *addr, size_t size);
+
/// Returns whether the given label label contains the label elem.
int dfsan_has_label(dfsan_label label, dfsan_label elem);
@@ -87,6 +91,9 @@ void dfsan_weak_hook_strncmp(void *caller_pc, const char *s1, const char *s2,
/// prints description at the beginning of the trace. If origin tracking is not
/// on, or the address is not labeled, it prints nothing.
void dfsan_print_origin_trace(const void *addr, const char *description);
+/// As above, but use an origin id from dfsan_get_origin() instead of address.
+/// Does not include header line with taint label and address information.
+void dfsan_print_origin_id_trace(dfsan_origin origin);
/// Prints the origin trace of the label at the address \p addr to a
/// pre-allocated output buffer. If origin tracking is not on, or the address is
@@ -124,6 +131,10 @@ void dfsan_print_origin_trace(const void *addr, const char *description);
/// return value is not less than \p out_buf_size.
size_t dfsan_sprint_origin_trace(const void *addr, const char *description,
char *out_buf, size_t out_buf_size);
+/// As above, but use an origin id from dfsan_get_origin() instead of address.
+/// Does not include header line with taint label and address information.
+size_t dfsan_sprint_origin_id_trace(dfsan_origin origin, char *out_buf,
+ size_t out_buf_size);
/// Prints the stack trace leading to this call to a pre-allocated output
/// buffer.
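
[Editorial note: the new origin-id entry points mirror the address-based ones. The sketch below is not part of the patch and only illustrates one plausible way to call them; the build flags, the label value, and the buffer size are illustrative assumptions, and it presumes a program compiled with -fsanitize=dataflow with DFSan origin tracking enabled.]

#include <sanitizer/dfsan_interface.h>
#include <cstddef>
#include <cstdio>

int main() {
  long value = 42;
  dfsan_label lbl = 1;                            // illustrative label value
  dfsan_set_label(lbl, &value, sizeof(value));    // taint the data

  // Obtain an origin id directly instead of going through an address.
  dfsan_origin id = dfsan_get_origin(value);

  // Print the origin chain for that id; per the header comment, no
  // header line with label/address information is emitted.
  dfsan_print_origin_id_trace(id);

  // Or render the same trace into a caller-provided buffer.
  char buf[1024];
  std::size_t needed = dfsan_sprint_origin_id_trace(id, buf, sizeof(buf));
  if (needed >= sizeof(buf))
    std::printf("trace truncated, %zu bytes needed\n", needed);
  return 0;
}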
diff --git a/compiler-rt/lib/asan/asan_activation.cpp b/compiler-rt/lib/asan/asan_activation.cpp
index 795df95a5414..1757838600ca 100644
--- a/compiler-rt/lib/asan/asan_activation.cpp
+++ b/compiler-rt/lib/asan/asan_activation.cpp
@@ -112,7 +112,7 @@ void AsanDeactivate() {
disabled.quarantine_size_mb = 0;
disabled.thread_local_quarantine_size_kb = 0;
// Redzone must be at least Max(16, granularity) bytes long.
- disabled.min_redzone = Max(16, (int)SHADOW_GRANULARITY);
+ disabled.min_redzone = Max(16, (int)ASAN_SHADOW_GRANULARITY);
disabled.max_redzone = disabled.min_redzone;
disabled.alloc_dealloc_mismatch = false;
disabled.may_return_null = true;
diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp
index 3fa36742060b..1ff7091460ad 100644
--- a/compiler-rt/lib/asan/asan_allocator.cpp
+++ b/compiler-rt/lib/asan/asan_allocator.cpp
@@ -210,8 +210,7 @@ struct QuarantineCallback {
CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
}
- PoisonShadow(m->Beg(),
- RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
+ PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
kAsanHeapLeftRedzoneMagic);
// Statistics.
@@ -305,7 +304,6 @@ struct Allocator {
QuarantineCache fallback_quarantine_cache;
uptr max_user_defined_malloc_size;
- atomic_uint8_t rss_limit_exceeded;
// ------------------- Options --------------------------
atomic_uint16_t min_redzone;
@@ -345,14 +343,6 @@ struct Allocator {
: kMaxAllowedMallocSize;
}
- bool RssLimitExceeded() {
- return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
- }
-
- void SetRssLimitExceeded(bool limit_exceeded) {
- atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
- }
-
void RePoisonChunk(uptr chunk) {
// This could be a user-facing chunk (with redzones), or some internal
// housekeeping chunk, like TransferBatch. Start by assuming the former.
@@ -366,7 +356,7 @@ struct Allocator {
if (chunk < beg && beg < end && end <= chunk_end) {
// Looks like a valid AsanChunk in use, poison redzones only.
PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
- uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
+ uptr end_aligned_down = RoundDownTo(end, ASAN_SHADOW_GRANULARITY);
FastPoisonShadowPartialRightRedzone(
end_aligned_down, end - end_aligned_down,
chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
@@ -484,14 +474,14 @@ struct Allocator {
AllocType alloc_type, bool can_fill) {
if (UNLIKELY(!asan_inited))
AsanInitFromRtl();
- if (RssLimitExceeded()) {
+ if (UNLIKELY(IsRssLimitExceeded())) {
if (AllocatorMayReturnNull())
return nullptr;
ReportRssLimitExceeded(stack);
}
Flags &fl = *flags();
CHECK(stack);
- const uptr min_alignment = SHADOW_GRANULARITY;
+ const uptr min_alignment = ASAN_SHADOW_GRANULARITY;
const uptr user_requested_alignment_log =
ComputeUserRequestedAlignmentLog(alignment);
if (alignment < min_alignment)
@@ -572,7 +562,7 @@ struct Allocator {
m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
uptr size_rounded_down_to_granularity =
- RoundDownTo(size, SHADOW_GRANULARITY);
+ RoundDownTo(size, ASAN_SHADOW_GRANULARITY);
// Unpoison the bulk of the memory region.
if (size_rounded_down_to_granularity)
PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
@@ -580,7 +570,7 @@ struct Allocator {
if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
u8 *shadow =
(u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
- *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
+ *shadow = fl.poison_partial ? (size & (ASAN_SHADOW_GRANULARITY - 1)) : 0;
}
AsanStats &thread_stats = GetCurrentThreadStats();
@@ -650,8 +640,7 @@ struct Allocator {
}
// Poison the region.
- PoisonShadow(m->Beg(),
- RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
+ PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
kAsanHeapFreeMagic);
AsanStats &thread_stats = GetCurrentThreadStats();
@@ -1071,10 +1060,6 @@ void asan_mz_force_unlock() NO_THREAD_SAFETY_ANALYSIS {
instance.ForceUnlock();
}
-void AsanSoftRssLimitExceededCallback(bool limit_exceeded) {
- instance.SetRssLimitExceeded(limit_exceeded);
-}
-
} // namespace __asan
// --- Implementation of LSan-specific functions --- {{{1
diff --git a/compiler-rt/lib/asan/asan_debugging.cpp b/compiler-rt/lib/asan/asan_debugging.cpp
index 0b4bf52f2490..f078f1041a87 100644
--- a/compiler-rt/lib/asan/asan_debugging.cpp
+++ b/compiler-rt/lib/asan/asan_debugging.cpp
@@ -141,7 +141,7 @@ uptr __asan_get_free_stack(uptr addr, uptr *trace, uptr size, u32 *thread_id) {
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_get_shadow_mapping(uptr *shadow_scale, uptr *shadow_offset) {
if (shadow_scale)
- *shadow_scale = SHADOW_SCALE;
+ *shadow_scale = ASAN_SHADOW_SCALE;
if (shadow_offset)
- *shadow_offset = SHADOW_OFFSET;
+ *shadow_offset = ASAN_SHADOW_OFFSET;
}
diff --git a/compiler-rt/lib/asan/asan_errors.cpp b/compiler-rt/lib/asan/asan_errors.cpp
index 7cd9fe911afa..a22bf130d823 100644
--- a/compiler-rt/lib/asan/asan_errors.cpp
+++ b/compiler-rt/lib/asan/asan_errors.cpp
@@ -329,7 +329,7 @@ void ErrorBadParamsToAnnotateContiguousContainer::Print() {
" old_mid : %p\n"
" new_mid : %p\n",
(void *)beg, (void *)end, (void *)old_mid, (void *)new_mid);
- uptr granularity = SHADOW_GRANULARITY;
+ uptr granularity = ASAN_SHADOW_GRANULARITY;
if (!IsAligned(beg, granularity))
Report("ERROR: beg is not aligned by %zu\n", granularity);
stack->Print();
@@ -410,7 +410,8 @@ ErrorGeneric::ErrorGeneric(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr addr,
if (AddrIsInMem(addr)) {
u8 *shadow_addr = (u8 *)MemToShadow(addr);
// If we are accessing 16 bytes, look at the second shadow byte.
- if (*shadow_addr == 0 && access_size > SHADOW_GRANULARITY) shadow_addr++;
+ if (*shadow_addr == 0 && access_size > ASAN_SHADOW_GRANULARITY)
+ shadow_addr++;
// If we are in the partial right redzone, look at the next shadow byte.
if (*shadow_addr > 0 && *shadow_addr < 128) shadow_addr++;
bool far_from_bounds = false;
@@ -501,10 +502,11 @@ static void PrintLegend(InternalScopedString *str) {
str->append(
"Shadow byte legend (one shadow byte represents %d "
"application bytes):\n",
- (int)SHADOW_GRANULARITY);
+ (int)ASAN_SHADOW_GRANULARITY);
PrintShadowByte(str, " Addressable: ", 0);
str->append(" Partially addressable: ");
- for (u8 i = 1; i < SHADOW_GRANULARITY; i++) PrintShadowByte(str, "", i, " ");
+ for (u8 i = 1; i < ASAN_SHADOW_GRANULARITY; i++)
+ PrintShadowByte(str, "", i, " ");
str->append("\n");
PrintShadowByte(str, " Heap left redzone: ",
kAsanHeapLeftRedzoneMagic);
diff --git a/compiler-rt/lib/asan/asan_fake_stack.cpp b/compiler-rt/lib/asan/asan_fake_stack.cpp
index 07681c10de91..08d81c72597c 100644
--- a/compiler-rt/lib/asan/asan_fake_stack.cpp
+++ b/compiler-rt/lib/asan/asan_fake_stack.cpp
@@ -28,8 +28,8 @@ static const u64 kAllocaRedzoneMask = 31UL;
// For small size classes inline PoisonShadow for better performance.
ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
- if (SHADOW_SCALE == 3 && class_id <= 6) {
- // This code expects SHADOW_SCALE=3.
+ if (ASAN_SHADOW_SCALE == 3 && class_id <= 6) {
+ // This code expects ASAN_SHADOW_SCALE=3.
for (uptr i = 0; i < (((uptr)1) << class_id); i++) {
shadow[i] = magic;
// Make sure this does not become memset.
@@ -294,10 +294,10 @@ void __asan_alloca_poison(uptr addr, uptr size) {
uptr LeftRedzoneAddr = addr - kAllocaRedzoneSize;
uptr PartialRzAddr = addr + size;
uptr RightRzAddr = (PartialRzAddr + kAllocaRedzoneMask) & ~kAllocaRedzoneMask;
- uptr PartialRzAligned = PartialRzAddr & ~(SHADOW_GRANULARITY - 1);
+ uptr PartialRzAligned = PartialRzAddr & ~(ASAN_SHADOW_GRANULARITY - 1);
FastPoisonShadow(LeftRedzoneAddr, kAllocaRedzoneSize, kAsanAllocaLeftMagic);
FastPoisonShadowPartialRightRedzone(
- PartialRzAligned, PartialRzAddr % SHADOW_GRANULARITY,
+ PartialRzAligned, PartialRzAddr % ASAN_SHADOW_GRANULARITY,
RightRzAddr - PartialRzAligned, kAsanAllocaRightMagic);
FastPoisonShadow(RightRzAddr, kAllocaRedzoneSize, kAsanAllocaRightMagic);
}
@@ -305,7 +305,8 @@ void __asan_alloca_poison(uptr addr, uptr size) {
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_allocas_unpoison(uptr top, uptr bottom) {
if ((!top) || (top > bottom)) return;
- REAL(memset)(reinterpret_cast<void*>(MemToShadow(top)), 0,
- (bottom - top) / SHADOW_GRANULARITY);
+ REAL(memset)
+ (reinterpret_cast<void *>(MemToShadow(top)), 0,
+ (bottom - top) / ASAN_SHADOW_GRANULARITY);
}
} // extern "C"
diff --git a/compiler-rt/lib/asan/asan_flags.cpp b/compiler-rt/lib/asan/asan_flags.cpp
index c64e46470287..9ea899f84b4b 100644
--- a/compiler-rt/lib/asan/asan_flags.cpp
+++ b/compiler-rt/lib/asan/asan_flags.cpp
@@ -140,9 +140,9 @@ void InitializeFlags() {
SanitizerToolName);
Die();
}
- // Ensure that redzone is at least SHADOW_GRANULARITY.
- if (f->redzone < (int)SHADOW_GRANULARITY)
- f->redzone = SHADOW_GRANULARITY;
+ // Ensure that redzone is at least ASAN_SHADOW_GRANULARITY.
+ if (f->redzone < (int)ASAN_SHADOW_GRANULARITY)
+ f->redzone = ASAN_SHADOW_GRANULARITY;
// Make "strict_init_order" imply "check_initialization_order".
// TODO(samsonov): Use a single runtime flag for an init-order checker.
if (f->strict_init_order) {
diff --git a/compiler-rt/lib/asan/asan_globals.cpp b/compiler-rt/lib/asan/asan_globals.cpp
index 5f56fe6f457d..ecc2600f039a 100644
--- a/compiler-rt/lib/asan/asan_globals.cpp
+++ b/compiler-rt/lib/asan/asan_globals.cpp
@@ -61,14 +61,13 @@ ALWAYS_INLINE void PoisonShadowForGlobal(const Global *g, u8 value) {
}
ALWAYS_INLINE void PoisonRedZones(const Global &g) {
- uptr aligned_size = RoundUpTo(g.size, SHADOW_GRANULARITY);
+ uptr aligned_size = RoundUpTo(g.size, ASAN_SHADOW_GRANULARITY);
FastPoisonShadow(g.beg + aligned_size, g.size_with_redzone - aligned_size,
kAsanGlobalRedzoneMagic);
if (g.size != aligned_size) {
FastPoisonShadowPartialRightRedzone(
- g.beg + RoundDownTo(g.size, SHADOW_GRANULARITY),
- g.size % SHADOW_GRANULARITY,
- SHADOW_GRANULARITY,
+ g.beg + RoundDownTo(g.size, ASAN_SHADOW_GRANULARITY),
+ g.size % ASAN_SHADOW_GRANULARITY, ASAN_SHADOW_GRANULARITY,
kAsanGlobalRedzoneMagic);
}
}
diff --git a/compiler-rt/lib/asan/asan_interface.inc b/compiler-rt/lib/asan/asan_interface.inc
index ea28fc8ae87c..89ef552b7117 100644
--- a/compiler-rt/lib/asan/asan_interface.inc
+++ b/compiler-rt/lib/asan/asan_interface.inc
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
// Asan interface list.
//===----------------------------------------------------------------------===//
+
INTERFACE_FUNCTION(__asan_addr_is_in_fake_stack)
INTERFACE_FUNCTION(__asan_address_is_poisoned)
INTERFACE_FUNCTION(__asan_after_dynamic_init)
diff --git a/compiler-rt/lib/asan/asan_linux.cpp b/compiler-rt/lib/asan/asan_linux.cpp
index ad3693d5e6a2..1d92c530bd11 100644
--- a/compiler-rt/lib/asan/asan_linux.cpp
+++ b/compiler-rt/lib/asan/asan_linux.cpp
@@ -107,7 +107,7 @@ uptr FindDynamicShadowStart() {
return FindPremappedShadowStart(shadow_size_bytes);
#endif
- return MapDynamicShadow(shadow_size_bytes, SHADOW_SCALE,
+ return MapDynamicShadow(shadow_size_bytes, ASAN_SHADOW_SCALE,
/*min_shadow_base_alignment*/ 0, kHighMemEnd);
}
diff --git a/compiler-rt/lib/asan/asan_mac.cpp b/compiler-rt/lib/asan/asan_mac.cpp
index c6950547f089..9161f728d44c 100644
--- a/compiler-rt/lib/asan/asan_mac.cpp
+++ b/compiler-rt/lib/asan/asan_mac.cpp
@@ -55,7 +55,7 @@ void *AsanDoesNotSupportStaticLinkage() {
}
uptr FindDynamicShadowStart() {
- return MapDynamicShadow(MemToShadowSize(kHighMemEnd), SHADOW_SCALE,
+ return MapDynamicShadow(MemToShadowSize(kHighMemEnd), ASAN_SHADOW_SCALE,
/*min_shadow_base_alignment*/ 0, kHighMemEnd);
}
diff --git a/compiler-rt/lib/asan/asan_mapping.h b/compiler-rt/lib/asan/asan_mapping.h
index e5a7f2007aea..6ca6ee00e5c9 100644
--- a/compiler-rt/lib/asan/asan_mapping.h
+++ b/compiler-rt/lib/asan/asan_mapping.h
@@ -13,8 +13,6 @@
#ifndef ASAN_MAPPING_H
#define ASAN_MAPPING_H
-#include "asan_internal.h"
-
// The full explanation of the memory mapping could be found here:
// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
//
@@ -151,149 +149,145 @@
// || `[0x30000000, 0x35ffffff]` || LowShadow ||
// || `[0x00000000, 0x2fffffff]` || LowMem ||
-#if defined(ASAN_SHADOW_SCALE)
-static const u64 kDefaultShadowScale = ASAN_SHADOW_SCALE;
-#else
-static const u64 kDefaultShadowScale = 3;
-#endif
-static const u64 kDefaultShadowSentinel = ~(uptr)0;
-static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000
-static const u64 kDefaultShadowOffset64 = 1ULL << 44;
-static const u64 kDefaultShort64bitShadowOffset =
- 0x7FFFFFFF & (~0xFFFULL << kDefaultShadowScale); // < 2G.
-static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
-static const u64 kRiscv64_ShadowOffset64 = 0xd55550000;
-static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
-static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
-static const u64 kPPC64_ShadowOffset64 = 1ULL << 44;
-static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
-static const u64 kSPARC64_ShadowOffset64 = 1ULL << 43; // 0x80000000000
-static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
-static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
-static const u64 kNetBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
-static const u64 kNetBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
-static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
-
-#define SHADOW_SCALE kDefaultShadowScale
+#define ASAN_SHADOW_SCALE 3
#if SANITIZER_FUCHSIA
-# define SHADOW_OFFSET (0)
+# define ASAN_SHADOW_OFFSET_CONST (0)
#elif SANITIZER_WORDSIZE == 32
# if SANITIZER_ANDROID
-# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
+# define ASAN_SHADOW_OFFSET_DYNAMIC
# elif defined(__mips__)
-# define SHADOW_OFFSET kMIPS32_ShadowOffset32
+# define ASAN_SHADOW_OFFSET_CONST 0x0aaa0000
# elif SANITIZER_FREEBSD
-# define SHADOW_OFFSET kFreeBSD_ShadowOffset32
+# define ASAN_SHADOW_OFFSET_CONST 0x40000000
# elif SANITIZER_NETBSD
-# define SHADOW_OFFSET kNetBSD_ShadowOffset32
+# define ASAN_SHADOW_OFFSET_CONST 0x40000000
# elif SANITIZER_WINDOWS
-# define SHADOW_OFFSET kWindowsShadowOffset32
+# define ASAN_SHADOW_OFFSET_CONST 0x30000000
# elif SANITIZER_IOS
-# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
+# define ASAN_SHADOW_OFFSET_DYNAMIC
# else
-# define SHADOW_OFFSET kDefaultShadowOffset32
+# define ASAN_SHADOW_OFFSET_CONST 0x20000000
# endif
#else
# if SANITIZER_IOS
-# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
+# define ASAN_SHADOW_OFFSET_DYNAMIC
# elif SANITIZER_MAC && defined(__aarch64__)
-# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
-#elif SANITIZER_RISCV64
-#define SHADOW_OFFSET kRiscv64_ShadowOffset64
+# define ASAN_SHADOW_OFFSET_DYNAMIC
+# elif SANITIZER_RISCV64
+# define ASAN_SHADOW_OFFSET_CONST 0x0000000d55550000
# elif defined(__aarch64__)
-# define SHADOW_OFFSET kAArch64_ShadowOffset64
+# define ASAN_SHADOW_OFFSET_CONST 0x0000001000000000
# elif defined(__powerpc64__)
-# define SHADOW_OFFSET kPPC64_ShadowOffset64
+# define ASAN_SHADOW_OFFSET_CONST 0x0000100000000000
# elif defined(__s390x__)
-# define SHADOW_OFFSET kSystemZ_ShadowOffset64
+# define ASAN_SHADOW_OFFSET_CONST 0x0010000000000000
# elif SANITIZER_FREEBSD
-# define SHADOW_OFFSET kFreeBSD_ShadowOffset64
+# define ASAN_SHADOW_OFFSET_CONST 0x0000400000000000
# elif SANITIZER_NETBSD
-# define SHADOW_OFFSET kNetBSD_ShadowOffset64
+# define ASAN_SHADOW_OFFSET_CONST 0x0000400000000000
# elif SANITIZER_MAC
-# define SHADOW_OFFSET kDefaultShadowOffset64
+# define ASAN_SHADOW_OFFSET_CONST 0x0000100000000000
# elif defined(__mips64)
-# define SHADOW_OFFSET kMIPS64_ShadowOffset64
-#elif defined(__sparc__)
-#define SHADOW_OFFSET kSPARC64_ShadowOffset64
+# define ASAN_SHADOW_OFFSET_CONST 0x0000002000000000
+# elif defined(__sparc__)
+# define ASAN_SHADOW_OFFSET_CONST 0x0000080000000000
# elif SANITIZER_WINDOWS64
-# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
+# define ASAN_SHADOW_OFFSET_DYNAMIC
# else
-# define SHADOW_OFFSET kDefaultShort64bitShadowOffset
+# if ASAN_SHADOW_SCALE != 3
+# error "Value below is based on shadow scale = 3."
+# error "Original formula was: 0x7FFFFFFF & (~0xFFFULL << SHADOW_SCALE)."
+# endif
+# define ASAN_SHADOW_OFFSET_CONST 0x000000007fff8000
# endif
#endif
-#if SANITIZER_ANDROID && defined(__arm__)
-# define ASAN_PREMAP_SHADOW 1
-#else
-# define ASAN_PREMAP_SHADOW 0
-#endif
+#if defined(__cplusplus)
+# include "asan_internal.h"
-#define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE)
+static const u64 kDefaultShadowSentinel = ~(uptr)0;
-#define DO_ASAN_MAPPING_PROFILE 0 // Set to 1 to profile the functions below.
+# if defined(ASAN_SHADOW_OFFSET_CONST)
+static const u64 kConstShadowOffset = ASAN_SHADOW_OFFSET_CONST;
+# define ASAN_SHADOW_OFFSET kConstShadowOffset
+# elif defined(ASAN_SHADOW_OFFSET_DYNAMIC)
+# define ASAN_SHADOW_OFFSET __asan_shadow_memory_dynamic_address
+# else
+# error "ASAN_SHADOW_OFFSET can't be determined."
+# endif
-#if DO_ASAN_MAPPING_PROFILE
-# define PROFILE_ASAN_MAPPING() AsanMappingProfile[__LINE__]++;
-#else
-# define PROFILE_ASAN_MAPPING()
-#endif
+# if SANITIZER_ANDROID && defined(__arm__)
+# define ASAN_PREMAP_SHADOW 1
+# else
+# define ASAN_PREMAP_SHADOW 0
+# endif
+
+# define ASAN_SHADOW_GRANULARITY (1ULL << ASAN_SHADOW_SCALE)
+
+# define DO_ASAN_MAPPING_PROFILE 0 // Set to 1 to profile the functions below.
+
+# if DO_ASAN_MAPPING_PROFILE
+# define PROFILE_ASAN_MAPPING() AsanMappingProfile[__LINE__]++;
+# else
+# define PROFILE_ASAN_MAPPING()
+# endif
// If 1, all shadow boundaries are constants.
// Don't set to 1 other than for testing.
-#define ASAN_FIXED_MAPPING 0
+# define ASAN_FIXED_MAPPING 0
namespace __asan {
extern uptr AsanMappingProfile[];
-#if ASAN_FIXED_MAPPING
+# if ASAN_FIXED_MAPPING
// Fixed mapping for 64-bit Linux. Mostly used for performance comparison
// with non-fixed mapping. As of r175253 (Feb 2013) the performance
// difference between fixed and non-fixed mapping is below the noise level.
static uptr kHighMemEnd = 0x7fffffffffffULL;
-static uptr kMidMemBeg = 0x3000000000ULL;
-static uptr kMidMemEnd = 0x4fffffffffULL;
-#else
+static uptr kMidMemBeg = 0x3000000000ULL;
+static uptr kMidMemEnd = 0x4fffffffffULL;
+# else
extern uptr kHighMemEnd, kMidMemBeg, kMidMemEnd; // Initialized in __asan_init.
-#endif
+# endif
} // namespace __asan
-#if defined(__sparc__) && SANITIZER_WORDSIZE == 64
-# include "asan_mapping_sparc64.h"
-#else
-#define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) + (SHADOW_OFFSET))
+# if defined(__sparc__) && SANITIZER_WORDSIZE == 64
+# include "asan_mapping_sparc64.h"
+# else
+# define MEM_TO_SHADOW(mem) \
+ (((mem) >> ASAN_SHADOW_SCALE) + (ASAN_SHADOW_OFFSET))
-#define kLowMemBeg 0
-#define kLowMemEnd (SHADOW_OFFSET ? SHADOW_OFFSET - 1 : 0)
+# define kLowMemBeg 0
+# define kLowMemEnd (ASAN_SHADOW_OFFSET ? ASAN_SHADOW_OFFSET - 1 : 0)
-#define kLowShadowBeg SHADOW_OFFSET
-#define kLowShadowEnd MEM_TO_SHADOW(kLowMemEnd)
+# define kLowShadowBeg ASAN_SHADOW_OFFSET
+# define kLowShadowEnd MEM_TO_SHADOW(kLowMemEnd)
-#define kHighMemBeg (MEM_TO_SHADOW(kHighMemEnd) + 1)
+# define kHighMemBeg (MEM_TO_SHADOW(kHighMemEnd) + 1)
-#define kHighShadowBeg MEM_TO_SHADOW(kHighMemBeg)
-#define kHighShadowEnd MEM_TO_SHADOW(kHighMemEnd)
+# define kHighShadowBeg MEM_TO_SHADOW(kHighMemBeg)
+# define kHighShadowEnd MEM_TO_SHADOW(kHighMemEnd)
-# define kMidShadowBeg MEM_TO_SHADOW(kMidMemBeg)
-# define kMidShadowEnd MEM_TO_SHADOW(kMidMemEnd)
+# define kMidShadowBeg MEM_TO_SHADOW(kMidMemBeg)
+# define kMidShadowEnd MEM_TO_SHADOW(kMidMemEnd)
// With the zero shadow base we can not actually map pages starting from 0.
// This constant is somewhat arbitrary.
-#define kZeroBaseShadowStart 0
-#define kZeroBaseMaxShadowStart (1 << 18)
+# define kZeroBaseShadowStart 0
+# define kZeroBaseMaxShadowStart (1 << 18)
-#define kShadowGapBeg (kLowShadowEnd ? kLowShadowEnd + 1 \
- : kZeroBaseShadowStart)
-#define kShadowGapEnd ((kMidMemBeg ? kMidShadowBeg : kHighShadowBeg) - 1)
+# define kShadowGapBeg \
+ (kLowShadowEnd ? kLowShadowEnd + 1 : kZeroBaseShadowStart)
+# define kShadowGapEnd ((kMidMemBeg ? kMidShadowBeg : kHighShadowBeg) - 1)
-#define kShadowGap2Beg (kMidMemBeg ? kMidShadowEnd + 1 : 0)
-#define kShadowGap2End (kMidMemBeg ? kMidMemBeg - 1 : 0)
+# define kShadowGap2Beg (kMidMemBeg ? kMidShadowEnd + 1 : 0)
+# define kShadowGap2End (kMidMemBeg ? kMidMemBeg - 1 : 0)
-#define kShadowGap3Beg (kMidMemBeg ? kMidMemEnd + 1 : 0)
-#define kShadowGap3End (kMidMemBeg ? kHighShadowBeg - 1 : 0)
+# define kShadowGap3Beg (kMidMemBeg ? kMidMemEnd + 1 : 0)
+# define kShadowGap3End (kMidMemBeg ? kHighShadowBeg - 1 : 0)
namespace __asan {
@@ -331,29 +325,31 @@ static inline bool AddrIsInShadowGap(uptr a) {
PROFILE_ASAN_MAPPING();
if (kMidMemBeg) {
if (a <= kShadowGapEnd)
- return SHADOW_OFFSET == 0 || a >= kShadowGapBeg;
+ return ASAN_SHADOW_OFFSET == 0 || a >= kShadowGapBeg;
return (a >= kShadowGap2Beg && a <= kShadowGap2End) ||
(a >= kShadowGap3Beg && a <= kShadowGap3End);
}
// In zero-based shadow mode we treat addresses near zero as addresses
// in shadow gap as well.
- if (SHADOW_OFFSET == 0)
+ if (ASAN_SHADOW_OFFSET == 0)
return a <= kShadowGapEnd;
return a >= kShadowGapBeg && a <= kShadowGapEnd;
}
} // namespace __asan
-#endif
+# endif
namespace __asan {
-static inline uptr MemToShadowSize(uptr size) { return size >> SHADOW_SCALE; }
+static inline uptr MemToShadowSize(uptr size) {
+ return size >> ASAN_SHADOW_SCALE;
+}
static inline bool AddrIsInMem(uptr a) {
PROFILE_ASAN_MAPPING();
return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a) ||
- (flags()->protect_shadow_gap == 0 && AddrIsInShadowGap(a));
+ (flags()->protect_shadow_gap == 0 && AddrIsInShadowGap(a));
}
static inline uptr MemToShadow(uptr p) {
@@ -369,17 +365,17 @@ static inline bool AddrIsInShadow(uptr a) {
static inline bool AddrIsAlignedByGranularity(uptr a) {
PROFILE_ASAN_MAPPING();
- return (a & (SHADOW_GRANULARITY - 1)) == 0;
+ return (a & (ASAN_SHADOW_GRANULARITY - 1)) == 0;
}
static inline bool AddressIsPoisoned(uptr a) {
PROFILE_ASAN_MAPPING();
const uptr kAccessSize = 1;
- u8 *shadow_address = (u8*)MEM_TO_SHADOW(a);
+ u8 *shadow_address = (u8 *)MEM_TO_SHADOW(a);
s8 shadow_value = *shadow_address;
if (shadow_value) {
- u8 last_accessed_byte = (a & (SHADOW_GRANULARITY - 1))
- + kAccessSize - 1;
+ u8 last_accessed_byte =
+ (a & (ASAN_SHADOW_GRANULARITY - 1)) + kAccessSize - 1;
return (last_accessed_byte >= shadow_value);
}
return false;
@@ -390,4 +386,6 @@ static const uptr kAsanMappingProfileSize = __LINE__;
} // namespace __asan
+#endif // __cplusplus
+
#endif // ASAN_MAPPING_H
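
[Editorial note: to make the mapping concrete, the standalone sketch below (not part of the patch) evaluates MEM_TO_SHADOW with the default 64-bit constants defined above — scale 3, offset 0x7fff8000 — so each shadow byte covers eight application bytes. The sample address is arbitrary.]

#include <cstdint>
#include <cstdio>

constexpr uint64_t kShadowScale  = 3;                    // ASAN_SHADOW_SCALE
constexpr uint64_t kShadowOffset = 0x000000007fff8000;   // default 64-bit ASAN_SHADOW_OFFSET_CONST

// Same computation as MEM_TO_SHADOW(mem) above.
constexpr uint64_t MemToShadow(uint64_t mem) {
  return (mem >> kShadowScale) + kShadowOffset;
}

int main() {
  uint64_t addr = 0x602000000010;                        // arbitrary application address
  std::printf("shadow(%#llx) = %#llx\n",
              (unsigned long long)addr,
              (unsigned long long)MemToShadow(addr));
  return 0;
}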
diff --git a/compiler-rt/lib/asan/asan_mapping_sparc64.h b/compiler-rt/lib/asan/asan_mapping_sparc64.h
index 432a1816f797..90261d301f7f 100644
--- a/compiler-rt/lib/asan/asan_mapping_sparc64.h
+++ b/compiler-rt/lib/asan/asan_mapping_sparc64.h
@@ -25,13 +25,14 @@
// The idea is to chop the high bits before doing the scaling, so the two
// parts become contiguous again and the usual scheme can be applied.
-#define MEM_TO_SHADOW(mem) \
- ((((mem) << HIGH_BITS) >> (HIGH_BITS + (SHADOW_SCALE))) + (SHADOW_OFFSET))
+#define MEM_TO_SHADOW(mem) \
+ ((((mem) << HIGH_BITS) >> (HIGH_BITS + (ASAN_SHADOW_SCALE))) + \
+ (ASAN_SHADOW_OFFSET))
#define kLowMemBeg 0
-#define kLowMemEnd (SHADOW_OFFSET - 1)
+#define kLowMemEnd (ASAN_SHADOW_OFFSET - 1)
-#define kLowShadowBeg SHADOW_OFFSET
+#define kLowShadowBeg ASAN_SHADOW_OFFSET
#define kLowShadowEnd MEM_TO_SHADOW(kLowMemEnd)
// But of course there is the huge hole between the high shadow memory,
diff --git a/compiler-rt/lib/asan/asan_poisoning.cpp b/compiler-rt/lib/asan/asan_poisoning.cpp
index d97af91e692d..bbc7db4709e1 100644
--- a/compiler-rt/lib/asan/asan_poisoning.cpp
+++ b/compiler-rt/lib/asan/asan_poisoning.cpp
@@ -35,7 +35,7 @@ void PoisonShadow(uptr addr, uptr size, u8 value) {
CHECK(AddrIsAlignedByGranularity(addr));
CHECK(AddrIsInMem(addr));
CHECK(AddrIsAlignedByGranularity(addr + size));
- CHECK(AddrIsInMem(addr + size - SHADOW_GRANULARITY));
+ CHECK(AddrIsInMem(addr + size - ASAN_SHADOW_GRANULARITY));
CHECK(REAL(memset));
FastPoisonShadow(addr, size, value);
}
@@ -52,12 +52,12 @@ void PoisonShadowPartialRightRedzone(uptr addr,
struct ShadowSegmentEndpoint {
u8 *chunk;
- s8 offset; // in [0, SHADOW_GRANULARITY)
+ s8 offset; // in [0, ASAN_SHADOW_GRANULARITY)
s8 value; // = *chunk;
explicit ShadowSegmentEndpoint(uptr address) {
chunk = (u8*)MemToShadow(address);
- offset = address & (SHADOW_GRANULARITY - 1);
+ offset = address & (ASAN_SHADOW_GRANULARITY - 1);
value = *chunk;
}
};
@@ -72,14 +72,14 @@ void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
}
CHECK(size);
CHECK_LE(size, 4096);
- CHECK(IsAligned(end, SHADOW_GRANULARITY));
- if (!IsAligned(ptr, SHADOW_GRANULARITY)) {
+ CHECK(IsAligned(end, ASAN_SHADOW_GRANULARITY));
+ if (!IsAligned(ptr, ASAN_SHADOW_GRANULARITY)) {
*(u8 *)MemToShadow(ptr) =
- poison ? static_cast<u8>(ptr % SHADOW_GRANULARITY) : 0;
- ptr |= SHADOW_GRANULARITY - 1;
+ poison ? static_cast<u8>(ptr % ASAN_SHADOW_GRANULARITY) : 0;
+ ptr |= ASAN_SHADOW_GRANULARITY - 1;
ptr++;
}
- for (; ptr < end; ptr += SHADOW_GRANULARITY)
+ for (; ptr < end; ptr += ASAN_SHADOW_GRANULARITY)
*(u8*)MemToShadow(ptr) = poison ? kAsanIntraObjectRedzone : 0;
}
@@ -181,12 +181,12 @@ uptr __asan_region_is_poisoned(uptr beg, uptr size) {
if (!AddrIsInMem(end))
return end;
CHECK_LT(beg, end);
- uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY);
- uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY);
+ uptr aligned_b = RoundUpTo(beg, ASAN_SHADOW_GRANULARITY);
+ uptr aligned_e = RoundDownTo(end, ASAN_SHADOW_GRANULARITY);
uptr shadow_beg = MemToShadow(aligned_b);
uptr shadow_end = MemToShadow(aligned_e);
// First check the first and the last application bytes,
- // then check the SHADOW_GRANULARITY-aligned region by calling
+ // then check the ASAN_SHADOW_GRANULARITY-aligned region by calling
// mem_is_zero on the corresponding shadow.
if (!__asan::AddressIsPoisoned(beg) && !__asan::AddressIsPoisoned(end - 1) &&
(shadow_end <= shadow_beg ||
@@ -285,7 +285,7 @@ uptr __asan_load_cxx_array_cookie(uptr *p) {
// assumes that left border of region to be poisoned is properly aligned.
static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
if (size == 0) return;
- uptr aligned_size = size & ~(SHADOW_GRANULARITY - 1);
+ uptr aligned_size = size & ~(ASAN_SHADOW_GRANULARITY - 1);
PoisonShadow(addr, aligned_size,
do_poison ? kAsanStackUseAfterScopeMagic : 0);
if (size == aligned_size)
@@ -351,7 +351,7 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
uptr end = reinterpret_cast<uptr>(end_p);
uptr old_mid = reinterpret_cast<uptr>(old_mid_p);
uptr new_mid = reinterpret_cast<uptr>(new_mid_p);
- uptr granularity = SHADOW_GRANULARITY;
+ uptr granularity = ASAN_SHADOW_GRANULARITY;
if (!(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end &&
IsAligned(beg, granularity))) {
GET_STACK_TRACE_FATAL_HERE;
diff --git a/compiler-rt/lib/asan/asan_poisoning.h b/compiler-rt/lib/asan/asan_poisoning.h
index 3d536f2d3097..600bd011f304 100644
--- a/compiler-rt/lib/asan/asan_poisoning.h
+++ b/compiler-rt/lib/asan/asan_poisoning.h
@@ -44,8 +44,8 @@ ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
common_flags()->clear_shadow_mmap_threshold);
#else
uptr shadow_beg = MEM_TO_SHADOW(aligned_beg);
- uptr shadow_end = MEM_TO_SHADOW(
- aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1;
+ uptr shadow_end =
+ MEM_TO_SHADOW(aligned_beg + aligned_size - ASAN_SHADOW_GRANULARITY) + 1;
// FIXME: Page states are different on Windows, so using the same interface
// for mapping shadow and zeroing out pages doesn't "just work", so we should
// probably provide higher-level interface for these operations.
@@ -78,11 +78,12 @@ ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
DCHECK(CanPoisonMemory());
bool poison_partial = flags()->poison_partial;
u8 *shadow = (u8*)MEM_TO_SHADOW(aligned_addr);
- for (uptr i = 0; i < redzone_size; i += SHADOW_GRANULARITY, shadow++) {
- if (i + SHADOW_GRANULARITY <= size) {
+ for (uptr i = 0; i < redzone_size; i += ASAN_SHADOW_GRANULARITY, shadow++) {
+ if (i + ASAN_SHADOW_GRANULARITY <= size) {
*shadow = 0; // fully addressable
} else if (i >= size) {
- *shadow = (SHADOW_GRANULARITY == 128) ? 0xff : value; // unaddressable
+ *shadow =
+ (ASAN_SHADOW_GRANULARITY == 128) ? 0xff : value; // unaddressable
} else {
// first size-i bytes are addressable
*shadow = poison_partial ? static_cast<u8>(size - i) : 0;
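
[Editorial note: as a concrete illustration of the encoding written by FastPoisonShadowPartialRightRedzone above — with 8-byte granules, a shadow byte of 0 means the whole granule is addressable, a value k in 1..7 means only the first k bytes are, and a magic value marks a fully unaddressable redzone granule. The standalone sketch below reproduces the loop; the object size and magic value are illustrative assumptions, not taken from the patch.]

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kGranularity = 8;         // ASAN_SHADOW_GRANULARITY
  const uint64_t size = 13, redzone = 32;  // hypothetical 13-byte object, 32-byte span
  const uint8_t magic = 0xfa;              // e.g. a heap redzone magic (assumption)
  for (uint64_t i = 0; i < redzone; i += kGranularity) {
    uint8_t shadow;
    if (i + kGranularity <= size)
      shadow = 0;                          // granule fully addressable
    else if (i >= size)
      shadow = magic;                      // granule fully unaddressable
    else
      shadow = (uint8_t)(size - i);        // first size-i bytes addressable
    std::printf("shadow[%llu] = 0x%02x\n",
                (unsigned long long)(i / kGranularity), (unsigned)shadow);
  }
  // For this example the four shadow bytes come out as 0x00, 0x05, 0xfa, 0xfa.
  return 0;
}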
diff --git a/compiler-rt/lib/asan/asan_premap_shadow.cpp b/compiler-rt/lib/asan/asan_premap_shadow.cpp
index 666bb9b34bd3..bed2f62a2251 100644
--- a/compiler-rt/lib/asan/asan_premap_shadow.cpp
+++ b/compiler-rt/lib/asan/asan_premap_shadow.cpp
@@ -26,7 +26,7 @@ namespace __asan {
// Conservative upper limit.
uptr PremapShadowSize() {
uptr granularity = GetMmapGranularity();
- return RoundUpTo(GetMaxVirtualAddress() >> SHADOW_SCALE, granularity);
+ return RoundUpTo(GetMaxVirtualAddress() >> ASAN_SHADOW_SCALE, granularity);
}
// Returns an address aligned to 8 pages, such that one page on the left and
diff --git a/compiler-rt/lib/asan/asan_rtl.cpp b/compiler-rt/lib/asan/asan_rtl.cpp
index 5be8ef0f6d1c..f0bbbf32e6a6 100644
--- a/compiler-rt/lib/asan/asan_rtl.cpp
+++ b/compiler-rt/lib/asan/asan_rtl.cpp
@@ -146,11 +146,11 @@ ASAN_REPORT_ERROR_N(store, true)
#define ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp_arg, fatal) \
uptr sp = MEM_TO_SHADOW(addr); \
- uptr s = size <= SHADOW_GRANULARITY ? *reinterpret_cast<u8 *>(sp) \
- : *reinterpret_cast<u16 *>(sp); \
+ uptr s = size <= ASAN_SHADOW_GRANULARITY ? *reinterpret_cast<u8 *>(sp) \
+ : *reinterpret_cast<u16 *>(sp); \
if (UNLIKELY(s)) { \
- if (UNLIKELY(size >= SHADOW_GRANULARITY || \
- ((s8)((addr & (SHADOW_GRANULARITY - 1)) + size - 1)) >= \
+ if (UNLIKELY(size >= ASAN_SHADOW_GRANULARITY || \
+ ((s8)((addr & (ASAN_SHADOW_GRANULARITY - 1)) + size - 1)) >= \
(s8)s)) { \
ReportGenericErrorWrapper(addr, is_write, size, exp_arg, fatal); \
} \
@@ -309,7 +309,7 @@ static void InitializeHighMemEnd() {
kHighMemEnd = GetMaxUserVirtualAddress();
// Increase kHighMemEnd to make sure it's properly
// aligned together with kHighMemBeg:
- kHighMemEnd |= (GetMmapGranularity() << SHADOW_SCALE) - 1;
+ kHighMemEnd |= (GetMmapGranularity() << ASAN_SHADOW_SCALE) - 1;
#endif // !ASAN_FIXED_MAPPING
CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0);
}
@@ -361,29 +361,16 @@ void PrintAddressSpaceLayout() {
Printf("malloc_context_size=%zu\n",
(uptr)common_flags()->malloc_context_size);
- Printf("SHADOW_SCALE: %d\n", (int)SHADOW_SCALE);
- Printf("SHADOW_GRANULARITY: %d\n", (int)SHADOW_GRANULARITY);
- Printf("SHADOW_OFFSET: 0x%zx\n", (uptr)SHADOW_OFFSET);
- CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7);
+ Printf("SHADOW_SCALE: %d\n", (int)ASAN_SHADOW_SCALE);
+ Printf("SHADOW_GRANULARITY: %d\n", (int)ASAN_SHADOW_GRANULARITY);
+ Printf("SHADOW_OFFSET: 0x%zx\n", (uptr)ASAN_SHADOW_OFFSET);
+ CHECK(ASAN_SHADOW_SCALE >= 3 && ASAN_SHADOW_SCALE <= 7);
if (kMidMemBeg)
CHECK(kMidShadowBeg > kLowShadowEnd &&
kMidMemBeg > kMidShadowEnd &&
kHighShadowBeg > kMidMemEnd);
}
-#if defined(__thumb__) && defined(__linux__)
-#define START_BACKGROUND_THREAD_IN_ASAN_INTERNAL
-#endif
-
-#ifndef START_BACKGROUND_THREAD_IN_ASAN_INTERNAL
-static bool UNUSED __local_asan_dyninit = [] {
- MaybeStartBackgroudThread();
- SetSoftRssLimitExceededCallback(AsanSoftRssLimitExceededCallback);
-
- return false;
-}();
-#endif
-
static void AsanInitInternal() {
if (LIKELY(asan_inited)) return;
SanitizerToolName = "AddressSanitizer";
@@ -434,7 +421,7 @@ static void AsanInitInternal() {
MaybeReexec();
// Setup internal allocator callback.
- SetLowLevelAllocateMinAlignment(SHADOW_GRANULARITY);
+ SetLowLevelAllocateMinAlignment(ASAN_SHADOW_GRANULARITY);
SetLowLevelAllocateCallback(OnLowLevelAllocate);
InitializeAsanInterceptors();
@@ -458,10 +445,8 @@ static void AsanInitInternal() {
allocator_options.SetFrom(flags(), common_flags());
InitializeAllocator(allocator_options);
-#ifdef START_BACKGROUND_THREAD_IN_ASAN_INTERNAL
- MaybeStartBackgroudThread();
- SetSoftRssLimitExceededCallback(AsanSoftRssLimitExceededCallback);
-#endif
+ if (SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL)
+ MaybeStartBackgroudThread();
// On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited
// should be set to 1 prior to initializing the threads.
@@ -557,7 +542,7 @@ void UnpoisonStack(uptr bottom, uptr top, const char *type) {
top - bottom);
return;
}
- PoisonShadow(bottom, RoundUpTo(top - bottom, SHADOW_GRANULARITY), 0);
+ PoisonShadow(bottom, RoundUpTo(top - bottom, ASAN_SHADOW_GRANULARITY), 0);
}
static void UnpoisonDefaultStack() {
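
[Editorial note: for reference, the fast-path test in ASAN_MEMORY_ACCESS_CALLBACK_BODY (see the first asan_rtl.cpp hunk above) reduces to the check below. This standalone restatement is only a sketch of the logic, not runtime code: it takes the already-loaded shadow value as a parameter instead of dereferencing MEM_TO_SHADOW, and assumes granularity 8.]

#include <cstdint>

inline bool AccessNeedsReport(uint64_t addr, uint64_t size, int16_t shadow) {
  const uint64_t kGranularity = 8;  // ASAN_SHADOW_GRANULARITY
  if (shadow == 0)
    return false;                   // covered granule(s) fully addressable
  // Sub-granule access: bad only if its last byte reaches the first
  // unaddressable byte recorded in the shadow value.
  return size >= kGranularity ||
         (int8_t)((addr & (kGranularity - 1)) + size - 1) >= (int8_t)shadow;
}

int main() {
  // 4-byte access at granule offset 2 with shadow value 5 (only the first
  // 5 bytes addressable): last byte touched is offset 5, so it must report.
  return AccessNeedsReport(0x1002, 4, 5) ? 0 : 1;
}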
diff --git a/compiler-rt/lib/asan/asan_rtl_x86_64.S b/compiler-rt/lib/asan/asan_rtl_x86_64.S
new file mode 100644
index 000000000000..d27db745ed67
--- /dev/null
+++ b/compiler-rt/lib/asan/asan_rtl_x86_64.S
@@ -0,0 +1,146 @@
+#include "asan_mapping.h"
+#include "sanitizer_common/sanitizer_asm.h"
+
+#if defined(__x86_64__)
+#include "sanitizer_common/sanitizer_platform.h"
+
+.section .text
+.file "asan_rtl_x86_64.S"
+
+#define NAME(n, reg, op, s, i) n##_##op##_##i##_##s##_##reg
+
+#define FNAME(reg, op, s, i) NAME(__asan_check, reg, op, s, i)
+#define RLABEL(reg, op, s, i) NAME(.return, reg, op, s, i)
+#define CLABEL(reg, op, s, i) NAME(.check, reg, op, s, i)
+#define FLABEL(reg, op, s, i) NAME(.fail, reg, op, s, i)
+
+#define BEGINF(reg, op, s, i) \
+.globl FNAME(reg, op, s, i) ;\
+.hidden FNAME(reg, op, s, i) ;\
+ASM_TYPE_FUNCTION(FNAME(reg, op, s, i)) ;\
+.cfi_startproc ;\
+FNAME(reg, op, s, i): ;\
+
+#define ENDF .cfi_endproc ;\
+
+// Access check functions for 1,2 and 4 byte types, which require extra checks.
+#define ASAN_MEMORY_ACCESS_INITIAL_CHECK_ADD(reg, op, s) \
+ mov %##reg,%r10 ;\
+ shr $0x3,%r10 ;\
+ movsbl ASAN_SHADOW_OFFSET_CONST(%r10),%r10d ;\
+ test %r10d,%r10d ;\
+ jne CLABEL(reg, op, s, add) ;\
+RLABEL(reg, op, s, add): ;\
+ retq ;\
+
+#define ASAN_MEMORY_ACCESS_EXTRA_CHECK_1(reg, op, i) \
+CLABEL(reg, op, 1, i): ;\
+ push %rcx ;\
+ mov %##reg,%rcx ;\
+ and $0x7,%ecx ;\
+ cmp %r10d,%ecx ;\
+ pop %rcx ;\
+ jl RLABEL(reg, op, 1, i);\
+ mov %##reg,%rdi ;\
+ jmp __asan_report_##op##1@PLT ;\
+
+#define ASAN_MEMORY_ACCESS_EXTRA_CHECK_2(reg, op, i) \
+CLABEL(reg, op, 2, i): ;\
+ push %rcx ;\
+ mov %##reg,%rcx ;\
+ and $0x7,%ecx ;\
+ add $0x1,%ecx ;\
+ cmp %r10d,%ecx ;\
+ pop %rcx ;\
+ jl RLABEL(reg, op, 2, i);\
+ mov %##reg,%rdi ;\
+ jmp __asan_report_##op##2@PLT ;\
+
+#define ASAN_MEMORY_ACCESS_EXTRA_CHECK_4(reg, op, i) \
+CLABEL(reg, op, 4, i): ;\
+ push %rcx ;\
+ mov %##reg,%rcx ;\
+ and $0x7,%ecx ;\
+ add $0x3,%ecx ;\
+ cmp %r10d,%ecx ;\
+ pop %rcx ;\
+ jl RLABEL(reg, op, 4, i);\
+ mov %##reg,%rdi ;\
+ jmp __asan_report_##op##4@PLT ;\
+
+#define ASAN_MEMORY_ACCESS_CALLBACK_ADD_1(reg, op) \
+BEGINF(reg, op, 1, add) ;\
+ ASAN_MEMORY_ACCESS_INITIAL_CHECK_ADD(reg, op, 1) ;\
+ ASAN_MEMORY_ACCESS_EXTRA_CHECK_1(reg, op, add) ;\
+ENDF
+
+#define ASAN_MEMORY_ACCESS_CALLBACK_ADD_2(reg, op) \
+BEGINF(reg, op, 2, add) ;\
+ ASAN_MEMORY_ACCESS_INITIAL_CHECK_ADD(reg, op, 2) ;\
+ ASAN_MEMORY_ACCESS_EXTRA_CHECK_2(reg, op, add) ;\
+ENDF
+
+#define ASAN_MEMORY_ACCESS_CALLBACK_ADD_4(reg, op) \
+BEGINF(reg, op, 4, add) ;\
+ ASAN_MEMORY_ACCESS_INITIAL_CHECK_ADD(reg, op, 4) ;\
+ ASAN_MEMORY_ACCESS_EXTRA_CHECK_4(reg, op, add) ;\
+ENDF
+
+// Access check functions for 8 and 16 byte types: no extra checks required.
+#define ASAN_MEMORY_ACCESS_CHECK_ADD(reg, op, s, c) \
+ mov %##reg,%r10 ;\
+ shr $0x3,%r10 ;\
+ ##c $0x0,ASAN_SHADOW_OFFSET_CONST(%r10) ;\
+ jne FLABEL(reg, op, s, add) ;\
+ retq ;\
+
+#define ASAN_MEMORY_ACCESS_FAIL(reg, op, s, i) \
+FLABEL(reg, op, s, i): ;\
+ mov %##reg,%rdi ;\
+ jmp __asan_report_##op##s@PLT;\
+
+#define ASAN_MEMORY_ACCESS_CALLBACK_ADD_8(reg, op) \
+BEGINF(reg, op, 8, add) ;\
+ ASAN_MEMORY_ACCESS_CHECK_ADD(reg, op, 8, cmpb) ;\
+ ASAN_MEMORY_ACCESS_FAIL(reg, op, 8, add) ;\
+ENDF
+
+#define ASAN_MEMORY_ACCESS_CALLBACK_ADD_16(reg, op) \
+BEGINF(reg, op, 16, add) ;\
+ ASAN_MEMORY_ACCESS_CHECK_ADD(reg, op, 16, cmpw) ;\
+ ASAN_MEMORY_ACCESS_FAIL(reg, op, 16, add) ;\
+ENDF
+
+#define ASAN_MEMORY_ACCESS_CALLBACKS_ADD(reg) \
+ASAN_MEMORY_ACCESS_CALLBACK_ADD_1(reg, load) \
+ASAN_MEMORY_ACCESS_CALLBACK_ADD_1(reg, store) \
+ASAN_MEMORY_ACCESS_CALLBACK_ADD_2(reg, load) \
+ASAN_MEMORY_ACCESS_CALLBACK_ADD_2(reg, store) \
+ASAN_MEMORY_ACCESS_CALLBACK_ADD_4(reg, load) \
+ASAN_MEMORY_ACCESS_CALLBACK_ADD_4(reg, store) \
+ASAN_MEMORY_ACCESS_CALLBACK_ADD_8(reg, load) \
+ASAN_MEMORY_ACCESS_CALLBACK_ADD_8(reg, store) \
+ASAN_MEMORY_ACCESS_CALLBACK_ADD_16(reg, load) \
+ASAN_MEMORY_ACCESS_CALLBACK_ADD_16(reg, store) \
+
+
+// Instantiate all but R10 and R11 callbacks. We are using PLTSafe class with
+// the intrinsic, which guarantees that the code generation will never emit
+// R10 or R11 callback.
+ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RAX)
+ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RBX)
+ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RCX)
+ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RDX)
+ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RSI)
+ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RDI)
+ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RBP)
+ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R8)
+ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R9)
+ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R12)
+ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R13)
+ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R14)
+ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R15)
+
+#endif
+
+NO_EXEC_STACK_DIRECTIVE
diff --git a/compiler-rt/lib/asan/asan_thread.cpp b/compiler-rt/lib/asan/asan_thread.cpp
index 930139968ec3..2b06c3c4e7c0 100644
--- a/compiler-rt/lib/asan/asan_thread.cpp
+++ b/compiler-rt/lib/asan/asan_thread.cpp
@@ -305,7 +305,7 @@ void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
uptr stack_size = 0;
GetThreadStackAndTls(tid() == kMainTid, &stack_bottom_, &stack_size,
&tls_begin_, &tls_size);
- stack_top_ = RoundDownTo(stack_bottom_ + stack_size, SHADOW_GRANULARITY);
+ stack_top_ = RoundDownTo(stack_bottom_ + stack_size, ASAN_SHADOW_GRANULARITY);
tls_end_ = tls_begin_ + tls_size;
dtls_ = DTLS_Get();
@@ -321,8 +321,8 @@ void AsanThread::ClearShadowForThreadStackAndTLS() {
if (stack_top_ != stack_bottom_)
PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0);
if (tls_begin_ != tls_end_) {
- uptr tls_begin_aligned = RoundDownTo(tls_begin_, SHADOW_GRANULARITY);
- uptr tls_end_aligned = RoundUpTo(tls_end_, SHADOW_GRANULARITY);
+ uptr tls_begin_aligned = RoundDownTo(tls_begin_, ASAN_SHADOW_GRANULARITY);
+ uptr tls_end_aligned = RoundUpTo(tls_end_, ASAN_SHADOW_GRANULARITY);
FastPoisonShadowPartialRightRedzone(tls_begin_aligned,
tls_end_ - tls_begin_aligned,
tls_end_aligned - tls_end_, 0);
@@ -346,27 +346,27 @@ bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
return true;
}
uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8); // align addr.
- uptr mem_ptr = RoundDownTo(aligned_addr, SHADOW_GRANULARITY);
+ uptr mem_ptr = RoundDownTo(aligned_addr, ASAN_SHADOW_GRANULARITY);
u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
u8 *shadow_bottom = (u8*)MemToShadow(bottom);
while (shadow_ptr >= shadow_bottom &&
*shadow_ptr != kAsanStackLeftRedzoneMagic) {
shadow_ptr--;
- mem_ptr -= SHADOW_GRANULARITY;
+ mem_ptr -= ASAN_SHADOW_GRANULARITY;
}
while (shadow_ptr >= shadow_bottom &&
*shadow_ptr == kAsanStackLeftRedzoneMagic) {
shadow_ptr--;
- mem_ptr -= SHADOW_GRANULARITY;
+ mem_ptr -= ASAN_SHADOW_GRANULARITY;
}
if (shadow_ptr < shadow_bottom) {
return false;
}
- uptr* ptr = (uptr*)(mem_ptr + SHADOW_GRANULARITY);
+ uptr *ptr = (uptr *)(mem_ptr + ASAN_SHADOW_GRANULARITY);
CHECK(ptr[0] == kCurrentStackFrameMagic);
access->offset = addr - (uptr)ptr;
access->frame_pc = ptr[2];
diff --git a/compiler-rt/lib/asan/asan_win.cpp b/compiler-rt/lib/asan/asan_win.cpp
index 1577c83cf994..53a0e3bfd385 100644
--- a/compiler-rt/lib/asan/asan_win.cpp
+++ b/compiler-rt/lib/asan/asan_win.cpp
@@ -253,7 +253,7 @@ void *AsanDoesNotSupportStaticLinkage() {
}
uptr FindDynamicShadowStart() {
- return MapDynamicShadow(MemToShadowSize(kHighMemEnd), SHADOW_SCALE,
+ return MapDynamicShadow(MemToShadowSize(kHighMemEnd), ASAN_SHADOW_SCALE,
/*min_shadow_base_alignment*/ 0, kHighMemEnd);
}
diff --git a/compiler-rt/lib/builtins/cpu_model.c b/compiler-rt/lib/builtins/cpu_model.c
index 53e2d89708dc..cf12aa021d3d 100644
--- a/compiler-rt/lib/builtins/cpu_model.c
+++ b/compiler-rt/lib/builtins/cpu_model.c
@@ -798,6 +798,10 @@ _Bool __aarch64_have_lse_atomics
#ifndef HWCAP_ATOMICS
#define HWCAP_ATOMICS (1 << 8)
#endif
+#if defined(__ANDROID__)
+#include <string.h>
+#include <sys/system_properties.h>
+#endif
static void CONSTRUCTOR_ATTRIBUTE init_have_lse_atomics(void) {
#if defined(__FreeBSD__)
unsigned long hwcap;
@@ -805,8 +809,20 @@ static void CONSTRUCTOR_ATTRIBUTE init_have_lse_atomics(void) {
__aarch64_have_lse_atomics = result == 0 && (hwcap & HWCAP_ATOMICS) != 0;
#else
unsigned long hwcap = getauxval(AT_HWCAP);
- __aarch64_have_lse_atomics = (hwcap & HWCAP_ATOMICS) != 0;
-#endif
+ _Bool result = (hwcap & HWCAP_ATOMICS) != 0;
+#if defined(__ANDROID__)
+ if (result) {
+ char arch[PROP_VALUE_MAX];
+ if (__system_property_get("ro.arch", arch) > 0 &&
+ strncmp(arch, "exynos9810", sizeof("exynos9810") - 1) == 0) {
+ // Some cores of Exynos 9810 are ARMv8.2 and others are ARMv8.0,
+ // so disable the lse atomics completely.
+ result = false;
+ }
+ }
+#endif // defined(__ANDROID__)
+ __aarch64_have_lse_atomics = result;
+#endif // defined(__FreeBSD__)
}
#endif // defined(__has_include)
#endif // __has_include(<sys/auxv.h>)
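
Note: the Android branch above consults the ro.arch system property because the Exynos 9810 pairs ARMv8.2 big cores with ARMv8.0 little cores, so LSE instructions advertised via HWCAP_ATOMICS can still fault depending on which core the code lands on. A standalone sketch of the same property check, with the property name and prefix taken from the hunk above:

    #include <string.h>
    #include <sys/system_properties.h>

    // Returns 1 if the device identifies itself as an Exynos 9810, 0 otherwise.
    static int is_exynos9810(void) {
      char arch[PROP_VALUE_MAX];
      return __system_property_get("ro.arch", arch) > 0 &&
             strncmp(arch, "exynos9810", sizeof("exynos9810") - 1) == 0;
    }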
diff --git a/compiler-rt/lib/dfsan/dfsan.cpp b/compiler-rt/lib/dfsan/dfsan.cpp
index ce2c04df83a8..ee7221c7b9a8 100644
--- a/compiler-rt/lib/dfsan/dfsan.cpp
+++ b/compiler-rt/lib/dfsan/dfsan.cpp
@@ -630,22 +630,16 @@ void PrintInvalidOriginWarning(dfsan_label label, const void *address) {
d.Warning(), label, address, d.Default());
}
-bool PrintOriginTraceToStr(const void *addr, const char *description,
- InternalScopedString *out) {
- CHECK(out);
- CHECK(dfsan_get_track_origins());
+void PrintInvalidOriginIdWarning(dfsan_origin origin) {
Decorator d;
+ Printf(
+ " %sOrigin Id %d has invalid origin tracking. This can "
+ "be a DFSan bug.%s\n",
+ d.Warning(), origin, d.Default());
+}
- const dfsan_label label = *__dfsan::shadow_for(addr);
- CHECK(label);
-
- const dfsan_origin origin = *__dfsan::origin_for(addr);
-
- out->append(" %sTaint value 0x%x (at %p) origin tracking (%s)%s\n",
- d.Origin(), label, addr, description ? description : "",
- d.Default());
-
- Origin o = Origin::FromRawId(origin);
+bool PrintOriginTraceFramesToStr(Origin o, InternalScopedString *out) {
+ Decorator d;
bool found = false;
while (o.isChainedOrigin()) {
@@ -668,6 +662,25 @@ bool PrintOriginTraceToStr(const void *addr, const char *description,
return found;
}
+bool PrintOriginTraceToStr(const void *addr, const char *description,
+ InternalScopedString *out) {
+ CHECK(out);
+ CHECK(dfsan_get_track_origins());
+ Decorator d;
+
+ const dfsan_label label = *__dfsan::shadow_for(addr);
+ CHECK(label);
+
+ const dfsan_origin origin = *__dfsan::origin_for(addr);
+
+ out->append(" %sTaint value 0x%x (at %p) origin tracking (%s)%s\n",
+ d.Origin(), label, addr, description ? description : "",
+ d.Default());
+
+ Origin o = Origin::FromRawId(origin);
+ return PrintOriginTraceFramesToStr(o, out);
+}
+
} // namespace
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void dfsan_print_origin_trace(
@@ -725,6 +738,50 @@ dfsan_sprint_origin_trace(const void *addr, const char *description,
return trace.length();
}
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void dfsan_print_origin_id_trace(
+ dfsan_origin origin) {
+ if (!dfsan_get_track_origins()) {
+ PrintNoOriginTrackingWarning();
+ return;
+ }
+ Origin o = Origin::FromRawId(origin);
+
+ InternalScopedString trace;
+ bool success = PrintOriginTraceFramesToStr(o, &trace);
+
+ if (trace.length())
+ Printf("%s", trace.data());
+
+ if (!success)
+ PrintInvalidOriginIdWarning(origin);
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr dfsan_sprint_origin_id_trace(
+ dfsan_origin origin, char *out_buf, uptr out_buf_size) {
+ CHECK(out_buf);
+
+ if (!dfsan_get_track_origins()) {
+ PrintNoOriginTrackingWarning();
+ return 0;
+ }
+ Origin o = Origin::FromRawId(origin);
+
+ InternalScopedString trace;
+ bool success = PrintOriginTraceFramesToStr(o, &trace);
+
+ if (!success) {
+ PrintInvalidOriginIdWarning(origin);
+ return 0;
+ }
+
+ if (out_buf_size) {
+ internal_strncpy(out_buf, trace.data(), out_buf_size - 1);
+ out_buf[out_buf_size - 1] = '\0';
+ }
+
+ return trace.length();
+}
+
extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_origin
dfsan_get_init_origin(const void *addr) {
if (!dfsan_get_track_origins())
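
Note: the new dfsan_print_origin_id_trace and dfsan_sprint_origin_id_trace entry points take a raw dfsan_origin instead of a tainted address, so callers can report chains for origins they obtained separately. A hedged usage sketch, assuming the declarations this change adds to sanitizer/dfsan_interface.h (including dfsan_read_origin_of_first_taint, whose abilist entries appear below):

    #include <stddef.h>
    #include <sanitizer/dfsan_interface.h>

    // Sketch: report the origin chain of the first tainted byte in a buffer.
    // Assumes a build with -fsanitize=dataflow and origin tracking enabled
    // (e.g. -mllvm -dfsan-track-origins=1).
    void report_first_taint(const void *buf, size_t len) {
      dfsan_origin o = dfsan_read_origin_of_first_taint(buf, len);
      if (o)
        dfsan_print_origin_id_trace(o);
    }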
diff --git a/compiler-rt/lib/dfsan/dfsan_allocator.cpp b/compiler-rt/lib/dfsan/dfsan_allocator.cpp
index b2e94564446e..c50aee7a55a0 100644
--- a/compiler-rt/lib/dfsan/dfsan_allocator.cpp
+++ b/compiler-rt/lib/dfsan/dfsan_allocator.cpp
@@ -87,6 +87,12 @@ static void *DFsanAllocate(uptr size, uptr alignment, bool zeroise) {
BufferedStackTrace stack;
ReportAllocationSizeTooBig(size, max_malloc_size, &stack);
}
+ if (UNLIKELY(IsRssLimitExceeded())) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ BufferedStackTrace stack;
+ ReportRssLimitExceeded(&stack);
+ }
DFsanThread *t = GetCurrentThread();
void *allocated;
if (t) {
diff --git a/compiler-rt/lib/dfsan/done_abilist.txt b/compiler-rt/lib/dfsan/done_abilist.txt
index eef7c48948cc..fc2dd02ccf5f 100644
--- a/compiler-rt/lib/dfsan/done_abilist.txt
+++ b/compiler-rt/lib/dfsan/done_abilist.txt
@@ -30,12 +30,18 @@ fun:dfsan_flush=uninstrumented
fun:dfsan_flush=discard
fun:dfsan_print_origin_trace=uninstrumented
fun:dfsan_print_origin_trace=discard
+fun:dfsan_print_origin_id_trace=uninstrumented
+fun:dfsan_print_origin_id_trace=discard
fun:dfsan_sprint_origin_trace=uninstrumented
fun:dfsan_sprint_origin_trace=discard
+fun:dfsan_sprint_origin_id_trace=uninstrumented
+fun:dfsan_sprint_origin_id_trace=discard
fun:dfsan_sprint_stack_trace=uninstrumented
fun:dfsan_sprint_stack_trace=discard
fun:dfsan_get_origin=uninstrumented
fun:dfsan_get_origin=custom
+fun:dfsan_read_origin_of_first_taint=uninstrumented
+fun:dfsan_read_origin_of_first_taint=discard
fun:dfsan_get_init_origin=uninstrumented
fun:dfsan_get_init_origin=discard
fun:dfsan_get_track_origins=uninstrumented
diff --git a/compiler-rt/lib/hwasan/hwasan_allocator.cpp b/compiler-rt/lib/hwasan/hwasan_allocator.cpp
index 9e1729964e27..84e183f2384f 100644
--- a/compiler-rt/lib/hwasan/hwasan_allocator.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_allocator.cpp
@@ -132,6 +132,11 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
}
ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
}
+ if (UNLIKELY(IsRssLimitExceeded())) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportRssLimitExceeded(stack);
+ }
alignment = Max(alignment, kShadowAlignment);
uptr size = TaggedSize(orig_size);
diff --git a/compiler-rt/lib/hwasan/hwasan_interceptors.cpp b/compiler-rt/lib/hwasan/hwasan_interceptors.cpp
index f96ed8804102..8dc886e587e7 100644
--- a/compiler-rt/lib/hwasan/hwasan_interceptors.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_interceptors.cpp
@@ -47,6 +47,12 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr, void *(*callback)(void*),
return res;
}
+INTERCEPTOR(int, pthread_join, void *t, void **arg) {
+ return REAL(pthread_join)(t, arg);
+}
+
+DEFINE_REAL_PTHREAD_FUNCTIONS
+
DEFINE_REAL(int, vfork)
DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(int, vfork)
@@ -189,7 +195,8 @@ void InitializeInterceptors() {
INTERCEPT_FUNCTION(vfork);
#endif // __linux__
INTERCEPT_FUNCTION(pthread_create);
-#endif
+ INTERCEPT_FUNCTION(pthread_join);
+# endif
inited = 1;
}
diff --git a/compiler-rt/lib/lsan/lsan.h b/compiler-rt/lib/lsan/lsan.h
index 1e82ad72f005..af8efa6153a5 100644
--- a/compiler-rt/lib/lsan/lsan.h
+++ b/compiler-rt/lib/lsan/lsan.h
@@ -13,17 +13,17 @@
#include "lsan_thread.h"
#if SANITIZER_POSIX
-#include "lsan_posix.h"
+# include "lsan_posix.h"
#elif SANITIZER_FUCHSIA
-#include "lsan_fuchsia.h"
+# include "lsan_fuchsia.h"
#endif
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
-#define GET_STACK_TRACE(max_size, fast) \
- __sanitizer::BufferedStackTrace stack; \
- stack.Unwind(StackTrace::GetCurrentPc(), \
- GET_CURRENT_FRAME(), nullptr, fast, max_size);
+#define GET_STACK_TRACE(max_size, fast) \
+ __sanitizer::BufferedStackTrace stack; \
+ stack.Unwind(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), nullptr, fast, \
+ max_size);
#define GET_STACK_TRACE_FATAL \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
@@ -40,11 +40,12 @@ void InitializeInterceptors();
void ReplaceSystemMalloc();
void LsanOnDeadlySignal(int signo, void *siginfo, void *context);
-#define ENSURE_LSAN_INITED do { \
- CHECK(!lsan_init_is_running); \
- if (!lsan_inited) \
- __lsan_init(); \
-} while (0)
+#define ENSURE_LSAN_INITED \
+ do { \
+ CHECK(!lsan_init_is_running); \
+ if (!lsan_inited) \
+ __lsan_init(); \
+ } while (0)
} // namespace __lsan
diff --git a/compiler-rt/lib/lsan/lsan_allocator.cpp b/compiler-rt/lib/lsan/lsan_allocator.cpp
index 91e34ebb3214..ea4c6c9cf647 100644
--- a/compiler-rt/lib/lsan/lsan_allocator.cpp
+++ b/compiler-rt/lib/lsan/lsan_allocator.cpp
@@ -27,11 +27,11 @@ extern "C" void *memset(void *ptr, int value, uptr num);
namespace __lsan {
#if defined(__i386__) || defined(__arm__)
-static const uptr kMaxAllowedMallocSize = 1UL << 30;
+static const uptr kMaxAllowedMallocSize = 1ULL << 30;
#elif defined(__mips64) || defined(__aarch64__)
-static const uptr kMaxAllowedMallocSize = 4UL << 30;
+static const uptr kMaxAllowedMallocSize = 4ULL << 30;
#else
-static const uptr kMaxAllowedMallocSize = 8UL << 30;
+static const uptr kMaxAllowedMallocSize = 8ULL << 30;
#endif
static Allocator allocator;
@@ -88,6 +88,11 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
size = 1;
if (size > max_malloc_size)
return ReportAllocationSizeTooBig(size, stack);
+ if (UNLIKELY(IsRssLimitExceeded())) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportRssLimitExceeded(&stack);
+ }
void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
if (UNLIKELY(!p)) {
SetAllocatorOutOfMemory();
diff --git a/compiler-rt/lib/lsan/lsan_common.cpp b/compiler-rt/lib/lsan/lsan_common.cpp
index 308dbb3e41da..fd7aa38d99db 100644
--- a/compiler-rt/lib/lsan/lsan_common.cpp
+++ b/compiler-rt/lib/lsan/lsan_common.cpp
@@ -34,7 +34,6 @@ Mutex global_mutex;
Flags lsan_flags;
-
void DisableCounterUnderflow() {
if (common_flags()->detect_leaks) {
Report("Unmatched call to __lsan_enable().\n");
@@ -43,44 +42,48 @@ void DisableCounterUnderflow() {
}
void Flags::SetDefaults() {
-#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
-#include "lsan_flags.inc"
-#undef LSAN_FLAG
+# define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+# include "lsan_flags.inc"
+# undef LSAN_FLAG
}
void RegisterLsanFlags(FlagParser *parser, Flags *f) {
-#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
- RegisterFlag(parser, #Name, Description, &f->Name);
-#include "lsan_flags.inc"
-#undef LSAN_FLAG
+# define LSAN_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+# include "lsan_flags.inc"
+# undef LSAN_FLAG
}
-#define LOG_POINTERS(...) \
- do { \
- if (flags()->log_pointers) Report(__VA_ARGS__); \
- } while (0)
+# define LOG_POINTERS(...) \
+ do { \
+ if (flags()->log_pointers) \
+ Report(__VA_ARGS__); \
+ } while (0)
-#define LOG_THREADS(...) \
- do { \
- if (flags()->log_threads) Report(__VA_ARGS__); \
- } while (0)
+# define LOG_THREADS(...) \
+ do { \
+ if (flags()->log_threads) \
+ Report(__VA_ARGS__); \
+ } while (0)
class LeakSuppressionContext {
bool parsed = false;
SuppressionContext context;
bool suppressed_stacks_sorted = true;
InternalMmapVector<u32> suppressed_stacks;
+ const LoadedModule *suppress_module = nullptr;
- Suppression *GetSuppressionForAddr(uptr addr);
void LazyInit();
+ Suppression *GetSuppressionForAddr(uptr addr);
+ bool SuppressInvalid(const StackTrace &stack);
+ bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size);
public:
LeakSuppressionContext(const char *supprression_types[],
int suppression_types_num)
: context(supprression_types, suppression_types_num) {}
- Suppression *GetSuppressionForStack(u32 stack_trace_id,
- const StackTrace &stack);
+ bool Suppress(u32 stack_trace_id, uptr hit_count, uptr total_size);
const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
if (!suppressed_stacks_sorted) {
@@ -95,17 +98,17 @@ class LeakSuppressionContext {
ALIGNED(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
static LeakSuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
-static const char *kSuppressionTypes[] = { kSuppressionLeak };
+static const char *kSuppressionTypes[] = {kSuppressionLeak};
static const char kStdSuppressions[] =
-#if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+# if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
// For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
// definition.
"leak:*pthread_exit*\n"
-#endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
-#if SANITIZER_MAC
+# endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+# if SANITIZER_MAC
// For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
"leak:*_os_trace*\n"
-#endif
+# endif
// TLS leak in some glibc versions, described in
// https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
"leak:*tls_get_addr*\n";
@@ -123,9 +126,92 @@ void LeakSuppressionContext::LazyInit() {
if (&__lsan_default_suppressions)
context.Parse(__lsan_default_suppressions());
context.Parse(kStdSuppressions);
+ if (flags()->use_tls && flags()->use_ld_allocations)
+ suppress_module = GetLinker();
}
}
+Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
+ Suppression *s = nullptr;
+
+ // Suppress by module name.
+ if (const char *module_name =
+ Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
+ if (context.Match(module_name, kSuppressionLeak, &s))
+ return s;
+
+ // Suppress by file or function name.
+ SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
+ for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
+ if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
+ context.Match(cur->info.file, kSuppressionLeak, &s)) {
+ break;
+ }
+ }
+ frames->ClearAll();
+ return s;
+}
+
+static uptr GetCallerPC(const StackTrace &stack) {
+ // The top frame is our malloc/calloc/etc. The next frame is the caller.
+ if (stack.size >= 2)
+ return stack.trace[1];
+ return 0;
+}
+
+// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
+// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
+// modules accounting etc.
+// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
+// They are allocated with a __libc_memalign() call in allocate_and_init()
+// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
+// blocks, but we can make sure they come from our own allocator by intercepting
+// __libc_memalign(). On top of that, there is no easy way to reach them. Their
+// addresses are stored in a dynamically allocated array (the DTV) which is
+// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
+// being reachable from the static TLS, and the dynamic TLS being reachable from
+// the DTV. This is because the initial DTV is allocated before our interception
+// mechanism kicks in, and thus we don't recognize it as allocated memory. We
+// can't special-case it either, since we don't know its size.
+// Our solution is to include in the root set all allocations made from
+// ld-linux.so (which is where allocate_and_init() is implemented). This is
+// guaranteed to include all dynamic TLS blocks (and possibly other allocations
+// which we don't care about).
+// On all other platforms, this simply checks to ensure that the caller pc is
+// valid before reporting chunks as leaked.
+bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) {
+ uptr caller_pc = GetCallerPC(stack);
+ // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
+ // it as reachable, as we can't properly report its allocation stack anyway.
+ return !caller_pc ||
+ (suppress_module && suppress_module->containsAddress(caller_pc));
+}
+
+bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack,
+ uptr hit_count, uptr total_size) {
+ for (uptr i = 0; i < stack.size; i++) {
+ Suppression *s = GetSuppressionForAddr(
+ StackTrace::GetPreviousInstructionPc(stack.trace[i]));
+ if (s) {
+ s->weight += total_size;
+ atomic_fetch_add(&s->hit_count, hit_count, memory_order_relaxed);
+ return true;
+ }
+ }
+ return false;
+}
+
+bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count,
+ uptr total_size) {
+ LazyInit();
+ StackTrace stack = StackDepotGet(stack_trace_id);
+ if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size))
+ return false;
+ suppressed_stacks_sorted = false;
+ suppressed_stacks.push_back(stack_trace_id);
+ return true;
+}
+
static LeakSuppressionContext *GetSuppressionContext() {
CHECK(suppression_ctx);
return suppression_ctx;
@@ -146,9 +232,9 @@ void InitCommonLsan() {
}
}
-class Decorator: public __sanitizer::SanitizerCommonDecorator {
+class Decorator : public __sanitizer::SanitizerCommonDecorator {
public:
- Decorator() : SanitizerCommonDecorator() { }
+ Decorator() : SanitizerCommonDecorator() {}
const char *Error() { return Red(); }
const char *Leak() { return Blue(); }
};
@@ -157,19 +243,19 @@ static inline bool CanBeAHeapPointer(uptr p) {
// Since our heap is located in mmap-ed memory, we can assume a sensible lower
// bound on heap addresses.
const uptr kMinAddress = 4 * 4096;
- if (p < kMinAddress) return false;
-#if defined(__x86_64__)
+ if (p < kMinAddress)
+ return false;
+# if defined(__x86_64__)
// Accept only canonical form user-space addresses.
return ((p >> 47) == 0);
-#elif defined(__mips64)
+# elif defined(__mips64)
return ((p >> 40) == 0);
-#elif defined(__aarch64__)
- unsigned runtimeVMA =
- (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
+# elif defined(__aarch64__)
+ unsigned runtimeVMA = (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
return ((p >> runtimeVMA) == 0);
-#else
+# else
return true;
-#endif
+# endif
}
// Scans the memory range, looking for byte patterns that point into allocator
@@ -178,8 +264,7 @@ static inline bool CanBeAHeapPointer(uptr p) {
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
-void ScanRangeForPointers(uptr begin, uptr end,
- Frontier *frontier,
+void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
const char *region_type, ChunkTag tag) {
CHECK(tag == kReachable || tag == kIndirectlyLeaked);
const uptr alignment = flags()->pointer_alignment();
@@ -190,13 +275,17 @@ void ScanRangeForPointers(uptr begin, uptr end,
pp = pp + alignment - pp % alignment;
for (; pp + sizeof(void *) <= end; pp += alignment) {
void *p = *reinterpret_cast<void **>(pp);
- if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
+ if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p)))
+ continue;
uptr chunk = PointsIntoChunk(p);
- if (!chunk) continue;
+ if (!chunk)
+ continue;
// Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
- if (chunk == begin) continue;
+ if (chunk == begin)
+ continue;
LsanMetadata m(chunk);
- if (m.tag() == kReachable || m.tag() == kIgnored) continue;
+ if (m.tag() == kReachable || m.tag() == kIgnored)
+ continue;
// Do this check relatively late so we can log only the interesting cases.
if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
@@ -234,23 +323,23 @@ void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
}
}
-void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
+void ForEachExtraStackRangeCb(uptr begin, uptr end, void *arg) {
Frontier *frontier = reinterpret_cast<Frontier *>(arg);
ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}
-#if SANITIZER_FUCHSIA
+# if SANITIZER_FUCHSIA
// Fuchsia handles all threads together with its own callback.
static void ProcessThreads(SuspendedThreadsList const &, Frontier *) {}
-#else
+# else
-#if SANITIZER_ANDROID
+# if SANITIZER_ANDROID
// FIXME: Move this out into *libcdep.cpp
extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
pid_t, void (*cb)(void *, void *, uptr, void *), void *);
-#endif
+# endif
static void ProcessThreadRegistry(Frontier *frontier) {
InternalMmapVector<uptr> ptrs;
@@ -282,9 +371,9 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
LOG_THREADS("Processing thread %llu.\n", os_id);
uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
DTLS *dtls;
- bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
- &tls_begin, &tls_end,
- &cache_begin, &cache_end, &dtls);
+ bool thread_found =
+ GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin,
+ &tls_end, &cache_begin, &cache_end, &dtls);
if (!thread_found) {
// If a thread can't be found in the thread registry, it's probably in the
// process of destruction. Log this event and move on.
@@ -298,7 +387,8 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
Report("Unable to get registers from thread %llu.\n", os_id);
// If unable to get SP, consider the entire stack to be reachable unless
// GetRegistersAndSP failed with ESRCH.
- if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue;
+ if (have_registers == REGISTERS_UNAVAILABLE_FATAL)
+ continue;
sp = stack_begin;
}
@@ -353,7 +443,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
kReachable);
}
}
-#if SANITIZER_ANDROID
+# if SANITIZER_ANDROID
auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_idd*/,
void *arg) -> void {
ScanRangeForPointers(reinterpret_cast<uptr>(dtls_begin),
@@ -366,7 +456,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
// thread is suspended in the middle of updating its DTLS. IOWs, we
// could scan already freed memory. (probably fine for now)
__libc_iterate_dynamic_tls(os_id, cb, frontier);
-#else
+# else
if (dtls && !DTLSInDestruction(dtls)) {
ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
uptr dtls_beg = dtv.beg;
@@ -383,7 +473,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
// this and continue.
LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
}
-#endif
+# endif
}
}
@@ -391,13 +481,14 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
ProcessThreadRegistry(frontier);
}
-#endif // SANITIZER_FUCHSIA
+# endif // SANITIZER_FUCHSIA
void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
uptr region_begin, uptr region_end, bool is_readable) {
uptr intersection_begin = Max(root_region.begin, region_begin);
uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
- if (intersection_begin >= intersection_end) return;
+ if (intersection_begin >= intersection_end)
+ return;
LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
(void *)root_region.begin,
(void *)(root_region.begin + root_region.size),
@@ -420,7 +511,8 @@ static void ProcessRootRegion(Frontier *frontier,
// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
- if (!flags()->use_root_regions) return;
+ if (!flags()->use_root_regions)
+ return;
for (uptr i = 0; i < root_regions.size(); i++)
ProcessRootRegion(frontier, root_regions[i]);
}
@@ -477,68 +569,6 @@ static void CollectIgnoredCb(uptr chunk, void *arg) {
}
}
-static uptr GetCallerPC(const StackTrace &stack) {
- // The top frame is our malloc/calloc/etc. The next frame is the caller.
- if (stack.size >= 2)
- return stack.trace[1];
- return 0;
-}
-
-struct InvalidPCParam {
- Frontier *frontier;
- bool skip_linker_allocations;
-};
-
-// ForEachChunk callback. If the caller pc is invalid or is within the linker,
-// mark as reachable. Called by ProcessPlatformSpecificAllocations.
-static void MarkInvalidPCCb(uptr chunk, void *arg) {
- CHECK(arg);
- InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
- chunk = GetUserBegin(chunk);
- LsanMetadata m(chunk);
- if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
- u32 stack_id = m.stack_trace_id();
- uptr caller_pc = 0;
- if (stack_id > 0)
- caller_pc = GetCallerPC(StackDepotGet(stack_id));
- // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
- // it as reachable, as we can't properly report its allocation stack anyway.
- if (caller_pc == 0 || (param->skip_linker_allocations &&
- GetLinker()->containsAddress(caller_pc))) {
- m.set_tag(kReachable);
- param->frontier->push_back(chunk);
- }
- }
-}
-
-// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
-// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
-// modules accounting etc.
-// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
-// They are allocated with a __libc_memalign() call in allocate_and_init()
-// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
-// blocks, but we can make sure they come from our own allocator by intercepting
-// __libc_memalign(). On top of that, there is no easy way to reach them. Their
-// addresses are stored in a dynamically allocated array (the DTV) which is
-// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
-// being reachable from the static TLS, and the dynamic TLS being reachable from
-// the DTV. This is because the initial DTV is allocated before our interception
-// mechanism kicks in, and thus we don't recognize it as allocated memory. We
-// can't special-case it either, since we don't know its size.
-// Our solution is to include in the root set all allocations made from
-// ld-linux.so (which is where allocate_and_init() is implemented). This is
-// guaranteed to include all dynamic TLS blocks (and possibly other allocations
-// which we don't care about).
-// On all other platforms, this simply checks to ensure that the caller pc is
-// valid before reporting chunks as leaked.
-static void ProcessPC(Frontier *frontier) {
- InvalidPCParam arg;
- arg.frontier = frontier;
- arg.skip_linker_allocations =
- flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
- ForEachChunk(MarkInvalidPCCb, &arg);
-}
-
// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
Frontier *frontier) {
@@ -554,9 +584,6 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
ProcessRootRegions(frontier);
FloodFillTag(frontier, kReachable);
- CHECK_EQ(0, frontier->size());
- ProcessPC(frontier);
-
// The check here is relatively expensive, so we do this in a separate flood
// fill. That way we can skip the check for chunks that are reachable
// otherwise.
@@ -583,14 +610,13 @@ static void ResetTagsCb(uptr chunk, void *arg) {
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
CHECK(arg);
- LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
+ LeakedChunks *leaks = reinterpret_cast<LeakedChunks *>(arg);
chunk = GetUserBegin(chunk);
LsanMetadata m(chunk);
- if (!m.allocated()) return;
- if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
- leak_report->AddLeakedChunk(chunk, m.stack_trace_id(), m.requested_size(),
- m.tag());
- }
+ if (!m.allocated())
+ return;
+ if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked)
+ leaks->push_back({chunk, m.stack_trace_id(), m.requested_size(), m.tag()});
}
void LeakSuppressionContext::PrintMatchedSuppressions() {
@@ -622,13 +648,13 @@ static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
}
}
-#if SANITIZER_FUCHSIA
+# if SANITIZER_FUCHSIA
// Fuchsia provides a libc interface that guarantees all threads are
// covered, and SuspendedThreadList is never really used.
static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}
-#else // !SANITIZER_FUCHSIA
+# else // !SANITIZER_FUCHSIA
static void ReportUnsuspendedThreads(
const SuspendedThreadsList &suspended_threads) {
@@ -642,7 +668,7 @@ static void ReportUnsuspendedThreads(
&ReportIfNotSuspended, &threads);
}
-#endif // !SANITIZER_FUCHSIA
+# endif // !SANITIZER_FUCHSIA
static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
void *arg) {
@@ -651,7 +677,7 @@ static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
CHECK(!param->success);
ReportUnsuspendedThreads(suspended_threads);
ClassifyAllChunks(suspended_threads, &param->frontier);
- ForEachChunk(CollectLeaksCb, &param->leak_report);
+ ForEachChunk(CollectLeaksCb, &param->leaks);
// Clean up for subsequent leak checks. This assumes we did not overwrite any
// kIgnored tags.
ForEachChunk(ResetTagsCb, nullptr);
@@ -700,17 +726,20 @@ static bool CheckForLeaks() {
"etc)\n");
Die();
}
+ LeakReport leak_report;
+ leak_report.AddLeakedChunks(param.leaks);
+
// No new suppressions stacks, so rerun will not help and we can report.
- if (!param.leak_report.ApplySuppressions())
- return PrintResults(param.leak_report);
+ if (!leak_report.ApplySuppressions())
+ return PrintResults(leak_report);
// No indirect leaks to report, so we are done here.
- if (!param.leak_report.IndirectUnsuppressedLeakCount())
- return PrintResults(param.leak_report);
+ if (!leak_report.IndirectUnsuppressedLeakCount())
+ return PrintResults(leak_report);
if (i >= 8) {
Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
- return PrintResults(param.leak_report);
+ return PrintResults(leak_report);
}
// We found a new previously unseen suppressed call stack. Rerun to make
@@ -726,10 +755,12 @@ bool HasReportedLeaks() { return has_reported_leaks; }
void DoLeakCheck() {
Lock l(&global_mutex);
static bool already_done;
- if (already_done) return;
+ if (already_done)
+ return;
already_done = true;
has_reported_leaks = CheckForLeaks();
- if (has_reported_leaks) HandleLeaks();
+ if (has_reported_leaks)
+ HandleLeaks();
}
static int DoRecoverableLeakCheck() {
@@ -740,80 +771,50 @@ static int DoRecoverableLeakCheck() {
void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
-Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
- Suppression *s = nullptr;
-
- // Suppress by module name.
- if (const char *module_name =
- Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
- if (context.Match(module_name, kSuppressionLeak, &s))
- return s;
-
- // Suppress by file or function name.
- SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
- for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
- if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
- context.Match(cur->info.file, kSuppressionLeak, &s)) {
- break;
- }
- }
- frames->ClearAll();
- return s;
-}
-
-Suppression *LeakSuppressionContext::GetSuppressionForStack(
- u32 stack_trace_id, const StackTrace &stack) {
- LazyInit();
- for (uptr i = 0; i < stack.size; i++) {
- Suppression *s = GetSuppressionForAddr(
- StackTrace::GetPreviousInstructionPc(stack.trace[i]));
- if (s) {
- suppressed_stacks_sorted = false;
- suppressed_stacks.push_back(stack_trace_id);
- return s;
- }
- }
- return nullptr;
-}
-
///// LeakReport implementation. /////
// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
-// FIXME: Get rid of this limit by changing the implementation of LeakReport to
-// use a hash table.
+// FIXME: Get rid of this limit by moving logic into DedupLeaks.
const uptr kMaxLeaksConsidered = 5000;
-void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
- uptr leaked_size, ChunkTag tag) {
- CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
+void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) {
+ for (const LeakedChunk &leak : chunks) {
+ uptr chunk = leak.chunk;
+ u32 stack_trace_id = leak.stack_trace_id;
+ uptr leaked_size = leak.leaked_size;
+ ChunkTag tag = leak.tag;
+ CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
- if (u32 resolution = flags()->resolution) {
- StackTrace stack = StackDepotGet(stack_trace_id);
- stack.size = Min(stack.size, resolution);
- stack_trace_id = StackDepotPut(stack);
- }
+ if (u32 resolution = flags()->resolution) {
+ StackTrace stack = StackDepotGet(stack_trace_id);
+ stack.size = Min(stack.size, resolution);
+ stack_trace_id = StackDepotPut(stack);
+ }
- bool is_directly_leaked = (tag == kDirectlyLeaked);
- uptr i;
- for (i = 0; i < leaks_.size(); i++) {
- if (leaks_[i].stack_trace_id == stack_trace_id &&
- leaks_[i].is_directly_leaked == is_directly_leaked) {
- leaks_[i].hit_count++;
- leaks_[i].total_size += leaked_size;
- break;
+ bool is_directly_leaked = (tag == kDirectlyLeaked);
+ uptr i;
+ for (i = 0; i < leaks_.size(); i++) {
+ if (leaks_[i].stack_trace_id == stack_trace_id &&
+ leaks_[i].is_directly_leaked == is_directly_leaked) {
+ leaks_[i].hit_count++;
+ leaks_[i].total_size += leaked_size;
+ break;
+ }
+ }
+ if (i == leaks_.size()) {
+ if (leaks_.size() == kMaxLeaksConsidered)
+ return;
+ Leak leak = {next_id_++, /* hit_count */ 1,
+ leaked_size, stack_trace_id,
+ is_directly_leaked, /* is_suppressed */ false};
+ leaks_.push_back(leak);
+ }
+ if (flags()->report_objects) {
+ LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
+ leaked_objects_.push_back(obj);
}
- }
- if (i == leaks_.size()) {
- if (leaks_.size() == kMaxLeaksConsidered) return;
- Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
- is_directly_leaked, /* is_suppressed */ false };
- leaks_.push_back(leak);
- }
- if (flags()->report_objects) {
- LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
- leaked_objects_.push_back(obj);
}
}
@@ -828,9 +829,10 @@ void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
CHECK(leaks_.size() <= kMaxLeaksConsidered);
Printf("\n");
if (leaks_.size() == kMaxLeaksConsidered)
- Printf("Too many leaks! Only the first %zu leaks encountered will be "
- "reported.\n",
- kMaxLeaksConsidered);
+ Printf(
+ "Too many leaks! Only the first %zu leaks encountered will be "
+ "reported.\n",
+ kMaxLeaksConsidered);
uptr unsuppressed_count = UnsuppressedLeakCount();
if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
@@ -838,10 +840,12 @@ void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
Sort(leaks_.data(), leaks_.size(), &LeakComparator);
uptr leaks_reported = 0;
for (uptr i = 0; i < leaks_.size(); i++) {
- if (leaks_[i].is_suppressed) continue;
+ if (leaks_[i].is_suppressed)
+ continue;
PrintReportForLeak(i);
leaks_reported++;
- if (leaks_reported == num_leaks_to_report) break;
+ if (leaks_reported == num_leaks_to_report)
+ break;
}
if (leaks_reported < unsuppressed_count) {
uptr remaining = unsuppressed_count - leaks_reported;
@@ -880,9 +884,10 @@ void LeakReport::PrintSummary() {
CHECK(leaks_.size() <= kMaxLeaksConsidered);
uptr bytes = 0, allocations = 0;
for (uptr i = 0; i < leaks_.size(); i++) {
- if (leaks_[i].is_suppressed) continue;
- bytes += leaks_[i].total_size;
- allocations += leaks_[i].hit_count;
+ if (leaks_[i].is_suppressed)
+ continue;
+ bytes += leaks_[i].total_size;
+ allocations += leaks_[i].hit_count;
}
InternalScopedString summary;
summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
@@ -894,12 +899,8 @@ uptr LeakReport::ApplySuppressions() {
LeakSuppressionContext *suppressions = GetSuppressionContext();
uptr new_suppressions = false;
for (uptr i = 0; i < leaks_.size(); i++) {
- Suppression *s = suppressions->GetSuppressionForStack(
- leaks_[i].stack_trace_id, StackDepotGet(leaks_[i].stack_trace_id));
- if (s) {
- s->weight += leaks_[i].total_size;
- atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
- leaks_[i].hit_count);
+ if (suppressions->Suppress(leaks_[i].stack_trace_id, leaks_[i].hit_count,
+ leaks_[i].total_size)) {
leaks_[i].is_suppressed = true;
++new_suppressions;
}
@@ -910,7 +911,8 @@ uptr LeakReport::ApplySuppressions() {
uptr LeakReport::UnsuppressedLeakCount() {
uptr result = 0;
for (uptr i = 0; i < leaks_.size(); i++)
- if (!leaks_[i].is_suppressed) result++;
+ if (!leaks_[i].is_suppressed)
+ result++;
return result;
}
@@ -922,16 +924,16 @@ uptr LeakReport::IndirectUnsuppressedLeakCount() {
return result;
}
-} // namespace __lsan
-#else // CAN_SANITIZE_LEAKS
+} // namespace __lsan
+#else // CAN_SANITIZE_LEAKS
namespace __lsan {
-void InitCommonLsan() { }
-void DoLeakCheck() { }
-void DoRecoverableLeakCheckVoid() { }
-void DisableInThisThread() { }
-void EnableInThisThread() { }
-}
-#endif // CAN_SANITIZE_LEAKS
+void InitCommonLsan() {}
+void DoLeakCheck() {}
+void DoRecoverableLeakCheckVoid() {}
+void DisableInThisThread() {}
+void EnableInThisThread() {}
+} // namespace __lsan
+#endif // CAN_SANITIZE_LEAKS
using namespace __lsan;
@@ -948,11 +950,13 @@ void __lsan_ignore_object(const void *p) {
if (res == kIgnoreObjectInvalid)
VReport(1, "__lsan_ignore_object(): no heap object found at %p", p);
if (res == kIgnoreObjectAlreadyIgnored)
- VReport(1, "__lsan_ignore_object(): "
- "heap object at %p is already being ignored\n", p);
+ VReport(1,
+ "__lsan_ignore_object(): "
+ "heap object at %p is already being ignored\n",
+ p);
if (res == kIgnoreObjectSuccess)
VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
-#endif // CAN_SANITIZE_LEAKS
+#endif // CAN_SANITIZE_LEAKS
}
SANITIZER_INTERFACE_ATTRIBUTE
@@ -962,7 +966,7 @@ void __lsan_register_root_region(const void *begin, uptr size) {
RootRegion region = {reinterpret_cast<uptr>(begin), size};
root_regions.push_back(region);
VReport(1, "Registered root region at %p of size %zu\n", begin, size);
-#endif // CAN_SANITIZE_LEAKS
+#endif // CAN_SANITIZE_LEAKS
}
SANITIZER_INTERFACE_ATTRIBUTE
@@ -988,7 +992,7 @@ void __lsan_unregister_root_region(const void *begin, uptr size) {
begin, size);
Die();
}
-#endif // CAN_SANITIZE_LEAKS
+#endif // CAN_SANITIZE_LEAKS
}
SANITIZER_INTERFACE_ATTRIBUTE
@@ -1010,7 +1014,7 @@ void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
if (common_flags()->detect_leaks)
__lsan::DoLeakCheck();
-#endif // CAN_SANITIZE_LEAKS
+#endif // CAN_SANITIZE_LEAKS
}
SANITIZER_INTERFACE_ATTRIBUTE
@@ -1018,7 +1022,7 @@ int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
if (common_flags()->detect_leaks)
return __lsan::DoRecoverableLeakCheck();
-#endif // CAN_SANITIZE_LEAKS
+#endif // CAN_SANITIZE_LEAKS
return 0;
}
@@ -1027,14 +1031,14 @@ SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
}
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-int __lsan_is_turned_off() {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE int
+__lsan_is_turned_off() {
return 0;
}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-const char *__lsan_default_suppressions() {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char *
+__lsan_default_suppressions() {
return "";
}
#endif
-} // extern "C"
+} // extern "C"
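
Note: the refactored LeakSuppressionContext keeps the user-visible behavior: each frame of a leak's allocation stack is still matched against "leak:" rules by module, function, and file name. For reference, a typical suppression file and how it is passed in (the file name here is only an example):

    # suppressions.txt
    leak:libcrypto.so          # suppress by module name
    leak:ThirdPartyPoolInit    # suppress by function name

    LSAN_OPTIONS=suppressions=suppressions.txt ./a.out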
diff --git a/compiler-rt/lib/lsan/lsan_common.h b/compiler-rt/lib/lsan/lsan_common.h
index f9b55e4e8006..61b64d4dc30f 100644
--- a/compiler-rt/lib/lsan/lsan_common.h
+++ b/compiler-rt/lib/lsan/lsan_common.h
@@ -33,21 +33,21 @@
// Exclude leak-detection on arm32 for Android because `__aeabi_read_tp`
// is missing. This caused a link error.
#if SANITIZER_ANDROID && (__ANDROID_API__ < 28 || defined(__arm__))
-#define CAN_SANITIZE_LEAKS 0
+# define CAN_SANITIZE_LEAKS 0
#elif (SANITIZER_LINUX || SANITIZER_MAC) && (SANITIZER_WORDSIZE == 64) && \
(defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \
defined(__powerpc64__) || defined(__s390x__))
-#define CAN_SANITIZE_LEAKS 1
+# define CAN_SANITIZE_LEAKS 1
#elif defined(__i386__) && (SANITIZER_LINUX || SANITIZER_MAC)
-#define CAN_SANITIZE_LEAKS 1
+# define CAN_SANITIZE_LEAKS 1
#elif defined(__arm__) && SANITIZER_LINUX
-#define CAN_SANITIZE_LEAKS 1
+# define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_RISCV64 && SANITIZER_LINUX
-#define CAN_SANITIZE_LEAKS 1
+# define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_NETBSD || SANITIZER_FUCHSIA
-#define CAN_SANITIZE_LEAKS 1
+# define CAN_SANITIZE_LEAKS 1
#else
-#define CAN_SANITIZE_LEAKS 0
+# define CAN_SANITIZE_LEAKS 0
#endif
namespace __sanitizer {
@@ -82,6 +82,15 @@ extern Flags lsan_flags;
inline Flags *flags() { return &lsan_flags; }
void RegisterLsanFlags(FlagParser *parser, Flags *f);
+struct LeakedChunk {
+ uptr chunk;
+ u32 stack_trace_id;
+ uptr leaked_size;
+ ChunkTag tag;
+};
+
+using LeakedChunks = InternalMmapVector<LeakedChunk>;
+
struct Leak {
u32 id;
uptr hit_count;
@@ -101,8 +110,7 @@ struct LeakedObject {
class LeakReport {
public:
LeakReport() {}
- void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size,
- ChunkTag tag);
+ void AddLeakedChunks(const LeakedChunks &chunks);
void ReportTopLeaks(uptr max_leaks);
void PrintSummary();
uptr ApplySuppressions();
@@ -136,7 +144,7 @@ struct RootRegion {
// threads and enumerating roots.
struct CheckForLeaksParam {
Frontier frontier;
- LeakReport leak_report;
+ LeakedChunks leaks;
bool success = false;
};
@@ -224,6 +232,22 @@ bool WordIsPoisoned(uptr addr);
// Wrappers for ThreadRegistry access.
void LockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
void UnlockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
+
+struct ScopedStopTheWorldLock {
+ ScopedStopTheWorldLock() {
+ LockThreadRegistry();
+ LockAllocator();
+ }
+
+ ~ScopedStopTheWorldLock() {
+ UnlockAllocator();
+ UnlockThreadRegistry();
+ }
+
+ ScopedStopTheWorldLock &operator=(const ScopedStopTheWorldLock &) = delete;
+ ScopedStopTheWorldLock(const ScopedStopTheWorldLock &) = delete;
+};
+
ThreadRegistry *GetThreadRegistryLocked();
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
diff --git a/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp b/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp
index 2d35fa5b1cff..e5e8cd2b4c23 100644
--- a/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp
+++ b/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp
@@ -58,8 +58,7 @@ int ExitHook(int status) {
void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
CheckForLeaksParam *argument) {
- LockThreadRegistry();
- LockAllocator();
+ ScopedStopTheWorldLock lock;
struct Params {
InternalMmapVector<uptr> allocator_caches;
@@ -149,9 +148,6 @@ void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
params->callback(SuspendedThreadsListFuchsia(), params->argument);
},
&params);
-
- UnlockAllocator();
- UnlockThreadRegistry();
}
} // namespace __lsan
diff --git a/compiler-rt/lib/lsan/lsan_common_linux.cpp b/compiler-rt/lib/lsan/lsan_common_linux.cpp
index 3af586e220f6..692ad35169e1 100644
--- a/compiler-rt/lib/lsan/lsan_common_linux.cpp
+++ b/compiler-rt/lib/lsan/lsan_common_linux.cpp
@@ -122,12 +122,9 @@ void HandleLeaks() {
static int LockStuffAndStopTheWorldCallback(struct dl_phdr_info *info,
size_t size, void *data) {
- LockThreadRegistry();
- LockAllocator();
+ ScopedStopTheWorldLock lock;
DoStopTheWorldParam *param = reinterpret_cast<DoStopTheWorldParam *>(data);
StopTheWorld(param->callback, param->argument);
- UnlockAllocator();
- UnlockThreadRegistry();
return 1;
}
diff --git a/compiler-rt/lib/lsan/lsan_common_mac.cpp b/compiler-rt/lib/lsan/lsan_common_mac.cpp
index 4301dcc615d7..6e8a6dfe16b4 100644
--- a/compiler-rt/lib/lsan/lsan_common_mac.cpp
+++ b/compiler-rt/lib/lsan/lsan_common_mac.cpp
@@ -195,11 +195,8 @@ void HandleLeaks() {}
void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
CheckForLeaksParam *argument) {
- LockThreadRegistry();
- LockAllocator();
+ ScopedStopTheWorldLock lock;
StopTheWorld(callback, argument);
- UnlockAllocator();
- UnlockThreadRegistry();
}
} // namespace __lsan
diff --git a/compiler-rt/lib/lsan/lsan_interceptors.cpp b/compiler-rt/lib/lsan/lsan_interceptors.cpp
index ee723f210c9d..205e85685a7f 100644
--- a/compiler-rt/lib/lsan/lsan_interceptors.cpp
+++ b/compiler-rt/lib/lsan/lsan_interceptors.cpp
@@ -479,6 +479,12 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr,
return res;
}
+INTERCEPTOR(int, pthread_join, void *t, void **arg) {
+ return REAL(pthread_join)(t, arg);
+}
+
+DEFINE_REAL_PTHREAD_FUNCTIONS
+
INTERCEPTOR(void, _exit, int status) {
if (status == 0 && HasReportedLeaks()) status = common_flags()->exitcode;
REAL(_exit)(status);
@@ -511,6 +517,7 @@ void InitializeInterceptors() {
LSAN_MAYBE_INTERCEPT_MALLINFO;
LSAN_MAYBE_INTERCEPT_MALLOPT;
INTERCEPT_FUNCTION(pthread_create);
+ INTERCEPT_FUNCTION(pthread_join);
INTERCEPT_FUNCTION(_exit);
LSAN_MAYBE_INTERCEPT__LWP_EXIT;
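
Note: the pthread_join interceptor here is a pure pass-through; the operative part is DEFINE_REAL_PTHREAD_FUNCTIONS, which defines real_pthread_create and real_pthread_join for internal use (the shared background thread further down bails out when &real_pthread_create is null). HWASan and MSan gain the same pair in this change. As a sketch, the minimal shape in any interceptor file is:

    // Pass-through shape used above, built from the sanitizer interceptor macros.
    INTERCEPTOR(int, pthread_join, void *thread, void **retval) {
      return REAL(pthread_join)(thread, retval);
    }
    DEFINE_REAL_PTHREAD_FUNCTIONS  // exposes real_pthread_create / real_pthread_join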
diff --git a/compiler-rt/lib/memprof/memprof_allocator.cpp b/compiler-rt/lib/memprof/memprof_allocator.cpp
index 059ce283b8c9..adbdf365bc4c 100644
--- a/compiler-rt/lib/memprof/memprof_allocator.cpp
+++ b/compiler-rt/lib/memprof/memprof_allocator.cpp
@@ -218,7 +218,6 @@ struct Allocator {
AllocatorCache fallback_allocator_cache;
uptr max_user_defined_malloc_size;
- atomic_uint8_t rss_limit_exceeded;
// Holds the mapping of stack ids to MemInfoBlocks.
MIBMapTy MIBMap;
@@ -301,20 +300,12 @@ struct Allocator {
: kMaxAllowedMallocSize;
}
- bool RssLimitExceeded() {
- return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
- }
-
- void SetRssLimitExceeded(bool limit_exceeded) {
- atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
- }
-
// -------------------- Allocation/Deallocation routines ---------------
void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
AllocType alloc_type) {
if (UNLIKELY(!memprof_inited))
MemprofInitFromRtl();
- if (RssLimitExceeded()) {
+ if (UNLIKELY(IsRssLimitExceeded())) {
if (AllocatorMayReturnNull())
return nullptr;
ReportRssLimitExceeded(stack);
@@ -662,10 +653,6 @@ uptr memprof_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
return usable_size;
}
-void MemprofSoftRssLimitExceededCallback(bool limit_exceeded) {
- instance.SetRssLimitExceeded(limit_exceeded);
-}
-
} // namespace __memprof
// ---------------------- Interface ---------------- {{{1
diff --git a/compiler-rt/lib/memprof/memprof_allocator.h b/compiler-rt/lib/memprof/memprof_allocator.h
index f1438baaa20e..001502cde08a 100644
--- a/compiler-rt/lib/memprof/memprof_allocator.h
+++ b/compiler-rt/lib/memprof/memprof_allocator.h
@@ -98,7 +98,6 @@ int memprof_posix_memalign(void **memptr, uptr alignment, uptr size,
uptr memprof_malloc_usable_size(const void *ptr, uptr pc, uptr bp);
void PrintInternalAllocatorStats();
-void MemprofSoftRssLimitExceededCallback(bool exceeded);
} // namespace __memprof
#endif // MEMPROF_ALLOCATOR_H
diff --git a/compiler-rt/lib/memprof/memprof_rtl.cpp b/compiler-rt/lib/memprof/memprof_rtl.cpp
index fb2ef37e51a2..c3d1c5f096fb 100644
--- a/compiler-rt/lib/memprof/memprof_rtl.cpp
+++ b/compiler-rt/lib/memprof/memprof_rtl.cpp
@@ -133,13 +133,6 @@ void PrintAddressSpaceLayout() {
CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7);
}
-static bool UNUSED __local_memprof_dyninit = [] {
- MaybeStartBackgroudThread();
- SetSoftRssLimitExceededCallback(MemprofSoftRssLimitExceededCallback);
-
- return false;
-}();
-
static void MemprofInitInternal() {
if (LIKELY(memprof_inited))
return;
diff --git a/compiler-rt/lib/msan/msan_allocator.cpp b/compiler-rt/lib/msan/msan_allocator.cpp
index a97bd8371e08..dc006457a59f 100644
--- a/compiler-rt/lib/msan/msan_allocator.cpp
+++ b/compiler-rt/lib/msan/msan_allocator.cpp
@@ -160,6 +160,11 @@ static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
}
ReportAllocationSizeTooBig(size, max_malloc_size, stack);
}
+ if (UNLIKELY(IsRssLimitExceeded())) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportRssLimitExceeded(stack);
+ }
MsanThread *t = GetCurrentThread();
void *allocated;
if (t) {
diff --git a/compiler-rt/lib/msan/msan_interceptors.cpp b/compiler-rt/lib/msan/msan_interceptors.cpp
index eaa3b3ae9404..df0cdecf79c7 100644
--- a/compiler-rt/lib/msan/msan_interceptors.cpp
+++ b/compiler-rt/lib/msan/msan_interceptors.cpp
@@ -1065,6 +1065,8 @@ INTERCEPTOR(int, pthread_join, void *th, void **retval) {
return res;
}
+DEFINE_REAL_PTHREAD_FUNCTIONS
+
extern char *tzname[2];
INTERCEPTOR(void, tzset, int fake) {
@@ -1705,6 +1707,7 @@ void InitializeInterceptors() {
#else
INTERCEPT_FUNCTION(pthread_create);
#endif
+ INTERCEPT_FUNCTION(pthread_join);
INTERCEPT_FUNCTION(pthread_key_create);
#if SANITIZER_NETBSD
diff --git a/compiler-rt/lib/profile/InstrProfiling.c b/compiler-rt/lib/profile/InstrProfiling.c
index 6df65f66df73..7c1d357d96fe 100644
--- a/compiler-rt/lib/profile/InstrProfiling.c
+++ b/compiler-rt/lib/profile/InstrProfiling.c
@@ -38,7 +38,7 @@ __llvm_profile_get_num_padding_bytes(uint64_t SizeInBytes) {
}
COMPILER_RT_VISIBILITY uint64_t __llvm_profile_get_version(void) {
- return __llvm_profile_raw_version;
+ return INSTR_PROF_RAW_VERSION_VAR;
}
COMPILER_RT_VISIBILITY void __llvm_profile_reset_counters(void) {
diff --git a/compiler-rt/lib/profile/InstrProfilingMerge.c b/compiler-rt/lib/profile/InstrProfilingMerge.c
index 80db2527461e..bf99521d4da7 100644
--- a/compiler-rt/lib/profile/InstrProfilingMerge.c
+++ b/compiler-rt/lib/profile/InstrProfilingMerge.c
@@ -95,6 +95,14 @@ static uintptr_t signextIfWin64(void *V) {
COMPILER_RT_VISIBILITY
int __llvm_profile_merge_from_buffer(const char *ProfileData,
uint64_t ProfileSize) {
+ if (__llvm_profile_get_version() & VARIANT_MASK_DBG_CORRELATE) {
+ PROF_ERR(
+ "%s\n",
+ "Debug info correlation does not support profile merging at runtime. "
+ "Instead, merge raw profiles using the llvm-profdata tool.");
+ return 1;
+ }
+
__llvm_profile_data *SrcDataStart, *SrcDataEnd, *SrcData, *DstData;
__llvm_profile_header *Header = (__llvm_profile_header *)ProfileData;
uint64_t *SrcCountersStart;
diff --git a/compiler-rt/lib/profile/InstrProfilingWriter.c b/compiler-rt/lib/profile/InstrProfilingWriter.c
index 9cb05570989d..e5c0dc1479d9 100644
--- a/compiler-rt/lib/profile/InstrProfilingWriter.c
+++ b/compiler-rt/lib/profile/InstrProfilingWriter.c
@@ -259,16 +259,19 @@ lprofWriteDataImpl(ProfDataWriter *Writer, const __llvm_profile_data *DataBegin,
const uint64_t *CountersBegin, const uint64_t *CountersEnd,
VPDataReaderType *VPDataReader, const char *NamesBegin,
const char *NamesEnd, int SkipNameDataWrite) {
+ int DebugInfoCorrelate =
+ (__llvm_profile_get_version() & VARIANT_MASK_DBG_CORRELATE) != 0ULL;
/* Calculate size of sections. */
- const uint64_t DataSize = __llvm_profile_get_data_size(DataBegin, DataEnd);
+ const uint64_t DataSize =
+ DebugInfoCorrelate ? 0 : __llvm_profile_get_data_size(DataBegin, DataEnd);
const uint64_t CountersSize = CountersEnd - CountersBegin;
- const uint64_t NamesSize = NamesEnd - NamesBegin;
+ const uint64_t NamesSize = DebugInfoCorrelate ? 0 : NamesEnd - NamesBegin;
/* Create the header. */
__llvm_profile_header Header;
- if (!DataSize)
+ if (!DataSize && (!DebugInfoCorrelate || !CountersSize))
return 0;
/* Determine how much padding is needed before/after the counters and after
@@ -289,6 +292,12 @@ lprofWriteDataImpl(ProfDataWriter *Writer, const __llvm_profile_data *DataBegin,
Header.CountersDelta = (uint32_t)Header.CountersDelta;
#endif
+ /* The data and names sections are omitted in lightweight mode. */
+ if (DebugInfoCorrelate) {
+ Header.CountersDelta = 0;
+ Header.NamesDelta = 0;
+ }
+
/* Write the profile header. */
ProfDataIOVec IOVec[] = {{&Header, sizeof(__llvm_profile_header), 1, 0}};
if (Writer->Write(Writer, IOVec, sizeof(IOVec) / sizeof(*IOVec)))
@@ -300,11 +309,13 @@ lprofWriteDataImpl(ProfDataWriter *Writer, const __llvm_profile_data *DataBegin,
/* Write the profile data. */
ProfDataIOVec IOVecData[] = {
- {DataBegin, sizeof(__llvm_profile_data), DataSize, 0},
+ {DebugInfoCorrelate ? NULL : DataBegin, sizeof(__llvm_profile_data),
+ DataSize, 0},
{NULL, sizeof(uint8_t), PaddingBytesBeforeCounters, 1},
{CountersBegin, sizeof(uint64_t), CountersSize, 0},
{NULL, sizeof(uint8_t), PaddingBytesAfterCounters, 1},
- {SkipNameDataWrite ? NULL : NamesBegin, sizeof(uint8_t), NamesSize, 0},
+ {(SkipNameDataWrite || DebugInfoCorrelate) ? NULL : NamesBegin,
+ sizeof(uint8_t), NamesSize, 0},
{NULL, sizeof(uint8_t), PaddingBytesAfterNames, 1}};
if (Writer->Write(Writer, IOVecData, sizeof(IOVecData) / sizeof(*IOVecData)))
return -1;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
index af0b0949a88e..c5a5fb7371dd 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
@@ -17,6 +17,7 @@
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
+#include "sanitizer_platform.h"
namespace __sanitizer {
@@ -195,4 +196,14 @@ void PrintHintAllocatorCannotReturnNull() {
"allocator_may_return_null=1\n");
}
+static atomic_uint8_t rss_limit_exceeded;
+
+bool IsRssLimitExceeded() {
+ return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
+}
+
+void SetRssLimitExceeded(bool limit_exceeded) {
+ atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
+}
+
} // namespace __sanitizer
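
Note: IsRssLimitExceeded and SetRssLimitExceeded replace the per-tool callback with a single relaxed atomic flag: the background thread sets it when soft_rss_limit_mb is exceeded and clears it once RSS drops back, while each allocator polls it on its allocation path. A self-contained sketch of that flag pattern, using plain C++ atomics in place of the sanitizer atomics:

    #include <atomic>

    // One writer (a monitor thread) flips the flag, many readers poll it.
    // Relaxed ordering suffices because the flag carries no other data.
    static std::atomic<bool> rss_limit_exceeded{false};

    void MonitorSawRss(bool over_limit) {
      rss_limit_exceeded.store(over_limit, std::memory_order_relaxed);
    }

    bool AllocationShouldFail() {
      return rss_limit_exceeded.load(std::memory_order_relaxed);
    }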
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
index ec23465d9584..76b936ff5eaa 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
@@ -70,6 +70,9 @@ inline void RandomShuffle(T *a, u32 n, u32 *rand_state) {
#include "sanitizer_allocator_secondary.h"
#include "sanitizer_allocator_combined.h"
+bool IsRssLimitExceeded();
+void SetRssLimitExceeded(bool limit_exceeded);
+
} // namespace __sanitizer
#endif // SANITIZER_ALLOCATOR_H
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp
index 5fae8e33b905..e9379b7bdc96 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp
@@ -138,9 +138,17 @@ void LoadedModule::set(const char *module_name, uptr base_address,
set(module_name, base_address);
arch_ = arch;
internal_memcpy(uuid_, uuid, sizeof(uuid_));
+ uuid_size_ = kModuleUUIDSize;
instrumented_ = instrumented;
}
+void LoadedModule::setUuid(const char *uuid, uptr size) {
+ if (size > kModuleUUIDSize)
+ size = kModuleUUIDSize;
+ internal_memcpy(uuid_, uuid, size);
+ uuid_size_ = size;
+}
+
void LoadedModule::clear() {
InternalFree(full_name_);
base_address_ = 0;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common.h b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
index 6ec6bb4bd856..9ddb099a8dbc 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
@@ -326,12 +326,6 @@ void SetUserDieCallback(DieCallbackType callback);
void SetCheckUnwindCallback(void (*callback)());
-// Callback will be called if soft_rss_limit_mb is given and the limit is
-// exceeded (exceeded==true) or if rss went down below the limit
-// (exceeded==false).
-// The callback should be registered once at the tool init time.
-void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
-
// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
@@ -673,11 +667,9 @@ void Sort(T *v, uptr size, Compare comp = {}) {
// Works like std::lower_bound: finds the first element that is not less
// than the val.
-template <class Container,
+template <class Container, class T,
class Compare = CompareLess<typename Container::value_type>>
-uptr InternalLowerBound(const Container &v,
- const typename Container::value_type &val,
- Compare comp = {}) {
+uptr InternalLowerBound(const Container &v, const T &val, Compare comp = {}) {
uptr first = 0;
uptr last = v.size();
while (last > first) {
@@ -778,7 +770,7 @@ inline const char *ModuleArchToString(ModuleArch arch) {
return "";
}
-const uptr kModuleUUIDSize = 16;
+const uptr kModuleUUIDSize = 32;
const uptr kMaxSegName = 16;
// Represents a binary loaded into virtual memory (e.g. this can be an
@@ -790,6 +782,7 @@ class LoadedModule {
base_address_(0),
max_executable_address_(0),
arch_(kModuleArchUnknown),
+ uuid_size_(0),
instrumented_(false) {
internal_memset(uuid_, 0, kModuleUUIDSize);
ranges_.clear();
@@ -797,6 +790,7 @@ class LoadedModule {
void set(const char *module_name, uptr base_address);
void set(const char *module_name, uptr base_address, ModuleArch arch,
u8 uuid[kModuleUUIDSize], bool instrumented);
+ void setUuid(const char *uuid, uptr size);
void clear();
void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
const char *name = nullptr);
@@ -807,6 +801,7 @@ class LoadedModule {
uptr max_executable_address() const { return max_executable_address_; }
ModuleArch arch() const { return arch_; }
const u8 *uuid() const { return uuid_; }
+ uptr uuid_size() const { return uuid_size_; }
bool instrumented() const { return instrumented_; }
struct AddressRange {
@@ -835,6 +830,7 @@ class LoadedModule {
uptr base_address_;
uptr max_executable_address_;
ModuleArch arch_;
+ uptr uuid_size_;
u8 uuid_[kModuleUUIDSize];
bool instrumented_;
IntrusiveList<AddressRange> ranges_;
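
Note: raising kModuleUUIDSize from 16 to 32 and recording uuid_size_ lets LoadedModule carry variable-length identifiers, for example 20-byte GNU build IDs alongside 16-byte Mach-O UUIDs. A hedged sketch of the new setter; the function and values below are illustrative only:

    // Store a build ID of `len` bytes (e.g. a 20-byte NT_GNU_BUILD_ID payload);
    // setUuid truncates anything longer than kModuleUUIDSize (32) bytes.
    void RecordModule(const char *build_id, uptr len, uptr base_address) {
      LoadedModule module;
      module.set("libfoo.so", base_address);
      module.setUuid(build_id, len);
    }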
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
index d219734fa0a3..b0ab08dff1db 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
@@ -7857,12 +7857,12 @@ INTERCEPTOR(void, setbuf, __sanitizer_FILE *stream, char *buf) {
unpoison_file(stream);
}
-INTERCEPTOR(void, setbuffer, __sanitizer_FILE *stream, char *buf, int mode) {
+INTERCEPTOR(void, setbuffer, __sanitizer_FILE *stream, char *buf, SIZE_T size) {
void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, setbuffer, stream, buf, mode);
- REAL(setbuffer)(stream, buf, mode);
+ COMMON_INTERCEPTOR_ENTER(ctx, setbuffer, stream, buf, size);
+ REAL(setbuffer)(stream, buf, size);
if (buf) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer_bufsiz);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, size);
}
if (stream)
unpoison_file(stream);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_interface_posix.inc b/compiler-rt/lib/sanitizer_common/sanitizer_common_interface_posix.inc
index 38f9531148d4..a5259be9335a 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common_interface_posix.inc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_interface_posix.inc
@@ -11,3 +11,5 @@ INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_code)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_data)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_demangle)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_flush)
+INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_set_demangle)
+INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_set_inline_frames)
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp
index bc4b477e350f..c4cc0e45193e 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp
@@ -10,25 +10,21 @@
// run-time libraries.
//===----------------------------------------------------------------------===//
+#include "sanitizer_allocator.h"
#include "sanitizer_allocator_interface.h"
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_procmaps.h"
-
+#include "sanitizer_stackdepot.h"
namespace __sanitizer {
-static void (*SoftRssLimitExceededCallback)(bool exceeded);
-void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) {
- CHECK_EQ(SoftRssLimitExceededCallback, nullptr);
- SoftRssLimitExceededCallback = Callback;
-}
-
#if (SANITIZER_LINUX || SANITIZER_NETBSD) && !SANITIZER_GO
// Weak default implementation for when sanitizer_stackdepot is not linked in.
SANITIZER_WEAK_ATTRIBUTE StackDepotStats StackDepotGetStats() { return {}; }
void *BackgroundThread(void *arg) {
+ VPrintf(1, "%s: Started BackgroundThread\n", SanitizerToolName);
const uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;
const uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb;
const bool heap_profile = common_flags()->heap_profile;
@@ -66,13 +62,11 @@ void *BackgroundThread(void *arg) {
reached_soft_rss_limit = true;
Report("%s: soft rss limit exhausted (%zdMb vs %zdMb)\n",
SanitizerToolName, soft_rss_limit_mb, current_rss_mb);
- if (SoftRssLimitExceededCallback)
- SoftRssLimitExceededCallback(true);
+ SetRssLimitExceeded(true);
} else if (soft_rss_limit_mb >= current_rss_mb &&
reached_soft_rss_limit) {
reached_soft_rss_limit = false;
- if (SoftRssLimitExceededCallback)
- SoftRssLimitExceededCallback(false);
+ SetRssLimitExceeded(false);
}
}
if (heap_profile &&
@@ -83,6 +77,38 @@ void *BackgroundThread(void *arg) {
}
}
}
+
+void MaybeStartBackgroudThread() {
+ // Need to implement/test on other platforms.
+ // Start the background thread if one of the rss limits is given.
+ if (!common_flags()->hard_rss_limit_mb &&
+ !common_flags()->soft_rss_limit_mb &&
+ !common_flags()->heap_profile) return;
+ if (!&real_pthread_create) {
+ VPrintf(1, "%s: real_pthread_create undefined\n", SanitizerToolName);
+ return; // Can't spawn the thread anyway.
+ }
+
+ static bool started = false;
+ if (!started) {
+ started = true;
+ internal_start_thread(BackgroundThread, nullptr);
+ }
+}
+
+# if !SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL
+# pragma clang diagnostic push
+// We avoid global-constructors to be sure that globals are ready when
+// sanitizers need them. This can happen before global constructors are executed.
+// Here we don't mind if the thread is started at a later stage.
+# pragma clang diagnostic ignored "-Wglobal-constructors"
+static struct BackgroudThreadStarted {
+ BackgroudThreadStarted() { MaybeStartBackgroudThread(); }
+} background_thread_strarter UNUSED;
+# pragma clang diagnostic pop
+# endif
+#else
+void MaybeStartBackgroudThread() {}
#endif
void WriteToSyslog(const char *msg) {
@@ -105,18 +131,6 @@ void WriteToSyslog(const char *msg) {
WriteOneLineToSyslog(p);
}
-void MaybeStartBackgroudThread() {
-#if (SANITIZER_LINUX || SANITIZER_NETBSD) && \
- !SANITIZER_GO // Need to implement/test on other platforms.
- // Start the background thread if one of the rss limits is given.
- if (!common_flags()->hard_rss_limit_mb &&
- !common_flags()->soft_rss_limit_mb &&
- !common_flags()->heap_profile) return;
- if (!&real_pthread_create) return; // Can't spawn the thread anyway.
- internal_start_thread(BackgroundThread, nullptr);
-#endif
-}
-
static void (*sandboxing_callback)();
void SetSandboxingCallback(void (*f)()) {
sandboxing_callback = f;
@@ -185,10 +199,22 @@ void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
#endif // !SANITIZER_FUCHSIA
+#if !SANITIZER_WINDOWS && !SANITIZER_GO
+// Weak default implementation for when sanitizer_stackdepot is not linked in.
+SANITIZER_WEAK_ATTRIBUTE void StackDepotStopBackgroundThread() {}
+static void StopStackDepotBackgroundThread() {
+ StackDepotStopBackgroundThread();
+}
+#else
+// SANITIZER_WEAK_ATTRIBUTE is unsupported.
+static void StopStackDepotBackgroundThread() {}
+#endif
+
} // namespace __sanitizer
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_sandbox_on_notify,
__sanitizer_sandbox_arguments *args) {
+ __sanitizer::StopStackDepotBackgroundThread();
__sanitizer::PlatformPrepareForSandboxing(args);
if (__sanitizer::sandboxing_callback)
__sanitizer::sandboxing_callback();
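The rewritten BackgroundThread above polls the process RSS and now flips an allocator-wide flag via SetRssLimitExceeded() instead of invoking a separately registered callback. A minimal standalone sketch of that polling pattern, not part of this commit, using std::thread and a hypothetical GetRssMb() in place of the sanitizer's /proc reader:

#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

static std::atomic<bool> rss_limit_exceeded{false};

// Hypothetical stand-in for the real RSS query (the sanitizer reads /proc).
static size_t GetRssMb() { return 0; }

static void RssMonitor(size_t soft_limit_mb) {
  bool above = false;
  for (;;) {
    size_t rss_mb = GetRssMb();
    if (rss_mb > soft_limit_mb && !above) {
      above = true;
      rss_limit_exceeded.store(true, std::memory_order_relaxed);
      std::fprintf(stderr, "soft rss limit exhausted (%zu MB vs %zu MB)\n",
                   soft_limit_mb, rss_mb);
    } else if (rss_mb <= soft_limit_mb && above) {
      above = false;
      rss_limit_exceeded.store(false, std::memory_order_relaxed);
    }
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
  }
}

int main() {
  std::thread(RssMonitor, /*soft_limit_mb=*/2048).detach();
  // ... application work; allocators would consult rss_limit_exceeded ...
}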
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc b/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc
index 95da82b1a1da..0ca91aff8dd4 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc
@@ -179,6 +179,7 @@ COMMON_FLAG(bool, use_madv_dontdump, true,
"in core file.")
COMMON_FLAG(bool, symbolize_inline_frames, true,
"Print inlined frames in stacktraces. Defaults to true.")
+COMMON_FLAG(bool, demangle, true, "Print demangled symbols.")
COMMON_FLAG(bool, symbolize_vs_style, false,
"Print file locations in Visual Studio style (e.g: "
" file(10,42): ...")
@@ -191,6 +192,8 @@ COMMON_FLAG(const char *, stack_trace_format, "DEFAULT",
"Format string used to render stack frames. "
"See sanitizer_stacktrace_printer.h for the format description. "
"Use DEFAULT to get default format.")
+COMMON_FLAG(int, compress_stack_depot, 0,
+ "Compress stack depot to save memory.")
COMMON_FLAG(bool, no_huge_pages_for_shadow, true,
"If true, the shadow is not allowed to use huge pages. ")
COMMON_FLAG(bool, strict_string_checks, false,
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
index 9b5f6f1da1a1..66a0fd64a05a 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
@@ -275,11 +275,11 @@ void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
}
bool MprotectNoAccess(uptr addr, uptr size) {
- return _zx_vmar_protect(_zx_vmar_root_self(), 0, Addr, Size) == ZX_OK;
+ return _zx_vmar_protect(_zx_vmar_root_self(), 0, addr, size) == ZX_OK;
}
bool MprotectReadOnly(uptr addr, uptr size) {
- return _zx_vmar_protect(_zx_vmar_root_self(), ZX_VM_PERM_READ, Addr, Size) ==
+ return _zx_vmar_protect(_zx_vmar_root_self(), ZX_VM_PERM_READ, addr, size) ==
ZX_OK;
}
@@ -484,6 +484,9 @@ u32 GetNumberOfCPUs() { return zx_system_get_num_cpus(); }
uptr GetRSS() { UNIMPLEMENTED(); }
+void *internal_start_thread(void *(*func)(void *arg), void *arg) { return 0; }
+void internal_join_thread(void *th) {}
+
void InitializePlatformCommonFlags(CommonFlags *cf) {}
} // namespace __sanitizer
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
index 2d787332a445..a92ea01ccccc 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
@@ -163,6 +163,12 @@ ScopedBlockSignals::ScopedBlockSignals(__sanitizer_sigset_t *copy) {
// See test/sanitizer_common/TestCases/Linux/setuid.c.
internal_sigdelset(&set, 33);
# endif
+# if SANITIZER_LINUX
+ // Seccomp-BPF-sandboxed processes rely on SIGSYS to handle trapped syscalls.
+ // If this signal is blocked, such calls cannot be handled and the process may
+ // hang.
+ internal_sigdelset(&set, 31);
+# endif
SetSigProcMask(&set, &saved_);
if (copy)
internal_memcpy(copy, &saved_, sizeof(saved_));
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux.h b/compiler-rt/lib/sanitizer_common/sanitizer_linux.h
index 6a235db0ee2e..ebd60e0b10f2 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux.h
@@ -55,6 +55,9 @@ struct ScopedBlockSignals {
explicit ScopedBlockSignals(__sanitizer_sigset_t *copy);
~ScopedBlockSignals();
+ ScopedBlockSignals &operator=(const ScopedBlockSignals &) = delete;
+ ScopedBlockSignals(const ScopedBlockSignals &) = delete;
+
private:
__sanitizer_sigset_t saved_;
};
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
index 7ce9e25da342..3c15c35cf488 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
@@ -603,6 +603,32 @@ static int AddModuleSegments(const char *module_name, dl_phdr_info *info,
bool writable = phdr->p_flags & PF_W;
cur_module.addAddressRange(cur_beg, cur_end, executable,
writable);
+ } else if (phdr->p_type == PT_NOTE) {
+ uptr off = 0;
+ while (off < phdr->p_memsz - sizeof(ElfW(Nhdr))) {
+ auto *nhdr = reinterpret_cast<const ElfW(Nhdr) *>(info->dlpi_addr +
+ phdr->p_vaddr + off);
+ constexpr auto kGnuNamesz = 4; // "GNU" with NUL-byte.
+ static_assert(kGnuNamesz % 4 == 0, "kGnuNamesz is aligned to 4.");
+ if (nhdr->n_type == NT_GNU_BUILD_ID && nhdr->n_namesz == kGnuNamesz) {
+ if (off + sizeof(ElfW(Nhdr)) + nhdr->n_namesz + nhdr->n_descsz >
+ phdr->p_memsz) {
+ // Something is very wrong, bail out instead of reading potentially
+ // arbitrary memory.
+ break;
+ }
+ const char *name =
+ reinterpret_cast<const char *>(nhdr) + sizeof(*nhdr);
+ if (internal_memcmp(name, "GNU", 3) == 0) {
+ const char *value = reinterpret_cast<const char *>(nhdr) +
+ sizeof(*nhdr) + kGnuNamesz;
+ cur_module.setUuid(value, nhdr->n_descsz);
+ break;
+ }
+ }
+ off += sizeof(*nhdr) + RoundUpTo(nhdr->n_namesz, 4) +
+ RoundUpTo(nhdr->n_descsz, 4);
+ }
}
}
modules->push_back(cur_module);
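The PT_NOTE branch above walks each segment's note entries and copies the NT_GNU_BUILD_ID descriptor into the module UUID. A rough standalone sketch of the same walk over a raw note buffer, assuming glibc's <elf.h>/<link.h>; FindBuildId and its arguments are illustrative, not part of this commit:

#include <elf.h>
#include <link.h>
#include <cstddef>
#include <cstdint>
#include <cstring>

// Scans a PT_NOTE payload and returns a pointer to the build-id bytes,
// storing their length in *len; returns nullptr if no NT_GNU_BUILD_ID note.
static const uint8_t *FindBuildId(const uint8_t *notes, size_t size, size_t *len) {
  size_t off = 0;
  while (off + sizeof(ElfW(Nhdr)) <= size) {
    auto *nhdr = reinterpret_cast<const ElfW(Nhdr) *>(notes + off);
    size_t name_sz = (nhdr->n_namesz + 3) & ~(size_t)3;  // entries are 4-byte aligned
    size_t desc_sz = (nhdr->n_descsz + 3) & ~(size_t)3;
    size_t total = sizeof(*nhdr) + name_sz + desc_sz;
    if (off + total > size)
      break;  // malformed note, bail out instead of reading past the buffer
    const char *name = reinterpret_cast<const char *>(nhdr + 1);
    if (nhdr->n_type == NT_GNU_BUILD_ID && nhdr->n_namesz == 4 &&
        memcmp(name, "GNU", 4) == 0) {
      *len = nhdr->n_descsz;
      return reinterpret_cast<const uint8_t *>(name) + name_sz;
    }
    off += total;
  }
  return nullptr;
}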
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_lzw.h b/compiler-rt/lib/sanitizer_common/sanitizer_lzw.h
new file mode 100644
index 000000000000..42acfbdcea09
--- /dev/null
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_lzw.h
@@ -0,0 +1,159 @@
+//===-- sanitizer_lzw.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Lempel–Ziv–Welch encoding/decoding
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_LZW_H
+#define SANITIZER_LZW_H
+
+#include "sanitizer_dense_map.h"
+
+namespace __sanitizer {
+
+using LzwCodeType = u32;
+
+template <class T, class ItIn, class ItOut>
+ItOut LzwEncode(ItIn begin, ItIn end, ItOut out) {
+ using Substring =
+ detail::DenseMapPair<LzwCodeType /* Prefix */, T /* Next input */>;
+
+ // Sentinel value for substrings of len 1.
+ static constexpr LzwCodeType kNoPrefix =
+ Min(DenseMapInfo<Substring>::getEmptyKey().first,
+ DenseMapInfo<Substring>::getTombstoneKey().first) -
+ 1;
+ DenseMap<Substring, LzwCodeType> prefix_to_code;
+ {
+ // Add all substrings of len 1 as the initial dictionary.
+ InternalMmapVector<T> dict_len1;
+ for (auto it = begin; it != end; ++it)
+ if (prefix_to_code.try_emplace({kNoPrefix, *it}, 0).second)
+ dict_len1.push_back(*it);
+
+ // Slightly helps with later delta encoding.
+ Sort(dict_len1.data(), dict_len1.size());
+
+ // For large sizeof(T) we have to store dict_len1. Smaller types like u8 can
+ // just generate them.
+ *out = dict_len1.size();
+ ++out;
+
+ for (uptr i = 0; i != dict_len1.size(); ++i) {
+ // Remap after the Sort.
+ prefix_to_code[{kNoPrefix, dict_len1[i]}] = i;
+ *out = dict_len1[i];
+ ++out;
+ }
+ CHECK_EQ(prefix_to_code.size(), dict_len1.size());
+ }
+
+ if (begin == end)
+ return out;
+
+ // Main LZW encoding loop.
+ LzwCodeType match = prefix_to_code.find({kNoPrefix, *begin})->second;
+ ++begin;
+ for (auto it = begin; it != end; ++it) {
+ // Extend match with the new item.
+ auto ins = prefix_to_code.try_emplace({match, *it}, prefix_to_code.size());
+ if (ins.second) {
+ // This is a new substring, but emit the code for the current match
+ // (before extending). This allows the LZW decoder to recover the dictionary.
+ *out = match;
+ ++out;
+ // Reset the match to a single item, which must be already in the map.
+ match = prefix_to_code.find({kNoPrefix, *it})->second;
+ } else {
+ // Already known, use as the current match.
+ match = ins.first->second;
+ }
+ }
+
+ *out = match;
+ ++out;
+
+ return out;
+}
+
+template <class T, class ItIn, class ItOut>
+ItOut LzwDecode(ItIn begin, ItIn end, ItOut out) {
+ if (begin == end)
+ return out;
+
+ // Load the dictionary of len 1 substrings. These correspond to the lowest codes.
+ InternalMmapVector<T> dict_len1(*begin);
+ ++begin;
+
+ if (begin == end)
+ return out;
+
+ for (auto& v : dict_len1) {
+ v = *begin;
+ ++begin;
+ }
+
+ // Substrings of len 2 and up. Indexes are shifted because [0,
+ // dict_len1.size()) is stored in dict_len1. Substrings get here after being
+ // emitted to the output, so we can use the output position.
+ InternalMmapVector<detail::DenseMapPair<ItOut /* begin. */, ItOut /* end */>>
+ code_to_substr;
+
+ // Copies already emitted substrings into the output again.
+ auto copy = [&code_to_substr, &dict_len1](LzwCodeType code, ItOut out) {
+ if (code < dict_len1.size()) {
+ *out = dict_len1[code];
+ ++out;
+ return out;
+ }
+ const auto& s = code_to_substr[code - dict_len1.size()];
+
+ for (ItOut it = s.first; it != s.second; ++it, ++out) *out = *it;
+ return out;
+ };
+
+ // Returns the length of the substring with the given code.
+ auto code_to_len = [&code_to_substr, &dict_len1](LzwCodeType code) -> uptr {
+ if (code < dict_len1.size())
+ return 1;
+ const auto& s = code_to_substr[code - dict_len1.size()];
+ return s.second - s.first;
+ };
+
+ // Main LZW decoding loop.
+ LzwCodeType prev_code = *begin;
+ ++begin;
+ out = copy(prev_code, out);
+ for (auto it = begin; it != end; ++it) {
+ LzwCodeType code = *it;
+ auto start = out;
+ if (code == dict_len1.size() + code_to_substr.size()) {
+ // Special LZW case. The code is not in the dictionary yet. This is
+ // possible only when the new substring is the same as the previous one plus
+ // the first item of the previous substring. We can emit that in two
+ // steps.
+ out = copy(prev_code, out);
+ *out = *start;
+ ++out;
+ } else {
+ out = copy(code, out);
+ }
+
+ // Every time the encoder emits a code, it also creates a substring of len + 1
+ // including the first item of the just emitted substring. Do the same here.
+ uptr len = code_to_len(prev_code);
+ code_to_substr.push_back({start - len, start + 1});
+
+ prev_code = code;
+ }
+ return out;
+}
+
+} // namespace __sanitizer
+#endif
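sanitizer_lzw.h above implements LZW over arbitrary value types and iterators so its output can feed the LEB128 encoder directly. For orientation, here is a rough standalone byte-oriented LZW round trip using std::map in place of the sanitizer's DenseMap; every name in it is illustrative and it is not this library's API:

#include <cassert>
#include <cstdint>
#include <map>
#include <string>
#include <vector>

// Emit the code of the longest known prefix, then add prefix+next to the
// dictionary -- the same structure as LzwEncode above.
static std::vector<uint32_t> Encode(const std::string &in) {
  std::map<std::string, uint32_t> dict;
  for (int c = 0; c < 256; ++c) dict[std::string(1, char(c))] = c;
  std::vector<uint32_t> out;
  std::string match;
  for (char c : in) {
    std::string extended = match + c;
    if (dict.count(extended)) {
      match = extended;
    } else {
      out.push_back(dict[match]);
      uint32_t next_code = dict.size();
      dict[extended] = next_code;
      match = std::string(1, c);
    }
  }
  if (!match.empty()) out.push_back(dict[match]);
  return out;
}

static std::string Decode(const std::vector<uint32_t> &codes) {
  std::vector<std::string> dict;
  for (int c = 0; c < 256; ++c) dict.push_back(std::string(1, char(c)));
  std::string out, prev;
  for (uint32_t code : codes) {
    // The "code not in the dictionary yet" case handled by LzwDecode: the new
    // substring must be the previous one plus its own first item.
    std::string cur = code < dict.size() ? dict[code] : prev + prev[0];
    out += cur;
    if (!prev.empty()) dict.push_back(prev + cur[0]);
    prev = cur;
  }
  return out;
}

int main() {
  std::string s = "TOBEORNOTTOBEORTOBEORNOT";
  assert(Decode(Encode(s)) == s);
}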
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
index f9b5c531aeee..a2fc310ad1a2 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
@@ -888,7 +888,7 @@ bool SignalContext::IsTrueFaultingAddress() const {
(uptr)ptrauth_strip( \
(void *)arm_thread_state64_get_##r(ucontext->uc_mcontext->__ss), 0)
#else
- #define AARCH64_GET_REG(r) ucontext->uc_mcontext->__ss.__##r
+ #define AARCH64_GET_REG(r) (uptr)ucontext->uc_mcontext->__ss.__##r
#endif
static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
@@ -1223,7 +1223,7 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
uptr largest_gap_found = 0;
uptr max_occupied_addr = 0;
- VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
+ VReport(2, "FindDynamicShadowStart, space_size = %p\n", (void *)space_size);
uptr shadow_start =
FindAvailableMemoryRange(space_size, alignment, granularity,
&largest_gap_found, &max_occupied_addr);
@@ -1232,20 +1232,21 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
VReport(
2,
"Shadow doesn't fit, largest_gap_found = %p, max_occupied_addr = %p\n",
- largest_gap_found, max_occupied_addr);
+ (void *)largest_gap_found, (void *)max_occupied_addr);
uptr new_max_vm = RoundDownTo(largest_gap_found << shadow_scale, alignment);
if (new_max_vm < max_occupied_addr) {
Report("Unable to find a memory range for dynamic shadow.\n");
Report(
"space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, "
"new_max_vm = %p\n",
- space_size, largest_gap_found, max_occupied_addr, new_max_vm);
+ (void *)space_size, (void *)largest_gap_found,
+ (void *)max_occupied_addr, (void *)new_max_vm);
CHECK(0 && "cannot place shadow");
}
RestrictMemoryToMaxAddress(new_max_vm);
high_mem_end = new_max_vm - 1;
space_size = (high_mem_end >> shadow_scale) + left_padding;
- VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
+ VReport(2, "FindDynamicShadowStart, space_size = %p\n", (void *)space_size);
shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity,
nullptr, nullptr);
if (shadow_start == 0) {
@@ -1325,7 +1326,7 @@ void SignalContext::DumpAllRegisters(void *context) {
# define DUMPREG64(r) \
Printf("%s = 0x%016llx ", #r, ucontext->uc_mcontext->__ss.__ ## r);
# define DUMPREGA64(r) \
- Printf(" %s = 0x%016llx ", #r, AARCH64_GET_REG(r));
+ Printf(" %s = 0x%016lx ", #r, AARCH64_GET_REG(r));
# define DUMPREG32(r) \
Printf("%s = 0x%08x ", #r, ucontext->uc_mcontext->__ss.__ ## r);
# define DUMPREG_(r) Printf(" "); DUMPREG(r);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform.h b/compiler-rt/lib/sanitizer_common/sanitizer_platform.h
index 3153de34e5a3..8de765cf6669 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform.h
@@ -22,103 +22,103 @@
// function declarations into a .S file which doesn't compile.
// https://crbug.com/1162741
#if __has_include(<features.h>) && !defined(__ANDROID__)
-#include <features.h>
+# include <features.h>
#endif
#if defined(__linux__)
-# define SANITIZER_LINUX 1
+# define SANITIZER_LINUX 1
#else
-# define SANITIZER_LINUX 0
+# define SANITIZER_LINUX 0
#endif
#if defined(__GLIBC__)
-# define SANITIZER_GLIBC 1
+# define SANITIZER_GLIBC 1
#else
-# define SANITIZER_GLIBC 0
+# define SANITIZER_GLIBC 0
#endif
#if defined(__FreeBSD__)
-# define SANITIZER_FREEBSD 1
+# define SANITIZER_FREEBSD 1
#else
-# define SANITIZER_FREEBSD 0
+# define SANITIZER_FREEBSD 0
#endif
#if defined(__NetBSD__)
-# define SANITIZER_NETBSD 1
+# define SANITIZER_NETBSD 1
#else
-# define SANITIZER_NETBSD 0
+# define SANITIZER_NETBSD 0
#endif
#if defined(__sun__) && defined(__svr4__)
-# define SANITIZER_SOLARIS 1
+# define SANITIZER_SOLARIS 1
#else
-# define SANITIZER_SOLARIS 0
+# define SANITIZER_SOLARIS 0
#endif
#if defined(__APPLE__)
-# define SANITIZER_MAC 1
-# include <TargetConditionals.h>
-# if TARGET_OS_OSX
-# define SANITIZER_OSX 1
-# else
-# define SANITIZER_OSX 0
-# endif
-# if TARGET_OS_IPHONE
-# define SANITIZER_IOS 1
-# else
-# define SANITIZER_IOS 0
-# endif
-# if TARGET_OS_SIMULATOR
-# define SANITIZER_IOSSIM 1
-# else
-# define SANITIZER_IOSSIM 0
-# endif
+# define SANITIZER_MAC 1
+# include <TargetConditionals.h>
+# if TARGET_OS_OSX
+# define SANITIZER_OSX 1
+# else
+# define SANITIZER_OSX 0
+# endif
+# if TARGET_OS_IPHONE
+# define SANITIZER_IOS 1
+# else
+# define SANITIZER_IOS 0
+# endif
+# if TARGET_OS_SIMULATOR
+# define SANITIZER_IOSSIM 1
+# else
+# define SANITIZER_IOSSIM 0
+# endif
#else
-# define SANITIZER_MAC 0
-# define SANITIZER_IOS 0
-# define SANITIZER_IOSSIM 0
-# define SANITIZER_OSX 0
+# define SANITIZER_MAC 0
+# define SANITIZER_IOS 0
+# define SANITIZER_IOSSIM 0
+# define SANITIZER_OSX 0
#endif
#if defined(__APPLE__) && TARGET_OS_IPHONE && TARGET_OS_WATCH
-# define SANITIZER_WATCHOS 1
+# define SANITIZER_WATCHOS 1
#else
-# define SANITIZER_WATCHOS 0
+# define SANITIZER_WATCHOS 0
#endif
#if defined(__APPLE__) && TARGET_OS_IPHONE && TARGET_OS_TV
-# define SANITIZER_TVOS 1
+# define SANITIZER_TVOS 1
#else
-# define SANITIZER_TVOS 0
+# define SANITIZER_TVOS 0
#endif
#if defined(_WIN32)
-# define SANITIZER_WINDOWS 1
+# define SANITIZER_WINDOWS 1
#else
-# define SANITIZER_WINDOWS 0
+# define SANITIZER_WINDOWS 0
#endif
#if defined(_WIN64)
-# define SANITIZER_WINDOWS64 1
+# define SANITIZER_WINDOWS64 1
#else
-# define SANITIZER_WINDOWS64 0
+# define SANITIZER_WINDOWS64 0
#endif
#if defined(__ANDROID__)
-# define SANITIZER_ANDROID 1
+# define SANITIZER_ANDROID 1
#else
-# define SANITIZER_ANDROID 0
+# define SANITIZER_ANDROID 0
#endif
#if defined(__Fuchsia__)
-# define SANITIZER_FUCHSIA 1
+# define SANITIZER_FUCHSIA 1
#else
-# define SANITIZER_FUCHSIA 0
+# define SANITIZER_FUCHSIA 0
#endif
-#define SANITIZER_POSIX \
+#define SANITIZER_POSIX \
(SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC || \
- SANITIZER_NETBSD || SANITIZER_SOLARIS)
+ SANITIZER_NETBSD || SANITIZER_SOLARIS)
#if __LP64__ || defined(_WIN64)
# define SANITIZER_WORDSIZE 64
@@ -127,58 +127,64 @@
#endif
#if SANITIZER_WORDSIZE == 64
-# define FIRST_32_SECOND_64(a, b) (b)
+# define FIRST_32_SECOND_64(a, b) (b)
#else
-# define FIRST_32_SECOND_64(a, b) (a)
+# define FIRST_32_SECOND_64(a, b) (a)
#endif
#if defined(__x86_64__) && !defined(_LP64)
-# define SANITIZER_X32 1
+# define SANITIZER_X32 1
#else
-# define SANITIZER_X32 0
+# define SANITIZER_X32 0
+#endif
+
+#if defined(__x86_64__) || defined(_M_X64)
+# define SANITIZER_X64 1
+#else
+# define SANITIZER_X64 0
#endif
#if defined(__i386__) || defined(_M_IX86)
-# define SANITIZER_I386 1
+# define SANITIZER_I386 1
#else
-# define SANITIZER_I386 0
+# define SANITIZER_I386 0
#endif
#if defined(__mips__)
-# define SANITIZER_MIPS 1
-# if defined(__mips64)
+# define SANITIZER_MIPS 1
+# if defined(__mips64)
+# define SANITIZER_MIPS32 0
+# define SANITIZER_MIPS64 1
+# else
+# define SANITIZER_MIPS32 1
+# define SANITIZER_MIPS64 0
+# endif
+#else
+# define SANITIZER_MIPS 0
# define SANITIZER_MIPS32 0
-# define SANITIZER_MIPS64 1
-# else
-# define SANITIZER_MIPS32 1
# define SANITIZER_MIPS64 0
-# endif
-#else
-# define SANITIZER_MIPS 0
-# define SANITIZER_MIPS32 0
-# define SANITIZER_MIPS64 0
#endif
#if defined(__s390__)
-# define SANITIZER_S390 1
-# if defined(__s390x__)
+# define SANITIZER_S390 1
+# if defined(__s390x__)
+# define SANITIZER_S390_31 0
+# define SANITIZER_S390_64 1
+# else
+# define SANITIZER_S390_31 1
+# define SANITIZER_S390_64 0
+# endif
+#else
+# define SANITIZER_S390 0
# define SANITIZER_S390_31 0
-# define SANITIZER_S390_64 1
-# else
-# define SANITIZER_S390_31 1
# define SANITIZER_S390_64 0
-# endif
-#else
-# define SANITIZER_S390 0
-# define SANITIZER_S390_31 0
-# define SANITIZER_S390_64 0
#endif
#if defined(__powerpc__)
-# define SANITIZER_PPC 1
-# if defined(__powerpc64__)
-# define SANITIZER_PPC32 0
-# define SANITIZER_PPC64 1
+# define SANITIZER_PPC 1
+# if defined(__powerpc64__)
+# define SANITIZER_PPC32 0
+# define SANITIZER_PPC64 1
// 64-bit PPC has two ABIs (v1 and v2). The old powerpc64 target is
// big-endian, and uses v1 ABI (known for its function descriptors),
// while the new powerpc64le target is little-endian and uses v2.
@@ -186,43 +192,49 @@
// (eg. big-endian v2), but you won't find such combinations in the wild
// (it'd require bootstrapping a whole system, which would be quite painful
// - there's no target triple for that). LLVM doesn't support them either.
-# if _CALL_ELF == 2
-# define SANITIZER_PPC64V1 0
-# define SANITIZER_PPC64V2 1
+# if _CALL_ELF == 2
+# define SANITIZER_PPC64V1 0
+# define SANITIZER_PPC64V2 1
+# else
+# define SANITIZER_PPC64V1 1
+# define SANITIZER_PPC64V2 0
+# endif
# else
-# define SANITIZER_PPC64V1 1
-# define SANITIZER_PPC64V2 0
+# define SANITIZER_PPC32 1
+# define SANITIZER_PPC64 0
+# define SANITIZER_PPC64V1 0
+# define SANITIZER_PPC64V2 0
# endif
-# else
-# define SANITIZER_PPC32 1
+#else
+# define SANITIZER_PPC 0
+# define SANITIZER_PPC32 0
# define SANITIZER_PPC64 0
# define SANITIZER_PPC64V1 0
# define SANITIZER_PPC64V2 0
-# endif
+#endif
+
+#if defined(__arm__) || defined(_M_ARM)
+# define SANITIZER_ARM 1
#else
-# define SANITIZER_PPC 0
-# define SANITIZER_PPC32 0
-# define SANITIZER_PPC64 0
-# define SANITIZER_PPC64V1 0
-# define SANITIZER_PPC64V2 0
+# define SANITIZER_ARM 0
#endif
-#if defined(__arm__)
-# define SANITIZER_ARM 1
+#if defined(__aarch64__) || defined(_M_ARM64)
+# define SANITIZER_ARM64 1
#else
-# define SANITIZER_ARM 0
+# define SANITIZER_ARM64 0
#endif
#if SANITIZER_SOLARIS && SANITIZER_WORDSIZE == 32
-# define SANITIZER_SOLARIS32 1
+# define SANITIZER_SOLARIS32 1
#else
-# define SANITIZER_SOLARIS32 0
+# define SANITIZER_SOLARIS32 0
#endif
#if defined(__riscv) && (__riscv_xlen == 64)
-#define SANITIZER_RISCV64 1
+# define SANITIZER_RISCV64 1
#else
-#define SANITIZER_RISCV64 0
+# define SANITIZER_RISCV64 0
#endif
// By default we allow to use SizeClassAllocator64 on 64-bit platform.
@@ -231,50 +243,52 @@
// For such platforms build this code with -DSANITIZER_CAN_USE_ALLOCATOR64=0 or
// change the definition of SANITIZER_CAN_USE_ALLOCATOR64 here.
#ifndef SANITIZER_CAN_USE_ALLOCATOR64
-# if (SANITIZER_ANDROID && defined(__aarch64__)) || SANITIZER_FUCHSIA
-# define SANITIZER_CAN_USE_ALLOCATOR64 1
-# elif defined(__mips64) || defined(__aarch64__)
-# define SANITIZER_CAN_USE_ALLOCATOR64 0
-# else
-# define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64)
-# endif
+# if (SANITIZER_ANDROID && defined(__aarch64__)) || SANITIZER_FUCHSIA
+# define SANITIZER_CAN_USE_ALLOCATOR64 1
+# elif defined(__mips64) || defined(__aarch64__)
+# define SANITIZER_CAN_USE_ALLOCATOR64 0
+# else
+# define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64)
+# endif
#endif
// The range of addresses which can be returned by mmap.
// FIXME: this value should be different on different platforms. Larger values
// will still work but will consume more memory for TwoLevelByteMap.
#if defined(__mips__)
-#if SANITIZER_GO && defined(__mips64)
-#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
-#else
-# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40)
-#endif
+# if SANITIZER_GO && defined(__mips64)
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
+# else
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40)
+# endif
#elif SANITIZER_RISCV64
-#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 38)
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 38)
#elif defined(__aarch64__)
-# if SANITIZER_MAC
-# if SANITIZER_OSX || SANITIZER_IOSSIM
-# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
+# if SANITIZER_MAC
+# if SANITIZER_OSX || SANITIZER_IOSSIM
+# define SANITIZER_MMAP_RANGE_SIZE \
+ FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
+# else
+// Darwin iOS/ARM64 has a 36-bit VMA, 64GiB VM
+# define SANITIZER_MMAP_RANGE_SIZE \
+ FIRST_32_SECOND_64(1ULL << 32, 1ULL << 36)
+# endif
# else
- // Darwin iOS/ARM64 has a 36-bit VMA, 64GiB VM
-# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 36)
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48)
# endif
-# else
-# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48)
-# endif
#elif defined(__sparc__)
-#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 52)
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 52)
#else
-# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
#endif
// Whether the addresses are sign-extended from the VMA range to the word.
// The SPARC64 Linux port implements this to split the VMA space into two
// non-contiguous halves with a huge hole in the middle.
#if defined(__sparc__) && SANITIZER_WORDSIZE == 64
-#define SANITIZER_SIGN_EXTENDED_ADDRESSES 1
+# define SANITIZER_SIGN_EXTENDED_ADDRESSES 1
#else
-#define SANITIZER_SIGN_EXTENDED_ADDRESSES 0
+# define SANITIZER_SIGN_EXTENDED_ADDRESSES 0
#endif
// The AArch64 and RISC-V linux ports use the canonical syscall set as
@@ -297,15 +311,15 @@
// Since we don't want to include libc headers here, we check the
// target only.
#if defined(__arm__) || SANITIZER_X32 || defined(__sparc__)
-#define SANITIZER_USES_UID16_SYSCALLS 1
+# define SANITIZER_USES_UID16_SYSCALLS 1
#else
-#define SANITIZER_USES_UID16_SYSCALLS 0
+# define SANITIZER_USES_UID16_SYSCALLS 0
#endif
#if defined(__mips__)
-# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 10)
+# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 10)
#else
-# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12)
+# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12)
#endif
/// \macro MSC_PREREQ
@@ -314,15 +328,15 @@
/// * 1800: Microsoft Visual Studio 2013 / 12.0
/// * 1900: Microsoft Visual Studio 2015 / 14.0
#ifdef _MSC_VER
-# define MSC_PREREQ(version) (_MSC_VER >= (version))
+# define MSC_PREREQ(version) (_MSC_VER >= (version))
#else
-# define MSC_PREREQ(version) 0
+# define MSC_PREREQ(version) 0
#endif
#if SANITIZER_MAC && !(defined(__arm64__) && SANITIZER_IOS)
-# define SANITIZER_NON_UNIQUE_TYPEINFO 0
+# define SANITIZER_NON_UNIQUE_TYPEINFO 0
#else
-# define SANITIZER_NON_UNIQUE_TYPEINFO 1
+# define SANITIZER_NON_UNIQUE_TYPEINFO 1
#endif
// On linux, some architectures had an ABI transition from 64-bit long double
@@ -330,11 +344,11 @@
// involving long doubles come in two versions, and we need to pass the
// correct one to dlvsym when intercepting them.
#if SANITIZER_LINUX && (SANITIZER_S390 || SANITIZER_PPC32 || SANITIZER_PPC64V1)
-#define SANITIZER_NLDBL_VERSION "GLIBC_2.4"
+# define SANITIZER_NLDBL_VERSION "GLIBC_2.4"
#endif
#if SANITIZER_GO == 0
-# define SANITIZER_GO 0
+# define SANITIZER_GO 0
#endif
// On PowerPC and ARM Thumb, calling pthread_exit() causes LSan to detect leaks.
@@ -342,40 +356,39 @@
// dlopen mallocs "libgcc_s.so" string which confuses LSan, it fails to realize
// that this allocation happens in dynamic linker and should be ignored.
#if SANITIZER_PPC || defined(__thumb__)
-# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 1
+# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 1
#else
-# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 0
+# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 0
#endif
-#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD || \
- SANITIZER_SOLARIS
-# define SANITIZER_MADVISE_DONTNEED MADV_FREE
+#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD || SANITIZER_SOLARIS
+# define SANITIZER_MADVISE_DONTNEED MADV_FREE
#else
-# define SANITIZER_MADVISE_DONTNEED MADV_DONTNEED
+# define SANITIZER_MADVISE_DONTNEED MADV_DONTNEED
#endif
// Older gcc have issues aligning to a constexpr, and require an integer.
// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56859 among others.
#if defined(__powerpc__) || defined(__powerpc64__)
-# define SANITIZER_CACHE_LINE_SIZE 128
+# define SANITIZER_CACHE_LINE_SIZE 128
#else
-# define SANITIZER_CACHE_LINE_SIZE 64
+# define SANITIZER_CACHE_LINE_SIZE 64
#endif
// Enable offline markup symbolizer for Fuchsia.
#if SANITIZER_FUCHSIA
# define SANITIZER_SYMBOLIZER_MARKUP 1
#else
-#define SANITIZER_SYMBOLIZER_MARKUP 0
+# define SANITIZER_SYMBOLIZER_MARKUP 0
#endif
// Enable ability to support sanitizer initialization that is
// compatible with the sanitizer library being loaded via
// `dlopen()`.
#if SANITIZER_MAC
-#define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 1
+# define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 1
#else
-#define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 0
+# define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 0
#endif
// SANITIZER_SUPPORTS_THREADLOCAL
@@ -392,4 +405,15 @@
# endif
#endif
-#endif // SANITIZER_PLATFORM_H
+#if defined(__thumb__) && defined(__linux__)
+// Workaround for
+// https://lab.llvm.org/buildbot/#/builders/clang-thumbv7-full-2stage
+// or
+// https://lab.llvm.org/staging/#/builders/clang-thumbv7-full-2stage
+// It fails *rss_limit_mb_test* without meaningful errors.
+# define SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL 1
+#else
+# define SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL 0
+#endif
+
+#endif // SANITIZER_PLATFORM_H
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
index b1c15d8c2834..4791a3a35bdb 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
@@ -10,6 +10,10 @@
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_leb128.h"
+#include "sanitizer_lzw.h"
+#include "sanitizer_placement_new.h"
#include "sanitizer_stacktrace.h"
namespace __sanitizer {
@@ -52,7 +56,7 @@ StackTrace StackStore::Load(Id id) {
uptr idx = IdToOffset(id);
uptr block_idx = GetBlockIdx(idx);
CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
- const uptr *stack_trace = blocks_[block_idx].GetOrUnpack();
+ const uptr *stack_trace = blocks_[block_idx].GetOrUnpack(this);
if (!stack_trace)
return {};
stack_trace += GetInBlockIdx(idx);
@@ -61,11 +65,7 @@ StackTrace StackStore::Load(Id id) {
}
uptr StackStore::Allocated() const {
- uptr next_block = GetBlockIdx(
- RoundUpTo(atomic_load_relaxed(&total_frames_), kBlockSizeFrames));
- uptr res = 0;
- for (uptr i = 0; i < next_block; ++i) res += blocks_[i].Allocated();
- return res + sizeof(*this);
+ return atomic_load_relaxed(&allocated_) + sizeof(*this);
}
uptr *StackStore::Alloc(uptr count, uptr *idx, uptr *pack) {
@@ -79,7 +79,7 @@ uptr *StackStore::Alloc(uptr count, uptr *idx, uptr *pack) {
// Fits into a single block.
CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
*idx = start;
- return blocks_[block_idx].GetOrCreate() + GetInBlockIdx(start);
+ return blocks_[block_idx].GetOrCreate(this) + GetInBlockIdx(start);
}
// Retry. We can't use range allocated in two different blocks.
@@ -92,43 +92,157 @@ uptr *StackStore::Alloc(uptr count, uptr *idx, uptr *pack) {
}
}
+void *StackStore::Map(uptr size, const char *mem_type) {
+ atomic_fetch_add(&allocated_, size, memory_order_relaxed);
+ return MmapNoReserveOrDie(size, mem_type);
+}
+
+void StackStore::Unmap(void *addr, uptr size) {
+ atomic_fetch_sub(&allocated_, size, memory_order_relaxed);
+ UnmapOrDie(addr, size);
+}
+
uptr StackStore::Pack(Compression type) {
uptr res = 0;
- for (BlockInfo &b : blocks_) res += b.Pack(type);
+ for (BlockInfo &b : blocks_) res += b.Pack(type, this);
return res;
}
+void StackStore::LockAll() {
+ for (BlockInfo &b : blocks_) b.Lock();
+}
+
+void StackStore::UnlockAll() {
+ for (BlockInfo &b : blocks_) b.Unlock();
+}
+
void StackStore::TestOnlyUnmap() {
- for (BlockInfo &b : blocks_) b.TestOnlyUnmap();
+ for (BlockInfo &b : blocks_) b.TestOnlyUnmap(this);
internal_memset(this, 0, sizeof(*this));
}
uptr *StackStore::BlockInfo::Get() const {
// Idiomatic double-checked locking uses memory_order_acquire here. But
- // relaxed is find for us, justification is similar to
+ // relaxed is fine for us, justification is similar to
// TwoLevelMap::GetOrCreate.
return reinterpret_cast<uptr *>(atomic_load_relaxed(&data_));
}
-uptr *StackStore::BlockInfo::Create() {
+uptr *StackStore::BlockInfo::Create(StackStore *store) {
SpinMutexLock l(&mtx_);
uptr *ptr = Get();
if (!ptr) {
- ptr = reinterpret_cast<uptr *>(
- MmapNoReserveOrDie(kBlockSizeBytes, "StackStore"));
+ ptr = reinterpret_cast<uptr *>(store->Map(kBlockSizeBytes, "StackStore"));
atomic_store(&data_, reinterpret_cast<uptr>(ptr), memory_order_release);
}
return ptr;
}
-uptr *StackStore::BlockInfo::GetOrCreate() {
+uptr *StackStore::BlockInfo::GetOrCreate(StackStore *store) {
uptr *ptr = Get();
if (LIKELY(ptr))
return ptr;
- return Create();
+ return Create(store);
+}
+
+class SLeb128Encoder {
+ public:
+ SLeb128Encoder(u8 *begin, u8 *end) : begin(begin), end(end) {}
+
+ bool operator==(const SLeb128Encoder &other) const {
+ return begin == other.begin;
+ }
+
+ bool operator!=(const SLeb128Encoder &other) const {
+ return begin != other.begin;
+ }
+
+ SLeb128Encoder &operator=(uptr v) {
+ sptr diff = v - previous;
+ begin = EncodeSLEB128(diff, begin, end);
+ previous = v;
+ return *this;
+ }
+ SLeb128Encoder &operator*() { return *this; }
+ SLeb128Encoder &operator++() { return *this; }
+
+ u8 *base() const { return begin; }
+
+ private:
+ u8 *begin;
+ u8 *end;
+ uptr previous = 0;
+};
+
+class SLeb128Decoder {
+ public:
+ SLeb128Decoder(const u8 *begin, const u8 *end) : begin(begin), end(end) {}
+
+ bool operator==(const SLeb128Decoder &other) const {
+ return begin == other.begin;
+ }
+
+ bool operator!=(const SLeb128Decoder &other) const {
+ return begin != other.begin;
+ }
+
+ uptr operator*() {
+ sptr diff;
+ begin = DecodeSLEB128(begin, end, &diff);
+ previous += diff;
+ return previous;
+ }
+ SLeb128Decoder &operator++() { return *this; }
+
+ SLeb128Decoder operator++(int) { return *this; }
+
+ private:
+ const u8 *begin;
+ const u8 *end;
+ uptr previous = 0;
+};
+
+static u8 *CompressDelta(const uptr *from, const uptr *from_end, u8 *to,
+ u8 *to_end) {
+ SLeb128Encoder encoder(to, to_end);
+ for (; from != from_end; ++from, ++encoder) *encoder = *from;
+ return encoder.base();
}
-uptr *StackStore::BlockInfo::GetOrUnpack() {
+static uptr *UncompressDelta(const u8 *from, const u8 *from_end, uptr *to,
+ uptr *to_end) {
+ SLeb128Decoder decoder(from, from_end);
+ SLeb128Decoder end(from_end, from_end);
+ for (; decoder != end; ++to, ++decoder) *to = *decoder;
+ CHECK_EQ(to, to_end);
+ return to;
+}
+
+static u8 *CompressLzw(const uptr *from, const uptr *from_end, u8 *to,
+ u8 *to_end) {
+ SLeb128Encoder encoder(to, to_end);
+ encoder = LzwEncode<uptr>(from, from_end, encoder);
+ return encoder.base();
+}
+
+static uptr *UncompressLzw(const u8 *from, const u8 *from_end, uptr *to,
+ uptr *to_end) {
+ SLeb128Decoder decoder(from, from_end);
+ SLeb128Decoder end(from_end, from_end);
+ to = LzwDecode<uptr>(decoder, end, to);
+ CHECK_EQ(to, to_end);
+ return to;
+}
+
+namespace {
+struct PackedHeader {
+ uptr size;
+ StackStore::Compression type;
+ u8 data[];
+};
+} // namespace
+
+uptr *StackStore::BlockInfo::GetOrUnpack(StackStore *store) {
SpinMutexLock l(&mtx_);
switch (state) {
case State::Storing:
@@ -140,15 +254,43 @@ uptr *StackStore::BlockInfo::GetOrUnpack() {
break;
}
- uptr *ptr = Get();
+ u8 *ptr = reinterpret_cast<u8 *>(Get());
CHECK_NE(nullptr, ptr);
- // Fake unpacking.
- for (uptr i = 0; i < kBlockSizeFrames; ++i) ptr[i] = ~ptr[i];
+ const PackedHeader *header = reinterpret_cast<const PackedHeader *>(ptr);
+ CHECK_LE(header->size, kBlockSizeBytes);
+ CHECK_GE(header->size, sizeof(PackedHeader));
+
+ uptr packed_size_aligned = RoundUpTo(header->size, GetPageSizeCached());
+
+ uptr *unpacked =
+ reinterpret_cast<uptr *>(store->Map(kBlockSizeBytes, "StackStoreUnpack"));
+
+ uptr *unpacked_end;
+ switch (header->type) {
+ case Compression::Delta:
+ unpacked_end = UncompressDelta(header->data, ptr + header->size, unpacked,
+ unpacked + kBlockSizeFrames);
+ break;
+ case Compression::LZW:
+ unpacked_end = UncompressLzw(header->data, ptr + header->size, unpacked,
+ unpacked + kBlockSizeFrames);
+ break;
+ default:
+ UNREACHABLE("Unexpected type");
+ break;
+ }
+
+ CHECK_EQ(kBlockSizeFrames, unpacked_end - unpacked);
+
+ MprotectReadOnly(reinterpret_cast<uptr>(unpacked), kBlockSizeBytes);
+ atomic_store(&data_, reinterpret_cast<uptr>(unpacked), memory_order_release);
+ store->Unmap(ptr, packed_size_aligned);
+
state = State::Unpacked;
return Get();
}
-uptr StackStore::BlockInfo::Pack(Compression type) {
+uptr StackStore::BlockInfo::Pack(Compression type, StackStore *store) {
if (type == Compression::None)
return 0;
@@ -165,26 +307,55 @@ uptr StackStore::BlockInfo::Pack(Compression type) {
if (!ptr || !Stored(0))
return 0;
- // Fake packing.
- for (uptr i = 0; i < kBlockSizeFrames; ++i) ptr[i] = ~ptr[i];
- state = State::Packed;
- return kBlockSizeBytes - kBlockSizeBytes / 10;
-}
+ u8 *packed =
+ reinterpret_cast<u8 *>(store->Map(kBlockSizeBytes, "StackStorePack"));
+ PackedHeader *header = reinterpret_cast<PackedHeader *>(packed);
+ u8 *alloc_end = packed + kBlockSizeBytes;
-uptr StackStore::BlockInfo::Allocated() const {
- SpinMutexLock l(&mtx_);
- switch (state) {
- case State::Packed:
- return kBlockSizeBytes / 10;
- case State::Unpacked:
- case State::Storing:
- return kBlockSizeBytes;
+ u8 *packed_end = nullptr;
+ switch (type) {
+ case Compression::Delta:
+ packed_end =
+ CompressDelta(ptr, ptr + kBlockSizeFrames, header->data, alloc_end);
+ break;
+ case Compression::LZW:
+ packed_end =
+ CompressLzw(ptr, ptr + kBlockSizeFrames, header->data, alloc_end);
+ break;
+ default:
+ UNREACHABLE("Unexpected type");
+ break;
}
+
+ header->type = type;
+ header->size = packed_end - packed;
+
+ VPrintf(1, "Packed block of %zu KiB to %zu KiB\n", kBlockSizeBytes >> 10,
+ header->size >> 10);
+
+ if (kBlockSizeBytes - header->size < kBlockSizeBytes / 8) {
+ VPrintf(1, "Undo and keep block unpacked\n");
+ MprotectReadOnly(reinterpret_cast<uptr>(ptr), kBlockSizeBytes);
+ store->Unmap(packed, kBlockSizeBytes);
+ state = State::Unpacked;
+ return 0;
+ }
+
+ uptr packed_size_aligned = RoundUpTo(header->size, GetPageSizeCached());
+ store->Unmap(packed + packed_size_aligned,
+ kBlockSizeBytes - packed_size_aligned);
+ MprotectReadOnly(reinterpret_cast<uptr>(packed), packed_size_aligned);
+
+ atomic_store(&data_, reinterpret_cast<uptr>(packed), memory_order_release);
+ store->Unmap(ptr, kBlockSizeBytes);
+
+ state = State::Packed;
+ return kBlockSizeBytes - packed_size_aligned;
}
-void StackStore::BlockInfo::TestOnlyUnmap() {
+void StackStore::BlockInfo::TestOnlyUnmap(StackStore *store) {
if (uptr *ptr = Get())
- UnmapOrDie(ptr, StackStore::kBlockSizeBytes);
+ store->Unmap(ptr, kBlockSizeBytes);
}
bool StackStore::BlockInfo::Stored(uptr n) {
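CompressDelta/UncompressDelta above first delta-encode consecutive frame addresses (return addresses within one trace tend to be close together) and then write each signed difference as SLEB128, so small deltas cost a single byte. A minimal standalone sketch of that two-step transform; PutSLEB128 and CompressDeltaSketch are illustrative names, not the sanitizer's helpers:

#include <cstdint>
#include <vector>

// Append one signed LEB128-encoded value to `out`.
static void PutSLEB128(int64_t v, std::vector<uint8_t> &out) {
  bool more = true;
  while (more) {
    uint8_t byte = v & 0x7f;
    v >>= 7;  // arithmetic shift keeps the sign
    more = !((v == 0 && !(byte & 0x40)) || (v == -1 && (byte & 0x40)));
    if (more) byte |= 0x80;
    out.push_back(byte);
  }
}

// Delta-encode a sequence of frame addresses, then SLEB128 each delta.
static std::vector<uint8_t> CompressDeltaSketch(const std::vector<uint64_t> &frames) {
  std::vector<uint8_t> out;
  uint64_t prev = 0;
  for (uint64_t pc : frames) {
    PutSLEB128(static_cast<int64_t>(pc - prev), out);
    prev = pc;
  }
  return out;
}

int main() {
  std::vector<uint64_t> frames = {0x7f0000001000, 0x7f0000001020, 0x7f0000000ff0};
  std::vector<uint8_t> packed = CompressDeltaSketch(frames);
  (void)packed;  // three frames pack into a handful of bytes
}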
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
index e0bc4e9c4a45..1bfad811f712 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
@@ -25,7 +25,8 @@ class StackStore {
public:
enum class Compression : u8 {
None = 0,
- Test,
+ Delta,
+ LZW,
};
constexpr StackStore() = default;
@@ -45,6 +46,9 @@ class StackStore {
// Returns the number of released bytes.
uptr Pack(Compression type);
+ void LockAll();
+ void UnlockAll();
+
void TestOnlyUnmap();
private:
@@ -71,9 +75,15 @@ class StackStore {
uptr *Alloc(uptr count, uptr *idx, uptr *pack);
+ void *Map(uptr size, const char *mem_type);
+ void Unmap(void *addr, uptr size);
+
// Total number of allocated frames.
atomic_uintptr_t total_frames_ = {};
+ // Tracks total allocated memory in bytes.
+ atomic_uintptr_t allocated_ = {};
+
// Each block will hold pointer to exactly kBlockSizeFrames.
class BlockInfo {
atomic_uintptr_t data_;
@@ -89,17 +99,18 @@ class StackStore {
};
State state GUARDED_BY(mtx_);
- uptr *Create();
+ uptr *Create(StackStore *store);
public:
uptr *Get() const;
- uptr *GetOrCreate();
- uptr *GetOrUnpack();
- uptr Pack(Compression type);
- uptr Allocated() const;
- void TestOnlyUnmap();
+ uptr *GetOrCreate(StackStore *store);
+ uptr *GetOrUnpack(StackStore *store);
+ uptr Pack(Compression type, StackStore *store);
+ void TestOnlyUnmap(StackStore *store);
bool Stored(uptr n);
bool IsPacked() const;
+ void Lock() NO_THREAD_SAFETY_ANALYSIS { mtx_.Lock(); }
+ void Unlock() NO_THREAD_SAFETY_ANALYSIS { mtx_.Unlock(); }
};
BlockInfo blocks_[kBlockCount] = {};
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
index 527221b0c85c..c755b1829d2a 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
@@ -12,8 +12,10 @@
#include "sanitizer_stackdepot.h"
+#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_hash.h"
+#include "sanitizer_mutex.h"
#include "sanitizer_stack_store.h"
#include "sanitizer_stackdepotbase.h"
@@ -72,12 +74,125 @@ uptr StackDepotNode::allocated() {
return stackStore.Allocated() + useCounts.MemoryUsage();
}
+static void CompressStackStore() {
+ u64 start = MonotonicNanoTime();
+ uptr diff = stackStore.Pack(static_cast<StackStore::Compression>(
+ Abs(common_flags()->compress_stack_depot)));
+ if (!diff)
+ return;
+ u64 finish = MonotonicNanoTime();
+ uptr total_before = theDepot.GetStats().allocated + diff;
+ VPrintf(1, "%s: StackDepot released %zu KiB out of %zu KiB in %llu ms\n",
+ SanitizerToolName, diff >> 10, total_before >> 10,
+ (finish - start) / 1000000);
+}
+
+namespace {
+
+class CompressThread {
+ public:
+ constexpr CompressThread() = default;
+ void NewWorkNotify();
+ void Stop();
+ void LockAndStop() NO_THREAD_SAFETY_ANALYSIS;
+ void Unlock() NO_THREAD_SAFETY_ANALYSIS;
+
+ private:
+ enum class State {
+ NotStarted = 0,
+ Started,
+ Failed,
+ Stopped,
+ };
+
+ void Run();
+
+ bool WaitForWork() {
+ semaphore_.Wait();
+ return atomic_load(&run_, memory_order_acquire);
+ }
+
+ Semaphore semaphore_ = {};
+ StaticSpinMutex mutex_ = {};
+ State state_ GUARDED_BY(mutex_) = State::NotStarted;
+ void *thread_ GUARDED_BY(mutex_) = nullptr;
+ atomic_uint8_t run_ = {};
+};
+
+static CompressThread compress_thread;
+
+void CompressThread::NewWorkNotify() {
+ int compress = common_flags()->compress_stack_depot;
+ if (!compress)
+ return;
+ if (compress > 0 /* for testing or debugging */) {
+ SpinMutexLock l(&mutex_);
+ if (state_ == State::NotStarted) {
+ atomic_store(&run_, 1, memory_order_release);
+ CHECK_EQ(nullptr, thread_);
+ thread_ = internal_start_thread(
+ [](void *arg) -> void * {
+ reinterpret_cast<CompressThread *>(arg)->Run();
+ return nullptr;
+ },
+ this);
+ state_ = thread_ ? State::Started : State::Failed;
+ }
+ if (state_ == State::Started) {
+ semaphore_.Post();
+ return;
+ }
+ }
+ CompressStackStore();
+}
+
+void CompressThread::Run() {
+ VPrintf(1, "%s: StackDepot compression thread started\n", SanitizerToolName);
+ while (WaitForWork()) CompressStackStore();
+ VPrintf(1, "%s: StackDepot compression thread stopped\n", SanitizerToolName);
+}
+
+void CompressThread::Stop() {
+ void *t = nullptr;
+ {
+ SpinMutexLock l(&mutex_);
+ if (state_ != State::Started)
+ return;
+ state_ = State::Stopped;
+ CHECK_NE(nullptr, thread_);
+ t = thread_;
+ thread_ = nullptr;
+ }
+ atomic_store(&run_, 0, memory_order_release);
+ semaphore_.Post();
+ internal_join_thread(t);
+}
+
+void CompressThread::LockAndStop() {
+ mutex_.Lock();
+ if (state_ != State::Started)
+ return;
+ CHECK_NE(nullptr, thread_);
+
+ atomic_store(&run_, 0, memory_order_release);
+ semaphore_.Post();
+ internal_join_thread(thread_);
+ // Allow restarting after Unlock() if needed.
+ state_ = State::NotStarted;
+ thread_ = nullptr;
+}
+
+void CompressThread::Unlock() { mutex_.Unlock(); }
+
+} // namespace
+
void StackDepotNode::store(u32 id, const args_type &args, hash_type hash) {
stack_hash = hash;
uptr pack = 0;
store_id = stackStore.Store(args, &pack);
- if (pack)
- stackStore.Pack(StackStore::Compression::None);
+ if (LIKELY(!pack))
+ return;
+ compress_thread.NewWorkNotify();
}
StackDepotNode::args_type StackDepotNode::load(u32 id) const {
@@ -100,9 +215,13 @@ StackTrace StackDepotGet(u32 id) {
void StackDepotLockAll() {
theDepot.LockAll();
+ compress_thread.LockAndStop();
+ stackStore.LockAll();
}
void StackDepotUnlockAll() {
+ stackStore.UnlockAll();
+ compress_thread.Unlock();
theDepot.UnlockAll();
}
@@ -112,6 +231,8 @@ void StackDepotPrintAll() {
#endif
}
+void StackDepotStopBackgroundThread() { compress_thread.Stop(); }
+
StackDepotHandle StackDepotNode::get_handle(u32 id) {
return StackDepotHandle(&theDepot.nodes[id], id);
}
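NewWorkNotify() above lazily starts a worker that sleeps on a semaphore and is stopped by clearing the run flag and posting the semaphore one final time. A small standalone sketch of the same start/notify/stop protocol, with std::condition_variable standing in for the sanitizer's Semaphore; all names are illustrative and not part of this commit:

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>

class Worker {
 public:
  void Start() {
    run_.store(true, std::memory_order_release);
    thread_ = std::thread([this] {
      while (WaitForWork()) DoCompress();
    });
  }
  void NotifyWork() {
    { std::lock_guard<std::mutex> l(m_); pending_ = true; }
    cv_.notify_one();
  }
  void Stop() {
    run_.store(false, std::memory_order_release);
    NotifyWork();  // wake the worker so it can observe run_ == false
    if (thread_.joinable()) thread_.join();
  }

 private:
  bool WaitForWork() {
    std::unique_lock<std::mutex> l(m_);
    cv_.wait(l, [this] { return pending_; });
    pending_ = false;
    return run_.load(std::memory_order_acquire);
  }
  void DoCompress() { /* pack the stack store here */ }

  std::thread thread_;
  std::mutex m_;
  std::condition_variable cv_;
  bool pending_ = false;
  std::atomic<bool> run_{false};
};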
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.h b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.h
index 56d655d9404c..cca6fd534688 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.h
@@ -42,6 +42,7 @@ StackTrace StackDepotGet(u32 id);
void StackDepotLockAll();
void StackDepotUnlockAll();
void StackDepotPrintAll();
+void StackDepotStopBackgroundThread();
void StackDepotTestOnlyUnmap();
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.cpp
index c6356dae23c1..2d0eccc1602a 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.cpp
@@ -104,6 +104,19 @@ static const char *DemangleFunctionName(const char *function) {
return function;
}
+static void MaybeBuildIdToBuffer(const AddressInfo &info, bool PrefixSpace,
+ InternalScopedString *buffer) {
+ if (info.uuid_size) {
+ if (PrefixSpace)
+ buffer->append(" ");
+ buffer->append("(BuildId: ");
+ for (uptr i = 0; i < info.uuid_size; ++i) {
+ buffer->append("%02x", info.uuid[i]);
+ }
+ buffer->append(")");
+ }
+}
+
static const char kDefaultFormat[] = " #%n %p %F %L";
void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
@@ -140,6 +153,9 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
case 'o':
buffer->append("0x%zx", info->module_offset);
break;
+ case 'b':
+ MaybeBuildIdToBuffer(*info, /*PrefixSpace=*/false, buffer);
+ break;
case 'f':
buffer->append("%s", DemangleFunctionName(StripFunctionName(
info->function, strip_func_prefix)));
@@ -181,6 +197,8 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
} else if (info->module) {
RenderModuleLocation(buffer, info->module, info->module_offset,
info->module_arch, strip_path_prefix);
+
+ MaybeBuildIdToBuffer(*info, /*PrefixSpace=*/true, buffer);
} else {
buffer->append("(<unknown module>)");
}
@@ -193,6 +211,7 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
// Always strip the module name for %M.
RenderModuleLocation(buffer, StripModuleName(info->module),
info->module_offset, info->module_arch, "");
+ MaybeBuildIdToBuffer(*info, /*PrefixSpace=*/true, buffer);
} else {
buffer->append("(%p)", (void *)address);
}
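MaybeBuildIdToBuffer() above appends the module's build id as "(BuildId: <hex>)" whenever the frame's AddressInfo carries one, and the new %b placeholder requests it explicitly in stack_trace_format. Rendering is just hex-printing each byte, as in this standalone sketch (FormatBuildId is an illustrative name):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <string>

static std::string FormatBuildId(const uint8_t *uuid, size_t uuid_size) {
  std::string out = "(BuildId: ";
  char hex[3];
  for (size_t i = 0; i < uuid_size; ++i) {
    std::snprintf(hex, sizeof(hex), "%02x", uuid[i]);
    out += hex;
  }
  return out + ")";
}

int main() {
  const uint8_t id[] = {0xde, 0xad, 0xbe, 0xef};
  std::printf("%s\n", FormatBuildId(id, sizeof(id)).c_str());  // (BuildId: deadbeef)
}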
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_win.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_win.cpp
new file mode 100644
index 000000000000..e12b9e5bee06
--- /dev/null
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_win.cpp
@@ -0,0 +1,175 @@
+//===-- sanitizer_stoptheworld_win.cpp ------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// See sanitizer_stoptheworld.h for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_WINDOWS
+
+# define WIN32_LEAN_AND_MEAN
+# include <windows.h>
+// windows.h needs to be included before tlhelp32.h
+# include <tlhelp32.h>
+
+# include "sanitizer_stoptheworld.h"
+
+namespace __sanitizer {
+
+namespace {
+
+struct SuspendedThreadsListWindows final : public SuspendedThreadsList {
+ InternalMmapVector<HANDLE> threadHandles;
+ InternalMmapVector<DWORD> threadIds;
+
+ SuspendedThreadsListWindows() {
+ threadIds.reserve(1024);
+ threadHandles.reserve(1024);
+ }
+
+ PtraceRegistersStatus GetRegistersAndSP(uptr index,
+ InternalMmapVector<uptr> *buffer,
+ uptr *sp) const override;
+
+ tid_t GetThreadID(uptr index) const override;
+ uptr ThreadCount() const override;
+};
+
+// Stack Pointer register names on different architectures
+# if SANITIZER_X64
+# define SP_REG Rsp
+# elif SANITIZER_I386
+# define SP_REG Esp
+# elif SANITIZER_ARM | SANITIZER_ARM64
+# define SP_REG Sp
+# else
+# error Architecture not supported!
+# endif
+
+PtraceRegistersStatus SuspendedThreadsListWindows::GetRegistersAndSP(
+ uptr index, InternalMmapVector<uptr> *buffer, uptr *sp) const {
+ CHECK_LT(index, threadHandles.size());
+
+ buffer->resize(RoundUpTo(sizeof(CONTEXT), sizeof(uptr)) / sizeof(uptr));
+ CONTEXT *thread_context = reinterpret_cast<CONTEXT *>(buffer->data());
+ thread_context->ContextFlags = CONTEXT_ALL;
+ CHECK(GetThreadContext(threadHandles[index], thread_context));
+ *sp = thread_context->SP_REG;
+
+ return REGISTERS_AVAILABLE;
+}
+
+tid_t SuspendedThreadsListWindows::GetThreadID(uptr index) const {
+ CHECK_LT(index, threadIds.size());
+ return threadIds[index];
+}
+
+uptr SuspendedThreadsListWindows::ThreadCount() const {
+ return threadIds.size();
+}
+
+struct RunThreadArgs {
+ StopTheWorldCallback callback;
+ void *argument;
+};
+
+DWORD WINAPI RunThread(void *argument) {
+ RunThreadArgs *run_args = (RunThreadArgs *)argument;
+
+ const DWORD this_thread = GetCurrentThreadId();
+ const DWORD this_process = GetCurrentProcessId();
+
+ SuspendedThreadsListWindows suspended_threads_list;
+ bool new_thread_found;
+
+ do {
+ // Take a snapshot of all Threads
+ const HANDLE threads = CreateToolhelp32Snapshot(TH32CS_SNAPTHREAD, 0);
+ CHECK(threads != INVALID_HANDLE_VALUE);
+
+ THREADENTRY32 thread_entry;
+ thread_entry.dwSize = sizeof(thread_entry);
+ new_thread_found = false;
+
+ if (!Thread32First(threads, &thread_entry))
+ break;
+
+ do {
+ if (thread_entry.th32ThreadID == this_thread ||
+ thread_entry.th32OwnerProcessID != this_process)
+ continue;
+
+ bool suspended_thread = false;
+ for (const auto thread_id : suspended_threads_list.threadIds) {
+ if (thread_id == thread_entry.th32ThreadID) {
+ suspended_thread = true;
+ break;
+ }
+ }
+
+ // Skip the Thread if it was already suspended
+ if (suspended_thread)
+ continue;
+
+ const HANDLE thread =
+ OpenThread(THREAD_ALL_ACCESS, FALSE, thread_entry.th32ThreadID);
+ CHECK(thread);
+
+ if (SuspendThread(thread) == -1) {
+ DWORD last_error = GetLastError();
+
+ VPrintf(1, "Could not suspend thread %lu (error %lu)",
+ thread_entry.th32ThreadID, last_error);
+ continue;
+ }
+
+ suspended_threads_list.threadIds.push_back(thread_entry.th32ThreadID);
+ suspended_threads_list.threadHandles.push_back(thread);
+ new_thread_found = true;
+ } while (Thread32Next(threads, &thread_entry));
+
+ CloseHandle(threads);
+
+ // Between the call to `CreateToolhelp32Snapshot` and suspending the
+ // relevant Threads, new Threads could have potentially been created. So
+ // continue to find and suspend new Threads until we don't find any.
+ } while (new_thread_found);
+
+ // Now all Threads of this Process except this Thread should be suspended.
+ // Execute the callback function.
+ run_args->callback(suspended_threads_list, run_args->argument);
+
+ // Resume all Threads
+ for (const auto suspended_thread_handle :
+ suspended_threads_list.threadHandles) {
+ CHECK_NE(ResumeThread(suspended_thread_handle), -1);
+ CloseHandle(suspended_thread_handle);
+ }
+
+ return 0;
+}
+
+} // namespace
+
+void StopTheWorld(StopTheWorldCallback callback, void *argument) {
+ struct RunThreadArgs arg = {callback, argument};
+ DWORD trace_thread_id;
+
+ auto trace_thread =
+ CreateThread(nullptr, 0, RunThread, &arg, 0, &trace_thread_id);
+ CHECK(trace_thread);
+
+ WaitForSingleObject(trace_thread, INFINITE);
+ CloseHandle(trace_thread);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_WINDOWS
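StopTheWorld() here suspends every other thread of the process through the Toolhelp32 snapshot loop and then runs the callback on a dedicated thread. A hypothetical in-tree caller might look roughly like this; the callback body is illustrative and only uses the SuspendedThreadsList methods shown above:

#include "sanitizer_stoptheworld.h"

namespace __sanitizer {

// Illustrative callback: walk the suspended threads and fetch each one's
// registers and stack pointer, e.g. before scanning stacks for pointers.
static void ScanThreadsCallback(const SuspendedThreadsList &threads, void *arg) {
  (void)arg;
  for (uptr i = 0; i < threads.ThreadCount(); ++i) {
    InternalMmapVector<uptr> registers;
    uptr sp = 0;
    threads.GetRegistersAndSP(i, &registers, &sp);
    // ... inspect `registers` and the stack starting at `sp` ...
  }
}

// Typical use, while the process state must stay consistent:
//   StopTheWorld(ScanThreadsCallback, nullptr);

}  // namespace __sanitizer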
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.cpp
index 0c4b84c767aa..d3cffaa6eeff 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.cpp
@@ -11,10 +11,11 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_allocator_internal.h"
-#include "sanitizer_platform.h"
+#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_placement_new.h"
+#include "sanitizer_platform.h"
#include "sanitizer_symbolizer_internal.h"
namespace __sanitizer {
@@ -30,6 +31,7 @@ void AddressInfo::Clear() {
InternalFree(file);
internal_memset(this, 0, sizeof(AddressInfo));
function_offset = kUnknown;
+ uuid_size = 0;
}
void AddressInfo::FillModuleInfo(const char *mod_name, uptr mod_offset,
@@ -37,6 +39,16 @@ void AddressInfo::FillModuleInfo(const char *mod_name, uptr mod_offset,
module = internal_strdup(mod_name);
module_offset = mod_offset;
module_arch = mod_arch;
+ uuid_size = 0;
+}
+
+void AddressInfo::FillModuleInfo(const LoadedModule &mod) {
+ module = internal_strdup(mod.full_name());
+ module_offset = address - mod.base_address();
+ module_arch = mod.arch();
+ if (mod.uuid_size())
+ internal_memcpy(uuid, mod.uuid(), mod.uuid_size());
+ uuid_size = mod.uuid_size();
}
SymbolizedStack::SymbolizedStack() : next(nullptr), info() {}
@@ -126,10 +138,4 @@ Symbolizer::SymbolizerScope::~SymbolizerScope() {
sym_->end_hook_();
}
-void Symbolizer::LateInitializeTools() {
- for (auto &tool : tools_) {
- tool.LateInitialize();
- }
-}
-
} // namespace __sanitizer
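
For illustration only (not part of the patch), the invariant the new FillModuleInfo(const LoadedModule &) overload maintains, with made-up numbers:

// address       = 0x7f0000001234   (PC being symbolized)
// base_address  = 0x7f0000000000   (load address of the module)
// module_offset = address - base_address  = 0x1234
// module_base() = address - module_offset = 0x7f0000000000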
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.h b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.h
index 42bd157fa627..bad4761e345f 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.h
@@ -32,6 +32,8 @@ struct AddressInfo {
char *module;
uptr module_offset;
ModuleArch module_arch;
+ u8 uuid[kModuleUUIDSize];
+ uptr uuid_size;
static const uptr kUnknown = ~(uptr)0;
char *function;
@@ -45,6 +47,8 @@ struct AddressInfo {
// Deletes all strings and resets all fields.
void Clear();
void FillModuleInfo(const char *mod_name, uptr mod_offset, ModuleArch arch);
+ void FillModuleInfo(const LoadedModule &mod);
+ uptr module_base() const { return address - module_offset; }
};
// Linked list of symbolized frames (each frame is described by AddressInfo).
@@ -209,9 +213,6 @@ class Symbolizer final {
private:
const Symbolizer *sym_;
};
-
- // Calls `LateInitialize()` on all items in `tools_`.
- void LateInitializeTools();
};
#ifdef SANITIZER_WINDOWS
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h
index b8670941a05e..df122ed3425c 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h
@@ -70,11 +70,6 @@ class SymbolizerTool {
return nullptr;
}
- // Called during the LateInitialize phase of Sanitizer initialization.
- // Usually this is a safe place to call code that might need to use user
- // memory allocators.
- virtual void LateInitialize() {}
-
protected:
~SymbolizerTool() {}
};
@@ -91,7 +86,7 @@ class SymbolizerProcess {
~SymbolizerProcess() {}
/// The maximum number of arguments required to invoke a tool process.
- static const unsigned kArgVMax = 6;
+ static const unsigned kArgVMax = 16;
// Customizable by subclasses.
virtual bool StartSymbolizerSubprocess();
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp
index 3fc994fd3deb..8bbd4af0c7c2 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp
@@ -84,15 +84,12 @@ const char *ExtractTokenUpToDelimiter(const char *str, const char *delimiter,
SymbolizedStack *Symbolizer::SymbolizePC(uptr addr) {
Lock l(&mu_);
- const char *module_name = nullptr;
- uptr module_offset;
- ModuleArch arch;
SymbolizedStack *res = SymbolizedStack::New(addr);
- if (!FindModuleNameAndOffsetForAddress(addr, &module_name, &module_offset,
- &arch))
+ auto *mod = FindModuleForAddress(addr);
+ if (!mod)
return res;
// Always fill data about module name and offset.
- res->info.FillModuleInfo(module_name, module_offset, arch);
+ res->info.FillModuleInfo(*mod);
for (auto &tool : tools_) {
SymbolizerScope sym_scope(this);
if (tool.SymbolizePC(addr, res)) {
@@ -277,14 +274,17 @@ class LLVMSymbolizerProcess final : public SymbolizerProcess {
const char* const kSymbolizerArch = "--default-arch=unknown";
#endif
- const char *const inline_flag = common_flags()->symbolize_inline_frames
- ? "--inlines"
- : "--no-inlines";
+ const char *const demangle_flag =
+ common_flags()->demangle ? "--demangle" : "--no-demangle";
+ const char *const inline_flag =
+ common_flags()->symbolize_inline_frames ? "--inlines" : "--no-inlines";
int i = 0;
argv[i++] = path_to_binary;
+ argv[i++] = demangle_flag;
argv[i++] = inline_flag;
argv[i++] = kSymbolizerArch;
argv[i++] = nullptr;
+ CHECK_LE(i, kArgVMax);
}
};
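
Depending on common_flags(), the llvm-symbolizer command line built by GetArgV above now looks, for example, like one of the following (using the --default-arch=unknown value shown in this hunk; other targets pass a different arch flag):

llvm-symbolizer --demangle --inlines --default-arch=unknown
llvm-symbolizer --no-demangle --no-inlines --default-arch=unknown

The CHECK_LE(i, kArgVMax) at the end guards against overflowing the argv array now that kArgVMax was raised to 16.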
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp
index 5c25b28b5dc9..ac811c8a9136 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp
@@ -20,7 +20,6 @@
#include <dlfcn.h>
#include <errno.h>
-#include <mach/mach.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>
@@ -58,13 +57,6 @@ bool DlAddrSymbolizer::SymbolizeData(uptr addr, DataInfo *datainfo) {
return true;
}
-#define K_ATOS_ENV_VAR "__check_mach_ports_lookup"
-
-// This cannot live in `AtosSymbolizerProcess` because instances of that object
-// are allocated by the internal allocator which under ASan is poisoned with
-// kAsanInternalHeapMagic.
-static char kAtosMachPortEnvEntry[] = K_ATOS_ENV_VAR "=000000000000000";
-
class AtosSymbolizerProcess final : public SymbolizerProcess {
public:
explicit AtosSymbolizerProcess(const char *path)
@@ -72,51 +64,13 @@ class AtosSymbolizerProcess final : public SymbolizerProcess {
pid_str_[0] = '\0';
}
- void LateInitialize() {
- if (SANITIZER_IOSSIM) {
- // `putenv()` may call malloc/realloc so it is only safe to do this
- // during LateInitialize() or later (i.e. we can't do this in the
- // constructor). We also can't do this in `StartSymbolizerSubprocess()`
- // because in TSan we switch allocators when we're symbolizing.
- // We use `putenv()` rather than `setenv()` so that we can later directly
- // write into the storage without LibC getting involved to change what the
- // variable is set to
- int result = putenv(kAtosMachPortEnvEntry);
- CHECK_EQ(result, 0);
- }
- }
-
private:
bool StartSymbolizerSubprocess() override {
- // Configure sandbox before starting atos process.
-
// Put the string command line argument in the object so that it outlives
// the call to GetArgV.
- internal_snprintf(pid_str_, sizeof(pid_str_), "%d", internal_getpid());
-
- if (SANITIZER_IOSSIM) {
- // `atos` in the simulator is restricted in its ability to retrieve the
- // task port for the target process (us) so we need to do extra work
- // to pass our task port to it.
- mach_port_t ports[]{mach_task_self()};
- kern_return_t ret =
- mach_ports_register(mach_task_self(), ports, /*count=*/1);
- CHECK_EQ(ret, KERN_SUCCESS);
-
- // Set environment variable that signals to `atos` that it should look
- // for our task port. We can't call `setenv()` here because it might call
- // malloc/realloc. To avoid that we instead update the
- // `mach_port_env_var_entry_` variable with our current PID.
- uptr count = internal_snprintf(kAtosMachPortEnvEntry,
- sizeof(kAtosMachPortEnvEntry),
- K_ATOS_ENV_VAR "=%s", pid_str_);
- CHECK_GE(count, sizeof(K_ATOS_ENV_VAR) + internal_strlen(pid_str_));
- // Document our assumption but without calling `getenv()` in normal
- // builds.
- DCHECK(getenv(K_ATOS_ENV_VAR));
- DCHECK_EQ(internal_strcmp(getenv(K_ATOS_ENV_VAR), pid_str_), 0);
- }
+ internal_snprintf(pid_str_, sizeof(pid_str_), "%d", (int)internal_getpid());
+ // Configure sandbox before starting atos process.
return SymbolizerProcess::StartSymbolizerSubprocess();
}
@@ -137,13 +91,10 @@ class AtosSymbolizerProcess final : public SymbolizerProcess {
argv[i++] = "-d";
}
argv[i++] = nullptr;
+ CHECK_LE(i, kArgVMax);
}
char pid_str_[16];
- // Space for `\0` in `K_ATOS_ENV_VAR` is reused for `=`.
- static_assert(sizeof(kAtosMachPortEnvEntry) ==
- (sizeof(K_ATOS_ENV_VAR) + sizeof(pid_str_)),
- "sizes should match");
};
#undef K_ATOS_ENV_VAR
@@ -249,8 +200,6 @@ bool AtosSymbolizer::SymbolizeData(uptr addr, DataInfo *info) {
return true;
}
-void AtosSymbolizer::LateInitialize() { process_->LateInitialize(); }
-
} // namespace __sanitizer
#endif // SANITIZER_MAC
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.h b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.h
index 401d30fa5033..d5abe9d98c1f 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.h
@@ -35,7 +35,6 @@ class AtosSymbolizer final : public SymbolizerTool {
bool SymbolizePC(uptr addr, SymbolizedStack *stack) override;
bool SymbolizeData(uptr addr, DataInfo *info) override;
- void LateInitialize() override;
private:
AtosSymbolizerProcess *process_;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp
index 9a5b4a8c54c7..1ec0c5cad7a2 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp
@@ -100,9 +100,7 @@ Symbolizer *Symbolizer::PlatformInit() {
return new (symbolizer_allocator_) Symbolizer({});
}
-void Symbolizer::LateInitialize() {
- Symbolizer::GetOrInit()->LateInitializeTools();
-}
+void Symbolizer::LateInitialize() { Symbolizer::GetOrInit(); }
void StartReportDeadlySignal() {}
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp
index 4cd4b4636f0a..5f6e4cc3180e 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp
@@ -213,9 +213,14 @@ class Addr2LineProcess final : public SymbolizerProcess {
const char *(&argv)[kArgVMax]) const override {
int i = 0;
argv[i++] = path_to_binary;
- argv[i++] = "-iCfe";
+ if (common_flags()->demangle)
+ argv[i++] = "-C";
+ if (common_flags()->symbolize_inline_frames)
+ argv[i++] = "-i";
+ argv[i++] = "-fe";
argv[i++] = module_name_;
argv[i++] = nullptr;
+ CHECK_LE(i, kArgVMax);
}
bool ReachedEndOfOutput(const char *buffer, uptr length) const override;
@@ -312,37 +317,42 @@ class Addr2LinePool final : public SymbolizerTool {
FIRST_32_SECOND_64(UINT32_MAX, UINT64_MAX);
};
-#if SANITIZER_SUPPORTS_WEAK_HOOKS
+# if SANITIZER_SUPPORTS_WEAK_HOOKS
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool
__sanitizer_symbolize_code(const char *ModuleName, u64 ModuleOffset,
- char *Buffer, int MaxLength,
- bool SymbolizeInlineFrames);
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-bool __sanitizer_symbolize_data(const char *ModuleName, u64 ModuleOffset,
- char *Buffer, int MaxLength);
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_symbolize_flush();
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-int __sanitizer_symbolize_demangle(const char *Name, char *Buffer,
- int MaxLength);
+ char *Buffer, int MaxLength);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool
+__sanitizer_symbolize_data(const char *ModuleName, u64 ModuleOffset,
+ char *Buffer, int MaxLength);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_symbolize_flush();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE int
+__sanitizer_symbolize_demangle(const char *Name, char *Buffer, int MaxLength);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool
+__sanitizer_symbolize_set_demangle(bool Demangle);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool
+__sanitizer_symbolize_set_inline_frames(bool InlineFrames);
} // extern "C"
class InternalSymbolizer final : public SymbolizerTool {
public:
static InternalSymbolizer *get(LowLevelAllocator *alloc) {
- if (__sanitizer_symbolize_code != 0 &&
- __sanitizer_symbolize_data != 0) {
- return new(*alloc) InternalSymbolizer();
- }
+ if (__sanitizer_symbolize_set_demangle)
+ CHECK(__sanitizer_symbolize_set_demangle(common_flags()->demangle));
+ if (__sanitizer_symbolize_set_inline_frames)
+ CHECK(__sanitizer_symbolize_set_inline_frames(
+ common_flags()->symbolize_inline_frames));
+ if (__sanitizer_symbolize_code && __sanitizer_symbolize_data)
+ return new (*alloc) InternalSymbolizer();
return 0;
}
bool SymbolizePC(uptr addr, SymbolizedStack *stack) override {
bool result = __sanitizer_symbolize_code(
- stack->info.module, stack->info.module_offset, buffer_, kBufferSize,
- common_flags()->symbolize_inline_frames);
- if (result) ParseSymbolizePCOutput(buffer_, stack);
+ stack->info.module, stack->info.module_offset, buffer_, kBufferSize);
+ if (result)
+ ParseSymbolizePCOutput(buffer_, stack);
return result;
}
@@ -365,7 +375,7 @@ class InternalSymbolizer final : public SymbolizerTool {
if (__sanitizer_symbolize_demangle) {
for (uptr res_length = 1024;
res_length <= InternalSizeClassMap::kMaxSize;) {
- char *res_buff = static_cast<char*>(InternalAlloc(res_length));
+ char *res_buff = static_cast<char *>(InternalAlloc(res_length));
uptr req_length =
__sanitizer_symbolize_demangle(name, res_buff, res_length);
if (req_length > res_length) {
@@ -380,19 +390,19 @@ class InternalSymbolizer final : public SymbolizerTool {
}
private:
- InternalSymbolizer() { }
+ InternalSymbolizer() {}
static const int kBufferSize = 16 * 1024;
char buffer_[kBufferSize];
};
-#else // SANITIZER_SUPPORTS_WEAK_HOOKS
+# else // SANITIZER_SUPPORTS_WEAK_HOOKS
class InternalSymbolizer final : public SymbolizerTool {
public:
static InternalSymbolizer *get(LowLevelAllocator *alloc) { return 0; }
};
-#endif // SANITIZER_SUPPORTS_WEAK_HOOKS
+# endif // SANITIZER_SUPPORTS_WEAK_HOOKS
const char *Symbolizer::PlatformDemangle(const char *name) {
return DemangleSwiftAndCXX(name);
@@ -492,7 +502,7 @@ Symbolizer *Symbolizer::PlatformInit() {
}
void Symbolizer::LateInitialize() {
- Symbolizer::GetOrInit()->LateInitializeTools();
+ Symbolizer::GetOrInit();
InitializeSwiftDemangler();
}
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp
index 702d901353db..c647ab107ec5 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp
@@ -318,7 +318,7 @@ Symbolizer *Symbolizer::PlatformInit() {
}
void Symbolizer::LateInitialize() {
- Symbolizer::GetOrInit()->LateInitializeTools();
+ Symbolizer::GetOrInit();
}
} // namespace __sanitizer
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.cpp
index 2e1cd0238812..278f6defca95 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.cpp
@@ -110,7 +110,7 @@ ThreadRegistry::ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
max_threads_(max_threads),
thread_quarantine_size_(thread_quarantine_size),
max_reuse_(max_reuse),
- mtx_(),
+ mtx_(MutexThreadRegistry),
total_threads_(0),
alive_threads_(0),
max_alive_threads_(0),
@@ -365,4 +365,20 @@ void ThreadRegistry::SetThreadUserId(u32 tid, uptr user_id) {
CHECK(live_.try_emplace(user_id, tctx->tid).second);
}
+u32 ThreadRegistry::OnFork(u32 tid) {
+ ThreadRegistryLock l(this);
+  // We only purge the user_id (pthread_t) of live threads because stale
+  // values cause CHECK failures if new threads with a matching pthread_t
+  // are created after fork.
+ // Potentially we could purge more info (ThreadContextBase themselves),
+ // but it's hard to test and easy to introduce new issues by doing this.
+ for (auto *tctx : threads_) {
+ if (tctx->tid == tid || !tctx->user_id)
+ continue;
+ CHECK(live_.erase(tctx->user_id));
+ tctx->user_id = 0;
+ }
+ return alive_threads_;
+}
+
} // namespace __sanitizer
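
A sketch (not part of the patch) of how a runtime's pthread_atfork child handler might use the new OnFork(); registry and current_tid are placeholder names for the runtime's own state:

void ForkChildAfter(ThreadRegistry &registry, u32 current_tid) {
  // Stale pthread_t values inherited from the parent would otherwise collide
  // with user_ids of threads created after fork and trip the CHECK in
  // SetThreadUserId.
  u32 alive_before_fork = registry.OnFork(current_tid);
  (void)alive_before_fork;  // callers may log or ignore this
}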
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h b/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
index a259b324220f..9975d78ec0bb 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
@@ -104,6 +104,8 @@ class MUTEX ThreadRegistry {
return threads_.empty() ? nullptr : threads_[tid];
}
+ u32 NumThreadsLocked() const { return threads_.size(); }
+
u32 CreateThread(uptr user_id, bool detached, u32 parent_tid, void *arg);
typedef void (*ThreadCallback)(ThreadContextBase *tctx, void *arg);
@@ -131,6 +133,11 @@ class MUTEX ThreadRegistry {
u32 ConsumeThreadUserId(uptr user_id);
void SetThreadUserId(u32 tid, uptr user_id);
+ // OnFork must be called in the child process after fork to purge old
+ // threads that don't exist anymore (except for the current thread tid).
+  // Returns the number of alive threads before fork.
+ u32 OnFork(u32 tid);
+
private:
const ThreadContextFactory context_factory_;
const u32 max_threads_;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
index 1a31ce02af4c..cfe6cc2b394b 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
@@ -16,7 +16,6 @@
#define WIN32_LEAN_AND_MEAN
#define NOGDI
-#include <direct.h>
#include <windows.h>
#include <io.h>
#include <psapi.h>
@@ -571,7 +570,9 @@ void Abort() {
internal__exit(3);
}
-bool CreateDir(const char *pathname) { return _mkdir(pathname) == 0; }
+bool CreateDir(const char *pathname) {
+ return CreateDirectoryA(pathname, nullptr) != 0;
+}
#if !SANITIZER_GO
// Read the file to extract the ImageBase field from the PE header. If ASLR is
diff --git a/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp b/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp
index 3809880d50b4..80cab36426c5 100644
--- a/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp
+++ b/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp
@@ -17,10 +17,17 @@
#include "llvm/DebugInfo/Symbolize/DIPrinter.h"
#include "llvm/DebugInfo/Symbolize/Symbolize.h"
+static llvm::symbolize::LLVMSymbolizer *Symbolizer = nullptr;
+static bool Demangle = true;
+static bool InlineFrames = true;
+
static llvm::symbolize::LLVMSymbolizer *getDefaultSymbolizer() {
- static llvm::symbolize::LLVMSymbolizer *DefaultSymbolizer =
- new llvm::symbolize::LLVMSymbolizer();
- return DefaultSymbolizer;
+ if (Symbolizer)
+ return Symbolizer;
+ llvm::symbolize::LLVMSymbolizer::Options Opts;
+ Opts.Demangle = Demangle;
+ Symbolizer = new llvm::symbolize::LLVMSymbolizer(Opts);
+ return Symbolizer;
}
static llvm::symbolize::PrinterConfig getDefaultPrinterConfig() {
@@ -43,8 +50,7 @@ extern "C" {
typedef uint64_t u64;
bool __sanitizer_symbolize_code(const char *ModuleName, uint64_t ModuleOffset,
- char *Buffer, int MaxLength,
- bool SymbolizeInlineFrames) {
+ char *Buffer, int MaxLength) {
std::string Result;
{
llvm::raw_string_ostream OS(Result);
@@ -55,7 +61,7 @@ bool __sanitizer_symbolize_code(const char *ModuleName, uint64_t ModuleOffset,
// TODO: it is necessary to set proper SectionIndex here.
// object::SectionedAddress::UndefSection works for only absolute addresses.
- if (SymbolizeInlineFrames) {
+ if (InlineFrames) {
auto ResOrErr = getDefaultSymbolizer()->symbolizeInlinedCode(
ModuleName,
{ModuleOffset, llvm::object::SectionedAddress::UndefSection});
@@ -93,7 +99,10 @@ bool __sanitizer_symbolize_data(const char *ModuleName, uint64_t ModuleOffset,
Result.c_str()) < MaxLength;
}
-void __sanitizer_symbolize_flush() { getDefaultSymbolizer()->flush(); }
+void __sanitizer_symbolize_flush() {
+ if (Symbolizer)
+ Symbolizer->flush();
+}
int __sanitizer_symbolize_demangle(const char *Name, char *Buffer,
int MaxLength) {
@@ -105,6 +114,19 @@ int __sanitizer_symbolize_demangle(const char *Name, char *Buffer,
: 0;
}
+bool __sanitizer_symbolize_set_demangle(bool Value) {
+ // Must be called before LLVMSymbolizer created.
+ if (Symbolizer)
+ return false;
+ Demangle = Value;
+ return true;
+}
+
+bool __sanitizer_symbolize_set_inline_frames(bool Value) {
+ InlineFrames = Value;
+ return true;
+}
+
// Override __cxa_atexit and ignore callbacks.
// This prevents crashes in a configuration when the symbolizer
// is built into sanitizer runtime and consequently into the test process.
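
An ordering sketch (not part of the patch) for the new configuration hooks: __sanitizer_symbolize_set_demangle only takes effect before the LLVMSymbolizer singleton is created, i.e. before the first symbolization request. The module path and offset below are made-up example values.

#include <cstdint>

extern "C" {
bool __sanitizer_symbolize_set_demangle(bool Demangle);
bool __sanitizer_symbolize_set_inline_frames(bool InlineFrames);
bool __sanitizer_symbolize_code(const char *ModuleName, uint64_t ModuleOffset,
                                char *Buffer, int MaxLength);
}

void ConfigureThenSymbolize() {
  // Both setters succeed: the symbolizer does not exist yet.
  __sanitizer_symbolize_set_demangle(false);
  __sanitizer_symbolize_set_inline_frames(false);

  char buf[1024];
  __sanitizer_symbolize_code("/usr/lib/libexample.so", 0x1234, buf,
                             (int)sizeof(buf));

  // The call above constructed the LLVMSymbolizer, so this now returns false
  // and the demangling setting stays as it was.
  __sanitizer_symbolize_set_demangle(true);
}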
diff --git a/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh b/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
index d1d61fb7ab2a..599f063b45c9 100755
--- a/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
+++ b/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
@@ -143,7 +143,7 @@ if [[ ! -d ${LLVM_BUILD} ]]; then
$LLVM_SRC
fi
cd ${LLVM_BUILD}
-ninja LLVMSymbolize LLVMObject LLVMBinaryFormat LLVMDebugInfoDWARF LLVMSupport LLVMDebugInfoPDB LLVMMC LLVMDemangle LLVMTextAPI
+ninja LLVMSymbolize LLVMObject LLVMBinaryFormat LLVMDebugInfoDWARF LLVMSupport LLVMDebugInfoPDB LLVMDebuginfod LLVMMC LLVMDemangle LLVMTextAPI
cd ${BUILD_DIR}
rm -rf ${SYMBOLIZER_BUILD}
@@ -155,7 +155,12 @@ SYMBOLIZER_FLAGS="$LLVM_FLAGS -I${LLVM_SRC}/include -I${LLVM_BUILD}/include -std
$CXX $SYMBOLIZER_FLAGS ${SRC_DIR}/sanitizer_symbolize.cpp ${SRC_DIR}/sanitizer_wrappers.cpp -c
$AR rc symbolizer.a sanitizer_symbolize.o sanitizer_wrappers.o
-SYMBOLIZER_API_LIST=__sanitizer_symbolize_code,__sanitizer_symbolize_data,__sanitizer_symbolize_flush,__sanitizer_symbolize_demangle
+SYMBOLIZER_API_LIST=__sanitizer_symbolize_code
+SYMBOLIZER_API_LIST+=,__sanitizer_symbolize_data
+SYMBOLIZER_API_LIST+=,__sanitizer_symbolize_flush
+SYMBOLIZER_API_LIST+=,__sanitizer_symbolize_demangle
+SYMBOLIZER_API_LIST+=,__sanitizer_symbolize_set_demangle
+SYMBOLIZER_API_LIST+=,__sanitizer_symbolize_set_inline_frames
LIBCXX_ARCHIVE_DIR=$(dirname $(find $LIBCXX_BUILD -name libc++.a | head -n1))
@@ -170,6 +175,7 @@ $SCRIPT_DIR/ar_to_bc.sh $LIBCXX_ARCHIVE_DIR/libc++.a \
$LLVM_BUILD/lib/libLLVMDebugInfoPDB.a \
$LLVM_BUILD/lib/libLLVMDebugInfoMSF.a \
$LLVM_BUILD/lib/libLLVMDebugInfoCodeView.a \
+ $LLVM_BUILD/lib/libLLVMDebuginfod.a \
$LLVM_BUILD/lib/libLLVMDemangle.a \
$LLVM_BUILD/lib/libLLVMMC.a \
$LLVM_BUILD/lib/libLLVMTextAPI.a \
diff --git a/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt b/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt
index 29b2960e11fe..0bb38ba951a8 100644
--- a/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt
+++ b/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt
@@ -38,6 +38,8 @@ __sanitizer_symbolize_code T
__sanitizer_symbolize_data T
__sanitizer_symbolize_demangle T
__sanitizer_symbolize_flush T
+__sanitizer_symbolize_set_demangle T
+__sanitizer_symbolize_set_inline_frames T
__strdup U
__udivdi3 U
__umoddi3 U
@@ -51,8 +53,8 @@ catgets U
catopen U
ceil U
ceilf U
-clock_gettime U
cfgetospeed U
+clock_gettime U
dl_iterate_phdr U
dlsym U
dup U
@@ -76,15 +78,17 @@ getcwd U
getenv U
getpagesize U
getpid U
+getpwuid U
getrlimit U
gettimeofday U
+getuid U
ioctl U
isalnum U
isalpha U
isatty U
islower U
-isspace U
isprint U
+isspace U
isupper U
isxdigit U
log10 U
@@ -111,12 +115,17 @@ posix_spawn_file_actions_addopen U
posix_spawn_file_actions_destroy U
posix_spawn_file_actions_init U
qsort U
+raise U
rand U
readlink U
realloc U
remove U
+rename U
setrlimit U
setvbuf U
+sigaction U
+sigaltstack U
+sigemptyset U
sigfillset U
sigprocmask U
snprintf U
@@ -146,6 +155,7 @@ strtold_l U
strtoll_l U
strtoull_l U
syscall U
+sysconf U
tcgetattr U
uname U
ungetc U
diff --git a/compiler-rt/lib/sanitizer_common/weak_symbols.txt b/compiler-rt/lib/sanitizer_common/weak_symbols.txt
index 5a2b275184f4..d07f81bc8c12 100644
--- a/compiler-rt/lib/sanitizer_common/weak_symbols.txt
+++ b/compiler-rt/lib/sanitizer_common/weak_symbols.txt
@@ -6,3 +6,5 @@ ___sanitizer_symbolize_code
___sanitizer_symbolize_data
___sanitizer_symbolize_demangle
___sanitizer_symbolize_flush
+___sanitizer_symbolize_set_demangle
+___sanitizer_symbolize_set_inline_frames
diff --git a/compiler-rt/lib/tsan/go/tsan_go.cpp b/compiler-rt/lib/tsan/go/tsan_go.cpp
index 104c5b325aee..c689a51fb5e1 100644
--- a/compiler-rt/lib/tsan/go/tsan_go.cpp
+++ b/compiler-rt/lib/tsan/go/tsan_go.cpp
@@ -214,7 +214,7 @@ void __tsan_malloc(ThreadState *thr, uptr pc, uptr p, uptr sz) {
}
void __tsan_free(uptr p, uptr sz) {
- ctx->metamap.FreeRange(get_cur_proc(), p, sz);
+ ctx->metamap.FreeRange(get_cur_proc(), p, sz, false);
}
void __tsan_go_start(ThreadState *parent, ThreadState **pthr, void *pc) {
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan.syms.extra b/compiler-rt/lib/tsan/rtl-old/tsan.syms.extra
new file mode 100644
index 000000000000..4838bb0a7279
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan.syms.extra
@@ -0,0 +1,31 @@
+__tsan_init
+__tsan_flush_memory
+__tsan_read*
+__tsan_write*
+__tsan_vptr*
+__tsan_func*
+__tsan_atomic*
+__tsan_java*
+__tsan_unaligned*
+__tsan_release
+__tsan_acquire
+__tsan_mutex_create
+__tsan_mutex_destroy
+__tsan_mutex_pre_lock
+__tsan_mutex_post_lock
+__tsan_mutex_pre_unlock
+__tsan_mutex_post_unlock
+__tsan_mutex_pre_signal
+__tsan_mutex_post_signal
+__tsan_mutex_pre_divert
+__tsan_mutex_post_divert
+__tsan_get_current_fiber
+__tsan_create_fiber
+__tsan_destroy_fiber
+__tsan_switch_to_fiber
+__tsan_set_fiber_name
+__ubsan_*
+Annotate*
+WTFAnnotate*
+RunningOnValgrind
+ValgrindSlowdown
diff --git a/compiler-rt/lib/tsan/rtl/tsan_clock.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_clock.cpp
index d122b67c0aaa..d122b67c0aaa 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_clock.cpp
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_clock.cpp
diff --git a/compiler-rt/lib/tsan/rtl/tsan_clock.h b/compiler-rt/lib/tsan/rtl-old/tsan_clock.h
index 11cbc0c0b86b..11cbc0c0b86b 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_clock.h
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_clock.h
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_debugging.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_debugging.cpp
new file mode 100644
index 000000000000..1d3c3849a446
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_debugging.cpp
@@ -0,0 +1,262 @@
+//===-- tsan_debugging.cpp ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// TSan debugging API implementation.
+//===----------------------------------------------------------------------===//
+#include "tsan_interface.h"
+#include "tsan_report.h"
+#include "tsan_rtl.h"
+
+#include "sanitizer_common/sanitizer_stackdepot.h"
+
+using namespace __tsan;
+
+static const char *ReportTypeDescription(ReportType typ) {
+ switch (typ) {
+ case ReportTypeRace: return "data-race";
+ case ReportTypeVptrRace: return "data-race-vptr";
+ case ReportTypeUseAfterFree: return "heap-use-after-free";
+ case ReportTypeVptrUseAfterFree: return "heap-use-after-free-vptr";
+ case ReportTypeExternalRace: return "external-race";
+ case ReportTypeThreadLeak: return "thread-leak";
+ case ReportTypeMutexDestroyLocked: return "locked-mutex-destroy";
+ case ReportTypeMutexDoubleLock: return "mutex-double-lock";
+ case ReportTypeMutexInvalidAccess: return "mutex-invalid-access";
+ case ReportTypeMutexBadUnlock: return "mutex-bad-unlock";
+ case ReportTypeMutexBadReadLock: return "mutex-bad-read-lock";
+ case ReportTypeMutexBadReadUnlock: return "mutex-bad-read-unlock";
+ case ReportTypeSignalUnsafe: return "signal-unsafe-call";
+ case ReportTypeErrnoInSignal: return "errno-in-signal-handler";
+ case ReportTypeDeadlock: return "lock-order-inversion";
+ // No default case so compiler warns us if we miss one
+ }
+ UNREACHABLE("missing case");
+}
+
+static const char *ReportLocationTypeDescription(ReportLocationType typ) {
+ switch (typ) {
+ case ReportLocationGlobal: return "global";
+ case ReportLocationHeap: return "heap";
+ case ReportLocationStack: return "stack";
+ case ReportLocationTLS: return "tls";
+ case ReportLocationFD: return "fd";
+ // No default case so compiler warns us if we miss one
+ }
+ UNREACHABLE("missing case");
+}
+
+static void CopyTrace(SymbolizedStack *first_frame, void **trace,
+ uptr trace_size) {
+ uptr i = 0;
+ for (SymbolizedStack *frame = first_frame; frame != nullptr;
+ frame = frame->next) {
+ trace[i++] = (void *)frame->info.address;
+ if (i >= trace_size) break;
+ }
+}
+
+// Meant to be called by the debugger.
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_get_current_report() {
+ return const_cast<ReportDesc*>(cur_thread()->current_report);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_data(void *report, const char **description, int *count,
+ int *stack_count, int *mop_count, int *loc_count,
+ int *mutex_count, int *thread_count,
+ int *unique_tid_count, void **sleep_trace,
+ uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ *description = ReportTypeDescription(rep->typ);
+ *count = rep->count;
+ *stack_count = rep->stacks.Size();
+ *mop_count = rep->mops.Size();
+ *loc_count = rep->locs.Size();
+ *mutex_count = rep->mutexes.Size();
+ *thread_count = rep->threads.Size();
+ *unique_tid_count = rep->unique_tids.Size();
+ if (rep->sleep) CopyTrace(rep->sleep->frames, sleep_trace, trace_size);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_tag(void *report, uptr *tag) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ *tag = rep->tag;
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_stack(void *report, uptr idx, void **trace,
+ uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->stacks.Size());
+ ReportStack *stack = rep->stacks[idx];
+ if (stack) CopyTrace(stack->frames, trace, trace_size);
+ return stack ? 1 : 0;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_mop(void *report, uptr idx, int *tid, void **addr,
+ int *size, int *write, int *atomic, void **trace,
+ uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->mops.Size());
+ ReportMop *mop = rep->mops[idx];
+ *tid = mop->tid;
+ *addr = (void *)mop->addr;
+ *size = mop->size;
+ *write = mop->write ? 1 : 0;
+ *atomic = mop->atomic ? 1 : 0;
+ if (mop->stack) CopyTrace(mop->stack->frames, trace, trace_size);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_loc(void *report, uptr idx, const char **type,
+ void **addr, uptr *start, uptr *size, int *tid,
+ int *fd, int *suppressable, void **trace,
+ uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->locs.Size());
+ ReportLocation *loc = rep->locs[idx];
+ *type = ReportLocationTypeDescription(loc->type);
+ *addr = (void *)loc->global.start;
+ *start = loc->heap_chunk_start;
+ *size = loc->heap_chunk_size;
+ *tid = loc->tid;
+ *fd = loc->fd;
+ *suppressable = loc->suppressable;
+ if (loc->stack) CopyTrace(loc->stack->frames, trace, trace_size);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_loc_object_type(void *report, uptr idx,
+ const char **object_type) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->locs.Size());
+ ReportLocation *loc = rep->locs[idx];
+ *object_type = GetObjectTypeFromTag(loc->external_tag);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_mutex(void *report, uptr idx, uptr *mutex_id, void **addr,
+ int *destroyed, void **trace, uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->mutexes.Size());
+ ReportMutex *mutex = rep->mutexes[idx];
+ *mutex_id = mutex->id;
+ *addr = (void *)mutex->addr;
+ *destroyed = mutex->destroyed;
+ if (mutex->stack) CopyTrace(mutex->stack->frames, trace, trace_size);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_thread(void *report, uptr idx, int *tid, tid_t *os_id,
+ int *running, const char **name, int *parent_tid,
+ void **trace, uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->threads.Size());
+ ReportThread *thread = rep->threads[idx];
+ *tid = thread->id;
+ *os_id = thread->os_id;
+ *running = thread->running;
+ *name = thread->name;
+ *parent_tid = thread->parent_tid;
+ if (thread->stack) CopyTrace(thread->stack->frames, trace, trace_size);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_unique_tid(void *report, uptr idx, int *tid) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->unique_tids.Size());
+ *tid = rep->unique_tids[idx];
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+const char *__tsan_locate_address(uptr addr, char *name, uptr name_size,
+ uptr *region_address_ptr,
+ uptr *region_size_ptr) {
+ uptr region_address = 0;
+ uptr region_size = 0;
+ const char *region_kind = nullptr;
+ if (name && name_size > 0) name[0] = 0;
+
+ if (IsMetaMem(reinterpret_cast<u32 *>(addr))) {
+ region_kind = "meta shadow";
+ } else if (IsShadowMem(reinterpret_cast<RawShadow *>(addr))) {
+ region_kind = "shadow";
+ } else {
+ bool is_stack = false;
+ MBlock *b = 0;
+ Allocator *a = allocator();
+ if (a->PointerIsMine((void *)addr)) {
+ void *block_begin = a->GetBlockBegin((void *)addr);
+ if (block_begin) b = ctx->metamap.GetBlock((uptr)block_begin);
+ }
+
+ if (b != 0) {
+ region_address = (uptr)allocator()->GetBlockBegin((void *)addr);
+ region_size = b->siz;
+ region_kind = "heap";
+ } else {
+ // TODO(kuba.brecka): We should not lock. This is supposed to be called
+ // from within the debugger when other threads are stopped.
+ ctx->thread_registry.Lock();
+ ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack);
+ ctx->thread_registry.Unlock();
+ if (tctx) {
+ region_kind = is_stack ? "stack" : "tls";
+ } else {
+ region_kind = "global";
+ DataInfo info;
+ if (Symbolizer::GetOrInit()->SymbolizeData(addr, &info)) {
+ internal_strncpy(name, info.name, name_size);
+ region_address = info.start;
+ region_size = info.size;
+ }
+ }
+ }
+ }
+
+ CHECK(region_kind);
+ if (region_address_ptr) *region_address_ptr = region_address;
+ if (region_size_ptr) *region_size_ptr = region_size;
+ return region_kind;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_alloc_stack(uptr addr, uptr *trace, uptr size, int *thread_id,
+ tid_t *os_id) {
+ MBlock *b = 0;
+ Allocator *a = allocator();
+ if (a->PointerIsMine((void *)addr)) {
+ void *block_begin = a->GetBlockBegin((void *)addr);
+ if (block_begin) b = ctx->metamap.GetBlock((uptr)block_begin);
+ }
+ if (b == 0) return 0;
+
+ *thread_id = b->tid;
+ // No locking. This is supposed to be called from within the debugger when
+ // other threads are stopped.
+ ThreadContextBase *tctx = ctx->thread_registry.GetThreadLocked(b->tid);
+ *os_id = tctx->os_id;
+
+ StackTrace stack = StackDepotGet(b->stk);
+ size = Min(size, (uptr)stack.size);
+ for (uptr i = 0; i < size; i++) trace[i] = stack.trace[stack.size - i - 1];
+ return size;
+}
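
A sketch (not part of the patch) of reading the current report through the debugging API above, assuming the declarations from tsan_interface.h are in scope:

#include <cstdio>

void DumpCurrentReport() {
  void *rep = __tsan_get_current_report();
  if (!rep) return;
  const char *desc = nullptr;
  int count = 0, stacks = 0, mops = 0, locs = 0, mutexes = 0, threads = 0,
      unique_tids = 0;
  void *sleep_trace[16] = {};
  __tsan_get_report_data(rep, &desc, &count, &stacks, &mops, &locs, &mutexes,
                         &threads, &unique_tids, sleep_trace, 16);
  std::printf("report: %s (%d stacks, %d memory accesses, %d threads)\n",
              desc, stacks, mops, threads);
}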
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_defs.h b/compiler-rt/lib/tsan/rtl-old/tsan_defs.h
new file mode 100644
index 000000000000..4712c2be1813
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_defs.h
@@ -0,0 +1,236 @@
+//===-- tsan_defs.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_DEFS_H
+#define TSAN_DEFS_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+#include "ubsan/ubsan_platform.h"
+
+#ifndef TSAN_VECTORIZE
+# define TSAN_VECTORIZE __SSE4_2__
+#endif
+
+#if TSAN_VECTORIZE
+// <emmintrin.h> transitively includes <stdlib.h>,
+// and it's prohibited to include std headers into tsan runtime.
+// So we do this dirty trick.
+# define _MM_MALLOC_H_INCLUDED
+# define __MM_MALLOC_H
+# include <emmintrin.h>
+# include <smmintrin.h>
+# define VECTOR_ALIGNED ALIGNED(16)
+typedef __m128i m128;
+#else
+# define VECTOR_ALIGNED
+#endif
+
+// Setup defaults for compile definitions.
+#ifndef TSAN_NO_HISTORY
+# define TSAN_NO_HISTORY 0
+#endif
+
+#ifndef TSAN_CONTAINS_UBSAN
+# if CAN_SANITIZE_UB && !SANITIZER_GO
+# define TSAN_CONTAINS_UBSAN 1
+# else
+# define TSAN_CONTAINS_UBSAN 0
+# endif
+#endif
+
+namespace __tsan {
+
+constexpr uptr kByteBits = 8;
+
+// Thread slot ID.
+enum class Sid : u8 {};
+constexpr uptr kThreadSlotCount = 256;
+constexpr Sid kFreeSid = static_cast<Sid>(255);
+
+// Abstract time unit, vector clock element.
+enum class Epoch : u16 {};
+constexpr uptr kEpochBits = 14;
+constexpr Epoch kEpochZero = static_cast<Epoch>(0);
+constexpr Epoch kEpochOver = static_cast<Epoch>(1 << kEpochBits);
+
+const int kClkBits = 42;
+const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1;
+
+struct ClockElem {
+ u64 epoch : kClkBits;
+ u64 reused : 64 - kClkBits; // tid reuse count
+};
+
+struct ClockBlock {
+ static const uptr kSize = 512;
+ static const uptr kTableSize = kSize / sizeof(u32);
+ static const uptr kClockCount = kSize / sizeof(ClockElem);
+ static const uptr kRefIdx = kTableSize - 1;
+ static const uptr kBlockIdx = kTableSize - 2;
+
+ union {
+ u32 table[kTableSize];
+ ClockElem clock[kClockCount];
+ };
+
+ ClockBlock() {
+ }
+};
+
+const int kTidBits = 13;
+// Reduce kMaxTid by kClockCount because one slot in ClockBlock table is
+// occupied by reference counter, so total number of elements we can store
+// in SyncClock is kClockCount * (kTableSize - 1).
+const unsigned kMaxTid = (1 << kTidBits) - ClockBlock::kClockCount;
+#if !SANITIZER_GO
+const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit.
+#else
+const unsigned kMaxTidInClock = kMaxTid; // Go does not track freed memory.
+#endif
+const uptr kShadowStackSize = 64 * 1024;
+
+// Count of shadow values in a shadow cell.
+const uptr kShadowCnt = 4;
+
+// That many user bytes are mapped onto a single shadow cell.
+const uptr kShadowCell = 8;
+
+// Single shadow value.
+typedef u64 RawShadow;
+const uptr kShadowSize = sizeof(RawShadow);
+
+// Shadow memory is kShadowMultiplier times larger than user memory.
+const uptr kShadowMultiplier = kShadowSize * kShadowCnt / kShadowCell;
+
+// That many user bytes are mapped onto a single meta shadow cell.
+// Must be less or equal to minimal memory allocator alignment.
+const uptr kMetaShadowCell = 8;
+
+// Size of a single meta shadow value (u32).
+const uptr kMetaShadowSize = 4;
+
+// All addresses and PCs are assumed to be compressible to that many bits.
+const uptr kCompressedAddrBits = 44;
+
+#if TSAN_NO_HISTORY
+const bool kCollectHistory = false;
+#else
+const bool kCollectHistory = true;
+#endif
+
+// The following "build consistency" machinery ensures that all source files
+// are built in the same configuration. Inconsistent builds lead to
+// hard-to-debug crashes.
+#if SANITIZER_DEBUG
+void build_consistency_debug();
+#else
+void build_consistency_release();
+#endif
+
+static inline void USED build_consistency() {
+#if SANITIZER_DEBUG
+ build_consistency_debug();
+#else
+ build_consistency_release();
+#endif
+}
+
+template<typename T>
+T min(T a, T b) {
+ return a < b ? a : b;
+}
+
+template<typename T>
+T max(T a, T b) {
+ return a > b ? a : b;
+}
+
+template<typename T>
+T RoundUp(T p, u64 align) {
+ DCHECK_EQ(align & (align - 1), 0);
+ return (T)(((u64)p + align - 1) & ~(align - 1));
+}
+
+template<typename T>
+T RoundDown(T p, u64 align) {
+ DCHECK_EQ(align & (align - 1), 0);
+ return (T)((u64)p & ~(align - 1));
+}
+
+// Zeroizes high part, returns 'bits' lsb bits.
+template<typename T>
+T GetLsb(T v, int bits) {
+ return (T)((u64)v & ((1ull << bits) - 1));
+}
+
+struct MD5Hash {
+ u64 hash[2];
+ bool operator==(const MD5Hash &other) const;
+};
+
+MD5Hash md5_hash(const void *data, uptr size);
+
+struct Processor;
+struct ThreadState;
+class ThreadContext;
+struct Context;
+struct ReportStack;
+class ReportDesc;
+class RegionAlloc;
+
+typedef uptr AccessType;
+
+enum : AccessType {
+ kAccessWrite = 0,
+ kAccessRead = 1 << 0,
+ kAccessAtomic = 1 << 1,
+ kAccessVptr = 1 << 2, // read or write of an object virtual table pointer
+ kAccessFree = 1 << 3, // synthetic memory access during memory freeing
+ kAccessExternalPC = 1 << 4, // access PC can have kExternalPCBit set
+};
+
+// Descriptor of user's memory block.
+struct MBlock {
+ u64 siz : 48;
+ u64 tag : 16;
+ StackID stk;
+ Tid tid;
+};
+
+COMPILER_CHECK(sizeof(MBlock) == 16);
+
+enum ExternalTag : uptr {
+ kExternalTagNone = 0,
+ kExternalTagSwiftModifyingAccess = 1,
+ kExternalTagFirstUserAvailable = 2,
+ kExternalTagMax = 1024,
+ // Don't set kExternalTagMax over 65,536, since MBlock only stores tags
+ // as 16-bit values, see tsan_defs.h.
+};
+
+enum MutexType {
+ MutexTypeTrace = MutexLastCommon,
+ MutexTypeReport,
+ MutexTypeSyncVar,
+ MutexTypeAnnotations,
+ MutexTypeAtExit,
+ MutexTypeFired,
+ MutexTypeRacy,
+ MutexTypeGlobalProc,
+ MutexTypeInternalAlloc,
+};
+
+} // namespace __tsan
+
+#endif // TSAN_DEFS_H
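
For illustration only (not part of the patch), what the small helper templates above compute, with concrete values:

// RoundUp(0x1234, 0x1000)   == 0x2000   // next multiple of the alignment
// RoundDown(0x1234, 0x1000) == 0x1000   // previous multiple
// GetLsb(0xABCD, 8)         == 0xCD     // keep only the low 8 bits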
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_dense_alloc.h b/compiler-rt/lib/tsan/rtl-old/tsan_dense_alloc.h
new file mode 100644
index 000000000000..9e15f74a0615
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_dense_alloc.h
@@ -0,0 +1,156 @@
+//===-- tsan_dense_alloc.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// A DenseSlabAlloc is a freelist-based allocator of fixed-size objects.
+// DenseSlabAllocCache is a thread-local cache for DenseSlabAlloc.
+// The only difference from traditional slab allocators is that DenseSlabAlloc
+// allocates/frees indices of objects and provides a way to map such an
+// index onto the real pointer. The index is a u32, that is, 2 times smaller
+// than uptr (hence the Dense prefix).
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_DENSE_ALLOC_H
+#define TSAN_DENSE_ALLOC_H
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+class DenseSlabAllocCache {
+ static const uptr kSize = 128;
+ typedef u32 IndexT;
+ uptr pos;
+ IndexT cache[kSize];
+ template <typename, uptr, uptr, u64>
+ friend class DenseSlabAlloc;
+};
+
+template <typename T, uptr kL1Size, uptr kL2Size, u64 kReserved = 0>
+class DenseSlabAlloc {
+ public:
+ typedef DenseSlabAllocCache Cache;
+ typedef typename Cache::IndexT IndexT;
+
+ static_assert((kL1Size & (kL1Size - 1)) == 0,
+ "kL1Size must be a power-of-two");
+ static_assert((kL2Size & (kL2Size - 1)) == 0,
+ "kL2Size must be a power-of-two");
+ static_assert((kL1Size * kL2Size) <= (1ull << (sizeof(IndexT) * 8)),
+ "kL1Size/kL2Size are too large");
+ static_assert(((kL1Size * kL2Size - 1) & kReserved) == 0,
+ "reserved bits don't fit");
+ static_assert(sizeof(T) > sizeof(IndexT),
+ "it doesn't make sense to use dense alloc");
+
+ DenseSlabAlloc(LinkerInitialized, const char *name) : name_(name) {}
+
+ explicit DenseSlabAlloc(const char *name)
+ : DenseSlabAlloc(LINKER_INITIALIZED, name) {
+ // It can be very large.
+ // Don't page it in for linker initialized objects.
+ internal_memset(map_, 0, sizeof(map_));
+ }
+
+ ~DenseSlabAlloc() {
+ for (uptr i = 0; i < kL1Size; i++) {
+ if (map_[i] != 0)
+ UnmapOrDie(map_[i], kL2Size * sizeof(T));
+ }
+ }
+
+ IndexT Alloc(Cache *c) {
+ if (c->pos == 0)
+ Refill(c);
+ return c->cache[--c->pos];
+ }
+
+ void Free(Cache *c, IndexT idx) {
+ DCHECK_NE(idx, 0);
+ if (c->pos == Cache::kSize)
+ Drain(c);
+ c->cache[c->pos++] = idx;
+ }
+
+ T *Map(IndexT idx) {
+ DCHECK_NE(idx, 0);
+ DCHECK_LE(idx, kL1Size * kL2Size);
+ return &map_[idx / kL2Size][idx % kL2Size];
+ }
+
+ void FlushCache(Cache *c) {
+ if (!c->pos)
+ return;
+ SpinMutexLock lock(&mtx_);
+ while (c->pos) {
+ IndexT idx = c->cache[--c->pos];
+ *(IndexT*)Map(idx) = freelist_;
+ freelist_ = idx;
+ }
+ }
+
+ void InitCache(Cache *c) {
+ c->pos = 0;
+ internal_memset(c->cache, 0, sizeof(c->cache));
+ }
+
+ uptr AllocatedMemory() const {
+ return atomic_load_relaxed(&fillpos_) * kL2Size * sizeof(T);
+ }
+
+ private:
+ T *map_[kL1Size];
+ SpinMutex mtx_;
+ IndexT freelist_ = {0};
+ atomic_uintptr_t fillpos_ = {0};
+ const char *const name_;
+
+ void Refill(Cache *c) {
+ SpinMutexLock lock(&mtx_);
+ if (freelist_ == 0) {
+ uptr fillpos = atomic_load_relaxed(&fillpos_);
+ if (fillpos == kL1Size) {
+ Printf("ThreadSanitizer: %s overflow (%zu*%zu). Dying.\n",
+ name_, kL1Size, kL2Size);
+ Die();
+ }
+ VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n", name_,
+ fillpos, kL1Size, kL2Size);
+ T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), name_);
+ // Reserve 0 as invalid index.
+ IndexT start = fillpos == 0 ? 1 : 0;
+ for (IndexT i = start; i < kL2Size; i++) {
+ new(batch + i) T;
+ *(IndexT *)(batch + i) = i + 1 + fillpos * kL2Size;
+ }
+ *(IndexT*)(batch + kL2Size - 1) = 0;
+ freelist_ = fillpos * kL2Size + start;
+ map_[fillpos] = batch;
+ atomic_store_relaxed(&fillpos_, fillpos + 1);
+ }
+ for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) {
+ IndexT idx = freelist_;
+ c->cache[c->pos++] = idx;
+ freelist_ = *(IndexT*)Map(idx);
+ }
+ }
+
+ void Drain(Cache *c) {
+ SpinMutexLock lock(&mtx_);
+ for (uptr i = 0; i < Cache::kSize / 2; i++) {
+ IndexT idx = c->cache[--c->pos];
+ *(IndexT*)Map(idx) = freelist_;
+ freelist_ = idx;
+ }
+ }
+};
+
+} // namespace __tsan
+
+#endif // TSAN_DENSE_ALLOC_H
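
A usage sketch for DenseSlabAlloc (not part of the patch); Node and the level sizes are made-up example values, and the snippet assumes it is compiled inside the tsan tree so the header and the u32/u64 typedefs are available:

#include "tsan_dense_alloc.h"

namespace __tsan {

struct Node {
  u64 payload;
  u32 next;
};

// 1024 * 1024 addressable indices; index 0 is reserved as "invalid".
typedef DenseSlabAlloc<Node, /*kL1Size=*/1024, /*kL2Size=*/1024> NodeAlloc;

void Example(NodeAlloc *alloc, DenseSlabAllocCache *cache) {
  alloc->InitCache(cache);
  u32 idx = alloc->Alloc(cache);  // hand out a compact 32-bit index
  Node *n = alloc->Map(idx);      // translate the index back to a pointer
  n->payload = 42;
  alloc->Free(cache, idx);        // return the index to the thread cache
  alloc->FlushCache(cache);       // push cached indices back to the freelist
}

}  // namespace __tsan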
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_dispatch_defs.h b/compiler-rt/lib/tsan/rtl-old/tsan_dispatch_defs.h
new file mode 100644
index 000000000000..94e0b50fed36
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_dispatch_defs.h
@@ -0,0 +1,73 @@
+//===-- tsan_dispatch_defs.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_DISPATCH_DEFS_H
+#define TSAN_DISPATCH_DEFS_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+typedef struct dispatch_object_s {} *dispatch_object_t;
+
+#define DISPATCH_DECL(name) \
+ typedef struct name##_s : public dispatch_object_s {} *name##_t
+
+DISPATCH_DECL(dispatch_queue);
+DISPATCH_DECL(dispatch_source);
+DISPATCH_DECL(dispatch_group);
+DISPATCH_DECL(dispatch_data);
+DISPATCH_DECL(dispatch_semaphore);
+DISPATCH_DECL(dispatch_io);
+
+typedef void (*dispatch_function_t)(void *arg);
+typedef void (^dispatch_block_t)(void);
+typedef void (^dispatch_io_handler_t)(bool done, dispatch_data_t data,
+ int error);
+
+typedef long dispatch_once_t;
+typedef __sanitizer::u64 dispatch_time_t;
+typedef int dispatch_fd_t;
+typedef unsigned long dispatch_io_type_t;
+typedef unsigned long dispatch_io_close_flags_t;
+
+extern "C" {
+void *dispatch_get_context(dispatch_object_t object);
+void dispatch_retain(dispatch_object_t object);
+void dispatch_release(dispatch_object_t object);
+
+extern const dispatch_block_t _dispatch_data_destructor_free;
+extern const dispatch_block_t _dispatch_data_destructor_munmap;
+} // extern "C"
+
+#define DISPATCH_DATA_DESTRUCTOR_DEFAULT nullptr
+#define DISPATCH_DATA_DESTRUCTOR_FREE _dispatch_data_destructor_free
+#define DISPATCH_DATA_DESTRUCTOR_MUNMAP _dispatch_data_destructor_munmap
+
+#if __has_attribute(noescape)
+# define DISPATCH_NOESCAPE __attribute__((__noescape__))
+#else
+# define DISPATCH_NOESCAPE
+#endif
+
+#if SANITIZER_MAC
+# define SANITIZER_WEAK_IMPORT extern "C" __attribute((weak_import))
+#else
+# define SANITIZER_WEAK_IMPORT extern "C" __attribute((weak))
+#endif
+
+
+// Data types used in dispatch APIs
+typedef unsigned long size_t;
+typedef unsigned long uintptr_t;
+typedef __sanitizer::s64 off_t;
+typedef __sanitizer::u16 mode_t;
+typedef long long_t;
+
+#endif // TSAN_DISPATCH_DEFS_H
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_external.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_external.cpp
new file mode 100644
index 000000000000..19ae174f20a5
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_external.cpp
@@ -0,0 +1,126 @@
+//===-- tsan_external.cpp -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_ptrauth.h"
+
+#if !SANITIZER_GO
+# include "tsan_interceptors.h"
+#endif
+
+namespace __tsan {
+
+#define CALLERPC ((uptr)__builtin_return_address(0))
+
+struct TagData {
+ const char *object_type;
+ const char *header;
+};
+
+static TagData registered_tags[kExternalTagMax] = {
+ {},
+ {"Swift variable", "Swift access race"},
+};
+static atomic_uint32_t used_tags{kExternalTagFirstUserAvailable};
+static TagData *GetTagData(uptr tag) {
+ // Invalid/corrupted tag? Better return NULL and let the caller deal with it.
+ if (tag >= atomic_load(&used_tags, memory_order_relaxed)) return nullptr;
+ return &registered_tags[tag];
+}
+
+const char *GetObjectTypeFromTag(uptr tag) {
+ TagData *tag_data = GetTagData(tag);
+ return tag_data ? tag_data->object_type : nullptr;
+}
+
+const char *GetReportHeaderFromTag(uptr tag) {
+ TagData *tag_data = GetTagData(tag);
+ return tag_data ? tag_data->header : nullptr;
+}
+
+void InsertShadowStackFrameForTag(ThreadState *thr, uptr tag) {
+ FuncEntry(thr, (uptr)&registered_tags[tag]);
+}
+
+uptr TagFromShadowStackFrame(uptr pc) {
+ uptr tag_count = atomic_load(&used_tags, memory_order_relaxed);
+ void *pc_ptr = (void *)pc;
+ if (pc_ptr < GetTagData(0) || pc_ptr > GetTagData(tag_count - 1))
+ return 0;
+ return (TagData *)pc_ptr - GetTagData(0);
+}
+
+#if !SANITIZER_GO
+
+void ExternalAccess(void *addr, uptr caller_pc, void *tag, AccessType typ) {
+ CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
+ ThreadState *thr = cur_thread();
+ if (caller_pc) FuncEntry(thr, caller_pc);
+ InsertShadowStackFrameForTag(thr, (uptr)tag);
+ bool in_ignored_lib;
+ if (!caller_pc || !libignore()->IsIgnored(caller_pc, &in_ignored_lib))
+ MemoryAccess(thr, CALLERPC, (uptr)addr, 1, typ);
+ FuncExit(thr);
+ if (caller_pc) FuncExit(thr);
+}
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_external_register_tag(const char *object_type) {
+ uptr new_tag = atomic_fetch_add(&used_tags, 1, memory_order_relaxed);
+ CHECK_LT(new_tag, kExternalTagMax);
+ GetTagData(new_tag)->object_type = internal_strdup(object_type);
+ char header[127] = {0};
+ internal_snprintf(header, sizeof(header), "race on %s", object_type);
+ GetTagData(new_tag)->header = internal_strdup(header);
+ return (void *)new_tag;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_register_header(void *tag, const char *header) {
+ CHECK_GE((uptr)tag, kExternalTagFirstUserAvailable);
+ CHECK_LT((uptr)tag, kExternalTagMax);
+ atomic_uintptr_t *header_ptr =
+ (atomic_uintptr_t *)&GetTagData((uptr)tag)->header;
+ header = internal_strdup(header);
+ char *old_header =
+ (char *)atomic_exchange(header_ptr, (uptr)header, memory_order_seq_cst);
+ Free(old_header);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_assign_tag(void *addr, void *tag) {
+ CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
+ Allocator *a = allocator();
+ MBlock *b = nullptr;
+ if (a->PointerIsMine((void *)addr)) {
+ void *block_begin = a->GetBlockBegin((void *)addr);
+ if (block_begin) b = ctx->metamap.GetBlock((uptr)block_begin);
+ }
+ if (b) {
+ b->tag = (uptr)tag;
+ }
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_read(void *addr, void *caller_pc, void *tag) {
+ ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, kAccessRead);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_write(void *addr, void *caller_pc, void *tag) {
+ ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, kAccessWrite);
+}
+} // extern "C"
+
+#endif // !SANITIZER_GO
+
+} // namespace __tsan
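
A sketch (not part of the patch) of how an instrumented library could drive the external-race API above; CollectionRead/CollectionWrite are made-up wrappers, and the declarations are assumed to come from tsan_interface.h:

static void *collection_tag;

void InitTags() {
  collection_tag = __tsan_external_register_tag("library collection");
}

void CollectionRead(void *obj) {
  // Report the caller's PC so the race report points at library user code.
  __tsan_external_read(obj, __builtin_return_address(0), collection_tag);
}

void CollectionWrite(void *obj) {
  __tsan_external_write(obj, __builtin_return_address(0), collection_tag);
}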
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_fd.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_fd.cpp
new file mode 100644
index 000000000000..255ffa8daf76
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_fd.cpp
@@ -0,0 +1,316 @@
+//===-- tsan_fd.cpp -------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_fd.h"
+#include "tsan_rtl.h"
+#include <sanitizer_common/sanitizer_atomic.h>
+
+namespace __tsan {
+
+const int kTableSizeL1 = 1024;
+const int kTableSizeL2 = 1024;
+const int kTableSize = kTableSizeL1 * kTableSizeL2;
+
+struct FdSync {
+ atomic_uint64_t rc;
+};
+
+struct FdDesc {
+ FdSync *sync;
+ Tid creation_tid;
+ StackID creation_stack;
+};
+
+struct FdContext {
+ atomic_uintptr_t tab[kTableSizeL1];
+ // Addresses used for synchronization.
+ FdSync globsync;
+ FdSync filesync;
+ FdSync socksync;
+ u64 connectsync;
+};
+
+static FdContext fdctx;
+
+static bool bogusfd(int fd) {
+ // Apparently a bogus fd value.
+ return fd < 0 || fd >= kTableSize;
+}
+
+static FdSync *allocsync(ThreadState *thr, uptr pc) {
+ FdSync *s = (FdSync*)user_alloc_internal(thr, pc, sizeof(FdSync),
+ kDefaultAlignment, false);
+ atomic_store(&s->rc, 1, memory_order_relaxed);
+ return s;
+}
+
+static FdSync *ref(FdSync *s) {
+ if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1)
+ atomic_fetch_add(&s->rc, 1, memory_order_relaxed);
+ return s;
+}
+
+static void unref(ThreadState *thr, uptr pc, FdSync *s) {
+ if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1) {
+ if (atomic_fetch_sub(&s->rc, 1, memory_order_acq_rel) == 1) {
+ CHECK_NE(s, &fdctx.globsync);
+ CHECK_NE(s, &fdctx.filesync);
+ CHECK_NE(s, &fdctx.socksync);
+ user_free(thr, pc, s, false);
+ }
+ }
+}
+
+static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
+ CHECK_GE(fd, 0);
+ CHECK_LT(fd, kTableSize);
+ atomic_uintptr_t *pl1 = &fdctx.tab[fd / kTableSizeL2];
+ uptr l1 = atomic_load(pl1, memory_order_consume);
+ if (l1 == 0) {
+ uptr size = kTableSizeL2 * sizeof(FdDesc);
+ // We need this to reside in user memory to properly catch races on it.
+ void *p = user_alloc_internal(thr, pc, size, kDefaultAlignment, false);
+ internal_memset(p, 0, size);
+ MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
+ if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
+ l1 = (uptr)p;
+ else
+ user_free(thr, pc, p, false);
+ }
+ FdDesc *fds = reinterpret_cast<FdDesc *>(l1);
+ return &fds[fd % kTableSizeL2];
+}
+
+// pd must be already ref'ed.
+static void init(ThreadState *thr, uptr pc, int fd, FdSync *s,
+ bool write = true) {
+ FdDesc *d = fddesc(thr, pc, fd);
+ // As a matter of fact, we don't intercept all close calls.
+ // See e.g. libc __res_iclose().
+ if (d->sync) {
+ unref(thr, pc, d->sync);
+ d->sync = 0;
+ }
+ if (flags()->io_sync == 0) {
+ unref(thr, pc, s);
+ } else if (flags()->io_sync == 1) {
+ d->sync = s;
+ } else if (flags()->io_sync == 2) {
+ unref(thr, pc, s);
+ d->sync = &fdctx.globsync;
+ }
+ d->creation_tid = thr->tid;
+ d->creation_stack = CurrentStackId(thr, pc);
+ if (write) {
+ // To catch races between fd usage and open.
+ MemoryRangeImitateWrite(thr, pc, (uptr)d, 8);
+ } else {
+ // See the dup-related comment in FdClose.
+ MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
+ }
+}
+
+void FdInit() {
+ atomic_store(&fdctx.globsync.rc, (u64)-1, memory_order_relaxed);
+ atomic_store(&fdctx.filesync.rc, (u64)-1, memory_order_relaxed);
+ atomic_store(&fdctx.socksync.rc, (u64)-1, memory_order_relaxed);
+}
+
+void FdOnFork(ThreadState *thr, uptr pc) {
+ // On fork() we need to reset all fds, because the child is going to
+ // close all of them, and that will cause races between the previous
+ // read/write and the close.
+ for (int l1 = 0; l1 < kTableSizeL1; l1++) {
+ FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
+ if (tab == 0)
+ break;
+ for (int l2 = 0; l2 < kTableSizeL2; l2++) {
+ FdDesc *d = &tab[l2];
+ MemoryResetRange(thr, pc, (uptr)d, 8);
+ }
+ }
+}
+
+bool FdLocation(uptr addr, int *fd, Tid *tid, StackID *stack) {
+ for (int l1 = 0; l1 < kTableSizeL1; l1++) {
+ FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
+ if (tab == 0)
+ break;
+ if (addr >= (uptr)tab && addr < (uptr)(tab + kTableSizeL2)) {
+ int l2 = (addr - (uptr)tab) / sizeof(FdDesc);
+ FdDesc *d = &tab[l2];
+ *fd = l1 * kTableSizeL2 + l2;
+ *tid = d->creation_tid;
+ *stack = d->creation_stack;
+ return true;
+ }
+ }
+ return false;
+}
+
+void FdAcquire(ThreadState *thr, uptr pc, int fd) {
+ if (bogusfd(fd))
+ return;
+ FdDesc *d = fddesc(thr, pc, fd);
+ FdSync *s = d->sync;
+ DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
+ MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
+ if (s)
+ Acquire(thr, pc, (uptr)s);
+}
+
+void FdRelease(ThreadState *thr, uptr pc, int fd) {
+ if (bogusfd(fd))
+ return;
+ FdDesc *d = fddesc(thr, pc, fd);
+ FdSync *s = d->sync;
+ DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
+ MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
+ if (s)
+ Release(thr, pc, (uptr)s);
+}
+
+void FdAccess(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdAccess(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ FdDesc *d = fddesc(thr, pc, fd);
+ MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
+}
+
+void FdClose(ThreadState *thr, uptr pc, int fd, bool write) {
+ DPrintf("#%d: FdClose(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ FdDesc *d = fddesc(thr, pc, fd);
+ if (write) {
+ // To catch races between fd usage and close.
+ MemoryAccess(thr, pc, (uptr)d, 8, kAccessWrite);
+ } else {
+ // This path is used only by dup2/dup3 calls.
+ // We do read instead of write because there are a number of legitimate
+ // cases where write would lead to false positives:
+ // 1. Some software dups a closed pipe in place of a socket before closing
+ // the socket (to prevent races actually).
+ // 2. Some daemons dup /dev/null in place of stdin/stdout.
+ // On the other hand we have not seen cases when write here catches real
+ // bugs.
+ MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
+ }
+ // We need to clear it, because if we do not intercept some call out there
+ // that creates an fd, we will hit false positives.
+ MemoryResetRange(thr, pc, (uptr)d, 8);
+ unref(thr, pc, d->sync);
+ d->sync = 0;
+ d->creation_tid = kInvalidTid;
+ d->creation_stack = kInvalidStackID;
+}
+
+void FdFileCreate(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdFileCreate(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ init(thr, pc, fd, &fdctx.filesync);
+}
+
+void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd, bool write) {
+ DPrintf("#%d: FdDup(%d, %d)\n", thr->tid, oldfd, newfd);
+ if (bogusfd(oldfd) || bogusfd(newfd))
+ return;
+ // Ignore the case when the user dups a not-yet-connected socket.
+ FdDesc *od = fddesc(thr, pc, oldfd);
+ MemoryAccess(thr, pc, (uptr)od, 8, kAccessRead);
+ FdClose(thr, pc, newfd, write);
+ init(thr, pc, newfd, ref(od->sync), write);
+}
+
+void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) {
+ DPrintf("#%d: FdCreatePipe(%d, %d)\n", thr->tid, rfd, wfd);
+ FdSync *s = allocsync(thr, pc);
+ init(thr, pc, rfd, ref(s));
+ init(thr, pc, wfd, ref(s));
+ unref(thr, pc, s);
+}
+
+void FdEventCreate(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdEventCreate(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ init(thr, pc, fd, allocsync(thr, pc));
+}
+
+void FdSignalCreate(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdSignalCreate(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ init(thr, pc, fd, 0);
+}
+
+void FdInotifyCreate(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdInotifyCreate(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ init(thr, pc, fd, 0);
+}
+
+void FdPollCreate(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdPollCreate(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ init(thr, pc, fd, allocsync(thr, pc));
+}
+
+void FdSocketCreate(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdSocketCreate(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ // It can be a UDP socket.
+ init(thr, pc, fd, &fdctx.socksync);
+}
+
+void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd) {
+ DPrintf("#%d: FdSocketAccept(%d, %d)\n", thr->tid, fd, newfd);
+ if (bogusfd(fd))
+ return;
+ // Synchronize connect->accept.
+ Acquire(thr, pc, (uptr)&fdctx.connectsync);
+ init(thr, pc, newfd, &fdctx.socksync);
+}
+
+void FdSocketConnecting(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdSocketConnecting(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ // Synchronize connect->accept.
+ Release(thr, pc, (uptr)&fdctx.connectsync);
+}
+
+void FdSocketConnect(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdSocketConnect(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ init(thr, pc, fd, &fdctx.socksync);
+}
+
+uptr File2addr(const char *path) {
+ (void)path;
+ static u64 addr;
+ return (uptr)&addr;
+}
+
+uptr Dir2addr(const char *path) {
+ (void)path;
+ static u64 addr;
+ return (uptr)&addr;
+}
+
+} // namespace __tsan
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_fd.h b/compiler-rt/lib/tsan/rtl-old/tsan_fd.h
new file mode 100644
index 000000000000..d9648178481c
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_fd.h
@@ -0,0 +1,64 @@
+//===-- tsan_fd.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// This file handles synchronization via IO.
+// People use IO for synchronization along the lines of:
+//
+// int X;
+// int client_socket; // initialized elsewhere
+// int server_socket; // initialized elsewhere
+//
+// Thread 1:
+// X = 42;
+// send(client_socket, ...);
+//
+// Thread 2:
+// if (recv(server_socket, ...) > 0)
+// assert(X == 42);
+//
+// This file determines the scope of the file descriptor (pipe, socket,
+// all local files, etc) and executes acquire and release operations on
+// the scope as necessary. Some scopes are very fine-grained (e.g. pipe
+// operations synchronize only with operations on the same pipe), while
+// others are coarse-grained (e.g. all operations on local files synchronize
+// with each other).
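+//
+// Illustrative sketch (not part of the original comment): with the default
+// io_sync=1, both ends of a pipe share one FdSync object (see FdPipeCreate),
+// so the read/write interceptors are expected to produce a release/acquire
+// pair on exactly that object:
+//
+//   write(wfd, &X, ...);   // FdRelease(thr, pc, wfd) -> Release(pipe sync)
+//   read(rfd, &Y, ...);    // FdAcquire(thr, pc, rfd) -> Acquire(pipe sync)
+//
+// while all plain local files map to the single coarse-grained filesync
+// object, so any write->read pair on local files synchronizes.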
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_FD_H
+#define TSAN_FD_H
+
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+void FdInit();
+void FdAcquire(ThreadState *thr, uptr pc, int fd);
+void FdRelease(ThreadState *thr, uptr pc, int fd);
+void FdAccess(ThreadState *thr, uptr pc, int fd);
+void FdClose(ThreadState *thr, uptr pc, int fd, bool write = true);
+void FdFileCreate(ThreadState *thr, uptr pc, int fd);
+void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd, bool write);
+void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd);
+void FdEventCreate(ThreadState *thr, uptr pc, int fd);
+void FdSignalCreate(ThreadState *thr, uptr pc, int fd);
+void FdInotifyCreate(ThreadState *thr, uptr pc, int fd);
+void FdPollCreate(ThreadState *thr, uptr pc, int fd);
+void FdSocketCreate(ThreadState *thr, uptr pc, int fd);
+void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd);
+void FdSocketConnecting(ThreadState *thr, uptr pc, int fd);
+void FdSocketConnect(ThreadState *thr, uptr pc, int fd);
+bool FdLocation(uptr addr, int *fd, Tid *tid, StackID *stack);
+void FdOnFork(ThreadState *thr, uptr pc);
+
+uptr File2addr(const char *path);
+uptr Dir2addr(const char *path);
+
+} // namespace __tsan
+
+#endif // TSAN_FD_H
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_flags.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_flags.cpp
new file mode 100644
index 000000000000..ee89862d17bd
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_flags.cpp
@@ -0,0 +1,126 @@
+//===-- tsan_flags.cpp ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "tsan_flags.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "ubsan/ubsan_flags.h"
+
+namespace __tsan {
+
+// Can be overridden in the frontend.
+#ifdef TSAN_EXTERNAL_HOOKS
+extern "C" const char* __tsan_default_options();
+#else
+SANITIZER_WEAK_DEFAULT_IMPL
+const char *__tsan_default_options() {
+ return "";
+}
+#endif
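+
+// Illustrative sketch (not part of this file): a frontend can override the
+// weak default above by defining the symbol itself, e.g.
+//
+//   extern "C" const char *__tsan_default_options() {
+//     return "halt_on_error=1:history_size=2";  // hypothetical option string
+//   }
+//
+// The string is parsed before the environment (see InitializeFlags below),
+// so options from the environment (typically TSAN_OPTIONS) still take
+// precedence.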
+
+void Flags::SetDefaults() {
+#define TSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "tsan_flags.inc"
+#undef TSAN_FLAG
+ // DDFlags
+ second_deadlock_stack = false;
+}
+
+void RegisterTsanFlags(FlagParser *parser, Flags *f) {
+#define TSAN_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+#include "tsan_flags.inc"
+#undef TSAN_FLAG
+ // DDFlags
+ RegisterFlag(parser, "second_deadlock_stack",
+ "Report where each mutex is locked in deadlock reports",
+ &f->second_deadlock_stack);
+}
+
+void InitializeFlags(Flags *f, const char *env, const char *env_option_name) {
+ SetCommonFlagsDefaults();
+ {
+ // Override some common flags defaults.
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.external_symbolizer_path = GetEnv("TSAN_SYMBOLIZER_PATH");
+ cf.allow_addr2line = true;
+ if (SANITIZER_GO) {
+ // Does not work as expected for Go: runtime handles SIGABRT and crashes.
+ cf.abort_on_error = false;
+ // Go does not have mutexes.
+ cf.detect_deadlocks = false;
+ }
+ cf.print_suppressions = false;
+ cf.stack_trace_format = " #%n %f %S %M";
+ cf.exitcode = 66;
+ cf.intercept_tls_get_addr = true;
+ OverrideCommonFlags(cf);
+ }
+
+ f->SetDefaults();
+
+ FlagParser parser;
+ RegisterTsanFlags(&parser, f);
+ RegisterCommonFlags(&parser);
+
+#if TSAN_CONTAINS_UBSAN
+ __ubsan::Flags *uf = __ubsan::flags();
+ uf->SetDefaults();
+
+ FlagParser ubsan_parser;
+ __ubsan::RegisterUbsanFlags(&ubsan_parser, uf);
+ RegisterCommonFlags(&ubsan_parser);
+#endif
+
+ // Let a frontend override.
+ parser.ParseString(__tsan_default_options());
+#if TSAN_CONTAINS_UBSAN
+ const char *ubsan_default_options = __ubsan_default_options();
+ ubsan_parser.ParseString(ubsan_default_options);
+#endif
+ // Override from command line.
+ parser.ParseString(env, env_option_name);
+#if TSAN_CONTAINS_UBSAN
+ ubsan_parser.ParseStringFromEnv("UBSAN_OPTIONS");
+#endif
+
+ // Sanity check.
+ if (!f->report_bugs) {
+ f->report_thread_leaks = false;
+ f->report_destroy_locked = false;
+ f->report_signal_unsafe = false;
+ }
+
+ InitializeCommonFlags();
+
+ if (Verbosity()) ReportUnrecognizedFlags();
+
+ if (common_flags()->help) parser.PrintFlagDescriptions();
+
+ if (f->history_size < 0 || f->history_size > 7) {
+ Printf("ThreadSanitizer: incorrect value for history_size"
+ " (must be [0..7])\n");
+ Die();
+ }
+
+ if (f->io_sync < 0 || f->io_sync > 2) {
+ Printf("ThreadSanitizer: incorrect value for io_sync"
+ " (must be [0..2])\n");
+ Die();
+ }
+}
+
+} // namespace __tsan
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_flags.h b/compiler-rt/lib/tsan/rtl-old/tsan_flags.h
new file mode 100644
index 000000000000..da27d5b992bc
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_flags.h
@@ -0,0 +1,34 @@
+//===-- tsan_flags.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+// NOTE: This file may be included into user code.
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_FLAGS_H
+#define TSAN_FLAGS_H
+
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
+
+namespace __tsan {
+
+struct Flags : DDFlags {
+#define TSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "tsan_flags.inc"
+#undef TSAN_FLAG
+
+ void SetDefaults();
+ void ParseFromString(const char *str);
+};
+
+void InitializeFlags(Flags *flags, const char *env,
+ const char *env_option_name = nullptr);
+} // namespace __tsan
+
+#endif // TSAN_FLAGS_H
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_flags.inc b/compiler-rt/lib/tsan/rtl-old/tsan_flags.inc
new file mode 100644
index 000000000000..7954a4307fa1
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_flags.inc
@@ -0,0 +1,84 @@
+//===-- tsan_flags.inc ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// TSan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_FLAG
+# error "Define TSAN_FLAG prior to including this file!"
+#endif
+
+// TSAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+TSAN_FLAG(bool, enable_annotations, true,
+ "Enable dynamic annotations, otherwise they are no-ops.")
+TSAN_FLAG(bool, suppress_equal_stacks, true,
+ "Suppress a race report if we've already output another race report "
+ "with the same stack.")
+TSAN_FLAG(bool, suppress_equal_addresses, true,
+ "Suppress a race report if we've already output another race report "
+ "on the same address.")
+
+TSAN_FLAG(bool, report_bugs, true,
+ "Turns off bug reporting entirely (useful for benchmarking).")
+TSAN_FLAG(bool, report_thread_leaks, true, "Report thread leaks at exit?")
+TSAN_FLAG(bool, report_destroy_locked, true,
+ "Report destruction of a locked mutex?")
+TSAN_FLAG(bool, report_mutex_bugs, true,
+ "Report incorrect usages of mutexes and mutex annotations?")
+TSAN_FLAG(bool, report_signal_unsafe, true,
+ "Report violations of async signal-safety "
+ "(e.g. malloc() call from a signal handler).")
+TSAN_FLAG(bool, report_atomic_races, true,
+ "Report races between atomic and plain memory accesses.")
+TSAN_FLAG(
+ bool, force_seq_cst_atomics, false,
+ "If set, all atomics are effectively sequentially consistent (seq_cst), "
+ "regardless of what user actually specified.")
+TSAN_FLAG(bool, halt_on_error, false, "Exit after first reported error.")
+TSAN_FLAG(int, atexit_sleep_ms, 1000,
+ "Sleep in main thread before exiting for that many ms "
+ "(useful to catch \"at exit\" races).")
+TSAN_FLAG(const char *, profile_memory, "",
+ "If set, periodically write memory profile to that file.")
+TSAN_FLAG(int, flush_memory_ms, 0, "Flush shadow memory every X ms.")
+TSAN_FLAG(int, flush_symbolizer_ms, 5000, "Flush symbolizer caches every X ms.")
+TSAN_FLAG(
+ int, memory_limit_mb, 0,
+ "Resident memory limit in MB to aim at."
+ "If the process consumes more memory, then TSan will flush shadow memory.")
+TSAN_FLAG(bool, stop_on_start, false,
+ "Stops on start until __tsan_resume() is called (for debugging).")
+TSAN_FLAG(bool, running_on_valgrind, false,
+ "Controls whether RunningOnValgrind() returns true or false.")
+// There are a lot of goroutines in Go, so we use a smaller history.
+TSAN_FLAG(
+ int, history_size, SANITIZER_GO ? 1 : 3,
+ "Per-thread history size, controls how many previous memory accesses "
+ "are remembered per thread. Possible values are [0..7]. "
+ "history_size=0 amounts to 32K memory accesses. Each next value doubles "
+ "the amount of memory accesses, up to history_size=7 that amounts to "
+ "4M memory accesses. The default value is 2 (128K memory accesses).")
+TSAN_FLAG(int, io_sync, 1,
+ "Controls level of synchronization implied by IO operations. "
+ "0 - no synchronization "
+ "1 - reasonable level of synchronization (write->read)"
+ "2 - global synchronization of all IO operations.")
+TSAN_FLAG(bool, die_after_fork, true,
+ "Die after multi-threaded fork if the child creates new threads.")
+TSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
+TSAN_FLAG(bool, ignore_interceptors_accesses, SANITIZER_MAC ? true : false,
+ "Ignore reads and writes from all interceptors.")
+TSAN_FLAG(bool, ignore_noninstrumented_modules, SANITIZER_MAC ? true : false,
+ "Interceptors should only detect races when called from instrumented "
+ "modules.")
+TSAN_FLAG(bool, shared_ptr_interceptor, true,
+ "Track atomic reference counting in libc++ shared_ptr and weak_ptr.")
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_ignoreset.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_ignoreset.cpp
new file mode 100644
index 000000000000..1fca1cf4f9fc
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_ignoreset.cpp
@@ -0,0 +1,38 @@
+//===-- tsan_ignoreset.cpp ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_ignoreset.h"
+
+namespace __tsan {
+
+const uptr IgnoreSet::kMaxSize;
+
+IgnoreSet::IgnoreSet()
+ : size_() {
+}
+
+void IgnoreSet::Add(StackID stack_id) {
+ if (size_ == kMaxSize)
+ return;
+ for (uptr i = 0; i < size_; i++) {
+ if (stacks_[i] == stack_id)
+ return;
+ }
+ stacks_[size_++] = stack_id;
+}
+
+StackID IgnoreSet::At(uptr i) const {
+ CHECK_LT(i, size_);
+ CHECK_LE(size_, kMaxSize);
+ return stacks_[i];
+}
+
+} // namespace __tsan
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_ignoreset.h b/compiler-rt/lib/tsan/rtl-old/tsan_ignoreset.h
new file mode 100644
index 000000000000..4e2511291ce4
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_ignoreset.h
@@ -0,0 +1,36 @@
+//===-- tsan_ignoreset.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// IgnoreSet holds a set of stack traces where ignores were enabled.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_IGNORESET_H
+#define TSAN_IGNORESET_H
+
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+class IgnoreSet {
+ public:
+ IgnoreSet();
+ void Add(StackID stack_id);
+ void Reset() { size_ = 0; }
+ uptr Size() const { return size_; }
+ StackID At(uptr i) const;
+
+ private:
+ static constexpr uptr kMaxSize = 16;
+ uptr size_;
+ StackID stacks_[kMaxSize];
+};
+
+} // namespace __tsan
+
+#endif // TSAN_IGNORESET_H
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_ilist.h b/compiler-rt/lib/tsan/rtl-old/tsan_ilist.h
new file mode 100644
index 000000000000..d7d8be219dbe
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_ilist.h
@@ -0,0 +1,189 @@
+//===-- tsan_ilist.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_ILIST_H
+#define TSAN_ILIST_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __tsan {
+
+class INode {
+ public:
+ INode() = default;
+
+ private:
+ INode* next_ = nullptr;
+ INode* prev_ = nullptr;
+
+ template <typename Base, INode Base::*Node, typename Elem>
+ friend class IList;
+ INode(const INode&) = delete;
+ void operator=(const INode&) = delete;
+};
+
+// Intrusive doubly-linked list.
+//
+// The node class (MyNode) needs to include an "INode foo" field,
+// then the list can be declared as IList<MyNode, &MyNode::foo>.
+// This design allows linking MyNode into multiple lists using
+// different INode fields.
+// The optional Elem template argument allows specifying the node's MDT
+// (most derived type) if it's different from MyNode.
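+//
+// Illustrative sketch (hypothetical type, not used by the runtime):
+//
+//   struct MyNode {
+//     int payload;
+//     INode foo;
+//   };
+//   IList<MyNode, &MyNode::foo> list;
+//   MyNode n;
+//   list.PushBack(&n);
+//   MyNode *front = list.PopFront();  // returns &n, list is now empty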
+template <typename Base, INode Base::*Node, typename Elem = Base>
+class IList {
+ public:
+ IList();
+
+ void PushFront(Elem* e);
+ void PushBack(Elem* e);
+ void Remove(Elem* e);
+
+ Elem* PopFront();
+ Elem* PopBack();
+ Elem* Front();
+ Elem* Back();
+
+ // Prev links point towards front of the queue.
+ Elem* Prev(Elem* e);
+ // Next links point towards back of the queue.
+ Elem* Next(Elem* e);
+
+ uptr Size() const;
+ bool Empty() const;
+ bool Queued(Elem* e) const;
+
+ private:
+ INode node_;
+ uptr size_ = 0;
+
+ void Push(Elem* e, INode* after);
+ static INode* ToNode(Elem* e);
+ static Elem* ToElem(INode* n);
+
+ IList(const IList&) = delete;
+ void operator=(const IList&) = delete;
+};
+
+template <typename Base, INode Base::*Node, typename Elem>
+IList<Base, Node, Elem>::IList() {
+ node_.next_ = node_.prev_ = &node_;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+void IList<Base, Node, Elem>::PushFront(Elem* e) {
+ Push(e, &node_);
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+void IList<Base, Node, Elem>::PushBack(Elem* e) {
+ Push(e, node_.prev_);
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+void IList<Base, Node, Elem>::Push(Elem* e, INode* after) {
+ INode* n = ToNode(e);
+ DCHECK_EQ(n->next_, nullptr);
+ DCHECK_EQ(n->prev_, nullptr);
+ INode* next = after->next_;
+ n->next_ = next;
+ n->prev_ = after;
+ next->prev_ = n;
+ after->next_ = n;
+ size_++;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+void IList<Base, Node, Elem>::Remove(Elem* e) {
+ INode* n = ToNode(e);
+ INode* next = n->next_;
+ INode* prev = n->prev_;
+ DCHECK(next);
+ DCHECK(prev);
+ DCHECK(size_);
+ next->prev_ = prev;
+ prev->next_ = next;
+ n->prev_ = n->next_ = nullptr;
+ size_--;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::PopFront() {
+ Elem* e = Front();
+ if (e)
+ Remove(e);
+ return e;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::PopBack() {
+ Elem* e = Back();
+ if (e)
+ Remove(e);
+ return e;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::Front() {
+ return size_ ? ToElem(node_.next_) : nullptr;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::Back() {
+ return size_ ? ToElem(node_.prev_) : nullptr;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::Prev(Elem* e) {
+ INode* n = ToNode(e);
+ DCHECK(n->prev_);
+ return n->prev_ != &node_ ? ToElem(n->prev_) : nullptr;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::Next(Elem* e) {
+ INode* n = ToNode(e);
+ DCHECK(n->next_);
+ return n->next_ != &node_ ? ToElem(n->next_) : nullptr;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+uptr IList<Base, Node, Elem>::Size() const {
+ return size_;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+bool IList<Base, Node, Elem>::Empty() const {
+ return size_ == 0;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+bool IList<Base, Node, Elem>::Queued(Elem* e) const {
+ INode* n = ToNode(e);
+ DCHECK_EQ(!n->next_, !n->prev_);
+ return n->next_;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+INode* IList<Base, Node, Elem>::ToNode(Elem* e) {
+ return &(e->*Node);
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::ToElem(INode* n) {
+ return static_cast<Elem*>(reinterpret_cast<Base*>(
+ reinterpret_cast<uptr>(n) -
+ reinterpret_cast<uptr>(&(reinterpret_cast<Elem*>(0)->*Node))));
+}
+
+} // namespace __tsan
+
+#endif
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_interceptors.h b/compiler-rt/lib/tsan/rtl-old/tsan_interceptors.h
new file mode 100644
index 000000000000..61dbb81ffec4
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_interceptors.h
@@ -0,0 +1,93 @@
+#ifndef TSAN_INTERCEPTORS_H
+#define TSAN_INTERCEPTORS_H
+
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+class ScopedInterceptor {
+ public:
+ ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc);
+ ~ScopedInterceptor();
+ void DisableIgnores() {
+ if (UNLIKELY(ignoring_))
+ DisableIgnoresImpl();
+ }
+ void EnableIgnores() {
+ if (UNLIKELY(ignoring_))
+ EnableIgnoresImpl();
+ }
+
+ private:
+ ThreadState *const thr_;
+ bool in_ignored_lib_;
+ bool ignoring_;
+
+ void DisableIgnoresImpl();
+ void EnableIgnoresImpl();
+};
+
+LibIgnore *libignore();
+
+#if !SANITIZER_GO
+inline bool in_symbolizer() {
+ return UNLIKELY(cur_thread_init()->in_symbolizer);
+}
+#endif
+
+} // namespace __tsan
+
+#define SCOPED_INTERCEPTOR_RAW(func, ...) \
+ ThreadState *thr = cur_thread_init(); \
+ ScopedInterceptor si(thr, #func, GET_CALLER_PC()); \
+ UNUSED const uptr pc = GET_CURRENT_PC();
+
+#ifdef __powerpc64__
+// Debugging of crashes on powerpc after commit:
+// c80604f7a3 ("tsan: remove real func check from interceptors")
+// Somehow replacing if with DCHECK leads to strange failures in:
+// SanitizerCommon-tsan-powerpc64le-Linux :: Linux/ptrace.cpp
+// https://lab.llvm.org/buildbot/#/builders/105
+// https://lab.llvm.org/buildbot/#/builders/121
+// https://lab.llvm.org/buildbot/#/builders/57
+# define CHECK_REAL_FUNC(func) \
+ if (REAL(func) == 0) { \
+ Report("FATAL: ThreadSanitizer: failed to intercept %s\n", #func); \
+ Die(); \
+ }
+#else
+# define CHECK_REAL_FUNC(func) DCHECK(REAL(func))
+#endif
+
+#define SCOPED_TSAN_INTERCEPTOR(func, ...) \
+ SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
+ CHECK_REAL_FUNC(func); \
+ if (!thr->is_inited || thr->ignore_interceptors || thr->in_ignored_lib) \
+ return REAL(func)(__VA_ARGS__);
+
+#define SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START() \
+ si.DisableIgnores();
+
+#define SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END() \
+ si.EnableIgnores();
+
+#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__)
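+
+// Illustrative sketch (hypothetical function, not part of this header):
+// a typical interceptor built from these macros looks roughly like
+//
+//   TSAN_INTERCEPTOR(int, foo, int fd) {
+//     SCOPED_TSAN_INTERCEPTOR(foo, fd);  // declares thr/pc, may early-return REAL(foo)(fd)
+//     FdAccess(thr, pc, fd);             // optional runtime bookkeeping
+//     return REAL(foo)(fd);
+//   }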
+
+#if SANITIZER_NETBSD
+# define TSAN_INTERCEPTOR_NETBSD_ALIAS(ret, func, ...) \
+ TSAN_INTERCEPTOR(ret, __libc_##func, __VA_ARGS__) \
+ ALIAS(WRAPPER_NAME(pthread_##func));
+# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(ret, func, ...) \
+ TSAN_INTERCEPTOR(ret, __libc_thr_##func, __VA_ARGS__) \
+ ALIAS(WRAPPER_NAME(pthread_##func));
+# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(ret, func, func2, ...) \
+ TSAN_INTERCEPTOR(ret, __libc_thr_##func, __VA_ARGS__) \
+ ALIAS(WRAPPER_NAME(pthread_##func2));
+#else
+# define TSAN_INTERCEPTOR_NETBSD_ALIAS(ret, func, ...)
+# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(ret, func, ...)
+# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(ret, func, func2, ...)
+#endif
+
+#endif // TSAN_INTERCEPTORS_H
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_interceptors_libdispatch.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_interceptors_libdispatch.cpp
new file mode 100644
index 000000000000..cbbb7ecb2397
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_interceptors_libdispatch.cpp
@@ -0,0 +1,814 @@
+//===-- tsan_interceptors_libdispatch.cpp ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Support for intercepting libdispatch (GCD).
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "interception/interception.h"
+#include "tsan_interceptors.h"
+#include "tsan_rtl.h"
+
+#include "BlocksRuntime/Block.h"
+#include "tsan_dispatch_defs.h"
+
+#if SANITIZER_MAC
+# include <Availability.h>
+#endif
+
+namespace __tsan {
+ typedef u16 uint16_t;
+
+typedef struct {
+ dispatch_queue_t queue;
+ void *orig_context;
+ dispatch_function_t orig_work;
+ bool free_context_in_callback;
+ bool submitted_synchronously;
+ bool is_barrier_block;
+ uptr non_queue_sync_object;
+} block_context_t;
+
+// The offsets of different fields of the dispatch_queue_t structure, exported
+// by libdispatch.dylib.
+extern "C" struct dispatch_queue_offsets_s {
+ const uint16_t dqo_version;
+ const uint16_t dqo_label;
+ const uint16_t dqo_label_size;
+ const uint16_t dqo_flags;
+ const uint16_t dqo_flags_size;
+ const uint16_t dqo_serialnum;
+ const uint16_t dqo_serialnum_size;
+ const uint16_t dqo_width;
+ const uint16_t dqo_width_size;
+ const uint16_t dqo_running;
+ const uint16_t dqo_running_size;
+ const uint16_t dqo_suspend_cnt;
+ const uint16_t dqo_suspend_cnt_size;
+ const uint16_t dqo_target_queue;
+ const uint16_t dqo_target_queue_size;
+ const uint16_t dqo_priority;
+ const uint16_t dqo_priority_size;
+} dispatch_queue_offsets;
+
+static bool IsQueueSerial(dispatch_queue_t q) {
+ CHECK_EQ(dispatch_queue_offsets.dqo_width_size, 2);
+ uptr width = *(uint16_t *)(((uptr)q) + dispatch_queue_offsets.dqo_width);
+ CHECK_NE(width, 0);
+ return width == 1;
+}
+
+static dispatch_queue_t GetTargetQueueFromQueue(dispatch_queue_t q) {
+ CHECK_EQ(dispatch_queue_offsets.dqo_target_queue_size, 8);
+ dispatch_queue_t tq = *(
+ dispatch_queue_t *)(((uptr)q) + dispatch_queue_offsets.dqo_target_queue);
+ return tq;
+}
+
+static dispatch_queue_t GetTargetQueueFromSource(dispatch_source_t source) {
+ dispatch_queue_t tq = GetTargetQueueFromQueue((dispatch_queue_t)source);
+ CHECK_NE(tq, 0);
+ return tq;
+}
+
+static block_context_t *AllocContext(ThreadState *thr, uptr pc,
+ dispatch_queue_t queue, void *orig_context,
+ dispatch_function_t orig_work) {
+ block_context_t *new_context =
+ (block_context_t *)user_alloc_internal(thr, pc, sizeof(block_context_t));
+ new_context->queue = queue;
+ new_context->orig_context = orig_context;
+ new_context->orig_work = orig_work;
+ new_context->free_context_in_callback = true;
+ new_context->submitted_synchronously = false;
+ new_context->is_barrier_block = false;
+ new_context->non_queue_sync_object = 0;
+ return new_context;
+}
+
+#define GET_QUEUE_SYNC_VARS(context, q) \
+ bool is_queue_serial = q && IsQueueSerial(q); \
+ uptr sync_ptr = (uptr)q ?: context->non_queue_sync_object; \
+ uptr serial_sync = (uptr)sync_ptr; \
+ uptr concurrent_sync = sync_ptr ? ((uptr)sync_ptr) + sizeof(uptr) : 0; \
+ bool serial_task = context->is_barrier_block || is_queue_serial
+
+static void dispatch_sync_pre_execute(ThreadState *thr, uptr pc,
+ block_context_t *context) {
+ uptr submit_sync = (uptr)context;
+ Acquire(thr, pc, submit_sync);
+
+ dispatch_queue_t q = context->queue;
+ do {
+ GET_QUEUE_SYNC_VARS(context, q);
+ if (serial_sync) Acquire(thr, pc, serial_sync);
+ if (serial_task && concurrent_sync) Acquire(thr, pc, concurrent_sync);
+
+ if (q) q = GetTargetQueueFromQueue(q);
+ } while (q);
+}
+
+static void dispatch_sync_post_execute(ThreadState *thr, uptr pc,
+ block_context_t *context) {
+ uptr submit_sync = (uptr)context;
+ if (context->submitted_synchronously) Release(thr, pc, submit_sync);
+
+ dispatch_queue_t q = context->queue;
+ do {
+ GET_QUEUE_SYNC_VARS(context, q);
+ if (serial_task && serial_sync) Release(thr, pc, serial_sync);
+ if (!serial_task && concurrent_sync) Release(thr, pc, concurrent_sync);
+
+ if (q) q = GetTargetQueueFromQueue(q);
+ } while (q);
+}
+
+static void dispatch_callback_wrap(void *param) {
+ SCOPED_INTERCEPTOR_RAW(dispatch_callback_wrap);
+ block_context_t *context = (block_context_t *)param;
+
+ dispatch_sync_pre_execute(thr, pc, context);
+
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ context->orig_work(context->orig_context);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+
+ dispatch_sync_post_execute(thr, pc, context);
+
+ if (context->free_context_in_callback) user_free(thr, pc, context);
+}
+
+static void invoke_block(void *param) {
+ dispatch_block_t block = (dispatch_block_t)param;
+ block();
+}
+
+static void invoke_and_release_block(void *param) {
+ dispatch_block_t block = (dispatch_block_t)param;
+ block();
+ Block_release(block);
+}
+
+#define DISPATCH_INTERCEPT_ASYNC_B(name, barrier) \
+ TSAN_INTERCEPTOR(void, name, dispatch_queue_t q, dispatch_block_t block) { \
+ SCOPED_TSAN_INTERCEPTOR(name, q, block); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \
+ dispatch_block_t heap_block = Block_copy(block); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \
+ block_context_t *new_context = \
+ AllocContext(thr, pc, q, heap_block, &invoke_and_release_block); \
+ new_context->is_barrier_block = barrier; \
+ Release(thr, pc, (uptr)new_context); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \
+ REAL(name##_f)(q, new_context, dispatch_callback_wrap); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \
+ }
+
+#define DISPATCH_INTERCEPT_SYNC_B(name, barrier) \
+ TSAN_INTERCEPTOR(void, name, dispatch_queue_t q, \
+ DISPATCH_NOESCAPE dispatch_block_t block) { \
+ SCOPED_TSAN_INTERCEPTOR(name, q, block); \
+ block_context_t new_context = { \
+ q, block, &invoke_block, false, true, barrier, 0}; \
+ Release(thr, pc, (uptr)&new_context); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \
+ REAL(name##_f)(q, &new_context, dispatch_callback_wrap); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \
+ Acquire(thr, pc, (uptr)&new_context); \
+ }
+
+#define DISPATCH_INTERCEPT_ASYNC_F(name, barrier) \
+ TSAN_INTERCEPTOR(void, name, dispatch_queue_t q, void *context, \
+ dispatch_function_t work) { \
+ SCOPED_TSAN_INTERCEPTOR(name, q, context, work); \
+ block_context_t *new_context = \
+ AllocContext(thr, pc, q, context, work); \
+ new_context->is_barrier_block = barrier; \
+ Release(thr, pc, (uptr)new_context); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \
+ REAL(name)(q, new_context, dispatch_callback_wrap); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \
+ }
+
+#define DISPATCH_INTERCEPT_SYNC_F(name, barrier) \
+ TSAN_INTERCEPTOR(void, name, dispatch_queue_t q, void *context, \
+ dispatch_function_t work) { \
+ SCOPED_TSAN_INTERCEPTOR(name, q, context, work); \
+ block_context_t new_context = { \
+ q, context, work, false, true, barrier, 0}; \
+ Release(thr, pc, (uptr)&new_context); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \
+ REAL(name)(q, &new_context, dispatch_callback_wrap); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \
+ Acquire(thr, pc, (uptr)&new_context); \
+ }
+
+#define DISPATCH_INTERCEPT(name, barrier) \
+ DISPATCH_INTERCEPT_ASYNC_F(name##_async_f, barrier) \
+ DISPATCH_INTERCEPT_ASYNC_B(name##_async, barrier) \
+ DISPATCH_INTERCEPT_SYNC_F(name##_sync_f, barrier) \
+ DISPATCH_INTERCEPT_SYNC_B(name##_sync, barrier)
+
+// We wrap dispatch_async, dispatch_sync and friends where we allocate a new
+// context, which is used to synchronize (we release the context before
+// submitting, and the callback acquires it before executing the original
+// callback).
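+//
+// Illustrative happens-before chain (a sketch of the scheme described above):
+//
+//   submitter:  Release(thr, pc, (uptr)new_context);   // before enqueueing
+//   callback:   Acquire(thr, pc, (uptr)context);       // dispatch_sync_pre_execute
+//               block();                               // user callback runs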
+DISPATCH_INTERCEPT(dispatch, false)
+DISPATCH_INTERCEPT(dispatch_barrier, true)
+
+// dispatch_async_and_wait() and friends were introduced in macOS 10.14.
+// Linking of these interceptors fails when using an older SDK.
+#if !SANITIZER_MAC || defined(__MAC_10_14)
+// macOS 10.14 is greater than our minimum deployment target. To ensure we
+// generate a weak reference so the TSan dylib continues to work on older
+// systems, we need to forward declare the intercepted functions as "weak
+// imports". Note that this file is multi-platform, so we cannot include the
+// actual header file (#include <dispatch/dispatch.h>).
+SANITIZER_WEAK_IMPORT void dispatch_async_and_wait(
+ dispatch_queue_t queue, DISPATCH_NOESCAPE dispatch_block_t block);
+SANITIZER_WEAK_IMPORT void dispatch_async_and_wait_f(
+ dispatch_queue_t queue, void *context, dispatch_function_t work);
+SANITIZER_WEAK_IMPORT void dispatch_barrier_async_and_wait(
+ dispatch_queue_t queue, DISPATCH_NOESCAPE dispatch_block_t block);
+SANITIZER_WEAK_IMPORT void dispatch_barrier_async_and_wait_f(
+ dispatch_queue_t queue, void *context, dispatch_function_t work);
+
+DISPATCH_INTERCEPT_SYNC_F(dispatch_async_and_wait_f, false)
+DISPATCH_INTERCEPT_SYNC_B(dispatch_async_and_wait, false)
+DISPATCH_INTERCEPT_SYNC_F(dispatch_barrier_async_and_wait_f, true)
+DISPATCH_INTERCEPT_SYNC_B(dispatch_barrier_async_and_wait, true)
+#endif
+
+
+DECLARE_REAL(void, dispatch_after_f, dispatch_time_t when,
+ dispatch_queue_t queue, void *context, dispatch_function_t work)
+
+TSAN_INTERCEPTOR(void, dispatch_after, dispatch_time_t when,
+ dispatch_queue_t queue, dispatch_block_t block) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_after, when, queue, block);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ dispatch_block_t heap_block = Block_copy(block);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+ block_context_t *new_context =
+ AllocContext(thr, pc, queue, heap_block, &invoke_and_release_block);
+ Release(thr, pc, (uptr)new_context);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ REAL(dispatch_after_f)(when, queue, new_context, dispatch_callback_wrap);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+}
+
+TSAN_INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
+ dispatch_queue_t queue, void *context,
+ dispatch_function_t work) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_after_f, when, queue, context, work);
+ WRAP(dispatch_after)(when, queue, ^(void) {
+ work(context);
+ });
+}
+
+// GCD's dispatch_once implementation has a fast path that contains a racy read
+// and it's inlined into user's code. Furthermore, this fast path doesn't
+// establish a proper happens-before relations between the initialization and
+// code following the call to dispatch_once. We could deal with this in
+// instrumented code, but there's not much we can do about it in system
+// libraries. Let's disable the fast path (by never storing the value ~0 to
+// predicate), so the interceptor is always called, and let's add proper release
+// and acquire semantics. Since TSan does not see its own atomic stores, the
+// race on predicate won't be reported - the only accesses to it that TSan sees
+// are the loads on the fast path. Loads don't race. Secondly, dispatch_once is
+// both a macro and a real function, we want to intercept the function, so we
+// need to undefine the macro.
+#undef dispatch_once
+TSAN_INTERCEPTOR(void, dispatch_once, dispatch_once_t *predicate,
+ DISPATCH_NOESCAPE dispatch_block_t block) {
+ SCOPED_INTERCEPTOR_RAW(dispatch_once, predicate, block);
+ atomic_uint32_t *a = reinterpret_cast<atomic_uint32_t *>(predicate);
+ u32 v = atomic_load(a, memory_order_acquire);
+ if (v == 0 &&
+ atomic_compare_exchange_strong(a, &v, 1, memory_order_relaxed)) {
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ block();
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+ Release(thr, pc, (uptr)a);
+ atomic_store(a, 2, memory_order_release);
+ } else {
+ while (v != 2) {
+ internal_sched_yield();
+ v = atomic_load(a, memory_order_acquire);
+ }
+ Acquire(thr, pc, (uptr)a);
+ }
+}
+
+#undef dispatch_once_f
+TSAN_INTERCEPTOR(void, dispatch_once_f, dispatch_once_t *predicate,
+ void *context, dispatch_function_t function) {
+ SCOPED_INTERCEPTOR_RAW(dispatch_once_f, predicate, context, function);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ WRAP(dispatch_once)(predicate, ^(void) {
+ function(context);
+ });
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+}
+
+TSAN_INTERCEPTOR(long_t, dispatch_semaphore_signal,
+ dispatch_semaphore_t dsema) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_semaphore_signal, dsema);
+ Release(thr, pc, (uptr)dsema);
+ return REAL(dispatch_semaphore_signal)(dsema);
+}
+
+TSAN_INTERCEPTOR(long_t, dispatch_semaphore_wait, dispatch_semaphore_t dsema,
+ dispatch_time_t timeout) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_semaphore_wait, dsema, timeout);
+ long_t result = REAL(dispatch_semaphore_wait)(dsema, timeout);
+ if (result == 0) Acquire(thr, pc, (uptr)dsema);
+ return result;
+}
+
+TSAN_INTERCEPTOR(long_t, dispatch_group_wait, dispatch_group_t group,
+ dispatch_time_t timeout) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_group_wait, group, timeout);
+ long_t result = REAL(dispatch_group_wait)(group, timeout);
+ if (result == 0) Acquire(thr, pc, (uptr)group);
+ return result;
+}
+
+// Used, but not intercepted.
+extern "C" void dispatch_group_enter(dispatch_group_t group);
+
+TSAN_INTERCEPTOR(void, dispatch_group_leave, dispatch_group_t group) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_group_leave, group);
+ // Acquired in the group notification callback in dispatch_group_notify[_f].
+ Release(thr, pc, (uptr)group);
+ REAL(dispatch_group_leave)(group);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_group_async, dispatch_group_t group,
+ dispatch_queue_t queue, dispatch_block_t block) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_group_async, group, queue, block);
+ dispatch_retain(group);
+ dispatch_group_enter(group);
+ __block dispatch_block_t block_copy = (dispatch_block_t)Block_copy(block);
+ WRAP(dispatch_async)(queue, ^(void) {
+ block_copy();
+ Block_release(block_copy);
+ WRAP(dispatch_group_leave)(group);
+ dispatch_release(group);
+ });
+}
+
+TSAN_INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
+ dispatch_queue_t queue, void *context,
+ dispatch_function_t work) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_group_async_f, group, queue, context, work);
+ dispatch_retain(group);
+ dispatch_group_enter(group);
+ WRAP(dispatch_async)(queue, ^(void) {
+ work(context);
+ WRAP(dispatch_group_leave)(group);
+ dispatch_release(group);
+ });
+}
+
+DECLARE_REAL(void, dispatch_group_notify_f, dispatch_group_t group,
+ dispatch_queue_t q, void *context, dispatch_function_t work)
+
+TSAN_INTERCEPTOR(void, dispatch_group_notify, dispatch_group_t group,
+ dispatch_queue_t q, dispatch_block_t block) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_group_notify, group, q, block);
+
+ // Retain the group to make sure it is still available in the callback
+ // (otherwise it could already be destroyed). Released in the callback.
+ dispatch_retain(group);
+
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ dispatch_block_t heap_block = Block_copy(^(void) {
+ {
+ SCOPED_INTERCEPTOR_RAW(dispatch_read_callback);
+ // Released when leaving the group (dispatch_group_leave).
+ Acquire(thr, pc, (uptr)group);
+ }
+ dispatch_release(group);
+ block();
+ });
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+ block_context_t *new_context =
+ AllocContext(thr, pc, q, heap_block, &invoke_and_release_block);
+ new_context->is_barrier_block = true;
+ Release(thr, pc, (uptr)new_context);
+ REAL(dispatch_group_notify_f)(group, q, new_context, dispatch_callback_wrap);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_group_notify_f, dispatch_group_t group,
+ dispatch_queue_t q, void *context, dispatch_function_t work) {
+ WRAP(dispatch_group_notify)(group, q, ^(void) { work(context); });
+}
+
+TSAN_INTERCEPTOR(void, dispatch_source_set_event_handler,
+ dispatch_source_t source, dispatch_block_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_source_set_event_handler, source, handler);
+ if (handler == nullptr)
+ return REAL(dispatch_source_set_event_handler)(source, nullptr);
+ dispatch_queue_t q = GetTargetQueueFromSource(source);
+ __block block_context_t new_context = {
+ q, handler, &invoke_block, false, false, false, 0 };
+ dispatch_block_t new_handler = Block_copy(^(void) {
+ new_context.orig_context = handler; // To explicitly capture "handler".
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_source_set_event_handler)(source, new_handler);
+ Block_release(new_handler);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_source_set_event_handler_f,
+ dispatch_source_t source, dispatch_function_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_source_set_event_handler_f, source, handler);
+ if (handler == nullptr)
+ return REAL(dispatch_source_set_event_handler)(source, nullptr);
+ dispatch_block_t block = ^(void) {
+ handler(dispatch_get_context(source));
+ };
+ WRAP(dispatch_source_set_event_handler)(source, block);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_source_set_cancel_handler,
+ dispatch_source_t source, dispatch_block_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_source_set_cancel_handler, source, handler);
+ if (handler == nullptr)
+ return REAL(dispatch_source_set_cancel_handler)(source, nullptr);
+ dispatch_queue_t q = GetTargetQueueFromSource(source);
+ __block block_context_t new_context = {
+ q, handler, &invoke_block, false, false, false, 0};
+ dispatch_block_t new_handler = Block_copy(^(void) {
+ new_context.orig_context = handler; // To explicitly capture "handler".
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_source_set_cancel_handler)(source, new_handler);
+ Block_release(new_handler);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_source_set_cancel_handler_f,
+ dispatch_source_t source, dispatch_function_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_source_set_cancel_handler_f, source,
+ handler);
+ if (handler == nullptr)
+ return REAL(dispatch_source_set_cancel_handler)(source, nullptr);
+ dispatch_block_t block = ^(void) {
+ handler(dispatch_get_context(source));
+ };
+ WRAP(dispatch_source_set_cancel_handler)(source, block);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_source_set_registration_handler,
+ dispatch_source_t source, dispatch_block_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_source_set_registration_handler, source,
+ handler);
+ if (handler == nullptr)
+ return REAL(dispatch_source_set_registration_handler)(source, nullptr);
+ dispatch_queue_t q = GetTargetQueueFromSource(source);
+ __block block_context_t new_context = {
+ q, handler, &invoke_block, false, false, false, 0};
+ dispatch_block_t new_handler = Block_copy(^(void) {
+ new_context.orig_context = handler; // To explicitly capture "handler".
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_source_set_registration_handler)(source, new_handler);
+ Block_release(new_handler);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_source_set_registration_handler_f,
+ dispatch_source_t source, dispatch_function_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_source_set_registration_handler_f, source,
+ handler);
+ if (handler == nullptr)
+ return REAL(dispatch_source_set_registration_handler)(source, nullptr);
+ dispatch_block_t block = ^(void) {
+ handler(dispatch_get_context(source));
+ };
+ WRAP(dispatch_source_set_registration_handler)(source, block);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_apply, size_t iterations,
+ dispatch_queue_t queue,
+ DISPATCH_NOESCAPE void (^block)(size_t)) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_apply, iterations, queue, block);
+
+ u8 sync1, sync2;
+ uptr parent_to_child_sync = (uptr)&sync1;
+ uptr child_to_parent_sync = (uptr)&sync2;
+
+ Release(thr, pc, parent_to_child_sync);
+ void (^new_block)(size_t) = ^(size_t iteration) {
+ SCOPED_INTERCEPTOR_RAW(dispatch_apply);
+ Acquire(thr, pc, parent_to_child_sync);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ block(iteration);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+ Release(thr, pc, child_to_parent_sync);
+ };
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ REAL(dispatch_apply)(iterations, queue, new_block);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+ Acquire(thr, pc, child_to_parent_sync);
+}
+
+static void invoke_block_iteration(void *param, size_t iteration) {
+ auto block = (void (^)(size_t)) param;
+ block(iteration);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_apply_f, size_t iterations,
+ dispatch_queue_t queue, void *context,
+ void (*work)(void *, size_t)) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_apply_f, iterations, queue, context, work);
+
+ // Unfortunately, we cannot delegate to dispatch_apply, since libdispatch
+ // implements dispatch_apply in terms of dispatch_apply_f.
+ u8 sync1, sync2;
+ uptr parent_to_child_sync = (uptr)&sync1;
+ uptr child_to_parent_sync = (uptr)&sync2;
+
+ Release(thr, pc, parent_to_child_sync);
+ void (^new_block)(size_t) = ^(size_t iteration) {
+ SCOPED_INTERCEPTOR_RAW(dispatch_apply_f);
+ Acquire(thr, pc, parent_to_child_sync);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ work(context, iteration);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+ Release(thr, pc, child_to_parent_sync);
+ };
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ REAL(dispatch_apply_f)(iterations, queue, new_block, invoke_block_iteration);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+ Acquire(thr, pc, child_to_parent_sync);
+}
+
+DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
+DECLARE_REAL_AND_INTERCEPTOR(int, munmap, void *addr, long_t sz)
+
+TSAN_INTERCEPTOR(dispatch_data_t, dispatch_data_create, const void *buffer,
+ size_t size, dispatch_queue_t q, dispatch_block_t destructor) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_data_create, buffer, size, q, destructor);
+ if ((q == nullptr) || (destructor == DISPATCH_DATA_DESTRUCTOR_DEFAULT))
+ return REAL(dispatch_data_create)(buffer, size, q, destructor);
+
+ if (destructor == DISPATCH_DATA_DESTRUCTOR_FREE)
+ destructor = ^(void) { WRAP(free)((void *)(uintptr_t)buffer); };
+ else if (destructor == DISPATCH_DATA_DESTRUCTOR_MUNMAP)
+ destructor = ^(void) { WRAP(munmap)((void *)(uintptr_t)buffer, size); };
+
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ dispatch_block_t heap_block = Block_copy(destructor);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+ block_context_t *new_context =
+ AllocContext(thr, pc, q, heap_block, &invoke_and_release_block);
+ uptr submit_sync = (uptr)new_context;
+ Release(thr, pc, submit_sync);
+ return REAL(dispatch_data_create)(buffer, size, q, ^(void) {
+ dispatch_callback_wrap(new_context);
+ });
+}
+
+typedef void (^fd_handler_t)(dispatch_data_t data, int error);
+typedef void (^cleanup_handler_t)(int error);
+
+TSAN_INTERCEPTOR(void, dispatch_read, dispatch_fd_t fd, size_t length,
+ dispatch_queue_t q, fd_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_read, fd, length, q, h);
+ __block block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ fd_handler_t new_h = Block_copy(^(dispatch_data_t data, int error) {
+ new_context.orig_context = ^(void) {
+ h(data, error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_read)(fd, length, q, new_h);
+ Block_release(new_h);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_write, dispatch_fd_t fd, dispatch_data_t data,
+ dispatch_queue_t q, fd_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_write, fd, data, q, h);
+ __block block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ fd_handler_t new_h = Block_copy(^(dispatch_data_t data, int error) {
+ new_context.orig_context = ^(void) {
+ h(data, error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_write)(fd, data, q, new_h);
+ Block_release(new_h);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_io_read, dispatch_io_t channel, off_t offset,
+ size_t length, dispatch_queue_t q, dispatch_io_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_read, channel, offset, length, q, h);
+ __block block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ dispatch_io_handler_t new_h =
+ Block_copy(^(bool done, dispatch_data_t data, int error) {
+ new_context.orig_context = ^(void) {
+ h(done, data, error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_io_read)(channel, offset, length, q, new_h);
+ Block_release(new_h);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_io_write, dispatch_io_t channel, off_t offset,
+ dispatch_data_t data, dispatch_queue_t q,
+ dispatch_io_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_write, channel, offset, data, q, h);
+ __block block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ dispatch_io_handler_t new_h =
+ Block_copy(^(bool done, dispatch_data_t data, int error) {
+ new_context.orig_context = ^(void) {
+ h(done, data, error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_io_write)(channel, offset, data, q, new_h);
+ Block_release(new_h);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_io_barrier, dispatch_io_t channel,
+ dispatch_block_t barrier) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_barrier, channel, barrier);
+ __block block_context_t new_context = {
+ nullptr, nullptr, &invoke_block, false, false, false, 0};
+ new_context.non_queue_sync_object = (uptr)channel;
+ new_context.is_barrier_block = true;
+ dispatch_block_t new_block = Block_copy(^(void) {
+ new_context.orig_context = ^(void) {
+ barrier();
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_io_barrier)(channel, new_block);
+ Block_release(new_block);
+}
+
+TSAN_INTERCEPTOR(dispatch_io_t, dispatch_io_create, dispatch_io_type_t type,
+ dispatch_fd_t fd, dispatch_queue_t q, cleanup_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_create, type, fd, q, h);
+ __block dispatch_io_t new_channel = nullptr;
+ __block block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ cleanup_handler_t new_h = Block_copy(^(int error) {
+ {
+ SCOPED_INTERCEPTOR_RAW(dispatch_io_create_callback);
+ Acquire(thr, pc, (uptr)new_channel); // Release() in dispatch_io_close.
+ }
+ new_context.orig_context = ^(void) {
+ h(error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ new_channel = REAL(dispatch_io_create)(type, fd, q, new_h);
+ Block_release(new_h);
+ return new_channel;
+}
+
+TSAN_INTERCEPTOR(dispatch_io_t, dispatch_io_create_with_path,
+ dispatch_io_type_t type, const char *path, int oflag,
+ mode_t mode, dispatch_queue_t q, cleanup_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_create_with_path, type, path, oflag, mode,
+ q, h);
+ __block dispatch_io_t new_channel = nullptr;
+ __block block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ cleanup_handler_t new_h = Block_copy(^(int error) {
+ {
+ SCOPED_INTERCEPTOR_RAW(dispatch_io_create_callback);
+ Acquire(thr, pc, (uptr)new_channel); // Release() in dispatch_io_close.
+ }
+ new_context.orig_context = ^(void) {
+ h(error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ new_channel =
+ REAL(dispatch_io_create_with_path)(type, path, oflag, mode, q, new_h);
+ Block_release(new_h);
+ return new_channel;
+}
+
+TSAN_INTERCEPTOR(dispatch_io_t, dispatch_io_create_with_io,
+ dispatch_io_type_t type, dispatch_io_t io, dispatch_queue_t q,
+ cleanup_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_create_with_io, type, io, q, h);
+ __block dispatch_io_t new_channel = nullptr;
+ __block block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ cleanup_handler_t new_h = Block_copy(^(int error) {
+ {
+ SCOPED_INTERCEPTOR_RAW(dispatch_io_create_callback);
+ Acquire(thr, pc, (uptr)new_channel); // Release() in dispatch_io_close.
+ }
+ new_context.orig_context = ^(void) {
+ h(error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ new_channel = REAL(dispatch_io_create_with_io)(type, io, q, new_h);
+ Block_release(new_h);
+ return new_channel;
+}
+
+TSAN_INTERCEPTOR(void, dispatch_io_close, dispatch_io_t channel,
+ dispatch_io_close_flags_t flags) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_close, channel, flags);
+ Release(thr, pc, (uptr)channel); // Acquire() in dispatch_io_create[_*].
+ return REAL(dispatch_io_close)(channel, flags);
+}
+
+// Resuming a suspended queue needs to synchronize with all subsequent
+// executions of blocks in that queue.
+TSAN_INTERCEPTOR(void, dispatch_resume, dispatch_object_t o) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_resume, o);
+ Release(thr, pc, (uptr)o); // Synchronizes with the Acquire() on serial_sync
+ // in dispatch_sync_pre_execute
+ return REAL(dispatch_resume)(o);
+}
+
+void InitializeLibdispatchInterceptors() {
+ INTERCEPT_FUNCTION(dispatch_async);
+ INTERCEPT_FUNCTION(dispatch_async_f);
+ INTERCEPT_FUNCTION(dispatch_sync);
+ INTERCEPT_FUNCTION(dispatch_sync_f);
+ INTERCEPT_FUNCTION(dispatch_barrier_async);
+ INTERCEPT_FUNCTION(dispatch_barrier_async_f);
+ INTERCEPT_FUNCTION(dispatch_barrier_sync);
+ INTERCEPT_FUNCTION(dispatch_barrier_sync_f);
+ INTERCEPT_FUNCTION(dispatch_async_and_wait);
+ INTERCEPT_FUNCTION(dispatch_async_and_wait_f);
+ INTERCEPT_FUNCTION(dispatch_barrier_async_and_wait);
+ INTERCEPT_FUNCTION(dispatch_barrier_async_and_wait_f);
+ INTERCEPT_FUNCTION(dispatch_after);
+ INTERCEPT_FUNCTION(dispatch_after_f);
+ INTERCEPT_FUNCTION(dispatch_once);
+ INTERCEPT_FUNCTION(dispatch_once_f);
+ INTERCEPT_FUNCTION(dispatch_semaphore_signal);
+ INTERCEPT_FUNCTION(dispatch_semaphore_wait);
+ INTERCEPT_FUNCTION(dispatch_group_wait);
+ INTERCEPT_FUNCTION(dispatch_group_leave);
+ INTERCEPT_FUNCTION(dispatch_group_async);
+ INTERCEPT_FUNCTION(dispatch_group_async_f);
+ INTERCEPT_FUNCTION(dispatch_group_notify);
+ INTERCEPT_FUNCTION(dispatch_group_notify_f);
+ INTERCEPT_FUNCTION(dispatch_source_set_event_handler);
+ INTERCEPT_FUNCTION(dispatch_source_set_event_handler_f);
+ INTERCEPT_FUNCTION(dispatch_source_set_cancel_handler);
+ INTERCEPT_FUNCTION(dispatch_source_set_cancel_handler_f);
+ INTERCEPT_FUNCTION(dispatch_source_set_registration_handler);
+ INTERCEPT_FUNCTION(dispatch_source_set_registration_handler_f);
+ INTERCEPT_FUNCTION(dispatch_apply);
+ INTERCEPT_FUNCTION(dispatch_apply_f);
+ INTERCEPT_FUNCTION(dispatch_data_create);
+ INTERCEPT_FUNCTION(dispatch_read);
+ INTERCEPT_FUNCTION(dispatch_write);
+ INTERCEPT_FUNCTION(dispatch_io_read);
+ INTERCEPT_FUNCTION(dispatch_io_write);
+ INTERCEPT_FUNCTION(dispatch_io_barrier);
+ INTERCEPT_FUNCTION(dispatch_io_create);
+ INTERCEPT_FUNCTION(dispatch_io_create_with_path);
+ INTERCEPT_FUNCTION(dispatch_io_create_with_io);
+ INTERCEPT_FUNCTION(dispatch_io_close);
+ INTERCEPT_FUNCTION(dispatch_resume);
+}
+
+} // namespace __tsan
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_interceptors_mac.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_interceptors_mac.cpp
new file mode 100644
index 000000000000..ed064150d005
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_interceptors_mac.cpp
@@ -0,0 +1,521 @@
+//===-- tsan_interceptors_mac.cpp -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Mac-specific interceptors.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "interception/interception.h"
+#include "tsan_interceptors.h"
+#include "tsan_interface.h"
+#include "tsan_interface_ann.h"
+#include "sanitizer_common/sanitizer_addrhashmap.h"
+
+#include <errno.h>
+#include <libkern/OSAtomic.h>
+#include <objc/objc-sync.h>
+#include <os/lock.h>
+#include <sys/ucontext.h>
+
+#if defined(__has_include) && __has_include(<xpc/xpc.h>)
+#include <xpc/xpc.h>
+#endif // #if defined(__has_include) && __has_include(<xpc/xpc.h>)
+
+typedef long long_t;
+
+extern "C" {
+int getcontext(ucontext_t *ucp) __attribute__((returns_twice));
+int setcontext(const ucontext_t *ucp);
+}
+
+namespace __tsan {
+
+// The non-barrier versions of OSAtomic* functions are semantically mo_relaxed,
+// but the two variants (e.g. OSAtomicAdd32 and OSAtomicAdd32Barrier) are
+// actually aliases of each other, and we cannot have different interceptors for
+// them, because they're actually the same function. Thus, we have to stay
+// conservative and treat the non-barrier versions as mo_acq_rel.
+static constexpr morder kMacOrderBarrier = mo_acq_rel;
+static constexpr morder kMacOrderNonBarrier = mo_acq_rel;
+static constexpr morder kMacFailureOrder = mo_relaxed;
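+
+// Illustrative sketch (not part of the original code): with the conservative
+// ordering above, an application-level call such as
+//   int32_t v = OSAtomicAdd32(1, &counter);
+// is modeled by the interceptors below roughly as
+//   v = __tsan_atomic32_fetch_add((volatile a32 *)&counter, 1, mo_acq_rel) + 1;
+// i.e. the non-barrier variant gets the same acquire-release semantics as
+// OSAtomicAdd32Barrier.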
+
+#define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
+ TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \
+ return tsan_atomic_f((volatile tsan_t *)ptr, x, mo); \
+ }
+
+#define OSATOMIC_INTERCEPTOR_PLUS_X(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
+ TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \
+ return tsan_atomic_f((volatile tsan_t *)ptr, x, mo) + x; \
+ }
+
+#define OSATOMIC_INTERCEPTOR_PLUS_1(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
+ TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, ptr); \
+ return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) + 1; \
+ }
+
+#define OSATOMIC_INTERCEPTOR_MINUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \
+ mo) \
+ TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, ptr); \
+ return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) - 1; \
+ }
+
+#define OSATOMIC_INTERCEPTORS_ARITHMETIC(f, tsan_atomic_f, m) \
+ m(int32_t, int32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \
+ kMacOrderNonBarrier) \
+ m(int32_t, int32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f, \
+ kMacOrderBarrier) \
+ m(int64_t, int64_t, a64, f##64, __tsan_atomic64_##tsan_atomic_f, \
+ kMacOrderNonBarrier) \
+ m(int64_t, int64_t, a64, f##64##Barrier, __tsan_atomic64_##tsan_atomic_f, \
+ kMacOrderBarrier)
+
+#define OSATOMIC_INTERCEPTORS_BITWISE(f, tsan_atomic_f, m, m_orig) \
+ m(int32_t, uint32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \
+ kMacOrderNonBarrier) \
+ m(int32_t, uint32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f, \
+ kMacOrderBarrier) \
+ m_orig(int32_t, uint32_t, a32, f##32##Orig, __tsan_atomic32_##tsan_atomic_f, \
+ kMacOrderNonBarrier) \
+ m_orig(int32_t, uint32_t, a32, f##32##OrigBarrier, \
+ __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier)
+
+OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicAdd, fetch_add,
+ OSATOMIC_INTERCEPTOR_PLUS_X)
+OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicIncrement, fetch_add,
+ OSATOMIC_INTERCEPTOR_PLUS_1)
+OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicDecrement, fetch_sub,
+ OSATOMIC_INTERCEPTOR_MINUS_1)
+OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicOr, fetch_or, OSATOMIC_INTERCEPTOR_PLUS_X,
+ OSATOMIC_INTERCEPTOR)
+OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicAnd, fetch_and,
+ OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR)
+OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicXor, fetch_xor,
+ OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR)
+
+#define OSATOMIC_INTERCEPTORS_CAS(f, tsan_atomic_f, tsan_t, t) \
+ TSAN_INTERCEPTOR(bool, f, t old_value, t new_value, t volatile *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, old_value, new_value, ptr); \
+ return tsan_atomic_f##_compare_exchange_strong( \
+ (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \
+ kMacOrderNonBarrier, kMacFailureOrder); \
+ } \
+ \
+ TSAN_INTERCEPTOR(bool, f##Barrier, t old_value, t new_value, \
+ t volatile *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f##Barrier, old_value, new_value, ptr); \
+ return tsan_atomic_f##_compare_exchange_strong( \
+ (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \
+ kMacOrderBarrier, kMacFailureOrder); \
+ }
+
+OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapInt, __tsan_atomic32, a32, int)
+OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapLong, __tsan_atomic64, a64,
+ long_t)
+OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapPtr, __tsan_atomic64, a64,
+ void *)
+OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap32, __tsan_atomic32, a32,
+ int32_t)
+OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap64, __tsan_atomic64, a64,
+ int64_t)
+
+#define OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, mo) \
+ TSAN_INTERCEPTOR(bool, f, uint32_t n, volatile void *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, n, ptr); \
+ volatile char *byte_ptr = ((volatile char *)ptr) + (n >> 3); \
+ char bit = 0x80u >> (n & 7); \
+ char mask = clear ? ~bit : bit; \
+ char orig_byte = op((volatile a8 *)byte_ptr, mask, mo); \
+ return orig_byte & bit; \
+ }
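+
+// Descriptive note: OSAtomicTestAndSet/OSAtomicTestAndClear number bits from
+// the most significant bit of the lowest-addressed byte onward, which is why
+// the macro above selects byte (n >> 3) and mask 0x80 >> (n & 7).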
+
+#define OSATOMIC_INTERCEPTORS_BITOP(f, op, clear) \
+ OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, kMacOrderNonBarrier) \
+ OSATOMIC_INTERCEPTOR_BITOP(f##Barrier, op, clear, kMacOrderBarrier)
+
+OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndSet, __tsan_atomic8_fetch_or, false)
+OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndClear, __tsan_atomic8_fetch_and,
+ true)
+
+TSAN_INTERCEPTOR(void, OSAtomicEnqueue, OSQueueHead *list, void *item,
+ size_t offset) {
+ SCOPED_TSAN_INTERCEPTOR(OSAtomicEnqueue, list, item, offset);
+ __tsan_release(item);
+ REAL(OSAtomicEnqueue)(list, item, offset);
+}
+
+TSAN_INTERCEPTOR(void *, OSAtomicDequeue, OSQueueHead *list, size_t offset) {
+ SCOPED_TSAN_INTERCEPTOR(OSAtomicDequeue, list, offset);
+ void *item = REAL(OSAtomicDequeue)(list, offset);
+ if (item) __tsan_acquire(item);
+ return item;
+}
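+
+// Modeling enqueue as Release(item) and dequeue as Acquire(item) captures the
+// common hand-off pattern: writes made to the item before OSAtomicEnqueue are
+// ordered before reads performed by the thread that later dequeues it.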
+
+// OSAtomicFifoEnqueue and OSAtomicFifoDequeue are only on OS X.
+#if !SANITIZER_IOS
+
+TSAN_INTERCEPTOR(void, OSAtomicFifoEnqueue, OSFifoQueueHead *list, void *item,
+ size_t offset) {
+ SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoEnqueue, list, item, offset);
+ __tsan_release(item);
+ REAL(OSAtomicFifoEnqueue)(list, item, offset);
+}
+
+TSAN_INTERCEPTOR(void *, OSAtomicFifoDequeue, OSFifoQueueHead *list,
+ size_t offset) {
+ SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoDequeue, list, offset);
+ void *item = REAL(OSAtomicFifoDequeue)(list, offset);
+ if (item) __tsan_acquire(item);
+ return item;
+}
+
+#endif
+
+TSAN_INTERCEPTOR(void, OSSpinLockLock, volatile OSSpinLock *lock) {
+ CHECK(!cur_thread()->is_dead);
+ if (!cur_thread()->is_inited) {
+ return REAL(OSSpinLockLock)(lock);
+ }
+ SCOPED_TSAN_INTERCEPTOR(OSSpinLockLock, lock);
+ REAL(OSSpinLockLock)(lock);
+ Acquire(thr, pc, (uptr)lock);
+}
+
+TSAN_INTERCEPTOR(bool, OSSpinLockTry, volatile OSSpinLock *lock) {
+ CHECK(!cur_thread()->is_dead);
+ if (!cur_thread()->is_inited) {
+ return REAL(OSSpinLockTry)(lock);
+ }
+ SCOPED_TSAN_INTERCEPTOR(OSSpinLockTry, lock);
+ bool result = REAL(OSSpinLockTry)(lock);
+ if (result)
+ Acquire(thr, pc, (uptr)lock);
+ return result;
+}
+
+TSAN_INTERCEPTOR(void, OSSpinLockUnlock, volatile OSSpinLock *lock) {
+ CHECK(!cur_thread()->is_dead);
+ if (!cur_thread()->is_inited) {
+ return REAL(OSSpinLockUnlock)(lock);
+ }
+ SCOPED_TSAN_INTERCEPTOR(OSSpinLockUnlock, lock);
+ Release(thr, pc, (uptr)lock);
+ REAL(OSSpinLockUnlock)(lock);
+}
+
+TSAN_INTERCEPTOR(void, os_lock_lock, void *lock) {
+ CHECK(!cur_thread()->is_dead);
+ if (!cur_thread()->is_inited) {
+ return REAL(os_lock_lock)(lock);
+ }
+ SCOPED_TSAN_INTERCEPTOR(os_lock_lock, lock);
+ REAL(os_lock_lock)(lock);
+ Acquire(thr, pc, (uptr)lock);
+}
+
+TSAN_INTERCEPTOR(bool, os_lock_trylock, void *lock) {
+ CHECK(!cur_thread()->is_dead);
+ if (!cur_thread()->is_inited) {
+ return REAL(os_lock_trylock)(lock);
+ }
+ SCOPED_TSAN_INTERCEPTOR(os_lock_trylock, lock);
+ bool result = REAL(os_lock_trylock)(lock);
+ if (result)
+ Acquire(thr, pc, (uptr)lock);
+ return result;
+}
+
+TSAN_INTERCEPTOR(void, os_lock_unlock, void *lock) {
+ CHECK(!cur_thread()->is_dead);
+ if (!cur_thread()->is_inited) {
+ return REAL(os_lock_unlock)(lock);
+ }
+ SCOPED_TSAN_INTERCEPTOR(os_lock_unlock, lock);
+ Release(thr, pc, (uptr)lock);
+ REAL(os_lock_unlock)(lock);
+}
+
+TSAN_INTERCEPTOR(void, os_unfair_lock_lock, os_unfair_lock_t lock) {
+ if (!cur_thread()->is_inited || cur_thread()->is_dead) {
+ return REAL(os_unfair_lock_lock)(lock);
+ }
+ SCOPED_TSAN_INTERCEPTOR(os_unfair_lock_lock, lock);
+ REAL(os_unfair_lock_lock)(lock);
+ Acquire(thr, pc, (uptr)lock);
+}
+
+TSAN_INTERCEPTOR(void, os_unfair_lock_lock_with_options, os_unfair_lock_t lock,
+ u32 options) {
+ if (!cur_thread()->is_inited || cur_thread()->is_dead) {
+ return REAL(os_unfair_lock_lock_with_options)(lock, options);
+ }
+ SCOPED_TSAN_INTERCEPTOR(os_unfair_lock_lock_with_options, lock, options);
+ REAL(os_unfair_lock_lock_with_options)(lock, options);
+ Acquire(thr, pc, (uptr)lock);
+}
+
+TSAN_INTERCEPTOR(bool, os_unfair_lock_trylock, os_unfair_lock_t lock) {
+ if (!cur_thread()->is_inited || cur_thread()->is_dead) {
+ return REAL(os_unfair_lock_trylock)(lock);
+ }
+ SCOPED_TSAN_INTERCEPTOR(os_unfair_lock_trylock, lock);
+ bool result = REAL(os_unfair_lock_trylock)(lock);
+ if (result)
+ Acquire(thr, pc, (uptr)lock);
+ return result;
+}
+
+TSAN_INTERCEPTOR(void, os_unfair_lock_unlock, os_unfair_lock_t lock) {
+ if (!cur_thread()->is_inited || cur_thread()->is_dead) {
+ return REAL(os_unfair_lock_unlock)(lock);
+ }
+ SCOPED_TSAN_INTERCEPTOR(os_unfair_lock_unlock, lock);
+ Release(thr, pc, (uptr)lock);
+ REAL(os_unfair_lock_unlock)(lock);
+}
+
+#if defined(__has_include) && __has_include(<xpc/xpc.h>)
+
+TSAN_INTERCEPTOR(void, xpc_connection_set_event_handler,
+ xpc_connection_t connection, xpc_handler_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(xpc_connection_set_event_handler, connection,
+ handler);
+ Release(thr, pc, (uptr)connection);
+ xpc_handler_t new_handler = ^(xpc_object_t object) {
+ {
+ SCOPED_INTERCEPTOR_RAW(xpc_connection_set_event_handler);
+ Acquire(thr, pc, (uptr)connection);
+ }
+ handler(object);
+ };
+ REAL(xpc_connection_set_event_handler)(connection, new_handler);
+}
+
+TSAN_INTERCEPTOR(void, xpc_connection_send_barrier, xpc_connection_t connection,
+ dispatch_block_t barrier) {
+ SCOPED_TSAN_INTERCEPTOR(xpc_connection_send_barrier, connection, barrier);
+ Release(thr, pc, (uptr)connection);
+ dispatch_block_t new_barrier = ^() {
+ {
+ SCOPED_INTERCEPTOR_RAW(xpc_connection_send_barrier);
+ Acquire(thr, pc, (uptr)connection);
+ }
+ barrier();
+ };
+ REAL(xpc_connection_send_barrier)(connection, new_barrier);
+}
+
+TSAN_INTERCEPTOR(void, xpc_connection_send_message_with_reply,
+ xpc_connection_t connection, xpc_object_t message,
+ dispatch_queue_t replyq, xpc_handler_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(xpc_connection_send_message_with_reply, connection,
+ message, replyq, handler);
+ Release(thr, pc, (uptr)connection);
+ xpc_handler_t new_handler = ^(xpc_object_t object) {
+ {
+ SCOPED_INTERCEPTOR_RAW(xpc_connection_send_message_with_reply);
+ Acquire(thr, pc, (uptr)connection);
+ }
+ handler(object);
+ };
+ REAL(xpc_connection_send_message_with_reply)
+ (connection, message, replyq, new_handler);
+}
+
+TSAN_INTERCEPTOR(void, xpc_connection_cancel, xpc_connection_t connection) {
+ SCOPED_TSAN_INTERCEPTOR(xpc_connection_cancel, connection);
+ Release(thr, pc, (uptr)connection);
+ REAL(xpc_connection_cancel)(connection);
+}
+
+#endif // #if defined(__has_include) && __has_include(<xpc/xpc.h>)
+
+// Determines whether the Obj-C object pointer is a tagged pointer. Tagged
+// pointers encode the object data directly in their pointer bits and do not
+// have an associated memory allocation. The Obj-C runtime uses tagged pointers
+// to transparently optimize small objects.
+static bool IsTaggedObjCPointer(id obj) {
+ const uptr kPossibleTaggedBits = 0x8000000000000001ull;
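+  // Descriptive note (the exact encoding is an Obj-C runtime implementation
+  // detail): the mask covers both known schemes, the least significant bit
+  // (used on x86_64) and the most significant bit (used on arm64).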
+ return ((uptr)obj & kPossibleTaggedBits) != 0;
+}
+
+// Returns an address which can be used to inform TSan about synchronization
+// points (MutexLock/Unlock). The TSan infrastructure expects this to be a valid
+// address in the process space. We do a small allocation here to obtain a
+// stable address (the array backing the hash map can change). The memory is
+// never freed (leaked), and allocation and locking are slow, but this code only
+// runs for @synchronized with tagged pointers, which is very rare.
+static uptr GetOrCreateSyncAddress(uptr addr, ThreadState *thr, uptr pc) {
+ typedef AddrHashMap<uptr, 5> Map;
+ static Map Addresses;
+ Map::Handle h(&Addresses, addr);
+ if (h.created()) {
+ ThreadIgnoreBegin(thr, pc);
+ *h = (uptr) user_alloc(thr, pc, /*size=*/1);
+ ThreadIgnoreEnd(thr);
+ }
+ return *h;
+}
+
+// Returns an address on which we can synchronize given an Obj-C object pointer.
+// For normal object pointers, this is just the address of the object in memory.
+// Tagged pointers are not backed by an actual memory allocation, so we need to
+// synthesize a valid address.
+static uptr SyncAddressForObjCObject(id obj, ThreadState *thr, uptr pc) {
+ if (IsTaggedObjCPointer(obj))
+ return GetOrCreateSyncAddress((uptr)obj, thr, pc);
+ return (uptr)obj;
+}
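+
+// Illustrative example (not part of the original code): with the mapping
+// above, a block such as
+//   @synchronized (@42) { ... }   // @42 is typically a tagged NSNumber
+// makes objc_sync_enter/objc_sync_exit below model the mutex on the stable
+// heap address returned by GetOrCreateSyncAddress instead of on the tagged
+// pointer value itself.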
+
+TSAN_INTERCEPTOR(int, objc_sync_enter, id obj) {
+ SCOPED_TSAN_INTERCEPTOR(objc_sync_enter, obj);
+ if (!obj) return REAL(objc_sync_enter)(obj);
+ uptr addr = SyncAddressForObjCObject(obj, thr, pc);
+ MutexPreLock(thr, pc, addr, MutexFlagWriteReentrant);
+ int result = REAL(objc_sync_enter)(obj);
+ CHECK_EQ(result, OBJC_SYNC_SUCCESS);
+ MutexPostLock(thr, pc, addr, MutexFlagWriteReentrant);
+ return result;
+}
+
+TSAN_INTERCEPTOR(int, objc_sync_exit, id obj) {
+ SCOPED_TSAN_INTERCEPTOR(objc_sync_exit, obj);
+ if (!obj) return REAL(objc_sync_exit)(obj);
+ uptr addr = SyncAddressForObjCObject(obj, thr, pc);
+ MutexUnlock(thr, pc, addr);
+ int result = REAL(objc_sync_exit)(obj);
+ if (result != OBJC_SYNC_SUCCESS) MutexInvalidAccess(thr, pc, addr);
+ return result;
+}
+
+TSAN_INTERCEPTOR(int, swapcontext, ucontext_t *oucp, const ucontext_t *ucp) {
+ {
+ SCOPED_INTERCEPTOR_RAW(swapcontext, oucp, ucp);
+ }
+ // Because of swapcontext() semantics we have no option but to copy its
+ // implementation here
+ if (!oucp || !ucp) {
+ errno = EINVAL;
+ return -1;
+ }
+ ThreadState *thr = cur_thread();
+ const int UCF_SWAPPED = 0x80000000;
+ oucp->uc_onstack &= ~UCF_SWAPPED;
+ thr->ignore_interceptors++;
+ int ret = getcontext(oucp);
+ if (!(oucp->uc_onstack & UCF_SWAPPED)) {
+ thr->ignore_interceptors--;
+ if (!ret) {
+ oucp->uc_onstack |= UCF_SWAPPED;
+ ret = setcontext(ucp);
+ }
+ }
+ return ret;
+}
+
+// On macOS, libc++ is always linked dynamically, so intercepting works the
+// usual way.
+#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
+
+namespace {
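+// Descriptive note: this struct mimics the layout of libc++'s
+// std::__shared_weak_count (vtable pointer plus two atomic reference counts);
+// the _unused_0x* virtuals merely pad the vtable so that on_zero_shared() and
+// on_zero_shared_weak() occupy the same slots as in the real class.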
+struct fake_shared_weak_count {
+ volatile a64 shared_owners;
+ volatile a64 shared_weak_owners;
+ virtual void _unused_0x0() = 0;
+ virtual void _unused_0x8() = 0;
+ virtual void on_zero_shared() = 0;
+ virtual void _unused_0x18() = 0;
+ virtual void on_zero_shared_weak() = 0;
+ virtual ~fake_shared_weak_count() = 0; // suppress -Wnon-virtual-dtor
+};
+} // namespace
+
+// The following code adds libc++ interceptors for:
+// void __shared_weak_count::__release_shared() _NOEXCEPT;
+// bool __shared_count::__release_shared() _NOEXCEPT;
+// Shared and weak pointers in C++ maintain reference counts via atomics in
+// libc++.dylib, which are TSan-invisible, and this leads to false positives in
+// destructor code. These interceptors re-implement the functions in full so that
+// the mo_acq_rel semantics of the atomic decrement are visible.
+//
+// Unfortunately, the interceptors cannot simply Acquire/Release some sync
+// object and call the original function, because it would have a race between
+// the sync and the destruction of the object. Calling both under a lock will
+// not work because the destructor can invoke this interceptor again (and even
+// in a different thread, so recursive locks don't help).
+
+STDCXX_INTERCEPTOR(void, _ZNSt3__119__shared_weak_count16__release_sharedEv,
+ fake_shared_weak_count *o) {
+ if (!flags()->shared_ptr_interceptor)
+ return REAL(_ZNSt3__119__shared_weak_count16__release_sharedEv)(o);
+
+ SCOPED_TSAN_INTERCEPTOR(_ZNSt3__119__shared_weak_count16__release_sharedEv,
+ o);
+ if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) {
+ Acquire(thr, pc, (uptr)&o->shared_owners);
+ o->on_zero_shared();
+ if (__tsan_atomic64_fetch_add(&o->shared_weak_owners, -1, mo_release) ==
+ 0) {
+ Acquire(thr, pc, (uptr)&o->shared_weak_owners);
+ o->on_zero_shared_weak();
+ }
+ }
+}
+
+STDCXX_INTERCEPTOR(bool, _ZNSt3__114__shared_count16__release_sharedEv,
+ fake_shared_weak_count *o) {
+ if (!flags()->shared_ptr_interceptor)
+ return REAL(_ZNSt3__114__shared_count16__release_sharedEv)(o);
+
+ SCOPED_TSAN_INTERCEPTOR(_ZNSt3__114__shared_count16__release_sharedEv, o);
+ if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) {
+ Acquire(thr, pc, (uptr)&o->shared_owners);
+ o->on_zero_shared();
+ return true;
+ }
+ return false;
+}
+
+namespace {
+struct call_once_callback_args {
+ void (*orig_func)(void *arg);
+ void *orig_arg;
+ void *flag;
+};
+
+void call_once_callback_wrapper(void *arg) {
+ call_once_callback_args *new_args = (call_once_callback_args *)arg;
+ new_args->orig_func(new_args->orig_arg);
+ __tsan_release(new_args->flag);
+}
+} // namespace
+
+// This adds a libc++ interceptor for:
+// void __call_once(volatile unsigned long&, void*, void(*)(void*));
+// C++11 call_once is implemented via an internal function __call_once which is
+// inside libc++.dylib, and the atomic release store inside it is thus
+// TSan-invisible. To avoid false positives, this interceptor wraps the callback
+// function and performs an explicit Release after the user code has run.
+STDCXX_INTERCEPTOR(void, _ZNSt3__111__call_onceERVmPvPFvS2_E, void *flag,
+ void *arg, void (*func)(void *arg)) {
+ call_once_callback_args new_args = {func, arg, flag};
+ REAL(_ZNSt3__111__call_onceERVmPvPFvS2_E)(flag, &new_args,
+ call_once_callback_wrapper);
+}
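+
+// Illustrative example (not part of the original code): a user-level
+//   static std::once_flag flag;
+//   std::call_once(flag, init);
+// reaches libc++'s __call_once, so with the wrapper above TSan observes an
+// explicit release on the flag once init() has finished, instead of relying
+// on the release store hidden inside libc++.dylib.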
+
+} // namespace __tsan
+
+#endif // SANITIZER_MAC
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_interceptors_mach_vm.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_interceptors_mach_vm.cpp
new file mode 100644
index 000000000000..6d62ff6a8382
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_interceptors_mach_vm.cpp
@@ -0,0 +1,53 @@
+//===-- tsan_interceptors_mach_vm.cpp -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Interceptors for mach_vm_* user space memory routines on Darwin.
+//===----------------------------------------------------------------------===//
+
+#include "interception/interception.h"
+#include "tsan_interceptors.h"
+#include "tsan_platform.h"
+
+#include <mach/mach.h>
+
+namespace __tsan {
+
+static bool intersects_with_shadow(mach_vm_address_t address,
+ mach_vm_size_t size, int flags) {
+ // VM_FLAGS_FIXED is 0x0, so we have to test for VM_FLAGS_ANYWHERE.
+ if (flags & VM_FLAGS_ANYWHERE) return false;
+ return !IsAppMem(address) || !IsAppMem(address + size - 1);
+}
+
+TSAN_INTERCEPTOR(kern_return_t, mach_vm_allocate, vm_map_t target,
+ mach_vm_address_t *address, mach_vm_size_t size, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(mach_vm_allocate, target, address, size, flags);
+ if (target != mach_task_self())
+ return REAL(mach_vm_allocate)(target, address, size, flags);
+ if (address && intersects_with_shadow(*address, size, flags))
+ return KERN_NO_SPACE;
+ kern_return_t kr = REAL(mach_vm_allocate)(target, address, size, flags);
+ if (kr == KERN_SUCCESS)
+ MemoryRangeImitateWriteOrResetRange(thr, pc, *address, size);
+ return kr;
+}
+
+TSAN_INTERCEPTOR(kern_return_t, mach_vm_deallocate, vm_map_t target,
+ mach_vm_address_t address, mach_vm_size_t size) {
+ SCOPED_TSAN_INTERCEPTOR(mach_vm_deallocate, target, address, size);
+ if (target != mach_task_self())
+ return REAL(mach_vm_deallocate)(target, address, size);
+ kern_return_t kr = REAL(mach_vm_deallocate)(target, address, size);
+ if (kr == KERN_SUCCESS && address)
+ UnmapShadow(thr, address, size);
+ return kr;
+}
+
+} // namespace __tsan
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_interceptors_posix.cpp
new file mode 100644
index 000000000000..cf3dc90d96a1
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_interceptors_posix.cpp
@@ -0,0 +1,3015 @@
+//===-- tsan_interceptors_posix.cpp ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// FIXME: move as many interceptors as possible into
+// sanitizer_common/sanitizer_common_interceptors.inc
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
+#include "sanitizer_common/sanitizer_platform_limits_posix.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_posix.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+#include "interception/interception.h"
+#include "tsan_interceptors.h"
+#include "tsan_interface.h"
+#include "tsan_platform.h"
+#include "tsan_suppressions.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "tsan_fd.h"
+
+#include <stdarg.h>
+
+using namespace __tsan;
+
+#if SANITIZER_FREEBSD || SANITIZER_MAC
+#define stdout __stdoutp
+#define stderr __stderrp
+#endif
+
+#if SANITIZER_NETBSD
+#define dirfd(dirp) (*(int *)(dirp))
+#define fileno_unlocked(fp) \
+ (((__sanitizer_FILE *)fp)->_file == -1 \
+ ? -1 \
+ : (int)(unsigned short)(((__sanitizer_FILE *)fp)->_file))
+
+#define stdout ((__sanitizer_FILE*)&__sF[1])
+#define stderr ((__sanitizer_FILE*)&__sF[2])
+
+#define nanosleep __nanosleep50
+#define vfork __vfork14
+#endif
+
+#ifdef __mips__
+const int kSigCount = 129;
+#else
+const int kSigCount = 65;
+#endif
+
+#ifdef __mips__
+struct ucontext_t {
+ u64 opaque[768 / sizeof(u64) + 1];
+};
+#else
+struct ucontext_t {
+ // The size is determined by looking at sizeof of real ucontext_t on linux.
+ u64 opaque[936 / sizeof(u64) + 1];
+};
+#endif
+
+#if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 || \
+ defined(__s390x__)
+#define PTHREAD_ABI_BASE "GLIBC_2.3.2"
+#elif defined(__aarch64__) || SANITIZER_PPC64V2
+#define PTHREAD_ABI_BASE "GLIBC_2.17"
+#endif
+
+extern "C" int pthread_attr_init(void *attr);
+extern "C" int pthread_attr_destroy(void *attr);
+DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
+extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
+extern "C" int pthread_atfork(void (*prepare)(void), void (*parent)(void),
+ void (*child)(void));
+extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
+extern "C" int pthread_setspecific(unsigned key, const void *v);
+DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
+DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
+DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
+DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
+extern "C" int pthread_equal(void *t1, void *t2);
+extern "C" void *pthread_self();
+extern "C" void _exit(int status);
+#if !SANITIZER_NETBSD
+extern "C" int fileno_unlocked(void *stream);
+extern "C" int dirfd(void *dirp);
+#endif
+#if SANITIZER_NETBSD
+extern __sanitizer_FILE __sF[];
+#else
+extern __sanitizer_FILE *stdout, *stderr;
+#endif
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
+const int PTHREAD_MUTEX_RECURSIVE = 1;
+const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
+#else
+const int PTHREAD_MUTEX_RECURSIVE = 2;
+const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
+#endif
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
+const int EPOLL_CTL_ADD = 1;
+#endif
+const int SIGILL = 4;
+const int SIGTRAP = 5;
+const int SIGABRT = 6;
+const int SIGFPE = 8;
+const int SIGSEGV = 11;
+const int SIGPIPE = 13;
+const int SIGTERM = 15;
+#if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
+const int SIGBUS = 10;
+const int SIGSYS = 12;
+#else
+const int SIGBUS = 7;
+const int SIGSYS = 31;
+#endif
+void *const MAP_FAILED = (void*)-1;
+#if SANITIZER_NETBSD
+const int PTHREAD_BARRIER_SERIAL_THREAD = 1234567;
+#elif !SANITIZER_MAC
+const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
+#endif
+const int MAP_FIXED = 0x10;
+typedef long long_t;
+typedef __sanitizer::u16 mode_t;
+
+// From /usr/include/unistd.h
+# define F_ULOCK 0 /* Unlock a previously locked region. */
+# define F_LOCK 1 /* Lock a region for exclusive use. */
+# define F_TLOCK 2 /* Test and lock a region for exclusive use. */
+# define F_TEST 3 /* Test a region for other processes locks. */
+
+#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
+const int SA_SIGINFO = 0x40;
+const int SIG_SETMASK = 3;
+#elif defined(__mips__)
+const int SA_SIGINFO = 8;
+const int SIG_SETMASK = 3;
+#else
+const int SA_SIGINFO = 4;
+const int SIG_SETMASK = 2;
+#endif
+
+#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \
+ (!cur_thread_init()->is_inited)
+
+namespace __tsan {
+struct SignalDesc {
+ bool armed;
+ __sanitizer_siginfo siginfo;
+ ucontext_t ctx;
+};
+
+struct ThreadSignalContext {
+ int int_signal_send;
+ atomic_uintptr_t in_blocking_func;
+ SignalDesc pending_signals[kSigCount];
+ // emptyset and oldset are too big for stack.
+ __sanitizer_sigset_t emptyset;
+ __sanitizer_sigset_t oldset;
+};
+
+// The sole reason tsan wraps atexit callbacks is to establish synchronization
+// between callback setup and callback execution.
+struct AtExitCtx {
+ void (*f)();
+ void *arg;
+ uptr pc;
+};
+
+// InterceptorContext holds all global data required for interceptors.
+// It's explicitly constructed in InitializeInterceptors with placement new
+// and is never destroyed. This allows usage of members with non-trivial
+// constructors and destructors.
+struct InterceptorContext {
+ // The object is 64-byte aligned, because we want hot data to be located
+ // in a single cache line if possible (it's accessed in every interceptor).
+ ALIGNED(64) LibIgnore libignore;
+ __sanitizer_sigaction sigactions[kSigCount];
+#if !SANITIZER_MAC && !SANITIZER_NETBSD
+ unsigned finalize_key;
+#endif
+
+ Mutex atexit_mu;
+ Vector<struct AtExitCtx *> AtExitStack;
+
+ InterceptorContext() : libignore(LINKER_INITIALIZED), atexit_mu(MutexTypeAtExit), AtExitStack() {}
+};
+
+static ALIGNED(64) char interceptor_placeholder[sizeof(InterceptorContext)];
+InterceptorContext *interceptor_ctx() {
+ return reinterpret_cast<InterceptorContext*>(&interceptor_placeholder[0]);
+}
+
+LibIgnore *libignore() {
+ return &interceptor_ctx()->libignore;
+}
+
+void InitializeLibIgnore() {
+ const SuppressionContext &supp = *Suppressions();
+ const uptr n = supp.SuppressionCount();
+ for (uptr i = 0; i < n; i++) {
+ const Suppression *s = supp.SuppressionAt(i);
+ if (0 == internal_strcmp(s->type, kSuppressionLib))
+ libignore()->AddIgnoredLibrary(s->templ);
+ }
+ if (flags()->ignore_noninstrumented_modules)
+ libignore()->IgnoreNoninstrumentedModules(true);
+ libignore()->OnLibraryLoaded(0);
+}
+
+// The following two hooks can be used for cooperative scheduling when
+// locking.
+#ifdef TSAN_EXTERNAL_HOOKS
+void OnPotentiallyBlockingRegionBegin();
+void OnPotentiallyBlockingRegionEnd();
+#else
+SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionBegin() {}
+SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionEnd() {}
+#endif
+
+} // namespace __tsan
+
+static ThreadSignalContext *SigCtx(ThreadState *thr) {
+ ThreadSignalContext *ctx = (ThreadSignalContext*)thr->signal_ctx;
+ if (ctx == 0 && !thr->is_dead) {
+ ctx = (ThreadSignalContext*)MmapOrDie(sizeof(*ctx), "ThreadSignalContext");
+ MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx));
+ thr->signal_ctx = ctx;
+ }
+ return ctx;
+}
+
+ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
+ uptr pc)
+ : thr_(thr), in_ignored_lib_(false), ignoring_(false) {
+ LazyInitialize(thr);
+ if (!thr_->is_inited) return;
+ if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
+ DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
+ ignoring_ =
+ !thr_->in_ignored_lib && (flags()->ignore_interceptors_accesses ||
+ libignore()->IsIgnored(pc, &in_ignored_lib_));
+ EnableIgnores();
+}
+
+ScopedInterceptor::~ScopedInterceptor() {
+ if (!thr_->is_inited) return;
+ DisableIgnores();
+ if (!thr_->ignore_interceptors) {
+ ProcessPendingSignals(thr_);
+ FuncExit(thr_);
+ CheckedMutex::CheckNoLocks();
+ }
+}
+
+NOINLINE
+void ScopedInterceptor::EnableIgnoresImpl() {
+ ThreadIgnoreBegin(thr_, 0);
+ if (flags()->ignore_noninstrumented_modules)
+ thr_->suppress_reports++;
+ if (in_ignored_lib_) {
+ DCHECK(!thr_->in_ignored_lib);
+ thr_->in_ignored_lib = true;
+ }
+}
+
+NOINLINE
+void ScopedInterceptor::DisableIgnoresImpl() {
+ ThreadIgnoreEnd(thr_);
+ if (flags()->ignore_noninstrumented_modules)
+ thr_->suppress_reports--;
+ if (in_ignored_lib_) {
+ DCHECK(thr_->in_ignored_lib);
+ thr_->in_ignored_lib = false;
+ }
+}
+
+#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
+#if SANITIZER_FREEBSD
+# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
+#elif SANITIZER_NETBSD
+# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) \
+ INTERCEPT_FUNCTION(__libc_##func)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) \
+ INTERCEPT_FUNCTION(__libc_thr_##func)
+#else
+# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
+#endif
+
+#define READ_STRING_OF_LEN(thr, pc, s, len, n) \
+ MemoryAccessRange((thr), (pc), (uptr)(s), \
+ common_flags()->strict_string_checks ? (len) + 1 : (n), false)
+
+#define READ_STRING(thr, pc, s, n) \
+ READ_STRING_OF_LEN((thr), (pc), (s), internal_strlen(s), (n))
+
+#define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))
+
+struct BlockingCall {
+ explicit BlockingCall(ThreadState *thr)
+ : thr(thr)
+ , ctx(SigCtx(thr)) {
+ for (;;) {
+ atomic_store(&ctx->in_blocking_func, 1, memory_order_relaxed);
+ if (atomic_load(&thr->pending_signals, memory_order_relaxed) == 0)
+ break;
+ atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
+ ProcessPendingSignals(thr);
+ }
+ // When we are in a "blocking call", we process signals asynchronously
+ // (right when they arrive). In this context we do not expect to be
+ // executing any user/runtime code. The known interceptor sequence when
+ // this is not true is: pthread_join -> munmap(stack). It's fine
+ // to ignore munmap in this case -- we handle stack shadow separately.
+ thr->ignore_interceptors++;
+ }
+
+ ~BlockingCall() {
+ thr->ignore_interceptors--;
+ atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
+ }
+
+ ThreadState *thr;
+ ThreadSignalContext *ctx;
+};
+
+TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
+ SCOPED_TSAN_INTERCEPTOR(sleep, sec);
+ unsigned res = BLOCK_REAL(sleep)(sec);
+ AfterSleep(thr, pc);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, usleep, long_t usec) {
+ SCOPED_TSAN_INTERCEPTOR(usleep, usec);
+ int res = BLOCK_REAL(usleep)(usec);
+ AfterSleep(thr, pc);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
+ SCOPED_TSAN_INTERCEPTOR(nanosleep, req, rem);
+ int res = BLOCK_REAL(nanosleep)(req, rem);
+ AfterSleep(thr, pc);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pause, int fake) {
+ SCOPED_TSAN_INTERCEPTOR(pause, fake);
+ return BLOCK_REAL(pause)(fake);
+}
+
+// Note: the function is deliberately given the strange "installed_at" name
+// because in reports it will appear between the callback frames and the
+// frame that installed the callback.
+static void at_exit_callback_installed_at() {
+ AtExitCtx *ctx;
+ {
+ // Ensure thread-safety.
+ Lock l(&interceptor_ctx()->atexit_mu);
+
+ // Pop AtExitCtx from the top of the stack of callback functions
+ uptr element = interceptor_ctx()->AtExitStack.Size() - 1;
+ ctx = interceptor_ctx()->AtExitStack[element];
+ interceptor_ctx()->AtExitStack.PopBack();
+ }
+
+ ThreadState *thr = cur_thread();
+ Acquire(thr, ctx->pc, (uptr)ctx);
+ FuncEntry(thr, ctx->pc);
+ ((void(*)())ctx->f)();
+ FuncExit(thr);
+ Free(ctx);
+}
+
+static void cxa_at_exit_callback_installed_at(void *arg) {
+ ThreadState *thr = cur_thread();
+ AtExitCtx *ctx = (AtExitCtx*)arg;
+ Acquire(thr, ctx->pc, (uptr)arg);
+ FuncEntry(thr, ctx->pc);
+ ((void(*)(void *arg))ctx->f)(ctx->arg);
+ FuncExit(thr);
+ Free(ctx);
+}
+
+static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
+ void *arg, void *dso);
+
+#if !SANITIZER_ANDROID
+TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
+ if (in_symbolizer())
+ return 0;
+  // We want to set up the atexit callback even if we are in an ignored lib
+ // or after fork.
+ SCOPED_INTERCEPTOR_RAW(atexit, f);
+ return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, 0, 0);
+}
+#endif
+
+TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
+ if (in_symbolizer())
+ return 0;
+ SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
+ return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, arg, dso);
+}
+
+static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
+ void *arg, void *dso) {
+ auto *ctx = New<AtExitCtx>();
+ ctx->f = f;
+ ctx->arg = arg;
+ ctx->pc = pc;
+ Release(thr, pc, (uptr)ctx);
+ // Memory allocation in __cxa_atexit will race with free during exit,
+  // because we do not see synchronization around the atexit callback list.
+ ThreadIgnoreBegin(thr, pc);
+ int res;
+ if (!dso) {
+ // NetBSD does not preserve the 2nd argument if dso is equal to 0
+ // Store ctx in a local stack-like structure
+
+ // Ensure thread-safety.
+ Lock l(&interceptor_ctx()->atexit_mu);
+ // __cxa_atexit calls calloc. If we don't ignore interceptors, we will fail
+ // due to atexit_mu held on exit from the calloc interceptor.
+ ScopedIgnoreInterceptors ignore;
+
+ res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_callback_installed_at,
+ 0, 0);
+ // Push AtExitCtx on the top of the stack of callback functions
+ if (!res) {
+ interceptor_ctx()->AtExitStack.PushBack(ctx);
+ }
+ } else {
+ res = REAL(__cxa_atexit)(cxa_at_exit_callback_installed_at, ctx, dso);
+ }
+ ThreadIgnoreEnd(thr);
+ return res;
+}
+
+#if !SANITIZER_MAC && !SANITIZER_NETBSD
+static void on_exit_callback_installed_at(int status, void *arg) {
+ ThreadState *thr = cur_thread();
+ AtExitCtx *ctx = (AtExitCtx*)arg;
+ Acquire(thr, ctx->pc, (uptr)arg);
+ FuncEntry(thr, ctx->pc);
+ ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
+ FuncExit(thr);
+ Free(ctx);
+}
+
+TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
+ if (in_symbolizer())
+ return 0;
+ SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
+ auto *ctx = New<AtExitCtx>();
+ ctx->f = (void(*)())f;
+ ctx->arg = arg;
+ ctx->pc = GET_CALLER_PC();
+ Release(thr, pc, (uptr)ctx);
+ // Memory allocation in __cxa_atexit will race with free during exit,
+  // because we do not see synchronization around the atexit callback list.
+ ThreadIgnoreBegin(thr, pc);
+ int res = REAL(on_exit)(on_exit_callback_installed_at, ctx);
+ ThreadIgnoreEnd(thr);
+ return res;
+}
+#define TSAN_MAYBE_INTERCEPT_ON_EXIT TSAN_INTERCEPT(on_exit)
+#else
+#define TSAN_MAYBE_INTERCEPT_ON_EXIT
+#endif
+
+// Clean up old bufs.
+static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
+ for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
+ JmpBuf *buf = &thr->jmp_bufs[i];
+ if (buf->sp <= sp) {
+ uptr sz = thr->jmp_bufs.Size();
+ internal_memcpy(buf, &thr->jmp_bufs[sz - 1], sizeof(*buf));
+ thr->jmp_bufs.PopBack();
+ i--;
+ }
+ }
+}
+
+static void SetJmp(ThreadState *thr, uptr sp) {
+ if (!thr->is_inited) // called from libc guts during bootstrap
+ return;
+  // Clean up old bufs.
+ JmpBufGarbageCollect(thr, sp);
+ // Remember the buf.
+ JmpBuf *buf = thr->jmp_bufs.PushBack();
+ buf->sp = sp;
+ buf->shadow_stack_pos = thr->shadow_stack_pos;
+ ThreadSignalContext *sctx = SigCtx(thr);
+ buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
+ buf->in_blocking_func = sctx ?
+ atomic_load(&sctx->in_blocking_func, memory_order_relaxed) :
+ false;
+ buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
+ memory_order_relaxed);
+}
+
+static void LongJmp(ThreadState *thr, uptr *env) {
+ uptr sp = ExtractLongJmpSp(env);
+ // Find the saved buf with matching sp.
+ for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
+ JmpBuf *buf = &thr->jmp_bufs[i];
+ if (buf->sp == sp) {
+ CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos);
+ // Unwind the stack.
+ while (thr->shadow_stack_pos > buf->shadow_stack_pos)
+ FuncExit(thr);
+ ThreadSignalContext *sctx = SigCtx(thr);
+ if (sctx) {
+ sctx->int_signal_send = buf->int_signal_send;
+ atomic_store(&sctx->in_blocking_func, buf->in_blocking_func,
+ memory_order_relaxed);
+ }
+ atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
+ memory_order_relaxed);
+ JmpBufGarbageCollect(thr, buf->sp - 1); // do not collect buf->sp
+ return;
+ }
+ }
+ Printf("ThreadSanitizer: can't find longjmp buf\n");
+ CHECK(0);
+}
+
+// FIXME: put everything below into a common extern "C" block?
+extern "C" void __tsan_setjmp(uptr sp) { SetJmp(cur_thread_init(), sp); }
+
+#if SANITIZER_MAC
+TSAN_INTERCEPTOR(int, setjmp, void *env);
+TSAN_INTERCEPTOR(int, _setjmp, void *env);
+TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
+#else // SANITIZER_MAC
+
+#if SANITIZER_NETBSD
+#define setjmp_symname __setjmp14
+#define sigsetjmp_symname __sigsetjmp14
+#else
+#define setjmp_symname setjmp
+#define sigsetjmp_symname sigsetjmp
+#endif
+
+#define TSAN_INTERCEPTOR_SETJMP_(x) __interceptor_ ## x
+#define TSAN_INTERCEPTOR_SETJMP__(x) TSAN_INTERCEPTOR_SETJMP_(x)
+#define TSAN_INTERCEPTOR_SETJMP TSAN_INTERCEPTOR_SETJMP__(setjmp_symname)
+#define TSAN_INTERCEPTOR_SIGSETJMP TSAN_INTERCEPTOR_SETJMP__(sigsetjmp_symname)
+
+#define TSAN_STRING_SETJMP SANITIZER_STRINGIFY(setjmp_symname)
+#define TSAN_STRING_SIGSETJMP SANITIZER_STRINGIFY(sigsetjmp_symname)
+
+// Not called. Merely to satisfy TSAN_INTERCEPT().
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+int TSAN_INTERCEPTOR_SETJMP(void *env);
+extern "C" int TSAN_INTERCEPTOR_SETJMP(void *env) {
+ CHECK(0);
+ return 0;
+}
+
+// FIXME: any reason to have a separate declaration?
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+int __interceptor__setjmp(void *env);
+extern "C" int __interceptor__setjmp(void *env) {
+ CHECK(0);
+ return 0;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+int TSAN_INTERCEPTOR_SIGSETJMP(void *env);
+extern "C" int TSAN_INTERCEPTOR_SIGSETJMP(void *env) {
+ CHECK(0);
+ return 0;
+}
+
+#if !SANITIZER_NETBSD
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+int __interceptor___sigsetjmp(void *env);
+extern "C" int __interceptor___sigsetjmp(void *env) {
+ CHECK(0);
+ return 0;
+}
+#endif
+
+extern "C" int setjmp_symname(void *env);
+extern "C" int _setjmp(void *env);
+extern "C" int sigsetjmp_symname(void *env);
+#if !SANITIZER_NETBSD
+extern "C" int __sigsetjmp(void *env);
+#endif
+DEFINE_REAL(int, setjmp_symname, void *env)
+DEFINE_REAL(int, _setjmp, void *env)
+DEFINE_REAL(int, sigsetjmp_symname, void *env)
+#if !SANITIZER_NETBSD
+DEFINE_REAL(int, __sigsetjmp, void *env)
+#endif
+#endif // SANITIZER_MAC
+
+#if SANITIZER_NETBSD
+#define longjmp_symname __longjmp14
+#define siglongjmp_symname __siglongjmp14
+#else
+#define longjmp_symname longjmp
+#define siglongjmp_symname siglongjmp
+#endif
+
+TSAN_INTERCEPTOR(void, longjmp_symname, uptr *env, int val) {
+ // Note: if we call REAL(longjmp) in the context of ScopedInterceptor,
+ // bad things will happen. We will jump over ScopedInterceptor dtor and can
+ // leave thr->in_ignored_lib set.
+ {
+ SCOPED_INTERCEPTOR_RAW(longjmp_symname, env, val);
+ }
+ LongJmp(cur_thread(), env);
+ REAL(longjmp_symname)(env, val);
+}
+
+TSAN_INTERCEPTOR(void, siglongjmp_symname, uptr *env, int val) {
+ {
+ SCOPED_INTERCEPTOR_RAW(siglongjmp_symname, env, val);
+ }
+ LongJmp(cur_thread(), env);
+ REAL(siglongjmp_symname)(env, val);
+}
+
+#if SANITIZER_NETBSD
+TSAN_INTERCEPTOR(void, _longjmp, uptr *env, int val) {
+ {
+ SCOPED_INTERCEPTOR_RAW(_longjmp, env, val);
+ }
+ LongJmp(cur_thread(), env);
+ REAL(_longjmp)(env, val);
+}
+#endif
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(void*, malloc, uptr size) {
+ if (in_symbolizer())
+ return InternalAlloc(size);
+ void *p = 0;
+ {
+ SCOPED_INTERCEPTOR_RAW(malloc, size);
+ p = user_alloc(thr, pc, size);
+ }
+ invoke_malloc_hook(p, size);
+ return p;
+}
+
+// In glibc<2.25, dynamic TLS blocks are allocated by __libc_memalign. Intercept
+// __libc_memalign so that (1) we can detect races and (2) free will not be
+// called on blocks allocated internally by libc.
+TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
+ SCOPED_INTERCEPTOR_RAW(__libc_memalign, align, sz);
+ return user_memalign(thr, pc, align, sz);
+}
+
+TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
+ if (in_symbolizer())
+ return InternalCalloc(size, n);
+ void *p = 0;
+ {
+ SCOPED_INTERCEPTOR_RAW(calloc, size, n);
+ p = user_calloc(thr, pc, size, n);
+ }
+ invoke_malloc_hook(p, n * size);
+ return p;
+}
+
+TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
+ if (in_symbolizer())
+ return InternalRealloc(p, size);
+ if (p)
+ invoke_free_hook(p);
+ {
+ SCOPED_INTERCEPTOR_RAW(realloc, p, size);
+ p = user_realloc(thr, pc, p, size);
+ }
+ invoke_malloc_hook(p, size);
+ return p;
+}
+
+TSAN_INTERCEPTOR(void*, reallocarray, void *p, uptr size, uptr n) {
+ if (in_symbolizer())
+ return InternalReallocArray(p, size, n);
+ if (p)
+ invoke_free_hook(p);
+ {
+ SCOPED_INTERCEPTOR_RAW(reallocarray, p, size, n);
+ p = user_reallocarray(thr, pc, p, size, n);
+ }
+ invoke_malloc_hook(p, size);
+ return p;
+}
+
+TSAN_INTERCEPTOR(void, free, void *p) {
+ if (p == 0)
+ return;
+ if (in_symbolizer())
+ return InternalFree(p);
+ invoke_free_hook(p);
+ SCOPED_INTERCEPTOR_RAW(free, p);
+ user_free(thr, pc, p);
+}
+
+TSAN_INTERCEPTOR(void, cfree, void *p) {
+ if (p == 0)
+ return;
+ if (in_symbolizer())
+ return InternalFree(p);
+ invoke_free_hook(p);
+ SCOPED_INTERCEPTOR_RAW(cfree, p);
+ user_free(thr, pc, p);
+}
+
+TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
+ SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
+ return user_alloc_usable_size(p);
+}
+#endif
+
+TSAN_INTERCEPTOR(char *, strcpy, char *dst, const char *src) {
+ SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src);
+ uptr srclen = internal_strlen(src);
+ MemoryAccessRange(thr, pc, (uptr)dst, srclen + 1, true);
+ MemoryAccessRange(thr, pc, (uptr)src, srclen + 1, false);
+ return REAL(strcpy)(dst, src);
+}
+
+TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, uptr n) {
+ SCOPED_TSAN_INTERCEPTOR(strncpy, dst, src, n);
+ uptr srclen = internal_strnlen(src, n);
+ MemoryAccessRange(thr, pc, (uptr)dst, n, true);
+ MemoryAccessRange(thr, pc, (uptr)src, min(srclen + 1, n), false);
+ return REAL(strncpy)(dst, src, n);
+}
+
+TSAN_INTERCEPTOR(char*, strdup, const char *str) {
+ SCOPED_TSAN_INTERCEPTOR(strdup, str);
+ // strdup will call malloc, so no instrumentation is required here.
+ return REAL(strdup)(str);
+}
+
+// Zero out addr if it points into shadow memory and was provided as a hint
+// only, i.e., MAP_FIXED is not set.
+static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
+ if (*addr) {
+ if (!IsAppMem((uptr)*addr) || !IsAppMem((uptr)*addr + sz - 1)) {
+ if (flags & MAP_FIXED) {
+ errno = errno_EINVAL;
+ return false;
+ } else {
+ *addr = 0;
+ }
+ }
+ }
+ return true;
+}
+
+template <class Mmap>
+static void *mmap_interceptor(ThreadState *thr, uptr pc, Mmap real_mmap,
+ void *addr, SIZE_T sz, int prot, int flags,
+ int fd, OFF64_T off) {
+ if (!fix_mmap_addr(&addr, sz, flags)) return MAP_FAILED;
+ void *res = real_mmap(addr, sz, prot, flags, fd, off);
+ if (res != MAP_FAILED) {
+ if (!IsAppMem((uptr)res) || !IsAppMem((uptr)res + sz - 1)) {
+ Report("ThreadSanitizer: mmap at bad address: addr=%p size=%p res=%p\n",
+ addr, (void*)sz, res);
+ Die();
+ }
+ if (fd > 0) FdAccess(thr, pc, fd);
+ MemoryRangeImitateWriteOrResetRange(thr, pc, (uptr)res, sz);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
+ SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
+ UnmapShadow(thr, (uptr)addr, sz);
+ int res = REAL(munmap)(addr, sz);
+ return res;
+}
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
+ SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
+ return user_memalign(thr, pc, align, sz);
+}
+#define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
+#else
+#define TSAN_MAYBE_INTERCEPT_MEMALIGN
+#endif
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
+ if (in_symbolizer())
+ return InternalAlloc(sz, nullptr, align);
+ SCOPED_INTERCEPTOR_RAW(aligned_alloc, align, sz);
+ return user_aligned_alloc(thr, pc, align, sz);
+}
+
+TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
+ if (in_symbolizer())
+ return InternalAlloc(sz, nullptr, GetPageSizeCached());
+ SCOPED_INTERCEPTOR_RAW(valloc, sz);
+ return user_valloc(thr, pc, sz);
+}
+#endif
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
+ if (in_symbolizer()) {
+ uptr PageSize = GetPageSizeCached();
+ sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
+ return InternalAlloc(sz, nullptr, PageSize);
+ }
+ SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
+ return user_pvalloc(thr, pc, sz);
+}
+#define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
+#else
+#define TSAN_MAYBE_INTERCEPT_PVALLOC
+#endif
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
+ if (in_symbolizer()) {
+ void *p = InternalAlloc(sz, nullptr, align);
+ if (!p)
+ return errno_ENOMEM;
+ *memptr = p;
+ return 0;
+ }
+ SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
+ return user_posix_memalign(thr, pc, memptr, align, sz);
+}
+#endif
+
+// For both __cxa_guard_acquire and pthread_once the object
+// starts out 0-initialized. pthread_once does not have any
+// other ABI requirements. __cxa_guard_acquire assumes
+// that any non-0 value in the first byte means that
+// initialization is completed. Contents of the remaining
+// bytes are up to us.
+constexpr u32 kGuardInit = 0;
+constexpr u32 kGuardDone = 1;
+constexpr u32 kGuardRunning = 1 << 16;
+constexpr u32 kGuardWaiter = 1 << 17;
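+
+// State machine implemented by guard_acquire/guard_release below: the first
+// thread moves the guard from kGuardInit to kGuardRunning and runs the
+// initialization; other threads OR in kGuardWaiter and block in FutexWait;
+// the releasing thread stores kGuardDone (or kGuardInit again on
+// __cxa_guard_abort) and wakes the waiters if kGuardWaiter was set.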
+
+static int guard_acquire(ThreadState *thr, uptr pc, atomic_uint32_t *g,
+ bool blocking_hooks = true) {
+ if (blocking_hooks)
+ OnPotentiallyBlockingRegionBegin();
+ auto on_exit = at_scope_exit([blocking_hooks] {
+ if (blocking_hooks)
+ OnPotentiallyBlockingRegionEnd();
+ });
+
+ for (;;) {
+ u32 cmp = atomic_load(g, memory_order_acquire);
+ if (cmp == kGuardInit) {
+ if (atomic_compare_exchange_strong(g, &cmp, kGuardRunning,
+ memory_order_relaxed))
+ return 1;
+ } else if (cmp == kGuardDone) {
+ if (!thr->in_ignored_lib)
+ Acquire(thr, pc, (uptr)g);
+ return 0;
+ } else {
+ if ((cmp & kGuardWaiter) ||
+ atomic_compare_exchange_strong(g, &cmp, cmp | kGuardWaiter,
+ memory_order_relaxed))
+ FutexWait(g, cmp | kGuardWaiter);
+ }
+ }
+}
+
+static void guard_release(ThreadState *thr, uptr pc, atomic_uint32_t *g,
+ u32 v) {
+ if (!thr->in_ignored_lib)
+ Release(thr, pc, (uptr)g);
+ u32 old = atomic_exchange(g, v, memory_order_release);
+ if (old & kGuardWaiter)
+ FutexWake(g, 1 << 30);
+}
+
+// __cxa_guard_acquire and friends need to be intercepted in a special way -
+// regular interceptors will break statically-linked libstdc++. Linux
+// interceptors are specially defined as weak functions (so that they don't
+// cause link errors when the user defines them as well). So they silently
+// auto-disable themselves when such a symbol is already present in the binary.
+// If we link libstdc++ statically, it brings its own __cxa_guard_acquire,
+// which silently replaces our interceptor. That's why on Linux we simply export
+// these interceptors with INTERFACE_ATTRIBUTE.
+// On OS X, we don't support statically linking, so we just use a regular
+// interceptor.
+#if SANITIZER_MAC
+#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
+#else
+#define STDCXX_INTERCEPTOR(rettype, name, ...) \
+ extern "C" rettype INTERFACE_ATTRIBUTE name(__VA_ARGS__)
+#endif
+
+// Used in thread-safe function static initialization.
+STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) {
+ SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
+ return guard_acquire(thr, pc, g);
+}
+
+STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) {
+ SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
+ guard_release(thr, pc, g, kGuardDone);
+}
+
+STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
+ SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
+ guard_release(thr, pc, g, kGuardInit);
+}
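+
+// Illustrative sketch (compiler-generated pattern, not part of this file):
+// for a function-local `static T obj;` the Itanium C++ ABI emits roughly
+//   if (__cxa_guard_acquire(&guard)) {
+//     /* construct obj; __cxa_guard_abort(&guard) if the constructor throws */
+//     __cxa_guard_release(&guard);
+//   }
+// so the interceptors above add a Release on the guard when initialization
+// completes and a matching Acquire in threads that reach __cxa_guard_acquire
+// after it is done.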
+
+namespace __tsan {
+void DestroyThreadState() {
+ ThreadState *thr = cur_thread();
+ Processor *proc = thr->proc();
+ ThreadFinish(thr);
+ ProcUnwire(proc, thr);
+ ProcDestroy(proc);
+ DTLS_Destroy();
+ cur_thread_finalize();
+}
+
+void PlatformCleanUpThreadState(ThreadState *thr) {
+ ThreadSignalContext *sctx = thr->signal_ctx;
+ if (sctx) {
+ thr->signal_ctx = 0;
+ UnmapOrDie(sctx, sizeof(*sctx));
+ }
+}
+} // namespace __tsan
+
+#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+static void thread_finalize(void *v) {
+ uptr iter = (uptr)v;
+ if (iter > 1) {
+ if (pthread_setspecific(interceptor_ctx()->finalize_key,
+ (void*)(iter - 1))) {
+ Printf("ThreadSanitizer: failed to set thread key\n");
+ Die();
+ }
+ return;
+ }
+ DestroyThreadState();
+}
+#endif
+
+
+struct ThreadParam {
+ void* (*callback)(void *arg);
+ void *param;
+ Tid tid;
+ Semaphore created;
+ Semaphore started;
+};
+
+extern "C" void *__tsan_thread_start_func(void *arg) {
+ ThreadParam *p = (ThreadParam*)arg;
+ void* (*callback)(void *arg) = p->callback;
+ void *param = p->param;
+ {
+ ThreadState *thr = cur_thread_init();
+ // Thread-local state is not initialized yet.
+ ScopedIgnoreInterceptors ignore;
+#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+ ThreadIgnoreBegin(thr, 0);
+ if (pthread_setspecific(interceptor_ctx()->finalize_key,
+ (void *)GetPthreadDestructorIterations())) {
+ Printf("ThreadSanitizer: failed to set thread key\n");
+ Die();
+ }
+ ThreadIgnoreEnd(thr);
+#endif
+ p->created.Wait();
+ Processor *proc = ProcCreate();
+ ProcWire(proc, thr);
+ ThreadStart(thr, p->tid, GetTid(), ThreadType::Regular);
+ p->started.Post();
+ }
+ void *res = callback(param);
+ // Prevent the callback from being tail called,
+ // it mixes up stack traces.
+ volatile int foo = 42;
+ foo++;
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_create,
+ void *th, void *attr, void *(*callback)(void*), void * param) {
+ SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);
+
+ MaybeSpawnBackgroundThread();
+
+ if (ctx->after_multithreaded_fork) {
+ if (flags()->die_after_fork) {
+ Report("ThreadSanitizer: starting new threads after multi-threaded "
+ "fork is not supported. Dying (set die_after_fork=0 to override)\n");
+ Die();
+ } else {
+ VPrintf(1,
+ "ThreadSanitizer: starting new threads after multi-threaded "
+ "fork is not supported (pid %lu). Continuing because of "
+ "die_after_fork=0, but you are on your own\n",
+ internal_getpid());
+ }
+ }
+ __sanitizer_pthread_attr_t myattr;
+ if (attr == 0) {
+ pthread_attr_init(&myattr);
+ attr = &myattr;
+ }
+ int detached = 0;
+ REAL(pthread_attr_getdetachstate)(attr, &detached);
+ AdjustStackSize(attr);
+
+ ThreadParam p;
+ p.callback = callback;
+ p.param = param;
+ p.tid = kMainTid;
+ int res = -1;
+ {
+ // Otherwise we see false positives in pthread stack manipulation.
+ ScopedIgnoreInterceptors ignore;
+ ThreadIgnoreBegin(thr, pc);
+ res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
+ ThreadIgnoreEnd(thr);
+ }
+ if (res == 0) {
+ p.tid = ThreadCreate(thr, pc, *(uptr *)th, IsStateDetached(detached));
+ CHECK_NE(p.tid, kMainTid);
+ // Synchronization on p.tid serves two purposes:
+ // 1. ThreadCreate must finish before the new thread starts.
+ // Otherwise the new thread can call pthread_detach, but the pthread_t
+ // identifier is not yet registered in ThreadRegistry by ThreadCreate.
+ // 2. ThreadStart must finish before this thread continues.
+ // Otherwise, this thread can call pthread_detach and reset thr->sync
+ // before the new thread got a chance to acquire from it in ThreadStart.
+ p.created.Post();
+ p.started.Wait();
+ }
+ if (attr == &myattr)
+ pthread_attr_destroy(&myattr);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
+ SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
+ Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
+ ThreadIgnoreBegin(thr, pc);
+ int res = BLOCK_REAL(pthread_join)(th, ret);
+ ThreadIgnoreEnd(thr);
+ if (res == 0) {
+ ThreadJoin(thr, pc, tid);
+ }
+ return res;
+}
+
+DEFINE_REAL_PTHREAD_FUNCTIONS
+
+TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
+ SCOPED_INTERCEPTOR_RAW(pthread_detach, th);
+ Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
+ int res = REAL(pthread_detach)(th);
+ if (res == 0) {
+ ThreadDetach(thr, pc, tid);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(void, pthread_exit, void *retval) {
+ {
+ SCOPED_INTERCEPTOR_RAW(pthread_exit, retval);
+#if !SANITIZER_MAC && !SANITIZER_ANDROID
+ CHECK_EQ(thr, &cur_thread_placeholder);
+#endif
+ }
+ REAL(pthread_exit)(retval);
+}
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
+ SCOPED_INTERCEPTOR_RAW(pthread_tryjoin_np, th, ret);
+ Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
+ ThreadIgnoreBegin(thr, pc);
+ int res = REAL(pthread_tryjoin_np)(th, ret);
+ ThreadIgnoreEnd(thr);
+ if (res == 0)
+ ThreadJoin(thr, pc, tid);
+ else
+ ThreadNotJoined(thr, pc, tid, (uptr)th);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_timedjoin_np, void *th, void **ret,
+ const struct timespec *abstime) {
+ SCOPED_INTERCEPTOR_RAW(pthread_timedjoin_np, th, ret, abstime);
+ Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
+ ThreadIgnoreBegin(thr, pc);
+ int res = BLOCK_REAL(pthread_timedjoin_np)(th, ret, abstime);
+ ThreadIgnoreEnd(thr);
+ if (res == 0)
+ ThreadJoin(thr, pc, tid);
+ else
+ ThreadNotJoined(thr, pc, tid, (uptr)th);
+ return res;
+}
+#endif
+
+// Problem:
+// The NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2),
+// and pthread_cond_t has a different size in the two versions.
+// If we call the new REAL functions for an old pthread_cond_t, they will
+// corrupt memory after the pthread_cond_t (the old cond is smaller).
+// If we call the old REAL functions for a new pthread_cond_t, we lose some
+// functionality (e.g. the old functions do not support waiting against
+// CLOCK_MONOTONIC).
+// Proper handling would require 2 versions of the interceptors as well,
+// but this is messy, in particular it requires linker scripts when the
+// sanitizer runtime is linked into a shared library.
+// Instead we assume we don't have dynamic libraries built against the old
+// pthread (2.2.5 dates back to 2002) and provide the legacy_pthread_cond flag
+// that allows working with old libraries (but this mode does not support
+// some features, e.g. pthread_condattr_getpshared).
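+// For example, a program linked against such an old libpthread would be run
+// with the flag enabled (illustrative invocation):
+//   TSAN_OPTIONS=legacy_pthread_cond=1 ./app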
+static void *init_cond(void *c, bool force = false) {
+ // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
+ // So we allocate additional memory on the side large enough to hold
+ // any pthread_cond_t object. Always call new REAL functions, but pass
+ // the aux object to them.
+ // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes
+ // first word of pthread_cond_t to zero.
+ // It's all relevant only for linux.
+ if (!common_flags()->legacy_pthread_cond)
+ return c;
+ atomic_uintptr_t *p = (atomic_uintptr_t*)c;
+ uptr cond = atomic_load(p, memory_order_acquire);
+ if (!force && cond != 0)
+ return (void*)cond;
+ void *newcond = WRAP(malloc)(pthread_cond_t_sz);
+ internal_memset(newcond, 0, pthread_cond_t_sz);
+ if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond,
+ memory_order_acq_rel))
+ return newcond;
+ WRAP(free)(newcond);
+ return (void*)cond;
+}
+
+namespace {
+
+template <class Fn>
+struct CondMutexUnlockCtx {
+ ScopedInterceptor *si;
+ ThreadState *thr;
+ uptr pc;
+ void *m;
+ void *c;
+ const Fn &fn;
+
+ int Cancel() const { return fn(); }
+ void Unlock() const;
+};
+
+template <class Fn>
+void CondMutexUnlockCtx<Fn>::Unlock() const {
+ // pthread_cond_wait interceptor has enabled async signal delivery
+ // (see BlockingCall below). Disable async signals since we are running
+ // tsan code. Also ScopedInterceptor and BlockingCall destructors won't run
+ // since the thread is cancelled, so we have to manually execute them
+ // (the thread still can run some user code due to pthread_cleanup_push).
+ ThreadSignalContext *ctx = SigCtx(thr);
+ CHECK_EQ(atomic_load(&ctx->in_blocking_func, memory_order_relaxed), 1);
+ atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
+ // Undo BlockingCall ctor effects.
+ thr->ignore_interceptors--;
+ si->~ScopedInterceptor();
+}
+} // namespace
+
+INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
+ void *cond = init_cond(c, true);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
+ return REAL(pthread_cond_init)(cond, a);
+}
+
+template <class Fn>
+int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si, const Fn &fn,
+ void *c, void *m) {
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+ MutexUnlock(thr, pc, (uptr)m);
+ int res = 0;
+ // This ensures that we handle mutex lock even in case of pthread_cancel.
+ // See test/tsan/cond_cancel.cpp.
+ {
+ // Enable signal delivery while the thread is blocked.
+ BlockingCall bc(thr);
+ CondMutexUnlockCtx<Fn> arg = {si, thr, pc, m, c, fn};
+ res = call_pthread_cancel_with_cleanup(
+ [](void *arg) -> int {
+ return ((const CondMutexUnlockCtx<Fn> *)arg)->Cancel();
+ },
+ [](void *arg) { ((const CondMutexUnlockCtx<Fn> *)arg)->Unlock(); },
+ &arg);
+ }
+ if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
+ return res;
+}
+
+INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
+ return cond_wait(
+ thr, pc, &si, [=]() { return REAL(pthread_cond_wait)(cond, m); }, cond,
+ m);
+}
+
+INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
+ return cond_wait(
+ thr, pc, &si,
+ [=]() { return REAL(pthread_cond_timedwait)(cond, m, abstime); }, cond,
+ m);
+}
+
+#if SANITIZER_LINUX
+INTERCEPTOR(int, pthread_cond_clockwait, void *c, void *m,
+ __sanitizer_clockid_t clock, void *abstime) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_clockwait, cond, m, clock, abstime);
+ return cond_wait(
+ thr, pc, &si,
+ [=]() { return REAL(pthread_cond_clockwait)(cond, m, clock, abstime); },
+ cond, m);
+}
+#define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT TSAN_INTERCEPT(pthread_cond_clockwait)
+#else
+#define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT
+#endif
+
+#if SANITIZER_MAC
+INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
+ void *reltime) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
+ return cond_wait(
+ thr, pc, &si,
+ [=]() {
+ return REAL(pthread_cond_timedwait_relative_np)(cond, m, reltime);
+ },
+ cond, m);
+}
+#endif
+
+INTERCEPTOR(int, pthread_cond_signal, void *c) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+ return REAL(pthread_cond_signal)(cond);
+}
+
+INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+ return REAL(pthread_cond_broadcast)(cond);
+}
+
+INTERCEPTOR(int, pthread_cond_destroy, void *c) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
+ int res = REAL(pthread_cond_destroy)(cond);
+ if (common_flags()->legacy_pthread_cond) {
+ // Free our aux cond and zero the pointer to not leave dangling pointers.
+ WRAP(free)(cond);
+ atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
+ int res = REAL(pthread_mutex_init)(m, a);
+ if (res == 0) {
+ u32 flagz = 0;
+ if (a) {
+ int type = 0;
+ if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
+ if (type == PTHREAD_MUTEX_RECURSIVE ||
+ type == PTHREAD_MUTEX_RECURSIVE_NP)
+ flagz |= MutexFlagWriteReentrant;
+ }
+ MutexCreate(thr, pc, (uptr)m, flagz);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
+ int res = REAL(pthread_mutex_destroy)(m);
+ if (res == 0 || res == errno_EBUSY) {
+ MutexDestroy(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
+ int res = REAL(pthread_mutex_trylock)(m);
+ if (res == errno_EOWNERDEAD)
+ MutexRepair(thr, pc, (uptr)m);
+ if (res == 0 || res == errno_EOWNERDEAD)
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
+ return res;
+}
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
+ int res = REAL(pthread_mutex_timedlock)(m, abstime);
+ if (res == 0) {
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
+ }
+ return res;
+}
+#endif
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
+ int res = REAL(pthread_spin_init)(m, pshared);
+ if (res == 0) {
+ MutexCreate(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
+ int res = REAL(pthread_spin_destroy)(m);
+ if (res == 0) {
+ MutexDestroy(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
+ MutexPreLock(thr, pc, (uptr)m);
+ int res = REAL(pthread_spin_lock)(m);
+ if (res == 0) {
+ MutexPostLock(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
+ int res = REAL(pthread_spin_trylock)(m);
+ if (res == 0) {
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
+ MutexUnlock(thr, pc, (uptr)m);
+ int res = REAL(pthread_spin_unlock)(m);
+ return res;
+}
+#endif
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
+ int res = REAL(pthread_rwlock_init)(m, a);
+ if (res == 0) {
+ MutexCreate(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
+ int res = REAL(pthread_rwlock_destroy)(m);
+ if (res == 0) {
+ MutexDestroy(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
+ MutexPreReadLock(thr, pc, (uptr)m);
+ int res = REAL(pthread_rwlock_rdlock)(m);
+ if (res == 0) {
+ MutexPostReadLock(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
+ int res = REAL(pthread_rwlock_tryrdlock)(m);
+ if (res == 0) {
+ MutexPostReadLock(thr, pc, (uptr)m, MutexFlagTryLock);
+ }
+ return res;
+}
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
+ int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
+ if (res == 0) {
+ MutexPostReadLock(thr, pc, (uptr)m);
+ }
+ return res;
+}
+#endif
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
+ MutexPreLock(thr, pc, (uptr)m);
+ int res = REAL(pthread_rwlock_wrlock)(m);
+ if (res == 0) {
+ MutexPostLock(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
+ int res = REAL(pthread_rwlock_trywrlock)(m);
+ if (res == 0) {
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
+ }
+ return res;
+}
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
+ int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
+ if (res == 0) {
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
+ }
+ return res;
+}
+#endif
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
+ MutexReadOrWriteUnlock(thr, pc, (uptr)m);
+ int res = REAL(pthread_rwlock_unlock)(m);
+ return res;
+}
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
+ MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
+ int res = REAL(pthread_barrier_init)(b, a, count);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
+ MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
+ int res = REAL(pthread_barrier_destroy)(b);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
+ Release(thr, pc, (uptr)b);
+ MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
+ int res = REAL(pthread_barrier_wait)(b);
+ MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
+ if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
+ Acquire(thr, pc, (uptr)b);
+ }
+ return res;
+}
+#endif
+
+TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
+ SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
+ if (o == 0 || f == 0)
+ return errno_EINVAL;
+ atomic_uint32_t *a;
+
+ if (SANITIZER_MAC)
+ a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
+ else if (SANITIZER_NETBSD)
+ a = static_cast<atomic_uint32_t*>
+ ((void *)((char *)o + __sanitizer::pthread_mutex_t_sz));
+ else
+ a = static_cast<atomic_uint32_t*>(o);
+
+  // Mac OS X appears to use pthread_once() in contexts where calling the
+  // BlockingRegion hooks results in crashes due to too little stack space.
+ if (guard_acquire(thr, pc, a, !SANITIZER_MAC)) {
+ (*f)();
+ guard_release(thr, pc, a, kGuardDone);
+ }
+ return 0;
+}
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(__fxstat)(version, fd, buf);
+}
+#define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat)
+#else
+#define TSAN_MAYBE_INTERCEPT___FXSTAT
+#endif
+
+TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
+#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID || SANITIZER_NETBSD
+ SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(fstat)(fd, buf);
+#else
+ SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(__fxstat)(0, fd, buf);
+#endif
+}
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(__fxstat64)(version, fd, buf);
+}
+#define TSAN_MAYBE_INTERCEPT___FXSTAT64 TSAN_INTERCEPT(__fxstat64)
+#else
+#define TSAN_MAYBE_INTERCEPT___FXSTAT64
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(__fxstat64)(0, fd, buf);
+}
+#define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
+#else
+#define TSAN_MAYBE_INTERCEPT_FSTAT64
+#endif
+
+TSAN_INTERCEPTOR(int, open, const char *name, int oflag, ...) {
+ va_list ap;
+ va_start(ap, oflag);
+ mode_t mode = va_arg(ap, int);
+ va_end(ap);
+ SCOPED_TSAN_INTERCEPTOR(open, name, oflag, mode);
+ READ_STRING(thr, pc, name, 0);
+ int fd = REAL(open)(name, oflag, mode);
+ if (fd >= 0)
+ FdFileCreate(thr, pc, fd);
+ return fd;
+}
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, open64, const char *name, int oflag, ...) {
+ va_list ap;
+ va_start(ap, oflag);
+ mode_t mode = va_arg(ap, int);
+ va_end(ap);
+ SCOPED_TSAN_INTERCEPTOR(open64, name, oflag, mode);
+ READ_STRING(thr, pc, name, 0);
+ int fd = REAL(open64)(name, oflag, mode);
+ if (fd >= 0)
+ FdFileCreate(thr, pc, fd);
+ return fd;
+}
+#define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64)
+#else
+#define TSAN_MAYBE_INTERCEPT_OPEN64
+#endif
+
+TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
+ SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
+ READ_STRING(thr, pc, name, 0);
+ int fd = REAL(creat)(name, mode);
+ if (fd >= 0)
+ FdFileCreate(thr, pc, fd);
+ return fd;
+}
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
+ SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
+ READ_STRING(thr, pc, name, 0);
+ int fd = REAL(creat64)(name, mode);
+ if (fd >= 0)
+ FdFileCreate(thr, pc, fd);
+ return fd;
+}
+#define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64)
+#else
+#define TSAN_MAYBE_INTERCEPT_CREAT64
+#endif
+
+TSAN_INTERCEPTOR(int, dup, int oldfd) {
+ SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
+ int newfd = REAL(dup)(oldfd);
+ if (oldfd >= 0 && newfd >= 0 && newfd != oldfd)
+ FdDup(thr, pc, oldfd, newfd, true);
+ return newfd;
+}
+
+TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
+ SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd);
+ int newfd2 = REAL(dup2)(oldfd, newfd);
+ if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
+ FdDup(thr, pc, oldfd, newfd2, false);
+ return newfd2;
+}
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
+ int newfd2 = REAL(dup3)(oldfd, newfd, flags);
+ if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
+ FdDup(thr, pc, oldfd, newfd2, false);
+ return newfd2;
+}
+#endif
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
+ int fd = REAL(eventfd)(initval, flags);
+ if (fd >= 0)
+ FdEventCreate(thr, pc, fd);
+ return fd;
+}
+#define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd)
+#else
+#define TSAN_MAYBE_INTERCEPT_EVENTFD
+#endif
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(signalfd, fd, mask, flags);
+ if (fd >= 0)
+ FdClose(thr, pc, fd);
+ fd = REAL(signalfd)(fd, mask, flags);
+ if (fd >= 0)
+ FdSignalCreate(thr, pc, fd);
+ return fd;
+}
+#define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd)
+#else
+#define TSAN_MAYBE_INTERCEPT_SIGNALFD
+#endif
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, inotify_init, int fake) {
+ SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
+ int fd = REAL(inotify_init)(fake);
+ if (fd >= 0)
+ FdInotifyCreate(thr, pc, fd);
+ return fd;
+}
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init)
+#else
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT
+#endif
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
+ int fd = REAL(inotify_init1)(flags);
+ if (fd >= 0)
+ FdInotifyCreate(thr, pc, fd);
+ return fd;
+}
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1)
+#else
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1
+#endif
+
+TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
+ SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
+ int fd = REAL(socket)(domain, type, protocol);
+ if (fd >= 0)
+ FdSocketCreate(thr, pc, fd);
+ return fd;
+}
+
+TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) {
+ SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd);
+ int res = REAL(socketpair)(domain, type, protocol, fd);
+ if (res == 0 && fd[0] >= 0 && fd[1] >= 0)
+ FdPipeCreate(thr, pc, fd[0], fd[1]);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) {
+ SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen);
+ FdSocketConnecting(thr, pc, fd);
+ int res = REAL(connect)(fd, addr, addrlen);
+ if (res == 0 && fd >= 0)
+ FdSocketConnect(thr, pc, fd);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) {
+ SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen);
+ int res = REAL(bind)(fd, addr, addrlen);
+ if (fd > 0 && res == 0)
+ FdAccess(thr, pc, fd);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
+ SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog);
+ int res = REAL(listen)(fd, backlog);
+ if (fd > 0 && res == 0)
+ FdAccess(thr, pc, fd);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, close, int fd) {
+ SCOPED_TSAN_INTERCEPTOR(close, fd);
+ if (fd >= 0)
+ FdClose(thr, pc, fd);
+ return REAL(close)(fd);
+}
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, __close, int fd) {
+ SCOPED_TSAN_INTERCEPTOR(__close, fd);
+ if (fd >= 0)
+ FdClose(thr, pc, fd);
+ return REAL(__close)(fd);
+}
+#define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
+#else
+#define TSAN_MAYBE_INTERCEPT___CLOSE
+#endif
+
+// glibc guts
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
+ SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr);
+ int fds[64];
+ int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
+ for (int i = 0; i < cnt; i++) {
+ if (fds[i] > 0)
+ FdClose(thr, pc, fds[i]);
+ }
+ REAL(__res_iclose)(state, free_addr);
+}
+#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
+#else
+#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE
+#endif
+
+TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
+ SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
+ int res = REAL(pipe)(pipefd);
+ if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
+ FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
+ return res;
+}
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
+ int res = REAL(pipe2)(pipefd, flags);
+ if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
+ FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
+ return res;
+}
+#endif
+
+TSAN_INTERCEPTOR(int, unlink, char *path) {
+ SCOPED_TSAN_INTERCEPTOR(unlink, path);
+ Release(thr, pc, File2addr(path));
+ int res = REAL(unlink)(path);
+ return res;
+}
+
+TSAN_INTERCEPTOR(void*, tmpfile, int fake) {
+ SCOPED_TSAN_INTERCEPTOR(tmpfile, fake);
+ void *res = REAL(tmpfile)(fake);
+ if (res) {
+ int fd = fileno_unlocked(res);
+ if (fd >= 0)
+ FdFileCreate(thr, pc, fd);
+ }
+ return res;
+}
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
+ SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake);
+ void *res = REAL(tmpfile64)(fake);
+ if (res) {
+ int fd = fileno_unlocked(res);
+ if (fd >= 0)
+ FdFileCreate(thr, pc, fd);
+ }
+ return res;
+}
+#define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64)
+#else
+#define TSAN_MAYBE_INTERCEPT_TMPFILE64
+#endif
+
+static void FlushStreams() {
+ // Flushing all the streams here may freeze the process if a child thread is
+ // performing file stream operations at the same time.
+ REAL(fflush)(stdout);
+ REAL(fflush)(stderr);
+}
+
+TSAN_INTERCEPTOR(void, abort, int fake) {
+ SCOPED_TSAN_INTERCEPTOR(abort, fake);
+ FlushStreams();
+ REAL(abort)(fake);
+}
+
+TSAN_INTERCEPTOR(int, rmdir, char *path) {
+ SCOPED_TSAN_INTERCEPTOR(rmdir, path);
+ Release(thr, pc, Dir2addr(path));
+ int res = REAL(rmdir)(path);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, closedir, void *dirp) {
+ SCOPED_TSAN_INTERCEPTOR(closedir, dirp);
+ if (dirp) {
+ int fd = dirfd(dirp);
+ FdClose(thr, pc, fd);
+ }
+ return REAL(closedir)(dirp);
+}
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, epoll_create, int size) {
+ SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
+ int fd = REAL(epoll_create)(size);
+ if (fd >= 0)
+ FdPollCreate(thr, pc, fd);
+ return fd;
+}
+
+TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
+ int fd = REAL(epoll_create1)(flags);
+ if (fd >= 0)
+ FdPollCreate(thr, pc, fd);
+ return fd;
+}
+
+TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
+ SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
+ if (epfd >= 0)
+ FdAccess(thr, pc, epfd);
+ if (epfd >= 0 && fd >= 0)
+ FdAccess(thr, pc, fd);
+ if (op == EPOLL_CTL_ADD && epfd >= 0)
+ FdRelease(thr, pc, epfd);
+ int res = REAL(epoll_ctl)(epfd, op, fd, ev);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
+ SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
+ if (epfd >= 0)
+ FdAccess(thr, pc, epfd);
+ int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout);
+ if (res > 0 && epfd >= 0)
+ FdAcquire(thr, pc, epfd);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout,
+ void *sigmask) {
+ SCOPED_TSAN_INTERCEPTOR(epoll_pwait, epfd, ev, cnt, timeout, sigmask);
+ if (epfd >= 0)
+ FdAccess(thr, pc, epfd);
+ int res = BLOCK_REAL(epoll_pwait)(epfd, ev, cnt, timeout, sigmask);
+ if (res > 0 && epfd >= 0)
+ FdAcquire(thr, pc, epfd);
+ return res;
+}
+
+#define TSAN_MAYBE_INTERCEPT_EPOLL \
+ TSAN_INTERCEPT(epoll_create); \
+ TSAN_INTERCEPT(epoll_create1); \
+ TSAN_INTERCEPT(epoll_ctl); \
+ TSAN_INTERCEPT(epoll_wait); \
+ TSAN_INTERCEPT(epoll_pwait)
+#else
+#define TSAN_MAYBE_INTERCEPT_EPOLL
+#endif
+
+// The following functions are intercepted merely to process pending signals.
+// If program blocks signal X, we must deliver the signal before the function
+// returns. Similarly, if program unblocks a signal (or returns from sigsuspend)
+// it's better to deliver the signal straight away.
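+// For example (illustrative flow): a signal arriving while it is blocked is
+// only queued as pending by sighandler() below; when the program later calls
+// sigsuspend() or pthread_sigmask(SIG_UNBLOCK, ...), the interceptor epilogue
+// runs ProcessPendingSignals() and the user handler is invoked at that point.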
+TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
+ SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
+ return REAL(sigsuspend)(mask);
+}
+
+TSAN_INTERCEPTOR(int, sigblock, int mask) {
+ SCOPED_TSAN_INTERCEPTOR(sigblock, mask);
+ return REAL(sigblock)(mask);
+}
+
+TSAN_INTERCEPTOR(int, sigsetmask, int mask) {
+ SCOPED_TSAN_INTERCEPTOR(sigsetmask, mask);
+ return REAL(sigsetmask)(mask);
+}
+
+TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset);
+ return REAL(pthread_sigmask)(how, set, oldset);
+}
+
+namespace __tsan {
+
+static void ReportErrnoSpoiling(ThreadState *thr, uptr pc) {
+ VarSizeStackTrace stack;
+  // StackTrace::GetNextInstructionPc(pc) is used because a return address is
+  // expected; OutputReport() will undo this.
+ ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
+ ThreadRegistryLock l(&ctx->thread_registry);
+ ScopedReport rep(ReportTypeErrnoInSignal);
+ if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
+ rep.AddStack(stack, true);
+ OutputReport(thr, rep);
+ }
+}
+
+static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
+ int sig, __sanitizer_siginfo *info,
+ void *uctx) {
+ __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
+ if (acquire)
+ Acquire(thr, 0, (uptr)&sigactions[sig]);
+  // Signals are generally asynchronous, so if we receive a signal while
+  // ignores are enabled we should disable ignores. This is critical for sync
+  // and interceptors, because otherwise we can miss synchronization and report
+  // false races.
+ int ignore_reads_and_writes = thr->ignore_reads_and_writes;
+ int ignore_interceptors = thr->ignore_interceptors;
+ int ignore_sync = thr->ignore_sync;
+  // For the symbolizer we only process SIGSEGVs synchronously
+  // (a bug in the symbolizer or in tsan). But we want to reset
+  // in_symbolizer to fail gracefully. The symbolizer and user code
+  // use different memory allocators, so if we don't reset
+  // in_symbolizer we can get memory allocated with one allocator
+  // being freed with the other, which can cause more crashes.
+ int in_symbolizer = thr->in_symbolizer;
+ if (!ctx->after_multithreaded_fork) {
+ thr->ignore_reads_and_writes = 0;
+ thr->fast_state.ClearIgnoreBit();
+ thr->ignore_interceptors = 0;
+ thr->ignore_sync = 0;
+ thr->in_symbolizer = 0;
+ }
+ // Ensure that the handler does not spoil errno.
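+  // For instance (hypothetical user code), a handler such as
+  //   void h(int) { if (open("/nonexistent", O_RDONLY) < 0) {} }  // leaves errno = ENOENT
+  // can make an interrupted syscall in the interrupted code observe the wrong
+  // errno. The sentinel value 99 below lets us detect such a modification.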
+ const int saved_errno = errno;
+ errno = 99;
+ // This code races with sigaction. Be careful to not read sa_sigaction twice.
+ // Also need to remember pc for reporting before the call,
+ // because the handler can reset it.
+ volatile uptr pc = (sigactions[sig].sa_flags & SA_SIGINFO)
+ ? (uptr)sigactions[sig].sigaction
+ : (uptr)sigactions[sig].handler;
+ if (pc != sig_dfl && pc != sig_ign) {
+ // The callback can be either sa_handler or sa_sigaction.
+ // They have different signatures, but we assume that passing
+ // additional arguments to sa_handler works and is harmless.
+ ((__sanitizer_sigactionhandler_ptr)pc)(sig, info, uctx);
+ }
+ if (!ctx->after_multithreaded_fork) {
+ thr->ignore_reads_and_writes = ignore_reads_and_writes;
+ if (ignore_reads_and_writes)
+ thr->fast_state.SetIgnoreBit();
+ thr->ignore_interceptors = ignore_interceptors;
+ thr->ignore_sync = ignore_sync;
+ thr->in_symbolizer = in_symbolizer;
+ }
+  // We do not detect errno spoiling for SIGTERM,
+  // because some SIGTERM handlers do spoil errno but reraise SIGTERM,
+  // and tsan would report a false positive in such a case.
+  // It's difficult to properly detect this situation (reraise),
+  // because in the async signal processing case (when the handler is called
+  // directly from rtl_generic_sighandler) we have not yet received the
+  // reraised signal, and it looks too fragile to intercept all ways to
+  // reraise a signal.
+ if (ShouldReport(thr, ReportTypeErrnoInSignal) && !sync && sig != SIGTERM &&
+ errno != 99)
+ ReportErrnoSpoiling(thr, pc);
+ errno = saved_errno;
+}
+
+void ProcessPendingSignalsImpl(ThreadState *thr) {
+ atomic_store(&thr->pending_signals, 0, memory_order_relaxed);
+ ThreadSignalContext *sctx = SigCtx(thr);
+ if (sctx == 0)
+ return;
+ atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
+ internal_sigfillset(&sctx->emptyset);
+ int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset);
+ CHECK_EQ(res, 0);
+ for (int sig = 0; sig < kSigCount; sig++) {
+ SignalDesc *signal = &sctx->pending_signals[sig];
+ if (signal->armed) {
+ signal->armed = false;
+ CallUserSignalHandler(thr, false, true, sig, &signal->siginfo,
+ &signal->ctx);
+ }
+ }
+ res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0);
+ CHECK_EQ(res, 0);
+ atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
+}
+
+} // namespace __tsan
+
+static bool is_sync_signal(ThreadSignalContext *sctx, int sig) {
+ return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || sig == SIGTRAP ||
+ sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS ||
+ // If we are sending signal to ourselves, we must process it now.
+ (sctx && sig == sctx->int_signal_send);
+}
+
+void sighandler(int sig, __sanitizer_siginfo *info, void *ctx) {
+ ThreadState *thr = cur_thread_init();
+ ThreadSignalContext *sctx = SigCtx(thr);
+ if (sig < 0 || sig >= kSigCount) {
+ VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
+ return;
+ }
+ // Don't mess with synchronous signals.
+ const bool sync = is_sync_signal(sctx, sig);
+ if (sync ||
+ // If we are in blocking function, we can safely process it now
+ // (but check if we are in a recursive interceptor,
+ // i.e. pthread_join()->munmap()).
+ (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed))) {
+ atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
+ if (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed)) {
+ atomic_store(&sctx->in_blocking_func, 0, memory_order_relaxed);
+ CallUserSignalHandler(thr, sync, true, sig, info, ctx);
+ atomic_store(&sctx->in_blocking_func, 1, memory_order_relaxed);
+ } else {
+ // Be very conservative with when we do acquire in this case.
+ // It's unsafe to do acquire in async handlers, because ThreadState
+ // can be in inconsistent state.
+ // SIGSYS looks relatively safe -- it's synchronous and can actually
+ // need some global state.
+ bool acq = (sig == SIGSYS);
+ CallUserSignalHandler(thr, sync, acq, sig, info, ctx);
+ }
+ atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
+ return;
+ }
+
+ if (sctx == 0)
+ return;
+ SignalDesc *signal = &sctx->pending_signals[sig];
+ if (signal->armed == false) {
+ signal->armed = true;
+ internal_memcpy(&signal->siginfo, info, sizeof(*info));
+ internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
+ atomic_store(&thr->pending_signals, 1, memory_order_relaxed);
+ }
+}
+
+TSAN_INTERCEPTOR(int, raise, int sig) {
+ SCOPED_TSAN_INTERCEPTOR(raise, sig);
+ ThreadSignalContext *sctx = SigCtx(thr);
+ CHECK_NE(sctx, 0);
+ int prev = sctx->int_signal_send;
+ sctx->int_signal_send = sig;
+ int res = REAL(raise)(sig);
+ CHECK_EQ(sctx->int_signal_send, sig);
+ sctx->int_signal_send = prev;
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, kill, int pid, int sig) {
+ SCOPED_TSAN_INTERCEPTOR(kill, pid, sig);
+ ThreadSignalContext *sctx = SigCtx(thr);
+ CHECK_NE(sctx, 0);
+ int prev = sctx->int_signal_send;
+ if (pid == (int)internal_getpid()) {
+ sctx->int_signal_send = sig;
+ }
+ int res = REAL(kill)(pid, sig);
+ if (pid == (int)internal_getpid()) {
+ CHECK_EQ(sctx->int_signal_send, sig);
+ sctx->int_signal_send = prev;
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig);
+ ThreadSignalContext *sctx = SigCtx(thr);
+ CHECK_NE(sctx, 0);
+ int prev = sctx->int_signal_send;
+ bool self = pthread_equal(tid, pthread_self());
+ if (self)
+ sctx->int_signal_send = sig;
+ int res = REAL(pthread_kill)(tid, sig);
+ if (self) {
+ CHECK_EQ(sctx->int_signal_send, sig);
+ sctx->int_signal_send = prev;
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
+ SCOPED_TSAN_INTERCEPTOR(gettimeofday, tv, tz);
+ // It's intercepted merely to process pending signals.
+ return REAL(gettimeofday)(tv, tz);
+}
+
+TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
+ void *hints, void *rv) {
+ SCOPED_TSAN_INTERCEPTOR(getaddrinfo, node, service, hints, rv);
+ // We miss atomic synchronization in getaddrinfo,
+ // and can report false race between malloc and free
+ // inside of getaddrinfo. So ignore memory accesses.
+ ThreadIgnoreBegin(thr, pc);
+ int res = REAL(getaddrinfo)(node, service, hints, rv);
+ ThreadIgnoreEnd(thr);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, fork, int fake) {
+ if (in_symbolizer())
+ return REAL(fork)(fake);
+ SCOPED_INTERCEPTOR_RAW(fork, fake);
+ return REAL(fork)(fake);
+}
+
+void atfork_prepare() {
+ if (in_symbolizer())
+ return;
+ ThreadState *thr = cur_thread();
+ const uptr pc = StackTrace::GetCurrentPc();
+ ForkBefore(thr, pc);
+}
+
+void atfork_parent() {
+ if (in_symbolizer())
+ return;
+ ThreadState *thr = cur_thread();
+ const uptr pc = StackTrace::GetCurrentPc();
+ ForkParentAfter(thr, pc);
+}
+
+void atfork_child() {
+ if (in_symbolizer())
+ return;
+ ThreadState *thr = cur_thread();
+ const uptr pc = StackTrace::GetCurrentPc();
+ ForkChildAfter(thr, pc, true);
+ FdOnFork(thr, pc);
+}
+
+TSAN_INTERCEPTOR(int, vfork, int fake) {
+ // Some programs (e.g. openjdk) call close for all file descriptors
+ // in the child process. Under tsan it leads to false positives, because
+ // address space is shared, so the parent process also thinks that
+ // the descriptors are closed (while they are actually not).
+ // This leads to false positives due to missed synchronization.
+  // Strictly speaking, this is undefined behavior, because the vfork child is
+  // not allowed to call any functions other than exec/exit. But this is what
+  // openjdk does, so we want to handle it.
+ // We could disable interceptors in the child process. But it's not possible
+ // to simply intercept and wrap vfork, because vfork child is not allowed
+ // to return from the function that calls vfork, and that's exactly what
+ // we would do. So this would require some assembly trickery as well.
+ // Instead we simply turn vfork into fork.
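+  // An illustrative instance of that pattern (hypothetical user code):
+  //   if (vfork() == 0) { for (int fd = 3; fd < 1024; fd++) close(fd); execv(...); }
+  // With a shared address space, the parent's descriptor state would otherwise
+  // be marked closed by our close() interceptor.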
+ return WRAP(fork)(fake);
+}
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, clone, int (*fn)(void *), void *stack, int flags,
+ void *arg, int *parent_tid, void *tls, pid_t *child_tid) {
+ SCOPED_INTERCEPTOR_RAW(clone, fn, stack, flags, arg, parent_tid, tls,
+ child_tid);
+ struct Arg {
+ int (*fn)(void *);
+ void *arg;
+ };
+ auto wrapper = +[](void *p) -> int {
+ auto *thr = cur_thread();
+ uptr pc = GET_CURRENT_PC();
+ // Start the background thread for fork, but not for clone.
+    // For fork we have always done this and it's known to work (or user code
+    // has adapted). But if we do this for the new clone interceptor, some code
+    // (sandbox2) fails. So keep the model we have used for years and don't
+    // start the background thread after clone.
+ ForkChildAfter(thr, pc, false);
+ FdOnFork(thr, pc);
+ auto *arg = static_cast<Arg *>(p);
+ return arg->fn(arg->arg);
+ };
+ ForkBefore(thr, pc);
+ Arg arg_wrapper = {fn, arg};
+ int pid = REAL(clone)(wrapper, stack, flags, &arg_wrapper, parent_tid, tls,
+ child_tid);
+ ForkParentAfter(thr, pc);
+ return pid;
+}
+#endif
+
+#if !SANITIZER_MAC && !SANITIZER_ANDROID
+typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
+ void *data);
+struct dl_iterate_phdr_data {
+ ThreadState *thr;
+ uptr pc;
+ dl_iterate_phdr_cb_t cb;
+ void *data;
+};
+
+static bool IsAppNotRodata(uptr addr) {
+ return IsAppMem(addr) && *MemToShadow(addr) != kShadowRodata;
+}
+
+static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
+ void *data) {
+ dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data;
+ // dlopen/dlclose allocate/free dynamic-linker-internal memory, which is later
+ // accessible in dl_iterate_phdr callback. But we don't see synchronization
+ // inside of dynamic linker, so we "unpoison" it here in order to not
+ // produce false reports. Ignoring malloc/free in dlopen/dlclose is not enough
+ // because some libc functions call __libc_dlopen.
+ if (info && IsAppNotRodata((uptr)info->dlpi_name))
+ MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
+ internal_strlen(info->dlpi_name));
+ int res = cbdata->cb(info, size, cbdata->data);
+ // Perform the check one more time in case info->dlpi_name was overwritten
+ // by user callback.
+ if (info && IsAppNotRodata((uptr)info->dlpi_name))
+ MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
+ internal_strlen(info->dlpi_name));
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb_t cb, void *data) {
+ SCOPED_TSAN_INTERCEPTOR(dl_iterate_phdr, cb, data);
+ dl_iterate_phdr_data cbdata;
+ cbdata.thr = thr;
+ cbdata.pc = pc;
+ cbdata.cb = cb;
+ cbdata.data = data;
+ int res = REAL(dl_iterate_phdr)(dl_iterate_phdr_cb, &cbdata);
+ return res;
+}
+#endif
+
+static int OnExit(ThreadState *thr) {
+ int status = Finalize(thr);
+ FlushStreams();
+ return status;
+}
+
+struct TsanInterceptorContext {
+ ThreadState *thr;
+ const uptr pc;
+};
+
+#if !SANITIZER_MAC
+static void HandleRecvmsg(ThreadState *thr, uptr pc,
+ __sanitizer_msghdr *msg) {
+ int fds[64];
+ int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds));
+ for (int i = 0; i < cnt; i++)
+ FdEventCreate(thr, pc, fds[i]);
+}
+#endif
+
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+// Causes interceptor recursion (getaddrinfo() and fopen())
+#undef SANITIZER_INTERCEPT_GETADDRINFO
+// We define our own.
+#if SANITIZER_INTERCEPT_TLS_GET_ADDR
+#define NEED_TLS_GET_ADDR
+#endif
+#undef SANITIZER_INTERCEPT_TLS_GET_ADDR
+#define SANITIZER_INTERCEPT_TLS_GET_OFFSET 1
+#undef SANITIZER_INTERCEPT_PTHREAD_SIGMASK
+
+#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
+#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
+ INTERCEPT_FUNCTION_VER(name, ver)
+#define COMMON_INTERCEPT_FUNCTION_VER_UNVERSIONED_FALLBACK(name, ver) \
+ (INTERCEPT_FUNCTION_VER(name, ver) || INTERCEPT_FUNCTION(name))
+
+#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
+ MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)ptr, size, \
+ true)
+
+#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
+ MemoryAccessRange(((TsanInterceptorContext *) ctx)->thr, \
+ ((TsanInterceptorContext *) ctx)->pc, (uptr) ptr, size, \
+ false)
+
+#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
+ SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__); \
+ TsanInterceptorContext _ctx = {thr, pc}; \
+ ctx = (void *)&_ctx; \
+ (void)ctx;
+
+#define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
+ SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
+ TsanInterceptorContext _ctx = {thr, pc}; \
+ ctx = (void *)&_ctx; \
+ (void)ctx;
+
+#define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
+ if (path) \
+ Acquire(thr, pc, File2addr(path)); \
+ if (file) { \
+ int fd = fileno_unlocked(file); \
+ if (fd >= 0) FdFileCreate(thr, pc, fd); \
+ }
+
+#define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
+ if (file) { \
+ int fd = fileno_unlocked(file); \
+ if (fd >= 0) FdClose(thr, pc, fd); \
+ }
+
+#define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \
+ ({ \
+ CheckNoDeepBind(filename, flag); \
+ ThreadIgnoreBegin(thr, 0); \
+ void *res = REAL(dlopen)(filename, flag); \
+ ThreadIgnoreEnd(thr); \
+ res; \
+ })
+
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
+ libignore()->OnLibraryLoaded(filename)
+
+#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
+ libignore()->OnLibraryUnloaded()
+
+#define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \
+ Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u)
+
+#define COMMON_INTERCEPTOR_RELEASE(ctx, u) \
+ Release(((TsanInterceptorContext *) ctx)->thr, pc, u)
+
+#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
+ Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path))
+
+#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
+ FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)
+
+#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
+ FdRelease(((TsanInterceptorContext *) ctx)->thr, pc, fd)
+
+#define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) \
+ FdAccess(((TsanInterceptorContext *) ctx)->thr, pc, fd)
+
+#define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
+ FdSocketAccept(((TsanInterceptorContext *) ctx)->thr, pc, fd, newfd)
+
+#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
+ ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)
+
+#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
+ if (pthread_equal(pthread_self(), reinterpret_cast<void *>(thread))) \
+ COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name); \
+ else \
+ __tsan::ctx->thread_registry.SetThreadNameByUserId(thread, name)
+
+#define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)
+
+#define COMMON_INTERCEPTOR_ON_EXIT(ctx) \
+ OnExit(((TsanInterceptorContext *) ctx)->thr)
+
+#define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) \
+ MutexPreLock(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
+
+#define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) \
+ MutexPostLock(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
+
+#define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) \
+ MutexUnlock(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
+
+#define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m) \
+ MutexRepair(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
+
+#define COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m) \
+ MutexInvalidAccess(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
+
+#define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, \
+ off) \
+ do { \
+ return mmap_interceptor(thr, pc, REAL(mmap), addr, sz, prot, flags, fd, \
+ off); \
+ } while (false)
+
+#if !SANITIZER_MAC
+#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
+ HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, msg)
+#endif
+
+#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
+ if (TsanThread *t = GetCurrentThread()) { \
+ *begin = t->tls_begin(); \
+ *end = t->tls_end(); \
+ } else { \
+ *begin = *end = 0; \
+ }
+
+#define COMMON_INTERCEPTOR_USER_CALLBACK_START() \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START()
+
+#define COMMON_INTERCEPTOR_USER_CALLBACK_END() \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END()
+
+#include "sanitizer_common/sanitizer_common_interceptors.inc"
+
+static int sigaction_impl(int sig, const __sanitizer_sigaction *act,
+ __sanitizer_sigaction *old);
+static __sanitizer_sighandler_ptr signal_impl(int sig,
+ __sanitizer_sighandler_ptr h);
+
+#define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signo, act, oldact) \
+ { return sigaction_impl(signo, act, oldact); }
+
+#define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signo, handler) \
+ { return (uptr)signal_impl(signo, (__sanitizer_sighandler_ptr)handler); }
+
+#include "sanitizer_common/sanitizer_signal_interceptors.inc"
+
+int sigaction_impl(int sig, const __sanitizer_sigaction *act,
+ __sanitizer_sigaction *old) {
+ // Note: if we call REAL(sigaction) directly for any reason without proxying
+ // the signal handler through sighandler, very bad things will happen.
+ // The handler will run synchronously and corrupt tsan per-thread state.
+ SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
+ if (sig <= 0 || sig >= kSigCount) {
+ errno = errno_EINVAL;
+ return -1;
+ }
+ __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
+ __sanitizer_sigaction old_stored;
+ if (old) internal_memcpy(&old_stored, &sigactions[sig], sizeof(old_stored));
+ __sanitizer_sigaction newact;
+ if (act) {
+ // Copy act into sigactions[sig].
+ // Can't use struct copy, because compiler can emit call to memcpy.
+  // Can't use internal_memcpy, because it copies byte-by-byte,
+  // and the signal handler reads the handler concurrently, so it could read
+  // some bytes from the old value and some bytes from the new value.
+ // Use volatile to prevent insertion of memcpy.
+ sigactions[sig].handler =
+ *(volatile __sanitizer_sighandler_ptr const *)&act->handler;
+ sigactions[sig].sa_flags = *(volatile int const *)&act->sa_flags;
+ internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
+ sizeof(sigactions[sig].sa_mask));
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
+ sigactions[sig].sa_restorer = act->sa_restorer;
+#endif
+ internal_memcpy(&newact, act, sizeof(newact));
+ internal_sigfillset(&newact.sa_mask);
+ if ((act->sa_flags & SA_SIGINFO) ||
+ ((uptr)act->handler != sig_ign && (uptr)act->handler != sig_dfl)) {
+ newact.sa_flags |= SA_SIGINFO;
+ newact.sigaction = sighandler;
+ }
+ ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
+ act = &newact;
+ }
+ int res = REAL(sigaction)(sig, act, old);
+ if (res == 0 && old && old->sigaction == sighandler)
+ internal_memcpy(old, &old_stored, sizeof(*old));
+ return res;
+}
+
+static __sanitizer_sighandler_ptr signal_impl(int sig,
+ __sanitizer_sighandler_ptr h) {
+ __sanitizer_sigaction act;
+ act.handler = h;
+ internal_memset(&act.sa_mask, -1, sizeof(act.sa_mask));
+ act.sa_flags = 0;
+ __sanitizer_sigaction old;
+ int res = sigaction_symname(sig, &act, &old);
+ if (res) return (__sanitizer_sighandler_ptr)sig_err;
+ return old.handler;
+}
+
+#define TSAN_SYSCALL() \
+ ThreadState *thr = cur_thread(); \
+ if (thr->ignore_interceptors) \
+ return; \
+ ScopedSyscall scoped_syscall(thr)
+
+struct ScopedSyscall {
+ ThreadState *thr;
+
+ explicit ScopedSyscall(ThreadState *thr) : thr(thr) { LazyInitialize(thr); }
+
+ ~ScopedSyscall() {
+ ProcessPendingSignals(thr);
+ }
+};
+
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC
+static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
+ TSAN_SYSCALL();
+ MemoryAccessRange(thr, pc, p, s, write);
+}
+
+static USED void syscall_acquire(uptr pc, uptr addr) {
+ TSAN_SYSCALL();
+ Acquire(thr, pc, addr);
+ DPrintf("syscall_acquire(0x%zx))\n", addr);
+}
+
+static USED void syscall_release(uptr pc, uptr addr) {
+ TSAN_SYSCALL();
+ DPrintf("syscall_release(0x%zx)\n", addr);
+ Release(thr, pc, addr);
+}
+
+static void syscall_fd_close(uptr pc, int fd) {
+ TSAN_SYSCALL();
+ FdClose(thr, pc, fd);
+}
+
+static USED void syscall_fd_acquire(uptr pc, int fd) {
+ TSAN_SYSCALL();
+ FdAcquire(thr, pc, fd);
+ DPrintf("syscall_fd_acquire(%d)\n", fd);
+}
+
+static USED void syscall_fd_release(uptr pc, int fd) {
+ TSAN_SYSCALL();
+ DPrintf("syscall_fd_release(%d)\n", fd);
+ FdRelease(thr, pc, fd);
+}
+
+static void syscall_pre_fork(uptr pc) { ForkBefore(cur_thread(), pc); }
+
+static void syscall_post_fork(uptr pc, int pid) {
+ ThreadState *thr = cur_thread();
+ if (pid == 0) {
+ // child
+ ForkChildAfter(thr, pc, true);
+ FdOnFork(thr, pc);
+ } else if (pid > 0) {
+ // parent
+ ForkParentAfter(thr, pc);
+ } else {
+ // error
+ ForkParentAfter(thr, pc);
+ }
+}
+#endif
+
+#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \
+ syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false)
+
+#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
+ syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), true)
+
+#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
+ do { \
+ (void)(p); \
+ (void)(s); \
+ } while (false)
+
+#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
+ do { \
+ (void)(p); \
+ (void)(s); \
+ } while (false)
+
+#define COMMON_SYSCALL_ACQUIRE(addr) \
+ syscall_acquire(GET_CALLER_PC(), (uptr)(addr))
+
+#define COMMON_SYSCALL_RELEASE(addr) \
+ syscall_release(GET_CALLER_PC(), (uptr)(addr))
+
+#define COMMON_SYSCALL_FD_CLOSE(fd) syscall_fd_close(GET_CALLER_PC(), fd)
+
+#define COMMON_SYSCALL_FD_ACQUIRE(fd) syscall_fd_acquire(GET_CALLER_PC(), fd)
+
+#define COMMON_SYSCALL_FD_RELEASE(fd) syscall_fd_release(GET_CALLER_PC(), fd)
+
+#define COMMON_SYSCALL_PRE_FORK() \
+ syscall_pre_fork(GET_CALLER_PC())
+
+#define COMMON_SYSCALL_POST_FORK(res) \
+ syscall_post_fork(GET_CALLER_PC(), res)
+
+#include "sanitizer_common/sanitizer_common_syscalls.inc"
+#include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
+
+#ifdef NEED_TLS_GET_ADDR
+
+static void handle_tls_addr(void *arg, void *res) {
+ ThreadState *thr = cur_thread();
+ if (!thr)
+ return;
+ DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr,
+ thr->tls_addr + thr->tls_size);
+ if (!dtv)
+ return;
+ // New DTLS block has been allocated.
+ MemoryResetRange(thr, 0, dtv->beg, dtv->size);
+}
+
+#if !SANITIZER_S390
+// Define our own interceptor instead of sanitizer_common's for three reasons:
+// 1. It must not process pending signals.
+//    Signal handlers may contain MOVDQA instructions (see below).
+// 2. It must be as simple as possible so that it does not contain MOVDQA.
+// 3. The sanitizer_common version uses COMMON_INTERCEPTOR_INITIALIZE_RANGE,
+//    which is empty for tsan (meant only for msan).
+// Note: __tls_get_addr can be called with a mis-aligned stack due to:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
+// So the interceptor must work with a mis-aligned stack; in particular, it must
+// not execute MOVDQA with stack addresses.
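+// For example (illustrative): a general-dynamic TLS access such as
+//   extern __thread int x; ... x = 1;
+// in a shared object compiles into a call to __tls_get_addr, and per the bug
+// above that call may arrive with the stack only 8-byte aligned, so a
+// compiler-generated 16-byte-aligned SSE spill (MOVDQA) here would fault.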
+TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) {
+ void *res = REAL(__tls_get_addr)(arg);
+ handle_tls_addr(arg, res);
+ return res;
+}
+#else // SANITIZER_S390
+TSAN_INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
+ uptr res = __tls_get_offset_wrapper(arg, REAL(__tls_get_offset));
+ char *tp = static_cast<char *>(__builtin_thread_pointer());
+ handle_tls_addr(arg, res + tp);
+ return res;
+}
+#endif
+#endif
+
+#if SANITIZER_NETBSD
+TSAN_INTERCEPTOR(void, _lwp_exit) {
+ SCOPED_TSAN_INTERCEPTOR(_lwp_exit);
+ DestroyThreadState();
+ REAL(_lwp_exit)();
+}
+#define TSAN_MAYBE_INTERCEPT__LWP_EXIT TSAN_INTERCEPT(_lwp_exit)
+#else
+#define TSAN_MAYBE_INTERCEPT__LWP_EXIT
+#endif
+
+#if SANITIZER_FREEBSD
+TSAN_INTERCEPTOR(void, thr_exit, tid_t *state) {
+ SCOPED_TSAN_INTERCEPTOR(thr_exit, state);
+ DestroyThreadState();
+ REAL(thr_exit(state));
+}
+#define TSAN_MAYBE_INTERCEPT_THR_EXIT TSAN_INTERCEPT(thr_exit)
+#else
+#define TSAN_MAYBE_INTERCEPT_THR_EXIT
+#endif
+
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_init, void *c, void *a)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_signal, void *c)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_broadcast, void *c)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_wait, void *c, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_destroy, void *c)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_init, void *m, void *a)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_destroy, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_trylock, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_init, void *m, void *a)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_destroy, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_rdlock, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_tryrdlock, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_wrlock, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_trywrlock, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_unlock, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(int, once, void *o, void (*f)())
+TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(int, sigsetmask, sigmask, int a, void *b,
+ void *c)
+
+namespace __tsan {
+
+static void finalize(void *arg) {
+ ThreadState *thr = cur_thread();
+ int status = Finalize(thr);
+ // Make sure the output is not lost.
+ FlushStreams();
+ if (status)
+ Die();
+}
+
+#if !SANITIZER_MAC && !SANITIZER_ANDROID
+static void unreachable() {
+ Report("FATAL: ThreadSanitizer: unreachable called\n");
+ Die();
+}
+#endif
+
+// Define default implementation since interception of libdispatch is optional.
+SANITIZER_WEAK_ATTRIBUTE void InitializeLibdispatchInterceptors() {}
+
+void InitializeInterceptors() {
+#if !SANITIZER_MAC
+  // We need to set it up early, because functions like dlsym() can call it.
+ REAL(memset) = internal_memset;
+ REAL(memcpy) = internal_memcpy;
+#endif
+
+ new(interceptor_ctx()) InterceptorContext();
+
+ InitializeCommonInterceptors();
+ InitializeSignalInterceptors();
+ InitializeLibdispatchInterceptors();
+
+#if !SANITIZER_MAC
+ // We can not use TSAN_INTERCEPT to get setjmp addr,
+ // because it does &setjmp and setjmp is not present in some versions of libc.
+ using __interception::InterceptFunction;
+ InterceptFunction(TSAN_STRING_SETJMP, (uptr*)&REAL(setjmp_symname), 0, 0);
+ InterceptFunction("_setjmp", (uptr*)&REAL(_setjmp), 0, 0);
+ InterceptFunction(TSAN_STRING_SIGSETJMP, (uptr*)&REAL(sigsetjmp_symname), 0,
+ 0);
+#if !SANITIZER_NETBSD
+ InterceptFunction("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
+#endif
+#endif
+
+ TSAN_INTERCEPT(longjmp_symname);
+ TSAN_INTERCEPT(siglongjmp_symname);
+#if SANITIZER_NETBSD
+ TSAN_INTERCEPT(_longjmp);
+#endif
+
+ TSAN_INTERCEPT(malloc);
+ TSAN_INTERCEPT(__libc_memalign);
+ TSAN_INTERCEPT(calloc);
+ TSAN_INTERCEPT(realloc);
+ TSAN_INTERCEPT(reallocarray);
+ TSAN_INTERCEPT(free);
+ TSAN_INTERCEPT(cfree);
+ TSAN_INTERCEPT(munmap);
+ TSAN_MAYBE_INTERCEPT_MEMALIGN;
+ TSAN_INTERCEPT(valloc);
+ TSAN_MAYBE_INTERCEPT_PVALLOC;
+ TSAN_INTERCEPT(posix_memalign);
+
+ TSAN_INTERCEPT(strcpy);
+ TSAN_INTERCEPT(strncpy);
+ TSAN_INTERCEPT(strdup);
+
+ TSAN_INTERCEPT(pthread_create);
+ TSAN_INTERCEPT(pthread_join);
+ TSAN_INTERCEPT(pthread_detach);
+ TSAN_INTERCEPT(pthread_exit);
+ #if SANITIZER_LINUX
+ TSAN_INTERCEPT(pthread_tryjoin_np);
+ TSAN_INTERCEPT(pthread_timedjoin_np);
+ #endif
+
+ TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE);
+ TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE);
+ TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE);
+ TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE);
+ TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE);
+ TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE);
+
+ TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT;
+
+ TSAN_INTERCEPT(pthread_mutex_init);
+ TSAN_INTERCEPT(pthread_mutex_destroy);
+ TSAN_INTERCEPT(pthread_mutex_trylock);
+ TSAN_INTERCEPT(pthread_mutex_timedlock);
+
+ TSAN_INTERCEPT(pthread_spin_init);
+ TSAN_INTERCEPT(pthread_spin_destroy);
+ TSAN_INTERCEPT(pthread_spin_lock);
+ TSAN_INTERCEPT(pthread_spin_trylock);
+ TSAN_INTERCEPT(pthread_spin_unlock);
+
+ TSAN_INTERCEPT(pthread_rwlock_init);
+ TSAN_INTERCEPT(pthread_rwlock_destroy);
+ TSAN_INTERCEPT(pthread_rwlock_rdlock);
+ TSAN_INTERCEPT(pthread_rwlock_tryrdlock);
+ TSAN_INTERCEPT(pthread_rwlock_timedrdlock);
+ TSAN_INTERCEPT(pthread_rwlock_wrlock);
+ TSAN_INTERCEPT(pthread_rwlock_trywrlock);
+ TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
+ TSAN_INTERCEPT(pthread_rwlock_unlock);
+
+ TSAN_INTERCEPT(pthread_barrier_init);
+ TSAN_INTERCEPT(pthread_barrier_destroy);
+ TSAN_INTERCEPT(pthread_barrier_wait);
+
+ TSAN_INTERCEPT(pthread_once);
+
+ TSAN_INTERCEPT(fstat);
+ TSAN_MAYBE_INTERCEPT___FXSTAT;
+ TSAN_MAYBE_INTERCEPT_FSTAT64;
+ TSAN_MAYBE_INTERCEPT___FXSTAT64;
+ TSAN_INTERCEPT(open);
+ TSAN_MAYBE_INTERCEPT_OPEN64;
+ TSAN_INTERCEPT(creat);
+ TSAN_MAYBE_INTERCEPT_CREAT64;
+ TSAN_INTERCEPT(dup);
+ TSAN_INTERCEPT(dup2);
+ TSAN_INTERCEPT(dup3);
+ TSAN_MAYBE_INTERCEPT_EVENTFD;
+ TSAN_MAYBE_INTERCEPT_SIGNALFD;
+ TSAN_MAYBE_INTERCEPT_INOTIFY_INIT;
+ TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1;
+ TSAN_INTERCEPT(socket);
+ TSAN_INTERCEPT(socketpair);
+ TSAN_INTERCEPT(connect);
+ TSAN_INTERCEPT(bind);
+ TSAN_INTERCEPT(listen);
+ TSAN_MAYBE_INTERCEPT_EPOLL;
+ TSAN_INTERCEPT(close);
+ TSAN_MAYBE_INTERCEPT___CLOSE;
+ TSAN_MAYBE_INTERCEPT___RES_ICLOSE;
+ TSAN_INTERCEPT(pipe);
+ TSAN_INTERCEPT(pipe2);
+
+ TSAN_INTERCEPT(unlink);
+ TSAN_INTERCEPT(tmpfile);
+ TSAN_MAYBE_INTERCEPT_TMPFILE64;
+ TSAN_INTERCEPT(abort);
+ TSAN_INTERCEPT(rmdir);
+ TSAN_INTERCEPT(closedir);
+
+ TSAN_INTERCEPT(sigsuspend);
+ TSAN_INTERCEPT(sigblock);
+ TSAN_INTERCEPT(sigsetmask);
+ TSAN_INTERCEPT(pthread_sigmask);
+ TSAN_INTERCEPT(raise);
+ TSAN_INTERCEPT(kill);
+ TSAN_INTERCEPT(pthread_kill);
+ TSAN_INTERCEPT(sleep);
+ TSAN_INTERCEPT(usleep);
+ TSAN_INTERCEPT(nanosleep);
+ TSAN_INTERCEPT(pause);
+ TSAN_INTERCEPT(gettimeofday);
+ TSAN_INTERCEPT(getaddrinfo);
+
+ TSAN_INTERCEPT(fork);
+ TSAN_INTERCEPT(vfork);
+#if SANITIZER_LINUX
+ TSAN_INTERCEPT(clone);
+#endif
+#if !SANITIZER_ANDROID
+ TSAN_INTERCEPT(dl_iterate_phdr);
+#endif
+ TSAN_MAYBE_INTERCEPT_ON_EXIT;
+ TSAN_INTERCEPT(__cxa_atexit);
+ TSAN_INTERCEPT(_exit);
+
+#ifdef NEED_TLS_GET_ADDR
+#if !SANITIZER_S390
+ TSAN_INTERCEPT(__tls_get_addr);
+#else
+ TSAN_INTERCEPT(__tls_get_addr_internal);
+ TSAN_INTERCEPT(__tls_get_offset);
+#endif
+#endif
+
+ TSAN_MAYBE_INTERCEPT__LWP_EXIT;
+ TSAN_MAYBE_INTERCEPT_THR_EXIT;
+
+#if !SANITIZER_MAC && !SANITIZER_ANDROID
+ // We need to set it up, because interceptors check that the function is
+ // resolved. But atexit is emitted directly into the module, so it can't be
+ // resolved.
+ REAL(atexit) = (int(*)(void(*)()))unreachable;
+#endif
+
+ if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
+ Printf("ThreadSanitizer: failed to setup atexit callback\n");
+ Die();
+ }
+ if (pthread_atfork(atfork_prepare, atfork_parent, atfork_child)) {
+ Printf("ThreadSanitizer: failed to setup atfork callbacks\n");
+ Die();
+ }
+
+#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+ if (pthread_key_create(&interceptor_ctx()->finalize_key, &thread_finalize)) {
+ Printf("ThreadSanitizer: failed to create thread key\n");
+ Die();
+ }
+#endif
+
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_init);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_signal);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_broadcast);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_wait);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_destroy);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_init);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_destroy);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_trylock);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_init);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_destroy);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_rdlock);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_tryrdlock);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_wrlock);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_trywrlock);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_unlock);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(once);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(sigsetmask);
+
+ FdInit();
+}
+
+} // namespace __tsan
+
+// Invisible barrier for tests.
+// There were several unsuccessful iterations for this functionality:
+// 1. Initially it was implemented in user code using
+// REAL(pthread_barrier_wait). But pthread_barrier_wait is not supported on
+// macOS, and futexes are Linux-specific, so they do not help here either.
+// 2. Then we switched to atomics+usleep(10). But usleep produced parasitic
+// "as-if synchronized via sleep" messages in reports, which failed some
+// output tests.
+// 3. Then we switched to atomics+sched_yield. But this produced tons of
+// tsan-visible events, which led to "failed to restore stack trace" failures.
+// Note that the no_sanitize_thread attribute does not turn off atomic
+// interception, so attaching it to a function defined in user code does not
+// help. That's why we now have what we have.
+constexpr u32 kBarrierThreadBits = 10;
+constexpr u32 kBarrierThreads = 1 << kBarrierThreadBits;
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_init(
+ atomic_uint32_t *barrier, u32 num_threads) {
+ if (num_threads >= kBarrierThreads) {
+ Printf("barrier_init: count is too large (%d)\n", num_threads);
+ Die();
+ }
+ // The kBarrierThreadBits least significant bits hold the thread count;
+ // the remaining bits count the threads that have entered the barrier.
+ atomic_store(barrier, num_threads, memory_order_relaxed);
+}
+
+static u32 barrier_epoch(u32 value) {
+ return (value >> kBarrierThreadBits) / (value & (kBarrierThreads - 1));
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_wait(
+ atomic_uint32_t *barrier) {
+ u32 old = atomic_fetch_add(barrier, kBarrierThreads, memory_order_relaxed);
+ u32 old_epoch = barrier_epoch(old);
+ if (barrier_epoch(old + kBarrierThreads) != old_epoch) {
+ FutexWake(barrier, (1 << 30));
+ return;
+ }
+ for (;;) {
+ u32 cur = atomic_load(barrier, memory_order_relaxed);
+ if (barrier_epoch(cur) != old_epoch)
+ return;
+ FutexWait(barrier, cur);
+ }
+}
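+
+// Worked example (illustrative, with hypothetical test code): with
+//   atomic_uint32_t barrier;
+//   __tsan_testonly_barrier_init(&barrier, 2);  // word = 2: the thread count
+// each of the two test threads calls __tsan_testonly_barrier_wait(&barrier).
+// The first waiter's fetch_add makes the entered count 1, the epoch
+// (entered / count) stays 0, so it futex-waits. The second waiter's add makes
+// the entered count 2, the epoch becomes 1, and FutexWake releases everyone.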
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_interface.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_interface.cpp
new file mode 100644
index 000000000000..048715185151
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_interface.cpp
@@ -0,0 +1,106 @@
+//===-- tsan_interface.cpp ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_interface.h"
+#include "tsan_interface_ann.h"
+#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_ptrauth.h"
+
+#define CALLERPC ((uptr)__builtin_return_address(0))
+
+using namespace __tsan;
+
+void __tsan_init() { Initialize(cur_thread_init()); }
+
+void __tsan_flush_memory() {
+ FlushShadowMemory();
+}
+
+void __tsan_read16(void *addr) {
+ uptr pc = CALLERPC;
+ ThreadState *thr = cur_thread();
+ MemoryAccess(thr, pc, (uptr)addr, 8, kAccessRead);
+ MemoryAccess(thr, pc, (uptr)addr + 8, 8, kAccessRead);
+}
+
+void __tsan_write16(void *addr) {
+ uptr pc = CALLERPC;
+ ThreadState *thr = cur_thread();
+ MemoryAccess(thr, pc, (uptr)addr, 8, kAccessWrite);
+ MemoryAccess(thr, pc, (uptr)addr + 8, 8, kAccessWrite);
+}
+
+void __tsan_read16_pc(void *addr, void *pc) {
+ uptr pc_no_pac = STRIP_PAC_PC(pc);
+ ThreadState *thr = cur_thread();
+ MemoryAccess(thr, pc_no_pac, (uptr)addr, 8, kAccessRead);
+ MemoryAccess(thr, pc_no_pac, (uptr)addr + 8, 8, kAccessRead);
+}
+
+void __tsan_write16_pc(void *addr, void *pc) {
+ uptr pc_no_pac = STRIP_PAC_PC(pc);
+ ThreadState *thr = cur_thread();
+ MemoryAccess(thr, pc_no_pac, (uptr)addr, 8, kAccessWrite);
+ MemoryAccess(thr, pc_no_pac, (uptr)addr + 8, 8, kAccessWrite);
+}
+
+// __tsan_unaligned_read/write calls are emitted by the compiler.
+
+void __tsan_unaligned_read16(const void *addr) {
+ uptr pc = CALLERPC;
+ ThreadState *thr = cur_thread();
+ UnalignedMemoryAccess(thr, pc, (uptr)addr, 8, kAccessRead);
+ UnalignedMemoryAccess(thr, pc, (uptr)addr + 8, 8, kAccessRead);
+}
+
+void __tsan_unaligned_write16(void *addr) {
+ uptr pc = CALLERPC;
+ ThreadState *thr = cur_thread();
+ UnalignedMemoryAccess(thr, pc, (uptr)addr, 8, kAccessWrite);
+ UnalignedMemoryAccess(thr, pc, (uptr)addr + 8, 8, kAccessWrite);
+}
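+
+// Illustrative example (an assumption about typical instrumentation, not part
+// of this file): for a load of a packed field such as
+//   struct __attribute__((packed)) S { char c; unsigned x; };
+// the compiler emits a call like __tsan_unaligned_read4(&s->x) right before
+// the actual load.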
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_get_current_fiber() {
+ return cur_thread();
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_create_fiber(unsigned flags) {
+ return FiberCreate(cur_thread(), CALLERPC, flags);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_destroy_fiber(void *fiber) {
+ FiberDestroy(cur_thread(), CALLERPC, static_cast<ThreadState *>(fiber));
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_switch_to_fiber(void *fiber, unsigned flags) {
+ FiberSwitch(cur_thread(), CALLERPC, static_cast<ThreadState *>(fiber), flags);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_set_fiber_name(void *fiber, const char *name) {
+ ThreadSetName(static_cast<ThreadState *>(fiber), name);
+}
+} // extern "C"
+
+void __tsan_acquire(void *addr) {
+ Acquire(cur_thread(), CALLERPC, (uptr)addr);
+}
+
+void __tsan_release(void *addr) {
+ Release(cur_thread(), CALLERPC, (uptr)addr);
+}
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_interface.h b/compiler-rt/lib/tsan/rtl-old/tsan_interface.h
new file mode 100644
index 000000000000..711f064174c2
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_interface.h
@@ -0,0 +1,424 @@
+//===-- tsan_interface.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// The functions declared in this header will be inserted by the instrumentation
+// module.
+// This header can be included by the instrumented program or by TSan tests.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_INTERFACE_H
+#define TSAN_INTERFACE_H
+
+#include <sanitizer_common/sanitizer_internal_defs.h>
+using __sanitizer::uptr;
+using __sanitizer::tid_t;
+
+// This header should NOT include any other headers.
+// All functions in this header are extern "C" and start with __tsan_.
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if !SANITIZER_GO
+
+// This function should be called at the very beginning of the process,
+// before any instrumented code is executed and before any call to malloc.
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_init();
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_flush_memory();
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read1(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read2(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read4(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read8(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read16(void *addr);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write1(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write2(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write4(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write8(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write16(void *addr);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read2(const void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read4(const void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read8(const void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read16(const void *addr);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write2(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write4(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write8(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write16(void *addr);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read1_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read2_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read4_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read8_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read16_pc(void *addr, void *pc);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write1_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write2_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write4_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write8_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write16_pc(void *addr, void *pc);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_vptr_read(void **vptr_p);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_vptr_update(void **vptr_p, void *new_val);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_entry(void *call_pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_exit();
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_ignore_thread_begin();
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_ignore_thread_end();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_external_register_tag(const char *object_type);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_register_header(void *tag, const char *header);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_assign_tag(void *addr, void *tag);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_read(void *addr, void *caller_pc, void *tag);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_write(void *addr, void *caller_pc, void *tag);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_read_range(void *addr, unsigned long size);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_write_range(void *addr, unsigned long size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_read_range_pc(void *addr, unsigned long size, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_write_range_pc(void *addr, unsigned long size, void *pc);
+
+// The user may provide a function that will be called right when TSan detects
+// an error. The argument 'report' is an opaque pointer that can be used to
+// gather additional information using other TSan report API functions.
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_on_report(void *report);
+
+// If TSan is currently reporting a detected issue on the current thread,
+// returns an opaque pointer to the current report. Otherwise returns NULL.
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_get_current_report();
+
+// Returns a report's description (issue type), number of duplicate issues
+// found, counts of array data (stack traces, memory operations, locations,
+// mutexes, threads, unique thread IDs) and a stack trace of a sleep() call (if
+// one was involved in the issue).
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_data(void *report, const char **description, int *count,
+ int *stack_count, int *mop_count, int *loc_count,
+ int *mutex_count, int *thread_count,
+ int *unique_tid_count, void **sleep_trace,
+ uptr trace_size);
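+
+// Illustrative sketch (hypothetical user code, not part of this header) of an
+// __tsan_on_report callback built from the functions declared above:
+//   extern "C" void __tsan_on_report(void *report) {
+//     const char *desc;
+//     int count, stacks, mops, locs, mutexes, threads, unique_tids;
+//     void *sleep_trace[16];
+//     __tsan_get_report_data(report, &desc, &count, &stacks, &mops, &locs,
+//                            &mutexes, &threads, &unique_tids, sleep_trace, 16);
+//     // e.g. log desc and the counts.
+//   }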
+
+/// Retrieves the "tag" from a report (for external-race report types). External
+/// races can be associated with a tag which gives them more meaning. For
+/// example, tag value '1' means "Swift access race". Tag value '0' indicates a
+/// plain external race.
+///
+/// \param report opaque pointer to the current report (obtained as argument in
+/// __tsan_on_report, or from __tsan_get_current_report)
+/// \param [out] tag points to storage that will be filled with the tag value
+///
+/// \returns non-zero value on success, zero on failure
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_tag(void *report, uptr *tag);
+
+// Returns information about stack traces included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_stack(void *report, uptr idx, void **trace,
+ uptr trace_size);
+
+// Returns information about memory operations included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_mop(void *report, uptr idx, int *tid, void **addr,
+ int *size, int *write, int *atomic, void **trace,
+ uptr trace_size);
+
+// Returns information about locations included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_loc(void *report, uptr idx, const char **type,
+ void **addr, uptr *start, uptr *size, int *tid,
+ int *fd, int *suppressable, void **trace,
+ uptr trace_size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_loc_object_type(void *report, uptr idx,
+ const char **object_type);
+
+// Returns information about mutexes included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_mutex(void *report, uptr idx, uptr *mutex_id, void **addr,
+ int *destroyed, void **trace, uptr trace_size);
+
+// Returns information about threads included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_thread(void *report, uptr idx, int *tid, tid_t *os_id,
+ int *running, const char **name, int *parent_tid,
+ void **trace, uptr trace_size);
+
+// Returns information about unique thread IDs included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_unique_tid(void *report, uptr idx, int *tid);
+
+// Returns the type of the pointer (heap, stack, global, ...) and if possible
+// also the starting address (e.g. of a heap allocation) and size.
+SANITIZER_INTERFACE_ATTRIBUTE
+const char *__tsan_locate_address(uptr addr, char *name, uptr name_size,
+ uptr *region_address, uptr *region_size);
+
+// Returns the allocation stack for a heap pointer.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_alloc_stack(uptr addr, uptr *trace, uptr size, int *thread_id,
+ tid_t *os_id);
+
+#endif // SANITIZER_GO
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+namespace __tsan {
+
+// These should match the declarations from the public tsan_interface_atomic.h header.
+typedef unsigned char a8;
+typedef unsigned short a16;
+typedef unsigned int a32;
+typedef unsigned long long a64;
+#if !SANITIZER_GO && (defined(__SIZEOF_INT128__) \
+ || (__clang_major__ * 100 + __clang_minor__ >= 302)) && \
+ !defined(__mips64) && !defined(__s390x__)
+__extension__ typedef __int128 a128;
+# define __TSAN_HAS_INT128 1
+#else
+# define __TSAN_HAS_INT128 0
+#endif
+
+// Part of ABI, do not change.
+// https://github.com/llvm/llvm-project/blob/main/libcxx/include/atomic
+typedef enum {
+ mo_relaxed,
+ mo_consume,
+ mo_acquire,
+ mo_release,
+ mo_acq_rel,
+ mo_seq_cst
+} morder;
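+
+// Illustrative example (an assumption about how instrumented code uses this
+// enum): an acquire load of a std::atomic<int> is lowered to a call like
+//   a32 v = __tsan_atomic32_load(addr, mo_acquire);
+// which is why the numeric values above must not change.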
+
+struct ThreadState;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_load(const volatile a8 *a, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_load(const volatile a16 *a, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_load(const volatile a32 *a, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_load(const volatile a64 *a, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_load(const volatile a128 *a, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
+ morder mo, morder fmo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
+ morder mo, morder fmo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, morder mo,
+ morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
+ morder mo, morder fmo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
+ morder mo, morder fmo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, morder mo,
+ morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
+ morder mo, morder fmo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
+ morder mo, morder fmo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic_thread_fence(morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic_signal_fence(morder mo);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
+ u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
+ u8 *a);
+
+} // extern "C"
+
+} // namespace __tsan
+
+#endif // TSAN_INTERFACE_H
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_interface.inc b/compiler-rt/lib/tsan/rtl-old/tsan_interface.inc
new file mode 100644
index 000000000000..0031800e851f
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_interface.inc
@@ -0,0 +1,182 @@
+//===-- tsan_interface.inc --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_ptrauth.h"
+#include "tsan_interface.h"
+#include "tsan_rtl.h"
+
+#define CALLERPC ((uptr)__builtin_return_address(0))
+
+using namespace __tsan;
+
+void __tsan_read1(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 1, kAccessRead);
+}
+
+void __tsan_read2(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, kAccessRead);
+}
+
+void __tsan_read4(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, kAccessRead);
+}
+
+void __tsan_read8(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessRead);
+}
+
+void __tsan_write1(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 1, kAccessWrite);
+}
+
+void __tsan_write2(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, kAccessWrite);
+}
+
+void __tsan_write4(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, kAccessWrite);
+}
+
+void __tsan_write8(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessWrite);
+}
+
+void __tsan_read1_pc(void *addr, void *pc) {
+ MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 1, kAccessRead | kAccessExternalPC);
+}
+
+void __tsan_read2_pc(void *addr, void *pc) {
+ MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 2, kAccessRead | kAccessExternalPC);
+}
+
+void __tsan_read4_pc(void *addr, void *pc) {
+ MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 4, kAccessRead | kAccessExternalPC);
+}
+
+void __tsan_read8_pc(void *addr, void *pc) {
+ MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 8, kAccessRead | kAccessExternalPC);
+}
+
+void __tsan_write1_pc(void *addr, void *pc) {
+ MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 1, kAccessWrite | kAccessExternalPC);
+}
+
+void __tsan_write2_pc(void *addr, void *pc) {
+ MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 2, kAccessWrite | kAccessExternalPC);
+}
+
+void __tsan_write4_pc(void *addr, void *pc) {
+ MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 4, kAccessWrite | kAccessExternalPC);
+}
+
+void __tsan_write8_pc(void *addr, void *pc) {
+ MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 8, kAccessWrite | kAccessExternalPC);
+}
+
+ALWAYS_INLINE USED void __tsan_unaligned_read2(const void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, kAccessRead);
+}
+
+ALWAYS_INLINE USED void __tsan_unaligned_read4(const void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, kAccessRead);
+}
+
+ALWAYS_INLINE USED void __tsan_unaligned_read8(const void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessRead);
+}
+
+ALWAYS_INLINE USED void __tsan_unaligned_write2(void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, kAccessWrite);
+}
+
+ALWAYS_INLINE USED void __tsan_unaligned_write4(void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, kAccessWrite);
+}
+
+ALWAYS_INLINE USED void __tsan_unaligned_write8(void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessWrite);
+}
+
+extern "C" {
+// __sanitizer_unaligned_load/store are for user instrumentation.
+SANITIZER_INTERFACE_ATTRIBUTE
+u16 __sanitizer_unaligned_load16(const uu16 *addr) {
+ __tsan_unaligned_read2(addr);
+ return *addr;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+u32 __sanitizer_unaligned_load32(const uu32 *addr) {
+ __tsan_unaligned_read4(addr);
+ return *addr;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+u64 __sanitizer_unaligned_load64(const uu64 *addr) {
+ __tsan_unaligned_read8(addr);
+ return *addr;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store16(uu16 *addr, u16 v) {
+ *addr = v;
+ __tsan_unaligned_write2(addr);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store32(uu32 *addr, u32 v) {
+ *addr = v;
+ __tsan_unaligned_write4(addr);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store64(uu64 *addr, u64 v) {
+ *addr = v;
+ __tsan_unaligned_write8(addr);
+}
+}
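+
+// Illustrative usage (hypothetical user code): reading a possibly misaligned
+// 32-bit value from a byte buffer while keeping TSan informed:
+//   u32 v = __sanitizer_unaligned_load32(
+//       reinterpret_cast<const uu32 *>(buf + offset));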
+
+void __tsan_vptr_update(void **vptr_p, void *new_val) {
+ if (*vptr_p == new_val)
+ return;
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)vptr_p, sizeof(*vptr_p),
+ kAccessWrite | kAccessVptr);
+}
+
+void __tsan_vptr_read(void **vptr_p) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)vptr_p, sizeof(*vptr_p),
+ kAccessRead | kAccessVptr);
+}
+
+void __tsan_func_entry(void *pc) { FuncEntry(cur_thread(), STRIP_PAC_PC(pc)); }
+
+void __tsan_func_exit() { FuncExit(cur_thread()); }
+
+void __tsan_ignore_thread_begin() { ThreadIgnoreBegin(cur_thread(), CALLERPC); }
+
+void __tsan_ignore_thread_end() { ThreadIgnoreEnd(cur_thread()); }
+
+void __tsan_read_range(void *addr, uptr size) {
+ MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, false);
+}
+
+void __tsan_write_range(void *addr, uptr size) {
+ MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, true);
+}
+
+void __tsan_read_range_pc(void *addr, uptr size, void *pc) {
+ MemoryAccessRange(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, size, false);
+}
+
+void __tsan_write_range_pc(void *addr, uptr size, void *pc) {
+ MemoryAccessRange(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, size, true);
+}
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_interface_ann.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_interface_ann.cpp
new file mode 100644
index 000000000000..6bd72e18d942
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_interface_ann.cpp
@@ -0,0 +1,438 @@
+//===-- tsan_interface_ann.cpp --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_vector.h"
+#include "tsan_interface_ann.h"
+#include "tsan_report.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "tsan_flags.h"
+#include "tsan_platform.h"
+
+#define CALLERPC ((uptr)__builtin_return_address(0))
+
+using namespace __tsan;
+
+namespace __tsan {
+
+class ScopedAnnotation {
+ public:
+ ScopedAnnotation(ThreadState *thr, const char *aname, uptr pc)
+ : thr_(thr) {
+ FuncEntry(thr_, pc);
+ DPrintf("#%d: annotation %s()\n", thr_->tid, aname);
+ }
+
+ ~ScopedAnnotation() {
+ FuncExit(thr_);
+ CheckedMutex::CheckNoLocks();
+ }
+ private:
+ ThreadState *const thr_;
+};
+
+#define SCOPED_ANNOTATION_RET(typ, ret) \
+ if (!flags()->enable_annotations) \
+ return ret; \
+ ThreadState *thr = cur_thread(); \
+ const uptr caller_pc = (uptr)__builtin_return_address(0); \
+ ScopedAnnotation sa(thr, __func__, caller_pc); \
+ const uptr pc = StackTrace::GetCurrentPc(); \
+ (void)pc;
+
+#define SCOPED_ANNOTATION(typ) SCOPED_ANNOTATION_RET(typ, )
+
+static const int kMaxDescLen = 128;
+
+struct ExpectRace {
+ ExpectRace *next;
+ ExpectRace *prev;
+ atomic_uintptr_t hitcount;
+ atomic_uintptr_t addcount;
+ uptr addr;
+ uptr size;
+ char *file;
+ int line;
+ char desc[kMaxDescLen];
+};
+
+struct DynamicAnnContext {
+ Mutex mtx;
+ ExpectRace benign;
+
+ DynamicAnnContext() : mtx(MutexTypeAnnotations) {}
+};
+
+static DynamicAnnContext *dyn_ann_ctx;
+static char dyn_ann_ctx_placeholder[sizeof(DynamicAnnContext)] ALIGNED(64);
+
+static void AddExpectRace(ExpectRace *list,
+ char *f, int l, uptr addr, uptr size, char *desc) {
+ ExpectRace *race = list->next;
+ for (; race != list; race = race->next) {
+ if (race->addr == addr && race->size == size) {
+ atomic_store_relaxed(&race->addcount,
+ atomic_load_relaxed(&race->addcount) + 1);
+ return;
+ }
+ }
+ race = static_cast<ExpectRace *>(Alloc(sizeof(ExpectRace)));
+ race->addr = addr;
+ race->size = size;
+ race->file = f;
+ race->line = l;
+ race->desc[0] = 0;
+ atomic_store_relaxed(&race->hitcount, 0);
+ atomic_store_relaxed(&race->addcount, 1);
+ if (desc) {
+ int i = 0;
+ for (; i < kMaxDescLen - 1 && desc[i]; i++)
+ race->desc[i] = desc[i];
+ race->desc[i] = 0;
+ }
+ race->prev = list;
+ race->next = list->next;
+ race->next->prev = race;
+ list->next = race;
+}
+
+static ExpectRace *FindRace(ExpectRace *list, uptr addr, uptr size) {
+ for (ExpectRace *race = list->next; race != list; race = race->next) {
+ uptr maxbegin = max(race->addr, addr);
+ uptr minend = min(race->addr + race->size, addr + size);
+ if (maxbegin < minend)
+ return race;
+ }
+ return 0;
+}
+
+static bool CheckContains(ExpectRace *list, uptr addr, uptr size) {
+ ExpectRace *race = FindRace(list, addr, size);
+ if (race == 0)
+ return false;
+ DPrintf("Hit expected/benign race: %s addr=%zx:%d %s:%d\n",
+ race->desc, race->addr, (int)race->size, race->file, race->line);
+ atomic_fetch_add(&race->hitcount, 1, memory_order_relaxed);
+ return true;
+}
+
+static void InitList(ExpectRace *list) {
+ list->next = list;
+ list->prev = list;
+}
+
+void InitializeDynamicAnnotations() {
+ dyn_ann_ctx = new(dyn_ann_ctx_placeholder) DynamicAnnContext;
+ InitList(&dyn_ann_ctx->benign);
+}
+
+bool IsExpectedReport(uptr addr, uptr size) {
+ ReadLock lock(&dyn_ann_ctx->mtx);
+ return CheckContains(&dyn_ann_ctx->benign, addr, size);
+}
+} // namespace __tsan
+
+using namespace __tsan;
+
+extern "C" {
+void INTERFACE_ATTRIBUTE AnnotateHappensBefore(char *f, int l, uptr addr) {
+ SCOPED_ANNOTATION(AnnotateHappensBefore);
+ Release(thr, pc, addr);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateHappensAfter(char *f, int l, uptr addr) {
+ SCOPED_ANNOTATION(AnnotateHappensAfter);
+ Acquire(thr, pc, addr);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateCondVarSignal(char *f, int l, uptr cv) {
+}
+
+void INTERFACE_ATTRIBUTE AnnotateCondVarSignalAll(char *f, int l, uptr cv) {
+}
+
+void INTERFACE_ATTRIBUTE AnnotateMutexIsNotPHB(char *f, int l, uptr mu) {
+}
+
+void INTERFACE_ATTRIBUTE AnnotateCondVarWait(char *f, int l, uptr cv,
+ uptr lock) {
+}
+
+void INTERFACE_ATTRIBUTE AnnotateRWLockCreate(char *f, int l, uptr m) {
+ SCOPED_ANNOTATION(AnnotateRWLockCreate);
+ MutexCreate(thr, pc, m, MutexFlagWriteReentrant);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateRWLockCreateStatic(char *f, int l, uptr m) {
+ SCOPED_ANNOTATION(AnnotateRWLockCreateStatic);
+ MutexCreate(thr, pc, m, MutexFlagWriteReentrant | MutexFlagLinkerInit);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateRWLockDestroy(char *f, int l, uptr m) {
+ SCOPED_ANNOTATION(AnnotateRWLockDestroy);
+ MutexDestroy(thr, pc, m);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateRWLockAcquired(char *f, int l, uptr m,
+ uptr is_w) {
+ SCOPED_ANNOTATION(AnnotateRWLockAcquired);
+ if (is_w)
+ MutexPostLock(thr, pc, m, MutexFlagDoPreLockOnPostLock);
+ else
+ MutexPostReadLock(thr, pc, m, MutexFlagDoPreLockOnPostLock);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateRWLockReleased(char *f, int l, uptr m,
+ uptr is_w) {
+ SCOPED_ANNOTATION(AnnotateRWLockReleased);
+ if (is_w)
+ MutexUnlock(thr, pc, m);
+ else
+ MutexReadUnlock(thr, pc, m);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateTraceMemory(char *f, int l, uptr mem) {
+}
+
+void INTERFACE_ATTRIBUTE AnnotateFlushState(char *f, int l) {
+}
+
+void INTERFACE_ATTRIBUTE AnnotateNewMemory(char *f, int l, uptr mem,
+ uptr size) {
+}
+
+void INTERFACE_ATTRIBUTE AnnotateNoOp(char *f, int l, uptr mem) {
+}
+
+void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) {
+}
+
+void INTERFACE_ATTRIBUTE AnnotateEnableRaceDetection(
+ char *f, int l, int enable) {
+}
+
+void INTERFACE_ATTRIBUTE AnnotateMutexIsUsedAsCondVar(
+ char *f, int l, uptr mu) {
+}
+
+void INTERFACE_ATTRIBUTE AnnotatePCQGet(
+ char *f, int l, uptr pcq) {
+}
+
+void INTERFACE_ATTRIBUTE AnnotatePCQPut(
+ char *f, int l, uptr pcq) {
+}
+
+void INTERFACE_ATTRIBUTE AnnotatePCQDestroy(
+ char *f, int l, uptr pcq) {
+}
+
+void INTERFACE_ATTRIBUTE AnnotatePCQCreate(
+ char *f, int l, uptr pcq) {
+}
+
+void INTERFACE_ATTRIBUTE AnnotateExpectRace(
+ char *f, int l, uptr mem, char *desc) {
+}
+
+static void BenignRaceImpl(char *f, int l, uptr mem, uptr size, char *desc) {
+ Lock lock(&dyn_ann_ctx->mtx);
+ AddExpectRace(&dyn_ann_ctx->benign,
+ f, l, mem, size, desc);
+ DPrintf("Add benign race: %s addr=%zx %s:%d\n", desc, mem, f, l);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateBenignRaceSized(
+ char *f, int l, uptr mem, uptr size, char *desc) {
+ SCOPED_ANNOTATION(AnnotateBenignRaceSized);
+ BenignRaceImpl(f, l, mem, size, desc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateBenignRace(
+ char *f, int l, uptr mem, char *desc) {
+ SCOPED_ANNOTATION(AnnotateBenignRace);
+ BenignRaceImpl(f, l, mem, 1, desc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsBegin(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreReadsBegin);
+ ThreadIgnoreBegin(thr, pc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsEnd(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreReadsEnd);
+ ThreadIgnoreEnd(thr);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesBegin(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreWritesBegin);
+ ThreadIgnoreBegin(thr, pc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesEnd(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreWritesEnd);
+ ThreadIgnoreEnd(thr);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncBegin(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreSyncBegin);
+ ThreadIgnoreSyncBegin(thr, pc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncEnd(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreSyncEnd);
+ ThreadIgnoreSyncEnd(thr);
+}
+
+void INTERFACE_ATTRIBUTE AnnotatePublishMemoryRange(
+ char *f, int l, uptr addr, uptr size) {
+}
+
+void INTERFACE_ATTRIBUTE AnnotateUnpublishMemoryRange(
+ char *f, int l, uptr addr, uptr size) {
+}
+
+void INTERFACE_ATTRIBUTE AnnotateThreadName(
+ char *f, int l, char *name) {
+ SCOPED_ANNOTATION(AnnotateThreadName);
+ ThreadSetName(thr, name);
+}
+
+// We deliberately omit the implementation of WTFAnnotateHappensBefore() and
+// WTFAnnotateHappensAfter(). Those are used by WebKit to annotate
+// atomic operations, which should be handled by ThreadSanitizer correctly.
+void INTERFACE_ATTRIBUTE WTFAnnotateHappensBefore(char *f, int l, uptr addr) {
+}
+
+void INTERFACE_ATTRIBUTE WTFAnnotateHappensAfter(char *f, int l, uptr addr) {
+}
+
+void INTERFACE_ATTRIBUTE WTFAnnotateBenignRaceSized(
+ char *f, int l, uptr mem, uptr sz, char *desc) {
+ SCOPED_ANNOTATION(AnnotateBenignRaceSized);
+ BenignRaceImpl(f, l, mem, sz, desc);
+}
+
+int INTERFACE_ATTRIBUTE RunningOnValgrind() {
+ return flags()->running_on_valgrind;
+}
+
+double __attribute__((weak)) INTERFACE_ATTRIBUTE ValgrindSlowdown(void) {
+ return 10.0;
+}
+
+const char INTERFACE_ATTRIBUTE* ThreadSanitizerQuery(const char *query) {
+ if (internal_strcmp(query, "pure_happens_before") == 0)
+ return "1";
+ else
+ return "0";
+}
+
+void INTERFACE_ATTRIBUTE
+AnnotateMemoryIsInitialized(char *f, int l, uptr mem, uptr sz) {}
+void INTERFACE_ATTRIBUTE
+AnnotateMemoryIsUninitialized(char *f, int l, uptr mem, uptr sz) {}
+
+// Note: the parameter is called flagz, because flags is already taken
+// by the global function that returns flags.
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_create(void *m, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_create);
+ MutexCreate(thr, pc, (uptr)m, flagz & MutexCreationFlagMask);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_destroy(void *m, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_destroy);
+ MutexDestroy(thr, pc, (uptr)m, flagz);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_pre_lock(void *m, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_pre_lock);
+ if (!(flagz & MutexFlagTryLock)) {
+ if (flagz & MutexFlagReadLock)
+ MutexPreReadLock(thr, pc, (uptr)m);
+ else
+ MutexPreLock(thr, pc, (uptr)m);
+ }
+ ThreadIgnoreBegin(thr, 0);
+ ThreadIgnoreSyncBegin(thr, 0);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_post_lock(void *m, unsigned flagz, int rec) {
+ SCOPED_ANNOTATION(__tsan_mutex_post_lock);
+ ThreadIgnoreSyncEnd(thr);
+ ThreadIgnoreEnd(thr);
+ if (!(flagz & MutexFlagTryLockFailed)) {
+ if (flagz & MutexFlagReadLock)
+ MutexPostReadLock(thr, pc, (uptr)m, flagz);
+ else
+ MutexPostLock(thr, pc, (uptr)m, flagz, rec);
+ }
+}
+
+INTERFACE_ATTRIBUTE
+int __tsan_mutex_pre_unlock(void *m, unsigned flagz) {
+ SCOPED_ANNOTATION_RET(__tsan_mutex_pre_unlock, 0);
+ int ret = 0;
+ if (flagz & MutexFlagReadLock) {
+ CHECK(!(flagz & MutexFlagRecursiveUnlock));
+ MutexReadUnlock(thr, pc, (uptr)m);
+ } else {
+ ret = MutexUnlock(thr, pc, (uptr)m, flagz);
+ }
+ ThreadIgnoreBegin(thr, 0);
+ ThreadIgnoreSyncBegin(thr, 0);
+ return ret;
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_post_unlock(void *m, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_post_unlock);
+ ThreadIgnoreSyncEnd(thr);
+ ThreadIgnoreEnd(thr);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_pre_signal(void *addr, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_pre_signal);
+ ThreadIgnoreBegin(thr, 0);
+ ThreadIgnoreSyncBegin(thr, 0);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_post_signal(void *addr, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_post_signal);
+ ThreadIgnoreSyncEnd(thr);
+ ThreadIgnoreEnd(thr);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_pre_divert(void *addr, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_pre_divert);
+ // Exit from ignore region started in __tsan_mutex_pre_lock/unlock/signal.
+ ThreadIgnoreSyncEnd(thr);
+ ThreadIgnoreEnd(thr);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_post_divert(void *addr, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_post_divert);
+ ThreadIgnoreBegin(thr, 0);
+ ThreadIgnoreSyncBegin(thr, 0);
+}
+} // extern "C"
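+
+// Illustrative sketch (hypothetical user code, not part of this file): a
+// custom lock typically wraps its operations with these annotations, e.g.
+//   __tsan_mutex_pre_lock(&lk, 0);
+//   ... actually acquire lk ...
+//   __tsan_mutex_post_lock(&lk, 0, 0);
+// and calls __tsan_mutex_pre_unlock/__tsan_mutex_post_unlock around the
+// release, so TSan models it like a standard mutex.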
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_interface_ann.h b/compiler-rt/lib/tsan/rtl-old/tsan_interface_ann.h
new file mode 100644
index 000000000000..458d61f53356
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_interface_ann.h
@@ -0,0 +1,32 @@
+//===-- tsan_interface_ann.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Interface for dynamic annotations.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_INTERFACE_ANN_H
+#define TSAN_INTERFACE_ANN_H
+
+#include <sanitizer_common/sanitizer_internal_defs.h>
+
+// This header should NOT include any other headers.
+// All functions in this header are extern "C" and start with __tsan_.
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_acquire(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_release(void *addr);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TSAN_INTERFACE_ANN_H
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_interface_atomic.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_interface_atomic.cpp
new file mode 100644
index 000000000000..24ba3bb1f65d
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_interface_atomic.cpp
@@ -0,0 +1,920 @@
+//===-- tsan_interface_atomic.cpp -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+// ThreadSanitizer atomic operations are based on the C++11/C11 standards.
+// For background, see the C++11 standard. A slightly older, publicly
+// available draft of the standard (not entirely up-to-date, but close enough
+// for casual browsing) is available here:
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
+// The following page contains more background information:
+// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
+
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+#include "tsan_flags.h"
+#include "tsan_interface.h"
+#include "tsan_rtl.h"
+
+using namespace __tsan;
+
+#if !SANITIZER_GO && __TSAN_HAS_INT128
+// Protects emulation of 128-bit atomic operations.
+static StaticSpinMutex mutex128;
+#endif
+
+#if SANITIZER_DEBUG
+static bool IsLoadOrder(morder mo) {
+ return mo == mo_relaxed || mo == mo_consume
+ || mo == mo_acquire || mo == mo_seq_cst;
+}
+
+static bool IsStoreOrder(morder mo) {
+ return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
+}
+#endif
+
+static bool IsReleaseOrder(morder mo) {
+ return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
+}
+
+static bool IsAcquireOrder(morder mo) {
+ return mo == mo_consume || mo == mo_acquire
+ || mo == mo_acq_rel || mo == mo_seq_cst;
+}
+
+static bool IsAcqRelOrder(morder mo) {
+ return mo == mo_acq_rel || mo == mo_seq_cst;
+}
+
+template<typename T> T func_xchg(volatile T *v, T op) {
+ T res = __sync_lock_test_and_set(v, op);
+ // __sync_lock_test_and_set does not contain a full barrier.
+ __sync_synchronize();
+ return res;
+}
+
+template<typename T> T func_add(volatile T *v, T op) {
+ return __sync_fetch_and_add(v, op);
+}
+
+template<typename T> T func_sub(volatile T *v, T op) {
+ return __sync_fetch_and_sub(v, op);
+}
+
+template<typename T> T func_and(volatile T *v, T op) {
+ return __sync_fetch_and_and(v, op);
+}
+
+template<typename T> T func_or(volatile T *v, T op) {
+ return __sync_fetch_and_or(v, op);
+}
+
+template<typename T> T func_xor(volatile T *v, T op) {
+ return __sync_fetch_and_xor(v, op);
+}
+
+template<typename T> T func_nand(volatile T *v, T op) {
+ // clang does not support __sync_fetch_and_nand.
+ T cmp = *v;
+ for (;;) {
+ T newv = ~(cmp & op);
+ T cur = __sync_val_compare_and_swap(v, cmp, newv);
+ if (cmp == cur)
+ return cmp;
+ cmp = cur;
+ }
+}
+
+template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
+ return __sync_val_compare_and_swap(v, cmp, xch);
+}
+
+// Clang does not support 128-bit atomic ops.
+// Atomic ops are executed under the tsan internal mutex;
+// here we assume that the atomic variables are not accessed
+// from non-instrumented code.
+#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \
+ && __TSAN_HAS_INT128
+a128 func_xchg(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
+ a128 cmp = *v;
+ *v = op;
+ return cmp;
+}
+
+a128 func_add(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
+ a128 cmp = *v;
+ *v = cmp + op;
+ return cmp;
+}
+
+a128 func_sub(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
+ a128 cmp = *v;
+ *v = cmp - op;
+ return cmp;
+}
+
+a128 func_and(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
+ a128 cmp = *v;
+ *v = cmp & op;
+ return cmp;
+}
+
+a128 func_or(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
+ a128 cmp = *v;
+ *v = cmp | op;
+ return cmp;
+}
+
+a128 func_xor(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
+ a128 cmp = *v;
+ *v = cmp ^ op;
+ return cmp;
+}
+
+a128 func_nand(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
+ a128 cmp = *v;
+ *v = ~(cmp & op);
+ return cmp;
+}
+
+a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
+ SpinMutexLock lock(&mutex128);
+ a128 cur = *v;
+ if (cur == cmp)
+ *v = xch;
+ return cur;
+}
+#endif
+
+template <typename T>
+static int AccessSize() {
+ if (sizeof(T) <= 1)
+ return 1;
+ else if (sizeof(T) <= 2)
+ return 2;
+ else if (sizeof(T) <= 4)
+ return 4;
+ else
+ return 8;
+ // For 16-byte atomics we also use 8-byte memory access;
+ // this leads to false negatives only in very obscure cases.
+}
+
+#if !SANITIZER_GO
+static atomic_uint8_t *to_atomic(const volatile a8 *a) {
+ return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
+}
+
+static atomic_uint16_t *to_atomic(const volatile a16 *a) {
+ return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
+}
+#endif
+
+static atomic_uint32_t *to_atomic(const volatile a32 *a) {
+ return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
+}
+
+static atomic_uint64_t *to_atomic(const volatile a64 *a) {
+ return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
+}
+
+static memory_order to_mo(morder mo) {
+ switch (mo) {
+ case mo_relaxed: return memory_order_relaxed;
+ case mo_consume: return memory_order_consume;
+ case mo_acquire: return memory_order_acquire;
+ case mo_release: return memory_order_release;
+ case mo_acq_rel: return memory_order_acq_rel;
+ case mo_seq_cst: return memory_order_seq_cst;
+ }
+ DCHECK(0);
+ return memory_order_seq_cst;
+}
+
+template<typename T>
+static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
+ return atomic_load(to_atomic(a), to_mo(mo));
+}
+
+#if __TSAN_HAS_INT128 && !SANITIZER_GO
+static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
+ SpinMutexLock lock(&mutex128);
+ return *a;
+}
+#endif
+
+template <typename T>
+static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
+ DCHECK(IsLoadOrder(mo));
+ // This fast-path is critical for performance.
+ // Assume the access is atomic.
+ if (!IsAcquireOrder(mo)) {
+ MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
+ kAccessRead | kAccessAtomic);
+ return NoTsanAtomicLoad(a, mo);
+ }
+ // Don't create sync object if it does not exist yet. For example, an atomic
+ // pointer is initialized to nullptr and then periodically acquire-loaded.
+ T v = NoTsanAtomicLoad(a, mo);
+ SyncVar *s = ctx->metamap.GetSyncIfExists((uptr)a);
+ if (s) {
+ ReadLock l(&s->mtx);
+ AcquireImpl(thr, pc, &s->clock);
+ // Re-read under sync mutex because we need a consistent snapshot
+ // of the value and the clock we acquire.
+ v = NoTsanAtomicLoad(a, mo);
+ }
+ MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessRead | kAccessAtomic);
+ return v;
+}
+
+template<typename T>
+static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
+ atomic_store(to_atomic(a), v, to_mo(mo));
+}
+
+#if __TSAN_HAS_INT128 && !SANITIZER_GO
+static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
+ SpinMutexLock lock(&mutex128);
+ *a = v;
+}
+#endif
+
+template <typename T>
+static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ DCHECK(IsStoreOrder(mo));
+ MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
+ // This fast-path is critical for performance.
+ // Assume the access is atomic.
+ // Strictly speaking, even a relaxed store cuts off the release sequence,
+ // so we must reset the clock.
+ if (!IsReleaseOrder(mo)) {
+ NoTsanAtomicStore(a, v, mo);
+ return;
+ }
+ __sync_synchronize();
+ SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
+ Lock l(&s->mtx);
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ ReleaseStoreImpl(thr, pc, &s->clock);
+ NoTsanAtomicStore(a, v, mo);
+}
+
+template <typename T, T (*F)(volatile T *v, T op)>
+static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
+ MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
+ if (LIKELY(mo == mo_relaxed))
+ return F(a, v);
+ SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
+ Lock l(&s->mtx);
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ if (IsAcqRelOrder(mo))
+ AcquireReleaseImpl(thr, pc, &s->clock);
+ else if (IsReleaseOrder(mo))
+ ReleaseImpl(thr, pc, &s->clock);
+ else if (IsAcquireOrder(mo))
+ AcquireImpl(thr, pc, &s->clock);
+ return F(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
+ return func_xchg(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
+ return func_add(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
+ return func_sub(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
+ return func_and(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
+ return func_or(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
+ return func_xor(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
+ return func_nand(a, v);
+}
+
+template<typename T>
+static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
+ return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
+}
+
+#if __TSAN_HAS_INT128
+static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
+ morder mo, morder fmo) {
+ a128 old = *c;
+ a128 cur = func_cas(a, old, v);
+ if (cur == old)
+ return true;
+ *c = cur;
+ return false;
+}
+#endif
+
+template<typename T>
+static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
+ NoTsanAtomicCAS(a, &c, v, mo, fmo);
+ return c;
+}
+
+template <typename T>
+static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v,
+ morder mo, morder fmo) {
+  // 31.7.2.18: "The failure argument shall not be memory_order_release
+  // nor memory_order_acq_rel". LLVM (2021-05) falls back to Monotonic
+  // (mo_relaxed) when those orders are used.
+ DCHECK(IsLoadOrder(fmo));
+
+ MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
+ if (LIKELY(mo == mo_relaxed && fmo == mo_relaxed)) {
+ T cc = *c;
+ T pr = func_cas(a, cc, v);
+ if (pr == cc)
+ return true;
+ *c = pr;
+ return false;
+ }
+
+ bool release = IsReleaseOrder(mo);
+ SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
+ RWLock l(&s->mtx, release);
+ T cc = *c;
+ T pr = func_cas(a, cc, v);
+ bool success = pr == cc;
+ if (!success) {
+ *c = pr;
+ mo = fmo;
+ }
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+
+ if (success && IsAcqRelOrder(mo))
+ AcquireReleaseImpl(thr, pc, &s->clock);
+ else if (success && IsReleaseOrder(mo))
+ ReleaseImpl(thr, pc, &s->clock);
+ else if (IsAcquireOrder(mo))
+ AcquireImpl(thr, pc, &s->clock);
+ return success;
+}
+
+template<typename T>
+static T AtomicCAS(ThreadState *thr, uptr pc,
+ volatile T *a, T c, T v, morder mo, morder fmo) {
+ AtomicCAS(thr, pc, a, &c, v, mo, fmo);
+ return c;
+}
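+
+// Illustrative consequence of the failure-order handling above (c holds the
+// expected value):
+//   __tsan_atomic32_compare_exchange_strong(&x, &c, v, mo_acq_rel, mo_acquire);
+// On success the acq_rel path runs AcquireReleaseImpl; on failure mo is
+// replaced by fmo, so only AcquireImpl runs. As noted above, LLVM lowers a
+// release/acq_rel failure order to mo_relaxed before it reaches this code.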
+
+#if !SANITIZER_GO
+static void NoTsanAtomicFence(morder mo) {
+ __sync_synchronize();
+}
+
+static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
+ // FIXME(dvyukov): not implemented.
+ __sync_synchronize();
+}
+#endif
+
+// Interface functions follow.
+#if !SANITIZER_GO
+
+// C/C++
+
+static morder convert_morder(morder mo) {
+ if (flags()->force_seq_cst_atomics)
+ return (morder)mo_seq_cst;
+
+ // Filter out additional memory order flags:
+ // MEMMODEL_SYNC = 1 << 15
+ // __ATOMIC_HLE_ACQUIRE = 1 << 16
+ // __ATOMIC_HLE_RELEASE = 1 << 17
+ //
+ // HLE is an optimization, and we pretend that elision always fails.
+  // MEMMODEL_SYNC is used when lowering __sync_ atomics; since we use
+  // __sync_ atomics for the actual atomic operations, we can safely ignore
+  // it as well. It also subtly affects semantics, but we don't model the
+  // difference.
+ return (morder)(mo & 0x7fff);
+}
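+
+// For example, a caller passing __ATOMIC_ACQUIRE | __ATOMIC_HLE_ACQUIRE ends
+// up with plain mo_acquire after the mask above: the elision hint is dropped
+// and only the C++ memory order is kept.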
+
+# define ATOMIC_IMPL(func, ...) \
+ ThreadState *const thr = cur_thread(); \
+ ProcessPendingSignals(thr); \
+ if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors)) \
+ return NoTsanAtomic##func(__VA_ARGS__); \
+ mo = convert_morder(mo); \
+ return Atomic##func(thr, GET_CALLER_PC(), __VA_ARGS__);
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
+ ATOMIC_IMPL(Load, a, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
+ ATOMIC_IMPL(Load, a, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
+ ATOMIC_IMPL(Load, a, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
+ ATOMIC_IMPL(Load, a, mo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
+ ATOMIC_IMPL(Load, a, mo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
+ ATOMIC_IMPL(Store, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
+ ATOMIC_IMPL(Store, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
+ ATOMIC_IMPL(Store, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
+ ATOMIC_IMPL(Store, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
+ ATOMIC_IMPL(Store, a, v, mo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
+ ATOMIC_IMPL(Exchange, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
+ ATOMIC_IMPL(Exchange, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
+ ATOMIC_IMPL(Exchange, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
+ ATOMIC_IMPL(Exchange, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
+ ATOMIC_IMPL(Exchange, a, v, mo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
+ ATOMIC_IMPL(FetchAdd, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
+ ATOMIC_IMPL(FetchAdd, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
+ ATOMIC_IMPL(FetchAdd, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
+ ATOMIC_IMPL(FetchAdd, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
+ ATOMIC_IMPL(FetchAdd, a, v, mo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
+ ATOMIC_IMPL(FetchSub, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
+ ATOMIC_IMPL(FetchSub, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
+ ATOMIC_IMPL(FetchSub, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
+ ATOMIC_IMPL(FetchSub, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
+ ATOMIC_IMPL(FetchSub, a, v, mo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
+ ATOMIC_IMPL(FetchAnd, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
+ ATOMIC_IMPL(FetchAnd, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
+ ATOMIC_IMPL(FetchAnd, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
+ ATOMIC_IMPL(FetchAnd, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
+ ATOMIC_IMPL(FetchAnd, a, v, mo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
+ ATOMIC_IMPL(FetchOr, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
+ ATOMIC_IMPL(FetchOr, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
+ ATOMIC_IMPL(FetchOr, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
+ ATOMIC_IMPL(FetchOr, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
+ ATOMIC_IMPL(FetchOr, a, v, mo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
+ ATOMIC_IMPL(FetchXor, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
+ ATOMIC_IMPL(FetchXor, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
+ ATOMIC_IMPL(FetchXor, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
+ ATOMIC_IMPL(FetchXor, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
+ ATOMIC_IMPL(FetchXor, a, v, mo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
+ ATOMIC_IMPL(FetchNand, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
+ ATOMIC_IMPL(FetchNand, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
+ ATOMIC_IMPL(FetchNand, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
+ ATOMIC_IMPL(FetchNand, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
+ ATOMIC_IMPL(FetchNand, a, v, mo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
+ morder mo, morder fmo) {
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
+ morder mo, morder fmo) {
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
+ morder mo, morder fmo) {
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
+ morder mo, morder fmo) {
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
+ morder mo, morder fmo) {
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
+ morder mo, morder fmo) {
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
+ morder mo, morder fmo) {
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
+ morder mo, morder fmo) {
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
+ morder mo, morder fmo) {
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
+ morder mo, morder fmo) {
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
+ morder mo, morder fmo) {
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
+ morder mo, morder fmo) {
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
+ morder mo, morder fmo) {
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
+ morder mo, morder fmo) {
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
+ morder mo, morder fmo) {
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic_thread_fence(morder mo) { ATOMIC_IMPL(Fence, mo); }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic_signal_fence(morder mo) {
+}
+} // extern "C"
+
+#else // #if !SANITIZER_GO
+
+// Go
+
+# define ATOMIC(func, ...) \
+ if (thr->ignore_sync) { \
+ NoTsanAtomic##func(__VA_ARGS__); \
+ } else { \
+ FuncEntry(thr, cpc); \
+ Atomic##func(thr, pc, __VA_ARGS__); \
+ FuncExit(thr); \
+ }
+
+# define ATOMIC_RET(func, ret, ...) \
+ if (thr->ignore_sync) { \
+ (ret) = NoTsanAtomic##func(__VA_ARGS__); \
+ } else { \
+ FuncEntry(thr, cpc); \
+ (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
+ FuncExit(thr); \
+ }
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_compare_exchange(
+ ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ a32 cur = 0;
+ a32 cmp = *(a32*)(a+8);
+ ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
+ *(bool*)(a+16) = (cur == cmp);
+}
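+
+// Layout of the Go argument buffer 'a' as used by the casts in
+// __tsan_go_atomic32_compare_exchange above (the entry points receive a raw
+// pointer into the caller's frame):
+//   a+0:  *a32  address of the atomic variable
+//   a+8:  a32   expected (old) value
+//   a+12: a32   new value
+//   a+16: bool  success flag written back to the caller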
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_compare_exchange(
+ ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ a64 cur = 0;
+ a64 cmp = *(a64*)(a+8);
+ ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
+ *(bool*)(a+24) = (cur == cmp);
+}
+} // extern "C"
+#endif // #if !SANITIZER_GO
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_interface_java.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_interface_java.cpp
new file mode 100644
index 000000000000..c090c1f08cbe
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_interface_java.cpp
@@ -0,0 +1,258 @@
+//===-- tsan_interface_java.cpp -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_interface_java.h"
+#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+
+using namespace __tsan;
+
+const jptr kHeapAlignment = 8;
+
+namespace __tsan {
+
+struct JavaContext {
+ const uptr heap_begin;
+ const uptr heap_size;
+
+ JavaContext(jptr heap_begin, jptr heap_size)
+ : heap_begin(heap_begin)
+ , heap_size(heap_size) {
+ }
+};
+
+static u64 jctx_buf[sizeof(JavaContext) / sizeof(u64) + 1];
+static JavaContext *jctx;
+
+MBlock *JavaHeapBlock(uptr addr, uptr *start) {
+ if (!jctx || addr < jctx->heap_begin ||
+ addr >= jctx->heap_begin + jctx->heap_size)
+ return nullptr;
+ for (uptr p = RoundDown(addr, kMetaShadowCell); p >= jctx->heap_begin;
+ p -= kMetaShadowCell) {
+ MBlock *b = ctx->metamap.GetBlock(p);
+ if (!b)
+ continue;
+ if (p + b->siz <= addr)
+ return nullptr;
+ *start = p;
+ return b;
+ }
+ return nullptr;
+}
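+
+// Worked example for JavaHeapBlock (hypothetical addresses, kMetaShadowCell
+// assumed to be 8): for an object allocated at 0x1000 with siz == 0x30,
+// JavaHeapBlock(0x1028, &start) scans 0x1028, 0x1020, ... down to 0x1000,
+// finds the MBlock there, checks 0x1000 + 0x30 > 0x1028, sets *start = 0x1000
+// and returns the block; for addr == 0x1040 the same block is found but
+// 0x1000 + 0x30 <= 0x1040, so nullptr is returned.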
+
+} // namespace __tsan
+
+#define JAVA_FUNC_ENTER(func) \
+ ThreadState *thr = cur_thread(); \
+ (void)thr;
+
+void __tsan_java_init(jptr heap_begin, jptr heap_size) {
+ JAVA_FUNC_ENTER(__tsan_java_init);
+ Initialize(thr);
+ DPrintf("#%d: java_init(0x%zx, 0x%zx)\n", thr->tid, heap_begin, heap_size);
+ DCHECK_EQ(jctx, 0);
+ DCHECK_GT(heap_begin, 0);
+ DCHECK_GT(heap_size, 0);
+ DCHECK_EQ(heap_begin % kHeapAlignment, 0);
+ DCHECK_EQ(heap_size % kHeapAlignment, 0);
+ DCHECK_LT(heap_begin, heap_begin + heap_size);
+ jctx = new(jctx_buf) JavaContext(heap_begin, heap_size);
+}
+
+int __tsan_java_fini() {
+ JAVA_FUNC_ENTER(__tsan_java_fini);
+ DPrintf("#%d: java_fini()\n", thr->tid);
+ DCHECK_NE(jctx, 0);
+ // FIXME(dvyukov): this does not call atexit() callbacks.
+ int status = Finalize(thr);
+ DPrintf("#%d: java_fini() = %d\n", thr->tid, status);
+ return status;
+}
+
+void __tsan_java_alloc(jptr ptr, jptr size) {
+ JAVA_FUNC_ENTER(__tsan_java_alloc);
+ DPrintf("#%d: java_alloc(0x%zx, 0x%zx)\n", thr->tid, ptr, size);
+ DCHECK_NE(jctx, 0);
+ DCHECK_NE(size, 0);
+ DCHECK_EQ(ptr % kHeapAlignment, 0);
+ DCHECK_EQ(size % kHeapAlignment, 0);
+ DCHECK_GE(ptr, jctx->heap_begin);
+ DCHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
+
+ OnUserAlloc(thr, 0, ptr, size, false);
+}
+
+void __tsan_java_free(jptr ptr, jptr size) {
+ JAVA_FUNC_ENTER(__tsan_java_free);
+ DPrintf("#%d: java_free(0x%zx, 0x%zx)\n", thr->tid, ptr, size);
+ DCHECK_NE(jctx, 0);
+ DCHECK_NE(size, 0);
+ DCHECK_EQ(ptr % kHeapAlignment, 0);
+ DCHECK_EQ(size % kHeapAlignment, 0);
+ DCHECK_GE(ptr, jctx->heap_begin);
+ DCHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
+
+ ctx->metamap.FreeRange(thr->proc(), ptr, size);
+}
+
+void __tsan_java_move(jptr src, jptr dst, jptr size) {
+ JAVA_FUNC_ENTER(__tsan_java_move);
+ DPrintf("#%d: java_move(0x%zx, 0x%zx, 0x%zx)\n", thr->tid, src, dst, size);
+ DCHECK_NE(jctx, 0);
+ DCHECK_NE(size, 0);
+ DCHECK_EQ(src % kHeapAlignment, 0);
+ DCHECK_EQ(dst % kHeapAlignment, 0);
+ DCHECK_EQ(size % kHeapAlignment, 0);
+ DCHECK_GE(src, jctx->heap_begin);
+ DCHECK_LE(src + size, jctx->heap_begin + jctx->heap_size);
+ DCHECK_GE(dst, jctx->heap_begin);
+ DCHECK_LE(dst + size, jctx->heap_begin + jctx->heap_size);
+ DCHECK_NE(dst, src);
+ DCHECK_NE(size, 0);
+
+ // Assuming it's not running concurrently with threads that do
+ // memory accesses and mutex operations (stop-the-world phase).
+ ctx->metamap.MoveMemory(src, dst, size);
+
+ // Clear the destination shadow range.
+ // We used to move shadow from src to dst, but the trace format does not
+ // support that anymore as it contains addresses of accesses.
+ RawShadow *d = MemToShadow(dst);
+ RawShadow *dend = MemToShadow(dst + size);
+ internal_memset(d, 0, (dend - d) * sizeof(*d));
+}
+
+jptr __tsan_java_find(jptr *from_ptr, jptr to) {
+ JAVA_FUNC_ENTER(__tsan_java_find);
+ DPrintf("#%d: java_find(&0x%zx, 0x%zx)\n", thr->tid, *from_ptr, to);
+ DCHECK_EQ((*from_ptr) % kHeapAlignment, 0);
+ DCHECK_EQ(to % kHeapAlignment, 0);
+ DCHECK_GE(*from_ptr, jctx->heap_begin);
+ DCHECK_LE(to, jctx->heap_begin + jctx->heap_size);
+ for (uptr from = *from_ptr; from < to; from += kHeapAlignment) {
+ MBlock *b = ctx->metamap.GetBlock(from);
+ if (b) {
+ *from_ptr = from;
+ return b->siz;
+ }
+ }
+ return 0;
+}
+
+void __tsan_java_finalize() {
+ JAVA_FUNC_ENTER(__tsan_java_finalize);
+ DPrintf("#%d: java_finalize()\n", thr->tid);
+ AcquireGlobal(thr);
+}
+
+void __tsan_java_mutex_lock(jptr addr) {
+ JAVA_FUNC_ENTER(__tsan_java_mutex_lock);
+ DPrintf("#%d: java_mutex_lock(0x%zx)\n", thr->tid, addr);
+ DCHECK_NE(jctx, 0);
+ DCHECK_GE(addr, jctx->heap_begin);
+ DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ MutexPostLock(thr, 0, addr,
+ MutexFlagLinkerInit | MutexFlagWriteReentrant |
+ MutexFlagDoPreLockOnPostLock);
+}
+
+void __tsan_java_mutex_unlock(jptr addr) {
+ JAVA_FUNC_ENTER(__tsan_java_mutex_unlock);
+ DPrintf("#%d: java_mutex_unlock(0x%zx)\n", thr->tid, addr);
+ DCHECK_NE(jctx, 0);
+ DCHECK_GE(addr, jctx->heap_begin);
+ DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ MutexUnlock(thr, 0, addr);
+}
+
+void __tsan_java_mutex_read_lock(jptr addr) {
+ JAVA_FUNC_ENTER(__tsan_java_mutex_read_lock);
+ DPrintf("#%d: java_mutex_read_lock(0x%zx)\n", thr->tid, addr);
+ DCHECK_NE(jctx, 0);
+ DCHECK_GE(addr, jctx->heap_begin);
+ DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ MutexPostReadLock(thr, 0, addr,
+ MutexFlagLinkerInit | MutexFlagWriteReentrant |
+ MutexFlagDoPreLockOnPostLock);
+}
+
+void __tsan_java_mutex_read_unlock(jptr addr) {
+ JAVA_FUNC_ENTER(__tsan_java_mutex_read_unlock);
+ DPrintf("#%d: java_mutex_read_unlock(0x%zx)\n", thr->tid, addr);
+ DCHECK_NE(jctx, 0);
+ DCHECK_GE(addr, jctx->heap_begin);
+ DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ MutexReadUnlock(thr, 0, addr);
+}
+
+void __tsan_java_mutex_lock_rec(jptr addr, int rec) {
+ JAVA_FUNC_ENTER(__tsan_java_mutex_lock_rec);
+ DPrintf("#%d: java_mutex_lock_rec(0x%zx, %d)\n", thr->tid, addr, rec);
+ DCHECK_NE(jctx, 0);
+ DCHECK_GE(addr, jctx->heap_begin);
+ DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+ DCHECK_GT(rec, 0);
+
+ MutexPostLock(thr, 0, addr,
+ MutexFlagLinkerInit | MutexFlagWriteReentrant |
+ MutexFlagDoPreLockOnPostLock | MutexFlagRecursiveLock,
+ rec);
+}
+
+int __tsan_java_mutex_unlock_rec(jptr addr) {
+ JAVA_FUNC_ENTER(__tsan_java_mutex_unlock_rec);
+ DPrintf("#%d: java_mutex_unlock_rec(0x%zx)\n", thr->tid, addr);
+ DCHECK_NE(jctx, 0);
+ DCHECK_GE(addr, jctx->heap_begin);
+ DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ return MutexUnlock(thr, 0, addr, MutexFlagRecursiveUnlock);
+}
+
+void __tsan_java_acquire(jptr addr) {
+ JAVA_FUNC_ENTER(__tsan_java_acquire);
+ DPrintf("#%d: java_acquire(0x%zx)\n", thr->tid, addr);
+ DCHECK_NE(jctx, 0);
+ DCHECK_GE(addr, jctx->heap_begin);
+ DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ Acquire(thr, 0, addr);
+}
+
+void __tsan_java_release(jptr addr) {
+ JAVA_FUNC_ENTER(__tsan_java_release);
+ DPrintf("#%d: java_release(0x%zx)\n", thr->tid, addr);
+ DCHECK_NE(jctx, 0);
+ DCHECK_GE(addr, jctx->heap_begin);
+ DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ Release(thr, 0, addr);
+}
+
+void __tsan_java_release_store(jptr addr) {
+  JAVA_FUNC_ENTER(__tsan_java_release_store);
+ DPrintf("#%d: java_release_store(0x%zx)\n", thr->tid, addr);
+ DCHECK_NE(jctx, 0);
+ DCHECK_GE(addr, jctx->heap_begin);
+ DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ ReleaseStore(thr, 0, addr);
+}
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_interface_java.h b/compiler-rt/lib/tsan/rtl-old/tsan_interface_java.h
new file mode 100644
index 000000000000..51b445251e09
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_interface_java.h
@@ -0,0 +1,99 @@
+//===-- tsan_interface_java.h -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Interface for verification of Java or mixed Java/C++ programs.
+// The interface is intended to be used from within a JVM to notify TSan
+// about events such as Java locks and GC memory compaction.
+//
+// For plain memory accesses and function entry/exit a JVM is intended to use
+// C++ interfaces: __tsan_readN/writeN and __tsan_func_enter/exit.
+//
+// For volatile memory accesses and atomic operations JVM is intended to use
+// standard atomics API: __tsan_atomicN_load/store/etc.
+//
+// For usage examples see lit_tests/java_*.cpp
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_INTERFACE_JAVA_H
+#define TSAN_INTERFACE_JAVA_H
+
+#ifndef INTERFACE_ATTRIBUTE
+# define INTERFACE_ATTRIBUTE __attribute__((visibility("default")))
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef unsigned long jptr;
+
+// Must be called before any other callback from Java.
+void __tsan_java_init(jptr heap_begin, jptr heap_size) INTERFACE_ATTRIBUTE;
+// Must be called when the application exits.
+// Not necessarily the last callback (concurrently running threads are OK).
+// Returns exit status or 0 if tsan does not want to override it.
+int __tsan_java_fini() INTERFACE_ATTRIBUTE;
+
+// Callback for memory allocations.
+// May be omitted for allocations that are neither subject to data races
+// nor contain synchronization objects (e.g. String).
+void __tsan_java_alloc(jptr ptr, jptr size) INTERFACE_ATTRIBUTE;
+// Callback for memory free.
+// Can be aggregated for several objects (preferably).
+void __tsan_java_free(jptr ptr, jptr size) INTERFACE_ATTRIBUTE;
+// Callback for memory move by GC.
+// Can be aggregated for several objects (preferably).
+// The ranges can overlap.
+void __tsan_java_move(jptr src, jptr dst, jptr size) INTERFACE_ATTRIBUTE;
+// This function must be called on the finalizer thread
+// before executing a batch of finalizers.
+// It ensures the necessary synchronization between
+// Java object creation and finalization.
+void __tsan_java_finalize() INTERFACE_ATTRIBUTE;
+// Finds the first allocated memory block in the [*from_ptr, to) range, saves
+// its address in *from_ptr and returns its size. Returns 0 if there are no
+// allocated memory blocks in the range.
+jptr __tsan_java_find(jptr *from_ptr, jptr to) INTERFACE_ATTRIBUTE;
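+
+// Usage sketch (hypothetical GC loop): enumerate live blocks in
+// [region_begin, region_end) before compaction:
+//   jptr p = region_begin;
+//   while (jptr sz = __tsan_java_find(&p, region_end)) {
+//     // p now points at a live object of size sz
+//     p += sz;
+//   }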
+
+// Mutex lock.
+// Addr is any unique address associated with the mutex.
+// Can be called on recursive reentry.
+void __tsan_java_mutex_lock(jptr addr) INTERFACE_ATTRIBUTE;
+// Mutex unlock.
+void __tsan_java_mutex_unlock(jptr addr) INTERFACE_ATTRIBUTE;
+// Mutex read lock.
+void __tsan_java_mutex_read_lock(jptr addr) INTERFACE_ATTRIBUTE;
+// Mutex read unlock.
+void __tsan_java_mutex_read_unlock(jptr addr) INTERFACE_ATTRIBUTE;
+// Recursive mutex lock, intended for handling of Object.wait().
+// The 'rec' value must be obtained from the previous
+// __tsan_java_mutex_unlock_rec().
+void __tsan_java_mutex_lock_rec(jptr addr, int rec) INTERFACE_ATTRIBUTE;
+// Recursive mutex unlock, intended for handling of Object.wait().
+// The return value says how many times this thread called lock()
+// w/o a pairing unlock() (i.e. how many recursive levels it unlocked).
+// It must be passed back to __tsan_java_mutex_lock_rec() to restore
+// the same recursion level.
+int __tsan_java_mutex_unlock_rec(jptr addr) INTERFACE_ATTRIBUTE;
+
+// Raw acquire/release primitives.
+// Can be used to establish happens-before edges on volatile/final fields,
+// in atomic operations, etc. release_store is the same as release, but it
+// breaks the release sequence on addr (see C++ standard 1.10/7 for details).
+void __tsan_java_acquire(jptr addr) INTERFACE_ATTRIBUTE;
+void __tsan_java_release(jptr addr) INTERFACE_ATTRIBUTE;
+void __tsan_java_release_store(jptr addr) INTERFACE_ATTRIBUTE;
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#undef INTERFACE_ATTRIBUTE
+
+#endif // #ifndef TSAN_INTERFACE_JAVA_H
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_malloc_mac.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_malloc_mac.cpp
new file mode 100644
index 000000000000..0e861bf1f962
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_malloc_mac.cpp
@@ -0,0 +1,71 @@
+//===-- tsan_malloc_mac.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Mac-specific malloc interception.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "sanitizer_common/sanitizer_errno.h"
+#include "tsan_interceptors.h"
+#include "tsan_stack_trace.h"
+
+using namespace __tsan;
+#define COMMON_MALLOC_ZONE_NAME "tsan"
+#define COMMON_MALLOC_ENTER()
+#define COMMON_MALLOC_SANITIZER_INITIALIZED (cur_thread()->is_inited)
+#define COMMON_MALLOC_FORCE_LOCK()
+#define COMMON_MALLOC_FORCE_UNLOCK()
+#define COMMON_MALLOC_MEMALIGN(alignment, size) \
+ void *p = \
+ user_memalign(cur_thread(), StackTrace::GetCurrentPc(), alignment, size)
+#define COMMON_MALLOC_MALLOC(size) \
+ if (in_symbolizer()) return InternalAlloc(size); \
+ SCOPED_INTERCEPTOR_RAW(malloc, size); \
+ void *p = user_alloc(thr, pc, size)
+#define COMMON_MALLOC_REALLOC(ptr, size) \
+ if (in_symbolizer()) return InternalRealloc(ptr, size); \
+ SCOPED_INTERCEPTOR_RAW(realloc, ptr, size); \
+ void *p = user_realloc(thr, pc, ptr, size)
+#define COMMON_MALLOC_CALLOC(count, size) \
+ if (in_symbolizer()) return InternalCalloc(count, size); \
+ SCOPED_INTERCEPTOR_RAW(calloc, size, count); \
+ void *p = user_calloc(thr, pc, size, count)
+#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \
+ if (in_symbolizer()) { \
+ void *p = InternalAlloc(size, nullptr, alignment); \
+ if (!p) return errno_ENOMEM; \
+ *memptr = p; \
+ return 0; \
+ } \
+ SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, alignment, size); \
+ int res = user_posix_memalign(thr, pc, memptr, alignment, size);
+#define COMMON_MALLOC_VALLOC(size) \
+ if (in_symbolizer()) \
+ return InternalAlloc(size, nullptr, GetPageSizeCached()); \
+ SCOPED_INTERCEPTOR_RAW(valloc, size); \
+ void *p = user_valloc(thr, pc, size)
+#define COMMON_MALLOC_FREE(ptr) \
+ if (in_symbolizer()) return InternalFree(ptr); \
+ SCOPED_INTERCEPTOR_RAW(free, ptr); \
+ user_free(thr, pc, ptr)
+#define COMMON_MALLOC_SIZE(ptr) uptr size = user_alloc_usable_size(ptr);
+#define COMMON_MALLOC_FILL_STATS(zone, stats)
+#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
+ (void)zone_name; \
+ Report("mz_realloc(%p) -- attempting to realloc unallocated memory.\n", ptr);
+#define COMMON_MALLOC_NAMESPACE __tsan
+#define COMMON_MALLOC_HAS_ZONE_ENUMERATOR 0
+#define COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT 0
+
+#include "sanitizer_common/sanitizer_malloc_mac.inc"
+
+#endif
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_md5.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_md5.cpp
new file mode 100644
index 000000000000..72857b773fed
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_md5.cpp
@@ -0,0 +1,250 @@
+//===-- tsan_md5.cpp ------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
+#define G(x, y, z) ((y) ^ ((z) & ((x) ^ (y))))
+#define H(x, y, z) ((x) ^ (y) ^ (z))
+#define I(x, y, z) ((y) ^ ((x) | ~(z)))
+
+#define STEP(f, a, b, c, d, x, t, s) \
+ (a) += f((b), (c), (d)) + (x) + (t); \
+ (a) = (((a) << (s)) | (((a) & 0xffffffff) >> (32 - (s)))); \
+ (a) += (b);
+
+#define SET(n) \
+ (*(const MD5_u32plus *)&ptr[(n) * 4])
+#define GET(n) \
+ SET(n)
+
+typedef unsigned int MD5_u32plus;
+typedef unsigned long ulong_t;
+
+typedef struct {
+ MD5_u32plus lo, hi;
+ MD5_u32plus a, b, c, d;
+ unsigned char buffer[64];
+ MD5_u32plus block[16];
+} MD5_CTX;
+
+static const void *body(MD5_CTX *ctx, const void *data, ulong_t size) {
+ const unsigned char *ptr = (const unsigned char *)data;
+ MD5_u32plus a, b, c, d;
+ MD5_u32plus saved_a, saved_b, saved_c, saved_d;
+
+ a = ctx->a;
+ b = ctx->b;
+ c = ctx->c;
+ d = ctx->d;
+
+ do {
+ saved_a = a;
+ saved_b = b;
+ saved_c = c;
+ saved_d = d;
+
+ STEP(F, a, b, c, d, SET(0), 0xd76aa478, 7)
+ STEP(F, d, a, b, c, SET(1), 0xe8c7b756, 12)
+ STEP(F, c, d, a, b, SET(2), 0x242070db, 17)
+ STEP(F, b, c, d, a, SET(3), 0xc1bdceee, 22)
+ STEP(F, a, b, c, d, SET(4), 0xf57c0faf, 7)
+ STEP(F, d, a, b, c, SET(5), 0x4787c62a, 12)
+ STEP(F, c, d, a, b, SET(6), 0xa8304613, 17)
+ STEP(F, b, c, d, a, SET(7), 0xfd469501, 22)
+ STEP(F, a, b, c, d, SET(8), 0x698098d8, 7)
+ STEP(F, d, a, b, c, SET(9), 0x8b44f7af, 12)
+ STEP(F, c, d, a, b, SET(10), 0xffff5bb1, 17)
+ STEP(F, b, c, d, a, SET(11), 0x895cd7be, 22)
+ STEP(F, a, b, c, d, SET(12), 0x6b901122, 7)
+ STEP(F, d, a, b, c, SET(13), 0xfd987193, 12)
+ STEP(F, c, d, a, b, SET(14), 0xa679438e, 17)
+ STEP(F, b, c, d, a, SET(15), 0x49b40821, 22)
+
+ STEP(G, a, b, c, d, GET(1), 0xf61e2562, 5)
+ STEP(G, d, a, b, c, GET(6), 0xc040b340, 9)
+ STEP(G, c, d, a, b, GET(11), 0x265e5a51, 14)
+ STEP(G, b, c, d, a, GET(0), 0xe9b6c7aa, 20)
+ STEP(G, a, b, c, d, GET(5), 0xd62f105d, 5)
+ STEP(G, d, a, b, c, GET(10), 0x02441453, 9)
+ STEP(G, c, d, a, b, GET(15), 0xd8a1e681, 14)
+ STEP(G, b, c, d, a, GET(4), 0xe7d3fbc8, 20)
+ STEP(G, a, b, c, d, GET(9), 0x21e1cde6, 5)
+ STEP(G, d, a, b, c, GET(14), 0xc33707d6, 9)
+ STEP(G, c, d, a, b, GET(3), 0xf4d50d87, 14)
+ STEP(G, b, c, d, a, GET(8), 0x455a14ed, 20)
+ STEP(G, a, b, c, d, GET(13), 0xa9e3e905, 5)
+ STEP(G, d, a, b, c, GET(2), 0xfcefa3f8, 9)
+ STEP(G, c, d, a, b, GET(7), 0x676f02d9, 14)
+ STEP(G, b, c, d, a, GET(12), 0x8d2a4c8a, 20)
+
+ STEP(H, a, b, c, d, GET(5), 0xfffa3942, 4)
+ STEP(H, d, a, b, c, GET(8), 0x8771f681, 11)
+ STEP(H, c, d, a, b, GET(11), 0x6d9d6122, 16)
+ STEP(H, b, c, d, a, GET(14), 0xfde5380c, 23)
+ STEP(H, a, b, c, d, GET(1), 0xa4beea44, 4)
+ STEP(H, d, a, b, c, GET(4), 0x4bdecfa9, 11)
+ STEP(H, c, d, a, b, GET(7), 0xf6bb4b60, 16)
+ STEP(H, b, c, d, a, GET(10), 0xbebfbc70, 23)
+ STEP(H, a, b, c, d, GET(13), 0x289b7ec6, 4)
+ STEP(H, d, a, b, c, GET(0), 0xeaa127fa, 11)
+ STEP(H, c, d, a, b, GET(3), 0xd4ef3085, 16)
+ STEP(H, b, c, d, a, GET(6), 0x04881d05, 23)
+ STEP(H, a, b, c, d, GET(9), 0xd9d4d039, 4)
+ STEP(H, d, a, b, c, GET(12), 0xe6db99e5, 11)
+ STEP(H, c, d, a, b, GET(15), 0x1fa27cf8, 16)
+ STEP(H, b, c, d, a, GET(2), 0xc4ac5665, 23)
+
+ STEP(I, a, b, c, d, GET(0), 0xf4292244, 6)
+ STEP(I, d, a, b, c, GET(7), 0x432aff97, 10)
+ STEP(I, c, d, a, b, GET(14), 0xab9423a7, 15)
+ STEP(I, b, c, d, a, GET(5), 0xfc93a039, 21)
+ STEP(I, a, b, c, d, GET(12), 0x655b59c3, 6)
+ STEP(I, d, a, b, c, GET(3), 0x8f0ccc92, 10)
+ STEP(I, c, d, a, b, GET(10), 0xffeff47d, 15)
+ STEP(I, b, c, d, a, GET(1), 0x85845dd1, 21)
+ STEP(I, a, b, c, d, GET(8), 0x6fa87e4f, 6)
+ STEP(I, d, a, b, c, GET(15), 0xfe2ce6e0, 10)
+ STEP(I, c, d, a, b, GET(6), 0xa3014314, 15)
+ STEP(I, b, c, d, a, GET(13), 0x4e0811a1, 21)
+ STEP(I, a, b, c, d, GET(4), 0xf7537e82, 6)
+ STEP(I, d, a, b, c, GET(11), 0xbd3af235, 10)
+ STEP(I, c, d, a, b, GET(2), 0x2ad7d2bb, 15)
+ STEP(I, b, c, d, a, GET(9), 0xeb86d391, 21)
+
+ a += saved_a;
+ b += saved_b;
+ c += saved_c;
+ d += saved_d;
+
+ ptr += 64;
+ } while (size -= 64);
+
+ ctx->a = a;
+ ctx->b = b;
+ ctx->c = c;
+ ctx->d = d;
+
+ return ptr;
+}
+
+#undef F
+#undef G
+#undef H
+#undef I
+#undef STEP
+#undef SET
+#undef GET
+
+void MD5_Init(MD5_CTX *ctx) {
+ ctx->a = 0x67452301;
+ ctx->b = 0xefcdab89;
+ ctx->c = 0x98badcfe;
+ ctx->d = 0x10325476;
+
+ ctx->lo = 0;
+ ctx->hi = 0;
+}
+
+void MD5_Update(MD5_CTX *ctx, const void *data, ulong_t size) {
+ MD5_u32plus saved_lo;
+ ulong_t used, free;
+
+ saved_lo = ctx->lo;
+ if ((ctx->lo = (saved_lo + size) & 0x1fffffff) < saved_lo)
+ ctx->hi++;
+ ctx->hi += size >> 29;
+
+ used = saved_lo & 0x3f;
+
+ if (used) {
+ free = 64 - used;
+
+ if (size < free) {
+ internal_memcpy(&ctx->buffer[used], data, size);
+ return;
+ }
+
+ internal_memcpy(&ctx->buffer[used], data, free);
+ data = (const unsigned char *)data + free;
+ size -= free;
+ body(ctx, ctx->buffer, 64);
+ }
+
+ if (size >= 64) {
+ data = body(ctx, data, size & ~(ulong_t)0x3f);
+ size &= 0x3f;
+ }
+
+ internal_memcpy(ctx->buffer, data, size);
+}
+
+void MD5_Final(unsigned char *result, MD5_CTX *ctx) {
+ ulong_t used, free;
+
+ used = ctx->lo & 0x3f;
+
+ ctx->buffer[used++] = 0x80;
+
+ free = 64 - used;
+
+ if (free < 8) {
+ internal_memset(&ctx->buffer[used], 0, free);
+ body(ctx, ctx->buffer, 64);
+ used = 0;
+ free = 64;
+ }
+
+ internal_memset(&ctx->buffer[used], 0, free - 8);
+
+ ctx->lo <<= 3;
+ ctx->buffer[56] = ctx->lo;
+ ctx->buffer[57] = ctx->lo >> 8;
+ ctx->buffer[58] = ctx->lo >> 16;
+ ctx->buffer[59] = ctx->lo >> 24;
+ ctx->buffer[60] = ctx->hi;
+ ctx->buffer[61] = ctx->hi >> 8;
+ ctx->buffer[62] = ctx->hi >> 16;
+ ctx->buffer[63] = ctx->hi >> 24;
+
+ body(ctx, ctx->buffer, 64);
+
+ result[0] = ctx->a;
+ result[1] = ctx->a >> 8;
+ result[2] = ctx->a >> 16;
+ result[3] = ctx->a >> 24;
+ result[4] = ctx->b;
+ result[5] = ctx->b >> 8;
+ result[6] = ctx->b >> 16;
+ result[7] = ctx->b >> 24;
+ result[8] = ctx->c;
+ result[9] = ctx->c >> 8;
+ result[10] = ctx->c >> 16;
+ result[11] = ctx->c >> 24;
+ result[12] = ctx->d;
+ result[13] = ctx->d >> 8;
+ result[14] = ctx->d >> 16;
+ result[15] = ctx->d >> 24;
+
+ internal_memset(ctx, 0, sizeof(*ctx));
+}
+
+MD5Hash md5_hash(const void *data, uptr size) {
+ MD5Hash res;
+ MD5_CTX ctx;
+ MD5_Init(&ctx);
+ MD5_Update(&ctx, data, size);
+ MD5_Final((unsigned char*)&res.hash[0], &ctx);
+ return res;
+}
+} // namespace __tsan
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_mman.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_mman.cpp
new file mode 100644
index 000000000000..75044c38d5d2
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_mman.cpp
@@ -0,0 +1,436 @@
+//===-- tsan_mman.cpp -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_allocator_checks.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_mman.h"
+#include "tsan_rtl.h"
+#include "tsan_report.h"
+#include "tsan_flags.h"
+
+// May be overridden by the front-end.
+SANITIZER_WEAK_DEFAULT_IMPL
+void __sanitizer_malloc_hook(void *ptr, uptr size) {
+ (void)ptr;
+ (void)size;
+}
+
+SANITIZER_WEAK_DEFAULT_IMPL
+void __sanitizer_free_hook(void *ptr) {
+ (void)ptr;
+}
+
+namespace __tsan {
+
+struct MapUnmapCallback {
+ void OnMap(uptr p, uptr size) const { }
+ void OnUnmap(uptr p, uptr size) const {
+ // We are about to unmap a chunk of user memory.
+ // Mark the corresponding shadow memory as not needed.
+ DontNeedShadowFor(p, size);
+ // Mark the corresponding meta shadow memory as not needed.
+ // Note the block does not contain any meta info at this point
+ // (this happens after free).
+ const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
+ const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
+ // Block came from LargeMmapAllocator, so must be large.
+ // We rely on this in the calculations below.
+ CHECK_GE(size, 2 * kPageSize);
+ uptr diff = RoundUp(p, kPageSize) - p;
+ if (diff != 0) {
+ p += diff;
+ size -= diff;
+ }
+ diff = p + size - RoundDown(p + size, kPageSize);
+ if (diff != 0)
+ size -= diff;
+ uptr p_meta = (uptr)MemToMeta(p);
+ ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
+ }
+};
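+
+// Worked example for OnUnmap (hypothetical numbers, kPageSize == 0x2000):
+// for p == 0x11100 and size == 0x10000 the unaligned head (0xf00 bytes) and
+// tail (0x1100 bytes) are trimmed, so only the meta shadow corresponding to
+// the page-aligned subrange [0x12000, 0x20000) is released to the OS.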
+
+static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
+Allocator *allocator() {
+ return reinterpret_cast<Allocator*>(&allocator_placeholder);
+}
+
+struct GlobalProc {
+ Mutex mtx;
+ Processor *proc;
+ // This mutex represents the internal allocator combined for
+ // the purposes of deadlock detection. The internal allocator
+  // uses multiple mutexes; moreover, they are locked only occasionally,
+  // and they are spin mutexes, which don't support deadlock detection.
+ // So we use this fake mutex to serve as a substitute for these mutexes.
+ CheckedMutex internal_alloc_mtx;
+
+ GlobalProc()
+ : mtx(MutexTypeGlobalProc),
+ proc(ProcCreate()),
+ internal_alloc_mtx(MutexTypeInternalAlloc) {}
+};
+
+static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
+GlobalProc *global_proc() {
+ return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
+}
+
+static void InternalAllocAccess() {
+ global_proc()->internal_alloc_mtx.Lock();
+ global_proc()->internal_alloc_mtx.Unlock();
+}
+
+ScopedGlobalProcessor::ScopedGlobalProcessor() {
+ GlobalProc *gp = global_proc();
+ ThreadState *thr = cur_thread();
+ if (thr->proc())
+ return;
+ // If we don't have a proc, use the global one.
+  // There are currently only two known cases where this path is triggered:
+ // __interceptor_free
+ // __nptl_deallocate_tsd
+ // start_thread
+ // clone
+ // and:
+ // ResetRange
+ // __interceptor_munmap
+ // __deallocate_stack
+ // start_thread
+ // clone
+ // Ideally, we destroy thread state (and unwire proc) when a thread actually
+  // exits (i.e. when we join/wait it). Then we would not need the global proc.
+ gp->mtx.Lock();
+ ProcWire(gp->proc, thr);
+}
+
+ScopedGlobalProcessor::~ScopedGlobalProcessor() {
+ GlobalProc *gp = global_proc();
+ ThreadState *thr = cur_thread();
+ if (thr->proc() != gp->proc)
+ return;
+ ProcUnwire(gp->proc, thr);
+ gp->mtx.Unlock();
+}
+
+void AllocatorLock() NO_THREAD_SAFETY_ANALYSIS {
+ global_proc()->mtx.Lock();
+ global_proc()->internal_alloc_mtx.Lock();
+ InternalAllocatorLock();
+}
+
+void AllocatorUnlock() NO_THREAD_SAFETY_ANALYSIS {
+ InternalAllocatorUnlock();
+ global_proc()->internal_alloc_mtx.Unlock();
+ global_proc()->mtx.Unlock();
+}
+
+static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;
+static uptr max_user_defined_malloc_size;
+
+void InitializeAllocator() {
+ SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+ allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
+ max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
+ ? common_flags()->max_allocation_size_mb
+ << 20
+ : kMaxAllowedMallocSize;
+}
+
+void InitializeAllocatorLate() {
+ new(global_proc()) GlobalProc();
+}
+
+void AllocatorProcStart(Processor *proc) {
+ allocator()->InitCache(&proc->alloc_cache);
+ internal_allocator()->InitCache(&proc->internal_alloc_cache);
+}
+
+void AllocatorProcFinish(Processor *proc) {
+ allocator()->DestroyCache(&proc->alloc_cache);
+ internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
+}
+
+void AllocatorPrintStats() {
+ allocator()->PrintStats();
+}
+
+static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
+ if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
+ !ShouldReport(thr, ReportTypeSignalUnsafe))
+ return;
+ VarSizeStackTrace stack;
+ ObtainCurrentStack(thr, pc, &stack);
+ if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
+ return;
+ ThreadRegistryLock l(&ctx->thread_registry);
+ ScopedReport rep(ReportTypeSignalUnsafe);
+ rep.AddStack(stack, true);
+ OutputReport(thr, rep);
+}
+
+
+void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
+ bool signal) {
+ if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize ||
+ sz > max_user_defined_malloc_size) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ uptr malloc_limit =
+ Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportAllocationSizeTooBig(sz, malloc_limit, &stack);
+ }
+ if (UNLIKELY(IsRssLimitExceeded())) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportRssLimitExceeded(&stack);
+ }
+ void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
+ if (UNLIKELY(!p)) {
+ SetAllocatorOutOfMemory();
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportOutOfMemory(sz, &stack);
+ }
+ if (ctx && ctx->initialized)
+ OnUserAlloc(thr, pc, (uptr)p, sz, true);
+ if (signal)
+ SignalUnsafeCall(thr, pc);
+ return p;
+}
+
+void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
+ ScopedGlobalProcessor sgp;
+ if (ctx && ctx->initialized)
+ OnUserFree(thr, pc, (uptr)p, true);
+ allocator()->Deallocate(&thr->proc()->alloc_cache, p);
+ if (signal)
+ SignalUnsafeCall(thr, pc);
+}
+
+void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
+}
+
+void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
+ if (UNLIKELY(CheckForCallocOverflow(size, n))) {
+ if (AllocatorMayReturnNull())
+ return SetErrnoOnNull(nullptr);
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportCallocOverflow(n, size, &stack);
+ }
+ void *p = user_alloc_internal(thr, pc, n * size);
+ if (p)
+ internal_memset(p, 0, n * size);
+ return SetErrnoOnNull(p);
+}
+
+void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr size, uptr n) {
+ if (UNLIKELY(CheckForCallocOverflow(size, n))) {
+ if (AllocatorMayReturnNull())
+ return SetErrnoOnNull(nullptr);
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportReallocArrayOverflow(size, n, &stack);
+ }
+ return user_realloc(thr, pc, p, size * n);
+}
+
+void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
+ DPrintf("#%d: alloc(%zu) = 0x%zx\n", thr->tid, sz, p);
+ ctx->metamap.AllocBlock(thr, pc, p, sz);
+ if (write && thr->ignore_reads_and_writes == 0 && thr->is_inited)
+ MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
+ else
+ MemoryResetRange(thr, pc, (uptr)p, sz);
+}
+
+void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
+ CHECK_NE(p, (void*)0);
+ uptr sz = ctx->metamap.FreeBlock(thr->proc(), p);
+ DPrintf("#%d: free(0x%zx, %zu)\n", thr->tid, p, sz);
+ if (write && thr->ignore_reads_and_writes == 0 && thr->is_inited)
+ MemoryRangeFreed(thr, pc, (uptr)p, sz);
+}
+
+void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
+  // FIXME: Handle "shrinking" more efficiently;
+  // it seems that some software actually does this.
+ if (!p)
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
+ if (!sz) {
+ user_free(thr, pc, p);
+ return nullptr;
+ }
+ void *new_p = user_alloc_internal(thr, pc, sz);
+ if (new_p) {
+ uptr old_sz = user_alloc_usable_size(p);
+ internal_memcpy(new_p, p, min(old_sz, sz));
+ user_free(thr, pc, p);
+ }
+ return SetErrnoOnNull(new_p);
+}
+
+void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
+ if (UNLIKELY(!IsPowerOfTwo(align))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportInvalidAllocationAlignment(align, &stack);
+ }
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
+}
+
+int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
+ uptr sz) {
+ if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
+ if (AllocatorMayReturnNull())
+ return errno_EINVAL;
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportInvalidPosixMemalignAlignment(align, &stack);
+ }
+ void *ptr = user_alloc_internal(thr, pc, sz, align);
+ if (UNLIKELY(!ptr))
+ // OOM error is already taken care of by user_alloc_internal.
+ return errno_ENOMEM;
+ CHECK(IsAligned((uptr)ptr, align));
+ *memptr = ptr;
+ return 0;
+}
+
+void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
+ if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportInvalidAlignedAllocAlignment(sz, align, &stack);
+ }
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
+}
+
+void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
+}
+
+void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
+ uptr PageSize = GetPageSizeCached();
+ if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
+ errno = errno_ENOMEM;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportPvallocOverflow(sz, &stack);
+ }
+ // pvalloc(0) should allocate one page.
+ sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
+}
+
+uptr user_alloc_usable_size(const void *p) {
+ if (p == 0)
+ return 0;
+ MBlock *b = ctx->metamap.GetBlock((uptr)p);
+ if (!b)
+ return 0; // Not a valid pointer.
+ if (b->siz == 0)
+ return 1; // Zero-sized allocations are actually 1 byte.
+ return b->siz;
+}
+
+void invoke_malloc_hook(void *ptr, uptr size) {
+ ThreadState *thr = cur_thread();
+ if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
+ return;
+ __sanitizer_malloc_hook(ptr, size);
+ RunMallocHooks(ptr, size);
+}
+
+void invoke_free_hook(void *ptr) {
+ ThreadState *thr = cur_thread();
+ if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
+ return;
+ __sanitizer_free_hook(ptr);
+ RunFreeHooks(ptr);
+}
+
+void *Alloc(uptr sz) {
+ ThreadState *thr = cur_thread();
+ if (thr->nomalloc) {
+ thr->nomalloc = 0; // CHECK calls internal_malloc().
+ CHECK(0);
+ }
+ InternalAllocAccess();
+ return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
+}
+
+void FreeImpl(void *p) {
+ ThreadState *thr = cur_thread();
+ if (thr->nomalloc) {
+ thr->nomalloc = 0; // CHECK calls internal_malloc().
+ CHECK(0);
+ }
+ InternalAllocAccess();
+ InternalFree(p, &thr->proc()->internal_alloc_cache);
+}
+
+} // namespace __tsan
+
+using namespace __tsan;
+
+extern "C" {
+uptr __sanitizer_get_current_allocated_bytes() {
+ uptr stats[AllocatorStatCount];
+ allocator()->GetStats(stats);
+ return stats[AllocatorStatAllocated];
+}
+
+uptr __sanitizer_get_heap_size() {
+ uptr stats[AllocatorStatCount];
+ allocator()->GetStats(stats);
+ return stats[AllocatorStatMapped];
+}
+
+uptr __sanitizer_get_free_bytes() {
+ return 1;
+}
+
+uptr __sanitizer_get_unmapped_bytes() {
+ return 1;
+}
+
+uptr __sanitizer_get_estimated_allocated_size(uptr size) {
+ return size;
+}
+
+int __sanitizer_get_ownership(const void *p) {
+ return allocator()->GetBlockBegin(p) != 0;
+}
+
+uptr __sanitizer_get_allocated_size(const void *p) {
+ return user_alloc_usable_size(p);
+}
+
+void __tsan_on_thread_idle() {
+ ThreadState *thr = cur_thread();
+ thr->clock.ResetCached(&thr->proc()->clock_cache);
+ thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
+ allocator()->SwallowCache(&thr->proc()->alloc_cache);
+ internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
+ ctx->metamap.OnProcIdle(thr->proc());
+}
+} // extern "C"
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_mman.h b/compiler-rt/lib/tsan/rtl-old/tsan_mman.h
new file mode 100644
index 000000000000..db8488eabbe2
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_mman.h
@@ -0,0 +1,78 @@
+//===-- tsan_mman.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_MMAN_H
+#define TSAN_MMAN_H
+
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+const uptr kDefaultAlignment = 16;
+
+void InitializeAllocator();
+void InitializeAllocatorLate();
+void ReplaceSystemMalloc();
+void AllocatorProcStart(Processor *proc);
+void AllocatorProcFinish(Processor *proc);
+void AllocatorPrintStats();
+void AllocatorLock();
+void AllocatorUnlock();
+
+// For user allocations.
+void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz,
+ uptr align = kDefaultAlignment, bool signal = true);
+// Does not accept NULL.
+void user_free(ThreadState *thr, uptr pc, void *p, bool signal = true);
+// Interceptor implementations.
+void *user_alloc(ThreadState *thr, uptr pc, uptr sz);
+void *user_calloc(ThreadState *thr, uptr pc, uptr sz, uptr n);
+void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz);
+void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr sz, uptr n);
+void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz);
+int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
+ uptr sz);
+void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz);
+void *user_valloc(ThreadState *thr, uptr pc, uptr sz);
+void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz);
+uptr user_alloc_usable_size(const void *p);
+
+// Invoking malloc/free hooks that may be installed by the user.
+void invoke_malloc_hook(void *ptr, uptr size);
+void invoke_free_hook(void *ptr);
+
+// For internal data structures.
+void *Alloc(uptr sz);
+void FreeImpl(void *p);
+
+template <typename T, typename... Args>
+T *New(Args &&...args) {
+ return new (Alloc(sizeof(T))) T(static_cast<Args &&>(args)...);
+}
+
+template <typename T>
+void Free(T *&p) {
+ if (p == nullptr)
+ return;
+ FreeImpl(p);
+ p = nullptr;
+}
+
+template <typename T>
+void DestroyAndFree(T *&p) {
+ if (p == nullptr)
+ return;
+ p->~T();
+ Free(p);
+}
+
+} // namespace __tsan
+#endif // TSAN_MMAN_H
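The New/Free/DestroyAndFree helpers declared above combine the runtime's internal allocator with placement new and an explicit destructor call. The following standalone sketch shows the same pattern with plain malloc/free standing in for Alloc/FreeImpl (those stand-ins and the Node type are illustrative only, not part of the runtime, which never returns null from Alloc):

  #include <cstdlib>
  #include <new>
  #include <utility>

  // Stand-ins for the runtime's Alloc/FreeImpl (assumption: the real ones go
  // through the internal allocator and abort instead of returning null).
  static void *Alloc(size_t sz) { return std::malloc(sz); }
  static void FreeImpl(void *p) { std::free(p); }

  template <typename T, typename... Args>
  T *New(Args &&...args) {
    // Construct T in raw memory, forwarding the constructor arguments.
    return new (Alloc(sizeof(T))) T(std::forward<Args>(args)...);
  }

  template <typename T>
  void DestroyAndFree(T *&p) {
    if (!p)
      return;
    p->~T();      // run the destructor explicitly
    FreeImpl(p);  // then release the raw memory
    p = nullptr;  // clear the caller's pointer, as the runtime version does
  }

  struct Node {
    int value;
    explicit Node(int v) : value(v) {}
  };

  int main() {
    Node *n = New<Node>(42);
    DestroyAndFree(n);  // n is nullptr afterwards
    return n == nullptr ? 0 : 1;
  }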
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_mutexset.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_mutexset.cpp
new file mode 100644
index 000000000000..735179686ba9
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_mutexset.cpp
@@ -0,0 +1,132 @@
+//===-- tsan_mutexset.cpp -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_mutexset.h"
+
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+MutexSet::MutexSet() {
+}
+
+void MutexSet::Add(u64 id, bool write, u64 epoch) {
+ // Look up existing mutex with the same id.
+ for (uptr i = 0; i < size_; i++) {
+ if (descs_[i].id == id) {
+ descs_[i].count++;
+ descs_[i].epoch = epoch;
+ return;
+ }
+ }
+ // On overflow, find the oldest mutex and drop it.
+ if (size_ == kMaxSize) {
+ u64 minepoch = (u64)-1;
+ u64 mini = (u64)-1;
+ for (uptr i = 0; i < size_; i++) {
+ if (descs_[i].epoch < minepoch) {
+ minepoch = descs_[i].epoch;
+ mini = i;
+ }
+ }
+ RemovePos(mini);
+ CHECK_EQ(size_, kMaxSize - 1);
+ }
+ // Add new mutex descriptor.
+ descs_[size_].addr = 0;
+ descs_[size_].stack_id = kInvalidStackID;
+ descs_[size_].id = id;
+ descs_[size_].write = write;
+ descs_[size_].epoch = epoch;
+ descs_[size_].seq = seq_++;
+ descs_[size_].count = 1;
+ size_++;
+}
+
+void MutexSet::Del(u64 id, bool write) {
+ for (uptr i = 0; i < size_; i++) {
+ if (descs_[i].id == id) {
+ if (--descs_[i].count == 0)
+ RemovePos(i);
+ return;
+ }
+ }
+}
+
+void MutexSet::Remove(u64 id) {
+ for (uptr i = 0; i < size_; i++) {
+ if (descs_[i].id == id) {
+ RemovePos(i);
+ return;
+ }
+ }
+}
+
+void MutexSet::AddAddr(uptr addr, StackID stack_id, bool write) {
+ // Look up existing mutex with the same id.
+ for (uptr i = 0; i < size_; i++) {
+ if (descs_[i].addr == addr) {
+ descs_[i].count++;
+ descs_[i].seq = seq_++;
+ return;
+ }
+ }
+ // On overflow, find the oldest mutex and drop it.
+ if (size_ == kMaxSize) {
+ uptr min = 0;
+ for (uptr i = 0; i < size_; i++) {
+ if (descs_[i].seq < descs_[min].seq)
+ min = i;
+ }
+ RemovePos(min);
+ CHECK_EQ(size_, kMaxSize - 1);
+ }
+ // Add new mutex descriptor.
+ descs_[size_].addr = addr;
+ descs_[size_].stack_id = stack_id;
+ descs_[size_].id = 0;
+ descs_[size_].write = write;
+ descs_[size_].epoch = 0;
+ descs_[size_].seq = seq_++;
+ descs_[size_].count = 1;
+ size_++;
+}
+
+void MutexSet::DelAddr(uptr addr, bool destroy) {
+ for (uptr i = 0; i < size_; i++) {
+ if (descs_[i].addr == addr) {
+ if (destroy || --descs_[i].count == 0)
+ RemovePos(i);
+ return;
+ }
+ }
+}
+
+void MutexSet::RemovePos(uptr i) {
+ CHECK_LT(i, size_);
+ descs_[i] = descs_[size_ - 1];
+ size_--;
+}
+
+uptr MutexSet::Size() const {
+ return size_;
+}
+
+MutexSet::Desc MutexSet::Get(uptr i) const {
+ CHECK_LT(i, size_);
+ return descs_[i];
+}
+
+DynamicMutexSet::DynamicMutexSet() : ptr_(New<MutexSet>()) {}
+DynamicMutexSet::~DynamicMutexSet() { DestroyAndFree(ptr_); }
+
+} // namespace __tsan
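Both Add and AddAddr above follow the same bounded-set policy: look for an existing descriptor first, and on overflow evict the entry with the smallest epoch/sequence number (the oldest one) before appending. A compilable toy version of that eviction policy, with an illustrative capacity of 4 and invented names:

  #include <cstdint>
  #include <cstdio>

  // Toy version of the AddAddr overflow policy: a fixed-capacity set that
  // evicts the entry with the smallest sequence number (the oldest one).
  struct TinyMutexSet {
    static constexpr int kMax = 4;
    struct Desc { uintptr_t addr; uint32_t seq; uint32_t count; };
    Desc descs[kMax];
    int size = 0;
    uint32_t seq = 0;

    void Add(uintptr_t addr) {
      for (int i = 0; i < size; i++)
        if (descs[i].addr == addr) { descs[i].count++; descs[i].seq = seq++; return; }
      if (size == kMax) {  // overflow: drop the oldest entry
        int min = 0;
        for (int i = 1; i < size; i++)
          if (descs[i].seq < descs[min].seq) min = i;
        descs[min] = descs[size - 1];  // same swap-with-last removal as RemovePos
        size--;
      }
      descs[size++] = Desc{addr, seq++, 1};
    }
  };

  int main() {
    TinyMutexSet s;
    for (uintptr_t a = 1; a <= 5; a++) s.Add(a * 0x1000);
    // Address 0x1000 (the oldest) was evicted when the fifth mutex was added.
    for (int i = 0; i < s.size; i++) std::printf("0x%zx\n", (size_t)s.descs[i].addr);
  }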
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_mutexset.h b/compiler-rt/lib/tsan/rtl-old/tsan_mutexset.h
new file mode 100644
index 000000000000..93776a664135
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_mutexset.h
@@ -0,0 +1,98 @@
+//===-- tsan_mutexset.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// MutexSet holds the set of mutexes currently held by a thread.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_MUTEXSET_H
+#define TSAN_MUTEXSET_H
+
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+class MutexSet {
+ public:
+  // Holds a limited number of mutexes.
+ // The oldest mutexes are discarded on overflow.
+ static constexpr uptr kMaxSize = 16;
+ struct Desc {
+ uptr addr;
+ StackID stack_id;
+ u64 id;
+ u64 epoch;
+ u32 seq;
+ u32 count;
+ bool write;
+
+ Desc() { internal_memset(this, 0, sizeof(*this)); }
+ Desc(const Desc& other) { *this = other; }
+ Desc& operator=(const MutexSet::Desc& other) {
+ internal_memcpy(this, &other, sizeof(*this));
+ return *this;
+ }
+ };
+
+ MutexSet();
+ // The 'id' is obtained from SyncVar::GetId().
+ void Add(u64 id, bool write, u64 epoch);
+ void Del(u64 id, bool write);
+ void Remove(u64 id); // Removes the mutex completely (if it's destroyed).
+ void AddAddr(uptr addr, StackID stack_id, bool write);
+ void DelAddr(uptr addr, bool destroy = false);
+ uptr Size() const;
+ Desc Get(uptr i) const;
+
+ private:
+#if !SANITIZER_GO
+ u32 seq_ = 0;
+ uptr size_ = 0;
+ Desc descs_[kMaxSize];
+
+ void RemovePos(uptr i);
+#endif
+};
+
+// MutexSet is too large to live on stack.
+// DynamicMutexSet can be used to create local MutexSets.
+class DynamicMutexSet {
+ public:
+ DynamicMutexSet();
+ ~DynamicMutexSet();
+ MutexSet* operator->() { return ptr_; }
+ operator MutexSet*() { return ptr_; }
+ DynamicMutexSet(const DynamicMutexSet&) = delete;
+ DynamicMutexSet& operator=(const DynamicMutexSet&) = delete;
+
+ private:
+ MutexSet* ptr_;
+#if SANITIZER_GO
+ MutexSet set_;
+#endif
+};
+
+// Go does not have mutexes, so do not spend memory and time.
+// (Go sync.Mutex is actually a semaphore -- it can be unlocked
+// in a different goroutine.)
+#if SANITIZER_GO
+MutexSet::MutexSet() {}
+void MutexSet::Add(u64 id, bool write, u64 epoch) {}
+void MutexSet::Del(u64 id, bool write) {}
+void MutexSet::Remove(u64 id) {}
+void MutexSet::AddAddr(uptr addr, StackID stack_id, bool write) {}
+void MutexSet::DelAddr(uptr addr, bool destroy) {}
+uptr MutexSet::Size() const { return 0; }
+MutexSet::Desc MutexSet::Get(uptr i) const { return Desc(); }
+DynamicMutexSet::DynamicMutexSet() : ptr_(&set_) {}
+DynamicMutexSet::~DynamicMutexSet() {}
+#endif
+
+} // namespace __tsan
+
+#endif // TSAN_MUTEXSET_H
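The comment above explains why DynamicMutexSet exists: a MutexSet with 16 descriptors is too large for the runtime's small stacks, so the wrapper heap-allocates one and releases it when the scope ends. A minimal sketch of that RAII shape using plain new/delete (the real class goes through New<>/DestroyAndFree and has the SANITIZER_GO special case):

  #include <cstdio>

  struct MutexSetLike { int size = 0; };  // stand-in for the real MutexSet

  // RAII holder mirroring DynamicMutexSet: allocate in the constructor,
  // release in the destructor, expose pointer-like access, forbid copying.
  class ScopedSet {
   public:
    ScopedSet() : ptr_(new MutexSetLike()) {}
    ~ScopedSet() { delete ptr_; }
    MutexSetLike *operator->() { return ptr_; }
    operator MutexSetLike *() { return ptr_; }
    ScopedSet(const ScopedSet &) = delete;
    ScopedSet &operator=(const ScopedSet &) = delete;
   private:
    MutexSetLike *ptr_;
  };

  int main() {
    ScopedSet mset;    // set lives on the heap, handle on the stack
    mset->size = 3;    // used through operator->
    std::printf("%d\n", mset->size);
  }                    // freed automatically here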
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_new_delete.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_new_delete.cpp
new file mode 100644
index 000000000000..fc44a5221b5b
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_new_delete.cpp
@@ -0,0 +1,199 @@
+//===-- tsan_new_delete.cpp ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Interceptors for operators new and delete.
+//===----------------------------------------------------------------------===//
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "tsan_interceptors.h"
+#include "tsan_rtl.h"
+
+using namespace __tsan;
+
+namespace std {
+struct nothrow_t {};
+enum class align_val_t: __sanitizer::uptr {};
+} // namespace std
+
+DECLARE_REAL(void *, malloc, uptr size)
+DECLARE_REAL(void, free, void *ptr)
+
+// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
+#define OPERATOR_NEW_BODY(mangled_name, nothrow) \
+ if (in_symbolizer()) \
+ return InternalAlloc(size); \
+ void *p = 0; \
+ { \
+ SCOPED_INTERCEPTOR_RAW(mangled_name, size); \
+ p = user_alloc(thr, pc, size); \
+ if (!nothrow && UNLIKELY(!p)) { \
+ GET_STACK_TRACE_FATAL(thr, pc); \
+ ReportOutOfMemory(size, &stack); \
+ } \
+ } \
+ invoke_malloc_hook(p, size); \
+ return p;
+
+#define OPERATOR_NEW_BODY_ALIGN(mangled_name, nothrow) \
+ if (in_symbolizer()) \
+ return InternalAlloc(size, nullptr, (uptr)align); \
+ void *p = 0; \
+ { \
+ SCOPED_INTERCEPTOR_RAW(mangled_name, size); \
+ p = user_memalign(thr, pc, (uptr)align, size); \
+ if (!nothrow && UNLIKELY(!p)) { \
+ GET_STACK_TRACE_FATAL(thr, pc); \
+ ReportOutOfMemory(size, &stack); \
+ } \
+ } \
+ invoke_malloc_hook(p, size); \
+ return p;
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new(__sanitizer::uptr size);
+void *operator new(__sanitizer::uptr size) {
+ OPERATOR_NEW_BODY(_Znwm, false /*nothrow*/);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new[](__sanitizer::uptr size);
+void *operator new[](__sanitizer::uptr size) {
+ OPERATOR_NEW_BODY(_Znam, false /*nothrow*/);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new(__sanitizer::uptr size, std::nothrow_t const&);
+void *operator new(__sanitizer::uptr size, std::nothrow_t const&) {
+ OPERATOR_NEW_BODY(_ZnwmRKSt9nothrow_t, true /*nothrow*/);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new[](__sanitizer::uptr size, std::nothrow_t const&);
+void *operator new[](__sanitizer::uptr size, std::nothrow_t const&) {
+ OPERATOR_NEW_BODY(_ZnamRKSt9nothrow_t, true /*nothrow*/);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new(__sanitizer::uptr size, std::align_val_t align);
+void *operator new(__sanitizer::uptr size, std::align_val_t align) {
+ OPERATOR_NEW_BODY_ALIGN(_ZnwmSt11align_val_t, false /*nothrow*/);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new[](__sanitizer::uptr size, std::align_val_t align);
+void *operator new[](__sanitizer::uptr size, std::align_val_t align) {
+ OPERATOR_NEW_BODY_ALIGN(_ZnamSt11align_val_t, false /*nothrow*/);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new(__sanitizer::uptr size, std::align_val_t align,
+ std::nothrow_t const&);
+void *operator new(__sanitizer::uptr size, std::align_val_t align,
+ std::nothrow_t const&) {
+ OPERATOR_NEW_BODY_ALIGN(_ZnwmSt11align_val_tRKSt9nothrow_t,
+ true /*nothrow*/);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new[](__sanitizer::uptr size, std::align_val_t align,
+ std::nothrow_t const&);
+void *operator new[](__sanitizer::uptr size, std::align_val_t align,
+ std::nothrow_t const&) {
+ OPERATOR_NEW_BODY_ALIGN(_ZnamSt11align_val_tRKSt9nothrow_t,
+ true /*nothrow*/);
+}
+
+#define OPERATOR_DELETE_BODY(mangled_name) \
+ if (ptr == 0) return; \
+ if (in_symbolizer()) \
+ return InternalFree(ptr); \
+ invoke_free_hook(ptr); \
+ SCOPED_INTERCEPTOR_RAW(mangled_name, ptr); \
+ user_free(thr, pc, ptr);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete(void *ptr) NOEXCEPT;
+void operator delete(void *ptr) NOEXCEPT {
+ OPERATOR_DELETE_BODY(_ZdlPv);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete[](void *ptr) NOEXCEPT;
+void operator delete[](void *ptr) NOEXCEPT {
+ OPERATOR_DELETE_BODY(_ZdaPv);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete(void *ptr, std::nothrow_t const&);
+void operator delete(void *ptr, std::nothrow_t const&) {
+ OPERATOR_DELETE_BODY(_ZdlPvRKSt9nothrow_t);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete[](void *ptr, std::nothrow_t const&);
+void operator delete[](void *ptr, std::nothrow_t const&) {
+ OPERATOR_DELETE_BODY(_ZdaPvRKSt9nothrow_t);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete(void *ptr, __sanitizer::uptr size) NOEXCEPT;
+void operator delete(void *ptr, __sanitizer::uptr size) NOEXCEPT {
+ OPERATOR_DELETE_BODY(_ZdlPvm);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete[](void *ptr, __sanitizer::uptr size) NOEXCEPT;
+void operator delete[](void *ptr, __sanitizer::uptr size) NOEXCEPT {
+ OPERATOR_DELETE_BODY(_ZdaPvm);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t align) NOEXCEPT;
+void operator delete(void *ptr, std::align_val_t align) NOEXCEPT {
+ OPERATOR_DELETE_BODY(_ZdlPvSt11align_val_t);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT;
+void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT {
+ OPERATOR_DELETE_BODY(_ZdaPvSt11align_val_t);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t align, std::nothrow_t const&);
+void operator delete(void *ptr, std::align_val_t align, std::nothrow_t const&) {
+ OPERATOR_DELETE_BODY(_ZdlPvSt11align_val_tRKSt9nothrow_t);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t align,
+ std::nothrow_t const&);
+void operator delete[](void *ptr, std::align_val_t align,
+ std::nothrow_t const&) {
+ OPERATOR_DELETE_BODY(_ZdaPvSt11align_val_tRKSt9nothrow_t);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete(void *ptr, __sanitizer::uptr size,
+ std::align_val_t align) NOEXCEPT;
+void operator delete(void *ptr, __sanitizer::uptr size,
+ std::align_val_t align) NOEXCEPT {
+ OPERATOR_DELETE_BODY(_ZdlPvmSt11align_val_t);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete[](void *ptr, __sanitizer::uptr size,
+ std::align_val_t align) NOEXCEPT;
+void operator delete[](void *ptr, __sanitizer::uptr size,
+ std::align_val_t align) NOEXCEPT {
+ OPERATOR_DELETE_BODY(_ZdaPvmSt11align_val_t);
+}
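All of the operator new replacements above funnel through one of two macro bodies whose only real difference is the nothrow flag: the throwing forms report and die on allocation failure (see the TODO about std::bad_alloc), while the nothrow forms return null. A reduced, self-contained sketch of that dispatch, with my_alloc and ReportOOM as hypothetical stand-ins for user_alloc and the sanitizer's OOM report:

  #include <cstdio>
  #include <cstdlib>
  #include <new>

  // Hypothetical stand-ins for user_alloc and the OOM reporting path.
  static void *my_alloc(size_t size) {
    return size > (1u << 30) ? nullptr : std::malloc(size);
  }
  static void ReportOOM(size_t size) {
    std::fprintf(stderr, "allocation of %zu bytes failed\n", size);
    std::abort();  // the real runtime also dies here instead of throwing
  }

  // Shared body: the throwing operator aborts on failure, the nothrow one
  // just returns null, mirroring the OPERATOR_NEW_BODY `nothrow` flag.
  static void *new_impl(size_t size, bool nothrow) {
    void *p = my_alloc(size);
    if (!nothrow && p == nullptr)
      ReportOOM(size);
    return p;
  }

  void *operator new(size_t size) { return new_impl(size, /*nothrow=*/false); }
  void *operator new(size_t size, const std::nothrow_t &) noexcept {
    return new_impl(size, /*nothrow=*/true);
  }
  void operator delete(void *p) noexcept { std::free(p); }
  void operator delete(void *p, size_t) noexcept { std::free(p); }

  int main() {
    int *p = new int(7);              // throwing form
    int *q = new (std::nothrow) int;  // nothrow form
    delete p;
    delete q;
  }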
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_platform.h b/compiler-rt/lib/tsan/rtl-old/tsan_platform.h
new file mode 100644
index 000000000000..7ff0acace8f6
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_platform.h
@@ -0,0 +1,988 @@
+//===-- tsan_platform.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Platform-specific code.
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_PLATFORM_H
+#define TSAN_PLATFORM_H
+
+#if !defined(__LP64__) && !defined(_WIN64)
+# error "Only 64-bit is supported"
+#endif
+
+#include "tsan_defs.h"
+#include "tsan_trace.h"
+
+namespace __tsan {
+
+enum {
+ // App memory is not mapped onto shadow memory range.
+ kBrokenMapping = 1 << 0,
+ // Mapping app memory and back does not produce the same address,
+ // this can lead to wrong addresses in reports and potentially
+ // other bad consequences.
+ kBrokenReverseMapping = 1 << 1,
+ // Mapping is non-linear for linear user range.
+ // This is bad and can lead to unpredictable memory corruptions, etc
+ // because range access functions assume linearity.
+ kBrokenLinearity = 1 << 2,
+};
+
+/*
+C/C++ on linux/x86_64 and freebsd/x86_64
+0000 0000 1000 - 0080 0000 0000: main binary and/or MAP_32BIT mappings (512GB)
+0080 0000 0000 - 0100 0000 0000: -
+0100 0000 0000 - 2000 0000 0000: shadow
+2000 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
+4000 0000 0000 - 5500 0000 0000: -
+5500 0000 0000 - 5680 0000 0000: pie binaries without ASLR or on 4.1+ kernels
+5680 0000 0000 - 6000 0000 0000: -
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 7b00 0000 0000: -
+7b00 0000 0000 - 7c00 0000 0000: heap
+7c00 0000 0000 - 7e80 0000 0000: -
+7e80 0000 0000 - 8000 0000 0000: modules and main thread stack
+
+C/C++ on netbsd/amd64 can reuse the same mapping:
+ * The address space starts from 0x1000 (option with 0x0) and ends with
+ 0x7f7ffffff000.
+ * LoAppMem-kHeapMemEnd can be reused as it is.
+ * No VDSO support.
+ * No MidAppMem region.
+ * No additional HeapMem region.
+ * HiAppMem contains the stack, loader, shared libraries and heap.
+ * The stack on NetBSD/amd64 has a prereserved 128MB region.
+ * Heap grows downwards (top-down).
+ * ASLR must be disabled per-process or globally.
+*/
+struct Mapping48AddressSpace {
+ static const uptr kMetaShadowBeg = 0x300000000000ull;
+ static const uptr kMetaShadowEnd = 0x340000000000ull;
+ static const uptr kTraceMemBeg = 0x600000000000ull;
+ static const uptr kTraceMemEnd = 0x620000000000ull;
+ static const uptr kShadowBeg = 0x010000000000ull;
+ static const uptr kShadowEnd = 0x200000000000ull;
+ static const uptr kHeapMemBeg = 0x7b0000000000ull;
+ static const uptr kHeapMemEnd = 0x7c0000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x008000000000ull;
+ static const uptr kMidAppMemBeg = 0x550000000000ull;
+ static const uptr kMidAppMemEnd = 0x568000000000ull;
+ static const uptr kHiAppMemBeg = 0x7e8000000000ull;
+ static const uptr kHiAppMemEnd = 0x800000000000ull;
+ static const uptr kShadowMsk = 0x780000000000ull;
+ static const uptr kShadowXor = 0x040000000000ull;
+ static const uptr kShadowAdd = 0x000000000000ull;
+ static const uptr kVdsoBeg = 0xf000000000000000ull;
+};
+
+/*
+C/C++ on linux/mips64 (40-bit VMA)
+0000 0000 00 - 0100 0000 00: - (4 GB)
+0100 0000 00 - 0200 0000 00: main binary (4 GB)
+0200 0000 00 - 2000 0000 00: - (120 GB)
+2000 0000 00 - 4000 0000 00: shadow (128 GB)
+4000 0000 00 - 5000 0000 00: metainfo (memory blocks and sync objects) (64 GB)
+5000 0000 00 - aa00 0000 00: - (360 GB)
+aa00 0000 00 - ab00 0000 00: main binary (PIE) (4 GB)
+ab00 0000 00 - b000 0000 00: - (20 GB)
+b000 0000 00 - b200 0000 00: traces (8 GB)
+b200 0000 00 - fe00 0000 00: - (304 GB)
+fe00 0000 00 - ff00 0000 00: heap (4 GB)
+ff00 0000 00 - ff80 0000 00: - (2 GB)
+ff80 0000 00 - ffff ffff ff: modules and main thread stack (<2 GB)
+*/
+struct MappingMips64_40 {
+ static const uptr kMetaShadowBeg = 0x4000000000ull;
+ static const uptr kMetaShadowEnd = 0x5000000000ull;
+ static const uptr kTraceMemBeg = 0xb000000000ull;
+ static const uptr kTraceMemEnd = 0xb200000000ull;
+ static const uptr kShadowBeg = 0x2000000000ull;
+ static const uptr kShadowEnd = 0x4000000000ull;
+ static const uptr kHeapMemBeg = 0xfe00000000ull;
+ static const uptr kHeapMemEnd = 0xff00000000ull;
+ static const uptr kLoAppMemBeg = 0x0100000000ull;
+ static const uptr kLoAppMemEnd = 0x0200000000ull;
+ static const uptr kMidAppMemBeg = 0xaa00000000ull;
+ static const uptr kMidAppMemEnd = 0xab00000000ull;
+ static const uptr kHiAppMemBeg = 0xff80000000ull;
+ static const uptr kHiAppMemEnd = 0xffffffffffull;
+ static const uptr kShadowMsk = 0xf800000000ull;
+ static const uptr kShadowXor = 0x0800000000ull;
+ static const uptr kShadowAdd = 0x0000000000ull;
+ static const uptr kVdsoBeg = 0xfffff00000ull;
+};
+
+/*
+C/C++ on Darwin/iOS/ARM64 (36-bit VMA, 64 GB VM)
+0000 0000 00 - 0100 0000 00: - (4 GB)
+0100 0000 00 - 0200 0000 00: main binary, modules, thread stacks (4 GB)
+0200 0000 00 - 0300 0000 00: heap (4 GB)
+0300 0000 00 - 0400 0000 00: - (4 GB)
+0400 0000 00 - 0c00 0000 00: shadow memory (32 GB)
+0c00 0000 00 - 0d00 0000 00: - (4 GB)
+0d00 0000 00 - 0e00 0000 00: metainfo (4 GB)
+0e00 0000 00 - 0f00 0000 00: - (4 GB)
+0f00 0000 00 - 0fc0 0000 00: traces (3 GB)
+0fc0 0000 00 - 1000 0000 00: -
+*/
+struct MappingAppleAarch64 {
+ static const uptr kLoAppMemBeg = 0x0100000000ull;
+ static const uptr kLoAppMemEnd = 0x0200000000ull;
+ static const uptr kHeapMemBeg = 0x0200000000ull;
+ static const uptr kHeapMemEnd = 0x0300000000ull;
+ static const uptr kShadowBeg = 0x0400000000ull;
+ static const uptr kShadowEnd = 0x0c00000000ull;
+ static const uptr kMetaShadowBeg = 0x0d00000000ull;
+ static const uptr kMetaShadowEnd = 0x0e00000000ull;
+ static const uptr kTraceMemBeg = 0x0f00000000ull;
+ static const uptr kTraceMemEnd = 0x0fc0000000ull;
+ static const uptr kHiAppMemBeg = 0x0fc0000000ull;
+ static const uptr kHiAppMemEnd = 0x0fc0000000ull;
+ static const uptr kShadowMsk = 0x0ull;
+ static const uptr kShadowXor = 0x0ull;
+ static const uptr kShadowAdd = 0x0ull;
+ static const uptr kVdsoBeg = 0x7000000000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+};
+
+/*
+C/C++ on linux/aarch64 (39-bit VMA)
+0000 0010 00 - 0100 0000 00: main binary
+0100 0000 00 - 0800 0000 00: -
+0800 0000 00 - 2000 0000 00: shadow memory
+2000 0000 00 - 3100 0000 00: -
+3100 0000 00 - 3400 0000 00: metainfo
+3400 0000 00 - 5500 0000 00: -
+5500 0000 00 - 5600 0000 00: main binary (PIE)
+5600 0000 00 - 6000 0000 00: -
+6000 0000 00 - 6200 0000 00: traces
+6200 0000 00 - 7c00 0000 00: -
+7c00 0000 00 - 7d00 0000 00: heap
+7d00 0000 00 - 7fff ffff ff: modules and main thread stack
+*/
+struct MappingAarch64_39 {
+ static const uptr kLoAppMemBeg = 0x0000001000ull;
+ static const uptr kLoAppMemEnd = 0x0100000000ull;
+ static const uptr kShadowBeg = 0x0800000000ull;
+ static const uptr kShadowEnd = 0x2000000000ull;
+ static const uptr kMetaShadowBeg = 0x3100000000ull;
+ static const uptr kMetaShadowEnd = 0x3400000000ull;
+ static const uptr kMidAppMemBeg = 0x5500000000ull;
+ static const uptr kMidAppMemEnd = 0x5600000000ull;
+ static const uptr kTraceMemBeg = 0x6000000000ull;
+ static const uptr kTraceMemEnd = 0x6200000000ull;
+ static const uptr kHeapMemBeg = 0x7c00000000ull;
+ static const uptr kHeapMemEnd = 0x7d00000000ull;
+ static const uptr kHiAppMemBeg = 0x7e00000000ull;
+ static const uptr kHiAppMemEnd = 0x7fffffffffull;
+ static const uptr kShadowMsk = 0x7800000000ull;
+ static const uptr kShadowXor = 0x0200000000ull;
+ static const uptr kShadowAdd = 0x0000000000ull;
+ static const uptr kVdsoBeg = 0x7f00000000ull;
+};
+
+/*
+C/C++ on linux/aarch64 (42-bit VMA)
+00000 0010 00 - 01000 0000 00: main binary
+01000 0000 00 - 10000 0000 00: -
+10000 0000 00 - 20000 0000 00: shadow memory
+20000 0000 00 - 26000 0000 00: -
+26000 0000 00 - 28000 0000 00: metainfo
+28000 0000 00 - 2aa00 0000 00: -
+2aa00 0000 00 - 2ab00 0000 00: main binary (PIE)
+2ab00 0000 00 - 36200 0000 00: -
+36200 0000 00 - 36240 0000 00: traces
+36240 0000 00 - 3e000 0000 00: -
+3e000 0000 00 - 3f000 0000 00: heap
+3f000 0000 00 - 3ffff ffff ff: modules and main thread stack
+*/
+struct MappingAarch64_42 {
+ static const uptr kBroken = kBrokenReverseMapping;
+ static const uptr kLoAppMemBeg = 0x00000001000ull;
+ static const uptr kLoAppMemEnd = 0x01000000000ull;
+ static const uptr kShadowBeg = 0x10000000000ull;
+ static const uptr kShadowEnd = 0x20000000000ull;
+ static const uptr kMetaShadowBeg = 0x26000000000ull;
+ static const uptr kMetaShadowEnd = 0x28000000000ull;
+ static const uptr kMidAppMemBeg = 0x2aa00000000ull;
+ static const uptr kMidAppMemEnd = 0x2ab00000000ull;
+ static const uptr kTraceMemBeg = 0x36200000000ull;
+ static const uptr kTraceMemEnd = 0x36400000000ull;
+ static const uptr kHeapMemBeg = 0x3e000000000ull;
+ static const uptr kHeapMemEnd = 0x3f000000000ull;
+ static const uptr kHiAppMemBeg = 0x3f000000000ull;
+ static const uptr kHiAppMemEnd = 0x3ffffffffffull;
+ static const uptr kShadowMsk = 0x3c000000000ull;
+ static const uptr kShadowXor = 0x04000000000ull;
+ static const uptr kShadowAdd = 0x00000000000ull;
+ static const uptr kVdsoBeg = 0x37f00000000ull;
+};
+
+struct MappingAarch64_48 {
+ static const uptr kLoAppMemBeg = 0x0000000001000ull;
+ static const uptr kLoAppMemEnd = 0x0000200000000ull;
+ static const uptr kShadowBeg = 0x0002000000000ull;
+ static const uptr kShadowEnd = 0x0004000000000ull;
+ static const uptr kMetaShadowBeg = 0x0005000000000ull;
+ static const uptr kMetaShadowEnd = 0x0006000000000ull;
+ static const uptr kMidAppMemBeg = 0x0aaaa00000000ull;
+ static const uptr kMidAppMemEnd = 0x0aaaf00000000ull;
+ static const uptr kTraceMemBeg = 0x0f06000000000ull;
+ static const uptr kTraceMemEnd = 0x0f06200000000ull;
+ static const uptr kHeapMemBeg = 0x0ffff00000000ull;
+ static const uptr kHeapMemEnd = 0x0ffff00000000ull;
+ static const uptr kHiAppMemBeg = 0x0ffff00000000ull;
+ static const uptr kHiAppMemEnd = 0x1000000000000ull;
+ static const uptr kShadowMsk = 0x0fff800000000ull;
+ static const uptr kShadowXor = 0x0000800000000ull;
+ static const uptr kShadowAdd = 0x0000000000000ull;
+ static const uptr kVdsoBeg = 0xffff000000000ull;
+};
+
+/*
+C/C++ on linux/powerpc64 (44-bit VMA)
+0000 0000 0100 - 0001 0000 0000: main binary
+0001 0000 0000 - 0001 0000 0000: -
+0001 0000 0000 - 0b00 0000 0000: shadow
+0b00 0000 0000 - 0b00 0000 0000: -
+0b00 0000 0000 - 0d00 0000 0000: metainfo (memory blocks and sync objects)
+0d00 0000 0000 - 0d00 0000 0000: -
+0d00 0000 0000 - 0f00 0000 0000: traces
+0f00 0000 0000 - 0f00 0000 0000: -
+0f00 0000 0000 - 0f50 0000 0000: heap
+0f50 0000 0000 - 0f60 0000 0000: -
+0f60 0000 0000 - 1000 0000 0000: modules and main thread stack
+*/
+struct MappingPPC64_44 {
+ static const uptr kBroken =
+ kBrokenMapping | kBrokenReverseMapping | kBrokenLinearity;
+ static const uptr kMetaShadowBeg = 0x0b0000000000ull;
+ static const uptr kMetaShadowEnd = 0x0d0000000000ull;
+ static const uptr kTraceMemBeg = 0x0d0000000000ull;
+ static const uptr kTraceMemEnd = 0x0f0000000000ull;
+ static const uptr kShadowBeg = 0x000100000000ull;
+ static const uptr kShadowEnd = 0x0b0000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000000100ull;
+ static const uptr kLoAppMemEnd = 0x000100000000ull;
+ static const uptr kHeapMemBeg = 0x0f0000000000ull;
+ static const uptr kHeapMemEnd = 0x0f5000000000ull;
+ static const uptr kHiAppMemBeg = 0x0f6000000000ull;
+ static const uptr kHiAppMemEnd = 0x100000000000ull; // 44 bits
+ static const uptr kShadowMsk = 0x0f0000000000ull;
+ static const uptr kShadowXor = 0x002100000000ull;
+ static const uptr kShadowAdd = 0x000000000000ull;
+ static const uptr kVdsoBeg = 0x3c0000000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+};
+
+/*
+C/C++ on linux/powerpc64 (46-bit VMA)
+0000 0000 1000 - 0100 0000 0000: main binary
+0100 0000 0000 - 0200 0000 0000: -
+0100 0000 0000 - 1000 0000 0000: shadow
+1000 0000 0000 - 1000 0000 0000: -
+1000 0000 0000 - 2000 0000 0000: metainfo (memory blocks and sync objects)
+2000 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 2200 0000 0000: traces
+2200 0000 0000 - 3d00 0000 0000: -
+3d00 0000 0000 - 3e00 0000 0000: heap
+3e00 0000 0000 - 3e80 0000 0000: -
+3e80 0000 0000 - 4000 0000 0000: modules and main thread stack
+*/
+struct MappingPPC64_46 {
+ static const uptr kMetaShadowBeg = 0x100000000000ull;
+ static const uptr kMetaShadowEnd = 0x200000000000ull;
+ static const uptr kTraceMemBeg = 0x200000000000ull;
+ static const uptr kTraceMemEnd = 0x220000000000ull;
+ static const uptr kShadowBeg = 0x010000000000ull;
+ static const uptr kShadowEnd = 0x100000000000ull;
+ static const uptr kHeapMemBeg = 0x3d0000000000ull;
+ static const uptr kHeapMemEnd = 0x3e0000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x010000000000ull;
+ static const uptr kHiAppMemBeg = 0x3e8000000000ull;
+ static const uptr kHiAppMemEnd = 0x400000000000ull; // 46 bits
+ static const uptr kShadowMsk = 0x3c0000000000ull;
+ static const uptr kShadowXor = 0x020000000000ull;
+ static const uptr kShadowAdd = 0x000000000000ull;
+ static const uptr kVdsoBeg = 0x7800000000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+};
+
+/*
+C/C++ on linux/powerpc64 (47-bit VMA)
+0000 0000 1000 - 0100 0000 0000: main binary
+0100 0000 0000 - 0200 0000 0000: -
+0100 0000 0000 - 1000 0000 0000: shadow
+1000 0000 0000 - 1000 0000 0000: -
+1000 0000 0000 - 2000 0000 0000: metainfo (memory blocks and sync objects)
+2000 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 2200 0000 0000: traces
+2200 0000 0000 - 7d00 0000 0000: -
+7d00 0000 0000 - 7e00 0000 0000: heap
+7e00 0000 0000 - 7e80 0000 0000: -
+7e80 0000 0000 - 8000 0000 0000: modules and main thread stack
+*/
+struct MappingPPC64_47 {
+ static const uptr kMetaShadowBeg = 0x100000000000ull;
+ static const uptr kMetaShadowEnd = 0x200000000000ull;
+ static const uptr kTraceMemBeg = 0x200000000000ull;
+ static const uptr kTraceMemEnd = 0x220000000000ull;
+ static const uptr kShadowBeg = 0x010000000000ull;
+ static const uptr kShadowEnd = 0x100000000000ull;
+ static const uptr kHeapMemBeg = 0x7d0000000000ull;
+ static const uptr kHeapMemEnd = 0x7e0000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x010000000000ull;
+ static const uptr kHiAppMemBeg = 0x7e8000000000ull;
+ static const uptr kHiAppMemEnd = 0x800000000000ull; // 47 bits
+ static const uptr kShadowMsk = 0x7c0000000000ull;
+ static const uptr kShadowXor = 0x020000000000ull;
+ static const uptr kShadowAdd = 0x000000000000ull;
+ static const uptr kVdsoBeg = 0x7800000000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+};
+
+/*
+C/C++ on linux/s390x
+While the kernel provides a 64-bit address space, we have to restrict ourselves
+to 48 bits due to how e.g. SyncVar::GetId() works.
+0000 0000 1000 - 0e00 0000 0000: binary, modules, stacks - 14 TiB
+0e00 0000 0000 - 4000 0000 0000: -
+4000 0000 0000 - 8000 0000 0000: shadow - 64TiB (4 * app)
+8000 0000 0000 - 9000 0000 0000: -
+9000 0000 0000 - 9800 0000 0000: metainfo - 8TiB (0.5 * app)
+9800 0000 0000 - a000 0000 0000: -
+a000 0000 0000 - b000 0000 0000: traces - 16TiB (max history * 128k threads)
+b000 0000 0000 - be00 0000 0000: -
+be00 0000 0000 - c000 0000 0000: heap - 2TiB (max supported by the allocator)
+*/
+struct MappingS390x {
+ static const uptr kMetaShadowBeg = 0x900000000000ull;
+ static const uptr kMetaShadowEnd = 0x980000000000ull;
+ static const uptr kTraceMemBeg = 0xa00000000000ull;
+ static const uptr kTraceMemEnd = 0xb00000000000ull;
+ static const uptr kShadowBeg = 0x400000000000ull;
+ static const uptr kShadowEnd = 0x800000000000ull;
+ static const uptr kHeapMemBeg = 0xbe0000000000ull;
+ static const uptr kHeapMemEnd = 0xc00000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x0e0000000000ull;
+ static const uptr kHiAppMemBeg = 0xc00000004000ull;
+ static const uptr kHiAppMemEnd = 0xc00000004000ull;
+ static const uptr kShadowMsk = 0xb00000000000ull;
+ static const uptr kShadowXor = 0x100000000000ull;
+ static const uptr kShadowAdd = 0x000000000000ull;
+ static const uptr kVdsoBeg = 0xfffffffff000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+};
+
+/* Go on linux, darwin and freebsd on x86_64
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00c0 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 2380 0000 0000: shadow
+2380 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
+4000 0000 0000 - 6000 0000 0000: -
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 8000 0000 0000: -
+*/
+
+struct MappingGo48 {
+ static const uptr kMetaShadowBeg = 0x300000000000ull;
+ static const uptr kMetaShadowEnd = 0x400000000000ull;
+ static const uptr kTraceMemBeg = 0x600000000000ull;
+ static const uptr kTraceMemEnd = 0x620000000000ull;
+ static const uptr kShadowBeg = 0x200000000000ull;
+ static const uptr kShadowEnd = 0x238000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x00e000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+ static const uptr kHiAppMemBeg = 0;
+ static const uptr kHiAppMemEnd = 0;
+ static const uptr kHeapMemBeg = 0;
+ static const uptr kHeapMemEnd = 0;
+ static const uptr kVdsoBeg = 0;
+ static const uptr kShadowMsk = 0;
+ static const uptr kShadowXor = 0;
+ static const uptr kShadowAdd = 0x200000000000ull;
+};
+
+/* Go on windows
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00f8 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 0100 0000 0000: -
+0100 0000 0000 - 0500 0000 0000: shadow
+0500 0000 0000 - 0700 0000 0000: traces
+0700 0000 0000 - 0770 0000 0000: metainfo (memory blocks and sync objects)
+07d0 0000 0000 - 8000 0000 0000: -
+*/
+
+struct MappingGoWindows {
+ static const uptr kMetaShadowBeg = 0x070000000000ull;
+ static const uptr kMetaShadowEnd = 0x077000000000ull;
+ static const uptr kTraceMemBeg = 0x050000000000ull;
+ static const uptr kTraceMemEnd = 0x070000000000ull;
+ static const uptr kShadowBeg = 0x010000000000ull;
+ static const uptr kShadowEnd = 0x050000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x00e000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+ static const uptr kHiAppMemBeg = 0;
+ static const uptr kHiAppMemEnd = 0;
+ static const uptr kHeapMemBeg = 0;
+ static const uptr kHeapMemEnd = 0;
+ static const uptr kVdsoBeg = 0;
+ static const uptr kShadowMsk = 0;
+ static const uptr kShadowXor = 0;
+ static const uptr kShadowAdd = 0x010000000000ull;
+};
+
+/* Go on linux/powerpc64 (46-bit VMA)
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00c0 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 2380 0000 0000: shadow
+2380 0000 0000 - 2400 0000 0000: -
+2400 0000 0000 - 3400 0000 0000: metainfo (memory blocks and sync objects)
+3400 0000 0000 - 3600 0000 0000: -
+3600 0000 0000 - 3800 0000 0000: traces
+3800 0000 0000 - 4000 0000 0000: -
+*/
+
+struct MappingGoPPC64_46 {
+ static const uptr kMetaShadowBeg = 0x240000000000ull;
+ static const uptr kMetaShadowEnd = 0x340000000000ull;
+ static const uptr kTraceMemBeg = 0x360000000000ull;
+ static const uptr kTraceMemEnd = 0x380000000000ull;
+ static const uptr kShadowBeg = 0x200000000000ull;
+ static const uptr kShadowEnd = 0x238000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x00e000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+ static const uptr kHiAppMemBeg = 0;
+ static const uptr kHiAppMemEnd = 0;
+ static const uptr kHeapMemBeg = 0;
+ static const uptr kHeapMemEnd = 0;
+ static const uptr kVdsoBeg = 0;
+ static const uptr kShadowMsk = 0;
+ static const uptr kShadowXor = 0;
+ static const uptr kShadowAdd = 0x200000000000ull;
+};
+
+/* Go on linux/powerpc64 (47-bit VMA)
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00c0 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 3000 0000 0000: shadow
+3000 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
+4000 0000 0000 - 6000 0000 0000: -
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 8000 0000 0000: -
+*/
+
+struct MappingGoPPC64_47 {
+ static const uptr kMetaShadowBeg = 0x300000000000ull;
+ static const uptr kMetaShadowEnd = 0x400000000000ull;
+ static const uptr kTraceMemBeg = 0x600000000000ull;
+ static const uptr kTraceMemEnd = 0x620000000000ull;
+ static const uptr kShadowBeg = 0x200000000000ull;
+ static const uptr kShadowEnd = 0x300000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x00e000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+ static const uptr kHiAppMemBeg = 0;
+ static const uptr kHiAppMemEnd = 0;
+ static const uptr kHeapMemBeg = 0;
+ static const uptr kHeapMemEnd = 0;
+ static const uptr kVdsoBeg = 0;
+ static const uptr kShadowMsk = 0;
+ static const uptr kShadowXor = 0;
+ static const uptr kShadowAdd = 0x200000000000ull;
+};
+
+/* Go on linux/aarch64 (48-bit VMA) and darwin/aarch64 (47-bit VMA)
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00c0 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 3000 0000 0000: shadow
+3000 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
+4000 0000 0000 - 6000 0000 0000: -
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 8000 0000 0000: -
+*/
+struct MappingGoAarch64 {
+ static const uptr kMetaShadowBeg = 0x300000000000ull;
+ static const uptr kMetaShadowEnd = 0x400000000000ull;
+ static const uptr kTraceMemBeg = 0x600000000000ull;
+ static const uptr kTraceMemEnd = 0x620000000000ull;
+ static const uptr kShadowBeg = 0x200000000000ull;
+ static const uptr kShadowEnd = 0x300000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x00e000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+ static const uptr kHiAppMemBeg = 0;
+ static const uptr kHiAppMemEnd = 0;
+ static const uptr kHeapMemBeg = 0;
+ static const uptr kHeapMemEnd = 0;
+ static const uptr kVdsoBeg = 0;
+ static const uptr kShadowMsk = 0;
+ static const uptr kShadowXor = 0;
+ static const uptr kShadowAdd = 0x200000000000ull;
+};
+
+/*
+Go on linux/mips64 (47-bit VMA)
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00c0 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 3000 0000 0000: shadow
+3000 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
+4000 0000 0000 - 6000 0000 0000: -
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 8000 0000 0000: -
+*/
+struct MappingGoMips64_47 {
+ static const uptr kMetaShadowBeg = 0x300000000000ull;
+ static const uptr kMetaShadowEnd = 0x400000000000ull;
+ static const uptr kTraceMemBeg = 0x600000000000ull;
+ static const uptr kTraceMemEnd = 0x620000000000ull;
+ static const uptr kShadowBeg = 0x200000000000ull;
+ static const uptr kShadowEnd = 0x300000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x00e000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+ static const uptr kHiAppMemBeg = 0;
+ static const uptr kHiAppMemEnd = 0;
+ static const uptr kHeapMemBeg = 0;
+ static const uptr kHeapMemEnd = 0;
+ static const uptr kVdsoBeg = 0;
+ static const uptr kShadowMsk = 0;
+ static const uptr kShadowXor = 0;
+ static const uptr kShadowAdd = 0x200000000000ull;
+};
+
+/*
+Go on linux/s390x
+0000 0000 1000 - 1000 0000 0000: executable and heap - 16 TiB
+1000 0000 0000 - 4000 0000 0000: -
+4000 0000 0000 - 8000 0000 0000: shadow - 64TiB (4 * app)
+8000 0000 0000 - 9000 0000 0000: -
+9000 0000 0000 - 9800 0000 0000: metainfo - 8TiB (0.5 * app)
+9800 0000 0000 - a000 0000 0000: -
+a000 0000 0000 - b000 0000 0000: traces - 16TiB (max history * 128k threads)
+*/
+struct MappingGoS390x {
+ static const uptr kMetaShadowBeg = 0x900000000000ull;
+ static const uptr kMetaShadowEnd = 0x980000000000ull;
+ static const uptr kTraceMemBeg = 0xa00000000000ull;
+ static const uptr kTraceMemEnd = 0xb00000000000ull;
+ static const uptr kShadowBeg = 0x400000000000ull;
+ static const uptr kShadowEnd = 0x800000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x100000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+ static const uptr kHiAppMemBeg = 0;
+ static const uptr kHiAppMemEnd = 0;
+ static const uptr kHeapMemBeg = 0;
+ static const uptr kHeapMemEnd = 0;
+ static const uptr kVdsoBeg = 0;
+ static const uptr kShadowMsk = 0;
+ static const uptr kShadowXor = 0;
+ static const uptr kShadowAdd = 0x400000000000ull;
+};
+
+extern uptr vmaSize;
+
+template <typename Func, typename Arg>
+ALWAYS_INLINE auto SelectMapping(Arg arg) {
+#if SANITIZER_GO
+# if defined(__powerpc64__)
+ switch (vmaSize) {
+ case 46:
+ return Func::template Apply<MappingGoPPC64_46>(arg);
+ case 47:
+ return Func::template Apply<MappingGoPPC64_47>(arg);
+ }
+# elif defined(__mips64)
+ return Func::template Apply<MappingGoMips64_47>(arg);
+# elif defined(__s390x__)
+ return Func::template Apply<MappingGoS390x>(arg);
+# elif defined(__aarch64__)
+ return Func::template Apply<MappingGoAarch64>(arg);
+# elif SANITIZER_WINDOWS
+ return Func::template Apply<MappingGoWindows>(arg);
+# else
+ return Func::template Apply<MappingGo48>(arg);
+# endif
+#else // SANITIZER_GO
+# if defined(__x86_64__) || SANITIZER_IOSSIM || SANITIZER_MAC && !SANITIZER_IOS
+ return Func::template Apply<Mapping48AddressSpace>(arg);
+# elif defined(__aarch64__) && defined(__APPLE__)
+ return Func::template Apply<MappingAppleAarch64>(arg);
+# elif defined(__aarch64__) && !defined(__APPLE__)
+ switch (vmaSize) {
+ case 39:
+ return Func::template Apply<MappingAarch64_39>(arg);
+ case 42:
+ return Func::template Apply<MappingAarch64_42>(arg);
+ case 48:
+ return Func::template Apply<MappingAarch64_48>(arg);
+ }
+# elif defined(__powerpc64__)
+ switch (vmaSize) {
+ case 44:
+ return Func::template Apply<MappingPPC64_44>(arg);
+ case 46:
+ return Func::template Apply<MappingPPC64_46>(arg);
+ case 47:
+ return Func::template Apply<MappingPPC64_47>(arg);
+ }
+# elif defined(__mips64)
+ return Func::template Apply<MappingMips64_40>(arg);
+# elif defined(__s390x__)
+ return Func::template Apply<MappingS390x>(arg);
+# else
+# error "unsupported platform"
+# endif
+#endif
+ Die();
+}
+
+template <typename Func>
+void ForEachMapping() {
+ Func::template Apply<Mapping48AddressSpace>();
+ Func::template Apply<MappingMips64_40>();
+ Func::template Apply<MappingAppleAarch64>();
+ Func::template Apply<MappingAarch64_39>();
+ Func::template Apply<MappingAarch64_42>();
+ Func::template Apply<MappingAarch64_48>();
+ Func::template Apply<MappingPPC64_44>();
+ Func::template Apply<MappingPPC64_46>();
+ Func::template Apply<MappingPPC64_47>();
+ Func::template Apply<MappingS390x>();
+ Func::template Apply<MappingGo48>();
+ Func::template Apply<MappingGoWindows>();
+ Func::template Apply<MappingGoPPC64_46>();
+ Func::template Apply<MappingGoPPC64_47>();
+ Func::template Apply<MappingGoAarch64>();
+ Func::template Apply<MappingGoMips64_47>();
+ Func::template Apply<MappingGoS390x>();
+}
+
+enum MappingType {
+ kLoAppMemBeg,
+ kLoAppMemEnd,
+ kHiAppMemBeg,
+ kHiAppMemEnd,
+ kMidAppMemBeg,
+ kMidAppMemEnd,
+ kHeapMemBeg,
+ kHeapMemEnd,
+ kShadowBeg,
+ kShadowEnd,
+ kMetaShadowBeg,
+ kMetaShadowEnd,
+ kTraceMemBeg,
+ kTraceMemEnd,
+ kVdsoBeg,
+};
+
+struct MappingField {
+ template <typename Mapping>
+ static uptr Apply(MappingType type) {
+ switch (type) {
+ case kLoAppMemBeg:
+ return Mapping::kLoAppMemBeg;
+ case kLoAppMemEnd:
+ return Mapping::kLoAppMemEnd;
+ case kMidAppMemBeg:
+ return Mapping::kMidAppMemBeg;
+ case kMidAppMemEnd:
+ return Mapping::kMidAppMemEnd;
+ case kHiAppMemBeg:
+ return Mapping::kHiAppMemBeg;
+ case kHiAppMemEnd:
+ return Mapping::kHiAppMemEnd;
+ case kHeapMemBeg:
+ return Mapping::kHeapMemBeg;
+ case kHeapMemEnd:
+ return Mapping::kHeapMemEnd;
+ case kVdsoBeg:
+ return Mapping::kVdsoBeg;
+ case kShadowBeg:
+ return Mapping::kShadowBeg;
+ case kShadowEnd:
+ return Mapping::kShadowEnd;
+ case kMetaShadowBeg:
+ return Mapping::kMetaShadowBeg;
+ case kMetaShadowEnd:
+ return Mapping::kMetaShadowEnd;
+ case kTraceMemBeg:
+ return Mapping::kTraceMemBeg;
+ case kTraceMemEnd:
+ return Mapping::kTraceMemEnd;
+ }
+ Die();
+ }
+};
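SelectMapping and MappingField together implement a small dispatch idiom: a stateless functor with a templated static Apply is instantiated for whichever mapping struct matches the runtime-detected VMA size, so each accessor below compiles down to a constant load for a fixed platform. A reduced standalone sketch of the same idiom (the toy mapping structs and the 39/48 switch are illustrative, not the runtime's):

  #include <cstdint>
  #include <cstdio>

  // Two toy mapping structs standing in for the platform-specific ones above.
  struct MappingSmall { static const uint64_t kShadowBeg = 0x1000; };
  struct MappingLarge { static const uint64_t kShadowBeg = 0x100000; };

  int vmaSize = 48;  // pretend runtime-detected VMA size

  // Functor applied to whichever mapping SelectMapping picks.
  struct ShadowBegField {
    template <typename Mapping>
    static uint64_t Apply(int /*unused*/) { return Mapping::kShadowBeg; }
  };

  template <typename Func, typename Arg>
  auto SelectMapping(Arg arg) {
    switch (vmaSize) {
      case 39: return Func::template Apply<MappingSmall>(arg);
      default: return Func::template Apply<MappingLarge>(arg);
    }
  }

  int main() {
    std::printf("shadow begins at 0x%llx\n",
                (unsigned long long)SelectMapping<ShadowBegField>(0));
  }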
+
+ALWAYS_INLINE
+uptr LoAppMemBeg(void) { return SelectMapping<MappingField>(kLoAppMemBeg); }
+ALWAYS_INLINE
+uptr LoAppMemEnd(void) { return SelectMapping<MappingField>(kLoAppMemEnd); }
+
+ALWAYS_INLINE
+uptr MidAppMemBeg(void) { return SelectMapping<MappingField>(kMidAppMemBeg); }
+ALWAYS_INLINE
+uptr MidAppMemEnd(void) { return SelectMapping<MappingField>(kMidAppMemEnd); }
+
+ALWAYS_INLINE
+uptr HeapMemBeg(void) { return SelectMapping<MappingField>(kHeapMemBeg); }
+ALWAYS_INLINE
+uptr HeapMemEnd(void) { return SelectMapping<MappingField>(kHeapMemEnd); }
+
+ALWAYS_INLINE
+uptr HiAppMemBeg(void) { return SelectMapping<MappingField>(kHiAppMemBeg); }
+ALWAYS_INLINE
+uptr HiAppMemEnd(void) { return SelectMapping<MappingField>(kHiAppMemEnd); }
+
+ALWAYS_INLINE
+uptr VdsoBeg(void) { return SelectMapping<MappingField>(kVdsoBeg); }
+
+ALWAYS_INLINE
+uptr ShadowBeg(void) { return SelectMapping<MappingField>(kShadowBeg); }
+ALWAYS_INLINE
+uptr ShadowEnd(void) { return SelectMapping<MappingField>(kShadowEnd); }
+
+ALWAYS_INLINE
+uptr MetaShadowBeg(void) { return SelectMapping<MappingField>(kMetaShadowBeg); }
+ALWAYS_INLINE
+uptr MetaShadowEnd(void) { return SelectMapping<MappingField>(kMetaShadowEnd); }
+
+ALWAYS_INLINE
+uptr TraceMemBeg(void) { return SelectMapping<MappingField>(kTraceMemBeg); }
+ALWAYS_INLINE
+uptr TraceMemEnd(void) { return SelectMapping<MappingField>(kTraceMemEnd); }
+
+struct IsAppMemImpl {
+ template <typename Mapping>
+ static bool Apply(uptr mem) {
+ return (mem >= Mapping::kHeapMemBeg && mem < Mapping::kHeapMemEnd) ||
+ (mem >= Mapping::kMidAppMemBeg && mem < Mapping::kMidAppMemEnd) ||
+ (mem >= Mapping::kLoAppMemBeg && mem < Mapping::kLoAppMemEnd) ||
+ (mem >= Mapping::kHiAppMemBeg && mem < Mapping::kHiAppMemEnd);
+ }
+};
+
+ALWAYS_INLINE
+bool IsAppMem(uptr mem) { return SelectMapping<IsAppMemImpl>(mem); }
+
+struct IsShadowMemImpl {
+ template <typename Mapping>
+ static bool Apply(uptr mem) {
+ return mem >= Mapping::kShadowBeg && mem <= Mapping::kShadowEnd;
+ }
+};
+
+ALWAYS_INLINE
+bool IsShadowMem(RawShadow *p) {
+ return SelectMapping<IsShadowMemImpl>(reinterpret_cast<uptr>(p));
+}
+
+struct IsMetaMemImpl {
+ template <typename Mapping>
+ static bool Apply(uptr mem) {
+ return mem >= Mapping::kMetaShadowBeg && mem <= Mapping::kMetaShadowEnd;
+ }
+};
+
+ALWAYS_INLINE
+bool IsMetaMem(const u32 *p) {
+ return SelectMapping<IsMetaMemImpl>(reinterpret_cast<uptr>(p));
+}
+
+struct MemToShadowImpl {
+ template <typename Mapping>
+ static uptr Apply(uptr x) {
+ DCHECK(IsAppMemImpl::Apply<Mapping>(x));
+ return (((x) & ~(Mapping::kShadowMsk | (kShadowCell - 1))) ^
+ Mapping::kShadowXor) *
+ kShadowMultiplier +
+ Mapping::kShadowAdd;
+ }
+};
+
+ALWAYS_INLINE
+RawShadow *MemToShadow(uptr x) {
+ return reinterpret_cast<RawShadow *>(SelectMapping<MemToShadowImpl>(x));
+}
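MemToShadowImpl above is pure arithmetic: strip the bits in kShadowMsk, round down to the shadow cell, XOR with kShadowXor, scale by kShadowMultiplier and add kShadowAdd. The snippet below evaluates that formula against the Mapping48AddressSpace constants; the cell size of 8 and multiplier of 4 come from tsan_defs.h and are assumptions here, not values shown in this file:

  #include <cstdint>
  #include <cstdio>

  // Constants copied from Mapping48AddressSpace above; kShadowCell and
  // kShadowMultiplier are assumed values from tsan_defs.h.
  const uint64_t kShadowMsk = 0x780000000000ull;
  const uint64_t kShadowXor = 0x040000000000ull;
  const uint64_t kShadowAdd = 0x000000000000ull;
  const uint64_t kShadowCell = 8;
  const uint64_t kShadowMultiplier = 4;

  uint64_t MemToShadow(uint64_t x) {
    return ((x & ~(kShadowMsk | (kShadowCell - 1))) ^ kShadowXor) *
               kShadowMultiplier +
           kShadowAdd;
  }

  int main() {
    // A heap address (7b00... range) and a low app address both land in the
    // shadow range [0x010000000000, 0x200000000000).
    uint64_t addrs[] = {0x7b0000001234ull, 0x000000402000ull};
    for (uint64_t a : addrs)
      std::printf("app 0x%llx -> shadow 0x%llx\n",
                  (unsigned long long)a, (unsigned long long)MemToShadow(a));
  }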
+
+struct MemToMetaImpl {
+ template <typename Mapping>
+ static u32 *Apply(uptr x) {
+ DCHECK(IsAppMemImpl::Apply<Mapping>(x));
+ return (u32 *)(((((x) & ~(Mapping::kShadowMsk | (kMetaShadowCell - 1)))) /
+ kMetaShadowCell * kMetaShadowSize) |
+ Mapping::kMetaShadowBeg);
+ }
+};
+
+ALWAYS_INLINE
+u32 *MemToMeta(uptr x) { return SelectMapping<MemToMetaImpl>(x); }
+
+struct ShadowToMemImpl {
+ template <typename Mapping>
+ static uptr Apply(uptr sp) {
+ if (!IsShadowMemImpl::Apply<Mapping>(sp))
+ return 0;
+    // The shadow mapping is non-linear and we've lost some bits, so we don't
+    // have an easy way to restore the original app address. But the mapping is
+    // a bijection, so we try restoring the address as if it belonged to the
+    // low/mid/high range in turn and check whether the shadow->app->shadow
+    // mapping gives back the same shadow address.
+ uptr p =
+ ((sp - Mapping::kShadowAdd) / kShadowMultiplier) ^ Mapping::kShadowXor;
+ if (p >= Mapping::kLoAppMemBeg && p < Mapping::kLoAppMemEnd &&
+ MemToShadowImpl::Apply<Mapping>(p) == sp)
+ return p;
+ if (Mapping::kMidAppMemBeg) {
+ uptr p_mid = p + (Mapping::kMidAppMemBeg & Mapping::kShadowMsk);
+ if (p_mid >= Mapping::kMidAppMemBeg && p_mid < Mapping::kMidAppMemEnd &&
+ MemToShadowImpl::Apply<Mapping>(p_mid) == sp)
+ return p_mid;
+ }
+ return p | Mapping::kShadowMsk;
+ }
+};
+
+ALWAYS_INLINE
+uptr ShadowToMem(RawShadow *s) {
+ return SelectMapping<ShadowToMemImpl>(reinterpret_cast<uptr>(s));
+}
+
+// Compresses addr to kCompressedAddrBits stored in least significant bits.
+ALWAYS_INLINE uptr CompressAddr(uptr addr) {
+ return addr & ((1ull << kCompressedAddrBits) - 1);
+}
+
+struct RestoreAddrImpl {
+ typedef uptr Result;
+ template <typename Mapping>
+ static Result Apply(uptr addr) {
+    // To restore the address we go over all app memory ranges and check if the
+    // top 3 bits of the compressed addr match those of the app range. If they
+    // do, we assume that the compressed address comes from that range and
+    // restore the missing top bits to match the app range address.
+ const uptr ranges[] = {
+ Mapping::kLoAppMemBeg, Mapping::kLoAppMemEnd, Mapping::kMidAppMemBeg,
+ Mapping::kMidAppMemEnd, Mapping::kHiAppMemBeg, Mapping::kHiAppMemEnd,
+ Mapping::kHeapMemBeg, Mapping::kHeapMemEnd,
+ };
+ const uptr indicator = 0x0e0000000000ull;
+ const uptr ind_lsb = 1ull << LeastSignificantSetBitIndex(indicator);
+ for (uptr i = 0; i < ARRAY_SIZE(ranges); i += 2) {
+ uptr beg = ranges[i];
+ uptr end = ranges[i + 1];
+ if (beg == end)
+ continue;
+ for (uptr p = beg; p < end; p = RoundDown(p + ind_lsb, ind_lsb)) {
+ if ((addr & indicator) == (p & indicator))
+ return addr | (p & ~(ind_lsb - 1));
+ }
+ }
+ Printf("ThreadSanitizer: failed to restore address 0x%zx\n", addr);
+ Die();
+ }
+};
+
+// Restores compressed addr from kCompressedAddrBits to full representation.
+// This is called only during reporting and is not performance-critical.
+inline uptr RestoreAddr(uptr addr) {
+ return SelectMapping<RestoreAddrImpl>(addr);
+}
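CompressAddr keeps only the low kCompressedAddrBits bits; RestoreAddrImpl reattaches the top bits by finding an app range whose "indicator" bits (0x0e0000000000) agree with the compressed value. The round trip below follows the same steps for a single range, the Mapping48AddressSpace heap; the value 44 for kCompressedAddrBits is an assumption taken from tsan_defs.h, which is not shown here:

  #include <cstdint>
  #include <cstdio>

  const uint64_t kCompressedAddrBits = 44;  // assumed value from tsan_defs.h

  uint64_t Compress(uint64_t addr) {
    return addr & ((1ull << kCompressedAddrBits) - 1);
  }

  // Restore against one app range, here the heap [0x7b00..., 0x7c00...).
  uint64_t RestoreFromRange(uint64_t compressed, uint64_t beg, uint64_t end) {
    const uint64_t indicator = 0x0e0000000000ull;
    const uint64_t ind_lsb = indicator & ~(indicator - 1);  // lowest set bit
    for (uint64_t p = beg; p < end; p = (p + ind_lsb) & ~(ind_lsb - 1))
      if ((compressed & indicator) == (p & indicator))
        return compressed | (p & ~(ind_lsb - 1));  // reattach the top bits
    return 0;  // not in this range
  }

  int main() {
    uint64_t addr = 0x7b0000001234ull;  // a heap address
    uint64_t c = Compress(addr);
    uint64_t r = RestoreFromRange(c, 0x7b0000000000ull, 0x7c0000000000ull);
    std::printf("orig 0x%llx compressed 0x%llx restored 0x%llx\n",
                (unsigned long long)addr, (unsigned long long)c,
                (unsigned long long)r);
  }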
+
+// The additional page is to catch shadow stack overflow as a paging fault.
+// Windows wants 64K alignment for mmaps.
+const uptr kTotalTraceSize = (kTraceSize * sizeof(Event) + sizeof(Trace)
+ + (64 << 10) + (64 << 10) - 1) & ~((64 << 10) - 1);
+
+struct GetThreadTraceImpl {
+ template <typename Mapping>
+ static uptr Apply(uptr tid) {
+ uptr p = Mapping::kTraceMemBeg + tid * kTotalTraceSize;
+ DCHECK_LT(p, Mapping::kTraceMemEnd);
+ return p;
+ }
+};
+
+ALWAYS_INLINE
+uptr GetThreadTrace(int tid) { return SelectMapping<GetThreadTraceImpl>(tid); }
+
+struct GetThreadTraceHeaderImpl {
+ template <typename Mapping>
+ static uptr Apply(uptr tid) {
+ uptr p = Mapping::kTraceMemBeg + tid * kTotalTraceSize +
+ kTraceSize * sizeof(Event);
+ DCHECK_LT(p, Mapping::kTraceMemEnd);
+ return p;
+ }
+};
+
+ALWAYS_INLINE
+uptr GetThreadTraceHeader(int tid) {
+ return SelectMapping<GetThreadTraceHeaderImpl>(tid);
+}
+
+void InitializePlatform();
+void InitializePlatformEarly();
+void CheckAndProtect();
+void InitializeShadowMemoryPlatform();
+void FlushShadowMemory();
+void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns);
+int ExtractResolvFDs(void *state, int *fds, int nfd);
+int ExtractRecvmsgFDs(void *msg, int *fds, int nfd);
+uptr ExtractLongJmpSp(uptr *env);
+void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size);
+
+int call_pthread_cancel_with_cleanup(int (*fn)(void *arg),
+ void (*cleanup)(void *arg), void *arg);
+
+void DestroyThreadState();
+void PlatformCleanUpThreadState(ThreadState *thr);
+
+} // namespace __tsan
+
+#endif // TSAN_PLATFORM_H
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_platform_linux.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_platform_linux.cpp
new file mode 100644
index 000000000000..73ec14892d28
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_platform_linux.cpp
@@ -0,0 +1,545 @@
+//===-- tsan_platform_linux.cpp -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Linux- and BSD-specific code.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
+#include "sanitizer_common/sanitizer_platform_limits_posix.h"
+#include "sanitizer_common/sanitizer_posix.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_stoptheworld.h"
+#include "tsan_flags.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+
+#include <fcntl.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <sys/mman.h>
+#if SANITIZER_LINUX
+#include <sys/personality.h>
+#include <setjmp.h>
+#endif
+#include <sys/syscall.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <sched.h>
+#include <dlfcn.h>
+#if SANITIZER_LINUX
+#define __need_res_state
+#include <resolv.h>
+#endif
+
+#ifdef sa_handler
+# undef sa_handler
+#endif
+
+#ifdef sa_sigaction
+# undef sa_sigaction
+#endif
+
+#if SANITIZER_FREEBSD
+extern "C" void *__libc_stack_end;
+void *__libc_stack_end = 0;
+#endif
+
+#if SANITIZER_LINUX && defined(__aarch64__) && !SANITIZER_GO
+# define INIT_LONGJMP_XOR_KEY 1
+#else
+# define INIT_LONGJMP_XOR_KEY 0
+#endif
+
+#if INIT_LONGJMP_XOR_KEY
+#include "interception/interception.h"
+// Must be declared outside of other namespaces.
+DECLARE_REAL(int, _setjmp, void *env)
+#endif
+
+namespace __tsan {
+
+#if INIT_LONGJMP_XOR_KEY
+static void InitializeLongjmpXorKey();
+static uptr longjmp_xor_key;
+#endif
+
+// Runtime detected VMA size.
+uptr vmaSize;
+
+enum {
+ MemTotal,
+ MemShadow,
+ MemMeta,
+ MemFile,
+ MemMmap,
+ MemTrace,
+ MemHeap,
+ MemOther,
+ MemCount,
+};
+
+void FillProfileCallback(uptr p, uptr rss, bool file, uptr *mem) {
+ mem[MemTotal] += rss;
+ if (p >= ShadowBeg() && p < ShadowEnd())
+ mem[MemShadow] += rss;
+ else if (p >= MetaShadowBeg() && p < MetaShadowEnd())
+ mem[MemMeta] += rss;
+ else if ((p >= LoAppMemBeg() && p < LoAppMemEnd()) ||
+ (p >= MidAppMemBeg() && p < MidAppMemEnd()) ||
+ (p >= HiAppMemBeg() && p < HiAppMemEnd()))
+ mem[file ? MemFile : MemMmap] += rss;
+ else if (p >= HeapMemBeg() && p < HeapMemEnd())
+ mem[MemHeap] += rss;
+ else if (p >= TraceMemBeg() && p < TraceMemEnd())
+ mem[MemTrace] += rss;
+ else
+ mem[MemOther] += rss;
+}
+
+void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
+ uptr mem[MemCount];
+ internal_memset(mem, 0, sizeof(mem));
+ GetMemoryProfile(FillProfileCallback, mem);
+ auto meta = ctx->metamap.GetMemoryStats();
+ StackDepotStats stacks = StackDepotGetStats();
+ uptr nthread, nlive;
+ ctx->thread_registry.GetNumberOfThreads(&nthread, &nlive);
+ uptr internal_stats[AllocatorStatCount];
+ internal_allocator()->GetStats(internal_stats);
+ // All these are allocated from the common mmap region.
+ mem[MemMmap] -= meta.mem_block + meta.sync_obj + stacks.allocated +
+ internal_stats[AllocatorStatMapped];
+ if (s64(mem[MemMmap]) < 0)
+ mem[MemMmap] = 0;
+ internal_snprintf(
+ buf, buf_size,
+ "%llus: RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
+ " trace:%zd heap:%zd other:%zd intalloc:%zd memblocks:%zd syncobj:%zu"
+ " stacks=%zd[%zd] nthr=%zd/%zd\n",
+ uptime_ns / (1000 * 1000 * 1000), mem[MemTotal] >> 20,
+ mem[MemShadow] >> 20, mem[MemMeta] >> 20, mem[MemFile] >> 20,
+ mem[MemMmap] >> 20, mem[MemTrace] >> 20, mem[MemHeap] >> 20,
+ mem[MemOther] >> 20, internal_stats[AllocatorStatMapped] >> 20,
+ meta.mem_block >> 20, meta.sync_obj >> 20, stacks.allocated >> 20,
+ stacks.n_uniq_ids, nlive, nthread);
+}
+
+# if SANITIZER_LINUX
+void FlushShadowMemoryCallback(
+ const SuspendedThreadsList &suspended_threads_list,
+ void *argument) {
+ ReleaseMemoryPagesToOS(ShadowBeg(), ShadowEnd());
+}
+#endif
+
+void FlushShadowMemory() {
+#if SANITIZER_LINUX
+ StopTheWorld(FlushShadowMemoryCallback, 0);
+#endif
+}
+
+#if !SANITIZER_GO
+// Mark shadow for .rodata sections with the special kShadowRodata marker.
+// Accesses to .rodata can't race, so this saves time, memory and trace space.
+static void MapRodata() {
+ // First create temp file.
+ const char *tmpdir = GetEnv("TMPDIR");
+ if (tmpdir == 0)
+ tmpdir = GetEnv("TEST_TMPDIR");
+#ifdef P_tmpdir
+ if (tmpdir == 0)
+ tmpdir = P_tmpdir;
+#endif
+ if (tmpdir == 0)
+ return;
+ char name[256];
+ internal_snprintf(name, sizeof(name), "%s/tsan.rodata.%d",
+ tmpdir, (int)internal_getpid());
+ uptr openrv = internal_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
+ if (internal_iserror(openrv))
+ return;
+ internal_unlink(name); // Unlink it now, so that we can reuse the buffer.
+ fd_t fd = openrv;
+ // Fill the file with kShadowRodata.
+ const uptr kMarkerSize = 512 * 1024 / sizeof(RawShadow);
+ InternalMmapVector<RawShadow> marker(kMarkerSize);
+  // Use volatile to prevent the compiler from turning this loop into memset.
+ for (volatile RawShadow *p = marker.data(); p < marker.data() + kMarkerSize;
+ p++)
+ *p = kShadowRodata;
+ internal_write(fd, marker.data(), marker.size() * sizeof(RawShadow));
+ // Map the file into memory.
+ uptr page = internal_mmap(0, GetPageSizeCached(), PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, fd, 0);
+ if (internal_iserror(page)) {
+ internal_close(fd);
+ return;
+ }
+ // Map the file into shadow of .rodata sections.
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ // Reusing the buffer 'name'.
+ MemoryMappedSegment segment(name, ARRAY_SIZE(name));
+ while (proc_maps.Next(&segment)) {
+ if (segment.filename[0] != 0 && segment.filename[0] != '[' &&
+ segment.IsReadable() && segment.IsExecutable() &&
+ !segment.IsWritable() && IsAppMem(segment.start)) {
+ // Assume it's .rodata
+ char *shadow_start = (char *)MemToShadow(segment.start);
+ char *shadow_end = (char *)MemToShadow(segment.end);
+ for (char *p = shadow_start; p < shadow_end;
+ p += marker.size() * sizeof(RawShadow)) {
+ internal_mmap(
+ p, Min<uptr>(marker.size() * sizeof(RawShadow), shadow_end - p),
+ PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
+ }
+ }
+ }
+ internal_close(fd);
+}
+
+void InitializeShadowMemoryPlatform() {
+ MapRodata();
+}
+
+#endif // #if !SANITIZER_GO
+
+void InitializePlatformEarly() {
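+  // Derive the VMA size from the address of the current stack frame: the
+  // index of its most significant set bit, plus one, is the number of
+  // significant virtual address bits.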
+ vmaSize =
+ (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
+#if defined(__aarch64__)
+# if !SANITIZER_GO
+ if (vmaSize != 39 && vmaSize != 42 && vmaSize != 48) {
+ Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
+ Printf("FATAL: Found %zd - Supported 39, 42 and 48\n", vmaSize);
+ Die();
+ }
+#else
+ if (vmaSize != 48) {
+ Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
+ Printf("FATAL: Found %zd - Supported 48\n", vmaSize);
+ Die();
+ }
+#endif
+#elif defined(__powerpc64__)
+# if !SANITIZER_GO
+ if (vmaSize != 44 && vmaSize != 46 && vmaSize != 47) {
+ Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
+ Printf("FATAL: Found %zd - Supported 44, 46, and 47\n", vmaSize);
+ Die();
+ }
+# else
+ if (vmaSize != 46 && vmaSize != 47) {
+ Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
+ Printf("FATAL: Found %zd - Supported 46, and 47\n", vmaSize);
+ Die();
+ }
+# endif
+#elif defined(__mips64)
+# if !SANITIZER_GO
+ if (vmaSize != 40) {
+ Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
+ Printf("FATAL: Found %zd - Supported 40\n", vmaSize);
+ Die();
+ }
+# else
+ if (vmaSize != 47) {
+ Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
+ Printf("FATAL: Found %zd - Supported 47\n", vmaSize);
+ Die();
+ }
+# endif
+#endif
+}
+
+void InitializePlatform() {
+ DisableCoreDumperIfNecessary();
+
+ // Go maps shadow memory lazily and works fine with limited address space.
+  // An unlimited stack is not a problem either, because the executable
+  // is not compiled with -pie.
+#if !SANITIZER_GO
+ {
+ bool reexec = false;
+    // TSan doesn't play well with an unlimited stack size (as the stack
+    // overlaps with shadow memory). If we detect an unlimited stack size,
+    // we re-exec the program with a limited stack size as a best effort.
+ if (StackSizeIsUnlimited()) {
+ const uptr kMaxStackSize = 32 * 1024 * 1024;
+ VReport(1, "Program is run with unlimited stack size, which wouldn't "
+ "work with ThreadSanitizer.\n"
+ "Re-execing with stack size limited to %zd bytes.\n",
+ kMaxStackSize);
+ SetStackSizeLimitInBytes(kMaxStackSize);
+ reexec = true;
+ }
+
+ if (!AddressSpaceIsUnlimited()) {
+ Report("WARNING: Program is run with limited virtual address space,"
+ " which wouldn't work with ThreadSanitizer.\n");
+ Report("Re-execing with unlimited virtual address space.\n");
+ SetAddressSpaceUnlimited();
+ reexec = true;
+ }
+#if SANITIZER_LINUX && defined(__aarch64__)
+ // After patch "arm64: mm: support ARCH_MMAP_RND_BITS." is introduced in
+ // linux kernel, the random gap between stack and mapped area is increased
+ // from 128M to 36G on 39-bit aarch64. As it is almost impossible to cover
+ // this big range, we should disable randomized virtual space on aarch64.
+ int old_personality = personality(0xffffffff);
+ if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) {
+ VReport(1, "WARNING: Program is run with randomized virtual address "
+ "space, which wouldn't work with ThreadSanitizer.\n"
+ "Re-execing with fixed virtual address space.\n");
+ CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
+ reexec = true;
+ }
+ // Initialize the xor key used in {sig}{set,long}jump.
+ InitializeLongjmpXorKey();
+#endif
+ if (reexec)
+ ReExec();
+ }
+
+ CheckAndProtect();
+ InitTlsSize();
+#endif // !SANITIZER_GO
+}
+
+#if !SANITIZER_GO
+// Extract file descriptors passed to glibc internal __res_iclose function.
+// This is required to properly "close" the fds, because we do not see internal
+// closes within glibc. The code is a pure hack.
+int ExtractResolvFDs(void *state, int *fds, int nfd) {
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ int cnt = 0;
+ struct __res_state *statp = (struct __res_state*)state;
+ for (int i = 0; i < MAXNS && cnt < nfd; i++) {
+ if (statp->_u._ext.nsaddrs[i] && statp->_u._ext.nssocks[i] != -1)
+ fds[cnt++] = statp->_u._ext.nssocks[i];
+ }
+ return cnt;
+#else
+ return 0;
+#endif
+}
+
+// Extract file descriptors passed via UNIX domain sockets.
+// This is required to properly handle "open" of these fds.
+// see 'man recvmsg' and 'man 3 cmsg'.
+int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) {
+ int res = 0;
+ msghdr *msg = (msghdr*)msgp;
+ struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);
+ for (; cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+ if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS)
+ continue;
+ int n = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(fds[0]);
+ for (int i = 0; i < n; i++) {
+ fds[res++] = ((int*)CMSG_DATA(cmsg))[i];
+ if (res == nfd)
+ return res;
+ }
+ }
+ return res;
+}
+
+// Reverse operation of libc stack pointer mangling
+static uptr UnmangleLongJmpSp(uptr mangled_sp) {
+#if defined(__x86_64__)
+# if SANITIZER_LINUX
+ // Reverse of:
+ // xor %fs:0x30, %rsi
+ // rol $0x11, %rsi
+ uptr sp;
+ asm("ror $0x11, %0 \n"
+ "xor %%fs:0x30, %0 \n"
+ : "=r" (sp)
+ : "0" (mangled_sp));
+ return sp;
+# else
+ return mangled_sp;
+# endif
+#elif defined(__aarch64__)
+# if SANITIZER_LINUX
+ return mangled_sp ^ longjmp_xor_key;
+# else
+ return mangled_sp;
+# endif
+#elif defined(__powerpc64__)
+ // Reverse of:
+ // ld r4, -28696(r13)
+ // xor r4, r3, r4
+ uptr xor_key;
+ asm("ld %0, -28696(%%r13)" : "=r" (xor_key));
+ return mangled_sp ^ xor_key;
+#elif defined(__mips__)
+ return mangled_sp;
+#elif defined(__s390x__)
+ // tcbhead_t.stack_guard
+ uptr xor_key = ((uptr *)__builtin_thread_pointer())[5];
+ return mangled_sp ^ xor_key;
+#else
+ #error "Unknown platform"
+#endif
+}
+
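+// Index of the mangled stack pointer slot within the platform's jmp_buf,
+// counted in uptr-sized words.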
+#if SANITIZER_NETBSD
+# ifdef __x86_64__
+# define LONG_JMP_SP_ENV_SLOT 6
+# else
+# error unsupported
+# endif
+#elif defined(__powerpc__)
+# define LONG_JMP_SP_ENV_SLOT 0
+#elif SANITIZER_FREEBSD
+# define LONG_JMP_SP_ENV_SLOT 2
+#elif SANITIZER_LINUX
+# ifdef __aarch64__
+# define LONG_JMP_SP_ENV_SLOT 13
+# elif defined(__mips64)
+# define LONG_JMP_SP_ENV_SLOT 1
+# elif defined(__s390x__)
+# define LONG_JMP_SP_ENV_SLOT 9
+# else
+# define LONG_JMP_SP_ENV_SLOT 6
+# endif
+#endif
+
+uptr ExtractLongJmpSp(uptr *env) {
+ uptr mangled_sp = env[LONG_JMP_SP_ENV_SLOT];
+ return UnmangleLongJmpSp(mangled_sp);
+}
+
+#if INIT_LONGJMP_XOR_KEY
+// GLIBC mangles the function pointers in jmp_buf (used in {set,long}*jmp
+// functions) by XORing them with a random key. For AArch64 the key is a global
+// variable rather than a TCB one (as for x86_64/powerpc). We recover the key
+// by issuing a setjmp and XORing the mangled SP stored in the jmp_buf with the
+// actual SP value.
+static void InitializeLongjmpXorKey() {
+ // 1. Call REAL(setjmp), which stores the mangled SP in env.
+ jmp_buf env;
+ REAL(_setjmp)(env);
+
+ // 2. Retrieve vanilla/mangled SP.
+ uptr sp;
+ asm("mov %0, sp" : "=r" (sp));
+ uptr mangled_sp = ((uptr *)&env)[LONG_JMP_SP_ENV_SLOT];
+
+ // 3. xor SPs to obtain key.
+ longjmp_xor_key = mangled_sp ^ sp;
+}
+#endif
+
+extern "C" void __tsan_tls_initialization() {}
+
+void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
+  // Check that the thr object is in TLS.
+ const uptr thr_beg = (uptr)thr;
+ const uptr thr_end = (uptr)thr + sizeof(*thr);
+ CHECK_GE(thr_beg, tls_addr);
+ CHECK_LE(thr_beg, tls_addr + tls_size);
+ CHECK_GE(thr_end, tls_addr);
+ CHECK_LE(thr_end, tls_addr + tls_size);
+ // Since the thr object is huge, skip it.
+ const uptr pc = StackTrace::GetNextInstructionPc(
+ reinterpret_cast<uptr>(__tsan_tls_initialization));
+ MemoryRangeImitateWrite(thr, pc, tls_addr, thr_beg - tls_addr);
+ MemoryRangeImitateWrite(thr, pc, thr_end, tls_addr + tls_size - thr_end);
+}
+
+// Note: this function runs with async signals enabled,
+// so it must not touch any tsan state.
+int call_pthread_cancel_with_cleanup(int (*fn)(void *arg),
+ void (*cleanup)(void *arg), void *arg) {
+  // pthread_cleanup_push/pop are implemented as intricate macros;
+  // we can't intercept them or call them without including pthread.h.
+ int res;
+ pthread_cleanup_push(cleanup, arg);
+ res = fn(arg);
+ pthread_cleanup_pop(0);
+ return res;
+}
+#endif // !SANITIZER_GO
+
+#if !SANITIZER_GO
+void ReplaceSystemMalloc() { }
+#endif
+
+#if !SANITIZER_GO
+#if SANITIZER_ANDROID
+// On Android, one thread can call intercepted functions after
+// DestroyThreadState(), so add a fake thread state for "dead" threads.
+static ThreadState *dead_thread_state = nullptr;
+
+ThreadState *cur_thread() {
+ ThreadState* thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr());
+ if (thr == nullptr) {
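+    // All signals are blocked while the ThreadState is allocated and
+    // installed in TLS.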
+ __sanitizer_sigset_t emptyset;
+ internal_sigfillset(&emptyset);
+ __sanitizer_sigset_t oldset;
+ CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset));
+ thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr());
+ if (thr == nullptr) {
+ thr = reinterpret_cast<ThreadState*>(MmapOrDie(sizeof(ThreadState),
+ "ThreadState"));
+ *get_android_tls_ptr() = reinterpret_cast<uptr>(thr);
+ if (dead_thread_state == nullptr) {
+ dead_thread_state = reinterpret_cast<ThreadState*>(
+ MmapOrDie(sizeof(ThreadState), "ThreadState"));
+ dead_thread_state->fast_state.SetIgnoreBit();
+ dead_thread_state->ignore_interceptors = 1;
+ dead_thread_state->is_dead = true;
+ *const_cast<u32*>(&dead_thread_state->tid) = -1;
+ CHECK_EQ(0, internal_mprotect(dead_thread_state, sizeof(ThreadState),
+ PROT_READ));
+ }
+ }
+ CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
+ }
+ return thr;
+}
+
+void set_cur_thread(ThreadState *thr) {
+ *get_android_tls_ptr() = reinterpret_cast<uptr>(thr);
+}
+
+void cur_thread_finalize() {
+ __sanitizer_sigset_t emptyset;
+ internal_sigfillset(&emptyset);
+ __sanitizer_sigset_t oldset;
+ CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset));
+ ThreadState* thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr());
+ if (thr != dead_thread_state) {
+ *get_android_tls_ptr() = reinterpret_cast<uptr>(dead_thread_state);
+ UnmapOrDie(thr, sizeof(ThreadState));
+ }
+ CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
+}
+#endif // SANITIZER_ANDROID
+#endif // if !SANITIZER_GO
+
+} // namespace __tsan
+
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_platform_mac.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_platform_mac.cpp
new file mode 100644
index 000000000000..1465f9953c19
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_platform_mac.cpp
@@ -0,0 +1,326 @@
+//===-- tsan_platform_mac.cpp ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Mac-specific code.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_posix.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_ptrauth.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+#include "tsan_flags.h"
+
+#include <limits.h>
+#include <mach/mach.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sched.h>
+
+namespace __tsan {
+
+#if !SANITIZER_GO
+static char main_thread_state[sizeof(ThreadState)] ALIGNED(
+ SANITIZER_CACHE_LINE_SIZE);
+static ThreadState *dead_thread_state;
+static pthread_key_t thread_state_key;
+
+// We rely on the following documented, but Darwin-specific, behavior to keep
+// the reference to the ThreadState object alive in TLS:
+// pthread_key_create man page:
+// If, after all the destructors have been called for all non-NULL values with
+// associated destructors, there are still some non-NULL values with
+// associated destructors, then the process is repeated. If, after at least
+// [PTHREAD_DESTRUCTOR_ITERATIONS] iterations of destructor calls for
+// outstanding non-NULL values, there are still some non-NULL values with
+// associated destructors, the implementation stops calling destructors.
+static_assert(PTHREAD_DESTRUCTOR_ITERATIONS == 4, "Small number of iterations");
+static void ThreadStateDestructor(void *thr) {
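+  // Re-install the value so the ThreadState stays reachable through TLS for
+  // subsequent destructor iterations (see the comment above).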
+ int res = pthread_setspecific(thread_state_key, thr);
+ CHECK_EQ(res, 0);
+}
+
+static void InitializeThreadStateStorage() {
+ int res;
+ CHECK_EQ(thread_state_key, 0);
+ res = pthread_key_create(&thread_state_key, ThreadStateDestructor);
+ CHECK_EQ(res, 0);
+ res = pthread_setspecific(thread_state_key, main_thread_state);
+ CHECK_EQ(res, 0);
+
+ auto dts = (ThreadState *)MmapOrDie(sizeof(ThreadState), "ThreadState");
+ dts->fast_state.SetIgnoreBit();
+ dts->ignore_interceptors = 1;
+ dts->is_dead = true;
+ const_cast<Tid &>(dts->tid) = kInvalidTid;
+ res = internal_mprotect(dts, sizeof(ThreadState), PROT_READ); // immutable
+ CHECK_EQ(res, 0);
+ dead_thread_state = dts;
+}
+
+ThreadState *cur_thread() {
+ // Some interceptors get called before libpthread has been initialized and in
+ // these cases we must avoid calling any pthread APIs.
+ if (UNLIKELY(!thread_state_key)) {
+ return (ThreadState *)main_thread_state;
+ }
+
+  // We only reach this line after InitializeThreadStateStorage() ran, i.e.,
+ // after TSan (and therefore libpthread) have been initialized.
+ ThreadState *thr = (ThreadState *)pthread_getspecific(thread_state_key);
+ if (UNLIKELY(!thr)) {
+ thr = (ThreadState *)MmapOrDie(sizeof(ThreadState), "ThreadState");
+ int res = pthread_setspecific(thread_state_key, thr);
+ CHECK_EQ(res, 0);
+ }
+ return thr;
+}
+
+void set_cur_thread(ThreadState *thr) {
+ int res = pthread_setspecific(thread_state_key, thr);
+ CHECK_EQ(res, 0);
+}
+
+void cur_thread_finalize() {
+ ThreadState *thr = (ThreadState *)pthread_getspecific(thread_state_key);
+ CHECK(thr);
+ if (thr == (ThreadState *)main_thread_state) {
+ // Calling dispatch_main() or xpc_main() actually invokes pthread_exit to
+ // exit the main thread. Let's keep the main thread's ThreadState.
+ return;
+ }
+ // Intercepted functions can still get called after cur_thread_finalize()
+ // (called from DestroyThreadState()), so put a fake thread state for "dead"
+ // threads. An alternative solution would be to release the ThreadState
+ // object from THREAD_DESTROY (which is delivered later and on the parent
+ // thread) instead of THREAD_TERMINATE.
+ int res = pthread_setspecific(thread_state_key, dead_thread_state);
+ CHECK_EQ(res, 0);
+ UnmapOrDie(thr, sizeof(ThreadState));
+}
+#endif
+
+void FlushShadowMemory() {
+}
+
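+// Walk the VM regions overlapping [start, end) via vm_region_64 and return
+// the resident and dirty sizes in bytes.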
+static void RegionMemUsage(uptr start, uptr end, uptr *res, uptr *dirty) {
+ vm_address_t address = start;
+ vm_address_t end_address = end;
+ uptr resident_pages = 0;
+ uptr dirty_pages = 0;
+ while (address < end_address) {
+ vm_size_t vm_region_size;
+ mach_msg_type_number_t count = VM_REGION_EXTENDED_INFO_COUNT;
+ vm_region_extended_info_data_t vm_region_info;
+ mach_port_t object_name;
+ kern_return_t ret = vm_region_64(
+ mach_task_self(), &address, &vm_region_size, VM_REGION_EXTENDED_INFO,
+ (vm_region_info_t)&vm_region_info, &count, &object_name);
+ if (ret != KERN_SUCCESS) break;
+
+ resident_pages += vm_region_info.pages_resident;
+ dirty_pages += vm_region_info.pages_dirtied;
+
+ address += vm_region_size;
+ }
+ *res = resident_pages * GetPageSizeCached();
+ *dirty = dirty_pages * GetPageSizeCached();
+}
+
+void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
+ uptr shadow_res, shadow_dirty;
+ uptr meta_res, meta_dirty;
+ uptr trace_res, trace_dirty;
+ RegionMemUsage(ShadowBeg(), ShadowEnd(), &shadow_res, &shadow_dirty);
+ RegionMemUsage(MetaShadowBeg(), MetaShadowEnd(), &meta_res, &meta_dirty);
+ RegionMemUsage(TraceMemBeg(), TraceMemEnd(), &trace_res, &trace_dirty);
+
+#if !SANITIZER_GO
+ uptr low_res, low_dirty;
+ uptr high_res, high_dirty;
+ uptr heap_res, heap_dirty;
+ RegionMemUsage(LoAppMemBeg(), LoAppMemEnd(), &low_res, &low_dirty);
+ RegionMemUsage(HiAppMemBeg(), HiAppMemEnd(), &high_res, &high_dirty);
+ RegionMemUsage(HeapMemBeg(), HeapMemEnd(), &heap_res, &heap_dirty);
+#else // !SANITIZER_GO
+ uptr app_res, app_dirty;
+ RegionMemUsage(LoAppMemBeg(), LoAppMemEnd(), &app_res, &app_dirty);
+#endif
+
+ StackDepotStats stacks = StackDepotGetStats();
+ uptr nthread, nlive;
+ ctx->thread_registry.GetNumberOfThreads(&nthread, &nlive);
+ internal_snprintf(
+ buf, buf_size,
+ "shadow (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+ "meta (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+ "traces (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+# if !SANITIZER_GO
+ "low app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+ "high app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+ "heap (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+# else // !SANITIZER_GO
+ "app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+# endif
+ "stacks: %zd unique IDs, %zd kB allocated\n"
+ "threads: %zd total, %zd live\n"
+ "------------------------------\n",
+ ShadowBeg(), ShadowEnd(), shadow_res / 1024, shadow_dirty / 1024,
+ MetaShadowBeg(), MetaShadowEnd(), meta_res / 1024, meta_dirty / 1024,
+ TraceMemBeg(), TraceMemEnd(), trace_res / 1024, trace_dirty / 1024,
+# if !SANITIZER_GO
+ LoAppMemBeg(), LoAppMemEnd(), low_res / 1024, low_dirty / 1024,
+ HiAppMemBeg(), HiAppMemEnd(), high_res / 1024, high_dirty / 1024,
+ HeapMemBeg(), HeapMemEnd(), heap_res / 1024, heap_dirty / 1024,
+# else // !SANITIZER_GO
+ LoAppMemBeg(), LoAppMemEnd(), app_res / 1024, app_dirty / 1024,
+# endif
+ stacks.n_uniq_ids, stacks.allocated / 1024, nthread, nlive);
+}
+
+# if !SANITIZER_GO
+void InitializeShadowMemoryPlatform() { }
+
+// On OS X, GCD worker threads are created without a call to pthread_create. We
+// need to properly register these threads with ThreadCreate and ThreadStart.
+// These threads don't have a parent thread, as they are created "spuriously".
+// We're using a libpthread API that notifies us about a newly created thread.
+// The `thread == pthread_self()` check indicates this is actually a worker
+// thread. If it's just a regular thread, this hook is called on the parent
+// thread.
+typedef void (*pthread_introspection_hook_t)(unsigned int event,
+ pthread_t thread, void *addr,
+ size_t size);
+extern "C" pthread_introspection_hook_t pthread_introspection_hook_install(
+ pthread_introspection_hook_t hook);
+static const uptr PTHREAD_INTROSPECTION_THREAD_CREATE = 1;
+static const uptr PTHREAD_INTROSPECTION_THREAD_TERMINATE = 3;
+static pthread_introspection_hook_t prev_pthread_introspection_hook;
+static void my_pthread_introspection_hook(unsigned int event, pthread_t thread,
+ void *addr, size_t size) {
+ if (event == PTHREAD_INTROSPECTION_THREAD_CREATE) {
+ if (thread == pthread_self()) {
+ // The current thread is a newly created GCD worker thread.
+ ThreadState *thr = cur_thread();
+ Processor *proc = ProcCreate();
+ ProcWire(proc, thr);
+ ThreadState *parent_thread_state = nullptr; // No parent.
+ Tid tid = ThreadCreate(parent_thread_state, 0, (uptr)thread, true);
+ CHECK_NE(tid, kMainTid);
+ ThreadStart(thr, tid, GetTid(), ThreadType::Worker);
+ }
+ } else if (event == PTHREAD_INTROSPECTION_THREAD_TERMINATE) {
+ CHECK_EQ(thread, pthread_self());
+ ThreadState *thr = cur_thread();
+ if (thr->tctx) {
+ DestroyThreadState();
+ }
+ }
+
+ if (prev_pthread_introspection_hook != nullptr)
+ prev_pthread_introspection_hook(event, thread, addr, size);
+}
+#endif
+
+void InitializePlatformEarly() {
+# if !SANITIZER_GO && SANITIZER_IOS
+ uptr max_vm = GetMaxUserVirtualAddress() + 1;
+ if (max_vm != HiAppMemEnd()) {
+ Printf("ThreadSanitizer: unsupported vm address limit %p, expected %p.\n",
+ (void *)max_vm, (void *)HiAppMemEnd());
+ Die();
+ }
+#endif
+}
+
+static uptr longjmp_xor_key = 0;
+
+void InitializePlatform() {
+ DisableCoreDumperIfNecessary();
+#if !SANITIZER_GO
+ CheckAndProtect();
+
+ InitializeThreadStateStorage();
+
+ prev_pthread_introspection_hook =
+ pthread_introspection_hook_install(&my_pthread_introspection_hook);
+#endif
+
+ if (GetMacosAlignedVersion() >= MacosVersion(10, 14)) {
+ // Libsystem currently uses a process-global key; this might change.
+ const unsigned kTLSLongjmpXorKeySlot = 0x7;
+ longjmp_xor_key = (uptr)pthread_getspecific(kTLSLongjmpXorKeySlot);
+ }
+}
+
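+// Index of the mangled stack pointer slot within the Darwin jmp_buf.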
+#ifdef __aarch64__
+# define LONG_JMP_SP_ENV_SLOT \
+ ((GetMacosAlignedVersion() >= MacosVersion(10, 14)) ? 12 : 13)
+#else
+# define LONG_JMP_SP_ENV_SLOT 2
+#endif
+
+uptr ExtractLongJmpSp(uptr *env) {
+ uptr mangled_sp = env[LONG_JMP_SP_ENV_SLOT];
+ uptr sp = mangled_sp ^ longjmp_xor_key;
+ sp = (uptr)ptrauth_auth_data((void *)sp, ptrauth_key_asdb,
+ ptrauth_string_discriminator("sp"));
+ return sp;
+}
+
+#if !SANITIZER_GO
+extern "C" void __tsan_tls_initialization() {}
+
+void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
+ const uptr pc = StackTrace::GetNextInstructionPc(
+ reinterpret_cast<uptr>(__tsan_tls_initialization));
+ // Unlike Linux, we only store a pointer to the ThreadState object in TLS;
+ // just mark the entire range as written to.
+ MemoryRangeImitateWrite(thr, pc, tls_addr, tls_size);
+}
+#endif
+
+#if !SANITIZER_GO
+// Note: this function runs with async signals enabled,
+// so it must not touch any tsan state.
+int call_pthread_cancel_with_cleanup(int (*fn)(void *arg),
+ void (*cleanup)(void *arg), void *arg) {
+  // pthread_cleanup_push/pop are implemented as intricate macros;
+  // we can't intercept them or call them without including pthread.h.
+ int res;
+ pthread_cleanup_push(cleanup, arg);
+ res = fn(arg);
+ pthread_cleanup_pop(0);
+ return res;
+}
+#endif
+
+} // namespace __tsan
+
+#endif // SANITIZER_MAC
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_platform_posix.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_platform_posix.cpp
new file mode 100644
index 000000000000..763ac444377e
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_platform_posix.cpp
@@ -0,0 +1,147 @@
+//===-- tsan_platform_posix.cpp -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// POSIX-specific code.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_POSIX
+
+# include <dlfcn.h>
+
+# include "sanitizer_common/sanitizer_common.h"
+# include "sanitizer_common/sanitizer_errno.h"
+# include "sanitizer_common/sanitizer_libc.h"
+# include "sanitizer_common/sanitizer_procmaps.h"
+# include "tsan_platform.h"
+# include "tsan_rtl.h"
+
+namespace __tsan {
+
+static const char kShadowMemoryMappingWarning[] =
+ "FATAL: %s can not madvise shadow region [%zx, %zx] with %s (errno: %d)\n";
+static const char kShadowMemoryMappingHint[] =
+ "HINT: if %s is not supported in your environment, you may set "
+ "TSAN_OPTIONS=%s=0\n";
+
+# if !SANITIZER_GO
+static void DontDumpShadow(uptr addr, uptr size) {
+ if (common_flags()->use_madv_dontdump)
+ if (!DontDumpShadowMemory(addr, size)) {
+ Printf(kShadowMemoryMappingWarning, SanitizerToolName, addr, addr + size,
+ "MADV_DONTDUMP", errno);
+ Printf(kShadowMemoryMappingHint, "MADV_DONTDUMP", "use_madv_dontdump");
+ Die();
+ }
+}
+
+void InitializeShadowMemory() {
+ // Map memory shadow.
+ if (!MmapFixedSuperNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(),
+ "shadow")) {
+ Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
+ Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n");
+ Die();
+ }
+ // This memory range is used for thread stacks and large user mmaps.
+  // Frequently a thread uses only a small part of the stack, and similarly
+  // a program uses a small part of a large mmap. For some programs we see a
+  // 20% memory usage reduction without huge pages for this range.
+ DontDumpShadow(ShadowBeg(), ShadowEnd() - ShadowBeg());
+ DPrintf("memory shadow: %zx-%zx (%zuGB)\n",
+ ShadowBeg(), ShadowEnd(),
+ (ShadowEnd() - ShadowBeg()) >> 30);
+
+ // Map meta shadow.
+ const uptr meta = MetaShadowBeg();
+ const uptr meta_size = MetaShadowEnd() - meta;
+ if (!MmapFixedSuperNoReserve(meta, meta_size, "meta shadow")) {
+ Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
+ Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n");
+ Die();
+ }
+ DontDumpShadow(meta, meta_size);
+ DPrintf("meta shadow: %zx-%zx (%zuGB)\n",
+ meta, meta + meta_size, meta_size >> 30);
+
+ InitializeShadowMemoryPlatform();
+
+ on_initialize = reinterpret_cast<void (*)(void)>(
+ dlsym(RTLD_DEFAULT, "__tsan_on_initialize"));
+ on_finalize =
+ reinterpret_cast<int (*)(int)>(dlsym(RTLD_DEFAULT, "__tsan_on_finalize"));
+}
+
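+// Reserve [beg, end) as an inaccessible mapping. Returns false if the
+// reservation could not be placed exactly at the requested address.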
+static bool TryProtectRange(uptr beg, uptr end) {
+ CHECK_LE(beg, end);
+ if (beg == end)
+ return true;
+ return beg == (uptr)MmapFixedNoAccess(beg, end - beg);
+}
+
+static void ProtectRange(uptr beg, uptr end) {
+ if (!TryProtectRange(beg, end)) {
+ Printf("FATAL: ThreadSanitizer can not protect [%zx,%zx]\n", beg, end);
+ Printf("FATAL: Make sure you are not using unlimited stack\n");
+ Die();
+ }
+}
+
+void CheckAndProtect() {
+ // Ensure that the binary is indeed compiled with -pie.
+ MemoryMappingLayout proc_maps(true);
+ MemoryMappedSegment segment;
+ while (proc_maps.Next(&segment)) {
+ if (IsAppMem(segment.start)) continue;
+ if (segment.start >= HeapMemEnd() && segment.start < HeapEnd()) continue;
+ if (segment.protection == 0) // Zero page or mprotected.
+ continue;
+ if (segment.start >= VdsoBeg()) // vdso
+ break;
+ Printf("FATAL: ThreadSanitizer: unexpected memory mapping 0x%zx-0x%zx\n",
+ segment.start, segment.end);
+ Die();
+ }
+
+# if defined(__aarch64__) && defined(__APPLE__) && SANITIZER_IOS
+ ProtectRange(HeapMemEnd(), ShadowBeg());
+ ProtectRange(ShadowEnd(), MetaShadowBeg());
+ ProtectRange(MetaShadowEnd(), TraceMemBeg());
+#else
+ ProtectRange(LoAppMemEnd(), ShadowBeg());
+ ProtectRange(ShadowEnd(), MetaShadowBeg());
+ if (MidAppMemBeg()) {
+ ProtectRange(MetaShadowEnd(), MidAppMemBeg());
+ ProtectRange(MidAppMemEnd(), TraceMemBeg());
+ } else {
+ ProtectRange(MetaShadowEnd(), TraceMemBeg());
+ }
+ // Memory for traces is mapped lazily in MapThreadTrace.
+  // Protect the whole range for now, so that the user does not map anything
+  // here.
+ ProtectRange(TraceMemBeg(), TraceMemEnd());
+ ProtectRange(TraceMemEnd(), HeapMemBeg());
+ ProtectRange(HeapEnd(), HiAppMemBeg());
+#endif
+
+#if defined(__s390x__)
+ // Protect the rest of the address space.
+ const uptr user_addr_max_l4 = 0x0020000000000000ull;
+ const uptr user_addr_max_l5 = 0xfffffffffffff000ull;
+ // All the maintained s390x kernels support at least 4-level page tables.
+ ProtectRange(HiAppMemEnd(), user_addr_max_l4);
+ // Older s390x kernels may not support 5-level page tables.
+ TryProtectRange(user_addr_max_l4, user_addr_max_l5);
+#endif
+}
+#endif
+
+} // namespace __tsan
+
+#endif // SANITIZER_POSIX
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_platform_windows.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_platform_windows.cpp
new file mode 100644
index 000000000000..fea893768c79
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_platform_windows.cpp
@@ -0,0 +1,36 @@
+//===-- tsan_platform_windows.cpp -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Windows-specific code.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_WINDOWS
+
+#include "tsan_platform.h"
+
+#include <stdlib.h>
+
+namespace __tsan {
+
+void FlushShadowMemory() {
+}
+
+void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {}
+
+void InitializePlatformEarly() {
+}
+
+void InitializePlatform() {
+}
+
+} // namespace __tsan
+
+#endif // SANITIZER_WINDOWS
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_ppc_regs.h b/compiler-rt/lib/tsan/rtl-old/tsan_ppc_regs.h
new file mode 100644
index 000000000000..5b43f3ddada3
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_ppc_regs.h
@@ -0,0 +1,96 @@
+#define r0 0
+#define r1 1
+#define r2 2
+#define r3 3
+#define r4 4
+#define r5 5
+#define r6 6
+#define r7 7
+#define r8 8
+#define r9 9
+#define r10 10
+#define r11 11
+#define r12 12
+#define r13 13
+#define r14 14
+#define r15 15
+#define r16 16
+#define r17 17
+#define r18 18
+#define r19 19
+#define r20 20
+#define r21 21
+#define r22 22
+#define r23 23
+#define r24 24
+#define r25 25
+#define r26 26
+#define r27 27
+#define r28 28
+#define r29 29
+#define r30 30
+#define r31 31
+#define f0 0
+#define f1 1
+#define f2 2
+#define f3 3
+#define f4 4
+#define f5 5
+#define f6 6
+#define f7 7
+#define f8 8
+#define f9 9
+#define f10 10
+#define f11 11
+#define f12 12
+#define f13 13
+#define f14 14
+#define f15 15
+#define f16 16
+#define f17 17
+#define f18 18
+#define f19 19
+#define f20 20
+#define f21 21
+#define f22 22
+#define f23 23
+#define f24 24
+#define f25 25
+#define f26 26
+#define f27 27
+#define f28 28
+#define f29 29
+#define f30 30
+#define f31 31
+#define v0 0
+#define v1 1
+#define v2 2
+#define v3 3
+#define v4 4
+#define v5 5
+#define v6 6
+#define v7 7
+#define v8 8
+#define v9 9
+#define v10 10
+#define v11 11
+#define v12 12
+#define v13 13
+#define v14 14
+#define v15 15
+#define v16 16
+#define v17 17
+#define v18 18
+#define v19 19
+#define v20 20
+#define v21 21
+#define v22 22
+#define v23 23
+#define v24 24
+#define v25 25
+#define v26 26
+#define v27 27
+#define v28 28
+#define v29 29
+#define v30 30
+#define v31 31
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_preinit.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_preinit.cpp
new file mode 100644
index 000000000000..205bdbf93b20
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_preinit.cpp
@@ -0,0 +1,26 @@
+//===-- tsan_preinit.cpp --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer.
+//
+// Call __tsan_init at the very early stage of process startup.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "tsan_interface.h"
+
+#if SANITIZER_CAN_USE_PREINIT_ARRAY
+
+// The symbol is called __local_tsan_preinit, because it's not intended to be
+// exported.
+// This code is linked into the main executable when -fsanitize=thread is in
+// the link flags. It can only use exported interface functions.
+__attribute__((section(".preinit_array"), used))
+void (*__local_tsan_preinit)(void) = __tsan_init;
+
+#endif
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_report.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_report.cpp
new file mode 100644
index 000000000000..a926c3761ccf
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_report.cpp
@@ -0,0 +1,479 @@
+//===-- tsan_report.cpp ---------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_report.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_file.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_report_decorator.h"
+#include "sanitizer_common/sanitizer_stacktrace_printer.h"
+
+namespace __tsan {
+
+class Decorator: public __sanitizer::SanitizerCommonDecorator {
+ public:
+ Decorator() : SanitizerCommonDecorator() { }
+ const char *Access() { return Blue(); }
+ const char *ThreadDescription() { return Cyan(); }
+ const char *Location() { return Green(); }
+ const char *Sleep() { return Yellow(); }
+ const char *Mutex() { return Magenta(); }
+};
+
+ReportDesc::ReportDesc()
+ : tag(kExternalTagNone)
+ , stacks()
+ , mops()
+ , locs()
+ , mutexes()
+ , threads()
+ , unique_tids()
+ , sleep()
+ , count() {
+}
+
+ReportMop::ReportMop()
+ : mset() {
+}
+
+ReportDesc::~ReportDesc() {
+ // FIXME(dvyukov): it must be leaking a lot of memory.
+}
+
+#if !SANITIZER_GO
+
+const int kThreadBufSize = 32;
+const char *thread_name(char *buf, Tid tid) {
+ if (tid == kMainTid)
+ return "main thread";
+ internal_snprintf(buf, kThreadBufSize, "thread T%d", tid);
+ return buf;
+}
+
+static const char *ReportTypeString(ReportType typ, uptr tag) {
+ switch (typ) {
+ case ReportTypeRace:
+ return "data race";
+ case ReportTypeVptrRace:
+ return "data race on vptr (ctor/dtor vs virtual call)";
+ case ReportTypeUseAfterFree:
+ return "heap-use-after-free";
+ case ReportTypeVptrUseAfterFree:
+ return "heap-use-after-free (virtual call vs free)";
+ case ReportTypeExternalRace: {
+ const char *str = GetReportHeaderFromTag(tag);
+ return str ? str : "race on external object";
+ }
+ case ReportTypeThreadLeak:
+ return "thread leak";
+ case ReportTypeMutexDestroyLocked:
+ return "destroy of a locked mutex";
+ case ReportTypeMutexDoubleLock:
+ return "double lock of a mutex";
+ case ReportTypeMutexInvalidAccess:
+ return "use of an invalid mutex (e.g. uninitialized or destroyed)";
+ case ReportTypeMutexBadUnlock:
+ return "unlock of an unlocked mutex (or by a wrong thread)";
+ case ReportTypeMutexBadReadLock:
+ return "read lock of a write locked mutex";
+ case ReportTypeMutexBadReadUnlock:
+ return "read unlock of a write locked mutex";
+ case ReportTypeSignalUnsafe:
+ return "signal-unsafe call inside of a signal";
+ case ReportTypeErrnoInSignal:
+ return "signal handler spoils errno";
+ case ReportTypeDeadlock:
+ return "lock-order-inversion (potential deadlock)";
+ // No default case so compiler warns us if we miss one
+ }
+ UNREACHABLE("missing case");
+}
+
+#if SANITIZER_MAC
+static const char *const kInterposedFunctionPrefix = "wrap_";
+#else
+static const char *const kInterposedFunctionPrefix = "__interceptor_";
+#endif
+
+void PrintStack(const ReportStack *ent) {
+ if (ent == 0 || ent->frames == 0) {
+ Printf(" [failed to restore the stack]\n\n");
+ return;
+ }
+ SymbolizedStack *frame = ent->frames;
+ for (int i = 0; frame && frame->info.address; frame = frame->next, i++) {
+ InternalScopedString res;
+ RenderFrame(&res, common_flags()->stack_trace_format, i,
+ frame->info.address, &frame->info,
+ common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix, kInterposedFunctionPrefix);
+ Printf("%s\n", res.data());
+ }
+ Printf("\n");
+}
+
+static void PrintMutexSet(Vector<ReportMopMutex> const& mset) {
+ for (uptr i = 0; i < mset.Size(); i++) {
+ if (i == 0)
+ Printf(" (mutexes:");
+ const ReportMopMutex m = mset[i];
+ Printf(" %s M%llu", m.write ? "write" : "read", m.id);
+ Printf(i == mset.Size() - 1 ? ")" : ",");
+ }
+}
+
+static const char *MopDesc(bool first, bool write, bool atomic) {
+ return atomic ? (first ? (write ? "Atomic write" : "Atomic read")
+ : (write ? "Previous atomic write" : "Previous atomic read"))
+ : (first ? (write ? "Write" : "Read")
+ : (write ? "Previous write" : "Previous read"));
+}
+
+static const char *ExternalMopDesc(bool first, bool write) {
+ return first ? (write ? "Modifying" : "Read-only")
+ : (write ? "Previous modifying" : "Previous read-only");
+}
+
+static void PrintMop(const ReportMop *mop, bool first) {
+ Decorator d;
+ char thrbuf[kThreadBufSize];
+ Printf("%s", d.Access());
+ if (mop->external_tag == kExternalTagNone) {
+ Printf(" %s of size %d at %p by %s",
+ MopDesc(first, mop->write, mop->atomic), mop->size,
+ (void *)mop->addr, thread_name(thrbuf, mop->tid));
+ } else {
+ const char *object_type = GetObjectTypeFromTag(mop->external_tag);
+ if (object_type == nullptr)
+ object_type = "external object";
+ Printf(" %s access of %s at %p by %s",
+ ExternalMopDesc(first, mop->write), object_type,
+ (void *)mop->addr, thread_name(thrbuf, mop->tid));
+ }
+ PrintMutexSet(mop->mset);
+ Printf(":\n");
+ Printf("%s", d.Default());
+ PrintStack(mop->stack);
+}
+
+static void PrintLocation(const ReportLocation *loc) {
+ Decorator d;
+ char thrbuf[kThreadBufSize];
+ bool print_stack = false;
+ Printf("%s", d.Location());
+ if (loc->type == ReportLocationGlobal) {
+ const DataInfo &global = loc->global;
+ if (global.size != 0)
+ Printf(" Location is global '%s' of size %zu at %p (%s+0x%zx)\n\n",
+ global.name, global.size, reinterpret_cast<void *>(global.start),
+ StripModuleName(global.module), global.module_offset);
+ else
+ Printf(" Location is global '%s' at %p (%s+0x%zx)\n\n", global.name,
+ reinterpret_cast<void *>(global.start),
+ StripModuleName(global.module), global.module_offset);
+ } else if (loc->type == ReportLocationHeap) {
+ char thrbuf[kThreadBufSize];
+ const char *object_type = GetObjectTypeFromTag(loc->external_tag);
+ if (!object_type) {
+ Printf(" Location is heap block of size %zu at %p allocated by %s:\n",
+ loc->heap_chunk_size,
+ reinterpret_cast<void *>(loc->heap_chunk_start),
+ thread_name(thrbuf, loc->tid));
+ } else {
+ Printf(" Location is %s of size %zu at %p allocated by %s:\n",
+ object_type, loc->heap_chunk_size,
+ reinterpret_cast<void *>(loc->heap_chunk_start),
+ thread_name(thrbuf, loc->tid));
+ }
+ print_stack = true;
+ } else if (loc->type == ReportLocationStack) {
+ Printf(" Location is stack of %s.\n\n", thread_name(thrbuf, loc->tid));
+ } else if (loc->type == ReportLocationTLS) {
+ Printf(" Location is TLS of %s.\n\n", thread_name(thrbuf, loc->tid));
+ } else if (loc->type == ReportLocationFD) {
+ Printf(" Location is file descriptor %d created by %s at:\n",
+ loc->fd, thread_name(thrbuf, loc->tid));
+ print_stack = true;
+ }
+ Printf("%s", d.Default());
+ if (print_stack)
+ PrintStack(loc->stack);
+}
+
+static void PrintMutexShort(const ReportMutex *rm, const char *after) {
+ Decorator d;
+ Printf("%sM%lld%s%s", d.Mutex(), rm->id, d.Default(), after);
+}
+
+static void PrintMutexShortWithAddress(const ReportMutex *rm,
+ const char *after) {
+ Decorator d;
+ Printf("%sM%lld (%p)%s%s", d.Mutex(), rm->id,
+ reinterpret_cast<void *>(rm->addr), d.Default(), after);
+}
+
+static void PrintMutex(const ReportMutex *rm) {
+ Decorator d;
+ if (rm->destroyed) {
+ Printf("%s", d.Mutex());
+ Printf(" Mutex M%llu is already destroyed.\n\n", rm->id);
+ Printf("%s", d.Default());
+ } else {
+ Printf("%s", d.Mutex());
+ Printf(" Mutex M%llu (%p) created at:\n", rm->id,
+ reinterpret_cast<void *>(rm->addr));
+ Printf("%s", d.Default());
+ PrintStack(rm->stack);
+ }
+}
+
+static void PrintThread(const ReportThread *rt) {
+ Decorator d;
+ if (rt->id == kMainTid) // Little sense in describing the main thread.
+ return;
+ Printf("%s", d.ThreadDescription());
+ Printf(" Thread T%d", rt->id);
+ if (rt->name && rt->name[0] != '\0')
+ Printf(" '%s'", rt->name);
+ char thrbuf[kThreadBufSize];
+ const char *thread_status = rt->running ? "running" : "finished";
+ if (rt->thread_type == ThreadType::Worker) {
+ Printf(" (tid=%llu, %s) is a GCD worker thread\n", rt->os_id,
+ thread_status);
+ Printf("\n");
+ Printf("%s", d.Default());
+ return;
+ }
+ Printf(" (tid=%llu, %s) created by %s", rt->os_id, thread_status,
+ thread_name(thrbuf, rt->parent_tid));
+ if (rt->stack)
+ Printf(" at:");
+ Printf("\n");
+ Printf("%s", d.Default());
+ PrintStack(rt->stack);
+}
+
+static void PrintSleep(const ReportStack *s) {
+ Decorator d;
+ Printf("%s", d.Sleep());
+ Printf(" As if synchronized via sleep:\n");
+ Printf("%s", d.Default());
+ PrintStack(s);
+}
+
+static ReportStack *ChooseSummaryStack(const ReportDesc *rep) {
+ if (rep->mops.Size())
+ return rep->mops[0]->stack;
+ if (rep->stacks.Size())
+ return rep->stacks[0];
+ if (rep->mutexes.Size())
+ return rep->mutexes[0]->stack;
+ if (rep->threads.Size())
+ return rep->threads[0]->stack;
+ return 0;
+}
+
+static bool FrameIsInternal(const SymbolizedStack *frame) {
+ if (frame == 0)
+ return false;
+ const char *file = frame->info.file;
+ const char *module = frame->info.module;
+ if (file != 0 &&
+ (internal_strstr(file, "tsan_interceptors_posix.cpp") ||
+ internal_strstr(file, "sanitizer_common_interceptors.inc") ||
+ internal_strstr(file, "tsan_interface_")))
+ return true;
+ if (module != 0 && (internal_strstr(module, "libclang_rt.tsan_")))
+ return true;
+ return false;
+}
+
+static SymbolizedStack *SkipTsanInternalFrames(SymbolizedStack *frames) {
+ while (FrameIsInternal(frames) && frames->next)
+ frames = frames->next;
+ return frames;
+}
+
+void PrintReport(const ReportDesc *rep) {
+ Decorator d;
+ Printf("==================\n");
+ const char *rep_typ_str = ReportTypeString(rep->typ, rep->tag);
+ Printf("%s", d.Warning());
+ Printf("WARNING: ThreadSanitizer: %s (pid=%d)\n", rep_typ_str,
+ (int)internal_getpid());
+ Printf("%s", d.Default());
+
+ if (rep->typ == ReportTypeDeadlock) {
+ char thrbuf[kThreadBufSize];
+ Printf(" Cycle in lock order graph: ");
+ for (uptr i = 0; i < rep->mutexes.Size(); i++)
+ PrintMutexShortWithAddress(rep->mutexes[i], " => ");
+ PrintMutexShort(rep->mutexes[0], "\n\n");
+ CHECK_GT(rep->mutexes.Size(), 0U);
+ CHECK_EQ(rep->mutexes.Size() * (flags()->second_deadlock_stack ? 2 : 1),
+ rep->stacks.Size());
+ for (uptr i = 0; i < rep->mutexes.Size(); i++) {
+ Printf(" Mutex ");
+ PrintMutexShort(rep->mutexes[(i + 1) % rep->mutexes.Size()],
+ " acquired here while holding mutex ");
+ PrintMutexShort(rep->mutexes[i], " in ");
+ Printf("%s", d.ThreadDescription());
+ Printf("%s:\n", thread_name(thrbuf, rep->unique_tids[i]));
+ Printf("%s", d.Default());
+ if (flags()->second_deadlock_stack) {
+ PrintStack(rep->stacks[2*i]);
+ Printf(" Mutex ");
+ PrintMutexShort(rep->mutexes[i],
+ " previously acquired by the same thread here:\n");
+ PrintStack(rep->stacks[2*i+1]);
+ } else {
+ PrintStack(rep->stacks[i]);
+ if (i == 0)
+ Printf(" Hint: use TSAN_OPTIONS=second_deadlock_stack=1 "
+ "to get more informative warning message\n\n");
+ }
+ }
+ } else {
+ for (uptr i = 0; i < rep->stacks.Size(); i++) {
+ if (i)
+ Printf(" and:\n");
+ PrintStack(rep->stacks[i]);
+ }
+ }
+
+ for (uptr i = 0; i < rep->mops.Size(); i++)
+ PrintMop(rep->mops[i], i == 0);
+
+ if (rep->sleep)
+ PrintSleep(rep->sleep);
+
+ for (uptr i = 0; i < rep->locs.Size(); i++)
+ PrintLocation(rep->locs[i]);
+
+ if (rep->typ != ReportTypeDeadlock) {
+ for (uptr i = 0; i < rep->mutexes.Size(); i++)
+ PrintMutex(rep->mutexes[i]);
+ }
+
+ for (uptr i = 0; i < rep->threads.Size(); i++)
+ PrintThread(rep->threads[i]);
+
+ if (rep->typ == ReportTypeThreadLeak && rep->count > 1)
+ Printf(" And %d more similar thread leaks.\n\n", rep->count - 1);
+
+ if (ReportStack *stack = ChooseSummaryStack(rep)) {
+ if (SymbolizedStack *frame = SkipTsanInternalFrames(stack->frames))
+ ReportErrorSummary(rep_typ_str, frame->info);
+ }
+
+ if (common_flags()->print_module_map == 2)
+ DumpProcessMap();
+
+ Printf("==================\n");
+}
+
+#else // #if !SANITIZER_GO
+
+const Tid kMainGoroutineId = 1;
+
+void PrintStack(const ReportStack *ent) {
+ if (ent == 0 || ent->frames == 0) {
+ Printf(" [failed to restore the stack]\n");
+ return;
+ }
+ SymbolizedStack *frame = ent->frames;
+ for (int i = 0; frame; frame = frame->next, i++) {
+ const AddressInfo &info = frame->info;
+ Printf(" %s()\n %s:%d +0x%zx\n", info.function,
+ StripPathPrefix(info.file, common_flags()->strip_path_prefix),
+ info.line, info.module_offset);
+ }
+}
+
+static void PrintMop(const ReportMop *mop, bool first) {
+ Printf("\n");
+ Printf("%s at %p by ",
+ (first ? (mop->write ? "Write" : "Read")
+ : (mop->write ? "Previous write" : "Previous read")),
+ reinterpret_cast<void *>(mop->addr));
+ if (mop->tid == kMainGoroutineId)
+ Printf("main goroutine:\n");
+ else
+ Printf("goroutine %d:\n", mop->tid);
+ PrintStack(mop->stack);
+}
+
+static void PrintLocation(const ReportLocation *loc) {
+ switch (loc->type) {
+ case ReportLocationHeap: {
+ Printf("\n");
+ Printf("Heap block of size %zu at %p allocated by ", loc->heap_chunk_size,
+ reinterpret_cast<void *>(loc->heap_chunk_start));
+ if (loc->tid == kMainGoroutineId)
+ Printf("main goroutine:\n");
+ else
+ Printf("goroutine %d:\n", loc->tid);
+ PrintStack(loc->stack);
+ break;
+ }
+ case ReportLocationGlobal: {
+ Printf("\n");
+ Printf("Global var %s of size %zu at %p declared at %s:%zu\n",
+ loc->global.name, loc->global.size,
+ reinterpret_cast<void *>(loc->global.start), loc->global.file,
+ loc->global.line);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+static void PrintThread(const ReportThread *rt) {
+ if (rt->id == kMainGoroutineId)
+ return;
+ Printf("\n");
+ Printf("Goroutine %d (%s) created at:\n",
+ rt->id, rt->running ? "running" : "finished");
+ PrintStack(rt->stack);
+}
+
+void PrintReport(const ReportDesc *rep) {
+ Printf("==================\n");
+ if (rep->typ == ReportTypeRace) {
+ Printf("WARNING: DATA RACE");
+ for (uptr i = 0; i < rep->mops.Size(); i++)
+ PrintMop(rep->mops[i], i == 0);
+ for (uptr i = 0; i < rep->locs.Size(); i++)
+ PrintLocation(rep->locs[i]);
+ for (uptr i = 0; i < rep->threads.Size(); i++)
+ PrintThread(rep->threads[i]);
+ } else if (rep->typ == ReportTypeDeadlock) {
+ Printf("WARNING: DEADLOCK\n");
+ for (uptr i = 0; i < rep->mutexes.Size(); i++) {
+ Printf("Goroutine %d lock mutex %llu while holding mutex %llu:\n", 999,
+ rep->mutexes[i]->id,
+ rep->mutexes[(i + 1) % rep->mutexes.Size()]->id);
+ PrintStack(rep->stacks[2*i]);
+ Printf("\n");
+ Printf("Mutex %llu was previously locked here:\n",
+ rep->mutexes[(i + 1) % rep->mutexes.Size()]->id);
+ PrintStack(rep->stacks[2*i + 1]);
+ Printf("\n");
+ }
+ }
+ Printf("==================\n");
+}
+
+#endif
+
+} // namespace __tsan
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_report.h b/compiler-rt/lib/tsan/rtl-old/tsan_report.h
new file mode 100644
index 000000000000..d68c2db88828
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_report.h
@@ -0,0 +1,127 @@
+//===-- tsan_report.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_REPORT_H
+#define TSAN_REPORT_H
+
+#include "sanitizer_common/sanitizer_symbolizer.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
+#include "sanitizer_common/sanitizer_vector.h"
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+enum ReportType {
+ ReportTypeRace,
+ ReportTypeVptrRace,
+ ReportTypeUseAfterFree,
+ ReportTypeVptrUseAfterFree,
+ ReportTypeExternalRace,
+ ReportTypeThreadLeak,
+ ReportTypeMutexDestroyLocked,
+ ReportTypeMutexDoubleLock,
+ ReportTypeMutexInvalidAccess,
+ ReportTypeMutexBadUnlock,
+ ReportTypeMutexBadReadLock,
+ ReportTypeMutexBadReadUnlock,
+ ReportTypeSignalUnsafe,
+ ReportTypeErrnoInSignal,
+ ReportTypeDeadlock
+};
+
+struct ReportStack {
+ SymbolizedStack *frames = nullptr;
+ bool suppressable = false;
+};
+
+struct ReportMopMutex {
+ u64 id;
+ bool write;
+};
+
+struct ReportMop {
+ int tid;
+ uptr addr;
+ int size;
+ bool write;
+ bool atomic;
+ uptr external_tag;
+ Vector<ReportMopMutex> mset;
+ ReportStack *stack;
+
+ ReportMop();
+};
+
+enum ReportLocationType {
+ ReportLocationGlobal,
+ ReportLocationHeap,
+ ReportLocationStack,
+ ReportLocationTLS,
+ ReportLocationFD
+};
+
+struct ReportLocation {
+ ReportLocationType type = ReportLocationGlobal;
+ DataInfo global = {};
+ uptr heap_chunk_start = 0;
+ uptr heap_chunk_size = 0;
+ uptr external_tag = 0;
+ Tid tid = kInvalidTid;
+ int fd = 0;
+ bool suppressable = false;
+ ReportStack *stack = nullptr;
+};
+
+struct ReportThread {
+ Tid id;
+ tid_t os_id;
+ bool running;
+ ThreadType thread_type;
+ char *name;
+ Tid parent_tid;
+ ReportStack *stack;
+};
+
+struct ReportMutex {
+ u64 id;
+ uptr addr;
+ bool destroyed;
+ ReportStack *stack;
+};
+
+class ReportDesc {
+ public:
+ ReportType typ;
+ uptr tag;
+ Vector<ReportStack*> stacks;
+ Vector<ReportMop*> mops;
+ Vector<ReportLocation*> locs;
+ Vector<ReportMutex*> mutexes;
+ Vector<ReportThread*> threads;
+ Vector<Tid> unique_tids;
+ ReportStack *sleep;
+ int count;
+
+ ReportDesc();
+ ~ReportDesc();
+
+ private:
+ ReportDesc(const ReportDesc&);
+ void operator = (const ReportDesc&);
+};
+
+// Format and output the report to the console/log. No additional logic.
+void PrintReport(const ReportDesc *rep);
+void PrintStack(const ReportStack *stack);
+
+} // namespace __tsan
+
+#endif // TSAN_REPORT_H
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_rtl.cpp
new file mode 100644
index 000000000000..c14af9788e32
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_rtl.cpp
@@ -0,0 +1,811 @@
+//===-- tsan_rtl.cpp ------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Main file (entry points) for the TSan run-time.
+//===----------------------------------------------------------------------===//
+
+#include "tsan_rtl.h"
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_file.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+#include "tsan_defs.h"
+#include "tsan_interface.h"
+#include "tsan_mman.h"
+#include "tsan_platform.h"
+#include "tsan_suppressions.h"
+#include "tsan_symbolize.h"
+#include "ubsan/ubsan_init.h"
+
+volatile int __tsan_resumed = 0;
+
+extern "C" void __tsan_resume() {
+ __tsan_resumed = 1;
+}
+
+SANITIZER_WEAK_DEFAULT_IMPL
+void __tsan_test_only_on_fork() {}
+
+namespace __tsan {
+
+#if !SANITIZER_GO
+void (*on_initialize)(void);
+int (*on_finalize)(int);
+#endif
+
+#if !SANITIZER_GO && !SANITIZER_MAC
+__attribute__((tls_model("initial-exec")))
+THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(
+ SANITIZER_CACHE_LINE_SIZE);
+#endif
+static char ctx_placeholder[sizeof(Context)] ALIGNED(SANITIZER_CACHE_LINE_SIZE);
+Context *ctx;
+
+// Can be overridden by a front-end.
+#ifdef TSAN_EXTERNAL_HOOKS
+bool OnFinalize(bool failed);
+void OnInitialize();
+#else
+#include <dlfcn.h>
+SANITIZER_WEAK_CXX_DEFAULT_IMPL
+bool OnFinalize(bool failed) {
+#if !SANITIZER_GO
+ if (on_finalize)
+ return on_finalize(failed);
+#endif
+ return failed;
+}
+SANITIZER_WEAK_CXX_DEFAULT_IMPL
+void OnInitialize() {
+#if !SANITIZER_GO
+ if (on_initialize)
+ on_initialize();
+#endif
+}
+#endif
+
+static ThreadContextBase *CreateThreadContext(Tid tid) {
+ // Map thread trace when context is created.
+ char name[50];
+ internal_snprintf(name, sizeof(name), "trace %u", tid);
+ MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name);
+ const uptr hdr = GetThreadTraceHeader(tid);
+ internal_snprintf(name, sizeof(name), "trace header %u", tid);
+ MapThreadTrace(hdr, sizeof(Trace), name);
+ new((void*)hdr) Trace();
+ // We are going to use only a small part of the trace with the default
+ // value of history_size. However, the constructor writes to the whole trace.
+ // Release the unused part.
+ uptr hdr_end = hdr + sizeof(Trace);
+ hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
+ hdr_end = RoundUp(hdr_end, GetPageSizeCached());
+ if (hdr_end < hdr + sizeof(Trace)) {
+ ReleaseMemoryPagesToOS(hdr_end, hdr + sizeof(Trace));
+ uptr unused = hdr + sizeof(Trace) - hdr_end;
+ if (hdr_end != (uptr)MmapFixedNoAccess(hdr_end, unused)) {
+ Report("ThreadSanitizer: failed to mprotect [0x%zx-0x%zx) \n", hdr_end,
+ unused);
+ CHECK("unable to mprotect" && 0);
+ }
+ }
+ return New<ThreadContext>(tid);
+}
+
+#if !SANITIZER_GO
+static const u32 kThreadQuarantineSize = 16;
+#else
+static const u32 kThreadQuarantineSize = 64;
+#endif
+
+Context::Context()
+ : initialized(),
+ report_mtx(MutexTypeReport),
+ nreported(),
+ thread_registry(CreateThreadContext, kMaxTid, kThreadQuarantineSize,
+ kMaxTidReuse),
+ racy_mtx(MutexTypeRacy),
+ racy_stacks(),
+ racy_addresses(),
+ fired_suppressions_mtx(MutexTypeFired),
+ clock_alloc(LINKER_INITIALIZED, "clock allocator") {
+ fired_suppressions.reserve(8);
+}
+
+// The objects are allocated in TLS, so one may rely on zero-initialization.
+ThreadState::ThreadState(Context *ctx, Tid tid, int unique_id, u64 epoch,
+ unsigned reuse_count, uptr stk_addr, uptr stk_size,
+ uptr tls_addr, uptr tls_size)
+ : fast_state(tid, epoch)
+ // Do not touch these, rely on zero initialization,
+ // they may be accessed before the ctor.
+ // , ignore_reads_and_writes()
+ // , ignore_interceptors()
+ ,
+ clock(tid, reuse_count)
+#if !SANITIZER_GO
+ ,
+ jmp_bufs()
+#endif
+ ,
+ tid(tid),
+ unique_id(unique_id),
+ stk_addr(stk_addr),
+ stk_size(stk_size),
+ tls_addr(tls_addr),
+ tls_size(tls_size)
+#if !SANITIZER_GO
+ ,
+ last_sleep_clock(tid)
+#endif
+{
+ CHECK_EQ(reinterpret_cast<uptr>(this) % SANITIZER_CACHE_LINE_SIZE, 0);
+#if !SANITIZER_GO
+ // C/C++ uses fixed size shadow stack.
+ const int kInitStackSize = kShadowStackSize;
+ shadow_stack = static_cast<uptr *>(
+ MmapNoReserveOrDie(kInitStackSize * sizeof(uptr), "shadow stack"));
+ SetShadowRegionHugePageMode(reinterpret_cast<uptr>(shadow_stack),
+ kInitStackSize * sizeof(uptr));
+#else
+ // Go uses malloc-allocated shadow stack with dynamic size.
+ const int kInitStackSize = 8;
+ shadow_stack = static_cast<uptr *>(Alloc(kInitStackSize * sizeof(uptr)));
+#endif
+ shadow_stack_pos = shadow_stack;
+ shadow_stack_end = shadow_stack + kInitStackSize;
+}
+
+#if !SANITIZER_GO
+void MemoryProfiler(u64 uptime) {
+ if (ctx->memprof_fd == kInvalidFd)
+ return;
+ InternalMmapVector<char> buf(4096);
+ WriteMemoryProfile(buf.data(), buf.size(), uptime);
+ WriteToFile(ctx->memprof_fd, buf.data(), internal_strlen(buf.data()));
+}
+
+void InitializeMemoryProfiler() {
+ ctx->memprof_fd = kInvalidFd;
+ const char *fname = flags()->profile_memory;
+ if (!fname || !fname[0])
+ return;
+ if (internal_strcmp(fname, "stdout") == 0) {
+ ctx->memprof_fd = 1;
+ } else if (internal_strcmp(fname, "stderr") == 0) {
+ ctx->memprof_fd = 2;
+ } else {
+ InternalScopedString filename;
+ filename.append("%s.%d", fname, (int)internal_getpid());
+ ctx->memprof_fd = OpenFile(filename.data(), WrOnly);
+ if (ctx->memprof_fd == kInvalidFd) {
+ Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
+ filename.data());
+ return;
+ }
+ }
+ MemoryProfiler(0);
+ MaybeSpawnBackgroundThread();
+}
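+// A usage sketch based on the flag handling above: memory profiling is
+// enabled with the 'profile_memory' flag, e.g.
+//   TSAN_OPTIONS=profile_memory=memprof ./app
+// writes periodic profiles to "memprof.<pid>"; the special values "stdout"
+// and "stderr" send the profile to the corresponding stream instead.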
+
+static void *BackgroundThread(void *arg) {
+ // This is a non-initialized non-user thread, nothing to see here.
+ // We don't use ScopedIgnoreInterceptors, because we want ignores to be
+ // enabled even when the thread function exits (e.g. during pthread thread
+ // shutdown code).
+ cur_thread_init()->ignore_interceptors++;
+ const u64 kMs2Ns = 1000 * 1000;
+ const u64 start = NanoTime();
+
+ u64 last_flush = NanoTime();
+ uptr last_rss = 0;
+ for (int i = 0;
+ atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
+ i++) {
+ SleepForMillis(100);
+ u64 now = NanoTime();
+
+ // Flush memory if requested.
+ if (flags()->flush_memory_ms > 0) {
+ if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
+ VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
+ FlushShadowMemory();
+ last_flush = NanoTime();
+ }
+ }
+ if (flags()->memory_limit_mb > 0) {
+ uptr rss = GetRSS();
+ uptr limit = uptr(flags()->memory_limit_mb) << 20;
+ VPrintf(1, "ThreadSanitizer: memory flush check"
+ " RSS=%llu LAST=%llu LIMIT=%llu\n",
+ (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
+ if (2 * rss > limit + last_rss) {
+ VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
+ FlushShadowMemory();
+ rss = GetRSS();
+ VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
+ }
+ last_rss = rss;
+ }
+
+ MemoryProfiler(now - start);
+
+ // Flush symbolizer cache if requested.
+ if (flags()->flush_symbolizer_ms > 0) {
+ u64 last = atomic_load(&ctx->last_symbolize_time_ns,
+ memory_order_relaxed);
+ if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
+ Lock l(&ctx->report_mtx);
+ ScopedErrorReportLock l2;
+ SymbolizeFlush();
+ atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
+ }
+ }
+ }
+ return nullptr;
+}
+
+static void StartBackgroundThread() {
+ ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
+}
+
+#ifndef __mips__
+static void StopBackgroundThread() {
+ atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
+ internal_join_thread(ctx->background_thread);
+ ctx->background_thread = 0;
+}
+#endif
+#endif
+
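+// Releases the shadow pages backing [addr, addr+size) back to the OS; used,
+// for example, by UnmapShadow below when the application munmaps a region.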
+void DontNeedShadowFor(uptr addr, uptr size) {
+ ReleaseMemoryPagesToOS(reinterpret_cast<uptr>(MemToShadow(addr)),
+ reinterpret_cast<uptr>(MemToShadow(addr + size)));
+}
+
+#if !SANITIZER_GO
+// We call UnmapShadow before the actual munmap, at that point we don't yet
+// know if the provided address/size are sane. We can't call UnmapShadow
+// after the actual munmap because at that point the memory range can
+// already be reused for something else, so we can't rely on the munmap
+// return value to understand if the values are sane.
+// While calling munmap with insane values (non-canonical address, negative
+// size, etc) is an error, the kernel won't crash. We must also try not to
+// crash as the failure mode is very confusing (a page fault inside of the
+// runtime on some derived shadow address).
+static bool IsValidMmapRange(uptr addr, uptr size) {
+ if (size == 0)
+ return true;
+ if (static_cast<sptr>(size) < 0)
+ return false;
+ if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
+ return false;
+ // Check that if the start of the region belongs to one of app ranges,
+ // end of the region belongs to the same region.
+ const uptr ranges[][2] = {
+ {LoAppMemBeg(), LoAppMemEnd()},
+ {MidAppMemBeg(), MidAppMemEnd()},
+ {HiAppMemBeg(), HiAppMemEnd()},
+ };
+ for (auto range : ranges) {
+ if (addr >= range[0] && addr < range[1])
+ return addr + size <= range[1];
+ }
+ return false;
+}
+
+void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
+ if (size == 0 || !IsValidMmapRange(addr, size))
+ return;
+ DontNeedShadowFor(addr, size);
+ ScopedGlobalProcessor sgp;
+ ctx->metamap.ResetRange(thr->proc(), addr, size);
+}
+#endif
+
+void MapShadow(uptr addr, uptr size) {
+ // Global data is not 64K aligned, but there are no adjacent mappings,
+ // so we can get away with unaligned mapping.
+ // CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
+ const uptr kPageSize = GetPageSizeCached();
+ uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
+ uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
+ if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
+ "shadow"))
+ Die();
+
+ // Meta shadow is 2:1, so tread carefully.
+ static bool data_mapped = false;
+ static uptr mapped_meta_end = 0;
+ uptr meta_begin = (uptr)MemToMeta(addr);
+ uptr meta_end = (uptr)MemToMeta(addr + size);
+ meta_begin = RoundDownTo(meta_begin, 64 << 10);
+ meta_end = RoundUpTo(meta_end, 64 << 10);
+ if (!data_mapped) {
+ // First call maps data+bss.
+ data_mapped = true;
+ if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
+ "meta shadow"))
+ Die();
+ } else {
+    // Mapping contiguous heap.
+ // Windows wants 64K alignment.
+ meta_begin = RoundDownTo(meta_begin, 64 << 10);
+ meta_end = RoundUpTo(meta_end, 64 << 10);
+ if (meta_end <= mapped_meta_end)
+ return;
+ if (meta_begin < mapped_meta_end)
+ meta_begin = mapped_meta_end;
+ if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
+ "meta shadow"))
+ Die();
+ mapped_meta_end = meta_end;
+ }
+ VPrintf(2, "mapped meta shadow for (0x%zx-0x%zx) at (0x%zx-0x%zx)\n", addr,
+ addr + size, meta_begin, meta_end);
+}
+
+void MapThreadTrace(uptr addr, uptr size, const char *name) {
+ DPrintf("#0: Mapping trace at 0x%zx-0x%zx(0x%zx)\n", addr, addr + size, size);
+ CHECK_GE(addr, TraceMemBeg());
+ CHECK_LE(addr + size, TraceMemEnd());
+ CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
+ if (!MmapFixedSuperNoReserve(addr, size, name)) {
+ Printf("FATAL: ThreadSanitizer can not mmap thread trace (0x%zx/0x%zx)\n",
+ addr, size);
+ Die();
+ }
+}
+
+#if !SANITIZER_GO
+static void OnStackUnwind(const SignalContext &sig, const void *,
+ BufferedStackTrace *stack) {
+ stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
+ common_flags()->fast_unwind_on_fatal);
+}
+
+static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
+ HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
+}
+#endif
+
+void CheckUnwind() {
+  // There is a high probability that interceptors will check-fail as well;
+  // on the other hand, there is no sense in processing interceptors
+  // since we are going to die soon.
+ ScopedIgnoreInterceptors ignore;
+#if !SANITIZER_GO
+ cur_thread()->ignore_sync++;
+ cur_thread()->ignore_reads_and_writes++;
+#endif
+ PrintCurrentStackSlow(StackTrace::GetCurrentPc());
+}
+
+bool is_initialized;
+
+void Initialize(ThreadState *thr) {
+ // Thread safe because done before all threads exist.
+ if (is_initialized)
+ return;
+ is_initialized = true;
+ // We are not ready to handle interceptors yet.
+ ScopedIgnoreInterceptors ignore;
+ SanitizerToolName = "ThreadSanitizer";
+ // Install tool-specific callbacks in sanitizer_common.
+ SetCheckUnwindCallback(CheckUnwind);
+
+ ctx = new(ctx_placeholder) Context;
+ const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
+ const char *options = GetEnv(env_name);
+ CacheBinaryName();
+ CheckASLR();
+ InitializeFlags(&ctx->flags, options, env_name);
+ AvoidCVE_2016_2143();
+ __sanitizer::InitializePlatformEarly();
+ __tsan::InitializePlatformEarly();
+
+#if !SANITIZER_GO
+ // Re-exec ourselves if we need to set additional env or command line args.
+ MaybeReexec();
+
+ InitializeAllocator();
+ ReplaceSystemMalloc();
+#endif
+ if (common_flags()->detect_deadlocks)
+ ctx->dd = DDetector::Create(flags());
+ Processor *proc = ProcCreate();
+ ProcWire(proc, thr);
+ InitializeInterceptors();
+ InitializePlatform();
+ InitializeDynamicAnnotations();
+#if !SANITIZER_GO
+ InitializeShadowMemory();
+ InitializeAllocatorLate();
+ InstallDeadlySignalHandlers(TsanOnDeadlySignal);
+#endif
+ // Setup correct file descriptor for error reports.
+ __sanitizer_set_report_path(common_flags()->log_path);
+ InitializeSuppressions();
+#if !SANITIZER_GO
+ InitializeLibIgnore();
+ Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
+#endif
+
+ VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
+ (int)internal_getpid());
+
+ // Initialize thread 0.
+ Tid tid = ThreadCreate(thr, 0, 0, true);
+ CHECK_EQ(tid, kMainTid);
+ ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
+#if TSAN_CONTAINS_UBSAN
+ __ubsan::InitAsPlugin();
+#endif
+ ctx->initialized = true;
+
+#if !SANITIZER_GO
+ Symbolizer::LateInitialize();
+ InitializeMemoryProfiler();
+#endif
+
+ if (flags()->stop_on_start) {
+ Printf("ThreadSanitizer is suspended at startup (pid %d)."
+ " Call __tsan_resume().\n",
+ (int)internal_getpid());
+ while (__tsan_resumed == 0) {}
+ }
+
+ OnInitialize();
+}
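+// A usage note for the 'stop_on_start' flag handled above:
+//   TSAN_OPTIONS=stop_on_start=1 ./app
+// keeps the process spinning at startup until a debugger attaches and calls
+// __tsan_resume().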
+
+void MaybeSpawnBackgroundThread() {
+ // On MIPS, TSan initialization is run before
+ // __pthread_initialize_minimal_internal() is finished, so we can not spawn
+ // new threads.
+#if !SANITIZER_GO && !defined(__mips__)
+ static atomic_uint32_t bg_thread = {};
+ if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
+ atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
+ StartBackgroundThread();
+ SetSandboxingCallback(StopBackgroundThread);
+ }
+#endif
+}
+
+
+int Finalize(ThreadState *thr) {
+ bool failed = false;
+
+ if (common_flags()->print_module_map == 1)
+ DumpProcessMap();
+
+ if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
+ SleepForMillis(flags()->atexit_sleep_ms);
+
+ // Wait for pending reports.
+ ctx->report_mtx.Lock();
+ { ScopedErrorReportLock l; }
+ ctx->report_mtx.Unlock();
+
+#if !SANITIZER_GO
+ if (Verbosity()) AllocatorPrintStats();
+#endif
+
+ ThreadFinalize(thr);
+
+ if (ctx->nreported) {
+ failed = true;
+#if !SANITIZER_GO
+ Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
+#else
+ Printf("Found %d data race(s)\n", ctx->nreported);
+#endif
+ }
+
+ if (common_flags()->print_suppressions)
+ PrintMatchedSuppressions();
+
+ failed = OnFinalize(failed);
+
+ return failed ? common_flags()->exitcode : 0;
+}
+
+#if !SANITIZER_GO
+void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
+ ctx->thread_registry.Lock();
+ ctx->report_mtx.Lock();
+ ScopedErrorReportLock::Lock();
+ AllocatorLock();
+ // Suppress all reports in the pthread_atfork callbacks.
+ // Reports will deadlock on the report_mtx.
+ // We could ignore sync operations as well,
+ // but so far it's unclear if it will do more good or harm.
+ // Unnecessarily ignoring things can lead to false positives later.
+ thr->suppress_reports++;
+ // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
+ // we'll assert in CheckNoLocks() unless we ignore interceptors.
+ // On OS X libSystem_atfork_prepare/parent/child callbacks are called
+ // after/before our callbacks and they call free.
+ thr->ignore_interceptors++;
+ // Disables memory write in OnUserAlloc/Free.
+ thr->ignore_reads_and_writes++;
+
+ __tsan_test_only_on_fork();
+}
+
+void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
+ thr->suppress_reports--; // Enabled in ForkBefore.
+ thr->ignore_interceptors--;
+ thr->ignore_reads_and_writes--;
+ AllocatorUnlock();
+ ScopedErrorReportLock::Unlock();
+ ctx->report_mtx.Unlock();
+ ctx->thread_registry.Unlock();
+}
+
+void ForkChildAfter(ThreadState *thr, uptr pc,
+ bool start_thread) NO_THREAD_SAFETY_ANALYSIS {
+ thr->suppress_reports--; // Enabled in ForkBefore.
+ thr->ignore_interceptors--;
+ thr->ignore_reads_and_writes--;
+ AllocatorUnlock();
+ ScopedErrorReportLock::Unlock();
+ ctx->report_mtx.Unlock();
+ ctx->thread_registry.Unlock();
+
+ uptr nthread = 0;
+ ctx->thread_registry.GetNumberOfThreads(0, 0, &nthread /* alive threads */);
+ VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
+ " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
+ if (nthread == 1) {
+ if (start_thread)
+ StartBackgroundThread();
+ } else {
+ // We've just forked a multi-threaded process. We cannot reasonably function
+ // after that (some mutexes may be locked before fork). So just enable
+ // ignores for everything in the hope that we will exec soon.
+ ctx->after_multithreaded_fork = true;
+ thr->ignore_interceptors++;
+ ThreadIgnoreBegin(thr, pc);
+ ThreadIgnoreSyncBegin(thr, pc);
+ }
+}
+#endif
+
+#if SANITIZER_GO
+NOINLINE
+void GrowShadowStack(ThreadState *thr) {
+ const int sz = thr->shadow_stack_end - thr->shadow_stack;
+ const int newsz = 2 * sz;
+ auto *newstack = (uptr *)Alloc(newsz * sizeof(uptr));
+ internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
+ Free(thr->shadow_stack);
+ thr->shadow_stack = newstack;
+ thr->shadow_stack_pos = newstack + sz;
+ thr->shadow_stack_end = newstack + newsz;
+}
+#endif
+
+StackID CurrentStackId(ThreadState *thr, uptr pc) {
+ if (!thr->is_inited) // May happen during bootstrap.
+ return kInvalidStackID;
+ if (pc != 0) {
+#if !SANITIZER_GO
+ DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
+#else
+ if (thr->shadow_stack_pos == thr->shadow_stack_end)
+ GrowShadowStack(thr);
+#endif
+ thr->shadow_stack_pos[0] = pc;
+ thr->shadow_stack_pos++;
+ }
+ StackID id = StackDepotPut(
+ StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
+ if (pc != 0)
+ thr->shadow_stack_pos--;
+ return id;
+}
+
+namespace v3 {
+
+NOINLINE
+void TraceSwitchPart(ThreadState *thr) {
+ Trace *trace = &thr->tctx->trace;
+ Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
+ DCHECK_EQ(reinterpret_cast<uptr>(pos + 1) & TracePart::kAlignment, 0);
+ auto *part = trace->parts.Back();
+ DPrintf("TraceSwitchPart part=%p pos=%p\n", part, pos);
+ if (part) {
+ // We can get here when we still have space in the current trace part.
+ // The fast-path check in TraceAcquire has false positives in the middle of
+ // the part. Check if we are indeed at the end of the current part or not,
+ // and fill any gaps with NopEvent's.
+ Event *end = &part->events[TracePart::kSize];
+ DCHECK_GE(pos, &part->events[0]);
+ DCHECK_LE(pos, end);
+ if (pos + 1 < end) {
+ if ((reinterpret_cast<uptr>(pos) & TracePart::kAlignment) ==
+ TracePart::kAlignment)
+ *pos++ = NopEvent;
+ *pos++ = NopEvent;
+ DCHECK_LE(pos + 2, end);
+ atomic_store_relaxed(&thr->trace_pos, reinterpret_cast<uptr>(pos));
+      // Ensure we set up the trace so that the next TraceAcquire
+      // won't detect the trace part end.
+ Event *ev;
+ CHECK(TraceAcquire(thr, &ev));
+ return;
+ }
+ // We are indeed at the end.
+ for (; pos < end; pos++) *pos = NopEvent;
+ }
+#if !SANITIZER_GO
+ if (ctx->after_multithreaded_fork) {
+ // We just need to survive till exec.
+ CHECK(part);
+ atomic_store_relaxed(&thr->trace_pos,
+ reinterpret_cast<uptr>(&part->events[0]));
+ return;
+ }
+#endif
+ part = new (MmapOrDie(sizeof(TracePart), "TracePart")) TracePart();
+ part->trace = trace;
+ thr->trace_prev_pc = 0;
+ {
+ Lock lock(&trace->mtx);
+ trace->parts.PushBack(part);
+ atomic_store_relaxed(&thr->trace_pos,
+ reinterpret_cast<uptr>(&part->events[0]));
+ }
+ // Make this part self-sufficient by restoring the current stack
+ // and mutex set in the beginning of the trace.
+ TraceTime(thr);
+ for (uptr *pos = &thr->shadow_stack[0]; pos < thr->shadow_stack_pos; pos++)
+ CHECK(TryTraceFunc(thr, *pos));
+ for (uptr i = 0; i < thr->mset.Size(); i++) {
+ MutexSet::Desc d = thr->mset.Get(i);
+ TraceMutexLock(thr, d.write ? EventType::kLock : EventType::kRLock, 0,
+ d.addr, d.stack_id);
+ }
+}
+
+} // namespace v3
+
+void TraceSwitch(ThreadState *thr) {
+#if !SANITIZER_GO
+ if (ctx->after_multithreaded_fork)
+ return;
+#endif
+ thr->nomalloc++;
+ Trace *thr_trace = ThreadTrace(thr->tid);
+ Lock l(&thr_trace->mtx);
+ unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
+ TraceHeader *hdr = &thr_trace->headers[trace];
+ hdr->epoch0 = thr->fast_state.epoch();
+ ObtainCurrentStack(thr, 0, &hdr->stack0);
+ hdr->mset0 = thr->mset;
+ thr->nomalloc--;
+}
+
+Trace *ThreadTrace(Tid tid) { return (Trace *)GetThreadTraceHeader(tid); }
+
+uptr TraceTopPC(ThreadState *thr) {
+ Event *events = (Event*)GetThreadTrace(thr->tid);
+ uptr pc = events[thr->fast_state.GetTracePos()];
+ return pc;
+}
+
+uptr TraceSize() {
+ return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
+}
+
+uptr TraceParts() {
+ return TraceSize() / kTracePartSize;
+}
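+// Worked example (assuming kTracePartSize == 1 << kTracePartSizeBits):
+// TraceSize() == kTracePartSize << (history_size + 1), so
+// TraceParts() == 1 << (history_size + 1); e.g. history_size=2 gives 8 parts.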
+
+#if !SANITIZER_GO
+extern "C" void __tsan_trace_switch() {
+ TraceSwitch(cur_thread());
+}
+
+extern "C" void __tsan_report_race() {
+ ReportRace(cur_thread());
+}
+#endif
+
+void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
+ DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
+ thr->ignore_reads_and_writes++;
+ CHECK_GT(thr->ignore_reads_and_writes, 0);
+ thr->fast_state.SetIgnoreBit();
+#if !SANITIZER_GO
+ if (pc && !ctx->after_multithreaded_fork)
+ thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
+#endif
+}
+
+void ThreadIgnoreEnd(ThreadState *thr) {
+ DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
+ CHECK_GT(thr->ignore_reads_and_writes, 0);
+ thr->ignore_reads_and_writes--;
+ if (thr->ignore_reads_and_writes == 0) {
+ thr->fast_state.ClearIgnoreBit();
+#if !SANITIZER_GO
+ thr->mop_ignore_set.Reset();
+#endif
+ }
+}
+
+#if !SANITIZER_GO
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+uptr __tsan_testonly_shadow_stack_current_size() {
+ ThreadState *thr = cur_thread();
+ return thr->shadow_stack_pos - thr->shadow_stack;
+}
+#endif
+
+void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
+ DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
+ thr->ignore_sync++;
+ CHECK_GT(thr->ignore_sync, 0);
+#if !SANITIZER_GO
+ if (pc && !ctx->after_multithreaded_fork)
+ thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
+#endif
+}
+
+void ThreadIgnoreSyncEnd(ThreadState *thr) {
+ DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
+ CHECK_GT(thr->ignore_sync, 0);
+ thr->ignore_sync--;
+#if !SANITIZER_GO
+ if (thr->ignore_sync == 0)
+ thr->sync_ignore_set.Reset();
+#endif
+}
+
+bool MD5Hash::operator==(const MD5Hash &other) const {
+ return hash[0] == other.hash[0] && hash[1] == other.hash[1];
+}
+
+#if SANITIZER_DEBUG
+void build_consistency_debug() {}
+#else
+void build_consistency_release() {}
+#endif
+
+} // namespace __tsan
+
+#if SANITIZER_CHECK_DEADLOCKS
+namespace __sanitizer {
+using namespace __tsan;
+MutexMeta mutex_meta[] = {
+ {MutexInvalid, "Invalid", {}},
+ {MutexThreadRegistry, "ThreadRegistry", {}},
+ {MutexTypeTrace, "Trace", {}},
+ {MutexTypeReport,
+ "Report",
+ {MutexTypeSyncVar, MutexTypeGlobalProc, MutexTypeTrace}},
+ {MutexTypeSyncVar, "SyncVar", {MutexTypeTrace}},
+ {MutexTypeAnnotations, "Annotations", {}},
+ {MutexTypeAtExit, "AtExit", {MutexTypeSyncVar}},
+ {MutexTypeFired, "Fired", {MutexLeaf}},
+ {MutexTypeRacy, "Racy", {MutexLeaf}},
+ {MutexTypeGlobalProc, "GlobalProc", {}},
+ {MutexTypeInternalAlloc, "InternalAlloc", {MutexLeaf}},
+ {},
+};
+
+void PrintMutexPC(uptr pc) { StackTrace(&pc, 1).Print(); }
+} // namespace __sanitizer
+#endif
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_rtl.h b/compiler-rt/lib/tsan/rtl-old/tsan_rtl.h
new file mode 100644
index 000000000000..c71b27e1cbf5
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_rtl.h
@@ -0,0 +1,796 @@
+//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Main internal TSan header file.
+//
+// Ground rules:
+// - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
+// function-scope locals)
+// - All functions/classes/etc reside in namespace __tsan, except for those
+// declared in tsan_interface.h.
+// - Platform-specific files should be used instead of ifdefs (*).
+// - No system headers included in header files (*).
+// - Platform-specific headers included only in platform-specific files (*).
+//
+// (*) Except when inlining is critical for performance.
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_RTL_H
+#define TSAN_RTL_H
+
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_asm.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
+#include "sanitizer_common/sanitizer_libignore.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
+#include "sanitizer_common/sanitizer_vector.h"
+#include "tsan_clock.h"
+#include "tsan_defs.h"
+#include "tsan_flags.h"
+#include "tsan_ignoreset.h"
+#include "tsan_mman.h"
+#include "tsan_mutexset.h"
+#include "tsan_platform.h"
+#include "tsan_report.h"
+#include "tsan_shadow.h"
+#include "tsan_stack_trace.h"
+#include "tsan_sync.h"
+#include "tsan_trace.h"
+
+#if SANITIZER_WORDSIZE != 64
+# error "ThreadSanitizer is supported only on 64-bit platforms"
+#endif
+
+namespace __tsan {
+
+#if !SANITIZER_GO
+struct MapUnmapCallback;
+#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__)
+
+struct AP32 {
+ static const uptr kSpaceBeg = 0;
+ static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+ static const uptr kMetadataSize = 0;
+ typedef __sanitizer::CompactSizeClassMap SizeClassMap;
+ static const uptr kRegionSizeLog = 20;
+ using AddressSpaceView = LocalAddressSpaceView;
+ typedef __tsan::MapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+typedef SizeClassAllocator32<AP32> PrimaryAllocator;
+#else
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+# if defined(__s390x__)
+ typedef MappingS390x Mapping;
+# else
+ typedef Mapping48AddressSpace Mapping;
+# endif
+ static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
+ static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg;
+ static const uptr kMetadataSize = 0;
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef __tsan::MapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ using AddressSpaceView = LocalAddressSpaceView;
+};
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+#endif
+typedef CombinedAllocator<PrimaryAllocator> Allocator;
+typedef Allocator::AllocatorCache AllocatorCache;
+Allocator *allocator();
+#endif
+
+struct ThreadSignalContext;
+
+struct JmpBuf {
+ uptr sp;
+ int int_signal_send;
+ bool in_blocking_func;
+ uptr in_signal_handler;
+ uptr *shadow_stack_pos;
+};
+
+// A Processor represents a physical thread, or a P for Go.
+// It is used to store internal resources like the allocator cache, and does
+// not participate in race-detection logic (invisible to the end user).
+// In C++ it is tied to an OS thread just like ThreadState; however, ideally
+// it should be tied to a CPU (this way we will have fewer allocator caches).
+// In Go it is tied to a P, so there are significantly fewer Processors than
+// ThreadStates (which are tied to Gs).
+// A ThreadState must be wired with a Processor to handle events.
+struct Processor {
+ ThreadState *thr; // currently wired thread, or nullptr
+#if !SANITIZER_GO
+ AllocatorCache alloc_cache;
+ InternalAllocatorCache internal_alloc_cache;
+#endif
+ DenseSlabAllocCache block_cache;
+ DenseSlabAllocCache sync_cache;
+ DenseSlabAllocCache clock_cache;
+ DDPhysicalThread *dd_pt;
+};
+
+#if !SANITIZER_GO
+// ScopedGlobalProcessor temporarily sets up a global processor for the
+// current thread, if it does not have one. Intended for interceptors that can
+// run at the very end of a thread, when we have already destroyed the
+// thread's processor.
+struct ScopedGlobalProcessor {
+ ScopedGlobalProcessor();
+ ~ScopedGlobalProcessor();
+};
+#endif
+
+// This struct is stored in TLS.
+struct ThreadState {
+ FastState fast_state;
+  // Synch epoch represents the thread's epoch before the last synchronization
+  // action. It allows reducing the number of shadow state updates.
+  // For example, if fast_synch_epoch=100 and the last write to addr X was at
+  // epoch=150, and we are processing a write to X from the same thread at
+  // epoch=200, we do nothing, because both writes happen in the same
+  // 'synch epoch'.
+  // That is, if another memory access does not race with the former write,
+  // it does not race with the latter as well.
+  // QUESTION: can we squeeze this into ThreadState::Fast?
+  // E.g. ThreadState::Fast is 44 bits, 32 are taken by synch_epoch and 12 are
+  // taken by epoch between synchs.
+  // This way we can save one load from tls.
+ u64 fast_synch_epoch;
+ // Technically `current` should be a separate THREADLOCAL variable;
+ // but it is placed here in order to share cache line with previous fields.
+ ThreadState* current;
+  // This is a slow-path flag. On the fast path, fast_state.GetIgnoreBit() is
+  // read. We do not distinguish between ignoring reads and writes
+  // for better performance.
+ int ignore_reads_and_writes;
+ atomic_sint32_t pending_signals;
+ int ignore_sync;
+ int suppress_reports;
+ // Go does not support ignores.
+#if !SANITIZER_GO
+ IgnoreSet mop_ignore_set;
+ IgnoreSet sync_ignore_set;
+#endif
+ uptr *shadow_stack;
+ uptr *shadow_stack_end;
+ uptr *shadow_stack_pos;
+ RawShadow *racy_shadow_addr;
+ RawShadow racy_state[2];
+ MutexSet mset;
+ ThreadClock clock;
+#if !SANITIZER_GO
+ Vector<JmpBuf> jmp_bufs;
+ int ignore_interceptors;
+#endif
+ const Tid tid;
+ const int unique_id;
+ bool in_symbolizer;
+ bool in_ignored_lib;
+ bool is_inited;
+ bool is_dead;
+ bool is_freeing;
+ bool is_vptr_access;
+ const uptr stk_addr;
+ const uptr stk_size;
+ const uptr tls_addr;
+ const uptr tls_size;
+ ThreadContext *tctx;
+
+ DDLogicalThread *dd_lt;
+
+ // Current wired Processor, or nullptr. Required to handle any events.
+ Processor *proc1;
+#if !SANITIZER_GO
+ Processor *proc() { return proc1; }
+#else
+ Processor *proc();
+#endif
+
+ atomic_uintptr_t in_signal_handler;
+ ThreadSignalContext *signal_ctx;
+
+#if !SANITIZER_GO
+ StackID last_sleep_stack_id;
+ ThreadClock last_sleep_clock;
+#endif
+
+ // Set in regions of runtime that must be signal-safe and fork-safe.
+ // If set, malloc must not be called.
+ int nomalloc;
+
+ const ReportDesc *current_report;
+
+ // Current position in tctx->trace.Back()->events (Event*).
+ atomic_uintptr_t trace_pos;
+ // PC of the last memory access, used to compute PC deltas in the trace.
+ uptr trace_prev_pc;
+ Sid sid;
+ Epoch epoch;
+
+ explicit ThreadState(Context *ctx, Tid tid, int unique_id, u64 epoch,
+ unsigned reuse_count, uptr stk_addr, uptr stk_size,
+ uptr tls_addr, uptr tls_size);
+} ALIGNED(SANITIZER_CACHE_LINE_SIZE);
+
+#if !SANITIZER_GO
+#if SANITIZER_MAC || SANITIZER_ANDROID
+ThreadState *cur_thread();
+void set_cur_thread(ThreadState *thr);
+void cur_thread_finalize();
+inline ThreadState *cur_thread_init() { return cur_thread(); }
+# else
+__attribute__((tls_model("initial-exec")))
+extern THREADLOCAL char cur_thread_placeholder[];
+inline ThreadState *cur_thread() {
+ return reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current;
+}
+inline ThreadState *cur_thread_init() {
+ ThreadState *thr = reinterpret_cast<ThreadState *>(cur_thread_placeholder);
+ if (UNLIKELY(!thr->current))
+ thr->current = thr;
+ return thr->current;
+}
+inline void set_cur_thread(ThreadState *thr) {
+ reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr;
+}
+inline void cur_thread_finalize() { }
+# endif // SANITIZER_MAC || SANITIZER_ANDROID
+#endif // SANITIZER_GO
+
+class ThreadContext final : public ThreadContextBase {
+ public:
+ explicit ThreadContext(Tid tid);
+ ~ThreadContext();
+ ThreadState *thr;
+ StackID creation_stack_id;
+ SyncClock sync;
+ // Epoch at which the thread had started.
+ // If we see an event from the thread stamped by an older epoch,
+ // the event is from a dead thread that shared tid with this thread.
+ u64 epoch0;
+ u64 epoch1;
+
+ v3::Trace trace;
+
+ // Override superclass callbacks.
+ void OnDead() override;
+ void OnJoined(void *arg) override;
+ void OnFinished() override;
+ void OnStarted(void *arg) override;
+ void OnCreated(void *arg) override;
+ void OnReset() override;
+ void OnDetached(void *arg) override;
+};
+
+struct RacyStacks {
+ MD5Hash hash[2];
+ bool operator==(const RacyStacks &other) const;
+};
+
+struct RacyAddress {
+ uptr addr_min;
+ uptr addr_max;
+};
+
+struct FiredSuppression {
+ ReportType type;
+ uptr pc_or_addr;
+ Suppression *supp;
+};
+
+struct Context {
+ Context();
+
+ bool initialized;
+#if !SANITIZER_GO
+ bool after_multithreaded_fork;
+#endif
+
+ MetaMap metamap;
+
+ Mutex report_mtx;
+ int nreported;
+ atomic_uint64_t last_symbolize_time_ns;
+
+ void *background_thread;
+ atomic_uint32_t stop_background_thread;
+
+ ThreadRegistry thread_registry;
+
+ Mutex racy_mtx;
+ Vector<RacyStacks> racy_stacks;
+ Vector<RacyAddress> racy_addresses;
+  // The number of fired suppressions may be large.
+ Mutex fired_suppressions_mtx;
+ InternalMmapVector<FiredSuppression> fired_suppressions;
+ DDetector *dd;
+
+ ClockAlloc clock_alloc;
+
+ Flags flags;
+ fd_t memprof_fd;
+
+ Mutex slot_mtx;
+};
+
+extern Context *ctx; // The one and the only global runtime context.
+
+ALWAYS_INLINE Flags *flags() {
+ return &ctx->flags;
+}
+
+struct ScopedIgnoreInterceptors {
+ ScopedIgnoreInterceptors() {
+#if !SANITIZER_GO
+ cur_thread()->ignore_interceptors++;
+#endif
+ }
+
+ ~ScopedIgnoreInterceptors() {
+#if !SANITIZER_GO
+ cur_thread()->ignore_interceptors--;
+#endif
+ }
+};
+
+const char *GetObjectTypeFromTag(uptr tag);
+const char *GetReportHeaderFromTag(uptr tag);
+uptr TagFromShadowStackFrame(uptr pc);
+
+class ScopedReportBase {
+ public:
+ void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, StackTrace stack,
+ const MutexSet *mset);
+ void AddStack(StackTrace stack, bool suppressable = false);
+ void AddThread(const ThreadContext *tctx, bool suppressable = false);
+ void AddThread(Tid unique_tid, bool suppressable = false);
+ void AddUniqueTid(Tid unique_tid);
+ void AddMutex(const SyncVar *s);
+ u64 AddMutex(u64 id);
+ void AddLocation(uptr addr, uptr size);
+ void AddSleep(StackID stack_id);
+ void SetCount(int count);
+
+ const ReportDesc *GetReport() const;
+
+ protected:
+ ScopedReportBase(ReportType typ, uptr tag);
+ ~ScopedReportBase();
+
+ private:
+ ReportDesc *rep_;
+ // Symbolizer makes lots of intercepted calls. If we try to process them,
+ // at best it will cause deadlocks on internal mutexes.
+ ScopedIgnoreInterceptors ignore_interceptors_;
+
+ void AddDeadMutex(u64 id);
+
+ ScopedReportBase(const ScopedReportBase &) = delete;
+ void operator=(const ScopedReportBase &) = delete;
+};
+
+class ScopedReport : public ScopedReportBase {
+ public:
+ explicit ScopedReport(ReportType typ, uptr tag = kExternalTagNone);
+ ~ScopedReport();
+
+ private:
+ ScopedErrorReportLock lock_;
+};
+
+bool ShouldReport(ThreadState *thr, ReportType typ);
+ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);
+void RestoreStack(Tid tid, const u64 epoch, VarSizeStackTrace *stk,
+ MutexSet *mset, uptr *tag = nullptr);
+
+// The stack could look like:
+// <start> | <main> | <foo> | tag | <bar>
+// This will extract the tag and keep:
+// <start> | <main> | <foo> | <bar>
+template<typename StackTraceTy>
+void ExtractTagFromStack(StackTraceTy *stack, uptr *tag = nullptr) {
+ if (stack->size < 2) return;
+ uptr possible_tag_pc = stack->trace[stack->size - 2];
+ uptr possible_tag = TagFromShadowStackFrame(possible_tag_pc);
+ if (possible_tag == kExternalTagNone) return;
+ stack->trace_buffer[stack->size - 2] = stack->trace_buffer[stack->size - 1];
+ stack->size -= 1;
+ if (tag) *tag = possible_tag;
+}
+
+template<typename StackTraceTy>
+void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack,
+ uptr *tag = nullptr) {
+ uptr size = thr->shadow_stack_pos - thr->shadow_stack;
+ uptr start = 0;
+ if (size + !!toppc > kStackTraceMax) {
+ start = size + !!toppc - kStackTraceMax;
+ size = kStackTraceMax - !!toppc;
+ }
+ stack->Init(&thr->shadow_stack[start], size, toppc);
+ ExtractTagFromStack(stack, tag);
+}
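+// If the shadow stack is deeper than kStackTraceMax frames, only the
+// innermost frames (plus the optional toppc entry) are kept, i.e. the
+// outermost callers are dropped from the resulting trace.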
+
+#define GET_STACK_TRACE_FATAL(thr, pc) \
+ VarSizeStackTrace stack; \
+ ObtainCurrentStack(thr, pc, &stack); \
+ stack.ReverseOrder();
+
+void MapShadow(uptr addr, uptr size);
+void MapThreadTrace(uptr addr, uptr size, const char *name);
+void DontNeedShadowFor(uptr addr, uptr size);
+void UnmapShadow(ThreadState *thr, uptr addr, uptr size);
+void InitializeShadowMemory();
+void InitializeInterceptors();
+void InitializeLibIgnore();
+void InitializeDynamicAnnotations();
+
+void ForkBefore(ThreadState *thr, uptr pc);
+void ForkParentAfter(ThreadState *thr, uptr pc);
+void ForkChildAfter(ThreadState *thr, uptr pc, bool start_thread);
+
+void ReportRace(ThreadState *thr);
+bool OutputReport(ThreadState *thr, const ScopedReport &srep);
+bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
+bool IsExpectedReport(uptr addr, uptr size);
+
+#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
+# define DPrintf Printf
+#else
+# define DPrintf(...)
+#endif
+
+#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
+# define DPrintf2 Printf
+#else
+# define DPrintf2(...)
+#endif
+
+StackID CurrentStackId(ThreadState *thr, uptr pc);
+ReportStack *SymbolizeStackId(StackID stack_id);
+void PrintCurrentStack(ThreadState *thr, uptr pc);
+void PrintCurrentStackSlow(uptr pc); // uses libunwind
+MBlock *JavaHeapBlock(uptr addr, uptr *start);
+
+void Initialize(ThreadState *thr);
+void MaybeSpawnBackgroundThread();
+int Finalize(ThreadState *thr);
+
+void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
+void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);
+
+void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
+void MemoryAccessImpl(ThreadState *thr, uptr addr,
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
+ u64 *shadow_mem, Shadow cur);
+void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
+ uptr size, bool is_write);
+void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
+ AccessType typ);
+
+const int kSizeLog1 = 0;
+const int kSizeLog2 = 1;
+const int kSizeLog4 = 2;
+const int kSizeLog8 = 3;
+
+ALWAYS_INLINE
+void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
+ AccessType typ) {
+ int size_log;
+ switch (size) {
+ case 1:
+ size_log = kSizeLog1;
+ break;
+ case 2:
+ size_log = kSizeLog2;
+ break;
+ case 4:
+ size_log = kSizeLog4;
+ break;
+ default:
+ DCHECK_EQ(size, 8);
+ size_log = kSizeLog8;
+ break;
+ }
+ bool is_write = !(typ & kAccessRead);
+ bool is_atomic = typ & kAccessAtomic;
+ if (typ & kAccessVptr)
+ thr->is_vptr_access = true;
+ if (typ & kAccessFree)
+ thr->is_freeing = true;
+ MemoryAccess(thr, pc, addr, size_log, is_write, is_atomic);
+ if (typ & kAccessVptr)
+ thr->is_vptr_access = false;
+ if (typ & kAccessFree)
+ thr->is_freeing = false;
+}
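+// For illustration, a plain 8-byte read can be reported through this wrapper
+// as
+//   MemoryAccess(thr, pc, addr, 8, kAccessRead);
+// which forwards to the size-log based entry point above with
+// is_write=false and is_atomic=false.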
+
+void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
+void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
+void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
+void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
+ uptr size);
+
+void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
+void ThreadIgnoreEnd(ThreadState *thr);
+void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
+void ThreadIgnoreSyncEnd(ThreadState *thr);
+
+void FuncEntry(ThreadState *thr, uptr pc);
+void FuncExit(ThreadState *thr);
+
+Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
+void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
+ ThreadType thread_type);
+void ThreadFinish(ThreadState *thr);
+Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid);
+void ThreadJoin(ThreadState *thr, uptr pc, Tid tid);
+void ThreadDetach(ThreadState *thr, uptr pc, Tid tid);
+void ThreadFinalize(ThreadState *thr);
+void ThreadSetName(ThreadState *thr, const char *name);
+int ThreadCount(ThreadState *thr);
+void ProcessPendingSignalsImpl(ThreadState *thr);
+void ThreadNotJoined(ThreadState *thr, uptr pc, Tid tid, uptr uid);
+
+Processor *ProcCreate();
+void ProcDestroy(Processor *proc);
+void ProcWire(Processor *proc, ThreadState *thr);
+void ProcUnwire(Processor *proc, ThreadState *thr);
+
+// Note: the parameter is called flagz, because flags is already taken
+// by the global function that returns flags.
+void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
+void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
+void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
+void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0,
+ int rec = 1);
+int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
+void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
+void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
+void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
+void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
+void MutexRepair(ThreadState *thr, uptr pc, uptr addr); // call on EOWNERDEAD
+void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr);
+
+void Acquire(ThreadState *thr, uptr pc, uptr addr);
+// AcquireGlobal synchronizes the current thread with all other threads.
+// In terms of happens-before relation, it draws a HB edge from all threads
+// (where they happen to execute right now) to the current thread. We use it to
+// handle Go finalizers. Namely, finalizer goroutine executes AcquireGlobal
+// right before executing finalizers. This provides a coarse, but simple
+// approximation of the actual required synchronization.
+void AcquireGlobal(ThreadState *thr);
+void Release(ThreadState *thr, uptr pc, uptr addr);
+void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr);
+void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
+void AfterSleep(ThreadState *thr, uptr pc);
+void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
+void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
+void ReleaseStoreAcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
+void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
+void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
+
+// The hacky call uses a custom calling convention and an assembly thunk.
+// It is considerably faster than a normal call for the caller
+// if it is not executed (it is intended for slow paths from hot functions).
+// The trick is that the call preserves all registers and the compiler
+// does not treat it as a call.
+// If it does not work for you, use a normal call.
+#if !SANITIZER_DEBUG && defined(__x86_64__) && !SANITIZER_MAC
+// The caller may not create the stack frame for itself at all,
+// so we create a reserve stack frame for it (1024b must be enough).
+#define HACKY_CALL(f) \
+ __asm__ __volatile__("sub $1024, %%rsp;" \
+ CFI_INL_ADJUST_CFA_OFFSET(1024) \
+ ".hidden " #f "_thunk;" \
+ "call " #f "_thunk;" \
+ "add $1024, %%rsp;" \
+ CFI_INL_ADJUST_CFA_OFFSET(-1024) \
+ ::: "memory", "cc");
+#else
+#define HACKY_CALL(f) f()
+#endif
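+// Example use (see TraceAddEvent below):
+//   HACKY_CALL(__tsan_trace_switch);
+// expands either to the register-preserving assembly-thunk call or to a plain
+// __tsan_trace_switch() call, depending on the platform.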
+
+void TraceSwitch(ThreadState *thr);
+uptr TraceTopPC(ThreadState *thr);
+uptr TraceSize();
+uptr TraceParts();
+Trace *ThreadTrace(Tid tid);
+
+extern "C" void __tsan_trace_switch();
+void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
+ EventType typ, u64 addr) {
+ if (!kCollectHistory)
+ return;
+ // TraceSwitch accesses shadow_stack, but it's called infrequently,
+ // so we check it here proactively.
+ DCHECK(thr->shadow_stack);
+ DCHECK_GE((int)typ, 0);
+ DCHECK_LE((int)typ, 7);
+ DCHECK_EQ(GetLsb(addr, kEventPCBits), addr);
+ u64 pos = fs.GetTracePos();
+ if (UNLIKELY((pos % kTracePartSize) == 0)) {
+#if !SANITIZER_GO
+ HACKY_CALL(__tsan_trace_switch);
+#else
+ TraceSwitch(thr);
+#endif
+ }
+ Event *trace = (Event*)GetThreadTrace(fs.tid());
+ Event *evp = &trace[pos];
+ Event ev = (u64)addr | ((u64)typ << kEventPCBits);
+ *evp = ev;
+}
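+// The encoding above packs the address/PC into the low kEventPCBits bits and
+// the 3-bit event type into the bits above them (hence the DCHECKs), e.g. a
+// function entry is traced as
+//   Event ev = (u64)pc | ((u64)EventTypeFuncEnter << kEventPCBits);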
+
+#if !SANITIZER_GO
+uptr ALWAYS_INLINE HeapEnd() {
+ return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
+}
+#endif
+
+ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags);
+void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber);
+void FiberSwitch(ThreadState *thr, uptr pc, ThreadState *fiber, unsigned flags);
+
+// These need to match __tsan_switch_to_fiber_* flags defined in
+// tsan_interface.h. See documentation there as well.
+enum FiberSwitchFlags {
+ FiberSwitchFlagNoSync = 1 << 0, // __tsan_switch_to_fiber_no_sync
+};
+
+ALWAYS_INLINE void ProcessPendingSignals(ThreadState *thr) {
+ if (UNLIKELY(atomic_load_relaxed(&thr->pending_signals)))
+ ProcessPendingSignalsImpl(thr);
+}
+
+extern bool is_initialized;
+
+ALWAYS_INLINE
+void LazyInitialize(ThreadState *thr) {
+ // If we can use .preinit_array, assume that __tsan_init
+ // called from .preinit_array initializes runtime before
+ // any instrumented code.
+#if !SANITIZER_CAN_USE_PREINIT_ARRAY
+ if (UNLIKELY(!is_initialized))
+ Initialize(thr);
+#endif
+}
+
+namespace v3 {
+
+void TraceSwitchPart(ThreadState *thr);
+bool RestoreStack(Tid tid, EventType type, Sid sid, Epoch epoch, uptr addr,
+ uptr size, AccessType typ, VarSizeStackTrace *pstk,
+ MutexSet *pmset, uptr *ptag);
+
+template <typename EventT>
+ALWAYS_INLINE WARN_UNUSED_RESULT bool TraceAcquire(ThreadState *thr,
+ EventT **ev) {
+ Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
+#if SANITIZER_DEBUG
+ // TraceSwitch acquires these mutexes,
+ // so we lock them here to detect deadlocks more reliably.
+ { Lock lock(&ctx->slot_mtx); }
+ { Lock lock(&thr->tctx->trace.mtx); }
+ TracePart *current = thr->tctx->trace.parts.Back();
+ if (current) {
+ DCHECK_GE(pos, &current->events[0]);
+ DCHECK_LE(pos, &current->events[TracePart::kSize]);
+ } else {
+ DCHECK_EQ(pos, nullptr);
+ }
+#endif
+  // TracePart is allocated with mmap and is at least 4K aligned.
+  // So the following check is a faster way to check for part end.
+  // It may have false positives in the middle of the trace;
+  // they are filtered out in TraceSwitchPart.
+ if (UNLIKELY(((uptr)(pos + 1) & TracePart::kAlignment) == 0))
+ return false;
+ *ev = reinterpret_cast<EventT *>(pos);
+ return true;
+}
+
+template <typename EventT>
+ALWAYS_INLINE void TraceRelease(ThreadState *thr, EventT *evp) {
+ DCHECK_LE(evp + 1, &thr->tctx->trace.parts.Back()->events[TracePart::kSize]);
+ atomic_store_relaxed(&thr->trace_pos, (uptr)(evp + 1));
+}
+
+template <typename EventT>
+void TraceEvent(ThreadState *thr, EventT ev) {
+ EventT *evp;
+ if (!TraceAcquire(thr, &evp)) {
+ TraceSwitchPart(thr);
+ UNUSED bool res = TraceAcquire(thr, &evp);
+ DCHECK(res);
+ }
+ *evp = ev;
+ TraceRelease(thr, evp);
+}
+
+ALWAYS_INLINE WARN_UNUSED_RESULT bool TryTraceFunc(ThreadState *thr,
+ uptr pc = 0) {
+ if (!kCollectHistory)
+ return true;
+ EventFunc *ev;
+ if (UNLIKELY(!TraceAcquire(thr, &ev)))
+ return false;
+ ev->is_access = 0;
+ ev->is_func = 1;
+ ev->pc = pc;
+ TraceRelease(thr, ev);
+ return true;
+}
+
+WARN_UNUSED_RESULT
+bool TryTraceMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
+ AccessType typ);
+WARN_UNUSED_RESULT
+bool TryTraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
+ AccessType typ);
+void TraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
+ AccessType typ);
+void TraceFunc(ThreadState *thr, uptr pc = 0);
+void TraceMutexLock(ThreadState *thr, EventType type, uptr pc, uptr addr,
+ StackID stk);
+void TraceMutexUnlock(ThreadState *thr, uptr addr);
+void TraceTime(ThreadState *thr);
+
+} // namespace v3
+
+void GrowShadowStack(ThreadState *thr);
+
+ALWAYS_INLINE
+void FuncEntry(ThreadState *thr, uptr pc) {
+ DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void *)pc);
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
+ }
+
+ // Shadow stack maintenance can be replaced with
+ // stack unwinding during trace switch (which presumably must be faster).
+ DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
+#if !SANITIZER_GO
+ DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
+#else
+ if (thr->shadow_stack_pos == thr->shadow_stack_end)
+ GrowShadowStack(thr);
+#endif
+ thr->shadow_stack_pos[0] = pc;
+ thr->shadow_stack_pos++;
+}
+
+ALWAYS_INLINE
+void FuncExit(ThreadState *thr) {
+ DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
+ }
+
+ DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
+#if !SANITIZER_GO
+ DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
+#endif
+ thr->shadow_stack_pos--;
+}
+
+#if !SANITIZER_GO
+extern void (*on_initialize)(void);
+extern int (*on_finalize)(int);
+#endif
+
+} // namespace __tsan
+
+#endif // TSAN_RTL_H
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_rtl_aarch64.S b/compiler-rt/lib/tsan/rtl-old/tsan_rtl_aarch64.S
new file mode 100644
index 000000000000..e0b4c71dfed9
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_rtl_aarch64.S
@@ -0,0 +1,245 @@
+// The content of this file is AArch64-only:
+#if defined(__aarch64__)
+
+#include "sanitizer_common/sanitizer_asm.h"
+
+#if defined(__APPLE__)
+.align 2
+
+.section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
+.long _setjmp$non_lazy_ptr
+_setjmp$non_lazy_ptr:
+.indirect_symbol _setjmp
+.long 0
+
+.section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
+.long __setjmp$non_lazy_ptr
+__setjmp$non_lazy_ptr:
+.indirect_symbol __setjmp
+.long 0
+
+.section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
+.long _sigsetjmp$non_lazy_ptr
+_sigsetjmp$non_lazy_ptr:
+.indirect_symbol _sigsetjmp
+.long 0
+#endif
+
+#if !defined(__APPLE__)
+.section .text
+#else
+.section __TEXT,__text
+.align 3
+#endif
+
+ASM_HIDDEN(__tsan_setjmp)
+.comm _ZN14__interception11real_setjmpE,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(setjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(setjmp))
+ASM_SYMBOL_INTERCEPTOR(setjmp):
+ CFI_STARTPROC
+
+ // Save frame/link register
+ stp x29, x30, [sp, -32]!
+ CFI_DEF_CFA_OFFSET (32)
+ CFI_OFFSET (29, -32)
+ CFI_OFFSET (30, -24)
+
+ // Adjust the SP for previous frame
+ add x29, sp, 0
+ CFI_DEF_CFA_REGISTER (29)
+
+ // Save env parameter
+ str x0, [sp, 16]
+ CFI_OFFSET (0, -16)
+
+ // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)`
+ add x0, x29, 32
+
+ // call tsan interceptor
+ bl ASM_SYMBOL(__tsan_setjmp)
+
+ // Restore env parameter
+ ldr x0, [sp, 16]
+ CFI_RESTORE (0)
+
+ // Restore frame/link register
+ ldp x29, x30, [sp], 32
+ CFI_RESTORE (29)
+ CFI_RESTORE (30)
+ CFI_DEF_CFA (31, 0)
+
+ // tail jump to libc setjmp
+#if !defined(__APPLE__)
+ adrp x1, :got:_ZN14__interception11real_setjmpE
+ ldr x1, [x1, #:got_lo12:_ZN14__interception11real_setjmpE]
+ ldr x1, [x1]
+#else
+ adrp x1, _setjmp$non_lazy_ptr@page
+ add x1, x1, _setjmp$non_lazy_ptr@pageoff
+ ldr x1, [x1]
+#endif
+ br x1
+
+ CFI_ENDPROC
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(setjmp))
+
+.comm _ZN14__interception12real__setjmpE,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(_setjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(_setjmp))
+ASM_SYMBOL_INTERCEPTOR(_setjmp):
+ CFI_STARTPROC
+
+ // Save frame/link register
+ stp x29, x30, [sp, -32]!
+ CFI_DEF_CFA_OFFSET (32)
+ CFI_OFFSET (29, -32)
+ CFI_OFFSET (30, -24)
+
+ // Adjust the SP for previous frame
+ add x29, sp, 0
+ CFI_DEF_CFA_REGISTER (29)
+
+ // Save env parameter
+ str x0, [sp, 16]
+ CFI_OFFSET (0, -16)
+
+ // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)`
+ add x0, x29, 32
+
+ // call tsan interceptor
+ bl ASM_SYMBOL(__tsan_setjmp)
+
+ // Restore env parameter
+ ldr x0, [sp, 16]
+ CFI_RESTORE (0)
+
+ // Restore frame/link register
+ ldp x29, x30, [sp], 32
+ CFI_RESTORE (29)
+ CFI_RESTORE (30)
+ CFI_DEF_CFA (31, 0)
+
+ // tail jump to libc setjmp
+#if !defined(__APPLE__)
+ adrp x1, :got:_ZN14__interception12real__setjmpE
+ ldr x1, [x1, #:got_lo12:_ZN14__interception12real__setjmpE]
+ ldr x1, [x1]
+#else
+ adrp x1, __setjmp$non_lazy_ptr@page
+ add x1, x1, __setjmp$non_lazy_ptr@pageoff
+ ldr x1, [x1]
+#endif
+ br x1
+
+ CFI_ENDPROC
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(_setjmp))
+
+.comm _ZN14__interception14real_sigsetjmpE,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
+ASM_SYMBOL_INTERCEPTOR(sigsetjmp):
+ CFI_STARTPROC
+
+ // Save frame/link register
+ stp x29, x30, [sp, -32]!
+ CFI_DEF_CFA_OFFSET (32)
+ CFI_OFFSET (29, -32)
+ CFI_OFFSET (30, -24)
+
+ // Adjust the SP for previous frame
+ add x29, sp, 0
+ CFI_DEF_CFA_REGISTER (29)
+
+ // Save env and savesigs parameter
+ stp x0, x1, [sp, 16]
+ CFI_OFFSET (0, -16)
+ CFI_OFFSET (1, -8)
+
+ // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)`
+ add x0, x29, 32
+
+ // call tsan interceptor
+ bl ASM_SYMBOL(__tsan_setjmp)
+
+ // Restore env and savesigs parameter
+ ldp x0, x1, [sp, 16]
+ CFI_RESTORE (0)
+ CFI_RESTORE (1)
+
+ // Restore frame/link register
+ ldp x29, x30, [sp], 32
+ CFI_RESTORE (29)
+ CFI_RESTORE (30)
+ CFI_DEF_CFA (31, 0)
+
+ // tail jump to libc sigsetjmp
+#if !defined(__APPLE__)
+ adrp x2, :got:_ZN14__interception14real_sigsetjmpE
+ ldr x2, [x2, #:got_lo12:_ZN14__interception14real_sigsetjmpE]
+ ldr x2, [x2]
+#else
+ adrp x2, _sigsetjmp$non_lazy_ptr@page
+ add x2, x2, _sigsetjmp$non_lazy_ptr@pageoff
+ ldr x2, [x2]
+#endif
+ br x2
+ CFI_ENDPROC
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
+
+#if !defined(__APPLE__)
+.comm _ZN14__interception16real___sigsetjmpE,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(__sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp))
+ASM_SYMBOL_INTERCEPTOR(__sigsetjmp):
+ CFI_STARTPROC
+
+ // Save frame/link register
+ stp x29, x30, [sp, -32]!
+ CFI_DEF_CFA_OFFSET (32)
+ CFI_OFFSET (29, -32)
+ CFI_OFFSET (30, -24)
+
+ // Adjust the SP for previous frame
+ add x29, sp, 0
+ CFI_DEF_CFA_REGISTER (29)
+
+ // Save env and savesigs parameter
+ stp x0, x1, [sp, 16]
+ CFI_OFFSET (0, -16)
+ CFI_OFFSET (1, -8)
+
+ // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)`
+ add x0, x29, 32
+
+ // call tsan interceptor
+ bl ASM_SYMBOL(__tsan_setjmp)
+
+ // Restore env and savesigs parameter
+ ldp x0, x1, [sp, 16]
+ CFI_RESTORE (0)
+ CFI_RESTORE (1)
+
+ // Restore frame/link register
+ ldp x29, x30, [sp], 32
+ CFI_RESTORE (29)
+ CFI_RESTORE (30)
+ CFI_DEF_CFA (31, 0)
+
+ // tail jump to libc __sigsetjmp
+#if !defined(__APPLE__)
+ adrp x2, :got:_ZN14__interception16real___sigsetjmpE
+ ldr x2, [x2, #:got_lo12:_ZN14__interception16real___sigsetjmpE]
+ ldr x2, [x2]
+#else
+ adrp x2, ASM_SYMBOL(__sigsetjmp)@page
+ add x2, x2, ASM_SYMBOL(__sigsetjmp)@pageoff
+#endif
+ br x2
+ CFI_ENDPROC
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp))
+#endif
+
+NO_EXEC_STACK_DIRECTIVE
+
+#endif
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_rtl_access.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_rtl_access.cpp
new file mode 100644
index 000000000000..7365fdaa3038
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_rtl_access.cpp
@@ -0,0 +1,604 @@
+//===-- tsan_rtl_access.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Definitions of memory access and function entry/exit entry points.
+//===----------------------------------------------------------------------===//
+
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+namespace v3 {
+
+ALWAYS_INLINE USED bool TryTraceMemoryAccess(ThreadState *thr, uptr pc,
+ uptr addr, uptr size,
+ AccessType typ) {
+ DCHECK(size == 1 || size == 2 || size == 4 || size == 8);
+ if (!kCollectHistory)
+ return true;
+ EventAccess *ev;
+ if (UNLIKELY(!TraceAcquire(thr, &ev)))
+ return false;
+ u64 size_log = size == 1 ? 0 : size == 2 ? 1 : size == 4 ? 2 : 3;
+ uptr pc_delta = pc - thr->trace_prev_pc + (1 << (EventAccess::kPCBits - 1));
+ thr->trace_prev_pc = pc;
+ if (LIKELY(pc_delta < (1 << EventAccess::kPCBits))) {
+ ev->is_access = 1;
+ ev->is_read = !!(typ & kAccessRead);
+ ev->is_atomic = !!(typ & kAccessAtomic);
+ ev->size_log = size_log;
+ ev->pc_delta = pc_delta;
+ DCHECK_EQ(ev->pc_delta, pc_delta);
+ ev->addr = CompressAddr(addr);
+ TraceRelease(thr, ev);
+ return true;
+ }
+ auto *evex = reinterpret_cast<EventAccessExt *>(ev);
+ evex->is_access = 0;
+ evex->is_func = 0;
+ evex->type = EventType::kAccessExt;
+ evex->is_read = !!(typ & kAccessRead);
+ evex->is_atomic = !!(typ & kAccessAtomic);
+ evex->size_log = size_log;
+ evex->addr = CompressAddr(addr);
+ evex->pc = pc;
+ TraceRelease(thr, evex);
+ return true;
+}
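+// A note on the fast path above: the compact EventAccess form is emitted only
+// when the biased PC delta fits into EventAccess::kPCBits bits; for larger
+// jumps the extended EventAccessExt record carrying the full PC is written
+// instead.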
+
+ALWAYS_INLINE USED bool TryTraceMemoryAccessRange(ThreadState *thr, uptr pc,
+ uptr addr, uptr size,
+ AccessType typ) {
+ if (!kCollectHistory)
+ return true;
+ EventAccessRange *ev;
+ if (UNLIKELY(!TraceAcquire(thr, &ev)))
+ return false;
+ thr->trace_prev_pc = pc;
+ ev->is_access = 0;
+ ev->is_func = 0;
+ ev->type = EventType::kAccessRange;
+ ev->is_read = !!(typ & kAccessRead);
+ ev->is_free = !!(typ & kAccessFree);
+ ev->size_lo = size;
+ ev->pc = CompressAddr(pc);
+ ev->addr = CompressAddr(addr);
+ ev->size_hi = size >> EventAccessRange::kSizeLoBits;
+ TraceRelease(thr, ev);
+ return true;
+}
+
+void TraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
+ AccessType typ) {
+ if (LIKELY(TryTraceMemoryAccessRange(thr, pc, addr, size, typ)))
+ return;
+ TraceSwitchPart(thr);
+ UNUSED bool res = TryTraceMemoryAccessRange(thr, pc, addr, size, typ);
+ DCHECK(res);
+}
+
+void TraceFunc(ThreadState *thr, uptr pc) {
+ if (LIKELY(TryTraceFunc(thr, pc)))
+ return;
+ TraceSwitchPart(thr);
+ UNUSED bool res = TryTraceFunc(thr, pc);
+ DCHECK(res);
+}
+
+void TraceMutexLock(ThreadState *thr, EventType type, uptr pc, uptr addr,
+ StackID stk) {
+ DCHECK(type == EventType::kLock || type == EventType::kRLock);
+ if (!kCollectHistory)
+ return;
+ EventLock ev;
+ ev.is_access = 0;
+ ev.is_func = 0;
+ ev.type = type;
+ ev.pc = CompressAddr(pc);
+ ev.stack_lo = stk;
+ ev.stack_hi = stk >> EventLock::kStackIDLoBits;
+ ev._ = 0;
+ ev.addr = CompressAddr(addr);
+ TraceEvent(thr, ev);
+}
+
+void TraceMutexUnlock(ThreadState *thr, uptr addr) {
+ if (!kCollectHistory)
+ return;
+ EventUnlock ev;
+ ev.is_access = 0;
+ ev.is_func = 0;
+ ev.type = EventType::kUnlock;
+ ev._ = 0;
+ ev.addr = CompressAddr(addr);
+ TraceEvent(thr, ev);
+}
+
+void TraceTime(ThreadState *thr) {
+ if (!kCollectHistory)
+ return;
+ EventTime ev;
+ ev.is_access = 0;
+ ev.is_func = 0;
+ ev.type = EventType::kTime;
+ ev.sid = static_cast<u64>(thr->sid);
+ ev.epoch = static_cast<u64>(thr->epoch);
+ ev._ = 0;
+ TraceEvent(thr, ev);
+}
+
+} // namespace v3
+
+ALWAYS_INLINE
+Shadow LoadShadow(u64 *p) {
+ u64 raw = atomic_load((atomic_uint64_t *)p, memory_order_relaxed);
+ return Shadow(raw);
+}
+
+ALWAYS_INLINE
+void StoreShadow(u64 *sp, u64 s) {
+ atomic_store((atomic_uint64_t *)sp, s, memory_order_relaxed);
+}
+
+ALWAYS_INLINE
+void StoreIfNotYetStored(u64 *sp, u64 *s) {
+ StoreShadow(sp, *s);
+ *s = 0;
+}
+
+extern "C" void __tsan_report_race();
+
+ALWAYS_INLINE
+void HandleRace(ThreadState *thr, u64 *shadow_mem, Shadow cur, Shadow old) {
+ thr->racy_state[0] = cur.raw();
+ thr->racy_state[1] = old.raw();
+ thr->racy_shadow_addr = shadow_mem;
+#if !SANITIZER_GO
+ HACKY_CALL(__tsan_report_race);
+#else
+ ReportRace(thr);
+#endif
+}
+
+static inline bool HappensBefore(Shadow old, ThreadState *thr) {
+ return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
+}
+
+ALWAYS_INLINE
+void MemoryAccessImpl1(ThreadState *thr, uptr addr, int kAccessSizeLog,
+ bool kAccessIsWrite, bool kIsAtomic, u64 *shadow_mem,
+ Shadow cur) {
+ // This potentially can live in an MMX/SSE scratch register.
+ // The required intrinsics are:
+ // __m128i _mm_move_epi64(__m128i*);
+ // _mm_storel_epi64(u64*, __m128i);
+ u64 store_word = cur.raw();
+ bool stored = false;
+
+  // Scan all the shadow values and dispatch to 4 categories:
+  // same, replace, candidate and race (see comments below).
+  // We consider only 3 cases regarding access sizes:
+  // equal, intersect and not intersect. Initially I considered
+  // larger and smaller as well, which allowed replacing some
+  // 'candidates' with 'same' or 'replace', but I think
+  // it's just not worth it (performance- and complexity-wise).
+
+ Shadow old(0);
+
+  // In release mode we manually unroll the loop,
+ // because empirically gcc generates better code this way.
+ // However, we can't afford unrolling in debug mode, because the function
+ // consumes almost 4K of stack. Gtest gives only 4K of stack to death test
+ // threads, which is not enough for the unrolled loop.
+#if SANITIZER_DEBUG
+ for (int idx = 0; idx < 4; idx++) {
+# include "tsan_update_shadow_word.inc"
+ }
+#else
+ int idx = 0;
+# include "tsan_update_shadow_word.inc"
+ idx = 1;
+ if (stored) {
+# include "tsan_update_shadow_word.inc"
+ } else {
+# include "tsan_update_shadow_word.inc"
+ }
+ idx = 2;
+ if (stored) {
+# include "tsan_update_shadow_word.inc"
+ } else {
+# include "tsan_update_shadow_word.inc"
+ }
+ idx = 3;
+ if (stored) {
+# include "tsan_update_shadow_word.inc"
+ } else {
+# include "tsan_update_shadow_word.inc"
+ }
+#endif
+
+  // We did not find any races and have already stored
+  // the current access info, so we are done.
+ if (LIKELY(stored))
+ return;
+ // choose a random candidate slot and replace it
+ StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
+ return;
+RACE:
+ HandleRace(thr, shadow_mem, cur, old);
+ return;
+}
+
+void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
+ AccessType typ) {
+ DCHECK(!(typ & kAccessAtomic));
+ const bool kAccessIsWrite = !(typ & kAccessRead);
+ const bool kIsAtomic = false;
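+  // Split the range into 8/4/2/1-byte accesses so that no single access
+  // crosses an 8-byte shadow cell boundary.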
+ while (size) {
+ int size1 = 1;
+ int kAccessSizeLog = kSizeLog1;
+ if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
+ size1 = 8;
+ kAccessSizeLog = kSizeLog8;
+ } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
+ size1 = 4;
+ kAccessSizeLog = kSizeLog4;
+ } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
+ size1 = 2;
+ kAccessSizeLog = kSizeLog2;
+ }
+ MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
+ addr += size1;
+ size -= size1;
+ }
+}
+
+ALWAYS_INLINE
+bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
+ Shadow cur(a);
+ for (uptr i = 0; i < kShadowCnt; i++) {
+ Shadow old(LoadShadow(&s[i]));
+ if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
+ old.TidWithIgnore() == cur.TidWithIgnore() &&
+ old.epoch() > sync_epoch && old.IsAtomic() == cur.IsAtomic() &&
+ old.IsRead() <= cur.IsRead())
+ return true;
+ }
+ return false;
+}
+
+#if TSAN_VECTORIZE
+# define SHUF(v0, v1, i0, i1, i2, i3) \
+ _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(v0), \
+ _mm_castsi128_ps(v1), \
+ (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
+ALWAYS_INLINE
+bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
+ // This is an optimized version of ContainsSameAccessSlow.
+ // load current access into access[0:63]
+ const m128 access = _mm_cvtsi64_si128(a);
+ // duplicate high part of access in addr0:
+ // addr0[0:31] = access[32:63]
+ // addr0[32:63] = access[32:63]
+ // addr0[64:95] = access[32:63]
+ // addr0[96:127] = access[32:63]
+ const m128 addr0 = SHUF(access, access, 1, 1, 1, 1);
+ // load 4 shadow slots
+ const m128 shadow0 = _mm_load_si128((__m128i *)s);
+ const m128 shadow1 = _mm_load_si128((__m128i *)s + 1);
+ // load high parts of 4 shadow slots into addr_vect:
+ // addr_vect[0:31] = shadow0[32:63]
+ // addr_vect[32:63] = shadow0[96:127]
+ // addr_vect[64:95] = shadow1[32:63]
+ // addr_vect[96:127] = shadow1[96:127]
+ m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3);
+ if (!is_write) {
+ // set IsRead bit in addr_vect
+ const m128 rw_mask1 = _mm_cvtsi64_si128(1 << 15);
+ const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
+ addr_vect = _mm_or_si128(addr_vect, rw_mask);
+ }
+ // addr0 == addr_vect?
+ const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect);
+ // epoch1[0:63] = sync_epoch
+ const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch);
+ // epoch[0:31] = sync_epoch[0:31]
+ // epoch[32:63] = sync_epoch[0:31]
+ // epoch[64:95] = sync_epoch[0:31]
+ // epoch[96:127] = sync_epoch[0:31]
+ const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0);
+ // load low parts of shadow cell epochs into epoch_vect:
+ // epoch_vect[0:31] = shadow0[0:31]
+ // epoch_vect[32:63] = shadow0[64:95]
+ // epoch_vect[64:95] = shadow1[0:31]
+ // epoch_vect[96:127] = shadow1[64:95]
+ const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
+ // epoch_vect >= sync_epoch?
+ const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch);
+ // addr_res & epoch_res
+ const m128 res = _mm_and_si128(addr_res, epoch_res);
+ // mask[0] = res[7]
+ // mask[1] = res[15]
+ // ...
+ // mask[15] = res[127]
+ const int mask = _mm_movemask_epi8(res);
+ return mask != 0;
+}
+#endif
+
+ALWAYS_INLINE
+bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
+#if TSAN_VECTORIZE
+ bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
+ // NOTE: this check can fail if the shadow is concurrently mutated
+ // by other threads. But it still can be useful if you modify
+ // ContainsSameAccessFast and want to ensure that it's not completely broken.
+ // DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
+ return res;
+#else
+ return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
+#endif
+}
+
+ALWAYS_INLINE USED void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
+ int kAccessSizeLog, bool kAccessIsWrite,
+ bool kIsAtomic) {
+ RawShadow *shadow_mem = MemToShadow(addr);
+ DPrintf2(
+ "#%d: MemoryAccess: @%p %p size=%d"
+ " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
+ (int)thr->fast_state.tid(), (void *)pc, (void *)addr,
+ (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
+ (uptr)shadow_mem[0], (uptr)shadow_mem[1], (uptr)shadow_mem[2],
+ (uptr)shadow_mem[3]);
+#if SANITIZER_DEBUG
+ if (!IsAppMem(addr)) {
+ Printf("Access to non app mem %zx\n", addr);
+ DCHECK(IsAppMem(addr));
+ }
+ if (!IsShadowMem(shadow_mem)) {
+ Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
+ DCHECK(IsShadowMem(shadow_mem));
+ }
+#endif
+
+ if (!SANITIZER_GO && !kAccessIsWrite && *shadow_mem == kShadowRodata) {
+ // Access to .rodata section, no races here.
+ // Measurements show that it can be 10-20% of all memory accesses.
+ return;
+ }
+
+ FastState fast_state = thr->fast_state;
+ if (UNLIKELY(fast_state.GetIgnoreBit())) {
+ return;
+ }
+
+ Shadow cur(fast_state);
+ cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
+ cur.SetWrite(kAccessIsWrite);
+ cur.SetAtomic(kIsAtomic);
+
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(), thr->fast_synch_epoch,
+ kAccessIsWrite))) {
+ return;
+ }
+
+ if (kCollectHistory) {
+ fast_state.IncrementEpoch();
+ thr->fast_state = fast_state;
+ TraceAddEvent(thr, fast_state, EventTypeMop, pc);
+ cur.IncrementEpoch();
+ }
+
+ MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
+ shadow_mem, cur);
+}
+
+// Called by MemoryAccessRange below.
+ALWAYS_INLINE USED void MemoryAccessImpl(ThreadState *thr, uptr addr,
+ int kAccessSizeLog,
+ bool kAccessIsWrite, bool kIsAtomic,
+ u64 *shadow_mem, Shadow cur) {
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(), thr->fast_synch_epoch,
+ kAccessIsWrite))) {
+ return;
+ }
+
+ MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
+ shadow_mem, cur);
+}
+
+static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
+ u64 val) {
+ (void)thr;
+ (void)pc;
+ if (size == 0)
+ return;
+ // FIXME: fix me.
+ uptr offset = addr % kShadowCell;
+ if (offset) {
+ offset = kShadowCell - offset;
+ if (size <= offset)
+ return;
+ addr += offset;
+ size -= offset;
+ }
+ DCHECK_EQ(addr % 8, 0);
+ // If a user passes some insane arguments (memset(0)),
+ // let it just crash as usual.
+ if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
+ return;
+  // We don't want to touch lots of shadow memory.
+  // If a program maps a 10MB stack, there is no need to reset the whole range.
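+  // Round the size up to a multiple of kShadowCell.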
+ size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
+ // UnmapOrDie/MmapFixedNoReserve does not work on Windows.
+ if (SANITIZER_WINDOWS || size < common_flags()->clear_shadow_mmap_threshold) {
+ RawShadow *p = MemToShadow(addr);
+ CHECK(IsShadowMem(p));
+ CHECK(IsShadowMem(p + size * kShadowCnt / kShadowCell - 1));
+ // FIXME: may overwrite a part outside the region
+ for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
+ p[i++] = val;
+ for (uptr j = 1; j < kShadowCnt; j++) p[i++] = 0;
+ }
+ } else {
+ // The region is big, reset only beginning and end.
+ const uptr kPageSize = GetPageSizeCached();
+ RawShadow *begin = MemToShadow(addr);
+ RawShadow *end = begin + size / kShadowCell * kShadowCnt;
+ RawShadow *p = begin;
+    // Set at least the first kPageSize/2 of shadow and continue up to a page boundary.
+ while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
+ *p++ = val;
+ for (uptr j = 1; j < kShadowCnt; j++) *p++ = 0;
+ }
+ // Reset middle part.
+ RawShadow *p1 = p;
+ p = RoundDown(end, kPageSize);
+ if (!MmapFixedSuperNoReserve((uptr)p1, (uptr)p - (uptr)p1))
+ Die();
+ // Set the ending.
+ while (p < end) {
+ *p++ = val;
+ for (uptr j = 1; j < kShadowCnt; j++) *p++ = 0;
+ }
+ }
+}
+
+void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
+ MemoryRangeSet(thr, pc, addr, size, 0);
+}
+
+void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
+  // Processing more than 1k (4k of shadow) is expensive,
+  // can cause excessive memory consumption (the user does not necessarily
+  // touch the whole range), and is most likely unnecessary.
+ if (size > 1024)
+ size = 1024;
+ CHECK_EQ(thr->is_freeing, false);
+ thr->is_freeing = true;
+ MemoryAccessRange(thr, pc, addr, size, true);
+ thr->is_freeing = false;
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+ }
+ Shadow s(thr->fast_state);
+ s.ClearIgnoreBit();
+ s.MarkAsFreed();
+ s.SetWrite(true);
+ s.SetAddr0AndSizeLog(0, 3);
+ MemoryRangeSet(thr, pc, addr, size, s.raw());
+}
+
+void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+ }
+ Shadow s(thr->fast_state);
+ s.ClearIgnoreBit();
+ s.SetWrite(true);
+ s.SetAddr0AndSizeLog(0, 3);
+ MemoryRangeSet(thr, pc, addr, size, s.raw());
+}
+
+void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
+ uptr size) {
+ if (thr->ignore_reads_and_writes == 0)
+ MemoryRangeImitateWrite(thr, pc, addr, size);
+ else
+ MemoryResetRange(thr, pc, addr, size);
+}
+
+void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
+ bool is_write) {
+ if (size == 0)
+ return;
+
+ RawShadow *shadow_mem = MemToShadow(addr);
+ DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n", thr->tid,
+ (void *)pc, (void *)addr, (int)size, is_write);
+
+#if SANITIZER_DEBUG
+ if (!IsAppMem(addr)) {
+ Printf("Access to non app mem %zx\n", addr);
+ DCHECK(IsAppMem(addr));
+ }
+ if (!IsAppMem(addr + size - 1)) {
+ Printf("Access to non app mem %zx\n", addr + size - 1);
+ DCHECK(IsAppMem(addr + size - 1));
+ }
+ if (!IsShadowMem(shadow_mem)) {
+ Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
+ DCHECK(IsShadowMem(shadow_mem));
+ }
+ if (!IsShadowMem(shadow_mem + size * kShadowCnt / 8 - 1)) {
+ Printf("Bad shadow addr %p (%zx)\n", shadow_mem + size * kShadowCnt / 8 - 1,
+ addr + size - 1);
+ DCHECK(IsShadowMem(shadow_mem + size * kShadowCnt / 8 - 1));
+ }
+#endif
+
+ if (*shadow_mem == kShadowRodata) {
+ DCHECK(!is_write);
+ // Access to .rodata section, no races here.
+ // Measurements show that it can be 10-20% of all memory accesses.
+ return;
+ }
+
+ FastState fast_state = thr->fast_state;
+ if (fast_state.GetIgnoreBit())
+ return;
+
+ fast_state.IncrementEpoch();
+ thr->fast_state = fast_state;
+ TraceAddEvent(thr, fast_state, EventTypeMop, pc);
+
+ bool unaligned = (addr % kShadowCell) != 0;
+
+ // Handle unaligned beginning, if any.
+ for (; addr % kShadowCell && size; addr++, size--) {
+ int const kAccessSizeLog = 0;
+ Shadow cur(fast_state);
+ cur.SetWrite(is_write);
+ cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false, shadow_mem,
+ cur);
+ }
+ if (unaligned)
+ shadow_mem += kShadowCnt;
+ // Handle middle part, if any.
+ for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) {
+ int const kAccessSizeLog = 3;
+ Shadow cur(fast_state);
+ cur.SetWrite(is_write);
+ cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false, shadow_mem,
+ cur);
+ shadow_mem += kShadowCnt;
+ }
+ // Handle ending, if any.
+ for (; size; addr++, size--) {
+ int const kAccessSizeLog = 0;
+ Shadow cur(fast_state);
+ cur.SetWrite(is_write);
+ cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false, shadow_mem,
+ cur);
+ }
+}
+
+} // namespace __tsan
+
+#if !SANITIZER_GO
+// Must be included in this file to make sure everything is inlined.
+# include "tsan_interface.inc"
+#endif
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_rtl_amd64.S b/compiler-rt/lib/tsan/rtl-old/tsan_rtl_amd64.S
new file mode 100644
index 000000000000..632b19d18158
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_rtl_amd64.S
@@ -0,0 +1,446 @@
+// The content of this file is x86_64-only:
+#if defined(__x86_64__)
+
+#include "sanitizer_common/sanitizer_asm.h"
+
+#if !defined(__APPLE__)
+.section .text
+#else
+.section __TEXT,__text
+#endif
+
+ASM_HIDDEN(__tsan_trace_switch)
+.globl ASM_SYMBOL(__tsan_trace_switch_thunk)
+ASM_SYMBOL(__tsan_trace_switch_thunk):
+ CFI_STARTPROC
+ _CET_ENDBR
+ # Save scratch registers.
+ push %rax
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rax, 0)
+ push %rcx
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rcx, 0)
+ push %rdx
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdx, 0)
+ push %rsi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rsi, 0)
+ push %rdi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdi, 0)
+ push %r8
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r8, 0)
+ push %r9
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r9, 0)
+ push %r10
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r10, 0)
+ push %r11
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r11, 0)
+ # All XMM registers are caller-saved.
+ sub $0x100, %rsp
+ CFI_ADJUST_CFA_OFFSET(0x100)
+ vmovdqu %xmm0, 0x0(%rsp)
+ vmovdqu %xmm1, 0x10(%rsp)
+ vmovdqu %xmm2, 0x20(%rsp)
+ vmovdqu %xmm3, 0x30(%rsp)
+ vmovdqu %xmm4, 0x40(%rsp)
+ vmovdqu %xmm5, 0x50(%rsp)
+ vmovdqu %xmm6, 0x60(%rsp)
+ vmovdqu %xmm7, 0x70(%rsp)
+ vmovdqu %xmm8, 0x80(%rsp)
+ vmovdqu %xmm9, 0x90(%rsp)
+ vmovdqu %xmm10, 0xa0(%rsp)
+ vmovdqu %xmm11, 0xb0(%rsp)
+ vmovdqu %xmm12, 0xc0(%rsp)
+ vmovdqu %xmm13, 0xd0(%rsp)
+ vmovdqu %xmm14, 0xe0(%rsp)
+ vmovdqu %xmm15, 0xf0(%rsp)
+ # Align stack frame.
+ push %rbx # non-scratch
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rbx, 0)
+ mov %rsp, %rbx # save current rsp
+ CFI_DEF_CFA_REGISTER(%rbx)
+ shr $4, %rsp # clear 4 lsb, align to 16
+ shl $4, %rsp
+
+ call ASM_SYMBOL(__tsan_trace_switch)
+
+ # Unalign stack frame back.
+ mov %rbx, %rsp # restore the original rsp
+ CFI_DEF_CFA_REGISTER(%rsp)
+ pop %rbx
+ CFI_ADJUST_CFA_OFFSET(-8)
+ # Restore scratch registers.
+ vmovdqu 0x0(%rsp), %xmm0
+ vmovdqu 0x10(%rsp), %xmm1
+ vmovdqu 0x20(%rsp), %xmm2
+ vmovdqu 0x30(%rsp), %xmm3
+ vmovdqu 0x40(%rsp), %xmm4
+ vmovdqu 0x50(%rsp), %xmm5
+ vmovdqu 0x60(%rsp), %xmm6
+ vmovdqu 0x70(%rsp), %xmm7
+ vmovdqu 0x80(%rsp), %xmm8
+ vmovdqu 0x90(%rsp), %xmm9
+ vmovdqu 0xa0(%rsp), %xmm10
+ vmovdqu 0xb0(%rsp), %xmm11
+ vmovdqu 0xc0(%rsp), %xmm12
+ vmovdqu 0xd0(%rsp), %xmm13
+ vmovdqu 0xe0(%rsp), %xmm14
+ vmovdqu 0xf0(%rsp), %xmm15
+ add $0x100, %rsp
+ CFI_ADJUST_CFA_OFFSET(-0x100)
+ pop %r11
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %r10
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %r9
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %r8
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rdi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rsi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rdx
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rcx
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rax
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rax)
+ CFI_RESTORE(%rbx)
+ CFI_RESTORE(%rcx)
+ CFI_RESTORE(%rdx)
+ CFI_RESTORE(%rsi)
+ CFI_RESTORE(%rdi)
+ CFI_RESTORE(%r8)
+ CFI_RESTORE(%r9)
+ CFI_RESTORE(%r10)
+ CFI_RESTORE(%r11)
+ ret
+ CFI_ENDPROC
+
+ASM_HIDDEN(__tsan_report_race)
+.globl ASM_SYMBOL(__tsan_report_race_thunk)
+ASM_SYMBOL(__tsan_report_race_thunk):
+ CFI_STARTPROC
+ _CET_ENDBR
+ # Save scratch registers.
+ push %rax
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rax, 0)
+ push %rcx
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rcx, 0)
+ push %rdx
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdx, 0)
+ push %rsi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rsi, 0)
+ push %rdi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdi, 0)
+ push %r8
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r8, 0)
+ push %r9
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r9, 0)
+ push %r10
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r10, 0)
+ push %r11
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r11, 0)
+ # All XMM registers are caller-saved.
+ sub $0x100, %rsp
+ CFI_ADJUST_CFA_OFFSET(0x100)
+ vmovdqu %xmm0, 0x0(%rsp)
+ vmovdqu %xmm1, 0x10(%rsp)
+ vmovdqu %xmm2, 0x20(%rsp)
+ vmovdqu %xmm3, 0x30(%rsp)
+ vmovdqu %xmm4, 0x40(%rsp)
+ vmovdqu %xmm5, 0x50(%rsp)
+ vmovdqu %xmm6, 0x60(%rsp)
+ vmovdqu %xmm7, 0x70(%rsp)
+ vmovdqu %xmm8, 0x80(%rsp)
+ vmovdqu %xmm9, 0x90(%rsp)
+ vmovdqu %xmm10, 0xa0(%rsp)
+ vmovdqu %xmm11, 0xb0(%rsp)
+ vmovdqu %xmm12, 0xc0(%rsp)
+ vmovdqu %xmm13, 0xd0(%rsp)
+ vmovdqu %xmm14, 0xe0(%rsp)
+ vmovdqu %xmm15, 0xf0(%rsp)
+ # Align stack frame.
+ push %rbx # non-scratch
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rbx, 0)
+ mov %rsp, %rbx # save current rsp
+ CFI_DEF_CFA_REGISTER(%rbx)
+ shr $4, %rsp # clear 4 lsb, align to 16
+ shl $4, %rsp
+
+ call ASM_SYMBOL(__tsan_report_race)
+
+ # Unalign stack frame back.
+ mov %rbx, %rsp # restore the original rsp
+ CFI_DEF_CFA_REGISTER(%rsp)
+ pop %rbx
+ CFI_ADJUST_CFA_OFFSET(-8)
+ # Restore scratch registers.
+ vmovdqu 0x0(%rsp), %xmm0
+ vmovdqu 0x10(%rsp), %xmm1
+ vmovdqu 0x20(%rsp), %xmm2
+ vmovdqu 0x30(%rsp), %xmm3
+ vmovdqu 0x40(%rsp), %xmm4
+ vmovdqu 0x50(%rsp), %xmm5
+ vmovdqu 0x60(%rsp), %xmm6
+ vmovdqu 0x70(%rsp), %xmm7
+ vmovdqu 0x80(%rsp), %xmm8
+ vmovdqu 0x90(%rsp), %xmm9
+ vmovdqu 0xa0(%rsp), %xmm10
+ vmovdqu 0xb0(%rsp), %xmm11
+ vmovdqu 0xc0(%rsp), %xmm12
+ vmovdqu 0xd0(%rsp), %xmm13
+ vmovdqu 0xe0(%rsp), %xmm14
+ vmovdqu 0xf0(%rsp), %xmm15
+ add $0x100, %rsp
+ CFI_ADJUST_CFA_OFFSET(-0x100)
+ pop %r11
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %r10
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %r9
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %r8
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rdi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rsi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rdx
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rcx
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rax
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rax)
+ CFI_RESTORE(%rbx)
+ CFI_RESTORE(%rcx)
+ CFI_RESTORE(%rdx)
+ CFI_RESTORE(%rsi)
+ CFI_RESTORE(%rdi)
+ CFI_RESTORE(%r8)
+ CFI_RESTORE(%r9)
+ CFI_RESTORE(%r10)
+ CFI_RESTORE(%r11)
+ ret
+ CFI_ENDPROC
+
+ASM_HIDDEN(__tsan_setjmp)
+#if defined(__NetBSD__)
+.comm _ZN14__interception15real___setjmp14E,8,8
+#elif !defined(__APPLE__)
+.comm _ZN14__interception11real_setjmpE,8,8
+#endif
+#if defined(__NetBSD__)
+.globl ASM_SYMBOL_INTERCEPTOR(__setjmp14)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(__setjmp14))
+ASM_SYMBOL_INTERCEPTOR(__setjmp14):
+#else
+.globl ASM_SYMBOL_INTERCEPTOR(setjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(setjmp))
+ASM_SYMBOL_INTERCEPTOR(setjmp):
+#endif
+ CFI_STARTPROC
+ _CET_ENDBR
+ // save env parameter
+ push %rdi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdi, 0)
+ // obtain SP, store in %rdi, first argument to `void __tsan_setjmp(uptr sp)`
+#if defined(__FreeBSD__) || defined(__NetBSD__)
+ lea 8(%rsp), %rdi
+#elif defined(__linux__) || defined(__APPLE__)
+ lea 16(%rsp), %rdi
+#else
+# error "Unknown platform"
+#endif
+ // call tsan interceptor
+ call ASM_SYMBOL(__tsan_setjmp)
+ // restore env parameter
+ pop %rdi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rdi)
+ // tail jump to libc setjmp
+ movl $0, %eax
+#if defined(__NetBSD__)
+ movq _ZN14__interception15real___setjmp14E@GOTPCREL(%rip), %rdx
+ jmp *(%rdx)
+#elif !defined(__APPLE__)
+ movq _ZN14__interception11real_setjmpE@GOTPCREL(%rip), %rdx
+ jmp *(%rdx)
+#else
+ jmp ASM_SYMBOL(setjmp)
+#endif
+ CFI_ENDPROC
+#if defined(__NetBSD__)
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(__setjmp14))
+#else
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(setjmp))
+#endif
+
+.comm _ZN14__interception12real__setjmpE,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(_setjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(_setjmp))
+ASM_SYMBOL_INTERCEPTOR(_setjmp):
+ CFI_STARTPROC
+ _CET_ENDBR
+ // save env parameter
+ push %rdi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdi, 0)
+ // obtain SP, store in %rdi, first argument to `void __tsan_setjmp(uptr sp)`
+#if defined(__FreeBSD__) || defined(__NetBSD__)
+ lea 8(%rsp), %rdi
+#elif defined(__linux__) || defined(__APPLE__)
+ lea 16(%rsp), %rdi
+#else
+# error "Unknown platform"
+#endif
+ // call tsan interceptor
+ call ASM_SYMBOL(__tsan_setjmp)
+ // restore env parameter
+ pop %rdi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rdi)
+ // tail jump to libc setjmp
+ movl $0, %eax
+#if !defined(__APPLE__)
+ movq _ZN14__interception12real__setjmpE@GOTPCREL(%rip), %rdx
+ jmp *(%rdx)
+#else
+ jmp ASM_SYMBOL(_setjmp)
+#endif
+ CFI_ENDPROC
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(_setjmp))
+
+#if defined(__NetBSD__)
+.comm _ZN14__interception18real___sigsetjmp14E,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(__sigsetjmp14)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp14))
+ASM_SYMBOL_INTERCEPTOR(__sigsetjmp14):
+#else
+.comm _ZN14__interception14real_sigsetjmpE,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
+ASM_SYMBOL_INTERCEPTOR(sigsetjmp):
+#endif
+ CFI_STARTPROC
+ _CET_ENDBR
+ // save env parameter
+ push %rdi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdi, 0)
+ // save savesigs parameter
+ push %rsi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rsi, 0)
+ // align stack frame
+ sub $8, %rsp
+ CFI_ADJUST_CFA_OFFSET(8)
+ // obtain SP, store in %rdi, first argument to `void __tsan_setjmp(uptr sp)`
+#if defined(__FreeBSD__) || defined(__NetBSD__)
+ lea 24(%rsp), %rdi
+#elif defined(__linux__) || defined(__APPLE__)
+ lea 32(%rsp), %rdi
+#else
+# error "Unknown platform"
+#endif
+ // call tsan interceptor
+ call ASM_SYMBOL(__tsan_setjmp)
+ // unalign stack frame
+ add $8, %rsp
+ CFI_ADJUST_CFA_OFFSET(-8)
+ // restore savesigs parameter
+ pop %rsi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rsi)
+ // restore env parameter
+ pop %rdi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rdi)
+ // tail jump to libc sigsetjmp
+ movl $0, %eax
+#if defined(__NetBSD__)
+ movq _ZN14__interception18real___sigsetjmp14E@GOTPCREL(%rip), %rdx
+ jmp *(%rdx)
+#elif !defined(__APPLE__)
+ movq _ZN14__interception14real_sigsetjmpE@GOTPCREL(%rip), %rdx
+ jmp *(%rdx)
+#else
+ jmp ASM_SYMBOL(sigsetjmp)
+#endif
+ CFI_ENDPROC
+#if defined(__NetBSD__)
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp14))
+#else
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
+#endif
+
+#if !defined(__APPLE__) && !defined(__NetBSD__)
+.comm _ZN14__interception16real___sigsetjmpE,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(__sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp))
+ASM_SYMBOL_INTERCEPTOR(__sigsetjmp):
+ CFI_STARTPROC
+ _CET_ENDBR
+ // save env parameter
+ push %rdi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdi, 0)
+ // save savesigs parameter
+ push %rsi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rsi, 0)
+ // align stack frame
+ sub $8, %rsp
+ CFI_ADJUST_CFA_OFFSET(8)
+ // obtain SP, store in %rdi, first argument to `void __tsan_setjmp(uptr sp)`
+#if defined(__FreeBSD__)
+ lea 24(%rsp), %rdi
+#else
+ lea 32(%rsp), %rdi
+#endif
+ // call tsan interceptor
+ call ASM_SYMBOL(__tsan_setjmp)
+ // unalign stack frame
+ add $8, %rsp
+ CFI_ADJUST_CFA_OFFSET(-8)
+ // restore savesigs parameter
+ pop %rsi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rsi)
+ // restore env parameter
+ pop %rdi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rdi)
+ // tail jump to libc sigsetjmp
+ movl $0, %eax
+ movq _ZN14__interception16real___sigsetjmpE@GOTPCREL(%rip), %rdx
+ jmp *(%rdx)
+ CFI_ENDPROC
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp))
+#endif // !defined(__APPLE__) && !defined(__NetBSD__)
+
+NO_EXEC_STACK_DIRECTIVE
+
+#endif
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_rtl_mips64.S b/compiler-rt/lib/tsan/rtl-old/tsan_rtl_mips64.S
new file mode 100644
index 000000000000..d0f7a3f9af98
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_rtl_mips64.S
@@ -0,0 +1,214 @@
+.section .text
+.set noreorder
+
+.hidden __tsan_setjmp
+.comm _ZN14__interception11real_setjmpE,8,8
+.globl setjmp
+.type setjmp, @function
+setjmp:
+
+ // save env parameters
+ daddiu $sp,$sp,-40
+ sd $s0,32($sp)
+ sd $ra,24($sp)
+ sd $fp,16($sp)
+ sd $gp,8($sp)
+
+ // calculate and save pointer to GOT
+ lui $gp,%hi(%neg(%gp_rel(setjmp)))
+ daddu $gp,$gp,$t9
+ daddiu $gp,$gp,%lo(%neg(%gp_rel(setjmp)))
+ move $s0,$gp
+
+ // save jmp_buf
+ sd $a0,0($sp)
+
+ // obtain $sp
+ dadd $a0,$zero,$sp
+
+ // call tsan interceptor
+ jal __tsan_setjmp
+ daddiu $a1,$a0,40
+
+ // restore jmp_buf
+ ld $a0,0($sp)
+
+ // restore gp
+ move $gp,$s0
+
+ // load pointer of libc setjmp to t9
+ dla $t9,(_ZN14__interception11real_setjmpE)
+
+ // restore env parameters
+ ld $gp,8($sp)
+ ld $fp,16($sp)
+ ld $ra,24($sp)
+ ld $s0,32($sp)
+ daddiu $sp,$sp,40
+
+ // tail jump to libc setjmp
+ ld $t9,0($t9)
+ jr $t9
+ nop
+
+.size setjmp, .-setjmp
+
+.hidden __tsan_setjmp
+.globl _setjmp
+.comm _ZN14__interception12real__setjmpE,8,8
+.type _setjmp, @function
+_setjmp:
+
+ // Save env parameters
+ daddiu $sp,$sp,-40
+ sd $s0,32($sp)
+ sd $ra,24($sp)
+ sd $fp,16($sp)
+ sd $gp,8($sp)
+
+ // calculate and save pointer to GOT
+ lui $gp,%hi(%neg(%gp_rel(_setjmp)))
+ daddu $gp,$gp,$t9
+ daddiu $gp,$gp,%lo(%neg(%gp_rel(_setjmp)))
+ move $s0,$gp
+
+ // save jmp_buf
+ sd $a0,0($sp)
+
+ // obtain $sp
+ dadd $a0,$zero,$sp
+
+ // call tsan interceptor
+ jal __tsan_setjmp
+ daddiu $a1,$a0,40
+
+ // restore jmp_buf
+ ld $a0,0($sp)
+
+ // restore gp
+ move $gp,$s0
+
+ // load pointer of libc _setjmp to t9
+ dla $t9,(_ZN14__interception12real__setjmpE)
+
+ // restore env parameters
+ ld $gp,8($sp)
+ ld $fp,16($sp)
+ ld $ra,24($sp)
+ ld $s0,32($sp)
+ daddiu $sp,$sp,40
+
+ // tail jump to libc _setjmp
+ ld $t9,0($t9)
+ jr $t9
+ nop
+
+.size _setjmp, .-_setjmp
+
+.hidden __tsan_setjmp
+.globl sigsetjmp
+.comm _ZN14__interception14real_sigsetjmpE,8,8
+.type sigsetjmp, @function
+sigsetjmp:
+
+ // Save env parameters
+ daddiu $sp,$sp,-48
+ sd $s0,40($sp)
+ sd $ra,32($sp)
+ sd $fp,24($sp)
+ sd $gp,16($sp)
+
+ // calculate and save pointer to GOT
+ lui $gp,%hi(%neg(%gp_rel(sigsetjmp)))
+ daddu $gp,$gp,$t9
+ daddiu $gp,$gp,%lo(%neg(%gp_rel(sigsetjmp)))
+ move $s0,$gp
+
+ // save jmp_buf and savesig
+ sd $a0,0($sp)
+ sd $a1,8($sp)
+
+ // obtain $sp
+ dadd $a0,$zero,$sp
+
+ // call tsan interceptor
+ jal __tsan_setjmp
+ daddiu $a1,$a0,48
+
+ // restore jmp_buf and savesig
+ ld $a0,0($sp)
+ ld $a1,8($sp)
+
+ // restore gp
+ move $gp,$s0
+
+ // load pointer of libc sigsetjmp to t9
+ dla $t9,(_ZN14__interception14real_sigsetjmpE)
+
+ // restore env parameters
+ ld $gp,16($sp)
+ ld $fp,24($sp)
+ ld $ra,32($sp)
+ ld $s0,40($sp)
+ daddiu $sp,$sp,48
+
+ // tail jump to libc sigsetjmp
+ ld $t9,0($t9)
+ jr $t9
+ nop
+
+.size sigsetjmp, .-sigsetjmp
+
+.hidden __tsan_setjmp
+.comm _ZN14__interception16real___sigsetjmpE,8,8
+.globl __sigsetjmp
+.type __sigsetjmp, @function
+__sigsetjmp:
+
+ // Save env parameters
+ daddiu $sp,$sp,-48
+ sd $s0,40($sp)
+ sd $ra,32($sp)
+ sd $fp,24($sp)
+ sd $gp,16($sp)
+
+ // calculate and save pointer to GOT
+ lui $gp,%hi(%neg(%gp_rel(__sigsetjmp)))
+ daddu $gp,$gp,$t9
+ daddiu $gp,$gp,%lo(%neg(%gp_rel(__sigsetjmp)))
+ move $s0,$gp
+
+ // save jmp_buf and savesig
+ sd $a0,0($sp)
+ sd $a1,8($sp)
+
+ // obtain $sp
+ dadd $a0,$zero,$sp
+
+ // call tsan interceptor
+ jal __tsan_setjmp
+ daddiu $a1,$a0,48
+
+ // restore jmp_buf and savesig
+ ld $a0,0($sp)
+ ld $a1,8($sp)
+
+ // restore gp
+ move $gp,$s0
+
+ // load pointer to libc __sigsetjmp in t9
+ dla $t9,(_ZN14__interception16real___sigsetjmpE)
+
+ // restore env parameters
+ ld $gp,16($sp)
+ ld $fp,24($sp)
+ ld $ra,32($sp)
+ ld $s0,40($sp)
+ daddiu $sp,$sp,48
+
+ // tail jump to libc __sigsetjmp
+ ld $t9,0($t9)
+ jr $t9
+ nop
+
+.size __sigsetjmp, .-__sigsetjmp
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_rtl_mutex.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_rtl_mutex.cpp
new file mode 100644
index 000000000000..7d6b41116aa6
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_rtl_mutex.cpp
@@ -0,0 +1,555 @@
+//===-- tsan_rtl_mutex.cpp ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
+#include <sanitizer_common/sanitizer_stackdepot.h>
+
+#include "tsan_rtl.h"
+#include "tsan_flags.h"
+#include "tsan_sync.h"
+#include "tsan_report.h"
+#include "tsan_symbolize.h"
+#include "tsan_platform.h"
+
+namespace __tsan {
+
+void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
+
+struct Callback final : public DDCallback {
+ ThreadState *thr;
+ uptr pc;
+
+ Callback(ThreadState *thr, uptr pc)
+ : thr(thr)
+ , pc(pc) {
+ DDCallback::pt = thr->proc()->dd_pt;
+ DDCallback::lt = thr->dd_lt;
+ }
+
+ StackID Unwind() override { return CurrentStackId(thr, pc); }
+ int UniqueTid() override { return thr->unique_id; }
+};
+
+void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexInit(&cb, &s->dd);
+ s->dd.ctx = s->GetId();
+}
+
+static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
+ uptr addr, u64 mid) {
+ // In Go, these misuses are either impossible, or detected by std lib,
+ // or false positives (e.g. unlock in a different thread).
+ if (SANITIZER_GO)
+ return;
+ if (!ShouldReport(thr, typ))
+ return;
+ ThreadRegistryLock l(&ctx->thread_registry);
+ ScopedReport rep(typ);
+ rep.AddMutex(mid);
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ rep.AddStack(trace, true);
+ rep.AddLocation(addr, 1);
+ OutputReport(thr, rep);
+}
+
+void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
+ if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
+ CHECK(!thr->is_freeing);
+ thr->is_freeing = true;
+ MemoryAccess(thr, pc, addr, 1, kAccessWrite);
+ thr->is_freeing = false;
+ }
+ SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ Lock l(&s->mtx);
+ s->SetFlags(flagz & MutexCreationFlagMask);
+  // Save the stack in case the sync object was created earlier as an atomic.
+ if (!SANITIZER_GO && s->creation_stack_id == 0)
+ s->creation_stack_id = CurrentStackId(thr, pc);
+}
+
+void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
+ bool unlock_locked = false;
+ u64 mid = 0;
+ u64 last_lock = 0;
+ {
+ SyncVar *s = ctx->metamap.GetSyncIfExists(addr);
+ if (s == 0)
+ return;
+ Lock l(&s->mtx);
+ if ((flagz & MutexFlagLinkerInit) || s->IsFlagSet(MutexFlagLinkerInit) ||
+ ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
+ // Destroy is no-op for linker-initialized mutexes.
+ return;
+ }
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexDestroy(&cb, &s->dd);
+ ctx->dd->MutexInit(&cb, &s->dd);
+ }
+ if (flags()->report_destroy_locked && s->owner_tid != kInvalidTid &&
+ !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ unlock_locked = true;
+ }
+ mid = s->GetId();
+ last_lock = s->last_lock;
+ if (!unlock_locked)
+ s->Reset(thr->proc()); // must not reset it before the report is printed
+ }
+ if (unlock_locked && ShouldReport(thr, ReportTypeMutexDestroyLocked)) {
+ ThreadRegistryLock l(&ctx->thread_registry);
+ ScopedReport rep(ReportTypeMutexDestroyLocked);
+ rep.AddMutex(mid);
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ rep.AddStack(trace, true);
+ FastState last(last_lock);
+ RestoreStack(last.tid(), last.epoch(), &trace, 0);
+ rep.AddStack(trace, true);
+ rep.AddLocation(addr, 1);
+ OutputReport(thr, rep);
+
+ SyncVar *s = ctx->metamap.GetSyncIfExists(addr);
+ if (s != 0) {
+ Lock l(&s->mtx);
+ s->Reset(thr->proc());
+ }
+ }
+ thr->mset.Remove(mid);
+ // Imitate a memory write to catch unlock-destroy races.
+ // Do this outside of sync mutex, because it can report a race which locks
+ // sync mutexes.
+ if (IsAppMem(addr))
+ MemoryAccess(thr, pc, addr, 1, kAccessWrite | kAccessFree);
+ // s will be destroyed and freed in MetaMap::FreeBlock.
+}
+
+void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
+ if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
+ SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ {
+ ReadLock l(&s->mtx);
+ s->UpdateFlags(flagz);
+ if (s->owner_tid != thr->tid) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
+ }
+ }
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
+}
+
+void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
+ DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
+ thr->tid, addr, flagz, rec);
+ if (flagz & MutexFlagRecursiveLock)
+ CHECK_GT(rec, 0);
+ else
+ rec = 1;
+ if (IsAppMem(addr))
+ MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
+ u64 mid = 0;
+ bool pre_lock = false;
+ bool first = false;
+ bool report_double_lock = false;
+ {
+ SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ Lock l(&s->mtx);
+ s->UpdateFlags(flagz);
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
+ if (s->owner_tid == kInvalidTid) {
+ CHECK_EQ(s->recursion, 0);
+ s->owner_tid = thr->tid;
+ s->last_lock = thr->fast_state.raw();
+ } else if (s->owner_tid == thr->tid) {
+ CHECK_GT(s->recursion, 0);
+ } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_double_lock = true;
+ }
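+    // 'first' is true for the outermost (non-recursive) acquisition.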
+ first = s->recursion == 0;
+ s->recursion += rec;
+ if (first) {
+ AcquireImpl(thr, pc, &s->clock);
+ AcquireImpl(thr, pc, &s->read_clock);
+ } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
+ }
+ thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
+ if (first && common_flags()->detect_deadlocks) {
+ pre_lock =
+ (flagz & MutexFlagDoPreLockOnPostLock) && !(flagz & MutexFlagTryLock);
+ Callback cb(thr, pc);
+ if (pre_lock)
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
+ ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
+ }
+ mid = s->GetId();
+ }
+ if (report_double_lock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
+ if (first && pre_lock && common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
+}
+
+int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
+ if (IsAppMem(addr))
+ MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
+ u64 mid = 0;
+ bool report_bad_unlock = false;
+ int rec = 0;
+ {
+ SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ Lock l(&s->mtx);
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
+ if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
+ if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_bad_unlock = true;
+ }
+ } else {
+ rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
+ s->recursion -= rec;
+ if (s->recursion == 0) {
+ s->owner_tid = kInvalidTid;
+ ReleaseStoreImpl(thr, pc, &s->clock);
+ } else {
+ }
+ }
+ thr->mset.Del(s->GetId(), true);
+ if (common_flags()->detect_deadlocks && s->recursion == 0 &&
+ !report_bad_unlock) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
+ }
+ mid = s->GetId();
+ }
+ if (report_bad_unlock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
+ if (common_flags()->detect_deadlocks && !report_bad_unlock) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
+ return rec;
+}
+
+void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
+ if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
+ {
+ SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ ReadLock l(&s->mtx);
+ s->UpdateFlags(flagz);
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
+ }
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
+}
+
+void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
+ if (IsAppMem(addr))
+ MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
+ u64 mid = 0;
+ bool report_bad_lock = false;
+ bool pre_lock = false;
+ {
+ SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ ReadLock l(&s->mtx);
+ s->UpdateFlags(flagz);
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
+ if (s->owner_tid != kInvalidTid) {
+ if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_bad_lock = true;
+ }
+ }
+ AcquireImpl(thr, pc, &s->clock);
+ s->last_lock = thr->fast_state.raw();
+ thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
+ if (common_flags()->detect_deadlocks) {
+ pre_lock =
+ (flagz & MutexFlagDoPreLockOnPostLock) && !(flagz & MutexFlagTryLock);
+ Callback cb(thr, pc);
+ if (pre_lock)
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
+ ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
+ }
+ mid = s->GetId();
+ }
+ if (report_bad_lock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
+ if (pre_lock && common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
+}
+
+void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
+ if (IsAppMem(addr))
+ MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
+ u64 mid = 0;
+ bool report_bad_unlock = false;
+ {
+ SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ Lock l(&s->mtx);
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
+ if (s->owner_tid != kInvalidTid) {
+ if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_bad_unlock = true;
+ }
+ }
+ ReleaseImpl(thr, pc, &s->read_clock);
+ if (common_flags()->detect_deadlocks && s->recursion == 0) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
+ }
+ mid = s->GetId();
+ }
+ thr->mset.Del(mid, false);
+ if (report_bad_unlock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
+}
+
+void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
+ if (IsAppMem(addr))
+ MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
+ u64 mid = 0;
+ bool report_bad_unlock = false;
+ {
+ SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ Lock l(&s->mtx);
+ bool write = true;
+ if (s->owner_tid == kInvalidTid) {
+ // Seems to be read unlock.
+ write = false;
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
+ ReleaseImpl(thr, pc, &s->read_clock);
+ } else if (s->owner_tid == thr->tid) {
+ // Seems to be write unlock.
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
+ CHECK_GT(s->recursion, 0);
+ s->recursion--;
+ if (s->recursion == 0) {
+ s->owner_tid = kInvalidTid;
+ ReleaseStoreImpl(thr, pc, &s->clock);
+ } else {
+ }
+ } else if (!s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_bad_unlock = true;
+ }
+ thr->mset.Del(s->GetId(), write);
+ if (common_flags()->detect_deadlocks && s->recursion == 0) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
+ }
+ mid = s->GetId();
+ }
+ if (report_bad_unlock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
+}
+
+void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
+ SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ Lock l(&s->mtx);
+ s->owner_tid = kInvalidTid;
+ s->recursion = 0;
+}
+
+void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
+ SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, s->GetId());
+}
+
+void Acquire(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
+ if (thr->ignore_sync)
+ return;
+ SyncVar *s = ctx->metamap.GetSyncIfExists(addr);
+ if (!s)
+ return;
+ ReadLock l(&s->mtx);
+ AcquireImpl(thr, pc, &s->clock);
+}
+
+static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
+ ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
+ ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+ u64 epoch = tctx->epoch1;
+ if (tctx->status == ThreadStatusRunning) {
+ epoch = tctx->thr->fast_state.epoch();
+ tctx->thr->clock.NoteGlobalAcquire(epoch);
+ }
+ thr->clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
+}
+
+void AcquireGlobal(ThreadState *thr) {
+ DPrintf("#%d: AcquireGlobal\n", thr->tid);
+ if (thr->ignore_sync)
+ return;
+ ThreadRegistryLock l(&ctx->thread_registry);
+ ctx->thread_registry.RunCallbackForEachThreadLocked(UpdateClockCallback, thr);
+}
+
+void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: ReleaseStoreAcquire %zx\n", thr->tid, addr);
+ if (thr->ignore_sync)
+ return;
+ SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
+ Lock l(&s->mtx);
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ ReleaseStoreAcquireImpl(thr, pc, &s->clock);
+}
+
+void Release(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: Release %zx\n", thr->tid, addr);
+ if (thr->ignore_sync)
+ return;
+ SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
+ Lock l(&s->mtx);
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ ReleaseImpl(thr, pc, &s->clock);
+}
+
+void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
+ if (thr->ignore_sync)
+ return;
+ SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
+ Lock l(&s->mtx);
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ ReleaseStoreImpl(thr, pc, &s->clock);
+}
+
+#if !SANITIZER_GO
+static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
+ ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
+ ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+ u64 epoch = tctx->epoch1;
+ if (tctx->status == ThreadStatusRunning)
+ epoch = tctx->thr->fast_state.epoch();
+ thr->last_sleep_clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
+}
+
+void AfterSleep(ThreadState *thr, uptr pc) {
+ DPrintf("#%d: AfterSleep\n", thr->tid);
+ if (thr->ignore_sync)
+ return;
+ thr->last_sleep_stack_id = CurrentStackId(thr, pc);
+ ThreadRegistryLock l(&ctx->thread_registry);
+ ctx->thread_registry.RunCallbackForEachThreadLocked(UpdateSleepClockCallback,
+ thr);
+}
+#endif
+
+void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+ if (thr->ignore_sync)
+ return;
+ thr->clock.set(thr->fast_state.epoch());
+ thr->clock.acquire(&thr->proc()->clock_cache, c);
+}
+
+void ReleaseStoreAcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+ if (thr->ignore_sync)
+ return;
+ thr->clock.set(thr->fast_state.epoch());
+ thr->fast_synch_epoch = thr->fast_state.epoch();
+ thr->clock.releaseStoreAcquire(&thr->proc()->clock_cache, c);
+}
+
+void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+ if (thr->ignore_sync)
+ return;
+ thr->clock.set(thr->fast_state.epoch());
+ thr->fast_synch_epoch = thr->fast_state.epoch();
+ thr->clock.release(&thr->proc()->clock_cache, c);
+}
+
+void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+ if (thr->ignore_sync)
+ return;
+ thr->clock.set(thr->fast_state.epoch());
+ thr->fast_synch_epoch = thr->fast_state.epoch();
+ thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
+}
+
+void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+ if (thr->ignore_sync)
+ return;
+ thr->clock.set(thr->fast_state.epoch());
+ thr->fast_synch_epoch = thr->fast_state.epoch();
+ thr->clock.acq_rel(&thr->proc()->clock_cache, c);
+}
+
+void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
+ if (r == 0 || !ShouldReport(thr, ReportTypeDeadlock))
+ return;
+ ThreadRegistryLock l(&ctx->thread_registry);
+ ScopedReport rep(ReportTypeDeadlock);
+ for (int i = 0; i < r->n; i++) {
+ rep.AddMutex(r->loop[i].mtx_ctx0);
+ rep.AddUniqueTid((int)r->loop[i].thr_ctx);
+ rep.AddThread((int)r->loop[i].thr_ctx);
+ }
+ uptr dummy_pc = 0x42;
+ for (int i = 0; i < r->n; i++) {
+ for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
+ u32 stk = r->loop[i].stk[j];
+ if (stk && stk != 0xffffffff) {
+ rep.AddStack(StackDepotGet(stk), true);
+ } else {
+ // Sometimes we fail to extract the stack trace (FIXME: investigate),
+ // but we should still produce some stack trace in the report.
+ rep.AddStack(StackTrace(&dummy_pc, 1), true);
+ }
+ }
+ }
+ OutputReport(thr, rep);
+}
+
+} // namespace __tsan
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_rtl_ppc64.S b/compiler-rt/lib/tsan/rtl-old/tsan_rtl_ppc64.S
new file mode 100644
index 000000000000..8285e21aa1ec
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_rtl_ppc64.S
@@ -0,0 +1,288 @@
+#include "tsan_ppc_regs.h"
+
+ .section .text
+ .hidden __tsan_setjmp
+ .globl _setjmp
+ .type _setjmp, @function
+ .align 4
+#if _CALL_ELF == 2
+_setjmp:
+#else
+ .section ".opd","aw"
+ .align 3
+_setjmp:
+ .quad .L._setjmp,.TOC.@tocbase,0
+ .previous
+#endif
+.L._setjmp:
+ mflr r0
+ stdu r1,-48(r1)
+ std r2,24(r1)
+ std r3,32(r1)
+ std r0,40(r1)
+ // r3 is the original stack pointer.
+ addi r3,r1,48
+ // r4 is the mangled stack pointer (see glibc)
+ ld r4,-28696(r13)
+ xor r4,r3,r4
+ // Materialize a TOC in case we were called from libc.
+ // For big-endian, we load the TOC from the OPD. For little-
+ // endian, we use the .TOC. symbol to find it.
+ nop
+ bcl 20,31,0f
+0:
+ mflr r2
+#if _CALL_ELF == 2
+ addis r2,r2,.TOC.-0b@ha
+ addi r2,r2,.TOC.-0b@l
+#else
+ addis r2,r2,_setjmp-0b@ha
+ addi r2,r2,_setjmp-0b@l
+ ld r2,8(r2)
+#endif
+ // Call the interceptor.
+ bl __tsan_setjmp
+ nop
+ // Restore regs needed for setjmp.
+ ld r3,32(r1)
+ ld r0,40(r1)
+ // Emulate the real setjmp function. We do this because we can't
+ // perform a sibcall: The real setjmp function trashes the TOC
+ // pointer, and with a sibcall we have no way to restore it.
+ // This way we can make sure our caller's stack pointer and
+ // link register are saved correctly in the jmpbuf.
+ ld r6,-28696(r13)
+ addi r5,r1,48 // original stack ptr of caller
+ xor r5,r6,r5
+ std r5,0(r3) // mangled stack ptr of caller
+ ld r5,24(r1)
+ std r5,8(r3) // caller's saved TOC pointer
+ xor r0,r6,r0
+ std r0,16(r3) // caller's mangled return address
+ mfcr r0
+ // Nonvolatiles.
+ std r14,24(r3)
+ stfd f14,176(r3)
+ stw r0,172(r3) // CR
+ std r15,32(r3)
+ stfd f15,184(r3)
+ std r16,40(r3)
+ stfd f16,192(r3)
+ std r17,48(r3)
+ stfd f17,200(r3)
+ std r18,56(r3)
+ stfd f18,208(r3)
+ std r19,64(r3)
+ stfd f19,216(r3)
+ std r20,72(r3)
+ stfd f20,224(r3)
+ std r21,80(r3)
+ stfd f21,232(r3)
+ std r22,88(r3)
+ stfd f22,240(r3)
+ std r23,96(r3)
+ stfd f23,248(r3)
+ std r24,104(r3)
+ stfd f24,256(r3)
+ std r25,112(r3)
+ stfd f25,264(r3)
+ std r26,120(r3)
+ stfd f26,272(r3)
+ std r27,128(r3)
+ stfd f27,280(r3)
+ std r28,136(r3)
+ stfd f28,288(r3)
+ std r29,144(r3)
+ stfd f29,296(r3)
+ std r30,152(r3)
+ stfd f30,304(r3)
+ std r31,160(r3)
+ stfd f31,312(r3)
+ addi r5,r3,320
+ mfspr r0,256
+ stw r0,168(r3) // VRSAVE
+ addi r6,r5,16
+ stvx v20,0,r5
+ addi r5,r5,32
+ stvx v21,0,r6
+ addi r6,r6,32
+ stvx v22,0,r5
+ addi r5,r5,32
+ stvx v23,0,r6
+ addi r6,r6,32
+ stvx v24,0,r5
+ addi r5,r5,32
+ stvx v25,0,r6
+ addi r6,r6,32
+ stvx v26,0,r5
+ addi r5,r5,32
+ stvx v27,0,r6
+ addi r6,r6,32
+ stvx v28,0,r5
+ addi r5,r5,32
+ stvx v29,0,r6
+ addi r6,r6,32
+ stvx v30,0,r5
+ stvx v31,0,r6
+ // Clear the "mask-saved" slot.
+ li r4,0
+ stw r4,512(r3)
+ // Restore TOC, LR, and stack and return to caller.
+ ld r2,24(r1)
+ ld r0,40(r1)
+ addi r1,r1,48
+ li r3,0 // This is the setjmp return path
+ mtlr r0
+ blr
+ .size _setjmp, .-.L._setjmp
+
+ .globl setjmp
+ .type setjmp, @function
+ .align 4
+setjmp:
+ b _setjmp
+ .size setjmp, .-setjmp
+
+ // sigsetjmp is like setjmp, except that the mask in r4 needs
+ // to be saved at offset 512 of the jump buffer.
+ .globl __sigsetjmp
+ .type __sigsetjmp, @function
+ .align 4
+#if _CALL_ELF == 2
+__sigsetjmp:
+#else
+ .section ".opd","aw"
+ .align 3
+__sigsetjmp:
+ .quad .L.__sigsetjmp,.TOC.@tocbase,0
+ .previous
+#endif
+.L.__sigsetjmp:
+ mflr r0
+ stdu r1,-64(r1)
+ std r2,24(r1)
+ std r3,32(r1)
+ std r4,40(r1)
+ std r0,48(r1)
+ // r3 is the original stack pointer.
+ addi r3,r1,64
+ // r4 is the mangled stack pointer (see glibc)
+ ld r4,-28696(r13)
+ xor r4,r3,r4
+ // Materialize a TOC in case we were called from libc.
+ // For big-endian, we load the TOC from the OPD. For little-
+ // endian, we use the .TOC. symbol to find it.
+ nop
+ bcl 20,31,1f
+1:
+ mflr r2
+#if _CALL_ELF == 2
+ addis r2,r2,.TOC.-1b@ha
+ addi r2,r2,.TOC.-1b@l
+#else
+ addis r2,r2,_setjmp-1b@ha
+ addi r2,r2,_setjmp-1b@l
+ ld r2,8(r2)
+#endif
+ // Call the interceptor.
+ bl __tsan_setjmp
+ nop
+ // Restore regs needed for __sigsetjmp.
+ ld r3,32(r1)
+ ld r4,40(r1)
+ ld r0,48(r1)
+ // Emulate the real sigsetjmp function. We do this because we can't
+ // perform a sibcall: The real sigsetjmp function trashes the TOC
+ // pointer, and with a sibcall we have no way to restore it.
+ // This way we can make sure our caller's stack pointer and
+ // link register are saved correctly in the jmpbuf.
+ ld r6,-28696(r13)
+ addi r5,r1,64 // original stack ptr of caller
+ xor r5,r6,r5
+ std r5,0(r3) // mangled stack ptr of caller
+ ld r5,24(r1)
+ std r5,8(r3) // caller's saved TOC pointer
+ xor r0,r6,r0
+ std r0,16(r3) // caller's mangled return address
+ mfcr r0
+ // Nonvolatiles.
+ std r14,24(r3)
+ stfd f14,176(r3)
+ stw r0,172(r3) // CR
+ std r15,32(r3)
+ stfd f15,184(r3)
+ std r16,40(r3)
+ stfd f16,192(r3)
+ std r17,48(r3)
+ stfd f17,200(r3)
+ std r18,56(r3)
+ stfd f18,208(r3)
+ std r19,64(r3)
+ stfd f19,216(r3)
+ std r20,72(r3)
+ stfd f20,224(r3)
+ std r21,80(r3)
+ stfd f21,232(r3)
+ std r22,88(r3)
+ stfd f22,240(r3)
+ std r23,96(r3)
+ stfd f23,248(r3)
+ std r24,104(r3)
+ stfd f24,256(r3)
+ std r25,112(r3)
+ stfd f25,264(r3)
+ std r26,120(r3)
+ stfd f26,272(r3)
+ std r27,128(r3)
+ stfd f27,280(r3)
+ std r28,136(r3)
+ stfd f28,288(r3)
+ std r29,144(r3)
+ stfd f29,296(r3)
+ std r30,152(r3)
+ stfd f30,304(r3)
+ std r31,160(r3)
+ stfd f31,312(r3)
+ addi r5,r3,320
+ mfspr r0,256
+ stw r0,168(r3) // VRSAVE
+ addi r6,r5,16
+ stvx v20,0,r5
+ addi r5,r5,32
+ stvx v21,0,r6
+ addi r6,r6,32
+ stvx v22,0,r5
+ addi r5,r5,32
+ stvx v23,0,r6
+ addi r6,r6,32
+ stvx v24,0,r5
+ addi r5,r5,32
+ stvx v25,0,r6
+ addi r6,r6,32
+ stvx v26,0,r5
+ addi r5,r5,32
+ stvx v27,0,r6
+ addi r6,r6,32
+ stvx v28,0,r5
+ addi r5,r5,32
+ stvx v29,0,r6
+ addi r6,r6,32
+ stvx v30,0,r5
+ stvx v31,0,r6
+ // Save into the "mask-saved" slot.
+ stw r4,512(r3)
+ // Restore TOC, LR, and stack and return to caller.
+ ld r2,24(r1)
+ ld r0,48(r1)
+ addi r1,r1,64
+ li r3,0 // This is the sigsetjmp return path
+ mtlr r0
+ blr
+ .size __sigsetjmp, .-.L.__sigsetjmp
+
+ .globl sigsetjmp
+ .type sigsetjmp, @function
+ .align 4
+sigsetjmp:
+ b __sigsetjmp
+ .size sigsetjmp, .-sigsetjmp
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_rtl_proc.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_rtl_proc.cpp
new file mode 100644
index 000000000000..def61cca14d5
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_rtl_proc.cpp
@@ -0,0 +1,60 @@
+//===-- tsan_rtl_proc.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "tsan_flags.h"
+
+namespace __tsan {
+
+Processor *ProcCreate() {
+ void *mem = InternalAlloc(sizeof(Processor));
+ internal_memset(mem, 0, sizeof(Processor));
+ Processor *proc = new(mem) Processor;
+ proc->thr = nullptr;
+#if !SANITIZER_GO
+ AllocatorProcStart(proc);
+#endif
+ if (common_flags()->detect_deadlocks)
+ proc->dd_pt = ctx->dd->CreatePhysicalThread();
+ return proc;
+}
+
+void ProcDestroy(Processor *proc) {
+ CHECK_EQ(proc->thr, nullptr);
+#if !SANITIZER_GO
+ AllocatorProcFinish(proc);
+#endif
+ ctx->clock_alloc.FlushCache(&proc->clock_cache);
+ ctx->metamap.OnProcIdle(proc);
+ if (common_flags()->detect_deadlocks)
+ ctx->dd->DestroyPhysicalThread(proc->dd_pt);
+ proc->~Processor();
+ InternalFree(proc);
+}
+
+void ProcWire(Processor *proc, ThreadState *thr) {
+ CHECK_EQ(thr->proc1, nullptr);
+ CHECK_EQ(proc->thr, nullptr);
+ thr->proc1 = proc;
+ proc->thr = thr;
+}
+
+void ProcUnwire(Processor *proc, ThreadState *thr) {
+ CHECK_EQ(thr->proc1, proc);
+ CHECK_EQ(proc->thr, thr);
+ thr->proc1 = nullptr;
+ proc->thr = nullptr;
+}
+
+} // namespace __tsan
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_rtl_report.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_rtl_report.cpp
new file mode 100644
index 000000000000..f332a6a8d1d8
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_rtl_report.cpp
@@ -0,0 +1,984 @@
+//===-- tsan_rtl_report.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+#include "tsan_suppressions.h"
+#include "tsan_symbolize.h"
+#include "tsan_report.h"
+#include "tsan_sync.h"
+#include "tsan_mman.h"
+#include "tsan_flags.h"
+#include "tsan_fd.h"
+
+namespace __tsan {
+
+using namespace __sanitizer;
+
+static ReportStack *SymbolizeStack(StackTrace trace);
+
+// Can be overridden by an application/test to intercept reports.
+#ifdef TSAN_EXTERNAL_HOOKS
+bool OnReport(const ReportDesc *rep, bool suppressed);
+#else
+SANITIZER_WEAK_CXX_DEFAULT_IMPL
+bool OnReport(const ReportDesc *rep, bool suppressed) {
+ (void)rep;
+ return suppressed;
+}
+#endif
+
+SANITIZER_WEAK_DEFAULT_IMPL
+void __tsan_on_report(const ReportDesc *rep) {
+ (void)rep;
+}
+
+static void StackStripMain(SymbolizedStack *frames) {
+ SymbolizedStack *last_frame = nullptr;
+ SymbolizedStack *last_frame2 = nullptr;
+ for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
+ last_frame2 = last_frame;
+ last_frame = cur;
+ }
+
+ if (last_frame2 == 0)
+ return;
+#if !SANITIZER_GO
+ const char *last = last_frame->info.function;
+ const char *last2 = last_frame2->info.function;
+ // Strip frame above 'main'
+ if (last2 && 0 == internal_strcmp(last2, "main")) {
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
+ // Strip our internal thread start routine.
+ } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
+ // Strip global ctors init, .preinit_array and main caller.
+ } else if (last && (0 == internal_strcmp(last, "__do_global_ctors_aux") ||
+ 0 == internal_strcmp(last, "__libc_csu_init") ||
+ 0 == internal_strcmp(last, "__libc_start_main"))) {
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
+ // If both are 0, then we probably just failed to symbolize.
+ } else if (last || last2) {
+    // Ensure that we recovered the stack completely. A trimmed stack can
+    // legitimately happen if we do not instrument some code, so this is only
+    // a debug print. However, we must try hard not to miss it when the
+    // truncation is our fault.
+ DPrintf("Bottom stack frame is missed\n");
+ }
+#else
+  // The last frame always points into runtime (gosched0, goexit0, runtime.main).
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
+#endif
+}
+
+ReportStack *SymbolizeStackId(u32 stack_id) {
+ if (stack_id == 0)
+ return 0;
+ StackTrace stack = StackDepotGet(stack_id);
+ if (stack.trace == nullptr)
+ return nullptr;
+ return SymbolizeStack(stack);
+}
+
+static ReportStack *SymbolizeStack(StackTrace trace) {
+ if (trace.size == 0)
+ return 0;
+ SymbolizedStack *top = nullptr;
+ for (uptr si = 0; si < trace.size; si++) {
+ const uptr pc = trace.trace[si];
+ uptr pc1 = pc;
+ // We obtain the return address, but we're interested in the previous
+ // instruction.
+ if ((pc & kExternalPCBit) == 0)
+ pc1 = StackTrace::GetPreviousInstructionPc(pc);
+ SymbolizedStack *ent = SymbolizeCode(pc1);
+ CHECK_NE(ent, 0);
+ SymbolizedStack *last = ent;
+ while (last->next) {
+ last->info.address = pc; // restore original pc for report
+ last = last->next;
+ }
+ last->info.address = pc; // restore original pc for report
+ last->next = top;
+ top = ent;
+ }
+ StackStripMain(top);
+
+ auto *stack = New<ReportStack>();
+ stack->frames = top;
+ return stack;
+}
+
+bool ShouldReport(ThreadState *thr, ReportType typ) {
+ // We set thr->suppress_reports in the fork context.
+  // Taking any locks in the fork context can lead to deadlocks.
+ // If any locks are already taken, it's too late to do this check.
+ CheckedMutex::CheckNoLocks();
+  // For the same reason, check that we haven't locked thread_registry yet.
+ if (SANITIZER_DEBUG)
+ ThreadRegistryLock l(&ctx->thread_registry);
+ if (!flags()->report_bugs || thr->suppress_reports)
+ return false;
+ switch (typ) {
+ case ReportTypeSignalUnsafe:
+ return flags()->report_signal_unsafe;
+ case ReportTypeThreadLeak:
+#if !SANITIZER_GO
+ // It's impossible to join phantom threads
+ // in the child after fork.
+ if (ctx->after_multithreaded_fork)
+ return false;
+#endif
+ return flags()->report_thread_leaks;
+ case ReportTypeMutexDestroyLocked:
+ return flags()->report_destroy_locked;
+ default:
+ return true;
+ }
+}
+
+ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
+ ctx->thread_registry.CheckLocked();
+ rep_ = New<ReportDesc>();
+ rep_->typ = typ;
+ rep_->tag = tag;
+ ctx->report_mtx.Lock();
+}
+
+ScopedReportBase::~ScopedReportBase() {
+ ctx->report_mtx.Unlock();
+ DestroyAndFree(rep_);
+}
+
+void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {
+ ReportStack **rs = rep_->stacks.PushBack();
+ *rs = SymbolizeStack(stack);
+ (*rs)->suppressable = suppressable;
+}
+
+void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
+ StackTrace stack, const MutexSet *mset) {
+ auto *mop = New<ReportMop>();
+ rep_->mops.PushBack(mop);
+ mop->tid = s.tid();
+ mop->addr = addr + s.addr0();
+ mop->size = s.size();
+ mop->write = s.IsWrite();
+ mop->atomic = s.IsAtomic();
+ mop->stack = SymbolizeStack(stack);
+ mop->external_tag = external_tag;
+ if (mop->stack)
+ mop->stack->suppressable = true;
+ for (uptr i = 0; i < mset->Size(); i++) {
+ MutexSet::Desc d = mset->Get(i);
+ u64 mid = this->AddMutex(d.id);
+ ReportMopMutex mtx = {mid, d.write};
+ mop->mset.PushBack(mtx);
+ }
+}
+
+void ScopedReportBase::AddUniqueTid(Tid unique_tid) {
+ rep_->unique_tids.PushBack(unique_tid);
+}
+
+void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
+ for (uptr i = 0; i < rep_->threads.Size(); i++) {
+ if ((u32)rep_->threads[i]->id == tctx->tid)
+ return;
+ }
+ auto *rt = New<ReportThread>();
+ rep_->threads.PushBack(rt);
+ rt->id = tctx->tid;
+ rt->os_id = tctx->os_id;
+ rt->running = (tctx->status == ThreadStatusRunning);
+ rt->name = internal_strdup(tctx->name);
+ rt->parent_tid = tctx->parent_tid;
+ rt->thread_type = tctx->thread_type;
+ rt->stack = 0;
+ rt->stack = SymbolizeStackId(tctx->creation_stack_id);
+ if (rt->stack)
+ rt->stack->suppressable = suppressable;
+}
+
+#if !SANITIZER_GO
+static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) {
+ int unique_id = *(int *)arg;
+ return tctx->unique_id == (u32)unique_id;
+}
+
+static ThreadContext *FindThreadByUidLocked(Tid unique_id) {
+ ctx->thread_registry.CheckLocked();
+ return static_cast<ThreadContext *>(
+ ctx->thread_registry.FindThreadContextLocked(
+ FindThreadByUidLockedCallback, &unique_id));
+}
+
+static ThreadContext *FindThreadByTidLocked(Tid tid) {
+ ctx->thread_registry.CheckLocked();
+ return static_cast<ThreadContext *>(
+ ctx->thread_registry.GetThreadLocked(tid));
+}
+
+static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
+ uptr addr = (uptr)arg;
+ ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+ if (tctx->status != ThreadStatusRunning)
+ return false;
+ ThreadState *thr = tctx->thr;
+ CHECK(thr);
+ return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
+ (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
+}
+
+ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
+ ctx->thread_registry.CheckLocked();
+ ThreadContext *tctx =
+ static_cast<ThreadContext *>(ctx->thread_registry.FindThreadContextLocked(
+ IsInStackOrTls, (void *)addr));
+ if (!tctx)
+ return 0;
+ ThreadState *thr = tctx->thr;
+ CHECK(thr);
+ *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
+ return tctx;
+}
+#endif
+
+void ScopedReportBase::AddThread(Tid unique_tid, bool suppressable) {
+#if !SANITIZER_GO
+ if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
+ AddThread(tctx, suppressable);
+#endif
+}
+
+void ScopedReportBase::AddMutex(const SyncVar *s) {
+ for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
+ if (rep_->mutexes[i]->id == s->uid)
+ return;
+ }
+ auto *rm = New<ReportMutex>();
+ rep_->mutexes.PushBack(rm);
+ rm->id = s->uid;
+ rm->addr = s->addr;
+ rm->destroyed = false;
+ rm->stack = SymbolizeStackId(s->creation_stack_id);
+}
+
+u64 ScopedReportBase::AddMutex(u64 id) {
+ u64 uid = 0;
+ u64 mid = id;
+ uptr addr = SyncVar::SplitId(id, &uid);
+ SyncVar *s = ctx->metamap.GetSyncIfExists(addr);
+ // Check that the mutex is still alive.
+ // Another mutex can be created at the same address,
+ // so check uid as well.
+ if (s && s->CheckId(uid)) {
+ Lock l(&s->mtx);
+ mid = s->uid;
+ AddMutex(s);
+ } else {
+ AddDeadMutex(id);
+ }
+ return mid;
+}
+
+void ScopedReportBase::AddDeadMutex(u64 id) {
+ for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
+ if (rep_->mutexes[i]->id == id)
+ return;
+ }
+ auto *rm = New<ReportMutex>();
+ rep_->mutexes.PushBack(rm);
+ rm->id = id;
+ rm->addr = 0;
+ rm->destroyed = true;
+ rm->stack = 0;
+}
+
+void ScopedReportBase::AddLocation(uptr addr, uptr size) {
+ if (addr == 0)
+ return;
+#if !SANITIZER_GO
+ int fd = -1;
+ Tid creat_tid = kInvalidTid;
+ StackID creat_stack = 0;
+ if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
+ auto *loc = New<ReportLocation>();
+ loc->type = ReportLocationFD;
+ loc->fd = fd;
+ loc->tid = creat_tid;
+ loc->stack = SymbolizeStackId(creat_stack);
+ rep_->locs.PushBack(loc);
+ ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
+ if (tctx)
+ AddThread(tctx);
+ return;
+ }
+ MBlock *b = 0;
+ uptr block_begin = 0;
+ Allocator *a = allocator();
+ if (a->PointerIsMine((void*)addr)) {
+ block_begin = (uptr)a->GetBlockBegin((void *)addr);
+ if (block_begin)
+ b = ctx->metamap.GetBlock(block_begin);
+ }
+ if (!b)
+ b = JavaHeapBlock(addr, &block_begin);
+ if (b != 0) {
+ ThreadContext *tctx = FindThreadByTidLocked(b->tid);
+ auto *loc = New<ReportLocation>();
+ loc->type = ReportLocationHeap;
+ loc->heap_chunk_start = block_begin;
+ loc->heap_chunk_size = b->siz;
+ loc->external_tag = b->tag;
+ loc->tid = tctx ? tctx->tid : b->tid;
+ loc->stack = SymbolizeStackId(b->stk);
+ rep_->locs.PushBack(loc);
+ if (tctx)
+ AddThread(tctx);
+ return;
+ }
+ bool is_stack = false;
+ if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
+ auto *loc = New<ReportLocation>();
+ loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
+ loc->tid = tctx->tid;
+ rep_->locs.PushBack(loc);
+ AddThread(tctx);
+ }
+#endif
+ if (ReportLocation *loc = SymbolizeData(addr)) {
+ loc->suppressable = true;
+ rep_->locs.PushBack(loc);
+ return;
+ }
+}
+
+#if !SANITIZER_GO
+void ScopedReportBase::AddSleep(StackID stack_id) {
+ rep_->sleep = SymbolizeStackId(stack_id);
+}
+#endif
+
+void ScopedReportBase::SetCount(int count) { rep_->count = count; }
+
+const ReportDesc *ScopedReportBase::GetReport() const { return rep_; }
+
+ScopedReport::ScopedReport(ReportType typ, uptr tag)
+ : ScopedReportBase(typ, tag) {}
+
+ScopedReport::~ScopedReport() {}
+
+void RestoreStack(Tid tid, const u64 epoch, VarSizeStackTrace *stk,
+ MutexSet *mset, uptr *tag) {
+ // This function restores stack trace and mutex set for the thread/epoch.
+ // It does so by getting stack trace and mutex set at the beginning of
+ // trace part, and then replaying the trace till the given epoch.
+ Trace* trace = ThreadTrace(tid);
+ ReadLock l(&trace->mtx);
+ const int partidx = (epoch / kTracePartSize) % TraceParts();
+ TraceHeader* hdr = &trace->headers[partidx];
+ if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize)
+ return;
+ CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0);
+ const u64 epoch0 = RoundDown(epoch, TraceSize());
+ const u64 eend = epoch % TraceSize();
+ const u64 ebegin = RoundDown(eend, kTracePartSize);
+ DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
+ tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
+ Vector<uptr> stack;
+ stack.Resize(hdr->stack0.size + 64);
+ for (uptr i = 0; i < hdr->stack0.size; i++) {
+ stack[i] = hdr->stack0.trace[i];
+ DPrintf2(" #%02zu: pc=%zx\n", i, stack[i]);
+ }
+ if (mset)
+ *mset = hdr->mset0;
+ uptr pos = hdr->stack0.size;
+ Event *events = (Event*)GetThreadTrace(tid);
+ for (uptr i = ebegin; i <= eend; i++) {
+ Event ev = events[i];
+ EventType typ = (EventType)(ev >> kEventPCBits);
+ uptr pc = (uptr)(ev & ((1ull << kEventPCBits) - 1));
+ DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc);
+ if (typ == EventTypeMop) {
+ stack[pos] = pc;
+ } else if (typ == EventTypeFuncEnter) {
+ if (stack.Size() < pos + 2)
+ stack.Resize(pos + 2);
+ stack[pos++] = pc;
+ } else if (typ == EventTypeFuncExit) {
+ if (pos > 0)
+ pos--;
+ }
+ if (mset) {
+ if (typ == EventTypeLock) {
+ mset->Add(pc, true, epoch0 + i);
+ } else if (typ == EventTypeUnlock) {
+ mset->Del(pc, true);
+ } else if (typ == EventTypeRLock) {
+ mset->Add(pc, false, epoch0 + i);
+ } else if (typ == EventTypeRUnlock) {
+ mset->Del(pc, false);
+ }
+ }
+ for (uptr j = 0; j <= pos; j++)
+ DPrintf2(" #%zu: %zx\n", j, stack[j]);
+ }
+ if (pos == 0 && stack[0] == 0)
+ return;
+ pos++;
+ stk->Init(&stack[0], pos);
+ ExtractTagFromStack(stk, tag);
+}
+
+namespace v3 {
+
+// Replays the trace up to last_pos position in the last part
+// or up to the provided epoch/sid (whichever is earlier)
+// and calls the provided function f for each event.
+template <typename Func>
+void TraceReplay(Trace *trace, TracePart *last, Event *last_pos, Sid sid,
+ Epoch epoch, Func f) {
+ TracePart *part = trace->parts.Front();
+ Sid ev_sid = kFreeSid;
+ Epoch ev_epoch = kEpochOver;
+ for (;;) {
+ DCHECK_EQ(part->trace, trace);
+ // Note: an event can't start in the last element.
+ // Since an event can take up to 2 elements,
+ // we ensure we have at least 2 before adding an event.
+ Event *end = &part->events[TracePart::kSize - 1];
+ if (part == last)
+ end = last_pos;
+ for (Event *evp = &part->events[0]; evp < end; evp++) {
+ Event *evp0 = evp;
+ if (!evp->is_access && !evp->is_func) {
+ switch (evp->type) {
+ case EventType::kTime: {
+ auto *ev = reinterpret_cast<EventTime *>(evp);
+ ev_sid = static_cast<Sid>(ev->sid);
+ ev_epoch = static_cast<Epoch>(ev->epoch);
+ if (ev_sid == sid && ev_epoch > epoch)
+ return;
+ break;
+ }
+ case EventType::kAccessExt:
+ FALLTHROUGH;
+ case EventType::kAccessRange:
+ FALLTHROUGH;
+ case EventType::kLock:
+ FALLTHROUGH;
+ case EventType::kRLock:
+ // These take 2 Event elements.
+ evp++;
+ break;
+ case EventType::kUnlock:
+ // This takes 1 Event element.
+ break;
+ }
+ }
+ CHECK_NE(ev_sid, kFreeSid);
+ CHECK_NE(ev_epoch, kEpochOver);
+ f(ev_sid, ev_epoch, evp0);
+ }
+ if (part == last)
+ return;
+ part = trace->parts.Next(part);
+ CHECK(part);
+ }
+ CHECK(0);
+}
+
+static void RestoreStackMatch(VarSizeStackTrace *pstk, MutexSet *pmset,
+ Vector<uptr> *stack, MutexSet *mset, uptr pc,
+ bool *found) {
+ DPrintf2(" MATCHED\n");
+ *pmset = *mset;
+ stack->PushBack(pc);
+ pstk->Init(&(*stack)[0], stack->Size());
+ stack->PopBack();
+ *found = true;
+}
+
+// Checks if addr1|size1 is fully contained in addr2|size2.
+// We check for fully contained instead of just overlapping
+// because a memory access is always traced once, but can be
+// split into multiple accesses in the shadow.
+static constexpr bool IsWithinAccess(uptr addr1, uptr size1, uptr addr2,
+ uptr size2) {
+ return addr1 >= addr2 && addr1 + size1 <= addr2 + size2;
+}
+
+// Replays the trace of thread tid up to the target event identified
+// by sid/epoch/addr/size/typ and restores and returns stack, mutex set
+// and tag for that event. If there are multiple such events, it returns
+// the last one. Returns false if the event is not present in the trace.
+bool RestoreStack(Tid tid, EventType type, Sid sid, Epoch epoch, uptr addr,
+ uptr size, AccessType typ, VarSizeStackTrace *pstk,
+ MutexSet *pmset, uptr *ptag) {
+ // This function restores stack trace and mutex set for the thread/epoch.
+ // It does so by getting stack trace and mutex set at the beginning of
+ // trace part, and then replaying the trace till the given epoch.
+ DPrintf2("RestoreStack: tid=%u sid=%u@%u addr=0x%zx/%zu typ=%x\n", tid,
+ static_cast<int>(sid), static_cast<int>(epoch), addr, size,
+ static_cast<int>(typ));
+ ctx->slot_mtx.CheckLocked(); // needed to prevent trace part recycling
+ ctx->thread_registry.CheckLocked();
+ ThreadContext *tctx =
+ static_cast<ThreadContext *>(ctx->thread_registry.GetThreadLocked(tid));
+ Trace *trace = &tctx->trace;
+ // Snapshot first/last parts and the current position in the last part.
+ TracePart *first_part;
+ TracePart *last_part;
+ Event *last_pos;
+ {
+ Lock lock(&trace->mtx);
+ first_part = trace->parts.Front();
+ if (!first_part)
+ return false;
+ last_part = trace->parts.Back();
+ last_pos = trace->final_pos;
+ if (tctx->thr)
+ last_pos = (Event *)atomic_load_relaxed(&tctx->thr->trace_pos);
+ }
+ DynamicMutexSet mset;
+ Vector<uptr> stack;
+ uptr prev_pc = 0;
+ bool found = false;
+ bool is_read = typ & kAccessRead;
+ bool is_atomic = typ & kAccessAtomic;
+ bool is_free = typ & kAccessFree;
+ TraceReplay(
+ trace, last_part, last_pos, sid, epoch,
+ [&](Sid ev_sid, Epoch ev_epoch, Event *evp) {
+ bool match = ev_sid == sid && ev_epoch == epoch;
+ if (evp->is_access) {
+ if (evp->is_func == 0 && evp->type == EventType::kAccessExt &&
+ evp->_ == 0) // NopEvent
+ return;
+ auto *ev = reinterpret_cast<EventAccess *>(evp);
+ uptr ev_addr = RestoreAddr(ev->addr);
+ uptr ev_size = 1 << ev->size_log;
+ uptr ev_pc =
+ prev_pc + ev->pc_delta - (1 << (EventAccess::kPCBits - 1));
+ prev_pc = ev_pc;
+ DPrintf2(" Access: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
+ ev_addr, ev_size, ev->is_read, ev->is_atomic);
+ if (match && type == EventType::kAccessExt &&
+ IsWithinAccess(addr, size, ev_addr, ev_size) &&
+ is_read == ev->is_read && is_atomic == ev->is_atomic && !is_free)
+ RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
+ return;
+ }
+ if (evp->is_func) {
+ auto *ev = reinterpret_cast<EventFunc *>(evp);
+ if (ev->pc) {
+ DPrintf2(" FuncEnter: pc=0x%llx\n", ev->pc);
+ stack.PushBack(ev->pc);
+ } else {
+ DPrintf2(" FuncExit\n");
+ CHECK(stack.Size());
+ stack.PopBack();
+ }
+ return;
+ }
+ switch (evp->type) {
+ case EventType::kAccessExt: {
+ auto *ev = reinterpret_cast<EventAccessExt *>(evp);
+ uptr ev_addr = RestoreAddr(ev->addr);
+ uptr ev_size = 1 << ev->size_log;
+ prev_pc = ev->pc;
+ DPrintf2(" AccessExt: pc=0x%llx addr=0x%zx/%zu type=%u/%u\n",
+ ev->pc, ev_addr, ev_size, ev->is_read, ev->is_atomic);
+ if (match && type == EventType::kAccessExt &&
+ IsWithinAccess(addr, size, ev_addr, ev_size) &&
+ is_read == ev->is_read && is_atomic == ev->is_atomic &&
+ !is_free)
+ RestoreStackMatch(pstk, pmset, &stack, mset, ev->pc, &found);
+ break;
+ }
+ case EventType::kAccessRange: {
+ auto *ev = reinterpret_cast<EventAccessRange *>(evp);
+ uptr ev_addr = RestoreAddr(ev->addr);
+ uptr ev_size =
+ (ev->size_hi << EventAccessRange::kSizeLoBits) + ev->size_lo;
+ uptr ev_pc = RestoreAddr(ev->pc);
+ prev_pc = ev_pc;
+ DPrintf2(" Range: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
+ ev_addr, ev_size, ev->is_read, ev->is_free);
+ if (match && type == EventType::kAccessExt &&
+ IsWithinAccess(addr, size, ev_addr, ev_size) &&
+ is_read == ev->is_read && !is_atomic && is_free == ev->is_free)
+ RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
+ break;
+ }
+ case EventType::kLock:
+ FALLTHROUGH;
+ case EventType::kRLock: {
+ auto *ev = reinterpret_cast<EventLock *>(evp);
+ bool is_write = ev->type == EventType::kLock;
+ uptr ev_addr = RestoreAddr(ev->addr);
+ uptr ev_pc = RestoreAddr(ev->pc);
+ StackID stack_id =
+ (ev->stack_hi << EventLock::kStackIDLoBits) + ev->stack_lo;
+ DPrintf2(" Lock: pc=0x%zx addr=0x%zx stack=%u write=%d\n", ev_pc,
+ ev_addr, stack_id, is_write);
+ mset->AddAddr(ev_addr, stack_id, is_write);
+          // Events with ev_pc == 0 are written at the beginning of a trace
+          // part to record the initial mutex set; they are not real lock events.
+ if (match && type == EventType::kLock && addr == ev_addr && ev_pc)
+ RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
+ break;
+ }
+ case EventType::kUnlock: {
+ auto *ev = reinterpret_cast<EventUnlock *>(evp);
+ uptr ev_addr = RestoreAddr(ev->addr);
+ DPrintf2(" Unlock: addr=0x%zx\n", ev_addr);
+ mset->DelAddr(ev_addr);
+ break;
+ }
+ case EventType::kTime:
+ // TraceReplay already extracted sid/epoch from it,
+ // nothing else to do here.
+ break;
+ }
+ });
+ ExtractTagFromStack(pstk, ptag);
+ return found;
+}
+
+} // namespace v3
+
+bool RacyStacks::operator==(const RacyStacks &other) const {
+ if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
+ return true;
+ if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
+ return true;
+ return false;
+}
+
+static bool FindRacyStacks(const RacyStacks &hash) {
+ for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
+ if (hash == ctx->racy_stacks[i]) {
+ VPrintf(2, "ThreadSanitizer: suppressing report as doubled (stack)\n");
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2]) {
+ if (!flags()->suppress_equal_stacks)
+ return false;
+ RacyStacks hash;
+ hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
+ hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
+ {
+ ReadLock lock(&ctx->racy_mtx);
+ if (FindRacyStacks(hash))
+ return true;
+ }
+ Lock lock(&ctx->racy_mtx);
+ if (FindRacyStacks(hash))
+ return true;
+ ctx->racy_stacks.PushBack(hash);
+ return false;
+}
+
+static bool FindRacyAddress(const RacyAddress &ra0) {
+ for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
+ RacyAddress ra2 = ctx->racy_addresses[i];
+ uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
+ uptr minend = min(ra0.addr_max, ra2.addr_max);
+ if (maxbeg < minend) {
+ VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool HandleRacyAddress(ThreadState *thr, uptr addr_min, uptr addr_max) {
+ if (!flags()->suppress_equal_addresses)
+ return false;
+ RacyAddress ra0 = {addr_min, addr_max};
+ {
+ ReadLock lock(&ctx->racy_mtx);
+ if (FindRacyAddress(ra0))
+ return true;
+ }
+ Lock lock(&ctx->racy_mtx);
+ if (FindRacyAddress(ra0))
+ return true;
+ ctx->racy_addresses.PushBack(ra0);
+ return false;
+}
+
+bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
+ // These should have been checked in ShouldReport.
+ // It's too late to check them here, we have already taken locks.
+ CHECK(flags()->report_bugs);
+ CHECK(!thr->suppress_reports);
+ atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
+ const ReportDesc *rep = srep.GetReport();
+ CHECK_EQ(thr->current_report, nullptr);
+ thr->current_report = rep;
+ Suppression *supp = 0;
+ uptr pc_or_addr = 0;
+ for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
+ pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
+ for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
+ pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
+ for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
+ pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
+ for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
+ pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
+ if (pc_or_addr != 0) {
+ Lock lock(&ctx->fired_suppressions_mtx);
+ FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
+ ctx->fired_suppressions.push_back(s);
+ }
+ {
+ bool old_is_freeing = thr->is_freeing;
+ thr->is_freeing = false;
+ bool suppressed = OnReport(rep, pc_or_addr != 0);
+ thr->is_freeing = old_is_freeing;
+ if (suppressed) {
+ thr->current_report = nullptr;
+ return false;
+ }
+ }
+ PrintReport(rep);
+ __tsan_on_report(rep);
+ ctx->nreported++;
+ if (flags()->halt_on_error)
+ Die();
+ thr->current_report = nullptr;
+ return true;
+}
+
+bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
+ ReadLock lock(&ctx->fired_suppressions_mtx);
+ for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
+ if (ctx->fired_suppressions[k].type != type)
+ continue;
+ for (uptr j = 0; j < trace.size; j++) {
+ FiredSuppression *s = &ctx->fired_suppressions[k];
+ if (trace.trace[j] == s->pc_or_addr) {
+ if (s->supp)
+ atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
+ ReadLock lock(&ctx->fired_suppressions_mtx);
+ for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
+ if (ctx->fired_suppressions[k].type != type)
+ continue;
+ FiredSuppression *s = &ctx->fired_suppressions[k];
+ if (addr == s->pc_or_addr) {
+ if (s->supp)
+ atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
+ Shadow s0(thr->racy_state[0]);
+ Shadow s1(thr->racy_state[1]);
+ CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
+ if (!s0.IsAtomic() && !s1.IsAtomic())
+ return true;
+ if (s0.IsAtomic() && s1.IsFreed())
+ return true;
+ if (s1.IsAtomic() && thr->is_freeing)
+ return true;
+ return false;
+}
+
+void ReportRace(ThreadState *thr) {
+ CheckedMutex::CheckNoLocks();
+
+ // Symbolizer makes lots of intercepted calls. If we try to process them,
+ // at best it will cause deadlocks on internal mutexes.
+ ScopedIgnoreInterceptors ignore;
+
+ if (!ShouldReport(thr, ReportTypeRace))
+ return;
+ if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
+ return;
+
+ bool freed = false;
+ {
+ Shadow s(thr->racy_state[1]);
+ freed = s.GetFreedAndReset();
+ thr->racy_state[1] = s.raw();
+ }
+
+ uptr addr = ShadowToMem(thr->racy_shadow_addr);
+ uptr addr_min = 0;
+ uptr addr_max = 0;
+ {
+ uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
+ uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
+ uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
+ uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
+ addr_min = min(a0, a1);
+ addr_max = max(e0, e1);
+ if (IsExpectedReport(addr_min, addr_max - addr_min))
+ return;
+ }
+ if (HandleRacyAddress(thr, addr_min, addr_max))
+ return;
+
+ ReportType typ = ReportTypeRace;
+ if (thr->is_vptr_access && freed)
+ typ = ReportTypeVptrUseAfterFree;
+ else if (thr->is_vptr_access)
+ typ = ReportTypeVptrRace;
+ else if (freed)
+ typ = ReportTypeUseAfterFree;
+
+ if (IsFiredSuppression(ctx, typ, addr))
+ return;
+
+ const uptr kMop = 2;
+ VarSizeStackTrace traces[kMop];
+ uptr tags[kMop] = {kExternalTagNone};
+ uptr toppc = TraceTopPC(thr);
+ if (toppc >> kEventPCBits) {
+ // This is a work-around for a known issue.
+ // The scenario where this happens is rather elaborate and requires
+ // an instrumented __sanitizer_report_error_summary callback and
+ // a __tsan_symbolize_external callback and a race during a range memory
+ // access larger than 8 bytes. MemoryAccessRange adds the current PC to
+ // the trace and starts processing memory accesses. A first memory access
+ // triggers a race, we report it and call the instrumented
+ // __sanitizer_report_error_summary, which adds more stuff to the trace
+  // since it is instrumented. Then a second memory access in MemoryAccessRange
+ // also triggers a race and we get here and call TraceTopPC to get the
+ // current PC, however now it contains some unrelated events from the
+  // callback. Most likely, TraceTopPC will now return an EventTypeFuncExit
+  // event. Later we subtract 1 from it (in GetPreviousInstructionPc)
+ // and the resulting PC has kExternalPCBit set, so we pass it to
+ // __tsan_symbolize_external_ex. __tsan_symbolize_external_ex is within its
+ // rights to crash since the PC is completely bogus.
+ // test/tsan/double_race.cpp contains a test case for this.
+ toppc = 0;
+ }
+ ObtainCurrentStack(thr, toppc, &traces[0], &tags[0]);
+ if (IsFiredSuppression(ctx, typ, traces[0]))
+ return;
+
+ DynamicMutexSet mset2;
+ Shadow s2(thr->racy_state[1]);
+ RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2, &tags[1]);
+ if (IsFiredSuppression(ctx, typ, traces[1]))
+ return;
+
+ if (HandleRacyStacks(thr, traces))
+ return;
+
+ // If any of the accesses has a tag, treat this as an "external" race.
+ uptr tag = kExternalTagNone;
+ for (uptr i = 0; i < kMop; i++) {
+ if (tags[i] != kExternalTagNone) {
+ typ = ReportTypeExternalRace;
+ tag = tags[i];
+ break;
+ }
+ }
+
+ ThreadRegistryLock l0(&ctx->thread_registry);
+ ScopedReport rep(typ, tag);
+ for (uptr i = 0; i < kMop; i++) {
+ Shadow s(thr->racy_state[i]);
+ rep.AddMemoryAccess(addr, tags[i], s, traces[i],
+ i == 0 ? &thr->mset : mset2);
+ }
+
+ for (uptr i = 0; i < kMop; i++) {
+ FastState s(thr->racy_state[i]);
+ ThreadContext *tctx = static_cast<ThreadContext *>(
+ ctx->thread_registry.GetThreadLocked(s.tid()));
+ if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
+ continue;
+ rep.AddThread(tctx);
+ }
+
+ rep.AddLocation(addr_min, addr_max - addr_min);
+
+#if !SANITIZER_GO
+ {
+ Shadow s(thr->racy_state[1]);
+ if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
+ rep.AddSleep(thr->last_sleep_stack_id);
+ }
+#endif
+
+ OutputReport(thr, rep);
+}
+
+void PrintCurrentStack(ThreadState *thr, uptr pc) {
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ PrintStack(SymbolizeStack(trace));
+}
+
+// Always inlining PrintCurrentStackSlow, because LocatePcInTrace assumes
+// __sanitizer_print_stack_trace exists in the actual unwound stack, but
+// tail-call to PrintCurrentStackSlow breaks this assumption because
+// __sanitizer_print_stack_trace disappears after tail-call.
+// However, this solution is not reliable enough, please see dvyukov's comment
+// http://reviews.llvm.org/D19148#406208
+// Also see PR27280 comment 2 and 3 for breaking examples and analysis.
+ALWAYS_INLINE USED void PrintCurrentStackSlow(uptr pc) {
+#if !SANITIZER_GO
+ uptr bp = GET_CURRENT_FRAME();
+ auto *ptrace = New<BufferedStackTrace>();
+ ptrace->Unwind(pc, bp, nullptr, false);
+
+ for (uptr i = 0; i < ptrace->size / 2; i++) {
+ uptr tmp = ptrace->trace_buffer[i];
+ ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
+ ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
+ }
+ PrintStack(SymbolizeStack(*ptrace));
+#endif
+}
+
+} // namespace __tsan
+
+using namespace __tsan;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_print_stack_trace() {
+ PrintCurrentStackSlow(StackTrace::GetCurrentPc());
+}
+} // extern "C"
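
Illustrative sketch (not part of this commit): OutputReport above consults the
weak OnReport hook and drops the report when it returns true. A test or
embedding application can provide its own definition; the counter name and the
exact build setup (TSAN_EXTERNAL_HOOKS, visibility of ReportDesc) below are
assumptions for illustration only.

  namespace __tsan {
  static int g_intercepted_reports;
  // Strong definition overriding the weak default (or supplied by the
  // application when the runtime is built with TSAN_EXTERNAL_HOOKS).
  bool OnReport(const ReportDesc *rep, bool suppressed) {
    (void)rep;
    (void)suppressed;
    g_intercepted_reports++;
    return true;  // tell OutputReport to suppress this report
  }
  }  // namespace __tsan
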
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_rtl_s390x.S b/compiler-rt/lib/tsan/rtl-old/tsan_rtl_s390x.S
new file mode 100644
index 000000000000..fcff35fbc7e0
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_rtl_s390x.S
@@ -0,0 +1,47 @@
+#include "sanitizer_common/sanitizer_asm.h"
+
+#define CFA_OFFSET 160
+#define R2_REL_OFFSET 16
+#define R3_REL_OFFSET 24
+#define R14_REL_OFFSET 112
+#define R15_REL_OFFSET 120
+#define FRAME_SIZE 160
+
+.text
+
+ASM_HIDDEN(__tsan_setjmp)
+
+.macro intercept symbol, real
+.comm \real, 8, 8
+.globl ASM_SYMBOL_INTERCEPTOR(\symbol)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(\symbol))
+ASM_SYMBOL_INTERCEPTOR(\symbol):
+ CFI_STARTPROC
+ stmg %r2, %r3, R2_REL_OFFSET(%r15)
+ CFI_REL_OFFSET(%r2, R2_REL_OFFSET)
+ CFI_REL_OFFSET(%r3, R3_REL_OFFSET)
+ stmg %r14, %r15, R14_REL_OFFSET(%r15)
+ CFI_REL_OFFSET(%r14, R14_REL_OFFSET)
+ CFI_REL_OFFSET(%r15, R15_REL_OFFSET)
+ aghi %r15, -FRAME_SIZE
+ CFI_ADJUST_CFA_OFFSET(FRAME_SIZE)
+ la %r2, FRAME_SIZE(%r15)
+ brasl %r14, ASM_SYMBOL(__tsan_setjmp)
+ lmg %r14, %r15, FRAME_SIZE + R14_REL_OFFSET(%r15)
+ CFI_RESTORE(%r14)
+ CFI_RESTORE(%r15)
+ CFI_DEF_CFA_OFFSET(CFA_OFFSET)
+ lmg %r2, %r3, R2_REL_OFFSET(%r15)
+ CFI_RESTORE(%r2)
+ CFI_RESTORE(%r3)
+ larl %r1, \real
+ lg %r1, 0(%r1)
+ br %r1
+ CFI_ENDPROC
+ ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(\symbol))
+.endm
+
+intercept setjmp, _ZN14__interception11real_setjmpE
+intercept _setjmp, _ZN14__interception12real__setjmpE
+intercept sigsetjmp, _ZN14__interception14real_sigsetjmpE
+intercept __sigsetjmp, _ZN14__interception16real___sigsetjmpE
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_rtl_thread.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_rtl_thread.cpp
new file mode 100644
index 000000000000..c8f7124c009d
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_rtl_thread.cpp
@@ -0,0 +1,349 @@
+//===-- tsan_rtl_thread.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "tsan_platform.h"
+#include "tsan_report.h"
+#include "tsan_sync.h"
+
+namespace __tsan {
+
+// ThreadContext implementation.
+
+ThreadContext::ThreadContext(Tid tid)
+ : ThreadContextBase(tid), thr(), sync(), epoch0(), epoch1() {}
+
+#if !SANITIZER_GO
+ThreadContext::~ThreadContext() {
+}
+#endif
+
+void ThreadContext::OnReset() {
+ CHECK_EQ(sync.size(), 0);
+ uptr trace_p = GetThreadTrace(tid);
+ ReleaseMemoryPagesToOS(trace_p, trace_p + TraceSize() * sizeof(Event));
+ //!!! ReleaseMemoryToOS(GetThreadTraceHeader(tid), sizeof(Trace));
+}
+
+#if !SANITIZER_GO
+struct ThreadLeak {
+ ThreadContext *tctx;
+ int count;
+};
+
+static void CollectThreadLeaks(ThreadContextBase *tctx_base, void *arg) {
+ auto &leaks = *static_cast<Vector<ThreadLeak> *>(arg);
+ auto *tctx = static_cast<ThreadContext *>(tctx_base);
+ if (tctx->detached || tctx->status != ThreadStatusFinished)
+ return;
+ for (uptr i = 0; i < leaks.Size(); i++) {
+ if (leaks[i].tctx->creation_stack_id == tctx->creation_stack_id) {
+ leaks[i].count++;
+ return;
+ }
+ }
+ leaks.PushBack({tctx, 1});
+}
+#endif
+
+#if !SANITIZER_GO
+static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
+ if (tctx->tid == kMainTid) {
+ Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
+ } else {
+ Printf("ThreadSanitizer: thread T%d %s finished with ignores enabled,"
+ " created at:\n", tctx->tid, tctx->name);
+ PrintStack(SymbolizeStackId(tctx->creation_stack_id));
+ }
+ Printf(" One of the following ignores was not ended"
+ " (in order of probability)\n");
+ for (uptr i = 0; i < set->Size(); i++) {
+ Printf(" Ignore was enabled at:\n");
+ PrintStack(SymbolizeStackId(set->At(i)));
+ }
+ Die();
+}
+
+static void ThreadCheckIgnore(ThreadState *thr) {
+ if (ctx->after_multithreaded_fork)
+ return;
+ if (thr->ignore_reads_and_writes)
+ ReportIgnoresEnabled(thr->tctx, &thr->mop_ignore_set);
+ if (thr->ignore_sync)
+ ReportIgnoresEnabled(thr->tctx, &thr->sync_ignore_set);
+}
+#else
+static void ThreadCheckIgnore(ThreadState *thr) {}
+#endif
+
+void ThreadFinalize(ThreadState *thr) {
+ ThreadCheckIgnore(thr);
+#if !SANITIZER_GO
+ if (!ShouldReport(thr, ReportTypeThreadLeak))
+ return;
+ ThreadRegistryLock l(&ctx->thread_registry);
+ Vector<ThreadLeak> leaks;
+ ctx->thread_registry.RunCallbackForEachThreadLocked(CollectThreadLeaks,
+ &leaks);
+ for (uptr i = 0; i < leaks.Size(); i++) {
+ ScopedReport rep(ReportTypeThreadLeak);
+ rep.AddThread(leaks[i].tctx, true);
+ rep.SetCount(leaks[i].count);
+ OutputReport(thr, rep);
+ }
+#endif
+}
+
+int ThreadCount(ThreadState *thr) {
+ uptr result;
+ ctx->thread_registry.GetNumberOfThreads(0, 0, &result);
+ return (int)result;
+}
+
+struct OnCreatedArgs {
+ ThreadState *thr;
+ uptr pc;
+};
+
+Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
+ OnCreatedArgs args = { thr, pc };
+ u32 parent_tid = thr ? thr->tid : kInvalidTid; // No parent for GCD workers.
+ Tid tid = ctx->thread_registry.CreateThread(uid, detached, parent_tid, &args);
+ DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent_tid, tid, uid);
+ return tid;
+}
+
+void ThreadContext::OnCreated(void *arg) {
+ thr = 0;
+ if (tid == kMainTid)
+ return;
+ OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
+ if (!args->thr) // GCD workers don't have a parent thread.
+ return;
+ args->thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
+ ReleaseImpl(args->thr, 0, &sync);
+ creation_stack_id = CurrentStackId(args->thr, args->pc);
+}
+
+extern "C" void __tsan_stack_initialization() {}
+
+struct OnStartedArgs {
+ ThreadState *thr;
+ uptr stk_addr;
+ uptr stk_size;
+ uptr tls_addr;
+ uptr tls_size;
+};
+
+void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
+ ThreadType thread_type) {
+ uptr stk_addr = 0;
+ uptr stk_size = 0;
+ uptr tls_addr = 0;
+ uptr tls_size = 0;
+#if !SANITIZER_GO
+ if (thread_type != ThreadType::Fiber)
+ GetThreadStackAndTls(tid == kMainTid, &stk_addr, &stk_size, &tls_addr,
+ &tls_size);
+#endif
+
+ ThreadRegistry *tr = &ctx->thread_registry;
+ OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
+ tr->StartThread(tid, os_id, thread_type, &args);
+
+ while (!thr->tctx->trace.parts.Empty()) thr->tctx->trace.parts.PopBack();
+
+#if !SANITIZER_GO
+ if (ctx->after_multithreaded_fork) {
+ thr->ignore_interceptors++;
+ ThreadIgnoreBegin(thr, 0);
+ ThreadIgnoreSyncBegin(thr, 0);
+ }
+#endif
+
+#if !SANITIZER_GO
+ // Don't imitate stack/TLS writes for the main thread,
+ // because its initialization is synchronized with all
+ // subsequent threads anyway.
+ if (tid != kMainTid) {
+ if (stk_addr && stk_size) {
+ const uptr pc = StackTrace::GetNextInstructionPc(
+ reinterpret_cast<uptr>(__tsan_stack_initialization));
+ MemoryRangeImitateWrite(thr, pc, stk_addr, stk_size);
+ }
+
+ if (tls_addr && tls_size)
+ ImitateTlsWrite(thr, tls_addr, tls_size);
+ }
+#endif
+}
+
+void ThreadContext::OnStarted(void *arg) {
+ OnStartedArgs *args = static_cast<OnStartedArgs *>(arg);
+ thr = args->thr;
+ // RoundUp so that one trace part does not contain events
+ // from different threads.
+ epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
+ epoch1 = (u64)-1;
+ new (thr)
+ ThreadState(ctx, tid, unique_id, epoch0, reuse_count, args->stk_addr,
+ args->stk_size, args->tls_addr, args->tls_size);
+ if (common_flags()->detect_deadlocks)
+ thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
+ thr->fast_state.SetHistorySize(flags()->history_size);
+ // Commit switch to the new part of the trace.
+ // TraceAddEvent will reset stack0/mset0 in the new part for us.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+
+ thr->fast_synch_epoch = epoch0;
+ AcquireImpl(thr, 0, &sync);
+ sync.Reset(&thr->proc()->clock_cache);
+ thr->tctx = this;
+ thr->is_inited = true;
+ DPrintf(
+ "#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
+ "tls_addr=%zx tls_size=%zx\n",
+ tid, (uptr)epoch0, args->stk_addr, args->stk_size, args->tls_addr,
+ args->tls_size);
+}
+
+void ThreadFinish(ThreadState *thr) {
+ ThreadCheckIgnore(thr);
+ if (thr->stk_addr && thr->stk_size)
+ DontNeedShadowFor(thr->stk_addr, thr->stk_size);
+ if (thr->tls_addr && thr->tls_size)
+ DontNeedShadowFor(thr->tls_addr, thr->tls_size);
+ thr->is_dead = true;
+ thr->is_inited = false;
+#if !SANITIZER_GO
+ thr->ignore_interceptors++;
+#endif
+ ctx->thread_registry.FinishThread(thr->tid);
+}
+
+void ThreadContext::OnFinished() {
+ if (!detached) {
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ ReleaseImpl(thr, 0, &sync);
+ }
+ epoch1 = thr->fast_state.epoch();
+
+#if !SANITIZER_GO
+ UnmapOrDie(thr->shadow_stack, kShadowStackSize * sizeof(uptr));
+#else
+ Free(thr->shadow_stack);
+#endif
+ thr->shadow_stack = nullptr;
+ thr->shadow_stack_pos = nullptr;
+ thr->shadow_stack_end = nullptr;
+
+ if (common_flags()->detect_deadlocks)
+ ctx->dd->DestroyLogicalThread(thr->dd_lt);
+ thr->clock.ResetCached(&thr->proc()->clock_cache);
+#if !SANITIZER_GO
+ thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
+#endif
+#if !SANITIZER_GO
+ PlatformCleanUpThreadState(thr);
+#endif
+ thr->~ThreadState();
+ thr = 0;
+}
+
+struct ConsumeThreadContext {
+ uptr uid;
+ ThreadContextBase *tctx;
+};
+
+Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid) {
+ return ctx->thread_registry.ConsumeThreadUserId(uid);
+}
+
+void ThreadJoin(ThreadState *thr, uptr pc, Tid tid) {
+ CHECK_GT(tid, 0);
+ CHECK_LT(tid, kMaxTid);
+ DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
+ ctx->thread_registry.JoinThread(tid, thr);
+}
+
+void ThreadContext::OnJoined(void *arg) {
+ ThreadState *caller_thr = static_cast<ThreadState *>(arg);
+ AcquireImpl(caller_thr, 0, &sync);
+ sync.Reset(&caller_thr->proc()->clock_cache);
+}
+
+void ThreadContext::OnDead() { CHECK_EQ(sync.size(), 0); }
+
+void ThreadDetach(ThreadState *thr, uptr pc, Tid tid) {
+ CHECK_GT(tid, 0);
+ CHECK_LT(tid, kMaxTid);
+ ctx->thread_registry.DetachThread(tid, thr);
+}
+
+void ThreadContext::OnDetached(void *arg) {
+ ThreadState *thr1 = static_cast<ThreadState *>(arg);
+ sync.Reset(&thr1->proc()->clock_cache);
+}
+
+void ThreadNotJoined(ThreadState *thr, uptr pc, Tid tid, uptr uid) {
+ CHECK_GT(tid, 0);
+ CHECK_LT(tid, kMaxTid);
+ ctx->thread_registry.SetThreadUserId(tid, uid);
+}
+
+void ThreadSetName(ThreadState *thr, const char *name) {
+ ctx->thread_registry.SetThreadName(thr->tid, name);
+}
+
+#if !SANITIZER_GO
+void FiberSwitchImpl(ThreadState *from, ThreadState *to) {
+ Processor *proc = from->proc();
+ ProcUnwire(proc, from);
+ ProcWire(proc, to);
+ set_cur_thread(to);
+}
+
+ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags) {
+ void *mem = Alloc(sizeof(ThreadState));
+ ThreadState *fiber = static_cast<ThreadState *>(mem);
+ internal_memset(fiber, 0, sizeof(*fiber));
+ Tid tid = ThreadCreate(thr, pc, 0, true);
+ FiberSwitchImpl(thr, fiber);
+ ThreadStart(fiber, tid, 0, ThreadType::Fiber);
+ FiberSwitchImpl(fiber, thr);
+ return fiber;
+}
+
+void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber) {
+ FiberSwitchImpl(thr, fiber);
+ ThreadFinish(fiber);
+ FiberSwitchImpl(fiber, thr);
+ Free(fiber);
+}
+
+void FiberSwitch(ThreadState *thr, uptr pc,
+ ThreadState *fiber, unsigned flags) {
+ if (!(flags & FiberSwitchFlagNoSync))
+ Release(thr, pc, (uptr)fiber);
+ FiberSwitchImpl(thr, fiber);
+ if (!(flags & FiberSwitchFlagNoSync))
+ Acquire(fiber, pc, (uptr)fiber);
+}
+#endif
+
+} // namespace __tsan
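
Illustrative sketch (not part of this commit): the fiber entry points above are
meant to be used in a create / switch / destroy sequence; "thr" and "pc" below
stand for the current ThreadState and caller PC and are hypothetical.

  ThreadState *fiber = FiberCreate(thr, pc, /*flags=*/0);
  // Release on the current thread, acquire on the fiber (skipped with
  // FiberSwitchFlagNoSync), then make the fiber the current thread.
  FiberSwitch(thr, pc, fiber, /*flags=*/0);
  // ... code running on the fiber ...
  FiberSwitch(fiber, pc, thr, /*flags=*/0);   // switch back
  FiberDestroy(thr, pc, fiber);               // finishes the fiber and frees it
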
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_shadow.h b/compiler-rt/lib/tsan/rtl-old/tsan_shadow.h
new file mode 100644
index 000000000000..8b7bc341713e
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_shadow.h
@@ -0,0 +1,233 @@
+//===-- tsan_shadow.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_SHADOW_H
+#define TSAN_SHADOW_H
+
+#include "tsan_defs.h"
+#include "tsan_trace.h"
+
+namespace __tsan {
+
+// FastState (from most significant bit):
+// ignore : 1
+// tid : kTidBits
+// unused : -
+// history_size : 3
+// epoch : kClkBits
+class FastState {
+ public:
+ FastState(u64 tid, u64 epoch) {
+ x_ = tid << kTidShift;
+ x_ |= epoch;
+ DCHECK_EQ(tid, this->tid());
+ DCHECK_EQ(epoch, this->epoch());
+ DCHECK_EQ(GetIgnoreBit(), false);
+ }
+
+ explicit FastState(u64 x) : x_(x) {}
+
+ u64 raw() const { return x_; }
+
+ u64 tid() const {
+ u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
+ return res;
+ }
+
+ u64 TidWithIgnore() const {
+ u64 res = x_ >> kTidShift;
+ return res;
+ }
+
+ u64 epoch() const {
+ u64 res = x_ & ((1ull << kClkBits) - 1);
+ return res;
+ }
+
+ void IncrementEpoch() {
+ u64 old_epoch = epoch();
+ x_ += 1;
+ DCHECK_EQ(old_epoch + 1, epoch());
+ (void)old_epoch;
+ }
+
+ void SetIgnoreBit() { x_ |= kIgnoreBit; }
+ void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
+ bool GetIgnoreBit() const { return (s64)x_ < 0; }
+
+ void SetHistorySize(int hs) {
+ CHECK_GE(hs, 0);
+ CHECK_LE(hs, 7);
+ x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
+ }
+
+ ALWAYS_INLINE
+ int GetHistorySize() const {
+ return (int)((x_ >> kHistoryShift) & kHistoryMask);
+ }
+
+ void ClearHistorySize() { SetHistorySize(0); }
+
+ ALWAYS_INLINE
+ u64 GetTracePos() const {
+ const int hs = GetHistorySize();
+ // When hs == 0, the trace consists of 2 parts.
+ const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
+ return epoch() & mask;
+ }
+
+ private:
+ friend class Shadow;
+ static const int kTidShift = 64 - kTidBits - 1;
+ static const u64 kIgnoreBit = 1ull << 63;
+ static const u64 kFreedBit = 1ull << 63;
+ static const u64 kHistoryShift = kClkBits;
+ static const u64 kHistoryMask = 7;
+ u64 x_;
+};
+
+// Shadow (from most significant bit):
+// freed : 1
+// tid : kTidBits
+// is_atomic : 1
+// is_read : 1
+// size_log : 2
+// addr0 : 3
+// epoch : kClkBits
+class Shadow : public FastState {
+ public:
+ explicit Shadow(u64 x) : FastState(x) {}
+
+ explicit Shadow(const FastState &s) : FastState(s.x_) { ClearHistorySize(); }
+
+ void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
+ DCHECK_EQ((x_ >> kClkBits) & 31, 0);
+ DCHECK_LE(addr0, 7);
+ DCHECK_LE(kAccessSizeLog, 3);
+ x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
+ DCHECK_EQ(kAccessSizeLog, size_log());
+ DCHECK_EQ(addr0, this->addr0());
+ }
+
+ void SetWrite(unsigned kAccessIsWrite) {
+ DCHECK_EQ(x_ & kReadBit, 0);
+ if (!kAccessIsWrite)
+ x_ |= kReadBit;
+ DCHECK_EQ(kAccessIsWrite, IsWrite());
+ }
+
+ void SetAtomic(bool kIsAtomic) {
+ DCHECK(!IsAtomic());
+ if (kIsAtomic)
+ x_ |= kAtomicBit;
+ DCHECK_EQ(IsAtomic(), kIsAtomic);
+ }
+
+ bool IsAtomic() const { return x_ & kAtomicBit; }
+
+ bool IsZero() const { return x_ == 0; }
+
+ static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
+ u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
+ DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
+ return shifted_xor == 0;
+ }
+
+ static ALWAYS_INLINE bool Addr0AndSizeAreEqual(const Shadow s1,
+ const Shadow s2) {
+ u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
+ return masked_xor == 0;
+ }
+
+ static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
+ unsigned kS2AccessSize) {
+ bool res = false;
+ u64 diff = s1.addr0() - s2.addr0();
+ if ((s64)diff < 0) { // s1.addr0 < s2.addr0
+ // if (s1.addr0() + size1) > s2.addr0()) return true;
+ if (s1.size() > -diff)
+ res = true;
+ } else {
+ // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
+ if (kS2AccessSize > diff)
+ res = true;
+ }
+ DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
+ DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
+ return res;
+ }
+
+ u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
+ u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
+ bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
+ bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }
+
+ // The idea behind the freed bit is as follows.
+  // When the memory is freed (or otherwise made inaccessible) we write shadow
+  // values with the tid/epoch of the free and with the freed bit set.
+  // During memory access processing the freed bit is treated as the msb of
+  // the tid, so any access races with a shadow value that has the freed bit
+  // set (as if the access came from a thread we have never synchronized with).
+ // This allows us to detect accesses to freed memory w/o additional
+ // overheads in memory access processing and at the same time restore
+ // tid/epoch of free.
+ void MarkAsFreed() { x_ |= kFreedBit; }
+
+ bool IsFreed() const { return x_ & kFreedBit; }
+
+ bool GetFreedAndReset() {
+ bool res = x_ & kFreedBit;
+ x_ &= ~kFreedBit;
+ return res;
+ }
+
+ bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
+ bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift) |
+ (u64(kIsAtomic) << kAtomicShift));
+ DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
+ return v;
+ }
+
+ bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
+ bool v = ((x_ >> kReadShift) & 3) <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
+ DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
+ (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
+ return v;
+ }
+
+ bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
+ bool v = ((x_ >> kReadShift) & 3) >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
+ DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
+ (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
+ return v;
+ }
+
+ private:
+ static const u64 kReadShift = 5 + kClkBits;
+ static const u64 kReadBit = 1ull << kReadShift;
+ static const u64 kAtomicShift = 6 + kClkBits;
+ static const u64 kAtomicBit = 1ull << kAtomicShift;
+
+ u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }
+
+ static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
+ if (s1.addr0() == s2.addr0())
+ return true;
+ if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
+ return true;
+ if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
+ return true;
+ return false;
+ }
+};
+
+const RawShadow kShadowRodata = (RawShadow)-1; // .rodata shadow marker
+
+} // namespace __tsan
+
+#endif
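
Illustrative sketch (not part of this commit): a worked example of the bit
layout documented above, using made-up tid/epoch/offset values.

  FastState fs(/*tid=*/5, /*epoch=*/100);
  Shadow s(fs);                          // copies tid/epoch, clears history bits
  s.SetAddr0AndSizeLog(/*addr0=*/4, /*kAccessSizeLog=*/2);  // 4-byte access at offset 4
  s.SetWrite(/*kAccessIsWrite=*/true);
  s.SetAtomic(/*kIsAtomic=*/false);
  // Now: s.tid() == 5, s.epoch() == 100, s.addr0() == 4, s.size() == 4,
  // s.IsWrite(), !s.IsAtomic(), !s.IsFreed().
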
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_stack_trace.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_stack_trace.cpp
new file mode 100644
index 000000000000..9bbaafb3a85f
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_stack_trace.cpp
@@ -0,0 +1,57 @@
+//===-- tsan_stack_trace.cpp ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_stack_trace.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+
+namespace __tsan {
+
+VarSizeStackTrace::VarSizeStackTrace()
+ : StackTrace(nullptr, 0), trace_buffer(nullptr) {}
+
+VarSizeStackTrace::~VarSizeStackTrace() {
+ ResizeBuffer(0);
+}
+
+void VarSizeStackTrace::ResizeBuffer(uptr new_size) {
+ Free(trace_buffer);
+ trace_buffer = (new_size > 0)
+ ? (uptr *)Alloc(new_size * sizeof(trace_buffer[0]))
+ : nullptr;
+ trace = trace_buffer;
+ size = new_size;
+}
+
+void VarSizeStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) {
+ ResizeBuffer(cnt + !!extra_top_pc);
+ internal_memcpy(trace_buffer, pcs, cnt * sizeof(trace_buffer[0]));
+ if (extra_top_pc)
+ trace_buffer[cnt] = extra_top_pc;
+}
+
+void VarSizeStackTrace::ReverseOrder() {
+ for (u32 i = 0; i < (size >> 1); i++)
+ Swap(trace_buffer[i], trace_buffer[size - 1 - i]);
+}
+
+} // namespace __tsan
+
+#if !SANITIZER_GO
+void __sanitizer::BufferedStackTrace::UnwindImpl(
+ uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) {
+ uptr top = 0;
+ uptr bottom = 0;
+ GetThreadStackTopAndBottom(false, &top, &bottom);
+ bool fast = StackTrace::WillUseFastUnwind(request_fast);
+ Unwind(max_depth, pc, bp, context, top, bottom, fast);
+}
+#endif // SANITIZER_GO
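
Illustrative sketch (not part of this commit): VarSizeStackTrace owns its
buffer and exposes it through the base StackTrace members; the PC values below
are made up.

  uptr pcs[] = {0x1000, 0x2000, 0x3000};
  VarSizeStackTrace stk;
  stk.Init(pcs, 3, /*extra_top_pc=*/0x4000);  // allocates 4 slots, appends 0x4000
  // stk.trace == stk.trace_buffer, stk.size == 4, stk.trace[3] == 0x4000.
  stk.ReverseOrder();                         // now stk.trace[0] == 0x4000
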
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_stack_trace.h b/compiler-rt/lib/tsan/rtl-old/tsan_stack_trace.h
new file mode 100644
index 000000000000..3eb8ce156e83
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_stack_trace.h
@@ -0,0 +1,42 @@
+//===-- tsan_stack_trace.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_STACK_TRACE_H
+#define TSAN_STACK_TRACE_H
+
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+// StackTrace which calls malloc/free to allocate the buffer for
+// addresses in stack traces.
+struct VarSizeStackTrace : public StackTrace {
+ uptr *trace_buffer; // Owned.
+
+ VarSizeStackTrace();
+ ~VarSizeStackTrace();
+ void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0);
+
+  // Reverses the current stack trace order: the top frame goes to the bottom
+  // and the bottom frame goes to the top.
+ void ReverseOrder();
+
+ private:
+ void ResizeBuffer(uptr new_size);
+
+ VarSizeStackTrace(const VarSizeStackTrace &);
+ void operator=(const VarSizeStackTrace &);
+};
+
+} // namespace __tsan
+
+#endif // TSAN_STACK_TRACE_H
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_suppressions.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_suppressions.cpp
new file mode 100644
index 000000000000..a1c1bf81bf67
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_suppressions.cpp
@@ -0,0 +1,161 @@
+//===-- tsan_suppressions.cpp ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
+#include "tsan_suppressions.h"
+#include "tsan_rtl.h"
+#include "tsan_flags.h"
+#include "tsan_mman.h"
+#include "tsan_platform.h"
+
+#if !SANITIZER_GO
+// Suppressions for true/false positives in standard libraries.
+static const char *const std_suppressions =
+// Libstdc++ 4.4 has data races in std::string.
+// See http://crbug.com/181502 for an example.
+"race:^_M_rep$\n"
+"race:^_M_is_leaked$\n"
+// False positive when using std <thread>.
+// Happens because we miss atomic synchronization in libstdc++.
+// See http://llvm.org/bugs/show_bug.cgi?id=17066 for details.
+"race:std::_Sp_counted_ptr_inplace<std::thread::_Impl\n";
+
+// Can be overridden in the frontend.
+SANITIZER_WEAK_DEFAULT_IMPL
+const char *__tsan_default_suppressions() {
+ return 0;
+}
+#endif
+
+namespace __tsan {
+
+ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
+static SuppressionContext *suppression_ctx = nullptr;
+static const char *kSuppressionTypes[] = {
+ kSuppressionRace, kSuppressionRaceTop, kSuppressionMutex,
+ kSuppressionThread, kSuppressionSignal, kSuppressionLib,
+ kSuppressionDeadlock};
+
+void InitializeSuppressions() {
+ CHECK_EQ(nullptr, suppression_ctx);
+ suppression_ctx = new (suppression_placeholder)
+ SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
+ suppression_ctx->ParseFromFile(flags()->suppressions);
+#if !SANITIZER_GO
+ suppression_ctx->Parse(__tsan_default_suppressions());
+ suppression_ctx->Parse(std_suppressions);
+#endif
+}
+
+SuppressionContext *Suppressions() {
+ CHECK(suppression_ctx);
+ return suppression_ctx;
+}
+
+static const char *conv(ReportType typ) {
+ switch (typ) {
+ case ReportTypeRace:
+ case ReportTypeVptrRace:
+ case ReportTypeUseAfterFree:
+ case ReportTypeVptrUseAfterFree:
+ case ReportTypeExternalRace:
+ return kSuppressionRace;
+ case ReportTypeThreadLeak:
+ return kSuppressionThread;
+ case ReportTypeMutexDestroyLocked:
+ case ReportTypeMutexDoubleLock:
+ case ReportTypeMutexInvalidAccess:
+ case ReportTypeMutexBadUnlock:
+ case ReportTypeMutexBadReadLock:
+ case ReportTypeMutexBadReadUnlock:
+ return kSuppressionMutex;
+ case ReportTypeSignalUnsafe:
+ case ReportTypeErrnoInSignal:
+ return kSuppressionSignal;
+ case ReportTypeDeadlock:
+ return kSuppressionDeadlock;
+ // No default case so compiler warns us if we miss one
+ }
+ UNREACHABLE("missing case");
+}
+
+static uptr IsSuppressed(const char *stype, const AddressInfo &info,
+ Suppression **sp) {
+ if (suppression_ctx->Match(info.function, stype, sp) ||
+ suppression_ctx->Match(info.file, stype, sp) ||
+ suppression_ctx->Match(info.module, stype, sp)) {
+ VPrintf(2, "ThreadSanitizer: matched suppression '%s'\n", (*sp)->templ);
+ atomic_fetch_add(&(*sp)->hit_count, 1, memory_order_relaxed);
+ return info.address;
+ }
+ return 0;
+}
+
+uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp) {
+ CHECK(suppression_ctx);
+ if (!suppression_ctx->SuppressionCount() || stack == 0 ||
+ !stack->suppressable)
+ return 0;
+ const char *stype = conv(typ);
+ if (0 == internal_strcmp(stype, kSuppressionNone))
+ return 0;
+ for (const SymbolizedStack *frame = stack->frames; frame;
+ frame = frame->next) {
+ uptr pc = IsSuppressed(stype, frame->info, sp);
+ if (pc != 0)
+ return pc;
+ }
+ if (0 == internal_strcmp(stype, kSuppressionRace) && stack->frames != nullptr)
+ return IsSuppressed(kSuppressionRaceTop, stack->frames->info, sp);
+ return 0;
+}
+
+uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp) {
+ CHECK(suppression_ctx);
+ if (!suppression_ctx->SuppressionCount() || loc == 0 ||
+ loc->type != ReportLocationGlobal || !loc->suppressable)
+ return 0;
+ const char *stype = conv(typ);
+ if (0 == internal_strcmp(stype, kSuppressionNone))
+ return 0;
+ Suppression *s;
+ const DataInfo &global = loc->global;
+ if (suppression_ctx->Match(global.name, stype, &s) ||
+ suppression_ctx->Match(global.module, stype, &s)) {
+ VPrintf(2, "ThreadSanitizer: matched suppression '%s'\n", s->templ);
+ atomic_fetch_add(&s->hit_count, 1, memory_order_relaxed);
+ *sp = s;
+ return global.start;
+ }
+ return 0;
+}
+
+void PrintMatchedSuppressions() {
+ InternalMmapVector<Suppression *> matched;
+ CHECK(suppression_ctx);
+ suppression_ctx->GetMatched(&matched);
+ if (!matched.size())
+ return;
+ int hit_count = 0;
+ for (uptr i = 0; i < matched.size(); i++)
+ hit_count += atomic_load_relaxed(&matched[i]->hit_count);
+ Printf("ThreadSanitizer: Matched %d suppressions (pid=%d):\n", hit_count,
+ (int)internal_getpid());
+ for (uptr i = 0; i < matched.size(); i++) {
+ Printf("%d %s:%s\n", atomic_load_relaxed(&matched[i]->hit_count),
+ matched[i]->type, matched[i]->templ);
+ }
+}
+} // namespace __tsan
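For reference, the weak __tsan_default_suppressions hook added above is the usual way for a frontend to bake suppressions into a binary instead of shipping a separate file. A minimal, illustrative override (the symbol and library names below are made up, not part of this commit) might look like:

// Client translation unit linked into the instrumented program. Returning a
// newline-separated list here has the same effect as passing
// TSAN_OPTIONS=suppressions=<file>; the "race:" and "called_from_lib:"
// prefixes correspond to the kSuppression* names in tsan_suppressions.h.
extern "C" const char *__tsan_default_suppressions() {
  return "race:LegacyLockFreeQueue::Push\n"      // hypothetical racy symbol
         "called_from_lib:libvendor_blob.so\n";  // hypothetical noisy library
}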
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_suppressions.h b/compiler-rt/lib/tsan/rtl-old/tsan_suppressions.h
new file mode 100644
index 000000000000..f430aeb6c4cf
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_suppressions.h
@@ -0,0 +1,37 @@
+//===-- tsan_suppressions.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_SUPPRESSIONS_H
+#define TSAN_SUPPRESSIONS_H
+
+#include "sanitizer_common/sanitizer_suppressions.h"
+#include "tsan_report.h"
+
+namespace __tsan {
+
+const char kSuppressionNone[] = "none";
+const char kSuppressionRace[] = "race";
+const char kSuppressionRaceTop[] = "race_top";
+const char kSuppressionMutex[] = "mutex";
+const char kSuppressionThread[] = "thread";
+const char kSuppressionSignal[] = "signal";
+const char kSuppressionLib[] = "called_from_lib";
+const char kSuppressionDeadlock[] = "deadlock";
+
+void InitializeSuppressions();
+SuppressionContext *Suppressions();
+void PrintMatchedSuppressions();
+uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp);
+uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp);
+
+} // namespace __tsan
+
+#endif // TSAN_SUPPRESSIONS_H
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_symbolize.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_symbolize.cpp
new file mode 100644
index 000000000000..2e2744d2eae7
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_symbolize.cpp
@@ -0,0 +1,123 @@
+//===-- tsan_symbolize.cpp ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_symbolize.h"
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+#include "tsan_flags.h"
+#include "tsan_report.h"
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+void EnterSymbolizer() {
+ ThreadState *thr = cur_thread();
+ CHECK(!thr->in_symbolizer);
+ thr->in_symbolizer = true;
+ thr->ignore_interceptors++;
+}
+
+void ExitSymbolizer() {
+ ThreadState *thr = cur_thread();
+ CHECK(thr->in_symbolizer);
+ thr->in_symbolizer = false;
+ thr->ignore_interceptors--;
+}
+
+// Legacy API.
+// May be overridden by JIT/JAVA/etc,
+// whatever produces PCs marked with kExternalPCBit.
+SANITIZER_WEAK_DEFAULT_IMPL
+bool __tsan_symbolize_external(uptr pc, char *func_buf, uptr func_siz,
+ char *file_buf, uptr file_siz, int *line,
+ int *col) {
+ return false;
+}
+
+// New API: call __tsan_symbolize_external_ex only when it exists.
+// Once old clients are gone, provide dummy implementation.
+SANITIZER_WEAK_DEFAULT_IMPL
+void __tsan_symbolize_external_ex(uptr pc,
+ void (*add_frame)(void *, const char *,
+ const char *, int, int),
+ void *ctx) {}
+
+struct SymbolizedStackBuilder {
+ SymbolizedStack *head;
+ SymbolizedStack *tail;
+ uptr addr;
+};
+
+static void AddFrame(void *ctx, const char *function_name, const char *file,
+ int line, int column) {
+ SymbolizedStackBuilder *ssb = (struct SymbolizedStackBuilder *)ctx;
+ if (ssb->tail) {
+ ssb->tail->next = SymbolizedStack::New(ssb->addr);
+ ssb->tail = ssb->tail->next;
+ } else {
+ ssb->head = ssb->tail = SymbolizedStack::New(ssb->addr);
+ }
+ AddressInfo *info = &ssb->tail->info;
+ if (function_name) {
+ info->function = internal_strdup(function_name);
+ }
+ if (file) {
+ info->file = internal_strdup(file);
+ }
+ info->line = line;
+ info->column = column;
+}
+
+SymbolizedStack *SymbolizeCode(uptr addr) {
+ // Check if PC comes from non-native land.
+ if (addr & kExternalPCBit) {
+ SymbolizedStackBuilder ssb = {nullptr, nullptr, addr};
+ __tsan_symbolize_external_ex(addr, AddFrame, &ssb);
+ if (ssb.head)
+ return ssb.head;
+ // Legacy code: remove along with the declaration above
+ // once all clients using this API are gone.
+ // Declare static to not consume too much stack space.
+ // We symbolize reports in a single thread, so this is fine.
+ static char func_buf[1024];
+ static char file_buf[1024];
+ int line, col;
+ SymbolizedStack *frame = SymbolizedStack::New(addr);
+ if (__tsan_symbolize_external(addr, func_buf, sizeof(func_buf), file_buf,
+ sizeof(file_buf), &line, &col)) {
+ frame->info.function = internal_strdup(func_buf);
+ frame->info.file = internal_strdup(file_buf);
+ frame->info.line = line;
+ frame->info.column = col;
+ }
+ return frame;
+ }
+ return Symbolizer::GetOrInit()->SymbolizePC(addr);
+}
+
+ReportLocation *SymbolizeData(uptr addr) {
+ DataInfo info;
+ if (!Symbolizer::GetOrInit()->SymbolizeData(addr, &info))
+ return 0;
+ auto *ent = New<ReportLocation>();
+ ent->type = ReportLocationGlobal;
+ internal_memcpy(&ent->global, &info, sizeof(info));
+ return ent;
+}
+
+void SymbolizeFlush() {
+ Symbolizer::GetOrInit()->Flush();
+}
+
+} // namespace __tsan
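A JIT or any other producer of kExternalPCBit-tagged PCs can override the weak __tsan_symbolize_external_ex hook above. A hedged sketch of such an override follows; JitFrameInfo and LookupJitFrame are assumed JIT-runtime helpers (not part of this commit), and uptr is spelled as unsigned long for self-containment (they match on LP64 targets):

// Hypothetical client-side symbolizer for JIT-generated code.
struct JitFrameInfo {
  const char *function_name;
  const char *source_file;
  int line, column;
};
// Assumed to be provided by the JIT runtime; returns null for unknown PCs.
const JitFrameInfo *LookupJitFrame(unsigned long pc);

extern "C" void __tsan_symbolize_external_ex(
    unsigned long pc,
    void (*add_frame)(void *, const char *, const char *, int, int),
    void *ctx) {
  // TSan calls this only for PCs marked with kExternalPCBit; each recovered
  // frame is reported through the callback, innermost first.
  if (const JitFrameInfo *fi = LookupJitFrame(pc))
    add_frame(ctx, fi->function_name, fi->source_file, fi->line, fi->column);
}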
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_symbolize.h b/compiler-rt/lib/tsan/rtl-old/tsan_symbolize.h
new file mode 100644
index 000000000000..7adaa04dc273
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_symbolize.h
@@ -0,0 +1,30 @@
+//===-- tsan_symbolize.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_SYMBOLIZE_H
+#define TSAN_SYMBOLIZE_H
+
+#include "tsan_defs.h"
+#include "tsan_report.h"
+
+namespace __tsan {
+
+void EnterSymbolizer();
+void ExitSymbolizer();
+SymbolizedStack *SymbolizeCode(uptr addr);
+ReportLocation *SymbolizeData(uptr addr);
+void SymbolizeFlush();
+
+ReportStack *NewReportStackEntry(uptr addr);
+
+} // namespace __tsan
+
+#endif // TSAN_SYMBOLIZE_H
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_sync.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_sync.cpp
new file mode 100644
index 000000000000..f042abab74e5
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_sync.cpp
@@ -0,0 +1,279 @@
+//===-- tsan_sync.cpp -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_sync.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+
+namespace __tsan {
+
+void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);
+
+SyncVar::SyncVar() : mtx(MutexTypeSyncVar) { Reset(0); }
+
+void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid,
+ bool save_stack) {
+ this->addr = addr;
+ this->uid = uid;
+ this->next = 0;
+
+ creation_stack_id = kInvalidStackID;
+ if (save_stack && !SANITIZER_GO) // Go does not use them
+ creation_stack_id = CurrentStackId(thr, pc);
+ if (common_flags()->detect_deadlocks)
+ DDMutexInit(thr, pc, this);
+}
+
+void SyncVar::Reset(Processor *proc) {
+ uid = 0;
+ creation_stack_id = kInvalidStackID;
+ owner_tid = kInvalidTid;
+ last_lock = 0;
+ recursion = 0;
+ atomic_store_relaxed(&flags, 0);
+
+ if (proc == 0) {
+ CHECK_EQ(clock.size(), 0);
+ CHECK_EQ(read_clock.size(), 0);
+ } else {
+ clock.Reset(&proc->clock_cache);
+ read_clock.Reset(&proc->clock_cache);
+ }
+}
+
+MetaMap::MetaMap()
+ : block_alloc_(LINKER_INITIALIZED, "heap block allocator"),
+ sync_alloc_(LINKER_INITIALIZED, "sync allocator") {
+ atomic_store(&uid_gen_, 0, memory_order_relaxed);
+}
+
+void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+ u32 idx = block_alloc_.Alloc(&thr->proc()->block_cache);
+ MBlock *b = block_alloc_.Map(idx);
+ b->siz = sz;
+ b->tag = 0;
+ b->tid = thr->tid;
+ b->stk = CurrentStackId(thr, pc);
+ u32 *meta = MemToMeta(p);
+ DCHECK_EQ(*meta, 0);
+ *meta = idx | kFlagBlock;
+}
+
+uptr MetaMap::FreeBlock(Processor *proc, uptr p) {
+ MBlock* b = GetBlock(p);
+ if (b == 0)
+ return 0;
+ uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
+ FreeRange(proc, p, sz);
+ return sz;
+}
+
+bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
+ bool has_something = false;
+ u32 *meta = MemToMeta(p);
+ u32 *end = MemToMeta(p + sz);
+ if (end == meta)
+ end++;
+ for (; meta < end; meta++) {
+ u32 idx = *meta;
+ if (idx == 0) {
+ // Note: don't write to meta in this case -- the block can be huge.
+ continue;
+ }
+ *meta = 0;
+ has_something = true;
+ while (idx != 0) {
+ if (idx & kFlagBlock) {
+ block_alloc_.Free(&proc->block_cache, idx & ~kFlagMask);
+ break;
+ } else if (idx & kFlagSync) {
+ DCHECK(idx & kFlagSync);
+ SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
+ u32 next = s->next;
+ s->Reset(proc);
+ sync_alloc_.Free(&proc->sync_cache, idx & ~kFlagMask);
+ idx = next;
+ } else {
+ CHECK(0);
+ }
+ }
+ }
+ return has_something;
+}
+
+// ResetRange removes all meta objects from the range.
+// It is called for large mmap-ed regions. The function is best-effort wrt
+// freeing of meta objects, because we don't want to page in the whole range
+// which can be huge. The function probes pages one by one until it finds a
+// page without meta objects, at which point it stops freeing meta objects.
+// Because thread stacks grow top-down, we do the same starting from the end
+// as well.
+void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
+ if (SANITIZER_GO) {
+ // UnmapOrDie/MmapFixedNoReserve does not work on Windows,
+ // so we do the optimization only for C/C++.
+ FreeRange(proc, p, sz);
+ return;
+ }
+ const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
+ const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
+ if (sz <= 4 * kPageSize) {
+ // If the range is small, just do the normal free procedure.
+ FreeRange(proc, p, sz);
+ return;
+ }
+ // First, round both ends of the range to page size.
+ uptr diff = RoundUp(p, kPageSize) - p;
+ if (diff != 0) {
+ FreeRange(proc, p, diff);
+ p += diff;
+ sz -= diff;
+ }
+ diff = p + sz - RoundDown(p + sz, kPageSize);
+ if (diff != 0) {
+ FreeRange(proc, p + sz - diff, diff);
+ sz -= diff;
+ }
+ // Now we must have a non-empty page-aligned range.
+ CHECK_GT(sz, 0);
+ CHECK_EQ(p, RoundUp(p, kPageSize));
+ CHECK_EQ(sz, RoundUp(sz, kPageSize));
+ const uptr p0 = p;
+ const uptr sz0 = sz;
+ // Probe start of the range.
+ for (uptr checked = 0; sz > 0; checked += kPageSize) {
+ bool has_something = FreeRange(proc, p, kPageSize);
+ p += kPageSize;
+ sz -= kPageSize;
+ if (!has_something && checked > (128 << 10))
+ break;
+ }
+ // Probe end of the range.
+ for (uptr checked = 0; sz > 0; checked += kPageSize) {
+ bool has_something = FreeRange(proc, p + sz - kPageSize, kPageSize);
+ sz -= kPageSize;
+ // Stacks grow down, so sync objects are most likely at the end of the region
+ // (if it is a stack). The very end of the stack is TLS and tsan increases
+ // TLS by at least 256K, so check at least 512K.
+ if (!has_something && checked > (512 << 10))
+ break;
+ }
+ // Finally, page out the whole range (including the parts that we've just
+ // freed). Note: we can't simply madvise, because we need to leave a zeroed
+ // range (otherwise __tsan_java_move can crash if it encounters left-over
+ // meta objects in the Java heap).
+ uptr metap = (uptr)MemToMeta(p0);
+ uptr metasz = sz0 / kMetaRatio;
+ UnmapOrDie((void*)metap, metasz);
+ if (!MmapFixedSuperNoReserve(metap, metasz))
+ Die();
+}
+
+MBlock* MetaMap::GetBlock(uptr p) {
+ u32 *meta = MemToMeta(p);
+ u32 idx = *meta;
+ for (;;) {
+ if (idx == 0)
+ return 0;
+ if (idx & kFlagBlock)
+ return block_alloc_.Map(idx & ~kFlagMask);
+ DCHECK(idx & kFlagSync);
+ SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
+ idx = s->next;
+ }
+}
+
+SyncVar *MetaMap::GetSync(ThreadState *thr, uptr pc, uptr addr, bool create,
+ bool save_stack) {
+ u32 *meta = MemToMeta(addr);
+ u32 idx0 = *meta;
+ u32 myidx = 0;
+ SyncVar *mys = nullptr;
+ for (;;) {
+ for (u32 idx = idx0; idx && !(idx & kFlagBlock);) {
+ DCHECK(idx & kFlagSync);
+ SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
+ if (LIKELY(s->addr == addr)) {
+ if (UNLIKELY(myidx != 0)) {
+ mys->Reset(thr->proc());
+ sync_alloc_.Free(&thr->proc()->sync_cache, myidx);
+ }
+ return s;
+ }
+ idx = s->next;
+ }
+ if (!create)
+ return nullptr;
+ if (UNLIKELY(*meta != idx0)) {
+ idx0 = *meta;
+ continue;
+ }
+
+ if (LIKELY(myidx == 0)) {
+ const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
+ myidx = sync_alloc_.Alloc(&thr->proc()->sync_cache);
+ mys = sync_alloc_.Map(myidx);
+ mys->Init(thr, pc, addr, uid, save_stack);
+ }
+ mys->next = idx0;
+ if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
+ myidx | kFlagSync, memory_order_release)) {
+ return mys;
+ }
+ }
+}
+
+void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
+ // src and dst can overlap; there are no concurrent accesses to the regions
+ // (e.g. during stop-the-world).
+ CHECK_NE(src, dst);
+ CHECK_NE(sz, 0);
+ uptr diff = dst - src;
+ u32 *src_meta = MemToMeta(src);
+ u32 *dst_meta = MemToMeta(dst);
+ u32 *src_meta_end = MemToMeta(src + sz);
+ uptr inc = 1;
+ if (dst > src) {
+ src_meta = MemToMeta(src + sz) - 1;
+ dst_meta = MemToMeta(dst + sz) - 1;
+ src_meta_end = MemToMeta(src) - 1;
+ inc = -1;
+ }
+ for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
+ CHECK_EQ(*dst_meta, 0);
+ u32 idx = *src_meta;
+ *src_meta = 0;
+ *dst_meta = idx;
+ // Patch the addresses in sync objects.
+ while (idx != 0) {
+ if (idx & kFlagBlock)
+ break;
+ CHECK(idx & kFlagSync);
+ SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
+ s->addr += diff;
+ idx = s->next;
+ }
+ }
+}
+
+void MetaMap::OnProcIdle(Processor *proc) {
+ block_alloc_.FlushCache(&proc->block_cache);
+ sync_alloc_.FlushCache(&proc->sync_cache);
+}
+
+MetaMap::MemoryStats MetaMap::GetMemoryStats() const {
+ MemoryStats stats;
+ stats.mem_block = block_alloc_.AllocatedMemory();
+ stats.sync_obj = sync_alloc_.AllocatedMemory();
+ return stats;
+}
+
+} // namespace __tsan
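The meta slots manipulated above pack a 30-bit DenseSlabAlloc index together with a 2-bit tag in a single u32; the tag says whether the index refers to an MBlock or to the head of a SyncVar list. A standalone sketch of that encoding, reusing the constants from tsan_sync.h but otherwise simplified:

#include <cassert>
#include <cstdint>

// Simplified model of a MetaMap meta slot: the top two bits tag the entry,
// the low 30 bits index into the corresponding DenseSlabAlloc.
constexpr uint32_t kFlagMask  = 3u << 30;
constexpr uint32_t kFlagBlock = 1u << 30;
constexpr uint32_t kFlagSync  = 2u << 30;

uint32_t EncodeBlock(uint32_t idx) { return idx | kFlagBlock; }
uint32_t EncodeSync(uint32_t idx) { return idx | kFlagSync; }

int main() {
  uint32_t meta = EncodeSync(12345);
  assert((meta & kFlagSync) && !(meta & kFlagBlock));
  assert((meta & ~kFlagMask) == 12345);  // recover the allocator index
  meta = EncodeBlock(7);
  assert((meta & kFlagBlock) && (meta & ~kFlagMask) == 7);
  return 0;
}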
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_sync.h b/compiler-rt/lib/tsan/rtl-old/tsan_sync.h
new file mode 100644
index 000000000000..fc8fa288a841
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_sync.h
@@ -0,0 +1,153 @@
+//===-- tsan_sync.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_SYNC_H
+#define TSAN_SYNC_H
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
+#include "tsan_defs.h"
+#include "tsan_clock.h"
+#include "tsan_dense_alloc.h"
+
+namespace __tsan {
+
+// These need to match __tsan_mutex_* flags defined in tsan_interface.h.
+// See documentation there as well.
+enum MutexFlags {
+ MutexFlagLinkerInit = 1 << 0, // __tsan_mutex_linker_init
+ MutexFlagWriteReentrant = 1 << 1, // __tsan_mutex_write_reentrant
+ MutexFlagReadReentrant = 1 << 2, // __tsan_mutex_read_reentrant
+ MutexFlagReadLock = 1 << 3, // __tsan_mutex_read_lock
+ MutexFlagTryLock = 1 << 4, // __tsan_mutex_try_lock
+ MutexFlagTryLockFailed = 1 << 5, // __tsan_mutex_try_lock_failed
+ MutexFlagRecursiveLock = 1 << 6, // __tsan_mutex_recursive_lock
+ MutexFlagRecursiveUnlock = 1 << 7, // __tsan_mutex_recursive_unlock
+ MutexFlagNotStatic = 1 << 8, // __tsan_mutex_not_static
+
+ // The following flags are runtime private.
+ // Mutex API misuse was detected, so don't report any more.
+ MutexFlagBroken = 1 << 30,
+ // We did not intercept pre lock event, so handle it on post lock.
+ MutexFlagDoPreLockOnPostLock = 1 << 29,
+ // Must list all mutex creation flags.
+ MutexCreationFlagMask = MutexFlagLinkerInit |
+ MutexFlagWriteReentrant |
+ MutexFlagReadReentrant |
+ MutexFlagNotStatic,
+};
+
+// SyncVar is a descriptor of a user synchronization object
+// (mutex or an atomic variable).
+struct SyncVar {
+ SyncVar();
+
+ uptr addr; // overwritten by DenseSlabAlloc freelist
+ Mutex mtx;
+ u64 uid; // Globally unique id.
+ StackID creation_stack_id;
+ Tid owner_tid; // Set only by exclusive owners.
+ u64 last_lock;
+ int recursion;
+ atomic_uint32_t flags;
+ u32 next; // in MetaMap
+ DDMutex dd;
+ SyncClock read_clock; // Used for rw mutexes only.
+ // The clock is placed last so that it sits on a different cache line from
+ // the mtx. This reduces contention for hot sync objects.
+ SyncClock clock;
+
+ void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid, bool save_stack);
+ void Reset(Processor *proc);
+
+ u64 GetId() const {
+ // 48 lsb is addr, then 14 bits is low part of uid, then 2 zero bits.
+ return GetLsb((u64)addr | (uid << 48), 60);
+ }
+ bool CheckId(u64 uid) const {
+ CHECK_EQ(uid, GetLsb(uid, 14));
+ return GetLsb(this->uid, 14) == uid;
+ }
+ static uptr SplitId(u64 id, u64 *uid) {
+ *uid = id >> 48;
+ return (uptr)GetLsb(id, 48);
+ }
+
+ bool IsFlagSet(u32 f) const {
+ return atomic_load_relaxed(&flags) & f;
+ }
+
+ void SetFlags(u32 f) {
+ atomic_store_relaxed(&flags, atomic_load_relaxed(&flags) | f);
+ }
+
+ void UpdateFlags(u32 flagz) {
+ // Filter out operation flags.
+ if (!(flagz & MutexCreationFlagMask))
+ return;
+ u32 current = atomic_load_relaxed(&flags);
+ if (current & MutexCreationFlagMask)
+ return;
+ // Note: this can be called from MutexPostReadLock which holds only read
+ // lock on the SyncVar.
+ atomic_store_relaxed(&flags, current | (flagz & MutexCreationFlagMask));
+ }
+};
+
+// MetaMap maps app addresses to heap block (MBlock) and sync var (SyncVar)
+// descriptors. It uses 1/2 direct shadow, see tsan_platform.h for the mapping.
+class MetaMap {
+ public:
+ MetaMap();
+
+ void AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz);
+ uptr FreeBlock(Processor *proc, uptr p);
+ bool FreeRange(Processor *proc, uptr p, uptr sz);
+ void ResetRange(Processor *proc, uptr p, uptr sz);
+ MBlock* GetBlock(uptr p);
+
+ SyncVar *GetSyncOrCreate(ThreadState *thr, uptr pc, uptr addr,
+ bool save_stack) {
+ return GetSync(thr, pc, addr, true, save_stack);
+ }
+ SyncVar *GetSyncIfExists(uptr addr) {
+ return GetSync(nullptr, 0, addr, false, false);
+ }
+
+ void MoveMemory(uptr src, uptr dst, uptr sz);
+
+ void OnProcIdle(Processor *proc);
+
+ struct MemoryStats {
+ uptr mem_block;
+ uptr sync_obj;
+ };
+
+ MemoryStats GetMemoryStats() const;
+
+ private:
+ static const u32 kFlagMask = 3u << 30;
+ static const u32 kFlagBlock = 1u << 30;
+ static const u32 kFlagSync = 2u << 30;
+ typedef DenseSlabAlloc<MBlock, 1 << 18, 1 << 12, kFlagMask> BlockAlloc;
+ typedef DenseSlabAlloc<SyncVar, 1 << 20, 1 << 10, kFlagMask> SyncAlloc;
+ BlockAlloc block_alloc_;
+ SyncAlloc sync_alloc_;
+ atomic_uint64_t uid_gen_;
+
+ SyncVar *GetSync(ThreadState *thr, uptr pc, uptr addr, bool create,
+ bool save_stack);
+};
+
+} // namespace __tsan
+
+#endif // TSAN_SYNC_H
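SyncVar::GetId/SplitId above pack the object address and the low bits of the uid into a single 64-bit id. A standalone model of that packing (the constants mirror the header; the test address is arbitrary):

#include <cassert>
#include <cstdint>

// The low 48 bits carry the address, the bits above carry the low part of
// the uid; the combined value is truncated to 60 bits as in GetLsb(..., 60).
uint64_t GetLsb(uint64_t v, int bits) { return v & ((1ull << bits) - 1); }

uint64_t MakeId(uint64_t addr, uint64_t uid) {
  return GetLsb(addr | (uid << 48), 60);
}

uint64_t SplitId(uint64_t id, uint64_t *uid) {
  *uid = id >> 48;
  return GetLsb(id, 48);
}

int main() {
  uint64_t uid = 0;
  uint64_t id = MakeId(0x7f1234567000ull, 5);
  assert(SplitId(id, &uid) == 0x7f1234567000ull);  // address round-trips
  assert(uid == 5);                                // low uid bits round-trip
  return 0;
}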
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_trace.h b/compiler-rt/lib/tsan/rtl-old/tsan_trace.h
new file mode 100644
index 000000000000..ffc8c991ece0
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_trace.h
@@ -0,0 +1,252 @@
+//===-- tsan_trace.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_TRACE_H
+#define TSAN_TRACE_H
+
+#include "tsan_defs.h"
+#include "tsan_ilist.h"
+#include "tsan_mutexset.h"
+#include "tsan_stack_trace.h"
+
+namespace __tsan {
+
+const int kTracePartSizeBits = 13;
+const int kTracePartSize = 1 << kTracePartSizeBits;
+const int kTraceParts = 2 * 1024 * 1024 / kTracePartSize;
+const int kTraceSize = kTracePartSize * kTraceParts;
+
+// Must fit into 3 bits.
+enum EventType {
+ EventTypeMop,
+ EventTypeFuncEnter,
+ EventTypeFuncExit,
+ EventTypeLock,
+ EventTypeUnlock,
+ EventTypeRLock,
+ EventTypeRUnlock
+};
+
+// Represents a thread event (from most significant bit):
+// u64 typ : 3; // EventType.
+// u64 addr : 61; // Associated pc.
+typedef u64 Event;
+
+const uptr kEventPCBits = 61;
+
+struct TraceHeader {
+#if !SANITIZER_GO
+ BufferedStackTrace stack0; // Start stack for the trace.
+#else
+ VarSizeStackTrace stack0;
+#endif
+ u64 epoch0; // Start epoch for the trace.
+ MutexSet mset0;
+
+ TraceHeader() : stack0(), epoch0() {}
+};
+
+struct Trace {
+ Mutex mtx;
+#if !SANITIZER_GO
+ // Must be last to catch overflow as paging fault.
+ // Go shadow stack is dynamically allocated.
+ uptr shadow_stack[kShadowStackSize];
+#endif
+ // Must be the last field, because we unmap the unused part in
+ // CreateThreadContext.
+ TraceHeader headers[kTraceParts];
+
+ Trace() : mtx(MutexTypeTrace) {}
+};
+
+namespace v3 {
+
+enum class EventType : u64 {
+ kAccessExt,
+ kAccessRange,
+ kLock,
+ kRLock,
+ kUnlock,
+ kTime,
+};
+
+// "Base" type for all events for type dispatch.
+struct Event {
+ // We use variable-length type encoding to give more bits to some event
+ // types that need them. If is_access is set, this is EventAccess.
+ // Otherwise, if is_func is set, this is EventFunc.
+ // Otherwise, type denotes the event type.
+ u64 is_access : 1;
+ u64 is_func : 1;
+ EventType type : 3;
+ u64 _ : 59;
+};
+static_assert(sizeof(Event) == 8, "bad Event size");
+
+// Nop event used as padding; it does not affect state during replay.
+static constexpr Event NopEvent = {1, 0, EventType::kAccessExt, 0};
+
+// A compressed memory access can represent only accesses with PCs close
+// enough to the previous access; otherwise we fall back to EventAccessExt.
+struct EventAccess {
+ static constexpr uptr kPCBits = 15;
+ static_assert(kPCBits + kCompressedAddrBits + 5 == 64,
+ "unused bits in EventAccess");
+
+ u64 is_access : 1; // = 1
+ u64 is_read : 1;
+ u64 is_atomic : 1;
+ u64 size_log : 2;
+ u64 pc_delta : kPCBits; // signed delta from the previous memory access PC
+ u64 addr : kCompressedAddrBits;
+};
+static_assert(sizeof(EventAccess) == 8, "bad EventAccess size");
+
+// Function entry (pc != 0) or exit (pc == 0).
+struct EventFunc {
+ u64 is_access : 1; // = 0
+ u64 is_func : 1; // = 1
+ u64 pc : 62;
+};
+static_assert(sizeof(EventFunc) == 8, "bad EventFunc size");
+
+// Extended memory access with full PC.
+struct EventAccessExt {
+ // Note: precisely specifying the unused parts of the bitfield is critical for
+ // performance. If we don't specify them, the compiler will generate code to
+ // load the old value, shuffle it to extract the unused bits, and apply them
+ // to the new value. If we specify the unused part and store 0 in there, all that
+ // unnecessary code goes away (store of the 0 const is combined with other
+ // constant parts).
+ static constexpr uptr kUnusedBits = 11;
+ static_assert(kCompressedAddrBits + kUnusedBits + 9 == 64,
+ "unused bits in EventAccessExt");
+
+ u64 is_access : 1; // = 0
+ u64 is_func : 1; // = 0
+ EventType type : 3; // = EventType::kAccessExt
+ u64 is_read : 1;
+ u64 is_atomic : 1;
+ u64 size_log : 2;
+ u64 _ : kUnusedBits;
+ u64 addr : kCompressedAddrBits;
+ u64 pc;
+};
+static_assert(sizeof(EventAccessExt) == 16, "bad EventAccessExt size");
+
+// Access to a memory range.
+struct EventAccessRange {
+ static constexpr uptr kSizeLoBits = 13;
+ static_assert(kCompressedAddrBits + kSizeLoBits + 7 == 64,
+ "unused bits in EventAccessRange");
+
+ u64 is_access : 1; // = 0
+ u64 is_func : 1; // = 0
+ EventType type : 3; // = EventType::kAccessRange
+ u64 is_read : 1;
+ u64 is_free : 1;
+ u64 size_lo : kSizeLoBits;
+ u64 pc : kCompressedAddrBits;
+ u64 addr : kCompressedAddrBits;
+ u64 size_hi : 64 - kCompressedAddrBits;
+};
+static_assert(sizeof(EventAccessRange) == 16, "bad EventAccessRange size");
+
+// Mutex lock.
+struct EventLock {
+ static constexpr uptr kStackIDLoBits = 15;
+ static constexpr uptr kStackIDHiBits =
+ sizeof(StackID) * kByteBits - kStackIDLoBits;
+ static constexpr uptr kUnusedBits = 3;
+ static_assert(kCompressedAddrBits + kStackIDLoBits + 5 == 64,
+ "unused bits in EventLock");
+ static_assert(kCompressedAddrBits + kStackIDHiBits + kUnusedBits == 64,
+ "unused bits in EventLock");
+
+ u64 is_access : 1; // = 0
+ u64 is_func : 1; // = 0
+ EventType type : 3; // = EventType::kLock or EventType::kRLock
+ u64 pc : kCompressedAddrBits;
+ u64 stack_lo : kStackIDLoBits;
+ u64 stack_hi : sizeof(StackID) * kByteBits - kStackIDLoBits;
+ u64 _ : kUnusedBits;
+ u64 addr : kCompressedAddrBits;
+};
+static_assert(sizeof(EventLock) == 16, "bad EventLock size");
+
+// Mutex unlock.
+struct EventUnlock {
+ static constexpr uptr kUnusedBits = 15;
+ static_assert(kCompressedAddrBits + kUnusedBits + 5 == 64,
+ "unused bits in EventUnlock");
+
+ u64 is_access : 1; // = 0
+ u64 is_func : 1; // = 0
+ EventType type : 3; // = EventType::kUnlock
+ u64 _ : kUnusedBits;
+ u64 addr : kCompressedAddrBits;
+};
+static_assert(sizeof(EventUnlock) == 8, "bad EventUnlock size");
+
+// Time change event.
+struct EventTime {
+ static constexpr uptr kUnusedBits = 37;
+ static_assert(kUnusedBits + sizeof(Sid) * kByteBits + kEpochBits + 5 == 64,
+ "unused bits in EventTime");
+
+ u64 is_access : 1; // = 0
+ u64 is_func : 1; // = 0
+ EventType type : 3; // = EventType::kTime
+ u64 sid : sizeof(Sid) * kByteBits;
+ u64 epoch : kEpochBits;
+ u64 _ : kUnusedBits;
+};
+static_assert(sizeof(EventTime) == 8, "bad EventTime size");
+
+struct Trace;
+
+struct TraceHeader {
+ Trace* trace = nullptr; // back-pointer to Trace containing this part
+ INode trace_parts; // in Trace::parts
+};
+
+struct TracePart : TraceHeader {
+ // There are a lot of goroutines in Go, so we use smaller parts.
+ static constexpr uptr kByteSize = (SANITIZER_GO ? 128 : 256) << 10;
+ static constexpr uptr kSize =
+ (kByteSize - sizeof(TraceHeader)) / sizeof(Event);
+ // TraceAcquire does a fast event-pointer overflow check by comparing the
+ // pointer into TracePart::events against the kAlignment mask. Since
+ // TraceParts are allocated page-aligned, this check detects the end of the
+ // array (it also has false positives in the middle, which are filtered out
+ // separately).
+ // This also requires events to be the last field.
+ static constexpr uptr kAlignment = 0xff0;
+ Event events[kSize];
+
+ TracePart() {}
+};
+static_assert(sizeof(TracePart) == TracePart::kByteSize, "bad TracePart size");
+
+struct Trace {
+ Mutex mtx;
+ IList<TraceHeader, &TraceHeader::trace_parts, TracePart> parts;
+ // Final position in the last part for finished threads.
+ Event* final_pos = nullptr;
+
+ Trace() : mtx(MutexTypeTrace) {}
+};
+
+} // namespace v3
+
+} // namespace __tsan
+
+#endif // TSAN_TRACE_H
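The new-format events above all start with the same two flag bits, and readers dispatch on them before consulting the 3-bit type field. A standalone sketch of that variable-length dispatch (the enum and struct names are simplified stand-ins for the v3 types, not the real ones):

#include <cassert>
#include <cstdint>

// Minimal model of the event header: 1 + 1 + 3 + 59 bits in one u64.
struct EventHdr {
  uint64_t is_access : 1;
  uint64_t is_func : 1;
  uint64_t type : 3;
  uint64_t rest : 59;
};
static_assert(sizeof(EventHdr) == 8, "bad EventHdr size");

enum Kind { kMemoryAccess, kFuncEntryExit, kOther };

Kind Classify(const EventHdr &ev) {
  if (ev.is_access) return kMemoryAccess;  // EventAccess layout follows
  if (ev.is_func) return kFuncEntryExit;   // EventFunc layout follows
  return kOther;                           // dispatch further on ev.type
}

int main() {
  EventHdr access = {1, 0, 0, 0};
  EventHdr func = {0, 1, 0, 0};
  assert(Classify(access) == kMemoryAccess);
  assert(Classify(func) == kFuncEntryExit);
  return 0;
}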
diff --git a/compiler-rt/lib/tsan/rtl/tsan_update_shadow_word.inc b/compiler-rt/lib/tsan/rtl-old/tsan_update_shadow_word.inc
index a58ef0f17efa..a58ef0f17efa 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_update_shadow_word.inc
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_update_shadow_word.inc
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_vector_clock.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_vector_clock.cpp
new file mode 100644
index 000000000000..278298565d3f
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_vector_clock.cpp
@@ -0,0 +1,126 @@
+//===-- tsan_vector_clock.cpp ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_vector_clock.h"
+
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_mman.h"
+
+namespace __tsan {
+
+#if TSAN_VECTORIZE
+const uptr kVectorClockSize = kThreadSlotCount * sizeof(Epoch) / sizeof(m128);
+#endif
+
+VectorClock::VectorClock() { Reset(); }
+
+void VectorClock::Reset() {
+#if !TSAN_VECTORIZE
+ for (uptr i = 0; i < kThreadSlotCount; i++)
+ clk_[i] = kEpochZero;
+#else
+ m128 z = _mm_setzero_si128();
+ m128* vclk = reinterpret_cast<m128*>(clk_);
+ for (uptr i = 0; i < kVectorClockSize; i++) _mm_store_si128(&vclk[i], z);
+#endif
+}
+
+void VectorClock::Acquire(const VectorClock* src) {
+ if (!src)
+ return;
+#if !TSAN_VECTORIZE
+ for (uptr i = 0; i < kThreadSlotCount; i++)
+ clk_[i] = max(clk_[i], src->clk_[i]);
+#else
+ m128* __restrict vdst = reinterpret_cast<m128*>(clk_);
+ m128 const* __restrict vsrc = reinterpret_cast<m128 const*>(src->clk_);
+ for (uptr i = 0; i < kVectorClockSize; i++) {
+ m128 s = _mm_load_si128(&vsrc[i]);
+ m128 d = _mm_load_si128(&vdst[i]);
+ m128 m = _mm_max_epu16(s, d);
+ _mm_store_si128(&vdst[i], m);
+ }
+#endif
+}
+
+static VectorClock* AllocClock(VectorClock** dstp) {
+ if (UNLIKELY(!*dstp))
+ *dstp = New<VectorClock>();
+ return *dstp;
+}
+
+void VectorClock::Release(VectorClock** dstp) const {
+ VectorClock* dst = AllocClock(dstp);
+ dst->Acquire(this);
+}
+
+void VectorClock::ReleaseStore(VectorClock** dstp) const {
+ VectorClock* dst = AllocClock(dstp);
+ *dst = *this;
+}
+
+VectorClock& VectorClock::operator=(const VectorClock& other) {
+#if !TSAN_VECTORIZE
+ for (uptr i = 0; i < kThreadSlotCount; i++)
+ clk_[i] = other.clk_[i];
+#else
+ m128* __restrict vdst = reinterpret_cast<m128*>(clk_);
+ m128 const* __restrict vsrc = reinterpret_cast<m128 const*>(other.clk_);
+ for (uptr i = 0; i < kVectorClockSize; i++) {
+ m128 s = _mm_load_si128(&vsrc[i]);
+ _mm_store_si128(&vdst[i], s);
+ }
+#endif
+ return *this;
+}
+
+void VectorClock::ReleaseStoreAcquire(VectorClock** dstp) {
+ VectorClock* dst = AllocClock(dstp);
+#if !TSAN_VECTORIZE
+ for (uptr i = 0; i < kThreadSlotCount; i++) {
+ Epoch tmp = dst->clk_[i];
+ dst->clk_[i] = clk_[i];
+ clk_[i] = max(clk_[i], tmp);
+ }
+#else
+ m128* __restrict vdst = reinterpret_cast<m128*>(dst->clk_);
+ m128* __restrict vclk = reinterpret_cast<m128*>(clk_);
+ for (uptr i = 0; i < kVectorClockSize; i++) {
+ m128 t = _mm_load_si128(&vdst[i]);
+ m128 c = _mm_load_si128(&vclk[i]);
+ m128 m = _mm_max_epu16(c, t);
+ _mm_store_si128(&vdst[i], c);
+ _mm_store_si128(&vclk[i], m);
+ }
+#endif
+}
+
+void VectorClock::ReleaseAcquire(VectorClock** dstp) {
+ VectorClock* dst = AllocClock(dstp);
+#if !TSAN_VECTORIZE
+ for (uptr i = 0; i < kThreadSlotCount; i++) {
+ dst->clk_[i] = max(dst->clk_[i], clk_[i]);
+ clk_[i] = dst->clk_[i];
+ }
+#else
+ m128* __restrict vdst = reinterpret_cast<m128*>(dst->clk_);
+ m128* __restrict vclk = reinterpret_cast<m128*>(clk_);
+ for (uptr i = 0; i < kVectorClockSize; i++) {
+ m128 c = _mm_load_si128(&vclk[i]);
+ m128 d = _mm_load_si128(&vdst[i]);
+ m128 m = _mm_max_epu16(c, d);
+ _mm_store_si128(&vdst[i], m);
+ _mm_store_si128(&vclk[i], m);
+ }
+#endif
+}
+
+} // namespace __tsan
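Ignoring the SSE fast path, the operations above reduce to element-wise maxima and copies over fixed-size arrays of epochs. A scalar model with a shortened slot count (four instead of kThreadSlotCount) purely for illustration:

#include <algorithm>
#include <array>
#include <cassert>

constexpr int kSlots = 4;
using Clock = std::array<unsigned, kSlots>;

// Acquire: the thread clock absorbs everything the sync object has seen.
void Acquire(Clock &thr, const Clock &sync) {
  for (int i = 0; i < kSlots; i++) thr[i] = std::max(thr[i], sync[i]);
}

// ReleaseStore: the sync object's clock is overwritten with the thread clock.
void ReleaseStore(const Clock &thr, Clock &sync) { sync = thr; }

int main() {
  Clock thr = {3, 0, 7, 1}, sync = {1, 5, 2, 1};
  Acquire(thr, sync);
  assert((thr == Clock{3, 5, 7, 1}));  // element-wise max
  ReleaseStore(thr, sync);
  assert(sync == thr);
  return 0;
}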
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_vector_clock.h b/compiler-rt/lib/tsan/rtl-old/tsan_vector_clock.h
new file mode 100644
index 000000000000..63b206302190
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_vector_clock.h
@@ -0,0 +1,51 @@
+//===-- tsan_vector_clock.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_VECTOR_CLOCK_H
+#define TSAN_VECTOR_CLOCK_H
+
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+// Fixed-size vector clock, used both for threads and sync objects.
+class VectorClock {
+ public:
+ VectorClock();
+
+ Epoch Get(Sid sid) const;
+ void Set(Sid sid, Epoch v);
+
+ void Reset();
+ void Acquire(const VectorClock* src);
+ void Release(VectorClock** dstp) const;
+ void ReleaseStore(VectorClock** dstp) const;
+ void ReleaseStoreAcquire(VectorClock** dstp);
+ void ReleaseAcquire(VectorClock** dstp);
+
+ VectorClock& operator=(const VectorClock& other);
+
+ private:
+ Epoch clk_[kThreadSlotCount] VECTOR_ALIGNED;
+};
+
+ALWAYS_INLINE Epoch VectorClock::Get(Sid sid) const {
+ return clk_[static_cast<u8>(sid)];
+}
+
+ALWAYS_INLINE void VectorClock::Set(Sid sid, Epoch v) {
+ DCHECK_GE(v, clk_[static_cast<u8>(sid)]);
+ clk_[static_cast<u8>(sid)] = v;
+}
+
+} // namespace __tsan
+
+#endif // TSAN_VECTOR_CLOCK_H
diff --git a/compiler-rt/lib/tsan/rtl/tsan_debugging.cpp b/compiler-rt/lib/tsan/rtl/tsan_debugging.cpp
index 1d3c3849a446..1e61c31c5a97 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_debugging.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_debugging.cpp
@@ -157,7 +157,7 @@ int __tsan_get_report_mutex(void *report, uptr idx, uptr *mutex_id, void **addr,
ReportMutex *mutex = rep->mutexes[idx];
*mutex_id = mutex->id;
*addr = (void *)mutex->addr;
- *destroyed = mutex->destroyed;
+ *destroyed = false;
if (mutex->stack) CopyTrace(mutex->stack->frames, trace, trace_size);
return 1;
}
diff --git a/compiler-rt/lib/tsan/rtl/tsan_defs.h b/compiler-rt/lib/tsan/rtl/tsan_defs.h
index 4712c2be1813..2e13e0e5486b 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_defs.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_defs.h
@@ -63,41 +63,14 @@ enum class Epoch : u16 {};
constexpr uptr kEpochBits = 14;
constexpr Epoch kEpochZero = static_cast<Epoch>(0);
constexpr Epoch kEpochOver = static_cast<Epoch>(1 << kEpochBits);
+constexpr Epoch kEpochLast = static_cast<Epoch>((1 << kEpochBits) - 1);
-const int kClkBits = 42;
-const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1;
-
-struct ClockElem {
- u64 epoch : kClkBits;
- u64 reused : 64 - kClkBits; // tid reuse count
-};
-
-struct ClockBlock {
- static const uptr kSize = 512;
- static const uptr kTableSize = kSize / sizeof(u32);
- static const uptr kClockCount = kSize / sizeof(ClockElem);
- static const uptr kRefIdx = kTableSize - 1;
- static const uptr kBlockIdx = kTableSize - 2;
-
- union {
- u32 table[kTableSize];
- ClockElem clock[kClockCount];
- };
+inline Epoch EpochInc(Epoch epoch) {
+ return static_cast<Epoch>(static_cast<u16>(epoch) + 1);
+}
- ClockBlock() {
- }
-};
+inline bool EpochOverflow(Epoch epoch) { return epoch == kEpochOver; }
-const int kTidBits = 13;
-// Reduce kMaxTid by kClockCount because one slot in ClockBlock table is
-// occupied by reference counter, so total number of elements we can store
-// in SyncClock is kClockCount * (kTableSize - 1).
-const unsigned kMaxTid = (1 << kTidBits) - ClockBlock::kClockCount;
-#if !SANITIZER_GO
-const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit.
-#else
-const unsigned kMaxTidInClock = kMaxTid; // Go does not track freed memory.
-#endif
const uptr kShadowStackSize = 64 * 1024;
// Count of shadow values in a shadow cell.
@@ -107,7 +80,7 @@ const uptr kShadowCnt = 4;
const uptr kShadowCell = 8;
// Single shadow value.
-typedef u64 RawShadow;
+enum class RawShadow : u32 {};
const uptr kShadowSize = sizeof(RawShadow);
// Shadow memory is kShadowMultiplier times larger than user memory.
@@ -184,10 +157,13 @@ MD5Hash md5_hash(const void *data, uptr size);
struct Processor;
struct ThreadState;
class ThreadContext;
+struct TidSlot;
struct Context;
struct ReportStack;
class ReportDesc;
class RegionAlloc;
+struct Trace;
+struct TracePart;
typedef uptr AccessType;
@@ -198,6 +174,8 @@ enum : AccessType {
kAccessVptr = 1 << 2, // read or write of an object virtual table pointer
kAccessFree = 1 << 3, // synthetic memory access during memory freeing
kAccessExternalPC = 1 << 4, // access PC can have kExternalPCBit set
+ kAccessCheckOnly = 1 << 5, // check for races, but don't store
+ kAccessNoRodata = 1 << 6, // don't check for .rodata marker
};
// Descriptor of user's memory block.
@@ -219,9 +197,8 @@ enum ExternalTag : uptr {
// as 16-bit values, see tsan_defs.h.
};
-enum MutexType {
- MutexTypeTrace = MutexLastCommon,
- MutexTypeReport,
+enum {
+ MutexTypeReport = MutexLastCommon,
MutexTypeSyncVar,
MutexTypeAnnotations,
MutexTypeAtExit,
@@ -229,6 +206,9 @@ enum MutexType {
MutexTypeRacy,
MutexTypeGlobalProc,
MutexTypeInternalAlloc,
+ MutexTypeTrace,
+ MutexTypeSlot,
+ MutexTypeSlots,
};
} // namespace __tsan
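The Epoch helpers added above replace the old ClockElem machinery with a strongly typed 14-bit counter. A small standalone restatement, using standard integer types, that exercises the increment and overflow check:

#include <cassert>
#include <cstdint>

enum class Epoch : uint16_t {};
constexpr int kEpochBits = 14;
constexpr Epoch kEpochZero = static_cast<Epoch>(0);
constexpr Epoch kEpochOver = static_cast<Epoch>(1 << kEpochBits);

inline Epoch EpochInc(Epoch epoch) {
  return static_cast<Epoch>(static_cast<uint16_t>(epoch) + 1);
}
inline bool EpochOverflow(Epoch epoch) { return epoch == kEpochOver; }

int main() {
  Epoch e = kEpochZero;
  for (int i = 0; i < (1 << kEpochBits); i++) e = EpochInc(e);
  assert(EpochOverflow(e));  // 2^14 increments reach the overflow sentinel
  return 0;
}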
diff --git a/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h b/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h
index 9e15f74a0615..7a39a39d51de 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h
@@ -104,6 +104,15 @@ class DenseSlabAlloc {
return atomic_load_relaxed(&fillpos_) * kL2Size * sizeof(T);
}
+ template <typename Func>
+ void ForEach(Func func) {
+ SpinMutexLock lock(&mtx_);
+ uptr fillpos = atomic_load_relaxed(&fillpos_);
+ for (uptr l1 = 0; l1 < fillpos; l1++) {
+ for (IndexT l2 = l1 == 0 ? 1 : 0; l2 < kL2Size; l2++) func(&map_[l1][l2]);
+ }
+ }
+
private:
T *map_[kL1Size];
SpinMutex mtx_;
diff --git a/compiler-rt/lib/tsan/rtl/tsan_fd.cpp b/compiler-rt/lib/tsan/rtl/tsan_fd.cpp
index 255ffa8daf76..9a6400c2e9f9 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_fd.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_fd.cpp
@@ -11,9 +11,12 @@
//===----------------------------------------------------------------------===//
#include "tsan_fd.h"
-#include "tsan_rtl.h"
+
#include <sanitizer_common/sanitizer_atomic.h>
+#include "tsan_interceptors.h"
+#include "tsan_rtl.h"
+
namespace __tsan {
const int kTableSizeL1 = 1024;
@@ -192,19 +195,21 @@ void FdClose(ThreadState *thr, uptr pc, int fd, bool write) {
if (bogusfd(fd))
return;
FdDesc *d = fddesc(thr, pc, fd);
- if (write) {
- // To catch races between fd usage and close.
- MemoryAccess(thr, pc, (uptr)d, 8, kAccessWrite);
- } else {
- // This path is used only by dup2/dup3 calls.
- // We do read instead of write because there is a number of legitimate
- // cases where write would lead to false positives:
- // 1. Some software dups a closed pipe in place of a socket before closing
- // the socket (to prevent races actually).
- // 2. Some daemons dup /dev/null in place of stdin/stdout.
- // On the other hand we have not seen cases when write here catches real
- // bugs.
- MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
+ if (!MustIgnoreInterceptor(thr)) {
+ if (write) {
+ // To catch races between fd usage and close.
+ MemoryAccess(thr, pc, (uptr)d, 8, kAccessWrite);
+ } else {
+ // This path is used only by dup2/dup3 calls.
+ // We do read instead of write because there is a number of legitimate
+ // cases where write would lead to false positives:
+ // 1. Some software dups a closed pipe in place of a socket before closing
+ // the socket (to prevent races actually).
+ // 2. Some daemons dup /dev/null in place of stdin/stdout.
+ // On the other hand we have not seen cases when write here catches real
+ // bugs.
+ MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
+ }
}
// We need to clear it, because if we do not intercept any call out there
// that creates fd, we will hit false positives.
diff --git a/compiler-rt/lib/tsan/rtl/tsan_flags.cpp b/compiler-rt/lib/tsan/rtl/tsan_flags.cpp
index ee89862d17bd..54bed9f9a6be 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_flags.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_flags.cpp
@@ -110,12 +110,6 @@ void InitializeFlags(Flags *f, const char *env, const char *env_option_name) {
if (common_flags()->help) parser.PrintFlagDescriptions();
- if (f->history_size < 0 || f->history_size > 7) {
- Printf("ThreadSanitizer: incorrect value for history_size"
- " (must be [0..7])\n");
- Die();
- }
-
if (f->io_sync < 0 || f->io_sync > 2) {
Printf("ThreadSanitizer: incorrect value for io_sync"
" (must be [0..2])\n");
diff --git a/compiler-rt/lib/tsan/rtl/tsan_flags.inc b/compiler-rt/lib/tsan/rtl/tsan_flags.inc
index 7954a4307fa1..b1691452d022 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_flags.inc
+++ b/compiler-rt/lib/tsan/rtl/tsan_flags.inc
@@ -43,6 +43,9 @@ TSAN_FLAG(
bool, force_seq_cst_atomics, false,
"If set, all atomics are effectively sequentially consistent (seq_cst), "
"regardless of what user actually specified.")
+TSAN_FLAG(bool, force_background_thread, false,
+ "If set, eagerly launch a background thread for memory reclamation "
+ "instead of waiting for a user call to pthread_create.")
TSAN_FLAG(bool, halt_on_error, false, "Exit after first reported error.")
TSAN_FLAG(int, atexit_sleep_ms, 1000,
"Sleep in main thread before exiting for that many ms "
@@ -59,14 +62,10 @@ TSAN_FLAG(bool, stop_on_start, false,
"Stops on start until __tsan_resume() is called (for debugging).")
TSAN_FLAG(bool, running_on_valgrind, false,
"Controls whether RunningOnValgrind() returns true or false.")
-// There are a lot of goroutines in Go, so we use smaller history.
TSAN_FLAG(
- int, history_size, SANITIZER_GO ? 1 : 3,
- "Per-thread history size, controls how many previous memory accesses "
- "are remembered per thread. Possible values are [0..7]. "
- "history_size=0 amounts to 32K memory accesses. Each next value doubles "
- "the amount of memory accesses, up to history_size=7 that amounts to "
- "4M memory accesses. The default value is 2 (128K memory accesses).")
+ uptr, history_size, 0,
+ "Per-thread history size,"
+ " controls how many extra previous memory accesses are remembered per thread.")
TSAN_FLAG(int, io_sync, 1,
"Controls level of synchronization implied by IO operations. "
"0 - no synchronization "
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors.h b/compiler-rt/lib/tsan/rtl/tsan_interceptors.h
index 61dbb81ffec4..88a54b554421 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors.h
@@ -36,6 +36,10 @@ inline bool in_symbolizer() {
}
#endif
+inline bool MustIgnoreInterceptor(ThreadState *thr) {
+ return !thr->is_inited || thr->ignore_interceptors || thr->in_ignored_lib;
+}
+
} // namespace __tsan
#define SCOPED_INTERCEPTOR_RAW(func, ...) \
@@ -60,10 +64,10 @@ inline bool in_symbolizer() {
# define CHECK_REAL_FUNC(func) DCHECK(REAL(func))
#endif
-#define SCOPED_TSAN_INTERCEPTOR(func, ...) \
- SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
- CHECK_REAL_FUNC(func); \
- if (!thr->is_inited || thr->ignore_interceptors || thr->in_ignored_lib) \
+#define SCOPED_TSAN_INTERCEPTOR(func, ...) \
+ SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
+ CHECK_REAL_FUNC(func); \
+ if (MustIgnoreInterceptor(thr)) \
return REAL(func)(__VA_ARGS__);
#define SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START() \
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
index cf3dc90d96a1..c4f43d8171ab 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
@@ -1681,11 +1681,10 @@ TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
- SCOPED_TSAN_INTERCEPTOR(signalfd, fd, mask, flags);
- if (fd >= 0)
- FdClose(thr, pc, fd);
+ SCOPED_INTERCEPTOR_RAW(signalfd, fd, mask, flags);
+ FdClose(thr, pc, fd);
fd = REAL(signalfd)(fd, mask, flags);
- if (fd >= 0)
+ if (!MustIgnoreInterceptor(thr))
FdSignalCreate(thr, pc, fd);
return fd;
}
@@ -1762,17 +1761,15 @@ TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
}
TSAN_INTERCEPTOR(int, close, int fd) {
- SCOPED_TSAN_INTERCEPTOR(close, fd);
- if (fd >= 0)
- FdClose(thr, pc, fd);
+ SCOPED_INTERCEPTOR_RAW(close, fd);
+ FdClose(thr, pc, fd);
return REAL(close)(fd);
}
#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, __close, int fd) {
- SCOPED_TSAN_INTERCEPTOR(__close, fd);
- if (fd >= 0)
- FdClose(thr, pc, fd);
+ SCOPED_INTERCEPTOR_RAW(__close, fd);
+ FdClose(thr, pc, fd);
return REAL(__close)(fd);
}
#define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
@@ -1783,13 +1780,10 @@ TSAN_INTERCEPTOR(int, __close, int fd) {
// glibc guts
#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
- SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr);
+ SCOPED_INTERCEPTOR_RAW(__res_iclose, state, free_addr);
int fds[64];
int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
- for (int i = 0; i < cnt; i++) {
- if (fds[i] > 0)
- FdClose(thr, pc, fds[i]);
- }
+ for (int i = 0; i < cnt; i++) FdClose(thr, pc, fds[i]);
REAL(__res_iclose)(state, free_addr);
}
#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
@@ -1870,7 +1864,7 @@ TSAN_INTERCEPTOR(int, rmdir, char *path) {
}
TSAN_INTERCEPTOR(int, closedir, void *dirp) {
- SCOPED_TSAN_INTERCEPTOR(closedir, dirp);
+ SCOPED_INTERCEPTOR_RAW(closedir, dirp);
if (dirp) {
int fd = dirfd(dirp);
FdClose(thr, pc, fd);
@@ -1981,6 +1975,7 @@ static void ReportErrnoSpoiling(ThreadState *thr, uptr pc) {
static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
int sig, __sanitizer_siginfo *info,
void *uctx) {
+ CHECK(thr->slot);
__sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
if (acquire)
Acquire(thr, 0, (uptr)&sigactions[sig]);
@@ -2268,7 +2263,7 @@ struct dl_iterate_phdr_data {
};
static bool IsAppNotRodata(uptr addr) {
- return IsAppMem(addr) && *MemToShadow(addr) != kShadowRodata;
+ return IsAppMem(addr) && *MemToShadow(addr) != Shadow::kRodata;
}
static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
@@ -2374,7 +2369,7 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
#define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
if (file) { \
int fd = fileno_unlocked(file); \
- if (fd >= 0) FdClose(thr, pc, fd); \
+ FdClose(thr, pc, fd); \
}
#define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \
@@ -2581,7 +2576,7 @@ static USED void syscall_release(uptr pc, uptr addr) {
}
static void syscall_fd_close(uptr pc, int fd) {
- TSAN_SYSCALL();
+ auto *thr = cur_thread();
FdClose(thr, pc, fd);
}
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface.cpp
index 048715185151..e6c4bf2e60a7 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface.cpp
@@ -26,20 +26,6 @@ void __tsan_flush_memory() {
FlushShadowMemory();
}
-void __tsan_read16(void *addr) {
- uptr pc = CALLERPC;
- ThreadState *thr = cur_thread();
- MemoryAccess(thr, pc, (uptr)addr, 8, kAccessRead);
- MemoryAccess(thr, pc, (uptr)addr + 8, 8, kAccessRead);
-}
-
-void __tsan_write16(void *addr) {
- uptr pc = CALLERPC;
- ThreadState *thr = cur_thread();
- MemoryAccess(thr, pc, (uptr)addr, 8, kAccessWrite);
- MemoryAccess(thr, pc, (uptr)addr + 8, 8, kAccessWrite);
-}
-
void __tsan_read16_pc(void *addr, void *pc) {
uptr pc_no_pac = STRIP_PAC_PC(pc);
ThreadState *thr = cur_thread();
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface.inc b/compiler-rt/lib/tsan/rtl/tsan_interface.inc
index 0031800e851f..b0a424ff9c25 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface.inc
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface.inc
@@ -34,6 +34,10 @@ void __tsan_read8(void *addr) {
MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessRead);
}
+void __tsan_read16(void *addr) {
+ MemoryAccess16(cur_thread(), CALLERPC, (uptr)addr, kAccessRead);
+}
+
void __tsan_write1(void *addr) {
MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 1, kAccessWrite);
}
@@ -50,6 +54,10 @@ void __tsan_write8(void *addr) {
MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessWrite);
}
+void __tsan_write16(void *addr) {
+ MemoryAccess16(cur_thread(), CALLERPC, (uptr)addr, kAccessWrite);
+}
+
void __tsan_read1_pc(void *addr, void *pc) {
MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 1, kAccessRead | kAccessExternalPC);
}
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
index 24ba3bb1f65d..f794a2fcdd0d 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
@@ -235,8 +235,9 @@ static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
T v = NoTsanAtomicLoad(a, mo);
SyncVar *s = ctx->metamap.GetSyncIfExists((uptr)a);
if (s) {
- ReadLock l(&s->mtx);
- AcquireImpl(thr, pc, &s->clock);
+ SlotLocker locker(thr);
+ ReadLock lock(&s->mtx);
+ thr->clock.Acquire(s->clock);
// Re-read under sync mutex because we need a consistent snapshot
// of the value and the clock we acquire.
v = NoTsanAtomicLoad(a, mo);
@@ -270,14 +271,14 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
NoTsanAtomicStore(a, v, mo);
return;
}
- __sync_synchronize();
- SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
- Lock l(&s->mtx);
- thr->fast_state.IncrementEpoch();
- // Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
- ReleaseStoreImpl(thr, pc, &s->clock);
- NoTsanAtomicStore(a, v, mo);
+ SlotLocker locker(thr);
+ {
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
+ Lock lock(&s->mtx);
+ thr->clock.ReleaseStore(&s->clock);
+ NoTsanAtomicStore(a, v, mo);
+ }
+ IncrementEpoch(thr);
}
template <typename T, T (*F)(volatile T *v, T op)>
@@ -285,18 +286,21 @@ static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
if (LIKELY(mo == mo_relaxed))
return F(a, v);
- SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
- Lock l(&s->mtx);
- thr->fast_state.IncrementEpoch();
- // Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
- if (IsAcqRelOrder(mo))
- AcquireReleaseImpl(thr, pc, &s->clock);
- else if (IsReleaseOrder(mo))
- ReleaseImpl(thr, pc, &s->clock);
- else if (IsAcquireOrder(mo))
- AcquireImpl(thr, pc, &s->clock);
- return F(a, v);
+ SlotLocker locker(thr);
+ {
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
+ RWLock lock(&s->mtx, IsReleaseOrder(mo));
+ if (IsAcqRelOrder(mo))
+ thr->clock.ReleaseAcquire(&s->clock);
+ else if (IsReleaseOrder(mo))
+ thr->clock.Release(&s->clock);
+ else if (IsAcquireOrder(mo))
+ thr->clock.Acquire(s->clock);
+ v = F(a, v);
+ }
+ if (IsReleaseOrder(mo))
+ IncrementEpoch(thr);
+ return v;
}
template<typename T>
@@ -416,27 +420,28 @@ static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v,
*c = pr;
return false;
}
-
+ SlotLocker locker(thr);
bool release = IsReleaseOrder(mo);
- SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
- RWLock l(&s->mtx, release);
- T cc = *c;
- T pr = func_cas(a, cc, v);
- bool success = pr == cc;
- if (!success) {
- *c = pr;
- mo = fmo;
+ bool success;
+ {
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
+ RWLock lock(&s->mtx, release);
+ T cc = *c;
+ T pr = func_cas(a, cc, v);
+ success = pr == cc;
+ if (!success) {
+ *c = pr;
+ mo = fmo;
+ }
+ if (success && IsAcqRelOrder(mo))
+ thr->clock.ReleaseAcquire(&s->clock);
+ else if (success && IsReleaseOrder(mo))
+ thr->clock.Release(&s->clock);
+ else if (IsAcquireOrder(mo))
+ thr->clock.Acquire(s->clock);
}
- thr->fast_state.IncrementEpoch();
- // Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
-
- if (success && IsAcqRelOrder(mo))
- AcquireReleaseImpl(thr, pc, &s->clock);
- else if (success && IsReleaseOrder(mo))
- ReleaseImpl(thr, pc, &s->clock);
- else if (IsAcquireOrder(mo))
- AcquireImpl(thr, pc, &s->clock);
+ if (success && release)
+ IncrementEpoch(thr);
return success;
}
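The rewritten atomic paths above all follow the same pattern: pick a vector-clock operation from the memory order, perform it under the SyncVar mutex, and bump the epoch on release. A condensed, standalone model of that order dispatch (mo_consume is omitted here for brevity):

#include <cassert>

enum morder { mo_relaxed, mo_acquire, mo_release, mo_acq_rel, mo_seq_cst };

bool IsAcquireOrder(morder mo) {
  return mo == mo_acquire || mo == mo_acq_rel || mo == mo_seq_cst;
}
bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}
bool IsAcqRelOrder(morder mo) { return mo == mo_acq_rel || mo == mo_seq_cst; }

enum ClockOp { kNone, kAcquire, kRelease, kReleaseAcquire };

// Mirrors the if/else chain in AtomicRMW/AtomicCAS above.
ClockOp PickClockOp(morder mo) {
  if (IsAcqRelOrder(mo)) return kReleaseAcquire;
  if (IsReleaseOrder(mo)) return kRelease;
  if (IsAcquireOrder(mo)) return kAcquire;
  return kNone;
}

int main() {
  assert(PickClockOp(mo_acq_rel) == kReleaseAcquire);
  assert(PickClockOp(mo_release) == kRelease);
  assert(PickClockOp(mo_acquire) == kAcquire);
  assert(PickClockOp(mo_relaxed) == kNone);
  return 0;
}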
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp
index c090c1f08cbe..7c15a1638826 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp
@@ -106,7 +106,7 @@ void __tsan_java_free(jptr ptr, jptr size) {
DCHECK_GE(ptr, jctx->heap_begin);
DCHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
- ctx->metamap.FreeRange(thr->proc(), ptr, size);
+ ctx->metamap.FreeRange(thr->proc(), ptr, size, false);
}
void __tsan_java_move(jptr src, jptr dst, jptr size) {
@@ -133,7 +133,7 @@ void __tsan_java_move(jptr src, jptr dst, jptr size) {
// support that anymore as it contains addresses of accesses.
RawShadow *d = MemToShadow(dst);
RawShadow *dend = MemToShadow(dst + size);
- internal_memset(d, 0, (dend - d) * sizeof(*d));
+ ShadowSet(d, dend, Shadow::kEmpty);
}
jptr __tsan_java_find(jptr *from_ptr, jptr to) {
diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
index a31bebcb6ba9..7a72efb12263 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
@@ -125,7 +125,6 @@ ScopedGlobalProcessor::~ScopedGlobalProcessor() {
}
void AllocatorLock() NO_THREAD_SAFETY_ANALYSIS {
- global_proc()->mtx.Lock();
global_proc()->internal_alloc_mtx.Lock();
InternalAllocatorLock();
}
@@ -133,6 +132,13 @@ void AllocatorLock() NO_THREAD_SAFETY_ANALYSIS {
void AllocatorUnlock() NO_THREAD_SAFETY_ANALYSIS {
InternalAllocatorUnlock();
global_proc()->internal_alloc_mtx.Unlock();
+}
+
+void GlobalProcessorLock() NO_THREAD_SAFETY_ANALYSIS {
+ global_proc()->mtx.Lock();
+}
+
+void GlobalProcessorUnlock() NO_THREAD_SAFETY_ANALYSIS {
global_proc()->mtx.Unlock();
}
@@ -192,6 +198,12 @@ void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
GET_STACK_TRACE_FATAL(thr, pc);
ReportAllocationSizeTooBig(sz, malloc_limit, &stack);
}
+ if (UNLIKELY(IsRssLimitExceeded())) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportRssLimitExceeded(&stack);
+ }
void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
if (UNLIKELY(!p)) {
SetAllocatorOutOfMemory();
@@ -245,8 +257,17 @@ void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr size, uptr n) {
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
DPrintf("#%d: alloc(%zu) = 0x%zx\n", thr->tid, sz, p);
+ // Note: this can run before thread initialization/after finalization.
+ // As a result this is not necessarily synchronized with DoReset,
+ // which iterates over and resets all sync objects,
+ // but it is fine to create new MBlocks in this context.
ctx->metamap.AllocBlock(thr, pc, p, sz);
- if (write && thr->ignore_reads_and_writes == 0 && thr->is_inited)
+ // If this runs before thread initialization/after finalization
+ // and we don't have trace initialized, we can't imitate writes.
+ // In such a case just reset the shadow range; this is fine since
+ // it affects only a small fraction of special objects.
+ if (write && thr->ignore_reads_and_writes == 0 &&
+ atomic_load_relaxed(&thr->trace_pos))
MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
else
MemoryResetRange(thr, pc, (uptr)p, sz);
@@ -254,9 +275,16 @@ void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
CHECK_NE(p, (void*)0);
- uptr sz = ctx->metamap.FreeBlock(thr->proc(), p);
+ if (!thr->slot) {
+ // Very early/late in thread lifetime, or during fork.
+ UNUSED uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, false);
+ DPrintf("#%d: free(0x%zx, %zu) (no slot)\n", thr->tid, p, sz);
+ return;
+ }
+ SlotLocker locker(thr);
+ uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, true);
DPrintf("#%d: free(0x%zx, %zu)\n", thr->tid, p, sz);
- if (write && thr->ignore_reads_and_writes == 0 && thr->is_inited)
+ if (write && thr->ignore_reads_and_writes == 0)
MemoryRangeFreed(thr, pc, (uptr)p, sz);
}
@@ -336,7 +364,7 @@ void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
}
uptr user_alloc_usable_size(const void *p) {
- if (p == 0)
+ if (p == 0 || !IsAppMem((uptr)p))
return 0;
MBlock *b = ctx->metamap.GetBlock((uptr)p);
if (!b)
@@ -421,8 +449,6 @@ uptr __sanitizer_get_allocated_size(const void *p) {
void __tsan_on_thread_idle() {
ThreadState *thr = cur_thread();
- thr->clock.ResetCached(&thr->proc()->clock_cache);
- thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
allocator()->SwallowCache(&thr->proc()->alloc_cache);
internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
ctx->metamap.OnProcIdle(thr->proc());
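
The new block in user_alloc_internal adds an RSS-limit guard in front of the actual allocation: if the limit has been exceeded, either return null (when the allocator may return null) or report with the current stack and die. A compact standalone sketch of that guard pattern; the helper names here are placeholders:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    // Placeholder policy hooks; the real runtime reads these from flags and from a
    // periodically updated RSS tracker.
    static bool RssLimitExceeded() { return false; }
    static bool AllocatorMayReturnNull() { return true; }

    void* GuardedAlloc(size_t size) {
      if (RssLimitExceeded()) {
        if (AllocatorMayReturnNull())
          return nullptr;  // caller sees an ordinary allocation failure
        // The real code captures the allocation stack and prints a full report.
        std::fprintf(stderr, "allocator: RSS limit exceeded for %zu-byte request\n", size);
        std::abort();
      }
      return std::malloc(size);
    }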
diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.h b/compiler-rt/lib/tsan/rtl/tsan_mman.h
index db8488eabbe2..2095f28c0253 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_mman.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_mman.h
@@ -26,6 +26,8 @@ void AllocatorProcFinish(Processor *proc);
void AllocatorPrintStats();
void AllocatorLock();
void AllocatorUnlock();
+void GlobalProcessorLock();
+void GlobalProcessorUnlock();
// For user allocations.
void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz,
diff --git a/compiler-rt/lib/tsan/rtl/tsan_mutexset.cpp b/compiler-rt/lib/tsan/rtl/tsan_mutexset.cpp
index 735179686ba9..3a75b80ac30f 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_mutexset.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_mutexset.cpp
@@ -19,57 +19,7 @@ namespace __tsan {
MutexSet::MutexSet() {
}
-void MutexSet::Add(u64 id, bool write, u64 epoch) {
- // Look up existing mutex with the same id.
- for (uptr i = 0; i < size_; i++) {
- if (descs_[i].id == id) {
- descs_[i].count++;
- descs_[i].epoch = epoch;
- return;
- }
- }
- // On overflow, find the oldest mutex and drop it.
- if (size_ == kMaxSize) {
- u64 minepoch = (u64)-1;
- u64 mini = (u64)-1;
- for (uptr i = 0; i < size_; i++) {
- if (descs_[i].epoch < minepoch) {
- minepoch = descs_[i].epoch;
- mini = i;
- }
- }
- RemovePos(mini);
- CHECK_EQ(size_, kMaxSize - 1);
- }
- // Add new mutex descriptor.
- descs_[size_].addr = 0;
- descs_[size_].stack_id = kInvalidStackID;
- descs_[size_].id = id;
- descs_[size_].write = write;
- descs_[size_].epoch = epoch;
- descs_[size_].seq = seq_++;
- descs_[size_].count = 1;
- size_++;
-}
-
-void MutexSet::Del(u64 id, bool write) {
- for (uptr i = 0; i < size_; i++) {
- if (descs_[i].id == id) {
- if (--descs_[i].count == 0)
- RemovePos(i);
- return;
- }
- }
-}
-
-void MutexSet::Remove(u64 id) {
- for (uptr i = 0; i < size_; i++) {
- if (descs_[i].id == id) {
- RemovePos(i);
- return;
- }
- }
-}
+void MutexSet::Reset() { internal_memset(this, 0, sizeof(*this)); }
void MutexSet::AddAddr(uptr addr, StackID stack_id, bool write) {
// Look up existing mutex with the same id.
@@ -93,9 +43,7 @@ void MutexSet::AddAddr(uptr addr, StackID stack_id, bool write) {
// Add new mutex descriptor.
descs_[size_].addr = addr;
descs_[size_].stack_id = stack_id;
- descs_[size_].id = 0;
descs_[size_].write = write;
- descs_[size_].epoch = 0;
descs_[size_].seq = seq_++;
descs_[size_].count = 1;
size_++;
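
The surviving AddAddr path keeps the shape of the deleted Add: look up an existing descriptor for the mutex, otherwise evict the stalest entry when the fixed-size array is full, then append a fresh descriptor stamped with a sequence number (the id/epoch fields are gone because sync objects are no longer identified that way). A small standalone illustration of that bounded-set policy, not the real MutexSet layout:

    #include <cstddef>
    #include <cstdint>

    // Fixed-capacity set of currently held mutexes, keyed by address.
    class SmallMutexSet {
      static constexpr size_t kMaxSize = 16;
      struct Desc { uintptr_t addr; uint32_t seq; uint32_t count; };
      Desc descs_[kMaxSize] = {};
      size_t size_ = 0;
      uint32_t seq_ = 0;

      void RemoveAt(size_t i) { descs_[i] = descs_[--size_]; }

     public:
      void Add(uintptr_t addr) {
        for (size_t i = 0; i < size_; i++) {
          if (descs_[i].addr == addr) {  // recursive lock: bump the count
            descs_[i].count++;
            return;
          }
        }
        if (size_ == kMaxSize) {  // overflow: drop the entry with the oldest seq
          size_t oldest = 0;
          for (size_t i = 1; i < size_; i++)
            if (descs_[i].seq < descs_[oldest].seq) oldest = i;
          RemoveAt(oldest);
        }
        descs_[size_++] = {addr, seq_++, 1};
      }

      void Del(uintptr_t addr) {
        for (size_t i = 0; i < size_; i++) {
          if (descs_[i].addr == addr) {
            if (--descs_[i].count == 0) RemoveAt(i);
            return;
          }
        }
      }

      size_t Size() const { return size_; }
    };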
diff --git a/compiler-rt/lib/tsan/rtl/tsan_mutexset.h b/compiler-rt/lib/tsan/rtl/tsan_mutexset.h
index 93776a664135..aabd361e6afd 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_mutexset.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_mutexset.h
@@ -25,8 +25,6 @@ class MutexSet {
struct Desc {
uptr addr;
StackID stack_id;
- u64 id;
- u64 epoch;
u32 seq;
u32 count;
bool write;
@@ -40,10 +38,7 @@ class MutexSet {
};
MutexSet();
- // The 'id' is obtained from SyncVar::GetId().
- void Add(u64 id, bool write, u64 epoch);
- void Del(u64 id, bool write);
- void Remove(u64 id); // Removes the mutex completely (if it's destroyed).
+ void Reset();
void AddAddr(uptr addr, StackID stack_id, bool write);
void DelAddr(uptr addr, bool destroy = false);
uptr Size() const;
@@ -82,9 +77,7 @@ class DynamicMutexSet {
// in different goroutine).
#if SANITIZER_GO
MutexSet::MutexSet() {}
-void MutexSet::Add(u64 id, bool write, u64 epoch) {}
-void MutexSet::Del(u64 id, bool write) {}
-void MutexSet::Remove(u64 id) {}
+void MutexSet::Reset() {}
void MutexSet::AddAddr(uptr addr, StackID stack_id, bool write) {}
void MutexSet::DelAddr(uptr addr, bool destroy) {}
uptr MutexSet::Size() const { return 0; }
diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform.h b/compiler-rt/lib/tsan/rtl/tsan_platform.h
index 7ff0acace8f6..233bf0a39df0 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_platform.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_platform.h
@@ -18,8 +18,8 @@
# error "Only 64-bit is supported"
#endif
+#include "sanitizer_common/sanitizer_common.h"
#include "tsan_defs.h"
-#include "tsan_trace.h"
namespace __tsan {
@@ -40,14 +40,12 @@ enum {
C/C++ on linux/x86_64 and freebsd/x86_64
0000 0000 1000 - 0080 0000 0000: main binary and/or MAP_32BIT mappings (512GB)
0040 0000 0000 - 0100 0000 0000: -
-0100 0000 0000 - 2000 0000 0000: shadow
-2000 0000 0000 - 3000 0000 0000: -
+0100 0000 0000 - 1000 0000 0000: shadow
+1000 0000 0000 - 3000 0000 0000: -
3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
4000 0000 0000 - 5500 0000 0000: -
5500 0000 0000 - 5680 0000 0000: pie binaries without ASLR or on 4.1+ kernels
-5680 0000 0000 - 6000 0000 0000: -
-6000 0000 0000 - 6200 0000 0000: traces
-6200 0000 0000 - 7d00 0000 0000: -
+5680 0000 0000 - 7d00 0000 0000: -
7b00 0000 0000 - 7c00 0000 0000: heap
7c00 0000 0000 - 7e80 0000 0000: -
7e80 0000 0000 - 8000 0000 0000: modules and main thread stack
@@ -67,10 +65,8 @@ C/C++ on netbsd/amd64 can reuse the same mapping:
struct Mapping48AddressSpace {
static const uptr kMetaShadowBeg = 0x300000000000ull;
static const uptr kMetaShadowEnd = 0x340000000000ull;
- static const uptr kTraceMemBeg = 0x600000000000ull;
- static const uptr kTraceMemEnd = 0x620000000000ull;
static const uptr kShadowBeg = 0x010000000000ull;
- static const uptr kShadowEnd = 0x200000000000ull;
+ static const uptr kShadowEnd = 0x100000000000ull;
static const uptr kHeapMemBeg = 0x7b0000000000ull;
static const uptr kHeapMemEnd = 0x7c0000000000ull;
static const uptr kLoAppMemBeg = 0x000000001000ull;
@@ -89,14 +85,13 @@ struct Mapping48AddressSpace {
C/C++ on linux/mips64 (40-bit VMA)
0000 0000 00 - 0100 0000 00: - (4 GB)
0100 0000 00 - 0200 0000 00: main binary (4 GB)
-0200 0000 00 - 2000 0000 00: - (120 GB)
-2000 0000 00 - 4000 0000 00: shadow (128 GB)
+0200 0000 00 - 1200 0000 00: - (64 GB)
+1200 0000 00 - 2200 0000 00: shadow (64 GB)
+2200 0000 00 - 4000 0000 00: - (120 GB)
4000 0000 00 - 5000 0000 00: metainfo (memory blocks and sync objects) (64 GB)
5000 0000 00 - aa00 0000 00: - (360 GB)
aa00 0000 00 - ab00 0000 00: main binary (PIE) (4 GB)
-ab00 0000 00 - b000 0000 00: - (20 GB)
-b000 0000 00 - b200 0000 00: traces (8 GB)
-b200 0000 00 - fe00 0000 00: - (304 GB)
+ab00 0000 00 - fe00 0000 00: - (332 GB)
fe00 0000 00 - ff00 0000 00: heap (4 GB)
ff00 0000 00 - ff80 0000 00: - (2 GB)
ff80 0000 00 - ffff ffff ff: modules and main thread stack (<2 GB)
@@ -104,10 +99,8 @@ ff80 0000 00 - ffff ffff ff: modules and main thread stack (<2 GB)
struct MappingMips64_40 {
static const uptr kMetaShadowBeg = 0x4000000000ull;
static const uptr kMetaShadowEnd = 0x5000000000ull;
- static const uptr kTraceMemBeg = 0xb000000000ull;
- static const uptr kTraceMemEnd = 0xb200000000ull;
- static const uptr kShadowBeg = 0x2000000000ull;
- static const uptr kShadowEnd = 0x4000000000ull;
+ static const uptr kShadowBeg = 0x1200000000ull;
+ static const uptr kShadowEnd = 0x2200000000ull;
static const uptr kHeapMemBeg = 0xfe00000000ull;
static const uptr kHeapMemEnd = 0xff00000000ull;
static const uptr kLoAppMemBeg = 0x0100000000ull;
@@ -128,12 +121,10 @@ C/C++ on Darwin/iOS/ARM64 (36-bit VMA, 64 GB VM)
0100 0000 00 - 0200 0000 00: main binary, modules, thread stacks (4 GB)
0200 0000 00 - 0300 0000 00: heap (4 GB)
0300 0000 00 - 0400 0000 00: - (4 GB)
-0400 0000 00 - 0c00 0000 00: shadow memory (32 GB)
-0c00 0000 00 - 0d00 0000 00: - (4 GB)
+0400 0000 00 - 0800 0000 00: shadow memory (16 GB)
+0800 0000 00 - 0d00 0000 00: - (20 GB)
0d00 0000 00 - 0e00 0000 00: metainfo (4 GB)
-0e00 0000 00 - 0f00 0000 00: - (4 GB)
-0f00 0000 00 - 0fc0 0000 00: traces (3 GB)
-0fc0 0000 00 - 1000 0000 00: -
+0e00 0000 00 - 1000 0000 00: -
*/
struct MappingAppleAarch64 {
static const uptr kLoAppMemBeg = 0x0100000000ull;
@@ -141,16 +132,14 @@ struct MappingAppleAarch64 {
static const uptr kHeapMemBeg = 0x0200000000ull;
static const uptr kHeapMemEnd = 0x0300000000ull;
static const uptr kShadowBeg = 0x0400000000ull;
- static const uptr kShadowEnd = 0x0c00000000ull;
+ static const uptr kShadowEnd = 0x0800000000ull;
static const uptr kMetaShadowBeg = 0x0d00000000ull;
static const uptr kMetaShadowEnd = 0x0e00000000ull;
- static const uptr kTraceMemBeg = 0x0f00000000ull;
- static const uptr kTraceMemEnd = 0x0fc0000000ull;
static const uptr kHiAppMemBeg = 0x0fc0000000ull;
static const uptr kHiAppMemEnd = 0x0fc0000000ull;
static const uptr kShadowMsk = 0x0ull;
static const uptr kShadowXor = 0x0ull;
- static const uptr kShadowAdd = 0x0ull;
+ static const uptr kShadowAdd = 0x0200000000ull;
static const uptr kVdsoBeg = 0x7000000000000000ull;
static const uptr kMidAppMemBeg = 0;
static const uptr kMidAppMemEnd = 0;
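
kShadowAdd for the Apple AArch64 mapping becomes 0x0200000000 because each unit of application memory now needs half as much shadow (the s390x comment later in this file drops from "4 * app" to "2 * app"), so the smaller multiplier has to be compensated by an offset to keep the shadow region anchored at 0x0400000000. A standalone arithmetic check; the translation form below is an assumption modeled on tsan's MemToShadow, while the constants come from this hunk:

    #include <cstdint>

    constexpr uint64_t kShadowCell = 8;               // app bytes per shadow cell
    constexpr uint64_t kShadowMult = 2;               // shadow bytes per app byte (new runtime)
    constexpr uint64_t kShadowMsk  = 0x0ull;          // MappingAppleAarch64::kShadowMsk
    constexpr uint64_t kShadowXor  = 0x0ull;          // MappingAppleAarch64::kShadowXor
    constexpr uint64_t kShadowAdd  = 0x0200000000ull; // MappingAppleAarch64::kShadowAdd

    constexpr uint64_t MemToShadow(uint64_t addr) {
      return ((addr & ~(kShadowMsk | (kShadowCell - 1))) ^ kShadowXor) * kShadowMult +
             kShadowAdd;
    }

    // Lowest app address maps to kShadowBeg and the end of the heap maps to
    // kShadowEnd, matching the 0x0400000000-0x0800000000 range above.
    static_assert(MemToShadow(0x0100000000ull) == 0x0400000000ull, "kShadowBeg");
    static_assert(MemToShadow(0x0300000000ull) == 0x0800000000ull, "kShadowEnd");

With the old 4x multiplier the same anchors worked with kShadowAdd = 0, which is why the previous value was zero.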
@@ -159,29 +148,25 @@ struct MappingAppleAarch64 {
/*
C/C++ on linux/aarch64 (39-bit VMA)
0000 0010 00 - 0100 0000 00: main binary
-0100 0000 00 - 0800 0000 00: -
-0800 0000 00 - 2000 0000 00: shadow memory
+0100 0000 00 - 0400 0000 00: -
+0400 0000 00 - 1000 0000 00: shadow memory
2000 0000 00 - 3100 0000 00: -
3100 0000 00 - 3400 0000 00: metainfo
3400 0000 00 - 5500 0000 00: -
5500 0000 00 - 5600 0000 00: main binary (PIE)
-5600 0000 00 - 6000 0000 00: -
-6000 0000 00 - 6200 0000 00: traces
-6200 0000 00 - 7d00 0000 00: -
+5600 0000 00 - 7c00 0000 00: -
7c00 0000 00 - 7d00 0000 00: heap
7d00 0000 00 - 7fff ffff ff: modules and main thread stack
*/
struct MappingAarch64_39 {
static const uptr kLoAppMemBeg = 0x0000001000ull;
static const uptr kLoAppMemEnd = 0x0100000000ull;
- static const uptr kShadowBeg = 0x0800000000ull;
- static const uptr kShadowEnd = 0x2000000000ull;
+ static const uptr kShadowBeg = 0x0400000000ull;
+ static const uptr kShadowEnd = 0x1000000000ull;
static const uptr kMetaShadowBeg = 0x3100000000ull;
static const uptr kMetaShadowEnd = 0x3400000000ull;
static const uptr kMidAppMemBeg = 0x5500000000ull;
- static const uptr kMidAppMemEnd = 0x5600000000ull;
- static const uptr kTraceMemBeg = 0x6000000000ull;
- static const uptr kTraceMemEnd = 0x6200000000ull;
+ static const uptr kMidAppMemEnd = 0x5600000000ull;
static const uptr kHeapMemBeg = 0x7c00000000ull;
static const uptr kHeapMemEnd = 0x7d00000000ull;
static const uptr kHiAppMemBeg = 0x7e00000000ull;
@@ -195,15 +180,13 @@ struct MappingAarch64_39 {
/*
C/C++ on linux/aarch64 (42-bit VMA)
00000 0010 00 - 01000 0000 00: main binary
-01000 0000 00 - 10000 0000 00: -
-10000 0000 00 - 20000 0000 00: shadow memory
-20000 0000 00 - 26000 0000 00: -
+01000 0000 00 - 08000 0000 00: -
+08000 0000 00 - 10000 0000 00: shadow memory
+10000 0000 00 - 26000 0000 00: -
26000 0000 00 - 28000 0000 00: metainfo
28000 0000 00 - 2aa00 0000 00: -
2aa00 0000 00 - 2ab00 0000 00: main binary (PIE)
-2ab00 0000 00 - 36200 0000 00: -
-36200 0000 00 - 36240 0000 00: traces
-36240 0000 00 - 3e000 0000 00: -
+2ab00 0000 00 - 3e000 0000 00: -
3e000 0000 00 - 3f000 0000 00: heap
3f000 0000 00 - 3ffff ffff ff: modules and main thread stack
*/
@@ -211,14 +194,12 @@ struct MappingAarch64_42 {
static const uptr kBroken = kBrokenReverseMapping;
static const uptr kLoAppMemBeg = 0x00000001000ull;
static const uptr kLoAppMemEnd = 0x01000000000ull;
- static const uptr kShadowBeg = 0x10000000000ull;
- static const uptr kShadowEnd = 0x20000000000ull;
+ static const uptr kShadowBeg = 0x08000000000ull;
+ static const uptr kShadowEnd = 0x10000000000ull;
static const uptr kMetaShadowBeg = 0x26000000000ull;
static const uptr kMetaShadowEnd = 0x28000000000ull;
static const uptr kMidAppMemBeg = 0x2aa00000000ull;
- static const uptr kMidAppMemEnd = 0x2ab00000000ull;
- static const uptr kTraceMemBeg = 0x36200000000ull;
- static const uptr kTraceMemEnd = 0x36400000000ull;
+ static const uptr kMidAppMemEnd = 0x2ab00000000ull;
static const uptr kHeapMemBeg = 0x3e000000000ull;
static const uptr kHeapMemEnd = 0x3f000000000ull;
static const uptr kHiAppMemBeg = 0x3f000000000ull;
@@ -232,14 +213,12 @@ struct MappingAarch64_42 {
struct MappingAarch64_48 {
static const uptr kLoAppMemBeg = 0x0000000001000ull;
static const uptr kLoAppMemEnd = 0x0000200000000ull;
- static const uptr kShadowBeg = 0x0002000000000ull;
- static const uptr kShadowEnd = 0x0004000000000ull;
+ static const uptr kShadowBeg = 0x0001000000000ull;
+ static const uptr kShadowEnd = 0x0002000000000ull;
static const uptr kMetaShadowBeg = 0x0005000000000ull;
static const uptr kMetaShadowEnd = 0x0006000000000ull;
static const uptr kMidAppMemBeg = 0x0aaaa00000000ull;
- static const uptr kMidAppMemEnd = 0x0aaaf00000000ull;
- static const uptr kTraceMemBeg = 0x0f06000000000ull;
- static const uptr kTraceMemEnd = 0x0f06200000000ull;
+ static const uptr kMidAppMemEnd = 0x0aaaf00000000ull;
static const uptr kHeapMemBeg = 0x0ffff00000000ull;
static const uptr kHeapMemEnd = 0x0ffff00000000ull;
static const uptr kHiAppMemBeg = 0x0ffff00000000ull;
@@ -257,9 +236,7 @@ C/C++ on linux/powerpc64 (44-bit VMA)
0001 0000 0000 - 0b00 0000 0000: shadow
0b00 0000 0000 - 0b00 0000 0000: -
0b00 0000 0000 - 0d00 0000 0000: metainfo (memory blocks and sync objects)
-0d00 0000 0000 - 0d00 0000 0000: -
-0d00 0000 0000 - 0f00 0000 0000: traces
-0f00 0000 0000 - 0f00 0000 0000: -
+0d00 0000 0000 - 0f00 0000 0000: -
0f00 0000 0000 - 0f50 0000 0000: heap
0f50 0000 0000 - 0f60 0000 0000: -
0f60 0000 0000 - 1000 0000 0000: modules and main thread stack
@@ -269,8 +246,6 @@ struct MappingPPC64_44 {
kBrokenMapping | kBrokenReverseMapping | kBrokenLinearity;
static const uptr kMetaShadowBeg = 0x0b0000000000ull;
static const uptr kMetaShadowEnd = 0x0d0000000000ull;
- static const uptr kTraceMemBeg = 0x0d0000000000ull;
- static const uptr kTraceMemEnd = 0x0f0000000000ull;
static const uptr kShadowBeg = 0x000100000000ull;
static const uptr kShadowEnd = 0x0b0000000000ull;
static const uptr kLoAppMemBeg = 0x000000000100ull;
@@ -291,23 +266,19 @@ struct MappingPPC64_44 {
C/C++ on linux/powerpc64 (46-bit VMA)
0000 0000 1000 - 0100 0000 0000: main binary
0100 0000 0000 - 0200 0000 0000: -
-0100 0000 0000 - 1000 0000 0000: shadow
-1000 0000 0000 - 1000 0000 0000: -
-1000 0000 0000 - 2000 0000 0000: metainfo (memory blocks and sync objects)
-2000 0000 0000 - 2000 0000 0000: -
-2000 0000 0000 - 2200 0000 0000: traces
-2200 0000 0000 - 3d00 0000 0000: -
+0100 0000 0000 - 0800 0000 0000: shadow
+0800 0000 0000 - 1000 0000 0000: -
+1000 0000 0000 - 1200 0000 0000: metainfo (memory blocks and sync objects)
+1200 0000 0000 - 3d00 0000 0000: -
3d00 0000 0000 - 3e00 0000 0000: heap
3e00 0000 0000 - 3e80 0000 0000: -
3e80 0000 0000 - 4000 0000 0000: modules and main thread stack
*/
struct MappingPPC64_46 {
static const uptr kMetaShadowBeg = 0x100000000000ull;
- static const uptr kMetaShadowEnd = 0x200000000000ull;
- static const uptr kTraceMemBeg = 0x200000000000ull;
- static const uptr kTraceMemEnd = 0x220000000000ull;
+ static const uptr kMetaShadowEnd = 0x120000000000ull;
static const uptr kShadowBeg = 0x010000000000ull;
- static const uptr kShadowEnd = 0x100000000000ull;
+ static const uptr kShadowEnd = 0x080000000000ull;
static const uptr kHeapMemBeg = 0x3d0000000000ull;
static const uptr kHeapMemEnd = 0x3e0000000000ull;
static const uptr kLoAppMemBeg = 0x000000001000ull;
@@ -326,23 +297,19 @@ struct MappingPPC64_46 {
C/C++ on linux/powerpc64 (47-bit VMA)
0000 0000 1000 - 0100 0000 0000: main binary
0100 0000 0000 - 0200 0000 0000: -
-0100 0000 0000 - 1000 0000 0000: shadow
-1000 0000 0000 - 1000 0000 0000: -
-1000 0000 0000 - 2000 0000 0000: metainfo (memory blocks and sync objects)
-2000 0000 0000 - 2000 0000 0000: -
-2000 0000 0000 - 2200 0000 0000: traces
-2200 0000 0000 - 7d00 0000 0000: -
+0100 0000 0000 - 0800 0000 0000: shadow
+0800 0000 0000 - 1000 0000 0000: -
+1000 0000 0000 - 1200 0000 0000: metainfo (memory blocks and sync objects)
+1200 0000 0000 - 7d00 0000 0000: -
7d00 0000 0000 - 7e00 0000 0000: heap
7e00 0000 0000 - 7e80 0000 0000: -
7e80 0000 0000 - 8000 0000 0000: modules and main thread stack
*/
struct MappingPPC64_47 {
static const uptr kMetaShadowBeg = 0x100000000000ull;
- static const uptr kMetaShadowEnd = 0x200000000000ull;
- static const uptr kTraceMemBeg = 0x200000000000ull;
- static const uptr kTraceMemEnd = 0x220000000000ull;
+ static const uptr kMetaShadowEnd = 0x120000000000ull;
static const uptr kShadowBeg = 0x010000000000ull;
- static const uptr kShadowEnd = 0x100000000000ull;
+ static const uptr kShadowEnd = 0x080000000000ull;
static const uptr kHeapMemBeg = 0x7d0000000000ull;
static const uptr kHeapMemEnd = 0x7e0000000000ull;
static const uptr kLoAppMemBeg = 0x000000001000ull;
@@ -362,22 +329,18 @@ C/C++ on linux/s390x
While the kernel provides a 64-bit address space, we have to restrict ourselves
to 48 bits due to how e.g. SyncVar::GetId() works.
0000 0000 1000 - 0e00 0000 0000: binary, modules, stacks - 14 TiB
-0e00 0000 0000 - 4000 0000 0000: -
-4000 0000 0000 - 8000 0000 0000: shadow - 64TiB (4 * app)
-8000 0000 0000 - 9000 0000 0000: -
+0e00 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 4000 0000 0000: shadow - 32TiB (2 * app)
+4000 0000 0000 - 9000 0000 0000: -
9000 0000 0000 - 9800 0000 0000: metainfo - 8TiB (0.5 * app)
-9800 0000 0000 - a000 0000 0000: -
-a000 0000 0000 - b000 0000 0000: traces - 16TiB (max history * 128k threads)
-b000 0000 0000 - be00 0000 0000: -
+9800 0000 0000 - be00 0000 0000: -
be00 0000 0000 - c000 0000 0000: heap - 2TiB (max supported by the allocator)
*/
struct MappingS390x {
static const uptr kMetaShadowBeg = 0x900000000000ull;
static const uptr kMetaShadowEnd = 0x980000000000ull;
- static const uptr kTraceMemBeg = 0xa00000000000ull;
- static const uptr kTraceMemEnd = 0xb00000000000ull;
- static const uptr kShadowBeg = 0x400000000000ull;
- static const uptr kShadowEnd = 0x800000000000ull;
+ static const uptr kShadowBeg = 0x200000000000ull;
+ static const uptr kShadowEnd = 0x400000000000ull;
static const uptr kHeapMemBeg = 0xbe0000000000ull;
static const uptr kHeapMemEnd = 0xc00000000000ull;
static const uptr kLoAppMemBeg = 0x000000001000ull;
@@ -397,21 +360,17 @@ struct MappingS390x {
0000 1000 0000 - 00c0 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
00e0 0000 0000 - 2000 0000 0000: -
-2000 0000 0000 - 2380 0000 0000: shadow
-2380 0000 0000 - 3000 0000 0000: -
+2000 0000 0000 - 21c0 0000 0000: shadow
+21c0 0000 0000 - 3000 0000 0000: -
3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
-4000 0000 0000 - 6000 0000 0000: -
-6000 0000 0000 - 6200 0000 0000: traces
-6200 0000 0000 - 8000 0000 0000: -
+4000 0000 0000 - 8000 0000 0000: -
*/
struct MappingGo48 {
static const uptr kMetaShadowBeg = 0x300000000000ull;
static const uptr kMetaShadowEnd = 0x400000000000ull;
- static const uptr kTraceMemBeg = 0x600000000000ull;
- static const uptr kTraceMemEnd = 0x620000000000ull;
static const uptr kShadowBeg = 0x200000000000ull;
- static const uptr kShadowEnd = 0x238000000000ull;
+ static const uptr kShadowEnd = 0x21c000000000ull;
static const uptr kLoAppMemBeg = 0x000000001000ull;
static const uptr kLoAppMemEnd = 0x00e000000000ull;
static const uptr kMidAppMemBeg = 0;
@@ -431,8 +390,8 @@ struct MappingGo48 {
0000 1000 0000 - 00f8 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
00e0 0000 0000 - 0100 0000 0000: -
-0100 0000 0000 - 0500 0000 0000: shadow
-0500 0000 0000 - 0700 0000 0000: traces
+0100 0000 0000 - 0300 0000 0000: shadow
+0300 0000 0000 - 0700 0000 0000: -
0700 0000 0000 - 0770 0000 0000: metainfo (memory blocks and sync objects)
07d0 0000 0000 - 8000 0000 0000: -
*/
@@ -440,10 +399,8 @@ struct MappingGo48 {
struct MappingGoWindows {
static const uptr kMetaShadowBeg = 0x070000000000ull;
static const uptr kMetaShadowEnd = 0x077000000000ull;
- static const uptr kTraceMemBeg = 0x050000000000ull;
- static const uptr kTraceMemEnd = 0x070000000000ull;
static const uptr kShadowBeg = 0x010000000000ull;
- static const uptr kShadowEnd = 0x050000000000ull;
+ static const uptr kShadowEnd = 0x030000000000ull;
static const uptr kLoAppMemBeg = 0x000000001000ull;
static const uptr kLoAppMemEnd = 0x00e000000000ull;
static const uptr kMidAppMemBeg = 0;
@@ -463,21 +420,17 @@ struct MappingGoWindows {
0000 1000 0000 - 00c0 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
00e0 0000 0000 - 2000 0000 0000: -
-2000 0000 0000 - 2380 0000 0000: shadow
-2380 0000 0000 - 2400 0000 0000: -
-2400 0000 0000 - 3400 0000 0000: metainfo (memory blocks and sync objects)
-3400 0000 0000 - 3600 0000 0000: -
-3600 0000 0000 - 3800 0000 0000: traces
-3800 0000 0000 - 4000 0000 0000: -
+2000 0000 0000 - 21c0 0000 0000: shadow
+21c0 0000 0000 - 2400 0000 0000: -
+2400 0000 0000 - 2470 0000 0000: metainfo (memory blocks and sync objects)
+2470 0000 0000 - 4000 0000 0000: -
*/
struct MappingGoPPC64_46 {
static const uptr kMetaShadowBeg = 0x240000000000ull;
- static const uptr kMetaShadowEnd = 0x340000000000ull;
- static const uptr kTraceMemBeg = 0x360000000000ull;
- static const uptr kTraceMemEnd = 0x380000000000ull;
+ static const uptr kMetaShadowEnd = 0x247000000000ull;
static const uptr kShadowBeg = 0x200000000000ull;
- static const uptr kShadowEnd = 0x238000000000ull;
+ static const uptr kShadowEnd = 0x21c000000000ull;
static const uptr kLoAppMemBeg = 0x000000001000ull;
static const uptr kLoAppMemEnd = 0x00e000000000ull;
static const uptr kMidAppMemBeg = 0;
@@ -497,21 +450,17 @@ struct MappingGoPPC64_46 {
0000 1000 0000 - 00c0 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
00e0 0000 0000 - 2000 0000 0000: -
-2000 0000 0000 - 3000 0000 0000: shadow
-3000 0000 0000 - 3000 0000 0000: -
-3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
-4000 0000 0000 - 6000 0000 0000: -
-6000 0000 0000 - 6200 0000 0000: traces
-6200 0000 0000 - 8000 0000 0000: -
+2000 0000 0000 - 2800 0000 0000: shadow
+2800 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 3200 0000 0000: metainfo (memory blocks and sync objects)
+3200 0000 0000 - 8000 0000 0000: -
*/
struct MappingGoPPC64_47 {
static const uptr kMetaShadowBeg = 0x300000000000ull;
- static const uptr kMetaShadowEnd = 0x400000000000ull;
- static const uptr kTraceMemBeg = 0x600000000000ull;
- static const uptr kTraceMemEnd = 0x620000000000ull;
+ static const uptr kMetaShadowEnd = 0x320000000000ull;
static const uptr kShadowBeg = 0x200000000000ull;
- static const uptr kShadowEnd = 0x300000000000ull;
+ static const uptr kShadowEnd = 0x280000000000ull;
static const uptr kLoAppMemBeg = 0x000000001000ull;
static const uptr kLoAppMemEnd = 0x00e000000000ull;
static const uptr kMidAppMemBeg = 0;
@@ -531,20 +480,16 @@ struct MappingGoPPC64_47 {
0000 1000 0000 - 00c0 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
00e0 0000 0000 - 2000 0000 0000: -
-2000 0000 0000 - 3000 0000 0000: shadow
-3000 0000 0000 - 3000 0000 0000: -
-3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
-4000 0000 0000 - 6000 0000 0000: -
-6000 0000 0000 - 6200 0000 0000: traces
-6200 0000 0000 - 8000 0000 0000: -
+2000 0000 0000 - 2800 0000 0000: shadow
+2800 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 3200 0000 0000: metainfo (memory blocks and sync objects)
+3200 0000 0000 - 8000 0000 0000: -
*/
struct MappingGoAarch64 {
static const uptr kMetaShadowBeg = 0x300000000000ull;
- static const uptr kMetaShadowEnd = 0x400000000000ull;
- static const uptr kTraceMemBeg = 0x600000000000ull;
- static const uptr kTraceMemEnd = 0x620000000000ull;
+ static const uptr kMetaShadowEnd = 0x320000000000ull;
static const uptr kShadowBeg = 0x200000000000ull;
- static const uptr kShadowEnd = 0x300000000000ull;
+ static const uptr kShadowEnd = 0x280000000000ull;
static const uptr kLoAppMemBeg = 0x000000001000ull;
static const uptr kLoAppMemEnd = 0x00e000000000ull;
static const uptr kMidAppMemBeg = 0;
@@ -565,20 +510,16 @@ Go on linux/mips64 (47-bit VMA)
0000 1000 0000 - 00c0 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
00e0 0000 0000 - 2000 0000 0000: -
-2000 0000 0000 - 3000 0000 0000: shadow
-3000 0000 0000 - 3000 0000 0000: -
-3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
-4000 0000 0000 - 6000 0000 0000: -
-6000 0000 0000 - 6200 0000 0000: traces
-6200 0000 0000 - 8000 0000 0000: -
+2000 0000 0000 - 2800 0000 0000: shadow
+2800 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 3200 0000 0000: metainfo (memory blocks and sync objects)
+3200 0000 0000 - 8000 0000 0000: -
*/
struct MappingGoMips64_47 {
static const uptr kMetaShadowBeg = 0x300000000000ull;
- static const uptr kMetaShadowEnd = 0x400000000000ull;
- static const uptr kTraceMemBeg = 0x600000000000ull;
- static const uptr kTraceMemEnd = 0x620000000000ull;
+ static const uptr kMetaShadowEnd = 0x320000000000ull;
static const uptr kShadowBeg = 0x200000000000ull;
- static const uptr kShadowEnd = 0x300000000000ull;
+ static const uptr kShadowEnd = 0x280000000000ull;
static const uptr kLoAppMemBeg = 0x000000001000ull;
static const uptr kLoAppMemEnd = 0x00e000000000ull;
static const uptr kMidAppMemBeg = 0;
@@ -597,19 +538,15 @@ struct MappingGoMips64_47 {
Go on linux/s390x
0000 0000 1000 - 1000 0000 0000: executable and heap - 16 TiB
1000 0000 0000 - 4000 0000 0000: -
-4000 0000 0000 - 8000 0000 0000: shadow - 64TiB (4 * app)
-8000 0000 0000 - 9000 0000 0000: -
+4000 0000 0000 - 6000 0000 0000: shadow - 32TiB (2 * app)
+6000 0000 0000 - 9000 0000 0000: -
9000 0000 0000 - 9800 0000 0000: metainfo - 8TiB (0.5 * app)
-9800 0000 0000 - a000 0000 0000: -
-a000 0000 0000 - b000 0000 0000: traces - 16TiB (max history * 128k threads)
*/
struct MappingGoS390x {
static const uptr kMetaShadowBeg = 0x900000000000ull;
static const uptr kMetaShadowEnd = 0x980000000000ull;
- static const uptr kTraceMemBeg = 0xa00000000000ull;
- static const uptr kTraceMemEnd = 0xb00000000000ull;
static const uptr kShadowBeg = 0x400000000000ull;
- static const uptr kShadowEnd = 0x800000000000ull;
+ static const uptr kShadowEnd = 0x600000000000ull;
static const uptr kLoAppMemBeg = 0x000000001000ull;
static const uptr kLoAppMemEnd = 0x100000000000ull;
static const uptr kMidAppMemBeg = 0;
@@ -648,11 +585,11 @@ ALWAYS_INLINE auto SelectMapping(Arg arg) {
return Func::template Apply<MappingGo48>(arg);
# endif
#else // SANITIZER_GO
-# if defined(__x86_64__) || SANITIZER_IOSSIM || SANITIZER_MAC && !SANITIZER_IOS
- return Func::template Apply<Mapping48AddressSpace>(arg);
-# elif defined(__aarch64__) && defined(__APPLE__)
+# if SANITIZER_IOS && !SANITIZER_IOSSIM
return Func::template Apply<MappingAppleAarch64>(arg);
-# elif defined(__aarch64__) && !defined(__APPLE__)
+# elif defined(__x86_64__) || SANITIZER_MAC
+ return Func::template Apply<Mapping48AddressSpace>(arg);
+# elif defined(__aarch64__)
switch (vmaSize) {
case 39:
return Func::template Apply<MappingAarch64_39>(arg);
@@ -715,8 +652,6 @@ enum MappingType {
kShadowEnd,
kMetaShadowBeg,
kMetaShadowEnd,
- kTraceMemBeg,
- kTraceMemEnd,
kVdsoBeg,
};
@@ -750,10 +685,6 @@ struct MappingField {
return Mapping::kMetaShadowBeg;
case kMetaShadowEnd:
return Mapping::kMetaShadowEnd;
- case kTraceMemBeg:
- return Mapping::kTraceMemBeg;
- case kTraceMemEnd:
- return Mapping::kTraceMemEnd;
}
Die();
}
@@ -792,11 +723,6 @@ uptr MetaShadowBeg(void) { return SelectMapping<MappingField>(kMetaShadowBeg); }
ALWAYS_INLINE
uptr MetaShadowEnd(void) { return SelectMapping<MappingField>(kMetaShadowEnd); }
-ALWAYS_INLINE
-uptr TraceMemBeg(void) { return SelectMapping<MappingField>(kTraceMemBeg); }
-ALWAYS_INLINE
-uptr TraceMemEnd(void) { return SelectMapping<MappingField>(kTraceMemEnd); }
-
struct IsAppMemImpl {
template <typename Mapping>
static bool Apply(uptr mem) {
@@ -934,43 +860,10 @@ inline uptr RestoreAddr(uptr addr) {
return SelectMapping<RestoreAddrImpl>(addr);
}
-// The additional page is to catch shadow stack overflow as paging fault.
-// Windows wants 64K alignment for mmaps.
-const uptr kTotalTraceSize = (kTraceSize * sizeof(Event) + sizeof(Trace)
- + (64 << 10) + (64 << 10) - 1) & ~((64 << 10) - 1);
-
-struct GetThreadTraceImpl {
- template <typename Mapping>
- static uptr Apply(uptr tid) {
- uptr p = Mapping::kTraceMemBeg + tid * kTotalTraceSize;
- DCHECK_LT(p, Mapping::kTraceMemEnd);
- return p;
- }
-};
-
-ALWAYS_INLINE
-uptr GetThreadTrace(int tid) { return SelectMapping<GetThreadTraceImpl>(tid); }
-
-struct GetThreadTraceHeaderImpl {
- template <typename Mapping>
- static uptr Apply(uptr tid) {
- uptr p = Mapping::kTraceMemBeg + tid * kTotalTraceSize +
- kTraceSize * sizeof(Event);
- DCHECK_LT(p, Mapping::kTraceMemEnd);
- return p;
- }
-};
-
-ALWAYS_INLINE
-uptr GetThreadTraceHeader(int tid) {
- return SelectMapping<GetThreadTraceHeaderImpl>(tid);
-}
-
void InitializePlatform();
void InitializePlatformEarly();
void CheckAndProtect();
void InitializeShadowMemoryPlatform();
-void FlushShadowMemory();
void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns);
int ExtractResolvFDs(void *state, int *fds, int nfd);
int ExtractRecvmsgFDs(void *msg, int *fds, int nfd);
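
With the trace range gone, every remaining address-space query goes through the pattern visible above: a functor exposing a templated Apply is handed to SelectMapping, which instantiates it for whichever Mapping struct matches the platform and VMA size, so the constants stay compile-time wherever the mapping is statically known. A reduced standalone sketch of that dispatch style with two toy mappings:

    #include <cstdint>
    #include <cstdio>

    using uptr = uint64_t;

    // Toy mapping descriptions standing in for Mapping48AddressSpace and friends.
    struct MappingA {
      static constexpr uptr kShadowBeg = 0x010000000000ull;
      static constexpr uptr kShadowEnd = 0x100000000000ull;
    };
    struct MappingB {
      static constexpr uptr kShadowBeg = 0x200000000000ull;
      static constexpr uptr kShadowEnd = 0x280000000000ull;
    };

    // Each query is a functor with a templated Apply, like MappingField above.
    struct ShadowSizeImpl {
      template <typename Mapping>
      static uptr Apply(int /*unused*/) {
        return Mapping::kShadowEnd - Mapping::kShadowBeg;
      }
    };

    // Stand-in for SelectMapping: picks the mapping from a runtime property
    // (here a fake VMA size) and forwards the argument to the functor.
    template <typename Func, typename Arg>
    uptr SelectMapping(int vma, Arg arg) {
      switch (vma) {
        case 48: return Func::template Apply<MappingA>(arg);
        default: return Func::template Apply<MappingB>(arg);
      }
    }

    int main() {
      std::printf("shadow bytes: %llu\n",
                  (unsigned long long)SelectMapping<ShadowSizeImpl>(48, 0));
    }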
diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp b/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp
index 73ec14892d28..17dbdff8a539 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp
@@ -94,7 +94,6 @@ enum {
MemMeta,
MemFile,
MemMmap,
- MemTrace,
MemHeap,
MemOther,
MemCount,
@@ -112,8 +111,6 @@ void FillProfileCallback(uptr p, uptr rss, bool file, uptr *mem) {
mem[file ? MemFile : MemMmap] += rss;
else if (p >= HeapMemBeg() && p < HeapMemEnd())
mem[MemHeap] += rss;
- else if (p >= TraceMemBeg() && p < TraceMemEnd())
- mem[MemTrace] += rss;
else
mem[MemOther] += rss;
}
@@ -126,42 +123,33 @@ void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
StackDepotStats stacks = StackDepotGetStats();
uptr nthread, nlive;
ctx->thread_registry.GetNumberOfThreads(&nthread, &nlive);
+ uptr trace_mem;
+ {
+ Lock l(&ctx->slot_mtx);
+ trace_mem = ctx->trace_part_total_allocated * sizeof(TracePart);
+ }
uptr internal_stats[AllocatorStatCount];
internal_allocator()->GetStats(internal_stats);
// All these are allocated from the common mmap region.
- mem[MemMmap] -= meta.mem_block + meta.sync_obj + stacks.allocated +
- internal_stats[AllocatorStatMapped];
+ mem[MemMmap] -= meta.mem_block + meta.sync_obj + trace_mem +
+ stacks.allocated + internal_stats[AllocatorStatMapped];
if (s64(mem[MemMmap]) < 0)
mem[MemMmap] = 0;
internal_snprintf(
buf, buf_size,
- "%llus: RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
- " trace:%zd heap:%zd other:%zd intalloc:%zd memblocks:%zd syncobj:%zu"
- " stacks=%zd[%zd] nthr=%zd/%zd\n",
- uptime_ns / (1000 * 1000 * 1000), mem[MemTotal] >> 20,
- mem[MemShadow] >> 20, mem[MemMeta] >> 20, mem[MemFile] >> 20,
- mem[MemMmap] >> 20, mem[MemTrace] >> 20, mem[MemHeap] >> 20,
+ "==%zu== %llus [%zu]: RSS %zd MB: shadow:%zd meta:%zd file:%zd"
+ " mmap:%zd heap:%zd other:%zd intalloc:%zd memblocks:%zd syncobj:%zu"
+ " trace:%zu stacks=%zd threads=%zu/%zu\n",
+ internal_getpid(), uptime_ns / (1000 * 1000 * 1000), ctx->global_epoch,
+ mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20,
+ mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemHeap] >> 20,
mem[MemOther] >> 20, internal_stats[AllocatorStatMapped] >> 20,
- meta.mem_block >> 20, meta.sync_obj >> 20, stacks.allocated >> 20,
- stacks.n_uniq_ids, nlive, nthread);
-}
-
-# if SANITIZER_LINUX
-void FlushShadowMemoryCallback(
- const SuspendedThreadsList &suspended_threads_list,
- void *argument) {
- ReleaseMemoryPagesToOS(ShadowBeg(), ShadowEnd());
-}
-#endif
-
-void FlushShadowMemory() {
-#if SANITIZER_LINUX
- StopTheWorld(FlushShadowMemoryCallback, 0);
-#endif
+ meta.mem_block >> 20, meta.sync_obj >> 20, trace_mem >> 20,
+ stacks.allocated >> 20, nlive, nthread);
}
#if !SANITIZER_GO
-// Mark shadow for .rodata sections with the special kShadowRodata marker.
+// Mark shadow for .rodata sections with the special Shadow::kRodata marker.
// Accesses to .rodata can't race, so this saves time, memory and trace space.
static void MapRodata() {
// First create temp file.
@@ -182,13 +170,13 @@ static void MapRodata() {
return;
internal_unlink(name); // Unlink it now, so that we can reuse the buffer.
fd_t fd = openrv;
- // Fill the file with kShadowRodata.
+ // Fill the file with Shadow::kRodata.
const uptr kMarkerSize = 512 * 1024 / sizeof(RawShadow);
InternalMmapVector<RawShadow> marker(kMarkerSize);
// volatile to prevent insertion of memset
for (volatile RawShadow *p = marker.data(); p < marker.data() + kMarkerSize;
p++)
- *p = kShadowRodata;
+ *p = Shadow::kRodata;
internal_write(fd, marker.data(), marker.size() * sizeof(RawShadow));
// Map the file into memory.
uptr page = internal_mmap(0, GetPageSizeCached(), PROT_READ | PROT_WRITE,
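
The profile line now derives trace memory from a global count of heap-allocated TracePart buffers rather than a dedicated region, and subtracts it from the generic mmap bucket along with metadata and stack depot memory. A minimal sketch of that accounting step, with placeholder types standing in for the context fields:

    #include <cstdint>
    #include <cstdio>
    #include <mutex>

    // Placeholder stand-ins for the relevant context fields.
    struct TracePart { unsigned char events[256 << 10]; };  // size is illustrative
    struct Ctx {
      std::mutex slot_mtx;
      uint64_t trace_part_total_allocated = 0;
    } ctx;

    void PrintTraceMem(uint64_t mmap_bucket_bytes) {
      uint64_t trace_mem;
      {
        // Snapshot under the same lock that guards TracePart allocation.
        std::lock_guard<std::mutex> l(ctx.slot_mtx);
        trace_mem = ctx.trace_part_total_allocated * sizeof(TracePart);
      }
      // Trace buffers come from the common mmap region, so they are reported on
      // their own and removed from the generic "mmap" bucket (clamped at zero).
      uint64_t mmap_rest =
          mmap_bucket_bytes > trace_mem ? mmap_bucket_bytes - trace_mem : 0;
      std::printf("mmap:%lluMB trace:%lluMB\n",
                  (unsigned long long)(mmap_rest >> 20),
                  (unsigned long long)(trace_mem >> 20));
    }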
diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp b/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
index 1465f9953c19..44b98d46cfbc 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
@@ -126,9 +126,6 @@ void cur_thread_finalize() {
}
#endif
-void FlushShadowMemory() {
-}
-
static void RegionMemUsage(uptr start, uptr end, uptr *res, uptr *dirty) {
vm_address_t address = start;
vm_address_t end_address = end;
@@ -156,12 +153,10 @@ static void RegionMemUsage(uptr start, uptr end, uptr *res, uptr *dirty) {
void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
uptr shadow_res, shadow_dirty;
uptr meta_res, meta_dirty;
- uptr trace_res, trace_dirty;
RegionMemUsage(ShadowBeg(), ShadowEnd(), &shadow_res, &shadow_dirty);
RegionMemUsage(MetaShadowBeg(), MetaShadowEnd(), &meta_res, &meta_dirty);
- RegionMemUsage(TraceMemBeg(), TraceMemEnd(), &trace_res, &trace_dirty);
-#if !SANITIZER_GO
+# if !SANITIZER_GO
uptr low_res, low_dirty;
uptr high_res, high_dirty;
uptr heap_res, heap_dirty;
@@ -180,7 +175,6 @@ void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
buf, buf_size,
"shadow (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
"meta (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
- "traces (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
# if !SANITIZER_GO
"low app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
"high app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
@@ -193,7 +187,6 @@ void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
"------------------------------\n",
ShadowBeg(), ShadowEnd(), shadow_res / 1024, shadow_dirty / 1024,
MetaShadowBeg(), MetaShadowEnd(), meta_res / 1024, meta_dirty / 1024,
- TraceMemBeg(), TraceMemEnd(), trace_res / 1024, trace_dirty / 1024,
# if !SANITIZER_GO
LoAppMemBeg(), LoAppMemEnd(), low_res / 1024, low_dirty / 1024,
HiAppMemBeg(), HiAppMemEnd(), high_res / 1024, high_dirty / 1024,
diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_platform_posix.cpp
index 763ac444377e..71874aad8dc5 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_platform_posix.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_platform_posix.cpp
@@ -110,27 +110,23 @@ void CheckAndProtect() {
Die();
}
-# if defined(__aarch64__) && defined(__APPLE__) && SANITIZER_IOS
+# if SANITIZER_IOS && !SANITIZER_IOSSIM
ProtectRange(HeapMemEnd(), ShadowBeg());
ProtectRange(ShadowEnd(), MetaShadowBeg());
- ProtectRange(MetaShadowEnd(), TraceMemBeg());
-#else
+ ProtectRange(MetaShadowEnd(), HiAppMemBeg());
+# else
ProtectRange(LoAppMemEnd(), ShadowBeg());
ProtectRange(ShadowEnd(), MetaShadowBeg());
if (MidAppMemBeg()) {
ProtectRange(MetaShadowEnd(), MidAppMemBeg());
- ProtectRange(MidAppMemEnd(), TraceMemBeg());
+ ProtectRange(MidAppMemEnd(), HeapMemBeg());
} else {
- ProtectRange(MetaShadowEnd(), TraceMemBeg());
+ ProtectRange(MetaShadowEnd(), HeapMemBeg());
}
- // Memory for traces is mapped lazily in MapThreadTrace.
- // Protect the whole range for now, so that user does not map something here.
- ProtectRange(TraceMemBeg(), TraceMemEnd());
- ProtectRange(TraceMemEnd(), HeapMemBeg());
ProtectRange(HeapEnd(), HiAppMemBeg());
-#endif
+# endif
-#if defined(__s390x__)
+# if defined(__s390x__)
// Protect the rest of the address space.
const uptr user_addr_max_l4 = 0x0020000000000000ull;
const uptr user_addr_max_l5 = 0xfffffffffffff000ull;
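
Dropping the trace region reduces CheckAndProtect to a chain of ProtectRange calls over the gaps between the remaining regions (app, shadow, metainfo, heap), so nothing else can be mapped there. A hedged standalone sketch of what one such reservation amounts to; the real code goes through MmapFixedNoAccess, and the mmap flags below are an assumption:

    #include <sys/mman.h>

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    // Reserve [beg, end) as inaccessible so the application cannot later map
    // anything into a range the runtime wants to keep free.
    static void ProtectRange(uintptr_t beg, uintptr_t end) {
      if (beg == end)
        return;
      void* res = mmap(reinterpret_cast<void*>(beg), end - beg, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      // Without MAP_FIXED the kernel only honors the hint if the range is free;
      // getting a different address back means something already lives there.
      if (res != reinterpret_cast<void*>(beg)) {
        std::fprintf(stderr, "failed to protect 0x%zx-0x%zx\n", (size_t)beg, (size_t)end);
        std::abort();
      }
    }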
diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform_windows.cpp b/compiler-rt/lib/tsan/rtl/tsan_platform_windows.cpp
index fea893768c79..eb8f354742f4 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_platform_windows.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_platform_windows.cpp
@@ -20,9 +20,6 @@
namespace __tsan {
-void FlushShadowMemory() {
-}
-
void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {}
void InitializePlatformEarly() {
diff --git a/compiler-rt/lib/tsan/rtl/tsan_report.cpp b/compiler-rt/lib/tsan/rtl/tsan_report.cpp
index a926c3761ccf..10d9c761b8ee 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_report.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_report.cpp
@@ -126,7 +126,7 @@ static void PrintMutexSet(Vector<ReportMopMutex> const& mset) {
if (i == 0)
Printf(" (mutexes:");
const ReportMopMutex m = mset[i];
- Printf(" %s M%llu", m.write ? "write" : "read", m.id);
+ Printf(" %s M%u", m.write ? "write" : "read", m.id);
Printf(i == mset.Size() - 1 ? ")" : ",");
}
}
@@ -211,29 +211,23 @@ static void PrintLocation(const ReportLocation *loc) {
static void PrintMutexShort(const ReportMutex *rm, const char *after) {
Decorator d;
- Printf("%sM%lld%s%s", d.Mutex(), rm->id, d.Default(), after);
+ Printf("%sM%d%s%s", d.Mutex(), rm->id, d.Default(), after);
}
static void PrintMutexShortWithAddress(const ReportMutex *rm,
const char *after) {
Decorator d;
- Printf("%sM%lld (%p)%s%s", d.Mutex(), rm->id,
+ Printf("%sM%d (%p)%s%s", d.Mutex(), rm->id,
reinterpret_cast<void *>(rm->addr), d.Default(), after);
}
static void PrintMutex(const ReportMutex *rm) {
Decorator d;
- if (rm->destroyed) {
- Printf("%s", d.Mutex());
- Printf(" Mutex M%llu is already destroyed.\n\n", rm->id);
- Printf("%s", d.Default());
- } else {
- Printf("%s", d.Mutex());
- Printf(" Mutex M%llu (%p) created at:\n", rm->id,
- reinterpret_cast<void *>(rm->addr));
- Printf("%s", d.Default());
- PrintStack(rm->stack);
- }
+ Printf("%s", d.Mutex());
+ Printf(" Mutex M%u (%p) created at:\n", rm->id,
+ reinterpret_cast<void *>(rm->addr));
+ Printf("%s", d.Default());
+ PrintStack(rm->stack);
}
static void PrintThread(const ReportThread *rt) {
@@ -460,12 +454,12 @@ void PrintReport(const ReportDesc *rep) {
} else if (rep->typ == ReportTypeDeadlock) {
Printf("WARNING: DEADLOCK\n");
for (uptr i = 0; i < rep->mutexes.Size(); i++) {
- Printf("Goroutine %d lock mutex %llu while holding mutex %llu:\n", 999,
+ Printf("Goroutine %d lock mutex %u while holding mutex %u:\n", 999,
rep->mutexes[i]->id,
rep->mutexes[(i + 1) % rep->mutexes.Size()]->id);
PrintStack(rep->stacks[2*i]);
Printf("\n");
- Printf("Mutex %llu was previously locked here:\n",
+ Printf("Mutex %u was previously locked here:\n",
rep->mutexes[(i + 1) % rep->mutexes.Size()]->id);
PrintStack(rep->stacks[2*i + 1]);
Printf("\n");
diff --git a/compiler-rt/lib/tsan/rtl/tsan_report.h b/compiler-rt/lib/tsan/rtl/tsan_report.h
index d68c2db88828..3b367f38e266 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_report.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_report.h
@@ -43,7 +43,7 @@ struct ReportStack {
};
struct ReportMopMutex {
- u64 id;
+ int id;
bool write;
};
@@ -91,9 +91,8 @@ struct ReportThread {
};
struct ReportMutex {
- u64 id;
+ int id;
uptr addr;
- bool destroyed;
ReportStack *stack;
};
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
index c14af9788e32..ed60e250cff8 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
@@ -57,110 +57,352 @@ Context *ctx;
bool OnFinalize(bool failed);
void OnInitialize();
#else
-#include <dlfcn.h>
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
-#if !SANITIZER_GO
+# if !SANITIZER_GO
if (on_finalize)
return on_finalize(failed);
-#endif
+# endif
return failed;
}
+
SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {
-#if !SANITIZER_GO
+# if !SANITIZER_GO
if (on_initialize)
on_initialize();
-#endif
+# endif
}
#endif
-static ThreadContextBase *CreateThreadContext(Tid tid) {
- // Map thread trace when context is created.
- char name[50];
- internal_snprintf(name, sizeof(name), "trace %u", tid);
- MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name);
- const uptr hdr = GetThreadTraceHeader(tid);
- internal_snprintf(name, sizeof(name), "trace header %u", tid);
- MapThreadTrace(hdr, sizeof(Trace), name);
- new((void*)hdr) Trace();
- // We are going to use only a small part of the trace with the default
- // value of history_size. However, the constructor writes to the whole trace.
- // Release the unused part.
- uptr hdr_end = hdr + sizeof(Trace);
- hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
- hdr_end = RoundUp(hdr_end, GetPageSizeCached());
- if (hdr_end < hdr + sizeof(Trace)) {
- ReleaseMemoryPagesToOS(hdr_end, hdr + sizeof(Trace));
- uptr unused = hdr + sizeof(Trace) - hdr_end;
- if (hdr_end != (uptr)MmapFixedNoAccess(hdr_end, unused)) {
- Report("ThreadSanitizer: failed to mprotect [0x%zx-0x%zx) \n", hdr_end,
- unused);
- CHECK("unable to mprotect" && 0);
+static TracePart* TracePartAlloc(ThreadState* thr) {
+ TracePart* part = nullptr;
+ {
+ Lock lock(&ctx->slot_mtx);
+ uptr max_parts = Trace::kMinParts + flags()->history_size;
+ Trace* trace = &thr->tctx->trace;
+ if (trace->parts_allocated == max_parts ||
+ ctx->trace_part_finished_excess) {
+ part = ctx->trace_part_recycle.PopFront();
+ DPrintf("#%d: TracePartAlloc: part=%p\n", thr->tid, part);
+ if (part && part->trace) {
+ Trace* trace1 = part->trace;
+ Lock trace_lock(&trace1->mtx);
+ part->trace = nullptr;
+ TracePart* part1 = trace1->parts.PopFront();
+ CHECK_EQ(part, part1);
+ if (trace1->parts_allocated > trace1->parts.Size()) {
+ ctx->trace_part_finished_excess +=
+ trace1->parts_allocated - trace1->parts.Size();
+ trace1->parts_allocated = trace1->parts.Size();
+ }
+ }
+ }
+ if (trace->parts_allocated < max_parts) {
+ trace->parts_allocated++;
+ if (ctx->trace_part_finished_excess)
+ ctx->trace_part_finished_excess--;
+ }
+ if (!part)
+ ctx->trace_part_total_allocated++;
+ else if (ctx->trace_part_recycle_finished)
+ ctx->trace_part_recycle_finished--;
+ }
+ if (!part)
+ part = new (MmapOrDie(sizeof(*part), "TracePart")) TracePart();
+ return part;
+}
+
+static void TracePartFree(TracePart* part) REQUIRES(ctx->slot_mtx) {
+ DCHECK(part->trace);
+ part->trace = nullptr;
+ ctx->trace_part_recycle.PushFront(part);
+}
+
+void TraceResetForTesting() {
+ Lock lock(&ctx->slot_mtx);
+ while (auto* part = ctx->trace_part_recycle.PopFront()) {
+ if (auto trace = part->trace)
+ CHECK_EQ(trace->parts.PopFront(), part);
+ UnmapOrDie(part, sizeof(*part));
+ }
+ ctx->trace_part_total_allocated = 0;
+ ctx->trace_part_recycle_finished = 0;
+ ctx->trace_part_finished_excess = 0;
+}
+
+static void DoResetImpl(uptr epoch) {
+ ThreadRegistryLock lock0(&ctx->thread_registry);
+ Lock lock1(&ctx->slot_mtx);
+ CHECK_EQ(ctx->global_epoch, epoch);
+ ctx->global_epoch++;
+ CHECK(!ctx->resetting);
+ ctx->resetting = true;
+ for (u32 i = ctx->thread_registry.NumThreadsLocked(); i--;) {
+ ThreadContext* tctx = (ThreadContext*)ctx->thread_registry.GetThreadLocked(
+ static_cast<Tid>(i));
+ // Potentially we could purge all ThreadStatusDead threads from the
+ // registry. Since we reset all shadow, they can't race with anything
+ // anymore. However, their tid's can still be stored in some aux places
+ // (e.g. tid of thread that created something).
+ auto trace = &tctx->trace;
+ Lock lock(&trace->mtx);
+ bool attached = tctx->thr && tctx->thr->slot;
+ auto parts = &trace->parts;
+ bool local = false;
+ while (!parts->Empty()) {
+ auto part = parts->Front();
+ local = local || part == trace->local_head;
+ if (local)
+ CHECK(!ctx->trace_part_recycle.Queued(part));
+ else
+ ctx->trace_part_recycle.Remove(part);
+ if (attached && parts->Size() == 1) {
+ // The thread is running and this is the last/current part.
+ // Set the trace position to the end of the current part
+ // to force the thread to call SwitchTracePart and re-attach
+ // to a new slot and allocate a new trace part.
+ // Note: the thread is concurrently modifying the position as well,
+ // so this is only best-effort. The thread can only modify position
+ // within this part, because switching parts is protected by
+ // slot/trace mutexes that we hold here.
+ atomic_store_relaxed(
+ &tctx->thr->trace_pos,
+ reinterpret_cast<uptr>(&part->events[TracePart::kSize]));
+ break;
+ }
+ parts->Remove(part);
+ TracePartFree(part);
+ }
+ CHECK_LE(parts->Size(), 1);
+ trace->local_head = parts->Front();
+ if (tctx->thr && !tctx->thr->slot) {
+ atomic_store_relaxed(&tctx->thr->trace_pos, 0);
+ tctx->thr->trace_prev_pc = 0;
+ }
+ if (trace->parts_allocated > trace->parts.Size()) {
+ ctx->trace_part_finished_excess +=
+ trace->parts_allocated - trace->parts.Size();
+ trace->parts_allocated = trace->parts.Size();
+ }
+ }
+ while (ctx->slot_queue.PopFront()) {
+ }
+ for (auto& slot : ctx->slots) {
+ slot.SetEpoch(kEpochZero);
+ slot.journal.Reset();
+ slot.thr = nullptr;
+ ctx->slot_queue.PushBack(&slot);
+ }
+
+ DPrintf("Resetting shadow...\n");
+ if (!MmapFixedSuperNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(),
+ "shadow")) {
+ Printf("failed to reset shadow memory\n");
+ Die();
+ }
+ DPrintf("Resetting meta shadow...\n");
+ ctx->metamap.ResetClocks();
+ ctx->resetting = false;
+}
+
+// Clang does not understand locking all slots in the loop:
+// error: expecting mutex 'slot.mtx' to be held at start of each loop
+void DoReset(ThreadState* thr, uptr epoch) NO_THREAD_SAFETY_ANALYSIS {
+ {
+ for (auto& slot : ctx->slots) {
+ slot.mtx.Lock();
+ if (UNLIKELY(epoch == 0))
+ epoch = ctx->global_epoch;
+ if (UNLIKELY(epoch != ctx->global_epoch)) {
+ // Epoch can't change once we've locked the first slot.
+ CHECK_EQ(slot.sid, 0);
+ slot.mtx.Unlock();
+ return;
+ }
+ }
+ }
+ DPrintf("#%d: DoReset epoch=%lu\n", thr ? thr->tid : -1, epoch);
+ DoResetImpl(epoch);
+ for (auto& slot : ctx->slots) slot.mtx.Unlock();
+}
+
+void FlushShadowMemory() { DoReset(nullptr, 0); }
+
+static TidSlot* FindSlotAndLock(ThreadState* thr)
+ ACQUIRE(thr->slot->mtx) NO_THREAD_SAFETY_ANALYSIS {
+ CHECK(!thr->slot);
+ TidSlot* slot = nullptr;
+ for (;;) {
+ uptr epoch;
+ {
+ Lock lock(&ctx->slot_mtx);
+ epoch = ctx->global_epoch;
+ if (slot) {
+ // This is an exhausted slot from the previous iteration.
+ if (ctx->slot_queue.Queued(slot))
+ ctx->slot_queue.Remove(slot);
+ thr->slot_locked = false;
+ slot->mtx.Unlock();
+ }
+ for (;;) {
+ slot = ctx->slot_queue.PopFront();
+ if (!slot)
+ break;
+ if (slot->epoch() != kEpochLast) {
+ ctx->slot_queue.PushBack(slot);
+ break;
+ }
+ }
}
+ if (!slot) {
+ DoReset(thr, epoch);
+ continue;
+ }
+ slot->mtx.Lock();
+ CHECK(!thr->slot_locked);
+ thr->slot_locked = true;
+ if (slot->thr) {
+ DPrintf("#%d: preempting sid=%d tid=%d\n", thr->tid, (u32)slot->sid,
+ slot->thr->tid);
+ slot->SetEpoch(slot->thr->fast_state.epoch());
+ slot->thr = nullptr;
+ }
+ if (slot->epoch() != kEpochLast)
+ return slot;
}
- return New<ThreadContext>(tid);
}
+void SlotAttachAndLock(ThreadState* thr) {
+ TidSlot* slot = FindSlotAndLock(thr);
+ DPrintf("#%d: SlotAttach: slot=%u\n", thr->tid, static_cast<int>(slot->sid));
+ CHECK(!slot->thr);
+ CHECK(!thr->slot);
+ slot->thr = thr;
+ thr->slot = slot;
+ Epoch epoch = EpochInc(slot->epoch());
+ CHECK(!EpochOverflow(epoch));
+ slot->SetEpoch(epoch);
+ thr->fast_state.SetSid(slot->sid);
+ thr->fast_state.SetEpoch(epoch);
+ if (thr->slot_epoch != ctx->global_epoch) {
+ thr->slot_epoch = ctx->global_epoch;
+ thr->clock.Reset();
#if !SANITIZER_GO
-static const u32 kThreadQuarantineSize = 16;
-#else
-static const u32 kThreadQuarantineSize = 64;
+ thr->last_sleep_stack_id = kInvalidStackID;
+ thr->last_sleep_clock.Reset();
+#endif
+ }
+ thr->clock.Set(slot->sid, epoch);
+ slot->journal.PushBack({thr->tid, epoch});
+}
+
+static void SlotDetachImpl(ThreadState* thr, bool exiting) {
+ TidSlot* slot = thr->slot;
+ thr->slot = nullptr;
+ if (thr != slot->thr) {
+ slot = nullptr; // we don't own the slot anymore
+ if (thr->slot_epoch != ctx->global_epoch) {
+ TracePart* part = nullptr;
+ auto* trace = &thr->tctx->trace;
+ {
+ Lock l(&trace->mtx);
+ auto* parts = &trace->parts;
+ // The trace can be completely empty in the unlikely event that
+ // the thread is preempted right after it acquired the slot
+ // in ThreadStart and did not trace any events yet.
+ CHECK_LE(parts->Size(), 1);
+ part = parts->PopFront();
+ thr->tctx->trace.local_head = nullptr;
+ atomic_store_relaxed(&thr->trace_pos, 0);
+ thr->trace_prev_pc = 0;
+ }
+ if (part) {
+ Lock l(&ctx->slot_mtx);
+ TracePartFree(part);
+ }
+ }
+ return;
+ }
+ CHECK(exiting || thr->fast_state.epoch() == kEpochLast);
+ slot->SetEpoch(thr->fast_state.epoch());
+ slot->thr = nullptr;
+}
+
+void SlotDetach(ThreadState* thr) {
+ Lock lock(&thr->slot->mtx);
+ SlotDetachImpl(thr, true);
+}
+
+void SlotLock(ThreadState* thr) NO_THREAD_SAFETY_ANALYSIS {
+ DCHECK(!thr->slot_locked);
+#if SANITIZER_DEBUG
+ // Check these mutexes are not locked.
+ // We can call DoReset from SlotAttachAndLock, which will lock
+ // these mutexes, but it happens only every once in a while.
+ { ThreadRegistryLock lock(&ctx->thread_registry); }
+ { Lock lock(&ctx->slot_mtx); }
#endif
+ TidSlot* slot = thr->slot;
+ slot->mtx.Lock();
+ thr->slot_locked = true;
+ if (LIKELY(thr == slot->thr && thr->fast_state.epoch() != kEpochLast))
+ return;
+ SlotDetachImpl(thr, false);
+ thr->slot_locked = false;
+ slot->mtx.Unlock();
+ SlotAttachAndLock(thr);
+}
+
+void SlotUnlock(ThreadState* thr) {
+ DCHECK(thr->slot_locked);
+ thr->slot_locked = false;
+ thr->slot->mtx.Unlock();
+}
Context::Context()
: initialized(),
report_mtx(MutexTypeReport),
nreported(),
- thread_registry(CreateThreadContext, kMaxTid, kThreadQuarantineSize,
- kMaxTidReuse),
+ thread_registry([](Tid tid) -> ThreadContextBase* {
+ return new (Alloc(sizeof(ThreadContext))) ThreadContext(tid);
+ }),
racy_mtx(MutexTypeRacy),
racy_stacks(),
racy_addresses(),
fired_suppressions_mtx(MutexTypeFired),
- clock_alloc(LINKER_INITIALIZED, "clock allocator") {
+ slot_mtx(MutexTypeSlots),
+ resetting() {
fired_suppressions.reserve(8);
+ for (uptr i = 0; i < ARRAY_SIZE(slots); i++) {
+ TidSlot* slot = &slots[i];
+ slot->sid = static_cast<Sid>(i);
+ slot_queue.PushBack(slot);
+ }
+ global_epoch = 1;
}
+TidSlot::TidSlot() : mtx(MutexTypeSlot) {}
+
// The objects are allocated in TLS, so one may rely on zero-initialization.
-ThreadState::ThreadState(Context *ctx, Tid tid, int unique_id, u64 epoch,
- unsigned reuse_count, uptr stk_addr, uptr stk_size,
- uptr tls_addr, uptr tls_size)
- : fast_state(tid, epoch)
- // Do not touch these, rely on zero initialization,
- // they may be accessed before the ctor.
- // , ignore_reads_and_writes()
- // , ignore_interceptors()
- ,
- clock(tid, reuse_count)
-#if !SANITIZER_GO
- ,
- jmp_bufs()
-#endif
- ,
- tid(tid),
- unique_id(unique_id),
- stk_addr(stk_addr),
- stk_size(stk_size),
- tls_addr(tls_addr),
- tls_size(tls_size)
-#if !SANITIZER_GO
- ,
- last_sleep_clock(tid)
-#endif
-{
+ThreadState::ThreadState(Tid tid)
+ // Do not touch these, rely on zero initialization,
+ // they may be accessed before the ctor.
+ // ignore_reads_and_writes()
+ // ignore_interceptors()
+ : tid(tid) {
CHECK_EQ(reinterpret_cast<uptr>(this) % SANITIZER_CACHE_LINE_SIZE, 0);
#if !SANITIZER_GO
// C/C++ uses fixed size shadow stack.
const int kInitStackSize = kShadowStackSize;
- shadow_stack = static_cast<uptr *>(
+ shadow_stack = static_cast<uptr*>(
MmapNoReserveOrDie(kInitStackSize * sizeof(uptr), "shadow stack"));
SetShadowRegionHugePageMode(reinterpret_cast<uptr>(shadow_stack),
kInitStackSize * sizeof(uptr));
#else
// Go uses malloc-allocated shadow stack with dynamic size.
const int kInitStackSize = 8;
- shadow_stack = static_cast<uptr *>(Alloc(kInitStackSize * sizeof(uptr)));
+ shadow_stack = static_cast<uptr*>(Alloc(kInitStackSize * sizeof(uptr)));
#endif
shadow_stack_pos = shadow_stack;
shadow_stack_end = shadow_stack + kInitStackSize;
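
Everything above replaces the old fixed-address per-thread traces with a pool of TidSlots: an attaching thread takes a slot, receives the slot's Sid and a freshly incremented epoch for its fast state, and when no slot has epoch space left DoReset bumps the global epoch, wipes the shadow, and recycles all slots and trace parts at once. A deliberately simplified, single-threaded model of that attach/exhaust/reset cycle (no locking, no preemption of running threads):

    #include <cstdint>
    #include <vector>

    using Epoch = uint16_t;
    constexpr Epoch kEpochLast = 0x3fff;  // illustrative: epochs are a small bitfield
    constexpr int kSlotCount = 4;         // the real runtime has one slot per Sid value

    struct TidSlot {
      int sid = 0;
      Epoch epoch = 0;
    };

    struct Runtime {
      std::vector<TidSlot> slots;
      uint64_t global_epoch = 1;  // counts whole-shadow resets, as in Context

      Runtime() : slots(kSlotCount) {
        for (int i = 0; i < kSlotCount; i++) slots[i].sid = i;
      }

      void DoReset() {  // model of DoResetImpl: forget all slot state at once
        global_epoch++;
        for (auto& s : slots) s.epoch = 0;
      }

      TidSlot* Attach() {  // model of FindSlotAndLock + SlotAttachAndLock
        for (;;) {
          for (auto& s : slots) {
            if (s.epoch < kEpochLast) {
              s.epoch++;  // the attaching thread runs with this fresh epoch
              return &s;
            }
          }
          DoReset();  // every slot exhausted: reset the world and retry
        }
      }
    };

A thread that observes a global_epoch different from the one it last attached under also resets its local clock, which is what the thr->slot_epoch check in SlotAttachAndLock above does.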
@@ -175,11 +417,11 @@ void MemoryProfiler(u64 uptime) {
WriteToFile(ctx->memprof_fd, buf.data(), internal_strlen(buf.data()));
}
-void InitializeMemoryProfiler() {
+static bool InitializeMemoryProfiler() {
ctx->memprof_fd = kInvalidFd;
const char *fname = flags()->profile_memory;
if (!fname || !fname[0])
- return;
+ return false;
if (internal_strcmp(fname, "stdout") == 0) {
ctx->memprof_fd = 1;
} else if (internal_strcmp(fname, "stderr") == 0) {
@@ -191,11 +433,11 @@ void InitializeMemoryProfiler() {
if (ctx->memprof_fd == kInvalidFd) {
Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
filename.data());
- return;
+ return false;
}
}
MemoryProfiler(0);
- MaybeSpawnBackgroundThread();
+ return true;
}
static void *BackgroundThread(void *arg) {
@@ -207,33 +449,34 @@ static void *BackgroundThread(void *arg) {
const u64 kMs2Ns = 1000 * 1000;
const u64 start = NanoTime();
- u64 last_flush = NanoTime();
+ u64 last_flush = start;
uptr last_rss = 0;
- for (int i = 0;
- atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
- i++) {
+ while (!atomic_load_relaxed(&ctx->stop_background_thread)) {
SleepForMillis(100);
u64 now = NanoTime();
// Flush memory if requested.
if (flags()->flush_memory_ms > 0) {
if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
- VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
+ VReport(1, "ThreadSanitizer: periodic memory flush\n");
FlushShadowMemory();
- last_flush = NanoTime();
+ now = last_flush = NanoTime();
}
}
if (flags()->memory_limit_mb > 0) {
uptr rss = GetRSS();
uptr limit = uptr(flags()->memory_limit_mb) << 20;
- VPrintf(1, "ThreadSanitizer: memory flush check"
- " RSS=%llu LAST=%llu LIMIT=%llu\n",
+ VReport(1,
+ "ThreadSanitizer: memory flush check"
+ " RSS=%llu LAST=%llu LIMIT=%llu\n",
(u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
if (2 * rss > limit + last_rss) {
- VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
+ VReport(1, "ThreadSanitizer: flushing memory due to RSS\n");
FlushShadowMemory();
rss = GetRSS();
- VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
+ now = NanoTime();
+ VReport(1, "ThreadSanitizer: memory flushed RSS=%llu\n",
+ (u64)rss >> 20);
}
last_rss = rss;
}
@@ -309,7 +552,8 @@ void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
return;
DontNeedShadowFor(addr, size);
ScopedGlobalProcessor sgp;
- ctx->metamap.ResetRange(thr->proc(), addr, size);
+ SlotLocker locker(thr, true);
+ ctx->metamap.ResetRange(thr->proc(), addr, size, true);
}
#endif
@@ -355,18 +599,6 @@ void MapShadow(uptr addr, uptr size) {
addr + size, meta_begin, meta_end);
}
-void MapThreadTrace(uptr addr, uptr size, const char *name) {
- DPrintf("#0: Mapping trace at 0x%zx-0x%zx(0x%zx)\n", addr, addr + size, size);
- CHECK_GE(addr, TraceMemBeg());
- CHECK_LE(addr + size, TraceMemEnd());
- CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
- if (!MmapFixedSuperNoReserve(addr, size, name)) {
- Printf("FATAL: ThreadSanitizer can not mmap thread trace (0x%zx/0x%zx)\n",
- addr, size);
- Die();
- }
-}
-
#if !SANITIZER_GO
static void OnStackUnwind(const SignalContext &sig, const void *,
BufferedStackTrace *stack) {
@@ -385,8 +617,11 @@ void CheckUnwind() {
// since we are going to die soon.
ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
- cur_thread()->ignore_sync++;
- cur_thread()->ignore_reads_and_writes++;
+ ThreadState* thr = cur_thread();
+ thr->nomalloc = false;
+ thr->ignore_sync++;
+ thr->ignore_reads_and_writes++;
+ atomic_store_relaxed(&thr->in_signal_handler, 0);
#endif
PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
@@ -441,22 +676,23 @@ void Initialize(ThreadState *thr) {
Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif
- VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
+ VPrintf(1, "***** Running under ThreadSanitizer v3 (pid %d) *****\n",
(int)internal_getpid());
// Initialize thread 0.
- Tid tid = ThreadCreate(thr, 0, 0, true);
+ Tid tid = ThreadCreate(nullptr, 0, 0, true);
CHECK_EQ(tid, kMainTid);
ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
#if TSAN_CONTAINS_UBSAN
__ubsan::InitAsPlugin();
#endif
- ctx->initialized = true;
#if !SANITIZER_GO
Symbolizer::LateInitialize();
- InitializeMemoryProfiler();
+ if (InitializeMemoryProfiler() || flags()->force_background_thread)
+ MaybeSpawnBackgroundThread();
#endif
+ ctx->initialized = true;
if (flags()->stop_on_start) {
Printf("ThreadSanitizer is suspended at startup (pid %d)."
@@ -482,7 +718,6 @@ void MaybeSpawnBackgroundThread() {
#endif
}
-
int Finalize(ThreadState *thr) {
bool failed = false;
@@ -490,12 +725,12 @@ int Finalize(ThreadState *thr) {
DumpProcessMap();
if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
- SleepForMillis(flags()->atexit_sleep_ms);
+ internal_usleep(u64(flags()->atexit_sleep_ms) * 1000);
- // Wait for pending reports.
- ctx->report_mtx.Lock();
- { ScopedErrorReportLock l; }
- ctx->report_mtx.Unlock();
+ {
+ // Wait for pending reports.
+ ScopedErrorReportLock lock;
+ }
#if !SANITIZER_GO
if (Verbosity()) AllocatorPrintStats();
@@ -522,8 +757,13 @@ int Finalize(ThreadState *thr) {
#if !SANITIZER_GO
void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
+ GlobalProcessorLock();
+ // Detaching from the slot makes OnUserFree skip writing to the shadow.
+ // The slot will be locked so any attempts to use it will deadlock anyway.
+ SlotDetach(thr);
+ for (auto& slot : ctx->slots) slot.mtx.Lock();
ctx->thread_registry.Lock();
- ctx->report_mtx.Lock();
+ ctx->slot_mtx.Lock();
ScopedErrorReportLock::Lock();
AllocatorLock();
// Suppress all reports in the pthread_atfork callbacks.
@@ -543,30 +783,29 @@ void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
__tsan_test_only_on_fork();
}
-void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
+static void ForkAfter(ThreadState* thr) NO_THREAD_SAFETY_ANALYSIS {
thr->suppress_reports--; // Enabled in ForkBefore.
thr->ignore_interceptors--;
thr->ignore_reads_and_writes--;
AllocatorUnlock();
ScopedErrorReportLock::Unlock();
- ctx->report_mtx.Unlock();
+ ctx->slot_mtx.Unlock();
ctx->thread_registry.Unlock();
+ for (auto& slot : ctx->slots) slot.mtx.Unlock();
+ SlotAttachAndLock(thr);
+ SlotUnlock(thr);
+ GlobalProcessorUnlock();
}
-void ForkChildAfter(ThreadState *thr, uptr pc,
- bool start_thread) NO_THREAD_SAFETY_ANALYSIS {
- thr->suppress_reports--; // Enabled in ForkBefore.
- thr->ignore_interceptors--;
- thr->ignore_reads_and_writes--;
- AllocatorUnlock();
- ScopedErrorReportLock::Unlock();
- ctx->report_mtx.Unlock();
- ctx->thread_registry.Unlock();
+void ForkParentAfter(ThreadState* thr, uptr pc) { ForkAfter(thr); }
- uptr nthread = 0;
- ctx->thread_registry.GetNumberOfThreads(0, 0, &nthread /* alive threads */);
- VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
- " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
+void ForkChildAfter(ThreadState* thr, uptr pc, bool start_thread) {
+ ForkAfter(thr);
+ u32 nthread = ctx->thread_registry.OnFork(thr->tid);
+ VPrintf(1,
+ "ThreadSanitizer: forked new process with pid %d,"
+ " parent had %d threads\n",
+ (int)internal_getpid(), (int)nthread);
if (nthread == 1) {
if (start_thread)
StartBackgroundThread();
@@ -576,6 +815,7 @@ void ForkChildAfter(ThreadState *thr, uptr pc,
// ignores for everything in the hope that we will exec soon.
ctx->after_multithreaded_fork = true;
thr->ignore_interceptors++;
+ thr->suppress_reports++;
ThreadIgnoreBegin(thr, pc);
ThreadIgnoreSyncBegin(thr, pc);
}
@@ -597,8 +837,10 @@ void GrowShadowStack(ThreadState *thr) {
#endif
StackID CurrentStackId(ThreadState *thr, uptr pc) {
+#if !SANITIZER_GO
if (!thr->is_inited) // May happen during bootstrap.
return kInvalidStackID;
+#endif
if (pc != 0) {
#if !SANITIZER_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
@@ -616,53 +858,72 @@ StackID CurrentStackId(ThreadState *thr, uptr pc) {
return id;
}
-namespace v3 {
-
-NOINLINE
-void TraceSwitchPart(ThreadState *thr) {
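+// Fills the gap between the current trace position and the end of the part
+// with NopEvent's. Returns true if the part still has usable space (the
+// fast-path check in TraceAcquire was a false positive), and false if a new
+// part needs to be allocated.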
+static bool TraceSkipGap(ThreadState* thr) {
Trace *trace = &thr->tctx->trace;
Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
DCHECK_EQ(reinterpret_cast<uptr>(pos + 1) & TracePart::kAlignment, 0);
auto *part = trace->parts.Back();
- DPrintf("TraceSwitchPart part=%p pos=%p\n", part, pos);
- if (part) {
- // We can get here when we still have space in the current trace part.
- // The fast-path check in TraceAcquire has false positives in the middle of
- // the part. Check if we are indeed at the end of the current part or not,
- // and fill any gaps with NopEvent's.
- Event *end = &part->events[TracePart::kSize];
- DCHECK_GE(pos, &part->events[0]);
- DCHECK_LE(pos, end);
- if (pos + 1 < end) {
- if ((reinterpret_cast<uptr>(pos) & TracePart::kAlignment) ==
- TracePart::kAlignment)
- *pos++ = NopEvent;
+ DPrintf("#%d: TraceSwitchPart enter trace=%p parts=%p-%p pos=%p\n", thr->tid,
+ trace, trace->parts.Front(), part, pos);
+ if (!part)
+ return false;
+ // We can get here when we still have space in the current trace part.
+ // The fast-path check in TraceAcquire has false positives in the middle of
+ // the part. Check if we are indeed at the end of the current part or not,
+ // and fill any gaps with NopEvent's.
+ Event* end = &part->events[TracePart::kSize];
+ DCHECK_GE(pos, &part->events[0]);
+ DCHECK_LE(pos, end);
+ if (pos + 1 < end) {
+ if ((reinterpret_cast<uptr>(pos) & TracePart::kAlignment) ==
+ TracePart::kAlignment)
*pos++ = NopEvent;
- DCHECK_LE(pos + 2, end);
- atomic_store_relaxed(&thr->trace_pos, reinterpret_cast<uptr>(pos));
- // Ensure we setup trace so that the next TraceAcquire
- // won't detect trace part end.
- Event *ev;
- CHECK(TraceAcquire(thr, &ev));
- return;
- }
- // We are indeed at the end.
- for (; pos < end; pos++) *pos = NopEvent;
+ *pos++ = NopEvent;
+ DCHECK_LE(pos + 2, end);
+ atomic_store_relaxed(&thr->trace_pos, reinterpret_cast<uptr>(pos));
+ return true;
}
+ // We are indeed at the end.
+ for (; pos < end; pos++) *pos = NopEvent;
+ return false;
+}
+
+NOINLINE
+void TraceSwitchPart(ThreadState* thr) {
+ if (TraceSkipGap(thr))
+ return;
#if !SANITIZER_GO
if (ctx->after_multithreaded_fork) {
// We just need to survive till exec.
- CHECK(part);
- atomic_store_relaxed(&thr->trace_pos,
- reinterpret_cast<uptr>(&part->events[0]));
- return;
+ TracePart* part = thr->tctx->trace.parts.Back();
+ if (part) {
+ atomic_store_relaxed(&thr->trace_pos,
+ reinterpret_cast<uptr>(&part->events[0]));
+ return;
+ }
}
#endif
- part = new (MmapOrDie(sizeof(TracePart), "TracePart")) TracePart();
+ TraceSwitchPartImpl(thr);
+}
+
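+// Allocates and installs a new trace part, recycling an old local part once
+// the thread keeps enough of them, and re-emits the current stack and mutex
+// set so that the new part can be decoded on its own.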
+void TraceSwitchPartImpl(ThreadState* thr) {
+ SlotLocker locker(thr, true);
+ Trace* trace = &thr->tctx->trace;
+ TracePart* part = TracePartAlloc(thr);
part->trace = trace;
thr->trace_prev_pc = 0;
+ TracePart* recycle = nullptr;
+  // Keep roughly half of the parts local to the thread
+ // (not queued into the recycle queue).
+ uptr local_parts = (Trace::kMinParts + flags()->history_size + 1) / 2;
{
Lock lock(&trace->mtx);
+ if (trace->parts.Empty())
+ trace->local_head = part;
+ if (trace->parts.Size() >= local_parts) {
+ recycle = trace->local_head;
+ trace->local_head = trace->parts.Next(recycle);
+ }
trace->parts.PushBack(part);
atomic_store_relaxed(&thr->trace_pos,
reinterpret_cast<uptr>(&part->events[0]));
@@ -670,60 +931,49 @@ void TraceSwitchPart(ThreadState *thr) {
// Make this part self-sufficient by restoring the current stack
// and mutex set in the beginning of the trace.
TraceTime(thr);
- for (uptr *pos = &thr->shadow_stack[0]; pos < thr->shadow_stack_pos; pos++)
- CHECK(TryTraceFunc(thr, *pos));
+ {
+ // Pathologically large stacks may not fit into the part.
+    // In these cases we log only a fixed number of top frames.
+ const uptr kMaxFrames = 1000;
+ // Sanity check that kMaxFrames won't consume the whole part.
+ static_assert(kMaxFrames < TracePart::kSize / 2, "kMaxFrames is too big");
+ uptr* pos = Max(&thr->shadow_stack[0], thr->shadow_stack_pos - kMaxFrames);
+ for (; pos < thr->shadow_stack_pos; pos++) {
+ if (TryTraceFunc(thr, *pos))
+ continue;
+ CHECK(TraceSkipGap(thr));
+ CHECK(TryTraceFunc(thr, *pos));
+ }
+ }
for (uptr i = 0; i < thr->mset.Size(); i++) {
MutexSet::Desc d = thr->mset.Get(i);
- TraceMutexLock(thr, d.write ? EventType::kLock : EventType::kRLock, 0,
- d.addr, d.stack_id);
+ for (uptr i = 0; i < d.count; i++)
+ TraceMutexLock(thr, d.write ? EventType::kLock : EventType::kRLock, 0,
+ d.addr, d.stack_id);
}
+ {
+ Lock lock(&ctx->slot_mtx);
+    // There is a small chance that the slot may not be queued at this point.
+ // This can happen if the slot has kEpochLast epoch and another thread
+ // in FindSlotAndLock discovered that it's exhausted and removed it from
+ // the slot queue. kEpochLast can happen in 2 cases: (1) if TraceSwitchPart
+ // was called with the slot locked and epoch already at kEpochLast,
+ // or (2) if we've acquired a new slot in SlotLock in the beginning
+ // of the function and the slot was at kEpochLast - 1, so after increment
+    // in SlotAttachAndLock it becomes kEpochLast.
+ if (ctx->slot_queue.Queued(thr->slot)) {
+ ctx->slot_queue.Remove(thr->slot);
+ ctx->slot_queue.PushBack(thr->slot);
+ }
+ if (recycle)
+ ctx->trace_part_recycle.PushBack(recycle);
+ }
+ DPrintf("#%d: TraceSwitchPart exit parts=%p-%p pos=0x%zx\n", thr->tid,
+ trace->parts.Front(), trace->parts.Back(),
+ atomic_load_relaxed(&thr->trace_pos));
}
-} // namespace v3
-
-void TraceSwitch(ThreadState *thr) {
-#if !SANITIZER_GO
- if (ctx->after_multithreaded_fork)
- return;
-#endif
- thr->nomalloc++;
- Trace *thr_trace = ThreadTrace(thr->tid);
- Lock l(&thr_trace->mtx);
- unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
- TraceHeader *hdr = &thr_trace->headers[trace];
- hdr->epoch0 = thr->fast_state.epoch();
- ObtainCurrentStack(thr, 0, &hdr->stack0);
- hdr->mset0 = thr->mset;
- thr->nomalloc--;
-}
-
-Trace *ThreadTrace(Tid tid) { return (Trace *)GetThreadTraceHeader(tid); }
-
-uptr TraceTopPC(ThreadState *thr) {
- Event *events = (Event*)GetThreadTrace(thr->tid);
- uptr pc = events[thr->fast_state.GetTracePos()];
- return pc;
-}
-
-uptr TraceSize() {
- return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
-}
-
-uptr TraceParts() {
- return TraceSize() / kTracePartSize;
-}
-
-#if !SANITIZER_GO
-extern "C" void __tsan_trace_switch() {
- TraceSwitch(cur_thread());
-}
-
-extern "C" void __tsan_report_race() {
- ReportRace(cur_thread());
-}
-#endif
-
-void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
+void ThreadIgnoreBegin(ThreadState* thr, uptr pc) {
DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
thr->ignore_reads_and_writes++;
CHECK_GT(thr->ignore_reads_and_writes, 0);
@@ -783,7 +1033,6 @@ void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif
-
} // namespace __tsan
#if SANITIZER_CHECK_DEADLOCKS
@@ -791,21 +1040,27 @@ namespace __sanitizer {
using namespace __tsan;
MutexMeta mutex_meta[] = {
{MutexInvalid, "Invalid", {}},
- {MutexThreadRegistry, "ThreadRegistry", {}},
- {MutexTypeTrace, "Trace", {}},
- {MutexTypeReport,
- "Report",
- {MutexTypeSyncVar, MutexTypeGlobalProc, MutexTypeTrace}},
- {MutexTypeSyncVar, "SyncVar", {MutexTypeTrace}},
+ {MutexThreadRegistry,
+ "ThreadRegistry",
+ {MutexTypeSlots, MutexTypeTrace, MutexTypeReport}},
+ {MutexTypeReport, "Report", {MutexTypeTrace}},
+ {MutexTypeSyncVar, "SyncVar", {MutexTypeReport, MutexTypeTrace}},
{MutexTypeAnnotations, "Annotations", {}},
- {MutexTypeAtExit, "AtExit", {MutexTypeSyncVar}},
+ {MutexTypeAtExit, "AtExit", {}},
{MutexTypeFired, "Fired", {MutexLeaf}},
{MutexTypeRacy, "Racy", {MutexLeaf}},
- {MutexTypeGlobalProc, "GlobalProc", {}},
+ {MutexTypeGlobalProc, "GlobalProc", {MutexTypeSlot, MutexTypeSlots}},
{MutexTypeInternalAlloc, "InternalAlloc", {MutexLeaf}},
+ {MutexTypeTrace, "Trace", {}},
+ {MutexTypeSlot,
+ "Slot",
+ {MutexMulti, MutexTypeTrace, MutexTypeSyncVar, MutexThreadRegistry,
+ MutexTypeSlots}},
+ {MutexTypeSlots, "Slots", {MutexTypeTrace, MutexTypeReport}},
{},
};
void PrintMutexPC(uptr pc) { StackTrace(&pc, 1).Print(); }
+
} // namespace __sanitizer
#endif
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
index c71b27e1cbf5..d06358b462eb 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
@@ -34,10 +34,10 @@
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_vector.h"
-#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_ignoreset.h"
+#include "tsan_ilist.h"
#include "tsan_mman.h"
#include "tsan_mutexset.h"
#include "tsan_platform.h"
@@ -46,6 +46,7 @@
#include "tsan_stack_trace.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
+#include "tsan_vector_clock.h"
#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
@@ -116,7 +117,6 @@ struct Processor {
#endif
DenseSlabAllocCache block_cache;
DenseSlabAllocCache sync_cache;
- DenseSlabAllocCache clock_cache;
DDPhysicalThread *dd_pt;
};
@@ -130,30 +130,56 @@ struct ScopedGlobalProcessor {
};
#endif
+struct TidEpoch {
+ Tid tid;
+ Epoch epoch;
+};
+
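+// A TidSlot binds one Sid to the thread that currently owns it. The journal
+// records the (tid, epoch) pairs of previous owners so that old shadow values
+// can still be attributed to the right thread when a race is reported.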
+struct TidSlot {
+ Mutex mtx;
+ Sid sid;
+ atomic_uint32_t raw_epoch;
+ ThreadState *thr;
+ Vector<TidEpoch> journal;
+ INode node;
+
+ Epoch epoch() const {
+ return static_cast<Epoch>(atomic_load(&raw_epoch, memory_order_relaxed));
+ }
+
+ void SetEpoch(Epoch v) {
+ atomic_store(&raw_epoch, static_cast<u32>(v), memory_order_relaxed);
+ }
+
+ TidSlot();
+} ALIGNED(SANITIZER_CACHE_LINE_SIZE);
+
// This struct is stored in TLS.
struct ThreadState {
FastState fast_state;
- // Synch epoch represents the threads's epoch before the last synchronization
- // action. It allows to reduce number of shadow state updates.
- // For example, fast_synch_epoch=100, last write to addr X was at epoch=150,
- // if we are processing write to X from the same thread at epoch=200,
- // we do nothing, because both writes happen in the same 'synch epoch'.
- // That is, if another memory access does not race with the former write,
- // it does not race with the latter as well.
- // QUESTION: can we can squeeze this into ThreadState::Fast?
- // E.g. ThreadState::Fast is a 44-bit, 32 are taken by synch_epoch and 12 are
- // taken by epoch between synchs.
- // This way we can save one load from tls.
- u64 fast_synch_epoch;
+ int ignore_sync;
+#if !SANITIZER_GO
+ int ignore_interceptors;
+#endif
+ uptr *shadow_stack_pos;
+
+ // Current position in tctx->trace.Back()->events (Event*).
+ atomic_uintptr_t trace_pos;
+ // PC of the last memory access, used to compute PC deltas in the trace.
+ uptr trace_prev_pc;
+
// Technically `current` should be a separate THREADLOCAL variable;
// but it is placed here in order to share cache line with previous fields.
ThreadState* current;
+
+ atomic_sint32_t pending_signals;
+
+ VectorClock clock;
+
// This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
  // We do not distinguish between ignoring reads and writes
// for better performance.
int ignore_reads_and_writes;
- atomic_sint32_t pending_signals;
- int ignore_sync;
int suppress_reports;
// Go does not support ignores.
#if !SANITIZER_GO
@@ -162,31 +188,27 @@ struct ThreadState {
#endif
uptr *shadow_stack;
uptr *shadow_stack_end;
- uptr *shadow_stack_pos;
- RawShadow *racy_shadow_addr;
- RawShadow racy_state[2];
- MutexSet mset;
- ThreadClock clock;
#if !SANITIZER_GO
Vector<JmpBuf> jmp_bufs;
- int ignore_interceptors;
-#endif
- const Tid tid;
- const int unique_id;
- bool in_symbolizer;
+ int in_symbolizer;
bool in_ignored_lib;
bool is_inited;
+#endif
+ MutexSet mset;
bool is_dead;
- bool is_freeing;
- bool is_vptr_access;
- const uptr stk_addr;
- const uptr stk_size;
- const uptr tls_addr;
- const uptr tls_size;
+ const Tid tid;
+ uptr stk_addr;
+ uptr stk_size;
+ uptr tls_addr;
+ uptr tls_size;
ThreadContext *tctx;
DDLogicalThread *dd_lt;
+ TidSlot *slot;
+ uptr slot_epoch;
+ bool slot_locked;
+
// Current wired Processor, or nullptr. Required to handle any events.
Processor *proc1;
#if !SANITIZER_GO
@@ -200,7 +222,7 @@ struct ThreadState {
#if !SANITIZER_GO
StackID last_sleep_stack_id;
- ThreadClock last_sleep_clock;
+ VectorClock last_sleep_clock;
#endif
// Set in regions of runtime that must be signal-safe and fork-safe.
@@ -209,16 +231,7 @@ struct ThreadState {
const ReportDesc *current_report;
- // Current position in tctx->trace.Back()->events (Event*).
- atomic_uintptr_t trace_pos;
- // PC of the last memory access, used to compute PC deltas in the trace.
- uptr trace_prev_pc;
- Sid sid;
- Epoch epoch;
-
- explicit ThreadState(Context *ctx, Tid tid, int unique_id, u64 epoch,
- unsigned reuse_count, uptr stk_addr, uptr stk_size,
- uptr tls_addr, uptr tls_size);
+ explicit ThreadState(Tid tid);
} ALIGNED(SANITIZER_CACHE_LINE_SIZE);
#if !SANITIZER_GO
@@ -252,14 +265,9 @@ class ThreadContext final : public ThreadContextBase {
~ThreadContext();
ThreadState *thr;
StackID creation_stack_id;
- SyncClock sync;
- // Epoch at which the thread had started.
- // If we see an event from the thread stamped by an older epoch,
- // the event is from a dead thread that shared tid with this thread.
- u64 epoch0;
- u64 epoch1;
-
- v3::Trace trace;
+ VectorClock *sync;
+ uptr sync_epoch;
+ Trace trace;
// Override superclass callbacks.
void OnDead() override;
@@ -314,12 +322,22 @@ struct Context {
InternalMmapVector<FiredSuppression> fired_suppressions;
DDetector *dd;
- ClockAlloc clock_alloc;
-
Flags flags;
fd_t memprof_fd;
+ // The last slot index (kFreeSid) is used to denote freed memory.
+ TidSlot slots[kThreadSlotCount - 1];
+
+ // Protects global_epoch, slot_queue, trace_part_recycle.
Mutex slot_mtx;
+ uptr global_epoch; // guarded by slot_mtx and by all slot mutexes
+ bool resetting; // global reset is in progress
+ IList<TidSlot, &TidSlot::node> slot_queue GUARDED_BY(slot_mtx);
+ IList<TraceHeader, &TraceHeader::global, TracePart> trace_part_recycle
+ GUARDED_BY(slot_mtx);
+ uptr trace_part_total_allocated GUARDED_BY(slot_mtx);
+ uptr trace_part_recycle_finished GUARDED_BY(slot_mtx);
+ uptr trace_part_finished_excess GUARDED_BY(slot_mtx);
};
extern Context *ctx; // The one and the only global runtime context.
@@ -348,14 +366,13 @@ uptr TagFromShadowStackFrame(uptr pc);
class ScopedReportBase {
public:
- void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, StackTrace stack,
- const MutexSet *mset);
+ void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, Tid tid,
+ StackTrace stack, const MutexSet *mset);
void AddStack(StackTrace stack, bool suppressable = false);
void AddThread(const ThreadContext *tctx, bool suppressable = false);
- void AddThread(Tid unique_tid, bool suppressable = false);
+ void AddThread(Tid tid, bool suppressable = false);
void AddUniqueTid(Tid unique_tid);
- void AddMutex(const SyncVar *s);
- u64 AddMutex(u64 id);
+ int AddMutex(uptr addr, StackID creation_stack_id);
void AddLocation(uptr addr, uptr size);
void AddSleep(StackID stack_id);
void SetCount(int count);
@@ -372,8 +389,6 @@ class ScopedReportBase {
// at best it will cause deadlocks on internal mutexes.
ScopedIgnoreInterceptors ignore_interceptors_;
- void AddDeadMutex(u64 id);
-
ScopedReportBase(const ScopedReportBase &) = delete;
void operator=(const ScopedReportBase &) = delete;
};
@@ -389,8 +404,6 @@ class ScopedReport : public ScopedReportBase {
bool ShouldReport(ThreadState *thr, ReportType typ);
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);
-void RestoreStack(Tid tid, const u64 epoch, VarSizeStackTrace *stk,
- MutexSet *mset, uptr *tag = nullptr);
// The stack could look like:
// <start> | <main> | <foo> | tag | <bar>
@@ -438,7 +451,8 @@ void ForkBefore(ThreadState *thr, uptr pc);
void ForkParentAfter(ThreadState *thr, uptr pc);
void ForkChildAfter(ThreadState *thr, uptr pc, bool start_thread);
-void ReportRace(ThreadState *thr);
+void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
+ AccessType typ);
bool OutputReport(ThreadState *thr, const ScopedReport &srep);
bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);
@@ -468,55 +482,28 @@ int Finalize(ThreadState *thr);
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);
-void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
- int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
-void MemoryAccessImpl(ThreadState *thr, uptr addr,
- int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
- u64 *shadow_mem, Shadow cur);
-void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
- uptr size, bool is_write);
+void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
+ AccessType typ);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
AccessType typ);
-
-const int kSizeLog1 = 0;
-const int kSizeLog2 = 1;
-const int kSizeLog4 = 2;
-const int kSizeLog8 = 3;
+// This creates 2 non-inlined specialized versions of MemoryAccessRange.
+template <bool is_read>
+void MemoryAccessRangeT(ThreadState *thr, uptr pc, uptr addr, uptr size);
ALWAYS_INLINE
-void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
- AccessType typ) {
- int size_log;
- switch (size) {
- case 1:
- size_log = kSizeLog1;
- break;
- case 2:
- size_log = kSizeLog2;
- break;
- case 4:
- size_log = kSizeLog4;
- break;
- default:
- DCHECK_EQ(size, 8);
- size_log = kSizeLog8;
- break;
- }
- bool is_write = !(typ & kAccessRead);
- bool is_atomic = typ & kAccessAtomic;
- if (typ & kAccessVptr)
- thr->is_vptr_access = true;
- if (typ & kAccessFree)
- thr->is_freeing = true;
- MemoryAccess(thr, pc, addr, size_log, is_write, is_atomic);
- if (typ & kAccessVptr)
- thr->is_vptr_access = false;
- if (typ & kAccessFree)
- thr->is_freeing = false;
+void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
+ bool is_write) {
+ if (size == 0)
+ return;
+ if (is_write)
+ MemoryAccessRangeT<false>(thr, pc, addr, size);
+ else
+ MemoryAccessRangeT<true>(thr, pc, addr, size);
}
-void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
+void ShadowSet(RawShadow *p, RawShadow *end, RawShadow v);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
+void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
uptr size);
@@ -526,9 +513,6 @@ void ThreadIgnoreEnd(ThreadState *thr);
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncEnd(ThreadState *thr);
-void FuncEntry(ThreadState *thr, uptr pc);
-void FuncExit(ThreadState *thr);
-
Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
ThreadType thread_type);
@@ -574,63 +558,7 @@ void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
-void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
-void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
-void ReleaseStoreAcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
-void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
-void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
-
-// The hacky call uses custom calling convention and an assembly thunk.
-// It is considerably faster that a normal call for the caller
-// if it is not executed (it is intended for slow paths from hot functions).
-// The trick is that the call preserves all registers and the compiler
-// does not treat it as a call.
-// If it does not work for you, use normal call.
-#if !SANITIZER_DEBUG && defined(__x86_64__) && !SANITIZER_MAC
-// The caller may not create the stack frame for itself at all,
-// so we create a reserve stack frame for it (1024b must be enough).
-#define HACKY_CALL(f) \
- __asm__ __volatile__("sub $1024, %%rsp;" \
- CFI_INL_ADJUST_CFA_OFFSET(1024) \
- ".hidden " #f "_thunk;" \
- "call " #f "_thunk;" \
- "add $1024, %%rsp;" \
- CFI_INL_ADJUST_CFA_OFFSET(-1024) \
- ::: "memory", "cc");
-#else
-#define HACKY_CALL(f) f()
-#endif
-
-void TraceSwitch(ThreadState *thr);
-uptr TraceTopPC(ThreadState *thr);
-uptr TraceSize();
-uptr TraceParts();
-Trace *ThreadTrace(Tid tid);
-
-extern "C" void __tsan_trace_switch();
-void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
- EventType typ, u64 addr) {
- if (!kCollectHistory)
- return;
- // TraceSwitch accesses shadow_stack, but it's called infrequently,
- // so we check it here proactively.
- DCHECK(thr->shadow_stack);
- DCHECK_GE((int)typ, 0);
- DCHECK_LE((int)typ, 7);
- DCHECK_EQ(GetLsb(addr, kEventPCBits), addr);
- u64 pos = fs.GetTracePos();
- if (UNLIKELY((pos % kTracePartSize) == 0)) {
-#if !SANITIZER_GO
- HACKY_CALL(__tsan_trace_switch);
-#else
- TraceSwitch(thr);
-#endif
- }
- Event *trace = (Event*)GetThreadTrace(fs.tid());
- Event *evp = &trace[pos];
- Event ev = (u64)addr | ((u64)typ << kEventPCBits);
- *evp = ev;
-}
+void IncrementEpoch(ThreadState *thr);
#if !SANITIZER_GO
uptr ALWAYS_INLINE HeapEnd() {
@@ -638,6 +566,13 @@ uptr ALWAYS_INLINE HeapEnd() {
}
#endif
+void SlotAttachAndLock(ThreadState *thr) ACQUIRE(thr->slot->mtx);
+void SlotDetach(ThreadState *thr);
+void SlotLock(ThreadState *thr) ACQUIRE(thr->slot->mtx);
+void SlotUnlock(ThreadState *thr) RELEASE(thr->slot->mtx);
+void DoReset(ThreadState *thr, uptr epoch);
+void FlushShadowMemory();
+
ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags);
void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber);
void FiberSwitch(ThreadState *thr, uptr pc, ThreadState *fiber, unsigned flags);
@@ -648,6 +583,43 @@ enum FiberSwitchFlags {
FiberSwitchFlagNoSync = 1 << 0, // __tsan_switch_to_fiber_no_sync
};
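+// RAII guard that locks the thread's slot for the scope. With recursive=true
+// it is a no-op if the caller already holds the slot lock.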
+class SlotLocker {
+ public:
+ ALWAYS_INLINE
+ SlotLocker(ThreadState *thr, bool recursive = false)
+ : thr_(thr), locked_(recursive ? thr->slot_locked : false) {
+ if (!locked_)
+ SlotLock(thr_);
+ }
+
+ ALWAYS_INLINE
+ ~SlotLocker() {
+ if (!locked_)
+ SlotUnlock(thr_);
+ }
+
+ private:
+ ThreadState *thr_;
+ bool locked_;
+};
+
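+// RAII guard that temporarily releases the slot if the thread currently holds
+// it and re-acquires it on scope exit.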
+class SlotUnlocker {
+ public:
+ SlotUnlocker(ThreadState *thr) : thr_(thr), locked_(thr->slot_locked) {
+ if (locked_)
+ SlotUnlock(thr_);
+ }
+
+ ~SlotUnlocker() {
+ if (locked_)
+ SlotLock(thr_);
+ }
+
+ private:
+ ThreadState *thr_;
+ bool locked_;
+};
+
ALWAYS_INLINE void ProcessPendingSignals(ThreadState *thr) {
if (UNLIKELY(atomic_load_relaxed(&thr->pending_signals)))
ProcessPendingSignalsImpl(thr);
@@ -666,16 +638,19 @@ void LazyInitialize(ThreadState *thr) {
#endif
}
-namespace v3 {
-
+void TraceResetForTesting();
void TraceSwitchPart(ThreadState *thr);
-bool RestoreStack(Tid tid, EventType type, Sid sid, Epoch epoch, uptr addr,
- uptr size, AccessType typ, VarSizeStackTrace *pstk,
+void TraceSwitchPartImpl(ThreadState *thr);
+bool RestoreStack(EventType type, Sid sid, Epoch epoch, uptr addr, uptr size,
+ AccessType typ, Tid *ptid, VarSizeStackTrace *pstk,
MutexSet *pmset, uptr *ptag);
template <typename EventT>
ALWAYS_INLINE WARN_UNUSED_RESULT bool TraceAcquire(ThreadState *thr,
EventT **ev) {
+ // TraceSwitchPart accesses shadow_stack, but it's called infrequently,
+ // so we check it here proactively.
+ DCHECK(thr->shadow_stack);
Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
#if SANITIZER_DEBUG
// TraceSwitch acquires these mutexes,
@@ -746,20 +721,16 @@ void TraceMutexLock(ThreadState *thr, EventType type, uptr pc, uptr addr,
void TraceMutexUnlock(ThreadState *thr, uptr addr);
void TraceTime(ThreadState *thr);
-} // namespace v3
+void TraceRestartFuncExit(ThreadState *thr);
+void TraceRestartFuncEntry(ThreadState *thr, uptr pc);
void GrowShadowStack(ThreadState *thr);
ALWAYS_INLINE
void FuncEntry(ThreadState *thr, uptr pc) {
- DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void *)pc);
- if (kCollectHistory) {
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
- }
-
- // Shadow stack maintenance can be replaced with
- // stack unwinding during trace switch (which presumably must be faster).
+ DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.sid(), (void *)pc);
+ if (UNLIKELY(!TryTraceFunc(thr, pc)))
+ return TraceRestartFuncEntry(thr, pc);
DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
@@ -773,12 +744,9 @@ void FuncEntry(ThreadState *thr, uptr pc) {
ALWAYS_INLINE
void FuncExit(ThreadState *thr) {
- DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
- if (kCollectHistory) {
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
- }
-
+ DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.sid());
+ if (UNLIKELY(!TryTraceFunc(thr, 0)))
+ return TraceRestartFuncExit(thr);
DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
@@ -790,7 +758,6 @@ void FuncExit(ThreadState *thr) {
extern void (*on_initialize)(void);
extern int (*on_finalize)(int);
#endif
-
} // namespace __tsan
#endif // TSAN_RTL_H
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
index 7365fdaa3038..940c20fcfa1a 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
@@ -15,15 +15,13 @@
namespace __tsan {
-namespace v3 {
-
-ALWAYS_INLINE USED bool TryTraceMemoryAccess(ThreadState *thr, uptr pc,
+ALWAYS_INLINE USED bool TryTraceMemoryAccess(ThreadState* thr, uptr pc,
uptr addr, uptr size,
AccessType typ) {
DCHECK(size == 1 || size == 2 || size == 4 || size == 8);
if (!kCollectHistory)
return true;
- EventAccess *ev;
+ EventAccess* ev;
if (UNLIKELY(!TraceAcquire(thr, &ev)))
return false;
u64 size_log = size == 1 ? 0 : size == 2 ? 1 : size == 4 ? 2 : 3;
@@ -40,25 +38,27 @@ ALWAYS_INLINE USED bool TryTraceMemoryAccess(ThreadState *thr, uptr pc,
TraceRelease(thr, ev);
return true;
}
- auto *evex = reinterpret_cast<EventAccessExt *>(ev);
+ auto* evex = reinterpret_cast<EventAccessExt*>(ev);
evex->is_access = 0;
evex->is_func = 0;
evex->type = EventType::kAccessExt;
evex->is_read = !!(typ & kAccessRead);
evex->is_atomic = !!(typ & kAccessAtomic);
evex->size_log = size_log;
+ // Note: this is important, see comment in EventAccessExt.
+ evex->_ = 0;
evex->addr = CompressAddr(addr);
evex->pc = pc;
TraceRelease(thr, evex);
return true;
}
-ALWAYS_INLINE USED bool TryTraceMemoryAccessRange(ThreadState *thr, uptr pc,
- uptr addr, uptr size,
- AccessType typ) {
+ALWAYS_INLINE
+bool TryTraceMemoryAccessRange(ThreadState* thr, uptr pc, uptr addr, uptr size,
+ AccessType typ) {
if (!kCollectHistory)
return true;
- EventAccessRange *ev;
+ EventAccessRange* ev;
if (UNLIKELY(!TraceAcquire(thr, &ev)))
return false;
thr->trace_prev_pc = pc;
@@ -75,7 +75,7 @@ ALWAYS_INLINE USED bool TryTraceMemoryAccessRange(ThreadState *thr, uptr pc,
return true;
}
-void TraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
+void TraceMemoryAccessRange(ThreadState* thr, uptr pc, uptr addr, uptr size,
AccessType typ) {
if (LIKELY(TryTraceMemoryAccessRange(thr, pc, addr, size, typ)))
return;
@@ -84,7 +84,7 @@ void TraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
DCHECK(res);
}
-void TraceFunc(ThreadState *thr, uptr pc) {
+void TraceFunc(ThreadState* thr, uptr pc) {
if (LIKELY(TryTraceFunc(thr, pc)))
return;
TraceSwitchPart(thr);
@@ -92,7 +92,17 @@ void TraceFunc(ThreadState *thr, uptr pc) {
DCHECK(res);
}
-void TraceMutexLock(ThreadState *thr, EventType type, uptr pc, uptr addr,
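+// Slow paths for FuncEntry/FuncExit: switch to a new trace part and then
+// replay the event that did not fit into the old part.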
+NOINLINE void TraceRestartFuncEntry(ThreadState* thr, uptr pc) {
+ TraceSwitchPart(thr);
+ FuncEntry(thr, pc);
+}
+
+NOINLINE void TraceRestartFuncExit(ThreadState* thr) {
+ TraceSwitchPart(thr);
+ FuncExit(thr);
+}
+
+void TraceMutexLock(ThreadState* thr, EventType type, uptr pc, uptr addr,
StackID stk) {
DCHECK(type == EventType::kLock || type == EventType::kRLock);
if (!kCollectHistory)
@@ -109,7 +119,7 @@ void TraceMutexLock(ThreadState *thr, EventType type, uptr pc, uptr addr,
TraceEvent(thr, ev);
}
-void TraceMutexUnlock(ThreadState *thr, uptr addr) {
+void TraceMutexUnlock(ThreadState* thr, uptr addr) {
if (!kCollectHistory)
return;
EventUnlock ev;
@@ -121,396 +131,523 @@ void TraceMutexUnlock(ThreadState *thr, uptr addr) {
TraceEvent(thr, ev);
}
-void TraceTime(ThreadState *thr) {
+void TraceTime(ThreadState* thr) {
if (!kCollectHistory)
return;
+ FastState fast_state = thr->fast_state;
EventTime ev;
ev.is_access = 0;
ev.is_func = 0;
ev.type = EventType::kTime;
- ev.sid = static_cast<u64>(thr->sid);
- ev.epoch = static_cast<u64>(thr->epoch);
+ ev.sid = static_cast<u64>(fast_state.sid());
+ ev.epoch = static_cast<u64>(fast_state.epoch());
ev._ = 0;
TraceEvent(thr, ev);
}
-} // namespace v3
-
-ALWAYS_INLINE
-Shadow LoadShadow(u64 *p) {
- u64 raw = atomic_load((atomic_uint64_t *)p, memory_order_relaxed);
- return Shadow(raw);
+ALWAYS_INLINE RawShadow LoadShadow(RawShadow* p) {
+ return static_cast<RawShadow>(
+ atomic_load((atomic_uint32_t*)p, memory_order_relaxed));
}
-ALWAYS_INLINE
-void StoreShadow(u64 *sp, u64 s) {
- atomic_store((atomic_uint64_t *)sp, s, memory_order_relaxed);
+ALWAYS_INLINE void StoreShadow(RawShadow* sp, RawShadow s) {
+ atomic_store((atomic_uint32_t*)sp, static_cast<u32>(s), memory_order_relaxed);
}
-ALWAYS_INLINE
-void StoreIfNotYetStored(u64 *sp, u64 *s) {
- StoreShadow(sp, *s);
- *s = 0;
+NOINLINE void DoReportRace(ThreadState* thr, RawShadow* shadow_mem, Shadow cur,
+ Shadow old,
+ AccessType typ) NO_THREAD_SAFETY_ANALYSIS {
+ // For the free shadow markers the first element (that contains kFreeSid)
+ // triggers the race, but the second element contains info about the freeing
+ // thread, take it.
+ if (old.sid() == kFreeSid)
+ old = Shadow(LoadShadow(&shadow_mem[1]));
+ // This prevents trapping on this address in future.
+ for (uptr i = 0; i < kShadowCnt; i++)
+ StoreShadow(&shadow_mem[i], i == 0 ? Shadow::kRodata : Shadow::kEmpty);
+ // See the comment in MemoryRangeFreed as to why the slot is locked
+ // for free memory accesses. ReportRace must not be called with
+ // the slot locked because of the fork. But MemoryRangeFreed is not
+ // called during fork because fork sets ignore_reads_and_writes,
+ // so simply unlocking the slot should be fine.
+ if (typ & kAccessFree)
+ SlotUnlock(thr);
+ ReportRace(thr, shadow_mem, cur, Shadow(old), typ);
+ if (typ & kAccessFree)
+ SlotLock(thr);
}
-extern "C" void __tsan_report_race();
-
+#if !TSAN_VECTORIZE
ALWAYS_INLINE
-void HandleRace(ThreadState *thr, u64 *shadow_mem, Shadow cur, Shadow old) {
- thr->racy_state[0] = cur.raw();
- thr->racy_state[1] = old.raw();
- thr->racy_shadow_addr = shadow_mem;
-#if !SANITIZER_GO
- HACKY_CALL(__tsan_report_race);
-#else
- ReportRace(thr);
-#endif
-}
-
-static inline bool HappensBefore(Shadow old, ThreadState *thr) {
- return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
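+// Scalar version used when TSAN_VECTORIZE is off: returns true if one of the
+// shadow slots already records an equivalent access (for reads, a matching
+// write by the same thread also counts) or if the memory is marked as rodata.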
+bool ContainsSameAccess(RawShadow* s, Shadow cur, int unused0, int unused1,
+ AccessType typ) {
+ for (uptr i = 0; i < kShadowCnt; i++) {
+ auto old = LoadShadow(&s[i]);
+ if (!(typ & kAccessRead)) {
+ if (old == cur.raw())
+ return true;
+ continue;
+ }
+ auto masked = static_cast<RawShadow>(static_cast<u32>(old) |
+ static_cast<u32>(Shadow::kRodata));
+ if (masked == cur.raw())
+ return true;
+ if (!(typ & kAccessNoRodata) && !SANITIZER_GO) {
+ if (old == Shadow::kRodata)
+ return true;
+ }
+ }
+ return false;
}
ALWAYS_INLINE
-void MemoryAccessImpl1(ThreadState *thr, uptr addr, int kAccessSizeLog,
- bool kAccessIsWrite, bool kIsAtomic, u64 *shadow_mem,
- Shadow cur) {
- // This potentially can live in an MMX/SSE scratch register.
- // The required intrinsics are:
- // __m128i _mm_move_epi64(__m128i*);
- // _mm_storel_epi64(u64*, __m128i);
- u64 store_word = cur.raw();
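+// Checks the current access against the 4 shadow slots. Returns true iff a
+// race was found and reported; otherwise stores the access into a matching,
+// empty or evicted slot and returns false.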
+bool CheckRaces(ThreadState* thr, RawShadow* shadow_mem, Shadow cur,
+ int unused0, int unused1, AccessType typ) {
bool stored = false;
+ for (uptr idx = 0; idx < kShadowCnt; idx++) {
+ RawShadow* sp = &shadow_mem[idx];
+ Shadow old(LoadShadow(sp));
+ if (LIKELY(old.raw() == Shadow::kEmpty)) {
+ if (!(typ & kAccessCheckOnly) && !stored)
+ StoreShadow(sp, cur.raw());
+ return false;
+ }
+ if (LIKELY(!(cur.access() & old.access())))
+ continue;
+ if (LIKELY(cur.sid() == old.sid())) {
+ if (!(typ & kAccessCheckOnly) &&
+ LIKELY(cur.access() == old.access() && old.IsRWWeakerOrEqual(typ))) {
+ StoreShadow(sp, cur.raw());
+ stored = true;
+ }
+ continue;
+ }
+ if (LIKELY(old.IsBothReadsOrAtomic(typ)))
+ continue;
+ if (LIKELY(thr->clock.Get(old.sid()) >= old.epoch()))
+ continue;
+ DoReportRace(thr, shadow_mem, cur, old, typ);
+ return true;
+ }
+ // We did not find any races and had already stored
+ // the current access info, so we are done.
+ if (LIKELY(stored))
+ return false;
+ // Choose a random candidate slot and replace it.
+ uptr index =
+ atomic_load_relaxed(&thr->trace_pos) / sizeof(Event) % kShadowCnt;
+ StoreShadow(&shadow_mem[index], cur.raw());
+ return false;
+}
- // scan all the shadow values and dispatch to 4 categories:
- // same, replace, candidate and race (see comments below).
- // we consider only 3 cases regarding access sizes:
- // equal, intersect and not intersect. initially I considered
- // larger and smaller as well, it allowed to replace some
- // 'candidates' with 'same' or 'replace', but I think
- // it's just not worth it (performance- and complexity-wise).
+# define LOAD_CURRENT_SHADOW(cur, shadow_mem) UNUSED int access = 0, shadow = 0
- Shadow old(0);
+#else /* !TSAN_VECTORIZE */
- // It release mode we manually unroll the loop,
- // because empirically gcc generates better code this way.
- // However, we can't afford unrolling in debug mode, because the function
- // consumes almost 4K of stack. Gtest gives only 4K of stack to death test
- // threads, which is not enough for the unrolled loop.
-#if SANITIZER_DEBUG
- for (int idx = 0; idx < 4; idx++) {
-# include "tsan_update_shadow_word.inc"
- }
-#else
- int idx = 0;
-# include "tsan_update_shadow_word.inc"
- idx = 1;
- if (stored) {
-# include "tsan_update_shadow_word.inc"
- } else {
-# include "tsan_update_shadow_word.inc"
- }
- idx = 2;
- if (stored) {
-# include "tsan_update_shadow_word.inc"
- } else {
-# include "tsan_update_shadow_word.inc"
+ALWAYS_INLINE
+bool ContainsSameAccess(RawShadow* unused0, Shadow unused1, m128 shadow,
+ m128 access, AccessType typ) {
+  // Note: we could check if there is a larger access of the same type,
+  // e.g. we just allocated/memset-ed a block (so it contains 8-byte writes)
+  // and now do smaller reads/writes; these could also be considered the "same
+  // access". However, it would make the check more expensive, so it's unclear
+  // if it's worth it. But it would conserve trace space, so it's useful
+  // besides the potential speed-up.
+ if (!(typ & kAccessRead)) {
+ const m128 same = _mm_cmpeq_epi32(shadow, access);
+ return _mm_movemask_epi8(same);
}
- idx = 3;
- if (stored) {
-# include "tsan_update_shadow_word.inc"
- } else {
-# include "tsan_update_shadow_word.inc"
+  // For reads we need to reset the read bit in the shadow,
+  // because we need to match a read with both reads and writes.
+  // Shadow::kRodata has only the read bit set, so it does what we want.
+  // We also abuse it for the rodata check to save a few cycles
+  // since we already loaded Shadow::kRodata into a register.
+  // Reads from rodata can't race.
+  // Measurements show that they can be 10-20% of all memory accesses.
+  // Shadow::kRodata has epoch 0, which cannot appear in shadow normally
+  // (thread epochs start from 1), so the same read bit mask
+  // also serves as a rodata indicator.
+ const m128 read_mask = _mm_set1_epi32(static_cast<u32>(Shadow::kRodata));
+ const m128 masked_shadow = _mm_or_si128(shadow, read_mask);
+ m128 same = _mm_cmpeq_epi32(masked_shadow, access);
+ // Range memory accesses check Shadow::kRodata before calling this,
+  // Shadow::kRodata is not possible for free memory accesses,
+ // and Go does not use Shadow::kRodata.
+ if (!(typ & kAccessNoRodata) && !SANITIZER_GO) {
+ const m128 ro = _mm_cmpeq_epi32(shadow, read_mask);
+ same = _mm_or_si128(ro, same);
}
-#endif
-
- // we did not find any races and had already stored
- // the current access info, so we are done
- if (LIKELY(stored))
- return;
- // choose a random candidate slot and replace it
- StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
- return;
-RACE:
- HandleRace(thr, shadow_mem, cur, old);
- return;
+ return _mm_movemask_epi8(same);
}
-void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
- AccessType typ) {
- DCHECK(!(typ & kAccessAtomic));
- const bool kAccessIsWrite = !(typ & kAccessRead);
- const bool kIsAtomic = false;
- while (size) {
- int size1 = 1;
- int kAccessSizeLog = kSizeLog1;
- if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
- size1 = 8;
- kAccessSizeLog = kSizeLog8;
- } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
- size1 = 4;
- kAccessSizeLog = kSizeLog4;
- } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
- size1 = 2;
- kAccessSizeLog = kSizeLog2;
- }
- MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
- addr += size1;
- size -= size1;
+NOINLINE void DoReportRaceV(ThreadState* thr, RawShadow* shadow_mem, Shadow cur,
+ u32 race_mask, m128 shadow, AccessType typ) {
+ // race_mask points which of the shadow elements raced with the current
+ // access. Extract that element.
+ CHECK_NE(race_mask, 0);
+ u32 old;
+ // Note: _mm_extract_epi32 index must be a constant value.
+ switch (__builtin_ffs(race_mask) / 4) {
+ case 0:
+ old = _mm_extract_epi32(shadow, 0);
+ break;
+ case 1:
+ old = _mm_extract_epi32(shadow, 1);
+ break;
+ case 2:
+ old = _mm_extract_epi32(shadow, 2);
+ break;
+ case 3:
+ old = _mm_extract_epi32(shadow, 3);
+ break;
}
+ Shadow prev(static_cast<RawShadow>(old));
+ // For the free shadow markers the first element (that contains kFreeSid)
+ // triggers the race, but the second element contains info about the freeing
+ // thread, take it.
+ if (prev.sid() == kFreeSid)
+ prev = Shadow(static_cast<RawShadow>(_mm_extract_epi32(shadow, 1)));
+ DoReportRace(thr, shadow_mem, cur, prev, typ);
}
ALWAYS_INLINE
-bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
- Shadow cur(a);
- for (uptr i = 0; i < kShadowCnt; i++) {
- Shadow old(LoadShadow(&s[i]));
- if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
- old.TidWithIgnore() == cur.TidWithIgnore() &&
- old.epoch() > sync_epoch && old.IsAtomic() == cur.IsAtomic() &&
- old.IsRead() <= cur.IsRead())
- return true;
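+// SSE version of CheckRaces: compares the current access against all 4 shadow
+// slots at once; candidates that may race are then re-checked against the
+// thread's vector clock on the SHARED path below.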
+bool CheckRaces(ThreadState* thr, RawShadow* shadow_mem, Shadow cur,
+ m128 shadow, m128 access, AccessType typ) {
+ // Note: empty/zero slots don't intersect with any access.
+ const m128 zero = _mm_setzero_si128();
+ const m128 mask_access = _mm_set1_epi32(0x000000ff);
+ const m128 mask_sid = _mm_set1_epi32(0x0000ff00);
+ const m128 mask_read_atomic = _mm_set1_epi32(0xc0000000);
+ const m128 access_and = _mm_and_si128(access, shadow);
+ const m128 access_xor = _mm_xor_si128(access, shadow);
+ const m128 intersect = _mm_and_si128(access_and, mask_access);
+ const m128 not_intersect = _mm_cmpeq_epi32(intersect, zero);
+ const m128 not_same_sid = _mm_and_si128(access_xor, mask_sid);
+ const m128 same_sid = _mm_cmpeq_epi32(not_same_sid, zero);
+ const m128 both_read_or_atomic = _mm_and_si128(access_and, mask_read_atomic);
+ const m128 no_race =
+ _mm_or_si128(_mm_or_si128(not_intersect, same_sid), both_read_or_atomic);
+ const int race_mask = _mm_movemask_epi8(_mm_cmpeq_epi32(no_race, zero));
+ if (UNLIKELY(race_mask))
+ goto SHARED;
+
+STORE : {
+ if (typ & kAccessCheckOnly)
+ return false;
+ // We could also replace different sid's if access is the same,
+ // rw weaker and happens before. However, just checking access below
+ // is not enough because we also need to check that !both_read_or_atomic
+ // (reads from different sids can be concurrent).
+ // Theoretically we could replace smaller accesses with larger accesses,
+ // but it's unclear if it's worth doing.
+ const m128 mask_access_sid = _mm_set1_epi32(0x0000ffff);
+ const m128 not_same_sid_access = _mm_and_si128(access_xor, mask_access_sid);
+ const m128 same_sid_access = _mm_cmpeq_epi32(not_same_sid_access, zero);
+ const m128 access_read_atomic =
+ _mm_set1_epi32((typ & (kAccessRead | kAccessAtomic)) << 30);
+ const m128 rw_weaker =
+ _mm_cmpeq_epi32(_mm_max_epu32(shadow, access_read_atomic), shadow);
+ const m128 rewrite = _mm_and_si128(same_sid_access, rw_weaker);
+ const int rewrite_mask = _mm_movemask_epi8(rewrite);
+ int index = __builtin_ffs(rewrite_mask);
+ if (UNLIKELY(index == 0)) {
+ const m128 empty = _mm_cmpeq_epi32(shadow, zero);
+ const int empty_mask = _mm_movemask_epi8(empty);
+ index = __builtin_ffs(empty_mask);
+ if (UNLIKELY(index == 0))
+ index = (atomic_load_relaxed(&thr->trace_pos) / 2) % 16;
}
+ StoreShadow(&shadow_mem[index / 4], cur.raw());
+ // We could zero other slots determined by rewrite_mask.
+ // That would help other threads to evict better slots,
+ // but it's unclear if it's worth it.
return false;
}
-#if TSAN_VECTORIZE
-# define SHUF(v0, v1, i0, i1, i2, i3) \
- _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(v0), \
- _mm_castsi128_ps(v1), \
- (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
-ALWAYS_INLINE
-bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
- // This is an optimized version of ContainsSameAccessSlow.
- // load current access into access[0:63]
- const m128 access = _mm_cvtsi64_si128(a);
- // duplicate high part of access in addr0:
- // addr0[0:31] = access[32:63]
- // addr0[32:63] = access[32:63]
- // addr0[64:95] = access[32:63]
- // addr0[96:127] = access[32:63]
- const m128 addr0 = SHUF(access, access, 1, 1, 1, 1);
- // load 4 shadow slots
- const m128 shadow0 = _mm_load_si128((__m128i *)s);
- const m128 shadow1 = _mm_load_si128((__m128i *)s + 1);
- // load high parts of 4 shadow slots into addr_vect:
- // addr_vect[0:31] = shadow0[32:63]
- // addr_vect[32:63] = shadow0[96:127]
- // addr_vect[64:95] = shadow1[32:63]
- // addr_vect[96:127] = shadow1[96:127]
- m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3);
- if (!is_write) {
- // set IsRead bit in addr_vect
- const m128 rw_mask1 = _mm_cvtsi64_si128(1 << 15);
- const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
- addr_vect = _mm_or_si128(addr_vect, rw_mask);
- }
- // addr0 == addr_vect?
- const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect);
- // epoch1[0:63] = sync_epoch
- const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch);
- // epoch[0:31] = sync_epoch[0:31]
- // epoch[32:63] = sync_epoch[0:31]
- // epoch[64:95] = sync_epoch[0:31]
- // epoch[96:127] = sync_epoch[0:31]
- const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0);
- // load low parts of shadow cell epochs into epoch_vect:
- // epoch_vect[0:31] = shadow0[0:31]
- // epoch_vect[32:63] = shadow0[64:95]
- // epoch_vect[64:95] = shadow1[0:31]
- // epoch_vect[96:127] = shadow1[64:95]
- const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
- // epoch_vect >= sync_epoch?
- const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch);
- // addr_res & epoch_res
- const m128 res = _mm_and_si128(addr_res, epoch_res);
- // mask[0] = res[7]
- // mask[1] = res[15]
- // ...
- // mask[15] = res[127]
- const int mask = _mm_movemask_epi8(res);
- return mask != 0;
-}
-#endif
+SHARED:
+ m128 thread_epochs = _mm_set1_epi32(0x7fffffff);
+  // Need to unroll this manually because _mm_extract_epi8/_mm_insert_epi32
+ // indexes must be constants.
+# define LOAD_EPOCH(idx) \
+ if (LIKELY(race_mask & (1 << (idx * 4)))) { \
+ u8 sid = _mm_extract_epi8(shadow, idx * 4 + 1); \
+ u16 epoch = static_cast<u16>(thr->clock.Get(static_cast<Sid>(sid))); \
+ thread_epochs = _mm_insert_epi32(thread_epochs, u32(epoch) << 16, idx); \
+ }
+ LOAD_EPOCH(0);
+ LOAD_EPOCH(1);
+ LOAD_EPOCH(2);
+ LOAD_EPOCH(3);
+# undef LOAD_EPOCH
+ const m128 mask_epoch = _mm_set1_epi32(0x3fff0000);
+ const m128 shadow_epochs = _mm_and_si128(shadow, mask_epoch);
+ const m128 concurrent = _mm_cmplt_epi32(thread_epochs, shadow_epochs);
+ const int concurrent_mask = _mm_movemask_epi8(concurrent);
+ if (LIKELY(concurrent_mask == 0))
+ goto STORE;
-ALWAYS_INLINE
-bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
-#if TSAN_VECTORIZE
- bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
- // NOTE: this check can fail if the shadow is concurrently mutated
- // by other threads. But it still can be useful if you modify
- // ContainsSameAccessFast and want to ensure that it's not completely broken.
- // DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
- return res;
-#else
- return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
-#endif
+ DoReportRaceV(thr, shadow_mem, cur, concurrent_mask, shadow, typ);
+ return true;
}
-ALWAYS_INLINE USED void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
- int kAccessSizeLog, bool kAccessIsWrite,
- bool kIsAtomic) {
- RawShadow *shadow_mem = MemToShadow(addr);
- DPrintf2(
- "#%d: MemoryAccess: @%p %p size=%d"
- " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
- (int)thr->fast_state.tid(), (void *)pc, (void *)addr,
- (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
- (uptr)shadow_mem[0], (uptr)shadow_mem[1], (uptr)shadow_mem[2],
- (uptr)shadow_mem[3]);
-#if SANITIZER_DEBUG
- if (!IsAppMem(addr)) {
- Printf("Access to non app mem %zx\n", addr);
- DCHECK(IsAppMem(addr));
- }
- if (!IsShadowMem(shadow_mem)) {
- Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
- DCHECK(IsShadowMem(shadow_mem));
- }
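+// Broadcasts the current access and loads the 4 shadow slots into the SSE
+// registers named `access` and `shadow` used by ContainsSameAccess/CheckRaces.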
+# define LOAD_CURRENT_SHADOW(cur, shadow_mem) \
+ const m128 access = _mm_set1_epi32(static_cast<u32>((cur).raw())); \
+ const m128 shadow = _mm_load_si128(reinterpret_cast<m128*>(shadow_mem))
#endif
- if (!SANITIZER_GO && !kAccessIsWrite && *shadow_mem == kShadowRodata) {
- // Access to .rodata section, no races here.
- // Measurements show that it can be 10-20% of all memory accesses.
- return;
+char* DumpShadow(char* buf, RawShadow raw) {
+ if (raw == Shadow::kEmpty) {
+ internal_snprintf(buf, 64, "0");
+ return buf;
}
+ Shadow s(raw);
+ AccessType typ;
+ s.GetAccess(nullptr, nullptr, &typ);
+ internal_snprintf(buf, 64, "{tid=%u@%u access=0x%x typ=%x}",
+ static_cast<u32>(s.sid()), static_cast<u32>(s.epoch()),
+ s.access(), static_cast<u32>(typ));
+ return buf;
+}
+
+// TryTrace* and TraceRestart* functions make it possible to turn memory access
+// and func entry/exit callbacks into leaf functions with all associated
+// performance benefits. These hottest callbacks make only 2 slow path calls:
+// race reporting and trace part switching. Race reporting is easy to turn into
+// a tail call: we just always return from the runtime after reporting a race.
+// But trace part switching is harder because it needs to happen in the middle
+// of callbacks. To turn it into a tail call we immediately return after the
+// TraceRestart* functions, and the TraceRestart* functions themselves recurse
+// into the callback after switching the trace part. As a result the hottest
+// callbacks contain only tail calls, which effectively makes them leaf
+// functions (they can use all registers, need no frame setup, etc).
+NOINLINE void TraceRestartMemoryAccess(ThreadState* thr, uptr pc, uptr addr,
+ uptr size, AccessType typ) {
+ TraceSwitchPart(thr);
+ MemoryAccess(thr, pc, addr, size, typ);
+}
+
+ALWAYS_INLINE USED void MemoryAccess(ThreadState* thr, uptr pc, uptr addr,
+ uptr size, AccessType typ) {
+ RawShadow* shadow_mem = MemToShadow(addr);
+ UNUSED char memBuf[4][64];
+ DPrintf2("#%d: Access: %d@%d %p/%zd typ=0x%x {%s, %s, %s, %s}\n", thr->tid,
+ static_cast<int>(thr->fast_state.sid()),
+ static_cast<int>(thr->fast_state.epoch()), (void*)addr, size,
+ static_cast<int>(typ), DumpShadow(memBuf[0], shadow_mem[0]),
+ DumpShadow(memBuf[1], shadow_mem[1]),
+ DumpShadow(memBuf[2], shadow_mem[2]),
+ DumpShadow(memBuf[3], shadow_mem[3]));
FastState fast_state = thr->fast_state;
- if (UNLIKELY(fast_state.GetIgnoreBit())) {
+ Shadow cur(fast_state, addr, size, typ);
+
+ LOAD_CURRENT_SHADOW(cur, shadow_mem);
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur, shadow, access, typ)))
return;
- }
+ if (UNLIKELY(fast_state.GetIgnoreBit()))
+ return;
+ if (!TryTraceMemoryAccess(thr, pc, addr, size, typ))
+ return TraceRestartMemoryAccess(thr, pc, addr, size, typ);
+ CheckRaces(thr, shadow_mem, cur, shadow, access, typ);
+}
- Shadow cur(fast_state);
- cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
- cur.SetWrite(kAccessIsWrite);
- cur.SetAtomic(kIsAtomic);
+void MemoryAccess16(ThreadState* thr, uptr pc, uptr addr, AccessType typ);
- if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(), thr->fast_synch_epoch,
- kAccessIsWrite))) {
- return;
- }
+NOINLINE
+void RestartMemoryAccess16(ThreadState* thr, uptr pc, uptr addr,
+ AccessType typ) {
+ TraceSwitchPart(thr);
+ MemoryAccess16(thr, pc, addr, typ);
+}
- if (kCollectHistory) {
- fast_state.IncrementEpoch();
- thr->fast_state = fast_state;
- TraceAddEvent(thr, fast_state, EventTypeMop, pc);
- cur.IncrementEpoch();
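+// A 16-byte access covers two adjacent shadow cells; check each cell
+// separately, but trace the whole range only once.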
+ALWAYS_INLINE USED void MemoryAccess16(ThreadState* thr, uptr pc, uptr addr,
+ AccessType typ) {
+ const uptr size = 16;
+ FastState fast_state = thr->fast_state;
+ if (UNLIKELY(fast_state.GetIgnoreBit()))
+ return;
+ Shadow cur(fast_state, 0, 8, typ);
+ RawShadow* shadow_mem = MemToShadow(addr);
+ bool traced = false;
+ {
+ LOAD_CURRENT_SHADOW(cur, shadow_mem);
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur, shadow, access, typ)))
+ goto SECOND;
+ if (!TryTraceMemoryAccessRange(thr, pc, addr, size, typ))
+ return RestartMemoryAccess16(thr, pc, addr, typ);
+ traced = true;
+ if (UNLIKELY(CheckRaces(thr, shadow_mem, cur, shadow, access, typ)))
+ return;
}
+SECOND:
+ shadow_mem += kShadowCnt;
+ LOAD_CURRENT_SHADOW(cur, shadow_mem);
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur, shadow, access, typ)))
+ return;
+ if (!traced && !TryTraceMemoryAccessRange(thr, pc, addr, size, typ))
+ return RestartMemoryAccess16(thr, pc, addr, typ);
+ CheckRaces(thr, shadow_mem, cur, shadow, access, typ);
+}
- MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
- shadow_mem, cur);
+NOINLINE
+void RestartUnalignedMemoryAccess(ThreadState* thr, uptr pc, uptr addr,
+ uptr size, AccessType typ) {
+ TraceSwitchPart(thr);
+ UnalignedMemoryAccess(thr, pc, addr, size, typ);
}
-// Called by MemoryAccessRange in tsan_rtl_thread.cpp
-ALWAYS_INLINE USED void MemoryAccessImpl(ThreadState *thr, uptr addr,
- int kAccessSizeLog,
- bool kAccessIsWrite, bool kIsAtomic,
- u64 *shadow_mem, Shadow cur) {
- if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(), thr->fast_synch_epoch,
- kAccessIsWrite))) {
+ALWAYS_INLINE USED void UnalignedMemoryAccess(ThreadState* thr, uptr pc,
+ uptr addr, uptr size,
+ AccessType typ) {
+ DCHECK_LE(size, 8);
+ FastState fast_state = thr->fast_state;
+ if (UNLIKELY(fast_state.GetIgnoreBit()))
return;
+ RawShadow* shadow_mem = MemToShadow(addr);
+ bool traced = false;
+ uptr size1 = Min<uptr>(size, RoundUp(addr + 1, kShadowCell) - addr);
+ {
+ Shadow cur(fast_state, addr, size1, typ);
+ LOAD_CURRENT_SHADOW(cur, shadow_mem);
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur, shadow, access, typ)))
+ goto SECOND;
+ if (!TryTraceMemoryAccessRange(thr, pc, addr, size, typ))
+ return RestartUnalignedMemoryAccess(thr, pc, addr, size, typ);
+ traced = true;
+ if (UNLIKELY(CheckRaces(thr, shadow_mem, cur, shadow, access, typ)))
+ return;
}
+SECOND:
+ uptr size2 = size - size1;
+ if (LIKELY(size2 == 0))
+ return;
+ shadow_mem += kShadowCnt;
+ Shadow cur(fast_state, 0, size2, typ);
+ LOAD_CURRENT_SHADOW(cur, shadow_mem);
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur, shadow, access, typ)))
+ return;
+ if (!traced && !TryTraceMemoryAccessRange(thr, pc, addr, size, typ))
+ return RestartUnalignedMemoryAccess(thr, pc, addr, size, typ);
+ CheckRaces(thr, shadow_mem, cur, shadow, access, typ);
+}
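
(Illustrative aside, not part of this change.) The size1/size2 split computed above can be checked with a small standalone sketch; kCell stands in for kShadowCell, everything else is hypothetical:

#include <cassert>
#include <cstdio>

constexpr unsigned long kCell = 8;  // stands in for kShadowCell

unsigned long RoundUp(unsigned long x, unsigned long a) { return (x + a - 1) / a * a; }

// Split an up-to-8-byte access that may straddle a cell boundary into the
// two cell-local pieces, mirroring size1/size2 in UnalignedMemoryAccess.
void Split(unsigned long addr, unsigned long size) {
  assert(size <= 8);
  unsigned long size1 = RoundUp(addr + 1, kCell) - addr;  // bytes left in the first cell
  if (size1 > size)
    size1 = size;
  unsigned long size2 = size - size1;  // spill-over into the next cell, possibly 0
  std::printf("addr=%lu size=%lu -> %lu bytes in the first cell, %lu in the second\n",
              addr, size, size1, size2);
}

int main() {
  Split(0, 8);   // aligned: 8 + 0
  Split(6, 4);   // straddles the boundary at 8: 2 + 2
  Split(13, 8);  // straddles the boundary at 16: 3 + 5
}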
- MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
- shadow_mem, cur);
+void ShadowSet(RawShadow* p, RawShadow* end, RawShadow v) {
+ DCHECK_LE(p, end);
+ DCHECK(IsShadowMem(p));
+ DCHECK(IsShadowMem(end));
+ UNUSED const uptr kAlign = kShadowCnt * kShadowSize;
+ DCHECK_EQ(reinterpret_cast<uptr>(p) % kAlign, 0);
+ DCHECK_EQ(reinterpret_cast<uptr>(end) % kAlign, 0);
+#if !TSAN_VECTORIZE
+ for (; p < end; p += kShadowCnt) {
+ p[0] = v;
+ for (uptr i = 1; i < kShadowCnt; i++) p[i] = Shadow::kEmpty;
+ }
+#else
+ m128 vv = _mm_setr_epi32(
+ static_cast<u32>(v), static_cast<u32>(Shadow::kEmpty),
+ static_cast<u32>(Shadow::kEmpty), static_cast<u32>(Shadow::kEmpty));
+ m128* vp = reinterpret_cast<m128*>(p);
+ m128* vend = reinterpret_cast<m128*>(end);
+ for (; vp < vend; vp++) _mm_store_si128(vp, vv);
+#endif
}
-static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
- u64 val) {
- (void)thr;
- (void)pc;
+static void MemoryRangeSet(uptr addr, uptr size, RawShadow val) {
if (size == 0)
return;
- // FIXME: fix me.
- uptr offset = addr % kShadowCell;
- if (offset) {
- offset = kShadowCell - offset;
- if (size <= offset)
- return;
- addr += offset;
- size -= offset;
- }
- DCHECK_EQ(addr % 8, 0);
+ DCHECK_EQ(addr % kShadowCell, 0);
+ DCHECK_EQ(size % kShadowCell, 0);
// If a user passes some insane arguments (memset(0)),
// let it just crash as usual.
if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
return;
+ RawShadow* begin = MemToShadow(addr);
+ RawShadow* end = begin + size / kShadowCell * kShadowCnt;
// Don't want to touch lots of shadow memory.
// If a program maps 10MB stack, there is no need to reset the whole range.
- size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
// UnmapOrDie/MmapFixedNoReserve does not work on Windows.
- if (SANITIZER_WINDOWS || size < common_flags()->clear_shadow_mmap_threshold) {
- RawShadow *p = MemToShadow(addr);
- CHECK(IsShadowMem(p));
- CHECK(IsShadowMem(p + size * kShadowCnt / kShadowCell - 1));
- // FIXME: may overwrite a part outside the region
- for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
- p[i++] = val;
- for (uptr j = 1; j < kShadowCnt; j++) p[i++] = 0;
- }
- } else {
- // The region is big, reset only beginning and end.
- const uptr kPageSize = GetPageSizeCached();
- RawShadow *begin = MemToShadow(addr);
- RawShadow *end = begin + size / kShadowCell * kShadowCnt;
- RawShadow *p = begin;
- // Set at least first kPageSize/2 to page boundary.
- while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
- *p++ = val;
- for (uptr j = 1; j < kShadowCnt; j++) *p++ = 0;
- }
- // Reset middle part.
- RawShadow *p1 = p;
- p = RoundDown(end, kPageSize);
- if (!MmapFixedSuperNoReserve((uptr)p1, (uptr)p - (uptr)p1))
+ if (SANITIZER_WINDOWS ||
+ size <= common_flags()->clear_shadow_mmap_threshold) {
+ ShadowSet(begin, end, val);
+ return;
+ }
+ // The region is big, reset only beginning and end.
+ const uptr kPageSize = GetPageSizeCached();
+ // Set at least first kPageSize/2 to page boundary.
+ RawShadow* mid1 =
+ Min(end, reinterpret_cast<RawShadow*>(RoundUp(
+ reinterpret_cast<uptr>(begin) + kPageSize / 2, kPageSize)));
+ ShadowSet(begin, mid1, val);
+ // Reset middle part.
+ RawShadow* mid2 = RoundDown(end, kPageSize);
+ if (mid2 > mid1) {
+ if (!MmapFixedSuperNoReserve((uptr)mid1, (uptr)mid2 - (uptr)mid1))
Die();
- // Set the ending.
- while (p < end) {
- *p++ = val;
- for (uptr j = 1; j < kShadowCnt; j++) *p++ = 0;
- }
}
+ // Set the ending.
+ ShadowSet(mid2, end, val);
}
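
(Illustrative aside, not part of this change.) A simplified standalone sketch of the same clear-the-ends, remap-the-middle strategy, assuming the target region is anonymous mmap'ed memory; it omits the kPageSize/2 heuristic and uses plain POSIX calls rather than the sanitizer wrappers:

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <sys/mman.h>
#include <unistd.h>

// Clear [p, p+size): write the partial head and tail pages explicitly and map
// fresh zero pages over the page-aligned middle, so a huge region is not
// touched byte by byte.
void ClearRange(char *p, size_t size) {
  const uintptr_t page = sysconf(_SC_PAGESIZE);
  uintptr_t begin = (uintptr_t)p, end = begin + size;
  uintptr_t mid1 = (begin + page - 1) / page * page;  // first page boundary
  uintptr_t mid2 = end / page * page;                 // last page boundary
  if (mid2 <= mid1) {  // small range: just clear it directly
    memset(p, 0, size);
    return;
  }
  memset(p, 0, mid1 - begin);  // head
  if (mmap((void *)mid1, mid2 - mid1, PROT_READ | PROT_WRITE,
           MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == MAP_FAILED)
    memset((void *)mid1, 0, mid2 - mid1);  // fall back to clearing explicitly
  memset((void *)mid2, 0, end - mid2);  // tail
}

int main() {
  size_t size = 1 << 20;
  char *p = (char *)mmap(nullptr, size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  p[12345] = 1;
  ClearRange(p + 5, size - 10);
  std::printf("p[12345] after clearing: %d\n", p[12345]);  // prints 0
}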
-void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
- MemoryRangeSet(thr, pc, addr, size, 0);
+void MemoryResetRange(ThreadState* thr, uptr pc, uptr addr, uptr size) {
+ uptr addr1 = RoundDown(addr, kShadowCell);
+ uptr size1 = RoundUp(size + addr - addr1, kShadowCell);
+ MemoryRangeSet(addr1, size1, Shadow::kEmpty);
}
-void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
- // Processing more than 1k (4k of shadow) is expensive,
+void MemoryRangeFreed(ThreadState* thr, uptr pc, uptr addr, uptr size) {
+ // Callers must lock the slot to ensure synchronization with the reset.
+ // The problem with "freed" memory is that it's not "monotonic"
+ // with respect to bug detection: freed memory is bad to access,
+ // but then if the heap block is reallocated later, it's good to access.
+// As a result, a garbage "freed" shadow can lead to a false positive
+ // if it happens to match a real free in the thread trace,
+ // but the heap block was reallocated before the current memory access,
+ // so it's still good to access. It's not the case with data races.
+ DCHECK(thr->slot_locked);
+ DCHECK_EQ(addr % kShadowCell, 0);
+ size = RoundUp(size, kShadowCell);
+ // Processing more than 1k (2k of shadow) is expensive,
// can cause excessive memory consumption (user does not necessarily touch
// the whole range) and most likely unnecessary.
- if (size > 1024)
- size = 1024;
- CHECK_EQ(thr->is_freeing, false);
- thr->is_freeing = true;
- MemoryAccessRange(thr, pc, addr, size, true);
- thr->is_freeing = false;
- if (kCollectHistory) {
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+ size = Min<uptr>(size, 1024);
+ const AccessType typ =
+ kAccessWrite | kAccessFree | kAccessCheckOnly | kAccessNoRodata;
+ TraceMemoryAccessRange(thr, pc, addr, size, typ);
+ RawShadow* shadow_mem = MemToShadow(addr);
+ Shadow cur(thr->fast_state, 0, kShadowCell, typ);
+#if TSAN_VECTORIZE
+ const m128 access = _mm_set1_epi32(static_cast<u32>(cur.raw()));
+ const m128 freed = _mm_setr_epi32(
+ static_cast<u32>(Shadow::FreedMarker()),
+ static_cast<u32>(Shadow::FreedInfo(cur.sid(), cur.epoch())), 0, 0);
+ for (; size; size -= kShadowCell, shadow_mem += kShadowCnt) {
+ const m128 shadow = _mm_load_si128((m128*)shadow_mem);
+ if (UNLIKELY(CheckRaces(thr, shadow_mem, cur, shadow, access, typ)))
+ return;
+ _mm_store_si128((m128*)shadow_mem, freed);
+ }
+#else
+ for (; size; size -= kShadowCell, shadow_mem += kShadowCnt) {
+ if (UNLIKELY(CheckRaces(thr, shadow_mem, cur, 0, 0, typ)))
+ return;
+ StoreShadow(&shadow_mem[0], Shadow::FreedMarker());
+ StoreShadow(&shadow_mem[1], Shadow::FreedInfo(cur.sid(), cur.epoch()));
+ StoreShadow(&shadow_mem[2], Shadow::kEmpty);
+ StoreShadow(&shadow_mem[3], Shadow::kEmpty);
}
- Shadow s(thr->fast_state);
- s.ClearIgnoreBit();
- s.MarkAsFreed();
- s.SetWrite(true);
- s.SetAddr0AndSizeLog(0, 3);
- MemoryRangeSet(thr, pc, addr, size, s.raw());
+#endif
}
-void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
- if (kCollectHistory) {
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
- }
- Shadow s(thr->fast_state);
- s.ClearIgnoreBit();
- s.SetWrite(true);
- s.SetAddr0AndSizeLog(0, 3);
- MemoryRangeSet(thr, pc, addr, size, s.raw());
+void MemoryRangeImitateWrite(ThreadState* thr, uptr pc, uptr addr, uptr size) {
+ DCHECK_EQ(addr % kShadowCell, 0);
+ size = RoundUp(size, kShadowCell);
+ TraceMemoryAccessRange(thr, pc, addr, size, kAccessWrite);
+ Shadow cur(thr->fast_state, 0, 8, kAccessWrite);
+ MemoryRangeSet(addr, size, cur.raw());
}
-void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
+void MemoryRangeImitateWriteOrResetRange(ThreadState* thr, uptr pc, uptr addr,
uptr size) {
if (thr->ignore_reads_and_writes == 0)
MemoryRangeImitateWrite(thr, pc, addr, size);
@@ -518,14 +655,29 @@ void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
MemoryResetRange(thr, pc, addr, size);
}
-void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
- bool is_write) {
- if (size == 0)
- return;
+ALWAYS_INLINE
+bool MemoryAccessRangeOne(ThreadState* thr, RawShadow* shadow_mem, Shadow cur,
+ AccessType typ) {
+ LOAD_CURRENT_SHADOW(cur, shadow_mem);
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur, shadow, access, typ)))
+ return false;
+ return CheckRaces(thr, shadow_mem, cur, shadow, access, typ);
+}
+
+template <bool is_read>
+NOINLINE void RestartMemoryAccessRange(ThreadState* thr, uptr pc, uptr addr,
+ uptr size) {
+ TraceSwitchPart(thr);
+ MemoryAccessRangeT<is_read>(thr, pc, addr, size);
+}
- RawShadow *shadow_mem = MemToShadow(addr);
- DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n", thr->tid,
- (void *)pc, (void *)addr, (int)size, is_write);
+template <bool is_read>
+void MemoryAccessRangeT(ThreadState* thr, uptr pc, uptr addr, uptr size) {
+ const AccessType typ =
+ (is_read ? kAccessRead : kAccessWrite) | kAccessNoRodata;
+ RawShadow* shadow_mem = MemToShadow(addr);
+ DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_read=%d\n", thr->tid,
+ (void*)pc, (void*)addr, (int)size, is_read);
#if SANITIZER_DEBUG
if (!IsAppMem(addr)) {
@@ -537,65 +689,62 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
DCHECK(IsAppMem(addr + size - 1));
}
if (!IsShadowMem(shadow_mem)) {
- Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
+ Printf("Bad shadow addr %p (%zx)\n", static_cast<void*>(shadow_mem), addr);
DCHECK(IsShadowMem(shadow_mem));
}
- if (!IsShadowMem(shadow_mem + size * kShadowCnt / 8 - 1)) {
- Printf("Bad shadow addr %p (%zx)\n", shadow_mem + size * kShadowCnt / 8 - 1,
+ if (!IsShadowMem(shadow_mem + size * kShadowCnt - 1)) {
+ Printf("Bad shadow addr %p (%zx)\n",
+ static_cast<void*>(shadow_mem + size * kShadowCnt - 1),
addr + size - 1);
- DCHECK(IsShadowMem(shadow_mem + size * kShadowCnt / 8 - 1));
+ DCHECK(IsShadowMem(shadow_mem + size * kShadowCnt - 1));
}
#endif
- if (*shadow_mem == kShadowRodata) {
- DCHECK(!is_write);
- // Access to .rodata section, no races here.
- // Measurements show that it can be 10-20% of all memory accesses.
+ // Access to .rodata section, no races here.
+ // Measurements show that it can be 10-20% of all memory accesses.
+ // Check here once to not check for every access separately.
+ // Note: we could (and should) do this only for the is_read case
+ // (writes shouldn't go to .rodata). But it happens in Chromium tests:
+ // https://bugs.chromium.org/p/chromium/issues/detail?id=1275581#c19
+ // Details are unknown since it happens only on CI machines.
+ if (*shadow_mem == Shadow::kRodata)
return;
- }
FastState fast_state = thr->fast_state;
- if (fast_state.GetIgnoreBit())
+ if (UNLIKELY(fast_state.GetIgnoreBit()))
return;
- fast_state.IncrementEpoch();
- thr->fast_state = fast_state;
- TraceAddEvent(thr, fast_state, EventTypeMop, pc);
-
- bool unaligned = (addr % kShadowCell) != 0;
+ if (!TryTraceMemoryAccessRange(thr, pc, addr, size, typ))
+ return RestartMemoryAccessRange<is_read>(thr, pc, addr, size);
- // Handle unaligned beginning, if any.
- for (; addr % kShadowCell && size; addr++, size--) {
- int const kAccessSizeLog = 0;
- Shadow cur(fast_state);
- cur.SetWrite(is_write);
- cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
- MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false, shadow_mem,
- cur);
- }
- if (unaligned)
+ if (UNLIKELY(addr % kShadowCell)) {
+ // Handle unaligned beginning, if any.
+ uptr size1 = Min(size, RoundUp(addr, kShadowCell) - addr);
+ size -= size1;
+ Shadow cur(fast_state, addr, size1, typ);
+ if (UNLIKELY(MemoryAccessRangeOne(thr, shadow_mem, cur, typ)))
+ return;
shadow_mem += kShadowCnt;
+ }
// Handle middle part, if any.
- for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) {
- int const kAccessSizeLog = 3;
- Shadow cur(fast_state);
- cur.SetWrite(is_write);
- cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
- MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false, shadow_mem,
- cur);
- shadow_mem += kShadowCnt;
+ Shadow cur(fast_state, 0, kShadowCell, typ);
+ for (; size >= kShadowCell; size -= kShadowCell, shadow_mem += kShadowCnt) {
+ if (UNLIKELY(MemoryAccessRangeOne(thr, shadow_mem, cur, typ)))
+ return;
}
// Handle ending, if any.
- for (; size; addr++, size--) {
- int const kAccessSizeLog = 0;
- Shadow cur(fast_state);
- cur.SetWrite(is_write);
- cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
- MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false, shadow_mem,
- cur);
+ if (UNLIKELY(size)) {
+ Shadow cur(fast_state, 0, size, typ);
+ if (UNLIKELY(MemoryAccessRangeOne(thr, shadow_mem, cur, typ)))
+ return;
}
}
+template void MemoryAccessRangeT<true>(ThreadState* thr, uptr pc, uptr addr,
+ uptr size);
+template void MemoryAccessRangeT<false>(ThreadState* thr, uptr pc, uptr addr,
+ uptr size);
+
} // namespace __tsan
#if !SANITIZER_GO
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_amd64.S b/compiler-rt/lib/tsan/rtl/tsan_rtl_amd64.S
index 632b19d18158..f848be9dd46c 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_amd64.S
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_amd64.S
@@ -9,242 +9,6 @@
.section __TEXT,__text
#endif
-ASM_HIDDEN(__tsan_trace_switch)
-.globl ASM_SYMBOL(__tsan_trace_switch_thunk)
-ASM_SYMBOL(__tsan_trace_switch_thunk):
- CFI_STARTPROC
- _CET_ENDBR
- # Save scratch registers.
- push %rax
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rax, 0)
- push %rcx
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rcx, 0)
- push %rdx
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rdx, 0)
- push %rsi
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rsi, 0)
- push %rdi
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rdi, 0)
- push %r8
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%r8, 0)
- push %r9
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%r9, 0)
- push %r10
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%r10, 0)
- push %r11
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%r11, 0)
- # All XMM registers are caller-saved.
- sub $0x100, %rsp
- CFI_ADJUST_CFA_OFFSET(0x100)
- vmovdqu %xmm0, 0x0(%rsp)
- vmovdqu %xmm1, 0x10(%rsp)
- vmovdqu %xmm2, 0x20(%rsp)
- vmovdqu %xmm3, 0x30(%rsp)
- vmovdqu %xmm4, 0x40(%rsp)
- vmovdqu %xmm5, 0x50(%rsp)
- vmovdqu %xmm6, 0x60(%rsp)
- vmovdqu %xmm7, 0x70(%rsp)
- vmovdqu %xmm8, 0x80(%rsp)
- vmovdqu %xmm9, 0x90(%rsp)
- vmovdqu %xmm10, 0xa0(%rsp)
- vmovdqu %xmm11, 0xb0(%rsp)
- vmovdqu %xmm12, 0xc0(%rsp)
- vmovdqu %xmm13, 0xd0(%rsp)
- vmovdqu %xmm14, 0xe0(%rsp)
- vmovdqu %xmm15, 0xf0(%rsp)
- # Align stack frame.
- push %rbx # non-scratch
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rbx, 0)
- mov %rsp, %rbx # save current rsp
- CFI_DEF_CFA_REGISTER(%rbx)
- shr $4, %rsp # clear 4 lsb, align to 16
- shl $4, %rsp
-
- call ASM_SYMBOL(__tsan_trace_switch)
-
- # Unalign stack frame back.
- mov %rbx, %rsp # restore the original rsp
- CFI_DEF_CFA_REGISTER(%rsp)
- pop %rbx
- CFI_ADJUST_CFA_OFFSET(-8)
- # Restore scratch registers.
- vmovdqu 0x0(%rsp), %xmm0
- vmovdqu 0x10(%rsp), %xmm1
- vmovdqu 0x20(%rsp), %xmm2
- vmovdqu 0x30(%rsp), %xmm3
- vmovdqu 0x40(%rsp), %xmm4
- vmovdqu 0x50(%rsp), %xmm5
- vmovdqu 0x60(%rsp), %xmm6
- vmovdqu 0x70(%rsp), %xmm7
- vmovdqu 0x80(%rsp), %xmm8
- vmovdqu 0x90(%rsp), %xmm9
- vmovdqu 0xa0(%rsp), %xmm10
- vmovdqu 0xb0(%rsp), %xmm11
- vmovdqu 0xc0(%rsp), %xmm12
- vmovdqu 0xd0(%rsp), %xmm13
- vmovdqu 0xe0(%rsp), %xmm14
- vmovdqu 0xf0(%rsp), %xmm15
- add $0x100, %rsp
- CFI_ADJUST_CFA_OFFSET(-0x100)
- pop %r11
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %r10
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %r9
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %r8
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rdi
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rsi
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rdx
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rcx
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rax
- CFI_ADJUST_CFA_OFFSET(-8)
- CFI_RESTORE(%rax)
- CFI_RESTORE(%rbx)
- CFI_RESTORE(%rcx)
- CFI_RESTORE(%rdx)
- CFI_RESTORE(%rsi)
- CFI_RESTORE(%rdi)
- CFI_RESTORE(%r8)
- CFI_RESTORE(%r9)
- CFI_RESTORE(%r10)
- CFI_RESTORE(%r11)
- ret
- CFI_ENDPROC
-
-ASM_HIDDEN(__tsan_report_race)
-.globl ASM_SYMBOL(__tsan_report_race_thunk)
-ASM_SYMBOL(__tsan_report_race_thunk):
- CFI_STARTPROC
- _CET_ENDBR
- # Save scratch registers.
- push %rax
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rax, 0)
- push %rcx
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rcx, 0)
- push %rdx
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rdx, 0)
- push %rsi
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rsi, 0)
- push %rdi
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rdi, 0)
- push %r8
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%r8, 0)
- push %r9
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%r9, 0)
- push %r10
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%r10, 0)
- push %r11
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%r11, 0)
- # All XMM registers are caller-saved.
- sub $0x100, %rsp
- CFI_ADJUST_CFA_OFFSET(0x100)
- vmovdqu %xmm0, 0x0(%rsp)
- vmovdqu %xmm1, 0x10(%rsp)
- vmovdqu %xmm2, 0x20(%rsp)
- vmovdqu %xmm3, 0x30(%rsp)
- vmovdqu %xmm4, 0x40(%rsp)
- vmovdqu %xmm5, 0x50(%rsp)
- vmovdqu %xmm6, 0x60(%rsp)
- vmovdqu %xmm7, 0x70(%rsp)
- vmovdqu %xmm8, 0x80(%rsp)
- vmovdqu %xmm9, 0x90(%rsp)
- vmovdqu %xmm10, 0xa0(%rsp)
- vmovdqu %xmm11, 0xb0(%rsp)
- vmovdqu %xmm12, 0xc0(%rsp)
- vmovdqu %xmm13, 0xd0(%rsp)
- vmovdqu %xmm14, 0xe0(%rsp)
- vmovdqu %xmm15, 0xf0(%rsp)
- # Align stack frame.
- push %rbx # non-scratch
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rbx, 0)
- mov %rsp, %rbx # save current rsp
- CFI_DEF_CFA_REGISTER(%rbx)
- shr $4, %rsp # clear 4 lsb, align to 16
- shl $4, %rsp
-
- call ASM_SYMBOL(__tsan_report_race)
-
- # Unalign stack frame back.
- mov %rbx, %rsp # restore the original rsp
- CFI_DEF_CFA_REGISTER(%rsp)
- pop %rbx
- CFI_ADJUST_CFA_OFFSET(-8)
- # Restore scratch registers.
- vmovdqu 0x0(%rsp), %xmm0
- vmovdqu 0x10(%rsp), %xmm1
- vmovdqu 0x20(%rsp), %xmm2
- vmovdqu 0x30(%rsp), %xmm3
- vmovdqu 0x40(%rsp), %xmm4
- vmovdqu 0x50(%rsp), %xmm5
- vmovdqu 0x60(%rsp), %xmm6
- vmovdqu 0x70(%rsp), %xmm7
- vmovdqu 0x80(%rsp), %xmm8
- vmovdqu 0x90(%rsp), %xmm9
- vmovdqu 0xa0(%rsp), %xmm10
- vmovdqu 0xb0(%rsp), %xmm11
- vmovdqu 0xc0(%rsp), %xmm12
- vmovdqu 0xd0(%rsp), %xmm13
- vmovdqu 0xe0(%rsp), %xmm14
- vmovdqu 0xf0(%rsp), %xmm15
- add $0x100, %rsp
- CFI_ADJUST_CFA_OFFSET(-0x100)
- pop %r11
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %r10
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %r9
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %r8
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rdi
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rsi
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rdx
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rcx
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rax
- CFI_ADJUST_CFA_OFFSET(-8)
- CFI_RESTORE(%rax)
- CFI_RESTORE(%rbx)
- CFI_RESTORE(%rcx)
- CFI_RESTORE(%rdx)
- CFI_RESTORE(%rsi)
- CFI_RESTORE(%rdi)
- CFI_RESTORE(%r8)
- CFI_RESTORE(%r9)
- CFI_RESTORE(%r10)
- CFI_RESTORE(%r11)
- ret
- CFI_ENDPROC
-
ASM_HIDDEN(__tsan_setjmp)
#if defined(__NetBSD__)
.comm _ZN14__interception15real___setjmp14E,8,8
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
index 7d6b41116aa6..5d31005c2af0 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
@@ -23,6 +23,8 @@
namespace __tsan {
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
+void ReportDestroyLocked(ThreadState *thr, uptr pc, uptr addr,
+ FastState last_lock, StackID creation_stack_id);
struct Callback final : public DDCallback {
ThreadState *thr;
@@ -36,17 +38,17 @@ struct Callback final : public DDCallback {
}
StackID Unwind() override { return CurrentStackId(thr, pc); }
- int UniqueTid() override { return thr->unique_id; }
+ int UniqueTid() override { return thr->tid; }
};
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
Callback cb(thr, pc);
ctx->dd->MutexInit(&cb, &s->dd);
- s->dd.ctx = s->GetId();
+ s->dd.ctx = s->addr;
}
static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
- uptr addr, u64 mid) {
+ uptr addr, StackID creation_stack_id) {
// In Go, these misuses are either impossible, or detected by std lib,
// or false positives (e.g. unlock in a different thread).
if (SANITIZER_GO)
@@ -55,7 +57,7 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
return;
ThreadRegistryLock l(&ctx->thread_registry);
ScopedReport rep(typ);
- rep.AddMutex(mid);
+ rep.AddMutex(addr, creation_stack_id);
VarSizeStackTrace trace;
ObtainCurrentStack(thr, pc, &trace);
rep.AddStack(trace, true);
@@ -63,95 +65,93 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
OutputReport(thr, rep);
}
+static void RecordMutexLock(ThreadState *thr, uptr pc, uptr addr,
+ StackID stack_id, bool write) {
+ auto typ = write ? EventType::kLock : EventType::kRLock;
+ // Note: it's important to trace before modifying mutex set
+ // because tracing can switch trace part and we write the current
+ // mutex set in the beginning of each part.
+ // If we do it in the opposite order, we will write already reduced
+ // mutex set in the beginning of the part and then trace unlock again.
+ TraceMutexLock(thr, typ, pc, addr, stack_id);
+ thr->mset.AddAddr(addr, stack_id, write);
+}
+
+static void RecordMutexUnlock(ThreadState *thr, uptr addr) {
+ // See the comment in RecordMutexLock re order of operations.
+ TraceMutexUnlock(thr, addr);
+ thr->mset.DelAddr(addr);
+}
+
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
- if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
- CHECK(!thr->is_freeing);
- thr->is_freeing = true;
+ if (!(flagz & MutexFlagLinkerInit) && pc && IsAppMem(addr))
MemoryAccess(thr, pc, addr, 1, kAccessWrite);
- thr->is_freeing = false;
- }
- SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
- Lock l(&s->mtx);
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
s->SetFlags(flagz & MutexCreationFlagMask);
// Save stack in the case the sync object was created before as atomic.
- if (!SANITIZER_GO && s->creation_stack_id == 0)
+ if (!SANITIZER_GO && s->creation_stack_id == kInvalidStackID)
s->creation_stack_id = CurrentStackId(thr, pc);
}
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
bool unlock_locked = false;
- u64 mid = 0;
- u64 last_lock = 0;
+ StackID creation_stack_id;
+ FastState last_lock;
{
- SyncVar *s = ctx->metamap.GetSyncIfExists(addr);
- if (s == 0)
+ auto s = ctx->metamap.GetSyncIfExists(addr);
+ if (!s)
return;
- Lock l(&s->mtx);
- if ((flagz & MutexFlagLinkerInit) || s->IsFlagSet(MutexFlagLinkerInit) ||
- ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
- // Destroy is no-op for linker-initialized mutexes.
- return;
- }
- if (common_flags()->detect_deadlocks) {
- Callback cb(thr, pc);
- ctx->dd->MutexDestroy(&cb, &s->dd);
- ctx->dd->MutexInit(&cb, &s->dd);
- }
- if (flags()->report_destroy_locked && s->owner_tid != kInvalidTid &&
- !s->IsFlagSet(MutexFlagBroken)) {
- s->SetFlags(MutexFlagBroken);
- unlock_locked = true;
- }
- mid = s->GetId();
- last_lock = s->last_lock;
- if (!unlock_locked)
- s->Reset(thr->proc()); // must not reset it before the report is printed
- }
- if (unlock_locked && ShouldReport(thr, ReportTypeMutexDestroyLocked)) {
- ThreadRegistryLock l(&ctx->thread_registry);
- ScopedReport rep(ReportTypeMutexDestroyLocked);
- rep.AddMutex(mid);
- VarSizeStackTrace trace;
- ObtainCurrentStack(thr, pc, &trace);
- rep.AddStack(trace, true);
- FastState last(last_lock);
- RestoreStack(last.tid(), last.epoch(), &trace, 0);
- rep.AddStack(trace, true);
- rep.AddLocation(addr, 1);
- OutputReport(thr, rep);
-
- SyncVar *s = ctx->metamap.GetSyncIfExists(addr);
- if (s != 0) {
- Lock l(&s->mtx);
- s->Reset(thr->proc());
+ SlotLocker locker(thr);
+ {
+ Lock lock(&s->mtx);
+ creation_stack_id = s->creation_stack_id;
+ last_lock = s->last_lock;
+ if ((flagz & MutexFlagLinkerInit) || s->IsFlagSet(MutexFlagLinkerInit) ||
+ ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
+ // Destroy is no-op for linker-initialized mutexes.
+ return;
+ }
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexDestroy(&cb, &s->dd);
+ ctx->dd->MutexInit(&cb, &s->dd);
+ }
+ if (flags()->report_destroy_locked && s->owner_tid != kInvalidTid &&
+ !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ unlock_locked = true;
+ }
+ s->Reset();
}
+ // Imitate a memory write to catch unlock-destroy races.
+ if (pc && IsAppMem(addr))
+ MemoryAccess(thr, pc, addr, 1, kAccessWrite | kAccessFree);
}
- thr->mset.Remove(mid);
- // Imitate a memory write to catch unlock-destroy races.
- // Do this outside of sync mutex, because it can report a race which locks
- // sync mutexes.
- if (IsAppMem(addr))
- MemoryAccess(thr, pc, addr, 1, kAccessWrite | kAccessFree);
+ if (unlock_locked && ShouldReport(thr, ReportTypeMutexDestroyLocked))
+ ReportDestroyLocked(thr, pc, addr, last_lock, creation_stack_id);
+ thr->mset.DelAddr(addr, true);
// s will be destroyed and freed in MetaMap::FreeBlock.
}
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
- if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
- SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
- {
- ReadLock l(&s->mtx);
- s->UpdateFlags(flagz);
- if (s->owner_tid != thr->tid) {
- Callback cb(thr, pc);
- ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
- }
- }
- Callback cb(thr, pc);
- ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ if (flagz & MutexFlagTryLock)
+ return;
+ if (!common_flags()->detect_deadlocks)
+ return;
+ Callback cb(thr, pc);
+ {
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ ReadLock lock(&s->mtx);
+ s->UpdateFlags(flagz);
+ if (s->owner_tid != thr->tid)
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
}
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
@@ -161,48 +161,51 @@ void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
CHECK_GT(rec, 0);
else
rec = 1;
- if (IsAppMem(addr))
+ if (pc && IsAppMem(addr))
MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
- u64 mid = 0;
+ bool report_double_lock = false;
bool pre_lock = false;
bool first = false;
- bool report_double_lock = false;
+ StackID creation_stack_id = kInvalidStackID;
{
- SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
- Lock l(&s->mtx);
- s->UpdateFlags(flagz);
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
- if (s->owner_tid == kInvalidTid) {
- CHECK_EQ(s->recursion, 0);
- s->owner_tid = thr->tid;
- s->last_lock = thr->fast_state.raw();
- } else if (s->owner_tid == thr->tid) {
- CHECK_GT(s->recursion, 0);
- } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
- s->SetFlags(MutexFlagBroken);
- report_double_lock = true;
- }
- first = s->recursion == 0;
- s->recursion += rec;
- if (first) {
- AcquireImpl(thr, pc, &s->clock);
- AcquireImpl(thr, pc, &s->read_clock);
- } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
- }
- thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
- if (first && common_flags()->detect_deadlocks) {
- pre_lock =
- (flagz & MutexFlagDoPreLockOnPostLock) && !(flagz & MutexFlagTryLock);
- Callback cb(thr, pc);
- if (pre_lock)
- ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
- ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ creation_stack_id = s->creation_stack_id;
+ RecordMutexLock(thr, pc, addr, creation_stack_id, true);
+ {
+ Lock lock(&s->mtx);
+ first = s->recursion == 0;
+ s->UpdateFlags(flagz);
+ if (s->owner_tid == kInvalidTid) {
+ CHECK_EQ(s->recursion, 0);
+ s->owner_tid = thr->tid;
+ s->last_lock = thr->fast_state;
+ } else if (s->owner_tid == thr->tid) {
+ CHECK_GT(s->recursion, 0);
+ } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_double_lock = true;
+ }
+ s->recursion += rec;
+ if (first) {
+ if (!thr->ignore_sync) {
+ thr->clock.Acquire(s->clock);
+ thr->clock.Acquire(s->read_clock);
+ }
+ }
+ if (first && common_flags()->detect_deadlocks) {
+ pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
+ !(flagz & MutexFlagTryLock);
+ Callback cb(thr, pc);
+ if (pre_lock)
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
+ ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
+ }
}
- mid = s->GetId();
}
if (report_double_lock)
- ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
+ ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr,
+ creation_stack_id);
if (first && pre_lock && common_flags()->detect_deadlocks) {
Callback cb(thr, pc);
ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
@@ -211,40 +214,47 @@ void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
- if (IsAppMem(addr))
+ if (pc && IsAppMem(addr))
MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
- u64 mid = 0;
+ StackID creation_stack_id;
+ RecordMutexUnlock(thr, addr);
bool report_bad_unlock = false;
int rec = 0;
{
- SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
- Lock l(&s->mtx);
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
- if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
- if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
- s->SetFlags(MutexFlagBroken);
- report_bad_unlock = true;
- }
- } else {
- rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
- s->recursion -= rec;
- if (s->recursion == 0) {
- s->owner_tid = kInvalidTid;
- ReleaseStoreImpl(thr, pc, &s->clock);
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ bool released = false;
+ {
+ Lock lock(&s->mtx);
+ creation_stack_id = s->creation_stack_id;
+ if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
+ if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_bad_unlock = true;
+ }
} else {
+ rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
+ s->recursion -= rec;
+ if (s->recursion == 0) {
+ s->owner_tid = kInvalidTid;
+ if (!thr->ignore_sync) {
+ thr->clock.ReleaseStore(&s->clock);
+ released = true;
+ }
+ }
+ }
+ if (common_flags()->detect_deadlocks && s->recursion == 0 &&
+ !report_bad_unlock) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
}
}
- thr->mset.Del(s->GetId(), true);
- if (common_flags()->detect_deadlocks && s->recursion == 0 &&
- !report_bad_unlock) {
- Callback cb(thr, pc);
- ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
- }
- mid = s->GetId();
+ if (released)
+ IncrementEpoch(thr);
}
if (report_bad_unlock)
- ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr,
+ creation_stack_id);
if (common_flags()->detect_deadlocks && !report_bad_unlock) {
Callback cb(thr, pc);
ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
@@ -254,53 +264,56 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
- if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
- {
- SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
- ReadLock l(&s->mtx);
- s->UpdateFlags(flagz);
- Callback cb(thr, pc);
- ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
- }
- Callback cb(thr, pc);
- ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ if ((flagz & MutexFlagTryLock) || !common_flags()->detect_deadlocks)
+ return;
+ Callback cb(thr, pc);
+ {
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ ReadLock lock(&s->mtx);
+ s->UpdateFlags(flagz);
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
}
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
- if (IsAppMem(addr))
+ if (pc && IsAppMem(addr))
MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
- u64 mid = 0;
bool report_bad_lock = false;
bool pre_lock = false;
+ StackID creation_stack_id = kInvalidStackID;
{
- SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
- ReadLock l(&s->mtx);
- s->UpdateFlags(flagz);
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
- if (s->owner_tid != kInvalidTid) {
- if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
- s->SetFlags(MutexFlagBroken);
- report_bad_lock = true;
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ creation_stack_id = s->creation_stack_id;
+ RecordMutexLock(thr, pc, addr, creation_stack_id, false);
+ {
+ ReadLock lock(&s->mtx);
+ s->UpdateFlags(flagz);
+ if (s->owner_tid != kInvalidTid) {
+ if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_bad_lock = true;
+ }
+ }
+ if (!thr->ignore_sync)
+ thr->clock.Acquire(s->clock);
+ s->last_lock = thr->fast_state;
+ if (common_flags()->detect_deadlocks) {
+ pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
+ !(flagz & MutexFlagTryLock);
+ Callback cb(thr, pc);
+ if (pre_lock)
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
+ ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
}
}
- AcquireImpl(thr, pc, &s->clock);
- s->last_lock = thr->fast_state.raw();
- thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
- if (common_flags()->detect_deadlocks) {
- pre_lock =
- (flagz & MutexFlagDoPreLockOnPostLock) && !(flagz & MutexFlagTryLock);
- Callback cb(thr, pc);
- if (pre_lock)
- ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
- ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
- }
- mid = s->GetId();
}
if (report_bad_lock)
- ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr,
+ creation_stack_id);
if (pre_lock && common_flags()->detect_deadlocks) {
Callback cb(thr, pc);
ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
@@ -309,31 +322,39 @@ void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
- if (IsAppMem(addr))
+ if (pc && IsAppMem(addr))
MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
- u64 mid = 0;
+ RecordMutexUnlock(thr, addr);
+ StackID creation_stack_id;
bool report_bad_unlock = false;
{
- SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
- Lock l(&s->mtx);
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
- if (s->owner_tid != kInvalidTid) {
- if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
- s->SetFlags(MutexFlagBroken);
- report_bad_unlock = true;
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ bool released = false;
+ {
+ Lock lock(&s->mtx);
+ creation_stack_id = s->creation_stack_id;
+ if (s->owner_tid != kInvalidTid) {
+ if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_bad_unlock = true;
+ }
+ }
+ if (!thr->ignore_sync) {
+ thr->clock.Release(&s->read_clock);
+ released = true;
+ }
+ if (common_flags()->detect_deadlocks && s->recursion == 0) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
}
}
- ReleaseImpl(thr, pc, &s->read_clock);
- if (common_flags()->detect_deadlocks && s->recursion == 0) {
- Callback cb(thr, pc);
- ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
- }
- mid = s->GetId();
+ if (released)
+ IncrementEpoch(thr);
}
- thr->mset.Del(mid, false);
if (report_bad_unlock)
- ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr,
+ creation_stack_id);
if (common_flags()->detect_deadlocks) {
Callback cb(thr, pc);
ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
@@ -342,44 +363,52 @@ void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
- if (IsAppMem(addr))
+ if (pc && IsAppMem(addr))
MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
- u64 mid = 0;
+ RecordMutexUnlock(thr, addr);
+ StackID creation_stack_id;
bool report_bad_unlock = false;
+ bool write = true;
{
- SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
- Lock l(&s->mtx);
- bool write = true;
- if (s->owner_tid == kInvalidTid) {
- // Seems to be read unlock.
- write = false;
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
- ReleaseImpl(thr, pc, &s->read_clock);
- } else if (s->owner_tid == thr->tid) {
- // Seems to be write unlock.
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
- CHECK_GT(s->recursion, 0);
- s->recursion--;
- if (s->recursion == 0) {
- s->owner_tid = kInvalidTid;
- ReleaseStoreImpl(thr, pc, &s->clock);
- } else {
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ bool released = false;
+ {
+ Lock lock(&s->mtx);
+ creation_stack_id = s->creation_stack_id;
+ if (s->owner_tid == kInvalidTid) {
+ // Seems to be read unlock.
+ write = false;
+ if (!thr->ignore_sync) {
+ thr->clock.Release(&s->read_clock);
+ released = true;
+ }
+ } else if (s->owner_tid == thr->tid) {
+ // Seems to be write unlock.
+ CHECK_GT(s->recursion, 0);
+ s->recursion--;
+ if (s->recursion == 0) {
+ s->owner_tid = kInvalidTid;
+ if (!thr->ignore_sync) {
+ thr->clock.ReleaseStore(&s->clock);
+ released = true;
+ }
+ }
+ } else if (!s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_bad_unlock = true;
+ }
+ if (common_flags()->detect_deadlocks && s->recursion == 0) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
}
- } else if (!s->IsFlagSet(MutexFlagBroken)) {
- s->SetFlags(MutexFlagBroken);
- report_bad_unlock = true;
- }
- thr->mset.Del(s->GetId(), write);
- if (common_flags()->detect_deadlocks && s->recursion == 0) {
- Callback cb(thr, pc);
- ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
}
- mid = s->GetId();
+ if (released)
+ IncrementEpoch(thr);
}
if (report_bad_unlock)
- ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr,
+ creation_stack_id);
if (common_flags()->detect_deadlocks) {
Callback cb(thr, pc);
ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
@@ -388,143 +417,112 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
- SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
- Lock l(&s->mtx);
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ Lock lock(&s->mtx);
s->owner_tid = kInvalidTid;
s->recursion = 0;
}
void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
- SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
- ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, s->GetId());
+ StackID creation_stack_id = kInvalidStackID;
+ {
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ if (s)
+ creation_stack_id = s->creation_stack_id;
+ }
+ ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr,
+ creation_stack_id);
}
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = ctx->metamap.GetSyncIfExists(addr);
+ auto s = ctx->metamap.GetSyncIfExists(addr);
if (!s)
return;
- ReadLock l(&s->mtx);
- AcquireImpl(thr, pc, &s->clock);
-}
-
-static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
- ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
- ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
- u64 epoch = tctx->epoch1;
- if (tctx->status == ThreadStatusRunning) {
- epoch = tctx->thr->fast_state.epoch();
- tctx->thr->clock.NoteGlobalAcquire(epoch);
- }
- thr->clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
+ SlotLocker locker(thr);
+ if (!s->clock)
+ return;
+ ReadLock lock(&s->mtx);
+ thr->clock.Acquire(s->clock);
}
void AcquireGlobal(ThreadState *thr) {
DPrintf("#%d: AcquireGlobal\n", thr->tid);
if (thr->ignore_sync)
return;
- ThreadRegistryLock l(&ctx->thread_registry);
- ctx->thread_registry.RunCallbackForEachThreadLocked(UpdateClockCallback, thr);
-}
-
-void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) {
- DPrintf("#%d: ReleaseStoreAcquire %zx\n", thr->tid, addr);
- if (thr->ignore_sync)
- return;
- SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
- Lock l(&s->mtx);
- thr->fast_state.IncrementEpoch();
- // Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
- ReleaseStoreAcquireImpl(thr, pc, &s->clock);
+ SlotLocker locker(thr);
+ for (auto &slot : ctx->slots) thr->clock.Set(slot.sid, slot.epoch());
}
void Release(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: Release %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
- Lock l(&s->mtx);
- thr->fast_state.IncrementEpoch();
- // Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
- ReleaseImpl(thr, pc, &s->clock);
+ SlotLocker locker(thr);
+ {
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
+ Lock lock(&s->mtx);
+ thr->clock.Release(&s->clock);
+ }
+ IncrementEpoch(thr);
}
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
- Lock l(&s->mtx);
- thr->fast_state.IncrementEpoch();
- // Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
- ReleaseStoreImpl(thr, pc, &s->clock);
-}
-
-#if !SANITIZER_GO
-static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
- ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
- ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
- u64 epoch = tctx->epoch1;
- if (tctx->status == ThreadStatusRunning)
- epoch = tctx->thr->fast_state.epoch();
- thr->last_sleep_clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
-}
-
-void AfterSleep(ThreadState *thr, uptr pc) {
- DPrintf("#%d: AfterSleep\n", thr->tid);
- if (thr->ignore_sync)
- return;
- thr->last_sleep_stack_id = CurrentStackId(thr, pc);
- ThreadRegistryLock l(&ctx->thread_registry);
- ctx->thread_registry.RunCallbackForEachThreadLocked(UpdateSleepClockCallback,
- thr);
-}
-#endif
-
-void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
- if (thr->ignore_sync)
- return;
- thr->clock.set(thr->fast_state.epoch());
- thr->clock.acquire(&thr->proc()->clock_cache, c);
-}
-
-void ReleaseStoreAcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
- if (thr->ignore_sync)
- return;
- thr->clock.set(thr->fast_state.epoch());
- thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.releaseStoreAcquire(&thr->proc()->clock_cache, c);
+ SlotLocker locker(thr);
+ {
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
+ Lock lock(&s->mtx);
+ thr->clock.ReleaseStore(&s->clock);
+ }
+ IncrementEpoch(thr);
}
-void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: ReleaseStoreAcquire %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- thr->clock.set(thr->fast_state.epoch());
- thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.release(&thr->proc()->clock_cache, c);
+ SlotLocker locker(thr);
+ {
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
+ Lock lock(&s->mtx);
+ thr->clock.ReleaseStoreAcquire(&s->clock);
+ }
+ IncrementEpoch(thr);
}
-void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
- if (thr->ignore_sync)
- return;
- thr->clock.set(thr->fast_state.epoch());
- thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
+void IncrementEpoch(ThreadState *thr) {
+ DCHECK(!thr->ignore_sync);
+ DCHECK(thr->slot_locked);
+ Epoch epoch = EpochInc(thr->fast_state.epoch());
+ if (!EpochOverflow(epoch)) {
+ Sid sid = thr->fast_state.sid();
+ thr->clock.Set(sid, epoch);
+ thr->fast_state.SetEpoch(epoch);
+ thr->slot->SetEpoch(epoch);
+ TraceTime(thr);
+ }
}
-void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+#if !SANITIZER_GO
+void AfterSleep(ThreadState *thr, uptr pc) {
+ DPrintf("#%d: AfterSleep\n", thr->tid);
if (thr->ignore_sync)
return;
- thr->clock.set(thr->fast_state.epoch());
- thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.acq_rel(&thr->proc()->clock_cache, c);
+ thr->last_sleep_stack_id = CurrentStackId(thr, pc);
+ thr->last_sleep_clock.Reset();
+ SlotLocker locker(thr);
+ for (auto &slot : ctx->slots)
+ thr->last_sleep_clock.Set(slot.sid, slot.epoch());
}
+#endif
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
if (r == 0 || !ShouldReport(thr, ReportTypeDeadlock))
@@ -532,7 +530,7 @@ void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
ThreadRegistryLock l(&ctx->thread_registry);
ScopedReport rep(ReportTypeDeadlock);
for (int i = 0; i < r->n; i++) {
- rep.AddMutex(r->loop[i].mtx_ctx0);
+ rep.AddMutex(r->loop[i].mtx_ctx0, r->loop[i].stk[0]);
rep.AddUniqueTid((int)r->loop[i].thr_ctx);
rep.AddThread((int)r->loop[i].thr_ctx);
}
@@ -540,7 +538,7 @@ void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
for (int i = 0; i < r->n; i++) {
for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
u32 stk = r->loop[i].stk[j];
- if (stk && stk != 0xffffffff) {
+ if (stk && stk != kInvalidStackID) {
rep.AddStack(StackDepotGet(stk), true);
} else {
// Sometimes we fail to extract the stack trace (FIXME: investigate),
@@ -552,4 +550,28 @@ void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
OutputReport(thr, rep);
}
+void ReportDestroyLocked(ThreadState *thr, uptr pc, uptr addr,
+ FastState last_lock, StackID creation_stack_id) {
+ // We need to lock the slot during RestoreStack because it protects
+ // the slot journal.
+ Lock slot_lock(&ctx->slots[static_cast<uptr>(last_lock.sid())].mtx);
+ ThreadRegistryLock l0(&ctx->thread_registry);
+ Lock slots_lock(&ctx->slot_mtx);
+ ScopedReport rep(ReportTypeMutexDestroyLocked);
+ rep.AddMutex(addr, creation_stack_id);
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ rep.AddStack(trace, true);
+
+ Tid tid;
+ DynamicMutexSet mset;
+ uptr tag;
+ if (!RestoreStack(EventType::kLock, last_lock.sid(), last_lock.epoch(), addr,
+ 0, kAccessWrite, &tid, &trace, mset, &tag))
+ return;
+ rep.AddStack(trace, true);
+ rep.AddLocation(addr, 1);
+ OutputReport(thr, rep);
+}
+
} // namespace __tsan
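
(Illustrative aside, not part of this change.) The thr->clock.Acquire / thr->clock.ReleaseStore pairs used for mutex lock and unlock above implement standard vector-clock join/copy semantics; a minimal standalone sketch with made-up sizes and names, not TSan's actual classes:

#include <algorithm>
#include <array>
#include <cstdio>

constexpr int kThreads = 4;
using Clock = std::array<unsigned, kThreads>;  // clock[i] = epoch of thread i

struct Thread {
  int id;
  Clock clock;
  void Tick() { clock[id]++; }  // local event: advance own epoch
  // Acquire: join the sync object's clock into the thread's clock.
  void Acquire(const Clock &src) {
    for (int i = 0; i < kThreads; i++) clock[i] = std::max(clock[i], src[i]);
  }
  // ReleaseStore: overwrite the sync object's clock with the thread's clock.
  void ReleaseStore(Clock *dst) { *dst = clock; }
};

int main() {
  Clock mutex_clock{};            // plays the role of the per-mutex s->clock
  Thread t0{0, {}}, t1{1, {}};
  t0.Tick();                      // t0 performs some writes
  t0.ReleaseStore(&mutex_clock);  // t0 unlocks the mutex
  t1.Acquire(mutex_clock);        // t1 locks the mutex
  std::printf("t1 now sees t0 up to epoch %u\n", t1.clock[0]);  // prints 1
}

After the Acquire, any access t1 makes is ordered after everything t0 did before the unlock, which is exactly the happens-before edge the lock/unlock code above establishes.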
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_proc.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_proc.cpp
index def61cca14d5..5acc3967208e 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_proc.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_proc.cpp
@@ -35,7 +35,6 @@ void ProcDestroy(Processor *proc) {
#if !SANITIZER_GO
AllocatorProcFinish(proc);
#endif
- ctx->clock_alloc.FlushCache(&proc->clock_cache);
ctx->metamap.OnProcIdle(proc);
if (common_flags()->detect_deadlocks)
ctx->dd->DestroyPhysicalThread(proc->dd_pt);
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
index f332a6a8d1d8..58949ead07b3 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
@@ -175,22 +175,26 @@ void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {
}
void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
- StackTrace stack, const MutexSet *mset) {
+ Tid tid, StackTrace stack,
+ const MutexSet *mset) {
+ uptr addr0, size;
+ AccessType typ;
+ s.GetAccess(&addr0, &size, &typ);
auto *mop = New<ReportMop>();
rep_->mops.PushBack(mop);
- mop->tid = s.tid();
- mop->addr = addr + s.addr0();
- mop->size = s.size();
- mop->write = s.IsWrite();
- mop->atomic = s.IsAtomic();
+ mop->tid = tid;
+ mop->addr = addr + addr0;
+ mop->size = size;
+ mop->write = !(typ & kAccessRead);
+ mop->atomic = typ & kAccessAtomic;
mop->stack = SymbolizeStack(stack);
mop->external_tag = external_tag;
if (mop->stack)
mop->stack->suppressable = true;
for (uptr i = 0; i < mset->Size(); i++) {
MutexSet::Desc d = mset->Get(i);
- u64 mid = this->AddMutex(d.id);
- ReportMopMutex mtx = {mid, d.write};
+ int id = this->AddMutex(d.addr, d.stack_id);
+ ReportMopMutex mtx = {id, d.write};
mop->mset.PushBack(mtx);
}
}
@@ -219,18 +223,6 @@ void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
}
#if !SANITIZER_GO
-static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) {
- int unique_id = *(int *)arg;
- return tctx->unique_id == (u32)unique_id;
-}
-
-static ThreadContext *FindThreadByUidLocked(Tid unique_id) {
- ctx->thread_registry.CheckLocked();
- return static_cast<ThreadContext *>(
- ctx->thread_registry.FindThreadContextLocked(
- FindThreadByUidLockedCallback, &unique_id));
-}
-
static ThreadContext *FindThreadByTidLocked(Tid tid) {
ctx->thread_registry.CheckLocked();
return static_cast<ThreadContext *>(
@@ -262,55 +254,24 @@ ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
}
#endif
-void ScopedReportBase::AddThread(Tid unique_tid, bool suppressable) {
+void ScopedReportBase::AddThread(Tid tid, bool suppressable) {
#if !SANITIZER_GO
- if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
+ if (const ThreadContext *tctx = FindThreadByTidLocked(tid))
AddThread(tctx, suppressable);
#endif
}
-void ScopedReportBase::AddMutex(const SyncVar *s) {
+int ScopedReportBase::AddMutex(uptr addr, StackID creation_stack_id) {
for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
- if (rep_->mutexes[i]->id == s->uid)
- return;
- }
- auto *rm = New<ReportMutex>();
- rep_->mutexes.PushBack(rm);
- rm->id = s->uid;
- rm->addr = s->addr;
- rm->destroyed = false;
- rm->stack = SymbolizeStackId(s->creation_stack_id);
-}
-
-u64 ScopedReportBase::AddMutex(u64 id) {
- u64 uid = 0;
- u64 mid = id;
- uptr addr = SyncVar::SplitId(id, &uid);
- SyncVar *s = ctx->metamap.GetSyncIfExists(addr);
- // Check that the mutex is still alive.
- // Another mutex can be created at the same address,
- // so check uid as well.
- if (s && s->CheckId(uid)) {
- Lock l(&s->mtx);
- mid = s->uid;
- AddMutex(s);
- } else {
- AddDeadMutex(id);
- }
- return mid;
-}
-
-void ScopedReportBase::AddDeadMutex(u64 id) {
- for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
- if (rep_->mutexes[i]->id == id)
- return;
+ if (rep_->mutexes[i]->addr == addr)
+ return rep_->mutexes[i]->id;
}
auto *rm = New<ReportMutex>();
rep_->mutexes.PushBack(rm);
- rm->id = id;
- rm->addr = 0;
- rm->destroyed = true;
- rm->stack = 0;
+ rm->id = rep_->mutexes.Size() - 1;
+ rm->addr = addr;
+ rm->stack = SymbolizeStackId(creation_stack_id);
+ return rm->id;
}
void ScopedReportBase::AddLocation(uptr addr, uptr size) {
@@ -327,7 +288,7 @@ void ScopedReportBase::AddLocation(uptr addr, uptr size) {
loc->tid = creat_tid;
loc->stack = SymbolizeStackId(creat_stack);
rep_->locs.PushBack(loc);
- ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
+ ThreadContext *tctx = FindThreadByTidLocked(creat_tid);
if (tctx)
AddThread(tctx);
return;
@@ -343,16 +304,15 @@ void ScopedReportBase::AddLocation(uptr addr, uptr size) {
if (!b)
b = JavaHeapBlock(addr, &block_begin);
if (b != 0) {
- ThreadContext *tctx = FindThreadByTidLocked(b->tid);
auto *loc = New<ReportLocation>();
loc->type = ReportLocationHeap;
loc->heap_chunk_start = block_begin;
loc->heap_chunk_size = b->siz;
loc->external_tag = b->tag;
- loc->tid = tctx ? tctx->tid : b->tid;
+ loc->tid = b->tid;
loc->stack = SymbolizeStackId(b->stk);
rep_->locs.PushBack(loc);
- if (tctx)
+ if (ThreadContext *tctx = FindThreadByTidLocked(b->tid))
AddThread(tctx);
return;
}
@@ -387,71 +347,6 @@ ScopedReport::ScopedReport(ReportType typ, uptr tag)
ScopedReport::~ScopedReport() {}
-void RestoreStack(Tid tid, const u64 epoch, VarSizeStackTrace *stk,
- MutexSet *mset, uptr *tag) {
- // This function restores stack trace and mutex set for the thread/epoch.
- // It does so by getting stack trace and mutex set at the beginning of
- // trace part, and then replaying the trace till the given epoch.
- Trace* trace = ThreadTrace(tid);
- ReadLock l(&trace->mtx);
- const int partidx = (epoch / kTracePartSize) % TraceParts();
- TraceHeader* hdr = &trace->headers[partidx];
- if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize)
- return;
- CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0);
- const u64 epoch0 = RoundDown(epoch, TraceSize());
- const u64 eend = epoch % TraceSize();
- const u64 ebegin = RoundDown(eend, kTracePartSize);
- DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
- tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
- Vector<uptr> stack;
- stack.Resize(hdr->stack0.size + 64);
- for (uptr i = 0; i < hdr->stack0.size; i++) {
- stack[i] = hdr->stack0.trace[i];
- DPrintf2(" #%02zu: pc=%zx\n", i, stack[i]);
- }
- if (mset)
- *mset = hdr->mset0;
- uptr pos = hdr->stack0.size;
- Event *events = (Event*)GetThreadTrace(tid);
- for (uptr i = ebegin; i <= eend; i++) {
- Event ev = events[i];
- EventType typ = (EventType)(ev >> kEventPCBits);
- uptr pc = (uptr)(ev & ((1ull << kEventPCBits) - 1));
- DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc);
- if (typ == EventTypeMop) {
- stack[pos] = pc;
- } else if (typ == EventTypeFuncEnter) {
- if (stack.Size() < pos + 2)
- stack.Resize(pos + 2);
- stack[pos++] = pc;
- } else if (typ == EventTypeFuncExit) {
- if (pos > 0)
- pos--;
- }
- if (mset) {
- if (typ == EventTypeLock) {
- mset->Add(pc, true, epoch0 + i);
- } else if (typ == EventTypeUnlock) {
- mset->Del(pc, true);
- } else if (typ == EventTypeRLock) {
- mset->Add(pc, false, epoch0 + i);
- } else if (typ == EventTypeRUnlock) {
- mset->Del(pc, false);
- }
- }
- for (uptr j = 0; j <= pos; j++)
- DPrintf2(" #%zu: %zx\n", j, stack[j]);
- }
- if (pos == 0 && stack[0] == 0)
- return;
- pos++;
- stk->Init(&stack[0], pos);
- ExtractTagFromStack(stk, tag);
-}
-
-namespace v3 {
-
// Replays the trace up to last_pos position in the last part
// or up to the provided epoch/sid (whichever is earlier)
// and calls the provided function f for each event.
@@ -469,6 +364,7 @@ void TraceReplay(Trace *trace, TracePart *last, Event *last_pos, Sid sid,
Event *end = &part->events[TracePart::kSize - 1];
if (part == last)
end = last_pos;
+ f(kFreeSid, kEpochOver, nullptr); // notify about part start
for (Event *evp = &part->events[0]; evp < end; evp++) {
Event *evp0 = evp;
if (!evp->is_access && !evp->is_func) {
@@ -528,21 +424,36 @@ static constexpr bool IsWithinAccess(uptr addr1, uptr size1, uptr addr2,
return addr1 >= addr2 && addr1 + size1 <= addr2 + size2;
}
-// Replays the trace of thread tid up to the target event identified
-// by sid/epoch/addr/size/typ and restores and returns stack, mutex set
+// Replays the trace of slot sid up to the target event identified
+// by epoch/addr/size/typ and restores and returns tid, stack, mutex set
// and tag for that event. If there are multiple such events, it returns
// the last one. Returns false if the event is not present in the trace.
-bool RestoreStack(Tid tid, EventType type, Sid sid, Epoch epoch, uptr addr,
- uptr size, AccessType typ, VarSizeStackTrace *pstk,
+bool RestoreStack(EventType type, Sid sid, Epoch epoch, uptr addr, uptr size,
+ AccessType typ, Tid *ptid, VarSizeStackTrace *pstk,
MutexSet *pmset, uptr *ptag) {
// This function restores stack trace and mutex set for the thread/epoch.
// It does so by getting stack trace and mutex set at the beginning of
// trace part, and then replaying the trace till the given epoch.
- DPrintf2("RestoreStack: tid=%u sid=%u@%u addr=0x%zx/%zu typ=%x\n", tid,
+ DPrintf2("RestoreStack: sid=%u@%u addr=0x%zx/%zu typ=%x\n",
static_cast<int>(sid), static_cast<int>(epoch), addr, size,
static_cast<int>(typ));
ctx->slot_mtx.CheckLocked(); // needed to prevent trace part recycling
ctx->thread_registry.CheckLocked();
+ TidSlot *slot = &ctx->slots[static_cast<uptr>(sid)];
+ Tid tid = kInvalidTid;
+ // Need to lock the slot mutex as it protects slot->journal.
+ slot->mtx.CheckLocked();
+ for (uptr i = 0; i < slot->journal.Size(); i++) {
+ DPrintf2(" journal: epoch=%d tid=%d\n",
+ static_cast<int>(slot->journal[i].epoch), slot->journal[i].tid);
+ if (i == slot->journal.Size() - 1 || slot->journal[i + 1].epoch > epoch) {
+ tid = slot->journal[i].tid;
+ break;
+ }
+ }
+ if (tid == kInvalidTid)
+ return false;
+ *ptid = tid;
ThreadContext *tctx =
static_cast<ThreadContext *>(ctx->thread_registry.GetThreadLocked(tid));
Trace *trace = &tctx->trace;
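The journal lookup above is the core of the new tid recovery: each slot keeps a journal of (epoch, tid) pairs ordered by the epoch at which a thread took the slot, so the owner at a given epoch is the last entry whose epoch does not exceed it. A standalone sketch of that lookup, with toy types in place of the runtime's TidSlot/Epoch (illustrative only, not part of the patch):

#include <cstddef>
#include <cstdint>
#include <vector>

// Toy journal entry: the epoch at which `tid` attached to the slot.
struct JournalEntry {
  uint16_t epoch;
  int tid;
};

constexpr int kInvalidTid = -1;

// Returns the tid that owned the slot at `epoch`: the last journal entry whose
// epoch is <= `epoch`, assuming the journal is sorted by epoch and the target
// is not older than the first entry. Returns kInvalidTid for an empty journal.
int FindOwner(const std::vector<JournalEntry>& journal, uint16_t epoch) {
  for (size_t i = 0; i < journal.size(); i++) {
    if (i == journal.size() - 1 || journal[i + 1].epoch > epoch)
      return journal[i].tid;
  }
  return kInvalidTid;
}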
@@ -553,8 +464,10 @@ bool RestoreStack(Tid tid, EventType type, Sid sid, Epoch epoch, uptr addr,
{
Lock lock(&trace->mtx);
first_part = trace->parts.Front();
- if (!first_part)
+ if (!first_part) {
+ DPrintf2("RestoreStack: tid=%d trace=%p no trace parts\n", tid, trace);
return false;
+ }
last_part = trace->parts.Back();
last_pos = trace->final_pos;
if (tctx->thr)
@@ -567,9 +480,18 @@ bool RestoreStack(Tid tid, EventType type, Sid sid, Epoch epoch, uptr addr,
bool is_read = typ & kAccessRead;
bool is_atomic = typ & kAccessAtomic;
bool is_free = typ & kAccessFree;
+ DPrintf2("RestoreStack: tid=%d parts=[%p-%p] last_pos=%p\n", tid,
+ trace->parts.Front(), last_part, last_pos);
TraceReplay(
trace, last_part, last_pos, sid, epoch,
[&](Sid ev_sid, Epoch ev_epoch, Event *evp) {
+ if (evp == nullptr) {
+ // Each trace part is self-consistent, so we reset state.
+ stack.Resize(0);
+ mset->Reset();
+ prev_pc = 0;
+ return;
+ }
bool match = ev_sid == sid && ev_epoch == epoch;
if (evp->is_access) {
if (evp->is_func == 0 && evp->type == EventType::kAccessExt &&
@@ -592,12 +514,15 @@ bool RestoreStack(Tid tid, EventType type, Sid sid, Epoch epoch, uptr addr,
if (evp->is_func) {
auto *ev = reinterpret_cast<EventFunc *>(evp);
if (ev->pc) {
- DPrintf2(" FuncEnter: pc=0x%llx\n", ev->pc);
+ DPrintf2(" FuncEnter: pc=0x%llx\n", ev->pc);
stack.PushBack(ev->pc);
} else {
- DPrintf2(" FuncExit\n");
- CHECK(stack.Size());
- stack.PopBack();
+ DPrintf2(" FuncExit\n");
+          // We don't log pathologically large stacks in each part;
+          // if the stack was truncated we can have more func exits than
+          // entries.
+ if (stack.Size())
+ stack.PopBack();
}
return;
}
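The FuncEnter/FuncExit handling above rebuilds the call stack by replay: push a frame for each function entry, pop for each exit, and ignore surplus exits, because a stack truncated at the start of a part can legitimately produce more exits than entries. A self-contained sketch of that reconstruction (simplified event stream, not the real trace encoding):

#include <cstdint>
#include <vector>

// Replay function enter/exit events into a call stack. pc != 0 means
// "enter pc", pc == 0 means "exit". Surplus exits are ignored, mirroring
// the truncated-stack tolerance above.
std::vector<uint64_t> ReplayStack(const std::vector<uint64_t>& func_events) {
  std::vector<uint64_t> stack;
  for (uint64_t pc : func_events) {
    if (pc)
      stack.push_back(pc);
    else if (!stack.empty())
      stack.pop_back();
  }
  return stack;
}

// Example: {0 /*stray exit*/, 0x100, 0x200, 0 /*exit*/, 0x300}
// replays to the stack {0x100, 0x300}.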
@@ -666,8 +591,6 @@ bool RestoreStack(Tid tid, EventType type, Sid sid, Epoch epoch, uptr addr,
return found;
}
-} // namespace v3
-
bool RacyStacks::operator==(const RacyStacks &other) const {
if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
return true;
@@ -758,10 +681,7 @@ bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
ctx->fired_suppressions.push_back(s);
}
{
- bool old_is_freeing = thr->is_freeing;
- thr->is_freeing = false;
bool suppressed = OnReport(rep, pc_or_addr != 0);
- thr->is_freeing = old_is_freeing;
if (suppressed) {
thr->current_report = nullptr;
return false;
@@ -808,97 +728,72 @@ static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
return false;
}
-static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
- Shadow s0(thr->racy_state[0]);
- Shadow s1(thr->racy_state[1]);
- CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
- if (!s0.IsAtomic() && !s1.IsAtomic())
- return true;
- if (s0.IsAtomic() && s1.IsFreed())
- return true;
- if (s1.IsAtomic() && thr->is_freeing)
- return true;
- return false;
-}
-
-void ReportRace(ThreadState *thr) {
+void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
+ AccessType typ0) {
CheckedMutex::CheckNoLocks();
// Symbolizer makes lots of intercepted calls. If we try to process them,
// at best it will cause deadlocks on internal mutexes.
ScopedIgnoreInterceptors ignore;
+ uptr addr = ShadowToMem(shadow_mem);
+ DPrintf("#%d: ReportRace %p\n", thr->tid, (void *)addr);
if (!ShouldReport(thr, ReportTypeRace))
return;
- if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
+ uptr addr_off0, size0;
+ cur.GetAccess(&addr_off0, &size0, nullptr);
+ uptr addr_off1, size1, typ1;
+ old.GetAccess(&addr_off1, &size1, &typ1);
+ if (!flags()->report_atomic_races &&
+ ((typ0 & kAccessAtomic) || (typ1 & kAccessAtomic)) &&
+ !(typ0 & kAccessFree) && !(typ1 & kAccessFree))
return;
- bool freed = false;
- {
- Shadow s(thr->racy_state[1]);
- freed = s.GetFreedAndReset();
- thr->racy_state[1] = s.raw();
- }
-
- uptr addr = ShadowToMem(thr->racy_shadow_addr);
- uptr addr_min = 0;
- uptr addr_max = 0;
- {
- uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
- uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
- uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
- uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
- addr_min = min(a0, a1);
- addr_max = max(e0, e1);
- if (IsExpectedReport(addr_min, addr_max - addr_min))
- return;
- }
+ const uptr kMop = 2;
+ Shadow s[kMop] = {cur, old};
+ uptr addr0 = addr + addr_off0;
+ uptr addr1 = addr + addr_off1;
+ uptr end0 = addr0 + size0;
+ uptr end1 = addr1 + size1;
+ uptr addr_min = min(addr0, addr1);
+ uptr addr_max = max(end0, end1);
+ if (IsExpectedReport(addr_min, addr_max - addr_min))
+ return;
if (HandleRacyAddress(thr, addr_min, addr_max))
return;
- ReportType typ = ReportTypeRace;
- if (thr->is_vptr_access && freed)
- typ = ReportTypeVptrUseAfterFree;
- else if (thr->is_vptr_access)
- typ = ReportTypeVptrRace;
- else if (freed)
- typ = ReportTypeUseAfterFree;
+ ReportType rep_typ = ReportTypeRace;
+ if ((typ0 & kAccessVptr) && (typ1 & kAccessFree))
+ rep_typ = ReportTypeVptrUseAfterFree;
+ else if (typ0 & kAccessVptr)
+ rep_typ = ReportTypeVptrRace;
+ else if (typ1 & kAccessFree)
+ rep_typ = ReportTypeUseAfterFree;
- if (IsFiredSuppression(ctx, typ, addr))
+ if (IsFiredSuppression(ctx, rep_typ, addr))
return;
- const uptr kMop = 2;
VarSizeStackTrace traces[kMop];
- uptr tags[kMop] = {kExternalTagNone};
- uptr toppc = TraceTopPC(thr);
- if (toppc >> kEventPCBits) {
- // This is a work-around for a known issue.
- // The scenario where this happens is rather elaborate and requires
- // an instrumented __sanitizer_report_error_summary callback and
- // a __tsan_symbolize_external callback and a race during a range memory
- // access larger than 8 bytes. MemoryAccessRange adds the current PC to
- // the trace and starts processing memory accesses. A first memory access
- // triggers a race, we report it and call the instrumented
- // __sanitizer_report_error_summary, which adds more stuff to the trace
-  // since it is instrumented. Then a second memory access in MemoryAccessRange
- // also triggers a race and we get here and call TraceTopPC to get the
- // current PC, however now it contains some unrelated events from the
- // callback. Most likely, TraceTopPC will now return a EventTypeFuncExit
- // event. Later we subtract -1 from it (in GetPreviousInstructionPc)
- // and the resulting PC has kExternalPCBit set, so we pass it to
- // __tsan_symbolize_external_ex. __tsan_symbolize_external_ex is within its
- // rights to crash since the PC is completely bogus.
- // test/tsan/double_race.cpp contains a test case for this.
- toppc = 0;
- }
- ObtainCurrentStack(thr, toppc, &traces[0], &tags[0]);
- if (IsFiredSuppression(ctx, typ, traces[0]))
+ Tid tids[kMop] = {thr->tid, kInvalidTid};
+ uptr tags[kMop] = {kExternalTagNone, kExternalTagNone};
+
+ ObtainCurrentStack(thr, thr->trace_prev_pc, &traces[0], &tags[0]);
+ if (IsFiredSuppression(ctx, rep_typ, traces[0]))
return;
- DynamicMutexSet mset2;
- Shadow s2(thr->racy_state[1]);
- RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2, &tags[1]);
- if (IsFiredSuppression(ctx, typ, traces[1]))
+ DynamicMutexSet mset1;
+ MutexSet *mset[kMop] = {&thr->mset, mset1};
+
+ // We need to lock the slot during RestoreStack because it protects
+ // the slot journal.
+ Lock slot_lock(&ctx->slots[static_cast<uptr>(s[1].sid())].mtx);
+ ThreadRegistryLock l0(&ctx->thread_registry);
+ Lock slots_lock(&ctx->slot_mtx);
+ if (!RestoreStack(EventType::kAccessExt, s[1].sid(), s[1].epoch(), addr1,
+ size1, typ1, &tids[1], &traces[1], mset[1], &tags[1]))
+ return;
+
+ if (IsFiredSuppression(ctx, rep_typ, traces[1]))
return;
if (HandleRacyStacks(thr, traces))
@@ -908,39 +803,29 @@ void ReportRace(ThreadState *thr) {
uptr tag = kExternalTagNone;
for (uptr i = 0; i < kMop; i++) {
if (tags[i] != kExternalTagNone) {
- typ = ReportTypeExternalRace;
+ rep_typ = ReportTypeExternalRace;
tag = tags[i];
break;
}
}
- ThreadRegistryLock l0(&ctx->thread_registry);
- ScopedReport rep(typ, tag);
- for (uptr i = 0; i < kMop; i++) {
- Shadow s(thr->racy_state[i]);
- rep.AddMemoryAccess(addr, tags[i], s, traces[i],
- i == 0 ? &thr->mset : mset2);
- }
+ ScopedReport rep(rep_typ, tag);
+ for (uptr i = 0; i < kMop; i++)
+ rep.AddMemoryAccess(addr, tags[i], s[i], tids[i], traces[i], mset[i]);
for (uptr i = 0; i < kMop; i++) {
- FastState s(thr->racy_state[i]);
ThreadContext *tctx = static_cast<ThreadContext *>(
- ctx->thread_registry.GetThreadLocked(s.tid()));
- if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
- continue;
+ ctx->thread_registry.GetThreadLocked(tids[i]));
rep.AddThread(tctx);
}
rep.AddLocation(addr_min, addr_max - addr_min);
#if !SANITIZER_GO
- {
- Shadow s(thr->racy_state[1]);
- if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
- rep.AddSleep(thr->last_sleep_stack_id);
- }
+ if (!((typ0 | typ1) & kAccessFree) &&
+ s[1].epoch() <= thr->last_sleep_clock.Get(s[1].sid()))
+ rep.AddSleep(thr->last_sleep_stack_id);
#endif
-
OutputReport(thr, rep);
}
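ReportRace now derives both racing accesses from the shadow values themselves: each shadow decodes to an in-cell byte offset and a size, the absolute ranges are the cell address plus those offsets, and the report covers the union of the two ranges. A minimal sketch of that arithmetic with a hypothetical decoded-access struct (not the runtime's Shadow):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical decoded access: byte offset within the 8-byte shadow cell
// and access size in bytes, as returned by Shadow::GetAccess above.
struct Access {
  uintptr_t offset;
  uintptr_t size;
};

// Computes the [addr_min, addr_max) span covering both racing accesses,
// the same way ReportRace combines `cur` and `old`.
void RacySpan(uintptr_t cell_addr, Access a0, Access a1,
              uintptr_t* addr_min, uintptr_t* addr_max) {
  uintptr_t b0 = cell_addr + a0.offset, e0 = b0 + a0.size;
  uintptr_t b1 = cell_addr + a1.offset, e1 = b1 + a1.size;
  *addr_min = std::min(b0, b1);
  *addr_max = std::max(e0, e1);
}

int main() {
  uintptr_t lo, hi;
  RacySpan(0x1000, {0, 4}, {2, 2}, &lo, &hi);
  std::printf("race span: [0x%zx, 0x%zx)\n", (size_t)lo, (size_t)hi);
  // prints: race span: [0x1000, 0x1004)
}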
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
index c8f7124c009d..86c8b3764cc7 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
@@ -21,20 +21,14 @@ namespace __tsan {
// ThreadContext implementation.
-ThreadContext::ThreadContext(Tid tid)
- : ThreadContextBase(tid), thr(), sync(), epoch0(), epoch1() {}
+ThreadContext::ThreadContext(Tid tid) : ThreadContextBase(tid), thr(), sync() {}
#if !SANITIZER_GO
ThreadContext::~ThreadContext() {
}
#endif
-void ThreadContext::OnReset() {
- CHECK_EQ(sync.size(), 0);
- uptr trace_p = GetThreadTrace(tid);
- ReleaseMemoryPagesToOS(trace_p, trace_p + TraceSize() * sizeof(Event));
- //!!! ReleaseMemoryToOS(GetThreadTraceHeader(tid), sizeof(Trace));
-}
+void ThreadContext::OnReset() { CHECK(!sync); }
#if !SANITIZER_GO
struct ThreadLeak {
@@ -57,7 +51,9 @@ static void CollectThreadLeaks(ThreadContextBase *tctx_base, void *arg) {
}
#endif
-#if !SANITIZER_GO
+// Disabled on Mac because lldb test TestTsanBasic fails:
+// https://reviews.llvm.org/D112603#3163158
+#if !SANITIZER_GO && !SANITIZER_MAC
static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
if (tctx->tid == kMainTid) {
Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
@@ -112,30 +108,35 @@ int ThreadCount(ThreadState *thr) {
}
struct OnCreatedArgs {
- ThreadState *thr;
- uptr pc;
+ VectorClock *sync;
+ uptr sync_epoch;
+ StackID stack;
};
Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
- OnCreatedArgs args = { thr, pc };
- u32 parent_tid = thr ? thr->tid : kInvalidTid; // No parent for GCD workers.
- Tid tid = ctx->thread_registry.CreateThread(uid, detached, parent_tid, &args);
- DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent_tid, tid, uid);
+ // The main thread and GCD workers don't have a parent thread.
+ Tid parent = kInvalidTid;
+ OnCreatedArgs arg = {nullptr, 0, kInvalidStackID};
+ if (thr) {
+ parent = thr->tid;
+ arg.stack = CurrentStackId(thr, pc);
+ if (!thr->ignore_sync) {
+ SlotLocker locker(thr);
+ thr->clock.ReleaseStore(&arg.sync);
+ arg.sync_epoch = ctx->global_epoch;
+ IncrementEpoch(thr);
+ }
+ }
+ Tid tid = ctx->thread_registry.CreateThread(uid, detached, parent, &arg);
+ DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent, tid, uid);
return tid;
}
void ThreadContext::OnCreated(void *arg) {
- thr = 0;
- if (tid == kMainTid)
- return;
OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
- if (!args->thr) // GCD workers don't have a parent thread.
- return;
- args->thr->fast_state.IncrementEpoch();
- // Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
- ReleaseImpl(args->thr, 0, &sync);
- creation_stack_id = CurrentStackId(args->thr, args->pc);
+ sync = args->sync;
+ sync_epoch = args->sync_epoch;
+ creation_stack_id = args->stack;
}
extern "C" void __tsan_stack_initialization() {}
@@ -150,6 +151,15 @@ struct OnStartedArgs {
void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
ThreadType thread_type) {
+ ctx->thread_registry.StartThread(tid, os_id, thread_type, thr);
+ if (!thr->ignore_sync) {
+ SlotAttachAndLock(thr);
+ if (thr->tctx->sync_epoch == ctx->global_epoch)
+ thr->clock.Acquire(thr->tctx->sync);
+ SlotUnlock(thr);
+ }
+ Free(thr->tctx->sync);
+
uptr stk_addr = 0;
uptr stk_size = 0;
uptr tls_addr = 0;
@@ -159,12 +169,10 @@ void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
GetThreadStackAndTls(tid == kMainTid, &stk_addr, &stk_size, &tls_addr,
&tls_size);
#endif
-
- ThreadRegistry *tr = &ctx->thread_registry;
- OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
- tr->StartThread(tid, os_id, thread_type, &args);
-
- while (!thr->tctx->trace.parts.Empty()) thr->tctx->trace.parts.PopBack();
+ thr->stk_addr = stk_addr;
+ thr->stk_size = stk_size;
+ thr->tls_addr = tls_addr;
+ thr->tls_size = tls_size;
#if !SANITIZER_GO
if (ctx->after_multithreaded_fork) {
@@ -192,57 +200,41 @@ void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
}
void ThreadContext::OnStarted(void *arg) {
- OnStartedArgs *args = static_cast<OnStartedArgs *>(arg);
- thr = args->thr;
- // RoundUp so that one trace part does not contain events
- // from different threads.
- epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
- epoch1 = (u64)-1;
- new (thr)
- ThreadState(ctx, tid, unique_id, epoch0, reuse_count, args->stk_addr,
- args->stk_size, args->tls_addr, args->tls_size);
+ thr = static_cast<ThreadState *>(arg);
+ DPrintf("#%d: ThreadStart\n", tid);
+ new (thr) ThreadState(tid);
if (common_flags()->detect_deadlocks)
- thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
- thr->fast_state.SetHistorySize(flags()->history_size);
- // Commit switch to the new part of the trace.
- // TraceAddEvent will reset stack0/mset0 in the new part for us.
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
-
- thr->fast_synch_epoch = epoch0;
- AcquireImpl(thr, 0, &sync);
- sync.Reset(&thr->proc()->clock_cache);
+ thr->dd_lt = ctx->dd->CreateLogicalThread(tid);
thr->tctx = this;
+#if !SANITIZER_GO
thr->is_inited = true;
- DPrintf(
- "#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
- "tls_addr=%zx tls_size=%zx\n",
- tid, (uptr)epoch0, args->stk_addr, args->stk_size, args->tls_addr,
- args->tls_size);
+#endif
}
void ThreadFinish(ThreadState *thr) {
+ DPrintf("#%d: ThreadFinish\n", thr->tid);
ThreadCheckIgnore(thr);
if (thr->stk_addr && thr->stk_size)
DontNeedShadowFor(thr->stk_addr, thr->stk_size);
if (thr->tls_addr && thr->tls_size)
DontNeedShadowFor(thr->tls_addr, thr->tls_size);
thr->is_dead = true;
- thr->is_inited = false;
#if !SANITIZER_GO
+ thr->is_inited = false;
thr->ignore_interceptors++;
+ PlatformCleanUpThreadState(thr);
#endif
- ctx->thread_registry.FinishThread(thr->tid);
-}
-
-void ThreadContext::OnFinished() {
- if (!detached) {
- thr->fast_state.IncrementEpoch();
- // Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
- ReleaseImpl(thr, 0, &sync);
+ if (!thr->ignore_sync) {
+ SlotLocker locker(thr);
+ ThreadRegistryLock lock(&ctx->thread_registry);
+      // Note: detached is protected by the thread registry mutex;
+      // the thread may be detached concurrently by another thread.
+ if (!thr->tctx->detached) {
+ thr->clock.ReleaseStore(&thr->tctx->sync);
+ thr->tctx->sync_epoch = ctx->global_epoch;
+ IncrementEpoch(thr);
+ }
}
- epoch1 = thr->fast_state.epoch();
-
#if !SANITIZER_GO
UnmapOrDie(thr->shadow_stack, kShadowStackSize * sizeof(uptr));
#else
@@ -251,18 +243,37 @@ void ThreadContext::OnFinished() {
thr->shadow_stack = nullptr;
thr->shadow_stack_pos = nullptr;
thr->shadow_stack_end = nullptr;
-
if (common_flags()->detect_deadlocks)
ctx->dd->DestroyLogicalThread(thr->dd_lt);
- thr->clock.ResetCached(&thr->proc()->clock_cache);
-#if !SANITIZER_GO
- thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
-#endif
-#if !SANITIZER_GO
- PlatformCleanUpThreadState(thr);
-#endif
+ SlotDetach(thr);
+ ctx->thread_registry.FinishThread(thr->tid);
thr->~ThreadState();
- thr = 0;
+}
+
+void ThreadContext::OnFinished() {
+ Lock lock(&ctx->slot_mtx);
+ Lock lock1(&trace.mtx);
+ // Queue all trace parts into the global recycle queue.
+ auto parts = &trace.parts;
+ while (trace.local_head) {
+ CHECK(parts->Queued(trace.local_head));
+ ctx->trace_part_recycle.PushBack(trace.local_head);
+ trace.local_head = parts->Next(trace.local_head);
+ }
+ ctx->trace_part_recycle_finished += parts->Size();
+ if (ctx->trace_part_recycle_finished > Trace::kFinishedThreadHi) {
+ ctx->trace_part_finished_excess += parts->Size();
+ trace.parts_allocated = 0;
+ } else if (ctx->trace_part_recycle_finished > Trace::kFinishedThreadLo &&
+ parts->Size() > 1) {
+ ctx->trace_part_finished_excess += parts->Size() - 1;
+ trace.parts_allocated = 1;
+ }
+ // From now on replay will use trace->final_pos.
+ trace.final_pos = (Event *)atomic_load_relaxed(&thr->trace_pos);
+ atomic_store_relaxed(&thr->trace_pos, 0);
+ thr->tctx = nullptr;
+ thr = nullptr;
}
struct ConsumeThreadContext {
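OnFinished above retires the thread's trace parts to a global recycle list and throttles how many parts future allocations will grant this trace, based on how many parts finished threads already hold. A small sketch of that watermark policy, with constants standing in for Trace::kFinishedThreadLo/Hi (illustrative, not the patch's code):

#include <cstddef>

constexpr size_t kFinishedLo = 16;  // stand-in for Trace::kFinishedThreadLo
constexpr size_t kFinishedHi = 64;  // stand-in for Trace::kFinishedThreadHi

// Given how many parts finished threads hold in total and how many parts this
// thread is retiring, decide how many parts the trace remains entitled to keep;
// the rest count as surplus for recycling, mirroring the parts_allocated
// adjustment in OnFinished.
size_t PartsToKeep(size_t finished_parts_total, size_t my_parts) {
  if (finished_parts_total > kFinishedHi)
    return 0;                       // heavy pressure: give up all parts
  if (finished_parts_total > kFinishedLo && my_parts > 1)
    return 1;                       // moderate pressure: keep a single part
  return my_parts;                  // light pressure: keep everything
}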
@@ -274,35 +285,43 @@ Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid) {
return ctx->thread_registry.ConsumeThreadUserId(uid);
}
+struct JoinArg {
+ VectorClock *sync;
+ uptr sync_epoch;
+};
+
void ThreadJoin(ThreadState *thr, uptr pc, Tid tid) {
CHECK_GT(tid, 0);
- CHECK_LT(tid, kMaxTid);
DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
- ctx->thread_registry.JoinThread(tid, thr);
+ JoinArg arg = {};
+ ctx->thread_registry.JoinThread(tid, &arg);
+ if (!thr->ignore_sync) {
+ SlotLocker locker(thr);
+ if (arg.sync_epoch == ctx->global_epoch)
+ thr->clock.Acquire(arg.sync);
+ }
+ Free(arg.sync);
}
-void ThreadContext::OnJoined(void *arg) {
- ThreadState *caller_thr = static_cast<ThreadState *>(arg);
- AcquireImpl(caller_thr, 0, &sync);
- sync.Reset(&caller_thr->proc()->clock_cache);
+void ThreadContext::OnJoined(void *ptr) {
+ auto arg = static_cast<JoinArg *>(ptr);
+ arg->sync = sync;
+ arg->sync_epoch = sync_epoch;
+ sync = nullptr;
+ sync_epoch = 0;
}
-void ThreadContext::OnDead() { CHECK_EQ(sync.size(), 0); }
+void ThreadContext::OnDead() { CHECK_EQ(sync, nullptr); }
void ThreadDetach(ThreadState *thr, uptr pc, Tid tid) {
CHECK_GT(tid, 0);
- CHECK_LT(tid, kMaxTid);
ctx->thread_registry.DetachThread(tid, thr);
}
-void ThreadContext::OnDetached(void *arg) {
- ThreadState *thr1 = static_cast<ThreadState *>(arg);
- sync.Reset(&thr1->proc()->clock_cache);
-}
+void ThreadContext::OnDetached(void *arg) { Free(sync); }
void ThreadNotJoined(ThreadState *thr, uptr pc, Tid tid, uptr uid) {
CHECK_GT(tid, 0);
- CHECK_LT(tid, kMaxTid);
ctx->thread_registry.SetThreadUserId(tid, uid);
}
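ThreadJoin and OnJoined above hand the finished thread's published clock over to the joiner: OnJoined moves the pointer out of the ThreadContext (so OnDead's CHECK(sync == nullptr) holds), and the joiner acquires it only if the global epoch is unchanged, then frees it exactly once. A toy ownership-transfer sketch of that flow (hypothetical names, unique_ptr standing in for the runtime's manual Free):

#include <cstdint>
#include <memory>
#include <utility>

struct ToyClock { uint64_t ticks[4]; };

// Finished thread's context: owns the clock published at exit.
struct FinishedContext {
  std::unique_ptr<ToyClock> sync;
  uint64_t sync_epoch = 0;
};

// Mirrors OnJoined: move the snapshot out of the context so it can never be
// acquired or freed twice; the joiner now owns it.
std::pair<std::unique_ptr<ToyClock>, uint64_t> TakeJoinSync(FinishedContext& c) {
  uint64_t epoch = c.sync_epoch;
  c.sync_epoch = 0;
  return {std::move(c.sync), epoch};
}

// Mirrors ThreadJoin: use the snapshot only if no reset happened since it was
// published; the unique_ptr releases it when it goes out of scope.
void Join(FinishedContext& c, ToyClock& joiner, uint64_t global_epoch) {
  auto [clock, epoch] = TakeJoinSync(c);
  if (clock && epoch == global_epoch) {
    for (int i = 0; i < 4; i++) {
      if (clock->ticks[i] > joiner.ticks[i])
        joiner.ticks[i] = clock->ticks[i];
    }
  }
}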
diff --git a/compiler-rt/lib/tsan/rtl/tsan_shadow.h b/compiler-rt/lib/tsan/rtl/tsan_shadow.h
index 8b7bc341713e..843573ecf5d3 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_shadow.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_shadow.h
@@ -10,223 +10,170 @@
#define TSAN_SHADOW_H
#include "tsan_defs.h"
-#include "tsan_trace.h"
namespace __tsan {
-// FastState (from most significant bit):
-// ignore : 1
-// tid : kTidBits
-// unused : -
-// history_size : 3
-// epoch : kClkBits
class FastState {
public:
- FastState(u64 tid, u64 epoch) {
- x_ = tid << kTidShift;
- x_ |= epoch;
- DCHECK_EQ(tid, this->tid());
- DCHECK_EQ(epoch, this->epoch());
- DCHECK_EQ(GetIgnoreBit(), false);
- }
-
- explicit FastState(u64 x) : x_(x) {}
-
- u64 raw() const { return x_; }
-
- u64 tid() const {
- u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
- return res;
- }
-
- u64 TidWithIgnore() const {
- u64 res = x_ >> kTidShift;
- return res;
- }
+ FastState() { Reset(); }
- u64 epoch() const {
- u64 res = x_ & ((1ull << kClkBits) - 1);
- return res;
+ void Reset() {
+ part_.unused0_ = 0;
+ part_.sid_ = static_cast<u8>(kFreeSid);
+ part_.epoch_ = static_cast<u16>(kEpochLast);
+ part_.unused1_ = 0;
+ part_.ignore_accesses_ = false;
}
- void IncrementEpoch() {
- u64 old_epoch = epoch();
- x_ += 1;
- DCHECK_EQ(old_epoch + 1, epoch());
- (void)old_epoch;
- }
+ void SetSid(Sid sid) { part_.sid_ = static_cast<u8>(sid); }
- void SetIgnoreBit() { x_ |= kIgnoreBit; }
- void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
- bool GetIgnoreBit() const { return (s64)x_ < 0; }
+ Sid sid() const { return static_cast<Sid>(part_.sid_); }
- void SetHistorySize(int hs) {
- CHECK_GE(hs, 0);
- CHECK_LE(hs, 7);
- x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
- }
+ Epoch epoch() const { return static_cast<Epoch>(part_.epoch_); }
- ALWAYS_INLINE
- int GetHistorySize() const {
- return (int)((x_ >> kHistoryShift) & kHistoryMask);
- }
+ void SetEpoch(Epoch epoch) { part_.epoch_ = static_cast<u16>(epoch); }
- void ClearHistorySize() { SetHistorySize(0); }
-
- ALWAYS_INLINE
- u64 GetTracePos() const {
- const int hs = GetHistorySize();
- // When hs == 0, the trace consists of 2 parts.
- const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
- return epoch() & mask;
- }
+ void SetIgnoreBit() { part_.ignore_accesses_ = 1; }
+ void ClearIgnoreBit() { part_.ignore_accesses_ = 0; }
+ bool GetIgnoreBit() const { return part_.ignore_accesses_; }
private:
friend class Shadow;
- static const int kTidShift = 64 - kTidBits - 1;
- static const u64 kIgnoreBit = 1ull << 63;
- static const u64 kFreedBit = 1ull << 63;
- static const u64 kHistoryShift = kClkBits;
- static const u64 kHistoryMask = 7;
- u64 x_;
+ struct Parts {
+ u32 unused0_ : 8;
+ u32 sid_ : 8;
+ u32 epoch_ : kEpochBits;
+ u32 unused1_ : 1;
+ u32 ignore_accesses_ : 1;
+ };
+ union {
+ Parts part_;
+ u32 raw_;
+ };
};
-// Shadow (from most significant bit):
-// freed : 1
-// tid : kTidBits
-// is_atomic : 1
-// is_read : 1
-// size_log : 2
-// addr0 : 3
-// epoch : kClkBits
-class Shadow : public FastState {
- public:
- explicit Shadow(u64 x) : FastState(x) {}
-
- explicit Shadow(const FastState &s) : FastState(s.x_) { ClearHistorySize(); }
-
- void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
- DCHECK_EQ((x_ >> kClkBits) & 31, 0);
- DCHECK_LE(addr0, 7);
- DCHECK_LE(kAccessSizeLog, 3);
- x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
- DCHECK_EQ(kAccessSizeLog, size_log());
- DCHECK_EQ(addr0, this->addr0());
- }
+static_assert(sizeof(FastState) == kShadowSize, "bad FastState size");
- void SetWrite(unsigned kAccessIsWrite) {
- DCHECK_EQ(x_ & kReadBit, 0);
- if (!kAccessIsWrite)
- x_ |= kReadBit;
- DCHECK_EQ(kAccessIsWrite, IsWrite());
- }
+class Shadow {
+ public:
+ static constexpr RawShadow kEmpty = static_cast<RawShadow>(0);
- void SetAtomic(bool kIsAtomic) {
- DCHECK(!IsAtomic());
- if (kIsAtomic)
- x_ |= kAtomicBit;
- DCHECK_EQ(IsAtomic(), kIsAtomic);
+ Shadow(FastState state, u32 addr, u32 size, AccessType typ) {
+ raw_ = state.raw_;
+ DCHECK_GT(size, 0);
+ DCHECK_LE(size, 8);
+ UNUSED Sid sid0 = part_.sid_;
+ UNUSED u16 epoch0 = part_.epoch_;
+ raw_ |= (!!(typ & kAccessAtomic) << kIsAtomicShift) |
+ (!!(typ & kAccessRead) << kIsReadShift) |
+ (((((1u << size) - 1) << (addr & 0x7)) & 0xff) << kAccessShift);
+ // Note: we don't check kAccessAtomic because it overlaps with
+ // FastState::ignore_accesses_ and it may be set spuriously.
+ DCHECK_EQ(part_.is_read_, !!(typ & kAccessRead));
+ DCHECK_EQ(sid(), sid0);
+ DCHECK_EQ(epoch(), epoch0);
}
- bool IsAtomic() const { return x_ & kAtomicBit; }
+ explicit Shadow(RawShadow x = Shadow::kEmpty) { raw_ = static_cast<u32>(x); }
- bool IsZero() const { return x_ == 0; }
+ RawShadow raw() const { return static_cast<RawShadow>(raw_); }
+ Sid sid() const { return part_.sid_; }
+ Epoch epoch() const { return static_cast<Epoch>(part_.epoch_); }
+ u8 access() const { return part_.access_; }
- static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
- u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
- DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
- return shifted_xor == 0;
+ void GetAccess(uptr *addr, uptr *size, AccessType *typ) const {
+ DCHECK(part_.access_ != 0 || raw_ == static_cast<u32>(Shadow::kRodata));
+ if (addr)
+ *addr = part_.access_ ? __builtin_ffs(part_.access_) - 1 : 0;
+ if (size)
+ *size = part_.access_ == kFreeAccess ? kShadowCell
+ : __builtin_popcount(part_.access_);
+ if (typ)
+ *typ = (part_.is_read_ ? kAccessRead : kAccessWrite) |
+ (part_.is_atomic_ ? kAccessAtomic : 0) |
+ (part_.access_ == kFreeAccess ? kAccessFree : 0);
}
- static ALWAYS_INLINE bool Addr0AndSizeAreEqual(const Shadow s1,
- const Shadow s2) {
- u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
- return masked_xor == 0;
- }
-
- static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
- unsigned kS2AccessSize) {
- bool res = false;
- u64 diff = s1.addr0() - s2.addr0();
- if ((s64)diff < 0) { // s1.addr0 < s2.addr0
- // if (s1.addr0() + size1) > s2.addr0()) return true;
- if (s1.size() > -diff)
- res = true;
- } else {
- // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
- if (kS2AccessSize > diff)
- res = true;
- }
- DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
- DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
+ ALWAYS_INLINE
+ bool IsBothReadsOrAtomic(AccessType typ) const {
+ u32 is_read = !!(typ & kAccessRead);
+ u32 is_atomic = !!(typ & kAccessAtomic);
+ bool res =
+ raw_ & ((is_atomic << kIsAtomicShift) | (is_read << kIsReadShift));
+ DCHECK_EQ(res,
+ (part_.is_read_ && is_read) || (part_.is_atomic_ && is_atomic));
return res;
}
- u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
- u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
- bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
- bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }
-
- // The idea behind the freed bit is as follows.
-  // When the memory is freed (or otherwise inaccessible) we write to the shadow
- // values with tid/epoch related to the free and the freed bit set.
- // During memory accesses processing the freed bit is considered
- // as msb of tid. So any access races with shadow with freed bit set
- // (it is as if write from a thread with which we never synchronized before).
- // This allows us to detect accesses to freed memory w/o additional
- // overheads in memory access processing and at the same time restore
- // tid/epoch of free.
- void MarkAsFreed() { x_ |= kFreedBit; }
-
- bool IsFreed() const { return x_ & kFreedBit; }
+ ALWAYS_INLINE
+ bool IsRWWeakerOrEqual(AccessType typ) const {
+ u32 is_read = !!(typ & kAccessRead);
+ u32 is_atomic = !!(typ & kAccessAtomic);
+ UNUSED u32 res0 =
+ (part_.is_atomic_ > is_atomic) ||
+ (part_.is_atomic_ == is_atomic && part_.is_read_ >= is_read);
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ const u32 kAtomicReadMask = (1 << kIsAtomicShift) | (1 << kIsReadShift);
+ bool res = (raw_ & kAtomicReadMask) >=
+ ((is_atomic << kIsAtomicShift) | (is_read << kIsReadShift));
- bool GetFreedAndReset() {
- bool res = x_ & kFreedBit;
- x_ &= ~kFreedBit;
+ DCHECK_EQ(res, res0);
return res;
+#else
+ return res0;
+#endif
}
- bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
- bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift) |
- (u64(kIsAtomic) << kAtomicShift));
- DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
- return v;
- }
-
- bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
- bool v = ((x_ >> kReadShift) & 3) <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
- DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
- (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
- return v;
+ // The FreedMarker must not pass "the same access check" so that we don't
+ // return from the race detection algorithm early.
+ static RawShadow FreedMarker() {
+ FastState fs;
+ fs.SetSid(kFreeSid);
+ fs.SetEpoch(kEpochLast);
+ Shadow s(fs, 0, 8, kAccessWrite);
+ return s.raw();
}
- bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
- bool v = ((x_ >> kReadShift) & 3) >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
- DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
- (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
- return v;
+ static RawShadow FreedInfo(Sid sid, Epoch epoch) {
+ Shadow s;
+ s.part_.sid_ = sid;
+ s.part_.epoch_ = static_cast<u16>(epoch);
+ s.part_.access_ = kFreeAccess;
+ return s.raw();
}
private:
- static const u64 kReadShift = 5 + kClkBits;
- static const u64 kReadBit = 1ull << kReadShift;
- static const u64 kAtomicShift = 6 + kClkBits;
- static const u64 kAtomicBit = 1ull << kAtomicShift;
+ struct Parts {
+ u8 access_;
+ Sid sid_;
+ u16 epoch_ : kEpochBits;
+ u16 is_read_ : 1;
+ u16 is_atomic_ : 1;
+ };
+ union {
+ Parts part_;
+ u32 raw_;
+ };
- u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }
+ static constexpr u8 kFreeAccess = 0x81;
- static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
- if (s1.addr0() == s2.addr0())
- return true;
- if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
- return true;
- if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
- return true;
- return false;
- }
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ static constexpr uptr kAccessShift = 0;
+ static constexpr uptr kIsReadShift = 30;
+ static constexpr uptr kIsAtomicShift = 31;
+#else
+ static constexpr uptr kAccessShift = 24;
+ static constexpr uptr kIsReadShift = 1;
+ static constexpr uptr kIsAtomicShift = 0;
+#endif
+
+ public:
+ // .rodata shadow marker, see MapRodata and ContainsSameAccessFast.
+ static constexpr RawShadow kRodata =
+ static_cast<RawShadow>(1 << kIsReadShift);
};
-const RawShadow kShadowRodata = (RawShadow)-1; // .rodata shadow marker
+static_assert(sizeof(Shadow) == kShadowSize, "bad Shadow size");
} // namespace __tsan
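The new 32-bit shadow cell replaces the old addr0/size_log fields with an 8-bit byte mask: bit i is set if byte i of the 8-byte cell was accessed, so the offset is the index of the lowest set bit and the size is the population count, exactly what the constructor and GetAccess above compute. A standalone sketch of that encoding as plain functions (not the Shadow class itself):

#include <cassert>
#include <cstdint>

// Encode an access of `size` bytes starting at byte (addr & 7) of an 8-byte
// shadow cell as a contiguous bit mask, as the Shadow constructor does.
uint8_t EncodeAccessMask(uintptr_t addr, uintptr_t size) {
  assert(size >= 1 && size <= 8);
  return static_cast<uint8_t>((((1u << size) - 1) << (addr & 0x7)) & 0xff);
}

// Decode the starting offset (lowest set bit) and the size (number of set
// bits), as Shadow::GetAccess does for non-free accesses.
void DecodeAccessMask(uint8_t mask, uintptr_t* offset, uintptr_t* size) {
  *offset = mask ? __builtin_ffs(mask) - 1 : 0;
  *size = __builtin_popcount(mask);
}

int main() {
  uintptr_t off, sz;
  DecodeAccessMask(EncodeAccessMask(0x1003, 2), &off, &sz);
  assert(off == 3 && sz == 2);  // 2-byte access at offset 3 -> mask 0b00011000
}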
diff --git a/compiler-rt/lib/tsan/rtl/tsan_sync.cpp b/compiler-rt/lib/tsan/rtl/tsan_sync.cpp
index f042abab74e5..09d41780d188 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_sync.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_sync.cpp
@@ -18,43 +18,31 @@ namespace __tsan {
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);
-SyncVar::SyncVar() : mtx(MutexTypeSyncVar) { Reset(0); }
+SyncVar::SyncVar() : mtx(MutexTypeSyncVar) { Reset(); }
-void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid,
- bool save_stack) {
+void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, bool save_stack) {
+ Reset();
this->addr = addr;
- this->uid = uid;
- this->next = 0;
-
- creation_stack_id = kInvalidStackID;
+ next = 0;
if (save_stack && !SANITIZER_GO) // Go does not use them
creation_stack_id = CurrentStackId(thr, pc);
if (common_flags()->detect_deadlocks)
DDMutexInit(thr, pc, this);
}
-void SyncVar::Reset(Processor *proc) {
- uid = 0;
+void SyncVar::Reset() {
+ CHECK(!ctx->resetting);
creation_stack_id = kInvalidStackID;
owner_tid = kInvalidTid;
- last_lock = 0;
+ last_lock.Reset();
recursion = 0;
atomic_store_relaxed(&flags, 0);
-
- if (proc == 0) {
- CHECK_EQ(clock.size(), 0);
- CHECK_EQ(read_clock.size(), 0);
- } else {
- clock.Reset(&proc->clock_cache);
- read_clock.Reset(&proc->clock_cache);
- }
+ Free(clock);
+ Free(read_clock);
}
MetaMap::MetaMap()
- : block_alloc_(LINKER_INITIALIZED, "heap block allocator"),
- sync_alloc_(LINKER_INITIALIZED, "sync allocator") {
- atomic_store(&uid_gen_, 0, memory_order_relaxed);
-}
+ : block_alloc_("heap block allocator"), sync_alloc_("sync allocator") {}
void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
u32 idx = block_alloc_.Alloc(&thr->proc()->block_cache);
@@ -68,16 +56,16 @@ void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
*meta = idx | kFlagBlock;
}
-uptr MetaMap::FreeBlock(Processor *proc, uptr p) {
+uptr MetaMap::FreeBlock(Processor *proc, uptr p, bool reset) {
MBlock* b = GetBlock(p);
if (b == 0)
return 0;
uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
- FreeRange(proc, p, sz);
+ FreeRange(proc, p, sz, reset);
return sz;
}
-bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
+bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz, bool reset) {
bool has_something = false;
u32 *meta = MemToMeta(p);
u32 *end = MemToMeta(p + sz);
@@ -99,7 +87,8 @@ bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
DCHECK(idx & kFlagSync);
SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
u32 next = s->next;
- s->Reset(proc);
+ if (reset)
+ s->Reset();
sync_alloc_.Free(&proc->sync_cache, idx & ~kFlagMask);
idx = next;
} else {
@@ -116,30 +105,30 @@ bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
// which can be huge. The function probes pages one-by-one until it finds a page
// without meta objects; at that point it stops freeing meta objects. Because
// thread stacks grow top-down, we do the same starting from end as well.
-void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
+void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz, bool reset) {
if (SANITIZER_GO) {
// UnmapOrDie/MmapFixedNoReserve does not work on Windows,
// so we do the optimization only for C/C++.
- FreeRange(proc, p, sz);
+ FreeRange(proc, p, sz, reset);
return;
}
const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
if (sz <= 4 * kPageSize) {
// If the range is small, just do the normal free procedure.
- FreeRange(proc, p, sz);
+ FreeRange(proc, p, sz, reset);
return;
}
// First, round both ends of the range to page size.
uptr diff = RoundUp(p, kPageSize) - p;
if (diff != 0) {
- FreeRange(proc, p, diff);
+ FreeRange(proc, p, diff, reset);
p += diff;
sz -= diff;
}
diff = p + sz - RoundDown(p + sz, kPageSize);
if (diff != 0) {
- FreeRange(proc, p + sz - diff, diff);
+ FreeRange(proc, p + sz - diff, diff, reset);
sz -= diff;
}
// Now we must have a non-empty page-aligned range.
@@ -150,7 +139,7 @@ void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
const uptr sz0 = sz;
// Probe start of the range.
for (uptr checked = 0; sz > 0; checked += kPageSize) {
- bool has_something = FreeRange(proc, p, kPageSize);
+ bool has_something = FreeRange(proc, p, kPageSize, reset);
p += kPageSize;
sz -= kPageSize;
if (!has_something && checked > (128 << 10))
@@ -158,7 +147,7 @@ void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
}
// Probe end of the range.
for (uptr checked = 0; sz > 0; checked += kPageSize) {
- bool has_something = FreeRange(proc, p + sz - kPageSize, kPageSize);
+ bool has_something = FreeRange(proc, p + sz - kPageSize, kPageSize, reset);
sz -= kPageSize;
// Stacks grow down, so sync objects are most likely at the end of the region
// (if it is a stack). The very end of the stack is TLS and tsan increases
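ResetRange avoids walking the meta shadow of a huge region (such as a whole thread stack) end to end: it frees the unaligned edges, then probes whole pages from the start and from the end, and each direction stops at the first empty page once enough has already been probed. A simplified sketch of that two-ended probing, with a hypothetical free_page callback standing in for FreeRange on a single page:

#include <cstddef>
#include <functional>

// Probe a [0, pages) range from both ends. free_page(i) frees page i and
// returns true if it contained any meta objects. Each direction stops at the
// first empty page after more than `slack` pages have been probed, mirroring
// the loops in ResetRange.
void ProbeBothEnds(size_t pages, size_t slack,
                   const std::function<bool(size_t)>& free_page) {
  size_t lo = 0, hi = pages;
  // Probe from the start of the range.
  for (size_t checked = 0; lo < hi; checked++) {
    bool has_something = free_page(lo++);
    if (!has_something && checked > slack)
      break;
  }
  // Probe from the end (stacks grow down, so objects cluster there).
  for (size_t checked = 0; lo < hi; checked++) {
    bool has_something = free_page(--hi);
    if (!has_something && checked > slack)
      break;
  }
  // Pages in [lo, hi) that were never probed are assumed to hold nothing.
}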
@@ -177,6 +166,27 @@ void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
Die();
}
+void MetaMap::ResetClocks() {
+ // This can be called from the background thread
+ // which does not have proc/cache.
+ // The cache is too large for stack.
+ static InternalAllocatorCache cache;
+ internal_memset(&cache, 0, sizeof(cache));
+ internal_allocator()->InitCache(&cache);
+ sync_alloc_.ForEach([&](SyncVar *s) {
+ if (s->clock) {
+ InternalFree(s->clock, &cache);
+ s->clock = nullptr;
+ }
+ if (s->read_clock) {
+ InternalFree(s->read_clock, &cache);
+ s->read_clock = nullptr;
+ }
+ s->last_lock.Reset();
+ });
+ internal_allocator()->DestroyCache(&cache);
+}
+
MBlock* MetaMap::GetBlock(uptr p) {
u32 *meta = MemToMeta(p);
u32 idx = *meta;
@@ -193,6 +203,7 @@ MBlock* MetaMap::GetBlock(uptr p) {
SyncVar *MetaMap::GetSync(ThreadState *thr, uptr pc, uptr addr, bool create,
bool save_stack) {
+ DCHECK(!create || thr->slot_locked);
u32 *meta = MemToMeta(addr);
u32 idx0 = *meta;
u32 myidx = 0;
@@ -203,7 +214,7 @@ SyncVar *MetaMap::GetSync(ThreadState *thr, uptr pc, uptr addr, bool create,
SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
if (LIKELY(s->addr == addr)) {
if (UNLIKELY(myidx != 0)) {
- mys->Reset(thr->proc());
+ mys->Reset();
sync_alloc_.Free(&thr->proc()->sync_cache, myidx);
}
return s;
@@ -218,10 +229,9 @@ SyncVar *MetaMap::GetSync(ThreadState *thr, uptr pc, uptr addr, bool create,
}
if (LIKELY(myidx == 0)) {
- const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
myidx = sync_alloc_.Alloc(&thr->proc()->sync_cache);
mys = sync_alloc_.Map(myidx);
- mys->Init(thr, pc, addr, uid, save_stack);
+ mys->Init(thr, pc, addr, save_stack);
}
mys->next = idx0;
if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
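ResetClocks above sweeps every allocated sync object and drops its vector clocks without taking per-object locks, which is safe only because it runs while all threads are stopped for DoReset. A toy sketch of that sweep (hypothetical Sync struct owning its clocks; the real code frees raw pointers through a temporary internal allocator cache):

#include <memory>
#include <vector>

// Toy stand-in for a sync object that owns optional vector clocks.
struct Sync {
  std::unique_ptr<int[]> clock;       // write clock (null if never needed)
  std::unique_ptr<int[]> read_clock;  // read clock, rw-mutexes only
};

// Free the clocks of every sync object in one unsynchronized pass; valid only
// while no other thread can touch the objects, the contract of ResetClocks.
void ResetAllClocks(std::vector<Sync>& all_syncs) {
  for (Sync& s : all_syncs) {
    s.clock.reset();
    s.read_clock.reset();
  }
}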
diff --git a/compiler-rt/lib/tsan/rtl/tsan_sync.h b/compiler-rt/lib/tsan/rtl/tsan_sync.h
index fc8fa288a841..67d3c0b5e7dd 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_sync.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_sync.h
@@ -16,8 +16,9 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "tsan_defs.h"
-#include "tsan_clock.h"
#include "tsan_dense_alloc.h"
+#include "tsan_shadow.h"
+#include "tsan_vector_clock.h"
namespace __tsan {
@@ -53,34 +54,18 @@ struct SyncVar {
uptr addr; // overwritten by DenseSlabAlloc freelist
Mutex mtx;
- u64 uid; // Globally unique id.
StackID creation_stack_id;
Tid owner_tid; // Set only by exclusive owners.
- u64 last_lock;
+ FastState last_lock;
int recursion;
atomic_uint32_t flags;
u32 next; // in MetaMap
DDMutex dd;
- SyncClock read_clock; // Used for rw mutexes only.
- // The clock is placed last, so that it is situated on a different cache line
- // with the mtx. This reduces contention for hot sync objects.
- SyncClock clock;
+ VectorClock *read_clock; // Used for rw mutexes only.
+ VectorClock *clock;
- void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid, bool save_stack);
- void Reset(Processor *proc);
-
- u64 GetId() const {
- // 48 lsb is addr, then 14 bits is low part of uid, then 2 zero bits.
- return GetLsb((u64)addr | (uid << 48), 60);
- }
- bool CheckId(u64 uid) const {
- CHECK_EQ(uid, GetLsb(uid, 14));
- return GetLsb(this->uid, 14) == uid;
- }
- static uptr SplitId(u64 id, u64 *uid) {
- *uid = id >> 48;
- return (uptr)GetLsb(id, 48);
- }
+ void Init(ThreadState *thr, uptr pc, uptr addr, bool save_stack);
+ void Reset();
bool IsFlagSet(u32 f) const {
return atomic_load_relaxed(&flags) & f;
@@ -110,9 +95,20 @@ class MetaMap {
MetaMap();
void AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz);
- uptr FreeBlock(Processor *proc, uptr p);
- bool FreeRange(Processor *proc, uptr p, uptr sz);
- void ResetRange(Processor *proc, uptr p, uptr sz);
+
+ // FreeBlock resets all sync objects in the range if reset=true and must not
+ // run concurrently with ResetClocks which resets all sync objects
+ // w/o any synchronization (as part of DoReset).
+ // If we don't have a thread slot (very early/late in thread lifetime or
+ // Go/Java callbacks) or the slot is not locked, then reset must be set to
+  // false. In that case the sync object clocks will be reset later (when the
+  // object is reused or during the next ResetClocks).
+ uptr FreeBlock(Processor *proc, uptr p, bool reset);
+ bool FreeRange(Processor *proc, uptr p, uptr sz, bool reset);
+ void ResetRange(Processor *proc, uptr p, uptr sz, bool reset);
+ // Reset vector clocks of all sync objects.
+ // Must be called when no other threads access sync objects.
+ void ResetClocks();
MBlock* GetBlock(uptr p);
SyncVar *GetSyncOrCreate(ThreadState *thr, uptr pc, uptr addr,
@@ -142,7 +138,6 @@ class MetaMap {
typedef DenseSlabAlloc<SyncVar, 1 << 20, 1 << 10, kFlagMask> SyncAlloc;
BlockAlloc block_alloc_;
SyncAlloc sync_alloc_;
- atomic_uint64_t uid_gen_;
SyncVar *GetSync(ThreadState *thr, uptr pc, uptr addr, bool create,
bool save_stack);
diff --git a/compiler-rt/lib/tsan/rtl/tsan_trace.h b/compiler-rt/lib/tsan/rtl/tsan_trace.h
index ffc8c991ece0..01bb7b34f43a 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_trace.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_trace.h
@@ -19,57 +19,6 @@
namespace __tsan {
-const int kTracePartSizeBits = 13;
-const int kTracePartSize = 1 << kTracePartSizeBits;
-const int kTraceParts = 2 * 1024 * 1024 / kTracePartSize;
-const int kTraceSize = kTracePartSize * kTraceParts;
-
-// Must fit into 3 bits.
-enum EventType {
- EventTypeMop,
- EventTypeFuncEnter,
- EventTypeFuncExit,
- EventTypeLock,
- EventTypeUnlock,
- EventTypeRLock,
- EventTypeRUnlock
-};
-
-// Represents a thread event (from most significant bit):
-// u64 typ : 3; // EventType.
-// u64 addr : 61; // Associated pc.
-typedef u64 Event;
-
-const uptr kEventPCBits = 61;
-
-struct TraceHeader {
-#if !SANITIZER_GO
- BufferedStackTrace stack0; // Start stack for the trace.
-#else
- VarSizeStackTrace stack0;
-#endif
- u64 epoch0; // Start epoch for the trace.
- MutexSet mset0;
-
- TraceHeader() : stack0(), epoch0() {}
-};
-
-struct Trace {
- Mutex mtx;
-#if !SANITIZER_GO
- // Must be last to catch overflow as paging fault.
- // Go shadow stack is dynamically allocated.
- uptr shadow_stack[kShadowStackSize];
-#endif
- // Must be the last field, because we unmap the unused part in
- // CreateThreadContext.
- TraceHeader headers[kTraceParts];
-
- Trace() : mtx(MutexTypeTrace) {}
-};
-
-namespace v3 {
-
enum class EventType : u64 {
kAccessExt,
kAccessRange,
@@ -217,6 +166,7 @@ struct Trace;
struct TraceHeader {
Trace* trace = nullptr; // back-pointer to Trace containing this part
INode trace_parts; // in Trace::parts
+  INode global;            // in Context::trace_part_recycle
};
struct TracePart : TraceHeader {
@@ -239,13 +189,26 @@ static_assert(sizeof(TracePart) == TracePart::kByteSize, "bad TracePart size");
struct Trace {
Mutex mtx;
IList<TraceHeader, &TraceHeader::trace_parts, TracePart> parts;
- Event* final_pos =
- nullptr; // final position in the last part for finished threads
+ // First node non-queued into ctx->trace_part_recycle.
+ TracePart* local_head;
+ // Final position in the last part for finished threads.
+ Event* final_pos = nullptr;
+ // Number of trace parts allocated on behalf of this trace specifically.
+ // Total number of parts in this trace can be larger if we retake some
+ // parts from other traces.
+ uptr parts_allocated = 0;
Trace() : mtx(MutexTypeTrace) {}
-};
-} // namespace v3
+  // We need at least 3 parts per thread, because we want to keep at least
+  // 2 parts per thread that are not queued into ctx->trace_part_recycle
+  // (the current one being filled and one full part that ensures that
+  // we always have at least one part's worth of previous memory accesses).
+ static constexpr uptr kMinParts = 3;
+
+ static constexpr uptr kFinishedThreadLo = 16;
+ static constexpr uptr kFinishedThreadHi = 64;
+};
} // namespace __tsan
diff --git a/compiler-rt/lib/xray/xray_allocator.h b/compiler-rt/lib/xray/xray_allocator.h
index 4b42c473261d..0284f4299fb1 100644
--- a/compiler-rt/lib/xray/xray_allocator.h
+++ b/compiler-rt/lib/xray/xray_allocator.h
@@ -65,9 +65,9 @@ template <class T> T *allocate() XRAY_NEVER_INSTRUMENT {
int ErrNo = 0;
if (UNLIKELY(internal_iserror(B, &ErrNo))) {
if (Verbosity())
- Report(
- "XRay Profiling: Failed to allocate memory of size %d; Error = %d.\n",
- RoundedSize, B);
+ Report("XRay Profiling: Failed to allocate memory of size %zu; Error = "
+ "%zu\n",
+ RoundedSize, B);
return nullptr;
}
#endif
@@ -114,9 +114,9 @@ T *allocateBuffer(size_t S) XRAY_NEVER_INSTRUMENT {
int ErrNo = 0;
if (UNLIKELY(internal_iserror(B, &ErrNo))) {
if (Verbosity())
- Report(
- "XRay Profiling: Failed to allocate memory of size %d; Error = %d.\n",
- RoundedSize, B);
+ Report("XRay Profiling: Failed to allocate memory of size %zu; Error = "
+ "%zu\n",
+ RoundedSize, B);
return nullptr;
}
#endif
@@ -183,7 +183,7 @@ private:
BackingStore = allocateBuffer(MaxMemory);
if (BackingStore == nullptr) {
if (Verbosity())
- Report("XRay Profiling: Failed to allocate memory for allocator.\n");
+ Report("XRay Profiling: Failed to allocate memory for allocator\n");
return nullptr;
}
@@ -198,7 +198,7 @@ private:
AlignedNextBlock = BackingStore = nullptr;
if (Verbosity())
Report("XRay Profiling: Cannot obtain enough memory from "
- "preallocated region.\n");
+ "preallocated region\n");
return nullptr;
}
diff --git a/compiler-rt/lib/xray/xray_basic_logging.cpp b/compiler-rt/lib/xray/xray_basic_logging.cpp
index a58ae9b5e267..6e83252a0516 100644
--- a/compiler-rt/lib/xray/xray_basic_logging.cpp
+++ b/compiler-rt/lib/xray/xray_basic_logging.cpp
@@ -345,12 +345,12 @@ static void TLDDestructor(void *P) XRAY_NEVER_INSTRUMENT {
if (TLD.ShadowStack)
InternalFree(TLD.ShadowStack);
if (Verbosity())
- Report("Cleaned up log for TID: %d\n", GetTid());
+ Report("Cleaned up log for TID: %llu\n", GetTid());
});
if (TLD.LogWriter == nullptr || TLD.BufferOffset == 0) {
if (Verbosity())
- Report("Skipping buffer for TID: %d; Offset = %llu\n", GetTid(),
+ Report("Skipping buffer for TID: %llu; Offset = %zu\n", GetTid(),
TLD.BufferOffset);
return;
}
diff --git a/compiler-rt/lib/xray/xray_hexagon.cpp b/compiler-rt/lib/xray/xray_hexagon.cpp
new file mode 100644
index 000000000000..7f127b2b499c
--- /dev/null
+++ b/compiler-rt/lib/xray/xray_hexagon.cpp
@@ -0,0 +1,168 @@
+//===-- xray_hexagon.cpp --------------------------------------*- C++ ---*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Implementation of hexagon-specific routines (32-bit).
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_common.h"
+#include "xray_defs.h"
+#include "xray_interface_internal.h"
+#include <assert.h>
+#include <atomic>
+
+namespace __xray {
+
+// The machine codes for some instructions used in runtime patching.
+enum PatchOpcodes : uint32_t {
+ PO_JUMPI_14 = 0x5800c00a, // jump #0x014 (PC + 0x014)
+ PO_CALLR_R6 = 0x50a6c000, // indirect call: callr r6
+ PO_TFR_IMM = 0x78000000, // transfer immed
+ // ICLASS 0x7 - S2-type A-type
+ PO_IMMEXT = 0x00000000, // constant extender
+};
+
+enum PacketWordParseBits : uint32_t {
+ PP_DUPLEX = 0x00 << 14,
+ PP_NOT_END = 0x01 << 14,
+ PP_PACKET_END = 0x03 << 14,
+};
+
+enum RegNum : uint32_t {
+ RN_R6 = 0x6,
+ RN_R7 = 0x7,
+};
+
+inline static uint32_t
+encodeExtendedTransferImmediate(uint32_t Imm, RegNum DestReg,
+ bool PacketEnd = false) XRAY_NEVER_INSTRUMENT {
+ static const uint32_t REG_MASK = 0x1f;
+ assert((DestReg & (~REG_MASK)) == 0);
+ // The constant-extended register transfer encodes the 6 least
+ // significant bits of the effective constant:
+ Imm = Imm & 0x03f;
+ const PacketWordParseBits ParseBits = PacketEnd ? PP_PACKET_END : PP_NOT_END;
+
+ return PO_TFR_IMM | ParseBits | (Imm << 5) | (DestReg & REG_MASK);
+}
+
+inline static uint32_t
+encodeConstantExtender(uint32_t Imm) XRAY_NEVER_INSTRUMENT {
+ // Bits Name Description
+ // ----- ------- ------------------------------------------
+ // 31:28 ICLASS Instruction class = 0000
+ // 27:16 high High 12 bits of 26-bit constant extension
+ // 15:14 Parse Parse bits
+ // 13:0 low Low 14 bits of 26-bit constant extension
+ static const uint32_t IMM_MASK_LOW = 0x03fff;
+ static const uint32_t IMM_MASK_HIGH = 0x00fff << 14;
+
+ // The extender encodes the 26 most significant bits of the effective
+ // constant:
+ Imm = Imm >> 6;
+
+ const uint32_t high = (Imm & IMM_MASK_HIGH) << 16;
+ const uint32_t low = Imm & IMM_MASK_LOW;
+
+ return PO_IMMEXT | high | PP_NOT_END | low;
+}
+
+static void WriteInstFlushCache(void *Addr, uint32_t NewInstruction) {
+ asm volatile("icinva(%[inst_addr])\n\t"
+ "isync\n\t"
+ "memw(%[inst_addr]) = %[new_inst]\n\t"
+ "dccleaninva(%[inst_addr])\n\t"
+ "syncht\n\t"
+ :
+ : [ inst_addr ] "r"(Addr), [ new_inst ] "r"(NewInstruction)
+ : "memory");
+}
+
+inline static bool patchSled(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled,
+ void (*TracingHook)()) XRAY_NEVER_INSTRUMENT {
+ // When |Enable| == true,
+ // We replace the following compile-time stub (sled):
+ //
+ // .L_xray_sled_N:
+ // <xray_sled_base>:
+ // { jump .Ltmp0 }
+ // { nop
+ // nop
+ // nop
+ // nop }
+ // .Ltmp0:
+
+ // With the following runtime patch:
+ //
+ // xray_sled_n (32-bit):
+ //
+ // <xray_sled_n>:
+ // { immext(#...) // upper 26-bits of func id
+ // r7 = ##... // lower 6-bits of func id
+ // immext(#...) // upper 26-bits of trampoline
+ // r6 = ##... } // lower 6 bits of trampoline
+ // { callr r6 }
+ //
+ // When |Enable|==false, we set back the first instruction in the sled to be
+ // { jump .Ltmp0 }
+
+ uint32_t *FirstAddress = reinterpret_cast<uint32_t *>(Sled.address());
+ if (Enable) {
+ uint32_t *CurAddress = FirstAddress + 1;
+ *CurAddress = encodeExtendedTransferImmediate(FuncId, RN_R7);
+ CurAddress++;
+ *CurAddress = encodeConstantExtender(reinterpret_cast<uint32_t>(TracingHook));
+ CurAddress++;
+ *CurAddress =
+ encodeExtendedTransferImmediate(reinterpret_cast<uint32_t>(TracingHook), RN_R6, true);
+ CurAddress++;
+
+ *CurAddress = uint32_t(PO_CALLR_R6);
+
+ WriteInstFlushCache(FirstAddress, uint32_t(encodeConstantExtender(FuncId)));
+ } else {
+ WriteInstFlushCache(FirstAddress, uint32_t(PatchOpcodes::PO_JUMPI_14));
+ }
+ return true;
+}
+
+bool patchFunctionEntry(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled,
+ void (*Trampoline)()) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, Trampoline);
+}
+
+bool patchFunctionExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionExit);
+}
+
+bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionExit);
+}
+
+bool patchCustomEvent(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // FIXME: Implement in hexagon?
+ return false;
+}
+
+bool patchTypedEvent(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // FIXME: Implement in hexagon?
+ return false;
+}
+
+} // namespace __xray
+
+extern "C" void __xray_ArgLoggerEntry() XRAY_NEVER_INSTRUMENT {
+ // FIXME: this will have to be implemented in the trampoline assembly file
+}
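The sled patcher above materializes a 32-bit value (the function id or the trampoline address) with a two-word Hexagon sequence: a constant-extender word that carries the upper 26 bits and an extended register transfer that supplies the remaining 6 bits. A host-side sketch of just that split and its round-trip (encoding arithmetic only, not the full instruction words):

#include <cassert>
#include <cstdint>

// Split a 32-bit immediate the way the sled patcher does: the constant
// extender carries the upper 26 bits, the extended transfer the low 6 bits.
struct SplitImm {
  uint32_t extender_bits;  // imm >> 6, the 26 bits held by the extender
  uint32_t transfer_bits;  // imm & 0x3f, the 6 bits held by the transfer
};

SplitImm SplitImmediate(uint32_t imm) {
  return SplitImm{imm >> 6, imm & 0x3fu};
}

uint32_t JoinImmediate(SplitImm s) {
  return (s.extender_bits << 6) | s.transfer_bits;
}

int main() {
  uint32_t func_id = 0xdeadbeef;
  SplitImm s = SplitImmediate(func_id);
  assert(JoinImmediate(s) == func_id);  // lossless for any 32-bit value
}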
diff --git a/compiler-rt/lib/xray/xray_interface.cpp b/compiler-rt/lib/xray/xray_interface.cpp
index ddf184c9b857..73e67618c9d5 100644
--- a/compiler-rt/lib/xray/xray_interface.cpp
+++ b/compiler-rt/lib/xray/xray_interface.cpp
@@ -14,7 +14,7 @@
#include "xray_interface_internal.h"
-#include <cstdint>
+#include <cinttypes>
#include <cstdio>
#include <errno.h>
#include <limits>
@@ -52,6 +52,8 @@ static const int16_t cSledLength = 48;
static const int16_t cSledLength = 64;
#elif defined(__powerpc64__)
static const int16_t cSledLength = 8;
+#elif defined(__hexagon__)
+static const int16_t cSledLength = 20;
#else
#error "Unsupported CPU Architecture"
#endif /* CPU architecture */
@@ -169,7 +171,8 @@ bool patchSled(const XRaySledEntry &Sled, bool Enable,
Success = patchTypedEvent(Enable, FuncId, Sled);
break;
default:
- Report("Unsupported sled kind '%d' @%04x\n", Sled.Address, int(Sled.Kind));
+ Report("Unsupported sled kind '%" PRIu64 "' @%04x\n", Sled.Address,
+ int(Sled.Kind));
return false;
}
return Success;
@@ -305,7 +308,7 @@ XRayPatchingStatus controlPatching(bool Enable) XRAY_NEVER_INSTRUMENT {
? flags()->xray_page_size_override
: GetPageSizeCached();
if ((PageSize == 0) || ((PageSize & (PageSize - 1)) != 0)) {
- Report("System page size is not a power of two: %lld\n", PageSize);
+ Report("System page size is not a power of two: %zu\n", PageSize);
return XRayPatchingStatus::FAILED;
}
@@ -356,7 +359,7 @@ XRayPatchingStatus mprotectAndPatchFunction(int32_t FuncId,
? flags()->xray_page_size_override
: GetPageSizeCached();
if ((PageSize == 0) || ((PageSize & (PageSize - 1)) != 0)) {
- Report("Provided page size is not a power of two: %lld\n", PageSize);
+ Report("Provided page size is not a power of two: %zu\n", PageSize);
return XRayPatchingStatus::FAILED;
}
diff --git a/compiler-rt/lib/xray/xray_trampoline_hexagon.S b/compiler-rt/lib/xray/xray_trampoline_hexagon.S
new file mode 100644
index 000000000000..c87ec4bed1f9
--- /dev/null
+++ b/compiler-rt/lib/xray/xray_trampoline_hexagon.S
@@ -0,0 +1,99 @@
+//===-- xray_trampoline_hexagon.s -------------------------------*- ASM -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// This implements the hexagon-specific assembler for the trampolines.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../builtins/assembly.h"
+#include "../sanitizer_common/sanitizer_asm.h"
+
+.macro SAVE_REGISTERS
+memw(sp+#0)=r0
+memw(sp+#4)=r1
+memw(sp+#8)=r2
+memw(sp+#12)=r3
+memw(sp+#16)=r4
+.endm
+.macro RESTORE_REGISTERS
+r0=memw(sp+#0)
+r1=memw(sp+#4)
+r2=memw(sp+#8)
+r3=memw(sp+#12)
+r4=memw(sp+#16)
+.endm
+
+.macro CALL_PATCHED_FUNC entry_type
+ // if (xray::XRayPatchedFunctionE != NULL)
+ // xray::XRayPatchedFunctionE(FuncType);
+
+ r8 = #ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)
+
+ // The patched sled puts the function type
+ // into r6. Move it into r0 to pass it to
+ // the patched function.
+ { r0 = r6
+ r1 = \entry_type
+ p0 = !cmp.eq(r8, #0)
+ if (p0) callr r8 }
+.endm
+
+ .text
+ .globl ASM_SYMBOL(__xray_FunctionEntry)
+ ASM_HIDDEN(__xray_FunctionEntry)
+ ASM_TYPE_FUNCTION(__xray_FunctionEntry)
+# LLVM-MCA-BEGIN __xray_FunctionEntry
+ASM_SYMBOL(__xray_FunctionEntry):
+ CFI_STARTPROC
+ SAVE_REGISTERS
+
+ CALL_PATCHED_FUNC #0 // XRayEntryType::ENTRY
+.Ltmp0:
+ RESTORE_REGISTERS
+ // return
+# LLVM-MCA-END
+ ASM_SIZE(__xray_FunctionEntry)
+ CFI_ENDPROC
+
+
+ .globl ASM_SYMBOL(__xray_FunctionExit)
+ ASM_HIDDEN(__xray_FunctionExit)
+ ASM_TYPE_FUNCTION(__xray_FunctionExit)
+# LLVM-MCA-BEGIN __xray_FunctionExit
+ASM_SYMBOL(__xray_FunctionExit):
+ CFI_STARTPROC
+ SAVE_REGISTERS
+
+ CALL_PATCHED_FUNC #1 // XRayEntryType::EXIT
+.Ltmp1:
+ RESTORE_REGISTERS
+ // return
+ jumpr r31
+# LLVM-MCA-END
+ ASM_SIZE(__xray_FunctionExit)
+ CFI_ENDPROC
+
+
+ .globl ASM_SYMBOL(__xray_FunctionTailExit)
+ ASM_HIDDEN(__xray_FunctionTailExit)
+ ASM_TYPE_FUNCTION(__xray_FunctionTailExit)
+# LLVM-MCA-BEGIN __xray_FunctionTailExit
+ASM_SYMBOL(__xray_FunctionTailExit):
+ CFI_STARTPROC
+ SAVE_REGISTERS
+
+ CALL_PATCHED_FUNC #2 // XRayEntryType::TAIL
+.Ltmp2:
+ RESTORE_REGISTERS
+ // return
+ jumpr r31
+# LLVM-MCA-END
+ ASM_SIZE(__xray_FunctionTailExit)
+ CFI_ENDPROC
diff --git a/compiler-rt/lib/xray/xray_tsc.h b/compiler-rt/lib/xray/xray_tsc.h
index bd7e1911abb3..58347dca5f7a 100644
--- a/compiler-rt/lib/xray/xray_tsc.h
+++ b/compiler-rt/lib/xray/xray_tsc.h
@@ -42,7 +42,8 @@ inline uint64_t getTSCFrequency() XRAY_NEVER_INSTRUMENT {
#include "xray_x86_64.inc"
#elif defined(__powerpc64__)
#include "xray_powerpc64.inc"
-#elif defined(__arm__) || defined(__aarch64__) || defined(__mips__)
+#elif defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ defined(__hexagon__)
// Emulated TSC.
// There is no instruction like RDTSCP in user mode on ARM. ARM's CP15 does
// not have a constant frequency like TSC on x86(_64), it may go faster