author    Dimitry Andric <dim@FreeBSD.org>    2017-01-02 19:18:27 +0000
committer Dimitry Andric <dim@FreeBSD.org>    2017-01-02 19:18:27 +0000
commit    316d58822dada9440bd06ecfc758dcc2364d617c (patch)
tree      fe72ec2e6ce9a360dda74d9d57f7acdb0e3c39d6 /lib
parent    0230fcf22fe7d19f03d981c9c2c59a3db0b72ea5 (diff)
Diffstat (limited to 'lib')
-rw-r--r--lib/CMakeLists.txt79
-rw-r--r--lib/Makefile.mk13
-rw-r--r--lib/asan/CMakeLists.txt24
-rw-r--r--lib/asan/asan_activation.cc6
-rw-r--r--lib/asan/asan_activation_flags.inc1
-rw-r--r--lib/asan/asan_allocator.cc62
-rw-r--r--lib/asan/asan_allocator.h30
-rw-r--r--lib/asan/asan_debugging.cc125
-rw-r--r--lib/asan/asan_descriptions.cc486
-rw-r--r--lib/asan/asan_descriptions.h253
-rw-r--r--lib/asan/asan_errors.cc503
-rw-r--r--lib/asan/asan_errors.h390
-rw-r--r--lib/asan/asan_fake_stack.cc2
-rw-r--r--lib/asan/asan_fake_stack.h6
-rw-r--r--lib/asan/asan_flags.cc12
-rw-r--r--lib/asan/asan_flags.inc11
-rw-r--r--lib/asan/asan_globals.cc79
-rw-r--r--lib/asan/asan_globals_win.cc62
-rw-r--r--lib/asan/asan_globals_win.h34
-rw-r--r--lib/asan/asan_interceptors.cc151
-rw-r--r--lib/asan/asan_interface_internal.h19
-rw-r--r--lib/asan/asan_internal.h18
-rw-r--r--lib/asan/asan_mac.cc9
-rw-r--r--lib/asan/asan_malloc_linux.cc8
-rw-r--r--lib/asan/asan_malloc_win.cc6
-rw-r--r--lib/asan/asan_mapping.h39
-rw-r--r--lib/asan/asan_memory_profile.cc2
-rw-r--r--lib/asan/asan_new_delete.cc62
-rw-r--r--lib/asan/asan_poisoning.cc47
-rw-r--r--lib/asan/asan_poisoning.h4
-rw-r--r--lib/asan/asan_posix.cc19
-rw-r--r--lib/asan/asan_report.cc983
-rw-r--r--lib/asan/asan_report.h26
-rw-r--r--lib/asan/asan_rtl.cc157
-rw-r--r--lib/asan/asan_scariness_score.h21
-rw-r--r--lib/asan/asan_thread.cc18
-rw-r--r--lib/asan/asan_thread.h3
-rw-r--r--lib/asan/asan_win.cc148
-rw-r--r--lib/asan/asan_win_dll_thunk.cc72
-rw-r--r--lib/asan/asan_win_dynamic_runtime_thunk.cc43
-rwxr-xr-xlib/asan/scripts/asan_device_setup31
-rw-r--r--lib/asan/tests/CMakeLists.txt33
-rw-r--r--lib/asan/tests/asan_asm_test.cc26
-rw-r--r--lib/asan/tests/asan_interface_test.cc4
-rw-r--r--lib/asan/tests/asan_internal_interface_test.cc36
-rw-r--r--lib/asan/tests/asan_noinst_test.cc9
-rw-r--r--lib/asan/tests/asan_str_test.cc12
-rw-r--r--lib/asan/tests/asan_test.cc2
-rw-r--r--lib/asan/tests/asan_test_main.cc11
-rw-r--r--lib/asan/tests/asan_test_utils.h4
-rw-r--r--lib/builtins/CMakeLists.txt167
-rw-r--r--lib/builtins/Makefile.mk25
-rw-r--r--lib/builtins/arm/Makefile.mk20
-rw-r--r--lib/builtins/arm/aeabi_idivmod.S18
-rw-r--r--lib/builtins/arm/aeabi_ldivmod.S12
-rw-r--r--lib/builtins/arm/aeabi_uidivmod.S24
-rw-r--r--lib/builtins/arm/aeabi_uldivmod.S12
-rw-r--r--lib/builtins/arm/comparesf2.S3
-rw-r--r--lib/builtins/arm/divsi3.S20
-rw-r--r--lib/builtins/arm/udivsi3.S133
-rw-r--r--lib/builtins/arm64/Makefile.mk20
-rw-r--r--lib/builtins/armv6m/Makefile.mk20
-rw-r--r--lib/builtins/assembly.h3
-rw-r--r--lib/builtins/atomic.c11
-rw-r--r--lib/builtins/clear_cache.c4
-rw-r--r--lib/builtins/i386/Makefile.mk20
-rw-r--r--lib/builtins/int_lib.h6
-rw-r--r--lib/builtins/mingw_fixfloat.c36
-rw-r--r--lib/builtins/ppc/Makefile.mk20
-rw-r--r--lib/builtins/x86_64/Makefile.mk20
-rw-r--r--lib/cfi/CMakeLists.txt4
-rw-r--r--lib/cfi/cfi.cc2
-rw-r--r--lib/dfsan/CMakeLists.txt5
-rw-r--r--lib/dfsan/dfsan.cc25
-rw-r--r--lib/dfsan/dfsan.h3
-rw-r--r--lib/dfsan/dfsan_interceptors.cc2
-rw-r--r--lib/dfsan/dfsan_platform.h17
-rw-r--r--lib/dfsan/done_abilist.txt8
-rw-r--r--lib/esan/CMakeLists.txt5
-rw-r--r--lib/esan/cache_frag.cpp4
-rw-r--r--lib/esan/esan.cpp8
-rw-r--r--lib/esan/esan.h1
-rw-r--r--lib/esan/esan_flags.cpp2
-rw-r--r--lib/esan/esan_hashtable.h381
-rw-r--r--lib/esan/esan_interceptors.cpp42
-rw-r--r--lib/esan/esan_interface_internal.h3
-rw-r--r--lib/esan/esan_linux.cpp2
-rw-r--r--lib/esan/esan_shadow.h131
-rw-r--r--lib/interception/interception.h6
-rw-r--r--lib/interception/interception_win.cc138
-rw-r--r--lib/interception/tests/CMakeLists.txt1
-rw-r--r--lib/interception/tests/interception_linux_test.cc7
-rw-r--r--lib/interception/tests/interception_win_test.cc39
-rw-r--r--lib/lsan/CMakeLists.txt7
-rw-r--r--lib/lsan/lsan_allocator.cc19
-rw-r--r--lib/lsan/lsan_common.cc3
-rw-r--r--lib/lsan/lsan_common_linux.cc2
-rw-r--r--lib/lsan/lsan_thread.cc2
-rw-r--r--lib/msan/CMakeLists.txt4
-rw-r--r--lib/msan/msan.h97
-rw-r--r--lib/msan/msan_allocator.cc48
-rw-r--r--lib/msan/msan_interceptors.cc86
-rw-r--r--lib/msan/msan_interface_internal.h10
-rw-r--r--lib/msan/msan_linux.cc3
-rw-r--r--lib/profile/CMakeLists.txt5
-rw-r--r--lib/profile/GCDAProfiling.c4
-rw-r--r--lib/profile/InstrProfData.inc11
-rw-r--r--lib/profile/InstrProfiling.c16
-rw-r--r--lib/profile/InstrProfiling.h63
-rw-r--r--lib/profile/InstrProfilingFile.c162
-rw-r--r--lib/profile/InstrProfilingInternal.h18
-rw-r--r--lib/profile/InstrProfilingPort.h6
-rw-r--r--lib/profile/InstrProfilingRuntime.cc3
-rw-r--r--lib/profile/InstrProfilingUtil.c39
-rw-r--r--lib/profile/InstrProfilingUtil.h21
-rw-r--r--lib/profile/InstrProfilingValue.c2
-rw-r--r--lib/profile/WindowsMMap.c3
-rw-r--r--lib/safestack/CMakeLists.txt4
-rw-r--r--lib/safestack/safestack.cc2
-rw-r--r--lib/sanitizer_common/.clang-tidy4
-rw-r--r--lib/sanitizer_common/CMakeLists.txt21
-rw-r--r--lib/sanitizer_common/sanitizer_addrhashmap.h12
-rw-r--r--lib/sanitizer_common/sanitizer_allocator.cc34
-rw-r--r--lib/sanitizer_common/sanitizer_allocator.h1452
-rw-r--r--lib/sanitizer_common/sanitizer_allocator_bytemap.h103
-rw-r--r--lib/sanitizer_common/sanitizer_allocator_combined.h233
-rw-r--r--lib/sanitizer_common/sanitizer_allocator_interface.h4
-rw-r--r--lib/sanitizer_common/sanitizer_allocator_internal.h4
-rw-r--r--lib/sanitizer_common/sanitizer_allocator_local_cache.h249
-rw-r--r--lib/sanitizer_common/sanitizer_allocator_primary32.h310
-rw-r--r--lib/sanitizer_common/sanitizer_allocator_primary64.h522
-rw-r--r--lib/sanitizer_common/sanitizer_allocator_secondary.h282
-rw-r--r--lib/sanitizer_common/sanitizer_allocator_size_class_map.h217
-rw-r--r--lib/sanitizer_common/sanitizer_allocator_stats.h107
-rw-r--r--lib/sanitizer_common/sanitizer_atomic.h5
-rw-r--r--lib/sanitizer_common/sanitizer_common.cc25
-rw-r--r--lib/sanitizer_common/sanitizer_common.h96
-rw-r--r--lib/sanitizer_common/sanitizer_common_interceptors.inc330
-rw-r--r--lib/sanitizer_common/sanitizer_common_interceptors_format.inc7
-rwxr-xr-xlib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc3
-rw-r--r--lib/sanitizer_common/sanitizer_common_libcdep.cc22
-rw-r--r--lib/sanitizer_common/sanitizer_common_nolibc.cc8
-rw-r--r--lib/sanitizer_common/sanitizer_coverage_libcdep.cc45
-rw-r--r--lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc165
-rw-r--r--lib/sanitizer_common/sanitizer_dbghelp.h42
-rw-r--r--lib/sanitizer_common/sanitizer_flags.cc5
-rw-r--r--lib/sanitizer_common/sanitizer_flags.inc21
-rw-r--r--lib/sanitizer_common/sanitizer_interface_internal.h11
-rw-r--r--lib/sanitizer_common/sanitizer_internal_defs.h23
-rw-r--r--lib/sanitizer_common/sanitizer_libc.h10
-rw-r--r--lib/sanitizer_common/sanitizer_libignore.cc31
-rw-r--r--lib/sanitizer_common/sanitizer_linux.cc24
-rw-r--r--lib/sanitizer_common/sanitizer_linux.h2
-rw-r--r--lib/sanitizer_common/sanitizer_linux_libcdep.cc9
-rw-r--r--lib/sanitizer_common/sanitizer_linux_mips64.S23
-rw-r--r--lib/sanitizer_common/sanitizer_mac.cc126
-rw-r--r--lib/sanitizer_common/sanitizer_mac.h6
-rw-r--r--lib/sanitizer_common/sanitizer_malloc_mac.inc36
-rw-r--r--lib/sanitizer_common/sanitizer_platform.h8
-rw-r--r--lib/sanitizer_common/sanitizer_platform_interceptors.h12
-rw-r--r--lib/sanitizer_common/sanitizer_platform_limits_linux.cc6
-rw-r--r--lib/sanitizer_common/sanitizer_platform_limits_posix.cc13
-rw-r--r--lib/sanitizer_common/sanitizer_platform_limits_posix.h93
-rw-r--r--lib/sanitizer_common/sanitizer_posix_libcdep.cc25
-rw-r--r--lib/sanitizer_common/sanitizer_printf.cc2
-rw-r--r--lib/sanitizer_common/sanitizer_procmaps.h13
-rw-r--r--lib/sanitizer_common/sanitizer_procmaps_freebsd.cc4
-rw-r--r--lib/sanitizer_common/sanitizer_procmaps_linux.cc4
-rw-r--r--lib/sanitizer_common/sanitizer_procmaps_mac.cc91
-rw-r--r--lib/sanitizer_common/sanitizer_quarantine.h1
-rw-r--r--lib/sanitizer_common/sanitizer_stackdepot.cc6
-rw-r--r--lib/sanitizer_common/sanitizer_stacktrace_libcdep.cc57
-rw-r--r--lib/sanitizer_common/sanitizer_stacktrace_printer.cc29
-rw-r--r--lib/sanitizer_common/sanitizer_stacktrace_printer.h7
-rw-r--r--lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc8
-rw-r--r--lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc32
-rw-r--r--lib/sanitizer_common/sanitizer_symbolizer_mac.cc7
-rw-r--r--lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc113
-rw-r--r--lib/sanitizer_common/sanitizer_symbolizer_win.cc40
-rw-r--r--lib/sanitizer_common/sanitizer_thread_registry.cc2
-rw-r--r--lib/sanitizer_common/sanitizer_win.cc92
-rwxr-xr-xlib/sanitizer_common/scripts/gen_dynamic_list.py8
-rw-r--r--lib/sanitizer_common/symbolizer/sanitizer_symbolize.cc72
-rw-r--r--lib/sanitizer_common/symbolizer/sanitizer_wrappers.cc175
-rwxr-xr-xlib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh187
-rw-r--r--lib/sanitizer_common/symbolizer/scripts/global_symbols.txt137
-rw-r--r--lib/sanitizer_common/tests/CMakeLists.txt5
-rw-r--r--lib/sanitizer_common/tests/malloc_stress_transfer_test.cc37
-rw-r--r--lib/sanitizer_common/tests/sanitizer_allocator_test.cc267
-rw-r--r--lib/sanitizer_common/tests/sanitizer_allocator_testlib.cc37
-rw-r--r--lib/sanitizer_common/tests/sanitizer_common_test.cc53
-rw-r--r--lib/sanitizer_common/tests/sanitizer_format_interceptor_test.cc4
-rw-r--r--lib/sanitizer_common/tests/sanitizer_libc_test.cc2
-rw-r--r--lib/sanitizer_common/tests/sanitizer_nolibc_test_main.cc2
-rw-r--r--lib/sanitizer_common/tests/sanitizer_procmaps_test.cc21
-rw-r--r--lib/sanitizer_common/tests/sanitizer_test_main.cc2
-rw-r--r--lib/scudo/CMakeLists.txt8
-rw-r--r--lib/scudo/scudo_allocator.cpp407
-rw-r--r--lib/scudo/scudo_allocator.h49
-rw-r--r--lib/scudo/scudo_allocator_secondary.h188
-rw-r--r--lib/scudo/scudo_flags.cpp26
-rw-r--r--lib/scudo/scudo_flags.h2
-rw-r--r--lib/scudo/scudo_interceptors.cpp2
-rw-r--r--lib/scudo/scudo_new_delete.cpp2
-rw-r--r--lib/scudo/scudo_termination.cpp19
-rw-r--r--lib/scudo/scudo_utils.cpp158
-rw-r--r--lib/scudo/scudo_utils.h26
-rw-r--r--lib/stats/stats_client.cc1
-rw-r--r--lib/tsan/CMakeLists.txt12
-rw-r--r--lib/tsan/go/build.bat2
-rwxr-xr-xlib/tsan/go/buildgo.sh2
-rw-r--r--lib/tsan/go/tsan_go.cc5
-rw-r--r--lib/tsan/rtl/tsan_clock.cc2
-rw-r--r--lib/tsan/rtl/tsan_debugging.cc77
-rw-r--r--lib/tsan/rtl/tsan_defs.h14
-rw-r--r--lib/tsan/rtl/tsan_flags.cc2
-rw-r--r--lib/tsan/rtl/tsan_flags.inc3
-rw-r--r--lib/tsan/rtl/tsan_interceptors.cc43
-rw-r--r--lib/tsan/rtl/tsan_interceptors_mac.cc68
-rw-r--r--lib/tsan/rtl/tsan_interface.h19
-rw-r--r--lib/tsan/rtl/tsan_interface_atomic.cc20
-rw-r--r--lib/tsan/rtl/tsan_interface_inl.h8
-rw-r--r--lib/tsan/rtl/tsan_interface_java.cc17
-rw-r--r--lib/tsan/rtl/tsan_interface_java.h4
-rw-r--r--lib/tsan/rtl/tsan_libdispatch_mac.cc75
-rw-r--r--lib/tsan/rtl/tsan_mman.cc29
-rw-r--r--lib/tsan/rtl/tsan_mutexset.h4
-rw-r--r--lib/tsan/rtl/tsan_platform.h223
-rw-r--r--lib/tsan/rtl/tsan_platform_linux.cc31
-rw-r--r--lib/tsan/rtl/tsan_platform_mac.cc82
-rw-r--r--lib/tsan/rtl/tsan_platform_posix.cc2
-rw-r--r--lib/tsan/rtl/tsan_platform_windows.cc4
-rw-r--r--lib/tsan/rtl/tsan_report.cc4
-rw-r--r--lib/tsan/rtl/tsan_rtl.cc93
-rw-r--r--lib/tsan/rtl/tsan_rtl.h37
-rw-r--r--lib/tsan/rtl/tsan_rtl_aarch64.S76
-rw-r--r--lib/tsan/rtl/tsan_rtl_mips64.S214
-rw-r--r--lib/tsan/rtl/tsan_rtl_mutex.cc8
-rw-r--r--lib/tsan/rtl/tsan_rtl_proc.cc4
-rw-r--r--lib/tsan/rtl/tsan_rtl_report.cc18
-rw-r--r--lib/tsan/rtl/tsan_rtl_thread.cc25
-rw-r--r--lib/tsan/rtl/tsan_suppressions.cc4
-rw-r--r--lib/tsan/rtl/tsan_sync.cc4
-rw-r--r--lib/tsan/rtl/tsan_sync.h8
-rw-r--r--lib/tsan/rtl/tsan_trace.h4
-rw-r--r--lib/tsan/tests/CMakeLists.txt6
-rw-r--r--lib/tsan/tests/rtl/tsan_posix.cc69
-rw-r--r--lib/tsan/tests/rtl/tsan_posix_util.h77
-rw-r--r--lib/tsan/tests/rtl/tsan_test_util_posix.cc53
-rw-r--r--lib/tsan/tests/unit/tsan_mman_test.cc8
-rw-r--r--lib/ubsan/CMakeLists.txt22
-rw-r--r--lib/ubsan/ubsan_init.cc1
-rw-r--r--lib/ubsan/ubsan_type_hash_itanium.cc14
-rw-r--r--lib/xray/CMakeLists.txt79
-rw-r--r--lib/xray/tests/CMakeLists.txt59
-rw-r--r--lib/xray/tests/unit/CMakeLists.txt2
-rw-r--r--lib/xray/tests/unit/buffer_queue_test.cc81
-rw-r--r--lib/xray/tests/unit/xray_unit_test_main.cc18
-rw-r--r--lib/xray/xray_AArch64.cc119
-rw-r--r--lib/xray/xray_arm.cc156
-rw-r--r--lib/xray/xray_buffer_queue.cc65
-rw-r--r--lib/xray/xray_buffer_queue.h86
-rw-r--r--lib/xray/xray_defs.h22
-rw-r--r--lib/xray/xray_emulate_tsc.h40
-rw-r--r--lib/xray/xray_flags.cc62
-rw-r--r--lib/xray/xray_flags.h37
-rw-r--r--lib/xray/xray_flags.inc22
-rw-r--r--lib/xray/xray_init.cc70
-rw-r--r--lib/xray/xray_inmemory_log.cc185
-rw-r--r--lib/xray/xray_interface.cc207
-rw-r--r--lib/xray/xray_interface_internal.h69
-rw-r--r--lib/xray/xray_trampoline_AArch64.S89
-rw-r--r--lib/xray/xray_trampoline_arm.S65
-rw-r--r--lib/xray/xray_trampoline_x86_64.S147
-rw-r--r--lib/xray/xray_x86_64.cc202
-rw-r--r--lib/xray/xray_x86_64.h32
276 files changed, 12510 insertions, 4363 deletions
diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt
index a2b55c4e35c5..4ab1e933af3a 100644
--- a/lib/CMakeLists.txt
+++ b/lib/CMakeLists.txt
@@ -4,56 +4,59 @@
include(AddCompilerRT)
include(SanitizerUtils)
+# Hoist the building of sanitizer_common on whether we're building either the
+# sanitizers or xray (or both).
+#
+#TODO: Refactor sanitizer_common into smaller pieces (e.g. flag parsing, utils).
+if (COMPILER_RT_HAS_SANITIZER_COMMON AND
+ (COMPILER_RT_BUILD_SANITIZERS OR COMPILER_RT_BUILD_XRAY))
+ add_subdirectory(sanitizer_common)
+endif()
+
if(COMPILER_RT_BUILD_BUILTINS)
add_subdirectory(builtins)
endif()
-if(COMPILER_RT_BUILD_SANITIZERS)
- if(COMPILER_RT_HAS_INTERCEPTION)
- add_subdirectory(interception)
+function(compiler_rt_build_runtime runtime)
+ string(TOUPPER ${runtime} runtime_uppercase)
+ if(COMPILER_RT_HAS_${runtime_uppercase})
+ add_subdirectory(${runtime})
+ foreach(directory ${ARGN})
+ add_subdirectory(${directory})
+ endforeach()
endif()
+endfunction()
+
+function(compiler_rt_build_sanitizer sanitizer)
+ string(TOUPPER ${sanitizer} sanitizer_uppercase)
+ string(TOLOWER ${sanitizer} sanitizer_lowercase)
+ list(FIND COMPILER_RT_SANITIZERS_TO_BUILD ${sanitizer_lowercase} result)
+ if(NOT ${result} EQUAL -1)
+ compiler_rt_build_runtime(${sanitizer} ${ARGN})
+ endif()
+endfunction()
+
+if(COMPILER_RT_BUILD_SANITIZERS)
+ compiler_rt_build_runtime(interception)
if(COMPILER_RT_HAS_SANITIZER_COMMON)
- add_subdirectory(sanitizer_common)
add_subdirectory(stats)
add_subdirectory(lsan)
add_subdirectory(ubsan)
endif()
- if(COMPILER_RT_HAS_ASAN)
- add_subdirectory(asan)
- endif()
-
- if(COMPILER_RT_HAS_DFSAN)
- add_subdirectory(dfsan)
- endif()
-
- if(COMPILER_RT_HAS_MSAN)
- add_subdirectory(msan)
- endif()
+ compiler_rt_build_sanitizer(asan)
+ compiler_rt_build_sanitizer(dfsan)
+ compiler_rt_build_sanitizer(msan)
+ compiler_rt_build_sanitizer(tsan tsan/dd)
+ compiler_rt_build_sanitizer(safestack)
+ compiler_rt_build_sanitizer(cfi)
+ compiler_rt_build_sanitizer(esan)
+ compiler_rt_build_sanitizer(scudo)
- if(COMPILER_RT_HAS_PROFILE)
- add_subdirectory(profile)
- endif()
-
- if(COMPILER_RT_HAS_TSAN)
- add_subdirectory(tsan)
- add_subdirectory(tsan/dd)
- endif()
-
- if(COMPILER_RT_HAS_SAFESTACK)
- add_subdirectory(safestack)
- endif()
-
- if(COMPILER_RT_HAS_CFI)
- add_subdirectory(cfi)
- endif()
-
- if(COMPILER_RT_HAS_ESAN)
- add_subdirectory(esan)
- endif()
+ compiler_rt_build_runtime(profile)
+endif()
- if(COMPILER_RT_HAS_SCUDO)
- add_subdirectory(scudo)
- endif()
+if(COMPILER_RT_BUILD_XRAY)
+ compiler_rt_build_runtime(xray)
endif()
diff --git a/lib/Makefile.mk b/lib/Makefile.mk
deleted file mode 100644
index b1540bdce705..000000000000
--- a/lib/Makefile.mk
+++ /dev/null
@@ -1,13 +0,0 @@
-#===- lib/Makefile.mk --------------------------------------*- Makefile -*--===#
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-#===------------------------------------------------------------------------===#
-
-SubDirs :=
-
-# Add submodules.
-SubDirs += builtins
diff --git a/lib/asan/CMakeLists.txt b/lib/asan/CMakeLists.txt
index b7e41fc7021c..1258ef6165b4 100644
--- a/lib/asan/CMakeLists.txt
+++ b/lib/asan/CMakeLists.txt
@@ -4,9 +4,12 @@ set(ASAN_SOURCES
asan_allocator.cc
asan_activation.cc
asan_debugging.cc
+ asan_descriptions.cc
+ asan_errors.cc
asan_fake_stack.cc
asan_flags.cc
asan_globals.cc
+ asan_globals_win.cc
asan_interceptors.cc
asan_linux.cc
asan_mac.cc
@@ -35,14 +38,9 @@ include_directories(..)
set(ASAN_CFLAGS ${SANITIZER_COMMON_CFLAGS})
append_rtti_flag(OFF ASAN_CFLAGS)
-set(ASAN_COMMON_DEFINITIONS
- ASAN_HAS_EXCEPTIONS=1)
-
set(ASAN_DYNAMIC_LINK_FLAGS)
if(ANDROID)
- list(APPEND ASAN_COMMON_DEFINITIONS
- ASAN_LOW_MEMORY=1)
# On Android, -z global does not do what it is documented to do.
# On Android, -z global moves the library ahead in the lookup order,
# placing it right after the LD_PRELOADs. This is used to compensate for the fact
@@ -105,8 +103,7 @@ if(NOT APPLE)
endif()
# Build ASan runtimes shipped with Clang.
-add_custom_target(asan)
-set_target_properties(asan PROPERTIES FOLDER "Compiler-RT Misc")
+add_compiler_rt_component(asan)
if(APPLE)
add_compiler_rt_runtime(clang_rt.asan
@@ -207,15 +204,25 @@ else()
STATIC
ARCHS ${arch}
SOURCES asan_win_dll_thunk.cc
+ asan_globals_win.cc
$<TARGET_OBJECTS:RTInterception.${arch}>
CFLAGS ${ASAN_CFLAGS} -DASAN_DLL_THUNK
DEFS ${ASAN_COMMON_DEFINITIONS}
PARENT_TARGET asan)
+
+ set(DYNAMIC_RUNTIME_THUNK_CFLAGS "-DASAN_DYNAMIC_RUNTIME_THUNK")
+ if(MSVC)
+ list(APPEND DYNAMIC_RUNTIME_THUNK_CFLAGS "-Zl")
+ elseif(CMAKE_C_COMPILER_ID MATCHES Clang)
+ list(APPEND DYNAMIC_RUNTIME_THUNK_CFLAGS "-nodefaultlibs")
+ endif()
+
add_compiler_rt_runtime(clang_rt.asan_dynamic_runtime_thunk
STATIC
ARCHS ${arch}
SOURCES asan_win_dynamic_runtime_thunk.cc
- CFLAGS ${ASAN_CFLAGS} -DASAN_DYNAMIC_RUNTIME_THUNK -Zl
+ asan_globals_win.cc
+ CFLAGS ${ASAN_CFLAGS} ${DYNAMIC_RUNTIME_THUNK_CFLAGS}
DEFS ${ASAN_COMMON_DEFINITIONS}
PARENT_TARGET asan)
endif()
@@ -223,7 +230,6 @@ else()
endif()
add_compiler_rt_resource_file(asan_blacklist asan_blacklist.txt asan)
-add_dependencies(compiler-rt asan)
add_subdirectory(scripts)
diff --git a/lib/asan/asan_activation.cc b/lib/asan/asan_activation.cc
index a5ace85038a6..bb41a0eb559e 100644
--- a/lib/asan/asan_activation.cc
+++ b/lib/asan/asan_activation.cc
@@ -79,11 +79,13 @@ static struct AsanDeactivatedFlags {
Report(
"quarantine_size_mb %d, max_redzone %d, poison_heap %d, "
"malloc_context_size %d, alloc_dealloc_mismatch %d, "
- "allocator_may_return_null %d, coverage %d, coverage_dir %s\n",
+ "allocator_may_return_null %d, coverage %d, coverage_dir %s, "
+ "allocator_release_to_os_interval_ms %d\n",
allocator_options.quarantine_size_mb, allocator_options.max_redzone,
poison_heap, malloc_context_size,
allocator_options.alloc_dealloc_mismatch,
- allocator_options.may_return_null, coverage, coverage_dir);
+ allocator_options.may_return_null, coverage, coverage_dir,
+ allocator_options.release_to_os_interval_ms);
}
} asan_deactivated_flags;
diff --git a/lib/asan/asan_activation_flags.inc b/lib/asan/asan_activation_flags.inc
index d4c089ec6538..67440e6fde03 100644
--- a/lib/asan/asan_activation_flags.inc
+++ b/lib/asan/asan_activation_flags.inc
@@ -33,3 +33,4 @@ COMMON_ACTIVATION_FLAG(bool, coverage)
COMMON_ACTIVATION_FLAG(const char *, coverage_dir)
COMMON_ACTIVATION_FLAG(int, verbosity)
COMMON_ACTIVATION_FLAG(bool, help)
+COMMON_ACTIVATION_FLAG(s32, allocator_release_to_os_interval_ms)
diff --git a/lib/asan/asan_allocator.cc b/lib/asan/asan_allocator.cc
index 6a5d227ca54c..36bd04689dbf 100644
--- a/lib/asan/asan_allocator.cc
+++ b/lib/asan/asan_allocator.cc
@@ -207,25 +207,27 @@ QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
quarantine_size_mb = f->quarantine_size_mb;
+ thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
min_redzone = f->redzone;
max_redzone = f->max_redzone;
may_return_null = cf->allocator_may_return_null;
alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
+ release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
}
void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
f->quarantine_size_mb = quarantine_size_mb;
+ f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
f->redzone = min_redzone;
f->max_redzone = max_redzone;
cf->allocator_may_return_null = may_return_null;
f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
+ cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
}
struct Allocator {
static const uptr kMaxAllowedMallocSize =
FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);
- static const uptr kMaxThreadLocalQuarantine =
- FIRST_32_SECOND_64(1 << 18, 1 << 20);
AsanAllocator allocator;
AsanQuarantine quarantine;
@@ -254,7 +256,7 @@ struct Allocator {
void SharedInitCode(const AllocatorOptions &options) {
CheckOptions(options);
quarantine.Init((uptr)options.quarantine_size_mb << 20,
- kMaxThreadLocalQuarantine);
+ (uptr)options.thread_local_quarantine_size_kb << 10);
atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
memory_order_release);
atomic_store(&min_redzone, options.min_redzone, memory_order_release);
@@ -262,22 +264,59 @@ struct Allocator {
}
void Initialize(const AllocatorOptions &options) {
- allocator.Init(options.may_return_null);
+ allocator.Init(options.may_return_null, options.release_to_os_interval_ms);
SharedInitCode(options);
}
+ void RePoisonChunk(uptr chunk) {
+ // This could be a user-facing chunk (with redzones), or some internal
+ // housekeeping chunk, like TransferBatch. Start by assuming the former.
+ AsanChunk *ac = GetAsanChunk((void *)chunk);
+ uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
+ uptr beg = ac->Beg();
+ uptr end = ac->Beg() + ac->UsedSize(true);
+ uptr chunk_end = chunk + allocated_size;
+ if (chunk < beg && beg < end && end <= chunk_end) {
+ // Looks like a valid AsanChunk. Or maybe not. Be conservative and only
+ // poison the redzones.
+ PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
+ uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
+ FastPoisonShadowPartialRightRedzone(
+ end_aligned_down, end - end_aligned_down,
+ chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
+ } else {
+ // This can not be an AsanChunk. Poison everything. It may be reused as
+ // AsanChunk later.
+ PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
+ }
+ }
+
void ReInitialize(const AllocatorOptions &options) {
allocator.SetMayReturnNull(options.may_return_null);
+ allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
SharedInitCode(options);
+
+ // Poison all existing allocation's redzones.
+ if (CanPoisonMemory()) {
+ allocator.ForceLock();
+ allocator.ForEachChunk(
+ [](uptr chunk, void *alloc) {
+ ((Allocator *)alloc)->RePoisonChunk(chunk);
+ },
+ this);
+ allocator.ForceUnlock();
+ }
}
void GetOptions(AllocatorOptions *options) const {
options->quarantine_size_mb = quarantine.GetSize() >> 20;
+ options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
options->may_return_null = allocator.MayReturnNull();
options->alloc_dealloc_mismatch =
atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
+ options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
}
// -------------------- Helper methods. -------------------------
@@ -356,7 +395,7 @@ struct Allocator {
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
(void*)size);
- return allocator.ReturnNullOrDie();
+ return allocator.ReturnNullOrDieOnBadRequest();
}
AsanThread *t = GetCurrentThread();
@@ -373,8 +412,7 @@ struct Allocator {
allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
}
- if (!allocated)
- return allocator.ReturnNullOrDie();
+ if (!allocated) return allocator.ReturnNullOrDieOnOOM();
if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
// Heap poisoning is enabled, but the allocator provides an unpoisoned
@@ -530,7 +568,7 @@ struct Allocator {
if (delete_size && flags()->new_delete_type_mismatch &&
delete_size != m->UsedSize()) {
- ReportNewDeleteSizeMismatch(p, m->UsedSize(), delete_size, stack);
+ ReportNewDeleteSizeMismatch(p, delete_size, stack);
}
QuarantineChunk(m, ptr, stack, alloc_type);
@@ -563,7 +601,7 @@ struct Allocator {
void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb))
- return allocator.ReturnNullOrDie();
+ return allocator.ReturnNullOrDieOnBadRequest();
void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
// If the memory comes from the secondary allocator no need to clear it
// as it comes directly from mmap.
@@ -673,6 +711,9 @@ uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
+AllocType AsanChunkView::GetAllocType() {
+ return (AllocType)chunk_->alloc_type;
+}
static StackTrace GetStackTraceFromId(u32 id) {
CHECK(id);
@@ -707,6 +748,9 @@ void GetAllocatorOptions(AllocatorOptions *options) {
AsanChunkView FindHeapChunkByAddress(uptr addr) {
return instance.FindHeapChunkByAddress(addr);
}
+AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
+ return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
+}
void AsanThreadLocalMallocStorage::CommitBack() {
instance.CommitBack(this);
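
The allocator hunks above wire two new tunables through AllocatorOptions: thread_local_quarantine_size_kb (replacing the hard-coded kMaxThreadLocalQuarantine) and allocator_release_to_os_interval_ms. Below is a minimal sketch of how a program could pin these flags, assuming only the documented __asan_default_options() hook; the values chosen are arbitrary examples, not defaults taken from this commit.

#include <cstdlib>

// Hypothetical example: baking the new allocator tunables into a binary via
// the documented __asan_default_options() hook. The flag names come from the
// hunks above; the values here are arbitrary.
extern "C" const char *__asan_default_options() {
  return "quarantine_size_mb=64:"
         "thread_local_quarantine_size_kb=256:"
         "allocator_release_to_os_interval_ms=5000";
}

int main() {
  // Allocate and free something so the quarantine and release-to-OS paths run.
  void *p = std::malloc(1 << 20);
  std::free(p);
  return 0;
}

The same flag string can also be passed at run time through the ASAN_OPTIONS environment variable.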
diff --git a/lib/asan/asan_allocator.h b/lib/asan/asan_allocator.h
index 2f9f7aaf8316..51de67858721 100644
--- a/lib/asan/asan_allocator.h
+++ b/lib/asan/asan_allocator.h
@@ -33,10 +33,12 @@ struct AsanChunk;
struct AllocatorOptions {
u32 quarantine_size_mb;
+ u32 thread_local_quarantine_size_kb;
u16 min_redzone;
u16 max_redzone;
u8 may_return_null;
u8 alloc_dealloc_mismatch;
+ s32 release_to_os_interval_ms;
void SetFrom(const Flags *f, const CommonFlags *cf);
void CopyTo(Flags *f, CommonFlags *cf);
@@ -62,6 +64,7 @@ class AsanChunkView {
u32 GetFreeStackId();
StackTrace GetAllocStack();
StackTrace GetFreeStack();
+ AllocType GetAllocType();
bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) {
if (addr >= Beg() && (addr + access_size) <= End()) {
*offset = addr - Beg();
@@ -90,6 +93,7 @@ class AsanChunkView {
};
AsanChunkView FindHeapChunkByAddress(uptr address);
+AsanChunkView FindHeapChunkByAllocBeg(uptr address);
// List of AsanChunks with total size.
class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
@@ -117,18 +121,36 @@ struct AsanMapUnmapCallback {
# if defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
+typedef DefaultSizeClassMap SizeClassMap;
+# elif defined(__aarch64__) && SANITIZER_ANDROID
+const uptr kAllocatorSpace = 0x3000000000ULL;
+const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
+typedef VeryCompactSizeClassMap SizeClassMap;
# elif defined(__aarch64__)
-// AArch64/SANITIZIER_CAN_USER_ALLOCATOR64 is only for 42-bit VMA
+// AArch64/SANITIZER_CAN_USER_ALLOCATOR64 is only for 42-bit VMA
// so no need to different values for different VMA.
const uptr kAllocatorSpace = 0x10000000000ULL;
const uptr kAllocatorSize = 0x10000000000ULL; // 3T.
+typedef DefaultSizeClassMap SizeClassMap;
+# elif SANITIZER_WINDOWS
+const uptr kAllocatorSpace = ~(uptr)0;
+const uptr kAllocatorSize = 0x8000000000ULL; // 500G
+typedef DefaultSizeClassMap SizeClassMap;
# else
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
-# endif
typedef DefaultSizeClassMap SizeClassMap;
-typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
- SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
+# endif
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = kAllocatorSpace;
+ static const uptr kSpaceSize = kAllocatorSize;
+ static const uptr kMetadataSize = 0;
+ typedef __asan::SizeClassMap SizeClassMap;
+ typedef AsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#else // Fallback to SizeClassAllocator32.
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
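
The asan_allocator.h hunk above replaces the long template-argument list of SizeClassAllocator64 with a single parameter struct (AP64) that bundles the space base, space size, metadata size, size-class map, and map/unmap callback. The sketch below shows the same policy-struct pattern with hypothetical names (Params64, FixedRegionAllocator, NoOpMapCallback); it is illustrative only and does not use the real sanitizer types.

#include <cstdint>

// Stand-in for AsanMapUnmapCallback; does nothing.
struct NoOpMapCallback {
  static void OnMap(std::uint64_t, std::uint64_t) {}
  static void OnUnmap(std::uint64_t, std::uint64_t) {}
};

// Analogous to AP64: every knob is a member or typedef of one struct.
struct Params64 {
  static constexpr std::uint64_t kSpaceBeg = 0x600000000000ULL;
  static constexpr std::uint64_t kSpaceSize = 0x40000000000ULL;  // 4T
  static constexpr std::uint64_t kMetadataSize = 0;
  typedef NoOpMapCallback MapUnmapCallback;
  static constexpr std::uint64_t kFlags = 0;
};

// Analogous to SizeClassAllocator64<AP64>: one template parameter, read by name.
template <class Params>
class FixedRegionAllocator {
 public:
  std::uint64_t SpaceBeg() const { return Params::kSpaceBeg; }
  std::uint64_t SpaceEnd() const { return Params::kSpaceBeg + Params::kSpaceSize; }
};

int main() {
  FixedRegionAllocator<Params64> a;
  return a.SpaceEnd() > a.SpaceBeg() ? 0 : 1;
}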
diff --git a/lib/asan/asan_debugging.cc b/lib/asan/asan_debugging.cc
index 7c3a8a73bd4e..37c5922dc0fc 100644
--- a/lib/asan/asan_debugging.cc
+++ b/lib/asan/asan_debugging.cc
@@ -14,74 +14,39 @@
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
+#include "asan_descriptions.h"
#include "asan_flags.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_thread.h"
-namespace __asan {
-
-void GetInfoForStackVar(uptr addr, AddressDescription *descr, AsanThread *t) {
- descr->name[0] = 0;
- descr->region_address = 0;
- descr->region_size = 0;
- descr->region_kind = "stack";
+namespace {
+using namespace __asan;
- AsanThread::StackFrameAccess access;
- if (!t->GetStackFrameAccessByAddr(addr, &access))
- return;
+static void FindInfoForStackVar(uptr addr, const char *frame_descr, uptr offset,
+ char *name, uptr name_size,
+ uptr &region_address, uptr &region_size) {
InternalMmapVector<StackVarDescr> vars(16);
- if (!ParseFrameDescription(access.frame_descr, &vars)) {
+ if (!ParseFrameDescription(frame_descr, &vars)) {
return;
}
for (uptr i = 0; i < vars.size(); i++) {
- if (access.offset <= vars[i].beg + vars[i].size) {
- internal_strncat(descr->name, vars[i].name_pos,
- Min(descr->name_size, vars[i].name_len));
- descr->region_address = addr - (access.offset - vars[i].beg);
- descr->region_size = vars[i].size;
+ if (offset <= vars[i].beg + vars[i].size) {
+ // We use name_len + 1 because strlcpy will guarantee a \0 at the end, so
+ // if we're limiting the copy due to name_len, we add 1 to ensure we copy
+ // the whole name and then terminate with '\0'.
+ internal_strlcpy(name, vars[i].name_pos,
+ Min(name_size, vars[i].name_len + 1));
+ region_address = addr - (offset - vars[i].beg);
+ region_size = vars[i].size;
return;
}
}
}
-void GetInfoForHeapAddress(uptr addr, AddressDescription *descr) {
- AsanChunkView chunk = FindHeapChunkByAddress(addr);
-
- descr->name[0] = 0;
- descr->region_address = 0;
- descr->region_size = 0;
-
- if (!chunk.IsValid()) {
- descr->region_kind = "heap-invalid";
- return;
- }
-
- descr->region_address = chunk.Beg();
- descr->region_size = chunk.UsedSize();
- descr->region_kind = "heap";
-}
-
-void AsanLocateAddress(uptr addr, AddressDescription *descr) {
- if (DescribeAddressIfShadow(addr, descr, /* print */ false)) {
- return;
- }
- if (GetInfoForAddressIfGlobal(addr, descr)) {
- return;
- }
- asanThreadRegistry().Lock();
- AsanThread *thread = FindThreadByStackAddress(addr);
- asanThreadRegistry().Unlock();
- if (thread) {
- GetInfoForStackVar(addr, descr, thread);
- return;
- }
- GetInfoForHeapAddress(addr, descr);
-}
-
-static uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
+uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
bool alloc_stack) {
AsanChunkView chunk = FindHeapChunkByAddress(addr);
if (!chunk.IsValid()) return 0;
@@ -108,18 +73,58 @@ static uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
return 0;
}
-} // namespace __asan
-
-using namespace __asan;
+} // namespace
SANITIZER_INTERFACE_ATTRIBUTE
const char *__asan_locate_address(uptr addr, char *name, uptr name_size,
- uptr *region_address, uptr *region_size) {
- AddressDescription descr = { name, name_size, 0, 0, nullptr };
- AsanLocateAddress(addr, &descr);
- if (region_address) *region_address = descr.region_address;
- if (region_size) *region_size = descr.region_size;
- return descr.region_kind;
+ uptr *region_address_ptr,
+ uptr *region_size_ptr) {
+ AddressDescription descr(addr);
+ uptr region_address = 0;
+ uptr region_size = 0;
+ const char *region_kind = nullptr;
+ if (name && name_size > 0) name[0] = 0;
+
+ if (auto shadow = descr.AsShadow()) {
+ // region_{address,size} are already 0
+ switch (shadow->kind) {
+ case kShadowKindLow:
+ region_kind = "low shadow";
+ break;
+ case kShadowKindGap:
+ region_kind = "shadow gap";
+ break;
+ case kShadowKindHigh:
+ region_kind = "high shadow";
+ break;
+ }
+ } else if (auto heap = descr.AsHeap()) {
+ region_kind = "heap";
+ region_address = heap->chunk_access.chunk_begin;
+ region_size = heap->chunk_access.chunk_size;
+ } else if (auto stack = descr.AsStack()) {
+ region_kind = "stack";
+ if (!stack->frame_descr) {
+ // region_{address,size} are already 0
+ } else {
+ FindInfoForStackVar(addr, stack->frame_descr, stack->offset, name,
+ name_size, region_address, region_size);
+ }
+ } else if (auto global = descr.AsGlobal()) {
+ region_kind = "global";
+ auto &g = global->globals[0];
+ internal_strlcpy(name, g.name, name_size);
+ region_address = g.beg;
+ region_size = g.size;
+ } else {
+ // region_{address,size} are already 0
+ region_kind = "heap-invalid";
+ }
+
+ CHECK(region_kind);
+ if (region_address_ptr) *region_address_ptr = region_address;
+ if (region_size_ptr) *region_size_ptr = region_size;
+ return region_kind;
}
SANITIZER_INTERFACE_ATTRIBUTE
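
The rewritten __asan_locate_address above classifies an address as shadow, heap, stack, global, or heap-invalid and optionally fills in a name and region bounds. A hedged usage sketch, assuming the public declaration in <sanitizer/asan_interface.h> and an ASan-instrumented build; for a live heap pointer it would be expected to return "heap".

#include <sanitizer/asan_interface.h>
#include <cstddef>
#include <cstdio>
#include <cstdlib>

int main() {
  char *p = static_cast<char *>(std::malloc(32));
  char name[128];
  void *region_address = nullptr;
  std::size_t region_size = 0;
  // Ask ASan what kind of memory this pointer refers to.
  const char *kind = __asan_locate_address(p, name, sizeof(name),
                                           &region_address, &region_size);
  std::printf("%p: kind=%s region=%p size=%zu\n", static_cast<void *>(p), kind,
              region_address, region_size);
  std::free(p);
  return 0;
}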
diff --git a/lib/asan/asan_descriptions.cc b/lib/asan/asan_descriptions.cc
new file mode 100644
index 000000000000..0ecbe091c8b1
--- /dev/null
+++ b/lib/asan/asan_descriptions.cc
@@ -0,0 +1,486 @@
+//===-- asan_descriptions.cc ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan functions for getting information about an address and/or printing it.
+//===----------------------------------------------------------------------===//
+
+#include "asan_descriptions.h"
+#include "asan_mapping.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+
+namespace __asan {
+
+// Return " (thread_name) " or an empty string if the name is empty.
+const char *ThreadNameWithParenthesis(AsanThreadContext *t, char buff[],
+ uptr buff_len) {
+ const char *name = t->name;
+ if (name[0] == '\0') return "";
+ buff[0] = 0;
+ internal_strncat(buff, " (", 3);
+ internal_strncat(buff, name, buff_len - 4);
+ internal_strncat(buff, ")", 2);
+ return buff;
+}
+
+const char *ThreadNameWithParenthesis(u32 tid, char buff[], uptr buff_len) {
+ if (tid == kInvalidTid) return "";
+ asanThreadRegistry().CheckLocked();
+ AsanThreadContext *t = GetThreadContextByTidLocked(tid);
+ return ThreadNameWithParenthesis(t, buff, buff_len);
+}
+
+void DescribeThread(AsanThreadContext *context) {
+ CHECK(context);
+ asanThreadRegistry().CheckLocked();
+ // No need to announce the main thread.
+ if (context->tid == 0 || context->announced) {
+ return;
+ }
+ context->announced = true;
+ char tname[128];
+ InternalScopedString str(1024);
+ str.append("Thread T%d%s", context->tid,
+ ThreadNameWithParenthesis(context->tid, tname, sizeof(tname)));
+ if (context->parent_tid == kInvalidTid) {
+ str.append(" created by unknown thread\n");
+ Printf("%s", str.data());
+ return;
+ }
+ str.append(
+ " created by T%d%s here:\n", context->parent_tid,
+ ThreadNameWithParenthesis(context->parent_tid, tname, sizeof(tname)));
+ Printf("%s", str.data());
+ StackDepotGet(context->stack_id).Print();
+ // Recursively describe parent thread if needed.
+ if (flags()->print_full_thread_history) {
+ AsanThreadContext *parent_context =
+ GetThreadContextByTidLocked(context->parent_tid);
+ DescribeThread(parent_context);
+ }
+}
+
+// Shadow descriptions
+static bool GetShadowKind(uptr addr, ShadowKind *shadow_kind) {
+ CHECK(!AddrIsInMem(addr));
+ if (AddrIsInShadowGap(addr)) {
+ *shadow_kind = kShadowKindGap;
+ } else if (AddrIsInHighShadow(addr)) {
+ *shadow_kind = kShadowKindHigh;
+ } else if (AddrIsInLowShadow(addr)) {
+ *shadow_kind = kShadowKindLow;
+ } else {
+ CHECK(0 && "Address is not in memory and not in shadow?");
+ return false;
+ }
+ return true;
+}
+
+bool DescribeAddressIfShadow(uptr addr) {
+ ShadowAddressDescription descr;
+ if (!GetShadowAddressInformation(addr, &descr)) return false;
+ descr.Print();
+ return true;
+}
+
+bool GetShadowAddressInformation(uptr addr, ShadowAddressDescription *descr) {
+ if (AddrIsInMem(addr)) return false;
+ ShadowKind shadow_kind;
+ if (!GetShadowKind(addr, &shadow_kind)) return false;
+ if (shadow_kind != kShadowKindGap) descr->shadow_byte = *(u8 *)addr;
+ descr->addr = addr;
+ descr->kind = shadow_kind;
+ return true;
+}
+
+// Heap descriptions
+static void GetAccessToHeapChunkInformation(ChunkAccess *descr,
+ AsanChunkView chunk, uptr addr,
+ uptr access_size) {
+ descr->bad_addr = addr;
+ if (chunk.AddrIsAtLeft(addr, access_size, &descr->offset)) {
+ descr->access_type = kAccessTypeLeft;
+ } else if (chunk.AddrIsAtRight(addr, access_size, &descr->offset)) {
+ descr->access_type = kAccessTypeRight;
+ if (descr->offset < 0) {
+ descr->bad_addr -= descr->offset;
+ descr->offset = 0;
+ }
+ } else if (chunk.AddrIsInside(addr, access_size, &descr->offset)) {
+ descr->access_type = kAccessTypeInside;
+ } else {
+ descr->access_type = kAccessTypeUnknown;
+ }
+ descr->chunk_begin = chunk.Beg();
+ descr->chunk_size = chunk.UsedSize();
+ descr->alloc_type = chunk.GetAllocType();
+}
+
+static void PrintHeapChunkAccess(uptr addr, const ChunkAccess &descr) {
+ Decorator d;
+ InternalScopedString str(4096);
+ str.append("%s", d.Location());
+ switch (descr.access_type) {
+ case kAccessTypeLeft:
+ str.append("%p is located %zd bytes to the left of",
+ (void *)descr.bad_addr, descr.offset);
+ break;
+ case kAccessTypeRight:
+ str.append("%p is located %zd bytes to the right of",
+ (void *)descr.bad_addr, descr.offset);
+ break;
+ case kAccessTypeInside:
+ str.append("%p is located %zd bytes inside of", (void *)descr.bad_addr,
+ descr.offset);
+ break;
+ case kAccessTypeUnknown:
+ str.append(
+ "%p is located somewhere around (this is AddressSanitizer bug!)",
+ (void *)descr.bad_addr);
+ }
+ str.append(" %zu-byte region [%p,%p)\n", descr.chunk_size,
+ (void *)descr.chunk_begin,
+ (void *)(descr.chunk_begin + descr.chunk_size));
+ str.append("%s", d.EndLocation());
+ Printf("%s", str.data());
+}
+
+bool GetHeapAddressInformation(uptr addr, uptr access_size,
+ HeapAddressDescription *descr) {
+ AsanChunkView chunk = FindHeapChunkByAddress(addr);
+ if (!chunk.IsValid()) {
+ return false;
+ }
+ descr->addr = addr;
+ GetAccessToHeapChunkInformation(&descr->chunk_access, chunk, addr,
+ access_size);
+ CHECK_NE(chunk.AllocTid(), kInvalidTid);
+ descr->alloc_tid = chunk.AllocTid();
+ descr->alloc_stack_id = chunk.GetAllocStackId();
+ descr->free_tid = chunk.FreeTid();
+ if (descr->free_tid != kInvalidTid)
+ descr->free_stack_id = chunk.GetFreeStackId();
+ return true;
+}
+
+static StackTrace GetStackTraceFromId(u32 id) {
+ CHECK(id);
+ StackTrace res = StackDepotGet(id);
+ CHECK(res.trace);
+ return res;
+}
+
+bool DescribeAddressIfHeap(uptr addr, uptr access_size) {
+ HeapAddressDescription descr;
+ if (!GetHeapAddressInformation(addr, access_size, &descr)) {
+ Printf(
+ "AddressSanitizer can not describe address in more detail "
+ "(wild memory access suspected).\n");
+ return false;
+ }
+ descr.Print();
+ return true;
+}
+
+// Stack descriptions
+bool GetStackAddressInformation(uptr addr, uptr access_size,
+ StackAddressDescription *descr) {
+ AsanThread *t = FindThreadByStackAddress(addr);
+ if (!t) return false;
+
+ descr->addr = addr;
+ descr->tid = t->tid();
+ // Try to fetch precise stack frame for this access.
+ AsanThread::StackFrameAccess access;
+ if (!t->GetStackFrameAccessByAddr(addr, &access)) {
+ descr->frame_descr = nullptr;
+ return true;
+ }
+
+ descr->offset = access.offset;
+ descr->access_size = access_size;
+ descr->frame_pc = access.frame_pc;
+ descr->frame_descr = access.frame_descr;
+
+#if SANITIZER_PPC64V1
+ // On PowerPC64 ELFv1, the address of a function actually points to a
+ // three-doubleword data structure with the first field containing
+ // the address of the function's code.
+ descr->frame_pc = *reinterpret_cast<uptr *>(descr->frame_pc);
+#endif
+ descr->frame_pc += 16;
+
+ return true;
+}
+
+static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr,
+ uptr access_size, uptr prev_var_end,
+ uptr next_var_beg) {
+ uptr var_end = var.beg + var.size;
+ uptr addr_end = addr + access_size;
+ const char *pos_descr = nullptr;
+ // If the variable [var.beg, var_end) is the nearest variable to the
+ // current memory access, indicate it in the log.
+ if (addr >= var.beg) {
+ if (addr_end <= var_end)
+ pos_descr = "is inside"; // May happen if this is a use-after-return.
+ else if (addr < var_end)
+ pos_descr = "partially overflows";
+ else if (addr_end <= next_var_beg &&
+ next_var_beg - addr_end >= addr - var_end)
+ pos_descr = "overflows";
+ } else {
+ if (addr_end > var.beg)
+ pos_descr = "partially underflows";
+ else if (addr >= prev_var_end && addr - prev_var_end >= var.beg - addr_end)
+ pos_descr = "underflows";
+ }
+ InternalScopedString str(1024);
+ str.append(" [%zd, %zd)", var.beg, var_end);
+ // Render variable name.
+ str.append(" '");
+ for (uptr i = 0; i < var.name_len; ++i) {
+ str.append("%c", var.name_pos[i]);
+ }
+ str.append("'");
+ if (pos_descr) {
+ Decorator d;
+ // FIXME: we may want to also print the size of the access here,
+ // but in case of accesses generated by memset it may be confusing.
+ str.append("%s <== Memory access at offset %zd %s this variable%s\n",
+ d.Location(), addr, pos_descr, d.EndLocation());
+ } else {
+ str.append("\n");
+ }
+ Printf("%s", str.data());
+}
+
+bool DescribeAddressIfStack(uptr addr, uptr access_size) {
+ StackAddressDescription descr;
+ if (!GetStackAddressInformation(addr, access_size, &descr)) return false;
+ descr.Print();
+ return true;
+}
+
+// Global descriptions
+static void DescribeAddressRelativeToGlobal(uptr addr, uptr access_size,
+ const __asan_global &g) {
+ InternalScopedString str(4096);
+ Decorator d;
+ str.append("%s", d.Location());
+ if (addr < g.beg) {
+ str.append("%p is located %zd bytes to the left", (void *)addr,
+ g.beg - addr);
+ } else if (addr + access_size > g.beg + g.size) {
+ if (addr < g.beg + g.size) addr = g.beg + g.size;
+ str.append("%p is located %zd bytes to the right", (void *)addr,
+ addr - (g.beg + g.size));
+ } else {
+ // Can it happen?
+ str.append("%p is located %zd bytes inside", (void *)addr, addr - g.beg);
+ }
+ str.append(" of global variable '%s' defined in '",
+ MaybeDemangleGlobalName(g.name));
+ PrintGlobalLocation(&str, g);
+ str.append("' (0x%zx) of size %zu\n", g.beg, g.size);
+ str.append("%s", d.EndLocation());
+ PrintGlobalNameIfASCII(&str, g);
+ Printf("%s", str.data());
+}
+
+bool GetGlobalAddressInformation(uptr addr, uptr access_size,
+ GlobalAddressDescription *descr) {
+ descr->addr = addr;
+ int globals_num = GetGlobalsForAddress(addr, descr->globals, descr->reg_sites,
+ ARRAY_SIZE(descr->globals));
+ descr->size = globals_num;
+ descr->access_size = access_size;
+ return globals_num != 0;
+}
+
+bool DescribeAddressIfGlobal(uptr addr, uptr access_size,
+ const char *bug_type) {
+ GlobalAddressDescription descr;
+ if (!GetGlobalAddressInformation(addr, access_size, &descr)) return false;
+
+ descr.Print(bug_type);
+ return true;
+}
+
+void ShadowAddressDescription::Print() const {
+ Printf("Address %p is located in the %s area.\n", addr, ShadowNames[kind]);
+}
+
+void GlobalAddressDescription::Print(const char *bug_type) const {
+ for (int i = 0; i < size; i++) {
+ DescribeAddressRelativeToGlobal(addr, access_size, globals[i]);
+ if (bug_type &&
+ 0 == internal_strcmp(bug_type, "initialization-order-fiasco") &&
+ reg_sites[i]) {
+ Printf(" registered at:\n");
+ StackDepotGet(reg_sites[i]).Print();
+ }
+ }
+}
+
+void StackAddressDescription::Print() const {
+ Decorator d;
+ char tname[128];
+ Printf("%s", d.Location());
+ Printf("Address %p is located in stack of thread T%d%s", addr, tid,
+ ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
+
+ if (!frame_descr) {
+ Printf("%s\n", d.EndLocation());
+ return;
+ }
+ Printf(" at offset %zu in frame%s\n", offset, d.EndLocation());
+
+ // Now we print the frame where the alloca has happened.
+ // We print this frame as a stack trace with one element.
+ // The symbolizer may print more than one frame if inlining was involved.
+ // The frame numbers may be different than those in the stack trace printed
+ // previously. That's unfortunate, but I have no better solution,
+ // especially given that the alloca may be from entirely different place
+ // (e.g. use-after-scope, or different thread's stack).
+ Printf("%s", d.EndLocation());
+ StackTrace alloca_stack(&frame_pc, 1);
+ alloca_stack.Print();
+
+ InternalMmapVector<StackVarDescr> vars(16);
+ if (!ParseFrameDescription(frame_descr, &vars)) {
+ Printf(
+ "AddressSanitizer can't parse the stack frame "
+ "descriptor: |%s|\n",
+ frame_descr);
+ // 'addr' is a stack address, so return true even if we can't parse frame
+ return;
+ }
+ uptr n_objects = vars.size();
+ // Report the number of stack objects.
+ Printf(" This frame has %zu object(s):\n", n_objects);
+
+ // Report all objects in this frame.
+ for (uptr i = 0; i < n_objects; i++) {
+ uptr prev_var_end = i ? vars[i - 1].beg + vars[i - 1].size : 0;
+ uptr next_var_beg = i + 1 < n_objects ? vars[i + 1].beg : ~(0UL);
+ PrintAccessAndVarIntersection(vars[i], offset, access_size, prev_var_end,
+ next_var_beg);
+ }
+ Printf(
+ "HINT: this may be a false positive if your program uses "
+ "some custom stack unwind mechanism or swapcontext\n");
+ if (SANITIZER_WINDOWS)
+ Printf(" (longjmp, SEH and C++ exceptions *are* supported)\n");
+ else
+ Printf(" (longjmp and C++ exceptions *are* supported)\n");
+
+ DescribeThread(GetThreadContextByTidLocked(tid));
+}
+
+void HeapAddressDescription::Print() const {
+ PrintHeapChunkAccess(addr, chunk_access);
+
+ asanThreadRegistry().CheckLocked();
+ AsanThreadContext *alloc_thread = GetThreadContextByTidLocked(alloc_tid);
+ StackTrace alloc_stack = GetStackTraceFromId(alloc_stack_id);
+
+ char tname[128];
+ Decorator d;
+ AsanThreadContext *free_thread = nullptr;
+ if (free_tid != kInvalidTid) {
+ free_thread = GetThreadContextByTidLocked(free_tid);
+ Printf("%sfreed by thread T%d%s here:%s\n", d.Allocation(),
+ free_thread->tid,
+ ThreadNameWithParenthesis(free_thread, tname, sizeof(tname)),
+ d.EndAllocation());
+ StackTrace free_stack = GetStackTraceFromId(free_stack_id);
+ free_stack.Print();
+ Printf("%spreviously allocated by thread T%d%s here:%s\n", d.Allocation(),
+ alloc_thread->tid,
+ ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
+ d.EndAllocation());
+ } else {
+ Printf("%sallocated by thread T%d%s here:%s\n", d.Allocation(),
+ alloc_thread->tid,
+ ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
+ d.EndAllocation());
+ }
+ alloc_stack.Print();
+ DescribeThread(GetCurrentThread());
+ if (free_thread) DescribeThread(free_thread);
+ DescribeThread(alloc_thread);
+}
+
+AddressDescription::AddressDescription(uptr addr, uptr access_size,
+ bool shouldLockThreadRegistry) {
+ if (GetShadowAddressInformation(addr, &data.shadow)) {
+ data.kind = kAddressKindShadow;
+ return;
+ }
+ if (GetHeapAddressInformation(addr, access_size, &data.heap)) {
+ data.kind = kAddressKindHeap;
+ return;
+ }
+
+ bool isStackMemory = false;
+ if (shouldLockThreadRegistry) {
+ ThreadRegistryLock l(&asanThreadRegistry());
+ isStackMemory = GetStackAddressInformation(addr, access_size, &data.stack);
+ } else {
+ isStackMemory = GetStackAddressInformation(addr, access_size, &data.stack);
+ }
+ if (isStackMemory) {
+ data.kind = kAddressKindStack;
+ return;
+ }
+
+ if (GetGlobalAddressInformation(addr, access_size, &data.global)) {
+ data.kind = kAddressKindGlobal;
+ return;
+ }
+ data.kind = kAddressKindWild;
+ addr = 0;
+}
+
+void PrintAddressDescription(uptr addr, uptr access_size,
+ const char *bug_type) {
+ ShadowAddressDescription shadow_descr;
+ if (GetShadowAddressInformation(addr, &shadow_descr)) {
+ shadow_descr.Print();
+ return;
+ }
+
+ GlobalAddressDescription global_descr;
+ if (GetGlobalAddressInformation(addr, access_size, &global_descr)) {
+ global_descr.Print(bug_type);
+ return;
+ }
+
+ StackAddressDescription stack_descr;
+ if (GetStackAddressInformation(addr, access_size, &stack_descr)) {
+ stack_descr.Print();
+ return;
+ }
+
+ HeapAddressDescription heap_descr;
+ if (GetHeapAddressInformation(addr, access_size, &heap_descr)) {
+ heap_descr.Print();
+ return;
+ }
+
+ // We exhausted our possibilities. Bail out.
+ Printf(
+ "AddressSanitizer can not describe address in more detail "
+ "(wild memory access suspected).\n");
+}
+} // namespace __asan
diff --git a/lib/asan/asan_descriptions.h b/lib/asan/asan_descriptions.h
new file mode 100644
index 000000000000..0ee677eb7d0e
--- /dev/null
+++ b/lib/asan/asan_descriptions.h
@@ -0,0 +1,253 @@
+//===-- asan_descriptions.h -------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan-private header for asan_descriptions.cc.
+// TODO(filcab): Most struct definitions should move to the interface headers.
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_DESCRIPTIONS_H
+#define ASAN_DESCRIPTIONS_H
+
+#include "asan_allocator.h"
+#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_report_decorator.h"
+
+namespace __asan {
+
+void DescribeThread(AsanThreadContext *context);
+static inline void DescribeThread(AsanThread *t) {
+ if (t) DescribeThread(t->context());
+}
+const char *ThreadNameWithParenthesis(AsanThreadContext *t, char buff[],
+ uptr buff_len);
+const char *ThreadNameWithParenthesis(u32 tid, char buff[], uptr buff_len);
+
+class Decorator : public __sanitizer::SanitizerCommonDecorator {
+ public:
+ Decorator() : SanitizerCommonDecorator() {}
+ const char *Access() { return Blue(); }
+ const char *EndAccess() { return Default(); }
+ const char *Location() { return Green(); }
+ const char *EndLocation() { return Default(); }
+ const char *Allocation() { return Magenta(); }
+ const char *EndAllocation() { return Default(); }
+
+ const char *ShadowByte(u8 byte) {
+ switch (byte) {
+ case kAsanHeapLeftRedzoneMagic:
+ case kAsanArrayCookieMagic:
+ return Red();
+ case kAsanHeapFreeMagic:
+ return Magenta();
+ case kAsanStackLeftRedzoneMagic:
+ case kAsanStackMidRedzoneMagic:
+ case kAsanStackRightRedzoneMagic:
+ return Red();
+ case kAsanStackAfterReturnMagic:
+ return Magenta();
+ case kAsanInitializationOrderMagic:
+ return Cyan();
+ case kAsanUserPoisonedMemoryMagic:
+ case kAsanContiguousContainerOOBMagic:
+ case kAsanAllocaLeftMagic:
+ case kAsanAllocaRightMagic:
+ return Blue();
+ case kAsanStackUseAfterScopeMagic:
+ return Magenta();
+ case kAsanGlobalRedzoneMagic:
+ return Red();
+ case kAsanInternalHeapMagic:
+ return Yellow();
+ case kAsanIntraObjectRedzone:
+ return Yellow();
+ default:
+ return Default();
+ }
+ }
+ const char *EndShadowByte() { return Default(); }
+ const char *MemoryByte() { return Magenta(); }
+ const char *EndMemoryByte() { return Default(); }
+};
+
+enum ShadowKind : u8 {
+ kShadowKindLow,
+ kShadowKindGap,
+ kShadowKindHigh,
+};
+static const char *const ShadowNames[] = {"low shadow", "shadow gap",
+ "high shadow"};
+
+struct ShadowAddressDescription {
+ uptr addr;
+ ShadowKind kind;
+ u8 shadow_byte;
+
+ void Print() const;
+};
+
+bool GetShadowAddressInformation(uptr addr, ShadowAddressDescription *descr);
+bool DescribeAddressIfShadow(uptr addr);
+
+enum AccessType {
+ kAccessTypeLeft,
+ kAccessTypeRight,
+ kAccessTypeInside,
+ kAccessTypeUnknown, // This means we have an AddressSanitizer bug!
+};
+
+struct ChunkAccess {
+ uptr bad_addr;
+ sptr offset;
+ uptr chunk_begin;
+ uptr chunk_size;
+ u32 access_type : 2;
+ u32 alloc_type : 2;
+};
+
+struct HeapAddressDescription {
+ uptr addr;
+ uptr alloc_tid;
+ uptr free_tid;
+ u32 alloc_stack_id;
+ u32 free_stack_id;
+ ChunkAccess chunk_access;
+
+ void Print() const;
+};
+
+bool GetHeapAddressInformation(uptr addr, uptr access_size,
+ HeapAddressDescription *descr);
+bool DescribeAddressIfHeap(uptr addr, uptr access_size = 1);
+
+struct StackAddressDescription {
+ uptr addr;
+ uptr tid;
+ uptr offset;
+ uptr frame_pc;
+ uptr access_size;
+ const char *frame_descr;
+
+ void Print() const;
+};
+
+bool GetStackAddressInformation(uptr addr, uptr access_size,
+ StackAddressDescription *descr);
+
+struct GlobalAddressDescription {
+ uptr addr;
+ // Assume address is close to at most four globals.
+ static const int kMaxGlobals = 4;
+ __asan_global globals[kMaxGlobals];
+ u32 reg_sites[kMaxGlobals];
+ uptr access_size;
+ u8 size;
+
+ void Print(const char *bug_type = "") const;
+};
+
+bool GetGlobalAddressInformation(uptr addr, uptr access_size,
+ GlobalAddressDescription *descr);
+bool DescribeAddressIfGlobal(uptr addr, uptr access_size, const char *bug_type);
+
+// General function to describe an address. Will try to describe the address as
+// a shadow, global (variable), stack, or heap address.
+// bug_type is optional and is used for checking if we're reporting an
+// initialization-order-fiasco
+// The proper access_size should be passed for stack, global, and heap
+// addresses. Defaults to 1.
+// Each of the *AddressDescription functions has its own Print() member, which
+// may take access_size and bug_type parameters if needed.
+void PrintAddressDescription(uptr addr, uptr access_size = 1,
+ const char *bug_type = "");
+
+enum AddressKind {
+ kAddressKindWild,
+ kAddressKindShadow,
+ kAddressKindHeap,
+ kAddressKindStack,
+ kAddressKindGlobal,
+};
+
+class AddressDescription {
+ struct AddressDescriptionData {
+ AddressKind kind;
+ union {
+ ShadowAddressDescription shadow;
+ HeapAddressDescription heap;
+ StackAddressDescription stack;
+ GlobalAddressDescription global;
+ uptr addr;
+ };
+ };
+
+ AddressDescriptionData data;
+
+ public:
+ AddressDescription() = default;
+ // shouldLockThreadRegistry allows us to skip locking if we're sure we already
+ // have done it.
+ AddressDescription(uptr addr, bool shouldLockThreadRegistry = true)
+ : AddressDescription(addr, 1, shouldLockThreadRegistry) {}
+ AddressDescription(uptr addr, uptr access_size,
+ bool shouldLockThreadRegistry = true);
+
+ uptr Address() const {
+ switch (data.kind) {
+ case kAddressKindWild:
+ return data.addr;
+ case kAddressKindShadow:
+ return data.shadow.addr;
+ case kAddressKindHeap:
+ return data.heap.addr;
+ case kAddressKindStack:
+ return data.stack.addr;
+ case kAddressKindGlobal:
+ return data.global.addr;
+ }
+ UNREACHABLE("AddressInformation kind is invalid");
+ }
+ void Print(const char *bug_descr = nullptr) const {
+ switch (data.kind) {
+ case kAddressKindWild:
+ Printf("Address %p is a wild pointer.\n", data.addr);
+ return;
+ case kAddressKindShadow:
+ return data.shadow.Print();
+ case kAddressKindHeap:
+ return data.heap.Print();
+ case kAddressKindStack:
+ return data.stack.Print();
+ case kAddressKindGlobal:
+ // initialization-order-fiasco has a special Print()
+ return data.global.Print(bug_descr);
+ }
+ UNREACHABLE("AddressInformation kind is invalid");
+ }
+
+ void StoreTo(AddressDescriptionData *dst) const { *dst = data; }
+
+ const ShadowAddressDescription *AsShadow() const {
+ return data.kind == kAddressKindShadow ? &data.shadow : nullptr;
+ }
+ const HeapAddressDescription *AsHeap() const {
+ return data.kind == kAddressKindHeap ? &data.heap : nullptr;
+ }
+ const StackAddressDescription *AsStack() const {
+ return data.kind == kAddressKindStack ? &data.stack : nullptr;
+ }
+ const GlobalAddressDescription *AsGlobal() const {
+ return data.kind == kAddressKindGlobal ? &data.global : nullptr;
+ }
+};
+
+} // namespace __asan
+
+#endif // ASAN_DESCRIPTIONS_H
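The AddressDescription class added above is a hand-rolled tagged union: AddressKind records which member of the anonymous union is active, and the As*() accessors return nullptr when asked for the wrong alternative. The following stand-alone sketch illustrates the same pattern with simplified, hypothetical payload types (ShadowDesc/HeapDesc are not the real ASan structures):

#include <cstdio>

// Hypothetical payloads standing in for ShadowAddressDescription etc.
struct ShadowDesc { unsigned long long addr; };
struct HeapDesc   { unsigned long long addr, chunk_begin; };

enum Kind { kWild, kShadow, kHeap };

class Description {
  Kind kind;
  union {                      // only the member selected by `kind` is valid
    unsigned long long addr;   // kWild
    ShadowDesc shadow;         // kShadow
    HeapDesc heap;             // kHeap
  };

 public:
  explicit Description(unsigned long long a) : kind(kWild), addr(a) {}
  explicit Description(const HeapDesc &h) : kind(kHeap), heap(h) {}

  // Checked accessor: nullptr when the wrong alternative is requested.
  const HeapDesc *AsHeap() const { return kind == kHeap ? &heap : nullptr; }

  void Print() const {
    switch (kind) {
      case kWild:   printf("wild pointer %#llx\n", addr); return;
      case kShadow: printf("shadow address %#llx\n", shadow.addr); return;
      case kHeap:   printf("heap address %#llx (chunk starts at %#llx)\n",
                           heap.addr, heap.chunk_begin); return;
    }
  }
};

int main() {
  Description d(HeapDesc{0x602000000010, 0x602000000000});
  if (const HeapDesc *h = d.AsHeap())
    printf("described as heap, chunk %#llx\n", h->chunk_begin);
  d.Print();
}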
diff --git a/lib/asan/asan_errors.cc b/lib/asan/asan_errors.cc
new file mode 100644
index 000000000000..c287ba1b4be6
--- /dev/null
+++ b/lib/asan/asan_errors.cc
@@ -0,0 +1,503 @@
+//===-- asan_errors.cc ------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan implementation for error structures.
+//===----------------------------------------------------------------------===//
+
+#include "asan_errors.h"
+#include <signal.h>
+#include "asan_descriptions.h"
+#include "asan_mapping.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+
+namespace __asan {
+
+void ErrorStackOverflow::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report(
+ "ERROR: AddressSanitizer: %s on address %p"
+ " (pc %p bp %p sp %p T%d)\n", scariness.GetDescription(),
+ (void *)addr, (void *)pc, (void *)bp, (void *)sp, tid);
+ Printf("%s", d.EndWarning());
+ scariness.Print();
+ BufferedStackTrace stack;
+ GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context,
+ common_flags()->fast_unwind_on_fatal);
+ stack.Print();
+ ReportErrorSummary(scariness.GetDescription(), &stack);
+}
+
+static void MaybeDumpInstructionBytes(uptr pc) {
+ if (!flags()->dump_instruction_bytes || (pc < GetPageSizeCached())) return;
+ InternalScopedString str(1024);
+ str.append("First 16 instruction bytes at pc: ");
+ if (IsAccessibleMemoryRange(pc, 16)) {
+ for (int i = 0; i < 16; ++i) {
+ PrintMemoryByte(&str, "", ((u8 *)pc)[i], /*in_shadow*/ false, " ");
+ }
+ str.append("\n");
+ } else {
+ str.append("inaccessible\n");
+ }
+ Report("%s", str.data());
+}
+
+static void MaybeDumpRegisters(void *context) {
+ if (!flags()->dump_registers) return;
+ SignalContext::DumpAllRegisters(context);
+}
+
+void ErrorDeadlySignal::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ const char *description = DescribeSignalOrException(signo);
+ Report(
+ "ERROR: AddressSanitizer: %s on unknown address %p (pc %p bp %p sp %p "
+ "T%d)\n",
+ description, (void *)addr, (void *)pc, (void *)bp, (void *)sp, tid);
+ Printf("%s", d.EndWarning());
+ if (pc < GetPageSizeCached()) Report("Hint: pc points to the zero page.\n");
+ if (is_memory_access) {
+ const char *access_type =
+ write_flag == SignalContext::WRITE
+ ? "WRITE"
+ : (write_flag == SignalContext::READ ? "READ" : "UNKNOWN");
+ Report("The signal is caused by a %s memory access.\n", access_type);
+ if (addr < GetPageSizeCached())
+ Report("Hint: address points to the zero page.\n");
+ }
+ scariness.Print();
+ BufferedStackTrace stack;
+ GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context,
+ common_flags()->fast_unwind_on_fatal);
+ stack.Print();
+ MaybeDumpInstructionBytes(pc);
+ MaybeDumpRegisters(context);
+ Printf("AddressSanitizer can not provide additional info.\n");
+ ReportErrorSummary(description, &stack);
+}
+
+void ErrorDoubleFree::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ char tname[128];
+ Report(
+ "ERROR: AddressSanitizer: attempting %s on %p in "
+ "thread T%d%s:\n",
+ scariness.GetDescription(), addr_description.addr, tid,
+ ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
+ Printf("%s", d.EndWarning());
+ scariness.Print();
+ GET_STACK_TRACE_FATAL(second_free_stack->trace[0],
+ second_free_stack->top_frame_bp);
+ stack.Print();
+ addr_description.Print();
+ ReportErrorSummary(scariness.GetDescription(), &stack);
+}
+
+void ErrorNewDeleteSizeMismatch::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ char tname[128];
+ Report(
+ "ERROR: AddressSanitizer: %s on %p in thread "
+ "T%d%s:\n",
+ scariness.GetDescription(), addr_description.addr, tid,
+ ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
+ Printf("%s object passed to delete has wrong type:\n", d.EndWarning());
+ Printf(
+ " size of the allocated type: %zd bytes;\n"
+ " size of the deallocated type: %zd bytes.\n",
+ addr_description.chunk_access.chunk_size, delete_size);
+ CHECK_GT(free_stack->size, 0);
+ scariness.Print();
+ GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
+ stack.Print();
+ addr_description.Print();
+ ReportErrorSummary(scariness.GetDescription(), &stack);
+ Report(
+ "HINT: if you don't care about these errors you may set "
+ "ASAN_OPTIONS=new_delete_type_mismatch=0\n");
+}
+
+void ErrorFreeNotMalloced::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ char tname[128];
+ Report(
+ "ERROR: AddressSanitizer: attempting free on address "
+ "which was not malloc()-ed: %p in thread T%d%s\n",
+ addr_description.Address(), tid,
+ ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
+ Printf("%s", d.EndWarning());
+ CHECK_GT(free_stack->size, 0);
+ scariness.Print();
+ GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
+ stack.Print();
+ addr_description.Print();
+ ReportErrorSummary(scariness.GetDescription(), &stack);
+}
+
+void ErrorAllocTypeMismatch::Print() {
+ static const char *alloc_names[] = {"INVALID", "malloc", "operator new",
+ "operator new []"};
+ static const char *dealloc_names[] = {"INVALID", "free", "operator delete",
+ "operator delete []"};
+ CHECK_NE(alloc_type, dealloc_type);
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report("ERROR: AddressSanitizer: %s (%s vs %s) on %p\n",
+ scariness.GetDescription(),
+ alloc_names[alloc_type], dealloc_names[dealloc_type],
+ addr_description.addr);
+ Printf("%s", d.EndWarning());
+ CHECK_GT(dealloc_stack->size, 0);
+ scariness.Print();
+ GET_STACK_TRACE_FATAL(dealloc_stack->trace[0], dealloc_stack->top_frame_bp);
+ stack.Print();
+ addr_description.Print();
+ ReportErrorSummary(scariness.GetDescription(), &stack);
+ Report(
+ "HINT: if you don't care about these errors you may set "
+ "ASAN_OPTIONS=alloc_dealloc_mismatch=0\n");
+}
+
+void ErrorMallocUsableSizeNotOwned::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report(
+ "ERROR: AddressSanitizer: attempting to call malloc_usable_size() for "
+ "pointer which is not owned: %p\n",
+ addr_description.Address());
+ Printf("%s", d.EndWarning());
+ stack->Print();
+ addr_description.Print();
+ ReportErrorSummary(scariness.GetDescription(), stack);
+}
+
+void ErrorSanitizerGetAllocatedSizeNotOwned::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report(
+ "ERROR: AddressSanitizer: attempting to call "
+ "__sanitizer_get_allocated_size() for pointer which is not owned: %p\n",
+ addr_description.Address());
+ Printf("%s", d.EndWarning());
+ stack->Print();
+ addr_description.Print();
+ ReportErrorSummary(scariness.GetDescription(), stack);
+}
+
+void ErrorStringFunctionMemoryRangesOverlap::Print() {
+ Decorator d;
+ char bug_type[100];
+ internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function);
+ Printf("%s", d.Warning());
+ Report(
+ "ERROR: AddressSanitizer: %s: memory ranges [%p,%p) and [%p, %p) "
+ "overlap\n",
+ bug_type, addr1_description.Address(),
+ addr1_description.Address() + length1, addr2_description.Address(),
+ addr2_description.Address() + length2);
+ Printf("%s", d.EndWarning());
+ scariness.Print();
+ stack->Print();
+ addr1_description.Print();
+ addr2_description.Print();
+ ReportErrorSummary(bug_type, stack);
+}
+
+void ErrorStringFunctionSizeOverflow::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report("ERROR: AddressSanitizer: %s: (size=%zd)\n",
+ scariness.GetDescription(), size);
+ Printf("%s", d.EndWarning());
+ scariness.Print();
+ stack->Print();
+ addr_description.Print();
+ ReportErrorSummary(scariness.GetDescription(), stack);
+}
+
+void ErrorBadParamsToAnnotateContiguousContainer::Print() {
+ Report(
+ "ERROR: AddressSanitizer: bad parameters to "
+ "__sanitizer_annotate_contiguous_container:\n"
+ " beg : %p\n"
+ " end : %p\n"
+ " old_mid : %p\n"
+ " new_mid : %p\n",
+ beg, end, old_mid, new_mid);
+ uptr granularity = SHADOW_GRANULARITY;
+ if (!IsAligned(beg, granularity))
+ Report("ERROR: beg is not aligned by %d\n", granularity);
+ stack->Print();
+ ReportErrorSummary(scariness.GetDescription(), stack);
+}
+
+void ErrorODRViolation::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report("ERROR: AddressSanitizer: %s (%p):\n", scariness.GetDescription(),
+ global1.beg);
+ Printf("%s", d.EndWarning());
+ InternalScopedString g1_loc(256), g2_loc(256);
+ PrintGlobalLocation(&g1_loc, global1);
+ PrintGlobalLocation(&g2_loc, global2);
+ Printf(" [1] size=%zd '%s' %s\n", global1.size,
+ MaybeDemangleGlobalName(global1.name), g1_loc.data());
+ Printf(" [2] size=%zd '%s' %s\n", global2.size,
+ MaybeDemangleGlobalName(global2.name), g2_loc.data());
+ if (stack_id1 && stack_id2) {
+ Printf("These globals were registered at these points:\n");
+ Printf(" [1]:\n");
+ StackDepotGet(stack_id1).Print();
+ Printf(" [2]:\n");
+ StackDepotGet(stack_id2).Print();
+ }
+ Report(
+ "HINT: if you don't care about these errors you may set "
+ "ASAN_OPTIONS=detect_odr_violation=0\n");
+ InternalScopedString error_msg(256);
+ error_msg.append("%s: global '%s' at %s", scariness.GetDescription(),
+ MaybeDemangleGlobalName(global1.name), g1_loc.data());
+ ReportErrorSummary(error_msg.data());
+}
+
+void ErrorInvalidPointerPair::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report("ERROR: AddressSanitizer: %s: %p %p\n", scariness.GetDescription(),
+ addr1_description.Address(), addr2_description.Address());
+ Printf("%s", d.EndWarning());
+ GET_STACK_TRACE_FATAL(pc, bp);
+ stack.Print();
+ addr1_description.Print();
+ addr2_description.Print();
+ ReportErrorSummary(scariness.GetDescription(), &stack);
+}
+
+static bool AdjacentShadowValuesAreFullyPoisoned(u8 *s) {
+ return s[-1] > 127 && s[1] > 127;
+}
+
+ErrorGeneric::ErrorGeneric(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr addr,
+ bool is_write_, uptr access_size_)
+ : ErrorBase(tid),
+ addr_description(addr, access_size_, /*shouldLockThreadRegistry=*/false),
+ pc(pc_),
+ bp(bp_),
+ sp(sp_),
+ access_size(access_size_),
+ is_write(is_write_),
+ shadow_val(0) {
+ scariness.Clear();
+ if (access_size) {
+ if (access_size <= 9) {
+ char desr[] = "?-byte";
+ desr[0] = '0' + access_size;
+ scariness.Scare(access_size + access_size / 2, desr);
+ } else if (access_size >= 10) {
+ scariness.Scare(15, "multi-byte");
+ }
+ is_write ? scariness.Scare(20, "write") : scariness.Scare(1, "read");
+
+ // Determine the error type.
+ bug_descr = "unknown-crash";
+ if (AddrIsInMem(addr)) {
+ u8 *shadow_addr = (u8 *)MemToShadow(addr);
+ // If we are accessing 16 bytes, look at the second shadow byte.
+ if (*shadow_addr == 0 && access_size > SHADOW_GRANULARITY) shadow_addr++;
+ // If we are in the partial right redzone, look at the next shadow byte.
+ if (*shadow_addr > 0 && *shadow_addr < 128) shadow_addr++;
+ bool far_from_bounds = false;
+ shadow_val = *shadow_addr;
+ int bug_type_score = 0;
+ // For use-after-free errors, reads are almost as bad as writes.
+ int read_after_free_bonus = 0;
+ switch (shadow_val) {
+ case kAsanHeapLeftRedzoneMagic:
+ case kAsanArrayCookieMagic:
+ bug_descr = "heap-buffer-overflow";
+ bug_type_score = 10;
+ far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
+ break;
+ case kAsanHeapFreeMagic:
+ bug_descr = "heap-use-after-free";
+ bug_type_score = 20;
+ if (!is_write) read_after_free_bonus = 18;
+ break;
+ case kAsanStackLeftRedzoneMagic:
+ bug_descr = "stack-buffer-underflow";
+ bug_type_score = 25;
+ far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
+ break;
+ case kAsanInitializationOrderMagic:
+ bug_descr = "initialization-order-fiasco";
+ bug_type_score = 1;
+ break;
+ case kAsanStackMidRedzoneMagic:
+ case kAsanStackRightRedzoneMagic:
+ bug_descr = "stack-buffer-overflow";
+ bug_type_score = 25;
+ far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
+ break;
+ case kAsanStackAfterReturnMagic:
+ bug_descr = "stack-use-after-return";
+ bug_type_score = 30;
+ if (!is_write) read_after_free_bonus = 18;
+ break;
+ case kAsanUserPoisonedMemoryMagic:
+ bug_descr = "use-after-poison";
+ bug_type_score = 20;
+ break;
+ case kAsanContiguousContainerOOBMagic:
+ bug_descr = "container-overflow";
+ bug_type_score = 10;
+ break;
+ case kAsanStackUseAfterScopeMagic:
+ bug_descr = "stack-use-after-scope";
+ bug_type_score = 10;
+ break;
+ case kAsanGlobalRedzoneMagic:
+ bug_descr = "global-buffer-overflow";
+ bug_type_score = 10;
+ far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
+ break;
+ case kAsanIntraObjectRedzone:
+ bug_descr = "intra-object-overflow";
+ bug_type_score = 10;
+ break;
+ case kAsanAllocaLeftMagic:
+ case kAsanAllocaRightMagic:
+ bug_descr = "dynamic-stack-buffer-overflow";
+ bug_type_score = 25;
+ far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
+ break;
+ }
+ scariness.Scare(bug_type_score + read_after_free_bonus, bug_descr);
+ if (far_from_bounds) scariness.Scare(10, "far-from-bounds");
+ }
+ }
+}
+
+static void PrintContainerOverflowHint() {
+ Printf("HINT: if you don't care about these errors you may set "
+ "ASAN_OPTIONS=detect_container_overflow=0.\n"
+ "If you suspect a false positive see also: "
+ "https://github.com/google/sanitizers/wiki/"
+ "AddressSanitizerContainerOverflow.\n");
+}
+
+static void PrintShadowByte(InternalScopedString *str, const char *before,
+ u8 byte, const char *after = "\n") {
+ PrintMemoryByte(str, before, byte, /*in_shadow*/true, after);
+}
+
+static void PrintLegend(InternalScopedString *str) {
+ str->append(
+ "Shadow byte legend (one shadow byte represents %d "
+ "application bytes):\n",
+ (int)SHADOW_GRANULARITY);
+ PrintShadowByte(str, " Addressable: ", 0);
+ str->append(" Partially addressable: ");
+ for (u8 i = 1; i < SHADOW_GRANULARITY; i++) PrintShadowByte(str, "", i, " ");
+ str->append("\n");
+ PrintShadowByte(str, " Heap left redzone: ",
+ kAsanHeapLeftRedzoneMagic);
+ PrintShadowByte(str, " Freed heap region: ", kAsanHeapFreeMagic);
+ PrintShadowByte(str, " Stack left redzone: ",
+ kAsanStackLeftRedzoneMagic);
+ PrintShadowByte(str, " Stack mid redzone: ",
+ kAsanStackMidRedzoneMagic);
+ PrintShadowByte(str, " Stack right redzone: ",
+ kAsanStackRightRedzoneMagic);
+ PrintShadowByte(str, " Stack after return: ",
+ kAsanStackAfterReturnMagic);
+ PrintShadowByte(str, " Stack use after scope: ",
+ kAsanStackUseAfterScopeMagic);
+ PrintShadowByte(str, " Global redzone: ", kAsanGlobalRedzoneMagic);
+ PrintShadowByte(str, " Global init order: ",
+ kAsanInitializationOrderMagic);
+ PrintShadowByte(str, " Poisoned by user: ",
+ kAsanUserPoisonedMemoryMagic);
+ PrintShadowByte(str, " Container overflow: ",
+ kAsanContiguousContainerOOBMagic);
+ PrintShadowByte(str, " Array cookie: ",
+ kAsanArrayCookieMagic);
+ PrintShadowByte(str, " Intra object redzone: ",
+ kAsanIntraObjectRedzone);
+ PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic);
+ PrintShadowByte(str, " Left alloca redzone: ", kAsanAllocaLeftMagic);
+ PrintShadowByte(str, " Right alloca redzone: ", kAsanAllocaRightMagic);
+}
+
+static void PrintShadowBytes(InternalScopedString *str, const char *before,
+ u8 *bytes, u8 *guilty, uptr n) {
+ Decorator d;
+ if (before) str->append("%s%p:", before, bytes);
+ for (uptr i = 0; i < n; i++) {
+ u8 *p = bytes + i;
+ const char *before =
+ p == guilty ? "[" : (p - 1 == guilty && i != 0) ? "" : " ";
+ const char *after = p == guilty ? "]" : "";
+ PrintShadowByte(str, before, *p, after);
+ }
+ str->append("\n");
+}
+
+static void PrintShadowMemoryForAddress(uptr addr) {
+ if (!AddrIsInMem(addr)) return;
+ uptr shadow_addr = MemToShadow(addr);
+ const uptr n_bytes_per_row = 16;
+ uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1);
+ InternalScopedString str(4096 * 8);
+ str.append("Shadow bytes around the buggy address:\n");
+ for (int i = -5; i <= 5; i++) {
+ const char *prefix = (i == 0) ? "=>" : " ";
+ PrintShadowBytes(&str, prefix, (u8 *)(aligned_shadow + i * n_bytes_per_row),
+ (u8 *)shadow_addr, n_bytes_per_row);
+ }
+ if (flags()->print_legend) PrintLegend(&str);
+ Printf("%s", str.data());
+}
+
+void ErrorGeneric::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ uptr addr = addr_description.Address();
+ Report("ERROR: AddressSanitizer: %s on address %p at pc %p bp %p sp %p\n",
+ bug_descr, (void *)addr, pc, bp, sp);
+ Printf("%s", d.EndWarning());
+
+ char tname[128];
+ Printf("%s%s of size %zu at %p thread T%d%s%s\n", d.Access(),
+ access_size ? (is_write ? "WRITE" : "READ") : "ACCESS", access_size,
+ (void *)addr, tid,
+ ThreadNameWithParenthesis(tid, tname, sizeof(tname)), d.EndAccess());
+
+ scariness.Print();
+ GET_STACK_TRACE_FATAL(pc, bp);
+ stack.Print();
+
+ // Pass bug_descr because we have a special case for
+ // initialization-order-fiasco.
+ addr_description.Print(bug_descr);
+ if (shadow_val == kAsanContiguousContainerOOBMagic)
+ PrintContainerOverflowHint();
+ ReportErrorSummary(bug_descr, &stack);
+ PrintShadowMemoryForAddress(addr);
+}
+
+} // namespace __asan
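A note on the classification logic in the ErrorGeneric constructor above: the bug type is derived entirely from the shadow byte covering the faulting address. The code reads MemToShadow(addr), steps one byte forward when a wide access starts in a fully addressable granule or lands in a partially addressable one, and then switches on the magic value. Below is a stand-alone sketch of that step, using a few of the magic constants listed in asan_internal.h later in this patch (0xfa, 0xfd, 0xf1, 0xf5); the helper name is invented.

#include <cstdint>
#include <cstdio>

// Shadow magic values as listed in asan_internal.h (heap left redzone,
// freed heap, stack left redzone, stack-after-return).
const uint8_t kHeapLeftRedzone  = 0xfa;
const uint8_t kHeapFreed        = 0xfd;
const uint8_t kStackLeftRedzone = 0xf1;
const uint8_t kStackAfterReturn = 0xf5;

// Hypothetical helper mirroring the adjustment + switch in ErrorGeneric.
const char *ClassifyShadow(const uint8_t *shadow, unsigned access_size,
                           unsigned granularity = 8) {
  // A wide access may start in a fully addressable granule; look further.
  if (*shadow == 0 && access_size > granularity) shadow++;
  // A partially addressable granule (1..granularity-1) points at the next one.
  if (*shadow > 0 && *shadow < 128) shadow++;
  switch (*shadow) {
    case kHeapLeftRedzone:  return "heap-buffer-overflow";
    case kHeapFreed:        return "heap-use-after-free";
    case kStackLeftRedzone: return "stack-buffer-underflow";
    case kStackAfterReturn: return "stack-use-after-return";
    default:                return "unknown-crash";
  }
}

int main() {
  // 8 addressable bytes, then 6 addressable bytes, then a heap redzone.
  uint8_t shadow[3] = {0x00, 0x06, kHeapLeftRedzone};
  printf("%s\n", ClassifyShadow(&shadow[1], 1));  // heap-buffer-overflow
}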
diff --git a/lib/asan/asan_errors.h b/lib/asan/asan_errors.h
new file mode 100644
index 000000000000..9a124924470e
--- /dev/null
+++ b/lib/asan/asan_errors.h
@@ -0,0 +1,390 @@
+//===-- asan_errors.h -------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan-private header for error structures.
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_ERRORS_H
+#define ASAN_ERRORS_H
+
+#include "asan_descriptions.h"
+#include "asan_scariness_score.h"
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __asan {
+
+struct ErrorBase {
+ ErrorBase() = default;
+ explicit ErrorBase(u32 tid_) : tid(tid_) {}
+ ScarinessScoreBase scariness;
+ u32 tid;
+};
+
+struct ErrorStackOverflow : ErrorBase {
+ uptr addr, pc, bp, sp;
+ // ErrorStackOverflow never owns the context.
+ void *context;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorStackOverflow() = default;
+ ErrorStackOverflow(u32 tid, const SignalContext &sig)
+ : ErrorBase(tid),
+ addr(sig.addr),
+ pc(sig.pc),
+ bp(sig.bp),
+ sp(sig.sp),
+ context(sig.context) {
+ scariness.Clear();
+ scariness.Scare(10, "stack-overflow");
+ }
+ void Print();
+};
+
+struct ErrorDeadlySignal : ErrorBase {
+ uptr addr, pc, bp, sp;
+ // ErrorDeadlySignal never owns the context.
+ void *context;
+ int signo;
+ SignalContext::WriteFlag write_flag;
+ bool is_memory_access;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorDeadlySignal() = default;
+ ErrorDeadlySignal(u32 tid, const SignalContext &sig, int signo_)
+ : ErrorBase(tid),
+ addr(sig.addr),
+ pc(sig.pc),
+ bp(sig.bp),
+ sp(sig.sp),
+ context(sig.context),
+ signo(signo_),
+ write_flag(sig.write_flag),
+ is_memory_access(sig.is_memory_access) {
+ scariness.Clear();
+ if (is_memory_access) {
+ if (addr < GetPageSizeCached()) {
+ scariness.Scare(10, "null-deref");
+ } else if (addr == pc) {
+ scariness.Scare(60, "wild-jump");
+ } else if (write_flag == SignalContext::WRITE) {
+ scariness.Scare(30, "wild-addr-write");
+ } else if (write_flag == SignalContext::READ) {
+ scariness.Scare(20, "wild-addr-read");
+ } else {
+ scariness.Scare(25, "wild-addr");
+ }
+ } else {
+ scariness.Scare(10, "signal");
+ }
+ }
+ void Print();
+};
+
+struct ErrorDoubleFree : ErrorBase {
+ // ErrorDoubleFree doesn't own the stack trace.
+ const BufferedStackTrace *second_free_stack;
+ HeapAddressDescription addr_description;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorDoubleFree() = default;
+ ErrorDoubleFree(u32 tid, BufferedStackTrace *stack, uptr addr)
+ : ErrorBase(tid), second_free_stack(stack) {
+ CHECK_GT(second_free_stack->size, 0);
+ GetHeapAddressInformation(addr, 1, &addr_description);
+ scariness.Clear();
+ scariness.Scare(42, "double-free");
+ }
+ void Print();
+};
+
+struct ErrorNewDeleteSizeMismatch : ErrorBase {
+ // ErrorNewDeleteSizeMismatch doesn't own the stack trace.
+ const BufferedStackTrace *free_stack;
+ HeapAddressDescription addr_description;
+ uptr delete_size;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorNewDeleteSizeMismatch() = default;
+ ErrorNewDeleteSizeMismatch(u32 tid, BufferedStackTrace *stack, uptr addr,
+ uptr delete_size_)
+ : ErrorBase(tid), free_stack(stack), delete_size(delete_size_) {
+ GetHeapAddressInformation(addr, 1, &addr_description);
+ scariness.Clear();
+ scariness.Scare(10, "new-delete-type-mismatch");
+ }
+ void Print();
+};
+
+struct ErrorFreeNotMalloced : ErrorBase {
+ // ErrorFreeNotMalloced doesn't own the stack trace.
+ const BufferedStackTrace *free_stack;
+ AddressDescription addr_description;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorFreeNotMalloced() = default;
+ ErrorFreeNotMalloced(u32 tid, BufferedStackTrace *stack, uptr addr)
+ : ErrorBase(tid),
+ free_stack(stack),
+ addr_description(addr, /*shouldLockThreadRegistry=*/false) {
+ scariness.Clear();
+ scariness.Scare(40, "bad-free");
+ }
+ void Print();
+};
+
+struct ErrorAllocTypeMismatch : ErrorBase {
+ // ErrorAllocTypeMismatch doesn't own the stack trace.
+ const BufferedStackTrace *dealloc_stack;
+ HeapAddressDescription addr_description;
+ AllocType alloc_type, dealloc_type;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorAllocTypeMismatch() = default;
+ ErrorAllocTypeMismatch(u32 tid, BufferedStackTrace *stack, uptr addr,
+ AllocType alloc_type_, AllocType dealloc_type_)
+ : ErrorBase(tid),
+ dealloc_stack(stack),
+ alloc_type(alloc_type_),
+ dealloc_type(dealloc_type_) {
+ GetHeapAddressInformation(addr, 1, &addr_description);
+ scariness.Clear();
+ scariness.Scare(10, "alloc-dealloc-mismatch");
+ };
+ void Print();
+};
+
+struct ErrorMallocUsableSizeNotOwned : ErrorBase {
+ // ErrorMallocUsableSizeNotOwned doesn't own the stack trace.
+ const BufferedStackTrace *stack;
+ AddressDescription addr_description;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorMallocUsableSizeNotOwned() = default;
+ ErrorMallocUsableSizeNotOwned(u32 tid, BufferedStackTrace *stack_, uptr addr)
+ : ErrorBase(tid),
+ stack(stack_),
+ addr_description(addr, /*shouldLockThreadRegistry=*/false) {
+ scariness.Clear();
+ scariness.Scare(10, "bad-malloc_usable_size");
+ }
+ void Print();
+};
+
+struct ErrorSanitizerGetAllocatedSizeNotOwned : ErrorBase {
+ // ErrorSanitizerGetAllocatedSizeNotOwned doesn't own the stack trace.
+ const BufferedStackTrace *stack;
+ AddressDescription addr_description;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorSanitizerGetAllocatedSizeNotOwned() = default;
+ ErrorSanitizerGetAllocatedSizeNotOwned(u32 tid, BufferedStackTrace *stack_,
+ uptr addr)
+ : ErrorBase(tid),
+ stack(stack_),
+ addr_description(addr, /*shouldLockThreadRegistry=*/false) {
+ scariness.Clear();
+ scariness.Scare(10, "bad-__sanitizer_get_allocated_size");
+ }
+ void Print();
+};
+
+struct ErrorStringFunctionMemoryRangesOverlap : ErrorBase {
+ // ErrorStringFunctionMemoryRangesOverlap doesn't own the stack trace.
+ const BufferedStackTrace *stack;
+ uptr length1, length2;
+ AddressDescription addr1_description;
+ AddressDescription addr2_description;
+ const char *function;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorStringFunctionMemoryRangesOverlap() = default;
+ ErrorStringFunctionMemoryRangesOverlap(u32 tid, BufferedStackTrace *stack_,
+ uptr addr1, uptr length1_, uptr addr2,
+ uptr length2_, const char *function_)
+ : ErrorBase(tid),
+ stack(stack_),
+ length1(length1_),
+ length2(length2_),
+ addr1_description(addr1, length1, /*shouldLockThreadRegistry=*/false),
+ addr2_description(addr2, length2, /*shouldLockThreadRegistry=*/false),
+ function(function_) {
+ char bug_type[100];
+ internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function);
+ scariness.Clear();
+ scariness.Scare(10, bug_type);
+ }
+ void Print();
+};
+
+struct ErrorStringFunctionSizeOverflow : ErrorBase {
+ // ErrorStringFunctionSizeOverflow doesn't own the stack trace.
+ const BufferedStackTrace *stack;
+ AddressDescription addr_description;
+ uptr size;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorStringFunctionSizeOverflow() = default;
+ ErrorStringFunctionSizeOverflow(u32 tid, BufferedStackTrace *stack_,
+ uptr addr, uptr size_)
+ : ErrorBase(tid),
+ stack(stack_),
+ addr_description(addr, /*shouldLockThreadRegistry=*/false),
+ size(size_) {
+ scariness.Clear();
+ scariness.Scare(10, "negative-size-param");
+ }
+ void Print();
+};
+
+struct ErrorBadParamsToAnnotateContiguousContainer : ErrorBase {
+ // ErrorBadParamsToAnnotateContiguousContainer doesn't own the stack trace.
+ const BufferedStackTrace *stack;
+ uptr beg, end, old_mid, new_mid;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorBadParamsToAnnotateContiguousContainer() = default;
+ // PS4: Do we want an AddressDescription for beg?
+ ErrorBadParamsToAnnotateContiguousContainer(u32 tid,
+ BufferedStackTrace *stack_,
+ uptr beg_, uptr end_,
+ uptr old_mid_, uptr new_mid_)
+ : ErrorBase(tid),
+ stack(stack_),
+ beg(beg_),
+ end(end_),
+ old_mid(old_mid_),
+ new_mid(new_mid_) {
+ scariness.Clear();
+ scariness.Scare(10, "bad-__sanitizer_annotate_contiguous_container");
+ }
+ void Print();
+};
+
+struct ErrorODRViolation : ErrorBase {
+ __asan_global global1, global2;
+ u32 stack_id1, stack_id2;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorODRViolation() = default;
+ ErrorODRViolation(u32 tid, const __asan_global *g1, u32 stack_id1_,
+ const __asan_global *g2, u32 stack_id2_)
+ : ErrorBase(tid),
+ global1(*g1),
+ global2(*g2),
+ stack_id1(stack_id1_),
+ stack_id2(stack_id2_) {
+ scariness.Clear();
+ scariness.Scare(10, "odr-violation");
+ }
+ void Print();
+};
+
+struct ErrorInvalidPointerPair : ErrorBase {
+ uptr pc, bp, sp;
+ AddressDescription addr1_description;
+ AddressDescription addr2_description;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorInvalidPointerPair() = default;
+ ErrorInvalidPointerPair(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr p1,
+ uptr p2)
+ : ErrorBase(tid),
+ pc(pc_),
+ bp(bp_),
+ sp(sp_),
+ addr1_description(p1, 1, /*shouldLockThreadRegistry=*/false),
+ addr2_description(p2, 1, /*shouldLockThreadRegistry=*/false) {
+ scariness.Clear();
+ scariness.Scare(10, "invalid-pointer-pair");
+ }
+ void Print();
+};
+
+struct ErrorGeneric : ErrorBase {
+ AddressDescription addr_description;
+ uptr pc, bp, sp;
+ uptr access_size;
+ const char *bug_descr;
+ bool is_write;
+ u8 shadow_val;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorGeneric() = default;
+ ErrorGeneric(u32 tid, uptr addr, uptr pc_, uptr bp_, uptr sp_, bool is_write_,
+ uptr access_size_);
+ void Print();
+};
+
+// clang-format off
+#define ASAN_FOR_EACH_ERROR_KIND(macro) \
+ macro(StackOverflow) \
+ macro(DeadlySignal) \
+ macro(DoubleFree) \
+ macro(NewDeleteSizeMismatch) \
+ macro(FreeNotMalloced) \
+ macro(AllocTypeMismatch) \
+ macro(MallocUsableSizeNotOwned) \
+ macro(SanitizerGetAllocatedSizeNotOwned) \
+ macro(StringFunctionMemoryRangesOverlap) \
+ macro(StringFunctionSizeOverflow) \
+ macro(BadParamsToAnnotateContiguousContainer) \
+ macro(ODRViolation) \
+ macro(InvalidPointerPair) \
+ macro(Generic)
+// clang-format on
+
+#define ASAN_DEFINE_ERROR_KIND(name) kErrorKind##name,
+#define ASAN_ERROR_DESCRIPTION_MEMBER(name) Error##name name;
+#define ASAN_ERROR_DESCRIPTION_CONSTRUCTOR(name) \
+ ErrorDescription(Error##name const &e) : kind(kErrorKind##name), name(e) {}
+#define ASAN_ERROR_DESCRIPTION_PRINT(name) \
+ case kErrorKind##name: \
+ return name.Print();
+
+enum ErrorKind {
+ kErrorKindInvalid = 0,
+ ASAN_FOR_EACH_ERROR_KIND(ASAN_DEFINE_ERROR_KIND)
+};
+
+struct ErrorDescription {
+ ErrorKind kind;
+ // We're using a tagged union because it allows us to have a trivially
+ // copyable type and use the same structures as the public interface.
+ //
+ // We can add a wrapper around it to make it "more c++-like", but that would
+ // add a lot of code and the benefit wouldn't be that big.
+ union {
+ ErrorBase Base;
+ ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_MEMBER)
+ };
+
+ ErrorDescription() { internal_memset(this, 0, sizeof(*this)); }
+ ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_CONSTRUCTOR)
+
+ bool IsValid() { return kind != kErrorKindInvalid; }
+ void Print() {
+ switch (kind) {
+ ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_PRINT)
+ case kErrorKindInvalid:
+ CHECK(0);
+ }
+ CHECK(0);
+ }
+};
+
+#undef ASAN_FOR_EACH_ERROR_KIND
+#undef ASAN_DEFINE_ERROR_KIND
+#undef ASAN_ERROR_DESCRIPTION_MEMBER
+#undef ASAN_ERROR_DESCRIPTION_CONSTRUCTOR
+#undef ASAN_ERROR_DESCRIPTION_PRINT
+
+} // namespace __asan
+
+#endif // ASAN_ERRORS_H
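ErrorDescription above is generated with an X-macro: ASAN_FOR_EACH_ERROR_KIND is expanded once for the enum values, once for the union members, once for the converting constructors, and once for the Print() dispatch, so a new error kind only has to be added to the single list. A minimal sketch of the same technique with two made-up error kinds:

#include <cstdio>

struct ErrorFoo { int code;  void Print() { printf("Foo %d\n", code); } };
struct ErrorBar { int bytes; void Print() { printf("Bar %d\n", bytes); } };

// One list drives every generated construct below.
#define FOR_EACH_ERROR_KIND(macro) \
  macro(Foo)                       \
  macro(Bar)

#define DEFINE_KIND(name) kKind##name,
#define DEFINE_MEMBER(name) Error##name name;
#define DEFINE_CTOR(name) \
  Description(const Error##name &e) : kind(kKind##name), name(e) {}
#define DEFINE_PRINT(name) \
  case kKind##name: return name.Print();

enum Kind { kKindInvalid = 0, FOR_EACH_ERROR_KIND(DEFINE_KIND) };

struct Description {
  Kind kind;
  union { FOR_EACH_ERROR_KIND(DEFINE_MEMBER) };
  FOR_EACH_ERROR_KIND(DEFINE_CTOR)
  void Print() {
    switch (kind) {
      FOR_EACH_ERROR_KIND(DEFINE_PRINT)
      case kKindInvalid: break;
    }
  }
};

int main() {
  Description d = ErrorFoo{42};
  d.Print();  // prints "Foo 42"
}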
diff --git a/lib/asan/asan_fake_stack.cc b/lib/asan/asan_fake_stack.cc
index 16feccd0d54a..017b7d2af129 100644
--- a/lib/asan/asan_fake_stack.cc
+++ b/lib/asan/asan_fake_stack.cc
@@ -100,7 +100,7 @@ FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
// if the signal arrives between checking and setting flags[pos], the
// signal handler's fake stack will start from a different hint_position
// and so will not touch this particular byte. So, it is safe to do this
- // with regular non-atimic load and store (at least I was not able to make
+ // with regular non-atomic load and store (at least I was not able to make
// this code crash).
if (flags[pos]) continue;
flags[pos] = 1;
diff --git a/lib/asan/asan_fake_stack.h b/lib/asan/asan_fake_stack.h
index 74ca02df9056..da9a91c23025 100644
--- a/lib/asan/asan_fake_stack.h
+++ b/lib/asan/asan_fake_stack.h
@@ -52,7 +52,7 @@ struct FakeFrame {
// Allocate() flips the appropriate allocation flag atomically, thus achieving
// async-signal safety.
// This allocator does not have quarantine per se, but it tries to allocate the
-// frames in round robin fasion to maximize the delay between a deallocation
+// frames in round robin fashion to maximize the delay between a deallocation
// and the next allocation.
class FakeStack {
static const uptr kMinStackFrameSizeLog = 6; // Min frame is 64B.
@@ -99,12 +99,12 @@ class FakeStack {
return ((uptr)1) << (stack_size_log - kMinStackFrameSizeLog - class_id);
}
- // Divide n by the numbe of frames in size class.
+ // Divide n by the number of frames in size class.
static uptr ModuloNumberOfFrames(uptr stack_size_log, uptr class_id, uptr n) {
return n & (NumberOfFrames(stack_size_log, class_id) - 1);
}
- // The the pointer to the flags of the given class_id.
+ // The pointer to the flags of the given class_id.
u8 *GetFlags(uptr stack_size_log, uptr class_id) {
return reinterpret_cast<u8 *>(this) + kFlagsOffset +
FlagsOffset(stack_size_log, class_id);
diff --git a/lib/asan/asan_flags.cc b/lib/asan/asan_flags.cc
index 345a35ce3bb3..4db407d4512b 100644
--- a/lib/asan/asan_flags.cc
+++ b/lib/asan/asan_flags.cc
@@ -156,9 +156,19 @@ void InitializeFlags() {
f->quarantine_size_mb = f->quarantine_size >> 20;
if (f->quarantine_size_mb < 0) {
const int kDefaultQuarantineSizeMb =
- (ASAN_LOW_MEMORY) ? 1UL << 6 : 1UL << 8;
+ (ASAN_LOW_MEMORY) ? 1UL << 4 : 1UL << 8;
f->quarantine_size_mb = kDefaultQuarantineSizeMb;
}
+ if (f->thread_local_quarantine_size_kb < 0) {
+ const u32 kDefaultThreadLocalQuarantineSizeKb =
+ // It is not advised to go lower than 64Kb, otherwise quarantine batches
+ // pushed from the thread local quarantine to the global one will create
+ // too much overhead. One quarantine batch is 8Kb in size and holds up to
+ // 1021 chunks, which amounts to 1/8 memory overhead per batch when the
+ // thread local quarantine is set to 64Kb.
+ (ASAN_LOW_MEMORY) ? 1 << 6 : FIRST_32_SECOND_64(1 << 8, 1 << 10);
+ f->thread_local_quarantine_size_kb = kDefaultThreadLocalQuarantineSizeKb;
+ }
if (!f->replace_str && common_flags()->intercept_strlen) {
Report("WARNING: strlen interceptor is enabled even though replace_str=0. "
"Use intercept_strlen=0 to disable it.");
diff --git a/lib/asan/asan_flags.inc b/lib/asan/asan_flags.inc
index 9496a47490c7..4712efb86224 100644
--- a/lib/asan/asan_flags.inc
+++ b/lib/asan/asan_flags.inc
@@ -23,6 +23,12 @@ ASAN_FLAG(int, quarantine_size_mb, -1,
"Size (in Mb) of quarantine used to detect use-after-free "
"errors. Lower value may reduce memory usage but increase the "
"chance of false negatives.")
+ASAN_FLAG(int, thread_local_quarantine_size_kb, -1,
+ "Size (in Kb) of thread local quarantine used to detect "
+ "use-after-free errors. Lower value may reduce memory usage but "
+ "increase the chance of false negatives. It is not advised to go "
+ "lower than 64Kb, otherwise frequent transfers to global quarantine "
+ "might affect performance.")
ASAN_FLAG(int, redzone, 16,
"Minimal size (in bytes) of redzones around heap objects. "
"Requirement: redzone >= 16, is a power of two.")
@@ -102,7 +108,7 @@ ASAN_FLAG(bool, poison_array_cookie, true,
// https://github.com/google/sanitizers/issues/309
// TODO(glider,timurrrr): Fix known issues and enable this back.
ASAN_FLAG(bool, alloc_dealloc_mismatch,
- (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0),
+ !SANITIZER_MAC && !SANITIZER_WINDOWS && !SANITIZER_ANDROID,
"Report errors on malloc/delete, new/free, new/delete[], etc.")
ASAN_FLAG(bool, new_delete_type_mismatch, true,
@@ -133,6 +139,9 @@ ASAN_FLAG(int, detect_odr_violation, 2,
"have different sizes")
ASAN_FLAG(bool, dump_instruction_bytes, false,
"If true, dump 16 bytes starting at the instruction that caused SEGV")
+ASAN_FLAG(bool, dump_registers, true,
+ "If true, dump values of CPU registers when SEGV happens. Only "
+ "available on OS X for now.")
ASAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
ASAN_FLAG(bool, halt_on_error, true,
"Crash the program after printing the first error report "
diff --git a/lib/asan/asan_globals.cc b/lib/asan/asan_globals.cc
index f185761809c7..b7233067358c 100644
--- a/lib/asan/asan_globals.cc
+++ b/lib/asan/asan_globals.cc
@@ -25,6 +25,7 @@
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
namespace __asan {
@@ -123,18 +124,6 @@ int GetGlobalsForAddress(uptr addr, Global *globals, u32 *reg_sites,
return res;
}
-bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr) {
- Global g = {};
- if (GetGlobalsForAddress(addr, &g, nullptr, 1)) {
- internal_strncpy(descr->name, g.name, descr->name_size);
- descr->region_address = g.beg;
- descr->region_size = g.size;
- descr->region_kind = "global";
- return true;
- }
- return false;
-}
-
enum GlobalSymbolState {
UNREGISTERED = 0,
REGISTERED = 1
@@ -279,6 +268,46 @@ void StopInitOrderChecking() {
}
}
+static bool IsASCII(unsigned char c) { return /*0x00 <= c &&*/ c <= 0x7F; }
+
+const char *MaybeDemangleGlobalName(const char *name) {
+ // We can spoil names of globals with C linkage, so use a heuristic
+ // approach to check if the name should be demangled.
+ bool should_demangle = false;
+ if (name[0] == '_' && name[1] == 'Z')
+ should_demangle = true;
+ else if (SANITIZER_WINDOWS && name[0] == '\01' && name[1] == '?')
+ should_demangle = true;
+
+ return should_demangle ? Symbolizer::GetOrInit()->Demangle(name) : name;
+}
+
+// Check if the global is a zero-terminated ASCII string. If so, print it.
+void PrintGlobalNameIfASCII(InternalScopedString *str, const __asan_global &g) {
+ for (uptr p = g.beg; p < g.beg + g.size - 1; p++) {
+ unsigned char c = *(unsigned char *)p;
+ if (c == '\0' || !IsASCII(c)) return;
+ }
+ if (*(char *)(g.beg + g.size - 1) != '\0') return;
+ str->append(" '%s' is ascii string '%s'\n", MaybeDemangleGlobalName(g.name),
+ (char *)g.beg);
+}
+
+static const char *GlobalFilename(const __asan_global &g) {
+ const char *res = g.module_name;
+ // Prefer the filename from the source location, if it is available.
+ if (g.location) res = g.location->filename;
+ CHECK(res);
+ return res;
+}
+
+void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g) {
+ str->append("%s", GlobalFilename(g));
+ if (!g.location) return;
+ if (g.location->line_no) str->append(":%d", g.location->line_no);
+ if (g.location->column_no) str->append(":%d", g.location->column_no);
+}
+
} // namespace __asan
// ---------------------- Interface ---------------- {{{1
@@ -319,6 +348,20 @@ void __asan_register_globals(__asan_global *globals, uptr n) {
Printf("=== ID %d; %p %p\n", stack_id, &globals[0], &globals[n - 1]);
}
for (uptr i = 0; i < n; i++) {
+ if (SANITIZER_WINDOWS && globals[i].beg == 0) {
+ // The MSVC incremental linker may pad globals out to 256 bytes. As long
+ // as __asan_global is smaller than 256 bytes and its size is a power
+ // of two, we can skip over the padding.
+ static_assert(
+ sizeof(__asan_global) < 256 &&
+ (sizeof(__asan_global) & (sizeof(__asan_global) - 1)) == 0,
+ "sizeof(__asan_global) incompatible with incremental linker padding");
+ // If these are padding bytes, the rest of the global should be zero.
+ CHECK(globals[i].size == 0 && globals[i].size_with_redzone == 0 &&
+ globals[i].name == nullptr && globals[i].module_name == nullptr &&
+ globals[i].odr_indicator == 0);
+ continue;
+ }
RegisterGlobal(&globals[i]);
}
}
@@ -329,6 +372,11 @@ void __asan_unregister_globals(__asan_global *globals, uptr n) {
if (!flags()->report_globals) return;
BlockingMutexLock lock(&mu_for_globals);
for (uptr i = 0; i < n; i++) {
+ if (SANITIZER_WINDOWS && globals[i].beg == 0) {
+ // Skip globals that look like padding from the MSVC incremental linker.
+ // See comment in __asan_register_globals.
+ continue;
+ }
UnregisterGlobal(&globals[i]);
}
}
@@ -339,10 +387,10 @@ void __asan_unregister_globals(__asan_global *globals, uptr n) {
// initializer can only touch global variables in the same TU.
void __asan_before_dynamic_init(const char *module_name) {
if (!flags()->check_initialization_order ||
- !CanPoisonMemory())
+ !CanPoisonMemory() ||
+ !dynamic_init_globals)
return;
bool strict_init_order = flags()->strict_init_order;
- CHECK(dynamic_init_globals);
CHECK(module_name);
CHECK(asan_inited);
BlockingMutexLock lock(&mu_for_globals);
@@ -365,7 +413,8 @@ void __asan_before_dynamic_init(const char *module_name) {
// TU are poisoned. It simply unpoisons all dynamically initialized globals.
void __asan_after_dynamic_init() {
if (!flags()->check_initialization_order ||
- !CanPoisonMemory())
+ !CanPoisonMemory() ||
+ !dynamic_init_globals)
return;
CHECK(asan_inited);
BlockingMutexLock lock(&mu_for_globals);
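The Windows branch added to __asan_register_globals above treats all-zero __asan_global records as incremental-linker padding and skips them; the static_assert guarantees that 256 bytes of padding is always a whole number of records (the size is a power of two below 256), so plain array iteration stays in step. A small sketch of that idea with a hypothetical metadata struct (the real layout lives in asan_interface_internal.h):

#include <cstdint>
#include <cstdio>

// Hypothetical metadata record; sized to a power of two like __asan_global.
struct Global {
  uint64_t beg, size;     // start address and size of the described global
  uint64_t name, module;  // opaque ids in this sketch (pointers in ASan)
};
static_assert(sizeof(Global) < 256 &&
                  (sizeof(Global) & (sizeof(Global) - 1)) == 0,
              "padding must be a whole number of records");

void RegisterAll(const Global *globals, unsigned long n) {
  for (unsigned long i = 0; i < n; i++) {
    if (globals[i].beg == 0) continue;  // all-zero record: linker padding
    printf("register global #%llu at %#llx (%llu bytes)\n",
           (unsigned long long)globals[i].name,
           (unsigned long long)globals[i].beg,
           (unsigned long long)globals[i].size);
  }
}

int main() {
  Global metadata[3] = {};          // element 1 stays zeroed: padding
  metadata[0] = {0x1000, 4, 1, 1};
  metadata[2] = {0x2000, 8, 2, 1};
  RegisterAll(metadata, 3);
}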
diff --git a/lib/asan/asan_globals_win.cc b/lib/asan/asan_globals_win.cc
new file mode 100644
index 000000000000..56c0d1a532f9
--- /dev/null
+++ b/lib/asan/asan_globals_win.cc
@@ -0,0 +1,62 @@
+//===-- asan_globals_win.cc -----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Global registration code that is linked into every Windows DLL and EXE.
+//
+//===----------------------------------------------------------------------===//
+
+#include "asan_interface_internal.h"
+#if SANITIZER_WINDOWS
+
+namespace __asan {
+
+#pragma section(".ASAN$GA", read, write) // NOLINT
+#pragma section(".ASAN$GZ", read, write) // NOLINT
+extern "C" __declspec(allocate(".ASAN$GA"))
+__asan_global __asan_globals_start = {};
+extern "C" __declspec(allocate(".ASAN$GZ"))
+__asan_global __asan_globals_end = {};
+#pragma comment(linker, "/merge:.ASAN=.data")
+
+static void call_on_globals(void (*hook)(__asan_global *, uptr)) {
+ __asan_global *start = &__asan_globals_start + 1;
+ __asan_global *end = &__asan_globals_end;
+ uptr bytediff = (uptr)end - (uptr)start;
+ if (bytediff % sizeof(__asan_global) != 0) {
+#ifdef ASAN_DLL_THUNK
+ __debugbreak();
+#else
+ CHECK(0 && "corrupt asan global array");
+#endif
+ }
+ // We know end >= start because the linker sorts the portion after the dollar
+ // sign alphabetically.
+ uptr n = end - start;
+ hook(start, n);
+}
+
+static void register_dso_globals() {
+ call_on_globals(&__asan_register_globals);
+}
+
+static void unregister_dso_globals() {
+ call_on_globals(&__asan_unregister_globals);
+}
+
+// Register globals
+#pragma section(".CRT$XCU", long, read) // NOLINT
+#pragma section(".CRT$XTX", long, read) // NOLINT
+extern "C" __declspec(allocate(".CRT$XCU"))
+void (*const __asan_dso_reg_hook)() = &register_dso_globals;
+extern "C" __declspec(allocate(".CRT$XTX"))
+void (*const __asan_dso_unreg_hook)() = &unregister_dso_globals;
+
+} // namespace __asan
+
+#endif // SANITIZER_WINDOWS
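asan_globals_win.cc relies on the MSVC/COFF rule that the part of a section name after '$' is used only for sorting contributions and is then discarded: data placed in .ASAN$GA and .ASAN$GZ therefore brackets whatever the compiler emits into sections that sort between them, and the pointers in .CRT$XCU/.CRT$XTX run the hooks along with the CRT's own initializers and terminators. Below is a minimal MSVC-only sketch of the bracketing trick with invented section names; real builds must also tolerate zero padding between entries, which is why call_on_globals checks divisibility.

// MSVC-only sketch (cl.exe); section names here are invented.
#include <stdio.h>

#pragma section(".MYLST$A", read, write)
#pragma section(".MYLST$M", read, write)
#pragma section(".MYLST$Z", read, write)

// Start/end markers bracket every entry the rest of the binary contributes.
__declspec(allocate(".MYLST$A")) int list_start = 0;
__declspec(allocate(".MYLST$Z")) int list_end = 0;

// Entries can be registered from any translation unit.
__declspec(allocate(".MYLST$M")) int entry1 = 11;
__declspec(allocate(".MYLST$M")) int entry2 = 22;

int main() {
  // The linker sorts $A < $M < $Z, so the entries sit between the markers.
  for (int *p = &list_start + 1; p < &list_end; ++p)
    if (*p) printf("entry: %d\n", *p);  // skip possible zero padding
  return 0;
}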
diff --git a/lib/asan/asan_globals_win.h b/lib/asan/asan_globals_win.h
new file mode 100644
index 000000000000..d4ed9c1f38e1
--- /dev/null
+++ b/lib/asan/asan_globals_win.h
@@ -0,0 +1,34 @@
+//===-- asan_globals_win.h --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Interface to the Windows-specific global management code. Separated into a
+// standalone header to allow inclusion from asan_win_dynamic_runtime_thunk,
+// which defines symbols that clash with other sanitizer headers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ASAN_GLOBALS_WIN_H
+#define ASAN_GLOBALS_WIN_H
+
+#if !defined(_MSC_VER)
+#error "this file is Windows-only, and uses MSVC pragmas"
+#endif
+
+#if defined(_WIN64)
+#define SANITIZER_SYM_PREFIX
+#else
+#define SANITIZER_SYM_PREFIX "_"
+#endif
+
+// Use this macro to force linking asan_globals_win.cc into the DSO.
+#define ASAN_LINK_GLOBALS_WIN() \
+ __pragma( \
+ comment(linker, "/include:" SANITIZER_SYM_PREFIX "__asan_dso_reg_hook"))
+
+#endif // ASAN_GLOBALS_WIN_H
diff --git a/lib/asan/asan_interceptors.cc b/lib/asan/asan_interceptors.cc
index 518ceebf62a5..606016d4f4d3 100644
--- a/lib/asan/asan_interceptors.cc
+++ b/lib/asan/asan_interceptors.cc
@@ -81,6 +81,51 @@ struct AsanInterceptorContext {
} \
} while (0)
+// memcpy is called during __asan_init() from the internals of printf(...).
+// We do not treat memcpy with to==from as a bug.
+// See http://llvm.org/bugs/show_bug.cgi?id=11763.
+#define ASAN_MEMCPY_IMPL(ctx, to, from, size) \
+ do { \
+ if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size); \
+ if (asan_init_is_running) { \
+ return REAL(memcpy)(to, from, size); \
+ } \
+ ENSURE_ASAN_INITED(); \
+ if (flags()->replace_intrin) { \
+ if (to != from) { \
+ CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); \
+ } \
+ ASAN_READ_RANGE(ctx, from, size); \
+ ASAN_WRITE_RANGE(ctx, to, size); \
+ } \
+ return REAL(memcpy)(to, from, size); \
+ } while (0)
+
+// memset is called inside Printf.
+#define ASAN_MEMSET_IMPL(ctx, block, c, size) \
+ do { \
+ if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size); \
+ if (asan_init_is_running) { \
+ return REAL(memset)(block, c, size); \
+ } \
+ ENSURE_ASAN_INITED(); \
+ if (flags()->replace_intrin) { \
+ ASAN_WRITE_RANGE(ctx, block, size); \
+ } \
+ return REAL(memset)(block, c, size); \
+ } while (0)
+
+#define ASAN_MEMMOVE_IMPL(ctx, to, from, size) \
+ do { \
+ if (UNLIKELY(!asan_inited)) return internal_memmove(to, from, size); \
+ ENSURE_ASAN_INITED(); \
+ if (flags()->replace_intrin) { \
+ ASAN_READ_RANGE(ctx, from, size); \
+ ASAN_WRITE_RANGE(ctx, to, size); \
+ } \
+ return internal_memmove(to, from, size); \
+ } while (0)
+
#define ASAN_READ_RANGE(ctx, offset, size) \
ACCESS_MEMORY_RANGE(ctx, offset, size, false)
#define ASAN_WRITE_RANGE(ctx, offset, size) \
@@ -198,10 +243,25 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
} else { \
*begin = *end = 0; \
}
-// Asan needs custom handling of these:
-#undef SANITIZER_INTERCEPT_MEMSET
-#undef SANITIZER_INTERCEPT_MEMMOVE
-#undef SANITIZER_INTERCEPT_MEMCPY
+
+#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \
+ do { \
+ ASAN_INTERCEPTOR_ENTER(ctx, memmove); \
+ ASAN_MEMMOVE_IMPL(ctx, to, from, size); \
+ } while (false)
+
+#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \
+ do { \
+ ASAN_INTERCEPTOR_ENTER(ctx, memcpy); \
+ ASAN_MEMCPY_IMPL(ctx, to, from, size); \
+ } while (false)
+
+#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \
+ do { \
+ ASAN_INTERCEPTOR_ENTER(ctx, memset); \
+ ASAN_MEMSET_IMPL(ctx, block, c, size); \
+ } while (false)
+
#include "sanitizer_common/sanitizer_common_interceptors.inc"
// Syscall interceptors don't have contexts, we don't support suppressions
@@ -389,90 +449,18 @@ INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) {
}
#endif
-// memcpy is called during __asan_init() from the internals of printf(...).
-// We do not treat memcpy with to==from as a bug.
-// See http://llvm.org/bugs/show_bug.cgi?id=11763.
-#define ASAN_MEMCPY_IMPL(ctx, to, from, size) do { \
- if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size); \
- if (asan_init_is_running) { \
- return REAL(memcpy)(to, from, size); \
- } \
- ENSURE_ASAN_INITED(); \
- if (flags()->replace_intrin) { \
- if (to != from) { \
- CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); \
- } \
- ASAN_READ_RANGE(ctx, from, size); \
- ASAN_WRITE_RANGE(ctx, to, size); \
- } \
- return REAL(memcpy)(to, from, size); \
- } while (0)
-
-
void *__asan_memcpy(void *to, const void *from, uptr size) {
ASAN_MEMCPY_IMPL(nullptr, to, from, size);
}
-// memset is called inside Printf.
-#define ASAN_MEMSET_IMPL(ctx, block, c, size) do { \
- if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size); \
- if (asan_init_is_running) { \
- return REAL(memset)(block, c, size); \
- } \
- ENSURE_ASAN_INITED(); \
- if (flags()->replace_intrin) { \
- ASAN_WRITE_RANGE(ctx, block, size); \
- } \
- return REAL(memset)(block, c, size); \
- } while (0)
-
void *__asan_memset(void *block, int c, uptr size) {
ASAN_MEMSET_IMPL(nullptr, block, c, size);
}
-#define ASAN_MEMMOVE_IMPL(ctx, to, from, size) do { \
- if (UNLIKELY(!asan_inited)) \
- return internal_memmove(to, from, size); \
- ENSURE_ASAN_INITED(); \
- if (flags()->replace_intrin) { \
- ASAN_READ_RANGE(ctx, from, size); \
- ASAN_WRITE_RANGE(ctx, to, size); \
- } \
- return internal_memmove(to, from, size); \
- } while (0)
-
void *__asan_memmove(void *to, const void *from, uptr size) {
ASAN_MEMMOVE_IMPL(nullptr, to, from, size);
}
-INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) {
- void *ctx;
- ASAN_INTERCEPTOR_ENTER(ctx, memmove);
- ASAN_MEMMOVE_IMPL(ctx, to, from, size);
-}
-
-INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
- void *ctx;
- ASAN_INTERCEPTOR_ENTER(ctx, memcpy);
-#if !SANITIZER_MAC
- ASAN_MEMCPY_IMPL(ctx, to, from, size);
-#else
- // At least on 10.7 and 10.8 both memcpy() and memmove() are being replaced
- // with WRAP(memcpy). As a result, false positives are reported for memmove()
- // calls. If we just disable error reporting with
- // ASAN_OPTIONS=replace_intrin=0, memmove() is still replaced with
- // internal_memcpy(), which may lead to crashes, see
- // http://llvm.org/bugs/show_bug.cgi?id=16362.
- ASAN_MEMMOVE_IMPL(ctx, to, from, size);
-#endif // !SANITIZER_MAC
-}
-
-INTERCEPTOR(void*, memset, void *block, int c, uptr size) {
- void *ctx;
- ASAN_INTERCEPTOR_ENTER(ctx, memset);
- ASAN_MEMSET_IMPL(ctx, block, c, size);
-}
-
#if ASAN_INTERCEPT_INDEX
# if ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX
INTERCEPTOR(char*, index, const char *string, int c)
@@ -720,19 +708,10 @@ INTERCEPTOR(int, fork, void) {
namespace __asan {
void InitializeAsanInterceptors() {
static bool was_called_once;
- CHECK(was_called_once == false);
+ CHECK(!was_called_once);
was_called_once = true;
InitializeCommonInterceptors();
- // Intercept mem* functions.
- ASAN_INTERCEPT_FUNC(memcpy);
- ASAN_INTERCEPT_FUNC(memset);
- if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) {
- // In asan, REAL(memmove) is not used, but it is used in msan.
- ASAN_INTERCEPT_FUNC(memmove);
- }
- CHECK(REAL(memcpy));
-
// Intercept str* functions.
ASAN_INTERCEPT_FUNC(strcat); // NOLINT
ASAN_INTERCEPT_FUNC(strcpy); // NOLINT
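With this change the memcpy/memmove/memset interceptors are no longer defined in asan_interceptors.cc itself; ASan only supplies the COMMON_INTERCEPTOR_MEM*_IMPL bodies, and the shared sanitizer_common_interceptors.inc expands them inside the interceptors it generates. A rough, self-contained illustration of that hook-macro pattern follows; the names are invented and the real .inc is far larger.

#include <cstdio>
#include <cstring>

// --- what the tool would define before including the shared header --------
#define COMMON_MEMCPY_IMPL(to, from, size)           \
  do {                                               \
    printf("checked memcpy of %zu bytes\n", (size)); \
    return memcpy((to), (from), (size));             \
  } while (0)

// --- what the shared "common interceptors" file would expand to -----------
void *my_memcpy(void *to, const void *from, size_t size) {
  COMMON_MEMCPY_IMPL(to, from, size);
}

int main() {
  char src[8] = "abcdefg", dst[8];
  my_memcpy(dst, src, sizeof(src));
  printf("%s\n", dst);
}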
diff --git a/lib/asan/asan_interface_internal.h b/lib/asan/asan_interface_internal.h
index 3cf3413dc126..8cd424cc03e8 100644
--- a/lib/asan/asan_interface_internal.h
+++ b/lib/asan/asan_interface_internal.h
@@ -23,6 +23,8 @@
#include "asan_init_version.h"
using __sanitizer::uptr;
+using __sanitizer::u64;
+using __sanitizer::u32;
extern "C" {
// This function should be called at the very beginning of the process,
@@ -79,6 +81,20 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_after_dynamic_init();
+ // Sets the bytes of the given shadow memory range to a specific value.
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_00(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_f1(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_f2(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_f3(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_f5(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_f8(uptr addr, uptr size);
+
// These two functions are used by instrumented code in the
// use-after-scope mode. They mark memory for local variables as
// unaddressable when they leave scope and addressable before the
@@ -156,6 +172,9 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ const char* __asan_default_options();
+ SANITIZER_INTERFACE_ATTRIBUTE
+ extern uptr __asan_shadow_memory_dynamic_address;
+
// Global flag, copy of ASAN_OPTIONS=detect_stack_use_after_return
SANITIZER_INTERFACE_ATTRIBUTE
extern int __asan_option_detect_stack_use_after_return;
diff --git a/lib/asan/asan_internal.h b/lib/asan/asan_internal.h
index 20142372e2e3..1dc678c0c357 100644
--- a/lib/asan/asan_internal.h
+++ b/lib/asan/asan_internal.h
@@ -36,7 +36,7 @@
// If set, values like allocator chunk size, as well as defaults for some flags
// will be changed towards less memory overhead.
#ifndef ASAN_LOW_MEMORY
-# if SANITIZER_IOS || (SANITIZER_WORDSIZE == 32)
+# if SANITIZER_IOS || SANITIZER_ANDROID
# define ASAN_LOW_MEMORY 1
# else
# define ASAN_LOW_MEMORY 0
@@ -65,6 +65,9 @@ void AsanInitFromRtl();
// asan_win.cc
void InitializePlatformExceptionHandlers();
+// asan_win.cc / asan_posix.cc
+const char *DescribeSignalOrException(int signo);
+
// asan_rtl.cc
void NORETURN ShowStatsAndAbort();
@@ -100,17 +103,6 @@ void *AsanDlSymNext(const char *sym);
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name);
-// Platform-specific options.
-#if SANITIZER_MAC
-bool PlatformHasDifferentMemcpyAndMemmove();
-# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE \
- (PlatformHasDifferentMemcpyAndMemmove())
-#elif SANITIZER_WINDOWS64
-# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE false
-#else
-# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE true
-#endif // SANITIZER_MAC
-
// Add convenient macro for interface functions that may be represented as
// weak hooks.
#define ASAN_MALLOC_HOOK(ptr, size) \
@@ -132,12 +124,10 @@ extern bool asan_init_is_running;
extern void (*death_callback)(void);
// These magic values are written to shadow for better error reporting.
const int kAsanHeapLeftRedzoneMagic = 0xfa;
-const int kAsanHeapRightRedzoneMagic = 0xfb;
const int kAsanHeapFreeMagic = 0xfd;
const int kAsanStackLeftRedzoneMagic = 0xf1;
const int kAsanStackMidRedzoneMagic = 0xf2;
const int kAsanStackRightRedzoneMagic = 0xf3;
-const int kAsanStackPartialRedzoneMagic = 0xf4;
const int kAsanStackAfterReturnMagic = 0xf5;
const int kAsanInitializationOrderMagic = 0xf6;
const int kAsanUserPoisonedMemoryMagic = 0xf7;
diff --git a/lib/asan/asan_mac.cc b/lib/asan/asan_mac.cc
index 525864f72d32..baf533ac96ac 100644
--- a/lib/asan/asan_mac.cc
+++ b/lib/asan/asan_mac.cc
@@ -49,15 +49,6 @@ namespace __asan {
void InitializePlatformInterceptors() {}
void InitializePlatformExceptionHandlers() {}
-bool PlatformHasDifferentMemcpyAndMemmove() {
- // On OS X 10.7 memcpy() and memmove() are both resolved
- // into memmove$VARIANT$sse42.
- // See also https://github.com/google/sanitizers/issues/34.
- // TODO(glider): need to check dynamically that memcpy() and memmove() are
- // actually the same function.
- return GetMacosVersion() == MACOS_VERSION_SNOW_LEOPARD;
-}
-
// No-op. Mac does not support static linkage anyway.
void *AsanDoesNotSupportStaticLinkage() {
return 0;
diff --git a/lib/asan/asan_malloc_linux.cc b/lib/asan/asan_malloc_linux.cc
index 162abd29f191..a78767c19f0f 100644
--- a/lib/asan/asan_malloc_linux.cc
+++ b/lib/asan/asan_malloc_linux.cc
@@ -78,7 +78,13 @@ INTERCEPTOR(void*, realloc, void *ptr, uptr size) {
if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
- void *new_ptr = asan_malloc(size, &stack);
+ void *new_ptr;
+ if (UNLIKELY(!asan_inited)) {
+ new_ptr = AllocateFromLocalPool(size);
+ } else {
+ copy_size = size;
+ new_ptr = asan_malloc(copy_size, &stack);
+ }
internal_memcpy(new_ptr, ptr, copy_size);
return new_ptr;
}
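For context on the realloc change above: very early allocations (made while dlsym() is still bootstrapping the interceptors) are served from a small static pool, and realloc of such a pointer used to call asan_malloc even when ASan was not yet initialized; the patched code keeps using the local pool in that case and copies the full requested size once the real allocator is available. A rough sketch of the pre-init local-pool pattern, with all names invented:

#include <cstdio>
#include <cstdlib>
#include <cstring>

static bool inited = false;  // stands in for asan_inited
static char pool[1024];      // stands in for alloc_memory_for_dlsym
static size_t pool_used = 0;

static bool InPool(void *p) {
  char *c = (char *)p;
  return c >= pool && c < pool + sizeof(pool);
}

static void *AllocateFromLocalPool(size_t size) {
  void *p = pool + pool_used;
  pool_used += (size + 7) & ~(size_t)7;  // keep 8-byte alignment
  return p;
}

void *my_realloc(void *ptr, size_t size) {
  if (ptr && InPool(ptr)) {
    size_t offset = (char *)ptr - pool;
    size_t left = sizeof(pool) - offset;
    size_t copy_size = size < left ? size : left;
    void *new_ptr;
    if (!inited) {
      new_ptr = AllocateFromLocalPool(size);  // real allocator not ready yet
    } else {
      copy_size = size;
      new_ptr = malloc(copy_size);            // hand off to the real allocator
    }
    memcpy(new_ptr, ptr, copy_size);
    return new_ptr;
  }
  return realloc(ptr, size);
}

int main() {
  char *p = (char *)AllocateFromLocalPool(16);
  memcpy(p, "early", 6);
  inited = true;
  char *q = (char *)my_realloc(p, 32);
  printf("%s\n", q);
  free(q);
}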
diff --git a/lib/asan/asan_malloc_win.cc b/lib/asan/asan_malloc_win.cc
index 4a233dfe968c..05148d51e6b8 100644
--- a/lib/asan/asan_malloc_win.cc
+++ b/lib/asan/asan_malloc_win.cc
@@ -125,6 +125,11 @@ void *_recalloc(void *p, size_t n, size_t elem_size) {
}
ALLOCATION_FUNCTION_ATTRIBUTE
+void *_recalloc_base(void *p, size_t n, size_t elem_size) {
+ return _recalloc(p, n, elem_size);
+}
+
+ALLOCATION_FUNCTION_ATTRIBUTE
size_t _msize(const void *ptr) {
GET_CURRENT_PC_BP_SP;
(void)sp;
@@ -223,6 +228,7 @@ void ReplaceSystemMalloc() {
TryToOverrideFunction("_realloc_base", (uptr)realloc);
TryToOverrideFunction("_realloc_crt", (uptr)realloc);
TryToOverrideFunction("_recalloc", (uptr)_recalloc);
+ TryToOverrideFunction("_recalloc_base", (uptr)_recalloc);
TryToOverrideFunction("_recalloc_crt", (uptr)_recalloc);
TryToOverrideFunction("_msize", (uptr)_msize);
TryToOverrideFunction("_expand", (uptr)_expand);
diff --git a/lib/asan/asan_mapping.h b/lib/asan/asan_mapping.h
index 52c4f67a5d04..d8e60a4b34a9 100644
--- a/lib/asan/asan_mapping.h
+++ b/lib/asan/asan_mapping.h
@@ -125,6 +125,7 @@
// || `[0x00000000, 0x2fffffff]` || LowMem ||
static const u64 kDefaultShadowScale = 3;
+static const u64 kDefaultShadowSentinel = ~(uptr)0;
static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000
static const u64 kDefaultShadowOffset64 = 1ULL << 44;
static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G.
@@ -140,7 +141,6 @@ static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
-static const u64 kWindowsShadowOffset64 = 1ULL << 45; // 32TB
#define SHADOW_SCALE kDefaultShadowScale
@@ -168,7 +168,7 @@ static const u64 kWindowsShadowOffset64 = 1ULL << 45; // 32TB
# if SANITIZER_IOSSIM
# define SHADOW_OFFSET kIosSimShadowOffset64
# else
-# define SHADOW_OFFSET kIosShadowOffset64
+# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
# endif
# elif defined(__aarch64__)
# define SHADOW_OFFSET kAArch64_ShadowOffset64
@@ -183,7 +183,7 @@ static const u64 kWindowsShadowOffset64 = 1ULL << 45; // 32TB
# elif defined(__mips64)
# define SHADOW_OFFSET kMIPS64_ShadowOffset64
# elif SANITIZER_WINDOWS64
-# define SHADOW_OFFSET kWindowsShadowOffset64
+# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
# else
# define SHADOW_OFFSET kDefaultShort64bitShadowOffset
# endif
@@ -269,9 +269,25 @@ static inline bool AddrIsInMidMem(uptr a) {
return kMidMemBeg && a >= kMidMemBeg && a <= kMidMemEnd;
}
+static inline bool AddrIsInShadowGap(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ if (kMidMemBeg) {
+ if (a <= kShadowGapEnd)
+ return SHADOW_OFFSET == 0 || a >= kShadowGapBeg;
+ return (a >= kShadowGap2Beg && a <= kShadowGap2End) ||
+ (a >= kShadowGap3Beg && a <= kShadowGap3End);
+ }
+ // In zero-based shadow mode we treat addresses near zero as addresses
+ // in shadow gap as well.
+ if (SHADOW_OFFSET == 0)
+ return a <= kShadowGapEnd;
+ return a >= kShadowGapBeg && a <= kShadowGapEnd;
+}
+
static inline bool AddrIsInMem(uptr a) {
PROFILE_ASAN_MAPPING();
- return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a);
+ return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a) ||
+ (flags()->protect_shadow_gap == 0 && AddrIsInShadowGap(a));
}
static inline uptr MemToShadow(uptr p) {
@@ -295,21 +311,6 @@ static inline bool AddrIsInShadow(uptr a) {
return AddrIsInLowShadow(a) || AddrIsInMidShadow(a) || AddrIsInHighShadow(a);
}
-static inline bool AddrIsInShadowGap(uptr a) {
- PROFILE_ASAN_MAPPING();
- if (kMidMemBeg) {
- if (a <= kShadowGapEnd)
- return SHADOW_OFFSET == 0 || a >= kShadowGapBeg;
- return (a >= kShadowGap2Beg && a <= kShadowGap2End) ||
- (a >= kShadowGap3Beg && a <= kShadowGap3End);
- }
- // In zero-based shadow mode we treat addresses near zero as addresses
- // in shadow gap as well.
- if (SHADOW_OFFSET == 0)
- return a <= kShadowGapEnd;
- return a >= kShadowGapBeg && a <= kShadowGapEnd;
-}
-
static inline bool AddrIsAlignedByGranularity(uptr a) {
PROFILE_ASAN_MAPPING();
return (a & (SHADOW_GRANULARITY - 1)) == 0;
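
With SHADOW_OFFSET now expanding to the runtime global __asan_shadow_memory_dynamic_address on Win64 and iOS, the mapping formula itself is unchanged: shadow = (addr >> SHADOW_SCALE) + offset; only the offset is picked at startup instead of being a compile-time constant. A minimal sketch of that arithmetic, where the base value is an assumed example rather than anything the runtime guarantees:

    #include <cstdint>
    #include <cstdio>

    static const unsigned kShadowScale = 3;   // 8 application bytes per shadow byte
    // In the patch this is __asan_shadow_memory_dynamic_address, filled in by
    // InitializeShadowMemory(); the constant below is just an assumed example.
    static std::uintptr_t shadow_dynamic_base = 0x10007fff8000ULL;

    static std::uintptr_t MemToShadow(std::uintptr_t p) {
      return (p >> kShadowScale) + shadow_dynamic_base;
    }

    int main() {
      std::uintptr_t app = 0x602000000010ULL;
      std::printf("app 0x%llx -> shadow 0x%llx\n",
                  (unsigned long long)app,
                  (unsigned long long)MemToShadow(app));
      return 0;
    }
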
diff --git a/lib/asan/asan_memory_profile.cc b/lib/asan/asan_memory_profile.cc
index ba0051634b92..c55264ef5d57 100644
--- a/lib/asan/asan_memory_profile.cc
+++ b/lib/asan/asan_memory_profile.cc
@@ -74,7 +74,7 @@ class HeapProfile {
static void ChunkCallback(uptr chunk, void *arg) {
HeapProfile *hp = reinterpret_cast<HeapProfile*>(arg);
- AsanChunkView cv = FindHeapChunkByAddress(chunk);
+ AsanChunkView cv = FindHeapChunkByAllocBeg(chunk);
if (!cv.IsAllocated()) return;
u32 id = cv.GetAllocStackId();
if (!id) return;
diff --git a/lib/asan/asan_new_delete.cc b/lib/asan/asan_new_delete.cc
index fef66041f606..3283fb3942cf 100644
--- a/lib/asan/asan_new_delete.cc
+++ b/lib/asan/asan_new_delete.cc
@@ -45,17 +45,30 @@
using namespace __asan; // NOLINT
+// FreeBSD prior to v9.2 has a wrong definition of 'size_t'.
+// http://svnweb.freebsd.org/base?view=revision&revision=232261
+#if SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
+#include <sys/param.h>
+#if __FreeBSD_version <= 902001 // v9.2
+#define size_t unsigned
+#endif // __FreeBSD_version
+#endif // SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
+
// This code has issues on OSX.
// See https://github.com/google/sanitizers/issues/131.
-// Fake std::nothrow_t to avoid including <new>.
+// Fake std::nothrow_t and std::align_val_t to avoid including <new>.
namespace std {
struct nothrow_t {};
+enum class align_val_t: size_t {};
} // namespace std
#define OPERATOR_NEW_BODY(type) \
GET_STACK_TRACE_MALLOC;\
return asan_memalign(0, size, &stack, type);
+#define OPERATOR_NEW_BODY_ALIGN(type) \
+ GET_STACK_TRACE_MALLOC;\
+ return asan_memalign((uptr)align, size, &stack, type);
// On OS X it's not enough to just provide our own 'operator new' and
// 'operator delete' implementations, because they're going to be in the
@@ -65,15 +78,6 @@ struct nothrow_t {};
// To make sure that C++ allocation/deallocation operators are overridden on
// OS X we need to intercept them using their mangled names.
#if !SANITIZER_MAC
-// FreeBSD prior v9.2 have wrong definition of 'size_t'.
-// http://svnweb.freebsd.org/base?view=revision&revision=232261
-#if SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
-#include <sys/param.h>
-#if __FreeBSD_version <= 902001 // v9.2
-#define size_t unsigned
-#endif // __FreeBSD_version
-#endif // SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
-
CXX_OPERATOR_ATTRIBUTE
void *operator new(size_t size) { OPERATOR_NEW_BODY(FROM_NEW); }
CXX_OPERATOR_ATTRIBUTE
@@ -84,6 +88,18 @@ void *operator new(size_t size, std::nothrow_t const&)
CXX_OPERATOR_ATTRIBUTE
void *operator new[](size_t size, std::nothrow_t const&)
{ OPERATOR_NEW_BODY(FROM_NEW_BR); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR); }
#else // SANITIZER_MAC
INTERCEPTOR(void *, _Znwm, size_t size) {
@@ -131,6 +147,32 @@ void operator delete[](void *ptr, size_t size) NOEXCEPT {
GET_STACK_TRACE_FREE;
asan_sized_free(ptr, size, &stack, FROM_NEW_BR);
}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t) NOEXCEPT {
+ OPERATOR_DELETE_BODY(FROM_NEW);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t) NOEXCEPT {
+ OPERATOR_DELETE_BODY(FROM_NEW_BR);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t, std::nothrow_t const&) {
+ OPERATOR_DELETE_BODY(FROM_NEW);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t, std::nothrow_t const&) {
+ OPERATOR_DELETE_BODY(FROM_NEW_BR);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size, std::align_val_t) NOEXCEPT {
+ GET_STACK_TRACE_FREE;
+ asan_sized_free(ptr, size, &stack, FROM_NEW);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size, std::align_val_t) NOEXCEPT {
+ GET_STACK_TRACE_FREE;
+ asan_sized_free(ptr, size, &stack, FROM_NEW_BR);
+}
#else // SANITIZER_MAC
INTERCEPTOR(void, _ZdlPv, void *ptr) {
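
The overloads added above wire C++17 aligned allocation into ASan: operator new(size_t, align_val_t) forwards the alignment to asan_memalign, and the matching deletes release through the FROM_NEW/FROM_NEW_BR paths. A small user-level example of the allocations that now take this route when built with -std=c++17 and -fsanitize=address:

    #include <cassert>
    #include <cstdint>
    #include <new>

    struct alignas(64) CacheLine { char data[64]; };

    int main() {
      // Over-aligned type: the compiler calls
      // operator new(sizeof(CacheLine), std::align_val_t{64}).
      CacheLine *p = new CacheLine;
      assert(reinterpret_cast<std::uintptr_t>(p) % 64 == 0);
      delete p;   // operator delete(void*, std::align_val_t{64})

      // The nothrow, aligned array forms also have overloads now.
      CacheLine *arr = new (std::nothrow) CacheLine[4];
      delete[] arr;
      return 0;
    }
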
diff --git a/lib/asan/asan_poisoning.cc b/lib/asan/asan_poisoning.cc
index 50877ae59115..abb75ab3bf92 100644
--- a/lib/asan/asan_poisoning.cc
+++ b/lib/asan/asan_poisoning.cc
@@ -64,12 +64,9 @@ struct ShadowSegmentEndpoint {
};
void FlushUnneededASanShadowMemory(uptr p, uptr size) {
- // Since asan's mapping is compacting, the shadow chunk may be
- // not page-aligned, so we only flush the page-aligned portion.
- uptr page_size = GetPageSizeCached();
- uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
- uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
- FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
+  // Since asan's mapping is compacting, the shadow chunk may not be
+  // page-aligned, so we only flush the page-aligned portion.
+ ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
}
void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
@@ -117,9 +114,9 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
- CHECK(beg.offset < end.offset);
+ CHECK_LT(beg.offset, end.offset);
s8 value = beg.value;
- CHECK(value == end.value);
+ CHECK_EQ(value, end.value);
// We can only poison memory if the byte in end.offset is unaddressable.
// No need to re-poison memory if it is poisoned already.
if (value > 0 && value <= end.offset) {
@@ -131,7 +128,7 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
}
return;
}
- CHECK(beg.chunk < end.chunk);
+ CHECK_LT(beg.chunk, end.chunk);
if (beg.offset > 0) {
// Mark bytes from beg.offset as unaddressable.
if (beg.value == 0) {
@@ -157,9 +154,9 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
- CHECK(beg.offset < end.offset);
+ CHECK_LT(beg.offset, end.offset);
s8 value = beg.value;
- CHECK(value == end.value);
+ CHECK_EQ(value, end.value);
    // We unpoison memory bytes up to end.offset if it is not
// unpoisoned already.
if (value != 0) {
@@ -167,7 +164,7 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
}
return;
}
- CHECK(beg.chunk < end.chunk);
+ CHECK_LT(beg.chunk, end.chunk);
if (beg.offset > 0) {
*beg.chunk = 0;
beg.chunk++;
@@ -314,6 +311,30 @@ static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
}
}
+void __asan_set_shadow_00(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0, size);
+}
+
+void __asan_set_shadow_f1(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf1, size);
+}
+
+void __asan_set_shadow_f2(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf2, size);
+}
+
+void __asan_set_shadow_f3(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf3, size);
+}
+
+void __asan_set_shadow_f5(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf5, size);
+}
+
+void __asan_set_shadow_f8(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf8, size);
+}
+
void __asan_poison_stack_memory(uptr addr, uptr size) {
VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
PoisonAlignedStackMemory(addr, size, true);
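
The __asan_set_shadow_XX entry points added in the hunk above are thin wrappers that fill a run of shadow bytes with one poison value (0x00 addressable, 0xf1/0xf2/0xf3 stack redzones, 0xf5 and 0xf8 for the use-after-return and use-after-scope magics), so instrumented code can poison frames without going through the memset interceptor. A standalone sketch of the idea, using a fake shadow array and a hypothetical set_shadow helper instead of the real shadow region:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    static std::uint8_t fake_shadow[1 << 12];   // stand-in for the real shadow region

    // Each __asan_set_shadow_XX boils down to a memset with a fixed value.
    static void set_shadow(std::size_t off, std::size_t n, std::uint8_t value) {
      std::memset(fake_shadow + off, value, n);
    }

    int main() {
      // Roughly what compiler-emitted prologue code can now request:
      set_shadow(0x10, 1, 0xf1);   // left stack redzone  (__asan_set_shadow_f1)
      set_shadow(0x11, 4, 0x00);   // addressable locals  (__asan_set_shadow_00)
      set_shadow(0x15, 1, 0xf3);   // right stack redzone (__asan_set_shadow_f3)
      return 0;
    }
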
@@ -388,7 +409,7 @@ const void *__sanitizer_contiguous_container_find_bad_address(
// ending with end.
uptr kMaxRangeToCheck = 32;
uptr r1_beg = beg;
- uptr r1_end = Min(end + kMaxRangeToCheck, mid);
+ uptr r1_end = Min(beg + kMaxRangeToCheck, mid);
uptr r2_beg = Max(beg, mid - kMaxRangeToCheck);
uptr r2_end = Min(end, mid + kMaxRangeToCheck);
uptr r3_beg = Max(end - kMaxRangeToCheck, mid);
diff --git a/lib/asan/asan_poisoning.h b/lib/asan/asan_poisoning.h
index 6344225f0f64..cc3281e08a71 100644
--- a/lib/asan/asan_poisoning.h
+++ b/lib/asan/asan_poisoning.h
@@ -86,8 +86,8 @@ ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
}
}
-// Calls __sanitizer::FlushUnneededShadowMemory() on
-// [MemToShadow(p), MemToShadow(p+size)] with proper rounding.
+// Calls __sanitizer::ReleaseMemoryPagesToOS() on
+// [MemToShadow(p), MemToShadow(p+size)].
void FlushUnneededASanShadowMemory(uptr p, uptr size);
} // namespace __asan
diff --git a/lib/asan/asan_posix.cc b/lib/asan/asan_posix.cc
index 84a29ec6a9d9..8e5676309ae0 100644
--- a/lib/asan/asan_posix.cc
+++ b/lib/asan/asan_posix.cc
@@ -33,6 +33,19 @@
namespace __asan {
+const char *DescribeSignalOrException(int signo) {
+ switch (signo) {
+ case SIGFPE:
+ return "FPE";
+ case SIGILL:
+ return "ILL";
+ case SIGABRT:
+ return "ABRT";
+ default:
+ return "SEGV";
+ }
+}
+
void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
ScopedDeadlySignal signal_scope(GetCurrentThread());
int code = (int)((siginfo_t*)siginfo)->si_code;
@@ -84,12 +97,8 @@ void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
// unaligned memory access.
if (IsStackAccess && (code == si_SEGV_MAPERR || code == si_SEGV_ACCERR))
ReportStackOverflow(sig);
- else if (signo == SIGFPE)
- ReportDeadlySignal("FPE", sig);
- else if (signo == SIGILL)
- ReportDeadlySignal("ILL", sig);
else
- ReportDeadlySignal("SEGV", sig);
+ ReportDeadlySignal(signo, sig);
}
// ---------------------- TSD ---------------- {{{1
diff --git a/lib/asan/asan_report.cc b/lib/asan/asan_report.cc
index 9f2f12d51eaa..937ba4077d43 100644
--- a/lib/asan/asan_report.cc
+++ b/lib/asan/asan_report.cc
@@ -12,7 +12,9 @@
// This file contains error reporting code.
//===----------------------------------------------------------------------===//
+#include "asan_errors.h"
#include "asan_flags.h"
+#include "asan_descriptions.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_report.h"
@@ -35,19 +37,6 @@ static BlockingMutex error_message_buf_mutex(LINKER_INITIALIZED);
static const unsigned kAsanBuggyPcPoolSize = 25;
static __sanitizer::atomic_uintptr_t AsanBuggyPcPool[kAsanBuggyPcPoolSize];
-struct ReportData {
- uptr pc;
- uptr sp;
- uptr bp;
- uptr addr;
- bool is_write;
- uptr access_size;
- const char *description;
-};
-
-static bool report_happened = false;
-static ReportData report_data = {};
-
void AppendToErrorMessageBuffer(const char *buffer) {
BlockingMutexLock l(&error_message_buf_mutex);
if (!error_message_buffer) {
@@ -65,60 +54,10 @@ void AppendToErrorMessageBuffer(const char *buffer) {
error_message_buffer_pos += Min(remaining, length);
}
-// ---------------------- Decorator ------------------------------ {{{1
-class Decorator: public __sanitizer::SanitizerCommonDecorator {
- public:
- Decorator() : SanitizerCommonDecorator() { }
- const char *Access() { return Blue(); }
- const char *EndAccess() { return Default(); }
- const char *Location() { return Green(); }
- const char *EndLocation() { return Default(); }
- const char *Allocation() { return Magenta(); }
- const char *EndAllocation() { return Default(); }
-
- const char *ShadowByte(u8 byte) {
- switch (byte) {
- case kAsanHeapLeftRedzoneMagic:
- case kAsanHeapRightRedzoneMagic:
- case kAsanArrayCookieMagic:
- return Red();
- case kAsanHeapFreeMagic:
- return Magenta();
- case kAsanStackLeftRedzoneMagic:
- case kAsanStackMidRedzoneMagic:
- case kAsanStackRightRedzoneMagic:
- case kAsanStackPartialRedzoneMagic:
- return Red();
- case kAsanStackAfterReturnMagic:
- return Magenta();
- case kAsanInitializationOrderMagic:
- return Cyan();
- case kAsanUserPoisonedMemoryMagic:
- case kAsanContiguousContainerOOBMagic:
- case kAsanAllocaLeftMagic:
- case kAsanAllocaRightMagic:
- return Blue();
- case kAsanStackUseAfterScopeMagic:
- return Magenta();
- case kAsanGlobalRedzoneMagic:
- return Red();
- case kAsanInternalHeapMagic:
- return Yellow();
- case kAsanIntraObjectRedzone:
- return Yellow();
- default:
- return Default();
- }
- }
- const char *EndShadowByte() { return Default(); }
- const char *MemoryByte() { return Magenta(); }
- const char *EndMemoryByte() { return Default(); }
-};
-
// ---------------------- Helper functions ----------------------- {{{1
-static void PrintMemoryByte(InternalScopedString *str, const char *before,
- u8 byte, bool in_shadow, const char *after = "\n") {
+void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
+ bool in_shadow, const char *after) {
Decorator d;
str->append("%s%s%x%x%s%s", before,
in_shadow ? d.ShadowByte(byte) : d.MemoryByte(),
@@ -126,99 +65,6 @@ static void PrintMemoryByte(InternalScopedString *str, const char *before,
in_shadow ? d.EndShadowByte() : d.EndMemoryByte(), after);
}
-static void PrintShadowByte(InternalScopedString *str, const char *before,
- u8 byte, const char *after = "\n") {
- PrintMemoryByte(str, before, byte, /*in_shadow*/true, after);
-}
-
-static void PrintShadowBytes(InternalScopedString *str, const char *before,
- u8 *bytes, u8 *guilty, uptr n) {
- Decorator d;
- if (before) str->append("%s%p:", before, bytes);
- for (uptr i = 0; i < n; i++) {
- u8 *p = bytes + i;
- const char *before =
- p == guilty ? "[" : (p - 1 == guilty && i != 0) ? "" : " ";
- const char *after = p == guilty ? "]" : "";
- PrintShadowByte(str, before, *p, after);
- }
- str->append("\n");
-}
-
-static void PrintLegend(InternalScopedString *str) {
- str->append(
- "Shadow byte legend (one shadow byte represents %d "
- "application bytes):\n",
- (int)SHADOW_GRANULARITY);
- PrintShadowByte(str, " Addressable: ", 0);
- str->append(" Partially addressable: ");
- for (u8 i = 1; i < SHADOW_GRANULARITY; i++) PrintShadowByte(str, "", i, " ");
- str->append("\n");
- PrintShadowByte(str, " Heap left redzone: ",
- kAsanHeapLeftRedzoneMagic);
- PrintShadowByte(str, " Heap right redzone: ",
- kAsanHeapRightRedzoneMagic);
- PrintShadowByte(str, " Freed heap region: ", kAsanHeapFreeMagic);
- PrintShadowByte(str, " Stack left redzone: ",
- kAsanStackLeftRedzoneMagic);
- PrintShadowByte(str, " Stack mid redzone: ",
- kAsanStackMidRedzoneMagic);
- PrintShadowByte(str, " Stack right redzone: ",
- kAsanStackRightRedzoneMagic);
- PrintShadowByte(str, " Stack partial redzone: ",
- kAsanStackPartialRedzoneMagic);
- PrintShadowByte(str, " Stack after return: ",
- kAsanStackAfterReturnMagic);
- PrintShadowByte(str, " Stack use after scope: ",
- kAsanStackUseAfterScopeMagic);
- PrintShadowByte(str, " Global redzone: ", kAsanGlobalRedzoneMagic);
- PrintShadowByte(str, " Global init order: ",
- kAsanInitializationOrderMagic);
- PrintShadowByte(str, " Poisoned by user: ",
- kAsanUserPoisonedMemoryMagic);
- PrintShadowByte(str, " Container overflow: ",
- kAsanContiguousContainerOOBMagic);
- PrintShadowByte(str, " Array cookie: ",
- kAsanArrayCookieMagic);
- PrintShadowByte(str, " Intra object redzone: ",
- kAsanIntraObjectRedzone);
- PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic);
- PrintShadowByte(str, " Left alloca redzone: ", kAsanAllocaLeftMagic);
- PrintShadowByte(str, " Right alloca redzone: ", kAsanAllocaRightMagic);
-}
-
-void MaybeDumpInstructionBytes(uptr pc) {
- if (!flags()->dump_instruction_bytes || (pc < GetPageSizeCached()))
- return;
- InternalScopedString str(1024);
- str.append("First 16 instruction bytes at pc: ");
- if (IsAccessibleMemoryRange(pc, 16)) {
- for (int i = 0; i < 16; ++i) {
- PrintMemoryByte(&str, "", ((u8 *)pc)[i], /*in_shadow*/false, " ");
- }
- str.append("\n");
- } else {
- str.append("unaccessible\n");
- }
- Report("%s", str.data());
-}
-
-static void PrintShadowMemoryForAddress(uptr addr) {
- if (!AddrIsInMem(addr)) return;
- uptr shadow_addr = MemToShadow(addr);
- const uptr n_bytes_per_row = 16;
- uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1);
- InternalScopedString str(4096 * 8);
- str.append("Shadow bytes around the buggy address:\n");
- for (int i = -5; i <= 5; i++) {
- const char *prefix = (i == 0) ? "=>" : " ";
- PrintShadowBytes(&str, prefix, (u8 *)(aligned_shadow + i * n_bytes_per_row),
- (u8 *)shadow_addr, n_bytes_per_row);
- }
- if (flags()->print_legend) PrintLegend(&str);
- Printf("%s", str.data());
-}
-
static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
const char *zone_name) {
if (zone_ptr) {
@@ -234,191 +80,8 @@ static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
}
}
-static void DescribeThread(AsanThread *t) {
- if (t)
- DescribeThread(t->context());
-}
-
// ---------------------- Address Descriptions ------------------- {{{1
-static bool IsASCII(unsigned char c) {
- return /*0x00 <= c &&*/ c <= 0x7F;
-}
-
-static const char *MaybeDemangleGlobalName(const char *name) {
- // We can spoil names of globals with C linkage, so use an heuristic
- // approach to check if the name should be demangled.
- bool should_demangle = false;
- if (name[0] == '_' && name[1] == 'Z')
- should_demangle = true;
- else if (SANITIZER_WINDOWS && name[0] == '\01' && name[1] == '?')
- should_demangle = true;
-
- return should_demangle ? Symbolizer::GetOrInit()->Demangle(name) : name;
-}
-
-// Check if the global is a zero-terminated ASCII string. If so, print it.
-static void PrintGlobalNameIfASCII(InternalScopedString *str,
- const __asan_global &g) {
- for (uptr p = g.beg; p < g.beg + g.size - 1; p++) {
- unsigned char c = *(unsigned char*)p;
- if (c == '\0' || !IsASCII(c)) return;
- }
- if (*(char*)(g.beg + g.size - 1) != '\0') return;
- str->append(" '%s' is ascii string '%s'\n", MaybeDemangleGlobalName(g.name),
- (char *)g.beg);
-}
-
-static const char *GlobalFilename(const __asan_global &g) {
- const char *res = g.module_name;
- // Prefer the filename from source location, if is available.
- if (g.location)
- res = g.location->filename;
- CHECK(res);
- return res;
-}
-
-static void PrintGlobalLocation(InternalScopedString *str,
- const __asan_global &g) {
- str->append("%s", GlobalFilename(g));
- if (!g.location)
- return;
- if (g.location->line_no)
- str->append(":%d", g.location->line_no);
- if (g.location->column_no)
- str->append(":%d", g.location->column_no);
-}
-
-static void DescribeAddressRelativeToGlobal(uptr addr, uptr size,
- const __asan_global &g) {
- InternalScopedString str(4096);
- Decorator d;
- str.append("%s", d.Location());
- if (addr < g.beg) {
- str.append("%p is located %zd bytes to the left", (void *)addr,
- g.beg - addr);
- } else if (addr + size > g.beg + g.size) {
- if (addr < g.beg + g.size)
- addr = g.beg + g.size;
- str.append("%p is located %zd bytes to the right", (void *)addr,
- addr - (g.beg + g.size));
- } else {
- // Can it happen?
- str.append("%p is located %zd bytes inside", (void *)addr, addr - g.beg);
- }
- str.append(" of global variable '%s' defined in '",
- MaybeDemangleGlobalName(g.name));
- PrintGlobalLocation(&str, g);
- str.append("' (0x%zx) of size %zu\n", g.beg, g.size);
- str.append("%s", d.EndLocation());
- PrintGlobalNameIfASCII(&str, g);
- Printf("%s", str.data());
-}
-
-static bool DescribeAddressIfGlobal(uptr addr, uptr size,
- const char *bug_type) {
- // Assume address is close to at most four globals.
- const int kMaxGlobalsInReport = 4;
- __asan_global globals[kMaxGlobalsInReport];
- u32 reg_sites[kMaxGlobalsInReport];
- int globals_num =
- GetGlobalsForAddress(addr, globals, reg_sites, ARRAY_SIZE(globals));
- if (globals_num == 0)
- return false;
- for (int i = 0; i < globals_num; i++) {
- DescribeAddressRelativeToGlobal(addr, size, globals[i]);
- if (0 == internal_strcmp(bug_type, "initialization-order-fiasco") &&
- reg_sites[i]) {
- Printf(" registered at:\n");
- StackDepotGet(reg_sites[i]).Print();
- }
- }
- return true;
-}
-
-bool DescribeAddressIfShadow(uptr addr, AddressDescription *descr, bool print) {
- if (AddrIsInMem(addr))
- return false;
- const char *area_type = nullptr;
- if (AddrIsInShadowGap(addr)) area_type = "shadow gap";
- else if (AddrIsInHighShadow(addr)) area_type = "high shadow";
- else if (AddrIsInLowShadow(addr)) area_type = "low shadow";
- if (area_type != nullptr) {
- if (print) {
- Printf("Address %p is located in the %s area.\n", addr, area_type);
- } else {
- CHECK(descr);
- descr->region_kind = area_type;
- }
- return true;
- }
- CHECK(0 && "Address is not in memory and not in shadow?");
- return false;
-}
-
-// Return " (thread_name) " or an empty string if the name is empty.
-const char *ThreadNameWithParenthesis(AsanThreadContext *t, char buff[],
- uptr buff_len) {
- const char *name = t->name;
- if (name[0] == '\0') return "";
- buff[0] = 0;
- internal_strncat(buff, " (", 3);
- internal_strncat(buff, name, buff_len - 4);
- internal_strncat(buff, ")", 2);
- return buff;
-}
-
-const char *ThreadNameWithParenthesis(u32 tid, char buff[],
- uptr buff_len) {
- if (tid == kInvalidTid) return "";
- asanThreadRegistry().CheckLocked();
- AsanThreadContext *t = GetThreadContextByTidLocked(tid);
- return ThreadNameWithParenthesis(t, buff, buff_len);
-}
-
-static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr,
- uptr access_size, uptr prev_var_end,
- uptr next_var_beg) {
- uptr var_end = var.beg + var.size;
- uptr addr_end = addr + access_size;
- const char *pos_descr = nullptr;
- // If the variable [var.beg, var_end) is the nearest variable to the
- // current memory access, indicate it in the log.
- if (addr >= var.beg) {
- if (addr_end <= var_end)
- pos_descr = "is inside"; // May happen if this is a use-after-return.
- else if (addr < var_end)
- pos_descr = "partially overflows";
- else if (addr_end <= next_var_beg &&
- next_var_beg - addr_end >= addr - var_end)
- pos_descr = "overflows";
- } else {
- if (addr_end > var.beg)
- pos_descr = "partially underflows";
- else if (addr >= prev_var_end &&
- addr - prev_var_end >= var.beg - addr_end)
- pos_descr = "underflows";
- }
- InternalScopedString str(1024);
- str.append(" [%zd, %zd)", var.beg, var_end);
- // Render variable name.
- str.append(" '");
- for (uptr i = 0; i < var.name_len; ++i) {
- str.append("%c", var.name_pos[i]);
- }
- str.append("'");
- if (pos_descr) {
- Decorator d;
- // FIXME: we may want to also print the size of the access here,
- // but in case of accesses generated by memset it may be confusing.
- str.append("%s <== Memory access at offset %zd %s this variable%s\n",
- d.Location(), addr, pos_descr, d.EndLocation());
- } else {
- str.append("\n");
- }
- Printf("%s", str.data());
-}
-
bool ParseFrameDescription(const char *frame_descr,
InternalMmapVector<StackVarDescr> *vars) {
CHECK(frame_descr);
@@ -446,195 +109,17 @@ bool ParseFrameDescription(const char *frame_descr,
return true;
}
-bool DescribeAddressIfStack(uptr addr, uptr access_size) {
- AsanThread *t = FindThreadByStackAddress(addr);
- if (!t) return false;
-
- Decorator d;
- char tname[128];
- Printf("%s", d.Location());
- Printf("Address %p is located in stack of thread T%d%s", addr, t->tid(),
- ThreadNameWithParenthesis(t->tid(), tname, sizeof(tname)));
-
- // Try to fetch precise stack frame for this access.
- AsanThread::StackFrameAccess access;
- if (!t->GetStackFrameAccessByAddr(addr, &access)) {
- Printf("%s\n", d.EndLocation());
- return true;
- }
- Printf(" at offset %zu in frame%s\n", access.offset, d.EndLocation());
-
- // Now we print the frame where the alloca has happened.
- // We print this frame as a stack trace with one element.
- // The symbolizer may print more than one frame if inlining was involved.
- // The frame numbers may be different than those in the stack trace printed
- // previously. That's unfortunate, but I have no better solution,
- // especially given that the alloca may be from entirely different place
- // (e.g. use-after-scope, or different thread's stack).
-#if SANITIZER_PPC64V1
- // On PowerPC64 ELFv1, the address of a function actually points to a
- // three-doubleword data structure with the first field containing
- // the address of the function's code.
- access.frame_pc = *reinterpret_cast<uptr *>(access.frame_pc);
-#endif
- access.frame_pc += 16;
- Printf("%s", d.EndLocation());
- StackTrace alloca_stack(&access.frame_pc, 1);
- alloca_stack.Print();
-
- InternalMmapVector<StackVarDescr> vars(16);
- if (!ParseFrameDescription(access.frame_descr, &vars)) {
- Printf("AddressSanitizer can't parse the stack frame "
- "descriptor: |%s|\n", access.frame_descr);
- // 'addr' is a stack address, so return true even if we can't parse frame
- return true;
- }
- uptr n_objects = vars.size();
- // Report the number of stack objects.
- Printf(" This frame has %zu object(s):\n", n_objects);
-
- // Report all objects in this frame.
- for (uptr i = 0; i < n_objects; i++) {
- uptr prev_var_end = i ? vars[i - 1].beg + vars[i - 1].size : 0;
- uptr next_var_beg = i + 1 < n_objects ? vars[i + 1].beg : ~(0UL);
- PrintAccessAndVarIntersection(vars[i], access.offset, access_size,
- prev_var_end, next_var_beg);
- }
- Printf("HINT: this may be a false positive if your program uses "
- "some custom stack unwind mechanism or swapcontext\n");
- if (SANITIZER_WINDOWS)
- Printf(" (longjmp, SEH and C++ exceptions *are* supported)\n");
- else
- Printf(" (longjmp and C++ exceptions *are* supported)\n");
-
- DescribeThread(t);
- return true;
-}
-
-static void DescribeAccessToHeapChunk(AsanChunkView chunk, uptr addr,
- uptr access_size) {
- sptr offset;
- Decorator d;
- InternalScopedString str(4096);
- str.append("%s", d.Location());
- if (chunk.AddrIsAtLeft(addr, access_size, &offset)) {
- str.append("%p is located %zd bytes to the left of", (void *)addr, offset);
- } else if (chunk.AddrIsAtRight(addr, access_size, &offset)) {
- if (offset < 0) {
- addr -= offset;
- offset = 0;
- }
- str.append("%p is located %zd bytes to the right of", (void *)addr, offset);
- } else if (chunk.AddrIsInside(addr, access_size, &offset)) {
- str.append("%p is located %zd bytes inside of", (void*)addr, offset);
- } else {
- str.append("%p is located somewhere around (this is AddressSanitizer bug!)",
- (void *)addr);
- }
- str.append(" %zu-byte region [%p,%p)\n", chunk.UsedSize(),
- (void *)(chunk.Beg()), (void *)(chunk.End()));
- str.append("%s", d.EndLocation());
- Printf("%s", str.data());
-}
-
-void DescribeHeapAddress(uptr addr, uptr access_size) {
- AsanChunkView chunk = FindHeapChunkByAddress(addr);
- if (!chunk.IsValid()) {
- Printf("AddressSanitizer can not describe address in more detail "
- "(wild memory access suspected).\n");
- return;
- }
- DescribeAccessToHeapChunk(chunk, addr, access_size);
- CHECK(chunk.AllocTid() != kInvalidTid);
- asanThreadRegistry().CheckLocked();
- AsanThreadContext *alloc_thread =
- GetThreadContextByTidLocked(chunk.AllocTid());
- StackTrace alloc_stack = chunk.GetAllocStack();
- char tname[128];
- Decorator d;
- AsanThreadContext *free_thread = nullptr;
- if (chunk.FreeTid() != kInvalidTid) {
- free_thread = GetThreadContextByTidLocked(chunk.FreeTid());
- Printf("%sfreed by thread T%d%s here:%s\n", d.Allocation(),
- free_thread->tid,
- ThreadNameWithParenthesis(free_thread, tname, sizeof(tname)),
- d.EndAllocation());
- StackTrace free_stack = chunk.GetFreeStack();
- free_stack.Print();
- Printf("%spreviously allocated by thread T%d%s here:%s\n",
- d.Allocation(), alloc_thread->tid,
- ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
- d.EndAllocation());
- } else {
- Printf("%sallocated by thread T%d%s here:%s\n", d.Allocation(),
- alloc_thread->tid,
- ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
- d.EndAllocation());
- }
- alloc_stack.Print();
- DescribeThread(GetCurrentThread());
- if (free_thread)
- DescribeThread(free_thread);
- DescribeThread(alloc_thread);
-}
-
-static void DescribeAddress(uptr addr, uptr access_size, const char *bug_type) {
- // Check if this is shadow or shadow gap.
- if (DescribeAddressIfShadow(addr))
- return;
- CHECK(AddrIsInMem(addr));
- if (DescribeAddressIfGlobal(addr, access_size, bug_type))
- return;
- if (DescribeAddressIfStack(addr, access_size))
- return;
- // Assume it is a heap address.
- DescribeHeapAddress(addr, access_size);
-}
-
-// ------------------- Thread description -------------------- {{{1
-
-void DescribeThread(AsanThreadContext *context) {
- CHECK(context);
- asanThreadRegistry().CheckLocked();
- // No need to announce the main thread.
- if (context->tid == 0 || context->announced) {
- return;
- }
- context->announced = true;
- char tname[128];
- InternalScopedString str(1024);
- str.append("Thread T%d%s", context->tid,
- ThreadNameWithParenthesis(context->tid, tname, sizeof(tname)));
- if (context->parent_tid == kInvalidTid) {
- str.append(" created by unknown thread\n");
- Printf("%s", str.data());
- return;
- }
- str.append(
- " created by T%d%s here:\n", context->parent_tid,
- ThreadNameWithParenthesis(context->parent_tid, tname, sizeof(tname)));
- Printf("%s", str.data());
- StackDepotGet(context->stack_id).Print();
- // Recursively described parent thread if needed.
- if (flags()->print_full_thread_history) {
- AsanThreadContext *parent_context =
- GetThreadContextByTidLocked(context->parent_tid);
- DescribeThread(parent_context);
- }
-}
-
// -------------------- Different kinds of reports ----------------- {{{1
// Use ScopedInErrorReport to run common actions just before and
// immediately after printing error report.
class ScopedInErrorReport {
public:
- explicit ScopedInErrorReport(ReportData *report = nullptr,
- bool fatal = false) {
+ explicit ScopedInErrorReport(bool fatal = false) {
halt_on_error_ = fatal || flags()->halt_on_error;
if (lock_.TryLock()) {
- StartReporting(report);
+ StartReporting();
return;
}
@@ -676,10 +161,13 @@ class ScopedInErrorReport {
lock_.Lock();
}
- StartReporting(report);
+ StartReporting();
}
~ScopedInErrorReport() {
+ ASAN_ON_ERROR();
+ if (current_error_.IsValid()) current_error_.Print();
+
// Make sure the current thread is announced.
DescribeThread(GetCurrentThread());
// We may want to grab this lock again when printing stats.
@@ -705,6 +193,12 @@ class ScopedInErrorReport {
if (error_report_callback) {
error_report_callback(buffer_copy.data());
}
+
+ // In halt_on_error = false mode, reset the current error object (before
+ // unlocking).
+ if (!halt_on_error_)
+ internal_memset(&current_error_, 0, sizeof(current_error_));
+
CommonSanitizerReportMutex.Unlock();
reporting_thread_tid_ = kInvalidTid;
lock_.Unlock();
@@ -714,11 +208,18 @@ class ScopedInErrorReport {
}
}
+ void ReportError(const ErrorDescription &description) {
+ // Can only report one error per ScopedInErrorReport.
+ CHECK_EQ(current_error_.kind, kErrorKindInvalid);
+ current_error_ = description;
+ }
+
+ static ErrorDescription &CurrentError() {
+ return current_error_;
+ }
+
private:
- void StartReporting(ReportData *report) {
- if (report) report_data = *report;
- report_happened = true;
- ASAN_ON_ERROR();
+ void StartReporting() {
// Make sure the registry and sanitizer report mutexes are locked while
// we're printing an error report.
// We can lock them only here to avoid self-deadlock in case of
@@ -732,181 +233,69 @@ class ScopedInErrorReport {
static StaticSpinMutex lock_;
static u32 reporting_thread_tid_;
+ // Error currently being reported. This enables the destructor to interact
+ // with the debugger and point it to an error description.
+ static ErrorDescription current_error_;
bool halt_on_error_;
};
StaticSpinMutex ScopedInErrorReport::lock_;
u32 ScopedInErrorReport::reporting_thread_tid_ = kInvalidTid;
+ErrorDescription ScopedInErrorReport::current_error_;
void ReportStackOverflow(const SignalContext &sig) {
- ScopedInErrorReport in_report(/*report*/ nullptr, /*fatal*/ true);
- Decorator d;
- Printf("%s", d.Warning());
- Report(
- "ERROR: AddressSanitizer: stack-overflow on address %p"
- " (pc %p bp %p sp %p T%d)\n",
- (void *)sig.addr, (void *)sig.pc, (void *)sig.bp, (void *)sig.sp,
- GetCurrentTidOrInvalid());
- Printf("%s", d.EndWarning());
- ScarinessScore::PrintSimple(10, "stack-overflow");
- GET_STACK_TRACE_SIGNAL(sig);
- stack.Print();
- ReportErrorSummary("stack-overflow", &stack);
+ ScopedInErrorReport in_report(/*fatal*/ true);
+ ErrorStackOverflow error(GetCurrentTidOrInvalid(), sig);
+ in_report.ReportError(error);
}
-void ReportDeadlySignal(const char *description, const SignalContext &sig) {
- ScopedInErrorReport in_report(/*report*/ nullptr, /*fatal*/ true);
- Decorator d;
- Printf("%s", d.Warning());
- Report(
- "ERROR: AddressSanitizer: %s on unknown address %p"
- " (pc %p bp %p sp %p T%d)\n",
- description, (void *)sig.addr, (void *)sig.pc, (void *)sig.bp,
- (void *)sig.sp, GetCurrentTidOrInvalid());
- Printf("%s", d.EndWarning());
- ScarinessScore SS;
- if (sig.pc < GetPageSizeCached())
- Report("Hint: pc points to the zero page.\n");
- if (sig.is_memory_access) {
- const char *access_type =
- sig.write_flag == SignalContext::WRITE
- ? "WRITE"
- : (sig.write_flag == SignalContext::READ ? "READ" : "UNKNOWN");
- Report("The signal is caused by a %s memory access.\n", access_type);
- if (sig.addr < GetPageSizeCached()) {
- Report("Hint: address points to the zero page.\n");
- SS.Scare(10, "null-deref");
- } else if (sig.addr == sig.pc) {
- SS.Scare(60, "wild-jump");
- } else if (sig.write_flag == SignalContext::WRITE) {
- SS.Scare(30, "wild-addr-write");
- } else if (sig.write_flag == SignalContext::READ) {
- SS.Scare(20, "wild-addr-read");
- } else {
- SS.Scare(25, "wild-addr");
- }
- } else {
- SS.Scare(10, "signal");
- }
- SS.Print();
- GET_STACK_TRACE_SIGNAL(sig);
- stack.Print();
- MaybeDumpInstructionBytes(sig.pc);
- Printf("AddressSanitizer can not provide additional info.\n");
- ReportErrorSummary(description, &stack);
+void ReportDeadlySignal(int signo, const SignalContext &sig) {
+ ScopedInErrorReport in_report(/*fatal*/ true);
+ ErrorDeadlySignal error(GetCurrentTidOrInvalid(), sig, signo);
+ in_report.ReportError(error);
}
void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) {
ScopedInErrorReport in_report;
- Decorator d;
- Printf("%s", d.Warning());
- char tname[128];
- u32 curr_tid = GetCurrentTidOrInvalid();
- Report("ERROR: AddressSanitizer: attempting double-free on %p in "
- "thread T%d%s:\n",
- addr, curr_tid,
- ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)));
- Printf("%s", d.EndWarning());
- CHECK_GT(free_stack->size, 0);
- ScarinessScore::PrintSimple(42, "double-free");
- GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
- stack.Print();
- DescribeHeapAddress(addr, 1);
- ReportErrorSummary("double-free", &stack);
+ ErrorDoubleFree error(GetCurrentTidOrInvalid(), free_stack, addr);
+ in_report.ReportError(error);
}
-void ReportNewDeleteSizeMismatch(uptr addr, uptr alloc_size, uptr delete_size,
+void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
BufferedStackTrace *free_stack) {
ScopedInErrorReport in_report;
- Decorator d;
- Printf("%s", d.Warning());
- char tname[128];
- u32 curr_tid = GetCurrentTidOrInvalid();
- Report("ERROR: AddressSanitizer: new-delete-type-mismatch on %p in "
- "thread T%d%s:\n",
- addr, curr_tid,
- ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)));
- Printf("%s object passed to delete has wrong type:\n", d.EndWarning());
- Printf(" size of the allocated type: %zd bytes;\n"
- " size of the deallocated type: %zd bytes.\n",
- alloc_size, delete_size);
- CHECK_GT(free_stack->size, 0);
- ScarinessScore::PrintSimple(10, "new-delete-type-mismatch");
- GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
- stack.Print();
- DescribeHeapAddress(addr, 1);
- ReportErrorSummary("new-delete-type-mismatch", &stack);
- Report("HINT: if you don't care about these errors you may set "
- "ASAN_OPTIONS=new_delete_type_mismatch=0\n");
+ ErrorNewDeleteSizeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
+ delete_size);
+ in_report.ReportError(error);
}
void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack) {
ScopedInErrorReport in_report;
- Decorator d;
- Printf("%s", d.Warning());
- char tname[128];
- u32 curr_tid = GetCurrentTidOrInvalid();
- Report("ERROR: AddressSanitizer: attempting free on address "
- "which was not malloc()-ed: %p in thread T%d%s\n", addr,
- curr_tid, ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)));
- Printf("%s", d.EndWarning());
- CHECK_GT(free_stack->size, 0);
- ScarinessScore::PrintSimple(40, "bad-free");
- GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
- stack.Print();
- DescribeHeapAddress(addr, 1);
- ReportErrorSummary("bad-free", &stack);
+ ErrorFreeNotMalloced error(GetCurrentTidOrInvalid(), free_stack, addr);
+ in_report.ReportError(error);
}
void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack,
AllocType alloc_type,
AllocType dealloc_type) {
- static const char *alloc_names[] =
- {"INVALID", "malloc", "operator new", "operator new []"};
- static const char *dealloc_names[] =
- {"INVALID", "free", "operator delete", "operator delete []"};
- CHECK_NE(alloc_type, dealloc_type);
ScopedInErrorReport in_report;
- Decorator d;
- Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: alloc-dealloc-mismatch (%s vs %s) on %p\n",
- alloc_names[alloc_type], dealloc_names[dealloc_type], addr);
- Printf("%s", d.EndWarning());
- CHECK_GT(free_stack->size, 0);
- ScarinessScore::PrintSimple(10, "alloc-dealloc-mismatch");
- GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
- stack.Print();
- DescribeHeapAddress(addr, 1);
- ReportErrorSummary("alloc-dealloc-mismatch", &stack);
- Report("HINT: if you don't care about these errors you may set "
- "ASAN_OPTIONS=alloc_dealloc_mismatch=0\n");
+ ErrorAllocTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
+ alloc_type, dealloc_type);
+ in_report.ReportError(error);
}
void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
- Decorator d;
- Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: attempting to call "
- "malloc_usable_size() for pointer which is "
- "not owned: %p\n", addr);
- Printf("%s", d.EndWarning());
- stack->Print();
- DescribeHeapAddress(addr, 1);
- ReportErrorSummary("bad-malloc_usable_size", stack);
+ ErrorMallocUsableSizeNotOwned error(GetCurrentTidOrInvalid(), stack, addr);
+ in_report.ReportError(error);
}
void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr,
BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
- Decorator d;
- Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: attempting to call "
- "__sanitizer_get_allocated_size() for pointer which is "
- "not owned: %p\n", addr);
- Printf("%s", d.EndWarning());
- stack->Print();
- DescribeHeapAddress(addr, 1);
- ReportErrorSummary("bad-__sanitizer_get_allocated_size", stack);
+ ErrorSanitizerGetAllocatedSizeNotOwned error(GetCurrentTidOrInvalid(), stack,
+ addr);
+ in_report.ReportError(error);
}
void ReportStringFunctionMemoryRangesOverlap(const char *function,
@@ -914,96 +303,43 @@ void ReportStringFunctionMemoryRangesOverlap(const char *function,
const char *offset2, uptr length2,
BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
- Decorator d;
- char bug_type[100];
- internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function);
- Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: %s: "
- "memory ranges [%p,%p) and [%p, %p) overlap\n", \
- bug_type, offset1, offset1 + length1, offset2, offset2 + length2);
- Printf("%s", d.EndWarning());
- ScarinessScore::PrintSimple(10, bug_type);
- stack->Print();
- DescribeAddress((uptr)offset1, length1, bug_type);
- DescribeAddress((uptr)offset2, length2, bug_type);
- ReportErrorSummary(bug_type, stack);
+ ErrorStringFunctionMemoryRangesOverlap error(
+ GetCurrentTidOrInvalid(), stack, (uptr)offset1, length1, (uptr)offset2,
+ length2, function);
+ in_report.ReportError(error);
}
void ReportStringFunctionSizeOverflow(uptr offset, uptr size,
BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
- Decorator d;
- const char *bug_type = "negative-size-param";
- Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: %s: (size=%zd)\n", bug_type, size);
- Printf("%s", d.EndWarning());
- ScarinessScore::PrintSimple(10, bug_type);
- stack->Print();
- DescribeAddress(offset, size, bug_type);
- ReportErrorSummary(bug_type, stack);
+ ErrorStringFunctionSizeOverflow error(GetCurrentTidOrInvalid(), stack, offset,
+ size);
+ in_report.ReportError(error);
}
void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
uptr old_mid, uptr new_mid,
BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
- Report("ERROR: AddressSanitizer: bad parameters to "
- "__sanitizer_annotate_contiguous_container:\n"
- " beg : %p\n"
- " end : %p\n"
- " old_mid : %p\n"
- " new_mid : %p\n",
- beg, end, old_mid, new_mid);
- uptr granularity = SHADOW_GRANULARITY;
- if (!IsAligned(beg, granularity))
- Report("ERROR: beg is not aligned by %d\n", granularity);
- stack->Print();
- ReportErrorSummary("bad-__sanitizer_annotate_contiguous_container", stack);
+ ErrorBadParamsToAnnotateContiguousContainer error(
+ GetCurrentTidOrInvalid(), stack, beg, end, old_mid, new_mid);
+ in_report.ReportError(error);
}
void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
const __asan_global *g2, u32 stack_id2) {
ScopedInErrorReport in_report;
- Decorator d;
- Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: odr-violation (%p):\n", g1->beg);
- Printf("%s", d.EndWarning());
- InternalScopedString g1_loc(256), g2_loc(256);
- PrintGlobalLocation(&g1_loc, *g1);
- PrintGlobalLocation(&g2_loc, *g2);
- Printf(" [1] size=%zd '%s' %s\n", g1->size,
- MaybeDemangleGlobalName(g1->name), g1_loc.data());
- Printf(" [2] size=%zd '%s' %s\n", g2->size,
- MaybeDemangleGlobalName(g2->name), g2_loc.data());
- if (stack_id1 && stack_id2) {
- Printf("These globals were registered at these points:\n");
- Printf(" [1]:\n");
- StackDepotGet(stack_id1).Print();
- Printf(" [2]:\n");
- StackDepotGet(stack_id2).Print();
- }
- Report("HINT: if you don't care about these errors you may set "
- "ASAN_OPTIONS=detect_odr_violation=0\n");
- InternalScopedString error_msg(256);
- error_msg.append("odr-violation: global '%s' at %s",
- MaybeDemangleGlobalName(g1->name), g1_loc.data());
- ReportErrorSummary(error_msg.data());
+ ErrorODRViolation error(GetCurrentTidOrInvalid(), g1, stack_id1, g2,
+ stack_id2);
+ in_report.ReportError(error);
}
// ----------------------- CheckForInvalidPointerPair ----------- {{{1
-static NOINLINE void
-ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp, uptr a1, uptr a2) {
+static NOINLINE void ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp,
+ uptr a1, uptr a2) {
ScopedInErrorReport in_report;
- const char *bug_type = "invalid-pointer-pair";
- Decorator d;
- Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: invalid-pointer-pair: %p %p\n", a1, a2);
- Printf("%s", d.EndWarning());
- GET_STACK_TRACE_FATAL(pc, bp);
- stack.Print();
- DescribeAddress(a1, 1, bug_type);
- DescribeAddress(a2, 1, bug_type);
- ReportErrorSummary(bug_type, &stack);
+ ErrorInvalidPointerPair error(GetCurrentTidOrInvalid(), pc, bp, sp, a1, a2);
+ in_report.ReportError(error);
}
static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) {
@@ -1029,7 +365,7 @@ void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name,
addr);
PrintZoneForPointer(addr, zone_ptr, zone_name);
stack->Print();
- DescribeHeapAddress(addr, 1);
+ DescribeAddressIfHeap(addr);
}
// -------------- SuppressErrorReport -------------- {{{1
@@ -1046,34 +382,10 @@ static bool SuppressErrorReport(uptr pc) {
Die();
}
-static void PrintContainerOverflowHint() {
- Printf("HINT: if you don't care about these errors you may set "
- "ASAN_OPTIONS=detect_container_overflow=0.\n"
- "If you suspect a false positive see also: "
- "https://github.com/google/sanitizers/wiki/"
- "AddressSanitizerContainerOverflow.\n");
-}
-
-static bool AdjacentShadowValuesAreFullyPoisoned(u8 *s) {
- return s[-1] > 127 && s[1] > 127;
-}
-
void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
uptr access_size, u32 exp, bool fatal) {
if (!fatal && SuppressErrorReport(pc)) return;
ENABLE_FRAME_POINTER;
- ScarinessScore SS;
-
- if (access_size) {
- if (access_size <= 9) {
- char desr[] = "?-byte";
- desr[0] = '0' + access_size;
- SS.Scare(access_size + access_size / 2, desr);
- } else if (access_size >= 10) {
- SS.Scare(15, "multi-byte");
- }
- is_write ? SS.Scare(20, "write") : SS.Scare(1, "read");
- }
// Optimization experiments.
// The experiments can be used to evaluate potential optimizations that remove
@@ -1084,118 +396,10 @@ void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
// The reaction to a non-zero value of exp is to be defined.
(void)exp;
- // Determine the error type.
- const char *bug_descr = "unknown-crash";
- u8 shadow_val = 0;
- if (AddrIsInMem(addr)) {
- u8 *shadow_addr = (u8*)MemToShadow(addr);
- // If we are accessing 16 bytes, look at the second shadow byte.
- if (*shadow_addr == 0 && access_size > SHADOW_GRANULARITY)
- shadow_addr++;
- // If we are in the partial right redzone, look at the next shadow byte.
- if (*shadow_addr > 0 && *shadow_addr < 128)
- shadow_addr++;
- bool far_from_bounds = false;
- shadow_val = *shadow_addr;
- int bug_type_score = 0;
- // For use-after-frees reads are almost as bad as writes.
- int read_after_free_bonus = 0;
- switch (shadow_val) {
- case kAsanHeapLeftRedzoneMagic:
- case kAsanHeapRightRedzoneMagic:
- case kAsanArrayCookieMagic:
- bug_descr = "heap-buffer-overflow";
- bug_type_score = 10;
- far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
- break;
- case kAsanHeapFreeMagic:
- bug_descr = "heap-use-after-free";
- bug_type_score = 20;
- if (!is_write) read_after_free_bonus = 18;
- break;
- case kAsanStackLeftRedzoneMagic:
- bug_descr = "stack-buffer-underflow";
- bug_type_score = 25;
- far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
- break;
- case kAsanInitializationOrderMagic:
- bug_descr = "initialization-order-fiasco";
- bug_type_score = 1;
- break;
- case kAsanStackMidRedzoneMagic:
- case kAsanStackRightRedzoneMagic:
- case kAsanStackPartialRedzoneMagic:
- bug_descr = "stack-buffer-overflow";
- bug_type_score = 25;
- far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
- break;
- case kAsanStackAfterReturnMagic:
- bug_descr = "stack-use-after-return";
- bug_type_score = 30;
- if (!is_write) read_after_free_bonus = 18;
- break;
- case kAsanUserPoisonedMemoryMagic:
- bug_descr = "use-after-poison";
- bug_type_score = 20;
- break;
- case kAsanContiguousContainerOOBMagic:
- bug_descr = "container-overflow";
- bug_type_score = 10;
- break;
- case kAsanStackUseAfterScopeMagic:
- bug_descr = "stack-use-after-scope";
- bug_type_score = 10;
- break;
- case kAsanGlobalRedzoneMagic:
- bug_descr = "global-buffer-overflow";
- bug_type_score = 10;
- far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
- break;
- case kAsanIntraObjectRedzone:
- bug_descr = "intra-object-overflow";
- bug_type_score = 10;
- break;
- case kAsanAllocaLeftMagic:
- case kAsanAllocaRightMagic:
- bug_descr = "dynamic-stack-buffer-overflow";
- bug_type_score = 25;
- far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
- break;
- }
- SS.Scare(bug_type_score + read_after_free_bonus, bug_descr);
- if (far_from_bounds)
- SS.Scare(10, "far-from-bounds");
- }
-
- ReportData report = { pc, sp, bp, addr, (bool)is_write, access_size,
- bug_descr };
- ScopedInErrorReport in_report(&report, fatal);
-
- Decorator d;
- Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: %s on address "
- "%p at pc %p bp %p sp %p\n",
- bug_descr, (void*)addr, pc, bp, sp);
- Printf("%s", d.EndWarning());
-
- u32 curr_tid = GetCurrentTidOrInvalid();
- char tname[128];
- Printf("%s%s of size %zu at %p thread T%d%s%s\n",
- d.Access(),
- access_size ? (is_write ? "WRITE" : "READ") : "ACCESS",
- access_size, (void*)addr, curr_tid,
- ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)),
- d.EndAccess());
-
- SS.Print();
- GET_STACK_TRACE_FATAL(pc, bp);
- stack.Print();
-
- DescribeAddress(addr, access_size, bug_descr);
- if (shadow_val == kAsanContiguousContainerOOBMagic)
- PrintContainerOverflowHint();
- ReportErrorSummary(bug_descr, &stack);
- PrintShadowMemoryForAddress(addr);
+ ScopedInErrorReport in_report(fatal);
+ ErrorGeneric error(GetCurrentTidOrInvalid(), pc, bp, sp, addr, is_write,
+ access_size);
+ in_report.ReportError(error);
}
} // namespace __asan
@@ -1218,40 +422,57 @@ void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) {
void __asan_describe_address(uptr addr) {
// Thread registry must be locked while we're describing an address.
asanThreadRegistry().Lock();
- DescribeAddress(addr, 1, "");
+ PrintAddressDescription(addr, 1, "");
asanThreadRegistry().Unlock();
}
int __asan_report_present() {
- return report_happened ? 1 : 0;
+ return ScopedInErrorReport::CurrentError().kind != kErrorKindInvalid;
}
uptr __asan_get_report_pc() {
- return report_data.pc;
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.pc;
+ return 0;
}
uptr __asan_get_report_bp() {
- return report_data.bp;
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.bp;
+ return 0;
}
uptr __asan_get_report_sp() {
- return report_data.sp;
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.sp;
+ return 0;
}
uptr __asan_get_report_address() {
- return report_data.addr;
+ ErrorDescription &err = ScopedInErrorReport::CurrentError();
+ if (err.kind == kErrorKindGeneric)
+ return err.Generic.addr_description.Address();
+ else if (err.kind == kErrorKindDoubleFree)
+ return err.DoubleFree.addr_description.addr;
+ return 0;
}
int __asan_get_report_access_type() {
- return report_data.is_write ? 1 : 0;
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.is_write;
+ return 0;
}
uptr __asan_get_report_access_size() {
- return report_data.access_size;
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.access_size;
+ return 0;
}
const char *__asan_get_report_description() {
- return report_data.description;
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.bug_descr;
+ return ScopedInErrorReport::CurrentError().Base.scariness.GetDescription();
}
extern "C" {
diff --git a/lib/asan/asan_report.h b/lib/asan/asan_report.h
index 03f096581a1c..5ebfda693d0c 100644
--- a/lib/asan/asan_report.h
+++ b/lib/asan/asan_report.h
@@ -25,35 +25,29 @@ struct StackVarDescr {
uptr name_len;
};
-struct AddressDescription {
- char *name;
- uptr name_size;
- uptr region_address;
- uptr region_size;
- const char *region_kind;
-};
-
// Returns the number of globals close to the provided address and copies
// them to "globals" array.
int GetGlobalsForAddress(uptr addr, __asan_global *globals, u32 *reg_sites,
int max_globals);
-bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr);
+
+const char *MaybeDemangleGlobalName(const char *name);
+void PrintGlobalNameIfASCII(InternalScopedString *str, const __asan_global &g);
+void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g);
+
+void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
+ bool in_shadow, const char *after = "\n");
+
// The following functions print the address description depending
// on the memory type (shadow/heap/stack/global).
-void DescribeHeapAddress(uptr addr, uptr access_size);
-bool DescribeAddressIfShadow(uptr addr, AddressDescription *descr = nullptr,
- bool print = true);
bool ParseFrameDescription(const char *frame_descr,
InternalMmapVector<StackVarDescr> *vars);
-bool DescribeAddressIfStack(uptr addr, uptr access_size);
-void DescribeThread(AsanThreadContext *context);
// Different kinds of error reports.
void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
uptr access_size, u32 exp, bool fatal);
void ReportStackOverflow(const SignalContext &sig);
-void ReportDeadlySignal(const char *description, const SignalContext &sig);
-void ReportNewDeleteSizeMismatch(uptr addr, uptr alloc_size, uptr delete_size,
+void ReportDeadlySignal(int signo, const SignalContext &sig);
+void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
BufferedStackTrace *free_stack);
void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack);
void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack);
diff --git a/lib/asan/asan_rtl.cc b/lib/asan/asan_rtl.cc
index 4962b9ee1561..fee7b8a2dbaf 100644
--- a/lib/asan/asan_rtl.cc
+++ b/lib/asan/asan_rtl.cc
@@ -32,6 +32,7 @@
#include "ubsan/ubsan_init.h"
#include "ubsan/ubsan_platform.h"
+uptr __asan_shadow_memory_dynamic_address; // Global interface symbol.
int __asan_option_detect_stack_use_after_return; // Global interface symbol.
uptr *__asan_test_only_reported_buggy_pointer; // Used only for testing asan.
@@ -263,6 +264,7 @@ static NOINLINE void force_interface_symbols() {
volatile int fake_condition = 0; // prevent dead condition elimination.
// __asan_report_* functions are noreturn, so we need a switch to prevent
// the compiler from removing any of them.
+ // clang-format off
switch (fake_condition) {
case 1: __asan_report_load1(0); break;
case 2: __asan_report_load2(0); break;
@@ -302,7 +304,14 @@ static NOINLINE void force_interface_symbols() {
case 37: __asan_unpoison_stack_memory(0, 0); break;
case 38: __asan_region_is_poisoned(0, 0); break;
case 39: __asan_describe_address(0); break;
+ case 40: __asan_set_shadow_00(0, 0); break;
+ case 41: __asan_set_shadow_f1(0, 0); break;
+ case 42: __asan_set_shadow_f2(0, 0); break;
+ case 43: __asan_set_shadow_f3(0, 0); break;
+ case 44: __asan_set_shadow_f5(0, 0); break;
+ case 45: __asan_set_shadow_f8(0, 0); break;
}
+ // clang-format on
}
static void asan_atexit() {
@@ -326,8 +335,21 @@ static void InitializeHighMemEnd() {
}
static void ProtectGap(uptr addr, uptr size) {
- if (!flags()->protect_shadow_gap)
+ if (!flags()->protect_shadow_gap) {
+ // The shadow gap is unprotected, so there is a chance that someone
+    // is actually using this memory, which means it needs a shadow...
+ uptr GapShadowBeg = RoundDownTo(MEM_TO_SHADOW(addr), GetPageSizeCached());
+ uptr GapShadowEnd =
+ RoundUpTo(MEM_TO_SHADOW(addr + size), GetPageSizeCached()) - 1;
+ if (Verbosity())
+ Printf("protect_shadow_gap=0:"
+ " not protecting shadow gap, allocating gap's shadow\n"
+ "|| `[%p, %p]` || ShadowGap's shadow ||\n", GapShadowBeg,
+ GapShadowEnd);
+ ReserveShadowMemoryRange(GapShadowBeg, GapShadowEnd,
+ "unprotected gap shadow");
return;
+ }
void *res = MmapFixedNoAccess(addr, size, "shadow gap");
if (addr == (uptr)res)
return;
@@ -388,6 +410,8 @@ static void PrintAddressSpaceLayout() {
Printf("redzone=%zu\n", (uptr)flags()->redzone);
Printf("max_redzone=%zu\n", (uptr)flags()->max_redzone);
Printf("quarantine_size_mb=%zuM\n", (uptr)flags()->quarantine_size_mb);
+ Printf("thread_local_quarantine_size_kb=%zuK\n",
+ (uptr)flags()->thread_local_quarantine_size_kb);
Printf("malloc_context_size=%zu\n",
(uptr)common_flags()->malloc_context_size);
@@ -401,6 +425,79 @@ static void PrintAddressSpaceLayout() {
kHighShadowBeg > kMidMemEnd);
}
+static void InitializeShadowMemory() {
+ // Set the shadow memory address to uninitialized.
+ __asan_shadow_memory_dynamic_address = kDefaultShadowSentinel;
+
+ uptr shadow_start = kLowShadowBeg;
+ // Detect if a dynamic shadow address must be used and find an available location
+ // when necessary. When dynamic address is used, the macro |kLowShadowBeg|
+ // expands to |__asan_shadow_memory_dynamic_address| which is
+ // |kDefaultShadowSentinel|.
+ if (shadow_start == kDefaultShadowSentinel) {
+ __asan_shadow_memory_dynamic_address = 0;
+ CHECK_EQ(0, kLowShadowBeg);
+
+ uptr granularity = GetMmapGranularity();
+ uptr alignment = 8 * granularity;
+ uptr left_padding = granularity;
+ uptr space_size = kHighShadowEnd + left_padding;
+
+ shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity);
+ CHECK_NE((uptr)0, shadow_start);
+ CHECK(IsAligned(shadow_start, alignment));
+ }
+ // Update the shadow memory address (potentially) used by instrumentation.
+ __asan_shadow_memory_dynamic_address = shadow_start;
+
+ if (kLowShadowBeg)
+ shadow_start -= GetMmapGranularity();
+ bool full_shadow_is_available =
+ MemoryRangeIsAvailable(shadow_start, kHighShadowEnd);
+
+#if SANITIZER_LINUX && defined(__x86_64__) && defined(_LP64) && \
+ !ASAN_FIXED_MAPPING
+ if (!full_shadow_is_available) {
+ kMidMemBeg = kLowMemEnd < 0x3000000000ULL ? 0x3000000000ULL : 0;
+ kMidMemEnd = kLowMemEnd < 0x3000000000ULL ? 0x4fffffffffULL : 0;
+ }
+#endif
+
+ if (Verbosity()) PrintAddressSpaceLayout();
+
+ if (full_shadow_is_available) {
+ // mmap the low shadow plus at least one page at the left.
+ if (kLowShadowBeg)
+ ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
+ // mmap the high shadow.
+ ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
+ // protect the gap.
+ ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
+ CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1);
+ } else if (kMidMemBeg &&
+ MemoryRangeIsAvailable(shadow_start, kMidMemBeg - 1) &&
+ MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) {
+ CHECK(kLowShadowBeg != kLowShadowEnd);
+ // mmap the low shadow plus at least one page at the left.
+ ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
+ // mmap the mid shadow.
+ ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd, "mid shadow");
+ // mmap the high shadow.
+ ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
+ // protect the gaps.
+ ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
+ ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1);
+ ProtectGap(kShadowGap3Beg, kShadowGap3End - kShadowGap3Beg + 1);
+ } else {
+ Report("Shadow memory range interleaves with an existing memory mapping. "
+ "ASan cannot proceed correctly. ABORTING.\n");
+ Report("ASan shadow was supposed to be located in the [%p-%p] range.\n",
+ shadow_start, kHighShadowEnd);
+ DumpProcessMap();
+ Die();
+ }
+}
+
static void AsanInitInternal() {
if (LIKELY(asan_inited)) return;
SanitizerToolName = "AddressSanitizer";
@@ -434,7 +531,6 @@ static void AsanInitInternal() {
__sanitizer_set_report_path(common_flags()->log_path);
- // Enable UAR detection, if required.
__asan_option_detect_stack_use_after_return =
flags()->detect_stack_use_after_return;
@@ -453,61 +549,9 @@ static void AsanInitInternal() {
ReplaceSystemMalloc();
- uptr shadow_start = kLowShadowBeg;
- if (kLowShadowBeg)
- shadow_start -= GetMmapGranularity();
- bool full_shadow_is_available =
- MemoryRangeIsAvailable(shadow_start, kHighShadowEnd);
-
-#if SANITIZER_LINUX && defined(__x86_64__) && defined(_LP64) && \
- !ASAN_FIXED_MAPPING
- if (!full_shadow_is_available) {
- kMidMemBeg = kLowMemEnd < 0x3000000000ULL ? 0x3000000000ULL : 0;
- kMidMemEnd = kLowMemEnd < 0x3000000000ULL ? 0x4fffffffffULL : 0;
- }
-#elif SANITIZER_WINDOWS64
- // Disable the "mid mem" shadow layout.
- if (!full_shadow_is_available) {
- kMidMemBeg = 0;
- kMidMemEnd = 0;
- }
-#endif
-
- if (Verbosity()) PrintAddressSpaceLayout();
-
DisableCoreDumperIfNecessary();
- if (full_shadow_is_available) {
- // mmap the low shadow plus at least one page at the left.
- if (kLowShadowBeg)
- ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
- // mmap the high shadow.
- ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
- // protect the gap.
- ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
- CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1);
- } else if (kMidMemBeg &&
- MemoryRangeIsAvailable(shadow_start, kMidMemBeg - 1) &&
- MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) {
- CHECK(kLowShadowBeg != kLowShadowEnd);
- // mmap the low shadow plus at least one page at the left.
- ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
- // mmap the mid shadow.
- ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd, "mid shadow");
- // mmap the high shadow.
- ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
- // protect the gaps.
- ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
- ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1);
- ProtectGap(kShadowGap3Beg, kShadowGap3End - kShadowGap3Beg + 1);
- } else {
- Report("Shadow memory range interleaves with an existing memory mapping. "
- "ASan cannot proceed correctly. ABORTING.\n");
- Report("ASan shadow was supposed to be located in the [%p-%p] range.\n",
- shadow_start, kHighShadowEnd);
- DumpProcessMap();
- Die();
- }
+ InitializeShadowMemory();
AsanTSDInit(PlatformTSDDtor);
InstallDeadlySignalHandlers(AsanOnDeadlySignal);
@@ -599,6 +643,9 @@ static AsanInitializer asan_initializer;
using namespace __asan; // NOLINT
void NOINLINE __asan_handle_no_return() {
+ if (asan_init_is_running)
+ return;
+
int local_stack;
AsanThread *curr_thread = GetCurrentThread();
uptr PageSize = GetPageSizeCached();
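
The InitializeShadowMemory() path added above publishes the base it picks through __asan_shadow_memory_dynamic_address before reserving any shadow. The following is a minimal C++ sketch of what a dynamic shadow base means for the usual mem-to-shadow computation; the 8-to-1 scaling is the standard ASan shadow granularity, and the variable and function names are illustrative stand-ins, not the runtime's internals.

#include <cstdint>
#include <cstdio>

// Illustrative stand-in for the base the runtime publishes at startup.
static uintptr_t shadow_dynamic_base = 0;

// Classic ASan mapping: shadow = (addr >> 3) + offset. With a dynamic shadow,
// the offset is not a link-time constant but whatever the runtime found free.
static inline uintptr_t MemToShadowSketch(uintptr_t addr) {
  return (addr >> 3) + shadow_dynamic_base;
}

int main() {
  shadow_dynamic_base = 0x10000000;  // pretend FindAvailableMemoryRange() chose this
  int x = 0;
  std::printf("app %p -> shadow %#zx\n", (void *)&x,
              (size_t)MemToShadowSketch((uintptr_t)&x));
}
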
diff --git a/lib/asan/asan_scariness_score.h b/lib/asan/asan_scariness_score.h
index 492eb561c6e2..7f1571416fd5 100644
--- a/lib/asan/asan_scariness_score.h
+++ b/lib/asan/asan_scariness_score.h
@@ -34,10 +34,10 @@
#include "sanitizer_common/sanitizer_libc.h"
namespace __asan {
-class ScarinessScore {
- public:
- ScarinessScore() {
+struct ScarinessScoreBase {
+ void Clear() {
descr[0] = 0;
+ score = 0;
}
void Scare(int add_to_score, const char *reason) {
if (descr[0])
@@ -52,16 +52,23 @@ class ScarinessScore {
Printf("SCARINESS: %d (%s)\n", score, descr);
}
static void PrintSimple(int score, const char *descr) {
- ScarinessScore SS;
- SS.Scare(score, descr);
- SS.Print();
+ ScarinessScoreBase SSB;
+ SSB.Clear();
+ SSB.Scare(score, descr);
+ SSB.Print();
}
private:
- int score = 0;
+ int score;
char descr[1024];
};
+struct ScarinessScore : ScarinessScoreBase {
+ ScarinessScore() {
+ Clear();
+ }
+};
+
} // namespace __asan
#endif // ASAN_SCARINESS_SCORE_H
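
The split above turns the old class into a constructor-free ScarinessScoreBase plus a thin ScarinessScore wrapper, so the base can be embedded in error objects that are not built with a C++ constructor while stack users keep the construct-and-clear behavior. A small sketch of the same pattern, with hypothetical names:

#include <cstdio>
#include <cstring>

// Plain struct: no constructor, so it can live inside objects that are set up
// by memset or aggregate initialization rather than C++ constructors.
struct ScoreBase {
  int score;
  char descr[64];
  void Clear() {
    score = 0;
    descr[0] = 0;
  }
  void Scare(int add, const char *reason) {
    if (descr[0])
      std::strncat(descr, ", ", sizeof(descr) - std::strlen(descr) - 1);
    std::strncat(descr, reason, sizeof(descr) - std::strlen(descr) - 1);
    score += add;
  }
  void Print() const { std::printf("SCARINESS: %d (%s)\n", score, descr); }
};

// Convenience wrapper for stack use: construction implies Clear().
struct Score : ScoreBase {
  Score() { Clear(); }
};

int main() {
  Score s;
  s.Scare(10, "heap-buffer-overflow");
  s.Scare(5, "write");
  s.Print();
}
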
diff --git a/lib/asan/asan_thread.cc b/lib/asan/asan_thread.cc
index d7e2cca65660..537b53d9e0c3 100644
--- a/lib/asan/asan_thread.cc
+++ b/lib/asan/asan_thread.cc
@@ -141,7 +141,9 @@ void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
current_fake_stack->Destroy(this->tid());
}
-void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save) {
+void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
+ uptr *bottom_old,
+ uptr *size_old) {
if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
Report("ERROR: finishing a fiber switch that has not started\n");
Die();
@@ -152,6 +154,10 @@ void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save) {
fake_stack_ = fake_stack_save;
}
+ if (bottom_old)
+ *bottom_old = stack_bottom_;
+ if (size_old)
+ *size_old = stack_top_ - stack_bottom_;
stack_bottom_ = next_stack_bottom_;
stack_top_ = next_stack_top_;
atomic_store(&stack_switching_, 0, memory_order_release);
@@ -345,7 +351,7 @@ AsanThread *GetCurrentThread() {
// limits, so only do this magic on Android, and only if the found thread
// is the main thread.
AsanThreadContext *tctx = GetThreadContextByTidLocked(0);
- if (ThreadStackContainsAddress(tctx, &context)) {
+ if (tctx && ThreadStackContainsAddress(tctx, &context)) {
SetCurrentThread(tctx->thread);
return tctx->thread;
}
@@ -447,12 +453,16 @@ void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom,
}
SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_finish_switch_fiber(void* fakestack) {
+void __sanitizer_finish_switch_fiber(void* fakestack,
+ const void **bottom_old,
+ uptr *size_old) {
AsanThread *t = GetCurrentThread();
if (!t) {
VReport(1, "__asan_finish_switch_fiber called from unknown thread\n");
return;
}
- t->FinishSwitchFiber((FakeStack*)fakestack);
+ t->FinishSwitchFiber((FakeStack*)fakestack,
+ (uptr*)bottom_old,
+ (uptr*)size_old);
}
}
diff --git a/lib/asan/asan_thread.h b/lib/asan/asan_thread.h
index 92a92a2e863e..f53dfb712449 100644
--- a/lib/asan/asan_thread.h
+++ b/lib/asan/asan_thread.h
@@ -94,7 +94,8 @@ class AsanThread {
}
void StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom, uptr size);
- void FinishSwitchFiber(FakeStack *fake_stack_save);
+ void FinishSwitchFiber(FakeStack *fake_stack_save, uptr *bottom_old,
+ uptr *size_old);
bool has_fake_stack() {
return !atomic_load(&stack_switching_, memory_order_relaxed) &&
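
The extra bottom_old/size_old out-parameters let a fiber library recover the bounds of the stack it just left, so it can hand them back to __sanitizer_start_switch_fiber when switching home. Below is a sketch of the annotation protocol as it looks with the extended signature, assuming declarations in <sanitizer/common_interface_defs.h> that match this change; the ucontext scaffolding, stack size, and names are illustrative only.

// Build with -fsanitize=address; ucontext is deprecated on some platforms.
#include <sanitizer/common_interface_defs.h>
#include <ucontext.h>
#include <cstdio>

static ucontext_t main_ctx, fiber_ctx;
static char fiber_stack[64 * 1024];
static void *main_fake_stack;        // fake-stack handle saved when main suspends
static const void *main_stack_bottom;
static size_t main_stack_size;

static void fiber_func() {
  // First entry on the fiber stack: complete the switch. Passing nullptr for
  // the save slot is correct for a brand-new fiber; the out-params capture the
  // bounds of the stack we came from (the main thread's stack).
  __sanitizer_finish_switch_fiber(nullptr, &main_stack_bottom, &main_stack_size);
  std::puts("running on fiber");
  // Switch back for good: nullptr as the save slot tells ASan this fiber will
  // never resume, and the captured bounds describe the destination stack.
  __sanitizer_start_switch_fiber(nullptr, main_stack_bottom, main_stack_size);
  swapcontext(&fiber_ctx, &main_ctx);
}

int main() {
  getcontext(&fiber_ctx);
  fiber_ctx.uc_stack.ss_sp = fiber_stack;
  fiber_ctx.uc_stack.ss_size = sizeof(fiber_stack);
  fiber_ctx.uc_link = &main_ctx;
  makecontext(&fiber_ctx, fiber_func, 0);

  // Announce the switch onto the fiber's stack, then perform it.
  __sanitizer_start_switch_fiber(&main_fake_stack, fiber_stack, sizeof(fiber_stack));
  swapcontext(&main_ctx, &fiber_ctx);
  // Back on the main stack: restore the fake stack saved before leaving.
  __sanitizer_finish_switch_fiber(main_fake_stack, nullptr, nullptr);
  std::puts("back on main stack");
}
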
diff --git a/lib/asan/asan_win.cc b/lib/asan/asan_win.cc
index 2ef78cd15508..78268d83e539 100644
--- a/lib/asan/asan_win.cc
+++ b/lib/asan/asan_win.cc
@@ -19,6 +19,7 @@
#include <stdlib.h>
+#include "asan_globals_win.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_report.h"
@@ -37,7 +38,13 @@ int __asan_should_detect_stack_use_after_return() {
return __asan_option_detect_stack_use_after_return;
}
-// -------------------- A workaround for the abscence of weak symbols ----- {{{
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __asan_get_shadow_memory_dynamic_address() {
+ __asan_init();
+ return __asan_shadow_memory_dynamic_address;
+}
+
+// -------------------- A workaround for the absence of weak symbols ----- {{{
// We don't have a direct equivalent of weak symbols when using MSVC, but we can
// use the /alternatename directive to tell the linker to default a specific
// symbol to a specific value, which works nicely for allocator hooks and
@@ -64,14 +71,22 @@ void __asan_default_on_error() {}
// }}}
} // extern "C"
-// ---------------------- Windows-specific inteceptors ---------------- {{{
+// ---------------------- Windows-specific interceptors ---------------- {{{
+INTERCEPTOR_WINAPI(void, RtlRaiseException, EXCEPTION_RECORD *ExceptionRecord) {
+ CHECK(REAL(RtlRaiseException));
+ // This is a noreturn function, unless it's one of the exceptions raised to
+ // communicate with the debugger, such as the one from OutputDebugString.
+ if (ExceptionRecord->ExceptionCode != DBG_PRINTEXCEPTION_C)
+ __asan_handle_no_return();
+ REAL(RtlRaiseException)(ExceptionRecord);
+}
+
INTERCEPTOR_WINAPI(void, RaiseException, void *a, void *b, void *c, void *d) {
CHECK(REAL(RaiseException));
__asan_handle_no_return();
REAL(RaiseException)(a, b, c, d);
}
-
#ifdef _WIN64
INTERCEPTOR_WINAPI(int, __C_specific_handler, void *a, void *b, void *c, void *d) { // NOLINT
@@ -123,44 +138,12 @@ INTERCEPTOR_WINAPI(DWORD, CreateThread,
asan_thread_start, t, thr_flags, tid);
}
-namespace {
-BlockingMutex mu_for_thread_tracking(LINKER_INITIALIZED);
-
-void EnsureWorkerThreadRegistered() {
- // FIXME: GetCurrentThread relies on TSD, which might not play well with
- // system thread pools. We might want to use something like reference
- // counting to zero out GetCurrentThread() underlying storage when the last
- // work item finishes? Or can we disable reclaiming of threads in the pool?
- BlockingMutexLock l(&mu_for_thread_tracking);
- if (__asan::GetCurrentThread())
- return;
-
- AsanThread *t = AsanThread::Create(
- /* start_routine */ nullptr, /* arg */ nullptr,
- /* parent_tid */ -1, /* stack */ nullptr, /* detached */ true);
- t->Init();
- asanThreadRegistry().StartThread(t->tid(), 0, 0);
- SetCurrentThread(t);
-}
-} // namespace
-
-INTERCEPTOR_WINAPI(DWORD, NtWaitForWorkViaWorkerFactory, DWORD a, DWORD b) {
- // NtWaitForWorkViaWorkerFactory is called from system worker pool threads to
- // query work scheduled by BindIoCompletionCallback, QueueUserWorkItem, etc.
- // System worker pool threads are created at arbitraty point in time and
- // without using CreateThread, so we wrap NtWaitForWorkViaWorkerFactory
- // instead and don't register a specific parent_tid/stack.
- EnsureWorkerThreadRegistered();
- return REAL(NtWaitForWorkViaWorkerFactory)(a, b);
-}
-
// }}}
namespace __asan {
void InitializePlatformInterceptors() {
ASAN_INTERCEPT_FUNC(CreateThread);
- ASAN_INTERCEPT_FUNC(RaiseException);
#ifdef _WIN64
ASAN_INTERCEPT_FUNC(__C_specific_handler);
@@ -169,11 +152,15 @@ void InitializePlatformInterceptors() {
ASAN_INTERCEPT_FUNC(_except_handler4);
#endif
- // NtWaitForWorkViaWorkerFactory is always linked dynamically.
- CHECK(::__interception::OverrideFunction(
- "NtWaitForWorkViaWorkerFactory",
- (uptr)WRAP(NtWaitForWorkViaWorkerFactory),
- (uptr *)&REAL(NtWaitForWorkViaWorkerFactory)));
+ // Try to intercept kernel32!RaiseException, and if that fails, intercept
+ // ntdll!RtlRaiseException instead.
+ if (!::__interception::OverrideFunction("RaiseException",
+ (uptr)WRAP(RaiseException),
+ (uptr *)&REAL(RaiseException))) {
+ CHECK(::__interception::OverrideFunction("RtlRaiseException",
+ (uptr)WRAP(RtlRaiseException),
+ (uptr *)&REAL(RtlRaiseException)));
+ }
}
void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
@@ -229,8 +216,7 @@ void AsanOnDeadlySignal(int, void *siginfo, void *context) {
// Exception handler for dealing with shadow memory.
static LONG CALLBACK
ShadowExceptionHandler(PEXCEPTION_POINTERS exception_pointers) {
- static uptr page_size = GetPageSizeCached();
- static uptr alloc_granularity = GetMmapGranularity();
+ uptr page_size = GetPageSizeCached();
// Only handle access violations.
if (exception_pointers->ExceptionRecord->ExceptionCode !=
EXCEPTION_ACCESS_VIOLATION) {
@@ -276,22 +262,57 @@ void InitializePlatformExceptionHandlers() {
static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler;
-static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
- EXCEPTION_RECORD *exception_record = info->ExceptionRecord;
- CONTEXT *context = info->ContextRecord;
+// Check based on flags if we should report this exception.
+static bool ShouldReportDeadlyException(unsigned code) {
+ switch (code) {
+ case EXCEPTION_ACCESS_VIOLATION:
+ case EXCEPTION_IN_PAGE_ERROR:
+ return common_flags()->handle_segv;
+ case EXCEPTION_BREAKPOINT:
+ case EXCEPTION_ILLEGAL_INSTRUCTION: {
+ return common_flags()->handle_sigill;
+ }
+ }
+ return false;
+}
- if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
- exception_record->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) {
- const char *description =
- (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
- ? "access-violation"
- : "in-page-error";
- SignalContext sig = SignalContext::Create(exception_record, context);
- ReportDeadlySignal(description, sig);
+// Return the textual name for this exception.
+const char *DescribeSignalOrException(int signo) {
+ unsigned code = signo;
+ // Get the string description of the exception if this is a known deadly
+ // exception.
+ switch (code) {
+ case EXCEPTION_ACCESS_VIOLATION:
+ return "access-violation";
+ case EXCEPTION_IN_PAGE_ERROR:
+ return "in-page-error";
+ case EXCEPTION_BREAKPOINT:
+ return "breakpoint";
+ case EXCEPTION_ILLEGAL_INSTRUCTION:
+ return "illegal-instruction";
}
+ return nullptr;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+long __asan_unhandled_exception_filter(EXCEPTION_POINTERS *info) {
+ EXCEPTION_RECORD *exception_record = info->ExceptionRecord;
+ CONTEXT *context = info->ContextRecord;
+ // Continue the search if the signal wasn't deadly.
+ if (!ShouldReportDeadlyException(exception_record->ExceptionCode))
+ return EXCEPTION_CONTINUE_SEARCH;
// FIXME: Handle EXCEPTION_STACK_OVERFLOW here.
+ SignalContext sig = SignalContext::Create(exception_record, context);
+ ReportDeadlySignal(exception_record->ExceptionCode, sig);
+ UNREACHABLE("returned from reporting deadly signal");
+}
+
+static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
+ __asan_unhandled_exception_filter(info);
+
+ // Bubble out to the default exception filter.
return default_seh_handler(info);
}
@@ -331,10 +352,25 @@ int __asan_set_seh_filter() {
// immediately after the CRT runs. This way, our exception filter is called
// first and we can delegate to their filter if appropriate.
#pragma section(".CRT$XCAB", long, read) // NOLINT
-__declspec(allocate(".CRT$XCAB"))
- int (*__intercept_seh)() = __asan_set_seh_filter;
+__declspec(allocate(".CRT$XCAB")) int (*__intercept_seh)() =
+ __asan_set_seh_filter;
+
+// Piggyback on the TLS initialization callback directory to initialize asan as
+// early as possible. Initializers in .CRT$XL* are called directly by ntdll,
+// which run before the CRT. Users also add code to .CRT$XLC, so it's important
+// to run our initializers first.
+static void NTAPI asan_thread_init(void *module, DWORD reason, void *reserved) {
+ if (reason == DLL_PROCESS_ATTACH) __asan_init();
+}
+
+#pragma section(".CRT$XLAB", long, read) // NOLINT
+__declspec(allocate(".CRT$XLAB")) void (NTAPI *__asan_tls_init)(void *,
+ unsigned long, void *) = asan_thread_init;
#endif
+
+ASAN_LINK_GLOBALS_WIN()
+
// }}}
} // namespace __asan
-#endif // _WIN32
+#endif // SANITIZER_WINDOWS
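
asan_thread_init above leans on the PE TLS callback directory: pointers dropped into the .CRT$XL* sections are gathered into the image's TLS directory, and ntdll invokes them before the CRT runs its own initializers. The following is a minimal MSVC-style sketch of the same trick with illustrative names; the /INCLUDE of _tls_used is the usual way to keep the directory alive, and on 32-bit builds the pointer variable itself sometimes needs an /INCLUDE as well.

#include <windows.h>
#include <cstdio>

static bool g_early_init_ran = false;

// Runs before CRT initializers and C++ constructors because ntdll walks the
// TLS callback directory first; keep the work minimal (no stdio here).
static void NTAPI early_init_callback(PVOID module, DWORD reason, PVOID reserved) {
  if (reason == DLL_PROCESS_ATTACH)
    g_early_init_ran = true;
}

// Force the linker to emit the TLS directory even without __declspec(thread) data.
#ifdef _WIN64
#pragma comment(linker, "/INCLUDE:_tls_used")
#else
#pragma comment(linker, "/INCLUDE:__tls_used")
#endif

// .CRT$XLB is the conventional user slot; the runtime above uses .CRT$XLAB so
// it runs even earlier than user callbacks placed in .CRT$XLC.
#pragma section(".CRT$XLB", long, read)
__declspec(allocate(".CRT$XLB"))
PIMAGE_TLS_CALLBACK early_init_tls_callback = early_init_callback;

int main() {
  std::printf("early init ran before main: %d\n", g_early_init_ran);
}
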
diff --git a/lib/asan/asan_win_dll_thunk.cc b/lib/asan/asan_win_dll_thunk.cc
index f55588613066..4764fd0a736c 100644
--- a/lib/asan/asan_win_dll_thunk.cc
+++ b/lib/asan/asan_win_dll_thunk.cc
@@ -15,21 +15,30 @@
// See https://github.com/google/sanitizers/issues/209 for the details.
//===----------------------------------------------------------------------===//
-// Only compile this code when buidling asan_dll_thunk.lib
+// Only compile this code when building asan_dll_thunk.lib
// Using #ifdef rather than relying on Makefiles etc.
// simplifies the build procedure.
#ifdef ASAN_DLL_THUNK
#include "asan_init_version.h"
+#include "asan_globals_win.h"
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_platform_interceptors.h"
+#ifdef _M_IX86
+#define WINAPI __stdcall
+#else
+#define WINAPI
+#endif
+
// ---------- Function interception helper functions and macros ----------- {{{1
extern "C" {
-void *__stdcall GetModuleHandleA(const char *module_name);
-void *__stdcall GetProcAddress(void *module, const char *proc_name);
+void *WINAPI GetModuleHandleA(const char *module_name);
+void *WINAPI GetProcAddress(void *module, const char *proc_name);
void abort();
}
+using namespace __sanitizer;
+
static uptr getRealProcAddressOrDie(const char *name) {
uptr ret =
__interception::InternalGetProcAddress((void *)GetModuleHandleA(0), name);
@@ -105,7 +114,7 @@ static void InterceptHooks();
// ---------- Function wrapping helpers ----------------------------------- {{{1
#define WRAP_V_V(name) \
extern "C" void name() { \
- typedef void (*fntype)(); \
+ typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
fn(); \
} \
@@ -113,7 +122,7 @@ static void InterceptHooks();
#define WRAP_V_W(name) \
extern "C" void name(void *arg) { \
- typedef void (*fntype)(void *arg); \
+ typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
fn(arg); \
} \
@@ -121,7 +130,7 @@ static void InterceptHooks();
#define WRAP_V_WW(name) \
extern "C" void name(void *arg1, void *arg2) { \
- typedef void (*fntype)(void *, void *); \
+ typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
fn(arg1, arg2); \
} \
@@ -129,7 +138,7 @@ static void InterceptHooks();
#define WRAP_V_WWW(name) \
extern "C" void name(void *arg1, void *arg2, void *arg3) { \
- typedef void *(*fntype)(void *, void *, void *); \
+ typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
fn(arg1, arg2, arg3); \
} \
@@ -137,7 +146,7 @@ static void InterceptHooks();
#define WRAP_W_V(name) \
extern "C" void *name() { \
- typedef void *(*fntype)(); \
+ typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(); \
} \
@@ -145,7 +154,7 @@ static void InterceptHooks();
#define WRAP_W_W(name) \
extern "C" void *name(void *arg) { \
- typedef void *(*fntype)(void *arg); \
+ typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg); \
} \
@@ -153,7 +162,7 @@ static void InterceptHooks();
#define WRAP_W_WW(name) \
extern "C" void *name(void *arg1, void *arg2) { \
- typedef void *(*fntype)(void *, void *); \
+ typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2); \
} \
@@ -161,7 +170,7 @@ static void InterceptHooks();
#define WRAP_W_WWW(name) \
extern "C" void *name(void *arg1, void *arg2, void *arg3) { \
- typedef void *(*fntype)(void *, void *, void *); \
+ typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2, arg3); \
} \
@@ -169,7 +178,7 @@ static void InterceptHooks();
#define WRAP_W_WWWW(name) \
extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4) { \
- typedef void *(*fntype)(void *, void *, void *, void *); \
+ typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2, arg3, arg4); \
} \
@@ -178,7 +187,7 @@ static void InterceptHooks();
#define WRAP_W_WWWWW(name) \
extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
void *arg5) { \
- typedef void *(*fntype)(void *, void *, void *, void *, void *); \
+ typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2, arg3, arg4, arg5); \
} \
@@ -187,7 +196,7 @@ static void InterceptHooks();
#define WRAP_W_WWWWWW(name) \
extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
void *arg5, void *arg6) { \
- typedef void *(*fntype)(void *, void *, void *, void *, void *, void *); \
+ typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2, arg3, arg4, arg5, arg6); \
} \
@@ -198,9 +207,11 @@ static void InterceptHooks();
// Don't use the INTERFACE_FUNCTION machinery for this function as we actually
// want to call it in the __asan_init interceptor.
WRAP_W_V(__asan_should_detect_stack_use_after_return)
+WRAP_W_V(__asan_get_shadow_memory_dynamic_address)
extern "C" {
int __asan_option_detect_stack_use_after_return;
+ uptr __asan_shadow_memory_dynamic_address;
// Manually wrap __asan_init as we need to initialize
// __asan_option_detect_stack_use_after_return afterwards.
@@ -214,7 +225,8 @@ extern "C" {
fn();
__asan_option_detect_stack_use_after_return =
(__asan_should_detect_stack_use_after_return() != 0);
-
+ __asan_shadow_memory_dynamic_address =
+ (uptr)__asan_get_shadow_memory_dynamic_address();
InterceptHooks();
}
}
@@ -224,6 +236,7 @@ extern "C" void __asan_version_mismatch_check() {
}
INTERFACE_FUNCTION(__asan_handle_no_return)
+INTERFACE_FUNCTION(__asan_unhandled_exception_filter)
INTERFACE_FUNCTION(__asan_report_store1)
INTERFACE_FUNCTION(__asan_report_store2)
@@ -257,6 +270,13 @@ INTERFACE_FUNCTION(__asan_memcpy);
INTERFACE_FUNCTION(__asan_memset);
INTERFACE_FUNCTION(__asan_memmove);
+INTERFACE_FUNCTION(__asan_set_shadow_00);
+INTERFACE_FUNCTION(__asan_set_shadow_f1);
+INTERFACE_FUNCTION(__asan_set_shadow_f2);
+INTERFACE_FUNCTION(__asan_set_shadow_f3);
+INTERFACE_FUNCTION(__asan_set_shadow_f5);
+INTERFACE_FUNCTION(__asan_set_shadow_f8);
+
INTERFACE_FUNCTION(__asan_alloca_poison);
INTERFACE_FUNCTION(__asan_allocas_unpoison);
@@ -306,17 +326,18 @@ INTERFACE_FUNCTION(__sanitizer_annotate_contiguous_container)
INTERFACE_FUNCTION(__sanitizer_contiguous_container_find_bad_address)
INTERFACE_FUNCTION(__sanitizer_cov)
INTERFACE_FUNCTION(__sanitizer_cov_dump)
+INTERFACE_FUNCTION(__sanitizer_dump_coverage)
+INTERFACE_FUNCTION(__sanitizer_dump_trace_pc_guard_coverage)
INTERFACE_FUNCTION(__sanitizer_cov_indir_call16)
INTERFACE_FUNCTION(__sanitizer_cov_init)
INTERFACE_FUNCTION(__sanitizer_cov_module_init)
INTERFACE_FUNCTION(__sanitizer_cov_trace_basic_block)
INTERFACE_FUNCTION(__sanitizer_cov_trace_func_enter)
-INTERFACE_FUNCTION(__sanitizer_cov_trace_cmp)
-INTERFACE_FUNCTION(__sanitizer_cov_trace_switch)
+INTERFACE_FUNCTION(__sanitizer_cov_trace_pc_guard)
+INTERFACE_FUNCTION(__sanitizer_cov_trace_pc_guard_init)
INTERFACE_FUNCTION(__sanitizer_cov_with_check)
INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
INTERFACE_FUNCTION(__sanitizer_get_coverage_guards)
-INTERFACE_FUNCTION(__sanitizer_get_coverage_pc_buffer)
INTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes)
INTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size)
INTERFACE_FUNCTION(__sanitizer_get_free_bytes)
@@ -327,6 +348,8 @@ INTERFACE_FUNCTION(__sanitizer_get_total_unique_coverage)
INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)
INTERFACE_FUNCTION(__sanitizer_maybe_open_cov_file)
INTERFACE_FUNCTION(__sanitizer_print_stack_trace)
+INTERFACE_FUNCTION(__sanitizer_symbolize_pc)
+INTERFACE_FUNCTION(__sanitizer_symbolize_global)
INTERFACE_FUNCTION(__sanitizer_ptr_cmp)
INTERFACE_FUNCTION(__sanitizer_ptr_sub)
INTERFACE_FUNCTION(__sanitizer_report_error_summary)
@@ -347,6 +370,7 @@ INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
INTERFACE_FUNCTION(__sanitizer_install_malloc_and_free_hooks)
INTERFACE_FUNCTION(__sanitizer_start_switch_fiber)
INTERFACE_FUNCTION(__sanitizer_finish_switch_fiber)
+INTERFACE_FUNCTION(__sanitizer_get_module_and_offset_for_pc)
// TODO(timurrrr): Add more interface functions on the as-needed basis.
@@ -368,6 +392,7 @@ WRAP_W_WW(realloc)
WRAP_W_WW(_realloc_base)
WRAP_W_WWW(_realloc_dbg)
WRAP_W_WWW(_recalloc)
+WRAP_W_WWW(_recalloc_base)
WRAP_W_W(_msize)
WRAP_W_W(_expand)
@@ -444,4 +469,15 @@ static int call_asan_init() {
#pragma section(".CRT$XIB", long, read) // NOLINT
__declspec(allocate(".CRT$XIB")) int (*__asan_preinit)() = call_asan_init;
+static void WINAPI asan_thread_init(void *mod, unsigned long reason,
+ void *reserved) {
+ if (reason == /*DLL_PROCESS_ATTACH=*/1) __asan_init();
+}
+
+#pragma section(".CRT$XLAB", long, read) // NOLINT
+__declspec(allocate(".CRT$XLAB")) void (WINAPI *__asan_tls_init)(void *,
+ unsigned long, void *) = asan_thread_init;
+
+ASAN_LINK_GLOBALS_WIN()
+
#endif // ASAN_DLL_THUNK
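
The WRAP_* rewrite above swaps the hand-written (void *, void *, ...) typedefs for decltype(name) *, so the forwarding stub's function-pointer type always matches the signature declared by the macro itself. A small standalone sketch of the idea; the export-table lookup is stubbed out and all names are illustrative:

#include <cstdio>

// Stand-in for getRealProcAddressOrDie(): in the DLL thunk this would be an
// export-table lookup, here it just hands back a local implementation.
static void *lookup(const char *name);

// The function we pretend to resolve at run time.
extern "C" size_t real_strlenish(const char *s) {
  size_t n = 0;
  while (s[n]) ++n;
  return n;
}

// decltype(name) * derives the exact pointer type from the declaration, so a
// later signature change is caught at compile time instead of being hidden
// behind a mismatched hand-written cast.
#define WRAP(name)                                            \
  extern "C" size_t name(const char *s);                      \
  extern "C" size_t name(const char *s) {                     \
    typedef decltype(name) *fntype;                           \
    static fntype fn = (fntype)lookup("real_" #name);         \
    return fn(s);                                             \
  }

WRAP(strlenish)

static void *lookup(const char *name) {
  (void)name;
  return (void *)&real_strlenish;  // pretend we found it in the export table
}

int main() {
  std::printf("%zu\n", strlenish("dll thunk"));
}
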
diff --git a/lib/asan/asan_win_dynamic_runtime_thunk.cc b/lib/asan/asan_win_dynamic_runtime_thunk.cc
index 1175522e7441..8e42f03c1a0d 100644
--- a/lib/asan/asan_win_dynamic_runtime_thunk.cc
+++ b/lib/asan/asan_win_dynamic_runtime_thunk.cc
@@ -1,4 +1,4 @@
-//===-- asan_win_uar_thunk.cc ---------------------------------------------===//
+//===-- asan_win_dynamic_runtime_thunk.cc ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,22 +16,25 @@
// This includes:
// - forwarding the detect_stack_use_after_return runtime option
// - working around deficiencies of the MD runtime
-// - installing a custom SEH handlerx
+// - installing a custom SEH handler
//
//===----------------------------------------------------------------------===//
-// Only compile this code when buidling asan_dynamic_runtime_thunk.lib
+// Only compile this code when building asan_dynamic_runtime_thunk.lib
// Using #ifdef rather than relying on Makefiles etc.
// simplifies the build procedure.
#ifdef ASAN_DYNAMIC_RUNTIME_THUNK
+#include "asan_globals_win.h"
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
// First, declare CRT sections we'll be using in this file
+#pragma section(".CRT$XIB", long, read) // NOLINT
#pragma section(".CRT$XID", long, read) // NOLINT
#pragma section(".CRT$XCAB", long, read) // NOLINT
#pragma section(".CRT$XTW", long, read) // NOLINT
#pragma section(".CRT$XTY", long, read) // NOLINT
+#pragma section(".CRT$XLAB", long, read) // NOLINT
////////////////////////////////////////////////////////////////////////////////
// Define a copy of __asan_option_detect_stack_use_after_return that should be
@@ -42,14 +45,37 @@
// attribute adds __imp_ prefix to the symbol name of a variable.
// Since in general we don't know if a given TU is going to be used
// with a MT or MD runtime and we don't want to use ugly __imp_ names on Windows
-// just to work around this issue, let's clone the a variable that is
-// constant after initialization anyways.
+// just to work around this issue, let's clone the variable that is constant
+// after initialization anyways.
extern "C" {
__declspec(dllimport) int __asan_should_detect_stack_use_after_return();
-int __asan_option_detect_stack_use_after_return =
+int __asan_option_detect_stack_use_after_return;
+
+__declspec(dllimport) void* __asan_get_shadow_memory_dynamic_address();
+void* __asan_shadow_memory_dynamic_address;
+}
+
+static int InitializeClonedVariables() {
+ __asan_option_detect_stack_use_after_return =
__asan_should_detect_stack_use_after_return();
+ __asan_shadow_memory_dynamic_address =
+ __asan_get_shadow_memory_dynamic_address();
+ return 0;
+}
+
+static void NTAPI asan_thread_init(void *mod, unsigned long reason,
+ void *reserved) {
+ if (reason == DLL_PROCESS_ATTACH) InitializeClonedVariables();
}
+// Our cloned variables must be initialized before C/C++ constructors. If TLS
+// is used, our .CRT$XLAB initializer will run first. If not, our .CRT$XIB
+// initializer is needed as a backup.
+__declspec(allocate(".CRT$XIB")) int (*__asan_initialize_cloned_variables)() =
+ InitializeClonedVariables;
+__declspec(allocate(".CRT$XLAB")) void (NTAPI *__asan_tls_init)(void *,
+ unsigned long, void *) = asan_thread_init;
+
////////////////////////////////////////////////////////////////////////////////
// For some reason, the MD CRT doesn't call the C/C++ terminators on DLL
// unload or on exit. ASan relies on LLVM global_dtors to call
@@ -73,6 +99,7 @@ void UnregisterGlobals() {
int ScheduleUnregisterGlobals() {
return atexit(UnregisterGlobals);
}
+} // namespace
// We need to call 'atexit(UnregisterGlobals);' as early as possible, but after
// atexit() is initialized (.CRT$XIC). As this is executed before C++
@@ -81,8 +108,6 @@ int ScheduleUnregisterGlobals() {
__declspec(allocate(".CRT$XID"))
int (*__asan_schedule_unregister_globals)() = ScheduleUnregisterGlobals;
-} // namespace
-
////////////////////////////////////////////////////////////////////////////////
// ASan SEH handling.
// We need to set the ASan-specific SEH handler at the end of CRT initialization
@@ -97,4 +122,6 @@ __declspec(allocate(".CRT$XCAB")) int (*__asan_seh_interceptor)() =
SetSEHFilter;
}
+ASAN_LINK_GLOBALS_WIN()
+
#endif // ASAN_DYNAMIC_RUNTIME_THUNK
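
InitializeClonedVariables above exists because __declspec(dllimport) data is reached through an __imp_ pointer, so the thunk keeps a local clone that is filled in from an exported accessor before any C/C++ constructor can read it. A portable sketch of the clone-through-accessor idea; the dllimport accessor is stubbed out here, and the CRT-section plumbing that schedules the call early is the part shown in the file above:

#include <cstdio>

// Stand-in for the __declspec(dllimport) accessor exported by the runtime DLL.
extern "C" int runtime_should_detect_uar() { return 1; }

// Local clone: instrumented code reads this plain variable directly instead of
// going through an import-table indirection.
int cloned_detect_uar = 0;

// In the real thunk this runs from a .CRT$XIB / .CRT$XLAB initializer so it
// happens before user constructors; a dynamic initializer approximates that.
static int init_clones() {
  cloned_detect_uar = runtime_should_detect_uar();
  return 0;
}
static int run_init_clones = init_clones();

int main() {
  std::printf("detect_stack_use_after_return=%d\n", cloned_detect_uar);
}
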
diff --git a/lib/asan/scripts/asan_device_setup b/lib/asan/scripts/asan_device_setup
index 52794b1a441b..fdfc46f6e5b8 100755
--- a/lib/asan/scripts/asan_device_setup
+++ b/lib/asan/scripts/asan_device_setup
@@ -300,20 +300,22 @@ if [[ -n "$ASAN_RT64" ]]; then
cp "$ASAN_RT_PATH/$ASAN_RT64" "$TMPDIR/"
fi
-# FIXME: alloc_dealloc_mismatch=0 prevents a failure in libdvm startup,
-# which may or may not be a real bug (probably not).
-ASAN_OPTIONS=start_deactivated=1,alloc_dealloc_mismatch=0,malloc_context_size=0
+ASAN_OPTIONS=start_deactivated=1,malloc_context_size=0
-function generate_zygote_wrapper { # from, to, asan_rt
+# The name of a symlink to libclang_rt.asan-$ARCH-android.so used in LD_PRELOAD.
+# The idea is to have the same name in lib and lib64 to keep it from falling
+# apart when a 64-bit process spawns a 32-bit one, inheriting the environment.
+ASAN_RT_SYMLINK=symlink-to-libclang_rt.asan
+
+function generate_zygote_wrapper { # from, to
local _from=$1
local _to=$2
- local _asan_rt=$3
if [[ PRE_L -eq 0 ]]; then
# LD_PRELOAD parsing is broken in N if it starts with ":". Luckily, it is
# unset in the system environment since L.
- local _ld_preload=$_asan_rt
+ local _ld_preload=$ASAN_RT_SYMLINK
else
- local _ld_preload=\$LD_PRELOAD:$_asan_rt
+ local _ld_preload=\$LD_PRELOAD:$ASAN_RT_SYMLINK
fi
cat <<EOF >"$TMPDIR/$_from"
#!/system/bin/sh-from-zygote
@@ -342,18 +344,18 @@ if [[ -f "$TMPDIR/app_process64" ]]; then
mv "$TMPDIR/app_process32" "$TMPDIR/app_process32.real"
mv "$TMPDIR/app_process64" "$TMPDIR/app_process64.real"
fi
- generate_zygote_wrapper "app_process32" "/system/bin/app_process32.real" "$ASAN_RT"
- generate_zygote_wrapper "app_process64" "/system/bin/app_process64.real" "$ASAN_RT64"
+ generate_zygote_wrapper "app_process32" "/system/bin/app_process32.real"
+ generate_zygote_wrapper "app_process64" "/system/bin/app_process64.real"
else
# A 32-bit device.
- generate_zygote_wrapper "app_process.wrap" "/system/bin/app_process32" "$ASAN_RT"
+ generate_zygote_wrapper "app_process.wrap" "/system/bin/app_process32"
fi
# General command-line tool wrapper (use for anything that's not started as
# zygote).
cat <<EOF >"$TMPDIR/asanwrapper"
#!/system/bin/sh
-LD_PRELOAD=$ASAN_RT \\
+LD_PRELOAD=$ASAN_RT_SYMLINK \\
exec \$@
EOF
@@ -361,7 +363,7 @@ EOF
if [[ -n "$ASAN_RT64" ]]; then
cat <<EOF >"$TMPDIR/asanwrapper64"
#!/system/bin/sh
-LD_PRELOAD=$ASAN_RT64 \\
+LD_PRELOAD=$ASAN_RT_SYMLINK \\
exec \$@
EOF
@@ -412,12 +414,17 @@ if ! ( cd "$TMPDIRBASE" && diff -qr old/ new/ ) ; then
install "$TMPDIR/app_process64.real" /system/bin 755 $CTX
install "$TMPDIR/asanwrapper" /system/bin 755
install "$TMPDIR/asanwrapper64" /system/bin 755
+
+ adb_shell ln -s $ASAN_RT /system/lib/$ASAN_RT_SYMLINK
+ adb_shell ln -s $ASAN_RT64 /system/lib64/$ASAN_RT_SYMLINK
else
install "$TMPDIR/$ASAN_RT" /system/lib 644
install "$TMPDIR/app_process32" /system/bin 755 $CTX
install "$TMPDIR/app_process.wrap" /system/bin 755 $CTX
install "$TMPDIR/asanwrapper" /system/bin 755 $CTX
+ adb_shell ln -s $ASAN_RT /system/lib/$ASAN_RT_SYMLINK
+
adb_shell rm /system/bin/app_process
adb_shell ln -s /system/bin/app_process.wrap /system/bin/app_process
fi
diff --git a/lib/asan/tests/CMakeLists.txt b/lib/asan/tests/CMakeLists.txt
index e67d0fb0646f..3e56763a8041 100644
--- a/lib/asan/tests/CMakeLists.txt
+++ b/lib/asan/tests/CMakeLists.txt
@@ -106,12 +106,13 @@ set(ASAN_UNITTEST_INSTRUMENTED_LIBS)
append_list_if(ANDROID atomic ASAN_UNITTEST_INSTRUMENTED_LIBS)
set(ASAN_UNITTEST_NOINST_LINKFLAGS ${ASAN_UNITTEST_COMMON_LINKFLAGS})
-append_list_if(COMPILER_RT_HAS_LIBM -lm ASAN_UNITTEST_NOINST_LINKFLAGS)
-append_list_if(COMPILER_RT_HAS_LIBDL -ldl ASAN_UNITTEST_NOINST_LINKFLAGS)
-append_list_if(COMPILER_RT_HAS_LIBRT -lrt ASAN_UNITTEST_NOINST_LINKFLAGS)
-append_list_if(COMPILER_RT_HAS_LIBPTHREAD -pthread ASAN_UNITTEST_NOINST_LINKFLAGS)
-append_list_if(COMPILER_RT_HAS_LIBPTHREAD -pthread
- ASAN_DYNAMIC_UNITTEST_INSTRUMENTED_LINKFLAGS)
+if(NOT APPLE)
+ append_list_if(COMPILER_RT_HAS_LIBM -lm ASAN_UNITTEST_NOINST_LINKFLAGS)
+ append_list_if(COMPILER_RT_HAS_LIBDL -ldl ASAN_UNITTEST_NOINST_LINKFLAGS)
+ append_list_if(COMPILER_RT_HAS_LIBRT -lrt ASAN_UNITTEST_NOINST_LINKFLAGS)
+ append_list_if(COMPILER_RT_HAS_LIBPTHREAD -pthread ASAN_UNITTEST_NOINST_LINKFLAGS)
+ append_list_if(COMPILER_RT_HAS_LIBPTHREAD -pthread ASAN_DYNAMIC_UNITTEST_INSTRUMENTED_LINKFLAGS)
+endif()
# TODO(eugenis): move all -l flags above to _LIBS?
set(ASAN_UNITTEST_NOINST_LIBS)
@@ -193,6 +194,7 @@ set(ASAN_INST_TEST_SOURCES
asan_asm_test.cc
asan_globals_test.cc
asan_interface_test.cc
+ asan_internal_interface_test.cc
asan_test.cc
asan_oob_test.cc
asan_mem_test.cc
@@ -220,6 +222,23 @@ macro(add_asan_tests_for_arch_and_kind arch kind)
${ASAN_UNITTEST_INSTRUMENTED_CFLAGS} -ObjC ${ARGN})
endif()
+ if (MSVC)
+ # With the MSVC CRT, the choice between static and dynamic CRT is made at
+ # compile time with a macro. Simulate the effect of passing /MD to clang-cl.
+ set(ASAN_INST_DYNAMIC_TEST_OBJECTS)
+ foreach(src ${ASAN_INST_TEST_SOURCES})
+ asan_compile(ASAN_INST_DYNAMIC_TEST_OBJECTS ${src} ${arch} ${kind}
+ ${ASAN_UNITTEST_INSTRUMENTED_CFLAGS} -D_MT -D_DLL ${ARGN})
+ endforeach()
+ # Clang links the static CRT by default. Override that to use the dynamic
+ # CRT.
+ set(ASAN_DYNAMIC_UNITTEST_INSTRUMENTED_LINKFLAGS
+ ${ASAN_DYNAMIC_UNITTEST_INSTRUMENTED_LINKFLAGS}
+ -Wl,-nodefaultlib:libcmt,-defaultlib:msvcrt,-defaultlib:oldnames)
+ else()
+ set(ASAN_INST_DYNAMIC_TEST_OBJECTS ${ASAN_INST_TEST_OBJECTS})
+ endif()
+
# Create the 'default' folder where ASAN tests are produced.
if(CMAKE_CONFIGURATION_TYPES)
foreach(build_mode ${CMAKE_CONFIGURATION_TYPES})
@@ -245,7 +264,7 @@ macro(add_asan_tests_for_arch_and_kind arch kind)
add_asan_test(AsanDynamicUnitTests "Asan-${arch}${kind}-Dynamic-Test"
${arch} ${kind} SUBDIR "dynamic"
- OBJECTS ${ASAN_INST_TEST_OBJECTS}
+ OBJECTS ${ASAN_INST_DYNAMIC_TEST_OBJECTS}
LINKFLAGS ${ASAN_DYNAMIC_UNITTEST_INSTRUMENTED_LINKFLAGS})
endif()
diff --git a/lib/asan/tests/asan_asm_test.cc b/lib/asan/tests/asan_asm_test.cc
index 09af5c386079..2bb37946bb4a 100644
--- a/lib/asan/tests/asan_asm_test.cc
+++ b/lib/asan/tests/asan_asm_test.cc
@@ -57,12 +57,13 @@ template<> Type asm_read<Type>(Type *ptr) { \
return res; \
}
-#define DECLARE_ASM_REP_MOVS(Type, Movs) \
- template <> void asm_rep_movs<Type>(Type * dst, Type * src, size_t size) { \
- __asm__("rep " Movs " \n\t" \
- : \
- : "D"(dst), "S"(src), "c"(size) \
- : "rsi", "rdi", "rcx", "memory"); \
+#define DECLARE_ASM_REP_MOVS(Type, Movs) \
+ template <> \
+ void asm_rep_movs<Type>(Type * dst, Type * src, size_t size) { \
+ __asm__("rep " Movs " \n\t" \
+ : "+D"(dst), "+S"(src), "+c"(size) \
+ : \
+ : "memory"); \
}
DECLARE_ASM_WRITE(U8, "8", "movq", "r");
@@ -99,12 +100,13 @@ template<> Type asm_read<Type>(Type *ptr) { \
return res; \
}
-#define DECLARE_ASM_REP_MOVS(Type, Movs) \
- template <> void asm_rep_movs<Type>(Type * dst, Type * src, size_t size) { \
- __asm__("rep " Movs " \n\t" \
- : \
- : "D"(dst), "S"(src), "c"(size) \
- : "esi", "edi", "ecx", "memory"); \
+#define DECLARE_ASM_REP_MOVS(Type, Movs) \
+ template <> \
+ void asm_rep_movs<Type>(Type * dst, Type * src, size_t size) { \
+ __asm__("rep " Movs " \n\t" \
+ : "+D"(dst), "+S"(src), "+c"(size) \
+ : \
+ : "memory"); \
}
} // End of anonymous namespace
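
The asm_rep_movs fix above moves rdi/rsi/rcx out of the clobber list and into "+D"/"+S"/"+c" operands: rep movs advances those registers, so they must be read-write operands rather than inputs whose registers are also declared clobbered. A standalone sketch of the corrected constraint style, assuming x86/x86-64 GNU inline asm; the helper name is illustrative:

#include <cstdio>
#include <cstring>

// Copy n bytes with "rep movsb". The pointers and the count are read-write
// ("+") operands because the instruction advances rdi/rsi and decrements rcx;
// the "memory" clobber covers the stores.
static void rep_movsb_copy(void *dst, const void *src, size_t n) {
  __asm__ volatile("rep movsb"
                   : "+D"(dst), "+S"(src), "+c"(n)
                   :
                   : "memory");
}

int main() {
  char src[] = "inline asm copy";
  char dst[sizeof(src)] = {};
  rep_movsb_copy(dst, src, sizeof(src));
  std::puts(dst);
  return std::memcmp(dst, src, sizeof(src)) != 0;
}
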
diff --git a/lib/asan/tests/asan_interface_test.cc b/lib/asan/tests/asan_interface_test.cc
index f5bfb8046b0a..fd43f17716b6 100644
--- a/lib/asan/tests/asan_interface_test.cc
+++ b/lib/asan/tests/asan_interface_test.cc
@@ -100,6 +100,9 @@ TEST(AddressSanitizerInterface, GetHeapSizeTest) {
}
}
+#ifndef __powerpc64__
+// FIXME: This has not reliably worked on powerpc since r279664. Re-enable
+// this once the problem is tracked down and fixed.
static const size_t kManyThreadsMallocSizes[] = {5, 1UL<<10, 1UL<<14, 357};
static const size_t kManyThreadsIterations = 250;
static const size_t kManyThreadsNumThreads =
@@ -133,6 +136,7 @@ TEST(AddressSanitizerInterface, ManyThreadsWithStatsStressTest) {
// so we can't check for equality here.
EXPECT_LT(after_test, before_test + (1UL<<20));
}
+#endif
static void DoDoubleFree() {
int *x = Ident(new int);
diff --git a/lib/asan/tests/asan_internal_interface_test.cc b/lib/asan/tests/asan_internal_interface_test.cc
new file mode 100644
index 000000000000..ae4759478170
--- /dev/null
+++ b/lib/asan/tests/asan_internal_interface_test.cc
@@ -0,0 +1,36 @@
+//===-- asan_internal_interface_test.cc -----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+//===----------------------------------------------------------------------===//
+#include "asan_interface_internal.h"
+#include "asan_test_utils.h"
+
+TEST(AddressSanitizerInternalInterface, SetShadow) {
+ std::vector<char> buffer(17, 0xff);
+
+ __asan_set_shadow_00((uptr)buffer.data(), buffer.size());
+ EXPECT_EQ(std::vector<char>(buffer.size(), 0x00), buffer);
+
+ __asan_set_shadow_f1((uptr)buffer.data(), buffer.size());
+ EXPECT_EQ(std::vector<char>(buffer.size(), 0xf1), buffer);
+
+ __asan_set_shadow_f2((uptr)buffer.data(), buffer.size());
+ EXPECT_EQ(std::vector<char>(buffer.size(), 0xf2), buffer);
+
+ __asan_set_shadow_f3((uptr)buffer.data(), buffer.size());
+ EXPECT_EQ(std::vector<char>(buffer.size(), 0xf3), buffer);
+
+ __asan_set_shadow_f5((uptr)buffer.data(), buffer.size());
+ EXPECT_EQ(std::vector<char>(buffer.size(), 0xf5), buffer);
+
+ __asan_set_shadow_f8((uptr)buffer.data(), buffer.size());
+ EXPECT_EQ(std::vector<char>(buffer.size(), 0xf8), buffer);
+}
diff --git a/lib/asan/tests/asan_noinst_test.cc b/lib/asan/tests/asan_noinst_test.cc
index 3872dd7a7190..65acb2839ba1 100644
--- a/lib/asan/tests/asan_noinst_test.cc
+++ b/lib/asan/tests/asan_noinst_test.cc
@@ -26,6 +26,8 @@
#include <vector>
#include <limits>
+using namespace __sanitizer;
+
// ATTENTION!
// Please don't call intercepted functions (including malloc() and friends)
// in this test. The static runtime library is linked explicitly (without
@@ -168,6 +170,12 @@ void *ThreadedQuarantineTestWorker(void *unused) {
// Check that the thread local allocators are flushed when threads are
// destroyed.
TEST(AddressSanitizer, ThreadedQuarantineTest) {
+ // Run the routine once to warm up ASAN internal structures to get more
+ // predictable incremental memory changes.
+ pthread_t t;
+ PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
+ PTHREAD_JOIN(t, 0);
+
const int n_threads = 3000;
size_t mmaped1 = __sanitizer_get_heap_size();
for (int i = 0; i < n_threads; i++) {
@@ -175,6 +183,7 @@ TEST(AddressSanitizer, ThreadedQuarantineTest) {
PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
PTHREAD_JOIN(t, 0);
size_t mmaped2 = __sanitizer_get_heap_size();
+ // Figure out why this much memory is required.
EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
}
}
diff --git a/lib/asan/tests/asan_str_test.cc b/lib/asan/tests/asan_str_test.cc
index dd755875e740..c790088f8f9e 100644
--- a/lib/asan/tests/asan_str_test.cc
+++ b/lib/asan/tests/asan_str_test.cc
@@ -127,7 +127,15 @@ TEST(AddressSanitizer, StrNLenOOBTest) {
}
#endif // SANITIZER_TEST_HAS_STRNLEN
-TEST(AddressSanitizer, StrDupOOBTest) {
+// This test fails with the WinASan dynamic runtime because we fail to intercept
+// strdup.
+#if defined(_MSC_VER) && defined(_DLL)
+#define MAYBE_StrDupOOBTest DISABLED_StrDupOOBTest
+#else
+#define MAYBE_StrDupOOBTest StrDupOOBTest
+#endif
+
+TEST(AddressSanitizer, MAYBE_StrDupOOBTest) {
size_t size = Ident(42);
char *str = MallocAndMemsetString(size);
char *new_str;
@@ -457,12 +465,14 @@ TEST(AddressSanitizer, StrArgsOverlapTest) {
#if !defined(__APPLE__) || !defined(MAC_OS_X_VERSION_10_7) || \
(MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_7)
// Check "memcpy". Use Ident() to avoid inlining.
+#if PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE
memset(str, 'z', size);
Ident(memcpy)(str + 1, str + 11, 10);
Ident(memcpy)(str, str, 0);
EXPECT_DEATH(Ident(memcpy)(str, str + 14, 15), OverlapErrorMessage("memcpy"));
EXPECT_DEATH(Ident(memcpy)(str + 14, str, 15), OverlapErrorMessage("memcpy"));
#endif
+#endif
// We do not treat memcpy with to==from as a bug.
// See http://llvm.org/bugs/show_bug.cgi?id=11763.
diff --git a/lib/asan/tests/asan_test.cc b/lib/asan/tests/asan_test.cc
index 6a95c3fe1049..424a79e00a4a 100644
--- a/lib/asan/tests/asan_test.cc
+++ b/lib/asan/tests/asan_test.cc
@@ -692,7 +692,7 @@ TEST(AddressSanitizer, ThreadStackReuseTest) {
PTHREAD_JOIN(t, 0);
}
-#if defined(__i686__) || defined(__x86_64__)
+#if defined(__SSE2__)
#include <emmintrin.h>
TEST(AddressSanitizer, Store128Test) {
char *a = Ident((char*)malloc(Ident(12)));
diff --git a/lib/asan/tests/asan_test_main.cc b/lib/asan/tests/asan_test_main.cc
index d4d6de77b91b..1071d4474674 100644
--- a/lib/asan/tests/asan_test_main.cc
+++ b/lib/asan/tests/asan_test_main.cc
@@ -28,9 +28,18 @@ extern "C" const char* __asan_default_options() {
namespace __sanitizer {
bool ReexecDisabled() {
+#if __has_feature(address_sanitizer) && SANITIZER_MAC
+ // Allow re-exec in instrumented unit tests on Darwin. Technically, we only
+ // need this for 10.10 and below, where re-exec is required for the
+ // interceptors to work, but to avoid duplicating the version detection logic,
+ // let's just allow re-exec for all Darwin versions. On newer OS versions,
+ // returning 'false' doesn't do anything anyway, because we don't re-exec.
+ return false;
+#else
return true;
+#endif
}
-}
+} // namespace __sanitizer
int main(int argc, char **argv) {
testing::GTEST_FLAG(death_test_style) = "threadsafe";
diff --git a/lib/asan/tests/asan_test_utils.h b/lib/asan/tests/asan_test_utils.h
index 03d17cfb26a7..f16d939c94aa 100644
--- a/lib/asan/tests/asan_test_utils.h
+++ b/lib/asan/tests/asan_test_utils.h
@@ -62,7 +62,9 @@ typedef uint64_t U8;
static const int kPageSize = 4096;
-const size_t kLargeMalloc = 1 << 24;
+// Big enough to be handled by secondary allocator and small enough to fit into
+// quarantine for all configurations.
+const size_t kLargeMalloc = 1 << 22;
extern void free_aaa(void *p);
extern void *malloc_aaa(size_t size);
diff --git a/lib/builtins/CMakeLists.txt b/lib/builtins/CMakeLists.txt
index 44a660f5d49d..b33786a858e8 100644
--- a/lib/builtins/CMakeLists.txt
+++ b/lib/builtins/CMakeLists.txt
@@ -13,6 +13,10 @@ if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
"${CMAKE_SOURCE_DIR}/../../cmake/Modules")
include(base-config-ix)
include(CompilerRTUtils)
+
+ load_llvm_config()
+ construct_compiler_rt_default_triple()
+
if(APPLE)
include(CompilerRTDarwinUtils)
endif()
@@ -160,7 +164,11 @@ set(GENERIC_SOURCES
umodsi3.c
umodti3.c)
-if(COMPILER_RT_SUPPORTS_ATOMIC_KEYWORD)
+option(COMPILER_RT_EXCLUDE_ATOMIC_BUILTIN
+ "Skip the atomic builtin (this may be needed if system headers are unavailable)"
+ Off)
+
+if(COMPILER_RT_HAS_ATOMIC_KEYWORD AND NOT COMPILER_RT_EXCLUDE_ATOMIC_BUILTIN)
set(GENERIC_SOURCES
${GENERIC_SOURCES}
atomic.c)
@@ -261,8 +269,40 @@ else () # MSVC
endif () # if (NOT MSVC)
set(arm_SOURCES
- arm/adddf3vfp.S
- arm/addsf3vfp.S
+ arm/bswapdi2.S
+ arm/bswapsi2.S
+ arm/clzdi2.S
+ arm/clzsi2.S
+ arm/comparesf2.S
+ arm/divmodsi4.S
+ arm/divsi3.S
+ arm/modsi3.S
+ arm/sync_fetch_and_add_4.S
+ arm/sync_fetch_and_add_8.S
+ arm/sync_fetch_and_and_4.S
+ arm/sync_fetch_and_and_8.S
+ arm/sync_fetch_and_max_4.S
+ arm/sync_fetch_and_max_8.S
+ arm/sync_fetch_and_min_4.S
+ arm/sync_fetch_and_min_8.S
+ arm/sync_fetch_and_nand_4.S
+ arm/sync_fetch_and_nand_8.S
+ arm/sync_fetch_and_or_4.S
+ arm/sync_fetch_and_or_8.S
+ arm/sync_fetch_and_sub_4.S
+ arm/sync_fetch_and_sub_8.S
+ arm/sync_fetch_and_umax_4.S
+ arm/sync_fetch_and_umax_8.S
+ arm/sync_fetch_and_umin_4.S
+ arm/sync_fetch_and_umin_8.S
+ arm/sync_fetch_and_xor_4.S
+ arm/sync_fetch_and_xor_8.S
+ arm/udivmodsi4.S
+ arm/udivsi3.S
+ arm/umodsi3.S
+ ${GENERIC_SOURCES})
+
+set(arm_EABI_SOURCES
arm/aeabi_cdcmp.S
arm/aeabi_cdcmpeq_check_nan.c
arm/aeabi_cfcmp.S
@@ -279,16 +319,20 @@ set(arm_SOURCES
arm/aeabi_memmove.S
arm/aeabi_memset.S
arm/aeabi_uidivmod.S
- arm/aeabi_uldivmod.S
- arm/bswapdi2.S
- arm/bswapsi2.S
- arm/clzdi2.S
- arm/clzsi2.S
- arm/comparesf2.S
+ arm/aeabi_uldivmod.S)
+set(arm_Thumb1_JT_SOURCES
+ arm/switch16.S
+ arm/switch32.S
+ arm/switch8.S
+ arm/switchu8.S)
+set(arm_Thumb1_SjLj_EH_SOURCES
+ arm/restore_vfp_d8_d15_regs.S
+ arm/save_vfp_d8_d15_regs.S)
+set(arm_Thumb1_VFPv2_SOURCES
+ arm/adddf3vfp.S
+ arm/addsf3vfp.S
arm/divdf3vfp.S
- arm/divmodsi4.S
arm/divsf3vfp.S
- arm/divsi3.S
arm/eqdf2vfp.S
arm/eqsf2vfp.S
arm/extendsfdf2vfp.S
@@ -308,49 +352,56 @@ set(arm_SOURCES
arm/lesf2vfp.S
arm/ltdf2vfp.S
arm/ltsf2vfp.S
- arm/modsi3.S
arm/muldf3vfp.S
arm/mulsf3vfp.S
arm/nedf2vfp.S
arm/negdf2vfp.S
arm/negsf2vfp.S
arm/nesf2vfp.S
- arm/restore_vfp_d8_d15_regs.S
- arm/save_vfp_d8_d15_regs.S
arm/subdf3vfp.S
arm/subsf3vfp.S
- arm/switch16.S
- arm/switch32.S
- arm/switch8.S
- arm/switchu8.S
- arm/sync_fetch_and_add_4.S
- arm/sync_fetch_and_add_8.S
- arm/sync_fetch_and_and_4.S
- arm/sync_fetch_and_and_8.S
- arm/sync_fetch_and_max_4.S
- arm/sync_fetch_and_max_8.S
- arm/sync_fetch_and_min_4.S
- arm/sync_fetch_and_min_8.S
- arm/sync_fetch_and_nand_4.S
- arm/sync_fetch_and_nand_8.S
- arm/sync_fetch_and_or_4.S
- arm/sync_fetch_and_or_8.S
- arm/sync_fetch_and_sub_4.S
- arm/sync_fetch_and_sub_8.S
- arm/sync_fetch_and_umax_4.S
- arm/sync_fetch_and_umax_8.S
- arm/sync_fetch_and_umin_4.S
- arm/sync_fetch_and_umin_8.S
- arm/sync_fetch_and_xor_4.S
- arm/sync_fetch_and_xor_8.S
- arm/sync_synchronize.S
arm/truncdfsf2vfp.S
- arm/udivmodsi4.S
- arm/udivsi3.S
- arm/umodsi3.S
arm/unorddf2vfp.S
- arm/unordsf2vfp.S
- ${GENERIC_SOURCES})
+ arm/unordsf2vfp.S)
+set(arm_Thumb1_icache_SOURCES
+ arm/sync_synchronize.S)
+set(arm_Thumb1_SOURCES
+ ${arm_Thumb1_JT_SOURCES}
+ ${arm_Thumb1_SjLj_EH_SOURCES}
+ ${arm_Thumb1_VFPv2_SOURCES}
+ ${arm_Thumb1_icache_SOURCES})
+
+if(MINGW)
+ set(arm_SOURCES
+ arm/aeabi_idivmod.S
+ arm/aeabi_ldivmod.S
+ arm/aeabi_uidivmod.S
+ arm/aeabi_uldivmod.S
+ divmoddi4.c
+ divmodsi4.c
+ divdi3.c
+ divsi3.c
+ fixdfdi.c
+ fixsfdi.c
+ fixunsdfdi.c
+ fixunssfdi.c
+ floatdidf.c
+ floatdisf.c
+ floatundidf.c
+ floatundisf.c
+ mingw_fixfloat.c
+ moddi3.c
+ udivmoddi4.c
+ udivmodsi4.c
+ udivsi3.c
+ umoddi3.c)
+elseif(NOT WIN32)
+ # TODO the EABI sources should only be added to EABI targets
+ set(arm_SOURCES
+ ${arm_SOURCES}
+ ${arm_EABI_SOURCES}
+ ${arm_Thumb1_SOURCES})
+endif()
set(aarch64_SOURCES
comparetf2.c
@@ -398,7 +449,24 @@ if (APPLE)
add_subdirectory(macho_embedded)
darwin_add_builtin_libraries(${BUILTIN_SUPPORTED_OS})
else ()
- append_string_if(COMPILER_RT_HAS_STD_C99_FLAG -std=gnu99 maybe_stdc99)
+ set(BUILTIN_CFLAGS "")
+
+ append_list_if(COMPILER_RT_HAS_STD_C11_FLAG -std=c11 BUILTIN_CFLAGS)
+
+ # These flags would normally be added to CMAKE_C_FLAGS by the llvm
+ # cmake step. Add them manually if this is a standalone build.
+ if(COMPILER_RT_STANDALONE_BUILD)
+ append_list_if(COMPILER_RT_HAS_FPIC_FLAG -fPIC BUILTIN_CFLAGS)
+ append_list_if(COMPILER_RT_HAS_FNO_BUILTIN_FLAG -fno-builtin BUILTIN_CFLAGS)
+ append_list_if(COMPILER_RT_HAS_VISIBILITY_HIDDEN_FLAG -fvisibility=hidden BUILTIN_CFLAGS)
+ if(NOT COMPILER_RT_DEBUG)
+ append_list_if(COMPILER_RT_HAS_OMIT_FRAME_POINTER_FLAG -fomit-frame-pointer BUILTIN_CFLAGS)
+ endif()
+ endif()
+
+ set(BUILTIN_DEFS "")
+
+ append_list_if(COMPILER_RT_HAS_VISIBILITY_HIDDEN_FLAG VISIBILITY_HIDDEN BUILTIN_DEFS)
foreach (arch ${BUILTIN_SUPPORTED_ARCH})
if (CAN_TARGET_${arch})
@@ -413,11 +481,18 @@ else ()
endif ()
endforeach ()
+ # Needed for clear_cache in debug mode, due to r7's usage in inline asm.
+ # Release mode already sets it via -O2/3; Debug mode doesn't.
+ if (${arch} STREQUAL "armhf")
+ list(APPEND BUILTIN_CFLAGS -fomit-frame-pointer)
+ endif()
+
add_compiler_rt_runtime(clang_rt.builtins
STATIC
ARCHS ${arch}
SOURCES ${${arch}_SOURCES}
- CFLAGS ${maybe_stdc99}
+ DEFS ${BUILTIN_DEFS}
+ CFLAGS ${BUILTIN_CFLAGS}
PARENT_TARGET builtins)
endif ()
endforeach ()
diff --git a/lib/builtins/Makefile.mk b/lib/builtins/Makefile.mk
deleted file mode 100644
index 00e2f53fc401..000000000000
--- a/lib/builtins/Makefile.mk
+++ /dev/null
@@ -1,25 +0,0 @@
-#===- lib/builtins/Makefile.mk -----------------------------*- Makefile -*--===#
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-#===------------------------------------------------------------------------===#
-
-ModuleName := builtins
-SubDirs :=
-
-# Add arch specific optimized implementations.
-SubDirs += i386 ppc x86_64 arm armv6m
-
-# Add ARM64 dir.
-SubDirs += arm64
-
-# Define the variables for this specific directory.
-Sources := $(foreach file,$(wildcard $(Dir)/*.c),$(notdir $(file)))
-ObjNames := $(Sources:%.c=%.o)
-Implementation := Generic
-
-# FIXME: use automatic dependencies?
-Dependencies := $(wildcard $(Dir)/*.h)
diff --git a/lib/builtins/arm/Makefile.mk b/lib/builtins/arm/Makefile.mk
deleted file mode 100644
index ed2e8323e391..000000000000
--- a/lib/builtins/arm/Makefile.mk
+++ /dev/null
@@ -1,20 +0,0 @@
-#===- lib/builtins/arm/Makefile.mk -------------------------*- Makefile -*--===#
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-#===------------------------------------------------------------------------===#
-
-ModuleName := builtins
-SubDirs :=
-OnlyArchs := armv5 armv6 armv7 armv7k armv7m armv7em armv7s
-
-AsmSources := $(foreach file,$(wildcard $(Dir)/*.S),$(notdir $(file)))
-Sources := $(foreach file,$(wildcard $(Dir)/*.c),$(notdir $(file)))
-ObjNames := $(Sources:%.c=%.o) $(AsmSources:%.S=%.o)
-Implementation := Optimized
-
-# FIXME: use automatic dependencies?
-Dependencies := $(wildcard lib/*.h $(Dir)/*.h)
diff --git a/lib/builtins/arm/aeabi_idivmod.S b/lib/builtins/arm/aeabi_idivmod.S
index 2fcad862f73a..b43ea699058d 100644
--- a/lib/builtins/arm/aeabi_idivmod.S
+++ b/lib/builtins/arm/aeabi_idivmod.S
@@ -15,16 +15,34 @@
// return {quot, rem};
// }
+#if defined(__MINGW32__)
+#define __aeabi_idivmod __rt_sdiv
+#endif
+
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_idivmod)
+#if __ARM_ARCH_ISA_THUMB == 1
+ push {r0, r1, lr}
+ bl SYMBOL_NAME(__divsi3)
+ pop {r1, r2, r3} // now r0 = quot, r1 = num, r2 = denom
+ muls r2, r2, r0 // r2 = quot * denom
+ subs r1, r1, r2
+ JMP (r3)
+#else
push { lr }
sub sp, sp, #4
mov r2, sp
+#if defined(__MINGW32__)
+ mov r3, r0
+ mov r0, r1
+ mov r1, r3
+#endif
bl SYMBOL_NAME(__divmodsi4)
ldr r1, [sp]
add sp, sp, #4
pop { pc }
+#endif // __ARM_ARCH_ISA_THUMB == 1
END_COMPILERRT_FUNCTION(__aeabi_idivmod)
NO_EXEC_STACK_DIRECTIVE
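
__aeabi_idivmod returns the quotient in r0 and the remainder in r1, which is why the new Thumb-1 path above recomputes the remainder as num - quot * denom after calling __divsi3. A C-level reference of the contract, assuming the truncating division the EABI requires; the struct return mirrors the {quot, rem} comment in the file and the names are illustrative:

#include <cstdio>

struct idivmod_result {
  int quot;
  int rem;
};

// Reference semantics: C's '/' already truncates toward zero, and the identity
// num == quot * denom + rem holds, so rem can be recovered by multiply-subtract.
static idivmod_result ref_idivmod(int num, int denom) {
  idivmod_result r;
  r.quot = num / denom;
  r.rem = num - r.quot * denom;  // same value as num % denom
  return r;
}

int main() {
  idivmod_result r = ref_idivmod(-7, 3);
  std::printf("quot=%d rem=%d\n", r.quot, r.rem);  // quot=-2 rem=-1
}
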
diff --git a/lib/builtins/arm/aeabi_ldivmod.S b/lib/builtins/arm/aeabi_ldivmod.S
index 9f161f3007f6..3dae14ef07ec 100644
--- a/lib/builtins/arm/aeabi_ldivmod.S
+++ b/lib/builtins/arm/aeabi_ldivmod.S
@@ -16,6 +16,10 @@
// return {quot, rem};
// }
+#if defined(__MINGW32__)
+#define __aeabi_ldivmod __rt_sdiv64
+#endif
+
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_ldivmod)
@@ -23,6 +27,14 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_ldivmod)
sub sp, sp, #16
add r12, sp, #8
str r12, [sp]
+#if defined(__MINGW32__)
+ mov r12, r0
+ mov r0, r2
+ mov r2, r12
+ mov r12, r1
+ mov r1, r3
+ mov r3, r12
+#endif
bl SYMBOL_NAME(__divmoddi4)
ldr r2, [sp, #8]
ldr r3, [sp, #12]
diff --git a/lib/builtins/arm/aeabi_uidivmod.S b/lib/builtins/arm/aeabi_uidivmod.S
index e1e12d97aa00..7098bc6ff92e 100644
--- a/lib/builtins/arm/aeabi_uidivmod.S
+++ b/lib/builtins/arm/aeabi_uidivmod.S
@@ -16,16 +16,40 @@
// return {quot, rem};
// }
+#if defined(__MINGW32__)
+#define __aeabi_uidivmod __rt_udiv
+#endif
+
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_uidivmod)
+#if __ARM_ARCH_ISA_THUMB == 1
+ cmp r0, r1
+ bcc LOCAL_LABEL(case_denom_larger)
+ push {r0, r1, lr}
+ bl SYMBOL_NAME(__aeabi_uidiv)
+ pop {r1, r2, r3}
+ muls r2, r2, r0 // r2 = quot * denom
+ subs r1, r1, r2
+ JMP (r3)
+LOCAL_LABEL(case_denom_larger):
+ movs r1, r0
+ movs r0, #0
+ JMP (lr)
+#else
push { lr }
sub sp, sp, #4
mov r2, sp
+#if defined(__MINGW32__)
+ mov r3, r0
+ mov r0, r1
+ mov r1, r3
+#endif
bl SYMBOL_NAME(__udivmodsi4)
ldr r1, [sp]
add sp, sp, #4
pop { pc }
+#endif
END_COMPILERRT_FUNCTION(__aeabi_uidivmod)
NO_EXEC_STACK_DIRECTIVE
diff --git a/lib/builtins/arm/aeabi_uldivmod.S b/lib/builtins/arm/aeabi_uldivmod.S
index e8aaef282e90..bc26e5674ca0 100644
--- a/lib/builtins/arm/aeabi_uldivmod.S
+++ b/lib/builtins/arm/aeabi_uldivmod.S
@@ -16,6 +16,10 @@
// return {quot, rem};
// }
+#if defined(__MINGW32__)
+#define __aeabi_uldivmod __rt_udiv64
+#endif
+
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_uldivmod)
@@ -23,6 +27,14 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_uldivmod)
sub sp, sp, #16
add r12, sp, #8
str r12, [sp]
+#if defined(__MINGW32__)
+ mov r12, r0
+ mov r0, r2
+ mov r2, r12
+ mov r12, r1
+ mov r1, r3
+ mov r3, r12
+#endif
bl SYMBOL_NAME(__udivmoddi4)
ldr r2, [sp, #8]
ldr r3, [sp, #12]
diff --git a/lib/builtins/arm/comparesf2.S b/lib/builtins/arm/comparesf2.S
index 52597b673f96..6d7019545475 100644
--- a/lib/builtins/arm/comparesf2.S
+++ b/lib/builtins/arm/comparesf2.S
@@ -39,6 +39,9 @@
#include "../assembly.h"
.syntax unified
+#if __ARM_ARCH_ISA_THUMB == 2
+.thumb
+#endif
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__eqsf2)
diff --git a/lib/builtins/arm/divsi3.S b/lib/builtins/arm/divsi3.S
index 7e23ba4fc237..f066f60ad96d 100644
--- a/lib/builtins/arm/divsi3.S
+++ b/lib/builtins/arm/divsi3.S
@@ -49,17 +49,37 @@ LOCAL_LABEL(divzero):
#else
ESTABLISH_FRAME
// Set aside the sign of the quotient.
+# if __ARM_ARCH_ISA_THUMB == 1
+ movs r4, r0
+ eors r4, r1
+# else
eor r4, r0, r1
+# endif
// Take absolute value of a and b via abs(x) = (x^(x >> 31)) - (x >> 31).
+# if __ARM_ARCH_ISA_THUMB == 1
+ asrs r2, r0, #31
+ asrs r3, r1, #31
+ eors r0, r2
+ eors r1, r3
+ subs r0, r0, r2
+ subs r1, r1, r3
+# else
eor r2, r0, r0, asr #31
eor r3, r1, r1, asr #31
sub r0, r2, r0, asr #31
sub r1, r3, r1, asr #31
+# endif
// abs(a) / abs(b)
bl SYMBOL_NAME(__udivsi3)
// Apply sign of quotient to result and return.
+# if __ARM_ARCH_ISA_THUMB == 1
+ asrs r4, #31
+ eors r0, r4
+ subs r0, r0, r4
+# else
eor r0, r0, r4, asr #31
sub r0, r0, r4, asr #31
+# endif
CLEAR_FRAME_AND_RETURN
#endif
END_COMPILERRT_FUNCTION(__divsi3)
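
The Thumb1 and ARM sequences above follow the same arithmetic spelled out in the comments: save the sign as a ^ b, take branch-free absolute values with abs(x) = (x ^ (x >> 31)) - (x >> 31), divide unsigned, then conditionally negate. A C++ sketch of that arithmetic (it assumes the usual arithmetic right shift on signed values, which the asr instructions guarantee in the assembly):

    #include <cstdio>
    #include <cstdint>

    static int32_t Divsi3Model(int32_t a, int32_t b) {
      int32_t sign = a ^ b;                      // eor r4, r0, r1
      int32_t sa = a >> 31, sb = b >> 31;        // asr #31: 0 or -1
      uint32_t ua = (uint32_t)((a ^ sa) - sa);   // abs(a)
      uint32_t ub = (uint32_t)((b ^ sb) - sb);   // abs(b)
      uint32_t uq = ua / ub;                     // bl __udivsi3
      int32_t s = sign >> 31;                    // -1 iff the signs differ
      return (int32_t)((uq ^ (uint32_t)s) - (uint32_t)s);  // negate if needed
    }

    int main() {
      std::printf("%d %d %d\n", Divsi3Model(-12, 4), Divsi3Model(12, -4),
                  Divsi3Model(-12, -4));         // -3 -3 3
      return 0;
    }
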
diff --git a/lib/builtins/arm/udivsi3.S b/lib/builtins/arm/udivsi3.S
index 085f8fb9e2df..fcc472b4f3d9 100644
--- a/lib/builtins/arm/udivsi3.S
+++ b/lib/builtins/arm/udivsi3.S
@@ -40,12 +40,26 @@ DEFINE_COMPILERRT_FUNCTION(__udivsi3)
#else
cmp r1, #1
bcc LOCAL_LABEL(divby0)
+#if __ARM_ARCH_ISA_THUMB == 1
+ bne LOCAL_LABEL(num_neq_denom)
+ JMP(lr)
+LOCAL_LABEL(num_neq_denom):
+#else
IT(eq)
JMPc(lr, eq)
+#endif
cmp r0, r1
+#if __ARM_ARCH_ISA_THUMB == 1
+ bhs LOCAL_LABEL(num_ge_denom)
+ movs r0, #0
+ JMP(lr)
+LOCAL_LABEL(num_ge_denom):
+#else
ITT(cc)
movcc r0, #0
JMPc(lr, cc)
+#endif
+
/*
* Implement division using binary long division algorithm.
*
@@ -62,7 +76,7 @@ DEFINE_COMPILERRT_FUNCTION(__udivsi3)
* that (r0 << shift) < 2 * r1. The quotient is stored in r3.
*/
-# ifdef __ARM_FEATURE_CLZ
+# if defined(__ARM_FEATURE_CLZ)
clz ip, r0
clz r3, r1
/* r0 >= r1 implies clz(r0) <= clz(r1), so ip <= r3. */
@@ -77,49 +91,128 @@ DEFINE_COMPILERRT_FUNCTION(__udivsi3)
sub ip, ip, r3, lsl #3
mov r3, #0
bx ip
-# else
+# else /* No CLZ Feature */
# if __ARM_ARCH_ISA_THUMB == 2
# error THUMB mode requires CLZ or UDIV
# endif
+# if __ARM_ARCH_ISA_THUMB == 1
+# define BLOCK_SIZE 10
+# else
+# define BLOCK_SIZE 12
+# endif
+
mov r2, r0
+# if __ARM_ARCH_ISA_THUMB == 1
+ mov ip, r0
+ adr r0, LOCAL_LABEL(div0block)
+ adds r0, #1
+# else
adr ip, LOCAL_LABEL(div0block)
-
- lsr r3, r2, #16
+# endif
+ lsrs r3, r2, #16
cmp r3, r1
+# if __ARM_ARCH_ISA_THUMB == 1
+ blo LOCAL_LABEL(skip_16)
+ movs r2, r3
+ subs r0, r0, #(16 * BLOCK_SIZE)
+LOCAL_LABEL(skip_16):
+# else
movhs r2, r3
- subhs ip, ip, #(16 * 12)
+ subhs ip, ip, #(16 * BLOCK_SIZE)
+# endif
- lsr r3, r2, #8
+ lsrs r3, r2, #8
cmp r3, r1
+# if __ARM_ARCH_ISA_THUMB == 1
+ blo LOCAL_LABEL(skip_8)
+ movs r2, r3
+ subs r0, r0, #(8 * BLOCK_SIZE)
+LOCAL_LABEL(skip_8):
+# else
movhs r2, r3
- subhs ip, ip, #(8 * 12)
+ subhs ip, ip, #(8 * BLOCK_SIZE)
+# endif
- lsr r3, r2, #4
+ lsrs r3, r2, #4
cmp r3, r1
+# if __ARM_ARCH_ISA_THUMB == 1
+ blo LOCAL_LABEL(skip_4)
+ movs r2, r3
+ subs r0, r0, #(4 * BLOCK_SIZE)
+LOCAL_LABEL(skip_4):
+# else
movhs r2, r3
- subhs ip, #(4 * 12)
+ subhs ip, #(4 * BLOCK_SIZE)
+# endif
- lsr r3, r2, #2
+ lsrs r3, r2, #2
cmp r3, r1
+# if __ARM_ARCH_ISA_THUMB == 1
+ blo LOCAL_LABEL(skip_2)
+ movs r2, r3
+ subs r0, r0, #(2 * BLOCK_SIZE)
+LOCAL_LABEL(skip_2):
+# else
movhs r2, r3
- subhs ip, ip, #(2 * 12)
+ subhs ip, ip, #(2 * BLOCK_SIZE)
+# endif
/* Last block, no need to update r2 or r3. */
+# if __ARM_ARCH_ISA_THUMB == 1
+ lsrs r3, r2, #1
+ cmp r3, r1
+ blo LOCAL_LABEL(skip_1)
+ subs r0, r0, #(1 * BLOCK_SIZE)
+LOCAL_LABEL(skip_1):
+ movs r2, r0
+ mov r0, ip
+ movs r3, #0
+ JMP (r2)
+
+# else
cmp r1, r2, lsr #1
- subls ip, ip, #(1 * 12)
+ subls ip, ip, #(1 * BLOCK_SIZE)
- mov r3, #0
+ movs r3, #0
JMP(ip)
-# endif
+# endif
+# endif /* __ARM_FEATURE_CLZ */
+
#define IMM #
+ /* due to the limited branch range in Thumb1, we have to place the
+ divby0 block closer to the branch that reaches it */
+LOCAL_LABEL(divby0):
+ movs r0, #0
+# if defined(__ARM_EABI__)
+ bl __aeabi_idiv0 // due to relocation limit, can't use b.
+# endif
+ JMP(lr)
+
+#if __ARM_ARCH_ISA_THUMB == 1
+#define block(shift) \
+ lsls r2, r1, IMM shift; \
+ cmp r0, r2; \
+ blo LOCAL_LABEL(block_skip_##shift); \
+ subs r0, r0, r2; \
+ LOCAL_LABEL(block_skip_##shift) :; \
+ adcs r3, r3 /* same as ((r3 << 1) | Carry). Carry is set if r0 >= r2. */
+
+ /* TODO: if the current location counter is not word aligned, we don't
+ need the .p2align and nop */
+ /* Label div0block must be word-aligned. First align block 31 */
+ .p2align 2
+ nop /* Padding: the 31 blocks before div0block occupy 310 bytes, so this 2-byte nop keeps div0block word-aligned */
+
+#else
#define block(shift) \
cmp r0, r1, lsl IMM shift; \
ITT(hs); \
WIDE(addhs) r3, r3, IMM (1 << shift); \
WIDE(subhs) r0, r0, r1, lsl IMM shift
+#endif
block(31)
block(30)
@@ -159,12 +252,14 @@ LOCAL_LABEL(div0block):
JMP(lr)
#endif /* __ARM_ARCH_EXT_IDIV__ */
+#if __ARM_ARCH_EXT_IDIV__
LOCAL_LABEL(divby0):
- mov r0, #0
-#ifdef __ARM_EABI__
- b __aeabi_idiv0
-#else
- JMP(lr)
+ mov r0, #0
+# ifdef __ARM_EABI__
+ b __aeabi_idiv0
+# else
+ JMP(lr)
+# endif
#endif
END_COMPILERRT_FUNCTION(__udivsi3)
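
The unrolled block(shift) macros implement plain binary long division: at each shift, subtract denominator << shift when it fits and set that bit of the quotient. The BLOCK_SIZE bookkeeping only exists so the code can branch into the middle of the unrolled sequence instead of looping. A compact C++ model of the loop being unrolled (illustrative; the runtime never actually loops):

    #include <cstdio>
    #include <cstdint>

    static uint32_t Udivsi3Model(uint32_t num, uint32_t den) {
      if (den == 0) return 0;             // the runtime raises __aeabi_idiv0 here
      uint32_t quot = 0;
      for (int shift = 31; shift >= 0; --shift) {
        if ((num >> shift) >= den) {      // compare against the shifted denominator
          num -= den << shift;            // subhs r0, r0, r1, lsl #shift
          quot |= 1u << shift;            // addhs r3, r3, #(1 << shift)
        }
      }
      return quot;                        // the remainder is what is left in num
    }

    int main() {
      std::printf("%u\n", Udivsi3Model(100, 7));  // 14
      return 0;
    }

Comparing (num >> shift) >= den mirrors the staged lsrs/cmp selection above and avoids ever shifting the denominator out of range.
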
diff --git a/lib/builtins/arm64/Makefile.mk b/lib/builtins/arm64/Makefile.mk
deleted file mode 100644
index 7f7e38661303..000000000000
--- a/lib/builtins/arm64/Makefile.mk
+++ /dev/null
@@ -1,20 +0,0 @@
-#===- lib/builtins/arm64/Makefile.mk -----------------------*- Makefile -*--===#
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-#===------------------------------------------------------------------------===#
-
-ModuleName := builtins
-SubDirs :=
-OnlyArchs := arm64
-
-AsmSources := $(foreach file,$(wildcard $(Dir)/*.S),$(notdir $(file)))
-Sources := $(foreach file,$(wildcard $(Dir)/*.c),$(notdir $(file)))
-ObjNames := $(Sources:%.c=%.o) $(AsmSources:%.S=%.o)
-Implementation := Optimized
-
-# FIXME: use automatic dependencies?
-Dependencies := $(wildcard lib/*.h $(Dir)/*.h)
diff --git a/lib/builtins/armv6m/Makefile.mk b/lib/builtins/armv6m/Makefile.mk
deleted file mode 100644
index f3c1807f01b8..000000000000
--- a/lib/builtins/armv6m/Makefile.mk
+++ /dev/null
@@ -1,20 +0,0 @@
-#===- lib/builtins/arm/Makefile.mk -------------------------*- Makefile -*--===#
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-#===------------------------------------------------------------------------===#
-
-ModuleName := builtins
-SubDirs :=
-OnlyArchs := armv6m
-
-AsmSources := $(foreach file,$(wildcard $(Dir)/*.S),$(notdir $(file)))
-Sources := $(foreach file,$(wildcard $(Dir)/*.c),$(notdir $(file)))
-ObjNames := $(Sources:%.c=%.o) $(AsmSources:%.S=%.o)
-Implementation := Optimized
-
-# FIXME: use automatic dependencies?
-Dependencies := $(wildcard lib/*.h $(Dir)/*.h)
diff --git a/lib/builtins/assembly.h b/lib/builtins/assembly.h
index 5fc74f68f603..29d9f8844a6a 100644
--- a/lib/builtins/assembly.h
+++ b/lib/builtins/assembly.h
@@ -70,7 +70,7 @@
#if defined(__ARM_ARCH_4T__) || __ARM_ARCH >= 5
#define ARM_HAS_BX
#endif
-#if !defined(__ARM_FEATURE_CLZ) && \
+#if !defined(__ARM_FEATURE_CLZ) && __ARM_ARCH_ISA_THUMB != 1 && \
(__ARM_ARCH >= 6 || (__ARM_ARCH == 5 && !defined(__ARM_ARCH_5__)))
#define __ARM_FEATURE_CLZ
#endif
@@ -149,6 +149,7 @@
#define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target) \
.globl SYMBOL_NAME(name) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
+ DECLARE_SYMBOL_VISIBILITY(SYMBOL_NAME(name)) SEPARATOR \
.set SYMBOL_NAME(name), SYMBOL_NAME(target) SEPARATOR
#if defined(__ARM_EABI__)
diff --git a/lib/builtins/atomic.c b/lib/builtins/atomic.c
index f1ddc3e0c522..ee35e342eda5 100644
--- a/lib/builtins/atomic.c
+++ b/lib/builtins/atomic.c
@@ -229,13 +229,20 @@ void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
// Where the size is known at compile time, the compiler may emit calls to
// specialised versions of the above functions.
////////////////////////////////////////////////////////////////////////////////
+#ifdef __SIZEOF_INT128__
#define OPTIMISED_CASES\
OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)\
OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)\
OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)\
OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)\
- /* FIXME: __uint128_t isn't available on 32 bit platforms.
- OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)*/\
+ OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)
+#else
+#define OPTIMISED_CASES\
+ OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)\
+ OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)\
+ OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)\
+ OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)
+#endif
#define OPTIMISED_CASE(n, lockfree, type)\
type __atomic_load_##n(type *src, int model) {\
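
The change above gates the 16-byte specialisations on __SIZEOF_INT128__, which the compiler only predefines when __uint128_t actually exists (typically on 64-bit targets). A small stand-alone check of that guard, which is all the #if relies on (illustrative):

    #include <cstdio>

    int main() {
    #ifdef __SIZEOF_INT128__
      // OPTIMISED_CASE(16, ...) is emitted: __uint128_t is a real type here.
      __uint128_t v = (__uint128_t)1 << 100;
      std::printf("int128 available, v >> 64 = %llu\n",
                  (unsigned long long)(v >> 64));   // 68719476736
    #else
      std::printf("no int128: only the 1/2/4/8-byte cases are generated\n");
    #endif
      return 0;
    }
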
diff --git a/lib/builtins/clear_cache.c b/lib/builtins/clear_cache.c
index 55bbdd375891..4c2ac3b1eb05 100644
--- a/lib/builtins/clear_cache.c
+++ b/lib/builtins/clear_cache.c
@@ -110,10 +110,12 @@ void __clear_cache(void *start, void *end) {
#elif defined(__linux__)
register int start_reg __asm("r0") = (int) (intptr_t) start;
const register int end_reg __asm("r1") = (int) (intptr_t) end;
+ const register int flags __asm("r2") = 0;
const register int syscall_nr __asm("r7") = __ARM_NR_cacheflush;
__asm __volatile("svc 0x0"
: "=r"(start_reg)
- : "r"(syscall_nr), "r"(start_reg), "r"(end_reg));
+ : "r"(syscall_nr), "r"(start_reg), "r"(end_reg),
+ "r"(flags));
if (start_reg != 0) {
compilerrt_abort();
}
diff --git a/lib/builtins/i386/Makefile.mk b/lib/builtins/i386/Makefile.mk
deleted file mode 100644
index f3776a02c0de..000000000000
--- a/lib/builtins/i386/Makefile.mk
+++ /dev/null
@@ -1,20 +0,0 @@
-#===- lib/builtins/i386/Makefile.mk ------------------------*- Makefile -*--===#
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-#===------------------------------------------------------------------------===#
-
-ModuleName := builtins
-SubDirs :=
-OnlyArchs := i386
-
-AsmSources := $(foreach file,$(wildcard $(Dir)/*.S),$(notdir $(file)))
-Sources := $(foreach file,$(wildcard $(Dir)/*.c),$(notdir $(file)))
-ObjNames := $(Sources:%.c=%.o) $(AsmSources:%.S=%.o)
-Implementation := Optimized
-
-# FIXME: use automatic dependencies?
-Dependencies := $(wildcard lib/*.h $(Dir)/*.h)
diff --git a/lib/builtins/int_lib.h b/lib/builtins/int_lib.h
index 8dfe5672d131..39eee18d9149 100644
--- a/lib/builtins/int_lib.h
+++ b/lib/builtins/int_lib.h
@@ -91,14 +91,14 @@ COMPILER_RT_ABI tu_int __udivmodti4(tu_int a, tu_int b, tu_int* rem);
#include <intrin.h>
uint32_t __inline __builtin_ctz(uint32_t value) {
- uint32_t trailing_zero = 0;
+ unsigned long trailing_zero = 0;
if (_BitScanForward(&trailing_zero, value))
return trailing_zero;
return 32;
}
uint32_t __inline __builtin_clz(uint32_t value) {
- uint32_t leading_zero = 0;
+ unsigned long leading_zero = 0;
if (_BitScanReverse(&leading_zero, value))
return 31 - leading_zero;
return 32;
@@ -106,7 +106,7 @@ uint32_t __inline __builtin_clz(uint32_t value) {
#if defined(_M_ARM) || defined(_M_X64)
uint32_t __inline __builtin_clzll(uint64_t value) {
- uint32_t leading_zero = 0;
+ unsigned long leading_zero = 0;
if (_BitScanReverse64(&leading_zero, value))
return 63 - leading_zero;
return 64;
diff --git a/lib/builtins/mingw_fixfloat.c b/lib/builtins/mingw_fixfloat.c
new file mode 100644
index 000000000000..c462e0dbf654
--- /dev/null
+++ b/lib/builtins/mingw_fixfloat.c
@@ -0,0 +1,36 @@
+/* ===-- mingw_fixfloat.c - Wrap int/float conversions for arm/windows -----===
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===----------------------------------------------------------------------===
+ */
+
+#include "int_lib.h"
+
+COMPILER_RT_ABI di_int __fixdfdi(double a);
+COMPILER_RT_ABI di_int __fixsfdi(float a);
+COMPILER_RT_ABI du_int __fixunsdfdi(double a);
+COMPILER_RT_ABI du_int __fixunssfdi(float a);
+COMPILER_RT_ABI double __floatdidf(di_int a);
+COMPILER_RT_ABI float __floatdisf(di_int a);
+COMPILER_RT_ABI double __floatundidf(du_int a);
+COMPILER_RT_ABI float __floatundisf(du_int a);
+
+COMPILER_RT_ABI di_int __dtoi64(double a) { return __fixdfdi(a); }
+
+COMPILER_RT_ABI di_int __stoi64(float a) { return __fixsfdi(a); }
+
+COMPILER_RT_ABI du_int __dtou64(double a) { return __fixunsdfdi(a); }
+
+COMPILER_RT_ABI du_int __stou64(float a) { return __fixunssfdi(a); }
+
+COMPILER_RT_ABI double __i64tod(di_int a) { return __floatdidf(a); }
+
+COMPILER_RT_ABI float __i64tos(di_int a) { return __floatdisf(a); }
+
+COMPILER_RT_ABI double __u64tod(du_int a) { return __floatundidf(a); }
+
+COMPILER_RT_ABI float __u64tos(du_int a) { return __floatundisf(a); }
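
These wrappers simply hand the Microsoft ARM runtime names over to the generic compiler-rt conversions, so each one must behave like the corresponding C cast. A tiny reference sketch (illustrative only; it uses plain casts rather than calling the wrappers, which exist only on that target):

    #include <cstdio>

    int main() {
      double d = 123456789.75;
      unsigned long long u = 9876543210ULL;
      std::printf("%lld\n", (long long)d);   // what __dtoi64(d) must return
      std::printf("%f\n", (double)u);        // what __u64tod(u) must return
      return 0;
    }
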
diff --git a/lib/builtins/ppc/Makefile.mk b/lib/builtins/ppc/Makefile.mk
deleted file mode 100644
index 0adc623aa041..000000000000
--- a/lib/builtins/ppc/Makefile.mk
+++ /dev/null
@@ -1,20 +0,0 @@
-#===- lib/builtins/ppc/Makefile.mk -------------------------*- Makefile -*--===#
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-#===------------------------------------------------------------------------===#
-
-ModuleName := builtins
-SubDirs :=
-OnlyArchs := ppc
-
-AsmSources := $(foreach file,$(wildcard $(Dir)/*.S),$(notdir $(file)))
-Sources := $(foreach file,$(wildcard $(Dir)/*.c),$(notdir $(file)))
-ObjNames := $(Sources:%.c=%.o) $(AsmSources:%.S=%.o)
-Implementation := Optimized
-
-# FIXME: use automatic dependencies?
-Dependencies := $(wildcard lib/*.h $(Dir)/*.h)
diff --git a/lib/builtins/x86_64/Makefile.mk b/lib/builtins/x86_64/Makefile.mk
deleted file mode 100644
index 83848dddd964..000000000000
--- a/lib/builtins/x86_64/Makefile.mk
+++ /dev/null
@@ -1,20 +0,0 @@
-#===- lib/builtins/x86_64/Makefile.mk ----------------------*- Makefile -*--===#
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-#===------------------------------------------------------------------------===#
-
-ModuleName := builtins
-SubDirs :=
-OnlyArchs := x86_64 x86_64h
-
-AsmSources := $(foreach file,$(wildcard $(Dir)/*.S),$(notdir $(file)))
-Sources := $(foreach file,$(wildcard $(Dir)/*.c),$(notdir $(file)))
-ObjNames := $(Sources:%.c=%.o) $(AsmSources:%.S=%.o)
-Implementation := Optimized
-
-# FIXME: use automatic dependencies?
-Dependencies := $(wildcard lib/*.h $(Dir)/*.h)
diff --git a/lib/cfi/CMakeLists.txt b/lib/cfi/CMakeLists.txt
index 56ef88204424..206400201466 100644
--- a/lib/cfi/CMakeLists.txt
+++ b/lib/cfi/CMakeLists.txt
@@ -1,5 +1,4 @@
-add_custom_target(cfi)
-set_target_properties(cfi PROPERTIES FOLDER "Compiler-RT Misc")
+add_compiler_rt_component(cfi)
set(CFI_SOURCES cfi.cc)
@@ -36,4 +35,3 @@ foreach(arch ${CFI_SUPPORTED_ARCH})
endforeach()
add_compiler_rt_resource_file(cfi_blacklist cfi_blacklist.txt cfi)
-add_dependencies(compiler-rt cfi)
diff --git a/lib/cfi/cfi.cc b/lib/cfi/cfi.cc
index ca2cf8f30617..d463ca8daf50 100644
--- a/lib/cfi/cfi.cc
+++ b/lib/cfi/cfi.cc
@@ -30,6 +30,8 @@ typedef ElfW(Ehdr) Elf_Ehdr;
#include "ubsan/ubsan_handlers.h"
#endif
+using namespace __sanitizer;
+
namespace __cfi {
#define kCfiShadowLimitsStorageSize 4096 // 1 page
diff --git a/lib/dfsan/CMakeLists.txt b/lib/dfsan/CMakeLists.txt
index eca402dd3b03..2c486bff821b 100644
--- a/lib/dfsan/CMakeLists.txt
+++ b/lib/dfsan/CMakeLists.txt
@@ -11,8 +11,7 @@ append_rtti_flag(OFF DFSAN_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_FFREESTANDING_FLAG -ffreestanding DFSAN_COMMON_CFLAGS)
# Static runtime library.
-add_custom_target(dfsan)
-set_target_properties(dfsan PROPERTIES FOLDER "Compiler-RT Misc")
+add_compiler_rt_component(dfsan)
foreach(arch ${DFSAN_SUPPORTED_ARCH})
set(DFSAN_CFLAGS ${DFSAN_COMMON_CFLAGS})
@@ -46,5 +45,3 @@ add_custom_command(OUTPUT ${dfsan_abilist_filename}
add_dependencies(dfsan dfsan_abilist)
install(FILES ${dfsan_abilist_filename}
DESTINATION ${COMPILER_RT_INSTALL_PATH})
-
-add_dependencies(compiler-rt dfsan)
diff --git a/lib/dfsan/dfsan.cc b/lib/dfsan/dfsan.cc
index 4156000a1cce..3aa99b7f9c27 100644
--- a/lib/dfsan/dfsan.cc
+++ b/lib/dfsan/dfsan.cc
@@ -114,6 +114,26 @@ SANITIZER_INTERFACE_ATTRIBUTE uptr __dfsan_shadow_ptr_mask;
// | reserved by kernel |
// +--------------------+ 0x0000000000
+// On Linux/AArch64 (48-bit VMA), memory is laid out as follows:
+//
+// +--------------------+ 0x1000000000000 (top of memory)
+// | application memory |
+// +--------------------+ 0xffff00008000 (kAppAddr)
+// | unused |
+// +--------------------+ 0xaaaab0000000 (top of PIE address)
+// | application PIE |
+// +--------------------+ 0xaaaaa0000000 (bottom of PIE address)
+// | |
+// | unused |
+// | |
+// +--------------------+ 0x1200000000 (kUnusedAddr)
+// | union table |
+// +--------------------+ 0x8000000000 (kUnionTableAddr)
+// | shadow memory |
+// +--------------------+ 0x0000010000 (kShadowAddr)
+// | reserved by kernel |
+// +--------------------+ 0x0000000000
+
typedef atomic_dfsan_label dfsan_union_table_t[kNumLabels][kNumLabels];
#ifdef DFSAN_RUNTIME_VMA
@@ -372,11 +392,12 @@ static void InitializePlatformEarly() {
#ifdef DFSAN_RUNTIME_VMA
__dfsan::vmaSize =
(MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
- if (__dfsan::vmaSize == 39 || __dfsan::vmaSize == 42) {
+ if (__dfsan::vmaSize == 39 || __dfsan::vmaSize == 42 ||
+ __dfsan::vmaSize == 48) {
__dfsan_shadow_ptr_mask = ShadowMask();
} else {
Printf("FATAL: DataFlowSanitizer: unsupported VMA range\n");
- Printf("FATAL: Found %d - Supported 39 and 42\n", __dfsan::vmaSize);
+ Printf("FATAL: Found %d - Supported 39, 42, and 48\n", __dfsan::vmaSize);
Die();
}
#endif
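
With the Mapping48 constants introduced for the 48-bit layout above, an application address is turned into a label address by masking; assuming dfsan's usual rule that every application byte carries a two-byte label (hence the shift by one, which is an assumption here rather than something shown in this hunk), the bottom of application memory lands exactly at kShadowAddr. A quick check of that arithmetic:

    #include <cstdio>
    #include <cstdint>

    int main() {
      const uint64_t kShadowMask = ~0xfffff0000000ULL;  // Mapping48::kShadowMask
      const uint64_t kAppAddr = 0xffff00008000ULL;      // Mapping48::kAppAddr
      uint64_t shadow = (kAppAddr & kShadowMask) << 1;  // assumed u16 label scaling
      std::printf("shadow(0x%llx) = 0x%llx\n", (unsigned long long)kAppAddr,
                  (unsigned long long)shadow);          // 0x10000 == kShadowAddr
      return 0;
    }
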
diff --git a/lib/dfsan/dfsan.h b/lib/dfsan/dfsan.h
index 81f949e3019e..33145deef12e 100644
--- a/lib/dfsan/dfsan.h
+++ b/lib/dfsan/dfsan.h
@@ -18,6 +18,9 @@
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "dfsan_platform.h"
+using __sanitizer::uptr;
+using __sanitizer::u16;
+
// Copy declarations from public sanitizer/dfsan_interface.h header here.
typedef u16 dfsan_label;
diff --git a/lib/dfsan/dfsan_interceptors.cc b/lib/dfsan/dfsan_interceptors.cc
index 8b7d64e25a39..5ecbb43e7c46 100644
--- a/lib/dfsan/dfsan_interceptors.cc
+++ b/lib/dfsan/dfsan_interceptors.cc
@@ -16,6 +16,8 @@
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_common.h"
+using namespace __sanitizer;
+
INTERCEPTOR(void *, mmap, void *addr, SIZE_T length, int prot, int flags,
int fd, OFF_T offset) {
void *res = REAL(mmap)(addr, length, prot, flags, fd, offset);
diff --git a/lib/dfsan/dfsan_platform.h b/lib/dfsan/dfsan_platform.h
index f1d9f108e908..98284bafd4ff 100644
--- a/lib/dfsan/dfsan_platform.h
+++ b/lib/dfsan/dfsan_platform.h
@@ -46,6 +46,13 @@ struct Mapping42 {
static const uptr kShadowMask = ~0x3c000000000;
};
+struct Mapping48 {
+ static const uptr kShadowAddr = 0x10000;
+ static const uptr kUnionTableAddr = 0x8000000000;
+ static const uptr kAppAddr = 0xffff00008000;
+ static const uptr kShadowMask = ~0xfffff0000000;
+};
+
extern int vmaSize;
# define DFSAN_RUNTIME_VMA 1
#else
@@ -72,11 +79,13 @@ uptr MappingImpl(void) {
template<int Type>
uptr MappingArchImpl(void) {
#ifdef __aarch64__
- if (vmaSize == 39)
- return MappingImpl<Mapping39, Type>();
- else
- return MappingImpl<Mapping42, Type>();
+ switch (vmaSize) {
+ case 39: return MappingImpl<Mapping39, Type>();
+ case 42: return MappingImpl<Mapping42, Type>();
+ case 48: return MappingImpl<Mapping48, Type>();
+ }
DCHECK(0);
+ return 0;
#else
return MappingImpl<Mapping, Type>();
#endif
diff --git a/lib/dfsan/done_abilist.txt b/lib/dfsan/done_abilist.txt
index 7ca8aeba32fe..a00dc5426cd0 100644
--- a/lib/dfsan/done_abilist.txt
+++ b/lib/dfsan/done_abilist.txt
@@ -266,6 +266,14 @@ fun:reflect.makeFuncStub=discard
# Replaces __sanitizer_cov_trace_cmp with __dfsw___sanitizer_cov_trace_cmp
fun:__sanitizer_cov_trace_cmp=custom
fun:__sanitizer_cov_trace_cmp=uninstrumented
+fun:__sanitizer_cov_trace_cmp1=custom
+fun:__sanitizer_cov_trace_cmp1=uninstrumented
+fun:__sanitizer_cov_trace_cmp2=custom
+fun:__sanitizer_cov_trace_cmp2=uninstrumented
+fun:__sanitizer_cov_trace_cmp4=custom
+fun:__sanitizer_cov_trace_cmp4=uninstrumented
+fun:__sanitizer_cov_trace_cmp8=custom
+fun:__sanitizer_cov_trace_cmp8=uninstrumented
# Similar for __sanitizer_cov_trace_switch
fun:__sanitizer_cov_trace_switch=custom
fun:__sanitizer_cov_trace_switch=uninstrumented
diff --git a/lib/esan/CMakeLists.txt b/lib/esan/CMakeLists.txt
index 2a0a71b2e348..2012ab642bf1 100644
--- a/lib/esan/CMakeLists.txt
+++ b/lib/esan/CMakeLists.txt
@@ -1,7 +1,6 @@
# Build for the EfficiencySanitizer runtime support library.
-add_custom_target(esan)
-set_target_properties(esan PROPERTIES FOLDER "Compiler-RT Misc")
+add_compiler_rt_component(esan)
set(ESAN_RTL_CFLAGS ${SANITIZER_COMMON_CFLAGS})
append_rtti_flag(OFF ESAN_RTL_CFLAGS)
@@ -36,8 +35,6 @@ foreach (arch ${ESAN_SUPPORTED_ARCH})
clang_rt.esan-${arch}-symbols)
endforeach()
-add_dependencies(compiler-rt esan)
-
if (COMPILER_RT_INCLUDE_TESTS)
# TODO(bruening): add tests via add_subdirectory(tests)
endif()
diff --git a/lib/esan/cache_frag.cpp b/lib/esan/cache_frag.cpp
index a3e612daceb1..5fa5c7d54244 100644
--- a/lib/esan/cache_frag.cpp
+++ b/lib/esan/cache_frag.cpp
@@ -94,8 +94,8 @@ static void reportStructCounter(StructHashMap::Handle &Handle) {
type = "struct";
start = &Struct->StructName[7];
}
- // Remove the suffixes with '#' during print.
- end = strchr(start, '#');
+ // Strip everything from the first '$' onward when printing.
+ end = strchr(start, '$');
CHECK(end != nullptr);
Report(" %s %.*s\n", type, end - start, start);
Report(" size = %u, count = %llu, ratio = %llu, array access = %llu\n",
diff --git a/lib/esan/esan.cpp b/lib/esan/esan.cpp
index 2fb77894d4fb..09b530b6645f 100644
--- a/lib/esan/esan.cpp
+++ b/lib/esan/esan.cpp
@@ -141,9 +141,17 @@ static bool verifyShadowScheme() {
}
#endif
+uptr VmaSize;
+
static void initializeShadow() {
verifyAddressSpace();
+ // This is based on the assumption that the initial stack is always allocated
+ // in the topmost segment of the user address space; this assumption holds
+ // true on all the platforms currently supported.
+ VmaSize =
+ (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
+
DCHECK(verifyShadowScheme());
Mapping.initialize(ShadowScale[__esan_which_tool]);
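
The new VmaSize value is just the position of the highest set bit of the current stack frame address, plus one, relying on the stated assumption that the initial stack lives in the topmost user segment. A worked example of that calculation (the helper below reimplements MostSignificantSetBitIndex purely for illustration):

    #include <cstdio>
    #include <cstdint>

    static int MostSignificantSetBitIndex(uint64_t x) {
      int idx = -1;
      while (x) { ++idx; x >>= 1; }
      return idx;
    }

    int main() {
      // Stand-in for GET_CURRENT_FRAME() on a 47-bit Linux x86_64 address space.
      uint64_t frame = 0x7ffcdeadbeefULL;
      std::printf("VmaSize = %d\n", MostSignificantSetBitIndex(frame) + 1);  // 47
      return 0;
    }
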
diff --git a/lib/esan/esan.h b/lib/esan/esan.h
index 5a0dde627888..e73b21e56ff1 100644
--- a/lib/esan/esan.h
+++ b/lib/esan/esan.h
@@ -34,6 +34,7 @@ namespace __esan {
extern bool EsanIsInitialized;
extern bool EsanDuringInit;
+extern uptr VmaSize;
void initializeLibrary(ToolType Tool);
int finalizeLibrary();
diff --git a/lib/esan/esan_flags.cpp b/lib/esan/esan_flags.cpp
index 3b047e28be22..c90bf2493f7b 100644
--- a/lib/esan/esan_flags.cpp
+++ b/lib/esan/esan_flags.cpp
@@ -17,6 +17,8 @@
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
+using namespace __sanitizer;
+
namespace __esan {
static const char EsanOptsEnv[] = "ESAN_OPTIONS";
diff --git a/lib/esan/esan_hashtable.h b/lib/esan/esan_hashtable.h
new file mode 100644
index 000000000000..7bd829740400
--- /dev/null
+++ b/lib/esan/esan_hashtable.h
@@ -0,0 +1,381 @@
+//===-- esan_hashtable.h ----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of EfficiencySanitizer, a family of performance tuners.
+//
+// Generic resizing hashtable.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+#include <stddef.h>
+
+namespace __esan {
+
+//===----------------------------------------------------------------------===//
+// Default hash and comparison functions
+//===----------------------------------------------------------------------===//
+
+template <typename T> struct DefaultHash {
+ size_t operator()(const T &Key) const {
+ return (size_t)Key;
+ }
+};
+
+template <typename T> struct DefaultEqual {
+ bool operator()(const T &Key1, const T &Key2) const {
+ return Key1 == Key2;
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// HashTable declaration
+//===----------------------------------------------------------------------===//
+
+// A simple resizing and mutex-locked hashtable.
+//
+// If the default hash functor is used, KeyTy must have an operator size_t().
+// If the default comparison functor is used, KeyTy must have an operator ==.
+//
+// By default all operations are internally-synchronized with a mutex, with no
+// synchronization for payloads once hashtable functions return. If
+// ExternalLock is set to true, the caller should call the lock() and unlock()
+// routines around all hashtable operations and subsequent manipulation of
+// payloads.
+template <typename KeyTy, typename DataTy, bool ExternalLock = false,
+ typename HashFuncTy = DefaultHash<KeyTy>,
+ typename EqualFuncTy = DefaultEqual<KeyTy> >
+class HashTable {
+public:
+ // InitialCapacity must be a power of 2.
+ // ResizeFactor must be between 1 and 99 and indicates the
+ // maximum percentage full that the table should ever be.
+ HashTable(u32 InitialCapacity = 2048, u32 ResizeFactor = 70);
+ ~HashTable();
+ bool lookup(const KeyTy &Key, DataTy &Payload); // Const except for Mutex.
+ bool add(const KeyTy &Key, const DataTy &Payload);
+ bool remove(const KeyTy &Key);
+ u32 size(); // Const except for Mutex.
+ // If the table is internally-synchronized, this lock must not be held
+ // while a hashtable function is called as it will deadlock: the lock
+ // is not recursive. This is meant for use with externally-synchronized
+ // tables or with an iterator.
+ void lock();
+ void unlock();
+
+private:
+ struct HashEntry {
+ KeyTy Key;
+ DataTy Payload;
+ HashEntry *Next;
+ };
+
+public:
+ struct HashPair {
+ HashPair(KeyTy Key, DataTy Data) : Key(Key), Data(Data) {}
+ KeyTy Key;
+ DataTy Data;
+ };
+
+ // This iterator does not perform any synchronization.
+ // It expects the caller to lock the table across the whole iteration.
+ // Calling HashTable functions while using the iterator is not supported.
+ // The iterator returns copies of the keys and data.
+ class iterator {
+ public:
+ iterator(
+ HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy> *Table);
+ iterator(const iterator &Src) = default;
+ iterator &operator=(const iterator &Src) = default;
+ HashPair operator*();
+ iterator &operator++();
+ iterator &operator++(int);
+ bool operator==(const iterator &Cmp) const;
+ bool operator!=(const iterator &Cmp) const;
+
+ private:
+ iterator(
+ HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy> *Table,
+ int Idx);
+ friend HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>;
+ HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy> *Table;
+ int Idx;
+ HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::HashEntry
+ *Entry;
+ };
+
+ // No erase or insert iterator supported
+ iterator begin();
+ iterator end();
+
+private:
+ void resize();
+
+ HashEntry **Table;
+ u32 Capacity;
+ u32 Entries;
+ const u32 ResizeFactor;
+ BlockingMutex Mutex;
+ const HashFuncTy HashFunc;
+ const EqualFuncTy EqualFunc;
+};
+
+//===----------------------------------------------------------------------===//
+// Hashtable implementation
+//===----------------------------------------------------------------------===//
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::HashTable(
+ u32 InitialCapacity, u32 ResizeFactor)
+ : Capacity(InitialCapacity), Entries(0), ResizeFactor(ResizeFactor),
+ HashFunc(HashFuncTy()), EqualFunc(EqualFuncTy()) {
+ CHECK(IsPowerOfTwo(Capacity));
+ CHECK(ResizeFactor >= 1 && ResizeFactor <= 99);
+ Table = (HashEntry **)InternalAlloc(Capacity * sizeof(HashEntry *));
+ internal_memset(Table, 0, Capacity * sizeof(HashEntry *));
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::~HashTable() {
+ for (u32 i = 0; i < Capacity; ++i) {
+ HashEntry *Entry = Table[i];
+ while (Entry != nullptr) {
+ HashEntry *Next = Entry->Next;
+ Entry->Payload.~DataTy();
+ InternalFree(Entry);
+ Entry = Next;
+ }
+ }
+ InternalFree(Table);
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+u32 HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::size() {
+ u32 Res;
+ if (!ExternalLock)
+ Mutex.Lock();
+ Res = Entries;
+ if (!ExternalLock)
+ Mutex.Unlock();
+ return Res;
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+bool HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::lookup(
+ const KeyTy &Key, DataTy &Payload) {
+ if (!ExternalLock)
+ Mutex.Lock();
+ bool Found = false;
+ size_t Hash = HashFunc(Key) % Capacity;
+ HashEntry *Entry = Table[Hash];
+ for (; Entry != nullptr; Entry = Entry->Next) {
+ if (EqualFunc(Entry->Key, Key)) {
+ Payload = Entry->Payload;
+ Found = true;
+ break;
+ }
+ }
+ if (!ExternalLock)
+ Mutex.Unlock();
+ return Found;
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+void HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::resize() {
+ if (!ExternalLock)
+ Mutex.CheckLocked();
+ size_t OldCapacity = Capacity;
+ HashEntry **OldTable = Table;
+ Capacity *= 2;
+ Table = (HashEntry **)InternalAlloc(Capacity * sizeof(HashEntry *));
+ internal_memset(Table, 0, Capacity * sizeof(HashEntry *));
+ // Re-hash
+ for (u32 i = 0; i < OldCapacity; ++i) {
+ HashEntry *OldEntry = OldTable[i];
+ while (OldEntry != nullptr) {
+ HashEntry *Next = OldEntry->Next;
+ size_t Hash = HashFunc(OldEntry->Key) % Capacity;
+ OldEntry->Next = Table[Hash];
+ Table[Hash] = OldEntry;
+ OldEntry = Next;
+ }
+ }
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+bool HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::add(
+ const KeyTy &Key, const DataTy &Payload) {
+ if (!ExternalLock)
+ Mutex.Lock();
+ bool Exists = false;
+ size_t Hash = HashFunc(Key) % Capacity;
+ HashEntry *Entry = Table[Hash];
+ for (; Entry != nullptr; Entry = Entry->Next) {
+ if (EqualFunc(Entry->Key, Key)) {
+ Exists = true;
+ break;
+ }
+ }
+ if (!Exists) {
+ Entries++;
+ if (Entries * 100 >= Capacity * ResizeFactor) {
+ resize();
+ Hash = HashFunc(Key) % Capacity;
+ }
+ HashEntry *Add = (HashEntry *)InternalAlloc(sizeof(*Add));
+ Add->Key = Key;
+ Add->Payload = Payload;
+ Add->Next = Table[Hash];
+ Table[Hash] = Add;
+ }
+ if (!ExternalLock)
+ Mutex.Unlock();
+ return !Exists;
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+bool HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::remove(
+ const KeyTy &Key) {
+ if (!ExternalLock)
+ Mutex.Lock();
+ bool Found = false;
+ size_t Hash = HashFunc(Key) % Capacity;
+ HashEntry *Entry = Table[Hash];
+ HashEntry *Prev = nullptr;
+ for (; Entry != nullptr; Prev = Entry, Entry = Entry->Next) {
+ if (EqualFunc(Entry->Key, Key)) {
+ Found = true;
+ Entries--;
+ if (Prev == nullptr)
+ Table[Hash] = Entry->Next;
+ else
+ Prev->Next = Entry->Next;
+ Entry->Payload.~DataTy();
+ InternalFree(Entry);
+ break;
+ }
+ }
+ if (!ExternalLock)
+ Mutex.Unlock();
+ return Found;
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+void HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::lock() {
+ Mutex.Lock();
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+void HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::unlock() {
+ Mutex.Unlock();
+}
+
+//===----------------------------------------------------------------------===//
+// Iterator implementation
+//===----------------------------------------------------------------------===//
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
+ iterator(
+ HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy> *Table)
+ : Table(Table), Idx(-1), Entry(nullptr) {
+ operator++();
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
+ iterator(
+ HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy> *Table,
+ int Idx)
+ : Table(Table), Idx(Idx), Entry(nullptr) {
+ CHECK(Idx >= (int)Table->Capacity); // Only used to create end().
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+typename HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy,
+ EqualFuncTy>::HashPair
+ HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
+ operator*() {
+ CHECK(Idx >= 0 && Idx < (int)Table->Capacity);
+ CHECK(Entry != nullptr);
+ return HashPair(Entry->Key, Entry->Payload);
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+typename HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy,
+ EqualFuncTy>::iterator &
+ HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
+ operator++() {
+ if (Entry != nullptr)
+ Entry = Entry->Next;
+ while (Entry == nullptr) {
+ ++Idx;
+ if (Idx >= (int)Table->Capacity)
+ break; // At end().
+ Entry = Table->Table[Idx];
+ }
+ return *this;
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+typename HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy,
+ EqualFuncTy>::iterator &
+ HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
+ operator++(int) {
+ iterator Temp(*this);
+ operator++();
+ return Temp;
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+bool HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
+operator==(const iterator &Cmp) const {
+ return Cmp.Table == Table && Cmp.Idx == Idx && Cmp.Entry == Entry;
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+bool HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
+operator!=(const iterator &Cmp) const {
+ return Cmp.Table != Table || Cmp.Idx != Idx || Cmp.Entry != Entry;
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+typename HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy,
+ EqualFuncTy>::iterator
+HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::begin() {
+ return iterator(this);
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+typename HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy,
+ EqualFuncTy>::iterator
+HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::end() {
+ return iterator(this, Capacity);
+}
+
+} // namespace __esan
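
A short usage sketch for the hashtable above (a hypothetical caller, not part of the patch): operations are internally synchronized by default, and lock()/unlock() must bracket iteration because the iterator itself takes no locks.

    #include "esan_hashtable.h"

    namespace __esan {

    void HashTableUsageExample() {
      // Key must convert to size_t for DefaultHash and support == for DefaultEqual.
      HashTable<unsigned long, int> Counts;
      Counts.add(0x1000, 1);
      Counts.add(0x2000, 2);
      int Val;
      if (Counts.lookup(0x1000, Val))   // copies the payload out under the mutex
        Counts.remove(0x1000);
      Counts.lock();                    // required: the iterator is unsynchronized
      for (HashTable<unsigned long, int>::iterator It = Counts.begin();
           It != Counts.end(); ++It) {
        HashTable<unsigned long, int>::HashPair Pair = *It;
        (void)Pair.Key;
        (void)Pair.Data;
      }
      Counts.unlock();
    }

    } // namespace __esan
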
diff --git a/lib/esan/esan_interceptors.cpp b/lib/esan/esan_interceptors.cpp
index 647f010852b0..9ae5482a3cad 100644
--- a/lib/esan/esan_interceptors.cpp
+++ b/lib/esan/esan_interceptors.cpp
@@ -461,28 +461,35 @@ INTERCEPTOR(int, pthread_sigmask, int how, __sanitizer_sigset_t *set,
// Malloc interceptors
//===----------------------------------------------------------------------===//
-static char early_alloc_buf[128];
-static bool used_early_alloc_buf;
+static const uptr early_alloc_buf_size = 4096;
+static uptr allocated_bytes;
+static char early_alloc_buf[early_alloc_buf_size];
+
+static bool isInEarlyAllocBuf(const void *ptr) {
+ return ((uptr)ptr >= (uptr)early_alloc_buf &&
+ ((uptr)ptr - (uptr)early_alloc_buf) < sizeof(early_alloc_buf));
+}
static void *handleEarlyAlloc(uptr size) {
// If esan is initialized during an interceptor (which happens with some
// tcmalloc implementations that call pthread_mutex_lock), the call from
- // dlsym to calloc will deadlock. There is only one such calloc (dlsym
- // allocates a single pthread key), so we work around it by using a
- // static buffer for the calloc request. The loader currently needs
- // 32 bytes but we size at 128 to allow for future changes.
+ // dlsym to calloc will deadlock.
+ // dlsym may also call malloc before REAL(malloc) is retrieved from dlsym.
+ // We work around it by using a static buffer for the early malloc/calloc
+ // requests.
// This solution will also allow us to deliberately intercept malloc & family
// in the future (to perform tool actions on each allocation, without
// replacing the allocator), as it also solves the problem of intercepting
// calloc when it will itself be called before its REAL pointer is
// initialized.
- CHECK(!used_early_alloc_buf && size < sizeof(early_alloc_buf));
// We do not handle multiple threads here. This only happens at process init
// time, and while it's possible for a shared library to create early threads
// that race here, we consider that to be a corner case extreme enough that
// it's not worth the effort to handle.
- used_early_alloc_buf = true;
- return (void *)early_alloc_buf;
+ void *mem = (void *)&early_alloc_buf[allocated_bytes];
+ allocated_bytes += size;
+ CHECK_LT(allocated_bytes, early_alloc_buf_size);
+ return mem;
}
INTERCEPTOR(void*, calloc, uptr size, uptr n) {
@@ -496,14 +503,20 @@ INTERCEPTOR(void*, calloc, uptr size, uptr n) {
return res;
}
+INTERCEPTOR(void*, malloc, uptr size) {
+ if (EsanDuringInit && REAL(malloc) == nullptr)
+ return handleEarlyAlloc(size);
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, malloc, size);
+ return REAL(malloc)(size);
+}
+
INTERCEPTOR(void, free, void *p) {
void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, free, p);
- if (p == (void *)early_alloc_buf) {
- // We expect just a singleton use but we clear this for cleanliness.
- used_early_alloc_buf = false;
+ // There are only a few early allocation requests, so we simply skip the free.
+ if (isInEarlyAllocBuf(p))
return;
- }
+ COMMON_INTERCEPTOR_ENTER(ctx, free, p);
REAL(free)(p);
}
@@ -534,6 +547,7 @@ void initializeInterceptors() {
ESAN_MAYBE_INTERCEPT_PTHREAD_SIGMASK;
INTERCEPT_FUNCTION(calloc);
+ INTERCEPT_FUNCTION(malloc);
INTERCEPT_FUNCTION(free);
// TODO(bruening): intercept routines that other sanitizers intercept that
diff --git a/lib/esan/esan_interface_internal.h b/lib/esan/esan_interface_internal.h
index 3b915d03e07a..df51aa609aff 100644
--- a/lib/esan/esan_interface_internal.h
+++ b/lib/esan/esan_interface_internal.h
@@ -21,6 +21,9 @@
// This header should NOT include any other headers.
// All functions in this header are extern "C" and start with __esan_.
+using __sanitizer::uptr;
+using __sanitizer::u32;
+
extern "C" {
// This should be kept consistent with LLVM's EfficiencySanitizerOptions.
diff --git a/lib/esan/esan_linux.cpp b/lib/esan/esan_linux.cpp
index aa961b66116b..014205ce01eb 100644
--- a/lib/esan/esan_linux.cpp
+++ b/lib/esan/esan_linux.cpp
@@ -25,7 +25,7 @@
namespace __esan {
void verifyAddressSpace() {
-#if SANITIZER_LINUX && defined(__x86_64__)
+#if SANITIZER_LINUX && (defined(__x86_64__) || SANITIZER_MIPS64)
// The kernel determines its mmap base from the stack size limit.
// Our Linux 64-bit shadow mapping assumes the stack limit is less than a
// terabyte, which keeps the mmap region above 0x7e00'.
diff --git a/lib/esan/esan_shadow.h b/lib/esan/esan_shadow.h
index f8f154ef7cca..72a919a190d8 100644
--- a/lib/esan/esan_shadow.h
+++ b/lib/esan/esan_shadow.h
@@ -15,6 +15,7 @@
#ifndef ESAN_SHADOW_H
#define ESAN_SHADOW_H
+#include "esan.h"
#include <sanitizer_common/sanitizer_platform.h>
#if SANITIZER_WORDSIZE != 64
@@ -23,6 +24,12 @@
namespace __esan {
+struct ApplicationRegion {
+ uptr Start;
+ uptr End;
+ bool ShadowMergedWithPrev;
+};
+
#if SANITIZER_LINUX && defined(__x86_64__)
// Linux x86_64
//
@@ -89,12 +96,6 @@ namespace __esan {
// [0x000015ff'ff601000, 0x00001600'00000000]
// [0x000015ff'ff600000, 0x000015ff'ff601000]
-struct ApplicationRegion {
- uptr Start;
- uptr End;
- bool ShadowMergedWithPrev;
-};
-
static const struct ApplicationRegion AppRegions[] = {
{0x0000000000000000ull, 0x0000010000000000u, false},
{0x0000550000000000u, 0x0000570000000000u, false},
@@ -105,6 +106,52 @@ static const struct ApplicationRegion AppRegions[] = {
{0x00007fffff601000u, 0x0000800000000000u, true},
{0xffffffffff600000u, 0xffffffffff601000u, true},
};
+
+#elif SANITIZER_LINUX && SANITIZER_MIPS64
+
+// Application memory falls into these 3 regions
+//
+// [0x00000001'00000000, 0x00000002'00000000) non-PIE + heap
+// [0x000000aa'00000000, 0x000000ab'00000000) PIE
+// [0x000000ff'00000000, 0x000000ff'ffffffff) libraries + stack
+//
+// This formula translates from application memory to shadow memory:
+//
+// shadow(app) = ((app & 0x00000f'ffffffff) + offset) >> scale
+//
+// Where the offset for 1:1 is 0x000013'00000000. For other scales, the
+// offset is shifted left by the scale, except for scales of 1 and 2 where
+// it must be tweaked in order to pass the double-shadow test
+// (see the "shadow(shadow)" comments below):
+// scale == 0: 0x000013'00000000
+// scale == 1: 0x000022'00000000
+// scale == 2: 0x000044'00000000
+// scale >= 3: (0x000013'00000000 << scale)
+//
+// The resulting shadow memory regions for a 0 scaling are:
+//
+// [0x00000014'00000000, 0x00000015'00000000)
+// [0x0000001d'00000000, 0x0000001e'00000000)
+// [0x00000022'00000000, 0x00000022'ffffffff)
+//
+// We also want to ensure that a wild access by the application into the shadow
+// regions will not corrupt our own shadow memory. shadow(shadow) ends up
+// disjoint from shadow(app):
+//
+// [0x00000017'00000000, 0x00000018'00000000)
+// [0x00000020'00000000, 0x00000021'00000000)
+// [0x00000015'00000000, 0x00000015'ffffffff]
+
+static const struct ApplicationRegion AppRegions[] = {
+ {0x0100000000u, 0x0200000000u, false},
+ {0xaa00000000u, 0xab00000000u, false},
+ {0xff00000000u, 0xffffffffffu, false},
+};
+
+#else
+#error Platform not supported
+#endif
+
static const u32 NumAppRegions = sizeof(AppRegions)/sizeof(AppRegions[0]);
// See the comment above: we do not currently support a stack size rlimit
@@ -113,29 +160,59 @@ static const uptr MaxStackSize = (1ULL << 40) - 4096;
class ShadowMapping {
public:
- static const uptr Mask = 0x00000fffffffffffu;
+
// The scale and offset vary by tool.
uptr Scale;
uptr Offset;
+
+ // TODO(sagar.thakur): Try to hardcode the mask as done in the compiler
+ // instrumentation to reduce the runtime cost of appToShadow.
+ struct ShadowMemoryMask40 {
+ static const uptr Mask = 0x0000000fffffffffu;
+ };
+
+ struct ShadowMemoryMask47 {
+ static const uptr Mask = 0x00000fffffffffffu;
+ };
+
void initialize(uptr ShadowScale) {
- static const uptr OffsetArray[3] = {
- 0x0000130000000000u,
- 0x0000220000000000u,
- 0x0000440000000000u,
+
+ const uptr OffsetArray40[3] = {
+ 0x0000001300000000u,
+ 0x0000002200000000u,
+ 0x0000004400000000u,
};
+
+ const uptr OffsetArray47[3] = {
+ 0x0000130000000000u,
+ 0x0000220000000000u,
+ 0x0000440000000000u,
+ };
+
Scale = ShadowScale;
- if (Scale <= 2)
- Offset = OffsetArray[Scale];
- else
- Offset = OffsetArray[0] << Scale;
+ switch (VmaSize) {
+ case 40: {
+ if (Scale <= 2)
+ Offset = OffsetArray40[Scale];
+ else
+ Offset = OffsetArray40[0] << Scale;
+ }
+ break;
+ case 47: {
+ if (Scale <= 2)
+ Offset = OffsetArray47[Scale];
+ else
+ Offset = OffsetArray47[0] << Scale;
+ }
+ break;
+ default: {
+ Printf("ERROR: %d-bit virtual memory address size not supported\n", VmaSize);
+ Die();
+ }
+ }
}
};
extern ShadowMapping Mapping;
-#else
-// We'll want to use templatized functions over the ShadowMapping once
-// we support more platforms.
-#error Platform not supported
-#endif
static inline bool getAppRegion(u32 i, uptr *Start, uptr *End) {
if (i >= NumAppRegions)
@@ -154,9 +231,21 @@ bool isAppMem(uptr Mem) {
return false;
}
+template<typename Params>
+uptr appToShadowImpl(uptr App) {
+ return (((App & Params::Mask) + Mapping.Offset) >> Mapping.Scale);
+}
+
ALWAYS_INLINE
uptr appToShadow(uptr App) {
- return (((App & ShadowMapping::Mask) + Mapping.Offset) >> Mapping.Scale);
+ switch (VmaSize) {
+ case 40: return appToShadowImpl<ShadowMapping::ShadowMemoryMask40>(App);
+ case 47: return appToShadowImpl<ShadowMapping::ShadowMemoryMask47>(App);
+ default: {
+ Printf("ERROR: %d-bit virtual memory address size not supported\n", VmaSize);
+ Die();
+ }
+ }
}
static inline bool getShadowRegion(u32 i, uptr *Start, uptr *End) {
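
Plugging the scale-0 constants from the MIPS64 comment into the formula confirms the three shadow regions it lists. A quick check in C++ (the values come straight from ShadowMemoryMask40 and OffsetArray40 above):

    #include <cstdio>
    #include <cstdint>

    int main() {
      const uint64_t Mask40 = 0x0000000fffffffffULL;   // ShadowMemoryMask40::Mask
      const uint64_t Offset = 0x0000001300000000ULL;   // OffsetArray40[0], scale 0
      const uint64_t Apps[] = {0x0100000000ULL, 0xaa00000000ULL, 0xff00000000ULL};
      for (int i = 0; i < 3; ++i) {
        uint64_t Shadow = ((Apps[i] & Mask40) + Offset) >> 0;
        std::printf("shadow(0x%llx) = 0x%llx\n", (unsigned long long)Apps[i],
                    (unsigned long long)Shadow);
      }
      // Prints 0x1400000000, 0x1d00000000 and 0x2200000000: the start of each
      // shadow region listed in the comment.
      return 0;
    }
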
diff --git a/lib/interception/interception.h b/lib/interception/interception.h
index 9e9aca215c4d..d79fa67babfa 100644
--- a/lib/interception/interception.h
+++ b/lib/interception/interception.h
@@ -92,8 +92,8 @@ typedef __sanitizer::OFF64_T OFF64_T;
// Just a pair of pointers.
struct interpose_substitution {
- const uptr replacement;
- const uptr original;
+ const __sanitizer::uptr replacement;
+ const __sanitizer::uptr original;
};
// For a function foo() create a global pair of pointers { wrap_foo, foo } in
@@ -158,10 +158,12 @@ const interpose_substitution substitution_##func_name[] \
namespace __interception { \
extern FUNC_TYPE(func) PTR_TO_REAL(func); \
}
+# define ASSIGN_REAL(dst, src) REAL(dst) = REAL(src)
#else // __APPLE__
# define REAL(x) x
# define DECLARE_REAL(ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__);
+# define ASSIGN_REAL(x, y)
#endif // __APPLE__
#define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \
diff --git a/lib/interception/interception_win.cc b/lib/interception/interception_win.cc
index 8977d59ac4f1..91abecf6de5f 100644
--- a/lib/interception/interception_win.cc
+++ b/lib/interception/interception_win.cc
@@ -148,10 +148,16 @@ static void InterceptionFailed() {
}
static bool DistanceIsWithin2Gig(uptr from, uptr target) {
+#if SANITIZER_WINDOWS64
if (from < target)
return target - from <= (uptr)0x7FFFFFFFU;
else
return from - target <= (uptr)0x80000000U;
+#else
+ // In a 32-bit address space, the address calculation will wrap, so this check
+ // is unnecessary.
+ return true;
+#endif
}
static uptr GetMmapGranularity() {
@@ -167,6 +173,21 @@ static uptr RoundUpTo(uptr size, uptr boundary) {
// FIXME: internal_str* and internal_mem* functions should be moved from the
// ASan sources into interception/.
+static size_t _strlen(const char *str) {
+ const char* p = str;
+ while (*p != '\0') ++p;
+ return p - str;
+}
+
+static char* _strchr(char* str, char c) {
+ while (*str) {
+ if (*str == c)
+ return str;
+ ++str;
+ }
+ return nullptr;
+}
+
static void _memset(void *p, int value, size_t sz) {
for (size_t i = 0; i < sz; ++i)
((char*)p)[i] = (char)value;
@@ -229,10 +250,6 @@ static void WritePadding(uptr from, uptr size) {
_memset((void*)from, 0xCC, (size_t)size);
}
-static void CopyInstructions(uptr from, uptr to, uptr size) {
- _memcpy((void*)from, (void*)to, (size_t)size);
-}
-
static void WriteJumpInstruction(uptr from, uptr target) {
if (!DistanceIsWithin2Gig(from + kJumpInstructionLength, target))
InterceptionFailed();
@@ -294,7 +311,7 @@ struct TrampolineMemoryRegion {
uptr max_size;
};
-static const uptr kTrampolineScanLimitRange = 1 << 30; // 1 gig
+static const uptr kTrampolineScanLimitRange = 1 << 31; // 2 gig
static const int kMaxTrampolineRegion = 1024;
static TrampolineMemoryRegion TrampolineRegions[kMaxTrampolineRegion];
@@ -384,7 +401,7 @@ static uptr AllocateMemoryForTrampoline(uptr image_address, size_t size) {
}
// Returns 0 on error.
-static size_t GetInstructionSize(uptr address) {
+static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
switch (*(u64*)address) {
case 0x90909090909006EB: // stub: jmp over 6 x nop.
return 8;
@@ -410,7 +427,6 @@ static size_t GetInstructionSize(uptr address) {
case 0xb8: // b8 XX XX XX XX : mov eax, XX XX XX XX
case 0xB9: // b9 XX XX XX XX : mov ecx, XX XX XX XX
- case 0xA1: // A1 XX XX XX XX : mov eax, dword ptr ds:[XXXXXXXX]
return 5;
// Cannot overwrite control-instruction. Return 0 to indicate failure.
@@ -452,7 +468,18 @@ static size_t GetInstructionSize(uptr address) {
return 0;
}
+ switch (0x00FFFFFF & *(u32*)address) {
+ case 0x24A48D: // 8D A4 24 XX XX XX XX : lea esp, [esp + XX XX XX XX]
+ return 7;
+ }
+
#if SANITIZER_WINDOWS64
+ switch (*(u8*)address) {
+ case 0xA1: // A1 XX XX XX XX XX XX XX XX :
+ // movabs eax, dword ptr ds:[XXXXXXXX]
+ return 8;
+ }
+
switch (*(u16*)address) {
case 0x5040: // push rax
case 0x5140: // push rcx
@@ -477,17 +504,20 @@ static size_t GetInstructionSize(uptr address) {
case 0xd9f748: // 48 f7 d9 : neg rcx
case 0xd12b48: // 48 2b d1 : sub rdx, rcx
case 0x07c1f6: // f6 c1 07 : test cl, 0x7
+ case 0xc98548: // 48 85 C9 : test rcx, rcx
case 0xc0854d: // 4d 85 c0 : test r8, r8
case 0xc2b60f: // 0f b6 c2 : movzx eax, dl
case 0xc03345: // 45 33 c0 : xor r8d, r8d
+ case 0xdb3345: // 45 33 DB : xor r11d, r11d
case 0xd98b4c: // 4c 8b d9 : mov r11, rcx
case 0xd28b4c: // 4c 8b d2 : mov r10, rdx
+ case 0xc98b4c: // 4C 8B C9 : mov r9, rcx
case 0xd2b60f: // 0f b6 d2 : movzx edx, dl
case 0xca2b48: // 48 2b ca : sub rcx, rdx
case 0x10b70f: // 0f b7 10 : movzx edx, WORD PTR [rax]
case 0xc00b4d: // 3d 0b c0 : or r8, r8
case 0xd18b48: // 48 8b d1 : mov rdx, rcx
- case 0xdc8b4c: // 4c 8b dc : mov r11,rsp
+ case 0xdc8b4c: // 4c 8b dc : mov r11, rsp
case 0xd18b4c: // 4c 8b d1 : mov r10, rcx
return 3;
@@ -496,11 +526,22 @@ static size_t GetInstructionSize(uptr address) {
case 0x588948: // 48 89 58 XX : mov QWORD PTR[rax + XX], rbx
return 4;
+ case 0xec8148: // 48 81 EC XX XX XX XX : sub rsp, XXXXXXXX
+ return 7;
+
case 0x058b48: // 48 8b 05 XX XX XX XX :
// mov rax, QWORD PTR [rip + XXXXXXXX]
case 0x25ff48: // 48 ff 25 XX XX XX XX :
// rex.W jmp QWORD PTR [rip + XXXXXXXX]
+
+ // Instructions having offset relative to 'rip' need offset adjustment.
+ if (rel_offset)
+ *rel_offset = 3;
return 7;
+
+ case 0x2444c7: // C7 44 24 XX YY YY YY YY
+ // mov dword ptr [rsp + XX], YYYYYYYY
+ return 8;
}
switch (*(u32*)(address)) {
@@ -513,6 +554,10 @@ static size_t GetInstructionSize(uptr address) {
#else
+ switch (*(u8*)address) {
+ case 0xA1: // A1 XX XX XX XX : mov eax, dword ptr ds:[XXXXXXXX]
+ return 5;
+ }
switch (*(u16*)address) {
case 0x458B: // 8B 45 XX : mov eax, dword ptr [ebp + XX]
case 0x5D8B: // 8B 5D XX : mov ebx, dword ptr [ebp + XX]
@@ -566,6 +611,28 @@ static size_t RoundUpToInstrBoundary(size_t size, uptr address) {
return cursor;
}
+static bool CopyInstructions(uptr to, uptr from, size_t size) {
+ size_t cursor = 0;
+ while (cursor != size) {
+ size_t rel_offset = 0;
+ size_t instruction_size = GetInstructionSize(from + cursor, &rel_offset);
+ _memcpy((void*)(to + cursor), (void*)(from + cursor),
+ (size_t)instruction_size);
+ if (rel_offset) {
+ uptr delta = to - from;
+ uptr relocated_offset = *(u32*)(to + cursor + rel_offset) - delta;
+#if SANITIZER_WINDOWS64
+ if (relocated_offset + 0x80000000U >= 0xFFFFFFFFU)
+ return false;
+#endif
+ *(u32*)(to + cursor + rel_offset) = relocated_offset;
+ }
+ cursor += instruction_size;
+ }
+ return true;
+}
+
+
#if !SANITIZER_WINDOWS64
bool OverrideFunctionWithDetour(
uptr old_func, uptr new_func, uptr *orig_old_func) {
@@ -656,7 +723,8 @@ bool OverrideFunctionWithHotPatch(
uptr trampoline = AllocateMemoryForTrampoline(old_func, trampoline_length);
if (!trampoline)
return false;
- CopyInstructions(trampoline, old_func, instruction_size);
+ if (!CopyInstructions(trampoline, old_func, instruction_size))
+ return false;
WriteDirectBranch(trampoline + instruction_size,
old_func + instruction_size);
*orig_old_func = trampoline;
@@ -705,7 +773,8 @@ bool OverrideFunctionWithTrampoline(
uptr trampoline = AllocateMemoryForTrampoline(old_func, trampoline_length);
if (!trampoline)
return false;
- CopyInstructions(trampoline, old_func, instructions_length);
+ if (!CopyInstructions(trampoline, old_func, instructions_length))
+ return false;
WriteDirectBranch(trampoline + instructions_length,
old_func + instructions_length);
*orig_old_func = trampoline;
@@ -820,6 +889,32 @@ uptr InternalGetProcAddress(void *module, const char *func_name) {
if (!strcmp(func_name, name)) {
DWORD index = ordinals[i];
RVAPtr<char> func(module, functions[index]);
+
+ // Handle forwarded functions.
+ DWORD offset = functions[index];
+ if (offset >= export_directory->VirtualAddress &&
+ offset < export_directory->VirtualAddress + export_directory->Size) {
+ // An entry for a forwarded function is a string with the following
+ // format: "<module>.<function_name>" that is stored in the
+ // export directory.
+ char function_name[256];
+ size_t function_name_length = _strlen(func);
+ if (function_name_length >= sizeof(function_name) - 1)
+ InterceptionFailed();
+
+ _memcpy(function_name, func, function_name_length);
+ function_name[function_name_length] = '\0';
+ char* separator = _strchr(function_name, '.');
+ if (!separator)
+ InterceptionFailed();
+ *separator = '\0';
+
+ void* redirected_module = GetModuleHandleA(function_name);
+ if (!redirected_module)
+ InterceptionFailed();
+ return InternalGetProcAddress(redirected_module, separator + 1);
+ }
+
return (uptr)(char *)func;
}
}
@@ -827,19 +922,18 @@ uptr InternalGetProcAddress(void *module, const char *func_name) {
return 0;
}
-static bool GetFunctionAddressInDLLs(const char *func_name, uptr *func_addr) {
- *func_addr = 0;
+bool OverrideFunction(
+ const char *func_name, uptr new_func, uptr *orig_old_func) {
+ bool hooked = false;
void **DLLs = InterestingDLLsAvailable();
- for (size_t i = 0; *func_addr == 0 && DLLs[i]; ++i)
- *func_addr = InternalGetProcAddress(DLLs[i], func_name);
- return (*func_addr != 0);
-}
-
-bool OverrideFunction(const char *name, uptr new_func, uptr *orig_old_func) {
- uptr orig_func;
- if (!GetFunctionAddressInDLLs(name, &orig_func))
- return false;
- return OverrideFunction(orig_func, new_func, orig_old_func);
+ for (size_t i = 0; DLLs[i]; ++i) {
+ uptr func_addr = InternalGetProcAddress(DLLs[i], func_name);
+ if (func_addr &&
+ OverrideFunction(func_addr, new_func, orig_old_func)) {
+ hooked = true;
+ }
+ }
+ return hooked;
}
bool OverrideImportedFunction(const char *module_to_patch,
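
The displacement rewrite in CopyInstructions follows from how rip-relative addressing works: the referenced address is the end of the instruction plus its 32-bit displacement, so moving the instruction by delta = to - from means the copied displacement must shrink by delta to keep pointing at the same place (and the copy is rejected when the result no longer fits a signed 32-bit range). A worked example of that arithmetic with made-up addresses:

    #include <cstdio>
    #include <cstdint>

    int main() {
      // Example values only: a "48 8b 05 disp32" (7 bytes, displacement at +3).
      uint64_t from = 0x7ff600001000ULL;    // original instruction address
      uint64_t to = 0x7ff620000000ULL;      // its copy in the trampoline
      uint32_t disp = 0x00001234;           // original rip-relative displacement
      uint64_t insn_len = 7;

      uint64_t target = from + insn_len + disp;             // address referenced
      uint32_t new_disp = (uint32_t)(disp - (to - from));   // rule from the patch
      uint64_t new_target = to + insn_len + (int64_t)(int32_t)new_disp;

      std::printf("targets match: %d\n", target == new_target);  // 1
      return 0;
    }
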
diff --git a/lib/interception/tests/CMakeLists.txt b/lib/interception/tests/CMakeLists.txt
index bfe41fed2fed..5ea943f9a82a 100644
--- a/lib/interception/tests/CMakeLists.txt
+++ b/lib/interception/tests/CMakeLists.txt
@@ -29,6 +29,7 @@ else()
endif()
if(MSVC)
list(APPEND INTERCEPTION_TEST_CFLAGS_COMMON -gcodeview)
+ list(APPEND INTERCEPTION_TEST_LINK_FLAGS_COMMON -Wl,-largeaddressaware)
endif()
list(APPEND INTERCEPTION_TEST_LINK_FLAGS_COMMON -g)
diff --git a/lib/interception/tests/interception_linux_test.cc b/lib/interception/tests/interception_linux_test.cc
index 4a1ae785d16f..cc09aa09df3a 100644
--- a/lib/interception/tests/interception_linux_test.cc
+++ b/lib/interception/tests/interception_linux_test.cc
@@ -11,6 +11,10 @@
// Tests for interception_linux.h.
//
//===----------------------------------------------------------------------===//
+
+// Do not declare isdigit in ctype.h.
+#define __NO_CTYPE
+
#include "interception/interception.h"
#include "gtest/gtest.h"
@@ -31,10 +35,9 @@ INTERCEPTOR(int, isdigit, int d) {
namespace __interception {
TEST(Interception, GetRealFunctionAddress) {
- uptr expected_malloc_address = (uptr)(void*)&malloc;
uptr malloc_address = 0;
EXPECT_TRUE(GetRealFunctionAddress("malloc", &malloc_address, 0, 0));
- EXPECT_EQ(expected_malloc_address, malloc_address);
+ EXPECT_NE(0U, malloc_address);
uptr dummy_address = 0;
EXPECT_TRUE(
diff --git a/lib/interception/tests/interception_win_test.cc b/lib/interception/tests/interception_win_test.cc
index 611354f03d12..684ee0303559 100644
--- a/lib/interception/tests/interception_win_test.cc
+++ b/lib/interception/tests/interception_win_test.cc
@@ -163,6 +163,13 @@ const u8 kPatchableCode4[] = {
0x90, 0x90, 0x90, 0x90,
};
+const u8 kPatchableCode5[] = {
+ 0x55, // push ebp
+ 0x8b, 0xec, // mov ebp,esp
+ 0x8d, 0xa4, 0x24, 0x30, 0xfd, 0xff, 0xff, // lea esp,[esp-2D0h]
+ 0x54, // push esp
+};
+
const u8 kUnpatchableCode1[] = {
0xC3, // ret
};
@@ -197,7 +204,29 @@ const u8 kUnpatchableCode6[] = {
// A buffer holding the dynamically generated code under test.
u8* ActiveCode;
-size_t ActiveCodeLength = 4096;
+const size_t ActiveCodeLength = 4096;
+
+int InterceptorFunction(int x);
+
+/// Allocate code memory more than 2GB away from Base.
+u8 *AllocateCode2GBAway(u8 *Base) {
+ // Find a 64K aligned location after Base plus 2GB.
+ size_t TwoGB = 0x80000000;
+ size_t AllocGranularity = 0x10000;
+ Base = (u8 *)((((uptr)Base + TwoGB + AllocGranularity)) & ~(AllocGranularity - 1));
+
+ // Check if that location is free, and if not, loop over regions until we find
+ // one that is.
+ MEMORY_BASIC_INFORMATION mbi = {};
+ while (sizeof(mbi) == VirtualQuery(Base, &mbi, sizeof(mbi))) {
+ if (mbi.State & MEM_FREE) break;
+ Base += mbi.RegionSize;
+ }
+
+ // Allocate one RWX page at the free location.
+ return (u8 *)::VirtualAlloc(Base, ActiveCodeLength, MEM_COMMIT | MEM_RESERVE,
+ PAGE_EXECUTE_READWRITE);
+}
template<class T>
static void LoadActiveCode(
@@ -205,11 +234,8 @@ static void LoadActiveCode(
uptr *entry_point,
FunctionPrefixKind prefix_kind = FunctionPrefixNone) {
if (ActiveCode == nullptr) {
- ActiveCode =
- (u8*)::VirtualAlloc(nullptr, ActiveCodeLength,
- MEM_COMMIT | MEM_RESERVE,
- PAGE_EXECUTE_READWRITE);
- ASSERT_NE(ActiveCode, nullptr);
+ ActiveCode = AllocateCode2GBAway((u8*)&InterceptorFunction);
+ ASSERT_NE(ActiveCode, nullptr) << "failed to allocate RWX memory 2GB away";
}
size_t position = 0;
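AllocateCode2GBAway matters because a displacement larger than 2GB cannot be encoded in a 32-bit relative jump, so placing the code under test that far from InterceptorFunction exercises the patching paths that need a trampoline or an indirect jump. A purely illustrative range check (not part of the test, and assuming the usual 5-byte jmp rel32 encoding):

  #include <stdint.h>

  // Returns true when a jmp rel32 placed at 'from' can reach 'to'; the
  // displacement is measured from the end of the 5-byte instruction.
  static bool FitsInRel32(uint64_t from, uint64_t to) {
    int64_t delta = (int64_t)(to - (from + 5));
    return delta >= INT32_MIN && delta <= INT32_MAX;
  }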
@@ -474,6 +500,7 @@ TEST(Interception, PatchableFunction) {
EXPECT_TRUE(TestFunctionPatching(kPatchableCode3, override));
#endif
EXPECT_TRUE(TestFunctionPatching(kPatchableCode4, override));
+ EXPECT_TRUE(TestFunctionPatching(kPatchableCode5, override));
EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode1, override));
EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode2, override));
diff --git a/lib/lsan/CMakeLists.txt b/lib/lsan/CMakeLists.txt
index 9412c7a42c2f..73e475d2fdba 100644
--- a/lib/lsan/CMakeLists.txt
+++ b/lib/lsan/CMakeLists.txt
@@ -16,9 +16,6 @@ set(LSAN_SOURCES
set(LSAN_SRC_DIR ${CMAKE_CURRENT_SOURCE_DIR})
-add_custom_target(lsan)
-set_target_properties(lsan PROPERTIES FOLDER "Compiler-RT Misc")
-
add_compiler_rt_object_libraries(RTLSanCommon
OS ${SANITIZER_COMMON_SUPPORTED_OS}
ARCHS ${LSAN_COMMON_SUPPORTED_ARCH}
@@ -27,6 +24,8 @@ add_compiler_rt_object_libraries(RTLSanCommon
if(COMPILER_RT_HAS_LSAN)
foreach(arch ${LSAN_SUPPORTED_ARCH})
+ add_compiler_rt_component(lsan)
+
add_compiler_rt_runtime(clang_rt.lsan
STATIC
ARCHS ${arch}
@@ -39,5 +38,3 @@ if(COMPILER_RT_HAS_LSAN)
PARENT_TARGET lsan)
endforeach()
endif()
-
-add_dependencies(compiler-rt lsan)
diff --git a/lib/lsan/lsan_allocator.cc b/lib/lsan/lsan_allocator.cc
index a5220f1a34b1..1f6efc0f8d93 100644
--- a/lib/lsan/lsan_allocator.cc
+++ b/lib/lsan/lsan_allocator.cc
@@ -43,10 +43,17 @@ typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
PrimaryAllocator;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
-static const uptr kAllocatorSpace = 0x600000000000ULL;
-static const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
-typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
- sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
+
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = 0x600000000000ULL;
+ static const uptr kSpaceSize = 0x40000000000ULL; // 4T.
+ static const uptr kMetadataSize = sizeof(ChunkMetadata);
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
@@ -57,7 +64,9 @@ static Allocator allocator;
static THREADLOCAL AllocatorCache cache;
void InitializeAllocator() {
- allocator.InitLinkerInitialized(common_flags()->allocator_may_return_null);
+ allocator.InitLinkerInitialized(
+ common_flags()->allocator_may_return_null,
+ common_flags()->allocator_release_to_os_interval_ms);
}
void AllocatorThreadFinish() {
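Here, and in the MSan change below, SizeClassAllocator64's individual template parameters are folded into a single parameter struct (AP64), from which the allocator reads each knob as a nested type or constant. A hedged, generic sketch of that policy-struct pattern (the class and member names are illustrative, not the real allocator internals):

  // Generic policy-struct sketch, not the real SizeClassAllocator64.
  template <class Params>
  class PolicyBasedAllocator {
   public:
    static const unsigned long long kSpaceBeg = Params::kSpaceBeg;
    static const unsigned long long kSpaceSize = Params::kSpaceSize;
    typedef typename Params::SizeClassMap SizeClassMap;  // nested-type knob
    // ... the allocation logic would also read Params::kMetadataSize, kFlags.
  };

  struct ExampleAP64 {                  // mirrors the shape of AP64 above
    static const unsigned long long kSpaceBeg = 0x600000000000ULL;
    static const unsigned long long kSpaceSize = 0x40000000000ULL;
    static const unsigned long long kMetadataSize = 0;
    struct SizeClassMap {};             // placeholder for DefaultSizeClassMap
  };

  typedef PolicyBasedAllocator<ExampleAP64> ExampleAllocator;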
diff --git a/lib/lsan/lsan_common.cc b/lib/lsan/lsan_common.cc
index 888a25b206c8..b20941ef23f6 100644
--- a/lib/lsan/lsan_common.cc
+++ b/lib/lsan/lsan_common.cc
@@ -32,6 +32,7 @@ namespace __lsan {
// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);
+__attribute__((tls_model("initial-exec")))
THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }
void DisableInThisThread() { disable_counter++; }
@@ -449,6 +450,8 @@ static bool CheckForLeaks() {
Report(
"HINT: For debugging, try setting environment variable "
"LSAN_OPTIONS=verbosity=1:log_threads=1\n");
+ Report(
+ "HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n");
Die();
}
param.leak_report.ApplySuppressions();
diff --git a/lib/lsan/lsan_common_linux.cc b/lib/lsan/lsan_common_linux.cc
index 1f5430395b88..f6154d8b97d1 100644
--- a/lib/lsan/lsan_common_linux.cc
+++ b/lib/lsan/lsan_common_linux.cc
@@ -71,7 +71,7 @@ static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
if (begin <= allocator_begin && allocator_begin < end) {
CHECK_LE(allocator_begin, allocator_end);
- CHECK_LT(allocator_end, end);
+ CHECK_LE(allocator_end, end);
if (begin < allocator_begin)
ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
kReachable);
diff --git a/lib/lsan/lsan_thread.cc b/lib/lsan/lsan_thread.cc
index 8bd6d90edc92..5dff4f748106 100644
--- a/lib/lsan/lsan_thread.cc
+++ b/lib/lsan/lsan_thread.cc
@@ -36,7 +36,7 @@ static const uptr kMaxThreads = 1 << 13;
static const uptr kThreadQuarantineSize = 64;
void InitializeThreadRegistry() {
- static char thread_registry_placeholder[sizeof(ThreadRegistry)] ALIGNED(64);
+ static ALIGNED(64) char thread_registry_placeholder[sizeof(ThreadRegistry)];
thread_registry = new(thread_registry_placeholder)
ThreadRegistry(CreateThreadContext, kMaxThreads, kThreadQuarantineSize);
}
diff --git a/lib/msan/CMakeLists.txt b/lib/msan/CMakeLists.txt
index e7f2877d1b2a..598ae54588c1 100644
--- a/lib/msan/CMakeLists.txt
+++ b/lib/msan/CMakeLists.txt
@@ -25,8 +25,7 @@ append_list_if(COMPILER_RT_HAS_FFREESTANDING_FLAG -ffreestanding MSAN_RTL_CFLAGS
set(MSAN_RUNTIME_LIBRARIES)
# Static runtime library.
-add_custom_target(msan)
-set_target_properties(msan PROPERTIES FOLDER "Compiler-RT Misc")
+add_compiler_rt_component(msan)
foreach(arch ${MSAN_SUPPORTED_ARCH})
add_compiler_rt_runtime(clang_rt.msan
@@ -61,7 +60,6 @@ foreach(arch ${MSAN_SUPPORTED_ARCH})
endforeach()
add_compiler_rt_resource_file(msan_blacklist msan_blacklist.txt msan)
-add_dependencies(compiler-rt msan)
if(COMPILER_RT_INCLUDE_TESTS)
add_subdirectory(tests)
diff --git a/lib/msan/msan.h b/lib/msan/msan.h
index 1f2ff59ca686..0709260eebe2 100644
--- a/lib/msan/msan.h
+++ b/lib/msan/msan.h
@@ -42,27 +42,43 @@ struct MappingDesc {
#if SANITIZER_LINUX && defined(__mips64)
-// Everything is above 0x00e000000000.
+// MIPS64 maps:
+// - 0x0000000000-0x0200000000: the program's own segments
+// - 0xa200000000-0xc000000000: PIE program segments
+// - 0xe200000000-0xffffffffff: library segments.
const MappingDesc kMemoryLayout[] = {
- {0x000000000000ULL, 0x00a000000000ULL, MappingDesc::INVALID, "invalid"},
- {0x00a000000000ULL, 0x00c000000000ULL, MappingDesc::SHADOW, "shadow"},
- {0x00c000000000ULL, 0x00e000000000ULL, MappingDesc::ORIGIN, "origin"},
- {0x00e000000000ULL, 0x010000000000ULL, MappingDesc::APP, "app"}};
-
-#define MEM_TO_SHADOW(mem) (((uptr)(mem)) & ~0x4000000000ULL)
-#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x002000000000)
+ {0x000000000000ULL, 0x000200000000ULL, MappingDesc::APP, "app-1"},
+ {0x000200000000ULL, 0x002200000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x002200000000ULL, 0x004000000000ULL, MappingDesc::SHADOW, "shadow-2"},
+ {0x004000000000ULL, 0x004200000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x004200000000ULL, 0x006000000000ULL, MappingDesc::ORIGIN, "origin-2"},
+ {0x006000000000ULL, 0x006200000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x006200000000ULL, 0x008000000000ULL, MappingDesc::SHADOW, "shadow-3"},
+ {0x008000000000ULL, 0x008200000000ULL, MappingDesc::SHADOW, "shadow-1"},
+ {0x008200000000ULL, 0x00a000000000ULL, MappingDesc::ORIGIN, "origin-3"},
+ {0x00a000000000ULL, 0x00a200000000ULL, MappingDesc::ORIGIN, "origin-1"},
+ {0x00a200000000ULL, 0x00c000000000ULL, MappingDesc::APP, "app-2"},
+ {0x00c000000000ULL, 0x00e200000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x00e200000000ULL, 0x00ffffffffffULL, MappingDesc::APP, "app-3"}};
+
+#define MEM_TO_SHADOW(mem) (((uptr)(mem)) ^ 0x8000000000ULL)
+#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x2000000000ULL)
#elif SANITIZER_LINUX && defined(__aarch64__)
-// The mapping describes both 39-bits and 42-bits. AArch64 maps:
-// - 0x00000000000-0x00010000000: 39/42-bits program own segments
-// - 0x05500000000-0x05600000000: 39-bits PIE program segments
-// - 0x07f80000000-0x07fffffffff: 39-bits libraries segments
-// - 0x2aa00000000-0x2ab00000000: 42-bits PIE program segments
-// - 0x3ff00000000-0x3ffffffffff: 42-bits libraries segments
+// The mapping describes the 39-bit, 42-bit, and 48-bit VMAs. AArch64
+// maps:
+// - 0x0000000000000-0x0000010000000: 39/42/48-bit program's own segments
+// - 0x0005500000000-0x0005600000000: 39-bit PIE program segments
+// - 0x0007f80000000-0x0007fffffffff: 39-bit library segments
+// - 0x002aa00000000-0x002ab00000000: 42-bit PIE program segments
+// - 0x003ff00000000-0x003ffffffffff: 42-bit library segments
+// - 0x0aaaaa0000000-0x0aaab00000000: 48-bit PIE program segments
+// - 0xffff000000000-0x1000000000000: 48-bit library segments
// It is fragmented into multiple segments to increase the memory available
// on 42-bits (12.21% of total VMA available for 42-bits and 13.28% for
-// 39 bits).
+// 39 bits). The 48-bit segments only cover the usual PIE/default segments
+// plus some more segments (262144GB total, 0.39% total VMA).
const MappingDesc kMemoryLayout[] = {
{0x00000000000ULL, 0x01000000000ULL, MappingDesc::INVALID, "invalid"},
{0x01000000000ULL, 0x02000000000ULL, MappingDesc::SHADOW, "shadow-2"},
@@ -103,6 +119,42 @@ const MappingDesc kMemoryLayout[] = {
{0x3D000000000ULL, 0x3E000000000ULL, MappingDesc::SHADOW, "shadow-8"},
{0x3E000000000ULL, 0x3F000000000ULL, MappingDesc::ORIGIN, "origin-8"},
{0x3F000000000ULL, 0x40000000000ULL, MappingDesc::APP, "app-9"},
+ // The mappings below are used only for the 48-bit VMA.
+ // TODO(unknown): the 48-bit mapping only covers the usual PIE and non-PIE
+ // segments plus some more segments totaling 262144GB of VMA (which covers
+ // only 0.32% of all 48-bit VMA). Memory availability can be increased by
+ // adding multiple application segments as in the 39- and 42-bit mappings.
+ {0x0040000000000ULL, 0x0041000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0041000000000ULL, 0x0042000000000ULL, MappingDesc::APP, "app-10"},
+ {0x0042000000000ULL, 0x0047000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0047000000000ULL, 0x0048000000000ULL, MappingDesc::SHADOW, "shadow-10"},
+ {0x0048000000000ULL, 0x0049000000000ULL, MappingDesc::ORIGIN, "origin-10"},
+ {0x0049000000000ULL, 0x0050000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0050000000000ULL, 0x0051000000000ULL, MappingDesc::APP, "app-11"},
+ {0x0051000000000ULL, 0x0056000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0056000000000ULL, 0x0057000000000ULL, MappingDesc::SHADOW, "shadow-11"},
+ {0x0057000000000ULL, 0x0058000000000ULL, MappingDesc::ORIGIN, "origin-11"},
+ {0x0058000000000ULL, 0x0059000000000ULL, MappingDesc::APP, "app-12"},
+ {0x0059000000000ULL, 0x005E000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x005E000000000ULL, 0x005F000000000ULL, MappingDesc::SHADOW, "shadow-12"},
+ {0x005F000000000ULL, 0x0060000000000ULL, MappingDesc::ORIGIN, "origin-12"},
+ {0x0060000000000ULL, 0x0061000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0061000000000ULL, 0x0062000000000ULL, MappingDesc::APP, "app-13"},
+ {0x0062000000000ULL, 0x0067000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0067000000000ULL, 0x0068000000000ULL, MappingDesc::SHADOW, "shadow-13"},
+ {0x0068000000000ULL, 0x0069000000000ULL, MappingDesc::ORIGIN, "origin-13"},
+ {0x0069000000000ULL, 0x0AAAAA0000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0AAAAA0000000ULL, 0x0AAAB00000000ULL, MappingDesc::APP, "app-14"},
+ {0x0AAAB00000000ULL, 0x0AACAA0000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0AACAA0000000ULL, 0x0AACB00000000ULL, MappingDesc::SHADOW, "shadow-14"},
+ {0x0AACB00000000ULL, 0x0AADAA0000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0AADAA0000000ULL, 0x0AADB00000000ULL, MappingDesc::ORIGIN, "origin-14"},
+ {0x0AADB00000000ULL, 0x0FF9F00000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0FF9F00000000ULL, 0x0FFA000000000ULL, MappingDesc::SHADOW, "shadow-15"},
+ {0x0FFA000000000ULL, 0x0FFAF00000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0FFAF00000000ULL, 0x0FFB000000000ULL, MappingDesc::ORIGIN, "origin-15"},
+ {0x0FFB000000000ULL, 0x0FFFF00000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0FFFF00000000ULL, 0x1000000000000ULL, MappingDesc::APP, "app-15"},
};
# define MEM_TO_SHADOW(mem) ((uptr)mem ^ 0x6000000000ULL)
# define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x1000000000ULL)
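Each app segment in the AArch64 (and MIPS64) tables above is paired with a shadow segment reachable by XOR-ing one constant, and an origin segment one fixed offset beyond that. A worked example under the AArch64 constants just defined (the sample address is arbitrary):

  // 0x0041234567000 lies in "app-10"; XOR with 0x6000000000 lands in
  // "shadow-10", and adding 0x1000000000 from there lands in "origin-10".
  typedef unsigned long long u64_example;                 // stand-in for uptr

  const u64_example kApp    = 0x0041234567000ULL;         // app-10
  const u64_example kShadow = kApp ^ 0x6000000000ULL;     // 0x0047234567000, shadow-10
  const u64_example kOrigin = kShadow + 0x1000000000ULL;  // 0x0048234567000, origin-10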
@@ -277,11 +329,20 @@ const int STACK_TRACE_TAG_POISON = StackTrace::TAG_CUSTOM + 1;
StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \
common_flags()->fast_unwind_on_malloc)
+// On platforms that support only the slow unwinder, we restrict the store
+// context size to 1, i.e. we store only the current pc. We do this because the
+// slow unwinder, which is based on libunwind, is not async-signal-safe and
+// causes random freezes in forking applications as well as in signal handlers.
#define GET_STORE_STACK_TRACE_PC_BP(pc, bp) \
BufferedStackTrace stack; \
- if (__msan_get_track_origins() > 1 && msan_inited) \
- GetStackTrace(&stack, flags()->store_context_size, pc, bp, \
- common_flags()->fast_unwind_on_malloc)
+ if (__msan_get_track_origins() > 1 && msan_inited) { \
+ if (!SANITIZER_CAN_FAST_UNWIND) \
+ GetStackTrace(&stack, Min(1, flags()->store_context_size), pc, bp, \
+ false); \
+ else \
+ GetStackTrace(&stack, flags()->store_context_size, pc, bp, \
+ common_flags()->fast_unwind_on_malloc); \
+ }
#define GET_FATAL_STACK_TRACE_PC_BP(pc, bp) \
BufferedStackTrace stack; \
diff --git a/lib/msan/msan_allocator.cc b/lib/msan/msan_allocator.cc
index b7d394729bfc..6c389f008cf7 100644
--- a/lib/msan/msan_allocator.cc
+++ b/lib/msan/msan_allocator.cc
@@ -33,9 +33,12 @@ struct MsanMapUnmapCallback {
// We are about to unmap a chunk of user memory.
// Mark the corresponding shadow memory as not needed.
- FlushUnneededShadowMemory(MEM_TO_SHADOW(p), size);
- if (__msan_get_track_origins())
- FlushUnneededShadowMemory(MEM_TO_ORIGIN(p), size);
+ uptr shadow_p = MEM_TO_SHADOW(p);
+ ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
+ if (__msan_get_track_origins()) {
+ uptr origin_p = MEM_TO_ORIGIN(p);
+ ReleaseMemoryPagesToOS(origin_p, origin_p + size);
+ }
}
};
@@ -56,23 +59,32 @@ struct MsanMapUnmapCallback {
#else
static const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
- static const uptr kAllocatorSize = 0x80000000000; // 8T.
- static const uptr kMetadataSize = sizeof(Metadata);
static const uptr kMaxAllowedMallocSize = 8UL << 30;
- typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, kMetadataSize,
- DefaultSizeClassMap,
- MsanMapUnmapCallback> PrimaryAllocator;
+ struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = kAllocatorSpace;
+ static const uptr kSpaceSize = 0x40000000000; // 4T.
+ static const uptr kMetadataSize = sizeof(Metadata);
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef MsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ };
+
+ typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__powerpc64__)
- static const uptr kAllocatorSpace = 0x300000000000;
- static const uptr kAllocatorSize = 0x020000000000; // 2T
- static const uptr kMetadataSize = sizeof(Metadata);
static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
- typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, kMetadataSize,
- DefaultSizeClassMap,
- MsanMapUnmapCallback> PrimaryAllocator;
+ struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = 0x300000000000;
+ static const uptr kSpaceSize = 0x020000000000; // 2T.
+ static const uptr kMetadataSize = sizeof(Metadata);
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef MsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ };
+
+ typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
static const uptr kRegionSizeLog = 20;
@@ -94,7 +106,9 @@ static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
void MsanAllocatorInit() {
- allocator.Init(common_flags()->allocator_may_return_null);
+ allocator.Init(
+ common_flags()->allocator_may_return_null,
+ common_flags()->allocator_release_to_os_interval_ms);
}
AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
@@ -112,7 +126,7 @@ static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
if (size > kMaxAllowedMallocSize) {
Report("WARNING: MemorySanitizer failed to allocate %p bytes\n",
(void *)size);
- return allocator.ReturnNullOrDie();
+ return allocator.ReturnNullOrDieOnBadRequest();
}
MsanThread *t = GetCurrentThread();
void *allocated;
@@ -170,7 +184,7 @@ void MsanDeallocate(StackTrace *stack, void *p) {
void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb))
- return allocator.ReturnNullOrDie();
+ return allocator.ReturnNullOrDieOnBadRequest();
return MsanReallocate(stack, nullptr, nmemb * size, sizeof(u64), true);
}
diff --git a/lib/msan/msan_interceptors.cc b/lib/msan/msan_interceptors.cc
index f23d3eeb3eda..6447bb1b270e 100644
--- a/lib/msan/msan_interceptors.cc
+++ b/lib/msan/msan_interceptors.cc
@@ -45,6 +45,8 @@ using __sanitizer::atomic_uintptr_t;
DECLARE_REAL(SIZE_T, strlen, const char *s)
DECLARE_REAL(SIZE_T, strnlen, const char *s, SIZE_T maxlen)
+DECLARE_REAL(void *, memcpy, void *dest, const void *src, uptr n)
+DECLARE_REAL(void *, memset, void *dest, int c, uptr n)
#if SANITIZER_FREEBSD
#define __errno_location __error
@@ -64,6 +66,23 @@ bool IsInInterceptorScope() {
return in_interceptor_scope;
}
+static uptr allocated_for_dlsym;
+static const uptr kDlsymAllocPoolSize = 1024;
+static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
+
+static bool IsInDlsymAllocPool(const void *ptr) {
+ uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
+ return off < sizeof(alloc_memory_for_dlsym);
+}
+
+static void *AllocateFromLocalPool(uptr size_in_bytes) {
+ uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize;
+ void *mem = (void *)&alloc_memory_for_dlsym[allocated_for_dlsym];
+ allocated_for_dlsym += size_in_words;
+ CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize);
+ return mem;
+}
+
#define ENSURE_MSAN_INITED() do { \
CHECK(!msan_init_is_running); \
if (!msan_inited) { \
@@ -135,10 +154,6 @@ INTERCEPTOR(SSIZE_T, readlink, const char *path, char *buf, SIZE_T bufsiz) {
return res;
}
-INTERCEPTOR(void *, memcpy, void *dest, const void *src, SIZE_T n) {
- return __msan_memcpy(dest, src, n);
-}
-
INTERCEPTOR(void *, mempcpy, void *dest, const void *src, SIZE_T n) {
return (char *)__msan_memcpy(dest, src, n) + n;
}
@@ -153,14 +168,6 @@ INTERCEPTOR(void *, memccpy, void *dest, const void *src, int c, SIZE_T n) {
return res;
}
-INTERCEPTOR(void *, memmove, void *dest, const void *src, SIZE_T n) {
- return __msan_memmove(dest, src, n);
-}
-
-INTERCEPTOR(void *, memset, void *s, int c, SIZE_T n) {
- return __msan_memset(s, c, n);
-}
-
INTERCEPTOR(void *, bcopy, const void *src, void *dest, SIZE_T n) {
return __msan_memmove(dest, src, n);
}
@@ -227,14 +234,14 @@ INTERCEPTOR(void *, pvalloc, SIZE_T size) {
INTERCEPTOR(void, free, void *ptr) {
GET_MALLOC_STACK_TRACE;
- if (!ptr) return;
+ if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return;
MsanDeallocate(&stack, ptr);
}
#if !SANITIZER_FREEBSD
INTERCEPTOR(void, cfree, void *ptr) {
GET_MALLOC_STACK_TRACE;
- if (!ptr) return;
+ if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return;
MsanDeallocate(&stack, ptr);
}
#define MSAN_MAYBE_INTERCEPT_CFREE INTERCEPT_FUNCTION(cfree)
@@ -907,27 +914,35 @@ INTERCEPTOR(int, epoll_pwait, int epfd, void *events, int maxevents,
INTERCEPTOR(void *, calloc, SIZE_T nmemb, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
- if (UNLIKELY(!msan_inited)) {
+ if (UNLIKELY(!msan_inited))
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
- const SIZE_T kCallocPoolSize = 1024;
- static uptr calloc_memory_for_dlsym[kCallocPoolSize];
- static SIZE_T allocated;
- SIZE_T size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize;
- void *mem = (void*)&calloc_memory_for_dlsym[allocated];
- allocated += size_in_words;
- CHECK(allocated < kCallocPoolSize);
- return mem;
- }
+ return AllocateFromLocalPool(nmemb * size);
return MsanCalloc(&stack, nmemb, size);
}
INTERCEPTOR(void *, realloc, void *ptr, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
+ if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
+ uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
+ uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
+ void *new_ptr;
+ if (UNLIKELY(!msan_inited)) {
+ new_ptr = AllocateFromLocalPool(copy_size);
+ } else {
+ copy_size = size;
+ new_ptr = MsanReallocate(&stack, nullptr, copy_size, sizeof(u64), false);
+ }
+ internal_memcpy(new_ptr, ptr, copy_size);
+ return new_ptr;
+ }
return MsanReallocate(&stack, ptr, size, sizeof(u64), false);
}
INTERCEPTOR(void *, malloc, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
+ if (UNLIKELY(!msan_inited))
+ // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
+ return AllocateFromLocalPool(size);
return MsanReallocate(&stack, nullptr, size, sizeof(u64), false);
}
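The calloc/realloc/malloc changes above exist because dlsym itself allocates while MSan is still resolving the real allocator; those earliest requests are served from the static alloc_memory_for_dlsym array with a bump pointer, and free/realloc recognize such blocks by address range. A stripped-down sketch of the same bootstrap-pool pattern, independent of the MSan internals (names are illustrative):

  // Minimal bump-pool sketch of the dlsym bootstrap hack. The real code above
  // additionally rounds to word multiples and CHECKs for pool overflow.
  static unsigned char g_pool[8192];
  static unsigned long g_used;

  static void *PoolAlloc(unsigned long n) {
    unsigned long rounded = (n + 7) & ~7UL;           // keep 8-byte alignment
    if (g_used + rounded > sizeof(g_pool)) return 0;  // pool exhausted
    void *p = &g_pool[g_used];
    g_used += rounded;
    return p;
  }

  static bool FromPool(const void *p) {  // lets free()/realloc() skip pool blocks
    const unsigned char *c = (const unsigned char *)p;
    return c >= g_pool && c < g_pool + sizeof(g_pool);
  }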
@@ -1329,11 +1344,23 @@ int OnExit() {
*begin = *end = 0; \
}
+#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \
+ { \
+ (void)ctx; \
+ return __msan_memset(block, c, size); \
+ }
+#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \
+ { \
+ (void)ctx; \
+ return __msan_memmove(to, from, size); \
+ }
+#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \
+ { \
+ (void)ctx; \
+ return __msan_memcpy(to, from, size); \
+ }
+
#include "sanitizer_common/sanitizer_platform_interceptors.h"
-// Msan needs custom handling of these:
-#undef SANITIZER_INTERCEPT_MEMSET
-#undef SANITIZER_INTERCEPT_MEMMOVE
-#undef SANITIZER_INTERCEPT_MEMCPY
#include "sanitizer_common/sanitizer_common_interceptors.inc"
#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) CHECK_UNPOISONED(p, s)
@@ -1489,11 +1516,8 @@ void InitializeInterceptors() {
INTERCEPT_FUNCTION(fread);
MSAN_MAYBE_INTERCEPT_FREAD_UNLOCKED;
INTERCEPT_FUNCTION(readlink);
- INTERCEPT_FUNCTION(memcpy);
INTERCEPT_FUNCTION(memccpy);
INTERCEPT_FUNCTION(mempcpy);
- INTERCEPT_FUNCTION(memset);
- INTERCEPT_FUNCTION(memmove);
INTERCEPT_FUNCTION(bcopy);
INTERCEPT_FUNCTION(wmemset);
INTERCEPT_FUNCTION(wmemcpy);
diff --git a/lib/msan/msan_interface_internal.h b/lib/msan/msan_interface_internal.h
index c1e02ce72bf4..c6990db243c1 100644
--- a/lib/msan/msan_interface_internal.h
+++ b/lib/msan/msan_interface_internal.h
@@ -37,6 +37,16 @@ void __msan_warning();
SANITIZER_INTERFACE_ATTRIBUTE __attribute__((noreturn))
void __msan_warning_noreturn();
+using __sanitizer::uptr;
+using __sanitizer::sptr;
+using __sanitizer::uu64;
+using __sanitizer::uu32;
+using __sanitizer::uu16;
+using __sanitizer::u64;
+using __sanitizer::u32;
+using __sanitizer::u16;
+using __sanitizer::u8;
+
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_maybe_warning_1(u8 s, u32 o);
SANITIZER_INTERFACE_ATTRIBUTE
diff --git a/lib/msan/msan_linux.cc b/lib/msan/msan_linux.cc
index d6a95889ad0f..0a687f620c94 100644
--- a/lib/msan/msan_linux.cc
+++ b/lib/msan/msan_linux.cc
@@ -66,7 +66,8 @@ static bool ProtectMemoryRange(uptr beg, uptr size, const char *name) {
}
if ((uptr)addr != beg) {
uptr end = beg + size - 1;
- Printf("FATAL: Cannot protect memory range %p - %p.\n", beg, end);
+ Printf("FATAL: Cannot protect memory range %p - %p (%s).\n", beg, end,
+ name);
return false;
}
}
diff --git a/lib/profile/CMakeLists.txt b/lib/profile/CMakeLists.txt
index ccf79d7e7267..006285b34943 100644
--- a/lib/profile/CMakeLists.txt
+++ b/lib/profile/CMakeLists.txt
@@ -38,8 +38,7 @@ int main() {
" COMPILER_RT_TARGET_HAS_FCNTL_LCK)
-add_custom_target(profile)
-set_target_properties(profile PROPERTIES FOLDER "Compiler-RT Misc")
+add_compiler_rt_component(profile)
set(PROFILE_SOURCES
GCDAProfiling.c
@@ -99,5 +98,3 @@ else()
SOURCES ${PROFILE_SOURCES}
PARENT_TARGET profile)
endif()
-
-add_dependencies(compiler-rt profile)
diff --git a/lib/profile/GCDAProfiling.c b/lib/profile/GCDAProfiling.c
index 2756084f5fd3..138af6ec4033 100644
--- a/lib/profile/GCDAProfiling.c
+++ b/lib/profile/GCDAProfiling.c
@@ -20,7 +20,6 @@
|*
\*===----------------------------------------------------------------------===*/
-#include "InstrProfilingInternal.h"
#include "InstrProfilingPort.h"
#include "InstrProfilingUtil.h"
@@ -35,6 +34,9 @@
#else
#include <sys/mman.h>
#include <sys/file.h>
+#ifndef MAP_FILE
+#define MAP_FILE 0
+#endif
#endif
#if defined(__FreeBSD__) && defined(__i386__)
diff --git a/lib/profile/InstrProfData.inc b/lib/profile/InstrProfData.inc
index 93d14ac4f6f9..f7c22d10763c 100644
--- a/lib/profile/InstrProfData.inc
+++ b/lib/profile/InstrProfData.inc
@@ -72,7 +72,7 @@
#endif
INSTR_PROF_DATA(const uint64_t, llvm::Type::getInt64Ty(Ctx), NameRef, \
ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
- IndexedInstrProf::ComputeHash(getPGOFuncNameVarInitializer(Inc->getName()))))
+ IndexedInstrProf::ComputeHash(getPGOFuncNameVarInitializer(Inc->getName()))))
INSTR_PROF_DATA(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \
ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
Inc->getHash()->getZExtValue()))
@@ -204,7 +204,7 @@ COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), NameSize, \
#else
COVMAP_FUNC_RECORD(const int64_t, llvm::Type::getInt64Ty(Ctx), NameRef, \
llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
- llvm::IndexedInstrProf::ComputeHash(NameValue)))
+ llvm::IndexedInstrProf::ComputeHash(NameValue)))
#endif
COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), DataSize, \
llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx),\
@@ -603,7 +603,12 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
#define VARIANT_MASKS_ALL 0xff00000000000000ULL
#define GET_VERSION(V) ((V) & ~VARIANT_MASKS_ALL)
#define VARIANT_MASK_IR_PROF (0x1ULL << 56)
-#define IR_LEVEL_PROF_VERSION_VAR __llvm_profile_raw_version
+#define INSTR_PROF_RAW_VERSION_VAR __llvm_profile_raw_version
+#define INSTR_PROF_PROFILE_RUNTIME_VAR __llvm_profile_runtime
+
+/* The variable that holds the name of the profile data
+ * specified via command line. */
+#define INSTR_PROF_PROFILE_NAME_VAR __llvm_profile_filename
/* Runtime section names and name strings. */
#define INSTR_PROF_DATA_SECT_NAME __llvm_prf_data
diff --git a/lib/profile/InstrProfiling.c b/lib/profile/InstrProfiling.c
index c763a44233a0..6828a3d27f34 100644
--- a/lib/profile/InstrProfiling.c
+++ b/lib/profile/InstrProfiling.c
@@ -16,15 +16,26 @@
#define INSTR_PROF_VALUE_PROF_DATA
#include "InstrProfData.inc"
-COMPILER_RT_VISIBILITY char *(*GetEnvHook)(const char *) = 0;
-COMPILER_RT_WEAK uint64_t __llvm_profile_raw_version = INSTR_PROF_RAW_VERSION;
+COMPILER_RT_WEAK uint64_t INSTR_PROF_RAW_VERSION_VAR = INSTR_PROF_RAW_VERSION;
+
+COMPILER_RT_WEAK char INSTR_PROF_PROFILE_NAME_VAR[1] = {0};
COMPILER_RT_VISIBILITY uint64_t __llvm_profile_get_magic(void) {
return sizeof(void *) == sizeof(uint64_t) ? (INSTR_PROF_RAW_MAGIC_64)
: (INSTR_PROF_RAW_MAGIC_32);
}
+static unsigned ProfileDumped = 0;
+
+COMPILER_RT_VISIBILITY unsigned lprofProfileDumped() {
+ return ProfileDumped;
+}
+
+COMPILER_RT_VISIBILITY void lprofSetProfileDumped() {
+ ProfileDumped = 1;
+}
+
/* Return the number of bytes needed to add to SizeInBytes to make it
* the result a multiple of 8.
*/
@@ -66,4 +77,5 @@ COMPILER_RT_VISIBILITY void __llvm_profile_reset_counters(void) {
}
}
}
+ ProfileDumped = 0;
}
diff --git a/lib/profile/InstrProfiling.h b/lib/profile/InstrProfiling.h
index b23bed8ea3a8..945f1c4ac38d 100644
--- a/lib/profile/InstrProfiling.h
+++ b/lib/profile/InstrProfiling.h
@@ -112,35 +112,43 @@ void INSTR_PROF_VALUE_PROF_FUNC(
* Writes to the file with the last name given to \a *
* __llvm_profile_set_filename(),
* or if it hasn't been called, the \c LLVM_PROFILE_FILE environment variable,
- * or if that's not set, the last name given to
- * \a __llvm_profile_override_default_filename(), or if that's not set,
- * \c "default.profraw".
+ * or if that's not set, the last name set to INSTR_PROF_PROFILE_NAME_VAR,
+ * or if that's not set, \c "default.profraw".
*/
int __llvm_profile_write_file(void);
/*!
- * \brief Set the filename for writing instrumentation data.
+ * \brief This is a wrapper interface to \c __llvm_profile_write_file.
+ * After this interface is invoked, an already-dumped flag will be set
+ * so that the profile won't be dumped again during program exit.
+ * Invoking the interface __llvm_profile_reset_counters will clear
+ * the flag. This interface is designed for collecting profile
+ * data from user-selected hot regions. The use model is
+ * __llvm_profile_reset_counters();
+ * ... hot region 1
+ * __llvm_profile_dump();
+ * .. some other code
+ * __llvm_profile_reset_counters();
+ * ... hot region 2
+ * __llvm_profile_dump();
*
- * Sets the filename to be used for subsequent calls to
- * \a __llvm_profile_write_file().
- *
- * \c Name is not copied, so it must remain valid. Passing NULL resets the
- * filename logic to the default behaviour.
+ * It is expected that online profile merging is on, with the \c %m specifier
+ * used in the profile filename. If merging is not turned on, the user is
+ * expected to invoke __llvm_profile_set_filename to specify different profile
+ * names for different regions before dumping, to avoid profile write clobbering.
*/
-void __llvm_profile_set_filename(const char *Name);
+int __llvm_profile_dump(void);
/*!
- * \brief Set the filename for writing instrumentation data, unless the
- * \c LLVM_PROFILE_FILE environment variable was set.
+ * \brief Set the filename for writing instrumentation data.
*
- * Unless overridden, sets the filename to be used for subsequent calls to
+ * Sets the filename to be used for subsequent calls to
* \a __llvm_profile_write_file().
*
* \c Name is not copied, so it must remain valid. Passing NULL resets the
- * filename logic to the default behaviour (unless the \c LLVM_PROFILE_FILE
- * was set in which case it has no effect).
+ * filename logic to the default behaviour.
*/
-void __llvm_profile_override_default_filename(const char *Name);
+void __llvm_profile_set_filename(const char *Name);
/*! \brief Register to write instrumentation data to file at exit. */
int __llvm_profile_register_write_file_atexit(void);
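Following the use model spelled out in the comment above, an instrumented application that wants per-region profiles could be structured roughly as below (a hedged sketch: RunHotRegion1/RunHotRegion2 are placeholders, and online merging via %m in the filename is assumed so successive dumps do not clobber each other):

  extern "C" {
  void __llvm_profile_reset_counters(void);
  int __llvm_profile_dump(void);
  }

  void RunHotRegion1();                  // placeholder user code
  void RunHotRegion2();                  // placeholder user code

  void ProfileHotRegions() {
    __llvm_profile_reset_counters();     // drop counts accumulated so far
    RunHotRegion1();                     // ... hot region 1
    __llvm_profile_dump();               // write profile, set already-dumped flag

    __llvm_profile_reset_counters();     // also clears the already-dumped flag
    RunHotRegion2();                     // ... hot region 2
    __llvm_profile_dump();
  }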
@@ -148,6 +156,16 @@ int __llvm_profile_register_write_file_atexit(void);
/*! \brief Initialize file handling. */
void __llvm_profile_initialize_file(void);
+/*!
+ * \brief Return the path prefix (excluding the base filename) of the profile data.
+ * This is useful for users who pass \c -fprofile-generate=./path_prefix and do
+ * not care about the default raw profile name. It is also useful for collecting
+ * more than one profile data file dumped into the same directory (e.g. when
+ * online merge mode is turned on for instrumented programs with shared libs).
+ * Side effect: this API call will invoke malloc to dynamically allocate memory.
+ */
+const char *__llvm_profile_get_path_prefix();
+
/*! \brief Get the magic token for the file format. */
uint64_t __llvm_profile_get_magic(void);
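One plausible use of __llvm_profile_get_path_prefix, sketched under the assumption that it may return an empty string when no directory component is present (as the implementation later in this diff does): build the name of a sidecar file in the same directory as the raw profiles. WriteSidecarName and "notes.txt" are illustrative, not part of the runtime.

  #include <stddef.h>
  #include <stdio.h>

  extern "C" const char *__llvm_profile_get_path_prefix(void);

  static void WriteSidecarName(char *out, size_t out_size) {
    const char *prefix = __llvm_profile_get_path_prefix();  // e.g. "./path_prefix/"
    snprintf(out, out_size, "%snotes.txt", prefix);
  }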
@@ -166,8 +184,8 @@ uint64_t __llvm_profile_get_data_size(const __llvm_profile_data *Begin,
* Note that this variable's visibility needs to be hidden so that the
* definition of this variable in an instrumented shared library won't
* affect runtime initialization decision of the main program.
- */
-COMPILER_RT_VISIBILITY extern int __llvm_profile_runtime;
+ * __llvm_profile_profile_runtime. */
+COMPILER_RT_VISIBILITY extern int INSTR_PROF_PROFILE_RUNTIME_VAR;
/*!
* This variable is defined in InstrProfiling.c. Its main purpose is to
@@ -179,6 +197,13 @@ COMPILER_RT_VISIBILITY extern int __llvm_profile_runtime;
* main program are expected to be instrumented in the same way), there is
* no need for this variable to be hidden.
*/
-extern uint64_t __llvm_profile_raw_version;
+extern uint64_t INSTR_PROF_RAW_VERSION_VAR; /* __llvm_profile_raw_version */
+
+/*!
+ * This variable is a weak symbol defined in InstrProfiling.c. It allows
+ * compiler instrumentation to provide overriding definition with value
+ * from compiler command line. This variable has default visibility.
+ */
+extern char INSTR_PROF_PROFILE_NAME_VAR[1]; /* __llvm_profile_filename. */
#endif /* PROFILE_INSTRPROFILING_H_ */
diff --git a/lib/profile/InstrProfilingFile.c b/lib/profile/InstrProfilingFile.c
index 32762d14eef2..f82080c98aac 100644
--- a/lib/profile/InstrProfilingFile.c
+++ b/lib/profile/InstrProfilingFile.c
@@ -59,10 +59,14 @@ static const char *getPNSStr(ProfileNameSpecifier PNS) {
}
#define MAX_PID_SIZE 16
-/* Data structure holding the result of parsed filename pattern. */
+/* Data structure holding the result of parsed filename pattern. */
typedef struct lprofFilename {
/* File name string possibly with %p or %h specifiers. */
const char *FilenamePat;
+ /* A flag indicating if FilenamePat's memory is allocated
+ * by runtime. */
+ unsigned OwnsFilenamePat;
+ const char *ProfilePathPrefix;
char PidChars[MAX_PID_SIZE];
char Hostname[COMPILER_RT_MAX_HOSTLEN];
unsigned NumPids;
@@ -78,7 +82,8 @@ typedef struct lprofFilename {
ProfileNameSpecifier PNS;
} lprofFilename;
-lprofFilename lprofCurFilename = {0, {0}, {0}, 0, 0, 0, PNS_unknown};
+COMPILER_RT_WEAK lprofFilename lprofCurFilename = {0, 0, 0, {0}, {0},
+ 0, 0, 0, PNS_unknown};
int getpid(void);
static int getCurFilenameLength();
@@ -229,16 +234,17 @@ static void truncateCurrentFile(void) {
return;
/* Create the directory holding the file, if needed. */
- if (strchr(Filename, DIR_SEPARATOR)
-#if defined(DIR_SEPARATOR_2)
- || strchr(Filename, DIR_SEPARATOR_2)
-#endif
- ) {
+ if (lprofFindFirstDirSeparator(Filename)) {
char *Copy = (char *)COMPILER_RT_ALLOCA(Length + 1);
strncpy(Copy, Filename, Length + 1);
__llvm_profile_recursive_mkdir(Copy);
}
+ /* Bypass file truncation to allow online raw profile
+ * merging. */
+ if (lprofCurFilename.MergePoolSize)
+ return;
+
/* Truncate the file. Later we'll reopen and append. */
File = fopen(Filename, "w");
if (!File)
@@ -248,6 +254,9 @@ static void truncateCurrentFile(void) {
static const char *DefaultProfileName = "default.profraw";
static void resetFilenameToDefault(void) {
+ if (lprofCurFilename.FilenamePat && lprofCurFilename.OwnsFilenamePat) {
+ free((void *)lprofCurFilename.FilenamePat);
+ }
memset(&lprofCurFilename, 0, sizeof(lprofCurFilename));
lprofCurFilename.FilenamePat = DefaultProfileName;
lprofCurFilename.PNS = PNS_default;
@@ -263,31 +272,46 @@ static int containsMergeSpecifier(const char *FilenamePat, int I) {
/* Parses the pattern string \p FilenamePat and stores the result to
* lprofcurFilename structure. */
-static int parseFilenamePattern(const char *FilenamePat) {
+static int parseFilenamePattern(const char *FilenamePat,
+ unsigned CopyFilenamePat) {
int NumPids = 0, NumHosts = 0, I;
char *PidChars = &lprofCurFilename.PidChars[0];
char *Hostname = &lprofCurFilename.Hostname[0];
int MergingEnabled = 0;
- lprofCurFilename.FilenamePat = FilenamePat;
+ /* Clean up cached prefix. */
+ if (lprofCurFilename.ProfilePathPrefix)
+ free((void *)lprofCurFilename.ProfilePathPrefix);
+ memset(&lprofCurFilename, 0, sizeof(lprofCurFilename));
+
+ if (lprofCurFilename.FilenamePat && lprofCurFilename.OwnsFilenamePat) {
+ free((void *)lprofCurFilename.FilenamePat);
+ }
+
+ if (!CopyFilenamePat)
+ lprofCurFilename.FilenamePat = FilenamePat;
+ else {
+ lprofCurFilename.FilenamePat = strdup(FilenamePat);
+ lprofCurFilename.OwnsFilenamePat = 1;
+ }
/* Check the filename for "%p", which indicates a pid-substitution. */
for (I = 0; FilenamePat[I]; ++I)
if (FilenamePat[I] == '%') {
if (FilenamePat[++I] == 'p') {
if (!NumPids++) {
if (snprintf(PidChars, MAX_PID_SIZE, "%d", getpid()) <= 0) {
- PROF_WARN(
- "Unable to parse filename pattern %s. Using the default name.",
- FilenamePat);
+ PROF_WARN("Unable to get pid for filename pattern %s. Using the "
+ "default name.",
+ FilenamePat);
return -1;
}
}
} else if (FilenamePat[I] == 'h') {
if (!NumHosts++)
if (COMPILER_RT_GETHOSTNAME(Hostname, COMPILER_RT_MAX_HOSTLEN)) {
- PROF_WARN(
- "Unable to parse filename pattern %s. Using the default name.",
- FilenamePat);
+ PROF_WARN("Unable to get hostname for filename pattern %s. Using "
+ "the default name.",
+ FilenamePat);
return -1;
}
} else if (containsMergeSpecifier(FilenamePat, I)) {
@@ -312,7 +336,8 @@ static int parseFilenamePattern(const char *FilenamePat) {
}
static void parseAndSetFilename(const char *FilenamePat,
- ProfileNameSpecifier PNS) {
+ ProfileNameSpecifier PNS,
+ unsigned CopyFilenamePat) {
const char *OldFilenamePat = lprofCurFilename.FilenamePat;
ProfileNameSpecifier OldPNS = lprofCurFilename.PNS;
@@ -323,33 +348,28 @@ static void parseAndSetFilename(const char *FilenamePat,
if (!FilenamePat)
FilenamePat = DefaultProfileName;
- /* When -fprofile-instr-generate=<path> is specified on the
- * command line, each module will be instrumented with runtime
- * init call to __llvm_profile_init function which calls
- * __llvm_profile_override_default_filename. In most of the cases,
- * the path will be identical, so bypass the parsing completely.
- */
if (OldFilenamePat && !strcmp(OldFilenamePat, FilenamePat)) {
lprofCurFilename.PNS = PNS;
return;
}
/* When PNS >= OldPNS, the last one wins. */
- if (!FilenamePat || parseFilenamePattern(FilenamePat))
+ if (!FilenamePat || parseFilenamePattern(FilenamePat, CopyFilenamePat))
resetFilenameToDefault();
lprofCurFilename.PNS = PNS;
if (!OldFilenamePat) {
- PROF_NOTE("Set profile file path to \"%s\" via %s.\n",
- lprofCurFilename.FilenamePat, getPNSStr(PNS));
+ if (getenv("LLVM_PROFILE_VERBOSE"))
+ PROF_NOTE("Set profile file path to \"%s\" via %s.\n",
+ lprofCurFilename.FilenamePat, getPNSStr(PNS));
} else {
- PROF_NOTE("Override old profile path \"%s\" via %s to \"%s\" via %s.\n",
- OldFilenamePat, getPNSStr(OldPNS), lprofCurFilename.FilenamePat,
- getPNSStr(PNS));
+ if (getenv("LLVM_PROFILE_VERBOSE"))
+ PROF_NOTE("Override old profile path \"%s\" via %s to \"%s\" via %s.\n",
+ OldFilenamePat, getPNSStr(OldPNS), lprofCurFilename.FilenamePat,
+ getPNSStr(PNS));
}
- if (!lprofCurFilename.MergePoolSize)
- truncateCurrentFile();
+ truncateCurrentFile();
}
/* Return buffer length that is required to store the current profile
@@ -429,16 +449,61 @@ static const char *getFilenamePatFromEnv(void) {
return Filename;
}
+COMPILER_RT_VISIBILITY
+const char *__llvm_profile_get_path_prefix(void) {
+ int Length;
+ char *FilenameBuf, *Prefix;
+ const char *Filename, *PrefixEnd;
+
+ if (lprofCurFilename.ProfilePathPrefix)
+ return lprofCurFilename.ProfilePathPrefix;
+
+ Length = getCurFilenameLength();
+ FilenameBuf = (char *)COMPILER_RT_ALLOCA(Length + 1);
+ Filename = getCurFilename(FilenameBuf);
+ if (!Filename)
+ return "\0";
+
+ PrefixEnd = lprofFindLastDirSeparator(Filename);
+ if (!PrefixEnd)
+ return "\0";
+
+ Length = PrefixEnd - Filename + 1;
+ Prefix = (char *)malloc(Length + 1);
+ if (!Prefix) {
+ PROF_ERR("Failed to %s\n", "allocate memory.");
+ return "\0";
+ }
+ memcpy(Prefix, Filename, Length);
+ Prefix[Length] = '\0';
+ lprofCurFilename.ProfilePathPrefix = Prefix;
+ return Prefix;
+}
+
/* This method is invoked by the runtime initialization hook
* InstrProfilingRuntime.o if it is linked in. Both user specified
* profile path via -fprofile-instr-generate= and LLVM_PROFILE_FILE
* environment variable can override this default value. */
COMPILER_RT_VISIBILITY
void __llvm_profile_initialize_file(void) {
- const char *FilenamePat;
+ const char *EnvFilenamePat;
+ const char *SelectedPat = NULL;
+ ProfileNameSpecifier PNS = PNS_unknown;
+ int hasCommandLineOverrider = (INSTR_PROF_PROFILE_NAME_VAR[0] != 0);
+
+ EnvFilenamePat = getFilenamePatFromEnv();
+ if (EnvFilenamePat) {
+ SelectedPat = EnvFilenamePat;
+ PNS = PNS_environment;
+ } else if (hasCommandLineOverrider) {
+ SelectedPat = INSTR_PROF_PROFILE_NAME_VAR;
+ PNS = PNS_command_line;
+ } else {
+ SelectedPat = NULL;
+ PNS = PNS_default;
+ }
- FilenamePat = getFilenamePatFromEnv();
- parseAndSetFilename(FilenamePat, FilenamePat ? PNS_environment : PNS_default);
+ parseAndSetFilename(SelectedPat, PNS, 0);
}
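With the initialization above, the effective precedence is: the LLVM_PROFILE_FILE environment variable, then the -fprofile-generate= path the compiler bakes into __llvm_profile_filename, then the built-in default. Applications can still override the name at run time; a hedged sketch using the %p/%h/%m specifiers handled by the pattern parser above (the literal pattern is just an example):

  extern "C" void __llvm_profile_set_filename(const char *Name);

  static void UseCustomProfileName() {
    // %p expands to the pid, %h to the hostname, %m enables online merging.
    // Takes effect for subsequent __llvm_profile_write_file()/dump() calls.
    __llvm_profile_set_filename("prof/%h-%p-%m.profraw");
  }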
/* This API is directly called by the user application code. It has the
@@ -447,18 +512,7 @@ void __llvm_profile_initialize_file(void) {
*/
COMPILER_RT_VISIBILITY
void __llvm_profile_set_filename(const char *FilenamePat) {
- parseAndSetFilename(FilenamePat, PNS_runtime_api);
-}
-
-/*
- * This API is invoked by the global initializers emitted by Clang/LLVM when
- * -fprofile-instr-generate=<..> is specified (vs -fprofile-instr-generate
- * without an argument). This option has lower precedence than the
- * LLVM_PROFILE_FILE environment variable.
- */
-COMPILER_RT_VISIBILITY
-void __llvm_profile_override_default_filename(const char *FilenamePat) {
- parseAndSetFilename(FilenamePat, PNS_command_line);
+ parseAndSetFilename(FilenamePat, PNS_runtime_api, 1);
}
/* The public API for writing profile data into the file with name
@@ -471,6 +525,12 @@ int __llvm_profile_write_file(void) {
const char *Filename;
char *FilenameBuf;
+ if (lprofProfileDumped()) {
+ PROF_NOTE("Profile data not written to file: %s.\n",
+ "already written");
+ return 0;
+ }
+
Length = getCurFilenameLength();
FilenameBuf = (char *)COMPILER_RT_ALLOCA(Length + 1);
Filename = getCurFilename(FilenameBuf);
@@ -497,6 +557,18 @@ int __llvm_profile_write_file(void) {
return rc;
}
+COMPILER_RT_VISIBILITY
+int __llvm_profile_dump(void) {
+ if (!doMerging())
+ PROF_WARN("Later invocation of __llvm_profile_dump can lead to clobbering "
+ " of previously dumped profile data : %s. Either use %%m "
+ "in profile name or change profile name before dumping.\n",
+ "online profile merging is not on");
+ int rc = __llvm_profile_write_file();
+ lprofSetProfileDumped();
+ return rc;
+}
+
static void writeFileWithoutReturn(void) { __llvm_profile_write_file(); }
COMPILER_RT_VISIBILITY
diff --git a/lib/profile/InstrProfilingInternal.h b/lib/profile/InstrProfilingInternal.h
index 44f308206ca8..c73b29101302 100644
--- a/lib/profile/InstrProfilingInternal.h
+++ b/lib/profile/InstrProfilingInternal.h
@@ -163,21 +163,13 @@ void lprofSetupValueProfiler();
* to dump merged profile data into its own profile file. */
uint64_t lprofGetLoadModuleSignature();
-/* GCOV_PREFIX and GCOV_PREFIX_STRIP support */
-/* Return the path prefix specified by GCOV_PREFIX environment variable.
- * If GCOV_PREFIX_STRIP is also specified, the strip level (integer value)
- * is returned via \c *PrefixStrip. The prefix length is stored in *PrefixLen.
+/*
+ * Return a non-zero value if the profile data has already been
+ * dumped to the file.
*/
-const char *lprofGetPathPrefix(int *PrefixStrip, size_t *PrefixLen);
-/* Apply the path prefix specified in \c Prefix to path string in \c PathStr,
- * and store the result to buffer pointed to by \c Buffer. If \c PrefixStrip
- * is not zero, path prefixes are stripped from \c PathStr (the level of
- * stripping is specified by \c PrefixStrip) before \c Prefix is added.
- */
-void lprofApplyPathPrefix(char *Dest, const char *PathStr, const char *Prefix,
- size_t PrefixLen, int PrefixStrip);
+unsigned lprofProfileDumped();
+void lprofSetProfileDumped();
-COMPILER_RT_VISIBILITY extern char *(*GetEnvHook)(const char *);
COMPILER_RT_VISIBILITY extern void (*FreeHook)(void *);
COMPILER_RT_VISIBILITY extern uint8_t *DynamicBufferIOBuffer;
COMPILER_RT_VISIBILITY extern uint32_t VPBufferSize;
diff --git a/lib/profile/InstrProfilingPort.h b/lib/profile/InstrProfilingPort.h
index c947153e2517..5789351956b9 100644
--- a/lib/profile/InstrProfilingPort.h
+++ b/lib/profile/InstrProfilingPort.h
@@ -40,14 +40,14 @@
#endif
#define COMPILER_RT_MAX_HOSTLEN 128
-#ifdef _MSC_VER
-#define COMPILER_RT_GETHOSTNAME(Name, Len) gethostname(Name, Len)
-#elif defined(__ORBIS__)
+#ifdef __ORBIS__
#define COMPILER_RT_GETHOSTNAME(Name, Len) ((void)(Name), (void)(Len), (-1))
#else
#define COMPILER_RT_GETHOSTNAME(Name, Len) lprofGetHostName(Name, Len)
+#ifndef _MSC_VER
#define COMPILER_RT_HAS_UNAME 1
#endif
+#endif
#if COMPILER_RT_HAS_ATOMICS == 1
#ifdef _MSC_VER
diff --git a/lib/profile/InstrProfilingRuntime.cc b/lib/profile/InstrProfilingRuntime.cc
index 12ad9f1573f4..eb83074983ba 100644
--- a/lib/profile/InstrProfilingRuntime.cc
+++ b/lib/profile/InstrProfilingRuntime.cc
@@ -11,7 +11,8 @@ extern "C" {
#include "InstrProfiling.h"
-COMPILER_RT_VISIBILITY int __llvm_profile_runtime;
+/* int __llvm_profile_runtime */
+COMPILER_RT_VISIBILITY int INSTR_PROF_PROFILE_RUNTIME_VAR;
}
namespace {
diff --git a/lib/profile/InstrProfilingUtil.c b/lib/profile/InstrProfilingUtil.c
index 5c66933bc1af..321c7192cc60 100644
--- a/lib/profile/InstrProfilingUtil.c
+++ b/lib/profile/InstrProfilingUtil.c
@@ -35,7 +35,7 @@ void __llvm_profile_recursive_mkdir(char *path) {
for (i = 1; path[i] != '\0'; ++i) {
char save = path[i];
- if (!(path[i] == '/' || path[i] == '\\'))
+ if (!IS_DIR_SEPARATOR(path[i]))
continue;
path[i] = '\0';
#ifdef _WIN32
@@ -66,7 +66,19 @@ void *lprofPtrFetchAdd(void **Mem, long ByteIncr) {
#endif
-#ifdef COMPILER_RT_HAS_UNAME
+#ifdef _MSC_VER
+COMPILER_RT_VISIBILITY int lprofGetHostName(char *Name, int Len) {
+ WCHAR Buffer[COMPILER_RT_MAX_HOSTLEN];
+ DWORD BufferSize = sizeof(Buffer);
+ BOOL Result =
+ GetComputerNameExW(ComputerNameDnsFullyQualified, Buffer, &BufferSize);
+ if (!Result)
+ return -1;
+ if (WideCharToMultiByte(CP_UTF8, 0, Buffer, -1, Name, Len, NULL, NULL) == 0)
+ return -1;
+ return 0;
+}
+#elif defined(COMPILER_RT_HAS_UNAME)
COMPILER_RT_VISIBILITY int lprofGetHostName(char *Name, int Len) {
struct utsname N;
int R;
@@ -184,3 +196,26 @@ lprofApplyPathPrefix(char *Dest, const char *PathStr, const char *Prefix,
memcpy(Dest + PrefixLen, StrippedPathStr, strlen(StrippedPathStr) + 1);
}
+
+COMPILER_RT_VISIBILITY const char *
+lprofFindFirstDirSeparator(const char *Path) {
+ const char *Sep;
+ Sep = strchr(Path, DIR_SEPARATOR);
+ if (Sep)
+ return Sep;
+#if defined(DIR_SEPARATOR_2)
+ Sep = strchr(Path, DIR_SEPARATOR_2);
+#endif
+ return Sep;
+}
+
+COMPILER_RT_VISIBILITY const char *lprofFindLastDirSeparator(const char *Path) {
+ const char *Sep;
+ Sep = strrchr(Path, DIR_SEPARATOR);
+ if (Sep)
+ return Sep;
+#if defined(DIR_SEPARATOR_2)
+ Sep = strrchr(Path, DIR_SEPARATOR_2);
+#endif
+ return Sep;
+}
diff --git a/lib/profile/InstrProfilingUtil.h b/lib/profile/InstrProfilingUtil.h
index 16d3fbf420f2..a80fde77e16a 100644
--- a/lib/profile/InstrProfilingUtil.h
+++ b/lib/profile/InstrProfilingUtil.h
@@ -25,6 +25,27 @@ FILE *lprofOpenFileEx(const char *Filename);
static inline char *getenv(const char *name) { return NULL; }
#endif /* #if __ORBIS__ */
+/* GCOV_PREFIX and GCOV_PREFIX_STRIP support */
+/* Return the path prefix specified by GCOV_PREFIX environment variable.
+ * If GCOV_PREFIX_STRIP is also specified, the strip level (integer value)
+ * is returned via \c *PrefixStrip. The prefix length is stored in *PrefixLen.
+ */
+const char *lprofGetPathPrefix(int *PrefixStrip, size_t *PrefixLen);
+/* Apply the path prefix specified in \c Prefix to path string in \c PathStr,
+ * and store the result in the buffer pointed to by \c Dest. If \c PrefixStrip
+ * is not zero, path prefixes are stripped from \c PathStr (the level of
+ * stripping is specified by \c PrefixStrip) before \c Prefix is added.
+ */
+void lprofApplyPathPrefix(char *Dest, const char *PathStr, const char *Prefix,
+ size_t PrefixLen, int PrefixStrip);
+
+/* Returns a pointer to the first occurrence of \c DIR_SEPARATOR char in
+ * the string \c Path, or NULL if the char is not found. */
+const char *lprofFindFirstDirSeparator(const char *Path);
+/* Returns a pointer to the last occurrence of \c DIR_SEPARATOR char in
+ * the string \c Path, or NULL if the char is not found. */
+const char *lprofFindLastDirSeparator(const char *Path);
+
int lprofGetHostName(char *Name, int Len);
unsigned lprofBoolCmpXchg(void **Ptr, void *OldV, void *NewV);
diff --git a/lib/profile/InstrProfilingValue.c b/lib/profile/InstrProfilingValue.c
index 93957e323762..6648f8923584 100644
--- a/lib/profile/InstrProfilingValue.c
+++ b/lib/profile/InstrProfilingValue.c
@@ -192,7 +192,7 @@ __llvm_profile_instrument_target(uint64_t TargetValue, void *Data,
* the runtime can wipe out more than one lowest count entries
* to give space for hot targets.
*/
- if (!(--MinCountVNode->Count)) {
+ if (!MinCountVNode->Count || !(--MinCountVNode->Count)) {
CurVNode = MinCountVNode;
CurVNode->Value = TargetValue;
CurVNode->Count++;
diff --git a/lib/profile/WindowsMMap.c b/lib/profile/WindowsMMap.c
index 1f7342050032..f81d7da5389e 100644
--- a/lib/profile/WindowsMMap.c
+++ b/lib/profile/WindowsMMap.c
@@ -20,6 +20,9 @@
#include "WindowsMMap.h"
#include "InstrProfiling.h"
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
#ifdef __USE_FILE_OFFSET64
# define DWORD_HI(x) (x >> 32)
# define DWORD_LO(x) ((x) & 0xffffffff)
diff --git a/lib/safestack/CMakeLists.txt b/lib/safestack/CMakeLists.txt
index a3870ab80357..5a1bac2912b7 100644
--- a/lib/safestack/CMakeLists.txt
+++ b/lib/safestack/CMakeLists.txt
@@ -1,6 +1,4 @@
-add_custom_target(safestack)
-set_target_properties(safestack PROPERTIES
- FOLDER "Compiler-RT Misc")
+add_compiler_rt_component(safestack)
set(SAFESTACK_SOURCES safestack.cc)
diff --git a/lib/safestack/safestack.cc b/lib/safestack/safestack.cc
index 92c24b35d6d0..b194b6cfa8f9 100644
--- a/lib/safestack/safestack.cc
+++ b/lib/safestack/safestack.cc
@@ -92,6 +92,8 @@ static __thread void *unsafe_stack_start = nullptr;
static __thread size_t unsafe_stack_size = 0;
static __thread size_t unsafe_stack_guard = 0;
+using namespace __sanitizer;
+
static inline void *unsafe_stack_alloc(size_t size, size_t guard) {
CHECK_GE(size + guard, size);
void *addr = MmapOrDie(size + guard, "unsafe_stack_alloc");
diff --git a/lib/sanitizer_common/.clang-tidy b/lib/sanitizer_common/.clang-tidy
index aa695cc924a4..6c71abff0d38 100644
--- a/lib/sanitizer_common/.clang-tidy
+++ b/lib/sanitizer_common/.clang-tidy
@@ -8,5 +8,9 @@ CheckOptions:
value: CamelCase
- key: readability-identifier-naming.UnionCase
value: CamelCase
+ - key: readability-identifier-naming.GlobalConstantCase
+ value: CamelCase
+ - key: readability-identifier-naming.GlobalConstantPrefix
+ value: "k"
- key: readability-identifier-naming.VariableCase
value: lower_case
diff --git a/lib/sanitizer_common/CMakeLists.txt b/lib/sanitizer_common/CMakeLists.txt
index 4af0009196e8..0d9a7f067a77 100644
--- a/lib/sanitizer_common/CMakeLists.txt
+++ b/lib/sanitizer_common/CMakeLists.txt
@@ -37,6 +37,8 @@ set(SANITIZER_SOURCES_NOTERMINATION
if(UNIX AND NOT APPLE)
list(APPEND SANITIZER_SOURCES_NOTERMINATION
sanitizer_linux_x86_64.S)
+ list(APPEND SANITIZER_SOURCES_NOTERMINATION
+ sanitizer_linux_mips64.S)
endif()
set(SANITIZER_SOURCES
@@ -51,6 +53,7 @@ set(SANITIZER_NOLIBC_SOURCES
set(SANITIZER_LIBCDEP_SOURCES
sanitizer_common_libcdep.cc
sanitizer_coverage_libcdep.cc
+ sanitizer_coverage_libcdep_new.cc
sanitizer_coverage_mapping_libcdep.cc
sanitizer_linux_libcdep.cc
sanitizer_posix_libcdep.cc
@@ -66,8 +69,16 @@ set(SANITIZER_LIBCDEP_SOURCES
set(SANITIZER_HEADERS
sanitizer_addrhashmap.h
sanitizer_allocator.h
+ sanitizer_allocator_bytemap.h
+ sanitizer_allocator_combined.h
sanitizer_allocator_interface.h
sanitizer_allocator_internal.h
+ sanitizer_allocator_local_cache.h
+ sanitizer_allocator_primary32.h
+ sanitizer_allocator_primary64.h
+ sanitizer_allocator_secondary.h
+ sanitizer_allocator_size_class_map.h
+ sanitizer_allocator_stats.h
sanitizer_atomic.h
sanitizer_atomic_clang.h
sanitizer_atomic_msvc.h
@@ -118,14 +129,6 @@ set(SANITIZER_HEADERS
set(SANITIZER_COMMON_DEFINITIONS)
-if(MSVC)
- list(APPEND SANITIZER_COMMON_DEFINITIONS
- SANITIZER_NEEDS_SEGV=0)
-else()
- list(APPEND SANITIZER_COMMON_DEFINITIONS
- SANITIZER_NEEDS_SEGV=1)
-endif()
-
include(CheckIncludeFile)
append_have_file_definition(rpc/xdr.h HAVE_RPC_XDR_H SANITIZER_COMMON_DEFINITIONS)
append_have_file_definition(tirpc/rpc/xdr.h HAVE_TIRPC_RPC_XDR_H SANITIZER_COMMON_DEFINITIONS)
@@ -147,6 +150,8 @@ if (LLVM_ENABLE_PEDANTIC AND UNIX AND NOT APPLE)
# CMAKE_C*_FLAGS and re-add as a source property to all the non-.S files).
set_source_files_properties(sanitizer_linux_x86_64.S
PROPERTIES COMPILE_FLAGS "-w")
+ set_source_files_properties(sanitizer_linux_mips64.S
+ PROPERTIES COMPILE_FLAGS "-w")
endif ()
if(APPLE)
diff --git a/lib/sanitizer_common/sanitizer_addrhashmap.h b/lib/sanitizer_common/sanitizer_addrhashmap.h
index e55fc4f95a9a..2ca3c405bff3 100644
--- a/lib/sanitizer_common/sanitizer_addrhashmap.h
+++ b/lib/sanitizer_common/sanitizer_addrhashmap.h
@@ -73,6 +73,8 @@ class AddrHashMap {
~Handle();
T *operator->();
+ T &operator*();
+ const T &operator*() const;
bool created() const;
bool exists() const;
@@ -136,6 +138,16 @@ T *AddrHashMap<T, kSize>::Handle::operator->() {
return &cell_->val;
}
+template <typename T, uptr kSize>
+const T &AddrHashMap<T, kSize>::Handle::operator*() const {
+ return cell_->val;
+}
+
+template <typename T, uptr kSize>
+T &AddrHashMap<T, kSize>::Handle::operator*() {
+ return cell_->val;
+}
+
template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::created() const {
return created_;
diff --git a/lib/sanitizer_common/sanitizer_allocator.cc b/lib/sanitizer_common/sanitizer_allocator.cc
index df298c62271d..d47b5b41413c 100644
--- a/lib/sanitizer_common/sanitizer_allocator.cc
+++ b/lib/sanitizer_common/sanitizer_allocator.cc
@@ -13,27 +13,33 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_allocator.h"
+
#include "sanitizer_allocator_internal.h"
+#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
namespace __sanitizer {
// ThreadSanitizer for Go uses libc malloc/free.
-#if defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
+#if SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
+# if !SANITIZER_GO
extern "C" void *__libc_memalign(uptr alignment, uptr size);
+# endif
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
# else
# include <stdlib.h>
# define __libc_malloc malloc
+# if !SANITIZER_GO
static void *__libc_memalign(uptr alignment, uptr size) {
void *p;
uptr error = posix_memalign(&p, alignment, size);
if (error) return nullptr;
return p;
}
+# endif
# define __libc_realloc realloc
# define __libc_free free
# endif
@@ -41,10 +47,20 @@ static void *__libc_memalign(uptr alignment, uptr size) {
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
uptr alignment) {
(void)cache;
+#if !SANITIZER_GO
if (alignment == 0)
return __libc_malloc(size);
else
return __libc_memalign(alignment, size);
+#else
+ // Windows does not provide __libc_memalign/posix_memalign. It provides
+ // _aligned_malloc, but the allocated blocks can't be passed to free();
+ // they need to be passed to _aligned_free. The InternalAlloc interface does
+ // not account for such a requirement. Alignment does not seem to be used
+ // anywhere in the runtime, so just call __libc_malloc for now.
+ DCHECK_EQ(alignment, 0);
+ return __libc_malloc(size);
+#endif
}
static void *RawInternalRealloc(void *ptr, uptr size,
@@ -62,7 +78,7 @@ InternalAllocator *internal_allocator() {
return 0;
}
-#else // defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
+#else // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
@@ -78,7 +94,8 @@ InternalAllocator *internal_allocator() {
SpinMutexLock l(&internal_alloc_init_mu);
if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
0) {
- internal_allocator_instance->Init(/* may_return_null*/ false);
+ internal_allocator_instance->Init(
+ /* may_return_null */ false, kReleaseToOSIntervalNever);
atomic_store(&internal_allocator_initialized, 1, memory_order_release);
}
}
@@ -115,7 +132,7 @@ static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
internal_allocator()->Deallocate(cache, ptr);
}
-#endif // defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
+#endif // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
@@ -145,7 +162,7 @@ void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
if (CallocShouldReturnNullDueToOverflow(count, size))
- return internal_allocator()->ReturnNullOrDie();
+ return internal_allocator()->ReturnNullOrDieOnBadRequest();
void *p = InternalAlloc(count * size, cache);
if (p) internal_memset(p, 0, count * size);
return p;
@@ -192,7 +209,12 @@ bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
return (max / size) < n;
}
-void NORETURN ReportAllocatorCannotReturnNull() {
+static atomic_uint8_t reporting_out_of_memory = {0};
+
+bool IsReportingOOM() { return atomic_load_relaxed(&reporting_out_of_memory); }
+
+void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory) {
+ if (out_of_memory) atomic_store_relaxed(&reporting_out_of_memory, 1);
Report("%s's allocator is terminating the process instead of returning 0\n",
SanitizerToolName);
Report("If you don't like this behavior set allocator_may_return_null=1\n");
diff --git a/lib/sanitizer_common/sanitizer_allocator.h b/lib/sanitizer_common/sanitizer_allocator.h
index f0f002004709..9a37a2f2145f 100644
--- a/lib/sanitizer_common/sanitizer_allocator.h
+++ b/lib/sanitizer_common/sanitizer_allocator.h
@@ -20,271 +20,16 @@
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"
#include "sanitizer_lfstack.h"
+#include "sanitizer_procmaps.h"
namespace __sanitizer {
-// Prints error message and kills the program.
-void NORETURN ReportAllocatorCannotReturnNull();
-
-// SizeClassMap maps allocation sizes into size classes and back.
-// Class 0 corresponds to size 0.
-// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
-// Next 4 classes: 256 + i * 64 (i = 1 to 4).
-// Next 4 classes: 512 + i * 128 (i = 1 to 4).
-// ...
-// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).
-// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
-//
-// This structure of the size class map gives us:
-// - Efficient table-free class-to-size and size-to-class functions.
-// - Difference between two consequent size classes is betweed 14% and 25%
-//
-// This class also gives a hint to a thread-caching allocator about the amount
-// of chunks that need to be cached per-thread:
-// - kMaxNumCached is the maximal number of chunks per size class.
-// - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
-//
-// Part of output of SizeClassMap::Print():
-// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
-// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
-// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2
-// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3
-// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4
-// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5
-// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6
-// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7
-//
-// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8
-// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9
-// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10
-// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11
-// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12
-// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13
-// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14
-// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
-//
-// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
-// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17
-// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18
-// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19
-//
-// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20
-// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21
-// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22
-// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23
-//
-// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24
-// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25
-// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26
-// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27
-//
-// ...
-//
-// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48
-// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49
-// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50
-// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51
-//
-// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52
-
-template <uptr kMaxSizeLog, uptr kMaxNumCachedT, uptr kMaxBytesCachedLog>
-class SizeClassMap {
- static const uptr kMinSizeLog = 4;
- static const uptr kMidSizeLog = kMinSizeLog + 4;
- static const uptr kMinSize = 1 << kMinSizeLog;
- static const uptr kMidSize = 1 << kMidSizeLog;
- static const uptr kMidClass = kMidSize / kMinSize;
- static const uptr S = 2;
- static const uptr M = (1 << S) - 1;
-
- public:
- static const uptr kMaxNumCached = kMaxNumCachedT;
- // We transfer chunks between central and thread-local free lists in batches.
- // For small size classes we allocate batches separately.
- // For large size classes we use one of the chunks to store the batch.
- struct TransferBatch {
- TransferBatch *next;
- uptr count;
- void *batch[kMaxNumCached];
- };
-
- static const uptr kMaxSize = 1UL << kMaxSizeLog;
- static const uptr kNumClasses =
- kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1;
- COMPILER_CHECK(kNumClasses >= 32 && kNumClasses <= 256);
- static const uptr kNumClassesRounded =
- kNumClasses == 32 ? 32 :
- kNumClasses <= 64 ? 64 :
- kNumClasses <= 128 ? 128 : 256;
-
- static uptr Size(uptr class_id) {
- if (class_id <= kMidClass)
- return kMinSize * class_id;
- class_id -= kMidClass;
- uptr t = kMidSize << (class_id >> S);
- return t + (t >> S) * (class_id & M);
- }
-
- static uptr ClassID(uptr size) {
- if (size <= kMidSize)
- return (size + kMinSize - 1) >> kMinSizeLog;
- if (size > kMaxSize) return 0;
- uptr l = MostSignificantSetBitIndex(size);
- uptr hbits = (size >> (l - S)) & M;
- uptr lbits = size & ((1 << (l - S)) - 1);
- uptr l1 = l - kMidSizeLog;
- return kMidClass + (l1 << S) + hbits + (lbits > 0);
- }
-
- static uptr MaxCached(uptr class_id) {
- if (class_id == 0) return 0;
- uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id);
- return Max<uptr>(1, Min(kMaxNumCached, n));
- }
-
- static void Print() {
- uptr prev_s = 0;
- uptr total_cached = 0;
- for (uptr i = 0; i < kNumClasses; i++) {
- uptr s = Size(i);
- if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
- Printf("\n");
- uptr d = s - prev_s;
- uptr p = prev_s ? (d * 100 / prev_s) : 0;
- uptr l = s ? MostSignificantSetBitIndex(s) : 0;
- uptr cached = MaxCached(i) * s;
- Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
- "cached: %zd %zd; id %zd\n",
- i, Size(i), d, p, l, MaxCached(i), cached, ClassID(s));
- total_cached += cached;
- prev_s = s;
- }
- Printf("Total cached: %zd\n", total_cached);
- }
-
- static bool SizeClassRequiresSeparateTransferBatch(uptr class_id) {
- return Size(class_id) < sizeof(TransferBatch) -
- sizeof(uptr) * (kMaxNumCached - MaxCached(class_id));
- }
-
- static void Validate() {
- for (uptr c = 1; c < kNumClasses; c++) {
- // Printf("Validate: c%zd\n", c);
- uptr s = Size(c);
- CHECK_NE(s, 0U);
- CHECK_EQ(ClassID(s), c);
- if (c != kNumClasses - 1)
- CHECK_EQ(ClassID(s + 1), c + 1);
- CHECK_EQ(ClassID(s - 1), c);
- if (c)
- CHECK_GT(Size(c), Size(c-1));
- }
- CHECK_EQ(ClassID(kMaxSize + 1), 0);
-
- for (uptr s = 1; s <= kMaxSize; s++) {
- uptr c = ClassID(s);
- // Printf("s%zd => c%zd\n", s, c);
- CHECK_LT(c, kNumClasses);
- CHECK_GE(Size(c), s);
- if (c > 0)
- CHECK_LT(Size(c-1), s);
- }
- }
-};
+// Returns true if ReportAllocatorCannotReturnNull(true) was called.
+// Can be used to avoid memory-hungry operations.
+bool IsReportingOOM();
-typedef SizeClassMap<17, 128, 16> DefaultSizeClassMap;
-typedef SizeClassMap<17, 64, 14> CompactSizeClassMap;
-template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache;
-
-// Memory allocator statistics
-enum AllocatorStat {
- AllocatorStatAllocated,
- AllocatorStatMapped,
- AllocatorStatCount
-};
-
-typedef uptr AllocatorStatCounters[AllocatorStatCount];
-
-// Per-thread stats, live in per-thread cache.
-class AllocatorStats {
- public:
- void Init() {
- internal_memset(this, 0, sizeof(*this));
- }
- void InitLinkerInitialized() {}
-
- void Add(AllocatorStat i, uptr v) {
- v += atomic_load(&stats_[i], memory_order_relaxed);
- atomic_store(&stats_[i], v, memory_order_relaxed);
- }
-
- void Sub(AllocatorStat i, uptr v) {
- v = atomic_load(&stats_[i], memory_order_relaxed) - v;
- atomic_store(&stats_[i], v, memory_order_relaxed);
- }
-
- void Set(AllocatorStat i, uptr v) {
- atomic_store(&stats_[i], v, memory_order_relaxed);
- }
-
- uptr Get(AllocatorStat i) const {
- return atomic_load(&stats_[i], memory_order_relaxed);
- }
-
- private:
- friend class AllocatorGlobalStats;
- AllocatorStats *next_;
- AllocatorStats *prev_;
- atomic_uintptr_t stats_[AllocatorStatCount];
-};
-
-// Global stats, used for aggregation and querying.
-class AllocatorGlobalStats : public AllocatorStats {
- public:
- void InitLinkerInitialized() {
- next_ = this;
- prev_ = this;
- }
- void Init() {
- internal_memset(this, 0, sizeof(*this));
- InitLinkerInitialized();
- }
-
- void Register(AllocatorStats *s) {
- SpinMutexLock l(&mu_);
- s->next_ = next_;
- s->prev_ = this;
- next_->prev_ = s;
- next_ = s;
- }
-
- void Unregister(AllocatorStats *s) {
- SpinMutexLock l(&mu_);
- s->prev_->next_ = s->next_;
- s->next_->prev_ = s->prev_;
- for (int i = 0; i < AllocatorStatCount; i++)
- Add(AllocatorStat(i), s->Get(AllocatorStat(i)));
- }
-
- void Get(AllocatorStatCounters s) const {
- internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
- SpinMutexLock l(&mu_);
- const AllocatorStats *stats = this;
- for (;;) {
- for (int i = 0; i < AllocatorStatCount; i++)
- s[i] += stats->Get(AllocatorStat(i));
- stats = stats->next_;
- if (stats == this)
- break;
- }
- // All stats must be non-negative.
- for (int i = 0; i < AllocatorStatCount; i++)
- s[i] = ((sptr)s[i]) >= 0 ? s[i] : 0;
- }
-
- private:
- mutable SpinMutex mu_;
-};
+// Prints error message and kills the program.
+void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory);
// Allocators call these callbacks on mmap/munmap.
struct NoOpMapUnmapCallback {
@@ -295,1185 +40,18 @@ struct NoOpMapUnmapCallback {
// Callback type for iterating over chunks.
typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
-// SizeClassAllocator64 -- allocator for 64-bit address space.
-//
-// Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.
-// If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically my mmap.
-// Otherwise SpaceBeg=kSpaceBeg (fixed address).
-// kSpaceSize is a power of two.
-// At the beginning the entire space is mprotect-ed, then small parts of it
-// are mapped on demand.
-//
-// Region: a part of Space dedicated to a single size class.
-// There are kNumClasses Regions of equal size.
-//
-// UserChunk: a piece of memory returned to user.
-// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
-//
-// A Region looks like this:
-// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
-template <const uptr kSpaceBeg, const uptr kSpaceSize,
- const uptr kMetadataSize, class SizeClassMap,
- class MapUnmapCallback = NoOpMapUnmapCallback>
-class SizeClassAllocator64 {
- public:
- typedef typename SizeClassMap::TransferBatch Batch;
- typedef SizeClassAllocator64<kSpaceBeg, kSpaceSize, kMetadataSize,
- SizeClassMap, MapUnmapCallback> ThisT;
- typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;
-
- void Init() {
- uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
- if (kUsingConstantSpaceBeg) {
- CHECK_EQ(kSpaceBeg, reinterpret_cast<uptr>(
- MmapFixedNoAccess(kSpaceBeg, TotalSpaceSize)));
- } else {
- NonConstSpaceBeg =
- reinterpret_cast<uptr>(MmapNoAccess(TotalSpaceSize));
- CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
- }
- MapWithCallback(SpaceEnd(), AdditionalSize());
- }
-
- void MapWithCallback(uptr beg, uptr size) {
- CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
- MapUnmapCallback().OnMap(beg, size);
- }
-
- void UnmapWithCallback(uptr beg, uptr size) {
- MapUnmapCallback().OnUnmap(beg, size);
- UnmapOrDie(reinterpret_cast<void *>(beg), size);
- }
-
- static bool CanAllocate(uptr size, uptr alignment) {
- return size <= SizeClassMap::kMaxSize &&
- alignment <= SizeClassMap::kMaxSize;
- }
-
- NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
- uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- RegionInfo *region = GetRegionInfo(class_id);
- Batch *b = region->free_list.Pop();
- if (!b)
- b = PopulateFreeList(stat, c, class_id, region);
- region->n_allocated += b->count;
- return b;
- }
-
- NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
- RegionInfo *region = GetRegionInfo(class_id);
- CHECK_GT(b->count, 0);
- region->free_list.Push(b);
- region->n_freed += b->count;
- }
-
- bool PointerIsMine(const void *p) {
- uptr P = reinterpret_cast<uptr>(p);
- if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
- return P / kSpaceSize == kSpaceBeg / kSpaceSize;
- return P >= SpaceBeg() && P < SpaceEnd();
- }
-
- uptr GetSizeClass(const void *p) {
- if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
- return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded;
- return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) %
- kNumClassesRounded;
- }
-
- void *GetBlockBegin(const void *p) {
- uptr class_id = GetSizeClass(p);
- uptr size = SizeClassMap::Size(class_id);
- if (!size) return nullptr;
- uptr chunk_idx = GetChunkIdx((uptr)p, size);
- uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
- uptr beg = chunk_idx * size;
- uptr next_beg = beg + size;
- if (class_id >= kNumClasses) return nullptr;
- RegionInfo *region = GetRegionInfo(class_id);
- if (region->mapped_user >= next_beg)
- return reinterpret_cast<void*>(reg_beg + beg);
- return nullptr;
- }
-
- uptr GetActuallyAllocatedSize(void *p) {
- CHECK(PointerIsMine(p));
- return SizeClassMap::Size(GetSizeClass(p));
- }
-
- uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
-
- void *GetMetaData(const void *p) {
- uptr class_id = GetSizeClass(p);
- uptr size = SizeClassMap::Size(class_id);
- uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
- return reinterpret_cast<void *>(SpaceBeg() +
- (kRegionSize * (class_id + 1)) -
- (1 + chunk_idx) * kMetadataSize);
- }
-
- uptr TotalMemoryUsed() {
- uptr res = 0;
- for (uptr i = 0; i < kNumClasses; i++)
- res += GetRegionInfo(i)->allocated_user;
- return res;
- }
-
- // Test-only.
- void TestOnlyUnmap() {
- UnmapWithCallback(SpaceBeg(), kSpaceSize + AdditionalSize());
- }
-
- void PrintStats() {
- uptr total_mapped = 0;
- uptr n_allocated = 0;
- uptr n_freed = 0;
- for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
- RegionInfo *region = GetRegionInfo(class_id);
- total_mapped += region->mapped_user;
- n_allocated += region->n_allocated;
- n_freed += region->n_freed;
- }
- Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
- "remains %zd\n",
- total_mapped >> 20, n_allocated, n_allocated - n_freed);
- for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
- RegionInfo *region = GetRegionInfo(class_id);
- if (region->mapped_user == 0) continue;
- Printf(" %02zd (%zd): total: %zd K allocs: %zd remains: %zd\n",
- class_id,
- SizeClassMap::Size(class_id),
- region->mapped_user >> 10,
- region->n_allocated,
- region->n_allocated - region->n_freed);
- }
- }
-
- // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
- // introspection API.
- void ForceLock() {
- for (uptr i = 0; i < kNumClasses; i++) {
- GetRegionInfo(i)->mutex.Lock();
- }
- }
-
- void ForceUnlock() {
- for (int i = (int)kNumClasses - 1; i >= 0; i--) {
- GetRegionInfo(i)->mutex.Unlock();
- }
- }
-
- // Iterate over all existing chunks.
- // The allocator must be locked when calling this function.
- void ForEachChunk(ForEachChunkCallback callback, void *arg) {
- for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
- RegionInfo *region = GetRegionInfo(class_id);
- uptr chunk_size = SizeClassMap::Size(class_id);
- uptr region_beg = SpaceBeg() + class_id * kRegionSize;
- for (uptr chunk = region_beg;
- chunk < region_beg + region->allocated_user;
- chunk += chunk_size) {
- // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
- callback(chunk, arg);
- }
- }
- }
-
- static uptr AdditionalSize() {
- return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
- GetPageSizeCached());
- }
-
- typedef SizeClassMap SizeClassMapT;
- static const uptr kNumClasses = SizeClassMap::kNumClasses;
- static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
-
- private:
- static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
-
- static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0;
- uptr NonConstSpaceBeg;
- uptr SpaceBeg() const {
- return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
- }
- uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
- // kRegionSize must be >= 2^32.
- COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
- // Populate the free list with at most this number of bytes at once
- // or with one element if its size is greater.
- static const uptr kPopulateSize = 1 << 14;
- // Call mmap for user memory with at least this size.
- static const uptr kUserMapSize = 1 << 16;
- // Call mmap for metadata memory with at least this size.
- static const uptr kMetaMapSize = 1 << 16;
-
- struct RegionInfo {
- BlockingMutex mutex;
- LFStack<Batch> free_list;
- uptr allocated_user; // Bytes allocated for user memory.
- uptr allocated_meta; // Bytes allocated for metadata.
- uptr mapped_user; // Bytes mapped for user memory.
- uptr mapped_meta; // Bytes mapped for metadata.
- uptr n_allocated, n_freed; // Just stats.
- };
- COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);
-
- RegionInfo *GetRegionInfo(uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- RegionInfo *regions =
- reinterpret_cast<RegionInfo *>(SpaceBeg() + kSpaceSize);
- return &regions[class_id];
- }
-
- static uptr GetChunkIdx(uptr chunk, uptr size) {
- uptr offset = chunk % kRegionSize;
- // Here we divide by a non-constant. This is costly.
- // size always fits into 32-bits. If the offset fits too, use 32-bit div.
- if (offset >> (SANITIZER_WORDSIZE / 2))
- return offset / size;
- return (u32)offset / (u32)size;
- }
-
- NOINLINE Batch* PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
- uptr class_id, RegionInfo *region) {
- BlockingMutexLock l(&region->mutex);
- Batch *b = region->free_list.Pop();
- if (b)
- return b;
- uptr size = SizeClassMap::Size(class_id);
- uptr count = size < kPopulateSize ? SizeClassMap::MaxCached(class_id) : 1;
- uptr beg_idx = region->allocated_user;
- uptr end_idx = beg_idx + count * size;
- uptr region_beg = SpaceBeg() + kRegionSize * class_id;
- if (end_idx + size > region->mapped_user) {
- // Do the mmap for the user memory.
- uptr map_size = kUserMapSize;
- while (end_idx + size > region->mapped_user + map_size)
- map_size += kUserMapSize;
- CHECK_GE(region->mapped_user + map_size, end_idx);
- MapWithCallback(region_beg + region->mapped_user, map_size);
- stat->Add(AllocatorStatMapped, map_size);
- region->mapped_user += map_size;
- }
- uptr total_count = (region->mapped_user - beg_idx - size)
- / size / count * count;
- region->allocated_meta += total_count * kMetadataSize;
- if (region->allocated_meta > region->mapped_meta) {
- uptr map_size = kMetaMapSize;
- while (region->allocated_meta > region->mapped_meta + map_size)
- map_size += kMetaMapSize;
- // Do the mmap for the metadata.
- CHECK_GE(region->mapped_meta + map_size, region->allocated_meta);
- MapWithCallback(region_beg + kRegionSize -
- region->mapped_meta - map_size, map_size);
- region->mapped_meta += map_size;
- }
- CHECK_LE(region->allocated_meta, region->mapped_meta);
- if (region->mapped_user + region->mapped_meta > kRegionSize) {
- Printf("%s: Out of memory. Dying. ", SanitizerToolName);
- Printf("The process has exhausted %zuMB for size class %zu.\n",
- kRegionSize / 1024 / 1024, size);
- Die();
- }
- for (;;) {
- if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
- b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
- else
- b = (Batch*)(region_beg + beg_idx);
- b->count = count;
- for (uptr i = 0; i < count; i++)
- b->batch[i] = (void*)(region_beg + beg_idx + i * size);
- region->allocated_user += count * size;
- CHECK_LE(region->allocated_user, region->mapped_user);
- beg_idx += count * size;
- if (beg_idx + count * size + size > region->mapped_user)
- break;
- CHECK_GT(b->count, 0);
- region->free_list.Push(b);
- }
- return b;
- }
-};
-
-// Maps integers in rage [0, kSize) to u8 values.
-template<u64 kSize>
-class FlatByteMap {
- public:
- void TestOnlyInit() {
- internal_memset(map_, 0, sizeof(map_));
- }
-
- void set(uptr idx, u8 val) {
- CHECK_LT(idx, kSize);
- CHECK_EQ(0U, map_[idx]);
- map_[idx] = val;
- }
- u8 operator[] (uptr idx) {
- CHECK_LT(idx, kSize);
- // FIXME: CHECK may be too expensive here.
- return map_[idx];
- }
- private:
- u8 map_[kSize];
-};
-
-// TwoLevelByteMap maps integers in range [0, kSize1*kSize2) to u8 values.
-// It is implemented as a two-dimensional array: array of kSize1 pointers
-// to kSize2-byte arrays. The secondary arrays are mmaped on demand.
-// Each value is initially zero and can be set to something else only once.
-// Setting and getting values from multiple threads is safe w/o extra locking.
-template <u64 kSize1, u64 kSize2, class MapUnmapCallback = NoOpMapUnmapCallback>
-class TwoLevelByteMap {
- public:
- void TestOnlyInit() {
- internal_memset(map1_, 0, sizeof(map1_));
- mu_.Init();
- }
-
- void TestOnlyUnmap() {
- for (uptr i = 0; i < kSize1; i++) {
- u8 *p = Get(i);
- if (!p) continue;
- MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2);
- UnmapOrDie(p, kSize2);
- }
- }
-
- uptr size() const { return kSize1 * kSize2; }
- uptr size1() const { return kSize1; }
- uptr size2() const { return kSize2; }
-
- void set(uptr idx, u8 val) {
- CHECK_LT(idx, kSize1 * kSize2);
- u8 *map2 = GetOrCreate(idx / kSize2);
- CHECK_EQ(0U, map2[idx % kSize2]);
- map2[idx % kSize2] = val;
- }
-
- u8 operator[] (uptr idx) const {
- CHECK_LT(idx, kSize1 * kSize2);
- u8 *map2 = Get(idx / kSize2);
- if (!map2) return 0;
- return map2[idx % kSize2];
- }
-
- private:
- u8 *Get(uptr idx) const {
- CHECK_LT(idx, kSize1);
- return reinterpret_cast<u8 *>(
- atomic_load(&map1_[idx], memory_order_acquire));
- }
-
- u8 *GetOrCreate(uptr idx) {
- u8 *res = Get(idx);
- if (!res) {
- SpinMutexLock l(&mu_);
- if (!(res = Get(idx))) {
- res = (u8*)MmapOrDie(kSize2, "TwoLevelByteMap");
- MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);
- atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
- memory_order_release);
- }
- }
- return res;
- }
-
- atomic_uintptr_t map1_[kSize1];
- StaticSpinMutex mu_;
-};
-
-// SizeClassAllocator32 -- allocator for 32-bit address space.
-// This allocator can theoretically be used on 64-bit arch, but there it is less
-// efficient than SizeClassAllocator64.
-//
-// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
-// be returned by MmapOrDie().
-//
-// Region:
-// a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
-// Since the regions are aligned by kRegionSize, there are exactly
-// kNumPossibleRegions possible regions in the address space and so we keep
-// a ByteMap possible_regions to store the size classes of each Region.
-// 0 size class means the region is not used by the allocator.
-//
-// One Region is used to allocate chunks of a single size class.
-// A Region looks like this:
-// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
-//
-// In order to avoid false sharing the objects of this class should be
-// chache-line aligned.
-template <const uptr kSpaceBeg, const u64 kSpaceSize,
- const uptr kMetadataSize, class SizeClassMap,
- const uptr kRegionSizeLog,
- class ByteMap,
- class MapUnmapCallback = NoOpMapUnmapCallback>
-class SizeClassAllocator32 {
- public:
- typedef typename SizeClassMap::TransferBatch Batch;
- typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
- SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT;
- typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;
-
- void Init() {
- possible_regions.TestOnlyInit();
- internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
- }
-
- void *MapWithCallback(uptr size) {
- size = RoundUpTo(size, GetPageSizeCached());
- void *res = MmapOrDie(size, "SizeClassAllocator32");
- MapUnmapCallback().OnMap((uptr)res, size);
- return res;
- }
-
- void UnmapWithCallback(uptr beg, uptr size) {
- MapUnmapCallback().OnUnmap(beg, size);
- UnmapOrDie(reinterpret_cast<void *>(beg), size);
- }
-
- static bool CanAllocate(uptr size, uptr alignment) {
- return size <= SizeClassMap::kMaxSize &&
- alignment <= SizeClassMap::kMaxSize;
- }
-
- void *GetMetaData(const void *p) {
- CHECK(PointerIsMine(p));
- uptr mem = reinterpret_cast<uptr>(p);
- uptr beg = ComputeRegionBeg(mem);
- uptr size = SizeClassMap::Size(GetSizeClass(p));
- u32 offset = mem - beg;
- uptr n = offset / (u32)size; // 32-bit division
- uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
- return reinterpret_cast<void*>(meta);
- }
-
- NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
- uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- SizeClassInfo *sci = GetSizeClassInfo(class_id);
- SpinMutexLock l(&sci->mutex);
- if (sci->free_list.empty())
- PopulateFreeList(stat, c, sci, class_id);
- CHECK(!sci->free_list.empty());
- Batch *b = sci->free_list.front();
- sci->free_list.pop_front();
- return b;
- }
-
- NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
- CHECK_LT(class_id, kNumClasses);
- SizeClassInfo *sci = GetSizeClassInfo(class_id);
- SpinMutexLock l(&sci->mutex);
- CHECK_GT(b->count, 0);
- sci->free_list.push_front(b);
- }
-
- bool PointerIsMine(const void *p) {
- uptr mem = reinterpret_cast<uptr>(p);
- if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
- return false;
- return GetSizeClass(p) != 0;
- }
-
- uptr GetSizeClass(const void *p) {
- return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
- }
-
- void *GetBlockBegin(const void *p) {
- CHECK(PointerIsMine(p));
- uptr mem = reinterpret_cast<uptr>(p);
- uptr beg = ComputeRegionBeg(mem);
- uptr size = SizeClassMap::Size(GetSizeClass(p));
- u32 offset = mem - beg;
- u32 n = offset / (u32)size; // 32-bit division
- uptr res = beg + (n * (u32)size);
- return reinterpret_cast<void*>(res);
- }
-
- uptr GetActuallyAllocatedSize(void *p) {
- CHECK(PointerIsMine(p));
- return SizeClassMap::Size(GetSizeClass(p));
- }
-
- uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
-
- uptr TotalMemoryUsed() {
- // No need to lock here.
- uptr res = 0;
- for (uptr i = 0; i < kNumPossibleRegions; i++)
- if (possible_regions[i])
- res += kRegionSize;
- return res;
- }
-
- void TestOnlyUnmap() {
- for (uptr i = 0; i < kNumPossibleRegions; i++)
- if (possible_regions[i])
- UnmapWithCallback((i * kRegionSize), kRegionSize);
- }
-
- // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
- // introspection API.
- void ForceLock() {
- for (uptr i = 0; i < kNumClasses; i++) {
- GetSizeClassInfo(i)->mutex.Lock();
- }
- }
-
- void ForceUnlock() {
- for (int i = kNumClasses - 1; i >= 0; i--) {
- GetSizeClassInfo(i)->mutex.Unlock();
- }
- }
-
- // Iterate over all existing chunks.
- // The allocator must be locked when calling this function.
- void ForEachChunk(ForEachChunkCallback callback, void *arg) {
- for (uptr region = 0; region < kNumPossibleRegions; region++)
- if (possible_regions[region]) {
- uptr chunk_size = SizeClassMap::Size(possible_regions[region]);
- uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
- uptr region_beg = region * kRegionSize;
- for (uptr chunk = region_beg;
- chunk < region_beg + max_chunks_in_region * chunk_size;
- chunk += chunk_size) {
- // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
- callback(chunk, arg);
- }
- }
- }
-
- void PrintStats() {
- }
-
- static uptr AdditionalSize() {
- return 0;
- }
-
- typedef SizeClassMap SizeClassMapT;
- static const uptr kNumClasses = SizeClassMap::kNumClasses;
-
- private:
- static const uptr kRegionSize = 1 << kRegionSizeLog;
- static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
-
- struct SizeClassInfo {
- SpinMutex mutex;
- IntrusiveList<Batch> free_list;
- char padding[kCacheLineSize - sizeof(uptr) - sizeof(IntrusiveList<Batch>)];
- };
- COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);
-
- uptr ComputeRegionId(uptr mem) {
- uptr res = mem >> kRegionSizeLog;
- CHECK_LT(res, kNumPossibleRegions);
- return res;
- }
-
- uptr ComputeRegionBeg(uptr mem) {
- return mem & ~(kRegionSize - 1);
- }
-
- uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
- "SizeClassAllocator32"));
- MapUnmapCallback().OnMap(res, kRegionSize);
- stat->Add(AllocatorStatMapped, kRegionSize);
- CHECK_EQ(0U, (res & (kRegionSize - 1)));
- possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
- return res;
- }
-
- SizeClassInfo *GetSizeClassInfo(uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- return &size_class_info_array[class_id];
- }
-
- void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
- SizeClassInfo *sci, uptr class_id) {
- uptr size = SizeClassMap::Size(class_id);
- uptr reg = AllocateRegion(stat, class_id);
- uptr n_chunks = kRegionSize / (size + kMetadataSize);
- uptr max_count = SizeClassMap::MaxCached(class_id);
- Batch *b = nullptr;
- for (uptr i = reg; i < reg + n_chunks * size; i += size) {
- if (!b) {
- if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
- b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
- else
- b = (Batch*)i;
- b->count = 0;
- }
- b->batch[b->count++] = (void*)i;
- if (b->count == max_count) {
- CHECK_GT(b->count, 0);
- sci->free_list.push_back(b);
- b = nullptr;
- }
- }
- if (b) {
- CHECK_GT(b->count, 0);
- sci->free_list.push_back(b);
- }
- }
-
- ByteMap possible_regions;
- SizeClassInfo size_class_info_array[kNumClasses];
-};
-
-// Objects of this type should be used as local caches for SizeClassAllocator64
-// or SizeClassAllocator32. Since the typical use of this class is to have one
-// object per thread in TLS, is has to be POD.
-template<class SizeClassAllocator>
-struct SizeClassAllocatorLocalCache {
- typedef SizeClassAllocator Allocator;
- static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
-
- void Init(AllocatorGlobalStats *s) {
- stats_.Init();
- if (s)
- s->Register(&stats_);
- }
-
- void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
- Drain(allocator);
- if (s)
- s->Unregister(&stats_);
- }
-
- void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
- CHECK_NE(class_id, 0UL);
- CHECK_LT(class_id, kNumClasses);
- stats_.Add(AllocatorStatAllocated, SizeClassMap::Size(class_id));
- PerClass *c = &per_class_[class_id];
- if (UNLIKELY(c->count == 0))
- Refill(allocator, class_id);
- void *res = c->batch[--c->count];
- PREFETCH(c->batch[c->count - 1]);
- return res;
- }
-
- void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
- CHECK_NE(class_id, 0UL);
- CHECK_LT(class_id, kNumClasses);
- // If the first allocator call on a new thread is a deallocation, then
- // max_count will be zero, leading to check failure.
- InitCache();
- stats_.Sub(AllocatorStatAllocated, SizeClassMap::Size(class_id));
- PerClass *c = &per_class_[class_id];
- CHECK_NE(c->max_count, 0UL);
- if (UNLIKELY(c->count == c->max_count))
- Drain(allocator, class_id);
- c->batch[c->count++] = p;
- }
-
- void Drain(SizeClassAllocator *allocator) {
- for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
- PerClass *c = &per_class_[class_id];
- while (c->count > 0)
- Drain(allocator, class_id);
- }
- }
-
- // private:
- typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
- typedef typename SizeClassMap::TransferBatch Batch;
- struct PerClass {
- uptr count;
- uptr max_count;
- void *batch[2 * SizeClassMap::kMaxNumCached];
- };
- PerClass per_class_[kNumClasses];
- AllocatorStats stats_;
-
- void InitCache() {
- if (per_class_[1].max_count)
- return;
- for (uptr i = 0; i < kNumClasses; i++) {
- PerClass *c = &per_class_[i];
- c->max_count = 2 * SizeClassMap::MaxCached(i);
- }
- }
-
- NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
- InitCache();
- PerClass *c = &per_class_[class_id];
- Batch *b = allocator->AllocateBatch(&stats_, this, class_id);
- CHECK_GT(b->count, 0);
- for (uptr i = 0; i < b->count; i++)
- c->batch[i] = b->batch[i];
- c->count = b->count;
- if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
- Deallocate(allocator, SizeClassMap::ClassID(sizeof(Batch)), b);
- }
-
- NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
- InitCache();
- PerClass *c = &per_class_[class_id];
- Batch *b;
- if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
- b = (Batch*)Allocate(allocator, SizeClassMap::ClassID(sizeof(Batch)));
- else
- b = (Batch*)c->batch[0];
- uptr cnt = Min(c->max_count / 2, c->count);
- for (uptr i = 0; i < cnt; i++) {
- b->batch[i] = c->batch[i];
- c->batch[i] = c->batch[i + c->max_count / 2];
- }
- b->count = cnt;
- c->count -= cnt;
- CHECK_GT(b->count, 0);
- allocator->DeallocateBatch(&stats_, class_id, b);
- }
-};
-
-// This class can (de)allocate only large chunks of memory using mmap/unmap.
-// The main purpose of this allocator is to cover large and rare allocation
-// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
-template <class MapUnmapCallback = NoOpMapUnmapCallback>
-class LargeMmapAllocator {
- public:
- void InitLinkerInitialized(bool may_return_null) {
- page_size_ = GetPageSizeCached();
- atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
- }
-
- void Init(bool may_return_null) {
- internal_memset(this, 0, sizeof(*this));
- InitLinkerInitialized(may_return_null);
- }
-
- void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
- CHECK(IsPowerOfTwo(alignment));
- uptr map_size = RoundUpMapSize(size);
- if (alignment > page_size_)
- map_size += alignment;
- // Overflow.
- if (map_size < size)
- return ReturnNullOrDie();
- uptr map_beg = reinterpret_cast<uptr>(
- MmapOrDie(map_size, "LargeMmapAllocator"));
- CHECK(IsAligned(map_beg, page_size_));
- MapUnmapCallback().OnMap(map_beg, map_size);
- uptr map_end = map_beg + map_size;
- uptr res = map_beg + page_size_;
- if (res & (alignment - 1)) // Align.
- res += alignment - (res & (alignment - 1));
- CHECK(IsAligned(res, alignment));
- CHECK(IsAligned(res, page_size_));
- CHECK_GE(res + size, map_beg);
- CHECK_LE(res + size, map_end);
- Header *h = GetHeader(res);
- h->size = size;
- h->map_beg = map_beg;
- h->map_size = map_size;
- uptr size_log = MostSignificantSetBitIndex(map_size);
- CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
- {
- SpinMutexLock l(&mutex_);
- uptr idx = n_chunks_++;
- chunks_sorted_ = false;
- CHECK_LT(idx, kMaxNumChunks);
- h->chunk_idx = idx;
- chunks_[idx] = h;
- stats.n_allocs++;
- stats.currently_allocated += map_size;
- stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
- stats.by_size_log[size_log]++;
- stat->Add(AllocatorStatAllocated, map_size);
- stat->Add(AllocatorStatMapped, map_size);
- }
- return reinterpret_cast<void*>(res);
- }
-
- void *ReturnNullOrDie() {
- if (atomic_load(&may_return_null_, memory_order_acquire))
- return nullptr;
- ReportAllocatorCannotReturnNull();
- }
-
- void SetMayReturnNull(bool may_return_null) {
- atomic_store(&may_return_null_, may_return_null, memory_order_release);
- }
-
- void Deallocate(AllocatorStats *stat, void *p) {
- Header *h = GetHeader(p);
- {
- SpinMutexLock l(&mutex_);
- uptr idx = h->chunk_idx;
- CHECK_EQ(chunks_[idx], h);
- CHECK_LT(idx, n_chunks_);
- chunks_[idx] = chunks_[n_chunks_ - 1];
- chunks_[idx]->chunk_idx = idx;
- n_chunks_--;
- chunks_sorted_ = false;
- stats.n_frees++;
- stats.currently_allocated -= h->map_size;
- stat->Sub(AllocatorStatAllocated, h->map_size);
- stat->Sub(AllocatorStatMapped, h->map_size);
- }
- MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
- UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
- }
-
- uptr TotalMemoryUsed() {
- SpinMutexLock l(&mutex_);
- uptr res = 0;
- for (uptr i = 0; i < n_chunks_; i++) {
- Header *h = chunks_[i];
- CHECK_EQ(h->chunk_idx, i);
- res += RoundUpMapSize(h->size);
- }
- return res;
- }
-
- bool PointerIsMine(const void *p) {
- return GetBlockBegin(p) != nullptr;
- }
-
- uptr GetActuallyAllocatedSize(void *p) {
- return RoundUpTo(GetHeader(p)->size, page_size_);
- }
-
- // At least page_size_/2 metadata bytes is available.
- void *GetMetaData(const void *p) {
- // Too slow: CHECK_EQ(p, GetBlockBegin(p));
- if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
- Printf("%s: bad pointer %p\n", SanitizerToolName, p);
- CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
- }
- return GetHeader(p) + 1;
- }
-
- void *GetBlockBegin(const void *ptr) {
- uptr p = reinterpret_cast<uptr>(ptr);
- SpinMutexLock l(&mutex_);
- uptr nearest_chunk = 0;
- // Cache-friendly linear search.
- for (uptr i = 0; i < n_chunks_; i++) {
- uptr ch = reinterpret_cast<uptr>(chunks_[i]);
- if (p < ch) continue; // p is at left to this chunk, skip it.
- if (p - ch < p - nearest_chunk)
- nearest_chunk = ch;
- }
- if (!nearest_chunk)
- return nullptr;
- Header *h = reinterpret_cast<Header *>(nearest_chunk);
- CHECK_GE(nearest_chunk, h->map_beg);
- CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
- CHECK_LE(nearest_chunk, p);
- if (h->map_beg + h->map_size <= p)
- return nullptr;
- return GetUser(h);
- }
-
- // This function does the same as GetBlockBegin, but is much faster.
- // Must be called with the allocator locked.
- void *GetBlockBeginFastLocked(void *ptr) {
- mutex_.CheckLocked();
- uptr p = reinterpret_cast<uptr>(ptr);
- uptr n = n_chunks_;
- if (!n) return nullptr;
- if (!chunks_sorted_) {
- // Do one-time sort. chunks_sorted_ is reset in Allocate/Deallocate.
- SortArray(reinterpret_cast<uptr*>(chunks_), n);
- for (uptr i = 0; i < n; i++)
- chunks_[i]->chunk_idx = i;
- chunks_sorted_ = true;
- min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
- max_mmap_ = reinterpret_cast<uptr>(chunks_[n - 1]) +
- chunks_[n - 1]->map_size;
- }
- if (p < min_mmap_ || p >= max_mmap_)
- return nullptr;
- uptr beg = 0, end = n - 1;
- // This loop is a log(n) lower_bound. It does not check for the exact match
- // to avoid expensive cache-thrashing loads.
- while (end - beg >= 2) {
- uptr mid = (beg + end) / 2; // Invariant: mid >= beg + 1
- if (p < reinterpret_cast<uptr>(chunks_[mid]))
- end = mid - 1; // We are not interested in chunks_[mid].
- else
- beg = mid; // chunks_[mid] may still be what we want.
- }
-
- if (beg < end) {
- CHECK_EQ(beg + 1, end);
- // There are 2 chunks left, choose one.
- if (p >= reinterpret_cast<uptr>(chunks_[end]))
- beg = end;
- }
-
- Header *h = chunks_[beg];
- if (h->map_beg + h->map_size <= p || p < h->map_beg)
- return nullptr;
- return GetUser(h);
- }
-
- void PrintStats() {
- Printf("Stats: LargeMmapAllocator: allocated %zd times, "
- "remains %zd (%zd K) max %zd M; by size logs: ",
- stats.n_allocs, stats.n_allocs - stats.n_frees,
- stats.currently_allocated >> 10, stats.max_allocated >> 20);
- for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
- uptr c = stats.by_size_log[i];
- if (!c) continue;
- Printf("%zd:%zd; ", i, c);
- }
- Printf("\n");
- }
-
- // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
- // introspection API.
- void ForceLock() {
- mutex_.Lock();
- }
-
- void ForceUnlock() {
- mutex_.Unlock();
- }
-
- // Iterate over all existing chunks.
- // The allocator must be locked when calling this function.
- void ForEachChunk(ForEachChunkCallback callback, void *arg) {
- for (uptr i = 0; i < n_chunks_; i++)
- callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
- }
-
- private:
- static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
- struct Header {
- uptr map_beg;
- uptr map_size;
- uptr size;
- uptr chunk_idx;
- };
-
- Header *GetHeader(uptr p) {
- CHECK(IsAligned(p, page_size_));
- return reinterpret_cast<Header*>(p - page_size_);
- }
- Header *GetHeader(const void *p) {
- return GetHeader(reinterpret_cast<uptr>(p));
- }
-
- void *GetUser(Header *h) {
- CHECK(IsAligned((uptr)h, page_size_));
- return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
- }
-
- uptr RoundUpMapSize(uptr size) {
- return RoundUpTo(size, page_size_) + page_size_;
- }
-
- uptr page_size_;
- Header *chunks_[kMaxNumChunks];
- uptr n_chunks_;
- uptr min_mmap_, max_mmap_;
- bool chunks_sorted_;
- struct Stats {
- uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
- } stats;
- atomic_uint8_t may_return_null_;
- SpinMutex mutex_;
-};
-
-// This class implements a complete memory allocator by using two
-// internal allocators:
-// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
-// When allocating 2^x bytes it should return 2^x aligned chunk.
-// PrimaryAllocator is used via a local AllocatorCache.
-// SecondaryAllocator can allocate anything, but is not efficient.
-template <class PrimaryAllocator, class AllocatorCache,
- class SecondaryAllocator> // NOLINT
-class CombinedAllocator {
- public:
- void InitCommon(bool may_return_null) {
- primary_.Init();
- atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
- }
-
- void InitLinkerInitialized(bool may_return_null) {
- secondary_.InitLinkerInitialized(may_return_null);
- stats_.InitLinkerInitialized();
- InitCommon(may_return_null);
- }
-
- void Init(bool may_return_null) {
- secondary_.Init(may_return_null);
- stats_.Init();
- InitCommon(may_return_null);
- }
-
- void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
- bool cleared = false, bool check_rss_limit = false) {
- // Returning 0 on malloc(0) may break a lot of code.
- if (size == 0)
- size = 1;
- if (size + alignment < size)
- return ReturnNullOrDie();
- if (check_rss_limit && RssLimitIsExceeded())
- return ReturnNullOrDie();
- if (alignment > 8)
- size = RoundUpTo(size, alignment);
- void *res;
- bool from_primary = primary_.CanAllocate(size, alignment);
- if (from_primary)
- res = cache->Allocate(&primary_, primary_.ClassID(size));
- else
- res = secondary_.Allocate(&stats_, size, alignment);
- if (alignment > 8)
- CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
- if (cleared && res && from_primary)
- internal_bzero_aligned16(res, RoundUpTo(size, 16));
- return res;
- }
-
- bool MayReturnNull() const {
- return atomic_load(&may_return_null_, memory_order_acquire);
- }
-
- void *ReturnNullOrDie() {
- if (MayReturnNull())
- return nullptr;
- ReportAllocatorCannotReturnNull();
- }
-
- void SetMayReturnNull(bool may_return_null) {
- secondary_.SetMayReturnNull(may_return_null);
- atomic_store(&may_return_null_, may_return_null, memory_order_release);
- }
-
- bool RssLimitIsExceeded() {
- return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
- }
-
- void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
- atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded,
- memory_order_release);
- }
-
- void Deallocate(AllocatorCache *cache, void *p) {
- if (!p) return;
- if (primary_.PointerIsMine(p))
- cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
- else
- secondary_.Deallocate(&stats_, p);
- }
-
- void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
- uptr alignment) {
- if (!p)
- return Allocate(cache, new_size, alignment);
- if (!new_size) {
- Deallocate(cache, p);
- return nullptr;
- }
- CHECK(PointerIsMine(p));
- uptr old_size = GetActuallyAllocatedSize(p);
- uptr memcpy_size = Min(new_size, old_size);
- void *new_p = Allocate(cache, new_size, alignment);
- if (new_p)
- internal_memcpy(new_p, p, memcpy_size);
- Deallocate(cache, p);
- return new_p;
- }
-
- bool PointerIsMine(void *p) {
- if (primary_.PointerIsMine(p))
- return true;
- return secondary_.PointerIsMine(p);
- }
-
- bool FromPrimary(void *p) {
- return primary_.PointerIsMine(p);
- }
-
- void *GetMetaData(const void *p) {
- if (primary_.PointerIsMine(p))
- return primary_.GetMetaData(p);
- return secondary_.GetMetaData(p);
- }
-
- void *GetBlockBegin(const void *p) {
- if (primary_.PointerIsMine(p))
- return primary_.GetBlockBegin(p);
- return secondary_.GetBlockBegin(p);
- }
-
- // This function does the same as GetBlockBegin, but is much faster.
- // Must be called with the allocator locked.
- void *GetBlockBeginFastLocked(void *p) {
- if (primary_.PointerIsMine(p))
- return primary_.GetBlockBegin(p);
- return secondary_.GetBlockBeginFastLocked(p);
- }
-
- uptr GetActuallyAllocatedSize(void *p) {
- if (primary_.PointerIsMine(p))
- return primary_.GetActuallyAllocatedSize(p);
- return secondary_.GetActuallyAllocatedSize(p);
- }
-
- uptr TotalMemoryUsed() {
- return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
- }
-
- void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
-
- void InitCache(AllocatorCache *cache) {
- cache->Init(&stats_);
- }
-
- void DestroyCache(AllocatorCache *cache) {
- cache->Destroy(&primary_, &stats_);
- }
-
- void SwallowCache(AllocatorCache *cache) {
- cache->Drain(&primary_);
- }
-
- void GetStats(AllocatorStatCounters s) const {
- stats_.Get(s);
- }
-
- void PrintStats() {
- primary_.PrintStats();
- secondary_.PrintStats();
- }
-
- // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
- // introspection API.
- void ForceLock() {
- primary_.ForceLock();
- secondary_.ForceLock();
- }
-
- void ForceUnlock() {
- secondary_.ForceUnlock();
- primary_.ForceUnlock();
- }
-
- // Iterate over all existing chunks.
- // The allocator must be locked when calling this function.
- void ForEachChunk(ForEachChunkCallback callback, void *arg) {
- primary_.ForEachChunk(callback, arg);
- secondary_.ForEachChunk(callback, arg);
- }
-
- private:
- PrimaryAllocator primary_;
- SecondaryAllocator secondary_;
- AllocatorGlobalStats stats_;
- atomic_uint8_t may_return_null_;
- atomic_uint8_t rss_limit_is_exceeded_;
-};
-
// Returns true if calloc(size, n) should return 0 due to overflow in size*n.
bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n);
+#include "sanitizer_allocator_size_class_map.h"
+#include "sanitizer_allocator_stats.h"
+#include "sanitizer_allocator_primary64.h"
+#include "sanitizer_allocator_bytemap.h"
+#include "sanitizer_allocator_primary32.h"
+#include "sanitizer_allocator_local_cache.h"
+#include "sanitizer_allocator_secondary.h"
+#include "sanitizer_allocator_combined.h"
+
} // namespace __sanitizer
#endif // SANITIZER_ALLOCATOR_H
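
After this split the umbrella header only declares the shared entry points and pulls the implementation pieces back in through the includes above, so consumers compose the allocator exactly as before. A minimal composition sketch, assuming the primary keeps the template parameters shown in the removed block; the constants below are illustrative, not the layout of any real tool:

    #include "sanitizer_common/sanitizer_allocator.h"
    using namespace __sanitizer;

    static const uptr kAllocatorSpace = ~(uptr)0;          // dynamic base address
    static const uptr kAllocatorSize = 0x40000000000ULL;   // 4T, example only
    typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
                                 16 /*metadata*/, DefaultSizeClassMap> Primary;
    typedef SizeClassAllocatorLocalCache<Primary> Cache;
    typedef LargeMmapAllocator<> Secondary;
    typedef CombinedAllocator<Primary, Cache, Secondary> Allocator;

    static Allocator allocator;
    static Cache cache;

    void ToolInit() {
      allocator.Init(/*may_return_null=*/false, kReleaseToOSIntervalNever);
      allocator.InitCache(&cache);
    }

    void *ToolAlloc(uptr size) {
      return allocator.Allocate(&cache, size, /*alignment=*/8);
    }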
diff --git a/lib/sanitizer_common/sanitizer_allocator_bytemap.h b/lib/sanitizer_common/sanitizer_allocator_bytemap.h
new file mode 100644
index 000000000000..92472cdf5150
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator_bytemap.h
@@ -0,0 +1,103 @@
+//===-- sanitizer_allocator_bytemap.h ---------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// Maps integers in range [0, kSize) to u8 values.
+template<u64 kSize>
+class FlatByteMap {
+ public:
+ void TestOnlyInit() {
+ internal_memset(map_, 0, sizeof(map_));
+ }
+
+ void set(uptr idx, u8 val) {
+ CHECK_LT(idx, kSize);
+ CHECK_EQ(0U, map_[idx]);
+ map_[idx] = val;
+ }
+ u8 operator[] (uptr idx) {
+ CHECK_LT(idx, kSize);
+ // FIXME: CHECK may be too expensive here.
+ return map_[idx];
+ }
+ private:
+ u8 map_[kSize];
+};
+
+// TwoLevelByteMap maps integers in range [0, kSize1*kSize2) to u8 values.
+// It is implemented as a two-dimensional array: array of kSize1 pointers
+// to kSize2-byte arrays. The secondary arrays are mmaped on demand.
+// Each value is initially zero and can be set to something else only once.
+// Setting and getting values from multiple threads is safe w/o extra locking.
+template <u64 kSize1, u64 kSize2, class MapUnmapCallback = NoOpMapUnmapCallback>
+class TwoLevelByteMap {
+ public:
+ void TestOnlyInit() {
+ internal_memset(map1_, 0, sizeof(map1_));
+ mu_.Init();
+ }
+
+ void TestOnlyUnmap() {
+ for (uptr i = 0; i < kSize1; i++) {
+ u8 *p = Get(i);
+ if (!p) continue;
+ MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2);
+ UnmapOrDie(p, kSize2);
+ }
+ }
+
+ uptr size() const { return kSize1 * kSize2; }
+ uptr size1() const { return kSize1; }
+ uptr size2() const { return kSize2; }
+
+ void set(uptr idx, u8 val) {
+ CHECK_LT(idx, kSize1 * kSize2);
+ u8 *map2 = GetOrCreate(idx / kSize2);
+ CHECK_EQ(0U, map2[idx % kSize2]);
+ map2[idx % kSize2] = val;
+ }
+
+ u8 operator[] (uptr idx) const {
+ CHECK_LT(idx, kSize1 * kSize2);
+ u8 *map2 = Get(idx / kSize2);
+ if (!map2) return 0;
+ return map2[idx % kSize2];
+ }
+
+ private:
+ u8 *Get(uptr idx) const {
+ CHECK_LT(idx, kSize1);
+ return reinterpret_cast<u8 *>(
+ atomic_load(&map1_[idx], memory_order_acquire));
+ }
+
+ u8 *GetOrCreate(uptr idx) {
+ u8 *res = Get(idx);
+ if (!res) {
+ SpinMutexLock l(&mu_);
+ if (!(res = Get(idx))) {
+ res = (u8*)MmapOrDie(kSize2, "TwoLevelByteMap");
+ MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);
+ atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
+ memory_order_release);
+ }
+ }
+ return res;
+ }
+
+ atomic_uintptr_t map1_[kSize1];
+ StaticSpinMutex mu_;
+};
+
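
A short usage sketch for the two byte maps above (the sizes are illustrative): each index may be set only once, reads of untouched indices return 0, and TwoLevelByteMap mmaps its second-level arrays lazily on the first set() in their range:

    // Illustrative sizes: 4096 first-level entries of 8192 bytes each.
    typedef TwoLevelByteMap<(1 << 12), (1 << 13)> RegionClassMap;
    static RegionClassMap region_classes;

    void Example() {
      region_classes.TestOnlyInit();
      region_classes.set(42, 7);        // maps the covering 8K chunk on demand
      u8 cls = region_classes[42];      // 7
      u8 none = region_classes[100000]; // 0: that chunk was never mapped
      (void)cls; (void)none;
    }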
diff --git a/lib/sanitizer_common/sanitizer_allocator_combined.h b/lib/sanitizer_common/sanitizer_allocator_combined.h
new file mode 100644
index 000000000000..19e1ae9b9f75
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -0,0 +1,233 @@
+//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// This class implements a complete memory allocator by using two
+// internal allocators:
+// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
+// When allocating 2^x bytes it should return 2^x aligned chunk.
+// PrimaryAllocator is used via a local AllocatorCache.
+// SecondaryAllocator can allocate anything, but is not efficient.
+template <class PrimaryAllocator, class AllocatorCache,
+ class SecondaryAllocator> // NOLINT
+class CombinedAllocator {
+ public:
+ void InitCommon(bool may_return_null, s32 release_to_os_interval_ms) {
+ primary_.Init(release_to_os_interval_ms);
+ atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
+ }
+
+ void InitLinkerInitialized(
+ bool may_return_null, s32 release_to_os_interval_ms) {
+ secondary_.InitLinkerInitialized(may_return_null);
+ stats_.InitLinkerInitialized();
+ InitCommon(may_return_null, release_to_os_interval_ms);
+ }
+
+ void Init(bool may_return_null, s32 release_to_os_interval_ms) {
+ secondary_.Init(may_return_null);
+ stats_.Init();
+ InitCommon(may_return_null, release_to_os_interval_ms);
+ }
+
+ void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
+ bool cleared = false, bool check_rss_limit = false) {
+ // Returning 0 on malloc(0) may break a lot of code.
+ if (size == 0)
+ size = 1;
+ if (size + alignment < size) return ReturnNullOrDieOnBadRequest();
+ if (check_rss_limit && RssLimitIsExceeded()) return ReturnNullOrDieOnOOM();
+ uptr original_size = size;
+ // If alignment requirements are to be fulfilled by the frontend allocator
+ // rather than by the primary or secondary, passing an alignment lower than
+ // or equal to 8 will prevent any further rounding up, as well as the later
+ // alignment check.
+ if (alignment > 8)
+ size = RoundUpTo(size, alignment);
+ void *res;
+ bool from_primary = primary_.CanAllocate(size, alignment);
+ // The primary allocator should return a 2^x aligned allocation when
+ // requested 2^x bytes, hence using the rounded up 'size' when being
+ // serviced by the primary (this is no longer true when the primary is
+ // using a non-fixed base address). The secondary takes care of the
+ // alignment without such requirement, and allocating 'size' would use
+ // extraneous memory, so we employ 'original_size'.
+ if (from_primary)
+ res = cache->Allocate(&primary_, primary_.ClassID(size));
+ else
+ res = secondary_.Allocate(&stats_, original_size, alignment);
+ if (alignment > 8)
+ CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
+ // When serviced by the secondary, the chunk comes from an mmap allocation
+ // and will be zeroed out anyway. We only need to clear out the chunk if
+ // it was serviced by the primary, hence using the rounded up 'size'.
+ if (cleared && res && from_primary)
+ internal_bzero_aligned16(res, RoundUpTo(size, 16));
+ return res;
+ }
+
+ bool MayReturnNull() const {
+ return atomic_load(&may_return_null_, memory_order_acquire);
+ }
+
+ void *ReturnNullOrDieOnBadRequest() {
+ if (MayReturnNull())
+ return nullptr;
+ ReportAllocatorCannotReturnNull(false);
+ }
+
+ void *ReturnNullOrDieOnOOM() {
+ if (MayReturnNull()) return nullptr;
+ ReportAllocatorCannotReturnNull(true);
+ }
+
+ void SetMayReturnNull(bool may_return_null) {
+ secondary_.SetMayReturnNull(may_return_null);
+ atomic_store(&may_return_null_, may_return_null, memory_order_release);
+ }
+
+ s32 ReleaseToOSIntervalMs() const {
+ return primary_.ReleaseToOSIntervalMs();
+ }
+
+ void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
+ primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
+ }
+
+ bool RssLimitIsExceeded() {
+ return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
+ }
+
+ void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
+ atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded,
+ memory_order_release);
+ }
+
+ void Deallocate(AllocatorCache *cache, void *p) {
+ if (!p) return;
+ if (primary_.PointerIsMine(p))
+ cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
+ else
+ secondary_.Deallocate(&stats_, p);
+ }
+
+ void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
+ uptr alignment) {
+ if (!p)
+ return Allocate(cache, new_size, alignment);
+ if (!new_size) {
+ Deallocate(cache, p);
+ return nullptr;
+ }
+ CHECK(PointerIsMine(p));
+ uptr old_size = GetActuallyAllocatedSize(p);
+ uptr memcpy_size = Min(new_size, old_size);
+ void *new_p = Allocate(cache, new_size, alignment);
+ if (new_p)
+ internal_memcpy(new_p, p, memcpy_size);
+ Deallocate(cache, p);
+ return new_p;
+ }
+
+ bool PointerIsMine(void *p) {
+ if (primary_.PointerIsMine(p))
+ return true;
+ return secondary_.PointerIsMine(p);
+ }
+
+ bool FromPrimary(void *p) {
+ return primary_.PointerIsMine(p);
+ }
+
+ void *GetMetaData(const void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetMetaData(p);
+ return secondary_.GetMetaData(p);
+ }
+
+ void *GetBlockBegin(const void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetBlockBegin(p);
+ return secondary_.GetBlockBegin(p);
+ }
+
+ // This function does the same as GetBlockBegin, but is much faster.
+ // Must be called with the allocator locked.
+ void *GetBlockBeginFastLocked(void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetBlockBegin(p);
+ return secondary_.GetBlockBeginFastLocked(p);
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetActuallyAllocatedSize(p);
+ return secondary_.GetActuallyAllocatedSize(p);
+ }
+
+ uptr TotalMemoryUsed() {
+ return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
+ }
+
+ void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
+
+ void InitCache(AllocatorCache *cache) {
+ cache->Init(&stats_);
+ }
+
+ void DestroyCache(AllocatorCache *cache) {
+ cache->Destroy(&primary_, &stats_);
+ }
+
+ void SwallowCache(AllocatorCache *cache) {
+ cache->Drain(&primary_);
+ }
+
+ void GetStats(AllocatorStatCounters s) const {
+ stats_.Get(s);
+ }
+
+ void PrintStats() {
+ primary_.PrintStats();
+ secondary_.PrintStats();
+ }
+
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ primary_.ForceLock();
+ secondary_.ForceLock();
+ }
+
+ void ForceUnlock() {
+ secondary_.ForceUnlock();
+ primary_.ForceUnlock();
+ }
+
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ primary_.ForEachChunk(callback, arg);
+ secondary_.ForEachChunk(callback, arg);
+ }
+
+ private:
+ PrimaryAllocator primary_;
+ SecondaryAllocator secondary_;
+ AllocatorGlobalStats stats_;
+ atomic_uint8_t may_return_null_;
+ atomic_uint8_t rss_limit_is_exceeded_;
+};
+
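The sequence below is a minimal usage sketch of the combined allocator defined above; it is not part of the patch, and it assumes the template has already been instantiated and initialized with a tool's concrete primary, secondary and cache types (those typedefs are tool-specific).

// Hedged sketch: 'Allocator' and 'Cache' stand for a tool's concrete
// CombinedAllocator instantiation and its AllocatorCache; both are assumed
// to be set up elsewhere.
template <class Allocator, class Cache>
void CombinedAllocatorUsageSketch(Allocator *allocator, Cache *cache) {
  allocator->InitCache(cache);                    // register per-thread stats
  // alignment 16 > 8, so Allocate() rounds the size up and checks alignment.
  void *p = allocator->Allocate(cache, 100, 16);
  if (p && allocator->PointerIsMine(p))
    p = allocator->Reallocate(cache, p, 200, 16); // copies min(old, new) bytes
  allocator->Deallocate(cache, p);                // no-op if p is null
  allocator->DestroyCache(cache);                 // drains and unregisters stats
}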
diff --git a/lib/sanitizer_common/sanitizer_allocator_interface.h b/lib/sanitizer_common/sanitizer_allocator_interface.h
index 797c38a79885..5ff6edba0a1a 100644
--- a/lib/sanitizer_common/sanitizer_allocator_interface.h
+++ b/lib/sanitizer_common/sanitizer_allocator_interface.h
@@ -37,6 +37,10 @@ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ void __sanitizer_malloc_hook(void *ptr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ void __sanitizer_free_hook(void *ptr);
+
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_print_memory_profile(int top_percent);
} // extern "C"
#endif // SANITIZER_ALLOCATOR_INTERFACE_H
diff --git a/lib/sanitizer_common/sanitizer_allocator_internal.h b/lib/sanitizer_common/sanitizer_allocator_internal.h
index a7ea454ff17b..e939cbe01c3c 100644
--- a/lib/sanitizer_common/sanitizer_allocator_internal.h
+++ b/lib/sanitizer_common/sanitizer_allocator_internal.h
@@ -61,8 +61,8 @@ enum InternalAllocEnum {
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
- InternalAllocEnum) {
- return InternalAlloc(size);
+ __sanitizer::InternalAllocEnum) {
+ return __sanitizer::InternalAlloc(size);
}
#endif // SANITIZER_ALLOCATOR_INTERNAL_H
diff --git a/lib/sanitizer_common/sanitizer_allocator_local_cache.h b/lib/sanitizer_common/sanitizer_allocator_local_cache.h
new file mode 100644
index 000000000000..e1172e0c2820
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator_local_cache.h
@@ -0,0 +1,249 @@
+//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// Objects of this type should be used as local caches for SizeClassAllocator64
+// or SizeClassAllocator32. Since the typical use of this class is to have one
+// object per thread in TLS, it has to be POD.
+template<class SizeClassAllocator>
+struct SizeClassAllocatorLocalCache
+ : SizeClassAllocator::AllocatorCache {
+};
+
+// Cache used by SizeClassAllocator64.
+template <class SizeClassAllocator>
+struct SizeClassAllocator64LocalCache {
+ typedef SizeClassAllocator Allocator;
+ static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
+ typedef typename Allocator::SizeClassMapT SizeClassMap;
+ typedef typename Allocator::CompactPtrT CompactPtrT;
+
+ void Init(AllocatorGlobalStats *s) {
+ stats_.Init();
+ if (s)
+ s->Register(&stats_);
+ }
+
+ void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
+ Drain(allocator);
+ if (s)
+ s->Unregister(&stats_);
+ }
+
+ void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
+ PerClass *c = &per_class_[class_id];
+ if (UNLIKELY(c->count == 0))
+ Refill(c, allocator, class_id);
+ CHECK_GT(c->count, 0);
+ CompactPtrT chunk = c->chunks[--c->count];
+ void *res = reinterpret_cast<void *>(allocator->CompactPtrToPointer(
+ allocator->GetRegionBeginBySizeClass(class_id), chunk));
+ return res;
+ }
+
+ void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ // If the first allocator call on a new thread is a deallocation, then
+ // max_count will be zero, leading to check failure.
+ InitCache();
+ stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
+ PerClass *c = &per_class_[class_id];
+ CHECK_NE(c->max_count, 0UL);
+ if (UNLIKELY(c->count == c->max_count))
+ Drain(c, allocator, class_id, c->max_count / 2);
+ CompactPtrT chunk = allocator->PointerToCompactPtr(
+ allocator->GetRegionBeginBySizeClass(class_id),
+ reinterpret_cast<uptr>(p));
+ c->chunks[c->count++] = chunk;
+ }
+
+ void Drain(SizeClassAllocator *allocator) {
+ for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
+ PerClass *c = &per_class_[class_id];
+ while (c->count > 0)
+ Drain(c, allocator, class_id, c->count);
+ }
+ }
+
+ // private:
+ struct PerClass {
+ u32 count;
+ u32 max_count;
+ CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
+ };
+ PerClass per_class_[kNumClasses];
+ AllocatorStats stats_;
+
+ void InitCache() {
+ if (per_class_[1].max_count)
+ return;
+ for (uptr i = 0; i < kNumClasses; i++) {
+ PerClass *c = &per_class_[i];
+ c->max_count = 2 * SizeClassMap::MaxCachedHint(i);
+ }
+ }
+
+ NOINLINE void Refill(PerClass *c, SizeClassAllocator *allocator,
+ uptr class_id) {
+ InitCache();
+ uptr num_requested_chunks = SizeClassMap::MaxCachedHint(class_id);
+ allocator->GetFromAllocator(&stats_, class_id, c->chunks,
+ num_requested_chunks);
+ c->count = num_requested_chunks;
+ }
+
+ NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
+ uptr count) {
+ InitCache();
+ CHECK_GE(c->count, count);
+ uptr first_idx_to_drain = c->count - count;
+ c->count -= count;
+ allocator->ReturnToAllocator(&stats_, class_id,
+ &c->chunks[first_idx_to_drain], count);
+ }
+};
+
+// Cache used by SizeClassAllocator32.
+template <class SizeClassAllocator>
+struct SizeClassAllocator32LocalCache {
+ typedef SizeClassAllocator Allocator;
+ typedef typename Allocator::TransferBatch TransferBatch;
+ static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
+
+ void Init(AllocatorGlobalStats *s) {
+ stats_.Init();
+ if (s)
+ s->Register(&stats_);
+ }
+
+ void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
+ Drain(allocator);
+ if (s)
+ s->Unregister(&stats_);
+ }
+
+ void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
+ PerClass *c = &per_class_[class_id];
+ if (UNLIKELY(c->count == 0))
+ Refill(allocator, class_id);
+ void *res = c->batch[--c->count];
+ PREFETCH(c->batch[c->count - 1]);
+ return res;
+ }
+
+ void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ // If the first allocator call on a new thread is a deallocation, then
+ // max_count will be zero, leading to check failure.
+ InitCache();
+ stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
+ PerClass *c = &per_class_[class_id];
+ CHECK_NE(c->max_count, 0UL);
+ if (UNLIKELY(c->count == c->max_count))
+ Drain(allocator, class_id);
+ c->batch[c->count++] = p;
+ }
+
+ void Drain(SizeClassAllocator *allocator) {
+ for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
+ PerClass *c = &per_class_[class_id];
+ while (c->count > 0)
+ Drain(allocator, class_id);
+ }
+ }
+
+ // private:
+ typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
+ struct PerClass {
+ uptr count;
+ uptr max_count;
+ void *batch[2 * TransferBatch::kMaxNumCached];
+ };
+ PerClass per_class_[kNumClasses];
+ AllocatorStats stats_;
+
+ void InitCache() {
+ if (per_class_[1].max_count)
+ return;
+ for (uptr i = 0; i < kNumClasses; i++) {
+ PerClass *c = &per_class_[i];
+ c->max_count = 2 * TransferBatch::MaxCached(i);
+ }
+ }
+
+ // TransferBatch class is declared in SizeClassAllocator.
+ // We transfer chunks between central and thread-local free lists in batches.
+ // For small size classes we allocate batches separately.
+ // For large size classes we may use one of the chunks to store the batch.
+ // sizeof(TransferBatch) must be a power of 2 for more efficient allocation.
+ static uptr SizeClassForTransferBatch(uptr class_id) {
+ if (Allocator::ClassIdToSize(class_id) <
+ TransferBatch::AllocationSizeRequiredForNElements(
+ TransferBatch::MaxCached(class_id)))
+ return SizeClassMap::ClassID(sizeof(TransferBatch));
+ return 0;
+ }
+
+ // Returns a TransferBatch suitable for class_id.
+ // For small size classes allocates the batch from the allocator.
+ // For large size classes simply returns b.
+ TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
+ TransferBatch *b) {
+ if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
+ return (TransferBatch*)Allocate(allocator, batch_class_id);
+ return b;
+ }
+
+ // Destroys TransferBatch b.
+ // For small size classes deallocates b to the allocator.
+ // Does nothing for large size classes.
+ void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
+ TransferBatch *b) {
+ if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
+ Deallocate(allocator, batch_class_id, b);
+ }
+
+ NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
+ InitCache();
+ PerClass *c = &per_class_[class_id];
+ TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
+ CHECK_GT(b->Count(), 0);
+ b->CopyToArray(c->batch);
+ c->count = b->Count();
+ DestroyBatch(class_id, allocator, b);
+ }
+
+ NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
+ InitCache();
+ PerClass *c = &per_class_[class_id];
+ uptr cnt = Min(c->max_count / 2, c->count);
+ uptr first_idx_to_drain = c->count - cnt;
+ TransferBatch *b = CreateBatch(
+ class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
+ b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
+ &c->batch[first_idx_to_drain], cnt);
+ c->count -= cnt;
+ allocator->DeallocateBatch(&stats_, class_id, b);
+ }
+};
+
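To make the refill/drain policy of the caches above concrete, here is a small standalone model (not the real code); it assumes MaxCachedHint is 4 for the modeled size class, so max_count is 8: an empty cache refills by the hint, and a full cache drains half of max_count.

#include <cassert>
#include <cstdio>

// Toy model of a per-class cache with the same hysteresis as above.
struct ToyPerClassCache {
  static const unsigned kHint = 4;           // assumed MaxCachedHint(class_id)
  static const unsigned kMax = 2 * kHint;    // max_count
  unsigned count = 0;
  unsigned refills = 0, drains = 0;

  void Allocate() {
    if (count == 0) { count = kHint; refills++; }  // Refill from the allocator
    count--;                                       // hand a chunk to the caller
  }
  void Deallocate() {
    if (count == kMax) { count -= kMax / 2; drains++; }  // Drain to the allocator
    count++;                                             // keep the chunk locally
  }
};

int main() {
  ToyPerClassCache c;
  for (int i = 0; i < 16; i++) c.Deallocate();   // frees accumulate locally
  for (int i = 0; i < 16; i++) c.Allocate();
  std::printf("refills=%u drains=%u\n", c.refills, c.drains);
  assert(c.refills >= 1 && c.drains >= 1);
  return 0;
}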
diff --git a/lib/sanitizer_common/sanitizer_allocator_primary32.h b/lib/sanitizer_common/sanitizer_allocator_primary32.h
new file mode 100644
index 000000000000..2882afd1fe1d
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -0,0 +1,310 @@
+//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;
+
+// SizeClassAllocator32 -- allocator for 32-bit address space.
+// This allocator can theoretically be used on 64-bit arch, but there it is less
+// efficient than SizeClassAllocator64.
+//
+// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
+// be returned by MmapOrDie().
+//
+// Region:
+// a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
+// Since the regions are aligned by kRegionSize, there are exactly
+// kNumPossibleRegions possible regions in the address space and so we keep
+// a ByteMap possible_regions to store the size classes of each Region.
+// 0 size class means the region is not used by the allocator.
+//
+// One Region is used to allocate chunks of a single size class.
+// A Region looks like this:
+// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
+//
+// In order to avoid false sharing, the objects of this class should be
+// cache-line aligned.
+template <const uptr kSpaceBeg, const u64 kSpaceSize,
+ const uptr kMetadataSize, class SizeClassMap,
+ const uptr kRegionSizeLog,
+ class ByteMap,
+ class MapUnmapCallback = NoOpMapUnmapCallback>
+class SizeClassAllocator32 {
+ public:
+ struct TransferBatch {
+ static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
+ void SetFromArray(uptr region_beg_unused, void *batch[], uptr count) {
+ count_ = count;
+ CHECK_LE(count_, kMaxNumCached);
+ for (uptr i = 0; i < count; i++)
+ batch_[i] = batch[i];
+ }
+ uptr Count() const { return count_; }
+ void Clear() { count_ = 0; }
+ void Add(void *ptr) {
+ batch_[count_++] = ptr;
+ CHECK_LE(count_, kMaxNumCached);
+ }
+ void CopyToArray(void *to_batch[]) {
+ for (uptr i = 0, n = Count(); i < n; i++)
+ to_batch[i] = batch_[i];
+ }
+
+ // How much memory do we need for a batch containing n elements?
+ static uptr AllocationSizeRequiredForNElements(uptr n) {
+ return sizeof(uptr) * 2 + sizeof(void *) * n;
+ }
+ static uptr MaxCached(uptr class_id) {
+ return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(class_id));
+ }
+
+ TransferBatch *next;
+
+ private:
+ uptr count_;
+ void *batch_[kMaxNumCached];
+ };
+
+ static const uptr kBatchSize = sizeof(TransferBatch);
+ COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
+ COMPILER_CHECK(sizeof(TransferBatch) ==
+ SizeClassMap::kMaxNumCachedHint * sizeof(uptr));
+
+ static uptr ClassIdToSize(uptr class_id) {
+ return SizeClassMap::Size(class_id);
+ }
+
+ typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
+ SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT;
+ typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;
+
+ void Init(s32 release_to_os_interval_ms) {
+ possible_regions.TestOnlyInit();
+ internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
+ }
+
+ s32 ReleaseToOSIntervalMs() const {
+ return kReleaseToOSIntervalNever;
+ }
+
+ void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
+ // This is empty here. Currently only implemented in 64-bit allocator.
+ }
+
+ void *MapWithCallback(uptr size) {
+ size = RoundUpTo(size, GetPageSizeCached());
+ void *res = MmapOrDie(size, "SizeClassAllocator32");
+ MapUnmapCallback().OnMap((uptr)res, size);
+ return res;
+ }
+
+ void UnmapWithCallback(uptr beg, uptr size) {
+ MapUnmapCallback().OnUnmap(beg, size);
+ UnmapOrDie(reinterpret_cast<void *>(beg), size);
+ }
+
+ static bool CanAllocate(uptr size, uptr alignment) {
+ return size <= SizeClassMap::kMaxSize &&
+ alignment <= SizeClassMap::kMaxSize;
+ }
+
+ void *GetMetaData(const void *p) {
+ CHECK(PointerIsMine(p));
+ uptr mem = reinterpret_cast<uptr>(p);
+ uptr beg = ComputeRegionBeg(mem);
+ uptr size = ClassIdToSize(GetSizeClass(p));
+ u32 offset = mem - beg;
+ uptr n = offset / (u32)size; // 32-bit division
+ uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
+ return reinterpret_cast<void*>(meta);
+ }
+
+ NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
+ uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ SizeClassInfo *sci = GetSizeClassInfo(class_id);
+ SpinMutexLock l(&sci->mutex);
+ if (sci->free_list.empty())
+ PopulateFreeList(stat, c, sci, class_id);
+ CHECK(!sci->free_list.empty());
+ TransferBatch *b = sci->free_list.front();
+ sci->free_list.pop_front();
+ return b;
+ }
+
+ NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
+ TransferBatch *b) {
+ CHECK_LT(class_id, kNumClasses);
+ SizeClassInfo *sci = GetSizeClassInfo(class_id);
+ SpinMutexLock l(&sci->mutex);
+ CHECK_GT(b->Count(), 0);
+ sci->free_list.push_front(b);
+ }
+
+ uptr GetRegionBeginBySizeClass(uptr class_id) { return 0; }
+
+ bool PointerIsMine(const void *p) {
+ uptr mem = reinterpret_cast<uptr>(p);
+ if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
+ return false;
+ return GetSizeClass(p) != 0;
+ }
+
+ uptr GetSizeClass(const void *p) {
+ return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
+ }
+
+ void *GetBlockBegin(const void *p) {
+ CHECK(PointerIsMine(p));
+ uptr mem = reinterpret_cast<uptr>(p);
+ uptr beg = ComputeRegionBeg(mem);
+ uptr size = ClassIdToSize(GetSizeClass(p));
+ u32 offset = mem - beg;
+ u32 n = offset / (u32)size; // 32-bit division
+ uptr res = beg + (n * (u32)size);
+ return reinterpret_cast<void*>(res);
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ CHECK(PointerIsMine(p));
+ return ClassIdToSize(GetSizeClass(p));
+ }
+
+ uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
+
+ uptr TotalMemoryUsed() {
+ // No need to lock here.
+ uptr res = 0;
+ for (uptr i = 0; i < kNumPossibleRegions; i++)
+ if (possible_regions[i])
+ res += kRegionSize;
+ return res;
+ }
+
+ void TestOnlyUnmap() {
+ for (uptr i = 0; i < kNumPossibleRegions; i++)
+ if (possible_regions[i])
+ UnmapWithCallback((i * kRegionSize), kRegionSize);
+ }
+
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ for (uptr i = 0; i < kNumClasses; i++) {
+ GetSizeClassInfo(i)->mutex.Lock();
+ }
+ }
+
+ void ForceUnlock() {
+ for (int i = kNumClasses - 1; i >= 0; i--) {
+ GetSizeClassInfo(i)->mutex.Unlock();
+ }
+ }
+
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ for (uptr region = 0; region < kNumPossibleRegions; region++)
+ if (possible_regions[region]) {
+ uptr chunk_size = ClassIdToSize(possible_regions[region]);
+ uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
+ uptr region_beg = region * kRegionSize;
+ for (uptr chunk = region_beg;
+ chunk < region_beg + max_chunks_in_region * chunk_size;
+ chunk += chunk_size) {
+ // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
+ callback(chunk, arg);
+ }
+ }
+ }
+
+ void PrintStats() {
+ }
+
+ static uptr AdditionalSize() {
+ return 0;
+ }
+
+ typedef SizeClassMap SizeClassMapT;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses;
+
+ private:
+ static const uptr kRegionSize = 1 << kRegionSizeLog;
+ static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
+
+ struct SizeClassInfo {
+ SpinMutex mutex;
+ IntrusiveList<TransferBatch> free_list;
+ char padding[kCacheLineSize - sizeof(uptr) -
+ sizeof(IntrusiveList<TransferBatch>)];
+ };
+ COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);
+
+ uptr ComputeRegionId(uptr mem) {
+ uptr res = mem >> kRegionSizeLog;
+ CHECK_LT(res, kNumPossibleRegions);
+ return res;
+ }
+
+ uptr ComputeRegionBeg(uptr mem) {
+ return mem & ~(kRegionSize - 1);
+ }
+
+ uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
+ "SizeClassAllocator32"));
+ MapUnmapCallback().OnMap(res, kRegionSize);
+ stat->Add(AllocatorStatMapped, kRegionSize);
+ CHECK_EQ(0U, (res & (kRegionSize - 1)));
+ possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
+ return res;
+ }
+
+ SizeClassInfo *GetSizeClassInfo(uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ return &size_class_info_array[class_id];
+ }
+
+ void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
+ SizeClassInfo *sci, uptr class_id) {
+ uptr size = ClassIdToSize(class_id);
+ uptr reg = AllocateRegion(stat, class_id);
+ uptr n_chunks = kRegionSize / (size + kMetadataSize);
+ uptr max_count = TransferBatch::MaxCached(class_id);
+ TransferBatch *b = nullptr;
+ for (uptr i = reg; i < reg + n_chunks * size; i += size) {
+ if (!b) {
+ b = c->CreateBatch(class_id, this, (TransferBatch*)i);
+ b->Clear();
+ }
+ b->Add((void*)i);
+ if (b->Count() == max_count) {
+ CHECK_GT(b->Count(), 0);
+ sci->free_list.push_back(b);
+ b = nullptr;
+ }
+ }
+ if (b) {
+ CHECK_GT(b->Count(), 0);
+ sci->free_list.push_back(b);
+ }
+ }
+
+ ByteMap possible_regions;
+ SizeClassInfo size_class_info_array[kNumClasses];
+};
+
+
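The arithmetic behind ComputeRegionId/ComputeRegionBeg and the backwards-growing metadata in GetMetaData() can be checked with a tiny standalone sketch; the region size (2^20) and metadata size (16) below are illustrative assumptions, not values from this patch.

#include <cassert>
#include <cstdint>

int main() {
  typedef uint64_t uptr;
  const uptr kRegionSizeLog = 20;                 // assumption: 1 MiB regions
  const uptr kRegionSize = 1ULL << kRegionSizeLog;
  const uptr kMetadataSize = 16;                  // assumption
  const uptr chunk_size = 64;                     // some size class

  uptr p = 0x300000 + 3 * chunk_size;             // 4th chunk of a region
  uptr region_id = p >> kRegionSizeLog;           // ComputeRegionId
  uptr region_beg = p & ~(kRegionSize - 1);       // ComputeRegionBeg
  uptr n = (p - region_beg) / chunk_size;         // chunk index in the region
  // Metadata grows backwards from the region end, as in GetMetaData().
  uptr meta = (region_beg + kRegionSize) - (n + 1) * kMetadataSize;

  assert(region_id == 3);
  assert(region_beg == 0x300000);
  assert(n == 3);
  assert(meta == 0x400000 - 4 * 16);
  return 0;
}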
diff --git a/lib/sanitizer_common/sanitizer_allocator_primary64.h b/lib/sanitizer_common/sanitizer_allocator_primary64.h
new file mode 100644
index 000000000000..f2d94a07a523
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -0,0 +1,522 @@
+//===-- sanitizer_allocator_primary64.h -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+template<class SizeClassAllocator> struct SizeClassAllocator64LocalCache;
+
+// SizeClassAllocator64 -- allocator for 64-bit address space.
+// The template parameter Params is a class containing the actual parameters.
+//
+// Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.
+// If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically by mmap.
+// Otherwise SpaceBeg=kSpaceBeg (fixed address).
+// kSpaceSize is a power of two.
+// At the beginning the entire space is mprotect-ed, then small parts of it
+// are mapped on demand.
+//
+// Region: a part of Space dedicated to a single size class.
+// There are kNumClasses Regions of equal size.
+//
+// UserChunk: a piece of memory returned to user.
+// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
+
+// FreeArray is an array of free-d chunks (stored as 4-byte offsets).
+//
+// A Region looks like this:
+// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1 FreeArray
+
+struct SizeClassAllocator64FlagMasks { // Bit masks.
+ enum {
+ kRandomShuffleChunks = 1,
+ };
+};
+
+template <class Params>
+class SizeClassAllocator64 {
+ public:
+ static const uptr kSpaceBeg = Params::kSpaceBeg;
+ static const uptr kSpaceSize = Params::kSpaceSize;
+ static const uptr kMetadataSize = Params::kMetadataSize;
+ typedef typename Params::SizeClassMap SizeClassMap;
+ typedef typename Params::MapUnmapCallback MapUnmapCallback;
+
+ static const bool kRandomShuffleChunks =
+ Params::kFlags & SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
+
+ typedef SizeClassAllocator64<Params> ThisT;
+ typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache;
+
+ // When we know the size class (the region base) we can represent a pointer
+ // as a 4-byte integer (offset from the region start shifted right by 4).
+ typedef u32 CompactPtrT;
+ static const uptr kCompactPtrScale = 4;
+ CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) {
+ return static_cast<CompactPtrT>((ptr - base) >> kCompactPtrScale);
+ }
+ uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) {
+ return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
+ }
+
+ void Init(s32 release_to_os_interval_ms) {
+ uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
+ if (kUsingConstantSpaceBeg) {
+ CHECK_EQ(kSpaceBeg, reinterpret_cast<uptr>(
+ MmapFixedNoAccess(kSpaceBeg, TotalSpaceSize)));
+ } else {
+ NonConstSpaceBeg =
+ reinterpret_cast<uptr>(MmapNoAccess(TotalSpaceSize));
+ CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
+ }
+ SetReleaseToOSIntervalMs(release_to_os_interval_ms);
+ MapWithCallback(SpaceEnd(), AdditionalSize());
+ }
+
+ s32 ReleaseToOSIntervalMs() const {
+ return atomic_load(&release_to_os_interval_ms_, memory_order_relaxed);
+ }
+
+ void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
+ atomic_store(&release_to_os_interval_ms_, release_to_os_interval_ms,
+ memory_order_relaxed);
+ }
+
+ void MapWithCallback(uptr beg, uptr size) {
+ CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
+ MapUnmapCallback().OnMap(beg, size);
+ }
+
+ void UnmapWithCallback(uptr beg, uptr size) {
+ MapUnmapCallback().OnUnmap(beg, size);
+ UnmapOrDie(reinterpret_cast<void *>(beg), size);
+ }
+
+ static bool CanAllocate(uptr size, uptr alignment) {
+ return size <= SizeClassMap::kMaxSize &&
+ alignment <= SizeClassMap::kMaxSize;
+ }
+
+ NOINLINE void ReturnToAllocator(AllocatorStats *stat, uptr class_id,
+ const CompactPtrT *chunks, uptr n_chunks) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ CompactPtrT *free_array = GetFreeArray(region_beg);
+
+ BlockingMutexLock l(&region->mutex);
+ uptr old_num_chunks = region->num_freed_chunks;
+ uptr new_num_freed_chunks = old_num_chunks + n_chunks;
+ EnsureFreeArraySpace(region, region_beg, new_num_freed_chunks);
+ for (uptr i = 0; i < n_chunks; i++)
+ free_array[old_num_chunks + i] = chunks[i];
+ region->num_freed_chunks = new_num_freed_chunks;
+ region->n_freed += n_chunks;
+
+ MaybeReleaseToOS(class_id);
+ }
+
+ NOINLINE void GetFromAllocator(AllocatorStats *stat, uptr class_id,
+ CompactPtrT *chunks, uptr n_chunks) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ CompactPtrT *free_array = GetFreeArray(region_beg);
+
+ BlockingMutexLock l(&region->mutex);
+ if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
+ PopulateFreeArray(stat, class_id, region,
+ n_chunks - region->num_freed_chunks);
+ CHECK_GE(region->num_freed_chunks, n_chunks);
+ }
+ region->num_freed_chunks -= n_chunks;
+ uptr base_idx = region->num_freed_chunks;
+ for (uptr i = 0; i < n_chunks; i++)
+ chunks[i] = free_array[base_idx + i];
+ region->n_allocated += n_chunks;
+ }
+
+
+ bool PointerIsMine(const void *p) {
+ uptr P = reinterpret_cast<uptr>(p);
+ if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
+ return P / kSpaceSize == kSpaceBeg / kSpaceSize;
+ return P >= SpaceBeg() && P < SpaceEnd();
+ }
+
+ uptr GetRegionBegin(const void *p) {
+ if (kUsingConstantSpaceBeg)
+ return reinterpret_cast<uptr>(p) & ~(kRegionSize - 1);
+ uptr space_beg = SpaceBeg();
+ return ((reinterpret_cast<uptr>(p) - space_beg) & ~(kRegionSize - 1)) +
+ space_beg;
+ }
+
+ uptr GetRegionBeginBySizeClass(uptr class_id) {
+ return SpaceBeg() + kRegionSize * class_id;
+ }
+
+ uptr GetSizeClass(const void *p) {
+ if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
+ return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded;
+ return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) %
+ kNumClassesRounded;
+ }
+
+ void *GetBlockBegin(const void *p) {
+ uptr class_id = GetSizeClass(p);
+ uptr size = ClassIdToSize(class_id);
+ if (!size) return nullptr;
+ uptr chunk_idx = GetChunkIdx((uptr)p, size);
+ uptr reg_beg = GetRegionBegin(p);
+ uptr beg = chunk_idx * size;
+ uptr next_beg = beg + size;
+ if (class_id >= kNumClasses) return nullptr;
+ RegionInfo *region = GetRegionInfo(class_id);
+ if (region->mapped_user >= next_beg)
+ return reinterpret_cast<void*>(reg_beg + beg);
+ return nullptr;
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ CHECK(PointerIsMine(p));
+ return ClassIdToSize(GetSizeClass(p));
+ }
+
+ uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
+
+ void *GetMetaData(const void *p) {
+ uptr class_id = GetSizeClass(p);
+ uptr size = ClassIdToSize(class_id);
+ uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
+ uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ return reinterpret_cast<void *>(GetMetadataEnd(region_beg) -
+ (1 + chunk_idx) * kMetadataSize);
+ }
+
+ uptr TotalMemoryUsed() {
+ uptr res = 0;
+ for (uptr i = 0; i < kNumClasses; i++)
+ res += GetRegionInfo(i)->allocated_user;
+ return res;
+ }
+
+ // Test-only.
+ void TestOnlyUnmap() {
+ UnmapWithCallback(SpaceBeg(), kSpaceSize + AdditionalSize());
+ }
+
+ static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats,
+ uptr stats_size) {
+ for (uptr class_id = 0; class_id < stats_size; class_id++)
+ if (stats[class_id] == start)
+ stats[class_id] = rss;
+ }
+
+ void PrintStats(uptr class_id, uptr rss) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ if (region->mapped_user == 0) return;
+ uptr in_use = region->n_allocated - region->n_freed;
+ uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id);
+ Printf(
+ " %02zd (%zd): mapped: %zdK allocs: %zd frees: %zd inuse: %zd "
+ "num_freed_chunks %zd"
+ " avail: %zd rss: %zdK releases: %zd\n",
+ class_id, ClassIdToSize(class_id), region->mapped_user >> 10,
+ region->n_allocated, region->n_freed, in_use,
+ region->num_freed_chunks, avail_chunks, rss >> 10,
+ region->rtoi.num_releases);
+ }
+
+ void PrintStats() {
+ uptr total_mapped = 0;
+ uptr n_allocated = 0;
+ uptr n_freed = 0;
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ total_mapped += region->mapped_user;
+ n_allocated += region->n_allocated;
+ n_freed += region->n_freed;
+ }
+ Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
+ "remains %zd\n",
+ total_mapped >> 20, n_allocated, n_allocated - n_freed);
+ uptr rss_stats[kNumClasses];
+ for (uptr class_id = 0; class_id < kNumClasses; class_id++)
+ rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id;
+ GetMemoryProfile(FillMemoryProfile, rss_stats, kNumClasses);
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++)
+ PrintStats(class_id, rss_stats[class_id]);
+ }
+
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ for (uptr i = 0; i < kNumClasses; i++) {
+ GetRegionInfo(i)->mutex.Lock();
+ }
+ }
+
+ void ForceUnlock() {
+ for (int i = (int)kNumClasses - 1; i >= 0; i--) {
+ GetRegionInfo(i)->mutex.Unlock();
+ }
+ }
+
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ uptr chunk_size = ClassIdToSize(class_id);
+ uptr region_beg = SpaceBeg() + class_id * kRegionSize;
+ for (uptr chunk = region_beg;
+ chunk < region_beg + region->allocated_user;
+ chunk += chunk_size) {
+ // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
+ callback(chunk, arg);
+ }
+ }
+ }
+
+ static uptr ClassIdToSize(uptr class_id) {
+ return SizeClassMap::Size(class_id);
+ }
+
+ static uptr AdditionalSize() {
+ return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
+ GetPageSizeCached());
+ }
+
+ typedef SizeClassMap SizeClassMapT;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses;
+ static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
+
+ private:
+ static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
+ // FreeArray is the array of free-d chunks (stored as 4-byte offsets).
+ // In the worst case it may require kRegionSize/SizeClassMap::kMinSize
+ // elements, but in reality this will not happen. For simplicity we
+ // dedicate 1/8 of the region's virtual space to FreeArray.
+ static const uptr kFreeArraySize = kRegionSize / 8;
+
+ static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0;
+ uptr NonConstSpaceBeg;
+ uptr SpaceBeg() const {
+ return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
+ }
+ uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
+ // kRegionSize must be >= 2^32.
+ COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
+ // kRegionSize must be <= 2^36, see CompactPtrT.
+ COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
+ // Call mmap for user memory with at least this size.
+ static const uptr kUserMapSize = 1 << 16;
+ // Call mmap for metadata memory with at least this size.
+ static const uptr kMetaMapSize = 1 << 16;
+ // Call mmap for free array memory with at least this size.
+ static const uptr kFreeArrayMapSize = 1 << 16;
+
+ atomic_sint32_t release_to_os_interval_ms_;
+
+ struct ReleaseToOsInfo {
+ uptr n_freed_at_last_release;
+ uptr num_releases;
+ u64 last_release_at_ns;
+ };
+
+ struct RegionInfo {
+ BlockingMutex mutex;
+ uptr num_freed_chunks; // Number of elements in the freearray.
+ uptr mapped_free_array; // Bytes mapped for freearray.
+ uptr allocated_user; // Bytes allocated for user memory.
+ uptr allocated_meta; // Bytes allocated for metadata.
+ uptr mapped_user; // Bytes mapped for user memory.
+ uptr mapped_meta; // Bytes mapped for metadata.
+ u32 rand_state; // Seed for random shuffle, used if kRandomShuffleChunks.
+ uptr n_allocated, n_freed; // Just stats.
+ ReleaseToOsInfo rtoi;
+ };
+ COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);
+
+ u32 Rand(u32 *state) { // ANSI C linear congruential PRNG.
+ return (*state = *state * 1103515245 + 12345) >> 16;
+ }
+
+ u32 RandN(u32 *state, u32 n) { return Rand(state) % n; } // [0, n)
+
+ void RandomShuffle(u32 *a, u32 n, u32 *rand_state) {
+ if (n <= 1) return;
+ for (u32 i = n - 1; i > 0; i--)
+ Swap(a[i], a[RandN(rand_state, i + 1)]);
+ }
+
+ RegionInfo *GetRegionInfo(uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ RegionInfo *regions =
+ reinterpret_cast<RegionInfo *>(SpaceBeg() + kSpaceSize);
+ return &regions[class_id];
+ }
+
+ uptr GetMetadataEnd(uptr region_beg) {
+ return region_beg + kRegionSize - kFreeArraySize;
+ }
+
+ uptr GetChunkIdx(uptr chunk, uptr size) {
+ if (!kUsingConstantSpaceBeg)
+ chunk -= SpaceBeg();
+
+ uptr offset = chunk % kRegionSize;
+ // Here we divide by a non-constant. This is costly.
+ // size always fits into 32-bits. If the offset fits too, use 32-bit div.
+ if (offset >> (SANITIZER_WORDSIZE / 2))
+ return offset / size;
+ return (u32)offset / (u32)size;
+ }
+
+ CompactPtrT *GetFreeArray(uptr region_beg) {
+ return reinterpret_cast<CompactPtrT *>(region_beg + kRegionSize -
+ kFreeArraySize);
+ }
+
+ void EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
+ uptr num_freed_chunks) {
+ uptr needed_space = num_freed_chunks * sizeof(CompactPtrT);
+ if (region->mapped_free_array < needed_space) {
+ CHECK_LE(needed_space, kFreeArraySize);
+ uptr new_mapped_free_array = RoundUpTo(needed_space, kFreeArrayMapSize);
+ uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) +
+ region->mapped_free_array;
+ uptr new_map_size = new_mapped_free_array - region->mapped_free_array;
+ MapWithCallback(current_map_end, new_map_size);
+ region->mapped_free_array = new_mapped_free_array;
+ }
+ }
+
+
+ NOINLINE void PopulateFreeArray(AllocatorStats *stat, uptr class_id,
+ RegionInfo *region, uptr requested_count) {
+ // region->mutex is held.
+ uptr size = ClassIdToSize(class_id);
+ uptr beg_idx = region->allocated_user;
+ uptr end_idx = beg_idx + requested_count * size;
+ uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ if (end_idx > region->mapped_user) {
+ if (!kUsingConstantSpaceBeg && region->mapped_user == 0)
+ region->rand_state = static_cast<u32>(region_beg >> 12); // From ASLR.
+ // Do the mmap for the user memory.
+ uptr map_size = kUserMapSize;
+ while (end_idx > region->mapped_user + map_size)
+ map_size += kUserMapSize;
+ CHECK_GE(region->mapped_user + map_size, end_idx);
+ MapWithCallback(region_beg + region->mapped_user, map_size);
+ stat->Add(AllocatorStatMapped, map_size);
+ region->mapped_user += map_size;
+ }
+ CompactPtrT *free_array = GetFreeArray(region_beg);
+ uptr total_count = (region->mapped_user - beg_idx) / size;
+ uptr num_freed_chunks = region->num_freed_chunks;
+ EnsureFreeArraySpace(region, region_beg, num_freed_chunks + total_count);
+ for (uptr i = 0; i < total_count; i++) {
+ uptr chunk = beg_idx + i * size;
+ free_array[num_freed_chunks + total_count - 1 - i] =
+ PointerToCompactPtr(0, chunk);
+ }
+ if (kRandomShuffleChunks)
+ RandomShuffle(&free_array[num_freed_chunks], total_count,
+ &region->rand_state);
+ region->num_freed_chunks += total_count;
+ region->allocated_user += total_count * size;
+ CHECK_LE(region->allocated_user, region->mapped_user);
+
+ region->allocated_meta += total_count * kMetadataSize;
+ if (region->allocated_meta > region->mapped_meta) {
+ uptr map_size = kMetaMapSize;
+ while (region->allocated_meta > region->mapped_meta + map_size)
+ map_size += kMetaMapSize;
+ // Do the mmap for the metadata.
+ CHECK_GE(region->mapped_meta + map_size, region->allocated_meta);
+ MapWithCallback(GetMetadataEnd(region_beg) -
+ region->mapped_meta - map_size, map_size);
+ region->mapped_meta += map_size;
+ }
+ CHECK_LE(region->allocated_meta, region->mapped_meta);
+ if (region->mapped_user + region->mapped_meta >
+ kRegionSize - kFreeArraySize) {
+ Printf("%s: Out of memory. Dying. ", SanitizerToolName);
+ Printf("The process has exhausted %zuMB for size class %zu.\n",
+ kRegionSize / 1024 / 1024, size);
+ Die();
+ }
+ }
+
+ void MaybeReleaseChunkRange(uptr region_beg, uptr chunk_size,
+ CompactPtrT first, CompactPtrT last) {
+ uptr beg_ptr = CompactPtrToPointer(region_beg, first);
+ uptr end_ptr = CompactPtrToPointer(region_beg, last) + chunk_size;
+ ReleaseMemoryPagesToOS(beg_ptr, end_ptr);
+ }
+
+ // Attempts to release some RAM back to OS. The region is expected to be
+ // locked.
+ // Algorithm:
+ // * Sort the chunks.
+ // * Find ranges fully covered by free-d chunks
+ // * Release them to OS with madvise.
+ void MaybeReleaseToOS(uptr class_id) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ const uptr chunk_size = ClassIdToSize(class_id);
+ const uptr page_size = GetPageSizeCached();
+
+ uptr n = region->num_freed_chunks;
+ if (n * chunk_size < page_size)
+ return; // No chance to release anything.
+ if ((region->n_freed - region->rtoi.n_freed_at_last_release) * chunk_size <
+ page_size) {
+ return; // Nothing new to release.
+ }
+
+ s32 interval_ms = ReleaseToOSIntervalMs();
+ if (interval_ms < 0)
+ return;
+
+ u64 now_ns = NanoTime();
+ if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL > now_ns)
+ return; // Memory was returned recently.
+ region->rtoi.last_release_at_ns = now_ns;
+
+ uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ CompactPtrT *free_array = GetFreeArray(region_beg);
+ SortArray(free_array, n);
+
+ const uptr scaled_chunk_size = chunk_size >> kCompactPtrScale;
+ const uptr kScaledGranularity = page_size >> kCompactPtrScale;
+
+ uptr range_beg = free_array[0];
+ uptr prev = free_array[0];
+ for (uptr i = 1; i < n; i++) {
+ uptr chunk = free_array[i];
+ CHECK_GT(chunk, prev);
+ if (chunk - prev != scaled_chunk_size) {
+ CHECK_GT(chunk - prev, scaled_chunk_size);
+ if (prev + scaled_chunk_size - range_beg >= kScaledGranularity) {
+ MaybeReleaseChunkRange(region_beg, chunk_size, range_beg, prev);
+ region->rtoi.n_freed_at_last_release = region->n_freed;
+ region->rtoi.num_releases++;
+ }
+ range_beg = chunk;
+ }
+ prev = chunk;
+ }
+ }
+};
+
+
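A short standalone check of the compact pointer encoding used by SizeClassAllocator64: region offsets are stored as 4-byte values shifted right by kCompactPtrScale, which is why the COMPILER_CHECKs above bound kRegionSize by 2^36. The region base and offset below are illustrative assumptions.

#include <cassert>
#include <cstdint>

int main() {
  typedef uint64_t uptr;
  typedef uint32_t CompactPtrT;
  const uptr kCompactPtrScale = 4;

  // Assumption: some region base and a 16-byte aligned chunk inside it.
  uptr region_beg = 0x600000000000ULL;
  uptr chunk = region_beg + 0x12340;

  // PointerToCompactPtr / CompactPtrToPointer round-trip.
  CompactPtrT c =
      static_cast<CompactPtrT>((chunk - region_beg) >> kCompactPtrScale);
  uptr back = region_beg + (static_cast<uptr>(c) << kCompactPtrScale);
  assert(back == chunk);

  // Offsets within a 2^36-byte region still fit after the shift.
  assert((((1ULL << 36) - 16) >> kCompactPtrScale) <= 0xFFFFFFFFULL);
  return 0;
}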
diff --git a/lib/sanitizer_common/sanitizer_allocator_secondary.h b/lib/sanitizer_common/sanitizer_allocator_secondary.h
new file mode 100644
index 000000000000..2e98e591b432
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator_secondary.h
@@ -0,0 +1,282 @@
+//===-- sanitizer_allocator_secondary.h -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// This class can (de)allocate only large chunks of memory using mmap/unmap.
+// The main purpose of this allocator is to cover large and rare allocation
+// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
+template <class MapUnmapCallback = NoOpMapUnmapCallback>
+class LargeMmapAllocator {
+ public:
+ void InitLinkerInitialized(bool may_return_null) {
+ page_size_ = GetPageSizeCached();
+ atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
+ }
+
+ void Init(bool may_return_null) {
+ internal_memset(this, 0, sizeof(*this));
+ InitLinkerInitialized(may_return_null);
+ }
+
+ void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
+ CHECK(IsPowerOfTwo(alignment));
+ uptr map_size = RoundUpMapSize(size);
+ if (alignment > page_size_)
+ map_size += alignment;
+ // Overflow.
+ if (map_size < size) return ReturnNullOrDieOnBadRequest();
+ uptr map_beg = reinterpret_cast<uptr>(
+ MmapOrDie(map_size, "LargeMmapAllocator"));
+ CHECK(IsAligned(map_beg, page_size_));
+ MapUnmapCallback().OnMap(map_beg, map_size);
+ uptr map_end = map_beg + map_size;
+ uptr res = map_beg + page_size_;
+ if (res & (alignment - 1)) // Align.
+ res += alignment - (res & (alignment - 1));
+ CHECK(IsAligned(res, alignment));
+ CHECK(IsAligned(res, page_size_));
+ CHECK_GE(res + size, map_beg);
+ CHECK_LE(res + size, map_end);
+ Header *h = GetHeader(res);
+ h->size = size;
+ h->map_beg = map_beg;
+ h->map_size = map_size;
+ uptr size_log = MostSignificantSetBitIndex(map_size);
+ CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
+ {
+ SpinMutexLock l(&mutex_);
+ uptr idx = n_chunks_++;
+ chunks_sorted_ = false;
+ CHECK_LT(idx, kMaxNumChunks);
+ h->chunk_idx = idx;
+ chunks_[idx] = h;
+ stats.n_allocs++;
+ stats.currently_allocated += map_size;
+ stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
+ stats.by_size_log[size_log]++;
+ stat->Add(AllocatorStatAllocated, map_size);
+ stat->Add(AllocatorStatMapped, map_size);
+ }
+ return reinterpret_cast<void*>(res);
+ }
+
+ bool MayReturnNull() const {
+ return atomic_load(&may_return_null_, memory_order_acquire);
+ }
+
+ void *ReturnNullOrDieOnBadRequest() {
+ if (MayReturnNull()) return nullptr;
+ ReportAllocatorCannotReturnNull(false);
+ }
+
+ void *ReturnNullOrDieOnOOM() {
+ if (MayReturnNull()) return nullptr;
+ ReportAllocatorCannotReturnNull(true);
+ }
+
+ void SetMayReturnNull(bool may_return_null) {
+ atomic_store(&may_return_null_, may_return_null, memory_order_release);
+ }
+
+ void Deallocate(AllocatorStats *stat, void *p) {
+ Header *h = GetHeader(p);
+ {
+ SpinMutexLock l(&mutex_);
+ uptr idx = h->chunk_idx;
+ CHECK_EQ(chunks_[idx], h);
+ CHECK_LT(idx, n_chunks_);
+ chunks_[idx] = chunks_[n_chunks_ - 1];
+ chunks_[idx]->chunk_idx = idx;
+ n_chunks_--;
+ chunks_sorted_ = false;
+ stats.n_frees++;
+ stats.currently_allocated -= h->map_size;
+ stat->Sub(AllocatorStatAllocated, h->map_size);
+ stat->Sub(AllocatorStatMapped, h->map_size);
+ }
+ MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
+ UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
+ }
+
+ uptr TotalMemoryUsed() {
+ SpinMutexLock l(&mutex_);
+ uptr res = 0;
+ for (uptr i = 0; i < n_chunks_; i++) {
+ Header *h = chunks_[i];
+ CHECK_EQ(h->chunk_idx, i);
+ res += RoundUpMapSize(h->size);
+ }
+ return res;
+ }
+
+ bool PointerIsMine(const void *p) {
+ return GetBlockBegin(p) != nullptr;
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ return RoundUpTo(GetHeader(p)->size, page_size_);
+ }
+
+ // At least page_size_/2 metadata bytes are available.
+ void *GetMetaData(const void *p) {
+ // Too slow: CHECK_EQ(p, GetBlockBegin(p));
+ if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
+ Printf("%s: bad pointer %p\n", SanitizerToolName, p);
+ CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
+ }
+ return GetHeader(p) + 1;
+ }
+
+ void *GetBlockBegin(const void *ptr) {
+ uptr p = reinterpret_cast<uptr>(ptr);
+ SpinMutexLock l(&mutex_);
+ uptr nearest_chunk = 0;
+ // Cache-friendly linear search.
+ for (uptr i = 0; i < n_chunks_; i++) {
+ uptr ch = reinterpret_cast<uptr>(chunks_[i]);
+ if (p < ch) continue; // p is to the left of this chunk, skip it.
+ if (p - ch < p - nearest_chunk)
+ nearest_chunk = ch;
+ }
+ if (!nearest_chunk)
+ return nullptr;
+ Header *h = reinterpret_cast<Header *>(nearest_chunk);
+ CHECK_GE(nearest_chunk, h->map_beg);
+ CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
+ CHECK_LE(nearest_chunk, p);
+ if (h->map_beg + h->map_size <= p)
+ return nullptr;
+ return GetUser(h);
+ }
+
+ void EnsureSortedChunks() {
+ if (chunks_sorted_) return;
+ SortArray(reinterpret_cast<uptr*>(chunks_), n_chunks_);
+ for (uptr i = 0; i < n_chunks_; i++)
+ chunks_[i]->chunk_idx = i;
+ chunks_sorted_ = true;
+ }
+
+ // This function does the same as GetBlockBegin, but is much faster.
+ // Must be called with the allocator locked.
+ void *GetBlockBeginFastLocked(void *ptr) {
+ mutex_.CheckLocked();
+ uptr p = reinterpret_cast<uptr>(ptr);
+ uptr n = n_chunks_;
+ if (!n) return nullptr;
+ EnsureSortedChunks();
+ auto min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
+ auto max_mmap_ =
+ reinterpret_cast<uptr>(chunks_[n - 1]) + chunks_[n - 1]->map_size;
+ if (p < min_mmap_ || p >= max_mmap_)
+ return nullptr;
+ uptr beg = 0, end = n - 1;
+ // This loop is a log(n) lower_bound. It does not check for the exact match
+ // to avoid expensive cache-thrashing loads.
+ while (end - beg >= 2) {
+ uptr mid = (beg + end) / 2; // Invariant: mid >= beg + 1
+ if (p < reinterpret_cast<uptr>(chunks_[mid]))
+ end = mid - 1; // We are not interested in chunks_[mid].
+ else
+ beg = mid; // chunks_[mid] may still be what we want.
+ }
+
+ if (beg < end) {
+ CHECK_EQ(beg + 1, end);
+ // There are 2 chunks left, choose one.
+ if (p >= reinterpret_cast<uptr>(chunks_[end]))
+ beg = end;
+ }
+
+ Header *h = chunks_[beg];
+ if (h->map_beg + h->map_size <= p || p < h->map_beg)
+ return nullptr;
+ return GetUser(h);
+ }
+
+ void PrintStats() {
+ Printf("Stats: LargeMmapAllocator: allocated %zd times, "
+ "remains %zd (%zd K) max %zd M; by size logs: ",
+ stats.n_allocs, stats.n_allocs - stats.n_frees,
+ stats.currently_allocated >> 10, stats.max_allocated >> 20);
+ for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
+ uptr c = stats.by_size_log[i];
+ if (!c) continue;
+ Printf("%zd:%zd; ", i, c);
+ }
+ Printf("\n");
+ }
+
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ mutex_.Lock();
+ }
+
+ void ForceUnlock() {
+ mutex_.Unlock();
+ }
+
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ EnsureSortedChunks(); // Avoid doing the sort while iterating.
+ for (uptr i = 0; i < n_chunks_; i++) {
+ auto t = chunks_[i];
+ callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
+ // Consistency check: verify that the array did not change.
+ CHECK_EQ(chunks_[i], t);
+ CHECK_EQ(chunks_[i]->chunk_idx, i);
+ }
+ }
+
+ private:
+ static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
+ struct Header {
+ uptr map_beg;
+ uptr map_size;
+ uptr size;
+ uptr chunk_idx;
+ };
+
+ Header *GetHeader(uptr p) {
+ CHECK(IsAligned(p, page_size_));
+ return reinterpret_cast<Header*>(p - page_size_);
+ }
+ Header *GetHeader(const void *p) {
+ return GetHeader(reinterpret_cast<uptr>(p));
+ }
+
+ void *GetUser(Header *h) {
+ CHECK(IsAligned((uptr)h, page_size_));
+ return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
+ }
+
+ uptr RoundUpMapSize(uptr size) {
+ return RoundUpTo(size, page_size_) + page_size_;
+ }
+
+ uptr page_size_;
+ Header *chunks_[kMaxNumChunks];
+ uptr n_chunks_;
+ bool chunks_sorted_;
+ struct Stats {
+ uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
+ } stats;
+ atomic_uint8_t may_return_null_;
+ SpinMutex mutex_;
+};
+
+
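The LargeMmapAllocator layout (header page immediately before the user chunk, one extra page reserved by RoundUpMapSize) can be sketched with plain arithmetic; the 4 KiB page size and addresses below are illustrative assumptions.

#include <cassert>
#include <cstdint>

int main() {
  typedef uint64_t uptr;
  const uptr page_size = 4096;   // assumption: 4 KiB pages

  uptr size = 100000;            // user request
  // RoundUpMapSize(): round the request to pages and add one page for Header.
  uptr map_size = ((size + page_size - 1) & ~(page_size - 1)) + page_size;
  assert(map_size == 102400 + 4096);

  uptr map_beg = 0x7f0000000000ULL;      // what MmapOrDie() might return
  uptr user = map_beg + page_size;       // user chunk starts one page in
  uptr header = user - page_size;        // GetHeader(): the page before it
  assert(header == map_beg);
  assert(user + size <= map_beg + map_size);
  return 0;
}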
diff --git a/lib/sanitizer_common/sanitizer_allocator_size_class_map.h b/lib/sanitizer_common/sanitizer_allocator_size_class_map.h
new file mode 100644
index 000000000000..7151a4636056
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator_size_class_map.h
@@ -0,0 +1,217 @@
+//===-- sanitizer_allocator_size_class_map.h --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// SizeClassMap maps allocation sizes into size classes and back.
+// Class 0 always corresponds to size 0.
+// The other sizes are controlled by the template parameters:
+// kMinSizeLog: defines the class 1 as 2^kMinSizeLog.
+// kMaxSizeLog: defines the last class as 2^kMaxSizeLog.
+// kMidSizeLog: the classes starting from 1 increase with step
+// 2^kMinSizeLog until 2^kMidSizeLog.
+// kNumBits: the number of non-zero bits in sizes after 2^kMidSizeLog.
+// E.g. with kNumBits==3 all size classes after 2^kMidSizeLog
+// look like 0b1xx0..0, where x is either 0 or 1.
+//
+// Example: kNumBits=3, kMinSizeLog=4, kMidSizeLog=8, kMaxSizeLog=17:
+//
+// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
+// Next 4 classes: 256 + i * 64 (i = 1 to 4).
+// Next 4 classes: 512 + i * 128 (i = 1 to 4).
+// ...
+// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).
+// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
+//
+// This structure of the size class map gives us:
+// - Efficient table-free class-to-size and size-to-class functions.
+// - Difference between two consecutive size classes is between 14% and 25%.
+//
+// This class also gives a hint to a thread-caching allocator about the number
+// of chunks that need to be cached per thread:
+// - kMaxNumCachedHint is a hint for maximal number of chunks per size class.
+// The actual number is computed in TransferBatch.
+// - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
+//
+// Part of output of SizeClassMap::Print():
+// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
+// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
+// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2
+// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3
+// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4
+// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5
+// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6
+// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7
+//
+// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8
+// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9
+// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10
+// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11
+// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12
+// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13
+// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14
+// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
+//
+// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
+// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17
+// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18
+// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19
+//
+// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20
+// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21
+// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22
+// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23
+//
+// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24
+// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25
+// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26
+// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27
+//
+// ...
+//
+// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48
+// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49
+// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50
+// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51
+//
+// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52
+//
+//
+// Another example (kNumBits=2):
+// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
+// c01 => s: 32 diff: +32 00% l 5 cached: 64 2048; id 1
+// c02 => s: 64 diff: +32 100% l 6 cached: 64 4096; id 2
+// c03 => s: 96 diff: +32 50% l 6 cached: 64 6144; id 3
+// c04 => s: 128 diff: +32 33% l 7 cached: 64 8192; id 4
+// c05 => s: 160 diff: +32 25% l 7 cached: 64 10240; id 5
+// c06 => s: 192 diff: +32 20% l 7 cached: 64 12288; id 6
+// c07 => s: 224 diff: +32 16% l 7 cached: 64 14336; id 7
+// c08 => s: 256 diff: +32 14% l 8 cached: 64 16384; id 8
+// c09 => s: 384 diff: +128 50% l 8 cached: 42 16128; id 9
+// c10 => s: 512 diff: +128 33% l 9 cached: 32 16384; id 10
+// c11 => s: 768 diff: +256 50% l 9 cached: 21 16128; id 11
+// c12 => s: 1024 diff: +256 33% l 10 cached: 16 16384; id 12
+// c13 => s: 1536 diff: +512 50% l 10 cached: 10 15360; id 13
+// c14 => s: 2048 diff: +512 33% l 11 cached: 8 16384; id 14
+// c15 => s: 3072 diff: +1024 50% l 11 cached: 5 15360; id 15
+// c16 => s: 4096 diff: +1024 33% l 12 cached: 4 16384; id 16
+// c17 => s: 6144 diff: +2048 50% l 12 cached: 2 12288; id 17
+// c18 => s: 8192 diff: +2048 33% l 13 cached: 2 16384; id 18
+// c19 => s: 12288 diff: +4096 50% l 13 cached: 1 12288; id 19
+// c20 => s: 16384 diff: +4096 33% l 14 cached: 1 16384; id 20
+// c21 => s: 24576 diff: +8192 50% l 14 cached: 1 24576; id 21
+// c22 => s: 32768 diff: +8192 33% l 15 cached: 1 32768; id 22
+// c23 => s: 49152 diff: +16384 50% l 15 cached: 1 49152; id 23
+// c24 => s: 65536 diff: +16384 33% l 16 cached: 1 65536; id 24
+// c25 => s: 98304 diff: +32768 50% l 16 cached: 1 98304; id 25
+// c26 => s: 131072 diff: +32768 33% l 17 cached: 1 131072; id 26
+
+template <uptr kNumBits, uptr kMinSizeLog, uptr kMidSizeLog, uptr kMaxSizeLog,
+ uptr kMaxNumCachedHintT, uptr kMaxBytesCachedLog>
+class SizeClassMap {
+ static const uptr kMinSize = 1 << kMinSizeLog;
+ static const uptr kMidSize = 1 << kMidSizeLog;
+ static const uptr kMidClass = kMidSize / kMinSize;
+ static const uptr S = kNumBits - 1;
+ static const uptr M = (1 << S) - 1;
+
+ public:
+ // kMaxNumCachedHintT is a power of two. It serves as a hint
+ // for the size of TransferBatch; the actual size could be a bit smaller.
+ static const uptr kMaxNumCachedHint = kMaxNumCachedHintT;
+ COMPILER_CHECK((kMaxNumCachedHint & (kMaxNumCachedHint - 1)) == 0);
+
+ static const uptr kMaxSize = 1UL << kMaxSizeLog;
+ static const uptr kNumClasses =
+ kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1;
+ static const uptr kLargestClassID = kNumClasses - 2;
+ COMPILER_CHECK(kNumClasses >= 16 && kNumClasses <= 256);
+ static const uptr kNumClassesRounded =
+ kNumClasses <= 32 ? 32 :
+ kNumClasses <= 64 ? 64 :
+ kNumClasses <= 128 ? 128 : 256;
+
+ static uptr Size(uptr class_id) {
+ if (class_id <= kMidClass)
+ return kMinSize * class_id;
+ class_id -= kMidClass;
+ uptr t = kMidSize << (class_id >> S);
+ return t + (t >> S) * (class_id & M);
+ }
+
+ static uptr ClassID(uptr size) {
+ if (size <= kMidSize)
+ return (size + kMinSize - 1) >> kMinSizeLog;
+ if (size > kMaxSize) return 0;
+ uptr l = MostSignificantSetBitIndex(size);
+ uptr hbits = (size >> (l - S)) & M;
+ uptr lbits = size & ((1 << (l - S)) - 1);
+ uptr l1 = l - kMidSizeLog;
+ return kMidClass + (l1 << S) + hbits + (lbits > 0);
+ }
+
+ static uptr MaxCachedHint(uptr class_id) {
+ if (class_id == 0) return 0;
+ uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id);
+ return Max<uptr>(1, Min(kMaxNumCachedHint, n));
+ }
+
+ static void Print() {
+ uptr prev_s = 0;
+ uptr total_cached = 0;
+ for (uptr i = 0; i < kNumClasses; i++) {
+ uptr s = Size(i);
+ if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
+ Printf("\n");
+ uptr d = s - prev_s;
+ uptr p = prev_s ? (d * 100 / prev_s) : 0;
+ uptr l = s ? MostSignificantSetBitIndex(s) : 0;
+ uptr cached = MaxCachedHint(i) * s;
+ Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
+ "cached: %zd %zd; id %zd\n",
+ i, Size(i), d, p, l, MaxCachedHint(i), cached, ClassID(s));
+ total_cached += cached;
+ prev_s = s;
+ }
+ Printf("Total cached: %zd\n", total_cached);
+ }
+
+ static void Validate() {
+ for (uptr c = 1; c < kNumClasses; c++) {
+ // Printf("Validate: c%zd\n", c);
+ uptr s = Size(c);
+ CHECK_NE(s, 0U);
+ CHECK_EQ(ClassID(s), c);
+ if (c != kNumClasses - 1)
+ CHECK_EQ(ClassID(s + 1), c + 1);
+ CHECK_EQ(ClassID(s - 1), c);
+ if (c)
+ CHECK_GT(Size(c), Size(c-1));
+ }
+ CHECK_EQ(ClassID(kMaxSize + 1), 0);
+
+ for (uptr s = 1; s <= kMaxSize; s++) {
+ uptr c = ClassID(s);
+ // Printf("s%zd => c%zd\n", s, c);
+ CHECK_LT(c, kNumClasses);
+ CHECK_GE(Size(c), s);
+ if (c > 0)
+ CHECK_LT(Size(c-1), s);
+ }
+ }
+};
+
+typedef SizeClassMap<3, 4, 8, 17, 128, 16> DefaultSizeClassMap;
+typedef SizeClassMap<3, 4, 8, 17, 64, 14> CompactSizeClassMap;
+typedef SizeClassMap<2, 5, 9, 16, 64, 14> VeryCompactSizeClassMap;
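+// Worked example (illustrative, using DefaultSizeClassMap above, i.e.
+// kMinSize=16, kMidSize=256, kMidClass=16, S=2, M=3): a 48-byte request is in
+// the linear region, so ClassID(48) = (48+15)>>4 = 3 and Size(3) = 48; a
+// 400-byte request is in the geometric region, where l=8, hbits=2, lbits=16
+// give ClassID(400) = 16 + 0 + 2 + 1 = 19, and indeed Size(18)=384 < 400 <=
+// Size(19)=448, so ClassID and Size round-trip consistently.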
diff --git a/lib/sanitizer_common/sanitizer_allocator_stats.h b/lib/sanitizer_common/sanitizer_allocator_stats.h
new file mode 100644
index 000000000000..38b088b8446e
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator_stats.h
@@ -0,0 +1,107 @@
+//===-- sanitizer_allocator_stats.h -----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// Memory allocator statistics
+enum AllocatorStat {
+ AllocatorStatAllocated,
+ AllocatorStatMapped,
+ AllocatorStatCount
+};
+
+typedef uptr AllocatorStatCounters[AllocatorStatCount];
+
+// Per-thread stats, live in per-thread cache.
+class AllocatorStats {
+ public:
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ }
+ void InitLinkerInitialized() {}
+
+ void Add(AllocatorStat i, uptr v) {
+ v += atomic_load(&stats_[i], memory_order_relaxed);
+ atomic_store(&stats_[i], v, memory_order_relaxed);
+ }
+
+ void Sub(AllocatorStat i, uptr v) {
+ v = atomic_load(&stats_[i], memory_order_relaxed) - v;
+ atomic_store(&stats_[i], v, memory_order_relaxed);
+ }
+
+ void Set(AllocatorStat i, uptr v) {
+ atomic_store(&stats_[i], v, memory_order_relaxed);
+ }
+
+ uptr Get(AllocatorStat i) const {
+ return atomic_load(&stats_[i], memory_order_relaxed);
+ }
+
+ private:
+ friend class AllocatorGlobalStats;
+ AllocatorStats *next_;
+ AllocatorStats *prev_;
+ atomic_uintptr_t stats_[AllocatorStatCount];
+};
+
+// Global stats, used for aggregation and querying.
+class AllocatorGlobalStats : public AllocatorStats {
+ public:
+ void InitLinkerInitialized() {
+ next_ = this;
+ prev_ = this;
+ }
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ InitLinkerInitialized();
+ }
+
+ void Register(AllocatorStats *s) {
+ SpinMutexLock l(&mu_);
+ s->next_ = next_;
+ s->prev_ = this;
+ next_->prev_ = s;
+ next_ = s;
+ }
+
+ void Unregister(AllocatorStats *s) {
+ SpinMutexLock l(&mu_);
+ s->prev_->next_ = s->next_;
+ s->next_->prev_ = s->prev_;
+ for (int i = 0; i < AllocatorStatCount; i++)
+ Add(AllocatorStat(i), s->Get(AllocatorStat(i)));
+ }
+
+ void Get(AllocatorStatCounters s) const {
+ internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
+ SpinMutexLock l(&mu_);
+ const AllocatorStats *stats = this;
+ for (;;) {
+ for (int i = 0; i < AllocatorStatCount; i++)
+ s[i] += stats->Get(AllocatorStat(i));
+ stats = stats->next_;
+ if (stats == this)
+ break;
+ }
+ // All stats must be non-negative.
+ for (int i = 0; i < AllocatorStatCount; i++)
+ s[i] = ((sptr)s[i]) >= 0 ? s[i] : 0;
+ }
+
+ private:
+ mutable SpinMutex mu_;
+};
+
+
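+// Usage sketch (a reading aid, not part of the interface contract): each
+// per-thread allocator cache typically owns an AllocatorStats and Register()s
+// it with a single AllocatorGlobalStats object; Unregister() unlinks it and
+// folds its counters into the global object, while Get() walks the circular
+// list under the spin lock to aggregate the still-live caches. Counter
+// updates use relaxed loads/stores because each AllocatorStats is written
+// only by its owning thread (or under mu_ for the global object).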
diff --git a/lib/sanitizer_common/sanitizer_atomic.h b/lib/sanitizer_common/sanitizer_atomic.h
index b26693e24f8d..8f400acc999c 100644
--- a/lib/sanitizer_common/sanitizer_atomic.h
+++ b/lib/sanitizer_common/sanitizer_atomic.h
@@ -37,6 +37,11 @@ struct atomic_uint16_t {
volatile Type val_dont_use;
};
+struct atomic_sint32_t {
+ typedef s32 Type;
+ volatile Type val_dont_use;
+};
+
struct atomic_uint32_t {
typedef u32 Type;
volatile Type val_dont_use;
diff --git a/lib/sanitizer_common/sanitizer_common.cc b/lib/sanitizer_common/sanitizer_common.cc
index 79fcbb1183f9..1c6fc3ef86a3 100644
--- a/lib/sanitizer_common/sanitizer_common.cc
+++ b/lib/sanitizer_common/sanitizer_common.cc
@@ -114,7 +114,7 @@ void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
Report("ERROR: %s failed to "
"%s 0x%zx (%zd) bytes of %s (error code: %d)\n",
SanitizerToolName, mmap_type, size, size, mem_type, err);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
DumpProcessMap();
#endif
UNREACHABLE("unable to mmap");
@@ -157,6 +157,7 @@ bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
}
typedef bool UptrComparisonFunction(const uptr &a, const uptr &b);
+typedef bool U32ComparisonFunction(const u32 &a, const u32 &b);
template<class T>
static inline bool CompareLess(const T &a, const T &b) {
@@ -167,6 +168,10 @@ void SortArray(uptr *array, uptr size) {
InternalSort<uptr*, UptrComparisonFunction>(&array, size, CompareLess);
}
+void SortArray(u32 *array, uptr size) {
+ InternalSort<u32*, U32ComparisonFunction>(&array, size, CompareLess);
+}
+
const char *StripPathPrefix(const char *filepath,
const char *strip_path_prefix) {
if (!filepath) return nullptr;
@@ -202,7 +207,7 @@ void ReportErrorSummary(const char *error_message) {
__sanitizer_report_error_summary(buff.data());
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void ReportErrorSummary(const char *error_type, const AddressInfo &info) {
if (!common_flags()->print_summary)
return;
@@ -254,9 +259,18 @@ void LoadedModule::set(const char *module_name, uptr base_address) {
base_address_ = base_address;
}
+void LoadedModule::set(const char *module_name, uptr base_address,
+ ModuleArch arch, u8 uuid[kModuleUUIDSize]) {
+ set(module_name, base_address);
+ arch_ = arch;
+ internal_memcpy(uuid_, uuid, sizeof(uuid_));
+}
+
void LoadedModule::clear() {
InternalFree(full_name_);
full_name_ = nullptr;
+ arch_ = kModuleArchUnknown;
+ internal_memset(uuid_, 0, kModuleUUIDSize);
while (!ranges_.empty()) {
AddressRange *r = ranges_.front();
ranges_.pop_front();
@@ -483,4 +497,11 @@ int __sanitizer_install_malloc_and_free_hooks(void (*malloc_hook)(const void *,
void (*free_hook)(const void *)) {
return InstallMallocFreeHooks(malloc_hook, free_hook);
}
+
+#if !SANITIZER_GO && !SANITIZER_SUPPORTS_WEAK_HOOKS
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_print_memory_profile(int top_percent) {
+ (void)top_percent;
+}
+#endif
} // extern "C"
diff --git a/lib/sanitizer_common/sanitizer_common.h b/lib/sanitizer_common/sanitizer_common.h
index 6c1d6a00a10c..66c2d26fa4f5 100644
--- a/lib/sanitizer_common/sanitizer_common.h
+++ b/lib/sanitizer_common/sanitizer_common.h
@@ -98,9 +98,14 @@ void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);
+// Find an available address space.
+uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding);
+
// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
-void FlushUnneededShadowMemory(uptr addr, uptr size);
+// Releases memory pages entirely within the [beg, end] address range. Noop if
+// the provided range does not contain at least one entire page.
+void ReleaseMemoryPagesToOS(uptr beg, uptr end);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
@@ -115,16 +120,14 @@ void RunFreeHooks(const void *ptr);
// keep frame size low.
// FIXME: use InternalAlloc instead of MmapOrDie once
// InternalAlloc is made libc-free.
-template<typename T>
+template <typename T>
class InternalScopedBuffer {
public:
explicit InternalScopedBuffer(uptr cnt) {
cnt_ = cnt;
- ptr_ = (T*)MmapOrDie(cnt * sizeof(T), "InternalScopedBuffer");
- }
- ~InternalScopedBuffer() {
- UnmapOrDie(ptr_, cnt_ * sizeof(T));
+ ptr_ = (T *)MmapOrDie(cnt * sizeof(T), "InternalScopedBuffer");
}
+ ~InternalScopedBuffer() { UnmapOrDie(ptr_, cnt_ * sizeof(T)); }
T &operator[](uptr i) { return ptr_[i]; }
T *data() { return ptr_; }
uptr size() { return cnt_ * sizeof(T); }
@@ -132,9 +135,11 @@ class InternalScopedBuffer {
private:
T *ptr_;
uptr cnt_;
- // Disallow evil constructors.
- InternalScopedBuffer(const InternalScopedBuffer&);
- void operator=(const InternalScopedBuffer&);
+ // Disallow copies and moves.
+ InternalScopedBuffer(const InternalScopedBuffer &) = delete;
+ InternalScopedBuffer &operator=(const InternalScopedBuffer &) = delete;
+ InternalScopedBuffer(InternalScopedBuffer &&) = delete;
+ InternalScopedBuffer &operator=(InternalScopedBuffer &&) = delete;
};
class InternalScopedString : public InternalScopedBuffer<char> {
@@ -330,6 +335,7 @@ void SleepForMillis(int millis);
u64 NanoTime();
int Atexit(void (*function)(void));
void SortArray(uptr *array, uptr size);
+void SortArray(u32 *array, uptr size);
bool TemplateMatch(const char *templ, const char *str);
// Exit
@@ -389,7 +395,7 @@ void ReportErrorSummary(const char *error_message);
// error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info);
// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
-void ReportErrorSummary(const char *error_type, StackTrace *trace);
+void ReportErrorSummary(const char *error_type, const StackTrace *trace);
// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
@@ -446,8 +452,8 @@ INLINE uptr RoundUpToPowerOfTwo(uptr size) {
if (IsPowerOfTwo(size)) return size;
uptr up = MostSignificantSetBitIndex(size);
- CHECK(size < (1ULL << (up + 1)));
- CHECK(size > (1ULL << up));
+ CHECK_LT(size, (1ULL << (up + 1)));
+ CHECK_GT(size, (1ULL << up));
return 1ULL << (up + 1);
}
@@ -541,6 +547,13 @@ class InternalMmapVectorNoCtor {
uptr capacity() const {
return capacity_;
}
+ void resize(uptr new_size) {
+ Resize(new_size);
+ if (new_size > size_) {
+ internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
+ }
+ size_ = new_size;
+ }
void clear() { size_ = 0; }
bool empty() const { return size() == 0; }
@@ -625,34 +638,55 @@ void InternalSort(Container *v, uptr size, Compare comp) {
}
}
-template<class Container, class Value, class Compare>
-uptr InternalBinarySearch(const Container &v, uptr first, uptr last,
- const Value &val, Compare comp) {
- uptr not_found = last + 1;
- while (last >= first) {
+// Works like std::lower_bound: finds the first element that is not less
+// than val.
+template <class Container, class Value, class Compare>
+uptr InternalLowerBound(const Container &v, uptr first, uptr last,
+ const Value &val, Compare comp) {
+ while (last > first) {
uptr mid = (first + last) / 2;
if (comp(v[mid], val))
first = mid + 1;
- else if (comp(val, v[mid]))
- last = mid - 1;
else
- return mid;
+ last = mid;
}
- return not_found;
+ return first;
}
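+// For example, with Container v = {1, 3, 3, 7} and CompareLess,
+// InternalLowerBound(v, 0, 4, 3, CompareLess) returns 1 (the first 3) and
+// InternalLowerBound(v, 0, 4, 5, CompareLess) returns 3 (the index of 7),
+// matching std::lower_bound semantics.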
+enum ModuleArch {
+ kModuleArchUnknown,
+ kModuleArchI386,
+ kModuleArchX86_64,
+ kModuleArchX86_64H,
+ kModuleArchARMV6,
+ kModuleArchARMV7,
+ kModuleArchARMV7S,
+ kModuleArchARMV7K,
+ kModuleArchARM64
+};
+
+const uptr kModuleUUIDSize = 16;
+
// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
public:
- LoadedModule() : full_name_(nullptr), base_address_(0) { ranges_.clear(); }
+ LoadedModule()
+ : full_name_(nullptr), base_address_(0), arch_(kModuleArchUnknown) {
+ internal_memset(uuid_, 0, kModuleUUIDSize);
+ ranges_.clear();
+ }
void set(const char *module_name, uptr base_address);
+ void set(const char *module_name, uptr base_address, ModuleArch arch,
+ u8 uuid[kModuleUUIDSize]);
void clear();
void addAddressRange(uptr beg, uptr end, bool executable);
bool containsAddress(uptr address) const;
const char *full_name() const { return full_name_; }
uptr base_address() const { return base_address_; }
+ ModuleArch arch() const { return arch_; }
+ const u8 *uuid() const { return uuid_; }
struct AddressRange {
AddressRange *next;
@@ -669,6 +703,8 @@ class LoadedModule {
private:
char *full_name_; // Owned.
uptr base_address_;
+ ModuleArch arch_;
+ u8 uuid_[kModuleUUIDSize];
IntrusiveList<AddressRange> ranges_;
};
@@ -789,6 +825,8 @@ struct SignalContext {
is_memory_access(is_memory_access),
write_flag(write_flag) {}
+ static void DumpAllRegisters(void *context);
+
// Creates signal context in a platform-specific manner.
static SignalContext Create(void *siginfo, void *context);
@@ -827,6 +865,15 @@ void AvoidCVE_2016_2143();
INLINE void AvoidCVE_2016_2143() {}
#endif
+struct StackDepotStats {
+ uptr n_uniq_ids;
+ uptr allocated;
+};
+
+// The default value for the allocator_release_to_os_interval_ms common flag,
+// indicating that the allocator should not attempt to release memory to the OS.
+const s32 kReleaseToOSIntervalNever = -1;
+
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
@@ -834,9 +881,4 @@ inline void *operator new(__sanitizer::operator_new_size_type size,
return alloc.Allocate(size);
}
-struct StackDepotStats {
- uptr n_uniq_ids;
- uptr allocated;
-};
-
#endif // SANITIZER_COMMON_H
diff --git a/lib/sanitizer_common/sanitizer_common_interceptors.inc b/lib/sanitizer_common/sanitizer_common_interceptors.inc
index c95b3580af2c..ca571d1a9fd5 100644
--- a/lib/sanitizer_common/sanitizer_common_interceptors.inc
+++ b/lib/sanitizer_common/sanitizer_common_interceptors.inc
@@ -30,6 +30,9 @@
// COMMON_INTERCEPTOR_SET_PTHREAD_NAME
// COMMON_INTERCEPTOR_HANDLE_RECVMSG
// COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED
+// COMMON_INTERCEPTOR_MEMSET_IMPL
+// COMMON_INTERCEPTOR_MEMMOVE_IMPL
+// COMMON_INTERCEPTOR_MEMCPY_IMPL
//===----------------------------------------------------------------------===//
#include "interception/interception.h"
@@ -67,6 +70,19 @@
#define iconv __bsd_iconv
#endif
+// Platform-specific options.
+#if SANITIZER_MAC
+namespace __sanitizer {
+bool PlatformHasDifferentMemcpyAndMemmove();
+}
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE \
+ (__sanitizer::PlatformHasDifferentMemcpyAndMemmove())
+#elif SANITIZER_WINDOWS64
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE false
+#else
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE true
+#endif // SANITIZER_MAC
+
#ifndef COMMON_INTERCEPTOR_INITIALIZE_RANGE
#define COMMON_INTERCEPTOR_INITIALIZE_RANGE(p, size) {}
#endif
@@ -163,6 +179,47 @@
COMMON_INTERCEPT_FUNCTION(fn)
#endif
+#ifndef COMMON_INTERCEPTOR_MEMSET_IMPL
+#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
+ return internal_memset(dst, v, size); \
+ COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size); \
+ if (common_flags()->intercept_intrin) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ return REAL(memset)(dst, v, size); \
+ }
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MEMMOVE_IMPL
+#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
+ return internal_memmove(dst, src, size); \
+ COMMON_INTERCEPTOR_ENTER(ctx, memmove, dst, src, size); \
+ if (common_flags()->intercept_intrin) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
+ } \
+ return REAL(memmove)(dst, src, size); \
+ }
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MEMCPY_IMPL
+#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) { \
+ return internal_memmove(dst, src, size); \
+ } \
+ COMMON_INTERCEPTOR_ENTER(ctx, memcpy, dst, src, size); \
+ if (common_flags()->intercept_intrin) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
+ } \
+ return REAL(memcpy)(dst, src, size); \
+ }
+#endif
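+// Note: these *_IMPL macros let a tool share a single body between the plain
+// memset/memmove/memcpy interceptors and the __aeabi_* / __bzero variants
+// below; a tool may override any of them by defining the macro before
+// including this file.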
+
struct FileMetadata {
// For open_memstream().
char **addr;
@@ -304,8 +361,14 @@ INTERCEPTOR(int, strncmp, const char *s1, const char *s2, uptr size) {
c2 = (unsigned char)s2[i];
if (c1 != c2 || c1 == '\0') break;
}
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, Min(i + 1, size));
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, Min(i + 1, size));
+ uptr i1 = i;
+ uptr i2 = i;
+ if (common_flags()->strict_string_checks) {
+ for (; i1 < size && s1[i1]; i1++) {}
+ for (; i2 < size && s2[i2]; i2++) {}
+ }
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s1), Min(i1 + 1, size));
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s2), Min(i2 + 1, size));
int result = CharCmpX(c1, c2);
CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncmp, GET_CALLER_PC(), s1,
s2, size, result);
@@ -348,24 +411,30 @@ INTERCEPTOR(int, strcasecmp, const char *s1, const char *s2) {
}
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncasecmp, uptr called_pc,
- const char *s1, const char *s2, uptr n,
+ const char *s1, const char *s2, uptr size,
int result)
-INTERCEPTOR(int, strncasecmp, const char *s1, const char *s2, SIZE_T n) {
+INTERCEPTOR(int, strncasecmp, const char *s1, const char *s2, SIZE_T size) {
void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, strncasecmp, s1, s2, n);
+ COMMON_INTERCEPTOR_ENTER(ctx, strncasecmp, s1, s2, size);
unsigned char c1 = 0, c2 = 0;
uptr i;
- for (i = 0; i < n; i++) {
+ for (i = 0; i < size; i++) {
c1 = (unsigned char)s1[i];
c2 = (unsigned char)s2[i];
if (CharCaseCmp(c1, c2) != 0 || c1 == '\0') break;
}
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, Min(i + 1, n));
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, Min(i + 1, n));
+ uptr i1 = i;
+ uptr i2 = i;
+ if (common_flags()->strict_string_checks) {
+ for (; i1 < size && s1[i1]; i1++) {}
+ for (; i2 < size && s2[i2]; i2++) {}
+ }
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s1), Min(i1 + 1, size));
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s2), Min(i2 + 1, size));
int result = CharCaseCmp(c1, c2);
CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncasecmp, GET_CALLER_PC(),
- s1, s2, n, result);
+ s1, s2, size, result);
return result;
}
@@ -390,7 +459,7 @@ static inline void StrstrCheck(void *ctx, char *r, const char *s1,
#if SANITIZER_INTERCEPT_STRSTR
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strstr, uptr called_pc,
- const char *s1, const char *s2, char *result);
+ const char *s1, const char *s2, char *result)
INTERCEPTOR(char*, strstr, const char *s1, const char *s2) {
if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
@@ -413,7 +482,7 @@ INTERCEPTOR(char*, strstr, const char *s1, const char *s2) {
#if SANITIZER_INTERCEPT_STRCASESTR
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcasestr, uptr called_pc,
- const char *s1, const char *s2, char *result);
+ const char *s1, const char *s2, char *result)
INTERCEPTOR(char*, strcasestr, const char *s1, const char *s2) {
void *ctx;
@@ -434,7 +503,7 @@ INTERCEPTOR(char*, strcasestr, const char *s1, const char *s2) {
#if SANITIZER_INTERCEPT_MEMMEM
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memmem, uptr called_pc,
const void *s1, SIZE_T len1, const void *s2,
- SIZE_T len2, void *result);
+ SIZE_T len2, void *result)
INTERCEPTOR(void*, memmem, const void *s1, SIZE_T len1, const void *s2,
SIZE_T len2) {
@@ -553,14 +622,9 @@ INTERCEPTOR(char *, strpbrk, const char *s1, const char *s2) {
#endif
#if SANITIZER_INTERCEPT_MEMSET
-INTERCEPTOR(void*, memset, void *dst, int v, uptr size) {
- if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
- return internal_memset(dst, v, size);
+INTERCEPTOR(void *, memset, void *dst, int v, uptr size) {
void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size);
- if (common_flags()->intercept_intrin)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size);
- return REAL(memset)(dst, v, size);
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size);
}
#define INIT_MEMSET COMMON_INTERCEPT_FUNCTION(memset)
@@ -569,16 +633,9 @@ INTERCEPTOR(void*, memset, void *dst, int v, uptr size) {
#endif
#if SANITIZER_INTERCEPT_MEMMOVE
-INTERCEPTOR(void*, memmove, void *dst, const void *src, uptr size) {
- if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
- return internal_memmove(dst, src, size);
+INTERCEPTOR(void *, memmove, void *dst, const void *src, uptr size) {
void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, memmove, dst, src, size);
- if (common_flags()->intercept_intrin) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size);
- }
- return REAL(memmove)(dst, src, size);
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
}
#define INIT_MEMMOVE COMMON_INTERCEPT_FUNCTION(memmove)
@@ -587,25 +644,30 @@ INTERCEPTOR(void*, memmove, void *dst, const void *src, uptr size) {
#endif
#if SANITIZER_INTERCEPT_MEMCPY
-INTERCEPTOR(void*, memcpy, void *dst, const void *src, uptr size) {
- if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) {
- // On OS X, calling internal_memcpy here will cause memory corruptions,
- // because memcpy and memmove are actually aliases of the same
- // implementation. We need to use internal_memmove here.
- return internal_memmove(dst, src, size);
- }
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, memcpy, dst, src, size);
- if (common_flags()->intercept_intrin) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size);
- }
+INTERCEPTOR(void *, memcpy, void *dst, const void *src, uptr size) {
+ // On OS X, calling internal_memcpy here will cause memory corruptions,
+ // because memcpy and memmove are actually aliases of the same
+ // implementation. We need to use internal_memmove here.
// N.B.: If we switch this to internal_ we'll have to use internal_memmove
// due to memcpy being an alias of memmove on OS X.
- return REAL(memcpy)(dst, src, size);
+ void *ctx;
+ if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) {
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
+ } else {
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
+ }
}
-#define INIT_MEMCPY COMMON_INTERCEPT_FUNCTION(memcpy)
+#define INIT_MEMCPY \
+ do { \
+ if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) { \
+ COMMON_INTERCEPT_FUNCTION(memcpy); \
+ } else { \
+ ASSIGN_REAL(memcpy, memmove); \
+ } \
+ CHECK(REAL(memcpy)); \
+ } while (false)
+
#else
#define INIT_MEMCPY
#endif
@@ -663,7 +725,16 @@ INTERCEPTOR(void*, memchr, const void *s, int c, SIZE_T n) {
return internal_memchr(s, c, n);
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, memchr, s, c, n);
+#if SANITIZER_WINDOWS
+ void *res;
+ if (REAL(memchr)) {
+ res = REAL(memchr)(s, c, n);
+ } else {
+ res = internal_memchr(s, c, n);
+ }
+#else
void *res = REAL(memchr)(s, c, n);
+#endif
uptr len = res ? (char *)res - (const char *)s + 1 : n;
COMMON_INTERCEPTOR_READ_RANGE(ctx, s, len);
return res;
@@ -1218,12 +1289,12 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
#if SANITIZER_INTERCEPT_SCANF
#define INIT_SCANF \
- COMMON_INTERCEPT_FUNCTION(scanf); \
- COMMON_INTERCEPT_FUNCTION(sscanf); \
- COMMON_INTERCEPT_FUNCTION(fscanf); \
- COMMON_INTERCEPT_FUNCTION(vscanf); \
- COMMON_INTERCEPT_FUNCTION(vsscanf); \
- COMMON_INTERCEPT_FUNCTION(vfscanf);
+ COMMON_INTERCEPT_FUNCTION_LDBL(scanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(sscanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(fscanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vscanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vsscanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vfscanf);
#else
#define INIT_SCANF
#endif
@@ -1396,16 +1467,16 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_snprintf, __isoc99_vsnprintf, str, size,
#if SANITIZER_INTERCEPT_PRINTF
#define INIT_PRINTF \
- COMMON_INTERCEPT_FUNCTION(printf); \
- COMMON_INTERCEPT_FUNCTION(sprintf); \
- COMMON_INTERCEPT_FUNCTION(snprintf); \
- COMMON_INTERCEPT_FUNCTION(asprintf); \
- COMMON_INTERCEPT_FUNCTION(fprintf); \
- COMMON_INTERCEPT_FUNCTION(vprintf); \
- COMMON_INTERCEPT_FUNCTION(vsprintf); \
- COMMON_INTERCEPT_FUNCTION(vsnprintf); \
- COMMON_INTERCEPT_FUNCTION(vasprintf); \
- COMMON_INTERCEPT_FUNCTION(vfprintf);
+ COMMON_INTERCEPT_FUNCTION_LDBL(printf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(sprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(snprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(asprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(fprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vsprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vsnprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vasprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vfprintf);
#else
#define INIT_PRINTF
#endif
@@ -4174,6 +4245,20 @@ INTERCEPTOR(char *, tmpnam_r, char *s) {
#define INIT_TMPNAM_R
#endif
+#if SANITIZER_INTERCEPT_TTYNAME_R
+INTERCEPTOR(int, ttyname_r, int fd, char *name, SIZE_T namesize) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ttyname_r, fd, name, namesize);
+ int res = REAL(ttyname_r)(fd, name, namesize);
+ if (res == 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ return res;
+}
+#define INIT_TTYNAME_R COMMON_INTERCEPT_FUNCTION(ttyname_r);
+#else
+#define INIT_TTYNAME_R
+#endif
+
#if SANITIZER_INTERCEPT_TEMPNAM
INTERCEPTOR(char *, tempnam, char *dir, char *pfx) {
void *ctx;
@@ -4802,47 +4887,67 @@ INTERCEPTOR(int, capset, void *hdrp, const void *datap) {
#endif
#if SANITIZER_INTERCEPT_AEABI_MEM
-DECLARE_REAL_AND_INTERCEPTOR(void *, memmove, void *, const void *, uptr)
-DECLARE_REAL_AND_INTERCEPTOR(void *, memcpy, void *, const void *, uptr)
-DECLARE_REAL_AND_INTERCEPTOR(void *, memset, void *, int, uptr)
-
INTERCEPTOR(void *, __aeabi_memmove, void *to, const void *from, uptr size) {
- return WRAP(memmove)(to, from, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
}
+
INTERCEPTOR(void *, __aeabi_memmove4, void *to, const void *from, uptr size) {
- return WRAP(memmove)(to, from, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
}
+
INTERCEPTOR(void *, __aeabi_memmove8, void *to, const void *from, uptr size) {
- return WRAP(memmove)(to, from, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
}
+
INTERCEPTOR(void *, __aeabi_memcpy, void *to, const void *from, uptr size) {
- return WRAP(memcpy)(to, from, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
}
+
INTERCEPTOR(void *, __aeabi_memcpy4, void *to, const void *from, uptr size) {
- return WRAP(memcpy)(to, from, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
}
+
INTERCEPTOR(void *, __aeabi_memcpy8, void *to, const void *from, uptr size) {
- return WRAP(memcpy)(to, from, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
}
+
// Note the argument order.
INTERCEPTOR(void *, __aeabi_memset, void *block, uptr size, int c) {
- return WRAP(memset)(block, c, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
}
+
INTERCEPTOR(void *, __aeabi_memset4, void *block, uptr size, int c) {
- return WRAP(memset)(block, c, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
}
+
INTERCEPTOR(void *, __aeabi_memset8, void *block, uptr size, int c) {
- return WRAP(memset)(block, c, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
}
+
INTERCEPTOR(void *, __aeabi_memclr, void *block, uptr size) {
- return WRAP(memset)(block, 0, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
}
+
INTERCEPTOR(void *, __aeabi_memclr4, void *block, uptr size) {
- return WRAP(memset)(block, 0, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
}
+
INTERCEPTOR(void *, __aeabi_memclr8, void *block, uptr size) {
- return WRAP(memset)(block, 0, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
}
+
#define INIT_AEABI_MEM \
COMMON_INTERCEPT_FUNCTION(__aeabi_memmove); \
COMMON_INTERCEPT_FUNCTION(__aeabi_memmove4); \
@@ -4861,11 +4966,11 @@ INTERCEPTOR(void *, __aeabi_memclr8, void *block, uptr size) {
#endif // SANITIZER_INTERCEPT_AEABI_MEM
#if SANITIZER_INTERCEPT___BZERO
-DECLARE_REAL_AND_INTERCEPTOR(void *, memset, void *, int, uptr);
-
INTERCEPTOR(void *, __bzero, void *block, uptr size) {
- return WRAP(memset)(block, 0, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
}
+
#define INIT___BZERO COMMON_INTERCEPT_FUNCTION(__bzero);
#else
#define INIT___BZERO
@@ -5855,6 +5960,72 @@ INTERCEPTOR(int, __lxstat64, int version, const char *path, void *buf) {
// FIXME: add other *stat interceptor
+#if SANITIZER_INTERCEPT_UTMP
+INTERCEPTOR(void *, getutent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutent, dummy);
+ void *res = REAL(getutent)(dummy);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmp_sz);
+ return res;
+}
+INTERCEPTOR(void *, getutid, void *ut) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutid, ut);
+ void *res = REAL(getutid)(ut);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmp_sz);
+ return res;
+}
+INTERCEPTOR(void *, getutline, void *ut) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutline, ut);
+ void *res = REAL(getutline)(ut);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmp_sz);
+ return res;
+}
+#define INIT_UTMP \
+ COMMON_INTERCEPT_FUNCTION(getutent); \
+ COMMON_INTERCEPT_FUNCTION(getutid); \
+ COMMON_INTERCEPT_FUNCTION(getutline);
+#else
+#define INIT_UTMP
+#endif
+
+#if SANITIZER_INTERCEPT_UTMPX
+INTERCEPTOR(void *, getutxent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutxent, dummy);
+ void *res = REAL(getutxent)(dummy);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmpx_sz);
+ return res;
+}
+INTERCEPTOR(void *, getutxid, void *ut) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutxid, ut);
+ void *res = REAL(getutxid)(ut);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmpx_sz);
+ return res;
+}
+INTERCEPTOR(void *, getutxline, void *ut) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutxline, ut);
+ void *res = REAL(getutxline)(ut);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmpx_sz);
+ return res;
+}
+#define INIT_UTMPX \
+ COMMON_INTERCEPT_FUNCTION(getutxent); \
+ COMMON_INTERCEPT_FUNCTION(getutxid); \
+ COMMON_INTERCEPT_FUNCTION(getutxline);
+#else
+#define INIT_UTMPX
+#endif
+
static void InitializeCommonInterceptors() {
static u64 metadata_mem[sizeof(MetadataHashMap) / sizeof(u64) + 1];
interceptor_metadata_map = new((void *)&metadata_mem) MetadataHashMap();
@@ -5999,6 +6170,7 @@ static void InitializeCommonInterceptors() {
INIT_PTHREAD_BARRIERATTR_GETPSHARED;
INIT_TMPNAM;
INIT_TMPNAM_R;
+ INIT_TTYNAME_R;
INIT_TEMPNAM;
INIT_PTHREAD_SETNAME_NP;
INIT_SINCOS;
@@ -6050,4 +6222,6 @@ static void InitializeCommonInterceptors() {
INIT___LXSTAT;
INIT___LXSTAT64;
// FIXME: add other *stat interceptors.
+ INIT_UTMP;
+ INIT_UTMPX;
}
diff --git a/lib/sanitizer_common/sanitizer_common_interceptors_format.inc b/lib/sanitizer_common/sanitizer_common_interceptors_format.inc
index 92318cda35fd..12563499c515 100644
--- a/lib/sanitizer_common/sanitizer_common_interceptors_format.inc
+++ b/lib/sanitizer_common/sanitizer_common_interceptors_format.inc
@@ -435,10 +435,6 @@ static const char *printf_parse_next(const char *p, PrintfDirective *dir) {
}
static int printf_get_value_size(PrintfDirective *dir) {
- if (dir->convSpecifier == 'm') {
- return sizeof(char *);
- }
-
if (char_is_one_of(dir->convSpecifier, "cCsS")) {
unsigned charSize =
format_get_char_size(dir->convSpecifier, dir->lengthModifier);
@@ -519,6 +515,9 @@ static void printf_common(void *ctx, const char *format, va_list aq) {
// Dynamic precision
SKIP_SCALAR_ARG(&aq, 'd', sizeof(int));
}
+ // %m does not consume an argument; it prints strerror(errno).
+ if (dir.convSpecifier == 'm')
+ continue;
int size = printf_get_value_size(&dir);
if (size == FSS_INVALID) {
Report("WARNING: unexpected format specifier in printf "
diff --git a/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc b/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
index 959c622a32f9..4ed9afedf84a 100755
--- a/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
+++ b/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
@@ -583,7 +583,8 @@ static void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d,
return;
if (request == IOCTL_SIOCGIFCONF) {
struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;
- COMMON_INTERCEPTOR_READ_RANGE(ctx, &ifc->ifc_len, sizeof(ifc->ifc_len));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, (char*)&ifc->ifc_len,
+ sizeof(ifc->ifc_len));
}
}
diff --git a/lib/sanitizer_common/sanitizer_common_libcdep.cc b/lib/sanitizer_common/sanitizer_common_libcdep.cc
index 596f5bcd3173..49ca961f3cb0 100644
--- a/lib/sanitizer_common/sanitizer_common_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_common_libcdep.cc
@@ -13,6 +13,7 @@
#include "sanitizer_common.h"
+#include "sanitizer_allocator_interface.h"
#include "sanitizer_flags.h"
#include "sanitizer_stackdepot.h"
#include "sanitizer_stacktrace.h"
@@ -46,7 +47,7 @@ void SetSandboxingCallback(void (*f)()) {
sandboxing_callback = f;
}
-void ReportErrorSummary(const char *error_type, StackTrace *stack) {
+void ReportErrorSummary(const char *error_type, const StackTrace *stack) {
#if !SANITIZER_GO
if (!common_flags()->print_summary)
return;
@@ -69,12 +70,15 @@ void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) {
SoftRssLimitExceededCallback = Callback;
}
+#if SANITIZER_LINUX && !SANITIZER_GO
void BackgroundThread(void *arg) {
uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;
uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb;
+ bool heap_profile = common_flags()->heap_profile;
uptr prev_reported_rss = 0;
uptr prev_reported_stack_depot_size = 0;
bool reached_soft_rss_limit = false;
+ uptr rss_during_last_reported_profile = 0;
while (true) {
SleepForMillis(100);
uptr current_rss_mb = GetRSS() >> 20;
@@ -116,8 +120,15 @@ void BackgroundThread(void *arg) {
SoftRssLimitExceededCallback(false);
}
}
+ if (heap_profile &&
+ current_rss_mb > rss_during_last_reported_profile * 1.1) {
+ Printf("\n\nHEAP PROFILE at RSS %zdMb\n", current_rss_mb);
+ __sanitizer_print_memory_profile(90);
+ rss_during_last_reported_profile = current_rss_mb;
+ }
}
}
+#endif
void WriteToSyslog(const char *msg) {
InternalScopedString msg_copy(kErrorMessageBufferSize);
@@ -142,7 +153,8 @@ void MaybeStartBackgroudThread() {
!SANITIZER_GO // Need to implement/test on other platforms.
// Start the background thread if one of the rss limits is given.
if (!common_flags()->hard_rss_limit_mb &&
- !common_flags()->soft_rss_limit_mb) return;
+ !common_flags()->soft_rss_limit_mb &&
+ !common_flags()->heap_profile) return;
if (!&real_pthread_create) return; // Can't spawn the thread anyway.
internal_start_thread(BackgroundThread, nullptr);
#endif
@@ -152,7 +164,7 @@ void MaybeStartBackgroudThread() {
void NOINLINE
__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args) {
- PrepareForSandboxing(args);
- if (sandboxing_callback)
- sandboxing_callback();
+ __sanitizer::PrepareForSandboxing(args);
+ if (__sanitizer::sandboxing_callback)
+ __sanitizer::sandboxing_callback();
}
diff --git a/lib/sanitizer_common/sanitizer_common_nolibc.cc b/lib/sanitizer_common/sanitizer_common_nolibc.cc
index e24cf998ec69..ba54c739a9e0 100644
--- a/lib/sanitizer_common/sanitizer_common_nolibc.cc
+++ b/lib/sanitizer_common/sanitizer_common_nolibc.cc
@@ -17,6 +17,9 @@
namespace __sanitizer {
+// The Windows implementations of these functions use the win32 API directly,
+// bypassing libc.
+#if !SANITIZER_WINDOWS
#if SANITIZER_LINUX
bool ShouldLogAfterPrintf() { return false; }
void LogMessageOnPrintf(const char *str) {}
@@ -24,5 +27,10 @@ void LogMessageOnPrintf(const char *str) {}
void WriteToSyslog(const char *buffer) {}
void Abort() { internal__exit(1); }
void SleepForSeconds(int seconds) { internal_sleep(seconds); }
+#endif // !SANITIZER_WINDOWS
+
+#if !SANITIZER_WINDOWS && !SANITIZER_MAC
+void ListOfModules::init() {}
+#endif
} // namespace __sanitizer
diff --git a/lib/sanitizer_common/sanitizer_coverage_libcdep.cc b/lib/sanitizer_common/sanitizer_coverage_libcdep.cc
index 51b53d345ab8..ebdee33d7d5b 100644
--- a/lib/sanitizer_common/sanitizer_coverage_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_coverage_libcdep.cc
@@ -47,6 +47,8 @@
#include "sanitizer_symbolizer.h"
#include "sanitizer_flags.h"
+using namespace __sanitizer;
+
static const u64 kMagic64 = 0xC0BFFFFFFFFFFF64ULL;
static const u64 kMagic32 = 0xC0BFFFFFFFFFFF32ULL;
static const uptr kNumWordsForMagic = SANITIZER_WORDSIZE == 64 ? 1 : 2;
@@ -110,7 +112,6 @@ class CoverageData {
uptr *data();
uptr size() const;
- uptr *buffer() const { return pc_buffer; }
private:
struct NamedPcRange {
@@ -125,9 +126,8 @@ class CoverageData {
// Maximal size pc array may ever grow.
// We MmapNoReserve this space to ensure that the array is contiguous.
- static const uptr kPcArrayMaxSize = FIRST_32_SECOND_64(
- 1 << (SANITIZER_ANDROID ? 24 : (SANITIZER_WINDOWS ? 27 : 26)),
- 1 << 27);
+ static const uptr kPcArrayMaxSize =
+ FIRST_32_SECOND_64(1 << (SANITIZER_ANDROID ? 24 : 26), 1 << 27);
// The amount file mapping for the pc array is grown by.
static const uptr kPcArrayMmapSize = 64 * 1024;
@@ -143,8 +143,6 @@ class CoverageData {
// Descriptor of the file mapped pc array.
fd_t pc_fd;
- uptr *pc_buffer;
-
// Vector of coverage guard arrays, protected by mu.
InternalMmapVectorNoCtor<s32*> guard_array_vec;
@@ -216,11 +214,6 @@ void CoverageData::Enable() {
atomic_store(&pc_array_size, kPcArrayMaxSize, memory_order_relaxed);
}
- pc_buffer = nullptr;
- if (common_flags()->coverage_pc_buffer)
- pc_buffer = reinterpret_cast<uptr *>(MmapNoReserveOrDie(
- sizeof(uptr) * kPcArrayMaxSize, "CovInit::pc_buffer"));
-
cc_array = reinterpret_cast<uptr **>(MmapNoReserveOrDie(
sizeof(uptr *) * kCcArrayMaxSize, "CovInit::cc_array"));
atomic_store(&cc_array_size, kCcArrayMaxSize, memory_order_relaxed);
@@ -258,10 +251,6 @@ void CoverageData::Disable() {
UnmapOrDie(cc_array, sizeof(uptr *) * kCcArrayMaxSize);
cc_array = nullptr;
}
- if (pc_buffer) {
- UnmapOrDie(pc_buffer, sizeof(uptr) * kPcArrayMaxSize);
- pc_buffer = nullptr;
- }
if (tr_event_array) {
UnmapOrDie(tr_event_array,
sizeof(tr_event_array[0]) * kTrEventArrayMaxSize +
@@ -430,7 +419,6 @@ void CoverageData::Add(uptr pc, u32 *guard) {
atomic_load(&pc_array_size, memory_order_acquire));
uptr counter = atomic_fetch_add(&coverage_counter, 1, memory_order_relaxed);
pc_array[idx] = BundlePcAndCounter(pc, counter);
- if (pc_buffer) pc_buffer[counter] = pc;
}
// Registers a pair caller=>callee.
@@ -966,6 +954,7 @@ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() {
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {
coverage_data.DumpAll();
+ __sanitizer_dump_trace_pc_guard_coverage();
}
SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_cov_module_init(s32 *guards, uptr npcs, u8 *counters,
@@ -1019,12 +1008,6 @@ uptr __sanitizer_get_coverage_guards(uptr **data) {
}
SANITIZER_INTERFACE_ATTRIBUTE
-uptr __sanitizer_get_coverage_pc_buffer(uptr **data) {
- *data = coverage_data.buffer();
- return __sanitizer_get_total_unique_coverage();
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_number_of_counters() {
return coverage_data.GetNumberOf8bitCounters();
}
@@ -1034,8 +1017,26 @@ uptr __sanitizer_update_counter_bitset_and_clear_counters(u8 *bitset) {
return coverage_data.Update8bitCounterBitsetAndClearCounters(bitset);
}
// Default empty implementations (weak). Users should redefine them.
+#if !SANITIZER_WINDOWS // weak does not work on Windows.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp() {}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_cmp1() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_cmp2() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_cmp4() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_cmp8() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_switch() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_div4() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_div8() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_gep() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_pc_indir() {}
+#endif // !SANITIZER_WINDOWS
} // extern "C"
diff --git a/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc b/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc
new file mode 100644
index 000000000000..d83b77917bda
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc
@@ -0,0 +1,165 @@
+//===-- sanitizer_coverage_libcdep_new.cc ---------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Sanitizer Coverage Controller for Trace PC Guard.
+
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_common.h"
+#include "sanitizer_symbolizer.h"
+
+using namespace __sanitizer;
+
+using AddressRange = LoadedModule::AddressRange;
+
+namespace {
+
+static const u64 Magic64 = 0xC0BFFFFFFFFFFF64ULL;
+static const u64 Magic32 = 0xC0BFFFFFFFFFFF32ULL;
+static const u64 Magic = SANITIZER_WORDSIZE == 64 ? Magic64 : Magic32;
+
+static fd_t OpenFile(const char* path) {
+ error_t err;
+ fd_t fd = OpenFile(path, WrOnly, &err);
+ if (fd == kInvalidFd)
+ Report("SanitizerCoverage: failed to open %s for writing (reason: %d)\n",
+ path, err);
+ return fd;
+}
+
+static void GetCoverageFilename(char* path, const char* name,
+ const char* extension) {
+ CHECK(name);
+ internal_snprintf(path, kMaxPathLength, "%s/%s.%zd.%s",
+ common_flags()->coverage_dir, name, internal_getpid(),
+ extension);
+}
+
+static void WriteModuleCoverage(char* file_path, const char* module_name,
+ const uptr* pcs, uptr len) {
+ GetCoverageFilename(file_path, StripModuleName(module_name), "sancov");
+ fd_t fd = OpenFile(file_path);
+ WriteToFile(fd, &Magic, sizeof(Magic));
+ WriteToFile(fd, pcs, len * sizeof(*pcs));
+ CloseFile(fd);
+ Printf("SanitizerCoverage: %s %zd PCs written\n", file_path, len);
+}
+
+static void SanitizerDumpCoverage(const uptr* unsorted_pcs, uptr len) {
+ if (!len) return;
+
+ char* file_path = static_cast<char*>(InternalAlloc(kMaxPathLength));
+ char* module_name = static_cast<char*>(InternalAlloc(kMaxPathLength));
+ uptr* pcs = static_cast<uptr*>(InternalAlloc(len * sizeof(uptr)));
+
+ internal_memcpy(pcs, unsorted_pcs, len * sizeof(uptr));
+ SortArray(pcs, len);
+
+ bool module_found = false;
+ uptr last_base = 0;
+ uptr module_start_idx = 0;
+
+ for (uptr i = 0; i < len; ++i) {
+ const uptr pc = pcs[i];
+ if (!pc) continue;
+
+ if (!__sanitizer_get_module_and_offset_for_pc(pc, nullptr, 0, &pcs[i])) {
+ Printf("ERROR: bad pc %x\n", pc);
+ continue;
+ }
+ uptr module_base = pc - pcs[i];
+
+ if (module_base != last_base || !module_found) {
+ if (module_found) {
+ WriteModuleCoverage(file_path, module_name, &pcs[module_start_idx],
+ i - module_start_idx);
+ }
+
+ last_base = module_base;
+ module_start_idx = i;
+ module_found = true;
+ __sanitizer_get_module_and_offset_for_pc(pc, module_name, kMaxPathLength,
+ &pcs[i]);
+ }
+ }
+
+ if (module_found) {
+ WriteModuleCoverage(file_path, module_name, &pcs[module_start_idx],
+ len - module_start_idx);
+ }
+
+ InternalFree(file_path);
+ InternalFree(module_name);
+ InternalFree(pcs);
+}
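+// The resulting file (written to coverage_dir as <module>.<pid>.sancov) holds
+// the magic word followed by sorted module-relative PC offsets; pcs[i] is
+// rewritten to an offset by __sanitizer_get_module_and_offset_for_pc above.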
+
+// Collects trace-pc guard coverage.
+// This class relies on zero-initialization.
+class TracePcGuardController {
+ public:
+ void Initialize() {
+ CHECK(!initialized);
+
+ initialized = true;
+ pc_vector.Initialize(0);
+ }
+
+ void InitTracePcGuard(u32* start, u32* end) {
+ if (!initialized) Initialize();
+ CHECK(!*start);
+ CHECK_NE(start, end);
+
+ u32 i = pc_vector.size();
+ for (u32* p = start; p < end; p++) *p = ++i;
+ pc_vector.resize(i);
+ }
+
+ void TracePcGuard(u32* guard, uptr pc) {
+ atomic_uint32_t* guard_ptr = reinterpret_cast<atomic_uint32_t*>(guard);
+ u32 idx = atomic_exchange(guard_ptr, 0, memory_order_relaxed);
+ if (!idx) return;
+ // We start indices from 1.
+ pc_vector[idx - 1] = pc;
+ }
+
+ void Dump() {
+ if (!initialized || !common_flags()->coverage) return;
+ __sanitizer_dump_coverage(pc_vector.data(), pc_vector.size());
+ }
+
+ private:
+ bool initialized;
+ InternalMmapVectorNoCtor<uptr> pc_vector;
+};
+
+static TracePcGuardController pc_guard_controller;
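+// Rough flow (illustrative): the instrumentation emits one u32 guard per
+// instrumented point and calls __sanitizer_cov_trace_pc_guard_init(start, end)
+// for each module; InitTracePcGuard numbers the guards 1..N. The first hit of
+// a guard reaches __sanitizer_cov_trace_pc_guard, which atomically zeroes it
+// and records the caller PC at pc_vector[idx - 1], so subsequent hits return
+// immediately; Dump() then hands the collected PCs to
+// __sanitizer_dump_coverage.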
+
+} // namespace
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage( // NOLINT
+ const uptr* pcs, uptr len) {
+ return SanitizerDumpCoverage(pcs, len);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_pc_guard(u32* guard) {
+ if (!*guard) return;
+ pc_guard_controller.TracePcGuard(guard, GET_CALLER_PC() - 1);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_pc_guard_init(u32* start, u32* end) {
+ if (start == end || *start) return;
+ pc_guard_controller.InitTracePcGuard(start, end);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage() {
+ pc_guard_controller.Dump();
+}
+} // extern "C"
diff --git a/lib/sanitizer_common/sanitizer_dbghelp.h b/lib/sanitizer_common/sanitizer_dbghelp.h
new file mode 100644
index 000000000000..1689edbf92db
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_dbghelp.h
@@ -0,0 +1,42 @@
+//===-- sanitizer_dbghelp.h ------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Wrappers for lazily loaded dbghelp.dll. Provides function pointers and a
+// callback to initialize them.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_SYMBOLIZER_WIN_H
+#define SANITIZER_SYMBOLIZER_WIN_H
+
+#if !SANITIZER_WINDOWS
+#error "sanitizer_dbghelp.h is a Windows-only header"
+#endif
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include <dbghelp.h>
+
+namespace __sanitizer {
+
+extern decltype(::StackWalk64) *StackWalk64;
+extern decltype(::SymCleanup) *SymCleanup;
+extern decltype(::SymFromAddr) *SymFromAddr;
+extern decltype(::SymFunctionTableAccess64) *SymFunctionTableAccess64;
+extern decltype(::SymGetLineFromAddr64) *SymGetLineFromAddr64;
+extern decltype(::SymGetModuleBase64) *SymGetModuleBase64;
+extern decltype(::SymGetSearchPathW) *SymGetSearchPathW;
+extern decltype(::SymInitialize) *SymInitialize;
+extern decltype(::SymSetOptions) *SymSetOptions;
+extern decltype(::SymSetSearchPathW) *SymSetSearchPathW;
+extern decltype(::UnDecorateSymbolName) *UnDecorateSymbolName;
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SYMBOLIZER_WIN_H
diff --git a/lib/sanitizer_common/sanitizer_flags.cc b/lib/sanitizer_common/sanitizer_flags.cc
index c2f19d425bdb..913ce3cb423e 100644
--- a/lib/sanitizer_common/sanitizer_flags.cc
+++ b/lib/sanitizer_common/sanitizer_flags.cc
@@ -30,11 +30,6 @@ struct FlagDescription {
IntrusiveList<FlagDescription> flag_descriptions;
-// If set, the tool will install its own SEGV signal handler by default.
-#ifndef SANITIZER_NEEDS_SEGV
-# define SANITIZER_NEEDS_SEGV 1
-#endif
-
void CommonFlags::SetDefaults() {
#define COMMON_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "sanitizer_flags.inc"
diff --git a/lib/sanitizer_common/sanitizer_flags.inc b/lib/sanitizer_common/sanitizer_flags.inc
index 203f41ca3d2a..43900f87b330 100644
--- a/lib/sanitizer_common/sanitizer_flags.inc
+++ b/lib/sanitizer_common/sanitizer_flags.inc
@@ -75,7 +75,7 @@ COMMON_FLAG(bool, print_summary, true,
"If false, disable printing error summaries in addition to error "
"reports.")
COMMON_FLAG(bool, check_printf, true, "Check printf arguments.")
-COMMON_FLAG(bool, handle_segv, SANITIZER_NEEDS_SEGV,
+COMMON_FLAG(bool, handle_segv, true,
"If set, registers the tool's custom SIGSEGV/SIGBUS handler.")
COMMON_FLAG(bool, handle_abort, false,
"If set, registers the tool's custom SIGABRT handler.")
@@ -118,6 +118,12 @@ COMMON_FLAG(uptr, soft_rss_limit_mb, 0,
" until the RSS goes below the soft limit."
" This limit does not affect memory allocations other than"
" malloc/new.")
+COMMON_FLAG(bool, heap_profile, false, "Experimental heap profiler, asan-only")
+COMMON_FLAG(s32, allocator_release_to_os_interval_ms, kReleaseToOSIntervalNever,
+ "Experimental. Only affects a 64-bit allocator. If set, tries to "
+ "release unused memory to the OS, but not more often than this "
+ "interval (in milliseconds). Negative values mean do not attempt "
+ "to release memory to the OS.\n")
COMMON_FLAG(bool, can_use_proc_maps_statm, true,
"If false, do not attempt to read /proc/maps/statm."
" Mostly useful for testing sanitizers.")
@@ -144,19 +150,16 @@ COMMON_FLAG(bool, coverage_direct, SANITIZER_ANDROID,
COMMON_FLAG(const char *, coverage_dir, ".",
"Target directory for coverage dumps. Defaults to the current "
"directory.")
-COMMON_FLAG(bool, coverage_pc_buffer, true,
- "If set (and if 'coverage' is set too), the pcs would be collected "
- "in a buffer.")
COMMON_FLAG(bool, full_address_space, false,
"Sanitize complete address space; "
"by default kernel area on 32-bit platforms will not be sanitized")
COMMON_FLAG(bool, print_suppressions, true,
"Print matched suppressions at exit.")
COMMON_FLAG(
- bool, disable_coredump, (SANITIZER_WORDSIZE == 64),
- "Disable core dumping. By default, disable_core=1 on 64-bit to avoid "
- "dumping a 16T+ core file. Ignored on OSes that don't dump core by"
- "default and for sanitizers that don't reserve lots of virtual memory.")
+ bool, disable_coredump, (SANITIZER_WORDSIZE == 64) && !SANITIZER_GO,
+ "Disable core dumping. By default, disable_coredump=1 on 64-bit to avoid"
+ " dumping a 16T+ core file. Ignored on OSes that don't dump core by"
+ " default and for sanitizers that don't reserve lots of virtual memory.")
COMMON_FLAG(bool, use_madv_dontdump, true,
"If set, instructs kernel to not store the (huge) shadow "
"in core file.")
@@ -216,7 +219,7 @@ COMMON_FLAG(bool, decorate_proc_maps, false, "If set, decorate sanitizer "
COMMON_FLAG(int, exitcode, 1, "Override the program exit status if the tool "
"found an error")
COMMON_FLAG(
- bool, abort_on_error, SANITIZER_MAC,
+ bool, abort_on_error, SANITIZER_ANDROID || SANITIZER_MAC,
"If set, the tool calls abort() instead of _exit() after printing the "
"error report.")
COMMON_FLAG(bool, suppress_equal_pcs, true,
diff --git a/lib/sanitizer_common/sanitizer_interface_internal.h b/lib/sanitizer_common/sanitizer_interface_internal.h
index 7f43c84c2e7d..174d5e92ba44 100644
--- a/lib/sanitizer_common/sanitizer_interface_internal.h
+++ b/lib/sanitizer_common/sanitizer_interface_internal.h
@@ -46,8 +46,12 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_report_error_summary(const char *error_summary);
- SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init();
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage(
+ const __sanitizer::uptr *pcs, const __sanitizer::uptr len);
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage();
+
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(__sanitizer::u32 *guard);
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_annotate_contiguous_container(const void *beg,
@@ -60,6 +64,11 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
const void *__sanitizer_contiguous_container_find_bad_address(
const void *beg, const void *mid, const void *end);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ int __sanitizer_get_module_and_offset_for_pc(
+ __sanitizer::uptr pc, char *module_path,
+ __sanitizer::uptr module_path_len, __sanitizer::uptr *pc_offset);
} // extern "C"
#endif // SANITIZER_INTERFACE_INTERNAL_H
diff --git a/lib/sanitizer_common/sanitizer_internal_defs.h b/lib/sanitizer_common/sanitizer_internal_defs.h
index 720672d2908a..02a1e527312e 100644
--- a/lib/sanitizer_common/sanitizer_internal_defs.h
+++ b/lib/sanitizer_common/sanitizer_internal_defs.h
@@ -24,7 +24,7 @@
# define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllexport)
// FIXME find out what we need on Windows, if anything.
# define SANITIZER_WEAK_ATTRIBUTE
-#elif defined(SANITIZER_GO)
+#elif SANITIZER_GO
# define SANITIZER_INTERFACE_ATTRIBUTE
# define SANITIZER_WEAK_ATTRIBUTE
#else
@@ -32,7 +32,7 @@
# define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak))
#endif
-#if (SANITIZER_LINUX || SANITIZER_WINDOWS) && !defined(SANITIZER_GO)
+#if (SANITIZER_LINUX || SANITIZER_WINDOWS) && !SANITIZER_GO
# define SANITIZER_SUPPORTS_WEAK_HOOKS 1
#else
# define SANITIZER_SUPPORTS_WEAK_HOOKS 0
@@ -296,12 +296,12 @@ inline void Trap() {
}
#else
extern "C" void* _ReturnAddress(void);
+extern "C" void* _AddressOfReturnAddress(void);
# pragma intrinsic(_ReturnAddress)
+# pragma intrinsic(_AddressOfReturnAddress)
# define GET_CALLER_PC() (uptr)_ReturnAddress()
// CaptureStackBackTrace doesn't need to know BP on Windows.
-// FIXME: This macro is still used when printing error reports though it's not
-// clear if the BP value is needed in the ASan reports on Windows.
-# define GET_CURRENT_FRAME() (uptr)0xDEADBEEF
+# define GET_CURRENT_FRAME() (((uptr)_AddressOfReturnAddress()) + sizeof(uptr))
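+// _AddressOfReturnAddress() points at the return-address slot; one word past
+// it is a reasonable approximation of the current frame's start (assumption:
+// this value is only consumed when printing reports, not for unwinding).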
extern "C" void __ud2(void);
# pragma intrinsic(__ud2)
@@ -328,6 +328,17 @@ inline void Trap() {
} // namespace __sanitizer
-using namespace __sanitizer; // NOLINT
+namespace __asan { using namespace __sanitizer; } // NOLINT
+namespace __dsan { using namespace __sanitizer; } // NOLINT
+namespace __dfsan { using namespace __sanitizer; } // NOLINT
+namespace __esan { using namespace __sanitizer; } // NOLINT
+namespace __lsan { using namespace __sanitizer; } // NOLINT
+namespace __msan { using namespace __sanitizer; } // NOLINT
+namespace __tsan { using namespace __sanitizer; } // NOLINT
+namespace __scudo { using namespace __sanitizer; } // NOLINT
+namespace __ubsan { using namespace __sanitizer; } // NOLINT
+namespace __xray { using namespace __sanitizer; } // NOLINT
+namespace __interception { using namespace __sanitizer; } // NOLINT
+
#endif // SANITIZER_DEFS_H
diff --git a/lib/sanitizer_common/sanitizer_libc.h b/lib/sanitizer_common/sanitizer_libc.h
index 155bbc49a548..9c11fb0ad2be 100644
--- a/lib/sanitizer_common/sanitizer_libc.h
+++ b/lib/sanitizer_common/sanitizer_libc.h
@@ -62,10 +62,12 @@ int internal_snprintf(char *buffer, uptr length, const char *format, ...);
bool mem_is_zero(const char *mem, uptr size);
// I/O
-const fd_t kInvalidFd = (fd_t)-1;
-const fd_t kStdinFd = 0;
-const fd_t kStdoutFd = (fd_t)1;
-const fd_t kStderrFd = (fd_t)2;
+// Define these as macros so we can use them in linker initialized global
+// structs without dynamic initialization.
+#define kInvalidFd ((fd_t)-1)
+#define kStdinFd ((fd_t)0)
+#define kStdoutFd ((fd_t)1)
+#define kStderrFd ((fd_t)2)
uptr internal_ftruncate(fd_t fd, uptr size);
diff --git a/lib/sanitizer_common/sanitizer_libignore.cc b/lib/sanitizer_common/sanitizer_libignore.cc
index 545393966b38..33a1763d28ea 100644
--- a/lib/sanitizer_common/sanitizer_libignore.cc
+++ b/lib/sanitizer_common/sanitizer_libignore.cc
@@ -50,23 +50,23 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
}
// Scan suppressions list and find newly loaded and unloaded libraries.
- MemoryMappingLayout proc_maps(/*cache_enabled*/false);
- InternalScopedString module(kMaxPathLength);
+ ListOfModules modules;
+ modules.init();
for (uptr i = 0; i < count_; i++) {
Lib *lib = &libs_[i];
bool loaded = false;
- proc_maps.Reset();
- uptr b, e, off, prot;
- while (proc_maps.Next(&b, &e, &off, module.data(), module.size(), &prot)) {
- if ((prot & MemoryMappingLayout::kProtectionExecute) == 0)
- continue;
- if (TemplateMatch(lib->templ, module.data()) ||
- (lib->real_name &&
- internal_strcmp(lib->real_name, module.data()) == 0)) {
+ for (const auto &mod : modules) {
+ for (const auto &range : mod.ranges()) {
+ if (!range.executable)
+ continue;
+ if (!TemplateMatch(lib->templ, mod.full_name()) &&
+ !(lib->real_name &&
+ internal_strcmp(lib->real_name, mod.full_name()) == 0))
+ continue;
if (loaded) {
Report("%s: called_from_lib suppression '%s' is matched against"
" 2 libraries: '%s' and '%s'\n",
- SanitizerToolName, lib->templ, lib->name, module.data());
+ SanitizerToolName, lib->templ, lib->name, mod.full_name());
Die();
}
loaded = true;
@@ -75,13 +75,14 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
VReport(1,
"Matched called_from_lib suppression '%s' against library"
" '%s'\n",
- lib->templ, module.data());
+ lib->templ, mod.full_name());
lib->loaded = true;
- lib->name = internal_strdup(module.data());
+ lib->name = internal_strdup(mod.full_name());
const uptr idx = atomic_load(&loaded_count_, memory_order_relaxed);
- code_ranges_[idx].begin = b;
- code_ranges_[idx].end = e;
+ code_ranges_[idx].begin = range.beg;
+ code_ranges_[idx].end = range.end;
atomic_store(&loaded_count_, idx + 1, memory_order_release);
+ break;
}
}
if (lib->loaded && !loaded) {
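
The LibIgnore rewrite above drops the /proc/self/maps walk in favor of the
ListOfModules abstraction and records only executable ranges. A rough
standalone analogue of that module walk, using dl_iterate_phdr(3) directly
(illustrative only; the runtime uses its own module list, not this API):

    #include <link.h>
    #include <cstdint>
    #include <cstdio>

    static int PrintExecRanges(struct dl_phdr_info *info, size_t, void *) {
      for (int i = 0; i < info->dlpi_phnum; i++) {
        const ElfW(Phdr) &ph = info->dlpi_phdr[i];
        // Keep only loadable, executable segments, like range.executable above.
        if (ph.p_type != PT_LOAD || !(ph.p_flags & PF_X)) continue;
        uintptr_t beg = info->dlpi_addr + ph.p_vaddr;
        std::printf("%s: executable range [%p, %p)\n",
                    info->dlpi_name[0] ? info->dlpi_name : "<main>",
                    (void *)beg, (void *)(beg + ph.p_memsz));
      }
      return 0;  // keep iterating over loaded objects
    }

    int main() {
      dl_iterate_phdr(PrintExecRanges, nullptr);
      return 0;
    }
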
diff --git a/lib/sanitizer_common/sanitizer_linux.cc b/lib/sanitizer_common/sanitizer_linux.cc
index 5c5a1a61a657..76cdc72a0f0c 100644
--- a/lib/sanitizer_common/sanitizer_linux.cc
+++ b/lib/sanitizer_common/sanitizer_linux.cc
@@ -99,7 +99,7 @@ const int FUTEX_WAKE = 1;
# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0
#endif
-#if defined(__x86_64__)
+#if defined(__x86_64__) || SANITIZER_MIPS64
extern "C" {
extern void internal_sigreturn();
}
@@ -671,7 +671,7 @@ int internal_sigaction_norestorer(int signum, const void *act, void *oldact) {
// Invokes sigaction via a raw syscall with a restorer, but does not support
// all platforms yet.
// We disable for Go simply because we have not yet added to buildgo.sh.
-#if defined(__x86_64__) && !SANITIZER_GO
+#if (defined(__x86_64__) || SANITIZER_MIPS64) && !SANITIZER_GO
int internal_sigaction_syscall(int signum, const void *act, void *oldact) {
if (act == nullptr)
return internal_sigaction_norestorer(signum, act, oldact);
@@ -801,8 +801,9 @@ bool ThreadLister::GetDirectoryEntries() {
uptr GetPageSize() {
// Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array.
-#if (SANITIZER_LINUX && (defined(__x86_64__) || defined(__i386__))) || \
- SANITIZER_ANDROID
+#if SANITIZER_ANDROID
+ return 4096;
+#elif SANITIZER_LINUX && (defined(__x86_64__) || defined(__i386__))
return EXEC_PAGESIZE;
#else
return sysconf(_SC_PAGESIZE); // EXEC_PAGESIZE may not be trustworthy.
@@ -1229,7 +1230,7 @@ bool IsHandledDeadlySignal(int signum) {
return (signum == SIGSEGV || signum == SIGBUS) && common_flags()->handle_segv;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void *internal_start_thread(void(*func)(void *arg), void *arg) {
// Start the thread with signals blocked, otherwise it can steal user signals.
__sanitizer_sigset_t set, old;
@@ -1291,10 +1292,6 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag(void *context) {
#elif defined(__arm__)
static const uptr FSR_WRITE = 1U << 11;
uptr fsr = ucontext->uc_mcontext.error_code;
- // FSR bits 5:0 describe the abort type, and are never 0 (or so it seems).
- // Zero FSR indicates an older kernel that does not pass this information to
- // the userspace.
- if (fsr == 0) return UNKNOWN;
return fsr & FSR_WRITE ? WRITE : READ;
#elif defined(__aarch64__)
static const u64 ESR_ELx_WNR = 1U << 6;
@@ -1307,6 +1304,10 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag(void *context) {
#endif
}
+void SignalContext::DumpAllRegisters(void *context) {
+ // FIXME: Implement this.
+}
+
void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
#if defined(__arm__)
ucontext_t *ucontext = (ucontext_t*)context;
@@ -1392,6 +1393,11 @@ void MaybeReexec() {
// No need to re-exec on Linux.
}
+uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding) {
+ UNREACHABLE("FindAvailableMemoryRange is not available");
+ return 0;
+}
+
} // namespace __sanitizer
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
diff --git a/lib/sanitizer_common/sanitizer_linux.h b/lib/sanitizer_common/sanitizer_linux.h
index 526fa4426e34..d4d0f47eed02 100644
--- a/lib/sanitizer_common/sanitizer_linux.h
+++ b/lib/sanitizer_common/sanitizer_linux.h
@@ -42,7 +42,7 @@ uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);
// (like the process-wide error reporting SEGV handler) must use
// internal_sigaction instead.
int internal_sigaction_norestorer(int signum, const void *act, void *oldact);
-#if defined(__x86_64__) && !SANITIZER_GO
+#if (defined(__x86_64__) || SANITIZER_MIPS64) && !SANITIZER_GO
// Uses a raw system call to avoid interceptors.
int internal_sigaction_syscall(int signum, const void *act, void *oldact);
#endif
diff --git a/lib/sanitizer_common/sanitizer_linux_libcdep.cc b/lib/sanitizer_common/sanitizer_linux_libcdep.cc
index a37bdf118d3c..eb14c970a7fb 100644
--- a/lib/sanitizer_common/sanitizer_linux_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_linux_libcdep.cc
@@ -34,6 +34,7 @@
#include <pthread.h>
#include <signal.h>
#include <sys/resource.h>
+#include <syslog.h>
#if SANITIZER_FREEBSD
#include <pthread_np.h>
@@ -51,8 +52,6 @@
#if SANITIZER_ANDROID && __ANDROID_API__ < 21
#include <android/log.h>
-#else
-#include <syslog.h>
#endif
#if !SANITIZER_ANDROID
@@ -299,7 +298,10 @@ uptr ThreadSelf() {
rdhwr %0,$29;\
.set pop" : "=r" (thread_pointer));
descr_addr = thread_pointer - kTlsTcbOffset - TlsPreTcbSize();
-# elif defined(__aarch64__) || defined(__s390__)
+# elif defined(__aarch64__)
+ descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
+ ThreadDescriptorSize();
+# elif defined(__s390__)
descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer());
# elif defined(__powerpc64__)
// PPC64LE uses TLS variant I. The thread pointer (in GPR 13)
@@ -521,6 +523,7 @@ uptr GetRSS() {
static atomic_uint8_t android_log_initialized;
void AndroidLogInit() {
+ openlog(GetProcessName(), 0, LOG_USER);
atomic_store(&android_log_initialized, 1, memory_order_release);
}
diff --git a/lib/sanitizer_common/sanitizer_linux_mips64.S b/lib/sanitizer_common/sanitizer_linux_mips64.S
new file mode 100644
index 000000000000..8729642aa654
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_linux_mips64.S
@@ -0,0 +1,23 @@
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+
+// Avoid being marked as needing an executable stack:
+#if defined(__linux__) && defined(__ELF__)
+.section .note.GNU-stack,"",%progbits
+#endif
+
+// Further contents are mips64 only:
+#if defined(__linux__) && defined(__mips64)
+
+.section .text
+.set noreorder
+.globl internal_sigreturn
+.type internal_sigreturn, @function
+internal_sigreturn:
+
+ li $v0,5211 // #5211 is for SYS_rt_sigreturn
+ syscall
+
+.size internal_sigreturn, .-internal_sigreturn
+
+#endif // defined(__linux__) && defined(__mips64)
diff --git a/lib/sanitizer_common/sanitizer_mac.cc b/lib/sanitizer_common/sanitizer_mac.cc
index 69178c69ebc0..73e018c857e8 100644
--- a/lib/sanitizer_common/sanitizer_mac.cc
+++ b/lib/sanitizer_common/sanitizer_mac.cc
@@ -72,12 +72,23 @@ extern "C" {
#include <unistd.h>
#include <util.h>
-// from <crt_externs.h>, but we don't have that file on iOS
+// From <crt_externs.h>, but we don't have that file on iOS.
extern "C" {
extern char ***_NSGetArgv(void);
extern char ***_NSGetEnviron(void);
}
+// From <mach/mach_vm.h>, but we don't have that file on iOS.
+extern "C" {
+ extern kern_return_t mach_vm_region_recurse(
+ vm_map_t target_task,
+ mach_vm_address_t *address,
+ mach_vm_size_t *size,
+ natural_t *nesting_depth,
+ vm_region_recurse_info_t info,
+ mach_msg_type_number_t *infoCnt);
+}
+
namespace __sanitizer {
#include "sanitizer_syscall_generic.inc"
@@ -362,7 +373,7 @@ void InitTlsSize() {
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
uptr *tls_addr, uptr *tls_size) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
uptr stack_top, stack_bottom;
GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
*stk_addr = stack_bottom;
@@ -387,6 +398,10 @@ bool IsHandledDeadlySignal(int signum) {
if ((SANITIZER_WATCHOS || SANITIZER_TVOS) && !(SANITIZER_IOSSIM))
// Handling fatal signals on watchOS and tvOS devices is disallowed.
return false;
+ if (common_flags()->handle_abort && signum == SIGABRT)
+ return true;
+ if (common_flags()->handle_sigill && signum == SIGILL)
+ return true;
return (signum == SIGSEGV || signum == SIGBUS) && common_flags()->handle_segv;
}
@@ -433,6 +448,15 @@ MacosVersion GetMacosVersion() {
return result;
}
+bool PlatformHasDifferentMemcpyAndMemmove() {
+ // On OS X 10.7 memcpy() and memmove() are both resolved
+ // into memmove$VARIANT$sse42.
+ // See also https://github.com/google/sanitizers/issues/34.
+ // TODO(glider): need to check dynamically that memcpy() and memmove() are
+ // actually the same function.
+ return GetMacosVersion() == MACOS_VERSION_SNOW_LEOPARD;
+}
+
uptr GetRSS() {
struct task_basic_info info;
unsigned count = TASK_BASIC_INFO_COUNT;
@@ -458,12 +482,12 @@ void *internal_start_thread(void(*func)(void *arg), void *arg) {
void internal_join_thread(void *th) { pthread_join((pthread_t)th, 0); }
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static BlockingMutex syslog_lock(LINKER_INITIALIZED);
#endif
void WriteOneLineToSyslog(const char *s) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
syslog_lock.CheckLocked();
asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", s);
#endif
@@ -476,7 +500,7 @@ void LogMessageOnPrintf(const char *str) {
}
void LogFullErrorReport(const char *buffer) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Log with os_trace. This will make it into the crash log.
#if SANITIZER_OS_TRACE
if (GetMacosVersion() >= MACOS_VERSION_YOSEMITE) {
@@ -549,7 +573,7 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
# endif
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static const char kDyldInsertLibraries[] = "DYLD_INSERT_LIBRARIES";
LowLevelAllocator allocator_for_env;
@@ -740,6 +764,96 @@ char **GetArgv() {
return *_NSGetArgv();
}
+uptr FindAvailableMemoryRange(uptr shadow_size,
+ uptr alignment,
+ uptr left_padding) {
+ typedef vm_region_submap_short_info_data_64_t RegionInfo;
+ enum { kRegionInfoSize = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 };
+  // Start searching for an available memory region past PAGEZERO, which is
+ // 4KB on 32-bit and 4GB on 64-bit.
+ mach_vm_address_t start_address =
+ (SANITIZER_WORDSIZE == 32) ? 0x000000001000 : 0x000100000000;
+
+ mach_vm_address_t address = start_address;
+ mach_vm_address_t free_begin = start_address;
+ kern_return_t kr = KERN_SUCCESS;
+ while (kr == KERN_SUCCESS) {
+ mach_vm_size_t vmsize = 0;
+ natural_t depth = 0;
+ RegionInfo vminfo;
+ mach_msg_type_number_t count = kRegionInfoSize;
+ kr = mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth,
+ (vm_region_info_t)&vminfo, &count);
+ if (free_begin != address) {
+ // We found a free region [free_begin..address-1].
+ uptr shadow_address = RoundUpTo((uptr)free_begin + left_padding,
+ alignment);
+ if (shadow_address + shadow_size < (uptr)address) {
+ return shadow_address;
+ }
+ }
+ // Move to the next region.
+ address += vmsize;
+ free_begin = address;
+ }
+
+ // We looked at all free regions and could not find one large enough.
+ return 0;
+}
+
+// FIXME implement on this platform.
+void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { }
+
+void SignalContext::DumpAllRegisters(void *context) {
+ Report("Register values:\n");
+
+ ucontext_t *ucontext = (ucontext_t*)context;
+# define DUMPREG64(r) \
+ Printf("%s = 0x%016llx ", #r, ucontext->uc_mcontext->__ss.__ ## r);
+# define DUMPREG32(r) \
+ Printf("%s = 0x%08x ", #r, ucontext->uc_mcontext->__ss.__ ## r);
+# define DUMPREG_(r) Printf(" "); DUMPREG(r);
+# define DUMPREG__(r) Printf(" "); DUMPREG(r);
+# define DUMPREG___(r) Printf(" "); DUMPREG(r);
+
+# if defined(__x86_64__)
+# define DUMPREG(r) DUMPREG64(r)
+ DUMPREG(rax); DUMPREG(rbx); DUMPREG(rcx); DUMPREG(rdx); Printf("\n");
+ DUMPREG(rdi); DUMPREG(rsi); DUMPREG(rbp); DUMPREG(rsp); Printf("\n");
+ DUMPREG_(r8); DUMPREG_(r9); DUMPREG(r10); DUMPREG(r11); Printf("\n");
+ DUMPREG(r12); DUMPREG(r13); DUMPREG(r14); DUMPREG(r15); Printf("\n");
+# elif defined(__i386__)
+# define DUMPREG(r) DUMPREG32(r)
+ DUMPREG(eax); DUMPREG(ebx); DUMPREG(ecx); DUMPREG(edx); Printf("\n");
+ DUMPREG(edi); DUMPREG(esi); DUMPREG(ebp); DUMPREG(esp); Printf("\n");
+# elif defined(__aarch64__)
+# define DUMPREG(r) DUMPREG64(r)
+ DUMPREG_(x[0]); DUMPREG_(x[1]); DUMPREG_(x[2]); DUMPREG_(x[3]); Printf("\n");
+ DUMPREG_(x[4]); DUMPREG_(x[5]); DUMPREG_(x[6]); DUMPREG_(x[7]); Printf("\n");
+ DUMPREG_(x[8]); DUMPREG_(x[9]); DUMPREG(x[10]); DUMPREG(x[11]); Printf("\n");
+ DUMPREG(x[12]); DUMPREG(x[13]); DUMPREG(x[14]); DUMPREG(x[15]); Printf("\n");
+ DUMPREG(x[16]); DUMPREG(x[17]); DUMPREG(x[18]); DUMPREG(x[19]); Printf("\n");
+ DUMPREG(x[20]); DUMPREG(x[21]); DUMPREG(x[22]); DUMPREG(x[23]); Printf("\n");
+ DUMPREG(x[24]); DUMPREG(x[25]); DUMPREG(x[26]); DUMPREG(x[27]); Printf("\n");
+ DUMPREG(x[28]); DUMPREG___(fp); DUMPREG___(lr); DUMPREG___(sp); Printf("\n");
+# elif defined(__arm__)
+# define DUMPREG(r) DUMPREG32(r)
+ DUMPREG_(r[0]); DUMPREG_(r[1]); DUMPREG_(r[2]); DUMPREG_(r[3]); Printf("\n");
+ DUMPREG_(r[4]); DUMPREG_(r[5]); DUMPREG_(r[6]); DUMPREG_(r[7]); Printf("\n");
+ DUMPREG_(r[8]); DUMPREG_(r[9]); DUMPREG(r[10]); DUMPREG(r[11]); Printf("\n");
+ DUMPREG(r[12]); DUMPREG___(sp); DUMPREG___(lr); DUMPREG___(pc); Printf("\n");
+# else
+# error "Unknown architecture"
+# endif
+
+# undef DUMPREG64
+# undef DUMPREG32
+# undef DUMPREG_
+# undef DUMPREG__
+# undef DUMPREG___
+# undef DUMPREG
+}
+
} // namespace __sanitizer
#endif // SANITIZER_MAC
diff --git a/lib/sanitizer_common/sanitizer_mac.h b/lib/sanitizer_common/sanitizer_mac.h
index 6e2b84f432e5..636d9bfeac8c 100644
--- a/lib/sanitizer_common/sanitizer_mac.h
+++ b/lib/sanitizer_common/sanitizer_mac.h
@@ -39,17 +39,21 @@ char **GetEnviron();
} // namespace __sanitizer
extern "C" {
-static char __crashreporter_info_buff__[kErrorMessageBufferSize] = {};
+static char __crashreporter_info_buff__[__sanitizer::kErrorMessageBufferSize] =
+ {};
static const char *__crashreporter_info__ __attribute__((__used__)) =
&__crashreporter_info_buff__[0];
asm(".desc ___crashreporter_info__, 0x10");
} // extern "C"
+
+namespace __sanitizer {
static BlockingMutex crashreporter_info_mutex(LINKER_INITIALIZED);
INLINE void CRAppendCrashLogMessage(const char *msg) {
BlockingMutexLock l(&crashreporter_info_mutex);
internal_strlcat(__crashreporter_info_buff__, msg,
sizeof(__crashreporter_info_buff__)); }
+} // namespace __sanitizer
#endif // SANITIZER_MAC
#endif // SANITIZER_MAC_H
diff --git a/lib/sanitizer_common/sanitizer_malloc_mac.inc b/lib/sanitizer_common/sanitizer_malloc_mac.inc
index 149857c168c6..6fbee07c16cc 100644
--- a/lib/sanitizer_common/sanitizer_malloc_mac.inc
+++ b/lib/sanitizer_common/sanitizer_malloc_mac.inc
@@ -46,9 +46,45 @@ INTERCEPTOR(malloc_zone_t *, malloc_create_zone,
// This matches the behavior of malloc_create_zone() on OSX 10.7 and higher.
mprotect(new_zone, allocated_size, PROT_READ);
}
+ // We're explicitly *NOT* registering the zone.
return new_zone;
}
+INTERCEPTOR(void, malloc_destroy_zone, malloc_zone_t *zone) {
+ COMMON_MALLOC_ENTER();
+ // We don't need to do anything here. We're not registering new zones, so we
+  // don't need to unregister. Just un-mprotect and free() the zone.
+ if (GetMacosVersion() >= MACOS_VERSION_LION) {
+ uptr page_size = GetPageSizeCached();
+ uptr allocated_size = RoundUpTo(sizeof(sanitizer_zone), page_size);
+ mprotect(zone, allocated_size, PROT_READ | PROT_WRITE);
+ }
+ COMMON_MALLOC_FREE(zone);
+}
+
+extern unsigned malloc_num_zones;
+extern malloc_zone_t **malloc_zones;
+
+// We need to make sure that sanitizer_zone is registered as malloc_zones[0]. If
+// libmalloc tries to set up a different zone as malloc_zones[0], it will call
+// mprotect(malloc_zones, ..., PROT_READ). This interceptor will catch that and
+// make sure we are still the first (default) zone.
+INTERCEPTOR(int, mprotect, void *addr, size_t len, int prot) {
+ if (addr == malloc_zones && prot == PROT_READ) {
+ if (malloc_num_zones > 1 && malloc_zones[0] != &sanitizer_zone) {
+ for (unsigned i = 1; i < malloc_num_zones; i++) {
+ if (malloc_zones[i] == &sanitizer_zone) {
+ // Swap malloc_zones[0] and malloc_zones[i].
+ malloc_zones[i] = malloc_zones[0];
+ malloc_zones[0] = &sanitizer_zone;
+ break;
+ }
+ }
+ }
+ }
+ return REAL(mprotect)(addr, len, prot);
+}
+
INTERCEPTOR(malloc_zone_t *, malloc_default_zone, void) {
COMMON_MALLOC_ENTER();
return &sanitizer_zone;
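
The new mprotect interceptor above keeps sanitizer_zone installed as
malloc_zones[0] so it remains the default zone even after libmalloc
re-protects the zone array. A small check of which zone is currently the
default on macOS, using only public malloc/malloc.h calls (illustrative, not
part of the patch; under ASan the reported name should be the sanitizer's
zone):

    #include <malloc/malloc.h>
    #include <cstdio>

    int main() {
      malloc_zone_t *zone = malloc_default_zone();
      const char *name = malloc_get_zone_name(zone);
      std::printf("default malloc zone: %s\n", name ? name : "<unnamed>");
      return 0;
    }
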
diff --git a/lib/sanitizer_common/sanitizer_platform.h b/lib/sanitizer_common/sanitizer_platform.h
index 0ce23077a5ea..d9a8e8df1573 100644
--- a/lib/sanitizer_common/sanitizer_platform.h
+++ b/lib/sanitizer_common/sanitizer_platform.h
@@ -168,7 +168,9 @@
// For such platforms build this code with -DSANITIZER_CAN_USE_ALLOCATOR64=0 or
// change the definition of SANITIZER_CAN_USE_ALLOCATOR64 here.
#ifndef SANITIZER_CAN_USE_ALLOCATOR64
-# if defined(__mips64) || defined(__aarch64__)
+# if SANITIZER_ANDROID && defined(__aarch64__)
+# define SANITIZER_CAN_USE_ALLOCATOR64 1
+# elif defined(__mips64) || defined(__aarch64__)
# define SANITIZER_CAN_USE_ALLOCATOR64 0
# else
# define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64)
@@ -247,4 +249,8 @@
#define SANITIZER_NLDBL_VERSION "GLIBC_2.4"
#endif
+#if SANITIZER_GO == 0
+# define SANITIZER_GO 0
+#endif
+
#endif // SANITIZER_PLATFORM_H
diff --git a/lib/sanitizer_common/sanitizer_platform_interceptors.h b/lib/sanitizer_common/sanitizer_platform_interceptors.h
index a4afc0f12bdb..c4f90aec942e 100644
--- a/lib/sanitizer_common/sanitizer_platform_interceptors.h
+++ b/lib/sanitizer_common/sanitizer_platform_interceptors.h
@@ -85,14 +85,7 @@
#define SANITIZER_INTERCEPT_MEMCMP 1
// FIXME: enable memmem on Windows.
#define SANITIZER_INTERCEPT_MEMMEM SI_NOT_WINDOWS
-// The function memchr() contains a jump in the first 6 bytes
-// that is problematic to intercept correctly on Win64.
-// Disable memchr() interception for Win64.
-#if SANITIZER_WINDOWS64
-#define SANITIZER_INTERCEPT_MEMCHR 0
-#else
#define SANITIZER_INTERCEPT_MEMCHR 1
-#endif
#define SANITIZER_INTERCEPT_MEMRCHR SI_FREEBSD || SI_LINUX
#define SANITIZER_INTERCEPT_READ SI_NOT_WINDOWS
@@ -242,6 +235,7 @@
#define SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_TMPNAM SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_TMPNAM_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_TTYNAME_R SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_TEMPNAM SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_SINCOS SI_LINUX
#define SANITIZER_INTERCEPT_REMQUO SI_NOT_WINDOWS
@@ -318,4 +312,8 @@
#define SANITIZER_INTERCEPT___XSTAT64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT___LXSTAT SANITIZER_INTERCEPT___XSTAT
#define SANITIZER_INTERCEPT___LXSTAT64 SI_LINUX_NOT_ANDROID
+
+#define SANITIZER_INTERCEPT_UTMP SI_NOT_WINDOWS && !SI_MAC && !SI_FREEBSD
+#define SANITIZER_INTERCEPT_UTMPX SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD
+
#endif // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H
diff --git a/lib/sanitizer_common/sanitizer_platform_limits_linux.cc b/lib/sanitizer_common/sanitizer_platform_limits_linux.cc
index ed16f639c4e6..46e3b181304e 100644
--- a/lib/sanitizer_common/sanitizer_platform_limits_linux.cc
+++ b/lib/sanitizer_common/sanitizer_platform_limits_linux.cc
@@ -38,6 +38,7 @@
#define uid_t __kernel_uid_t
#define gid_t __kernel_gid_t
#define off_t __kernel_off_t
+#define time_t __kernel_time_t
// This header seems to contain the definitions of _kernel_ stat* structs.
#include <asm/stat.h>
#undef ino_t
@@ -55,6 +56,8 @@
#include <linux/perf_event.h>
#endif
+using namespace __sanitizer;
+
namespace __sanitizer {
#if !SANITIZER_ANDROID
unsigned struct_statfs64_sz = sizeof(struct statfs64);
@@ -62,7 +65,8 @@ namespace __sanitizer {
} // namespace __sanitizer
#if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__aarch64__)\
- && !defined(__mips__) && !defined(__s390__)
+ && !defined(__mips__) && !defined(__s390__)\
+ && !defined(__sparc__)
COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
#endif
diff --git a/lib/sanitizer_common/sanitizer_platform_limits_posix.cc b/lib/sanitizer_common/sanitizer_platform_limits_posix.cc
index 137cd9a3c52f..fbde5e17dc63 100644
--- a/lib/sanitizer_common/sanitizer_platform_limits_posix.cc
+++ b/lib/sanitizer_common/sanitizer_platform_limits_posix.cc
@@ -51,6 +51,9 @@
#include <termios.h>
#include <time.h>
#include <wchar.h>
+#if !SANITIZER_MAC && !SANITIZER_FREEBSD
+#include <utmp.h>
+#endif
#if !SANITIZER_IOS
#include <net/route.h>
@@ -59,6 +62,7 @@
#if !SANITIZER_ANDROID
#include <sys/mount.h>
#include <sys/timeb.h>
+#include <utmpx.h>
#endif
#if SANITIZER_LINUX
@@ -284,6 +288,13 @@ namespace __sanitizer {
int shmctl_shm_stat = (int)SHM_STAT;
#endif
+#if !SANITIZER_MAC && !SANITIZER_FREEBSD
+ unsigned struct_utmp_sz = sizeof(struct utmp);
+#endif
+#if !SANITIZER_ANDROID
+ unsigned struct_utmpx_sz = sizeof(struct utmpx);
+#endif
+
int map_fixed = MAP_FIXED;
int af_inet = (int)AF_INET;
@@ -937,6 +948,8 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
const int si_SEGV_ACCERR = SEGV_ACCERR;
} // namespace __sanitizer
+using namespace __sanitizer;
+
COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t));
COMPILER_CHECK(sizeof(socklen_t) == sizeof(unsigned));
diff --git a/lib/sanitizer_common/sanitizer_platform_limits_posix.h b/lib/sanitizer_common/sanitizer_platform_limits_posix.h
index 14bc75046a54..477a0ecbe0e4 100644
--- a/lib/sanitizer_common/sanitizer_platform_limits_posix.h
+++ b/lib/sanitizer_common/sanitizer_platform_limits_posix.h
@@ -20,13 +20,17 @@
#if SANITIZER_FREEBSD
// FreeBSD's dlopen() returns a pointer to an Obj_Entry structure that
-// incroporates the map structure.
+// incorporates the map structure.
# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
((link_map*)((handle) == nullptr ? nullptr : ((char*)(handle) + 544)))
#else
# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) ((link_map*)(handle))
#endif // !SANITIZER_FREEBSD
+#ifndef __GLIBC_PREREQ
+#define __GLIBC_PREREQ(x, y) 0
+#endif
+
namespace __sanitizer {
extern unsigned struct_utsname_sz;
extern unsigned struct_stat_sz;
@@ -87,6 +91,14 @@ namespace __sanitizer {
#elif defined(__s390x__)
const unsigned struct_kernel_stat_sz = 144;
const unsigned struct_kernel_stat64_sz = 0;
+#elif defined(__sparc__) && defined(__arch64__)
+ const unsigned struct___old_kernel_stat_sz = 0;
+ const unsigned struct_kernel_stat_sz = 104;
+ const unsigned struct_kernel_stat64_sz = 144;
+#elif defined(__sparc__) && !defined(__arch64__)
+ const unsigned struct___old_kernel_stat_sz = 0;
+ const unsigned struct_kernel_stat_sz = 64;
+ const unsigned struct_kernel_stat64_sz = 104;
#endif
struct __sanitizer_perf_event_attr {
unsigned type;
@@ -109,7 +121,7 @@ namespace __sanitizer {
#if defined(__powerpc64__) || defined(__s390__)
const unsigned struct___old_kernel_stat_sz = 0;
-#else
+#elif !defined(__sparc__)
const unsigned struct___old_kernel_stat_sz = 32;
#endif
@@ -194,6 +206,18 @@ namespace __sanitizer {
unsigned __seq;
u64 __unused1;
u64 __unused2;
+#elif defined(__sparc__)
+#if defined(__arch64__)
+ unsigned mode;
+ unsigned short __pad1;
+#else
+ unsigned short __pad1;
+ unsigned short mode;
+ unsigned short __pad2;
+#endif
+ unsigned short __seq;
+ unsigned long long __unused1;
+ unsigned long long __unused2;
#elif defined(__mips__) || defined(__aarch64__) || defined(__s390x__)
unsigned int mode;
unsigned short __seq;
@@ -217,6 +241,26 @@ namespace __sanitizer {
struct __sanitizer_shmid_ds {
__sanitizer_ipc_perm shm_perm;
+ #if defined(__sparc__)
+ #if !defined(__arch64__)
+ u32 __pad1;
+ #endif
+ long shm_atime;
+ #if !defined(__arch64__)
+ u32 __pad2;
+ #endif
+ long shm_dtime;
+ #if !defined(__arch64__)
+ u32 __pad3;
+ #endif
+ long shm_ctime;
+ uptr shm_segsz;
+ int shm_cpid;
+ int shm_lpid;
+ unsigned long shm_nattch;
+ unsigned long __glibc_reserved1;
+ unsigned long __glibc_reserved2;
+ #else
#ifndef __powerpc__
uptr shm_segsz;
#elif !defined(__powerpc64__)
@@ -254,6 +298,7 @@ namespace __sanitizer {
uptr __unused4;
uptr __unused5;
#endif
+#endif
};
#elif SANITIZER_FREEBSD
struct __sanitizer_ipc_perm {
@@ -588,7 +633,18 @@ namespace __sanitizer {
__sanitizer_sigset_t sa_mask;
#endif
#ifndef __mips__
+#if defined(__sparc__)
+#if __GLIBC_PREREQ (2, 20)
+ // On sparc glibc 2.19 and earlier sa_flags was unsigned long, and
+ // __glibc_reserved0 didn't exist.
+ int __glibc_reserved0;
int sa_flags;
+#else
+ unsigned long sa_flags;
+#endif
+#else
+ int sa_flags;
+#endif
#endif
#endif
#if SANITIZER_LINUX
@@ -607,7 +663,7 @@ namespace __sanitizer {
typedef __sanitizer_sigset_t __sanitizer_kernel_sigset_t;
#elif defined(__mips__)
struct __sanitizer_kernel_sigset_t {
- u8 sig[16];
+ uptr sig[2];
};
#else
struct __sanitizer_kernel_sigset_t {
@@ -616,6 +672,17 @@ namespace __sanitizer {
#endif
// Linux system headers define the 'sa_handler' and 'sa_sigaction' macros.
+#if SANITIZER_MIPS
+ struct __sanitizer_kernel_sigaction_t {
+ unsigned int sa_flags;
+ union {
+ void (*handler)(int signo);
+ void (*sigaction)(int signo, void *info, void *ctx);
+ };
+ __sanitizer_kernel_sigset_t sa_mask;
+ void (*sa_restorer)(void);
+ };
+#else
struct __sanitizer_kernel_sigaction_t {
union {
void (*handler)(int signo);
@@ -625,6 +692,7 @@ namespace __sanitizer {
void (*sa_restorer)(void);
__sanitizer_kernel_sigset_t sa_mask;
};
+#endif
extern uptr sig_ign;
extern uptr sig_dfl;
@@ -794,6 +862,13 @@ namespace __sanitizer {
extern int shmctl_shm_stat;
#endif
+#if !SANITIZER_MAC && !SANITIZER_FREEBSD
+ extern unsigned struct_utmp_sz;
+#endif
+#if !SANITIZER_ANDROID
+ extern unsigned struct_utmpx_sz;
+#endif
+
extern int map_fixed;
// ioctl arguments
@@ -839,7 +914,8 @@ struct __sanitizer_cookie_io_functions_t {
#define IOC_NRBITS 8
#define IOC_TYPEBITS 8
-#if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__)
+#if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__) || \
+ defined(__sparc__)
#define IOC_SIZEBITS 13
#define IOC_DIRBITS 3
#define IOC_NONE 1U
@@ -869,7 +945,16 @@ struct __sanitizer_cookie_io_functions_t {
#define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)
#define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)
#define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)
+
+#if defined(__sparc__)
+// On sparc the 14-bit SIZE field overlaps with the
+// least significant bit of DIR, so either IOC_READ or
+// IOC_WRITE must be set in order to get a non-zero SIZE.
+#define IOC_SIZE(nr) \
+ ((((((nr) >> 29) & 0x7) & (4U | 2U)) == 0) ? 0 : (((nr) >> 16) & 0x3fff))
+#else
#define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)
+#endif
extern unsigned struct_ifreq_sz;
extern unsigned struct_termios_sz;
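
A worked example of the sparc IOC_SIZE rule added above: the top three bits of
an ioctl request hold DIR, and a naive 14-bit SIZE field would reach up into
the lowest DIR bit, so SIZE is only trusted when a READ or WRITE direction bit
is present. The request values below are made up purely for illustration:

    #include <cstdio>

    static unsigned SparcIocSize(unsigned nr) {
      unsigned dir = (nr >> 29) & 0x7;
      return ((dir & (4u | 2u)) == 0) ? 0 : ((nr >> 16) & 0x3fff);
    }

    int main() {
      // Direction bit set (READ or WRITE): SIZE decodes to 0x123.
      unsigned with_dir = (2u << 29) | (0x123u << 16) | ('t' << 8) | 0x42;
      // IOC_NONE (1): bit 29 would leak into a naive 14-bit SIZE (0x2123),
      // so the macro reports 0 instead.
      unsigned no_dir = (1u << 29) | (0x123u << 16) | ('t' << 8) | 0x42;
      std::printf("size=0x%x size=0x%x\n", SparcIocSize(with_dir),
                  SparcIocSize(no_dir));  // prints size=0x123 size=0x0
      return 0;
    }
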
diff --git a/lib/sanitizer_common/sanitizer_posix_libcdep.cc b/lib/sanitizer_common/sanitizer_posix_libcdep.cc
index f1e8b50a2cf6..dd62140b5e07 100644
--- a/lib/sanitizer_common/sanitizer_posix_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_posix_libcdep.cc
@@ -44,6 +44,8 @@
#define MAP_NORESERVE 0
#endif
+typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
+
namespace __sanitizer {
u32 GetUid() {
@@ -54,8 +56,12 @@ uptr GetThreadSelf() {
return (uptr)pthread_self();
}
-void FlushUnneededShadowMemory(uptr addr, uptr size) {
- madvise((void*)addr, size, MADV_DONTNEED);
+void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
+ uptr page_size = GetPageSizeCached();
+ uptr beg_aligned = RoundUpTo(beg, page_size);
+ uptr end_aligned = RoundDownTo(end, page_size);
+ if (beg_aligned < end_aligned)
+ madvise((void*)beg_aligned, end_aligned - beg_aligned, MADV_DONTNEED);
}
void NoHugePagesInRegion(uptr addr, uptr size) {
@@ -126,11 +132,21 @@ void SleepForMillis(int millis) {
}
void Abort() {
+#if !SANITIZER_GO
+ // If we are handling SIGABRT, unhandle it first.
+ if (IsHandledDeadlySignal(SIGABRT)) {
+ struct sigaction sigact;
+ internal_memset(&sigact, 0, sizeof(sigact));
+ sigact.sa_sigaction = (sa_sigaction_t)SIG_DFL;
+ internal_sigaction(SIGABRT, &sigact, nullptr);
+ }
+#endif
+
abort();
}
int Atexit(void (*function)(void)) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
return atexit(function);
#else
return 0;
@@ -141,7 +157,7 @@ bool SupportsColoredOutput(fd_t fd) {
return isatty(fd) != 0;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// TODO(glider): different tools may require different altstack size.
static const uptr kAltStackSize = SIGSTKSZ * 4; // SIGSTKSZ is not enough.
@@ -170,7 +186,6 @@ void UnsetAlternateSignalStack() {
UnmapOrDie(oldstack.ss_sp, oldstack.ss_size);
}
-typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
static void MaybeInstallSigaction(int signum,
SignalHandlerType handler) {
if (!IsHandledDeadlySignal(signum))
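
The new ReleaseMemoryPagesToOS above rounds the start up and the end down so
that only pages lying entirely inside [beg, end) are handed back to the
kernel. A standalone sketch of the same alignment rule with plain POSIX calls
(illustrative; the mapping and offsets are made up):

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstdint>
    #include <cstdio>

    static void ReleaseRange(uintptr_t beg, uintptr_t end) {
      uintptr_t page = (uintptr_t)sysconf(_SC_PAGESIZE);
      uintptr_t beg_aligned = (beg + page - 1) & ~(page - 1);  // round up
      uintptr_t end_aligned = end & ~(page - 1);               // round down
      if (beg_aligned < end_aligned)
        madvise((void *)beg_aligned, end_aligned - beg_aligned, MADV_DONTNEED);
    }

    int main() {
      const size_t len = 1 << 20;
      void *p = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED) return 1;
      // Release everything except the first and last 100 bytes; the partially
      // covered boundary pages are left untouched.
      ReleaseRange((uintptr_t)p + 100, (uintptr_t)p + len - 100);
      munmap(p, len);
      return 0;
    }
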
diff --git a/lib/sanitizer_common/sanitizer_printf.cc b/lib/sanitizer_common/sanitizer_printf.cc
index 434ebb93dffa..f394e75b05e6 100644
--- a/lib/sanitizer_common/sanitizer_printf.cc
+++ b/lib/sanitizer_common/sanitizer_printf.cc
@@ -213,7 +213,7 @@ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void OnPrint(const char *str) {
(void)str;
}
-#elif defined(SANITIZER_GO) && defined(TSAN_EXTERNAL_HOOKS)
+#elif SANITIZER_GO && defined(TSAN_EXTERNAL_HOOKS)
void OnPrint(const char *str);
#else
void OnPrint(const char *str) {
diff --git a/lib/sanitizer_common/sanitizer_procmaps.h b/lib/sanitizer_common/sanitizer_procmaps.h
index 1fe59ab89532..5c26fb77e686 100644
--- a/lib/sanitizer_common/sanitizer_procmaps.h
+++ b/lib/sanitizer_common/sanitizer_procmaps.h
@@ -35,8 +35,9 @@ class MemoryMappingLayout {
public:
explicit MemoryMappingLayout(bool cache_enabled);
~MemoryMappingLayout();
- bool Next(uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size, uptr *protection);
+ bool Next(uptr *start, uptr *end, uptr *offset, char filename[],
+ uptr filename_size, uptr *protection, ModuleArch *arch = nullptr,
+ u8 *uuid = nullptr);
void Reset();
// In some cases, e.g. when running under a sandbox on Linux, ASan is unable
// to obtain the memory mappings. It should fall back to pre-cached data
@@ -65,13 +66,15 @@ class MemoryMappingLayout {
static ProcSelfMapsBuff cached_proc_self_maps_;
static StaticSpinMutex cache_lock_; // protects cached_proc_self_maps_.
# elif SANITIZER_MAC
- template<u32 kLCSegment, typename SegmentCommand>
- bool NextSegmentLoad(uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size,
+ template <u32 kLCSegment, typename SegmentCommand>
+ bool NextSegmentLoad(uptr *start, uptr *end, uptr *offset, char filename[],
+ uptr filename_size, ModuleArch *arch, u8 *uuid,
uptr *protection);
int current_image_;
u32 current_magic_;
u32 current_filetype_;
+ ModuleArch current_arch_;
+ u8 current_uuid_[kModuleUUIDSize];
int current_load_cmd_count_;
char *current_load_cmd_addr_;
# endif
diff --git a/lib/sanitizer_common/sanitizer_procmaps_freebsd.cc b/lib/sanitizer_common/sanitizer_procmaps_freebsd.cc
index 5011b1ff14b2..30216456330e 100644
--- a/lib/sanitizer_common/sanitizer_procmaps_freebsd.cc
+++ b/lib/sanitizer_common/sanitizer_procmaps_freebsd.cc
@@ -50,7 +50,9 @@ void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
char filename[], uptr filename_size,
- uptr *protection) {
+ uptr *protection, ModuleArch *arch, u8 *uuid) {
+ CHECK(!arch && "not implemented");
+ CHECK(!uuid && "not implemented");
char *last = proc_self_maps_.data + proc_self_maps_.len;
if (current_ >= last) return false;
uptr dummy;
diff --git a/lib/sanitizer_common/sanitizer_procmaps_linux.cc b/lib/sanitizer_common/sanitizer_procmaps_linux.cc
index b6fb7034ded4..fdf85b77a680 100644
--- a/lib/sanitizer_common/sanitizer_procmaps_linux.cc
+++ b/lib/sanitizer_common/sanitizer_procmaps_linux.cc
@@ -28,7 +28,9 @@ static bool IsOneOf(char c, char c1, char c2) {
bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
char filename[], uptr filename_size,
- uptr *protection) {
+ uptr *protection, ModuleArch *arch, u8 *uuid) {
+ CHECK(!arch && "not implemented");
+ CHECK(!uuid && "not implemented");
char *last = proc_self_maps_.data + proc_self_maps_.len;
if (current_ >= last) return false;
uptr dummy;
diff --git a/lib/sanitizer_common/sanitizer_procmaps_mac.cc b/lib/sanitizer_common/sanitizer_procmaps_mac.cc
index 417cc908e247..0dc299c0553a 100644
--- a/lib/sanitizer_common/sanitizer_procmaps_mac.cc
+++ b/lib/sanitizer_common/sanitizer_procmaps_mac.cc
@@ -19,6 +19,20 @@
#include <mach-o/dyld.h>
#include <mach-o/loader.h>
+// These are not available in older macOS SDKs.
+#ifndef CPU_SUBTYPE_X86_64_H
+#define CPU_SUBTYPE_X86_64_H ((cpu_subtype_t)8) /* Haswell */
+#endif
+#ifndef CPU_SUBTYPE_ARM_V7S
+#define CPU_SUBTYPE_ARM_V7S ((cpu_subtype_t)11) /* Swift */
+#endif
+#ifndef CPU_SUBTYPE_ARM_V7K
+#define CPU_SUBTYPE_ARM_V7K ((cpu_subtype_t)12)
+#endif
+#ifndef CPU_TYPE_ARM64
+#define CPU_TYPE_ARM64 (CPU_TYPE_ARM | CPU_ARCH_ABI64)
+#endif
+
namespace __sanitizer {
MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
@@ -53,6 +67,8 @@ void MemoryMappingLayout::Reset() {
current_load_cmd_addr_ = 0;
current_magic_ = 0;
current_filetype_ = 0;
+ current_arch_ = kModuleArchUnknown;
+ internal_memset(current_uuid_, 0, kModuleUUIDSize);
}
// static
@@ -71,11 +87,12 @@ void MemoryMappingLayout::LoadFromCache() {
// and returns the start and end addresses and file offset of the corresponding
// segment.
// Note that the segment addresses are not necessarily sorted.
-template<u32 kLCSegment, typename SegmentCommand>
-bool MemoryMappingLayout::NextSegmentLoad(
- uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size, uptr *protection) {
- const char* lc = current_load_cmd_addr_;
+template <u32 kLCSegment, typename SegmentCommand>
+bool MemoryMappingLayout::NextSegmentLoad(uptr *start, uptr *end, uptr *offset,
+ char filename[], uptr filename_size,
+ ModuleArch *arch, u8 *uuid,
+ uptr *protection) {
+ const char *lc = current_load_cmd_addr_;
current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize;
if (((const load_command *)lc)->cmd == kLCSegment) {
const sptr dlloff = _dyld_get_image_vmaddr_slide(current_image_);
@@ -97,14 +114,61 @@ bool MemoryMappingLayout::NextSegmentLoad(
internal_strncpy(filename, _dyld_get_image_name(current_image_),
filename_size);
}
+ if (arch) {
+ *arch = current_arch_;
+ }
+ if (uuid) {
+ internal_memcpy(uuid, current_uuid_, kModuleUUIDSize);
+ }
return true;
}
return false;
}
+ModuleArch ModuleArchFromCpuType(cpu_type_t cputype, cpu_subtype_t cpusubtype) {
+ cpusubtype = cpusubtype & ~CPU_SUBTYPE_MASK;
+ switch (cputype) {
+ case CPU_TYPE_I386:
+ return kModuleArchI386;
+ case CPU_TYPE_X86_64:
+ if (cpusubtype == CPU_SUBTYPE_X86_64_ALL) return kModuleArchX86_64;
+ if (cpusubtype == CPU_SUBTYPE_X86_64_H) return kModuleArchX86_64H;
+ CHECK(0 && "Invalid subtype of x86_64");
+ return kModuleArchUnknown;
+ case CPU_TYPE_ARM:
+ if (cpusubtype == CPU_SUBTYPE_ARM_V6) return kModuleArchARMV6;
+ if (cpusubtype == CPU_SUBTYPE_ARM_V7) return kModuleArchARMV7;
+ if (cpusubtype == CPU_SUBTYPE_ARM_V7S) return kModuleArchARMV7S;
+ if (cpusubtype == CPU_SUBTYPE_ARM_V7K) return kModuleArchARMV7K;
+ CHECK(0 && "Invalid subtype of ARM");
+ return kModuleArchUnknown;
+ case CPU_TYPE_ARM64:
+ return kModuleArchARM64;
+ default:
+ CHECK(0 && "Invalid CPU type");
+ return kModuleArchUnknown;
+ }
+}
+
+static void FindUUID(const load_command *first_lc, u8 *uuid_output) {
+ const load_command *current_lc = first_lc;
+ while (1) {
+ if (current_lc->cmd == 0) return;
+ if (current_lc->cmd == LC_UUID) {
+ const uuid_command *uuid_lc = (const uuid_command *)current_lc;
+ const uint8_t *uuid = &uuid_lc->uuid[0];
+ internal_memcpy(uuid_output, uuid, kModuleUUIDSize);
+ return;
+ }
+
+ current_lc =
+ (const load_command *)(((char *)current_lc) + current_lc->cmdsize);
+ }
+}
+
bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
char filename[], uptr filename_size,
- uptr *protection) {
+ uptr *protection, ModuleArch *arch, u8 *uuid) {
for (; current_image_ >= 0; current_image_--) {
const mach_header* hdr = _dyld_get_image_header(current_image_);
if (!hdr) continue;
@@ -113,6 +177,7 @@ bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
current_load_cmd_count_ = hdr->ncmds;
current_magic_ = hdr->magic;
current_filetype_ = hdr->filetype;
+ current_arch_ = ModuleArchFromCpuType(hdr->cputype, hdr->cpusubtype);
switch (current_magic_) {
#ifdef MH_MAGIC_64
case MH_MAGIC_64: {
@@ -130,20 +195,24 @@ bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
}
}
+ FindUUID((const load_command *)current_load_cmd_addr_, &current_uuid_[0]);
+
for (; current_load_cmd_count_ >= 0; current_load_cmd_count_--) {
switch (current_magic_) {
// current_magic_ may be only one of MH_MAGIC, MH_MAGIC_64.
#ifdef MH_MAGIC_64
case MH_MAGIC_64: {
if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(
- start, end, offset, filename, filename_size, protection))
+ start, end, offset, filename, filename_size, arch, uuid,
+ protection))
return true;
break;
}
#endif
case MH_MAGIC: {
if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(
- start, end, offset, filename, filename_size, protection))
+ start, end, offset, filename, filename_size, arch, uuid,
+ protection))
return true;
break;
}
@@ -159,9 +228,11 @@ void MemoryMappingLayout::DumpListOfModules(
InternalMmapVector<LoadedModule> *modules) {
Reset();
uptr cur_beg, cur_end, prot;
+ ModuleArch cur_arch;
+ u8 cur_uuid[kModuleUUIDSize];
InternalScopedString module_name(kMaxPathLength);
for (uptr i = 0; Next(&cur_beg, &cur_end, 0, module_name.data(),
- module_name.size(), &prot);
+ module_name.size(), &prot, &cur_arch, &cur_uuid[0]);
i++) {
const char *cur_name = module_name.data();
if (cur_name[0] == '\0')
@@ -173,7 +244,7 @@ void MemoryMappingLayout::DumpListOfModules(
} else {
modules->push_back(LoadedModule());
cur_module = &modules->back();
- cur_module->set(cur_name, cur_beg);
+ cur_module->set(cur_name, cur_beg, cur_arch, cur_uuid);
}
cur_module->addAddressRange(cur_beg, cur_end, prot & kProtectionExecute);
}
diff --git a/lib/sanitizer_common/sanitizer_quarantine.h b/lib/sanitizer_common/sanitizer_quarantine.h
index ccc22bf0133c..ff8f3fa302da 100644
--- a/lib/sanitizer_common/sanitizer_quarantine.h
+++ b/lib/sanitizer_common/sanitizer_quarantine.h
@@ -56,6 +56,7 @@ class Quarantine {
}
uptr GetSize() const { return atomic_load(&max_size_, memory_order_acquire); }
+ uptr GetCacheSize() const { return max_cache_size_; }
void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
c->Enqueue(cb, ptr, size);
diff --git a/lib/sanitizer_common/sanitizer_stackdepot.cc b/lib/sanitizer_common/sanitizer_stackdepot.cc
index 985193d1ed5e..214dda56df34 100644
--- a/lib/sanitizer_common/sanitizer_stackdepot.cc
+++ b/lib/sanitizer_common/sanitizer_stackdepot.cc
@@ -153,9 +153,9 @@ StackTrace StackDepotReverseMap::Get(u32 id) {
if (!map_.size())
return StackTrace();
IdDescPair pair = {id, nullptr};
- uptr idx = InternalBinarySearch(map_, 0, map_.size(), pair,
- IdDescPair::IdComparator);
- if (idx > map_.size())
+ uptr idx =
+ InternalLowerBound(map_, 0, map_.size(), pair, IdDescPair::IdComparator);
+ if (idx > map_.size() || map_[idx].id != id)
return StackTrace();
return map_[idx].desc->load();
}
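
StackDepotReverseMap::Get now relies on InternalLowerBound, which returns the
first position whose element is not less than the key, so the result still has
to be checked for an exact id match (and for running off the end). The same
pattern with std::lower_bound, for illustration:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<unsigned> ids = {3, 7, 42};
      unsigned key = 10;
      auto it = std::lower_bound(ids.begin(), ids.end(), key);
      // lower_bound lands on 42 here, so the id check is what rejects the miss.
      if (it == ids.end() || *it != key)
        std::printf("id %u not present\n", key);
      else
        std::printf("found id %u\n", key);
      return 0;
    }
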
diff --git a/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cc b/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cc
index 59ca927fa5b6..36c98d057bd3 100644
--- a/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cc
@@ -82,4 +82,61 @@ void BufferedStackTrace::Unwind(u32 max_depth, uptr pc, uptr bp, void *context,
}
}
+static int GetModuleAndOffsetForPc(uptr pc, char *module_name,
+ uptr module_name_len, uptr *pc_offset) {
+ const char *found_module_name = nullptr;
+ bool ok = Symbolizer::GetOrInit()->GetModuleNameAndOffsetForPC(
+ pc, &found_module_name, pc_offset);
+
+ if (!ok) return false;
+
+ if (module_name && module_name_len) {
+ internal_strncpy(module_name, found_module_name, module_name_len);
+ module_name[module_name_len - 1] = '\x00';
+ }
+ return true;
+}
+
} // namespace __sanitizer
+using namespace __sanitizer;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_symbolize_pc(uptr pc, const char *fmt, char *out_buf,
+ uptr out_buf_size) {
+ if (!out_buf_size) return;
+ pc = StackTrace::GetPreviousInstructionPc(pc);
+ SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
+ if (!frame) {
+ internal_strncpy(out_buf, "<can't symbolize>", out_buf_size);
+ out_buf[out_buf_size - 1] = 0;
+ return;
+ }
+ InternalScopedString frame_desc(GetPageSizeCached());
+ RenderFrame(&frame_desc, fmt, 0, frame->info,
+ common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
+ internal_strncpy(out_buf, frame_desc.data(), out_buf_size);
+ out_buf[out_buf_size - 1] = 0;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_symbolize_global(uptr data_addr, const char *fmt,
+ char *out_buf, uptr out_buf_size) {
+ if (!out_buf_size) return;
+ out_buf[0] = 0;
+ DataInfo DI;
+ if (!Symbolizer::GetOrInit()->SymbolizeData(data_addr, &DI)) return;
+ InternalScopedString data_desc(GetPageSizeCached());
+ RenderData(&data_desc, fmt, &DI, common_flags()->strip_path_prefix);
+ internal_strncpy(out_buf, data_desc.data(), out_buf_size);
+ out_buf[out_buf_size - 1] = 0;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_get_module_and_offset_for_pc( // NOLINT
+ uptr pc, char *module_name, uptr module_name_len, uptr *pc_offset) {
+ return __sanitizer::GetModuleAndOffsetForPc(pc, module_name, module_name_len,
+ pc_offset);
+}
+} // extern "C"
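
The new __sanitizer_symbolize_pc entry point is also exposed to user code via
<sanitizer/common_interface_defs.h> (declared there with a void* pc). A
possible use, assuming the program is built with a sanitizer so the runtime is
linked in and assuming a GCC/Clang toolchain for __builtin_return_address; the
format placeholders are the RenderFrame ones (%p, %F, %L, ...):

    #include <sanitizer/common_interface_defs.h>
    #include <cstdio>

    static void ReportCaller() {
      char buf[256];
      // %p = PC in hex, %F = "in <function>", %L = file:line or module+offset.
      __sanitizer_symbolize_pc(__builtin_return_address(0), "%p %F %L", buf,
                               sizeof(buf));
      std::printf("ReportCaller invoked %s\n", buf);
    }

    int main() {
      ReportCaller();
      return 0;
    }
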
diff --git a/lib/sanitizer_common/sanitizer_stacktrace_printer.cc b/lib/sanitizer_common/sanitizer_stacktrace_printer.cc
index 669b0ba28265..6fba581bd964 100644
--- a/lib/sanitizer_common/sanitizer_stacktrace_printer.cc
+++ b/lib/sanitizer_common/sanitizer_stacktrace_printer.cc
@@ -116,6 +116,35 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
}
}
+void RenderData(InternalScopedString *buffer, const char *format,
+ const DataInfo *DI, const char *strip_path_prefix) {
+ for (const char *p = format; *p != '\0'; p++) {
+ if (*p != '%') {
+ buffer->append("%c", *p);
+ continue;
+ }
+ p++;
+ switch (*p) {
+ case '%':
+ buffer->append("%%");
+ break;
+ case 's':
+ buffer->append("%s", StripPathPrefix(DI->file, strip_path_prefix));
+ break;
+ case 'l':
+ buffer->append("%d", DI->line);
+ break;
+ case 'g':
+ buffer->append("%s", DI->name);
+ break;
+ default:
+ Report("Unsupported specifier in stack frame format: %c (0x%zx)!\n", *p,
+ *p);
+ Die();
+ }
+ }
+}
+
void RenderSourceLocation(InternalScopedString *buffer, const char *file,
int line, int column, bool vs_style,
const char *strip_path_prefix) {
diff --git a/lib/sanitizer_common/sanitizer_stacktrace_printer.h b/lib/sanitizer_common/sanitizer_stacktrace_printer.h
index 7f6c5c73b85d..7be1d1977dce 100644
--- a/lib/sanitizer_common/sanitizer_stacktrace_printer.h
+++ b/lib/sanitizer_common/sanitizer_stacktrace_printer.h
@@ -59,6 +59,13 @@ void RenderSourceLocation(InternalScopedString *buffer, const char *file,
void RenderModuleLocation(InternalScopedString *buffer, const char *module,
uptr offset, const char *strip_path_prefix);
+// Same as RenderFrame, but for data section (global variables).
+// Accepts %s, %l from above.
+// Also accepts:
+// %g - name of the global variable.
+void RenderData(InternalScopedString *buffer, const char *format,
+ const DataInfo *DI, const char *strip_path_prefix = "");
+
} // namespace __sanitizer
#endif // SANITIZER_STACKTRACE_PRINTER_H
diff --git a/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc b/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
index 1f8861f0516b..eb4c403d3de0 100644
--- a/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
@@ -79,10 +79,10 @@
// thread-local variables used by libc will be shared between the tracer task
// and the thread which spawned it.
-COMPILER_CHECK(sizeof(SuspendedThreadID) == sizeof(pid_t));
-
namespace __sanitizer {
+COMPILER_CHECK(sizeof(SuspendedThreadID) == sizeof(pid_t));
+
// Structure for passing arguments into the tracer thread.
struct TracerThreadArgument {
StopTheWorldCallback callback;
@@ -190,6 +190,7 @@ void ThreadSuspender::KillAllThreads() {
bool ThreadSuspender::SuspendAllThreads() {
ThreadLister thread_lister(pid_);
bool added_threads;
+ bool first_iteration = true;
do {
// Run through the directory entries once.
added_threads = false;
@@ -199,12 +200,13 @@ bool ThreadSuspender::SuspendAllThreads() {
added_threads = true;
tid = thread_lister.GetNextTID();
}
- if (thread_lister.error()) {
+ if (thread_lister.error() || (first_iteration && !added_threads)) {
// Detach threads and fail.
ResumeAllThreads();
return false;
}
thread_lister.Reset();
+ first_iteration = false;
} while (added_threads);
return true;
}
diff --git a/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc b/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc
index 36b4fa91f545..31506fe5c834 100644
--- a/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc
@@ -242,25 +242,21 @@ static const char *ParseFileLineInfo(AddressInfo *info, const char *str) {
char *file_line_info = 0;
str = ExtractToken(str, "\n", &file_line_info);
CHECK(file_line_info);
- // Parse the last :<int>, which must be there.
- char *last_colon = internal_strrchr(file_line_info, ':');
- CHECK(last_colon);
- int line_or_column = internal_atoll(last_colon + 1);
- // Truncate the string at the last colon and find the next-to-last colon.
- *last_colon = '\0';
- last_colon = internal_strrchr(file_line_info, ':');
- if (last_colon && IsDigit(last_colon[1])) {
- // If the second-to-last colon is followed by a digit, it must be the line
- // number, and the previous parsed number was a column.
- info->line = internal_atoll(last_colon + 1);
- info->column = line_or_column;
- *last_colon = '\0';
- } else {
- // Otherwise, we have line info but no column info.
- info->line = line_or_column;
- info->column = 0;
+
+ if (uptr size = internal_strlen(file_line_info)) {
+ char *back = file_line_info + size - 1;
+ for (int i = 0; i < 2; ++i) {
+ while (back > file_line_info && IsDigit(*back)) --back;
+ if (*back != ':' || !IsDigit(back[1])) break;
+ info->column = info->line;
+ info->line = internal_atoll(back + 1);
+ // Truncate the string at the colon to keep only filename.
+ *back = '\0';
+ --back;
+ }
+ ExtractToken(file_line_info, "", &info->file);
}
- ExtractToken(file_line_info, "", &info->file);
+
InternalFree(file_line_info);
return str;
}
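
The rewritten ParseFileLineInfo strips at most two trailing ":<digits>" groups
from the end of the string, which keeps working even when the file name itself
contains a colon. A standalone rendition of that loop on a made-up input:

    #include <cctype>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    int main() {
      char buf[] = "/src/my:file.cc:12:7";  // file name contains a ':'
      int line = 0, column = 0;
      for (int i = 0; i < 2; ++i) {
        char *back = buf + std::strlen(buf) - 1;
        while (back > buf && std::isdigit((unsigned char)*back)) --back;
        if (*back != ':' || !std::isdigit((unsigned char)back[1])) break;
        column = line;                // second pass shifts line into column
        line = std::atoi(back + 1);
        *back = '\0';                 // truncate at the colon
      }
      // Prints: file=/src/my:file.cc line=12 column=7
      std::printf("file=%s line=%d column=%d\n", buf, line, column);
      return 0;
    }
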
diff --git a/lib/sanitizer_common/sanitizer_symbolizer_mac.cc b/lib/sanitizer_common/sanitizer_symbolizer_mac.cc
index d591abca15df..f08cb9f97cf9 100644
--- a/lib/sanitizer_common/sanitizer_symbolizer_mac.cc
+++ b/lib/sanitizer_common/sanitizer_symbolizer_mac.cc
@@ -19,8 +19,6 @@
#include "sanitizer_mac.h"
#include "sanitizer_symbolizer_mac.h"
-namespace __sanitizer {
-
#include <dlfcn.h>
#include <errno.h>
#include <stdlib.h>
@@ -28,12 +26,15 @@ namespace __sanitizer {
#include <unistd.h>
#include <util.h>
+namespace __sanitizer {
+
bool DlAddrSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
Dl_info info;
int result = dladdr((const void *)addr, &info);
if (!result) return false;
const char *demangled = DemangleSwiftAndCXX(info.dli_sname);
- stack->info.function = demangled ? internal_strdup(demangled) : nullptr;
+ if (!demangled) return false;
+ stack->info.function = internal_strdup(demangled);
return true;
}
diff --git a/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc b/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
index 7028da656e15..f50d8b1840ab 100644
--- a/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
@@ -55,7 +55,7 @@ const char *DemangleCXXABI(const char *name) {
// own demangler (libc++abi's implementation could be adapted so that
// it does not allocate). For now, we just call it anyway, and we leak
// the returned value.
- if (__cxxabiv1::__cxa_demangle)
+ if (&__cxxabiv1::__cxa_demangle)
if (const char *demangled_name =
__cxxabiv1::__cxa_demangle(name, 0, 0, 0))
return demangled_name;
@@ -101,6 +101,46 @@ const char *DemangleSwiftAndCXX(const char *name) {
return DemangleCXXABI(name);
}
+static bool CreateTwoHighNumberedPipes(int *infd_, int *outfd_) {
+ int *infd = NULL;
+ int *outfd = NULL;
+ // The client program may close its stdin and/or stdout and/or stderr
+  // thus allowing pipe() to reuse file descriptors 0, 1 or 2.
+ // In this case the communication between the forked processes may be
+ // broken if either the parent or the child tries to close or duplicate
+ // these descriptors. The loop below produces two pairs of file
+ // descriptors, each greater than 2 (stderr).
+ int sock_pair[5][2];
+ for (int i = 0; i < 5; i++) {
+ if (pipe(sock_pair[i]) == -1) {
+ for (int j = 0; j < i; j++) {
+ internal_close(sock_pair[j][0]);
+ internal_close(sock_pair[j][1]);
+ }
+ return false;
+ } else if (sock_pair[i][0] > 2 && sock_pair[i][1] > 2) {
+ if (infd == NULL) {
+ infd = sock_pair[i];
+ } else {
+ outfd = sock_pair[i];
+ for (int j = 0; j < i; j++) {
+ if (sock_pair[j] == infd) continue;
+ internal_close(sock_pair[j][0]);
+ internal_close(sock_pair[j][1]);
+ }
+ break;
+ }
+ }
+ }
+ CHECK(infd);
+ CHECK(outfd);
+ infd_[0] = infd[0];
+ infd_[1] = infd[1];
+ outfd_[0] = outfd[0];
+ outfd_[1] = outfd[1];
+ return true;
+}
+
bool SymbolizerProcess::StartSymbolizerSubprocess() {
if (!FileExists(path_)) {
if (!reported_invalid_path_) {
@@ -110,7 +150,18 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() {
return false;
}
- int pid;
+ int pid = -1;
+
+ int infd[2];
+ internal_memset(&infd, 0, sizeof(infd));
+ int outfd[2];
+ internal_memset(&outfd, 0, sizeof(outfd));
+ if (!CreateTwoHighNumberedPipes(infd, outfd)) {
+ Report("WARNING: Can't create a socket pair to start "
+ "external symbolizer (errno: %d)\n", errno);
+ return false;
+ }
+
if (use_forkpty_) {
#if SANITIZER_MAC
fd_t fd = kInvalidFd;
@@ -121,6 +172,10 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() {
int saved_stderr = dup(STDERR_FILENO);
CHECK_GE(saved_stderr, 0);
+ // We only need one pipe, for stdin of the child.
+ close(outfd[0]);
+ close(outfd[1]);
+
// Use forkpty to disable buffering in the new terminal.
pid = internal_forkpty(&fd);
if (pid == -1) {
@@ -131,6 +186,13 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() {
} else if (pid == 0) {
// Child subprocess.
+ // infd[0] is the child's reading end.
+ close(infd[1]);
+
+ // Set up stdin to read from the pipe.
+ CHECK_GE(dup2(infd[0], STDIN_FILENO), 0);
+ close(infd[0]);
+
// Restore stderr.
CHECK_GE(dup2(saved_stderr, STDERR_FILENO), 0);
close(saved_stderr);
@@ -141,8 +203,12 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() {
internal__exit(1);
}
+ // Input for the child, infd[1] is our writing end.
+ output_fd_ = infd[1];
+ close(infd[0]);
+
// Continue execution in parent process.
- input_fd_ = output_fd_ = fd;
+ input_fd_ = fd;
close(saved_stderr);
@@ -156,41 +222,6 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() {
UNIMPLEMENTED();
#endif // SANITIZER_MAC
} else {
- int *infd = NULL;
- int *outfd = NULL;
- // The client program may close its stdin and/or stdout and/or stderr
- // thus allowing socketpair to reuse file descriptors 0, 1 or 2.
- // In this case the communication between the forked processes may be
- // broken if either the parent or the child tries to close or duplicate
- // these descriptors. The loop below produces two pairs of file
- // descriptors, each greater than 2 (stderr).
- int sock_pair[5][2];
- for (int i = 0; i < 5; i++) {
- if (pipe(sock_pair[i]) == -1) {
- for (int j = 0; j < i; j++) {
- internal_close(sock_pair[j][0]);
- internal_close(sock_pair[j][1]);
- }
- Report("WARNING: Can't create a socket pair to start "
- "external symbolizer (errno: %d)\n", errno);
- return false;
- } else if (sock_pair[i][0] > 2 && sock_pair[i][1] > 2) {
- if (infd == NULL) {
- infd = sock_pair[i];
- } else {
- outfd = sock_pair[i];
- for (int j = 0; j < i; j++) {
- if (sock_pair[j] == infd) continue;
- internal_close(sock_pair[j][0]);
- internal_close(sock_pair[j][1]);
- }
- break;
- }
- }
- }
- CHECK(infd);
- CHECK(outfd);
-
const char *argv[kArgVMax];
GetArgV(path_, argv);
pid = StartSubprocess(path_, argv, /* stdin */ outfd[0],
@@ -205,6 +236,8 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() {
output_fd_ = outfd[1];
}
+ CHECK_GT(pid, 0);
+
// Check that symbolizer subprocess started successfully.
SleepForMillis(kSymbolizerStartupTimeMillis);
if (!IsProcessRunning(pid)) {
@@ -463,7 +496,9 @@ static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
VReport(2, "Symbolizer is disabled.\n");
return;
}
- if (SymbolizerTool *tool = InternalSymbolizer::get(allocator)) {
+ if (IsReportingOOM()) {
+ VReport(2, "Cannot use internal symbolizer: out of memory\n");
+ } else if (SymbolizerTool *tool = InternalSymbolizer::get(allocator)) {
VReport(2, "Using internal symbolizer.\n");
list->push_back(tool);
return;
diff --git a/lib/sanitizer_common/sanitizer_symbolizer_win.cc b/lib/sanitizer_common/sanitizer_symbolizer_win.cc
index 3cb7e487012d..135823b157de 100644
--- a/lib/sanitizer_common/sanitizer_symbolizer_win.cc
+++ b/lib/sanitizer_common/sanitizer_symbolizer_win.cc
@@ -14,15 +14,24 @@
#include "sanitizer_platform.h"
#if SANITIZER_WINDOWS
-#define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-#include <dbghelp.h>
-#pragma comment(lib, "dbghelp.lib")
+#include "sanitizer_dbghelp.h"
#include "sanitizer_symbolizer_internal.h"
namespace __sanitizer {
+decltype(::StackWalk64) *StackWalk64;
+decltype(::SymCleanup) *SymCleanup;
+decltype(::SymFromAddr) *SymFromAddr;
+decltype(::SymFunctionTableAccess64) *SymFunctionTableAccess64;
+decltype(::SymGetLineFromAddr64) *SymGetLineFromAddr64;
+decltype(::SymGetModuleBase64) *SymGetModuleBase64;
+decltype(::SymGetSearchPathW) *SymGetSearchPathW;
+decltype(::SymInitialize) *SymInitialize;
+decltype(::SymSetOptions) *SymSetOptions;
+decltype(::SymSetSearchPathW) *SymSetSearchPathW;
+decltype(::UnDecorateSymbolName) *UnDecorateSymbolName;
+
namespace {
class WinSymbolizerTool : public SymbolizerTool {
@@ -50,6 +59,29 @@ bool TrySymInitialize() {
void InitializeDbgHelpIfNeeded() {
if (is_dbghelp_initialized)
return;
+
+ HMODULE dbghelp = LoadLibraryA("dbghelp.dll");
+ CHECK(dbghelp && "failed to load dbghelp.dll");
+
+#define DBGHELP_IMPORT(name) \
+ do { \
+ name = \
+ reinterpret_cast<decltype(::name) *>(GetProcAddress(dbghelp, #name)); \
+ CHECK(name != nullptr); \
+ } while (0)
+ DBGHELP_IMPORT(StackWalk64);
+ DBGHELP_IMPORT(SymCleanup);
+ DBGHELP_IMPORT(SymFromAddr);
+ DBGHELP_IMPORT(SymFunctionTableAccess64);
+ DBGHELP_IMPORT(SymGetLineFromAddr64);
+ DBGHELP_IMPORT(SymGetModuleBase64);
+ DBGHELP_IMPORT(SymGetSearchPathW);
+ DBGHELP_IMPORT(SymInitialize);
+ DBGHELP_IMPORT(SymSetOptions);
+ DBGHELP_IMPORT(SymSetSearchPathW);
+ DBGHELP_IMPORT(UnDecorateSymbolName);
+#undef DBGHELP_IMPORT
+
if (!TrySymInitialize()) {
// OK, maybe the client app has called SymInitialize already.
// That's a bit unfortunate for us as all the DbgHelp functions are
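
The DbgHelp functions are now resolved at run time with
LoadLibraryA/GetProcAddress rather than through a link-time dependency on
dbghelp.lib. The same pattern in isolation, shown against user32.dll purely as
a stand-in (hypothetical example, not part of the patch):

    #include <windows.h>
    #include <cstdio>

    int main() {
      HMODULE user32 = LoadLibraryA("user32.dll");
      if (!user32) return 1;
      // decltype(::MessageBoxA) names the imported function's type, mirroring
      // the DBGHELP_IMPORT macro above.
      auto *fn = reinterpret_cast<decltype(::MessageBoxA) *>(
          GetProcAddress(user32, "MessageBoxA"));
      std::printf("MessageBoxA %s\n", fn ? "resolved" : "not found");
      FreeLibrary(user32);
      return 0;
    }
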
diff --git a/lib/sanitizer_common/sanitizer_thread_registry.cc b/lib/sanitizer_common/sanitizer_thread_registry.cc
index 6e7ddfa64d4b..c2b75e652ce9 100644
--- a/lib/sanitizer_common/sanitizer_thread_registry.cc
+++ b/lib/sanitizer_common/sanitizer_thread_registry.cc
@@ -131,7 +131,7 @@ u32 ThreadRegistry::CreateThread(uptr user_id, bool detached, u32 parent_tid,
tctx = context_factory_(tid);
threads_[tid] = tctx;
} else {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
Report("%s: Thread limit (%u threads) exceeded. Dying.\n",
SanitizerToolName, max_threads_);
#else
diff --git a/lib/sanitizer_common/sanitizer_win.cc b/lib/sanitizer_common/sanitizer_win.cc
index f762731cb639..c4a57f0870cc 100644
--- a/lib/sanitizer_common/sanitizer_win.cc
+++ b/lib/sanitizer_common/sanitizer_win.cc
@@ -18,15 +18,16 @@
#define WIN32_LEAN_AND_MEAN
#define NOGDI
#include <windows.h>
-#include <dbghelp.h>
#include <io.h>
#include <psapi.h>
#include <stdlib.h>
#include "sanitizer_common.h"
+#include "sanitizer_dbghelp.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"
+#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"
@@ -173,10 +174,10 @@ void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
// FIXME: is this really "NoReserve"? On Win32 this does not matter much,
// but on Win64 it does.
(void)name; // unsupported
-#if SANITIZER_WINDOWS64
- // On Windows64, use MEM_COMMIT would result in error
+#if !SANITIZER_GO && SANITIZER_WINDOWS64
+ // On asan/Windows64, using MEM_COMMIT would result in error
// 1455:ERROR_COMMITMENT_LIMIT.
- // We use exception handler to commit page on demand.
+ // Asan uses an exception handler to commit pages on demand.
void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE, PAGE_READWRITE);
#else
void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE | MEM_COMMIT,
@@ -220,8 +221,12 @@ void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
}
void *MmapNoAccess(uptr size) {
- // FIXME: unsupported.
- return nullptr;
+ void *res = VirtualAlloc(nullptr, size, MEM_RESERVE, PAGE_NOACCESS);
+ if (res == 0)
+ Report("WARNING: %s failed to "
+ "mprotect %p (%zd) bytes (error code: %d)\n",
+ SanitizerToolName, size, size, GetLastError());
+ return res;
}
bool MprotectNoAccess(uptr addr, uptr size) {
@@ -229,14 +234,13 @@ bool MprotectNoAccess(uptr addr, uptr size) {
return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection);
}
-
-void FlushUnneededShadowMemory(uptr addr, uptr size) {
+void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
// This is almost useless on 32-bits.
// FIXME: add madvise-analog when we move to 64-bits.
}
void NoHugePagesInRegion(uptr addr, uptr size) {
- // FIXME: probably similar to FlushUnneededShadowMemory.
+ // FIXME: probably similar to ReleaseMemoryPagesToOS.
}
void DontDumpShadowMemory(uptr addr, uptr length) {
@@ -244,6 +248,26 @@ void DontDumpShadowMemory(uptr addr, uptr length) {
// FIXME: add madvise-analog when we move to 64-bits.
}
+uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding) {
+ uptr address = 0;
+ while (true) {
+ MEMORY_BASIC_INFORMATION info;
+ if (!::VirtualQuery((void*)address, &info, sizeof(info)))
+ return 0;
+
+ if (info.State == MEM_FREE) {
+ uptr shadow_address = RoundUpTo((uptr)info.BaseAddress + left_padding,
+ alignment);
+ if (shadow_address + size < (uptr)info.BaseAddress + info.RegionSize)
+ return shadow_address;
+ }
+
+ // Move to the next region.
+ address = (uptr)info.BaseAddress + info.RegionSize;
+ }
+ return 0;
+}
+
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
MEMORY_BASIC_INFORMATION mbi;
CHECK(VirtualQuery((void *)range_start, &mbi, sizeof(mbi)));
@@ -309,7 +333,7 @@ struct ModuleInfo {
uptr end_address;
};
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
int CompareModulesBase(const void *pl, const void *pr) {
const ModuleInfo *l = (ModuleInfo *)pl, *r = (ModuleInfo *)pr;
if (l->base_address < r->base_address)
@@ -319,7 +343,7 @@ int CompareModulesBase(const void *pl, const void *pr) {
#endif
} // namespace
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void DumpProcessMap() {
Report("Dumping process modules:\n");
ListOfModules modules;
@@ -329,8 +353,8 @@ void DumpProcessMap() {
InternalScopedBuffer<ModuleInfo> module_infos(num_modules);
for (size_t i = 0; i < num_modules; ++i) {
module_infos[i].filepath = modules[i].full_name();
- module_infos[i].base_address = modules[i].base_address();
- module_infos[i].end_address = modules[i].ranges().front()->end;
+ module_infos[i].base_address = modules[i].ranges().front()->beg;
+ module_infos[i].end_address = modules[i].ranges().back()->end;
}
qsort(module_infos.data(), num_modules, sizeof(ModuleInfo),
CompareModulesBase);
@@ -400,12 +424,10 @@ u64 NanoTime() {
}
void Abort() {
- if (::IsDebuggerPresent())
- __debugbreak();
internal__exit(3);
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Read the file to extract the ImageBase field from the PE header. If ASLR is
// disabled and this virtual address is available, the loader will typically
// load the image at this address. Therefore, we call it the preferred base. Any
@@ -631,7 +653,13 @@ uptr internal_sched_yield() {
}
void internal__exit(int exitcode) {
- ExitProcess(exitcode);
+ // ExitProcess runs some finalizers, so use TerminateProcess to avoid that.
+ // The debugger doesn't stop on TerminateProcess like it does on ExitProcess,
+ // so add our own breakpoint here.
+ if (::IsDebuggerPresent())
+ __debugbreak();
+ TerminateProcess(GetCurrentProcess(), exitcode);
+ __assume(0);
}
uptr internal_ftruncate(fd_t fd, uptr size) {
@@ -698,7 +726,7 @@ void InitTlsSize() {
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
uptr *tls_addr, uptr *tls_size) {
-#ifdef SANITIZER_GO
+#if SANITIZER_GO
*stk_addr = 0;
*stk_size = 0;
*tls_addr = 0;
@@ -735,6 +763,8 @@ void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
STACKFRAME64 stack_frame;
memset(&stack_frame, 0, sizeof(stack_frame));
+ InitializeDbgHelpIfNeeded();
+
size = 0;
#if defined(_WIN64)
int machine_type = IMAGE_FILE_MACHINE_AMD64;
@@ -751,8 +781,8 @@ void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
stack_frame.AddrFrame.Mode = AddrModeFlat;
stack_frame.AddrStack.Mode = AddrModeFlat;
while (StackWalk64(machine_type, GetCurrentProcess(), GetCurrentThread(),
- &stack_frame, &ctx, NULL, &SymFunctionTableAccess64,
- &SymGetModuleBase64, NULL) &&
+ &stack_frame, &ctx, NULL, SymFunctionTableAccess64,
+ SymGetModuleBase64, NULL) &&
size < Min(max_depth, kStackTraceMax)) {
trace_buffer[size++] = (uptr)stack_frame.AddrPC.Offset;
}
@@ -842,6 +872,10 @@ SignalContext SignalContext::Create(void *siginfo, void *context) {
write_flag);
}
+void SignalContext::DumpAllRegisters(void *context) {
+ // FIXME: Implement this.
+}
+
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
// FIXME: Actually implement this function.
CHECK_GT(buf_len, 0);
@@ -882,6 +916,24 @@ bool IsProcessRunning(pid_t pid) {
int WaitForProcess(pid_t pid) { return -1; }
+// FIXME implement on this platform.
+void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { }
+
+
} // namespace __sanitizer
+#if !SANITIZER_GO
+// Workaround to implement weak hooks on Windows. COFF doesn't directly support
+// weak symbols, but it does support /alternatename, which is similar. If the
+// user does not override the hook, we will use this default definition instead
+// of null.
+extern "C" void __sanitizer_print_memory_profile(int top_percent) {}
+
+#ifdef _WIN64
+#pragma comment(linker, "/alternatename:__sanitizer_print_memory_profile=__sanitizer_default_print_memory_profile") // NOLINT
+#else
+#pragma comment(linker, "/alternatename:___sanitizer_print_memory_profile=___sanitizer_default_print_memory_profile") // NOLINT
+#endif
+#endif
+
#endif // _WIN32
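The /alternatename pragma above is the COFF substitute for a weak default definition. As a general sketch of the idiom (the symbol names below are hypothetical, not the ones this patch uses): the runtime references a hook the user may define; when no user definition exists, the linker resolves the reference to the default, and 32-bit x86 needs an extra leading underscore because of C symbol decoration:

    // Referenced by the runtime; a user may supply their own definition.
    extern "C" void MyHook(int arg);
    // Fallback definition, used only if no MyHook is defined anywhere.
    extern "C" void MyDefaultHook(int arg) {}

    #ifdef _WIN64
    #pragma comment(linker, "/alternatename:MyHook=MyDefaultHook")
    #else
    #pragma comment(linker, "/alternatename:_MyHook=_MyDefaultHook")
    #endif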
diff --git a/lib/sanitizer_common/scripts/gen_dynamic_list.py b/lib/sanitizer_common/scripts/gen_dynamic_list.py
index b8b79b5994b7..1d4230607b92 100755
--- a/lib/sanitizer_common/scripts/gen_dynamic_list.py
+++ b/lib/sanitizer_common/scripts/gen_dynamic_list.py
@@ -19,6 +19,7 @@ import os
import re
import subprocess
import sys
+import platform
new_delete = set([
'_Znam', '_ZnamRKSt9nothrow_t', # operator new[](unsigned long)
@@ -42,14 +43,15 @@ versioned_functions = set(['memcpy', 'pthread_attr_getaffinity_np',
def get_global_functions(library):
functions = []
- nm_proc = subprocess.Popen(['nm', library], stdout=subprocess.PIPE,
+ nm = os.environ.get('NM', 'nm')
+ nm_proc = subprocess.Popen([nm, library], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
nm_out = nm_proc.communicate()[0].decode().split('\n')
if nm_proc.returncode != 0:
- raise subprocess.CalledProcessError(nm_proc.returncode, 'nm')
+ raise subprocess.CalledProcessError(nm_proc.returncode, nm)
func_symbols = ['T', 'W']
# On PowerPC, nm prints function descriptors from .data section.
- if os.uname()[4] in ["powerpc", "ppc64"]:
+ if platform.uname()[4] in ["powerpc", "ppc64"]:
func_symbols += ['D']
for line in nm_out:
cols = line.split(' ')
diff --git a/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cc b/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cc
new file mode 100644
index 000000000000..bd315a0c9bd4
--- /dev/null
+++ b/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cc
@@ -0,0 +1,72 @@
+//===-- sanitizer_symbolize.cc ----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of weak hooks from sanitizer_symbolizer_posix_libcdep.cc.
+//
+//===----------------------------------------------------------------------===//
+
+#include <stdio.h>
+#include <string>
+
+#include "llvm/DebugInfo/Symbolize/DIPrinter.h"
+#include "llvm/DebugInfo/Symbolize/Symbolize.h"
+
+static llvm::symbolize::LLVMSymbolizer *getDefaultSymbolizer() {
+ static llvm::symbolize::LLVMSymbolizer DefaultSymbolizer;
+ return &DefaultSymbolizer;
+}
+
+namespace __sanitizer {
+int internal_snprintf(char *buffer, unsigned long length, const char *format,
+ ...);
+} // namespace __sanitizer
+
+extern "C" {
+
+typedef uint64_t u64;
+
+bool __sanitizer_symbolize_code(const char *ModuleName, uint64_t ModuleOffset,
+ char *Buffer, int MaxLength) {
+ std::string Result;
+ {
+ llvm::raw_string_ostream OS(Result);
+ llvm::symbolize::DIPrinter Printer(OS);
+ auto ResOrErr =
+ getDefaultSymbolizer()->symbolizeInlinedCode(ModuleName, ModuleOffset);
+ Printer << (ResOrErr ? ResOrErr.get() : llvm::DIInliningInfo());
+ }
+ __sanitizer::internal_snprintf(Buffer, MaxLength, "%s", Result.c_str());
+ return true;
+}
+
+bool __sanitizer_symbolize_data(const char *ModuleName, uint64_t ModuleOffset,
+ char *Buffer, int MaxLength) {
+ std::string Result;
+ {
+ llvm::raw_string_ostream OS(Result);
+ llvm::symbolize::DIPrinter Printer(OS);
+ auto ResOrErr =
+ getDefaultSymbolizer()->symbolizeData(ModuleName, ModuleOffset);
+ Printer << (ResOrErr ? ResOrErr.get() : llvm::DIGlobal());
+ }
+ __sanitizer::internal_snprintf(Buffer, MaxLength, "%s", Result.c_str());
+ return true;
+}
+
+void __sanitizer_symbolize_flush() { getDefaultSymbolizer()->flush(); }
+
+int __sanitizer_symbolize_demangle(const char *Name, char *Buffer,
+ int MaxLength) {
+ std::string Result =
+ llvm::symbolize::LLVMSymbolizer::DemangleName(Name, nullptr);
+ __sanitizer::internal_snprintf(Buffer, MaxLength, "%s", Result.c_str());
+ return static_cast<int>(Result.size() + 1);
+}
+
+} // extern "C"
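A hypothetical caller-side sketch of how these entry points are used: the runtime passes a module path and an offset within that module, and the buffer comes back with roughly one "FunctionName / File:Line:Column" block per inlined frame. The module path, offset, and the PrintSymbolizedFrame wrapper below are made up for illustration:

    #include <stdint.h>
    #include <stdio.h>

    extern "C" bool __sanitizer_symbolize_code(const char *ModuleName,
                                               uint64_t ModuleOffset,
                                               char *Buffer, int MaxLength);

    void PrintSymbolizedFrame() {
      char buffer[4096];
      // Hypothetical module and offset; a real caller derives them from the
      // process module map and the PC being symbolized.
      if (__sanitizer_symbolize_code("/usr/lib/libexample.so", 0x1234, buffer,
                                     (int)sizeof(buffer)))
        printf("%s", buffer);
    }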
diff --git a/lib/sanitizer_common/symbolizer/sanitizer_wrappers.cc b/lib/sanitizer_common/symbolizer/sanitizer_wrappers.cc
new file mode 100644
index 000000000000..0a796d91a3d0
--- /dev/null
+++ b/lib/sanitizer_common/symbolizer/sanitizer_wrappers.cc
@@ -0,0 +1,175 @@
+//===-- sanitizer_wrappers.cc -----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Redirect some functions to sanitizer interceptors.
+//
+//===----------------------------------------------------------------------===//
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <tuple>
+
+// Need to match ../sanitizer_common/sanitizer_internal_defs.h
+#if defined(ARCH_PPC)
+#define OFF_T unsigned long
+#else
+#define OFF_T unsigned long long
+#endif
+
+namespace __sanitizer {
+unsigned long internal_open(const char *filename, int flags);
+unsigned long internal_open(const char *filename, int flags, unsigned mode);
+unsigned long internal_close(int fd);
+unsigned long internal_stat(const char *path, void *buf);
+unsigned long internal_lstat(const char *path, void *buf);
+unsigned long internal_fstat(int fd, void *buf);
+size_t internal_strlen(const char *s);
+unsigned long internal_mmap(void *addr, unsigned long length, int prot,
+ int flags, int fd, OFF_T offset);
+void *internal_memcpy(void *dest, const void *src, unsigned long n);
+// Used to propagate errno.
+bool internal_iserror(unsigned long retval, int *rverrno = 0);
+} // namespace __sanitizer
+
+namespace {
+
+template <typename T>
+struct GetTypes;
+
+template <typename R, typename... Args>
+struct GetTypes<R(Args...)> {
+ using Result = R;
+ template <size_t i>
+ struct Arg {
+ using Type = typename std::tuple_element<i, std::tuple<Args...>>::type;
+ };
+};
+
+#define LLVM_SYMBOLIZER_GET_FUNC(Function) \
+ ((__interceptor_##Function) \
+ ? (__interceptor_##Function) \
+ : reinterpret_cast<decltype(&Function)>(dlsym(RTLD_NEXT, #Function)))
+
+#define LLVM_SYMBOLIZER_INTERCEPTOR1(Function, ...) \
+ GetTypes<__VA_ARGS__>::Result __interceptor_##Function( \
+ GetTypes<__VA_ARGS__>::Arg<0>::Type) __attribute__((weak)); \
+ GetTypes<__VA_ARGS__>::Result Function( \
+ GetTypes<__VA_ARGS__>::Arg<0>::Type arg0) { \
+ return LLVM_SYMBOLIZER_GET_FUNC(Function)(arg0); \
+ }
+
+#define LLVM_SYMBOLIZER_INTERCEPTOR2(Function, ...) \
+ GetTypes<__VA_ARGS__>::Result __interceptor_##Function( \
+ GetTypes<__VA_ARGS__>::Arg<0>::Type, \
+ GetTypes<__VA_ARGS__>::Arg<1>::Type) __attribute__((weak)); \
+ GetTypes<__VA_ARGS__>::Result Function( \
+ GetTypes<__VA_ARGS__>::Arg<0>::Type arg0, \
+ GetTypes<__VA_ARGS__>::Arg<1>::Type arg1) { \
+ return LLVM_SYMBOLIZER_GET_FUNC(Function)(arg0, arg1); \
+ }
+
+#define LLVM_SYMBOLIZER_INTERCEPTOR3(Function, ...) \
+ GetTypes<__VA_ARGS__>::Result __interceptor_##Function( \
+ GetTypes<__VA_ARGS__>::Arg<0>::Type, \
+ GetTypes<__VA_ARGS__>::Arg<1>::Type, \
+ GetTypes<__VA_ARGS__>::Arg<2>::Type) __attribute__((weak)); \
+ GetTypes<__VA_ARGS__>::Result Function( \
+ GetTypes<__VA_ARGS__>::Arg<0>::Type arg0, \
+ GetTypes<__VA_ARGS__>::Arg<1>::Type arg1, \
+ GetTypes<__VA_ARGS__>::Arg<2>::Type arg2) { \
+ return LLVM_SYMBOLIZER_GET_FUNC(Function)(arg0, arg1, arg2); \
+ }
+
+#define LLVM_SYMBOLIZER_INTERCEPTOR4(Function, ...) \
+ GetTypes<__VA_ARGS__>::Result __interceptor_##Function( \
+ GetTypes<__VA_ARGS__>::Arg<0>::Type, \
+ GetTypes<__VA_ARGS__>::Arg<1>::Type, \
+ GetTypes<__VA_ARGS__>::Arg<2>::Type, \
+ GetTypes<__VA_ARGS__>::Arg<3>::Type) __attribute__((weak)); \
+ GetTypes<__VA_ARGS__>::Result Function( \
+ GetTypes<__VA_ARGS__>::Arg<0>::Type arg0, \
+ GetTypes<__VA_ARGS__>::Arg<1>::Type arg1, \
+ GetTypes<__VA_ARGS__>::Arg<2>::Type arg2, \
+ GetTypes<__VA_ARGS__>::Arg<3>::Type arg3) { \
+ return LLVM_SYMBOLIZER_GET_FUNC(Function)(arg0, arg1, arg2, arg3); \
+ }
+
+} // namespace
+
+// C-style interface around internal sanitizer libc functions.
+extern "C" {
+
+#define RETURN_OR_SET_ERRNO(T, res) \
+ int rverrno; \
+ if (__sanitizer::internal_iserror(res, &rverrno)) { \
+ errno = rverrno; \
+ return (T)-1; \
+ } \
+ return (T)res;
+
+int open(const char *filename, int flags, ...) {
+ unsigned long res;
+ if (flags & O_CREAT) {  // Only O_CREAT carries a third (mode) argument.
+ va_list va;
+ va_start(va, flags);
+ unsigned mode = va_arg(va, unsigned);
+ va_end(va);
+ res = __sanitizer::internal_open(filename, flags, mode);
+ } else {
+ res = __sanitizer::internal_open(filename, flags);
+ }
+ RETURN_OR_SET_ERRNO(int, res);
+}
+
+int close(int fd) {
+ unsigned long res = __sanitizer::internal_close(fd);
+ RETURN_OR_SET_ERRNO(int, res);
+}
+
+#define STAT(func, arg, buf) \
+ unsigned long res = __sanitizer::internal_##func(arg, buf); \
+ RETURN_OR_SET_ERRNO(int, res);
+
+int stat(const char *path, struct stat *buf) { STAT(stat, path, buf); }
+
+int lstat(const char *path, struct stat *buf) { STAT(lstat, path, buf); }
+
+int fstat(int fd, struct stat *buf) { STAT(fstat, fd, buf); }
+
+// Redirect the versioned stat functions to __sanitizer::internal_*() as well.
+int __xstat(int version, const char *path, struct stat *buf) {
+ STAT(stat, path, buf);
+}
+
+int __lxstat(int version, const char *path, struct stat *buf) {
+ STAT(lstat, path, buf);
+}
+
+int __fxstat(int version, int fd, struct stat *buf) { STAT(fstat, fd, buf); }
+
+size_t strlen(const char *s) { return __sanitizer::internal_strlen(s); }
+
+void *mmap(void *addr, size_t length, int prot, int flags, int fd,
+ off_t offset) {
+ unsigned long res = __sanitizer::internal_mmap(
+ addr, (unsigned long)length, prot, flags, fd, (unsigned long long)offset);
+ RETURN_OR_SET_ERRNO(void *, res);
+}
+
+LLVM_SYMBOLIZER_INTERCEPTOR3(read, ssize_t(int, void *, size_t))
+LLVM_SYMBOLIZER_INTERCEPTOR4(pread, ssize_t(int, void *, size_t, off_t))
+LLVM_SYMBOLIZER_INTERCEPTOR4(pread64, ssize_t(int, void *, size_t, off64_t))
+LLVM_SYMBOLIZER_INTERCEPTOR2(realpath, char *(const char *, char *))
+
+} // extern "C"
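As a rough hand-expansion of one of these macros, LLVM_SYMBOLIZER_INTERCEPTOR3(read, ssize_t(int, void *, size_t)) produces approximately the code below (it sits inside the extern "C" block of this file, which already includes <dlfcn.h> and <unistd.h>). The weak __interceptor_read is non-null only when a sanitizer runtime provides it; otherwise the call falls through to the next visible read via dlsym(RTLD_NEXT):

    ssize_t __interceptor_read(int, void *, size_t) __attribute__((weak));

    ssize_t read(int fd, void *buf, size_t count) {
      // Prefer the sanitizer's interceptor when one is linked in; otherwise
      // forward to the next definition of read (typically libc's).
      auto real = __interceptor_read
                      ? __interceptor_read
                      : reinterpret_cast<ssize_t (*)(int, void *, size_t)>(
                            dlsym(RTLD_NEXT, "read"));
      return real(fd, buf, count);
    }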
diff --git a/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh b/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
new file mode 100755
index 000000000000..07239eb50587
--- /dev/null
+++ b/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
@@ -0,0 +1,187 @@
+#!/bin/bash -eu
+#
+# Run as: CLANG=bin/clang ZLIB_SRC=src/zlib \
+# build_symbolizer.sh runtime_build/lib/clang/4.0.0/lib/linux/
+# zlib can be downloaded from http://www.zlib.net.
+#
+# The script compiles a self-contained object file with the symbolization code
+# and injects it into the given set of runtime libraries. It updates only the
+# libraries that have unresolved __sanitizer_symbolize_* symbols and match the
+# architecture. The object file is compiled from LLVM sources together with
+# dependencies such as libc++ and zlib, and its symbols are then internalized
+# so that it can be linked into arbitrary programs without conflicting with,
+# or depending on, the program's own symbols. The only acceptable dependencies
+# are libc and __sanitizer::internal_* from the sanitizer runtime.
+#
+# Symbols exported by the object file will be used by Sanitizer runtime
+# libraries to symbolize code/data in-process.
+#
+# The script will modify the output directory which is given as the first
+# argument to the script.
+#
+# FIXME: We should really be using a simpler approach to building this object
+# file, and it should be available as a regular cmake rule. Conceptually, we
+# want to be doing "ld -r" followed by "objcopy -G" to create a relocatable
+# object file with only our entry points exposed. However, this does not work at
+# present, see PR30750.
+
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+SRC_DIR=$(readlink -f $SCRIPT_DIR/..)
+TARGET_DIR=$(readlink -f $1)
+
+LLVM_SRC="${LLVM_SRC:-$SCRIPT_DIR/../../../../../..}"
+LLVM_SRC=$(readlink -f $LLVM_SRC)
+
+if [[ ! -d "${LLVM_SRC}/projects/libcxxabi" ||
+ ! -d "${LLVM_SRC}/projects/libcxx" ]]; then
+ echo "Missing or incomplete LLVM_SRC"
+ exit 1
+fi
+
+if [[ "$ZLIB_SRC" == "" ||
+ ! -x "${ZLIB_SRC}/configure" ||
+ ! -f "${ZLIB_SRC}/zlib.h" ]]; then
+ echo "Missing or incomplete ZLIB_SRC"
+ exit 1
+fi
+ZLIB_SRC=$(readlink -f $ZLIB_SRC)
+
+J="${J:-50}"
+
+CLANG="${CLANG:-`which clang`}"
+CLANG_DIR=$(readlink -f $(dirname "$CLANG"))
+
+BUILD_DIR=$(readlink -f ./symbolizer)
+mkdir -p $BUILD_DIR
+cd $BUILD_DIR
+
+CC=$CLANG_DIR/clang
+CXX=$CLANG_DIR/clang++
+TBLGEN=$CLANG_DIR/llvm-tblgen
+LINK=$CLANG_DIR/llvm-link
+OPT=$CLANG_DIR/opt
+AR=$CLANG_DIR/llvm-ar
+
+for F in $CC $CXX $TBLGEN $LINK $OPT $AR; do
+ if [[ ! -x "$F" ]]; then
+ echo "Missing $F"
+ exit 1
+ fi
+done
+
+ZLIB_BUILD=${BUILD_DIR}/zlib
+LIBCXX_BUILD=${BUILD_DIR}/libcxx
+LLVM_BUILD=${BUILD_DIR}/llvm
+SYMBOLIZER_BUILD=${BUILD_DIR}/symbolizer
+
+FLAGS=${FLAGS:-}
+FLAGS="$FLAGS -fPIC -flto -Os -g0 -DNDEBUG"
+
+# Build zlib.
+mkdir -p ${ZLIB_BUILD}
+cd ${ZLIB_BUILD}
+cp -r ${ZLIB_SRC}/* .
+CC=$CC CFLAGS="$FLAGS" RANLIB=/bin/true ./configure --static
+make -j${J} libz.a
+
+# Build and install libcxxabi and libcxx.
+if [[ ! -d ${LIBCXX_BUILD} ]]; then
+ mkdir -p ${LIBCXX_BUILD}
+ cd ${LIBCXX_BUILD}
+ LIBCXX_FLAGS="${FLAGS} -Wno-macro-redefined -I${LLVM_SRC}/projects/libcxxabi/include"
+ cmake -GNinja \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DCMAKE_C_COMPILER=$CC \
+ -DCMAKE_CXX_COMPILER=$CXX \
+ -DCMAKE_C_FLAGS_RELEASE="${LIBCXX_FLAGS}" \
+ -DCMAKE_CXX_FLAGS_RELEASE="${LIBCXX_FLAGS}" \
+ -DLIBCXXABI_ENABLE_ASSERTIONS=OFF \
+ -DLIBCXXABI_ENABLE_EXCEPTIONS=OFF \
+ -DLIBCXXABI_ENABLE_SHARED=OFF \
+ -DLIBCXXABI_ENABLE_THREADS=OFF \
+ -DLIBCXX_ENABLE_ASSERTIONS=OFF \
+ -DLIBCXX_ENABLE_EXCEPTIONS=OFF \
+ -DLIBCXX_ENABLE_RTTI=OFF \
+ -DLIBCXX_ENABLE_SHARED=OFF \
+ -DLIBCXX_ENABLE_THREADS=OFF \
+ $LLVM_SRC
+fi
+cd ${LIBCXX_BUILD}
+ninja cxx cxxabi
+
+FLAGS="${FLAGS} -fno-rtti -fno-exceptions"
+
+# Build LLVM.
+if [[ ! -d ${LLVM_BUILD} ]]; then
+ mkdir -p ${LLVM_BUILD}
+ cd ${LLVM_BUILD}
+ LLVM_FLAGS="${FLAGS} -I${ZLIB_BUILD} -I${LIBCXX_BUILD}/include/c++/v1"
+ cmake -GNinja \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DCMAKE_C_COMPILER=$CC \
+ -DCMAKE_CXX_COMPILER=$CXX \
+ -DCMAKE_C_FLAGS_RELEASE="${LLVM_FLAGS}" \
+ -DCMAKE_CXX_FLAGS_RELEASE="${LLVM_FLAGS}" \
+ -DLLVM_TABLEGEN=$TBLGEN \
+ -DLLVM_ENABLE_ZLIB=ON \
+ -DLLVM_ENABLE_TERMINFO=OFF \
+ -DLLVM_ENABLE_THREADS=OFF \
+ $LLVM_SRC
+fi
+cd ${LLVM_BUILD}
+ninja LLVMSymbolize LLVMObject LLVMDebugInfoDWARF LLVMSupport LLVMDebugInfoPDB LLVMMC
+
+cd ${BUILD_DIR}
+rm -rf ${SYMBOLIZER_BUILD}
+mkdir ${SYMBOLIZER_BUILD}
+cd ${SYMBOLIZER_BUILD}
+
+for A in $LIBCXX_BUILD/lib/libc++.a \
+ $LIBCXX_BUILD/lib/libc++abi.a \
+ $LLVM_BUILD/lib/libLLVMSymbolize.a \
+ $LLVM_BUILD/lib/libLLVMObject.a \
+ $LLVM_BUILD/lib/libLLVMDebugInfoDWARF.a \
+ $LLVM_BUILD/lib/libLLVMSupport.a \
+ $LLVM_BUILD/lib/libLLVMDebugInfoPDB.a \
+ $LLVM_BUILD/lib/libLLVMMC.a \
+ $ZLIB_BUILD/libz.a ; do
+ for O in $($AR t $A); do
+ $AR x $A $O
+ mv -f $O "$(basename $A).$O" # Rename to avoid collisions between libs.
+ done
+done
+
+echo "Compiling..."
+SYMBOLIZER_FLAGS="$FLAGS -std=c++11 -I${LLVM_SRC}/include -I${LLVM_BUILD}/include -I${LIBCXX_BUILD}/include/c++/v1"
+$CXX $SYMBOLIZER_FLAGS ${SRC_DIR}/sanitizer_symbolize.cc ${SRC_DIR}/sanitizer_wrappers.cc -c
+
+SYMBOLIZER_API_LIST=__sanitizer_symbolize_code,__sanitizer_symbolize_data,__sanitizer_symbolize_flush,__sanitizer_symbolize_demangle
+
+# Merge all the object files together and copy the resulting library back.
+$LINK *.o -o all.bc
+echo "Optimizing..."
+$OPT -internalize -internalize-public-api-list=${SYMBOLIZER_API_LIST} all.bc -o opt.bc
+$CC $FLAGS -fno-lto -c opt.bc -o symbolizer.o
+
+echo "Checking undefined symbols..."
+nm -f posix -g symbolizer.o | cut -f 1,2 -d \ | LC_COLLATE=C sort -u > undefined.new
+(diff -u $SCRIPT_DIR/global_symbols.txt undefined.new | grep -E "^\+[^+]") && \
+ (echo "Failed: unexpected symbols"; exit 1)
+
+arch() {
+ objdump -f $1 | grep -m1 -Po "(?<=file format ).*$"
+}
+
+SYMBOLIZER_FORMAT=$(arch symbolizer.o)
+echo "Injecting $SYMBOLIZER_FORMAT symbolizer..."
+for A in $TARGET_DIR/libclang_rt.*san*.a; do
+ A_FORMAT=$(arch $A)
+ if [[ "$A_FORMAT" != "$SYMBOLIZER_FORMAT" ]] ; then
+ continue
+ fi
+ (nm -u $A 2>/dev/null | grep -E "__sanitizer_symbolize_code" >/dev/null) || continue
+ echo "$A"
+ $AR rcs $A symbolizer.o
+done
+
+echo "Success!"
diff --git a/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt b/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt
new file mode 100644
index 000000000000..033acf7f202a
--- /dev/null
+++ b/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt
@@ -0,0 +1,137 @@
+_GLOBAL_OFFSET_TABLE_ U
+_ZN11__sanitizer13internal_mmapEPvmiiiy U
+_ZN11__sanitizer13internal_openEPKcij U
+_ZN11__sanitizer13internal_statEPKcPv U
+_ZN11__sanitizer14internal_closeEi U
+_ZN11__sanitizer14internal_fstatEiPv U
+_ZN11__sanitizer14internal_lstatEPKcPv U
+_ZN11__sanitizer15internal_strlenEPKc U
+_ZN11__sanitizer16internal_iserrorEmPi U
+_ZN11__sanitizer17internal_snprintfEPcmPKcz U
+__ctype_b_loc U
+__ctype_get_mb_cur_max U
+__cxa_atexit U
+__divdi3 U
+__dso_handle U
+__errno_location U
+__interceptor_pread w
+__interceptor_read w
+__interceptor_realpath w
+__moddi3 U
+__sanitizer_symbolize_code T
+__sanitizer_symbolize_data T
+__sanitizer_symbolize_demangle T
+__sanitizer_symbolize_flush T
+__strdup U
+__udivdi3 U
+__umoddi3 U
+_exit U
+abort U
+access U
+calloc U
+catclose U
+catgets U
+catopen U
+ceil U
+clock_gettime U
+cfgetospeed U
+dl_iterate_phdr U
+dlsym U
+dup2 U
+environ U
+execv U
+exit U
+fclose U
+fflush U
+fileno U
+fopen U
+fork U
+fprintf U
+fputc U
+free U
+freelocale U
+fwrite U
+getc U
+getcwd U
+getenv U
+getpagesize U
+getpid U
+gettimeofday U
+ioctl U
+isatty U
+isprint U
+isupper U
+isxdigit U
+log10 U
+lseek U
+lseek64 U
+malloc U
+mbrlen U
+mbrtowc U
+mbsnrtowcs U
+mbsrtowcs U
+mbtowc U
+memchr U
+memcmp U
+memcpy U
+memmove U
+memset U
+mkdir U
+munmap U
+newlocale U
+perror U
+posix_spawn U
+posix_spawn_file_actions_adddup2 U
+posix_spawn_file_actions_addopen U
+posix_spawn_file_actions_destroy U
+posix_spawn_file_actions_init U
+qsort U
+rand U
+readlink U
+realloc U
+remove U
+setvbuf U
+sigfillset U
+sigprocmask U
+snprintf U
+sprintf U
+srand U
+sscanf U
+stderr U
+stdin U
+stdout U
+strcat U
+strchr U
+strcmp U
+strcpy U
+strdup U
+strerror U
+strerror_r U
+strftime_l U
+strncmp U
+strncpy U
+strrchr U
+strsep U
+strtod_l U
+strtof_l U
+strtol U
+strtold_l U
+strtoll_l U
+strtoull_l U
+tcgetattr U
+uname U
+ungetc U
+unlink U
+uselocale U
+vasprintf U
+vfprintf U
+vsnprintf U
+vsscanf U
+waitpid U
+wcrtomb U
+wcslen U
+wcsnrtombs U
+wmemcpy U
+wmemmove U
+wmemset U
+write U
diff --git a/lib/sanitizer_common/tests/CMakeLists.txt b/lib/sanitizer_common/tests/CMakeLists.txt
index 0a828dc13486..b66f7563b01e 100644
--- a/lib/sanitizer_common/tests/CMakeLists.txt
+++ b/lib/sanitizer_common/tests/CMakeLists.txt
@@ -78,6 +78,11 @@ if(ANDROID)
list(APPEND SANITIZER_TEST_LINK_FLAGS_COMMON -pie)
endif()
+if(APPLE)
+ list(APPEND SANITIZER_TEST_CFLAGS_COMMON ${DARWIN_osx_CFLAGS})
+ list(APPEND SANITIZER_TEST_LINK_FLAGS_COMMON ${DARWIN_osx_LINKFLAGS})
+endif()
+
# MSVC linker is allocating 1M for the stack by default, which is not
# enough for the unittests. Some unittests require more than 2M.
# The default stack size for clang is 8M.
diff --git a/lib/sanitizer_common/tests/malloc_stress_transfer_test.cc b/lib/sanitizer_common/tests/malloc_stress_transfer_test.cc
new file mode 100644
index 000000000000..3e03c4bddfd7
--- /dev/null
+++ b/lib/sanitizer_common/tests/malloc_stress_transfer_test.cc
@@ -0,0 +1,37 @@
+#include <thread>
+#include <iostream>
+
+const size_t kAllocSize = 16;
+const size_t kInitialNumAllocs = 1 << 10;
+const size_t kPeriodicNumAllocs = 1 << 10;
+const size_t kNumIterations = 1 << 7;
+const size_t kNumThreads = 16;
+
+void Thread() {
+ // int sp;
+ // std::cerr << "Thread starting, sp = " << &sp << std::endl;
+ char *InitialAllocations[kInitialNumAllocs];
+ char *PeriodicAllocations[kPeriodicNumAllocs];
+ for (auto &p : InitialAllocations) p = new char[kAllocSize];
+ for (size_t i = 0; i < kNumIterations; i++) {
+ for (size_t j = 0; j < kPeriodicNumAllocs; j++) {
+ for (auto &p : PeriodicAllocations) {
+ p = new char[kAllocSize];
+ *p = 0;
+ }
+ for (auto p : PeriodicAllocations) delete [] p;
+ }
+ }
+ for (auto p : InitialAllocations) delete [] p;
+}
+
+int main() {
+// Thread();
+// return 0;
+ std::thread *Threads[kNumThreads];
+ for (auto &T : Threads) T = new std::thread(&Thread);
+ for (auto T : Threads) {
+ T->join();
+ delete T;
+ }
+}
diff --git a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
index 31eec19c3632..8df5efda674e 100644
--- a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -25,25 +25,73 @@
#include <vector>
#include <set>
+using namespace __sanitizer;
+
// Too slow for debug build
#if !SANITIZER_DEBUG
#if SANITIZER_CAN_USE_ALLOCATOR64
#if SANITIZER_WINDOWS
-static const uptr kAllocatorSpace = 0x10000000000ULL;
-static const uptr kAllocatorSize = 0x10000000000ULL; // 1T.
-static const u64 kAddressSpaceSize = 1ULL << 40;
+// On Windows 64-bit there is no easy way to find a large enough fixed address
+// space that is always available. Thus, a dynamically allocated address space
+// is used instead (i.e. ~(uptr)0).
+static const uptr kAllocatorSpace = ~(uptr)0;
+static const uptr kAllocatorSize = 0x8000000000ULL; // 512G
+static const u64 kAddressSpaceSize = 1ULL << 47;
+typedef DefaultSizeClassMap SizeClassMap;
+#elif SANITIZER_ANDROID && defined(__aarch64__)
+static const uptr kAllocatorSpace = 0x3000000000ULL;
+static const uptr kAllocatorSize = 0x2000000000ULL;
+static const u64 kAddressSpaceSize = 1ULL << 39;
+typedef VeryCompactSizeClassMap SizeClassMap;
#else
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize = 0x010000000000ULL; // 1T.
static const u64 kAddressSpaceSize = 1ULL << 47;
+typedef DefaultSizeClassMap SizeClassMap;
#endif
-typedef SizeClassAllocator64<
- kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap> Allocator64;
+struct AP64 { // Allocator Params. Short name for shorter demangled names.
+ static const uptr kSpaceBeg = kAllocatorSpace;
+ static const uptr kSpaceSize = kAllocatorSize;
+ static const uptr kMetadataSize = 16;
+ typedef ::SizeClassMap SizeClassMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+
+struct AP64Dyn {
+ static const uptr kSpaceBeg = ~(uptr)0;
+ static const uptr kSpaceSize = kAllocatorSize;
+ static const uptr kMetadataSize = 16;
+ typedef ::SizeClassMap SizeClassMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
-typedef SizeClassAllocator64<
- kAllocatorSpace, kAllocatorSize, 16, CompactSizeClassMap> Allocator64Compact;
+struct AP64Compact {
+ static const uptr kSpaceBeg = ~(uptr)0;
+ static const uptr kSpaceSize = kAllocatorSize;
+ static const uptr kMetadataSize = 16;
+ typedef CompactSizeClassMap SizeClassMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+
+struct AP64VeryCompact {
+ static const uptr kSpaceBeg = ~(uptr)0;
+ static const uptr kSpaceSize = 1ULL << 37;
+ static const uptr kMetadataSize = 16;
+ typedef VeryCompactSizeClassMap SizeClassMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+
+
+typedef SizeClassAllocator64<AP64> Allocator64;
+typedef SizeClassAllocator64<AP64Dyn> Allocator64Dynamic;
+typedef SizeClassAllocator64<AP64Compact> Allocator64Compact;
+typedef SizeClassAllocator64<AP64VeryCompact> Allocator64VeryCompact;
#elif defined(__mips64)
static const u64 kAddressSpaceSize = 1ULL << 40;
#elif defined(__aarch64__)
@@ -70,7 +118,7 @@ typedef SizeClassAllocator32<
template <class SizeClassMap>
void TestSizeClassMap() {
typedef SizeClassMap SCMap;
- // SCMap::Print();
+ SCMap::Print();
SCMap::Validate();
}
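Pulling the changes in this file together: SizeClassAllocator64 now takes a single allocator-params struct instead of a list of template arguments, and Init() takes a release-to-OS interval (these tests pass kReleaseToOSIntervalNever). A minimal sketch in the context of this test file, restating only names the patch itself introduces; the MyAP64 and AllocateAndFreeOnce names are hypothetical:

    struct MyAP64 {  // mirrors the AP64 params struct defined above
      static const uptr kSpaceBeg = ~(uptr)0;   // dynamic base address
      static const uptr kSpaceSize = 0x8000000000ULL;
      static const uptr kMetadataSize = 16;
      typedef DefaultSizeClassMap SizeClassMap;
      typedef NoOpMapUnmapCallback MapUnmapCallback;
      static const uptr kFlags = 0;
    };
    typedef SizeClassAllocator64<MyAP64> MyAllocator;

    void AllocateAndFreeOnce() {
      MyAllocator *a = new MyAllocator;
      a->Init(kReleaseToOSIntervalNever);  // new Init() signature
      SizeClassAllocatorLocalCache<MyAllocator> cache;
      memset(&cache, 0, sizeof(cache));
      cache.Init(0);
      void *p = cache.Allocate(a, /*class_id=*/1);
      cache.Deallocate(a, /*class_id=*/1, p);
      cache.Drain(a);
      a->TestOnlyUnmap();
      delete a;
    }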
@@ -82,6 +130,10 @@ TEST(SanitizerCommon, CompactSizeClassMap) {
TestSizeClassMap<CompactSizeClassMap>();
}
+TEST(SanitizerCommon, VeryCompactSizeClassMap) {
+ TestSizeClassMap<VeryCompactSizeClassMap>();
+}
+
TEST(SanitizerCommon, InternalSizeClassMap) {
TestSizeClassMap<InternalSizeClassMap>();
}
@@ -89,13 +141,15 @@ TEST(SanitizerCommon, InternalSizeClassMap) {
template <class Allocator>
void TestSizeClassAllocator() {
Allocator *a = new Allocator;
- a->Init();
+ a->Init(kReleaseToOSIntervalNever);
SizeClassAllocatorLocalCache<Allocator> cache;
memset(&cache, 0, sizeof(cache));
cache.Init(0);
- static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
- 50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};
+ static const uptr sizes[] = {
+ 1, 16, 30, 40, 100, 1000, 10000,
+ 50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000
+ };
std::vector<void *> allocated;
@@ -154,15 +208,29 @@ void TestSizeClassAllocator() {
}
#if SANITIZER_CAN_USE_ALLOCATOR64
+// These tests can fail on Windows if memory is somewhat full and lit happens
+// to run them all at the same time. FIXME: Make them not flaky and reenable.
+#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64) {
TestSizeClassAllocator<Allocator64>();
}
+TEST(SanitizerCommon, SizeClassAllocator64Dynamic) {
+ TestSizeClassAllocator<Allocator64Dynamic>();
+}
+
+#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64Compact) {
TestSizeClassAllocator<Allocator64Compact>();
}
#endif
+TEST(SanitizerCommon, SizeClassAllocator64VeryCompact) {
+ TestSizeClassAllocator<Allocator64VeryCompact>();
+}
+#endif
+#endif
+
TEST(SanitizerCommon, SizeClassAllocator32Compact) {
TestSizeClassAllocator<Allocator32Compact>();
}
@@ -170,7 +238,7 @@ TEST(SanitizerCommon, SizeClassAllocator32Compact) {
template <class Allocator>
void SizeClassAllocatorMetadataStress() {
Allocator *a = new Allocator;
- a->Init();
+ a->Init(kReleaseToOSIntervalNever);
SizeClassAllocatorLocalCache<Allocator> cache;
memset(&cache, 0, sizeof(cache));
cache.Init(0);
@@ -179,7 +247,7 @@ void SizeClassAllocatorMetadataStress() {
void *allocated[kNumAllocs];
void *meta[kNumAllocs];
for (uptr i = 0; i < kNumAllocs; i++) {
- void *x = cache.Allocate(a, 1 + i % 50);
+ void *x = cache.Allocate(a, 1 + i % (Allocator::kNumClasses - 1));
allocated[i] = x;
meta[i] = a->GetMetaData(x);
}
@@ -190,7 +258,7 @@ void SizeClassAllocatorMetadataStress() {
EXPECT_EQ(m, meta[idx]);
}
for (uptr i = 0; i < kNumAllocs; i++) {
- cache.Deallocate(a, 1 + i % 50, allocated[i]);
+ cache.Deallocate(a, 1 + i % (Allocator::kNumClasses - 1), allocated[i]);
}
a->TestOnlyUnmap();
@@ -198,31 +266,41 @@ void SizeClassAllocatorMetadataStress() {
}
#if SANITIZER_CAN_USE_ALLOCATOR64
+// These tests can fail on Windows if memory is somewhat full and lit happens
+// to run them all at the same time. FIXME: Make them not flaky and reenable.
+#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
SizeClassAllocatorMetadataStress<Allocator64>();
}
+TEST(SanitizerCommon, SizeClassAllocator64DynamicMetadataStress) {
+ SizeClassAllocatorMetadataStress<Allocator64Dynamic>();
+}
+
+#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
+#endif
+
+#endif
#endif // SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
SizeClassAllocatorMetadataStress<Allocator32Compact>();
}
template <class Allocator>
-void SizeClassAllocatorGetBlockBeginStress() {
+void SizeClassAllocatorGetBlockBeginStress(u64 TotalSize) {
Allocator *a = new Allocator;
- a->Init();
+ a->Init(kReleaseToOSIntervalNever);
SizeClassAllocatorLocalCache<Allocator> cache;
memset(&cache, 0, sizeof(cache));
cache.Init(0);
- uptr max_size_class = Allocator::kNumClasses - 1;
+ uptr max_size_class = Allocator::SizeClassMapT::kLargestClassID;
uptr size = Allocator::SizeClassMapT::Size(max_size_class);
- u64 G8 = 1ULL << 33;
// Make sure we correctly compute GetBlockBegin() w/o overflow.
- for (size_t i = 0; i <= G8 / size; i++) {
+ for (size_t i = 0; i <= TotalSize / size; i++) {
void *x = cache.Allocate(a, max_size_class);
void *beg = a->GetBlockBegin(x);
// if ((i & (i - 1)) == 0)
@@ -235,15 +313,30 @@ void SizeClassAllocatorGetBlockBeginStress() {
}
#if SANITIZER_CAN_USE_ALLOCATOR64
+// These tests can fail on Windows if memory is somewhat full and lit happens
+// to run them all at the same time. FIXME: Make them not flaky and reenable.
+#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
- SizeClassAllocatorGetBlockBeginStress<Allocator64>();
+ SizeClassAllocatorGetBlockBeginStress<Allocator64>(
+ 1ULL << (SANITIZER_ANDROID ? 31 : 33));
}
+TEST(SanitizerCommon, SizeClassAllocator64DynamicGetBlockBegin) {
+ SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
+ 1ULL << (SANITIZER_ANDROID ? 31 : 33));
+}
+#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
- SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>();
+ SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>(1ULL << 33);
+}
+#endif
+TEST(SanitizerCommon, SizeClassAllocator64VeryCompactGetBlockBegin) {
+ // Does not have > 4Gb for each class.
+ SizeClassAllocatorGetBlockBeginStress<Allocator64VeryCompact>(1ULL << 31);
}
TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
- SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>();
+ SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>(1ULL << 33);
}
+#endif
#endif // SANITIZER_CAN_USE_ALLOCATOR64
struct TestMapUnmapCallback {
@@ -255,27 +348,42 @@ int TestMapUnmapCallback::map_count;
int TestMapUnmapCallback::unmap_count;
#if SANITIZER_CAN_USE_ALLOCATOR64
+// These tests can fail on Windows if memory is somewhat full and lit happens
+// to run them all at the same time. FIXME: Make them not flaky and reenable.
+#if !SANITIZER_WINDOWS
+
+struct AP64WithCallback {
+ static const uptr kSpaceBeg = kAllocatorSpace;
+ static const uptr kSpaceSize = kAllocatorSize;
+ static const uptr kMetadataSize = 16;
+ typedef ::SizeClassMap SizeClassMap;
+ typedef TestMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+
TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
TestMapUnmapCallback::map_count = 0;
TestMapUnmapCallback::unmap_count = 0;
- typedef SizeClassAllocator64<
- kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap,
- TestMapUnmapCallback> Allocator64WithCallBack;
+ typedef SizeClassAllocator64<AP64WithCallback> Allocator64WithCallBack;
Allocator64WithCallBack *a = new Allocator64WithCallBack;
- a->Init();
+ a->Init(kReleaseToOSIntervalNever);
EXPECT_EQ(TestMapUnmapCallback::map_count, 1); // Allocator state.
SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
memset(&cache, 0, sizeof(cache));
cache.Init(0);
AllocatorStats stats;
stats.Init();
- a->AllocateBatch(&stats, &cache, 32);
- EXPECT_EQ(TestMapUnmapCallback::map_count, 3); // State + alloc + metadata.
+ const size_t kNumChunks = 128;
+ uint32_t chunks[kNumChunks];
+ a->GetFromAllocator(&stats, 30, chunks, kNumChunks);
+ // State + alloc + metadata + freearray.
+ EXPECT_EQ(TestMapUnmapCallback::map_count, 4);
a->TestOnlyUnmap();
EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1); // The whole thing.
delete a;
}
#endif
+#endif
TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
TestMapUnmapCallback::map_count = 0;
@@ -289,7 +397,7 @@ TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
TestMapUnmapCallback>
Allocator32WithCallBack;
Allocator32WithCallBack *a = new Allocator32WithCallBack;
- a->Init();
+ a->Init(kReleaseToOSIntervalNever);
EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
SizeClassAllocatorLocalCache<Allocator32WithCallBack> cache;
memset(&cache, 0, sizeof(cache));
@@ -322,20 +430,24 @@ TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
template<class Allocator>
void FailInAssertionOnOOM() {
Allocator a;
- a.Init();
+ a.Init(kReleaseToOSIntervalNever);
SizeClassAllocatorLocalCache<Allocator> cache;
memset(&cache, 0, sizeof(cache));
cache.Init(0);
AllocatorStats stats;
stats.Init();
+ const size_t kNumChunks = 128;
+ uint32_t chunks[kNumChunks];
for (int i = 0; i < 1000000; i++) {
- a.AllocateBatch(&stats, &cache, 52);
+ a.GetFromAllocator(&stats, 52, chunks, kNumChunks);
}
a.TestOnlyUnmap();
}
-#if SANITIZER_CAN_USE_ALLOCATOR64
+// Don't test OOM conditions on Win64 because it causes other tests on the same
+// machine to OOM.
+#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
}
@@ -390,8 +502,10 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
}
CHECK_EQ(a.TotalMemoryUsed(), 0);
- // Test alignments.
- uptr max_alignment = SANITIZER_WORDSIZE == 64 ? (1 << 28) : (1 << 24);
+ // Test alignments. Test with 512MB alignment on x64 non-Windows machines.
+ // Windows doesn't overcommit, and many machines do not have 51.2GB of swap.
+ uptr max_alignment =
+ (SANITIZER_WORDSIZE == 64 && !SANITIZER_WINDOWS) ? (1 << 28) : (1 << 24);
for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
const uptr kNumAlignedAllocs = 100;
for (uptr i = 0; i < kNumAlignedAllocs; i++) {
@@ -424,7 +538,7 @@ void TestCombinedAllocator() {
CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
Allocator;
Allocator *a = new Allocator;
- a->Init(/* may_return_null */ true);
+ a->Init(/* may_return_null */ true, kReleaseToOSIntervalNever);
AllocatorCache cache;
memset(&cache, 0, sizeof(cache));
@@ -480,6 +594,13 @@ TEST(SanitizerCommon, CombinedAllocator64) {
SizeClassAllocatorLocalCache<Allocator64> > ();
}
+TEST(SanitizerCommon, CombinedAllocator64Dynamic) {
+ TestCombinedAllocator<Allocator64Dynamic,
+ LargeMmapAllocator<>,
+ SizeClassAllocatorLocalCache<Allocator64Dynamic> > ();
+}
+
+#if !SANITIZER_ANDROID
TEST(SanitizerCommon, CombinedAllocator64Compact) {
TestCombinedAllocator<Allocator64Compact,
LargeMmapAllocator<>,
@@ -487,6 +608,13 @@ TEST(SanitizerCommon, CombinedAllocator64Compact) {
}
#endif
+TEST(SanitizerCommon, CombinedAllocator64VeryCompact) {
+ TestCombinedAllocator<Allocator64VeryCompact,
+ LargeMmapAllocator<>,
+ SizeClassAllocatorLocalCache<Allocator64VeryCompact> > ();
+}
+#endif
+
TEST(SanitizerCommon, CombinedAllocator32Compact) {
TestCombinedAllocator<Allocator32Compact,
LargeMmapAllocator<>,
@@ -499,7 +627,7 @@ void TestSizeClassAllocatorLocalCache() {
typedef typename AllocatorCache::Allocator Allocator;
Allocator *a = new Allocator();
- a->Init();
+ a->Init(kReleaseToOSIntervalNever);
memset(&cache, 0, sizeof(cache));
cache.Init(0);
@@ -528,16 +656,31 @@ void TestSizeClassAllocatorLocalCache() {
}
#if SANITIZER_CAN_USE_ALLOCATOR64
+// These tests can fail on Windows if memory is somewhat full and lit happens
+// to run them all at the same time. FIXME: Make them not flaky and reenable.
+#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
TestSizeClassAllocatorLocalCache<
SizeClassAllocatorLocalCache<Allocator64> >();
}
+TEST(SanitizerCommon, SizeClassAllocator64DynamicLocalCache) {
+ TestSizeClassAllocatorLocalCache<
+ SizeClassAllocatorLocalCache<Allocator64Dynamic> >();
+}
+
+#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
TestSizeClassAllocatorLocalCache<
SizeClassAllocatorLocalCache<Allocator64Compact> >();
}
#endif
+TEST(SanitizerCommon, SizeClassAllocator64VeryCompactLocalCache) {
+ TestSizeClassAllocatorLocalCache<
+ SizeClassAllocatorLocalCache<Allocator64VeryCompact> >();
+}
+#endif
+#endif
TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
TestSizeClassAllocatorLocalCache<
@@ -559,7 +702,7 @@ void *AllocatorLeakTestWorker(void *arg) {
TEST(SanitizerCommon, AllocatorLeakTest) {
typedef AllocatorCache::Allocator Allocator;
Allocator a;
- a.Init();
+ a.Init(kReleaseToOSIntervalNever);
uptr total_used_memory = 0;
for (int i = 0; i < 100; i++) {
pthread_t t;
@@ -592,7 +735,7 @@ static void *DeallocNewThreadWorker(void *arg) {
// able to call Deallocate on a zeroed cache, and it will self-initialize.
TEST(Allocator, AllocatorCacheDeallocNewThread) {
AllocatorCache::Allocator allocator;
- allocator.Init();
+ allocator.Init(kReleaseToOSIntervalNever);
AllocatorCache main_cache;
AllocatorCache child_cache;
memset(&main_cache, 0, sizeof(main_cache));
@@ -663,7 +806,7 @@ void IterationTestCallback(uptr chunk, void *arg) {
template <class Allocator>
void TestSizeClassAllocatorIteration() {
Allocator *a = new Allocator;
- a->Init();
+ a->Init(kReleaseToOSIntervalNever);
SizeClassAllocatorLocalCache<Allocator> cache;
memset(&cache, 0, sizeof(cache));
cache.Init(0);
@@ -703,9 +846,16 @@ void TestSizeClassAllocatorIteration() {
}
#if SANITIZER_CAN_USE_ALLOCATOR64
+// These tests can fail on Windows if memory is somewhat full and lit happens
+// to run them all at the same time. FIXME: Make them not flaky and reenable.
+#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
TestSizeClassAllocatorIteration<Allocator64>();
}
+TEST(SanitizerCommon, SizeClassAllocator64DynamicIteration) {
+ TestSizeClassAllocatorIteration<Allocator64Dynamic>();
+}
+#endif
#endif
TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
@@ -777,33 +927,60 @@ TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
}
-#if SANITIZER_CAN_USE_ALLOCATOR64
+// Don't test OOM conditions on Win64 because it causes other tests on the same
+// machine to OOM.
+#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
+typedef SizeClassMap<3, 4, 8, 63, 128, 16> SpecialSizeClassMap;
+struct AP64_SpecialSizeClassMap {
+ static const uptr kSpaceBeg = kAllocatorSpace;
+ static const uptr kSpaceSize = kAllocatorSize;
+ static const uptr kMetadataSize = 0;
+ typedef SpecialSizeClassMap SizeClassMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+
// Regression test for out-of-memory condition in PopulateFreeList().
TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
// In a world where regions are small and chunks are huge...
- typedef SizeClassMap<63, 128, 16> SpecialSizeClassMap;
- typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
- SpecialSizeClassMap> SpecialAllocator64;
+ typedef SizeClassAllocator64<AP64_SpecialSizeClassMap> SpecialAllocator64;
const uptr kRegionSize =
kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
SpecialAllocator64 *a = new SpecialAllocator64;
- a->Init();
+ a->Init(kReleaseToOSIntervalNever);
SizeClassAllocatorLocalCache<SpecialAllocator64> cache;
memset(&cache, 0, sizeof(cache));
cache.Init(0);
// ...one man is on a mission to overflow a region with a series of
// successive allocations.
+
const uptr kClassID = 107;
- const uptr kAllocationSize = DefaultSizeClassMap::Size(kClassID);
+ const uptr kAllocationSize = SpecialSizeClassMap::Size(kClassID);
ASSERT_LT(2 * kAllocationSize, kRegionSize);
ASSERT_GT(3 * kAllocationSize, kRegionSize);
cache.Allocate(a, kClassID);
EXPECT_DEATH(cache.Allocate(a, kClassID) && cache.Allocate(a, kClassID),
"The process has exhausted");
+
+ const uptr Class2 = 100;
+ const uptr Size2 = SpecialSizeClassMap::Size(Class2);
+ ASSERT_EQ(Size2 * 8, kRegionSize);
+ char *p[7];
+ for (int i = 0; i < 7; i++) {
+ p[i] = (char*)cache.Allocate(a, Class2);
+ fprintf(stderr, "p[%d] %p s = %lx\n", i, (void*)p[i], Size2);
+ p[i][Size2 - 1] = 42;
+ if (i) ASSERT_LT(p[i - 1], p[i]);
+ }
+ EXPECT_DEATH(cache.Allocate(a, Class2), "The process has exhausted");
+ cache.Deallocate(a, Class2, p[0]);
+ cache.Drain(a);
+ ASSERT_EQ(p[6][Size2 - 1], 42);
a->TestOnlyUnmap();
delete a;
}
+
#endif
TEST(SanitizerCommon, TwoLevelByteMap) {
diff --git a/lib/sanitizer_common/tests/sanitizer_allocator_testlib.cc b/lib/sanitizer_common/tests/sanitizer_allocator_testlib.cc
index 038d9c543bb7..c6dd3c4bb20c 100644
--- a/lib/sanitizer_common/tests/sanitizer_allocator_testlib.cc
+++ b/lib/sanitizer_common/tests/sanitizer_allocator_testlib.cc
@@ -14,6 +14,7 @@
clang++ -std=c++11 -fno-exceptions -g -fPIC -I. -I../include -Isanitizer \
sanitizer_common/tests/sanitizer_allocator_testlib.cc \
$(\ls sanitizer_common/sanitizer_*.cc | grep -v sanitizer_common_nolibc.cc) \
+ sanitizer_common/sanitizer_linux_x86_64.S \
-shared -lpthread -o testmalloc.so
LD_PRELOAD=`pwd`/testmalloc.so /your/app
*/
@@ -33,13 +34,22 @@ LD_PRELOAD=`pwd`/testmalloc.so /your/app
# define SANITIZER_FREE_HOOK(p)
#endif
-namespace {
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x10000000000ULL; // 1T.
-// typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
-typedef SizeClassAllocator64<~(uptr)0, kAllocatorSize, 0,
- CompactSizeClassMap> PrimaryAllocator;
+struct __AP64 {
+ static const uptr kSpaceBeg = ~(uptr)0;
+ static const uptr kSpaceSize = kAllocatorSize;
+ static const uptr kMetadataSize = 0;
+ typedef CompactSizeClassMap SizeClassMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags =
+ SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
+};
+
+namespace {
+
+typedef SizeClassAllocator64<__AP64> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
@@ -59,6 +69,25 @@ static void thread_dtor(void *v) {
allocator.SwallowCache(&cache);
}
+static size_t GetRss() {
+ if (FILE *f = fopen("/proc/self/statm", "r")) {
+ size_t size = 0, rss = 0;
+ fscanf(f, "%zd %zd", &size, &rss);
+ fclose(f);
+ return rss << 12; // rss is in pages.
+ }
+ return 0;
+}
+
+struct AtExit {
+ ~AtExit() {
+ allocator.PrintStats();
+ Printf("RSS: %zdM\n", GetRss() >> 20);
+ }
+};
+
+static AtExit at_exit;
+
static void NOINLINE thread_init() {
if (!global_inited) {
global_inited = true;
diff --git a/lib/sanitizer_common/tests/sanitizer_common_test.cc b/lib/sanitizer_common/tests/sanitizer_common_test.cc
index 6fc308ad14d4..ebc885db7525 100644
--- a/lib/sanitizer_common/tests/sanitizer_common_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_common_test.cc
@@ -10,6 +10,8 @@
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//
+#include <algorithm>
+
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
@@ -170,15 +172,54 @@ bool UptrLess(uptr a, uptr b) {
return a < b;
}
-TEST(SanitizerCommon, InternalBinarySearch) {
+TEST(SanitizerCommon, InternalLowerBound) {
static const uptr kSize = 5;
- uptr arr[kSize];
- for (uptr i = 0; i < kSize; i++) arr[i] = i * i;
+ int arr[kSize];
+ arr[0] = 1;
+ arr[1] = 3;
+ arr[2] = 5;
+ arr[3] = 7;
+ arr[4] = 11;
+
+ EXPECT_EQ(0u, InternalLowerBound(arr, 0, kSize, 0, UptrLess));
+ EXPECT_EQ(0u, InternalLowerBound(arr, 0, kSize, 1, UptrLess));
+ EXPECT_EQ(1u, InternalLowerBound(arr, 0, kSize, 2, UptrLess));
+ EXPECT_EQ(1u, InternalLowerBound(arr, 0, kSize, 3, UptrLess));
+ EXPECT_EQ(2u, InternalLowerBound(arr, 0, kSize, 4, UptrLess));
+ EXPECT_EQ(2u, InternalLowerBound(arr, 0, kSize, 5, UptrLess));
+ EXPECT_EQ(3u, InternalLowerBound(arr, 0, kSize, 6, UptrLess));
+ EXPECT_EQ(3u, InternalLowerBound(arr, 0, kSize, 7, UptrLess));
+ EXPECT_EQ(4u, InternalLowerBound(arr, 0, kSize, 8, UptrLess));
+ EXPECT_EQ(4u, InternalLowerBound(arr, 0, kSize, 9, UptrLess));
+ EXPECT_EQ(4u, InternalLowerBound(arr, 0, kSize, 10, UptrLess));
+ EXPECT_EQ(4u, InternalLowerBound(arr, 0, kSize, 11, UptrLess));
+ EXPECT_EQ(5u, InternalLowerBound(arr, 0, kSize, 12, UptrLess));
+}
- for (uptr i = 0; i < kSize; i++)
- ASSERT_EQ(InternalBinarySearch(arr, 0, kSize, i * i, UptrLess), i);
+TEST(SanitizerCommon, InternalLowerBoundVsStdLowerBound) {
+ std::vector<int> data;
+ auto create_item = [] (size_t i, size_t j) {
+ auto v = i * 10000 + j;
+ return ((v << 6) + (v >> 6) + 0x9e3779b9) % 100;
+ };
+ for (size_t i = 0; i < 1000; ++i) {
+ data.resize(i);
+ for (size_t j = 0; j < i; ++j) {
+ data[j] = create_item(i, j);
+ }
- ASSERT_EQ(InternalBinarySearch(arr, 0, kSize, 7, UptrLess), kSize + 1);
+ std::sort(data.begin(), data.end());
+
+ for (size_t j = 0; j < i; ++j) {
+ int val = create_item(i, j);
+ for (auto to_find : {val - 1, val, val + 1}) {
+ uptr expected =
+ std::lower_bound(data.begin(), data.end(), to_find) - data.begin();
+ EXPECT_EQ(expected, InternalLowerBound(data.data(), 0, data.size(),
+ to_find, std::less<int>()));
+ }
+ }
+ }
}
#if SANITIZER_LINUX && !SANITIZER_ANDROID
diff --git a/lib/sanitizer_common/tests/sanitizer_format_interceptor_test.cc b/lib/sanitizer_common/tests/sanitizer_format_interceptor_test.cc
index 13918aff1009..2f0494f82b0a 100644
--- a/lib/sanitizer_common/tests/sanitizer_format_interceptor_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_format_interceptor_test.cc
@@ -256,4 +256,8 @@ TEST(SanitizerCommonInterceptors, Printf) {
// Checks for wide-character strings are not implemented yet.
testPrintf("%ls", 1, 0);
+
+ testPrintf("%m", 0);
+ testPrintf("%m%s", 1, test_buf_size);
+ testPrintf("%s%m%s", 2, test_buf_size, test_buf_size);
}
diff --git a/lib/sanitizer_common/tests/sanitizer_libc_test.cc b/lib/sanitizer_common/tests/sanitizer_libc_test.cc
index 015e32a09e37..625257622bf2 100644
--- a/lib/sanitizer_common/tests/sanitizer_libc_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_libc_test.cc
@@ -25,6 +25,8 @@
# include "sanitizer_common/sanitizer_posix.h"
#endif
+using namespace __sanitizer;
+
// A regression test for internal_memmove() implementation.
TEST(SanitizerCommon, InternalMemmoveRegression) {
char src[] = "Hello World";
diff --git a/lib/sanitizer_common/tests/sanitizer_nolibc_test_main.cc b/lib/sanitizer_common/tests/sanitizer_nolibc_test_main.cc
index 72df621d07ff..e761f00c56fc 100644
--- a/lib/sanitizer_common/tests/sanitizer_nolibc_test_main.cc
+++ b/lib/sanitizer_common/tests/sanitizer_nolibc_test_main.cc
@@ -15,5 +15,5 @@
#include "sanitizer_common/sanitizer_libc.h"
extern "C" void _start() {
- internal__exit(0);
+ __sanitizer::internal__exit(0);
}
diff --git a/lib/sanitizer_common/tests/sanitizer_procmaps_test.cc b/lib/sanitizer_common/tests/sanitizer_procmaps_test.cc
index ae7c5d531ae7..4ac55c706d6c 100644
--- a/lib/sanitizer_common/tests/sanitizer_procmaps_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_procmaps_test.cc
@@ -52,5 +52,26 @@ TEST(MemoryMappingLayout, DumpListOfModules) {
EXPECT_TRUE(found);
}
+TEST(MemoryMapping, LoadedModuleArchAndUUID) {
+ if (SANITIZER_MAC) {
+ MemoryMappingLayout memory_mapping(false);
+ const uptr kMaxModules = 100;
+ InternalMmapVector<LoadedModule> modules(kMaxModules);
+ memory_mapping.DumpListOfModules(&modules);
+ for (uptr i = 0; i < modules.size(); ++i) {
+ ModuleArch arch = modules[i].arch();
+ // Darwin unit tests are only run on i386/x86_64/x86_64h.
+ if (SANITIZER_WORDSIZE == 32) {
+ EXPECT_EQ(arch, kModuleArchI386);
+ } else if (SANITIZER_WORDSIZE == 64) {
+ EXPECT_TRUE(arch == kModuleArchX86_64 || arch == kModuleArchX86_64H);
+ }
+ const u8 *uuid = modules[i].uuid();
+ u8 null_uuid[kModuleUUIDSize] = {0};
+ EXPECT_NE(memcmp(null_uuid, uuid, kModuleUUIDSize), 0);
+ }
+ }
+}
+
} // namespace __sanitizer
#endif // !defined(_WIN32)
diff --git a/lib/sanitizer_common/tests/sanitizer_test_main.cc b/lib/sanitizer_common/tests/sanitizer_test_main.cc
index 20f8f53975d0..0da886120c31 100644
--- a/lib/sanitizer_common/tests/sanitizer_test_main.cc
+++ b/lib/sanitizer_common/tests/sanitizer_test_main.cc
@@ -19,6 +19,6 @@ int main(int argc, char **argv) {
argv0 = argv[0];
testing::GTEST_FLAG(death_test_style) = "threadsafe";
testing::InitGoogleTest(&argc, argv);
- SetCommonFlagsDefaults();
+ __sanitizer::SetCommonFlagsDefaults();
return RUN_ALL_TESTS();
}
diff --git a/lib/scudo/CMakeLists.txt b/lib/scudo/CMakeLists.txt
index 6cbb85f83255..332c3a972e6f 100644
--- a/lib/scudo/CMakeLists.txt
+++ b/lib/scudo/CMakeLists.txt
@@ -1,11 +1,10 @@
-add_custom_target(scudo)
-set_target_properties(scudo PROPERTIES FOLDER "Compiler-RT Misc")
+add_compiler_rt_component(scudo)
include_directories(..)
set(SCUDO_CFLAGS ${SANITIZER_COMMON_CFLAGS})
append_rtti_flag(OFF SCUDO_CFLAGS)
-list(APPEND SCUDO_CFLAGS -msse4.2 -mcx16)
+append_list_if(COMPILER_RT_HAS_MSSE4_2_FLAG -msse4.2 SCUDO_CFLAGS)
set(SCUDO_SOURCES
scudo_allocator.cpp
@@ -28,6 +27,3 @@ if(COMPILER_RT_HAS_SCUDO)
PARENT_TARGET scudo)
endforeach()
endif()
-
-add_dependencies(compiler-rt scudo)
-
diff --git a/lib/scudo/scudo_allocator.cpp b/lib/scudo/scudo_allocator.cpp
index 3ad499aed102..96cfbdbc1af7 100644
--- a/lib/scudo/scudo_allocator.cpp
+++ b/lib/scudo/scudo_allocator.cpp
@@ -22,23 +22,59 @@
#include <limits.h>
#include <pthread.h>
-#include <smmintrin.h>
-#include <atomic>
#include <cstring>
+// Hardware CRC32 is supported at compilation via the following:
+// - for i386 & x86_64: -msse4.2
+// - for ARM & AArch64: -march=armv8-a+crc
+// An additional check must be performed at runtime as well to make sure the
+// emitted instructions are valid on the target host.
+#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+# ifdef __SSE4_2__
+# include <smmintrin.h>
+# define HW_CRC32 FIRST_32_SECOND_64(_mm_crc32_u32, _mm_crc32_u64)
+# endif
+# ifdef __ARM_FEATURE_CRC32
+# include <arm_acle.h>
+# define HW_CRC32 FIRST_32_SECOND_64(__crc32cw, __crc32cd)
+# endif
+#endif
+
namespace __scudo {
+#if SANITIZER_CAN_USE_ALLOCATOR64
const uptr AllocatorSpace = ~0ULL;
-const uptr AllocatorSize = 0x10000000000ULL;
-const uptr MinAlignmentLog = 4; // 16 bytes for x64
-const uptr MaxAlignmentLog = 24;
-
+const uptr AllocatorSize = 0x40000000000ULL;
typedef DefaultSizeClassMap SizeClassMap;
-typedef SizeClassAllocator64<AllocatorSpace, AllocatorSize, 0, SizeClassMap>
- PrimaryAllocator;
+struct AP {
+ static const uptr kSpaceBeg = AllocatorSpace;
+ static const uptr kSpaceSize = AllocatorSize;
+ static const uptr kMetadataSize = 0;
+ typedef __scudo::SizeClassMap SizeClassMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags =
+ SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
+};
+typedef SizeClassAllocator64<AP> PrimaryAllocator;
+#else
+// Currently, the 32-bit Sanitizer allocator has not yet benefited from all the
+// security improvements brought to the 64-bit one. This makes the 32-bit
+// version of Scudo slightly less hardened.
+static const uptr RegionSizeLog = 20;
+static const uptr NumRegions = SANITIZER_MMAP_RANGE_SIZE >> RegionSizeLog;
+# if SANITIZER_WORDSIZE == 32
+typedef FlatByteMap<NumRegions> ByteMap;
+# elif SANITIZER_WORDSIZE == 64
+typedef TwoLevelByteMap<(NumRegions >> 12), 1 << 12> ByteMap;
+# endif // SANITIZER_WORDSIZE
+typedef DefaultSizeClassMap SizeClassMap;
+typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 0, SizeClassMap,
+ RegionSizeLog, ByteMap> PrimaryAllocator;
+#endif // SANITIZER_CAN_USE_ALLOCATOR64
+
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
-typedef LargeMmapAllocator<> SecondaryAllocator;
+typedef ScudoLargeMmapAllocator SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
ScudoAllocator;
@@ -46,67 +82,75 @@ static ScudoAllocator &getAllocator();
static thread_local Xorshift128Plus Prng;
// Global static cookie, initialized at start-up.
-static u64 Cookie;
+static uptr Cookie;
-enum ChunkState : u8 {
- ChunkAvailable = 0,
- ChunkAllocated = 1,
- ChunkQuarantine = 2
+enum : u8 {
+ CRC32Software = 0,
+ CRC32Hardware = 1,
};
-
-typedef unsigned __int128 PackedHeader;
-typedef std::atomic<PackedHeader> AtomicPackedHeader;
-
-// Our header requires 128-bit of storage on x64 (the only platform supported
-// as of now), which fits nicely with the alignment requirements.
-// Having the offset saves us from using functions such as GetBlockBegin, that
-// is fairly costly. Our first implementation used the MetaData as well, which
-// offers the advantage of being stored away from the chunk itself, but
-// accessing it was costly as well.
-// The header will be atomically loaded and stored using the 16-byte primitives
-// offered by the platform (likely requires cmpxchg16b support).
-struct UnpackedHeader {
- // 1st 8 bytes
- u16 Checksum : 16;
- u64 RequestedSize : 40; // Needed for reallocation purposes.
- u8 State : 2; // available, allocated, or quarantined
- u8 AllocType : 2; // malloc, new, new[], or memalign
- u8 Unused_0_ : 4;
- // 2nd 8 bytes
- u64 Offset : 20; // Offset from the beginning of the backend
- // allocation to the beginning chunk itself, in
- // multiples of MinAlignment. See comment about its
- // maximum value and test in Initialize.
- u64 Unused_1_ : 28;
- u16 Salt : 16;
-};
-
-COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));
-
-const uptr ChunkHeaderSize = sizeof(PackedHeader);
+// We default to software CRC32 if the alternatives are not supported, either
+// at compilation or at runtime.
+static atomic_uint8_t HashAlgorithm = { CRC32Software };
+
+// Helper function that will compute the chunk checksum, being passed all
+// the needed information as uptrs. It will opt for the hardware version of
+// the checksumming function if available.
+INLINE u32 hashUptrs(uptr Pointer, uptr *Array, uptr ArraySize, u8 HashType) {
+ u32 Crc;
+#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+ if (HashType == CRC32Hardware) {
+ Crc = HW_CRC32(Cookie, Pointer);
+ for (uptr i = 0; i < ArraySize; i++)
+ Crc = HW_CRC32(Crc, Array[i]);
+ return Crc;
+ }
+#endif
+ Crc = computeCRC32(Cookie, Pointer);
+ for (uptr i = 0; i < ArraySize; i++)
+ Crc = computeCRC32(Crc, Array[i]);
+ return Crc;
+}
struct ScudoChunk : UnpackedHeader {
// We can't use the offset member of the chunk itself, as we would double
   // fetch it without any guarantee that it hadn't been tampered with. To
// prevent this, we work with a local copy of the header.
- void *AllocBeg(UnpackedHeader *Header) {
+ void *getAllocBeg(UnpackedHeader *Header) {
return reinterpret_cast<void *>(
reinterpret_cast<uptr>(this) - (Header->Offset << MinAlignmentLog));
}
- // CRC32 checksum of the Chunk pointer and its ChunkHeader.
- // It currently uses the Intel Nehalem SSE4.2 crc32 64-bit instruction.
- u16 Checksum(UnpackedHeader *Header) const {
- u64 HeaderHolder[2];
- memcpy(HeaderHolder, Header, sizeof(HeaderHolder));
- u64 Crc = _mm_crc32_u64(Cookie, reinterpret_cast<uptr>(this));
- // This is somewhat of a shortcut. The checksum is stored in the 16 least
- // significant bits of the first 8 bytes of the header, hence zero-ing
- // those bits out. It would be more valid to zero the checksum field of the
- // UnpackedHeader, but would require holding an additional copy of it.
- Crc = _mm_crc32_u64(Crc, HeaderHolder[0] & 0xffffffffffff0000ULL);
- Crc = _mm_crc32_u64(Crc, HeaderHolder[1]);
- return static_cast<u16>(Crc);
+  // Returns the usable size for a chunk, meaning the number of bytes from the
+  // beginning of the user data to the end of the backend allocated chunk.
+ uptr getUsableSize(UnpackedHeader *Header) {
+ uptr Size = getAllocator().GetActuallyAllocatedSize(getAllocBeg(Header));
+ if (Size == 0)
+ return Size;
+ return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog);
+ }
+
+ // Compute the checksum of the Chunk pointer and its ChunkHeader.
+ u16 computeChecksum(UnpackedHeader *Header) const {
+ UnpackedHeader ZeroChecksumHeader = *Header;
+ ZeroChecksumHeader.Checksum = 0;
+ uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
+ memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
+ u32 Hash = hashUptrs(reinterpret_cast<uptr>(this),
+ HeaderHolder,
+ ARRAY_SIZE(HeaderHolder),
+ atomic_load_relaxed(&HashAlgorithm));
+ return static_cast<u16>(Hash);
+ }
+
+ // Checks the validity of a chunk by verifying its checksum.
+ bool isValid() {
+ UnpackedHeader NewUnpackedHeader;
+ const AtomicPackedHeader *AtomicHeader =
+ reinterpret_cast<const AtomicPackedHeader *>(this);
+ PackedHeader NewPackedHeader =
+ AtomicHeader->load(std::memory_order_relaxed);
+ NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
+ return (NewUnpackedHeader.Checksum == computeChecksum(&NewUnpackedHeader));
}
// Loads and unpacks the header, verifying the checksum in the process.
@@ -116,16 +160,14 @@ struct ScudoChunk : UnpackedHeader {
PackedHeader NewPackedHeader =
AtomicHeader->load(std::memory_order_relaxed);
*NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
- if ((NewUnpackedHeader->Unused_0_ != 0) ||
- (NewUnpackedHeader->Unused_1_ != 0) ||
- (NewUnpackedHeader->Checksum != Checksum(NewUnpackedHeader))) {
+ if (NewUnpackedHeader->Checksum != computeChecksum(NewUnpackedHeader)) {
dieWithMessage("ERROR: corrupted chunk header at address %p\n", this);
}
}
// Packs and stores the header, computing the checksum in the process.
void storeHeader(UnpackedHeader *NewUnpackedHeader) {
- NewUnpackedHeader->Checksum = Checksum(NewUnpackedHeader);
+ NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
AtomicPackedHeader *AtomicHeader =
reinterpret_cast<AtomicPackedHeader *>(this);
@@ -137,7 +179,7 @@ struct ScudoChunk : UnpackedHeader {
// we are not being raced by a corruption occurring in another thread.
void compareExchangeHeader(UnpackedHeader *NewUnpackedHeader,
UnpackedHeader *OldUnpackedHeader) {
- NewUnpackedHeader->Checksum = Checksum(NewUnpackedHeader);
+ NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
AtomicPackedHeader *AtomicHeader =
@@ -154,7 +196,7 @@ struct ScudoChunk : UnpackedHeader {
static bool ScudoInitIsRunning = false;
static pthread_once_t GlobalInited = PTHREAD_ONCE_INIT;
-static pthread_key_t pkey;
+static pthread_key_t PThreadKey;
static thread_local bool ThreadInited = false;
static thread_local bool ThreadTornDown = false;
@@ -168,7 +210,7 @@ static void teardownThread(void *p) {
// like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
// quarantine and swallowing the cache.
if (v < PTHREAD_DESTRUCTOR_ITERATIONS) {
- pthread_setspecific(pkey, reinterpret_cast<void *>(v + 1));
+ pthread_setspecific(PThreadKey, reinterpret_cast<void *>(v + 1));
return;
}
drainQuarantine();
@@ -181,23 +223,30 @@ static void initInternal() {
CHECK(!ScudoInitIsRunning && "Scudo init calls itself!");
ScudoInitIsRunning = true;
+  // Check if SSE4.2 is supported; if so, opt for the hardware CRC32 version.
+ if (testCPUFeature(CRC32CPUFeature)) {
+ atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);
+ }
+
initFlags();
AllocatorOptions Options;
Options.setFrom(getFlags(), common_flags());
initAllocator(Options);
+ MaybeStartBackgroudThread();
+
ScudoInitIsRunning = false;
}
static void initGlobal() {
- pthread_key_create(&pkey, teardownThread);
+ pthread_key_create(&PThreadKey, teardownThread);
initInternal();
}
static void NOINLINE initThread() {
pthread_once(&GlobalInited, initGlobal);
- pthread_setspecific(pkey, reinterpret_cast<void *>(1));
+ pthread_setspecific(PThreadKey, reinterpret_cast<void *>(1));
getAllocator().InitCache(&Cache);
ThreadInited = true;
}
@@ -214,7 +263,7 @@ struct QuarantineCallback {
dieWithMessage("ERROR: invalid chunk state when recycling address %p\n",
Chunk);
}
- void *Ptr = Chunk->AllocBeg(&Header);
+ void *Ptr = Chunk->getAllocBeg(&Header);
getAllocator().Deallocate(Cache_, Ptr);
}
@@ -245,6 +294,7 @@ static thread_local QuarantineCache ThreadQuarantineCache;
void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
MayReturnNull = cf->allocator_may_return_null;
+ ReleaseToOSIntervalMs = cf->allocator_release_to_os_interval_ms;
QuarantineSizeMb = f->QuarantineSizeMb;
ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb;
DeallocationTypeMismatch = f->DeallocationTypeMismatch;
@@ -254,6 +304,7 @@ void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const {
cf->allocator_may_return_null = MayReturnNull;
+ cf->allocator_release_to_os_interval_ms = ReleaseToOSIntervalMs;
f->QuarantineSizeMb = QuarantineSizeMb;
f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb;
f->DeallocationTypeMismatch = DeallocationTypeMismatch;
@@ -262,9 +313,8 @@ void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const {
}
struct Allocator {
- static const uptr MaxAllowedMallocSize = 1ULL << 40;
- static const uptr MinAlignment = 1 << MinAlignmentLog;
- static const uptr MaxAlignment = 1 << MaxAlignmentLog; // 16 MB
+ static const uptr MaxAllowedMallocSize =
+ FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);
ScudoAllocator BackendAllocator;
ScudoQuarantine AllocatorQuarantine;
@@ -285,85 +335,129 @@ struct Allocator {
FallbackQuarantineCache(LINKER_INITIALIZED) {}
void init(const AllocatorOptions &Options) {
- // Currently SSE 4.2 support is required. This might change later.
- CHECK(testCPUFeature(SSE4_2)); // for crc32
-
// Verify that the header offset field can hold the maximum offset. In the
- // worst case scenario, the backend allocation is already aligned on
- // MaxAlignment, so in order to store the header and still be aligned, we
- // add an extra MaxAlignment. As a result, the offset from the beginning of
- // the backend allocation to the chunk will be MaxAlignment -
- // ChunkHeaderSize.
+ // case of the Secondary allocator, it takes care of alignment and the
+ // offset will always be 0. In the case of the Primary, the worst case
+ // scenario happens in the last size class, when the backend allocation
+ // would already be aligned on the requested alignment, which would happen
+ // to be the maximum alignment that would fit in that size class. As a
+ // result, the maximum offset will be at most the maximum alignment for the
+ // last size class minus the header size, in multiples of MinAlignment.
UnpackedHeader Header = {};
- uptr MaximumOffset = (MaxAlignment - ChunkHeaderSize) >> MinAlignmentLog;
- Header.Offset = MaximumOffset;
- if (Header.Offset != MaximumOffset) {
+ uptr MaxPrimaryAlignment = 1 << MostSignificantSetBitIndex(
+ SizeClassMap::kMaxSize - MinAlignment);
+ uptr MaxOffset = (MaxPrimaryAlignment - AlignedChunkHeaderSize) >>
+ MinAlignmentLog;
+ Header.Offset = MaxOffset;
+ if (Header.Offset != MaxOffset) {
dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
"header\n");
}
+    // Verify that we can fit the maximum number of unused bytes in the header.
+ // Given that the Secondary fits the allocation to a page, the worst case
+ // scenario happens in the Primary. It will depend on the second to last
+ // and last class sizes, as well as the dynamic base for the Primary. The
+ // following is an over-approximation that works for our needs.
+ uptr MaxUnusedBytes = SizeClassMap::kMaxSize - 1 - AlignedChunkHeaderSize;
+ Header.UnusedBytes = MaxUnusedBytes;
+ if (Header.UnusedBytes != MaxUnusedBytes) {
+ dieWithMessage("ERROR: the maximum possible unused bytes doesn't fit in "
+ "the header\n");
+ }
DeallocationTypeMismatch = Options.DeallocationTypeMismatch;
DeleteSizeMismatch = Options.DeleteSizeMismatch;
ZeroContents = Options.ZeroContents;
- BackendAllocator.Init(Options.MayReturnNull);
- AllocatorQuarantine.Init(static_cast<uptr>(Options.QuarantineSizeMb) << 20,
- static_cast<uptr>(
- Options.ThreadLocalQuarantineSizeKb) << 10);
+ BackendAllocator.Init(Options.MayReturnNull, Options.ReleaseToOSIntervalMs);
+ AllocatorQuarantine.Init(
+ static_cast<uptr>(Options.QuarantineSizeMb) << 20,
+ static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10);
BackendAllocator.InitCache(&FallbackAllocatorCache);
Cookie = Prng.Next();
}
+ // Helper function that checks for a valid Scudo chunk.
+ bool isValidPointer(const void *UserPtr) {
+ uptr ChunkBeg = reinterpret_cast<uptr>(UserPtr);
+ if (!IsAligned(ChunkBeg, MinAlignment)) {
+ return false;
+ }
+ ScudoChunk *Chunk =
+ reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
+ return Chunk->isValid();
+ }
+
// Allocates a chunk.
void *allocate(uptr Size, uptr Alignment, AllocType Type) {
if (UNLIKELY(!ThreadInited))
initThread();
if (!IsPowerOfTwo(Alignment)) {
- dieWithMessage("ERROR: malloc alignment is not a power of 2\n");
+ dieWithMessage("ERROR: alignment is not a power of 2\n");
}
if (Alignment > MaxAlignment)
- return BackendAllocator.ReturnNullOrDie();
+ return BackendAllocator.ReturnNullOrDieOnBadRequest();
if (Alignment < MinAlignment)
Alignment = MinAlignment;
if (Size == 0)
Size = 1;
if (Size >= MaxAllowedMallocSize)
- return BackendAllocator.ReturnNullOrDie();
- uptr RoundedSize = RoundUpTo(Size, MinAlignment);
- uptr ExtraBytes = ChunkHeaderSize;
+ return BackendAllocator.ReturnNullOrDieOnBadRequest();
+
+ uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
if (Alignment > MinAlignment)
- ExtraBytes += Alignment;
- uptr NeededSize = RoundedSize + ExtraBytes;
+ NeededSize += Alignment;
if (NeededSize >= MaxAllowedMallocSize)
- return BackendAllocator.ReturnNullOrDie();
+ return BackendAllocator.ReturnNullOrDieOnBadRequest();
+
+    // Primary backed and Secondary backed allocations are handled differently.
+    // We deal with the alignment requirements of Primary serviced allocations
+    // here, but the Secondary will take care of its own alignment needs, which
+    // means we also have to work around some limitations of the combined
+    // allocator to accommodate the situation.
+ bool FromPrimary = PrimaryAllocator::CanAllocate(NeededSize, MinAlignment);
void *Ptr;
if (LIKELY(!ThreadTornDown)) {
- Ptr = BackendAllocator.Allocate(&Cache, NeededSize, MinAlignment);
+ Ptr = BackendAllocator.Allocate(&Cache, NeededSize,
+ FromPrimary ? MinAlignment : Alignment);
} else {
SpinMutexLock l(&FallbackMutex);
Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, NeededSize,
- MinAlignment);
+ FromPrimary ? MinAlignment : Alignment);
}
if (!Ptr)
- return BackendAllocator.ReturnNullOrDie();
+ return BackendAllocator.ReturnNullOrDieOnOOM();
+
+ uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
+ // If the allocation was serviced by the secondary, the returned pointer
+ // accounts for ChunkHeaderSize to pass the alignment check of the combined
+ // allocator. Adjust it here.
+ if (!FromPrimary) {
+ AllocBeg -= AlignedChunkHeaderSize;
+ if (Alignment > MinAlignment)
+ NeededSize -= Alignment;
+ }
+ uptr ActuallyAllocatedSize = BackendAllocator.GetActuallyAllocatedSize(
+ reinterpret_cast<void *>(AllocBeg));
// If requested, we will zero out the entire contents of the returned chunk.
- if (ZeroContents && BackendAllocator.FromPrimary(Ptr))
- memset(Ptr, 0, BackendAllocator.GetActuallyAllocatedSize(Ptr));
+ if (ZeroContents && FromPrimary)
+ memset(Ptr, 0, ActuallyAllocatedSize);
- uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
- uptr ChunkBeg = AllocBeg + ChunkHeaderSize;
+ uptr ChunkBeg = AllocBeg + AlignedChunkHeaderSize;
if (!IsAligned(ChunkBeg, Alignment))
ChunkBeg = RoundUpTo(ChunkBeg, Alignment);
CHECK_LE(ChunkBeg + Size, AllocBeg + NeededSize);
ScudoChunk *Chunk =
- reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
+ reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
UnpackedHeader Header = {};
Header.State = ChunkAllocated;
- Header.Offset = (ChunkBeg - ChunkHeaderSize - AllocBeg) >> MinAlignmentLog;
+ uptr Offset = ChunkBeg - AlignedChunkHeaderSize - AllocBeg;
+ Header.Offset = Offset >> MinAlignmentLog;
Header.AllocType = Type;
- Header.RequestedSize = Size;
- Header.Salt = static_cast<u16>(Prng.Next());
+ Header.UnusedBytes = ActuallyAllocatedSize - Offset -
+ AlignedChunkHeaderSize - Size;
+ Header.Salt = static_cast<u8>(Prng.Next());
Chunk->storeHeader(&Header);
void *UserPtr = reinterpret_cast<void *>(ChunkBeg);
// TODO(kostyak): hooks sound like a terrible idea security wise but might
@@ -387,13 +481,14 @@ struct Allocator {
"aligned at address %p\n", UserPtr);
}
ScudoChunk *Chunk =
- reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
+ reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
UnpackedHeader OldHeader;
Chunk->loadHeader(&OldHeader);
if (OldHeader.State != ChunkAllocated) {
dieWithMessage("ERROR: invalid chunk state when deallocating address "
- "%p\n", Chunk);
+ "%p\n", UserPtr);
}
+ uptr UsableSize = Chunk->getUsableSize(&OldHeader);
UnpackedHeader NewHeader = OldHeader;
NewHeader.State = ChunkQuarantine;
Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
@@ -407,69 +502,40 @@ struct Allocator {
}
}
}
- uptr Size = NewHeader.RequestedSize;
+ uptr Size = UsableSize - OldHeader.UnusedBytes;
if (DeleteSizeMismatch) {
if (DeleteSize && DeleteSize != Size) {
dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n",
Chunk);
}
}
+
if (LIKELY(!ThreadTornDown)) {
AllocatorQuarantine.Put(&ThreadQuarantineCache,
- QuarantineCallback(&Cache), Chunk, Size);
+ QuarantineCallback(&Cache), Chunk, UsableSize);
} else {
SpinMutexLock l(&FallbackMutex);
AllocatorQuarantine.Put(&FallbackQuarantineCache,
QuarantineCallback(&FallbackAllocatorCache),
- Chunk, Size);
+ Chunk, UsableSize);
}
}
- // Returns the actual usable size of a chunk. Since this requires loading the
- // header, we will return it in the second parameter, as it can be required
- // by the caller to perform additional processing.
- uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
- if (UNLIKELY(!ThreadInited))
- initThread();
- if (!Ptr)
- return 0;
- uptr ChunkBeg = reinterpret_cast<uptr>(Ptr);
- ScudoChunk *Chunk =
- reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
- Chunk->loadHeader(Header);
- // Getting the usable size of a chunk only makes sense if it's allocated.
- if (Header->State != ChunkAllocated) {
- dieWithMessage("ERROR: attempted to size a non-allocated chunk at "
- "address %p\n", Chunk);
- }
- uptr Size =
- BackendAllocator.GetActuallyAllocatedSize(Chunk->AllocBeg(Header));
- // UsableSize works as malloc_usable_size, which is also what (AFAIU)
- // tcmalloc's MallocExtension::GetAllocatedSize aims at providing. This
- // means we will return the size of the chunk from the user beginning to
- // the end of the 'user' allocation, hence us subtracting the header size
- // and the offset from the size.
- if (Size == 0)
- return Size;
- return Size - ChunkHeaderSize - (Header->Offset << MinAlignmentLog);
- }
-
- // Helper function that doesn't care about the header.
- uptr getUsableSize(const void *Ptr) {
- UnpackedHeader Header;
- return getUsableSize(Ptr, &Header);
- }
-
// Reallocates a chunk. We can save on a new allocation if the new requested
// size still fits in the chunk.
void *reallocate(void *OldPtr, uptr NewSize) {
if (UNLIKELY(!ThreadInited))
initThread();
- UnpackedHeader OldHeader;
- uptr Size = getUsableSize(OldPtr, &OldHeader);
uptr ChunkBeg = reinterpret_cast<uptr>(OldPtr);
ScudoChunk *Chunk =
- reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
+ reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
+ UnpackedHeader OldHeader;
+ Chunk->loadHeader(&OldHeader);
+ if (OldHeader.State != ChunkAllocated) {
+ dieWithMessage("ERROR: invalid chunk state when reallocating address "
+ "%p\n", OldPtr);
+ }
+ uptr Size = Chunk->getUsableSize(&OldHeader);
if (OldHeader.AllocType != FromMalloc) {
dieWithMessage("ERROR: invalid chunk type when reallocating address %p\n",
Chunk);
@@ -477,7 +543,7 @@ struct Allocator {
UnpackedHeader NewHeader = OldHeader;
// The new size still fits in the current chunk.
if (NewSize <= Size) {
- NewHeader.RequestedSize = NewSize;
+ NewHeader.UnusedBytes = Size - NewSize;
Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
return OldPtr;
}
@@ -485,29 +551,48 @@ struct Allocator {
// old one.
void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
if (NewPtr) {
- uptr OldSize = OldHeader.RequestedSize;
+ uptr OldSize = Size - OldHeader.UnusedBytes;
memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
NewHeader.State = ChunkQuarantine;
Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
if (LIKELY(!ThreadTornDown)) {
AllocatorQuarantine.Put(&ThreadQuarantineCache,
- QuarantineCallback(&Cache), Chunk, OldSize);
+ QuarantineCallback(&Cache), Chunk, Size);
} else {
SpinMutexLock l(&FallbackMutex);
AllocatorQuarantine.Put(&FallbackQuarantineCache,
QuarantineCallback(&FallbackAllocatorCache),
- Chunk, OldSize);
+ Chunk, Size);
}
}
return NewPtr;
}
+ // Helper function that returns the actual usable size of a chunk.
+ uptr getUsableSize(const void *Ptr) {
+ if (UNLIKELY(!ThreadInited))
+ initThread();
+ if (!Ptr)
+ return 0;
+ uptr ChunkBeg = reinterpret_cast<uptr>(Ptr);
+ ScudoChunk *Chunk =
+ reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
+ UnpackedHeader Header;
+ Chunk->loadHeader(&Header);
+ // Getting the usable size of a chunk only makes sense if it's allocated.
+ if (Header.State != ChunkAllocated) {
+ dieWithMessage("ERROR: invalid chunk state when sizing address %p\n",
+ Ptr);
+ }
+ return Chunk->getUsableSize(&Header);
+ }
+
void *calloc(uptr NMemB, uptr Size) {
if (UNLIKELY(!ThreadInited))
initThread();
uptr Total = NMemB * Size;
if (Size != 0 && Total / Size != NMemB) // Overflow check
- return BackendAllocator.ReturnNullOrDie();
+ return BackendAllocator.ReturnNullOrDieOnBadRequest();
void *Ptr = allocate(Total, MinAlignment, FromMalloc);
// If ZeroContents, the content of the chunk has already been zero'd out.
if (!ZeroContents && Ptr && BackendAllocator.FromPrimary(Ptr))
@@ -536,7 +621,7 @@ void drainQuarantine() {
}
void *scudoMalloc(uptr Size, AllocType Type) {
- return Instance.allocate(Size, Allocator::MinAlignment, Type);
+ return Instance.allocate(Size, MinAlignment, Type);
}
void scudoFree(void *Ptr, AllocType Type) {
@@ -549,7 +634,7 @@ void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) {
void *scudoRealloc(void *Ptr, uptr Size) {
if (!Ptr)
- return Instance.allocate(Size, Allocator::MinAlignment, FromMalloc);
+ return Instance.allocate(Size, MinAlignment, FromMalloc);
if (Size == 0) {
Instance.deallocate(Ptr, 0, FromMalloc);
return nullptr;
@@ -596,7 +681,7 @@ uptr scudoMallocUsableSize(void *Ptr) {
return Instance.getUsableSize(Ptr);
}
-} // namespace __scudo
+} // namespace __scudo
using namespace __scudo;
@@ -626,10 +711,10 @@ uptr __sanitizer_get_estimated_allocated_size(uptr size) {
return size;
}
-int __sanitizer_get_ownership(const void *p) {
- return Instance.getUsableSize(p) != 0;
+int __sanitizer_get_ownership(const void *Ptr) {
+ return Instance.isValidPointer(Ptr);
}
-uptr __sanitizer_get_allocated_size(const void *p) {
- return Instance.getUsableSize(p);
+uptr __sanitizer_get_allocated_size(const void *Ptr) {
+ return Instance.getUsableSize(Ptr);
}
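
The interface shims above repoint __sanitizer_get_ownership at the header checksum (via isValidPointer) instead of sizing the chunk. A minimal caller-side sketch, assuming the public sanitizer/allocator_interface.h header and a binary running with Scudo as its allocator:

    #include <sanitizer/allocator_interface.h>
    #include <cstdio>
    #include <cstdlib>

    int main() {
      void *p = malloc(100);
      // Ownership is now decided by validating the chunk header checksum.
      printf("owned: %d, usable size: %zu\n",
             __sanitizer_get_ownership(p),
             __sanitizer_get_allocated_size(p));
      free(p);
      return 0;
    }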
diff --git a/lib/scudo/scudo_allocator.h b/lib/scudo/scudo_allocator.h
index 7e9c7886055e..6431a2aa07d7 100644
--- a/lib/scudo/scudo_allocator.h
+++ b/lib/scudo/scudo_allocator.h
@@ -14,14 +14,12 @@
#ifndef SCUDO_ALLOCATOR_H_
#define SCUDO_ALLOCATOR_H_
-#ifndef __x86_64__
-# error "The Scudo hardened allocator currently only supports x86_64."
-#endif
-
#include "scudo_flags.h"
#include "sanitizer_common/sanitizer_allocator.h"
+#include <atomic>
+
namespace __scudo {
enum AllocType : u8 {
@@ -31,10 +29,49 @@ enum AllocType : u8 {
FromMemalign = 3, // Memory block came from memalign, posix_memalign, etc.
};
+enum ChunkState : u8 {
+ ChunkAvailable = 0,
+ ChunkAllocated = 1,
+ ChunkQuarantine = 2
+};
+
+// Our header requires 64 bits of storage. Having the offset saves us from
+// using functions such as GetBlockBegin, which is fairly costly. Our first
+// implementation used the MetaData as well, which offers the advantage of
+// being stored away from the chunk itself, but accessing it was costly as
+// well. The header will be atomically loaded and stored using the 64-bit
+// primitives offered by the platform.
+typedef u64 PackedHeader;
+struct UnpackedHeader {
+ u64 Checksum : 16;
+ u64 UnusedBytes : 20; // Needed for reallocation purposes.
+ u64 State : 2; // available, allocated, or quarantined
+ u64 AllocType : 2; // malloc, new, new[], or memalign
+ u64 Offset : 16; // Offset from the beginning of the backend
+ // allocation to the beginning of the chunk itself,
+ // in multiples of MinAlignment. See comment about
+ // its maximum value and test in init().
+ u64 Salt : 8;
+};
+
+typedef std::atomic<PackedHeader> AtomicPackedHeader;
+COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));
+
+// Minimum alignment of 8 bytes for 32-bit, 16 for 64-bit
+const uptr MinAlignmentLog = FIRST_32_SECOND_64(3, 4);
+const uptr MaxAlignmentLog = 24; // 16 MB
+const uptr MinAlignment = 1 << MinAlignmentLog;
+const uptr MaxAlignment = 1 << MaxAlignmentLog;
+
+const uptr ChunkHeaderSize = sizeof(PackedHeader);
+const uptr AlignedChunkHeaderSize =
+ (ChunkHeaderSize + MinAlignment - 1) & ~(MinAlignment - 1);
+
struct AllocatorOptions {
u32 QuarantineSizeMb;
u32 ThreadLocalQuarantineSizeKb;
bool MayReturnNull;
+ s32 ReleaseToOSIntervalMs;
bool DeallocationTypeMismatch;
bool DeleteSizeMismatch;
bool ZeroContents;
@@ -58,6 +95,8 @@ int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size);
void *scudoAlignedAlloc(uptr Alignment, uptr Size);
uptr scudoMallocUsableSize(void *Ptr);
-} // namespace __scudo
+#include "scudo_allocator_secondary.h"
+
+} // namespace __scudo
#endif // SCUDO_ALLOCATOR_H_
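
As a quick check on the repacked header above (field widths taken straight from the struct), the bit-fields sum to exactly 64 bits:

    Checksum(16) + UnusedBytes(20) + State(2) + AllocType(2) + Offset(16) + Salt(8) = 64

With MinAlignmentLog = 4 on 64-bit targets, the 16-bit Offset field, expressed in multiples of MinAlignment, can describe offsets of up to (2^16 - 1) * 16 bytes, i.e. just under 1 MiB, and the 20-bit UnusedBytes field holds values up to 2^20 - 1; the init() checks in scudo_allocator.cpp verify both limits against the Primary's largest size class.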
diff --git a/lib/scudo/scudo_allocator_secondary.h b/lib/scudo/scudo_allocator_secondary.h
new file mode 100644
index 000000000000..b984f0db4dbd
--- /dev/null
+++ b/lib/scudo/scudo_allocator_secondary.h
@@ -0,0 +1,188 @@
+//===-- scudo_allocator_secondary.h -----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo Secondary Allocator.
+/// This services allocations that are too large to be serviced by the Primary
+/// Allocator. It is directly backed by the memory mapping functions of the
+/// operating system.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ALLOCATOR_SECONDARY_H_
+#define SCUDO_ALLOCATOR_SECONDARY_H_
+
+#ifndef SCUDO_ALLOCATOR_H_
+# error "This file must be included inside scudo_allocator.h."
+#endif
+
+class ScudoLargeMmapAllocator {
+ public:
+
+ void Init(bool AllocatorMayReturnNull) {
+ PageSize = GetPageSizeCached();
+ atomic_store(&MayReturnNull, AllocatorMayReturnNull, memory_order_relaxed);
+ }
+
+ void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
+ // The Scudo frontend prevents us from allocating more than
+ // MaxAllowedMallocSize, so integer overflow checks would be superfluous.
+ uptr MapSize = Size + SecondaryHeaderSize;
+ MapSize = RoundUpTo(MapSize, PageSize);
+ // Account for 2 guard pages, one before and one after the chunk.
+ MapSize += 2 * PageSize;
+ // The size passed to the Secondary comprises the alignment, if large
+ // enough. Subtract it here to get the requested size, including header.
+ if (Alignment > MinAlignment)
+ Size -= Alignment;
+
+ uptr MapBeg = reinterpret_cast<uptr>(MmapNoAccess(MapSize));
+ if (MapBeg == ~static_cast<uptr>(0))
+ return ReturnNullOrDieOnOOM();
+ // A page-aligned pointer is assumed after that, so check it now.
+ CHECK(IsAligned(MapBeg, PageSize));
+ uptr MapEnd = MapBeg + MapSize;
+ // The beginning of the user area for that allocation comes after the
+ // initial guard page, and both headers. This is the pointer that has to
+ // abide by alignment requirements.
+ uptr UserBeg = MapBeg + PageSize + HeadersSize;
+
+ // In the rare event of larger alignments, we will attempt to fit the mmap
+ // area better and unmap extraneous memory. This will also ensure that the
+ // offset and unused bytes field of the header stay small.
+ if (Alignment > MinAlignment) {
+ if (UserBeg & (Alignment - 1))
+ UserBeg += Alignment - (UserBeg & (Alignment - 1));
+ CHECK_GE(UserBeg, MapBeg);
+ uptr NewMapBeg = RoundDownTo(UserBeg - HeadersSize, PageSize) - PageSize;
+ CHECK_GE(NewMapBeg, MapBeg);
+ uptr NewMapEnd = RoundUpTo(UserBeg + (Size - AlignedChunkHeaderSize),
+ PageSize) + PageSize;
+ CHECK_LE(NewMapEnd, MapEnd);
+ // Unmap the extra memory if it's large enough, on both sides.
+ uptr Diff = NewMapBeg - MapBeg;
+ if (Diff > PageSize)
+ UnmapOrDie(reinterpret_cast<void *>(MapBeg), Diff);
+ Diff = MapEnd - NewMapEnd;
+ if (Diff > PageSize)
+ UnmapOrDie(reinterpret_cast<void *>(NewMapEnd), Diff);
+ MapBeg = NewMapBeg;
+ MapEnd = NewMapEnd;
+ MapSize = NewMapEnd - NewMapBeg;
+ }
+
+ uptr UserEnd = UserBeg + (Size - AlignedChunkHeaderSize);
+ CHECK_LE(UserEnd, MapEnd - PageSize);
+ // Actually mmap the memory, preserving the guard pages on either side.
+ CHECK_EQ(MapBeg + PageSize, reinterpret_cast<uptr>(
+ MmapFixedOrDie(MapBeg + PageSize, MapSize - 2 * PageSize)));
+ uptr Ptr = UserBeg - AlignedChunkHeaderSize;
+ SecondaryHeader *Header = getHeader(Ptr);
+ Header->MapBeg = MapBeg;
+ Header->MapSize = MapSize;
+ // The primary adds the whole class size to the stats when allocating a
+ // chunk, so we will do something similar here. But we will not account for
+ // the guard pages.
+ Stats->Add(AllocatorStatAllocated, MapSize - 2 * PageSize);
+ Stats->Add(AllocatorStatMapped, MapSize - 2 * PageSize);
+
+ return reinterpret_cast<void *>(UserBeg);
+ }
+
+ void *ReturnNullOrDieOnBadRequest() {
+ if (atomic_load(&MayReturnNull, memory_order_acquire))
+ return nullptr;
+ ReportAllocatorCannotReturnNull(false);
+ }
+
+ void *ReturnNullOrDieOnOOM() {
+ if (atomic_load(&MayReturnNull, memory_order_acquire))
+ return nullptr;
+ ReportAllocatorCannotReturnNull(true);
+ }
+
+ void SetMayReturnNull(bool AllocatorMayReturnNull) {
+ atomic_store(&MayReturnNull, AllocatorMayReturnNull, memory_order_release);
+ }
+
+ void Deallocate(AllocatorStats *Stats, void *Ptr) {
+ SecondaryHeader *Header = getHeader(Ptr);
+ Stats->Sub(AllocatorStatAllocated, Header->MapSize - 2 * PageSize);
+ Stats->Sub(AllocatorStatMapped, Header->MapSize - 2 * PageSize);
+ UnmapOrDie(reinterpret_cast<void *>(Header->MapBeg), Header->MapSize);
+ }
+
+ uptr TotalMemoryUsed() {
+ UNIMPLEMENTED();
+ }
+
+ bool PointerIsMine(const void *Ptr) {
+ UNIMPLEMENTED();
+ }
+
+ uptr GetActuallyAllocatedSize(void *Ptr) {
+ SecondaryHeader *Header = getHeader(Ptr);
+ // Deduct PageSize as MapEnd includes the trailing guard page.
+ uptr MapEnd = Header->MapBeg + Header->MapSize - PageSize;
+ return MapEnd - reinterpret_cast<uptr>(Ptr);
+ }
+
+ void *GetMetaData(const void *Ptr) {
+ UNIMPLEMENTED();
+ }
+
+ void *GetBlockBegin(const void *Ptr) {
+ UNIMPLEMENTED();
+ }
+
+ void *GetBlockBeginFastLocked(void *Ptr) {
+ UNIMPLEMENTED();
+ }
+
+ void PrintStats() {
+ UNIMPLEMENTED();
+ }
+
+ void ForceLock() {
+ UNIMPLEMENTED();
+ }
+
+ void ForceUnlock() {
+ UNIMPLEMENTED();
+ }
+
+ void ForEachChunk(ForEachChunkCallback Callback, void *Arg) {
+ UNIMPLEMENTED();
+ }
+
+ private:
+ // A Secondary allocated chunk header contains the base of the mapping and
+  // its size. Currently, the base is always a page before the header, but
+  // we might want to increase that gap in the future based on the size of
+  // the allocation.
+ struct SecondaryHeader {
+ uptr MapBeg;
+ uptr MapSize;
+ };
+ // Check that sizeof(SecondaryHeader) is a multiple of MinAlignment.
+ COMPILER_CHECK((sizeof(SecondaryHeader) & (MinAlignment - 1)) == 0);
+
+ SecondaryHeader *getHeader(uptr Ptr) {
+ return reinterpret_cast<SecondaryHeader*>(Ptr - sizeof(SecondaryHeader));
+ }
+ SecondaryHeader *getHeader(const void *Ptr) {
+ return getHeader(reinterpret_cast<uptr>(Ptr));
+ }
+
+ const uptr SecondaryHeaderSize = sizeof(SecondaryHeader);
+ const uptr HeadersSize = SecondaryHeaderSize + AlignedChunkHeaderSize;
+ uptr PageSize;
+ atomic_uint8_t MayReturnNull;
+};
+
+#endif // SCUDO_ALLOCATOR_SECONDARY_H_
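
To make the mapping arithmetic in Allocate() concrete, here is a worked example assuming 4 KiB pages and a 64-bit uptr (so SecondaryHeaderSize is 16 bytes); the request size is illustrative only:

    Size (user size + aligned chunk header)  = 100000 bytes
    MapSize  = RoundUpTo(100000 + 16, 4096)  = 102400   // 25 pages reserved for data
    MapSize += 2 * 4096                      = 110592   // plus leading/trailing guard pages
    Accessible after MmapFixedOrDie          = 102400   // MapSize - 2 * PageSize,
                                                        // also what the stats account for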
diff --git a/lib/scudo/scudo_flags.cpp b/lib/scudo/scudo_flags.cpp
index 430dcd2995dd..b9c838107305 100644
--- a/lib/scudo/scudo_flags.cpp
+++ b/lib/scudo/scudo_flags.cpp
@@ -17,9 +17,12 @@
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char* __scudo_default_options();
+
namespace __scudo {
-Flags scudo_flags_dont_use_directly; // use via flags().
+Flags ScudoFlags; // Use via getFlags().
void Flags::setDefaults() {
#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
@@ -34,6 +37,10 @@ static void RegisterScudoFlags(FlagParser *parser, Flags *f) {
#undef SCUDO_FLAG
}
+static const char *callGetScudoDefaultOptions() {
+ return (&__scudo_default_options) ? __scudo_default_options() : "";
+}
+
void initFlags() {
SetCommonFlagsDefaults();
{
@@ -45,11 +52,16 @@ void initFlags() {
Flags *f = getFlags();
f->setDefaults();
- FlagParser scudo_parser;
- RegisterScudoFlags(&scudo_parser, f);
- RegisterCommonFlags(&scudo_parser);
+ FlagParser ScudoParser;
+ RegisterScudoFlags(&ScudoParser, f);
+ RegisterCommonFlags(&ScudoParser);
- scudo_parser.ParseString(GetEnv("SCUDO_OPTIONS"));
+ // Override from user-specified string.
+ const char *ScudoDefaultOptions = callGetScudoDefaultOptions();
+ ScudoParser.ParseString(ScudoDefaultOptions);
+
+ // Override from environment.
+ ScudoParser.ParseString(GetEnv("SCUDO_OPTIONS"));
InitializeCommonFlags();
@@ -75,7 +87,7 @@ void initFlags() {
}
Flags *getFlags() {
- return &scudo_flags_dont_use_directly;
+ return &ScudoFlags;
}
-}
+} // namespace __scudo
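
With this change the parser consults the weak __scudo_default_options symbol before falling back to the SCUDO_OPTIONS environment variable, mirroring the other sanitizers. A hedged sketch of how a program could supply defaults (the flag names are assumed to match the Flags members used elsewhere in this patch, e.g. QuarantineSizeMb and ZeroContents; check scudo_flags.inc for the authoritative list):

    // Compiled into the instrumented program; SCUDO_OPTIONS still overrides
    // these values, since the environment is parsed afterwards.
    extern "C" const char *__scudo_default_options() {
      return "QuarantineSizeMb=64:ZeroContents=1";
    }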
diff --git a/lib/scudo/scudo_flags.h b/lib/scudo/scudo_flags.h
index c16f635d366f..d4ae31031fbb 100644
--- a/lib/scudo/scudo_flags.h
+++ b/lib/scudo/scudo_flags.h
@@ -28,6 +28,6 @@ Flags *getFlags();
void initFlags();
-} // namespace __scudo
+} // namespace __scudo
#endif // SCUDO_FLAGS_H_
diff --git a/lib/scudo/scudo_interceptors.cpp b/lib/scudo/scudo_interceptors.cpp
index 9204652d8b7d..735a13196757 100644
--- a/lib/scudo/scudo_interceptors.cpp
+++ b/lib/scudo/scudo_interceptors.cpp
@@ -72,4 +72,4 @@ INTERCEPTOR(int, mallopt, int cmd, int value) {
return -1;
}
-#endif // SANITIZER_LINUX
+#endif // SANITIZER_LINUX
diff --git a/lib/scudo/scudo_new_delete.cpp b/lib/scudo/scudo_new_delete.cpp
index 172f5659c933..c022bd0acbe7 100644
--- a/lib/scudo/scudo_new_delete.cpp
+++ b/lib/scudo/scudo_new_delete.cpp
@@ -24,7 +24,7 @@ using namespace __scudo;
// Fake std::nothrow_t to avoid including <new>.
namespace std {
struct nothrow_t {};
-} // namespace std
+} // namespace std
CXX_OPERATOR_ATTRIBUTE
void *operator new(size_t size) {
diff --git a/lib/scudo/scudo_termination.cpp b/lib/scudo/scudo_termination.cpp
index 32421d3d810f..c441ff3c126a 100644
--- a/lib/scudo/scudo_termination.cpp
+++ b/lib/scudo/scudo_termination.cpp
@@ -13,15 +13,17 @@
///
//===----------------------------------------------------------------------===//
+#include "scudo_utils.h"
+
#include "sanitizer_common/sanitizer_common.h"
namespace __sanitizer {
-bool AddDieCallback(DieCallbackType callback) { return true; }
+bool AddDieCallback(DieCallbackType Callback) { return true; }
-bool RemoveDieCallback(DieCallbackType callback) { return true; }
+bool RemoveDieCallback(DieCallbackType Callback) { return true; }
-void SetUserDieCallback(DieCallbackType callback) {}
+void SetUserDieCallback(DieCallbackType Callback) {}
void NORETURN Die() {
if (common_flags()->abort_on_error)
@@ -31,11 +33,10 @@ void NORETURN Die() {
void SetCheckFailedCallback(CheckFailedCallbackType callback) {}
-void NORETURN CheckFailed(const char *file, int line, const char *cond,
- u64 v1, u64 v2) {
- Report("Sanitizer CHECK failed: %s:%d %s (%lld, %lld)\n", file, line, cond,
- v1, v2);
- Die();
+void NORETURN CheckFailed(const char *File, int Line, const char *Condition,
+ u64 Value1, u64 Value2) {
+ __scudo::dieWithMessage("Scudo CHECK failed: %s:%d %s (%lld, %lld)\n",
+ File, Line, Condition, Value1, Value2);
}
-} // namespace __sanitizer
+} // namespace __sanitizer
diff --git a/lib/scudo/scudo_utils.cpp b/lib/scudo/scudo_utils.cpp
index 6b96e84ee9d9..c0269ec11efb 100644
--- a/lib/scudo/scudo_utils.cpp
+++ b/lib/scudo/scudo_utils.cpp
@@ -17,6 +17,9 @@
#include <fcntl.h>
#include <stdarg.h>
#include <unistd.h>
+#if defined(__x86_64__) || defined(__i386__)
+# include <cpuid.h>
+#endif
#include <cstring>
@@ -28,14 +31,14 @@ namespace __sanitizer {
extern int VSNPrintf(char *buff, int buff_length, const char *format,
va_list args);
-} // namespace __sanitizer
+} // namespace __sanitizer
namespace __scudo {
FORMAT(1, 2)
-void dieWithMessage(const char *Format, ...) {
- // Our messages are tiny, 128 characters is more than enough.
- char Message[128];
+void NORETURN dieWithMessage(const char *Format, ...) {
+ // Our messages are tiny, 256 characters is more than enough.
+ char Message[256];
va_list Args;
va_start(Args, Format);
__sanitizer::VSNPrintf(Message, sizeof(Message), Format, Args);
@@ -44,60 +47,61 @@ void dieWithMessage(const char *Format, ...) {
Die();
}
+#if defined(__x86_64__) || defined(__i386__)
+// i386 and x86_64 specific code to detect CRC32 hardware support via CPUID.
+// CRC32 requires the SSE 4.2 instruction set.
typedef struct {
u32 Eax;
u32 Ebx;
u32 Ecx;
u32 Edx;
-} CPUIDInfo;
+} CPUIDRegs;
-static void getCPUID(CPUIDInfo *info, u32 leaf, u32 subleaf)
+static void getCPUID(CPUIDRegs *Regs, u32 Level)
{
- asm volatile("cpuid"
- : "=a" (info->Eax), "=b" (info->Ebx), "=c" (info->Ecx), "=d" (info->Edx)
- : "a" (leaf), "c" (subleaf)
- );
+ __get_cpuid(Level, &Regs->Eax, &Regs->Ebx, &Regs->Ecx, &Regs->Edx);
}
-// Returns true is the CPU is a "GenuineIntel" or "AuthenticAMD"
-static bool isSupportedCPU()
-{
- CPUIDInfo Info;
-
- getCPUID(&Info, 0, 0);
- if (memcmp(reinterpret_cast<char *>(&Info.Ebx), "Genu", 4) == 0 &&
- memcmp(reinterpret_cast<char *>(&Info.Edx), "ineI", 4) == 0 &&
- memcmp(reinterpret_cast<char *>(&Info.Ecx), "ntel", 4) == 0) {
- return true;
- }
- if (memcmp(reinterpret_cast<char *>(&Info.Ebx), "Auth", 4) == 0 &&
- memcmp(reinterpret_cast<char *>(&Info.Edx), "enti", 4) == 0 &&
- memcmp(reinterpret_cast<char *>(&Info.Ecx), "cAMD", 4) == 0) {
- return true;
+CPUIDRegs getCPUFeatures() {
+ CPUIDRegs VendorRegs = {};
+ getCPUID(&VendorRegs, 0);
+ bool IsIntel =
+ (VendorRegs.Ebx == signature_INTEL_ebx) &&
+ (VendorRegs.Edx == signature_INTEL_edx) &&
+ (VendorRegs.Ecx == signature_INTEL_ecx);
+ bool IsAMD =
+ (VendorRegs.Ebx == signature_AMD_ebx) &&
+ (VendorRegs.Edx == signature_AMD_edx) &&
+ (VendorRegs.Ecx == signature_AMD_ecx);
+ // Default to an empty feature set if not on a supported CPU.
+ CPUIDRegs FeaturesRegs = {};
+ if (IsIntel || IsAMD) {
+ getCPUID(&FeaturesRegs, 1);
}
- return false;
+ return FeaturesRegs;
}
-bool testCPUFeature(CPUFeature feature)
+#ifndef bit_SSE4_2
+#define bit_SSE4_2 bit_SSE42 // clang and gcc have different defines.
+#endif
+
+bool testCPUFeature(CPUFeature Feature)
{
- static bool InfoInitialized = false;
- static CPUIDInfo CPUInfo = {};
-
- if (InfoInitialized == false) {
- if (isSupportedCPU() == true)
- getCPUID(&CPUInfo, 1, 0);
- else
- UNIMPLEMENTED();
- InfoInitialized = true;
- }
- switch (feature) {
- case SSE4_2:
- return ((CPUInfo.Ecx >> 20) & 0x1) != 0;
+ static CPUIDRegs FeaturesRegs = getCPUFeatures();
+
+ switch (Feature) {
+ case CRC32CPUFeature: // CRC32 is provided by SSE 4.2.
+ return !!(FeaturesRegs.Ecx & bit_SSE4_2);
default:
break;
}
return false;
}
+#else
+bool testCPUFeature(CPUFeature Feature) {
+ return false;
+}
+#endif // defined(__x86_64__) || defined(__i386__)
// readRetry will attempt to read Count bytes from the Fd specified, and if
// interrupted will retry to read additional bytes to reach Count.
@@ -117,17 +121,77 @@ static ssize_t readRetry(int Fd, u8 *Buffer, size_t Count) {
return AmountRead;
}
-// Default constructor for Xorshift128Plus seeds the state with /dev/urandom
-Xorshift128Plus::Xorshift128Plus() {
+static void fillRandom(u8 *Data, ssize_t Size) {
int Fd = open("/dev/urandom", O_RDONLY);
- bool Success = readRetry(Fd, reinterpret_cast<u8 *>(&State_0_),
- sizeof(State_0_)) == sizeof(State_0_);
- Success &= readRetry(Fd, reinterpret_cast<u8 *>(&State_1_),
- sizeof(State_1_)) == sizeof(State_1_);
+ if (Fd < 0) {
+ dieWithMessage("ERROR: failed to open /dev/urandom.\n");
+ }
+ bool Success = readRetry(Fd, Data, Size) == Size;
close(Fd);
if (!Success) {
dieWithMessage("ERROR: failed to read enough data from /dev/urandom.\n");
}
}
-} // namespace __scudo
+// Default constructor for Xorshift128Plus seeds the state with /dev/urandom.
+// TODO(kostyak): investigate using getrandom() if available.
+Xorshift128Plus::Xorshift128Plus() {
+ fillRandom(reinterpret_cast<u8 *>(State), sizeof(State));
+}
+
+const static u32 CRC32Table[] = {
+ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
+ 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
+ 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
+ 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
+ 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
+ 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
+ 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
+ 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
+ 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
+ 0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+ 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
+ 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
+ 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
+ 0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
+ 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
+ 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
+ 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
+ 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
+ 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
+ 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+ 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
+ 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
+ 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
+ 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
+ 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
+ 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
+ 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
+ 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+ 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
+ 0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+ 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
+ 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
+ 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
+ 0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
+ 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
+ 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
+ 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
+ 0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
+ 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
+ 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+ 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
+ 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
+ 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
+};
+
+u32 computeCRC32(u32 Crc, uptr Data)
+{
+ for (uptr i = 0; i < sizeof(Data); i++) {
+ Crc = CRC32Table[(Crc ^ Data) & 0xff] ^ (Crc >> 8);
+ Data >>= 8;
+ }
+ return Crc;
+}
+
+} // namespace __scudo
diff --git a/lib/scudo/scudo_utils.h b/lib/scudo/scudo_utils.h
index 07394ff4b91d..f93f26ef122a 100644
--- a/lib/scudo/scudo_utils.h
+++ b/lib/scudo/scudo_utils.h
@@ -28,11 +28,11 @@ inline Dest bit_cast(const Source& source) {
return dest;
}
-void dieWithMessage(const char *Format, ...);
+void NORETURN dieWithMessage(const char *Format, ...);
-enum CPUFeature {
- SSE4_2 = 0,
- ENUM_CPUFEATURE_MAX
+enum CPUFeature {
+ CRC32CPUFeature = 0,
+ MaxCPUFeature,
};
bool testCPUFeature(CPUFeature feature);
@@ -42,18 +42,20 @@ struct Xorshift128Plus {
public:
Xorshift128Plus();
u64 Next() {
- u64 x = State_0_;
- const u64 y = State_1_;
- State_0_ = y;
+ u64 x = State[0];
+ const u64 y = State[1];
+ State[0] = y;
x ^= x << 23;
- State_1_ = x ^ y ^ (x >> 17) ^ (y >> 26);
- return State_1_ + y;
+ State[1] = x ^ y ^ (x >> 17) ^ (y >> 26);
+ return State[1] + y;
}
private:
- u64 State_0_;
- u64 State_1_;
+ u64 State[2];
};
-} // namespace __scudo
+// Software CRC32 functions, to be used when SSE 4.2 support is not detected.
+u32 computeCRC32(u32 Crc, uptr Data);
+
+} // namespace __scudo
#endif // SCUDO_UTILS_H_
diff --git a/lib/stats/stats_client.cc b/lib/stats/stats_client.cc
index fa4b2d909f5c..5caf09728f73 100644
--- a/lib/stats/stats_client.cc
+++ b/lib/stats/stats_client.cc
@@ -16,6 +16,7 @@
//===----------------------------------------------------------------------===//
#ifdef _WIN32
+#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <dlfcn.h>
diff --git a/lib/tsan/CMakeLists.txt b/lib/tsan/CMakeLists.txt
index 7c84e0aa8d85..b26a884b1a4c 100644
--- a/lib/tsan/CMakeLists.txt
+++ b/lib/tsan/CMakeLists.txt
@@ -17,7 +17,7 @@ endif()
set(TSAN_RTL_CFLAGS ${TSAN_CFLAGS})
append_list_if(COMPILER_RT_HAS_MSSE3_FLAG -msse3 TSAN_RTL_CFLAGS)
-append_list_if(SANITIZER_LIMIT_FRAME_SIZE -Wframe-larger-than=512
+append_list_if(SANITIZER_LIMIT_FRAME_SIZE -Wframe-larger-than=530
TSAN_RTL_CFLAGS)
append_list_if(COMPILER_RT_HAS_WGLOBAL_CONSTRUCTORS_FLAG -Wglobal-constructors
TSAN_RTL_CFLAGS)
@@ -96,8 +96,7 @@ set(TSAN_HEADERS
rtl/tsan_vector.h)
set(TSAN_RUNTIME_LIBRARIES)
-add_custom_target(tsan)
-set_target_properties(tsan PROPERTIES FOLDER "Compiler-RT Misc")
+add_compiler_rt_component(tsan)
if(APPLE)
set(TSAN_ASM_SOURCES rtl/tsan_rtl_amd64.S)
@@ -160,6 +159,11 @@ else()
# Pass ASM file directly to the C++ compiler.
set_source_files_properties(${TSAN_ASM_SOURCES} PROPERTIES
LANGUAGE C)
+ elseif(arch MATCHES "mips64|mips64le")
+ set(TSAN_ASM_SOURCES rtl/tsan_rtl_mips64.S)
+ # Pass ASM file directly to the C++ compiler.
+ set_source_files_properties(${TSAN_ASM_SOURCES} PROPERTIES
+ LANGUAGE C)
else()
set(TSAN_ASM_SOURCES)
endif()
@@ -193,8 +197,6 @@ else()
endforeach()
endif()
-add_dependencies(compiler-rt tsan)
-
# Make sure that non-platform-specific files don't include any system headers.
# FreeBSD does not install a number of Clang-provided headers for the compiler
# in the base system due to incompatibilities between FreeBSD's and Clang's
diff --git a/lib/tsan/go/build.bat b/lib/tsan/go/build.bat
index 3ada9ab116f1..3a64a2413b97 100644
--- a/lib/tsan/go/build.bat
+++ b/lib/tsan/go/build.bat
@@ -1,4 +1,4 @@
type tsan_go.cc ..\rtl\tsan_interface_atomic.cc ..\rtl\tsan_clock.cc ..\rtl\tsan_flags.cc ..\rtl\tsan_md5.cc ..\rtl\tsan_mutex.cc ..\rtl\tsan_report.cc ..\rtl\tsan_rtl.cc ..\rtl\tsan_rtl_mutex.cc ..\rtl\tsan_rtl_report.cc ..\rtl\tsan_rtl_thread.cc ..\rtl\tsan_rtl_proc.cc ..\rtl\tsan_stat.cc ..\rtl\tsan_suppressions.cc ..\rtl\tsan_sync.cc ..\rtl\tsan_stack_trace.cc ..\..\sanitizer_common\sanitizer_allocator.cc ..\..\sanitizer_common\sanitizer_common.cc ..\..\sanitizer_common\sanitizer_flags.cc ..\..\sanitizer_common\sanitizer_stacktrace.cc ..\..\sanitizer_common\sanitizer_libc.cc ..\..\sanitizer_common\sanitizer_printf.cc ..\..\sanitizer_common\sanitizer_suppressions.cc ..\..\sanitizer_common\sanitizer_thread_registry.cc ..\rtl\tsan_platform_windows.cc ..\..\sanitizer_common\sanitizer_win.cc ..\..\sanitizer_common\sanitizer_deadlock_detector1.cc ..\..\sanitizer_common\sanitizer_stackdepot.cc ..\..\sanitizer_common\sanitizer_persistent_allocator.cc ..\..\sanitizer_common\sanitizer_flag_parser.cc ..\..\sanitizer_common\sanitizer_symbolizer.cc ..\..\sanitizer_common\sanitizer_termination.cc > gotsan.cc
-gcc -c -o race_windows_amd64.syso gotsan.cc -I..\rtl -I..\.. -I..\..\sanitizer_common -I..\..\..\include -m64 -Wall -fno-exceptions -fno-rtti -DSANITIZER_GO -Wno-error=attributes -Wno-attributes -Wno-format -Wno-maybe-uninitialized -DSANITIZER_DEBUG=0 -O3 -fomit-frame-pointer -std=c++11
+gcc -c -o race_windows_amd64.syso gotsan.cc -I..\rtl -I..\.. -I..\..\sanitizer_common -I..\..\..\include -m64 -Wall -fno-exceptions -fno-rtti -DSANITIZER_GO=1 -Wno-error=attributes -Wno-attributes -Wno-format -Wno-maybe-uninitialized -DSANITIZER_DEBUG=0 -O3 -fomit-frame-pointer -std=c++11
diff --git a/lib/tsan/go/buildgo.sh b/lib/tsan/go/buildgo.sh
index 834e325bcb0a..42d479064c49 100755
--- a/lib/tsan/go/buildgo.sh
+++ b/lib/tsan/go/buildgo.sh
@@ -113,7 +113,7 @@ for F in $SRCS; do
cat $F >> $DIR/gotsan.cc
done
-FLAGS=" -I../rtl -I../.. -I../../sanitizer_common -I../../../include -std=c++11 -m64 -Wall -fno-exceptions -fno-rtti -DSANITIZER_GO -DSANITIZER_DEADLOCK_DETECTOR_VERSION=2 $OSCFLAGS"
+FLAGS=" -I../rtl -I../.. -I../../sanitizer_common -I../../../include -std=c++11 -m64 -Wall -fno-exceptions -fno-rtti -DSANITIZER_GO=1 -DSANITIZER_DEADLOCK_DETECTOR_VERSION=2 $OSCFLAGS"
if [ "$DEBUG" = "" ]; then
FLAGS="$FLAGS -DSANITIZER_DEBUG=0 -O3 -msse3 -fomit-frame-pointer"
else
diff --git a/lib/tsan/go/tsan_go.cc b/lib/tsan/go/tsan_go.cc
index bc0d55304339..34625c8af0b0 100644
--- a/lib/tsan/go/tsan_go.cc
+++ b/lib/tsan/go/tsan_go.cc
@@ -271,6 +271,11 @@ void __tsan_go_ignore_sync_end(ThreadState *thr) {
ThreadIgnoreSyncEnd(thr, 0);
}
+void __tsan_report_count(u64 *pn) {
+ Lock lock(&ctx->report_mtx);
+ *pn = ctx->nreported;
+}
+
} // extern "C"
} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_clock.cc b/lib/tsan/rtl/tsan_clock.cc
index 1e2050d1f203..32435adfdf33 100644
--- a/lib/tsan/rtl/tsan_clock.cc
+++ b/lib/tsan/rtl/tsan_clock.cc
@@ -82,7 +82,7 @@
// We don't have ThreadState in these methods, so this is an ugly hack that
// works only in C++.
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
# define CPP_STAT_INC(typ) StatInc(cur_thread(), typ)
#else
# define CPP_STAT_INC(typ) (void)0
diff --git a/lib/tsan/rtl/tsan_debugging.cc b/lib/tsan/rtl/tsan_debugging.cc
index ac24c89be7da..d9fb6861bc0c 100644
--- a/lib/tsan/rtl/tsan_debugging.cc
+++ b/lib/tsan/rtl/tsan_debugging.cc
@@ -15,6 +15,8 @@
#include "tsan_report.h"
#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+
using namespace __tsan;
static const char *ReportTypeDescription(ReportType typ) {
@@ -160,3 +162,78 @@ int __tsan_get_report_unique_tid(void *report, uptr idx, int *tid) {
*tid = rep->unique_tids[idx];
return 1;
}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+const char *__tsan_locate_address(uptr addr, char *name, uptr name_size,
+ uptr *region_address_ptr,
+ uptr *region_size_ptr) {
+ uptr region_address = 0;
+ uptr region_size = 0;
+ const char *region_kind = nullptr;
+ if (name && name_size > 0) name[0] = 0;
+
+ if (IsMetaMem(addr)) {
+ region_kind = "meta shadow";
+ } else if (IsShadowMem(addr)) {
+ region_kind = "shadow";
+ } else {
+ bool is_stack = false;
+ MBlock *b = 0;
+ Allocator *a = allocator();
+ if (a->PointerIsMine((void *)addr)) {
+ void *block_begin = a->GetBlockBegin((void *)addr);
+ if (block_begin) b = ctx->metamap.GetBlock((uptr)block_begin);
+ }
+
+ if (b != 0) {
+ region_address = (uptr)allocator()->GetBlockBegin((void *)addr);
+ region_size = b->siz;
+ region_kind = "heap";
+ } else {
+ // TODO(kuba.brecka): We should not lock. This is supposed to be called
+ // from within the debugger when other threads are stopped.
+ ctx->thread_registry->Lock();
+ ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack);
+ ctx->thread_registry->Unlock();
+ if (tctx) {
+ region_kind = is_stack ? "stack" : "tls";
+ } else {
+ region_kind = "global";
+ DataInfo info;
+ if (Symbolizer::GetOrInit()->SymbolizeData(addr, &info)) {
+ internal_strncpy(name, info.name, name_size);
+ region_address = info.start;
+ region_size = info.size;
+ }
+ }
+ }
+ }
+
+ CHECK(region_kind);
+ if (region_address_ptr) *region_address_ptr = region_address;
+ if (region_size_ptr) *region_size_ptr = region_size;
+ return region_kind;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_alloc_stack(uptr addr, uptr *trace, uptr size, int *thread_id,
+ uptr *os_id) {
+ MBlock *b = 0;
+ Allocator *a = allocator();
+ if (a->PointerIsMine((void *)addr)) {
+ void *block_begin = a->GetBlockBegin((void *)addr);
+ if (block_begin) b = ctx->metamap.GetBlock((uptr)block_begin);
+ }
+ if (b == 0) return 0;
+
+ *thread_id = b->tid;
+ // No locking. This is supposed to be called from within the debugger when
+ // other threads are stopped.
+ ThreadContextBase *tctx = ctx->thread_registry->GetThreadLocked(b->tid);
+ *os_id = tctx->os_id;
+
+ StackTrace stack = StackDepotGet(b->stk);
+ size = Min(size, (uptr)stack.size);
+ for (uptr i = 0; i < size; i++) trace[i] = stack.trace[stack.size - i - 1];
+ return size;
+}
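
These two entry points are meant to be driven from a debugger while other threads are stopped. A rough caller-side sketch, declaring the functions directly from the definitions above rather than from any public header (uptr is assumed to be unsigned long here):

    extern "C" const char *__tsan_locate_address(unsigned long addr, char *name,
                                                 unsigned long name_size,
                                                 unsigned long *region_address,
                                                 unsigned long *region_size);
    extern "C" int __tsan_get_alloc_stack(unsigned long addr, unsigned long *trace,
                                          unsigned long size, int *thread_id,
                                          unsigned long *os_id);

    void describe(void *p) {
      char name[128];
      unsigned long start = 0, size = 0;
      // kind is one of "heap", "stack", "tls", "global", "shadow", "meta shadow".
      const char *kind = __tsan_locate_address(
          (unsigned long)p, name, sizeof(name), &start, &size);
      unsigned long trace[16];
      int tid = 0;
      unsigned long os_id = 0;
      // For heap addresses, recovers up to 16 frames of the allocation stack.
      int frames = __tsan_get_alloc_stack((unsigned long)p, trace, 16, &tid, &os_id);
      (void)kind; (void)frames;
    }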
diff --git a/lib/tsan/rtl/tsan_defs.h b/lib/tsan/rtl/tsan_defs.h
index cdc23d0a7e49..55580a5c4436 100644
--- a/lib/tsan/rtl/tsan_defs.h
+++ b/lib/tsan/rtl/tsan_defs.h
@@ -29,7 +29,7 @@
#endif
#ifndef TSAN_CONTAINS_UBSAN
-# if CAN_SANITIZE_UB && !defined(SANITIZER_GO)
+# if CAN_SANITIZE_UB && !SANITIZER_GO
# define TSAN_CONTAINS_UBSAN 1
# else
# define TSAN_CONTAINS_UBSAN 0
@@ -38,19 +38,9 @@
namespace __tsan {
-#ifdef SANITIZER_GO
-const bool kGoMode = true;
-const bool kCppMode = false;
-const char *const kTsanOptionsEnv = "GORACE";
-#else
-const bool kGoMode = false;
-const bool kCppMode = true;
-const char *const kTsanOptionsEnv = "TSAN_OPTIONS";
-#endif
-
const int kTidBits = 13;
const unsigned kMaxTid = 1 << kTidBits;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit.
#else
const unsigned kMaxTidInClock = kMaxTid; // Go does not track freed memory.
diff --git a/lib/tsan/rtl/tsan_flags.cc b/lib/tsan/rtl/tsan_flags.cc
index 93f598616f3a..d8d4746ab59b 100644
--- a/lib/tsan/rtl/tsan_flags.cc
+++ b/lib/tsan/rtl/tsan_flags.cc
@@ -61,7 +61,7 @@ void InitializeFlags(Flags *f, const char *env) {
CommonFlags cf;
cf.CopyFrom(*common_flags());
cf.allow_addr2line = true;
- if (kGoMode) {
+ if (SANITIZER_GO) {
// Does not work as expected for Go: runtime handles SIGABRT and crashes.
cf.abort_on_error = false;
// Go does not have mutexes.
diff --git a/lib/tsan/rtl/tsan_flags.inc b/lib/tsan/rtl/tsan_flags.inc
index 4fb443612c42..071cf427d23b 100644
--- a/lib/tsan/rtl/tsan_flags.inc
+++ b/lib/tsan/rtl/tsan_flags.inc
@@ -61,8 +61,9 @@ TSAN_FLAG(bool, stop_on_start, false,
"Stops on start until __tsan_resume() is called (for debugging).")
TSAN_FLAG(bool, running_on_valgrind, false,
"Controls whether RunningOnValgrind() returns true or false.")
+// There are a lot of goroutines in Go, so we use smaller history.
TSAN_FLAG(
- int, history_size, kGoMode ? 1 : 3, // There are a lot of goroutines in Go.
+ int, history_size, SANITIZER_GO ? 1 : 3,
"Per-thread history size, controls how many previous memory accesses "
"are remembered per thread. Possible values are [0..7]. "
"history_size=0 amounts to 32K memory accesses. Each next value doubles "
diff --git a/lib/tsan/rtl/tsan_interceptors.cc b/lib/tsan/rtl/tsan_interceptors.cc
index fb6227651d21..a3a50e13f9bf 100644
--- a/lib/tsan/rtl/tsan_interceptors.cc
+++ b/lib/tsan/rtl/tsan_interceptors.cc
@@ -88,8 +88,6 @@ extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
extern "C" int pthread_setspecific(unsigned key, const void *v);
DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
-extern "C" int pthread_sigmask(int how, const __sanitizer_sigset_t *set,
- __sanitizer_sigset_t *oldset);
DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
@@ -485,6 +483,8 @@ static void LongJmp(ThreadState *thr, uptr *env) {
#elif defined(SANITIZER_LINUX)
# ifdef __aarch64__
uptr mangled_sp = env[13];
+# elif defined(__mips64)
+ uptr mangled_sp = env[1];
# else
uptr mangled_sp = env[6];
# endif
@@ -1762,6 +1762,31 @@ TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout,
#define TSAN_MAYBE_INTERCEPT_EPOLL
#endif
+// The following functions are intercepted merely to process pending signals.
+// If the program blocks signal X, we must deliver the signal before the
+// function returns. Similarly, if the program unblocks a signal (or returns
+// from sigsuspend), it's better to deliver the signal straight away.
+TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
+ SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
+ return REAL(sigsuspend)(mask);
+}
+
+TSAN_INTERCEPTOR(int, sigblock, int mask) {
+ SCOPED_TSAN_INTERCEPTOR(sigblock, mask);
+ return REAL(sigblock)(mask);
+}
+
+TSAN_INTERCEPTOR(int, sigsetmask, int mask) {
+ SCOPED_TSAN_INTERCEPTOR(sigsetmask, mask);
+ return REAL(sigsetmask)(mask);
+}
+
+TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset);
+ return REAL(pthread_sigmask)(how, set, oldset);
+}
+
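
As a concrete illustration of the pattern these interceptors care about (illustrative only, not part of the patch): a signal that became pending while blocked should have its handler run, under TSan's bookkeeping, as soon as pthread_sigmask unblocks it rather than at some later interception point.

#include <pthread.h>
#include <signal.h>
#include <unistd.h>

static volatile sig_atomic_t got_usr1 = 0;
static void handler(int) { got_usr1 = 1; }

int main() {
  signal(SIGUSR1, handler);
  sigset_t set, old;
  sigemptyset(&set);
  sigaddset(&set, SIGUSR1);
  pthread_sigmask(SIG_BLOCK, &set, &old);  // SIGUSR1 is now blocked.
  raise(SIGUSR1);                          // Becomes pending, not delivered.
  pthread_sigmask(SIG_SETMASK, &old, 0);   // The interceptor above runs here,
                                           // giving TSan a prompt point to
                                           // process the pending signal.
  while (!got_usr1) usleep(1000);
  return 0;
}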
namespace __tsan {
static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
@@ -1833,7 +1858,8 @@ void ProcessPendingSignals(ThreadState *thr) {
atomic_store(&sctx->have_pending_signals, 0, memory_order_relaxed);
atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
internal_sigfillset(&sctx->emptyset);
- CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &sctx->emptyset, &sctx->oldset));
+ int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset);
+ CHECK_EQ(res, 0);
for (int sig = 0; sig < kSigCount; sig++) {
SignalDesc *signal = &sctx->pending_signals[sig];
if (signal->armed) {
@@ -1842,7 +1868,8 @@ void ProcessPendingSignals(ThreadState *thr) {
&signal->siginfo, &signal->ctx);
}
}
- CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &sctx->oldset, 0));
+ res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0);
+ CHECK_EQ(res, 0);
atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
}
@@ -1958,11 +1985,6 @@ TSAN_INTERCEPTOR(sighandler_t, signal, int sig, sighandler_t h) {
return old.sa_handler;
}
-TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
- SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
- return REAL(sigsuspend)(mask);
-}
-
TSAN_INTERCEPTOR(int, raise, int sig) {
SCOPED_TSAN_INTERCEPTOR(raise, sig);
ThreadSignalContext *sctx = SigCtx(thr);
@@ -2553,6 +2575,9 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(sigaction);
TSAN_INTERCEPT(signal);
TSAN_INTERCEPT(sigsuspend);
+ TSAN_INTERCEPT(sigblock);
+ TSAN_INTERCEPT(sigsetmask);
+ TSAN_INTERCEPT(pthread_sigmask);
TSAN_INTERCEPT(raise);
TSAN_INTERCEPT(kill);
TSAN_INTERCEPT(pthread_kill);
diff --git a/lib/tsan/rtl/tsan_interceptors_mac.cc b/lib/tsan/rtl/tsan_interceptors_mac.cc
index 593963825c58..fc5eb0499076 100644
--- a/lib/tsan/rtl/tsan_interceptors_mac.cc
+++ b/lib/tsan/rtl/tsan_interceptors_mac.cc
@@ -119,24 +119,23 @@ OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap32, __tsan_atomic32, a32,
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap64, __tsan_atomic64, a64,
int64_t)
-#define OSATOMIC_INTERCEPTOR_BITOP(f, op, m, mo) \
+#define OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, mo) \
TSAN_INTERCEPTOR(bool, f, uint32_t n, volatile void *ptr) { \
SCOPED_TSAN_INTERCEPTOR(f, n, ptr); \
char *byte_ptr = ((char *)ptr) + (n >> 3); \
- char bit_index = n & 7; \
- char mask = m; \
+ char bit = 0x80u >> (n & 7); \
+ char mask = clear ? ~bit : bit; \
char orig_byte = op((a8 *)byte_ptr, mask, mo); \
- return orig_byte & mask; \
+ return orig_byte & bit; \
}
-#define OSATOMIC_INTERCEPTORS_BITOP(f, op, m) \
- OSATOMIC_INTERCEPTOR_BITOP(f, op, m, kMacOrderNonBarrier) \
- OSATOMIC_INTERCEPTOR_BITOP(f##Barrier, op, m, kMacOrderBarrier)
+#define OSATOMIC_INTERCEPTORS_BITOP(f, op, clear) \
+ OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, kMacOrderNonBarrier) \
+ OSATOMIC_INTERCEPTOR_BITOP(f##Barrier, op, clear, kMacOrderBarrier)
-OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndSet, __tsan_atomic8_fetch_or,
- 0x80u >> bit_index)
+OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndSet, __tsan_atomic8_fetch_or, false)
OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndClear, __tsan_atomic8_fetch_and,
- ~(0x80u >> bit_index))
+ true)
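
A small stand-alone sketch of the bit addressing this rewritten macro implements (illustrative only, using the plain __atomic builtin instead of TSan's instrumented fetch_or): OSAtomicTestAndSet/Clear number bits MSB-first, so bit n lives in byte n >> 3 under the mask 0x80 >> (n & 7), and the return value reports the original state of that bit.

#include <assert.h>
#include <stdint.h>

static bool test_and_set_bit(uint32_t n, volatile void *ptr) {
  volatile uint8_t *byte_ptr = (volatile uint8_t *)ptr + (n >> 3);
  uint8_t bit = 0x80u >> (n & 7);
  // The "clear" flavor would use fetch_and with ~bit instead of fetch_or.
  uint8_t orig = __atomic_fetch_or(byte_ptr, bit, __ATOMIC_RELAXED);
  return orig & bit;
}

int main() {
  uint8_t buf[2] = {0, 0};
  assert(!test_and_set_bit(9, buf));  // bit 9 -> buf[1], mask 0x40
  assert(buf[1] == 0x40);
  assert(test_and_set_bit(9, buf));   // the second call sees it already set
  return 0;
}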
TSAN_INTERCEPTOR(void, OSAtomicEnqueue, OSQueueHead *list, void *item,
size_t offset) {
@@ -298,18 +297,20 @@ struct fake_shared_weak_count {
};
} // namespace
-// This adds a libc++ interceptor for:
+// The following code adds libc++ interceptors for:
// void __shared_weak_count::__release_shared() _NOEXCEPT;
+// bool __shared_count::__release_shared() _NOEXCEPT;
// Shared and weak pointers in C++ maintain reference counts via atomics in
// libc++.dylib, which are TSan-invisible, and this leads to false positives in
-// destructor code. This interceptor re-implements the whole function so that
+// destructor code. These interceptors re-implement the functions entirely so that
// the mo_acq_rel semantics of the atomic decrement are visible.
//
-// Unfortunately, this interceptor cannot simply Acquire/Release some sync
+// Unfortunately, the interceptors cannot simply Acquire/Release some sync
// object and call the original function, because it would have a race between
// the sync and the destruction of the object. Calling both under a lock will
// not work because the destructor can invoke this interceptor again (and even
// in a different thread, so recursive locks don't help).
+
STDCXX_INTERCEPTOR(void, _ZNSt3__119__shared_weak_count16__release_sharedEv,
fake_shared_weak_count *o) {
if (!flags()->shared_ptr_interceptor)
@@ -328,6 +329,47 @@ STDCXX_INTERCEPTOR(void, _ZNSt3__119__shared_weak_count16__release_sharedEv,
}
}
+STDCXX_INTERCEPTOR(bool, _ZNSt3__114__shared_count16__release_sharedEv,
+ fake_shared_weak_count *o) {
+ if (!flags()->shared_ptr_interceptor)
+ return REAL(_ZNSt3__114__shared_count16__release_sharedEv)(o);
+
+ SCOPED_TSAN_INTERCEPTOR(_ZNSt3__114__shared_count16__release_sharedEv, o);
+ if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) {
+ Acquire(thr, pc, (uptr)&o->shared_owners);
+ o->on_zero_shared();
+ return true;
+ }
+ return false;
+}
+
+namespace {
+struct call_once_callback_args {
+ void (*orig_func)(void *arg);
+ void *orig_arg;
+ void *flag;
+};
+
+void call_once_callback_wrapper(void *arg) {
+ call_once_callback_args *new_args = (call_once_callback_args *)arg;
+ new_args->orig_func(new_args->orig_arg);
+ __tsan_release(new_args->flag);
+}
+} // namespace
+
+// This adds a libc++ interceptor for:
+// void __call_once(volatile unsigned long&, void*, void(*)(void*));
+// C++11 call_once is implemented via an internal function __call_once which is
+// inside libc++.dylib, and the atomic release store inside it is thus
+// TSan-invisible. To avoid false positives, this interceptor wraps the callback
+// function and performs an explicit Release after the user code has run.
+STDCXX_INTERCEPTOR(void, _ZNSt3__111__call_onceERVmPvPFvS2_E, void *flag,
+ void *arg, void (*func)(void *arg)) {
+ call_once_callback_args new_args = {func, arg, flag};
+ REAL(_ZNSt3__111__call_onceERVmPvPFvS2_E)(flag, &new_args,
+ call_once_callback_wrapper);
+}
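
The user-level pattern this interceptor exists for looks roughly like the sketch below (illustrative, not part of the patch): the initializing store and the later read are ordered by libc++'s internal atomics inside __call_once, which TSan cannot see, so without the explicit __tsan_release on the flag the read of *data could be reported as a race.

#include <mutex>
#include <thread>

static std::once_flag flag;
static int *data = nullptr;

static void use() {
  std::call_once(flag, [] { data = new int(42); });
  (void)*data;  // Ordered by call_once; no report expected.
}

int main() {
  std::thread t1(use), t2(use);
  t1.join();
  t2.join();
  delete data;
  return 0;
}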
+
} // namespace __tsan
#endif // SANITIZER_MAC
diff --git a/lib/tsan/rtl/tsan_interface.h b/lib/tsan/rtl/tsan_interface.h
index fbb099d07764..bae01bd707d7 100644
--- a/lib/tsan/rtl/tsan_interface.h
+++ b/lib/tsan/rtl/tsan_interface.h
@@ -17,6 +17,7 @@
#define TSAN_INTERFACE_H
#include <sanitizer_common/sanitizer_internal_defs.h>
+using __sanitizer::uptr;
// This header should NOT include any other headers.
// All functions in this header are extern "C" and start with __tsan_.
@@ -25,7 +26,7 @@
extern "C" {
#endif
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// This function should be called at the very beginning of the process,
// before any instrumented code is executed and before any call to malloc.
@@ -72,6 +73,9 @@ void __tsan_vptr_update(void **vptr_p, void *new_val);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_entry(void *call_pc);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_exit();
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_ignore_thread_begin();
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_ignore_thread_end();
+
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_read_range(void *addr, unsigned long size); // NOLINT
SANITIZER_INTERFACE_ATTRIBUTE
@@ -132,6 +136,17 @@ int __tsan_get_report_thread(void *report, uptr idx, int *tid, uptr *os_id,
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_report_unique_tid(void *report, uptr idx, int *tid);
+// Returns the type of the pointer (heap, stack, global, ...) and if possible
+// also the starting address (e.g. of a heap allocation) and size.
+SANITIZER_INTERFACE_ATTRIBUTE
+const char *__tsan_locate_address(uptr addr, char *name, uptr name_size,
+ uptr *region_address, uptr *region_size);
+
+// Returns the allocation stack for a heap pointer.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_alloc_stack(uptr addr, uptr *trace, uptr size, int *thread_id,
+ uptr *os_id);
+
#endif // SANITIZER_GO
#ifdef __cplusplus
@@ -145,7 +160,7 @@ typedef unsigned char a8;
typedef unsigned short a16; // NOLINT
typedef unsigned int a32;
typedef unsigned long long a64; // NOLINT
-#if !defined(SANITIZER_GO) && (defined(__SIZEOF_INT128__) \
+#if !SANITIZER_GO && (defined(__SIZEOF_INT128__) \
|| (__clang_major__ * 100 + __clang_minor__ >= 302)) && !defined(__mips64)
__extension__ typedef __int128 a128;
# define __TSAN_HAS_INT128 1
diff --git a/lib/tsan/rtl/tsan_interface_atomic.cc b/lib/tsan/rtl/tsan_interface_atomic.cc
index dc0873f7948b..5238b66a2e51 100644
--- a/lib/tsan/rtl/tsan_interface_atomic.cc
+++ b/lib/tsan/rtl/tsan_interface_atomic.cc
@@ -28,7 +28,7 @@
using namespace __tsan; // NOLINT
-#if !defined(SANITIZER_GO) && __TSAN_HAS_INT128
+#if !SANITIZER_GO && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif
@@ -102,7 +102,7 @@ template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
-#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !defined(SANITIZER_GO) \
+#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \
&& __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
SpinMutexLock lock(&mutex128);
@@ -176,7 +176,7 @@ static int SizeLog() {
// this leads to false negatives only in very obscure cases.
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}
@@ -212,7 +212,7 @@ static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
return atomic_load(to_atomic(a), to_mo(mo));
}
-#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
+#if __TSAN_HAS_INT128 && !SANITIZER_GO
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
SpinMutexLock lock(&mutex128);
return *a;
@@ -242,7 +242,7 @@ static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
atomic_store(to_atomic(a), v, to_mo(mo));
}
-#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
+#if __TSAN_HAS_INT128 && !SANITIZER_GO
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
SpinMutexLock lock(&mutex128);
*a = v;
@@ -267,7 +267,7 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
- ReleaseImpl(thr, pc, &s->clock);
+ ReleaseStoreImpl(thr, pc, &s->clock);
NoTsanAtomicStore(a, v, mo);
s->mtx.Unlock();
}
@@ -434,7 +434,7 @@ static T AtomicCAS(ThreadState *thr, uptr pc,
return c;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static void NoTsanAtomicFence(morder mo) {
__sync_synchronize();
}
@@ -446,7 +446,7 @@ static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
#endif
// Interface functions follow.
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// C/C++
@@ -845,7 +845,7 @@ void __tsan_atomic_signal_fence(morder mo) {
}
} // extern "C"
-#else // #ifndef SANITIZER_GO
+#else // #if !SANITIZER_GO
// Go
@@ -928,4 +928,4 @@ void __tsan_go_atomic64_compare_exchange(
*(bool*)(a+24) = (cur == cmp);
}
} // extern "C"
-#endif // #ifndef SANITIZER_GO
+#endif // #if !SANITIZER_GO
diff --git a/lib/tsan/rtl/tsan_interface_inl.h b/lib/tsan/rtl/tsan_interface_inl.h
index 8852aa348b8e..fff83ee17806 100644
--- a/lib/tsan/rtl/tsan_interface_inl.h
+++ b/lib/tsan/rtl/tsan_interface_inl.h
@@ -108,6 +108,14 @@ void __tsan_func_exit() {
FuncExit(cur_thread());
}
+void __tsan_ignore_thread_begin() {
+ ThreadIgnoreBegin(cur_thread(), CALLERPC);
+}
+
+void __tsan_ignore_thread_end() {
+ ThreadIgnoreEnd(cur_thread(), CALLERPC);
+}
+
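
For illustration (not part of the patch), a client could use the new entry points to bracket a region whose accesses should not produce reports, mirroring what the existing ThreadIgnoreBegin/End machinery already does for interceptors:

extern "C" void __tsan_ignore_thread_begin();
extern "C" void __tsan_ignore_thread_end();

// Copies data without generating race reports for the accesses in between;
// the caller takes responsibility for whatever synchronization TSan can no
// longer check here.
static void copy_without_reports(char *dst, const char *src, unsigned n) {
  __tsan_ignore_thread_begin();
  for (unsigned i = 0; i < n; i++) dst[i] = src[i];
  __tsan_ignore_thread_end();
}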
void __tsan_read_range(void *addr, uptr size) {
MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, false);
}
diff --git a/lib/tsan/rtl/tsan_interface_java.cc b/lib/tsan/rtl/tsan_interface_java.cc
index 95be85994c64..5bdc04f07567 100644
--- a/lib/tsan/rtl/tsan_interface_java.cc
+++ b/lib/tsan/rtl/tsan_interface_java.cc
@@ -150,6 +150,23 @@ void __tsan_java_move(jptr src, jptr dst, jptr size) {
}
}
+jptr __tsan_java_find(jptr *from_ptr, jptr to) {
+ SCOPED_JAVA_FUNC(__tsan_java_find);
+  DPrintf("#%d: java_find(&%p, %p)\n", thr->tid, *from_ptr, to);
+ CHECK_EQ((*from_ptr) % kHeapAlignment, 0);
+ CHECK_EQ(to % kHeapAlignment, 0);
+ CHECK_GE(*from_ptr, jctx->heap_begin);
+ CHECK_LE(to, jctx->heap_begin + jctx->heap_size);
+ for (uptr from = *from_ptr; from < to; from += kHeapAlignment) {
+ MBlock *b = ctx->metamap.GetBlock(from);
+ if (b) {
+ *from_ptr = from;
+ return b->siz;
+ }
+ }
+ return 0;
+}
+
void __tsan_java_finalize() {
SCOPED_JAVA_FUNC(__tsan_java_finalize);
  DPrintf("#%d: java_finalize()\n", thr->tid);
diff --git a/lib/tsan/rtl/tsan_interface_java.h b/lib/tsan/rtl/tsan_interface_java.h
index 30153a1d8505..0bd49ac3c999 100644
--- a/lib/tsan/rtl/tsan_interface_java.h
+++ b/lib/tsan/rtl/tsan_interface_java.h
@@ -57,6 +57,10 @@ void __tsan_java_move(jptr src, jptr dst, jptr size) INTERFACE_ATTRIBUTE;
// It ensures necessary synchronization between
// java object creation and finalization.
void __tsan_java_finalize() INTERFACE_ATTRIBUTE;
+// Finds the first allocated memory block in the [*from_ptr, to) range, saves
+// its address in *from_ptr and returns its size. Returns 0 if there are no
+// allocated memory blocks in the range.
+jptr __tsan_java_find(jptr *from_ptr, jptr to) INTERFACE_ATTRIBUTE;
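
For illustration (not part of the patch), a Java VM embedding this interface could enumerate the live blocks of a heap segment roughly as below, with the range bounds aligned to kHeapAlignment as the checks in the implementation require:

#include "tsan_interface_java.h"

// Visits every allocated block in [begin, end); relies on __tsan_java_find
// advancing *from_ptr to the start of the block it found and returning 0 when
// no further blocks exist in the range.
static void visit_heap_blocks(jptr begin, jptr end,
                              void (*visit)(jptr addr, jptr size)) {
  jptr from = begin;
  while (from < end) {
    jptr size = __tsan_java_find(&from, end);
    if (size == 0) break;
    visit(from, size);
    from += size;  // Assuming sizes registered via __tsan_java_alloc are
                   // kHeapAlignment-aligned, 'from' stays aligned.
  }
}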
// Mutex lock.
// Addr is any unique address associated with the mutex.
diff --git a/lib/tsan/rtl/tsan_libdispatch_mac.cc b/lib/tsan/rtl/tsan_libdispatch_mac.cc
index 529cedba4d2b..d8c689ebb5fc 100644
--- a/lib/tsan/rtl/tsan_libdispatch_mac.cc
+++ b/lib/tsan/rtl/tsan_libdispatch_mac.cc
@@ -68,13 +68,17 @@ static bool IsQueueSerial(dispatch_queue_t q) {
return width == 1;
}
-static dispatch_queue_t GetTargetQueueFromSource(dispatch_source_t source) {
+static dispatch_queue_t GetTargetQueueFromQueue(dispatch_queue_t q) {
CHECK_EQ(dispatch_queue_offsets.dqo_target_queue_size, 8);
- dispatch_queue_t target_queue =
- *(dispatch_queue_t *)(((uptr)source) +
- dispatch_queue_offsets.dqo_target_queue);
- CHECK_NE(target_queue, 0);
- return target_queue;
+ dispatch_queue_t tq = *(
+ dispatch_queue_t *)(((uptr)q) + dispatch_queue_offsets.dqo_target_queue);
+ return tq;
+}
+
+static dispatch_queue_t GetTargetQueueFromSource(dispatch_source_t source) {
+ dispatch_queue_t tq = GetTargetQueueFromQueue((dispatch_queue_t)source);
+ CHECK_NE(tq, 0);
+ return tq;
}
static tsan_block_context_t *AllocContext(ThreadState *thr, uptr pc,
@@ -92,27 +96,53 @@ static tsan_block_context_t *AllocContext(ThreadState *thr, uptr pc,
return new_context;
}
+#define GET_QUEUE_SYNC_VARS(context, q) \
+ bool is_queue_serial = q && IsQueueSerial(q); \
+ uptr sync_ptr = (uptr)q ?: context->non_queue_sync_object; \
+ uptr serial_sync = (uptr)sync_ptr; \
+ uptr concurrent_sync = ((uptr)sync_ptr) + sizeof(uptr); \
+ bool serial_task = context->is_barrier_block || is_queue_serial
+
+static void dispatch_sync_pre_execute(ThreadState *thr, uptr pc,
+ tsan_block_context_t *context) {
+ uptr submit_sync = (uptr)context;
+ Acquire(thr, pc, submit_sync);
+
+ dispatch_queue_t q = context->queue;
+ do {
+ GET_QUEUE_SYNC_VARS(context, q);
+ Acquire(thr, pc, serial_sync);
+ if (serial_task) Acquire(thr, pc, concurrent_sync);
+
+ if (q) q = GetTargetQueueFromQueue(q);
+ } while (q);
+}
+
+static void dispatch_sync_post_execute(ThreadState *thr, uptr pc,
+ tsan_block_context_t *context) {
+ uptr submit_sync = (uptr)context;
+ if (context->submitted_synchronously) Release(thr, pc, submit_sync);
+
+ dispatch_queue_t q = context->queue;
+ do {
+ GET_QUEUE_SYNC_VARS(context, q);
+ Release(thr, pc, serial_task ? serial_sync : concurrent_sync);
+
+ if (q) q = GetTargetQueueFromQueue(q);
+ } while (q);
+}
+
static void dispatch_callback_wrap(void *param) {
SCOPED_INTERCEPTOR_RAW(dispatch_callback_wrap);
tsan_block_context_t *context = (tsan_block_context_t *)param;
- bool is_queue_serial = context->queue && IsQueueSerial(context->queue);
- uptr sync_ptr = (uptr)context->queue ?: context->non_queue_sync_object;
- uptr serial_sync = (uptr)sync_ptr;
- uptr concurrent_sync = ((uptr)sync_ptr) + sizeof(uptr);
- uptr submit_sync = (uptr)context;
- bool serial_task = context->is_barrier_block || is_queue_serial;
-
- Acquire(thr, pc, submit_sync);
- Acquire(thr, pc, serial_sync);
- if (serial_task) Acquire(thr, pc, concurrent_sync);
+ dispatch_sync_pre_execute(thr, pc, context);
SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
context->orig_work(context->orig_context);
SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
- Release(thr, pc, serial_task ? serial_sync : concurrent_sync);
- if (context->submitted_synchronously) Release(thr, pc, submit_sync);
+ dispatch_sync_post_execute(thr, pc, context);
if (context->free_context_in_callback) user_free(thr, pc, context);
}
@@ -676,6 +706,15 @@ TSAN_INTERCEPTOR(void, dispatch_io_close, dispatch_io_t channel,
return REAL(dispatch_io_close)(channel, flags);
}
+// Resuming a suspended queue needs to synchronize with all subsequent
+// executions of blocks in that queue.
+TSAN_INTERCEPTOR(void, dispatch_resume, dispatch_object_t o) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_resume, o);
+ Release(thr, pc, (uptr)o); // Synchronizes with the Acquire() on serial_sync
+ // in dispatch_sync_pre_execute
+ return REAL(dispatch_resume)(o);
+}
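
An illustrative user pattern (not part of the patch) that this makes race-free: shared state is mutated while the queue is suspended, and the Release on the queue object here pairs with the Acquire on serial_sync that dispatch_sync_pre_execute performs before each block runs.

#include <dispatch/dispatch.h>

int main() {
  __block int config = 0;
  dispatch_queue_t q = dispatch_queue_create("q", DISPATCH_QUEUE_SERIAL);
  dispatch_suspend(q);
  dispatch_async(q, ^{ (void)config; });  // Queued, but cannot run yet.
  config = 42;                            // Plain write while q is suspended.
  dispatch_resume(q);                     // Release(q) happens here.
  dispatch_sync(q, ^{});                  // Drain before tearing down.
  dispatch_release(q);
  return 0;
}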
+
} // namespace __tsan
#endif // SANITIZER_MAC
diff --git a/lib/tsan/rtl/tsan_mman.cc b/lib/tsan/rtl/tsan_mman.cc
index 7693077f622b..2dea24915e8a 100644
--- a/lib/tsan/rtl/tsan_mman.cc
+++ b/lib/tsan/rtl/tsan_mman.cc
@@ -54,7 +54,8 @@ struct MapUnmapCallback {
diff = p + size - RoundDown(p + size, kPageSize);
if (diff != 0)
size -= diff;
- FlushUnneededShadowMemory((uptr)MemToMeta(p), size / kMetaRatio);
+ uptr p_meta = (uptr)MemToMeta(p);
+ ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
}
};
@@ -111,7 +112,9 @@ ScopedGlobalProcessor::~ScopedGlobalProcessor() {
}
void InitializeAllocator() {
- allocator()->Init(common_flags()->allocator_may_return_null);
+ allocator()->Init(
+ common_flags()->allocator_may_return_null,
+ common_flags()->allocator_release_to_os_interval_ms);
}
void InitializeAllocatorLate() {
@@ -148,7 +151,7 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
- return allocator()->ReturnNullOrDie();
+ return allocator()->ReturnNullOrDieOnBadRequest();
void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
if (p == 0)
return 0;
@@ -161,7 +164,7 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
if (CallocShouldReturnNullDueToOverflow(size, n))
- return allocator()->ReturnNullOrDie();
+ return allocator()->ReturnNullOrDieOnBadRequest();
void *p = user_alloc(thr, pc, n * size);
if (p)
internal_memset(p, 0, n * size);
@@ -195,20 +198,16 @@ void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
}
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
- void *p2 = 0;
// FIXME: Handle "shrinking" more efficiently,
// it seems that some software actually does this.
- if (sz) {
- p2 = user_alloc(thr, pc, sz);
- if (p2 == 0)
- return 0;
- if (p) {
- uptr oldsz = user_alloc_usable_size(p);
- internal_memcpy(p2, p, min(oldsz, sz));
- }
- }
- if (p)
+ void *p2 = user_alloc(thr, pc, sz);
+ if (p2 == 0)
+ return 0;
+ if (p) {
+ uptr oldsz = user_alloc_usable_size(p);
+ internal_memcpy(p2, p, min(oldsz, sz));
user_free(thr, pc, p);
+ }
return p2;
}
diff --git a/lib/tsan/rtl/tsan_mutexset.h b/lib/tsan/rtl/tsan_mutexset.h
index 68f0ec26fa25..605c21a9c08f 100644
--- a/lib/tsan/rtl/tsan_mutexset.h
+++ b/lib/tsan/rtl/tsan_mutexset.h
@@ -43,7 +43,7 @@ class MutexSet {
}
private:
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
uptr size_;
Desc descs_[kMaxSize];
#endif
@@ -55,7 +55,7 @@ class MutexSet {
// Go does not have mutexes, so do not spend memory and time.
// (Go sync.Mutex is actually a semaphore -- can be unlocked
// in different goroutine).
-#ifdef SANITIZER_GO
+#if SANITIZER_GO
MutexSet::MutexSet() {}
void MutexSet::Add(u64 id, bool write, u64 epoch) {}
void MutexSet::Del(u64 id, bool write) {}
diff --git a/lib/tsan/rtl/tsan_platform.h b/lib/tsan/rtl/tsan_platform.h
index 2bd6637d0ea3..1dd9d91d4c92 100644
--- a/lib/tsan/rtl/tsan_platform.h
+++ b/lib/tsan/rtl/tsan_platform.h
@@ -24,40 +24,46 @@
namespace __tsan {
-#if !defined(SANITIZER_GO)
+#if !SANITIZER_GO
#if defined(__x86_64__)
/*
C/C++ on linux/x86_64 and freebsd/x86_64
-0000 0000 1000 - 0100 0000 0000: main binary and/or MAP_32BIT mappings
-0100 0000 0000 - 0200 0000 0000: -
-0200 0000 0000 - 1000 0000 0000: shadow
-1000 0000 0000 - 3000 0000 0000: -
+0000 0000 1000 - 0080 0000 0000: main binary and/or MAP_32BIT mappings (512GB)
+0080 0000 0000 - 0100 0000 0000: -
+0100 0000 0000 - 2000 0000 0000: shadow
+2000 0000 0000 - 3000 0000 0000: -
3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
-4000 0000 0000 - 6000 0000 0000: -
+4000 0000 0000 - 5500 0000 0000: -
+5500 0000 0000 - 5680 0000 0000: pie binaries without ASLR or on 4.1+ kernels
+5680 0000 0000 - 6000 0000 0000: -
6000 0000 0000 - 6200 0000 0000: traces
6200 0000 0000 - 7d00 0000 0000: -
-7d00 0000 0000 - 7e00 0000 0000: heap
-7e00 0000 0000 - 7e80 0000 0000: -
+7b00 0000 0000 - 7c00 0000 0000: heap
+7c00 0000 0000 - 7e80 0000 0000: -
7e80 0000 0000 - 8000 0000 0000: modules and main thread stack
*/
struct Mapping {
static const uptr kMetaShadowBeg = 0x300000000000ull;
- static const uptr kMetaShadowEnd = 0x400000000000ull;
+ static const uptr kMetaShadowEnd = 0x340000000000ull;
static const uptr kTraceMemBeg = 0x600000000000ull;
static const uptr kTraceMemEnd = 0x620000000000ull;
- static const uptr kShadowBeg = 0x020000000000ull;
- static const uptr kShadowEnd = 0x100000000000ull;
- static const uptr kHeapMemBeg = 0x7d0000000000ull;
- static const uptr kHeapMemEnd = 0x7e0000000000ull;
+ static const uptr kShadowBeg = 0x010000000000ull;
+ static const uptr kShadowEnd = 0x200000000000ull;
+ static const uptr kHeapMemBeg = 0x7b0000000000ull;
+ static const uptr kHeapMemEnd = 0x7c0000000000ull;
static const uptr kLoAppMemBeg = 0x000000001000ull;
- static const uptr kLoAppMemEnd = 0x010000000000ull;
+ static const uptr kLoAppMemEnd = 0x008000000000ull;
+ static const uptr kMidAppMemBeg = 0x550000000000ull;
+ static const uptr kMidAppMemEnd = 0x568000000000ull;
static const uptr kHiAppMemBeg = 0x7e8000000000ull;
static const uptr kHiAppMemEnd = 0x800000000000ull;
- static const uptr kAppMemMsk = 0x7c0000000000ull;
- static const uptr kAppMemXor = 0x020000000000ull;
+ static const uptr kAppMemMsk = 0x780000000000ull;
+ static const uptr kAppMemXor = 0x040000000000ull;
static const uptr kVdsoBeg = 0xf000000000000000ull;
};
+
+#define TSAN_MID_APP_RANGE 1
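
A worked example of the remapped layout (illustrative, not part of the patch): with kShadowCell = 8 and kShadowCnt = 4, the default C/C++ configuration, MemToShadow computes shadow = ((x & ~(kAppMemMsk | 7)) ^ kAppMemXor) * 4. For a heap address x = 0x7b0000001234:

  x & ~(0x780000000000 | 7)  = 0x030000001230
  ^ 0x040000000000           = 0x070000001230
  * 4                        = 0x1c00000048c0

which lands inside the new shadow range [0x0100 0000 0000, 0x2000 0000 0000).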
#elif defined(__mips64)
/*
C/C++ on linux/mips64
@@ -74,22 +80,26 @@ ff00 0000 00 - ff80 0000 00: -
ff80 0000 00 - ffff ffff ff: modules and main thread stack
*/
struct Mapping {
- static const uptr kMetaShadowBeg = 0x3000000000ull;
- static const uptr kMetaShadowEnd = 0x4000000000ull;
- static const uptr kTraceMemBeg = 0x6000000000ull;
- static const uptr kTraceMemEnd = 0x6200000000ull;
- static const uptr kShadowBeg = 0x1400000000ull;
- static const uptr kShadowEnd = 0x2400000000ull;
+ static const uptr kMetaShadowBeg = 0x4000000000ull;
+ static const uptr kMetaShadowEnd = 0x5000000000ull;
+ static const uptr kTraceMemBeg = 0xb000000000ull;
+ static const uptr kTraceMemEnd = 0xb200000000ull;
+ static const uptr kShadowBeg = 0x2400000000ull;
+ static const uptr kShadowEnd = 0x4000000000ull;
static const uptr kHeapMemBeg = 0xfe00000000ull;
static const uptr kHeapMemEnd = 0xff00000000ull;
static const uptr kLoAppMemBeg = 0x0100000000ull;
static const uptr kLoAppMemEnd = 0x0200000000ull;
+ static const uptr kMidAppMemBeg = 0xaa00000000ull;
+ static const uptr kMidAppMemEnd = 0xab00000000ull;
static const uptr kHiAppMemBeg = 0xff80000000ull;
static const uptr kHiAppMemEnd = 0xffffffffffull;
- static const uptr kAppMemMsk = 0xfc00000000ull;
- static const uptr kAppMemXor = 0x0400000000ull;
+ static const uptr kAppMemMsk = 0xf800000000ull;
+ static const uptr kAppMemXor = 0x0800000000ull;
static const uptr kVdsoBeg = 0xfffff00000ull;
};
+
+#define TSAN_MID_APP_RANGE 1
#elif defined(__aarch64__)
// AArch64 supports multiple VMA which leads to multiple address transformation
// functions. To support these multiple VMAS transformations and mappings TSAN
@@ -121,7 +131,6 @@ struct Mapping39 {
static const uptr kMetaShadowEnd = 0x3400000000ull;
static const uptr kMidAppMemBeg = 0x5500000000ull;
static const uptr kMidAppMemEnd = 0x5600000000ull;
- static const uptr kMidShadowOff = 0x5000000000ull;
static const uptr kTraceMemBeg = 0x6000000000ull;
static const uptr kTraceMemEnd = 0x6200000000ull;
static const uptr kHeapMemBeg = 0x7c00000000ull;
@@ -157,7 +166,6 @@ struct Mapping42 {
static const uptr kMetaShadowEnd = 0x28000000000ull;
static const uptr kMidAppMemBeg = 0x2aa00000000ull;
static const uptr kMidAppMemEnd = 0x2ab00000000ull;
- static const uptr kMidShadowOff = 0x28000000000ull;
static const uptr kTraceMemBeg = 0x36200000000ull;
static const uptr kTraceMemEnd = 0x36400000000ull;
static const uptr kHeapMemBeg = 0x3e000000000ull;
@@ -169,6 +177,26 @@ struct Mapping42 {
static const uptr kVdsoBeg = 0x37f00000000ull;
};
+struct Mapping48 {
+ static const uptr kLoAppMemBeg = 0x0000000001000ull;
+ static const uptr kLoAppMemEnd = 0x0000200000000ull;
+ static const uptr kShadowBeg = 0x0002000000000ull;
+ static const uptr kShadowEnd = 0x0004000000000ull;
+ static const uptr kMetaShadowBeg = 0x0005000000000ull;
+ static const uptr kMetaShadowEnd = 0x0006000000000ull;
+ static const uptr kMidAppMemBeg = 0x0aaaa00000000ull;
+ static const uptr kMidAppMemEnd = 0x0aaaf00000000ull;
+ static const uptr kTraceMemBeg = 0x0f06000000000ull;
+ static const uptr kTraceMemEnd = 0x0f06200000000ull;
+ static const uptr kHeapMemBeg = 0x0ffff00000000ull;
+ static const uptr kHeapMemEnd = 0x0ffff00000000ull;
+ static const uptr kHiAppMemBeg = 0x0ffff00000000ull;
+ static const uptr kHiAppMemEnd = 0x1000000000000ull;
+ static const uptr kAppMemMsk = 0x0fff800000000ull;
+ static const uptr kAppMemXor = 0x0000800000000ull;
+ static const uptr kVdsoBeg = 0xffff000000000ull;
+};
+
// Indicates the runtime will define the memory regions at runtime.
#define TSAN_RUNTIME_VMA 1
// Indicates that mapping defines a mid range memory segment.
@@ -248,7 +276,7 @@ struct Mapping46 {
#define TSAN_RUNTIME_VMA 1
#endif
-#elif defined(SANITIZER_GO) && !SANITIZER_WINDOWS
+#elif SANITIZER_GO && !SANITIZER_WINDOWS
/* Go on linux, darwin and freebsd
0000 0000 1000 - 0000 1000 0000: executable
@@ -274,7 +302,7 @@ struct Mapping {
static const uptr kAppMemEnd = 0x00e000000000ull;
};
-#elif defined(SANITIZER_GO) && SANITIZER_WINDOWS
+#elif SANITIZER_GO && SANITIZER_WINDOWS
/* Go on windows
0000 0000 1000 - 0000 1000 0000: executable
@@ -334,7 +362,7 @@ enum MappingType {
template<typename Mapping, int Type>
uptr MappingImpl(void) {
switch (Type) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
case MAPPING_LO_APP_BEG: return Mapping::kLoAppMemBeg;
case MAPPING_LO_APP_END: return Mapping::kLoAppMemEnd;
# ifdef TSAN_MID_APP_RANGE
@@ -362,11 +390,13 @@ uptr MappingImpl(void) {
template<int Type>
uptr MappingArchImpl(void) {
#ifdef __aarch64__
- if (vmaSize == 39)
- return MappingImpl<Mapping39, Type>();
- else
- return MappingImpl<Mapping42, Type>();
+ switch (vmaSize) {
+ case 39: return MappingImpl<Mapping39, Type>();
+ case 42: return MappingImpl<Mapping42, Type>();
+ case 48: return MappingImpl<Mapping48, Type>();
+ }
DCHECK(0);
+ return 0;
#elif defined(__powerpc64__)
if (vmaSize == 44)
return MappingImpl<Mapping44, Type>();
@@ -378,7 +408,7 @@ uptr MappingArchImpl(void) {
#endif
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
ALWAYS_INLINE
uptr LoAppMemBeg(void) {
return MappingArchImpl<MAPPING_LO_APP_BEG>();
@@ -440,7 +470,7 @@ bool GetUserRegion(int i, uptr *start, uptr *end) {
switch (i) {
default:
return false;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
case 0:
*start = LoAppMemBeg();
*end = LoAppMemEnd();
@@ -498,7 +528,7 @@ uptr TraceMemEnd(void) {
template<typename Mapping>
bool IsAppMemImpl(uptr mem) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
return (mem >= Mapping::kHeapMemBeg && mem < Mapping::kHeapMemEnd) ||
# ifdef TSAN_MID_APP_RANGE
(mem >= Mapping::kMidAppMemBeg && mem < Mapping::kMidAppMemEnd) ||
@@ -513,11 +543,13 @@ bool IsAppMemImpl(uptr mem) {
ALWAYS_INLINE
bool IsAppMem(uptr mem) {
#ifdef __aarch64__
- if (vmaSize == 39)
- return IsAppMemImpl<Mapping39>(mem);
- else
- return IsAppMemImpl<Mapping42>(mem);
+ switch (vmaSize) {
+ case 39: return IsAppMemImpl<Mapping39>(mem);
+ case 42: return IsAppMemImpl<Mapping42>(mem);
+ case 48: return IsAppMemImpl<Mapping48>(mem);
+ }
DCHECK(0);
+ return false;
#elif defined(__powerpc64__)
if (vmaSize == 44)
return IsAppMemImpl<Mapping44>(mem);
@@ -538,11 +570,13 @@ bool IsShadowMemImpl(uptr mem) {
ALWAYS_INLINE
bool IsShadowMem(uptr mem) {
#ifdef __aarch64__
- if (vmaSize == 39)
- return IsShadowMemImpl<Mapping39>(mem);
- else
- return IsShadowMemImpl<Mapping42>(mem);
+ switch (vmaSize) {
+ case 39: return IsShadowMemImpl<Mapping39>(mem);
+ case 42: return IsShadowMemImpl<Mapping42>(mem);
+ case 48: return IsShadowMemImpl<Mapping48>(mem);
+ }
DCHECK(0);
+ return false;
#elif defined(__powerpc64__)
if (vmaSize == 44)
return IsShadowMemImpl<Mapping44>(mem);
@@ -563,11 +597,13 @@ bool IsMetaMemImpl(uptr mem) {
ALWAYS_INLINE
bool IsMetaMem(uptr mem) {
#ifdef __aarch64__
- if (vmaSize == 39)
- return IsMetaMemImpl<Mapping39>(mem);
- else
- return IsMetaMemImpl<Mapping42>(mem);
+ switch (vmaSize) {
+ case 39: return IsMetaMemImpl<Mapping39>(mem);
+ case 42: return IsMetaMemImpl<Mapping42>(mem);
+ case 48: return IsMetaMemImpl<Mapping48>(mem);
+ }
DCHECK(0);
+ return false;
#elif defined(__powerpc64__)
if (vmaSize == 44)
return IsMetaMemImpl<Mapping44>(mem);
@@ -583,7 +619,7 @@ bool IsMetaMem(uptr mem) {
template<typename Mapping>
uptr MemToShadowImpl(uptr x) {
DCHECK(IsAppMem(x));
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
return (((x) & ~(Mapping::kAppMemMsk | (kShadowCell - 1)))
^ Mapping::kAppMemXor) * kShadowCnt;
#else
@@ -598,11 +634,13 @@ uptr MemToShadowImpl(uptr x) {
ALWAYS_INLINE
uptr MemToShadow(uptr x) {
#ifdef __aarch64__
- if (vmaSize == 39)
- return MemToShadowImpl<Mapping39>(x);
- else
- return MemToShadowImpl<Mapping42>(x);
+ switch (vmaSize) {
+ case 39: return MemToShadowImpl<Mapping39>(x);
+ case 42: return MemToShadowImpl<Mapping42>(x);
+ case 48: return MemToShadowImpl<Mapping48>(x);
+ }
DCHECK(0);
+ return 0;
#elif defined(__powerpc64__)
if (vmaSize == 44)
return MemToShadowImpl<Mapping44>(x);
@@ -618,24 +656,30 @@ uptr MemToShadow(uptr x) {
template<typename Mapping>
u32 *MemToMetaImpl(uptr x) {
DCHECK(IsAppMem(x));
-#ifndef SANITIZER_GO
- return (u32*)(((((x) & ~(Mapping::kAppMemMsk | (kMetaShadowCell - 1)))
- ^ Mapping::kAppMemXor) / kMetaShadowCell * kMetaShadowSize)
- | Mapping::kMetaShadowBeg);
+#if !SANITIZER_GO
+ return (u32*)(((((x) & ~(Mapping::kAppMemMsk | (kMetaShadowCell - 1)))) /
+ kMetaShadowCell * kMetaShadowSize) | Mapping::kMetaShadowBeg);
#else
+# ifndef SANITIZER_WINDOWS
return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
kMetaShadowCell * kMetaShadowSize) | Mapping::kMetaShadowBeg);
+# else
+ return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
+ kMetaShadowCell * kMetaShadowSize) + Mapping::kMetaShadowBeg);
+# endif
#endif
}
ALWAYS_INLINE
u32 *MemToMeta(uptr x) {
#ifdef __aarch64__
- if (vmaSize == 39)
- return MemToMetaImpl<Mapping39>(x);
- else
- return MemToMetaImpl<Mapping42>(x);
+ switch (vmaSize) {
+ case 39: return MemToMetaImpl<Mapping39>(x);
+ case 42: return MemToMetaImpl<Mapping42>(x);
+ case 48: return MemToMetaImpl<Mapping48>(x);
+ }
DCHECK(0);
+ return 0;
#elif defined(__powerpc64__)
if (vmaSize == 44)
return MemToMetaImpl<Mapping44>(x);
@@ -651,18 +695,25 @@ u32 *MemToMeta(uptr x) {
template<typename Mapping>
uptr ShadowToMemImpl(uptr s) {
DCHECK(IsShadowMem(s));
-#ifndef SANITIZER_GO
- if (s >= MemToShadow(Mapping::kLoAppMemBeg)
- && s <= MemToShadow(Mapping::kLoAppMemEnd - 1))
- return (s / kShadowCnt) ^ Mapping::kAppMemXor;
+#if !SANITIZER_GO
+ // The shadow mapping is non-linear and we've lost some bits, so we don't have
+ // an easy way to restore the original app address. But the mapping is a
+ // bijection, so we try to restore the address as belonging to low/mid/high
+// range in turn and see if the shadow->app->shadow mapping gives us the
+ // same address.
+ uptr p = (s / kShadowCnt) ^ Mapping::kAppMemXor;
+ if (p >= Mapping::kLoAppMemBeg && p < Mapping::kLoAppMemEnd &&
+ MemToShadow(p) == s)
+ return p;
# ifdef TSAN_MID_APP_RANGE
- if (s >= MemToShadow(Mapping::kMidAppMemBeg)
- && s <= MemToShadow(Mapping::kMidAppMemEnd - 1))
- return ((s / kShadowCnt) ^ Mapping::kAppMemXor) + Mapping::kMidShadowOff;
+ p = ((s / kShadowCnt) ^ Mapping::kAppMemXor) +
+ (Mapping::kMidAppMemBeg & Mapping::kAppMemMsk);
+ if (p >= Mapping::kMidAppMemBeg && p < Mapping::kMidAppMemEnd &&
+ MemToShadow(p) == s)
+ return p;
# endif
- else
- return ((s / kShadowCnt) ^ Mapping::kAppMemXor) | Mapping::kAppMemMsk;
-#else
+ return ((s / kShadowCnt) ^ Mapping::kAppMemXor) | Mapping::kAppMemMsk;
+#else // #if !SANITIZER_GO
# ifndef SANITIZER_WINDOWS
return (s & ~Mapping::kShadowBeg) / kShadowCnt;
# else
@@ -674,11 +725,13 @@ uptr ShadowToMemImpl(uptr s) {
ALWAYS_INLINE
uptr ShadowToMem(uptr s) {
#ifdef __aarch64__
- if (vmaSize == 39)
- return ShadowToMemImpl<Mapping39>(s);
- else
- return ShadowToMemImpl<Mapping42>(s);
+ switch (vmaSize) {
+ case 39: return ShadowToMemImpl<Mapping39>(s);
+ case 42: return ShadowToMemImpl<Mapping42>(s);
+ case 48: return ShadowToMemImpl<Mapping48>(s);
+ }
DCHECK(0);
+ return 0;
#elif defined(__powerpc64__)
if (vmaSize == 44)
return ShadowToMemImpl<Mapping44>(s);
@@ -707,11 +760,13 @@ uptr GetThreadTraceImpl(int tid) {
ALWAYS_INLINE
uptr GetThreadTrace(int tid) {
#ifdef __aarch64__
- if (vmaSize == 39)
- return GetThreadTraceImpl<Mapping39>(tid);
- else
- return GetThreadTraceImpl<Mapping42>(tid);
+ switch (vmaSize) {
+ case 39: return GetThreadTraceImpl<Mapping39>(tid);
+ case 42: return GetThreadTraceImpl<Mapping42>(tid);
+ case 48: return GetThreadTraceImpl<Mapping48>(tid);
+ }
DCHECK(0);
+ return 0;
#elif defined(__powerpc64__)
if (vmaSize == 44)
return GetThreadTraceImpl<Mapping44>(tid);
@@ -735,11 +790,13 @@ uptr GetThreadTraceHeaderImpl(int tid) {
ALWAYS_INLINE
uptr GetThreadTraceHeader(int tid) {
#ifdef __aarch64__
- if (vmaSize == 39)
- return GetThreadTraceHeaderImpl<Mapping39>(tid);
- else
- return GetThreadTraceHeaderImpl<Mapping42>(tid);
+ switch (vmaSize) {
+ case 39: return GetThreadTraceHeaderImpl<Mapping39>(tid);
+ case 42: return GetThreadTraceHeaderImpl<Mapping42>(tid);
+ case 48: return GetThreadTraceHeaderImpl<Mapping48>(tid);
+ }
DCHECK(0);
+ return 0;
#elif defined(__powerpc64__)
if (vmaSize == 44)
return GetThreadTraceHeaderImpl<Mapping44>(tid);
diff --git a/lib/tsan/rtl/tsan_platform_linux.cc b/lib/tsan/rtl/tsan_platform_linux.cc
index d7182fdc1a4d..3313288a728a 100644
--- a/lib/tsan/rtl/tsan_platform_linux.cc
+++ b/lib/tsan/rtl/tsan_platform_linux.cc
@@ -38,6 +38,7 @@
#include <sys/mman.h>
#if SANITIZER_LINUX
#include <sys/personality.h>
+#include <setjmp.h>
#endif
#include <sys/syscall.h>
#include <sys/socket.h>
@@ -67,6 +68,10 @@ extern "C" void *__libc_stack_end;
void *__libc_stack_end = 0;
#endif
+#if SANITIZER_LINUX && defined(__aarch64__)
+void InitializeGuardPtr() __attribute__((visibility("hidden")));
+#endif
+
namespace __tsan {
#ifdef TSAN_RUNTIME_VMA
@@ -93,7 +98,7 @@ void FillProfileCallback(uptr p, uptr rss, bool file,
mem[MemShadow] += rss;
else if (p >= MetaShadowBeg() && p < MetaShadowEnd())
mem[MemMeta] += rss;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
else if (p >= HeapMemBeg() && p < HeapMemEnd())
mem[MemHeap] += rss;
else if (p >= LoAppMemBeg() && p < LoAppMemEnd())
@@ -129,7 +134,7 @@ void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
void FlushShadowMemoryCallback(
const SuspendedThreadsList &suspended_threads_list,
void *argument) {
- FlushUnneededShadowMemory(ShadowBeg(), ShadowEnd() - ShadowBeg());
+ ReleaseMemoryPagesToOS(ShadowBeg(), ShadowEnd());
}
#endif
@@ -139,7 +144,7 @@ void FlushShadowMemory() {
#endif
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Mark shadow for .rodata sections with the special kShadowRodata marker.
// Accesses to .rodata can't race, so this saves time, memory and trace space.
static void MapRodata() {
@@ -201,16 +206,16 @@ void InitializeShadowMemoryPlatform() {
MapRodata();
}
-#endif // #ifndef SANITIZER_GO
+#endif // #if !SANITIZER_GO
void InitializePlatformEarly() {
#ifdef TSAN_RUNTIME_VMA
vmaSize =
(MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
#if defined(__aarch64__)
- if (vmaSize != 39 && vmaSize != 42) {
+ if (vmaSize != 39 && vmaSize != 42 && vmaSize != 48) {
Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
- Printf("FATAL: Found %d - Supported 39 and 42\n", vmaSize);
+ Printf("FATAL: Found %d - Supported 39, 42 and 48\n", vmaSize);
Die();
}
#elif defined(__powerpc64__)
@@ -229,7 +234,7 @@ void InitializePlatform() {
// Go maps shadow memory lazily and works fine with limited address space.
// Unlimited stack is not a problem as well, because the executable
// is not compiled with -pie.
- if (kCppMode) {
+ if (!SANITIZER_GO) {
bool reexec = false;
// TSan doesn't play well with unlimited stack size (as stack
// overlaps with shadow memory). If we detect unlimited stack size,
@@ -264,18 +269,20 @@ void InitializePlatform() {
CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
reexec = true;
}
+ // Initialize the guard pointer used in {sig}{set,long}jump.
+ InitializeGuardPtr();
#endif
if (reexec)
ReExec();
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
CheckAndProtect();
InitTlsSize();
#endif
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Extract file descriptors passed to glibc internal __res_iclose function.
// This is required to properly "close" the fds, because we do not see internal
// closes within glibc. The code is a pure hack.
@@ -328,11 +335,11 @@ int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
}
#endif
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void ReplaceSystemMalloc() { }
#endif
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
#if SANITIZER_ANDROID
#if defined(__aarch64__)
@@ -393,7 +400,7 @@ void cur_thread_finalize() {
CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
}
#endif // SANITIZER_ANDROID
-#endif // ifndef SANITIZER_GO
+#endif // if !SANITIZER_GO
} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_platform_mac.cc b/lib/tsan/rtl/tsan_platform_mac.cc
index 0cc02ab87a3e..25dd241d826f 100644
--- a/lib/tsan/rtl/tsan_platform_mac.cc
+++ b/lib/tsan/rtl/tsan_platform_mac.cc
@@ -20,10 +20,12 @@
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_flags.h"
+#include <mach/mach.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
@@ -42,7 +44,7 @@
namespace __tsan {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static void *SignalSafeGetOrAllocate(uptr *dst, uptr size) {
atomic_uintptr_t *a = (atomic_uintptr_t *)dst;
void *val = (void *)atomic_load_relaxed(a);
@@ -100,17 +102,83 @@ void cur_thread_finalize() {
}
#endif
-uptr GetShadowMemoryConsumption() {
- return 0;
+void FlushShadowMemory() {
}
-void FlushShadowMemory() {
+static void RegionMemUsage(uptr start, uptr end, uptr *res, uptr *dirty) {
+ vm_address_t address = start;
+ vm_address_t end_address = end;
+ uptr resident_pages = 0;
+ uptr dirty_pages = 0;
+ while (address < end_address) {
+ vm_size_t vm_region_size;
+ mach_msg_type_number_t count = VM_REGION_EXTENDED_INFO_COUNT;
+ vm_region_extended_info_data_t vm_region_info;
+ mach_port_t object_name;
+ kern_return_t ret = vm_region_64(
+ mach_task_self(), &address, &vm_region_size, VM_REGION_EXTENDED_INFO,
+ (vm_region_info_t)&vm_region_info, &count, &object_name);
+ if (ret != KERN_SUCCESS) break;
+
+ resident_pages += vm_region_info.pages_resident;
+ dirty_pages += vm_region_info.pages_dirtied;
+
+ address += vm_region_size;
+ }
+ *res = resident_pages * GetPageSizeCached();
+ *dirty = dirty_pages * GetPageSizeCached();
}
void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
+ uptr shadow_res, shadow_dirty;
+ uptr meta_res, meta_dirty;
+ uptr trace_res, trace_dirty;
+ RegionMemUsage(ShadowBeg(), ShadowEnd(), &shadow_res, &shadow_dirty);
+ RegionMemUsage(MetaShadowBeg(), MetaShadowEnd(), &meta_res, &meta_dirty);
+ RegionMemUsage(TraceMemBeg(), TraceMemEnd(), &trace_res, &trace_dirty);
+
+#if !SANITIZER_GO
+ uptr low_res, low_dirty;
+ uptr high_res, high_dirty;
+ uptr heap_res, heap_dirty;
+ RegionMemUsage(LoAppMemBeg(), LoAppMemEnd(), &low_res, &low_dirty);
+ RegionMemUsage(HiAppMemBeg(), HiAppMemEnd(), &high_res, &high_dirty);
+ RegionMemUsage(HeapMemBeg(), HeapMemEnd(), &heap_res, &heap_dirty);
+#else // !SANITIZER_GO
+ uptr app_res, app_dirty;
+ RegionMemUsage(AppMemBeg(), AppMemEnd(), &app_res, &app_dirty);
+#endif
+
+ StackDepotStats *stacks = StackDepotGetStats();
+ internal_snprintf(buf, buf_size,
+ "shadow (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+ "meta (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+ "traces (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+#if !SANITIZER_GO
+ "low app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+ "high app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+ "heap (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+#else // !SANITIZER_GO
+ "app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+#endif
+ "stacks: %ld unique IDs, %ld kB allocated\n"
+ "threads: %ld total, %ld live\n"
+ "------------------------------\n",
+ ShadowBeg(), ShadowEnd(), shadow_res / 1024, shadow_dirty / 1024,
+ MetaShadowBeg(), MetaShadowEnd(), meta_res / 1024, meta_dirty / 1024,
+ TraceMemBeg(), TraceMemEnd(), trace_res / 1024, trace_dirty / 1024,
+#if !SANITIZER_GO
+ LoAppMemBeg(), LoAppMemEnd(), low_res / 1024, low_dirty / 1024,
+ HiAppMemBeg(), HiAppMemEnd(), high_res / 1024, high_dirty / 1024,
+ HeapMemBeg(), HeapMemEnd(), heap_res / 1024, heap_dirty / 1024,
+#else // !SANITIZER_GO
+ AppMemBeg(), AppMemEnd(), app_res / 1024, app_dirty / 1024,
+#endif
+ stacks->n_uniq_ids, stacks->allocated / 1024,
+ nthread, nlive);
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void InitializeShadowMemoryPlatform() { }
// On OS X, GCD worker threads are created without a call to pthread_create. We
@@ -160,7 +228,7 @@ void InitializePlatformEarly() {
void InitializePlatform() {
DisableCoreDumperIfNecessary();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
CheckAndProtect();
CHECK_EQ(main_thread_identity, 0);
@@ -171,7 +239,7 @@ void InitializePlatform() {
#endif
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Note: this function runs with async signals enabled,
// so it must not touch any tsan state.
int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
diff --git a/lib/tsan/rtl/tsan_platform_posix.cc b/lib/tsan/rtl/tsan_platform_posix.cc
index 805ce1be4bde..0732c83d689d 100644
--- a/lib/tsan/rtl/tsan_platform_posix.cc
+++ b/lib/tsan/rtl/tsan_platform_posix.cc
@@ -23,7 +23,7 @@
namespace __tsan {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void InitializeShadowMemory() {
// Map memory shadow.
uptr shadow =
diff --git a/lib/tsan/rtl/tsan_platform_windows.cc b/lib/tsan/rtl/tsan_platform_windows.cc
index c6d5058d96fc..08aa588a4c89 100644
--- a/lib/tsan/rtl/tsan_platform_windows.cc
+++ b/lib/tsan/rtl/tsan_platform_windows.cc
@@ -21,10 +21,6 @@
namespace __tsan {
-uptr GetShadowMemoryConsumption() {
- return 0;
-}
-
void FlushShadowMemory() {
}
diff --git a/lib/tsan/rtl/tsan_report.cc b/lib/tsan/rtl/tsan_report.cc
index 93604946ac38..156876e5c22a 100644
--- a/lib/tsan/rtl/tsan_report.cc
+++ b/lib/tsan/rtl/tsan_report.cc
@@ -71,7 +71,7 @@ ReportDesc::~ReportDesc() {
// FIXME(dvyukov): it must be leaking a lot of memory.
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
const int kThreadBufSize = 32;
const char *thread_name(char *buf, int tid) {
@@ -361,7 +361,7 @@ void PrintReport(const ReportDesc *rep) {
Printf("==================\n");
}
-#else // #ifndef SANITIZER_GO
+#else // #if !SANITIZER_GO
const int kMainThreadId = 1;
diff --git a/lib/tsan/rtl/tsan_rtl.cc b/lib/tsan/rtl/tsan_rtl.cc
index 629871ef8f7a..804f3cf64ee8 100644
--- a/lib/tsan/rtl/tsan_rtl.cc
+++ b/lib/tsan/rtl/tsan_rtl.cc
@@ -44,7 +44,8 @@ extern "C" void __tsan_resume() {
namespace __tsan {
-#if !defined(SANITIZER_GO) && !SANITIZER_MAC
+#if !SANITIZER_GO && !SANITIZER_MAC
+__attribute__((tls_model("initial-exec")))
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
@@ -86,7 +87,7 @@ static ThreadContextBase *CreateThreadContext(u32 tid) {
return new(mem) ThreadContext(tid);
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
@@ -117,7 +118,7 @@ ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
// , ignore_reads_and_writes()
// , ignore_interceptors()
, clock(tid, reuse_count)
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
, jmp_bufs(MBlockJmpBuf)
#endif
, tid(tid)
@@ -126,13 +127,13 @@ ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
, stk_size(stk_size)
, tls_addr(tls_addr)
, tls_size(tls_size)
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
, last_sleep_clock(tid)
#endif
{
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
uptr n_threads;
uptr n_running_threads;
@@ -233,16 +234,17 @@ static void StopBackgroundThread() {
#endif
void DontNeedShadowFor(uptr addr, uptr size) {
- uptr shadow_beg = MemToShadow(addr);
- uptr shadow_end = MemToShadow(addr + size);
- FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
+ ReleaseMemoryPagesToOS(MemToShadow(addr), MemToShadow(addr + size));
}
void MapShadow(uptr addr, uptr size) {
// Global data is not 64K aligned, but there are no adjacent mappings,
// so we can get away with unaligned mapping.
// CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
- MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier, "shadow");
+ const uptr kPageSize = GetPageSizeCached();
+ uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
+ uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
+ MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow");
// Meta shadow is 2:1, so tread carefully.
static bool data_mapped = false;
@@ -287,10 +289,15 @@ void MapThreadTrace(uptr addr, uptr size, const char *name) {
static void CheckShadowMapping() {
uptr beg, end;
for (int i = 0; GetUserRegion(i, &beg, &end); i++) {
+    // Skip empty regions (e.g. the heap region on architectures that do not
+    // use the 64-bit allocator).
+ if (beg == end)
+ continue;
VPrintf(3, "checking shadow region %p-%p\n", beg, end);
+ uptr prev = 0;
for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
- for (int x = -1; x <= 1; x++) {
- const uptr p = p0 + x;
+ for (int x = -(int)kShadowCell; x <= (int)kShadowCell; x += kShadowCell) {
+ const uptr p = RoundDown(p0 + x, kShadowCell);
if (p < beg || p >= end)
continue;
const uptr s = MemToShadow(p);
@@ -298,8 +305,18 @@ static void CheckShadowMapping() {
VPrintf(3, " checking pointer %p: shadow=%p meta=%p\n", p, s, m);
CHECK(IsAppMem(p));
CHECK(IsShadowMem(s));
- CHECK_EQ(p & ~(kShadowCell - 1), ShadowToMem(s));
+ CHECK_EQ(p, ShadowToMem(s));
CHECK(IsMetaMem(m));
+ if (prev) {
+ // Ensure that shadow and meta mappings are linear within a single
+ // user range. Lots of code that processes memory ranges assumes it.
+ const uptr prev_s = MemToShadow(prev);
+ const uptr prev_m = (uptr)MemToMeta(prev);
+ CHECK_EQ(s - prev_s, (p - prev) * kShadowMultiplier);
+ CHECK_EQ((m - prev_m) / kMetaShadowSize,
+ (p - prev) / kMetaShadowCell);
+ }
+ prev = p;
}
}
}
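
To make the new linearity check concrete (illustrative arithmetic, not part of the patch): within one user region the shadow mapping is affine, so for cell-aligned addresses prev < p it must hold that MemToShadow(p) - MemToShadow(prev) == (p - prev) * kShadowMultiplier. With kShadowMultiplier = 4, two app addresses 64 bytes apart must map to shadow addresses exactly 256 bytes apart; a violation would mean range operations such as MemoryRangeSet or DontNeedShadowFor touch the wrong shadow cells.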
@@ -318,12 +335,12 @@ void Initialize(ThreadState *thr) {
SetCheckFailedCallback(TsanCheckFailed);
ctx = new(ctx_placeholder) Context;
- const char *options = GetEnv(kTsanOptionsEnv);
+ const char *options = GetEnv(SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS");
CacheBinaryName();
InitializeFlags(&ctx->flags, options);
AvoidCVE_2016_2143();
InitializePlatformEarly();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Re-exec ourselves if we need to set additional env or command line args.
MaybeReexec();
@@ -339,14 +356,14 @@ void Initialize(ThreadState *thr) {
InitializePlatform();
InitializeMutex();
InitializeDynamicAnnotations();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
InitializeShadowMemory();
InitializeAllocatorLate();
#endif
// Setup correct file descriptor for error reports.
__sanitizer_set_report_path(common_flags()->log_path);
InitializeSuppressions();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
InitializeLibIgnore();
Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
// On MIPS, TSan initialization is run before
@@ -370,7 +387,7 @@ void Initialize(ThreadState *thr) {
#endif
ctx->initialized = true;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
Symbolizer::LateInitialize();
#endif
@@ -396,7 +413,7 @@ int Finalize(ThreadState *thr) {
CommonSanitizerReportMutex.Unlock();
ctx->report_mtx.Unlock();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (Verbosity()) AllocatorPrintStats();
#endif
@@ -404,7 +421,7 @@ int Finalize(ThreadState *thr) {
if (ctx->nreported) {
failed = true;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
Printf("Found %d data race(s)\n", ctx->nreported);
@@ -419,7 +436,7 @@ int Finalize(ThreadState *thr) {
if (common_flags()->print_suppressions)
PrintMatchedSuppressions();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (flags()->print_benign)
PrintMatchedBenignRaces();
#endif
@@ -434,7 +451,7 @@ int Finalize(ThreadState *thr) {
return failed ? common_flags()->exitcode : 0;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void ForkBefore(ThreadState *thr, uptr pc) {
ctx->thread_registry->Lock();
ctx->report_mtx.Lock();
@@ -467,7 +484,7 @@ void ForkChildAfter(ThreadState *thr, uptr pc) {
}
#endif
-#ifdef SANITIZER_GO
+#if SANITIZER_GO
NOINLINE
void GrowShadowStack(ThreadState *thr) {
const int sz = thr->shadow_stack_end - thr->shadow_stack;
@@ -486,7 +503,7 @@ u32 CurrentStackId(ThreadState *thr, uptr pc) {
if (!thr->is_inited) // May happen during bootstrap.
return 0;
if (pc != 0) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
if (thr->shadow_stack_pos == thr->shadow_stack_end)
@@ -532,7 +549,7 @@ uptr TraceParts() {
return TraceSize() / kTracePartSize;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
extern "C" void __tsan_trace_switch() {
TraceSwitch(cur_thread());
}
@@ -565,7 +582,7 @@ void HandleRace(ThreadState *thr, u64 *shadow_mem,
thr->racy_state[0] = cur.raw();
thr->racy_state[1] = old.raw();
thr->racy_shadow_addr = shadow_mem;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
HACKY_CALL(__tsan_report_race);
#else
ReportRace(thr);
@@ -762,7 +779,7 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
}
#endif
- if (kCppMode && *shadow_mem == kShadowRodata) {
+ if (!SANITIZER_GO && *shadow_mem == kShadowRodata) {
// Access to .rodata section, no races here.
// Measurements show that it can be 10-20% of all memory accesses.
StatInc(thr, StatMop);
@@ -849,7 +866,7 @@ static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
// UnmapOrDie/MmapFixedNoReserve does not work on Windows,
// so we do it only for C/C++.
- if (kGoMode || size < common_flags()->clear_shadow_mmap_threshold) {
+ if (SANITIZER_GO || size < common_flags()->clear_shadow_mmap_threshold) {
u64 *p = (u64*)MemToShadow(addr);
CHECK(IsShadowMem((uptr)p));
CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
@@ -935,7 +952,7 @@ void FuncEntry(ThreadState *thr, uptr pc) {
// Shadow stack maintenance can be replaced with
// stack unwinding during trace switch (which presumably must be faster).
DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
if (thr->shadow_stack_pos == thr->shadow_stack_end)
@@ -955,7 +972,7 @@ void FuncExit(ThreadState *thr) {
}
DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
thr->shadow_stack_pos--;
@@ -966,7 +983,7 @@ void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
thr->ignore_reads_and_writes++;
CHECK_GT(thr->ignore_reads_and_writes, 0);
thr->fast_state.SetIgnoreBit();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (!ctx->after_multithreaded_fork)
thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
@@ -978,17 +995,25 @@ void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
CHECK_GE(thr->ignore_reads_and_writes, 0);
if (thr->ignore_reads_and_writes == 0) {
thr->fast_state.ClearIgnoreBit();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
thr->mop_ignore_set.Reset();
#endif
}
}
+#if !SANITIZER_GO
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+uptr __tsan_testonly_shadow_stack_current_size() {
+ ThreadState *thr = cur_thread();
+ return thr->shadow_stack_pos - thr->shadow_stack;
+}
+#endif
+
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
thr->ignore_sync++;
CHECK_GT(thr->ignore_sync, 0);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (!ctx->after_multithreaded_fork)
thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
@@ -998,7 +1023,7 @@ void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
thr->ignore_sync--;
CHECK_GE(thr->ignore_sync, 0);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (thr->ignore_sync == 0)
thr->sync_ignore_set.Reset();
#endif
@@ -1022,7 +1047,7 @@ void build_consistency_nostats() {}
} // namespace __tsan
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif
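
Note on the SANITIZER_GO spelling change above: switching from "#ifdef SANITIZER_GO" to "#if SANITIZER_GO" (and replacing kGoMode/kCppMode with the macro itself) only works if the macro is always defined, to 0 or 1. A minimal sketch of that convention, assuming the definition lives in the sanitizer platform headers (not shown in this diff):

#ifndef SANITIZER_GO
# define SANITIZER_GO 0   // C/C++ runtime build; the Go runtime build defines it to 1
#endif

#if !SANITIZER_GO
// compiled only into the C/C++ runtime
#endif

void Example() {
  if (!SANITIZER_GO) {
    // dead-code-eliminated in the Go build, but still parsed and
    // type-checked, unlike an #ifdef'd-out block
  }
}
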
diff --git a/lib/tsan/rtl/tsan_rtl.h b/lib/tsan/rtl/tsan_rtl.h
index ff69015660b6..7fcb9d48e038 100644
--- a/lib/tsan/rtl/tsan_rtl.h
+++ b/lib/tsan/rtl/tsan_rtl.h
@@ -52,7 +52,7 @@
namespace __tsan {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
struct MapUnmapCallback;
#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__)
static const uptr kAllocatorSpace = 0;
@@ -66,9 +66,15 @@ typedef SizeClassAllocator32<kAllocatorSpace, kAllocatorSize, 0,
CompactSizeClassMap, kAllocatorRegionSizeLog, ByteMap,
MapUnmapCallback> PrimaryAllocator;
#else
-typedef SizeClassAllocator64<Mapping::kHeapMemBeg,
- Mapping::kHeapMemEnd - Mapping::kHeapMemBeg, 0,
- DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
+ static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg;
+ static const uptr kMetadataSize = 0;
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef __tsan::MapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
@@ -335,7 +341,7 @@ struct JmpBuf {
// A ThreadState must be wired with a Processor to handle events.
struct Processor {
ThreadState *thr; // currently wired thread, or nullptr
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
AllocatorCache alloc_cache;
InternalAllocatorCache internal_alloc_cache;
#endif
@@ -345,7 +351,7 @@ struct Processor {
DDPhysicalThread *dd_pt;
};
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// ScopedGlobalProcessor temporarily sets up a global processor for the current
// thread, if it does not have one. Intended for interceptors that can run
// at the very end of a thread, when we have already destroyed the thread processor.
@@ -376,7 +382,7 @@ struct ThreadState {
int ignore_reads_and_writes;
int ignore_sync;
// Go does not support ignores.
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
IgnoreSet mop_ignore_set;
IgnoreSet sync_ignore_set;
#endif
@@ -389,7 +395,7 @@ struct ThreadState {
u64 racy_state[2];
MutexSet mset;
ThreadClock clock;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
Vector<JmpBuf> jmp_bufs;
int ignore_interceptors;
#endif
@@ -417,7 +423,7 @@ struct ThreadState {
// Current wired Processor, or nullptr. Required to handle any events.
Processor *proc1;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
Processor *proc() { return proc1; }
#else
Processor *proc();
@@ -426,7 +432,7 @@ struct ThreadState {
atomic_uintptr_t in_signal_handler;
ThreadSignalContext *signal_ctx;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
u32 last_sleep_stack_id;
ThreadClock last_sleep_clock;
#endif
@@ -443,7 +449,7 @@ struct ThreadState {
uptr tls_addr, uptr tls_size);
};
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
#if SANITIZER_MAC || SANITIZER_ANDROID
ThreadState *cur_thread();
void cur_thread_finalize();
@@ -541,13 +547,13 @@ extern Context *ctx; // The one and the only global runtime context.
struct ScopedIgnoreInterceptors {
ScopedIgnoreInterceptors() {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
cur_thread()->ignore_interceptors++;
#endif
}
~ScopedIgnoreInterceptors() {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
cur_thread()->ignore_interceptors--;
#endif
}
@@ -584,6 +590,7 @@ class ScopedReport {
void operator = (const ScopedReport&);
};
+ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);
void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
MutexSet *mset);
@@ -787,7 +794,7 @@ void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
StatInc(thr, StatEvents);
u64 pos = fs.GetTracePos();
if (UNLIKELY((pos % kTracePartSize) == 0)) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
HACKY_CALL(__tsan_trace_switch);
#else
TraceSwitch(thr);
@@ -799,7 +806,7 @@ void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
*evp = ev;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
uptr ALWAYS_INLINE HeapEnd() {
return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
}
diff --git a/lib/tsan/rtl/tsan_rtl_aarch64.S b/lib/tsan/rtl/tsan_rtl_aarch64.S
index 9cea3cf02800..ef06f0444ae4 100644
--- a/lib/tsan/rtl/tsan_rtl_aarch64.S
+++ b/lib/tsan/rtl/tsan_rtl_aarch64.S
@@ -1,6 +1,62 @@
#include "sanitizer_common/sanitizer_asm.h"
+
+.section .bss
+.type __tsan_pointer_chk_guard, %object
+.size __tsan_pointer_chk_guard, 8
+__tsan_pointer_chk_guard:
+.zero 8
+
.section .text
+// GLIBC mangles the function pointers in jmp_buf (used in {set,long}*jmp
+// functions) by XORing them with a random guard pointer. For AArch64 it is a
+// global variable rather than a TCB one (as for x86_64/powerpc) and although
+// its value is exported by the loader, it lies within a private GLIBC
+// namespace (meaning it should only be used by GLIBC itself and the ABI is
+// not stable). So InitializeGuardPtr obtains the pointer guard value by
+// issuing a setjmp and checking the resulting pointer values against the
+// original ones.
+.hidden _Z18InitializeGuardPtrv
+.global _Z18InitializeGuardPtrv
+.type _Z18InitializeGuardPtrv, @function
+_Z18InitializeGuardPtrv:
+ CFI_STARTPROC
+ // Allocates a jmp_buf for the setjmp call.
+ stp x29, x30, [sp, -336]!
+ CFI_DEF_CFA_OFFSET (336)
+ CFI_OFFSET (29, -336)
+ CFI_OFFSET (30, -328)
+ add x29, sp, 0
+ CFI_DEF_CFA_REGISTER (29)
+ add x0, x29, 24
+
+ // Call libc setjmp, which mangles the stack pointer value
+ adrp x1, :got:_ZN14__interception12real__setjmpE
+ ldr x1, [x1, #:got_lo12:_ZN14__interception12real__setjmpE]
+ ldr x1, [x1]
+ blr x1
+
+ // glibc setjmp mangles both the frame pointer (FP, pc+4 on blr) and the
+ // stack pointer (SP). FP will be placed on ((uintptr*)jmp_buf)[11] and
+ // SP at ((uintptr*)jmp_buf)[13].
+ // The mangle operation is just 'value' xor 'pointer guard value' and
+ // if we know the original value (SP) and the expected one, we can derive
+ // the guard pointer value.
+ mov x0, sp
+
+ // Loads the mangled SP pointer.
+ ldr x1, [x29, 128]
+ eor x0, x0, x1
+ adrp x2, __tsan_pointer_chk_guard
+ str x0, [x2, #:lo12:__tsan_pointer_chk_guard]
+ ldp x29, x30, [sp], 336
+ CFI_RESTORE (30)
+ CFI_RESTORE (19)
+ CFI_DEF_CFA (31, 0)
+ ret
+ CFI_ENDPROC
+.size _Z18InitializeGuardPtrv, .-_Z18InitializeGuardPtrv
+
.hidden __tsan_setjmp
.comm _ZN14__interception11real_setjmpE,8,8
.type setjmp, @function
@@ -23,10 +79,9 @@ setjmp:
mov x19, x0
// SP pointer mangling (see glibc setjmp)
- adrp x2, :got:__pointer_chk_guard
- ldr x2, [x2, #:got_lo12:__pointer_chk_guard]
+ adrp x2, __tsan_pointer_chk_guard
+ ldr x2, [x2, #:lo12:__tsan_pointer_chk_guard]
add x0, x29, 32
- ldr x2, [x2]
eor x1, x2, x0
// call tsan interceptor
@@ -71,10 +126,9 @@ _setjmp:
mov x19, x0
// SP pointer mangling (see glibc setjmp)
- adrp x2, :got:__pointer_chk_guard
- ldr x2, [x2, #:got_lo12:__pointer_chk_guard]
+ adrp x2, __tsan_pointer_chk_guard
+ ldr x2, [x2, #:lo12:__tsan_pointer_chk_guard]
add x0, x29, 32
- ldr x2, [x2]
eor x1, x2, x0
// call tsan interceptor
@@ -121,10 +175,9 @@ sigsetjmp:
mov x19, x0
// SP pointer mangling (see glibc setjmp)
- adrp x2, :got:__pointer_chk_guard
- ldr x2, [x2, #:got_lo12:__pointer_chk_guard]
+ adrp x2, __tsan_pointer_chk_guard
+ ldr x2, [x2, #:lo12:__tsan_pointer_chk_guard]
add x0, x29, 32
- ldr x2, [x2]
eor x1, x2, x0
// call tsan interceptor
@@ -173,10 +226,9 @@ __sigsetjmp:
mov x19, x0
// SP pointer mangling (see glibc setjmp)
- adrp x2, :got:__pointer_chk_guard
- ldr x2, [x2, #:got_lo12:__pointer_chk_guard]
+ adrp x2, __tsan_pointer_chk_guard
+ ldr x2, [x2, #:lo12:__tsan_pointer_chk_guard]
add x0, x29, 32
- ldr x2, [x2]
eor x1, x2, x0
// call tsan interceptor
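
The new __tsan_pointer_chk_guard above is recovered by XOR-ing a value the runtime controls with the mangled copy that glibc writes into jmp_buf. A purely arithmetic sketch of that identity (the concrete values are made up; only the XOR relation matters):

#include <cassert>
#include <cstdint>

int main() {
  uint64_t guard = 0x123456789abcdef0;   // glibc's secret pointer guard
  uint64_t sp = 0x00007fffffffe000;      // a value we pass in and therefore know
  uint64_t mangled_sp = sp ^ guard;      // what glibc stores in jmp_buf
  uint64_t derived = sp ^ mangled_sp;    // XOR-ing back recovers the guard
  assert(derived == guard);
  // Once known, the same guard demangles any other saved slot.
  uint64_t mangled_lr = 0x0000aaaabbbbcccc ^ guard;
  assert((mangled_lr ^ derived) == 0x0000aaaabbbbcccc);
  return 0;
}
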
diff --git a/lib/tsan/rtl/tsan_rtl_mips64.S b/lib/tsan/rtl/tsan_rtl_mips64.S
new file mode 100644
index 000000000000..d0f7a3f9af98
--- /dev/null
+++ b/lib/tsan/rtl/tsan_rtl_mips64.S
@@ -0,0 +1,214 @@
+.section .text
+.set noreorder
+
+.hidden __tsan_setjmp
+.comm _ZN14__interception11real_setjmpE,8,8
+.globl setjmp
+.type setjmp, @function
+setjmp:
+
+ // save env parameters
+ daddiu $sp,$sp,-40
+ sd $s0,32($sp)
+ sd $ra,24($sp)
+ sd $fp,16($sp)
+ sd $gp,8($sp)
+
+ // calculate and save pointer to GOT
+ lui $gp,%hi(%neg(%gp_rel(setjmp)))
+ daddu $gp,$gp,$t9
+ daddiu $gp,$gp,%lo(%neg(%gp_rel(setjmp)))
+ move $s0,$gp
+
+ // save jmp_buf
+ sd $a0,0($sp)
+
+ // obtain $sp
+ dadd $a0,$zero,$sp
+
+ // call tsan interceptor
+ jal __tsan_setjmp
+ daddiu $a1,$a0,40
+
+ // restore jmp_buf
+ ld $a0,0($sp)
+
+ // restore gp
+ move $gp,$s0
+
+ // load pointer to libc setjmp into t9
+ dla $t9,(_ZN14__interception11real_setjmpE)
+
+ // restore env parameters
+ ld $gp,8($sp)
+ ld $fp,16($sp)
+ ld $ra,24($sp)
+ ld $s0,32($sp)
+ daddiu $sp,$sp,40
+
+ // tail jump to libc setjmp
+ ld $t9,0($t9)
+ jr $t9
+ nop
+
+.size setjmp, .-setjmp
+
+.hidden __tsan_setjmp
+.globl _setjmp
+.comm _ZN14__interception12real__setjmpE,8,8
+.type _setjmp, @function
+_setjmp:
+
+ // Save env parameters
+ daddiu $sp,$sp,-40
+ sd $s0,32($sp)
+ sd $ra,24($sp)
+ sd $fp,16($sp)
+ sd $gp,8($sp)
+
+ // calculate and save pointer to GOT
+ lui $gp,%hi(%neg(%gp_rel(_setjmp)))
+ daddu $gp,$gp,$t9
+ daddiu $gp,$gp,%lo(%neg(%gp_rel(_setjmp)))
+ move $s0,$gp
+
+ // save jmp_buf
+ sd $a0,0($sp)
+
+ // obtain $sp
+ dadd $a0,$zero,$sp
+
+ // call tsan interceptor
+ jal __tsan_setjmp
+ daddiu $a1,$a0,40
+
+ // restore jmp_buf
+ ld $a0,0($sp)
+
+ // restore gp
+ move $gp,$s0
+
+ // load pointer to libc _setjmp into t9
+ dla $t9,(_ZN14__interception12real__setjmpE)
+
+ // restore env parameters
+ ld $gp,8($sp)
+ ld $fp,16($sp)
+ ld $ra,24($sp)
+ ld $s0,32($sp)
+ daddiu $sp,$sp,40
+
+ // tail jump to libc _setjmp
+ ld $t9,0($t9)
+ jr $t9
+ nop
+
+.size _setjmp, .-_setjmp
+
+.hidden __tsan_setjmp
+.globl sigsetjmp
+.comm _ZN14__interception14real_sigsetjmpE,8,8
+.type sigsetjmp, @function
+sigsetjmp:
+
+ // Save env parameters
+ daddiu $sp,$sp,-48
+ sd $s0,40($sp)
+ sd $ra,32($sp)
+ sd $fp,24($sp)
+ sd $gp,16($sp)
+
+ // calculate and save pointer to GOT
+ lui $gp,%hi(%neg(%gp_rel(sigsetjmp)))
+ daddu $gp,$gp,$t9
+ daddiu $gp,$gp,%lo(%neg(%gp_rel(sigsetjmp)))
+ move $s0,$gp
+
+ // save jmp_buf and savesig
+ sd $a0,0($sp)
+ sd $a1,8($sp)
+
+ // obtain $sp
+ dadd $a0,$zero,$sp
+
+ // call tsan interceptor
+ jal __tsan_setjmp
+ daddiu $a1,$a0,48
+
+ // restore jmp_buf and savesig
+ ld $a0,0($sp)
+ ld $a1,8($sp)
+
+ // restore gp
+ move $gp,$s0
+
+ // load pointer to libc sigsetjmp into t9
+ dla $t9,(_ZN14__interception14real_sigsetjmpE)
+
+ // restore env parameters
+ ld $gp,16($sp)
+ ld $fp,24($sp)
+ ld $ra,32($sp)
+ ld $s0,40($sp)
+ daddiu $sp,$sp,48
+
+ // tail jump to libc sigsetjmp
+ ld $t9,0($t9)
+ jr $t9
+ nop
+
+.size sigsetjmp, .-sigsetjmp
+
+.hidden __tsan_setjmp
+.comm _ZN14__interception16real___sigsetjmpE,8,8
+.globl __sigsetjmp
+.type __sigsetjmp, @function
+__sigsetjmp:
+
+ // Save env parameters
+ daddiu $sp,$sp,-48
+ sd $s0,40($sp)
+ sd $ra,32($sp)
+ sd $fp,24($sp)
+ sd $gp,16($sp)
+
+ // calculate and save pointer to GOT
+ lui $gp,%hi(%neg(%gp_rel(__sigsetjmp)))
+ daddu $gp,$gp,$t9
+ daddiu $gp,$gp,%lo(%neg(%gp_rel(__sigsetjmp)))
+ move $s0,$gp
+
+ // save jmp_buf and savesig
+ sd $a0,0($sp)
+ sd $a1,8($sp)
+
+ // obtain $sp
+ dadd $a0,$zero,$sp
+
+ // call tsan interceptor
+ jal __tsan_setjmp
+ daddiu $a1,$a0,48
+
+ // restore jmp_buf and savesig
+ ld $a0,0($sp)
+ ld $a1,8($sp)
+
+ // restore gp
+ move $gp,$s0
+
+ // load pointer to libc __sigsetjmp into t9
+ dla $t9,(_ZN14__interception16real___sigsetjmpE)
+
+ // restore env parameters
+ ld $gp,16($sp)
+ ld $fp,24($sp)
+ ld $ra,32($sp)
+ ld $s0,40($sp)
+ daddiu $sp,$sp,48
+
+ // tail jump to libc __sigsetjmp
+ ld $t9,0($t9)
+ jr $t9
+ nop
+
+.size __sigsetjmp, .-__sigsetjmp
diff --git a/lib/tsan/rtl/tsan_rtl_mutex.cc b/lib/tsan/rtl/tsan_rtl_mutex.cc
index 1806acf063bc..f3b51c30faff 100644
--- a/lib/tsan/rtl/tsan_rtl_mutex.cc
+++ b/lib/tsan/rtl/tsan_rtl_mutex.cc
@@ -50,7 +50,7 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
uptr addr, u64 mid) {
// In Go, these misuses are either impossible, or detected by std lib,
// or false positives (e.g. unlock in a different thread).
- if (kGoMode)
+ if (SANITIZER_GO)
return;
ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(typ);
@@ -76,7 +76,7 @@ void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
s->is_rw = rw;
s->is_recursive = recursive;
s->is_linker_init = linker_init;
- if (kCppMode && s->creation_stack_id == 0)
+ if (!SANITIZER_GO && s->creation_stack_id == 0)
s->creation_stack_id = CurrentStackId(thr, pc);
s->mtx.Unlock();
}
@@ -195,7 +195,7 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
int rec = 0;
bool report_bad_unlock = false;
- if (kCppMode && (s->recursion == 0 || s->owner_tid != thr->tid)) {
+ if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
if (flags()->report_mutex_bugs && !s->is_broken) {
s->is_broken = true;
report_bad_unlock = true;
@@ -412,7 +412,7 @@ void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
s->mtx.Unlock();
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
diff --git a/lib/tsan/rtl/tsan_rtl_proc.cc b/lib/tsan/rtl/tsan_rtl_proc.cc
index 0c838a1388f5..efccdb590ab3 100644
--- a/lib/tsan/rtl/tsan_rtl_proc.cc
+++ b/lib/tsan/rtl/tsan_rtl_proc.cc
@@ -23,7 +23,7 @@ Processor *ProcCreate() {
internal_memset(mem, 0, sizeof(Processor));
Processor *proc = new(mem) Processor;
proc->thr = nullptr;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
AllocatorProcStart(proc);
#endif
if (common_flags()->detect_deadlocks)
@@ -33,7 +33,7 @@ Processor *ProcCreate() {
void ProcDestroy(Processor *proc) {
CHECK_EQ(proc->thr, nullptr);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
AllocatorProcFinish(proc);
#endif
ctx->clock_alloc.FlushCache(&proc->clock_cache);
diff --git a/lib/tsan/rtl/tsan_rtl_report.cc b/lib/tsan/rtl/tsan_rtl_report.cc
index 810119b93a7a..bc8944fbfb58 100644
--- a/lib/tsan/rtl/tsan_rtl_report.cc
+++ b/lib/tsan/rtl/tsan_rtl_report.cc
@@ -38,6 +38,10 @@ void TsanCheckFailed(const char *file, int line, const char *cond,
// on the other hand there is no sense in processing interceptors
// since we are going to die soon.
ScopedIgnoreInterceptors ignore;
+#if !SANITIZER_GO
+ cur_thread()->ignore_sync++;
+ cur_thread()->ignore_reads_and_writes++;
+#endif
Printf("FATAL: ThreadSanitizer CHECK failed: "
"%s:%d \"%s\" (0x%zx, 0x%zx)\n",
file, line, cond, (uptr)v1, (uptr)v2);
@@ -71,7 +75,7 @@ static void StackStripMain(SymbolizedStack *frames) {
if (last_frame2 == 0)
return;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
const char *last = last_frame->info.function;
const char *last2 = last_frame2->info.function;
// Strip frame above 'main'
@@ -204,7 +208,7 @@ void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) {
rt->stack->suppressable = suppressable;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) {
int unique_id = *(int *)arg;
return tctx->unique_id == (u32)unique_id;
@@ -249,7 +253,7 @@ ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
#endif
void ScopedReport::AddThread(int unique_tid, bool suppressable) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
AddThread(tctx, suppressable);
#endif
@@ -305,7 +309,7 @@ void ScopedReport::AddDeadMutex(u64 id) {
void ScopedReport::AddLocation(uptr addr, uptr size) {
if (addr == 0)
return;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
int fd = -1;
int creat_tid = -1;
u32 creat_stack = 0;
@@ -355,7 +359,7 @@ void ScopedReport::AddLocation(uptr addr, uptr size) {
}
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void ScopedReport::AddSleep(u32 stack_id) {
rep_->sleep = SymbolizeStackId(stack_id);
}
@@ -660,7 +664,7 @@ void ReportRace(ThreadState *thr) {
rep.AddLocation(addr_min, addr_max - addr_min);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
{ // NOLINT
Shadow s(thr->racy_state[1]);
if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
@@ -689,7 +693,7 @@ void PrintCurrentStack(ThreadState *thr, uptr pc) {
// Also see PR27280 comment 2 and 3 for breaking examples and analysis.
ALWAYS_INLINE
void PrintCurrentStackSlow(uptr pc) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
BufferedStackTrace *ptrace =
new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
BufferedStackTrace();
diff --git a/lib/tsan/rtl/tsan_rtl_thread.cc b/lib/tsan/rtl/tsan_rtl_thread.cc
index ab8f3c38a960..5b17dc60bcbe 100644
--- a/lib/tsan/rtl/tsan_rtl_thread.cc
+++ b/lib/tsan/rtl/tsan_rtl_thread.cc
@@ -30,7 +30,7 @@ ThreadContext::ThreadContext(int tid)
, epoch1() {
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
ThreadContext::~ThreadContext() {
}
#endif
@@ -68,8 +68,9 @@ void ThreadContext::OnCreated(void *arg) {
void ThreadContext::OnReset() {
CHECK_EQ(sync.size(), 0);
- FlushUnneededShadowMemory(GetThreadTrace(tid), TraceSize() * sizeof(Event));
- //!!! FlushUnneededShadowMemory(GetThreadTraceHeader(tid), sizeof(Trace));
+ uptr trace_p = GetThreadTrace(tid);
+ ReleaseMemoryPagesToOS(trace_p, trace_p + TraceSize() * sizeof(Event));
+ //!!! ReleaseMemoryToOS(GetThreadTraceHeader(tid), sizeof(Trace));
}
void ThreadContext::OnDetached(void *arg) {
@@ -94,7 +95,7 @@ void ThreadContext::OnStarted(void *arg) {
epoch1 = (u64)-1;
new(thr) ThreadState(ctx, tid, unique_id, epoch0, reuse_count,
args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0];
thr->shadow_stack_pos = thr->shadow_stack;
thr->shadow_stack_end = thr->shadow_stack + kShadowStackSize;
@@ -125,6 +126,12 @@ void ThreadContext::OnStarted(void *arg) {
}
void ThreadContext::OnFinished() {
+#if SANITIZER_GO
+ internal_free(thr->shadow_stack);
+ thr->shadow_stack = nullptr;
+ thr->shadow_stack_pos = nullptr;
+ thr->shadow_stack_end = nullptr;
+#endif
if (!detached) {
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
@@ -142,7 +149,7 @@ void ThreadContext::OnFinished() {
thr = 0;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
struct ThreadLeak {
ThreadContext *tctx;
int count;
@@ -164,7 +171,7 @@ static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
}
#endif
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
if (tctx->tid == 0) {
Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
@@ -196,7 +203,7 @@ static void ThreadCheckIgnore(ThreadState *thr) {}
void ThreadFinalize(ThreadState *thr) {
ThreadCheckIgnore(thr);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (!flags()->report_thread_leaks)
return;
ThreadRegistryLock l(ctx->thread_registry);
@@ -234,7 +241,7 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
uptr stk_size = 0;
uptr tls_addr = 0;
uptr tls_size = 0;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);
if (tid) {
@@ -265,7 +272,7 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
tr->Unlock();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (ctx->after_multithreaded_fork) {
thr->ignore_interceptors++;
ThreadIgnoreBegin(thr, 0);
diff --git a/lib/tsan/rtl/tsan_suppressions.cc b/lib/tsan/rtl/tsan_suppressions.cc
index aea3cb978cb1..bfb64e0018fb 100644
--- a/lib/tsan/rtl/tsan_suppressions.cc
+++ b/lib/tsan/rtl/tsan_suppressions.cc
@@ -21,7 +21,7 @@
#include "tsan_mman.h"
#include "tsan_platform.h"
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Suppressions for true/false positives in standard libraries.
static const char *const std_suppressions =
// Libstdc++ 4.4 has data races in std::string.
@@ -54,7 +54,7 @@ void InitializeSuppressions() {
suppression_ctx = new (suppression_placeholder) // NOLINT
SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
suppression_ctx->ParseFromFile(flags()->suppressions);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
suppression_ctx->Parse(__tsan_default_suppressions());
suppression_ctx->Parse(std_suppressions);
#endif
diff --git a/lib/tsan/rtl/tsan_sync.cc b/lib/tsan/rtl/tsan_sync.cc
index 58b26802b0e7..44c6a26a1e8e 100644
--- a/lib/tsan/rtl/tsan_sync.cc
+++ b/lib/tsan/rtl/tsan_sync.cc
@@ -30,7 +30,7 @@ void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
this->next = 0;
creation_stack_id = 0;
- if (kCppMode) // Go does not use them
+ if (!SANITIZER_GO) // Go does not use them
creation_stack_id = CurrentStackId(thr, pc);
if (common_flags()->detect_deadlocks)
DDMutexInit(thr, pc, this);
@@ -120,7 +120,7 @@ bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
// without meta objects, at this point it stops freeing meta objects. Because
// thread stacks grow top-down, we do the same starting from the end as well.
void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
- if (kGoMode) {
+ if (SANITIZER_GO) {
// UnmapOrDie/MmapFixedNoReserve does not work on Windows,
// so we do the optimization only for C/C++.
FreeRange(proc, p, sz);
diff --git a/lib/tsan/rtl/tsan_sync.h b/lib/tsan/rtl/tsan_sync.h
index 2bc2f41fbe26..86e6bbd55bac 100644
--- a/lib/tsan/rtl/tsan_sync.h
+++ b/lib/tsan/rtl/tsan_sync.h
@@ -50,16 +50,16 @@ struct SyncVar {
void Reset(Processor *proc);
u64 GetId() const {
- // 47 lsb is addr, then 14 bits is low part of uid, then 3 zero bits.
- return GetLsb((u64)addr | (uid << 47), 61);
+ // 48 lsb is addr, then 14 bits is low part of uid, then 2 zero bits.
+ return GetLsb((u64)addr | (uid << 48), 60);
}
bool CheckId(u64 uid) const {
CHECK_EQ(uid, GetLsb(uid, 14));
return GetLsb(this->uid, 14) == uid;
}
static uptr SplitId(u64 id, u64 *uid) {
- *uid = id >> 47;
- return (uptr)GetLsb(id, 47);
+ *uid = id >> 48;
+ return (uptr)GetLsb(id, 48);
}
};
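
A small roundtrip check of the widened SyncVar id layout above (GetLsb here is a local stand-in for the sanitizer helper; the constants are illustrative):

#include <cassert>
#include <cstdint>

static inline uint64_t GetLsb(uint64_t v, int n) { return v & ((1ull << n) - 1); }

int main() {
  uint64_t addr = 0x00007f1234567890;              // fits in the 48 address bits
  uint64_t uid  = 0x1abc;                          // CheckId() compares 14 bits
  uint64_t id   = GetLsb(addr | (uid << 48), 60);  // GetId() packing
  assert(GetLsb(id, 48) == addr);                  // SplitId() address part
  assert((id >> 48) == GetLsb(uid, 12));           // uid bits kept by the 60-bit mask
  return 0;
}
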
diff --git a/lib/tsan/rtl/tsan_trace.h b/lib/tsan/rtl/tsan_trace.h
index 2569c7e42a47..96a18ac4101a 100644
--- a/lib/tsan/rtl/tsan_trace.h
+++ b/lib/tsan/rtl/tsan_trace.h
@@ -42,7 +42,7 @@ enum EventType {
typedef u64 Event;
struct TraceHeader {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
BufferedStackTrace stack0; // Start stack for the trace.
#else
VarSizeStackTrace stack0;
@@ -55,7 +55,7 @@ struct TraceHeader {
struct Trace {
Mutex mtx;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Must be last to catch overflow as paging fault.
// Go shadow stack is dynamically allocated.
uptr shadow_stack[kShadowStackSize];
diff --git a/lib/tsan/tests/CMakeLists.txt b/lib/tsan/tests/CMakeLists.txt
index d1b1e9611a78..4587e47ba904 100644
--- a/lib/tsan/tests/CMakeLists.txt
+++ b/lib/tsan/tests/CMakeLists.txt
@@ -12,6 +12,10 @@ set(TSAN_UNITTEST_CFLAGS
-I${COMPILER_RT_SOURCE_DIR}/lib/tsan/rtl
-DGTEST_HAS_RTTI=0)
+if(APPLE)
+ list(APPEND TSAN_UNITTEST_CFLAGS ${DARWIN_osx_CFLAGS})
+endif()
+
set(TSAN_RTL_HEADERS)
foreach (header ${TSAN_HEADERS})
list(APPEND TSAN_RTL_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/../${header})
@@ -78,7 +82,7 @@ macro(add_tsan_unittest testname)
add_compiler_rt_test(TsanUnitTests "${testname}-${arch}-Test"
OBJECTS ${TEST_OBJECTS}
DEPS ${TEST_DEPS}
- LINK_FLAGS ${TARGET_LINK_FLAGS}
+ LINK_FLAGS ${TARGET_LINK_FLAGS} ${DARWIN_osx_LINKFLAGS}
-lc++)
endif()
endforeach()
diff --git a/lib/tsan/tests/rtl/tsan_posix.cc b/lib/tsan/tests/rtl/tsan_posix.cc
index e1a61b5e43f1..9c0e013e5735 100644
--- a/lib/tsan/tests/rtl/tsan_posix.cc
+++ b/lib/tsan/tests/rtl/tsan_posix.cc
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
#include "tsan_interface.h"
+#include "tsan_posix_util.h"
#include "tsan_test_util.h"
#include "gtest/gtest.h"
#include <pthread.h>
@@ -30,10 +31,10 @@ struct thread_key {
static void thread_secific_dtor(void *v) {
thread_key *k = (thread_key *)v;
- EXPECT_EQ(pthread_mutex_lock(k->mtx), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_lock(k->mtx), 0);
(*k->cnt)++;
__tsan_write4(&k->cnt);
- EXPECT_EQ(pthread_mutex_unlock(k->mtx), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_unlock(k->mtx), 0);
if (k->val == 42) {
// Okay.
} else if (k->val == 43 || k->val == 44) {
@@ -55,22 +56,22 @@ TEST(Posix, ThreadSpecificDtors) {
pthread_key_t key;
EXPECT_EQ(pthread_key_create(&key, thread_secific_dtor), 0);
pthread_mutex_t mtx;
- EXPECT_EQ(pthread_mutex_init(&mtx, 0), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_init(&mtx, 0), 0);
pthread_t th[3];
thread_key k1 = thread_key(key, &mtx, 42, &cnt);
thread_key k2 = thread_key(key, &mtx, 43, &cnt);
thread_key k3 = thread_key(key, &mtx, 44, &cnt);
- EXPECT_EQ(pthread_create(&th[0], 0, dtors_thread, &k1), 0);
- EXPECT_EQ(pthread_create(&th[1], 0, dtors_thread, &k2), 0);
- EXPECT_EQ(pthread_join(th[0], 0), 0);
- EXPECT_EQ(pthread_create(&th[2], 0, dtors_thread, &k3), 0);
- EXPECT_EQ(pthread_join(th[1], 0), 0);
- EXPECT_EQ(pthread_join(th[2], 0), 0);
+ EXPECT_EQ(__interceptor_pthread_create(&th[0], 0, dtors_thread, &k1), 0);
+ EXPECT_EQ(__interceptor_pthread_create(&th[1], 0, dtors_thread, &k2), 0);
+ EXPECT_EQ(__interceptor_pthread_join(th[0], 0), 0);
+ EXPECT_EQ(__interceptor_pthread_create(&th[2], 0, dtors_thread, &k3), 0);
+ EXPECT_EQ(__interceptor_pthread_join(th[1], 0), 0);
+ EXPECT_EQ(__interceptor_pthread_join(th[2], 0), 0);
EXPECT_EQ(pthread_key_delete(key), 0);
EXPECT_EQ(6, cnt);
}
-#ifndef __aarch64__
+#if !defined(__aarch64__) && !defined(__APPLE__)
static __thread int local_var;
static void *local_thread(void *p) {
@@ -81,10 +82,10 @@ static void *local_thread(void *p) {
const int kThreads = 4;
pthread_t th[kThreads];
for (int i = 0; i < kThreads; i++)
- EXPECT_EQ(pthread_create(&th[i], 0, local_thread,
+ EXPECT_EQ(__interceptor_pthread_create(&th[i], 0, local_thread,
(void*)((long)p - 1)), 0); // NOLINT
for (int i = 0; i < kThreads; i++)
- EXPECT_EQ(pthread_join(th[i], 0), 0);
+ EXPECT_EQ(__interceptor_pthread_join(th[i], 0), 0);
return 0;
}
#endif
@@ -92,7 +93,9 @@ static void *local_thread(void *p) {
TEST(Posix, ThreadLocalAccesses) {
// The test is failing with high thread count for aarch64.
// FIXME: track down the issue and re-enable the test.
-#ifndef __aarch64__
+// On Darwin, we're running unit tests without interceptors and __thread is
+// using malloc and free, which causes false data race reports.
+#if !defined(__aarch64__) && !defined(__APPLE__)
local_thread((void*)2);
#endif
}
@@ -106,46 +109,46 @@ struct CondContext {
static void *cond_thread(void *p) {
CondContext &ctx = *static_cast<CondContext*>(p);
- EXPECT_EQ(pthread_mutex_lock(&ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_lock(&ctx.m), 0);
EXPECT_EQ(ctx.data, 0);
ctx.data = 1;
- EXPECT_EQ(pthread_cond_signal(&ctx.c), 0);
- EXPECT_EQ(pthread_mutex_unlock(&ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_cond_signal(&ctx.c), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_unlock(&ctx.m), 0);
- EXPECT_EQ(pthread_mutex_lock(&ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_lock(&ctx.m), 0);
while (ctx.data != 2)
- EXPECT_EQ(pthread_cond_wait(&ctx.c, &ctx.m), 0);
- EXPECT_EQ(pthread_mutex_unlock(&ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_cond_wait(&ctx.c, &ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_unlock(&ctx.m), 0);
- EXPECT_EQ(pthread_mutex_lock(&ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_lock(&ctx.m), 0);
ctx.data = 3;
EXPECT_EQ(pthread_cond_broadcast(&ctx.c), 0);
- EXPECT_EQ(pthread_mutex_unlock(&ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_unlock(&ctx.m), 0);
return 0;
}
TEST(Posix, CondBasic) {
CondContext ctx;
- EXPECT_EQ(pthread_mutex_init(&ctx.m, 0), 0);
- EXPECT_EQ(pthread_cond_init(&ctx.c, 0), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_init(&ctx.m, 0), 0);
+ EXPECT_EQ(__interceptor_pthread_cond_init(&ctx.c, 0), 0);
ctx.data = 0;
pthread_t th;
- EXPECT_EQ(pthread_create(&th, 0, cond_thread, &ctx), 0);
+ EXPECT_EQ(__interceptor_pthread_create(&th, 0, cond_thread, &ctx), 0);
- EXPECT_EQ(pthread_mutex_lock(&ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_lock(&ctx.m), 0);
while (ctx.data != 1)
- EXPECT_EQ(pthread_cond_wait(&ctx.c, &ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_cond_wait(&ctx.c, &ctx.m), 0);
ctx.data = 2;
- EXPECT_EQ(pthread_mutex_unlock(&ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_unlock(&ctx.m), 0);
EXPECT_EQ(pthread_cond_broadcast(&ctx.c), 0);
- EXPECT_EQ(pthread_mutex_lock(&ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_lock(&ctx.m), 0);
while (ctx.data != 3)
- EXPECT_EQ(pthread_cond_wait(&ctx.c, &ctx.m), 0);
- EXPECT_EQ(pthread_mutex_unlock(&ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_cond_wait(&ctx.c, &ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_unlock(&ctx.m), 0);
- EXPECT_EQ(pthread_join(th, 0), 0);
- EXPECT_EQ(pthread_cond_destroy(&ctx.c), 0);
- EXPECT_EQ(pthread_mutex_destroy(&ctx.m), 0);
+ EXPECT_EQ(__interceptor_pthread_join(th, 0), 0);
+ EXPECT_EQ(__interceptor_pthread_cond_destroy(&ctx.c), 0);
+ EXPECT_EQ(__interceptor_pthread_mutex_destroy(&ctx.m), 0);
}
diff --git a/lib/tsan/tests/rtl/tsan_posix_util.h b/lib/tsan/tests/rtl/tsan_posix_util.h
new file mode 100644
index 000000000000..340693ebb8a0
--- /dev/null
+++ b/lib/tsan/tests/rtl/tsan_posix_util.h
@@ -0,0 +1,77 @@
+//===-- tsan_posix_util.h ---------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Test POSIX utils.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_POSIX_UTIL_H
+#define TSAN_POSIX_UTIL_H
+
+#include <pthread.h>
+
+#ifdef __APPLE__
+#define __interceptor_memcpy wrap_memcpy
+#define __interceptor_memset wrap_memset
+#define __interceptor_pthread_create wrap_pthread_create
+#define __interceptor_pthread_join wrap_pthread_join
+#define __interceptor_pthread_detach wrap_pthread_detach
+#define __interceptor_pthread_mutex_init wrap_pthread_mutex_init
+#define __interceptor_pthread_mutex_lock wrap_pthread_mutex_lock
+#define __interceptor_pthread_mutex_unlock wrap_pthread_mutex_unlock
+#define __interceptor_pthread_mutex_destroy wrap_pthread_mutex_destroy
+#define __interceptor_pthread_mutex_trylock wrap_pthread_mutex_trylock
+#define __interceptor_pthread_rwlock_init wrap_pthread_rwlock_init
+#define __interceptor_pthread_rwlock_destroy wrap_pthread_rwlock_destroy
+#define __interceptor_pthread_rwlock_trywrlock wrap_pthread_rwlock_trywrlock
+#define __interceptor_pthread_rwlock_wrlock wrap_pthread_rwlock_wrlock
+#define __interceptor_pthread_rwlock_unlock wrap_pthread_rwlock_unlock
+#define __interceptor_pthread_rwlock_rdlock wrap_pthread_rwlock_rdlock
+#define __interceptor_pthread_rwlock_tryrdlock wrap_pthread_rwlock_tryrdlock
+#define __interceptor_pthread_cond_init wrap_pthread_cond_init
+#define __interceptor_pthread_cond_signal wrap_pthread_cond_signal
+#define __interceptor_pthread_cond_broadcast wrap_pthread_cond_broadcast
+#define __interceptor_pthread_cond_wait wrap_pthread_cond_wait
+#define __interceptor_pthread_cond_destroy wrap_pthread_cond_destroy
+#endif
+
+extern "C" void *__interceptor_memcpy(void *, const void *, uptr);
+extern "C" void *__interceptor_memset(void *, int, uptr);
+extern "C" int __interceptor_pthread_create(pthread_t *thread,
+ const pthread_attr_t *attr,
+ void *(*start_routine)(void *),
+ void *arg);
+extern "C" int __interceptor_pthread_join(pthread_t thread, void **value_ptr);
+extern "C" int __interceptor_pthread_detach(pthread_t thread);
+
+extern "C" int __interceptor_pthread_mutex_init(
+ pthread_mutex_t *mutex, const pthread_mutexattr_t *attr);
+extern "C" int __interceptor_pthread_mutex_lock(pthread_mutex_t *mutex);
+extern "C" int __interceptor_pthread_mutex_unlock(pthread_mutex_t *mutex);
+extern "C" int __interceptor_pthread_mutex_destroy(pthread_mutex_t *mutex);
+extern "C" int __interceptor_pthread_mutex_trylock(pthread_mutex_t *mutex);
+
+extern "C" int __interceptor_pthread_rwlock_init(
+ pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr);
+extern "C" int __interceptor_pthread_rwlock_destroy(pthread_rwlock_t *rwlock);
+extern "C" int __interceptor_pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock);
+extern "C" int __interceptor_pthread_rwlock_wrlock(pthread_rwlock_t *rwlock);
+extern "C" int __interceptor_pthread_rwlock_unlock(pthread_rwlock_t *rwlock);
+extern "C" int __interceptor_pthread_rwlock_rdlock(pthread_rwlock_t *rwlock);
+extern "C" int __interceptor_pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock);
+
+extern "C" int __interceptor_pthread_cond_init(pthread_cond_t *cond,
+ const pthread_condattr_t *attr);
+extern "C" int __interceptor_pthread_cond_signal(pthread_cond_t *cond);
+extern "C" int __interceptor_pthread_cond_broadcast(pthread_cond_t *cond);
+extern "C" int __interceptor_pthread_cond_wait(pthread_cond_t *cond,
+ pthread_mutex_t *mutex);
+extern "C" int __interceptor_pthread_cond_destroy(pthread_cond_t *cond);
+
+#endif // #ifndef TSAN_POSIX_UTIL_H
diff --git a/lib/tsan/tests/rtl/tsan_test_util_posix.cc b/lib/tsan/tests/rtl/tsan_test_util_posix.cc
index c8be088d266f..834a271aa8c0 100644
--- a/lib/tsan/tests/rtl/tsan_test_util_posix.cc
+++ b/lib/tsan/tests/rtl/tsan_test_util_posix.cc
@@ -14,6 +14,7 @@
#include "sanitizer_common/sanitizer_atomic.h"
#include "tsan_interface.h"
+#include "tsan_posix_util.h"
#include "tsan_test_util.h"
#include "tsan_report.h"
@@ -33,52 +34,6 @@ static __thread bool expect_report;
static __thread bool expect_report_reported;
static __thread ReportType expect_report_type;
-#ifdef __APPLE__
-#define __interceptor_memcpy wrap_memcpy
-#define __interceptor_memset wrap_memset
-#define __interceptor_pthread_create wrap_pthread_create
-#define __interceptor_pthread_join wrap_pthread_join
-#define __interceptor_pthread_detach wrap_pthread_detach
-#define __interceptor_pthread_mutex_init wrap_pthread_mutex_init
-#define __interceptor_pthread_mutex_lock wrap_pthread_mutex_lock
-#define __interceptor_pthread_mutex_unlock wrap_pthread_mutex_unlock
-#define __interceptor_pthread_mutex_destroy wrap_pthread_mutex_destroy
-#define __interceptor_pthread_mutex_trylock wrap_pthread_mutex_trylock
-#define __interceptor_pthread_rwlock_init wrap_pthread_rwlock_init
-#define __interceptor_pthread_rwlock_destroy wrap_pthread_rwlock_destroy
-#define __interceptor_pthread_rwlock_trywrlock wrap_pthread_rwlock_trywrlock
-#define __interceptor_pthread_rwlock_wrlock wrap_pthread_rwlock_wrlock
-#define __interceptor_pthread_rwlock_unlock wrap_pthread_rwlock_unlock
-#define __interceptor_pthread_rwlock_rdlock wrap_pthread_rwlock_rdlock
-#define __interceptor_pthread_rwlock_tryrdlock wrap_pthread_rwlock_tryrdlock
-#endif
-
-extern "C" void *__interceptor_memcpy(void *, const void *, uptr);
-extern "C" void *__interceptor_memset(void *, int, uptr);
-extern "C" int __interceptor_pthread_create(pthread_t *thread,
- const pthread_attr_t *attr,
- void *(*start_routine)(void *),
- void *arg);
-extern "C" int __interceptor_pthread_join(pthread_t thread, void **value_ptr);
-extern "C" int __interceptor_pthread_detach(pthread_t thread);
-
-extern "C" int __interceptor_pthread_mutex_init(
- pthread_mutex_t *mutex, const pthread_mutexattr_t *attr);
-extern "C" int __interceptor_pthread_mutex_lock(pthread_mutex_t *mutex);
-extern "C" int __interceptor_pthread_mutex_unlock(pthread_mutex_t *mutex);
-extern "C" int __interceptor_pthread_mutex_destroy(pthread_mutex_t *mutex);
-extern "C" int __interceptor_pthread_mutex_trylock(pthread_mutex_t *mutex);
-
-extern "C" int __interceptor_pthread_rwlock_init(
- pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr);
-extern "C" int __interceptor_pthread_rwlock_destroy(pthread_rwlock_t *rwlock);
-extern "C" int __interceptor_pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock);
-extern "C" int __interceptor_pthread_rwlock_wrlock(pthread_rwlock_t *rwlock);
-extern "C" int __interceptor_pthread_rwlock_unlock(pthread_rwlock_t *rwlock);
-extern "C" int __interceptor_pthread_rwlock_rdlock(pthread_rwlock_t *rwlock);
-extern "C" int __interceptor_pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock);
-
-
static void *BeforeInitThread(void *param) {
(void)param;
return 0;
@@ -105,11 +60,11 @@ bool OnReport(const ReportDesc *rep, bool suppressed) {
if (rep->typ != expect_report_type) {
printf("Expected report of type %d, got type %d\n",
(int)expect_report_type, (int)rep->typ);
- EXPECT_FALSE("Wrong report type");
+ EXPECT_TRUE(false) << "Wrong report type";
return false;
}
} else {
- EXPECT_FALSE("Unexpected report");
+ EXPECT_TRUE(false) << "Unexpected report";
return false;
}
expect_report_reported = true;
@@ -368,7 +323,7 @@ void ScopedThread::Impl::HandleEvent(Event *ev) {
}
if (expect_report && !expect_report_reported) {
printf("Missed expected report of type %d\n", (int)ev->report_type);
- EXPECT_FALSE("Missed expected race");
+ EXPECT_TRUE(false) << "Missed expected race";
}
expect_report = false;
}
diff --git a/lib/tsan/tests/unit/tsan_mman_test.cc b/lib/tsan/tests/unit/tsan_mman_test.cc
index 609141c59294..60dea3d43240 100644
--- a/lib/tsan/tests/unit/tsan_mman_test.cc
+++ b/lib/tsan/tests/unit/tsan_mman_test.cc
@@ -53,9 +53,9 @@ TEST(Mman, UserRealloc) {
uptr pc = 0;
{
void *p = user_realloc(thr, pc, 0, 0);
- // Strictly saying this is incorrect, realloc(NULL, N) is equivalent to
- // malloc(N), thus must return non-NULL pointer.
- EXPECT_EQ(p, (void*)0);
+ // Realloc(NULL, N) is equivalent to malloc(N), thus must return
+ // non-NULL pointer.
+ EXPECT_NE(p, (void*)0);
}
{
void *p = user_realloc(thr, pc, 0, 100);
@@ -68,7 +68,7 @@ TEST(Mman, UserRealloc) {
EXPECT_NE(p, (void*)0);
memset(p, 0xde, 100);
void *p2 = user_realloc(thr, pc, p, 0);
- EXPECT_EQ(p2, (void*)0);
+ EXPECT_NE(p2, (void*)0);
}
{
void *p = user_realloc(thr, pc, 0, 100);
diff --git a/lib/ubsan/CMakeLists.txt b/lib/ubsan/CMakeLists.txt
index 901fef2bfdcf..036c65a097f0 100644
--- a/lib/ubsan/CMakeLists.txt
+++ b/lib/ubsan/CMakeLists.txt
@@ -12,7 +12,7 @@ set(UBSAN_STANDALONE_SOURCES
ubsan_init_standalone.cc
)
-set(UBSAN_CXX_SOURCES
+set(UBSAN_CXXABI_SOURCES
ubsan_handlers_cxx.cc
ubsan_type_hash.cc
ubsan_type_hash_itanium.cc
@@ -30,16 +30,15 @@ append_rtti_flag(OFF UBSAN_STANDALONE_CFLAGS)
append_list_if(SANITIZER_CAN_USE_CXXABI -DUBSAN_CAN_USE_CXXABI UBSAN_STANDALONE_CFLAGS)
set(UBSAN_CXXFLAGS ${SANITIZER_COMMON_CFLAGS})
-append_rtti_flag(ON UBSAN_STANDALONE_CXXFLAGS)
+append_rtti_flag(ON UBSAN_CXXFLAGS)
append_list_if(SANITIZER_CAN_USE_CXXABI -DUBSAN_CAN_USE_CXXABI UBSAN_CXXFLAGS)
-add_custom_target(ubsan)
-set_target_properties(ubsan PROPERTIES FOLDER "Compiler-RT Misc")
+add_compiler_rt_component(ubsan)
if(APPLE)
set(UBSAN_COMMON_SOURCES ${UBSAN_SOURCES})
if(SANITIZER_CAN_USE_CXXABI)
- list(APPEND UBSAN_COMMON_SOURCES ${UBSAN_CXX_SOURCES})
+ list(APPEND UBSAN_COMMON_SOURCES ${UBSAN_CXXABI_SOURCES})
endif()
# Common parts of UBSan runtime.
@@ -73,7 +72,16 @@ else()
add_compiler_rt_object_libraries(RTUbsan
ARCHS ${UBSAN_COMMON_SUPPORTED_ARCH}
SOURCES ${UBSAN_SOURCES} CFLAGS ${UBSAN_CFLAGS})
- # C++-specific parts of UBSan runtime. Requires a C++ ABI library.
+
+ if(SANITIZER_CAN_USE_CXXABI)
+ # C++-specific parts of UBSan runtime. Requires a C++ ABI library.
+ set(UBSAN_CXX_SOURCES ${UBSAN_CXXABI_SOURCES})
+ else()
+ # Dummy target if we don't have a C++ ABI library.
+ file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/cxx_dummy.cc "")
+ set(UBSAN_CXX_SOURCES ${CMAKE_CURRENT_BINARY_DIR}/cxx_dummy.cc)
+ endif()
+
add_compiler_rt_object_libraries(RTUbsan_cxx
ARCHS ${UBSAN_COMMON_SUPPORTED_ARCH}
SOURCES ${UBSAN_CXX_SOURCES} CFLAGS ${UBSAN_CXXFLAGS})
@@ -116,5 +124,3 @@ else()
endif()
endif()
endif()
-
-add_dependencies(compiler-rt ubsan)
diff --git a/lib/ubsan/ubsan_init.cc b/lib/ubsan/ubsan_init.cc
index 73398ceafc34..b4f42c4b8503 100644
--- a/lib/ubsan/ubsan_init.cc
+++ b/lib/ubsan/ubsan_init.cc
@@ -39,6 +39,7 @@ static void CommonStandaloneInit() {
InitializeFlags();
CacheBinaryName();
__sanitizer_set_report_path(common_flags()->log_path);
+ AndroidLogInit();
InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
CommonInit();
ubsan_mode = UBSAN_MODE_STANDALONE;
diff --git a/lib/ubsan/ubsan_type_hash_itanium.cc b/lib/ubsan/ubsan_type_hash_itanium.cc
index 26272e3360b2..5ae5ae0dc849 100644
--- a/lib/ubsan/ubsan_type_hash_itanium.cc
+++ b/lib/ubsan/ubsan_type_hash_itanium.cc
@@ -73,6 +73,8 @@ public:
namespace abi = __cxxabiv1;
+using namespace __sanitizer;
+
// We implement a simple two-level cache for type-checking results. For each
// (vptr,type) pair, a hash is computed. This hash is assumed to be globally
// unique; if it collides, we will get false negatives, but:
@@ -165,7 +167,7 @@ static const abi::__class_type_info *findBaseAtOffset(
dynamic_cast<const abi::__vmi_class_type_info*>(Derived);
if (!VTI)
// No base class subobjects.
- return 0;
+ return nullptr;
for (unsigned int base = 0; base != VTI->base_count; ++base) {
sptr OffsetHere = VTI->base_info[base].__offset_flags >>
@@ -180,7 +182,7 @@ static const abi::__class_type_info *findBaseAtOffset(
return Base;
}
- return 0;
+ return nullptr;
}
namespace {
@@ -196,11 +198,11 @@ struct VtablePrefix {
VtablePrefix *getVtablePrefix(void *Vtable) {
VtablePrefix *Vptr = reinterpret_cast<VtablePrefix*>(Vtable);
if (!Vptr)
- return 0;
+ return nullptr;
VtablePrefix *Prefix = Vptr - 1;
if (!Prefix->TypeInfo)
// This can't possibly be a valid vtable.
- return 0;
+ return nullptr;
return Prefix;
}
@@ -246,9 +248,9 @@ __ubsan::DynamicTypeInfo
__ubsan::getDynamicTypeInfoFromVtable(void *VtablePtr) {
VtablePrefix *Vtable = getVtablePrefix(VtablePtr);
if (!Vtable)
- return DynamicTypeInfo(0, 0, 0);
+ return DynamicTypeInfo(nullptr, 0, nullptr);
if (Vtable->Offset < -VptrMaxOffsetToTop || Vtable->Offset > VptrMaxOffsetToTop)
- return DynamicTypeInfo(0, Vtable->Offset, 0);
+ return DynamicTypeInfo(nullptr, Vtable->Offset, nullptr);
const abi::__class_type_info *ObjectType = findBaseAtOffset(
static_cast<const abi::__class_type_info*>(Vtable->TypeInfo),
-Vtable->Offset);
diff --git a/lib/xray/CMakeLists.txt b/lib/xray/CMakeLists.txt
new file mode 100644
index 000000000000..9c7cf6ce361c
--- /dev/null
+++ b/lib/xray/CMakeLists.txt
@@ -0,0 +1,79 @@
+# Build for the XRay runtime support library.
+
+# Core XRay runtime library implementation files.
+set(XRAY_SOURCES
+ xray_init.cc
+ xray_interface.cc
+ xray_flags.cc
+ xray_inmemory_log.cc)
+
+# XRay flight data recorder (FDR) implementation files.
+set(XRAY_FDR_SOURCES
+ xray_buffer_queue.cc)
+
+set(x86_64_SOURCES
+ xray_x86_64.cc
+ xray_trampoline_x86_64.S
+ ${XRAY_SOURCES})
+
+set(arm_SOURCES
+ xray_arm.cc
+ xray_trampoline_arm.S
+ ${XRAY_SOURCES})
+
+set(armhf_SOURCES ${arm_SOURCES})
+
+set(aarch64_SOURCES
+ xray_AArch64.cc
+ xray_trampoline_AArch64.S
+ ${XRAY_SOURCES})
+
+include_directories(..)
+include_directories(../../include)
+
+set(XRAY_CFLAGS ${SANITIZER_COMMON_CFLAGS})
+set(XRAY_COMMON_DEFINITIONS XRAY_HAS_EXCEPTIONS=1)
+append_list_if(
+ COMPILER_RT_HAS_XRAY_COMPILER_FLAG XRAY_SUPPORTED=1 XRAY_COMMON_DEFINITIONS)
+
+add_compiler_rt_object_libraries(RTXray
+ ARCHS ${XRAY_SUPPORTED_ARCH}
+ SOURCES ${XRAY_SOURCES} CFLAGS ${XRAY_CFLAGS}
+ DEFS ${XRAY_COMMON_DEFINITIONS})
+
+add_compiler_rt_object_libraries(RTXrayFDR
+ ARCHS ${XRAY_SUPPORTED_ARCH}
+ SOURCES ${XRAY_FDR_SOURCES} CFLAGS ${XRAY_CFLAGS}
+ DEFS ${XRAY_COMMON_DEFINITIONS})
+
+add_compiler_rt_component(xray)
+add_compiler_rt_component(xray-fdr)
+
+set(XRAY_COMMON_RUNTIME_OBJECT_LIBS
+ RTSanitizerCommon
+ RTSanitizerCommonLibc)
+
+foreach(arch ${XRAY_SUPPORTED_ARCH})
+ if(CAN_TARGET_${arch})
+ add_compiler_rt_runtime(clang_rt.xray
+ STATIC
+ ARCHS ${arch}
+ SOURCES ${${arch}_SOURCES}
+ CFLAGS ${XRAY_CFLAGS}
+ DEFS ${XRAY_COMMON_DEFINITIONS}
+ OBJECT_LIBS ${XRAY_COMMON_RUNTIME_OBJECT_LIBS}
+ PARENT_TARGET xray)
+ add_compiler_rt_runtime(clang_rt.xray-fdr
+ STATIC
+ ARCHS ${arch}
+ SOURCES ${XRAY_FDR_SOURCES}
+ CFLAGS ${XRAY_CFLAGS}
+ DEFS ${XRAY_COMMON_DEFINITIONS}
+ OBJECT_LIBS ${XRAY_COMMON_RUNTIME_OBJECT_LIBS}
+ PARENT_TARGET xray-fdr)
+ endif()
+endforeach()
+
+if(COMPILER_RT_INCLUDE_TESTS)
+ add_subdirectory(tests)
+endif()
diff --git a/lib/xray/tests/CMakeLists.txt b/lib/xray/tests/CMakeLists.txt
new file mode 100644
index 000000000000..6cb17934c895
--- /dev/null
+++ b/lib/xray/tests/CMakeLists.txt
@@ -0,0 +1,59 @@
+include_directories(..)
+
+add_custom_target(XRayUnitTests)
+set_target_properties(XRayUnitTests PROPERTIES FOLDER "XRay unittests")
+
+set(XRAY_UNITTEST_CFLAGS
+ ${XRAY_CFLAGS}
+ ${COMPILER_RT_UNITTEST_CFLAGS}
+ ${COMPILER_RT_GTEST_CFLAGS}
+ -I${COMPILER_RT_SOURCE_DIR}/include
+ -I${COMPILER_RT_SOURCE_DIR}/lib/xray)
+
+macro(xray_compile obj_list source arch)
+ get_filename_component(basename ${source} NAME)
+ set(output_obj "${basename}.${arch}.o")
+ get_target_flags_for_arch(${arch} TARGET_CFLAGS)
+ if(NOT COMPILER_RT_STANDALONE_BUILD)
+ list(APPEND COMPILE_DEPS gtest_main xray-fdr)
+ endif()
+ clang_compile(${output_obj} ${source}
+ CFLAGS ${XRAY_UNITTEST_CFLAGS} ${TARGET_CFLAGS}
+ DEPS ${COMPILE_DEPS})
+ list(APPEND ${obj_list} ${output_obj})
+endmacro()
+
+macro(add_xray_unittest testname)
+ set(XRAY_TEST_ARCH ${XRAY_SUPPORTED_ARCH})
+ if (APPLE)
+ darwin_filter_host_archs(XRAY_SUPPORTED_ARCH)
+ endif()
+ if(UNIX)
+ foreach(arch ${XRAY_TEST_ARCH})
+ cmake_parse_arguments(TEST "" "" "SOURCES;HEADERS" ${ARGN})
+ set(TEST_OBJECTS)
+ foreach(SOURCE ${TEST_SOURCES} ${COMPILER_RT_GTEST_SOURCE})
+ xray_compile(TEST_OBJECTS ${SOURCE} ${arch} ${TEST_HEADERS})
+ endforeach()
+ get_target_flags_for_arch(${arch} TARGET_LINK_FLAGS)
+ set(TEST_DEPS ${TEST_OBJECTS})
+ if(NOT COMPILER_RT_STANDALONE_BUILD)
+ list(APPEND TEST_DEPS gtest_main xray-fdr)
+ endif()
+ if(NOT APPLE)
+ add_compiler_rt_test(XRayUnitTests ${testname}
+ OBJECTS ${TEST_OBJECTS}
+ DEPS ${TEST_DEPS}
+ LINK_FLAGS ${TARGET_LINK_FLAGS}
+ -lstdc++ -lm ${CMAKE_THREAD_LIBS_INIT}
+ -lpthread
+ -L${COMPILER_RT_LIBRARY_OUTPUT_DIR} -lclang_rt.xray-fdr-${arch})
+ endif()
+ # FIXME: Figure out how to run even just the unit tests on APPLE.
+ endforeach()
+ endif()
+endmacro()
+
+if(COMPILER_RT_CAN_EXECUTE_TESTS)
+ add_subdirectory(unit)
+endif()
diff --git a/lib/xray/tests/unit/CMakeLists.txt b/lib/xray/tests/unit/CMakeLists.txt
new file mode 100644
index 000000000000..3e5412d41e6e
--- /dev/null
+++ b/lib/xray/tests/unit/CMakeLists.txt
@@ -0,0 +1,2 @@
+add_xray_unittest(XRayBufferQueueTest SOURCES
+ buffer_queue_test.cc xray_unit_test_main.cc)
diff --git a/lib/xray/tests/unit/buffer_queue_test.cc b/lib/xray/tests/unit/buffer_queue_test.cc
new file mode 100644
index 000000000000..d46f19402c2a
--- /dev/null
+++ b/lib/xray/tests/unit/buffer_queue_test.cc
@@ -0,0 +1,81 @@
+//===-- buffer_queue_test.cc ----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a function call tracing system.
+//
+//===----------------------------------------------------------------------===//
+#include "xray_buffer_queue.h"
+#include "gtest/gtest.h"
+
+#include <future>
+#include <system_error>
+#include <unistd.h>
+
+namespace __xray {
+
+static constexpr size_t kSize = 4096;
+
+TEST(BufferQueueTest, API) { BufferQueue Buffers(kSize, 1); }
+
+TEST(BufferQueueTest, GetAndRelease) {
+ BufferQueue Buffers(kSize, 1);
+ BufferQueue::Buffer Buf;
+ ASSERT_EQ(Buffers.getBuffer(Buf), std::error_code());
+ ASSERT_NE(nullptr, Buf.Buffer);
+ ASSERT_EQ(Buffers.releaseBuffer(Buf), std::error_code());
+ ASSERT_EQ(nullptr, Buf.Buffer);
+}
+
+TEST(BufferQueueTest, GetUntilFailed) {
+ BufferQueue Buffers(kSize, 1);
+ BufferQueue::Buffer Buf0;
+ EXPECT_EQ(Buffers.getBuffer(Buf0), std::error_code());
+ BufferQueue::Buffer Buf1;
+ EXPECT_EQ(std::errc::not_enough_memory, Buffers.getBuffer(Buf1));
+ EXPECT_EQ(Buffers.releaseBuffer(Buf0), std::error_code());
+}
+
+TEST(BufferQueueTest, ReleaseUnknown) {
+ BufferQueue Buffers(kSize, 1);
+ BufferQueue::Buffer Buf;
+ Buf.Buffer = reinterpret_cast<void *>(0xdeadbeef);
+ Buf.Size = kSize;
+ EXPECT_EQ(std::errc::argument_out_of_domain, Buffers.releaseBuffer(Buf));
+}
+
+TEST(BufferQueueTest, ErrorsWhenFinalising) {
+ BufferQueue Buffers(kSize, 2);
+ BufferQueue::Buffer Buf;
+ ASSERT_EQ(Buffers.getBuffer(Buf), std::error_code());
+ ASSERT_NE(nullptr, Buf.Buffer);
+ ASSERT_EQ(Buffers.finalize(), std::error_code());
+ BufferQueue::Buffer OtherBuf;
+ ASSERT_EQ(std::errc::state_not_recoverable, Buffers.getBuffer(OtherBuf));
+ ASSERT_EQ(std::errc::state_not_recoverable, Buffers.finalize());
+ ASSERT_EQ(Buffers.releaseBuffer(Buf), std::error_code());
+}
+
+TEST(BufferQueueTest, MultiThreaded) {
+ BufferQueue Buffers(kSize, 100);
+ auto F = [&] {
+ BufferQueue::Buffer B;
+ while (!Buffers.getBuffer(B)) {
+ Buffers.releaseBuffer(B);
+ }
+ };
+ auto T0 = std::async(std::launch::async, F);
+ auto T1 = std::async(std::launch::async, F);
+ auto T2 = std::async(std::launch::async, [&] {
+ while (!Buffers.finalize())
+ ;
+ });
+ F();
+}
+
+} // namespace __xray
diff --git a/lib/xray/tests/unit/xray_unit_test_main.cc b/lib/xray/tests/unit/xray_unit_test_main.cc
new file mode 100644
index 000000000000..27d17527dd5b
--- /dev/null
+++ b/lib/xray/tests/unit/xray_unit_test_main.cc
@@ -0,0 +1,18 @@
+//===-- xray_unit_test_main.cc --------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a function call tracing system.
+//
+//===----------------------------------------------------------------------===//
+#include "gtest/gtest.h"
+
+int main(int argc, char **argv) {
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/lib/xray/xray_AArch64.cc b/lib/xray/xray_AArch64.cc
new file mode 100644
index 000000000000..8f24610dab27
--- /dev/null
+++ b/lib/xray/xray_AArch64.cc
@@ -0,0 +1,119 @@
+//===-- xray_AArch64.cc -----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Implementation of AArch64-specific routines (64-bit).
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_common.h"
+#include "xray_defs.h"
+#include "xray_emulate_tsc.h"
+#include "xray_interface_internal.h"
+#include <atomic>
+#include <cassert>
+
+namespace __xray {
+
+uint64_t cycleFrequency() XRAY_NEVER_INSTRUMENT {
+ // There is no instruction like RDTSCP in user mode on ARM. ARM's CP15 does
+ // not have a constant frequency like TSC on x86[_64]; it may go faster or
+ // slower depending on CPU's turbo or power saving modes. Furthermore, to
+ // read from CP15 on ARM a kernel modification or a driver is needed.
+ // We can not require this from users of compiler-rt.
+ // So on ARM we use clock_gettime(2) which gives the result in nanoseconds.
+ // To get the measurements per second, we scale this by the number of
+ // nanoseconds per second, pretending that the TSC frequency is 1GHz and
+ // one TSC tick is 1 nanosecond.
+ return NanosecondsPerSecond;
+}
+
+// The machine codes for some instructions used in runtime patching.
+enum class PatchOpcodes : uint32_t {
+ PO_StpX0X30SP_m16e = 0xA9BF7BE0, // STP X0, X30, [SP, #-16]!
+ PO_LdrW0_12 = 0x18000060, // LDR W0, #12
+ PO_LdrX16_12 = 0x58000070, // LDR X16, #12
+ PO_BlrX16 = 0xD63F0200, // BLR X16
+ PO_LdpX0X30SP_16 = 0xA8C17BE0, // LDP X0, X30, [SP], #16
+ PO_B32 = 0x14000008 // B #32
+};
+
+inline static bool patchSled(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled,
+ void (*TracingHook)()) XRAY_NEVER_INSTRUMENT {
+ // When |Enable| == true,
+ // We replace the following compile-time stub (sled):
+ //
+ // xray_sled_n:
+ // B #32
+ // 7 NOPs (24 bytes)
+ //
+ // With the following runtime patch:
+ //
+ // xray_sled_n:
+ // STP X0, X30, [SP, #-16]! ; PUSH {r0, lr}
+ // LDR W0, #12 ; W0 := function ID
+ // LDR X16,#12 ; X16 := address of the trampoline
+ // BLR X16
+ // ;DATA: 32 bits of function ID
+ // ;DATA: lower 32 bits of the address of the trampoline
+ // ;DATA: higher 32 bits of the address of the trampoline
+ // LDP X0, X30, [SP], #16 ; POP {r0, lr}
+ //
+ // Replacement of the first 4-byte instruction should be the last and atomic
+ // operation, so that the user code which reaches the sled concurrently
+ // either jumps over the whole sled, or executes the whole sled when the
+ // latter is ready.
+ //
+ // When |Enable|==false, we set back the first instruction in the sled to be
+ // B #32
+
+ uint32_t *FirstAddress = reinterpret_cast<uint32_t *>(Sled.Address);
+ if (Enable) {
+ uint32_t *CurAddress = FirstAddress + 1;
+ *CurAddress = uint32_t(PatchOpcodes::PO_LdrW0_12);
+ CurAddress++;
+ *CurAddress = uint32_t(PatchOpcodes::PO_LdrX16_12);
+ CurAddress++;
+ *CurAddress = uint32_t(PatchOpcodes::PO_BlrX16);
+ CurAddress++;
+ *CurAddress = FuncId;
+ CurAddress++;
+ *reinterpret_cast<void (**)()>(CurAddress) = TracingHook;
+ CurAddress += 2;
+ *CurAddress = uint32_t(PatchOpcodes::PO_LdpX0X30SP_16);
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint32_t> *>(FirstAddress),
+ uint32_t(PatchOpcodes::PO_StpX0X30SP_m16e), std::memory_order_release);
+ } else {
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint32_t> *>(FirstAddress),
+ uint32_t(PatchOpcodes::PO_B32), std::memory_order_release);
+ }
+ return true;
+}
+
+bool patchFunctionEntry(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionEntry);
+}
+
+bool patchFunctionExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionExit);
+}
+
+bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // FIXME: In the future we'd need to distinguish between non-tail exits and
+ // tail exits for better information preservation.
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionExit);
+}
+
+} // namespace __xray
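
As a quick sanity check of the PatchOpcodes comments above, the two literal-load opcodes can be reproduced from the standard A64 "LDR (literal)" encoding. This is a standalone check against publicly documented encodings, not part of the runtime:

#include <cstdint>

// LDR (literal): opc (2 bits) | 0b011000 | imm19 | Rt (5 bits),
// where imm19 is the byte offset divided by 4.
constexpr uint32_t LdrLiteral(uint32_t opc, uint32_t byte_off, uint32_t rt) {
  return (opc << 30) | (0x18u << 24) | ((byte_off / 4) << 5) | rt;
}

static_assert(LdrLiteral(0, 12, 0) == 0x18000060, "LDR W0, #12");
static_assert(LdrLiteral(1, 12, 16) == 0x58000070, "LDR X16, #12");
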
diff --git a/lib/xray/xray_arm.cc b/lib/xray/xray_arm.cc
new file mode 100644
index 000000000000..d89322e833e5
--- /dev/null
+++ b/lib/xray/xray_arm.cc
@@ -0,0 +1,156 @@
+//===-- xray_arm.cc ---------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Implementation of ARM-specific routines (32-bit).
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_common.h"
+#include "xray_defs.h"
+#include "xray_emulate_tsc.h"
+#include "xray_interface_internal.h"
+#include <atomic>
+#include <cassert>
+
+namespace __xray {
+
+uint64_t cycleFrequency() XRAY_NEVER_INSTRUMENT {
+ // There is no instruction like RDTSCP in user mode on ARM. ARM's CP15 does
+ // not have a constant frequency like TSC on x86[_64]; it may go faster or
+ // slower depending on CPU's turbo or power saving modes. Furthermore, to
+ // read from CP15 on ARM a kernel modification or a driver is needed.
+ // We can not require this from users of compiler-rt.
+ // So on ARM we use clock_gettime(2) which gives the result in nanoseconds.
+ // To get the measurements per second, we scale this by the number of
+ // nanoseconds per second, pretending that the TSC frequency is 1GHz and
+ // one TSC tick is 1 nanosecond.
+ return NanosecondsPerSecond;
+}
+
+// The machine codes for some instructions used in runtime patching.
+enum class PatchOpcodes : uint32_t {
+ PO_PushR0Lr = 0xE92D4001, // PUSH {r0, lr}
+ PO_BlxIp = 0xE12FFF3C, // BLX ip
+ PO_PopR0Lr = 0xE8BD4001, // POP {r0, lr}
+ PO_B20 = 0xEA000005 // B #20
+};
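+
+// Note on PO_B20: 0xEA000005 encodes "B" with imm24 == 5. On ARM the branch
+// target is computed relative to PC, which reads 8 bytes ahead of the branch
+// instruction, so the effective displacement is 5 * 4 + 8 = 28 bytes, i.e.
+// the branch lands just past the 28-byte sled.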
+
+// 0xUUUUWXYZ -> 0x000W0XYZ
+inline static uint32_t getMovwMask(const uint32_t Value) XRAY_NEVER_INSTRUMENT {
+ return (Value & 0xfff) | ((Value & 0xf000) << 4);
+}
+
+// 0xWXYZUUUU -> 0x000W0XYZ
+inline static uint32_t getMovtMask(const uint32_t Value) XRAY_NEVER_INSTRUMENT {
+ return getMovwMask(Value >> 16);
+}
+
+// Writes the following instructions:
+// MOVW R<regNo>, #<lower 16 bits of the |Value|>
+// MOVT R<regNo>, #<higher 16 bits of the |Value|>
+inline static uint32_t *
+write32bitLoadReg(uint8_t regNo, uint32_t *Address,
+ const uint32_t Value) XRAY_NEVER_INSTRUMENT {
+ // This is a fatal error: we cannot just report it and continue execution.
+ assert(regNo <= 15 && "Register number must be 0 to 15.");
+ // MOVW R, #0xWXYZ in machine code is 0xE30WRXYZ
+ *Address = (0xE3000000 | (uint32_t(regNo) << 12) | getMovwMask(Value));
+ Address++;
+ // MOVT R, #0xWXYZ in machine code is 0xE34WRXYZ
+ *Address = (0xE3400000 | (uint32_t(regNo) << 12) | getMovtMask(Value));
+ return Address + 1;
+}
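+
+// For illustration, write32bitLoadReg(0, Address, 0x12345678) emits:
+//   MOVW r0, #0x5678    ; machine code 0xE3050678
+//   MOVT r0, #0x1234    ; machine code 0xE3410234
+// leaving r0 == 0x12345678, and returns Address + 2.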
+
+// Writes the following instructions:
+// MOVW r0, #<lower 16 bits of the |Value|>
+// MOVT r0, #<higher 16 bits of the |Value|>
+inline static uint32_t *
+Write32bitLoadR0(uint32_t *Address,
+ const uint32_t Value) XRAY_NEVER_INSTRUMENT {
+ return write32bitLoadReg(0, Address, Value);
+}
+
+// Writes the following instructions:
+// MOVW ip, #<lower 16 bits of the |Value|>
+// MOVT ip, #<higher 16 bits of the |Value|>
+inline static uint32_t *
+Write32bitLoadIP(uint32_t *Address,
+ const uint32_t Value) XRAY_NEVER_INSTRUMENT {
+ return write32bitLoadReg(12, Address, Value);
+}
+
+inline static bool patchSled(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled,
+ void (*TracingHook)()) XRAY_NEVER_INSTRUMENT {
+ // When |Enable| == true,
+ // We replace the following compile-time stub (sled):
+ //
+ // xray_sled_n:
+ // B #20
+ // 6 NOPs (24 bytes)
+ //
+ // With the following runtime patch:
+ //
+ // xray_sled_n:
+ // PUSH {r0, lr}
+ // MOVW r0, #<lower 16 bits of function ID>
+ // MOVT r0, #<higher 16 bits of function ID>
+ // MOVW ip, #<lower 16 bits of address of TracingHook>
+ // MOVT ip, #<higher 16 bits of address of TracingHook>
+ // BLX ip
+ // POP {r0, lr}
+ //
+ // Replacement of the first 4-byte instruction should be the last and atomic
+ // operation, so that the user code which reaches the sled concurrently
+ // either jumps over the whole sled, or executes the whole sled when the
+ // latter is ready.
+ //
+ // When |Enable|==false, we set back the first instruction in the sled to be
+ // B #20
+
+ uint32_t *FirstAddress = reinterpret_cast<uint32_t *>(Sled.Address);
+ if (Enable) {
+ uint32_t *CurAddress = FirstAddress + 1;
+ CurAddress =
+ Write32bitLoadR0(CurAddress, reinterpret_cast<uint32_t>(FuncId));
+ CurAddress =
+ Write32bitLoadIP(CurAddress, reinterpret_cast<uint32_t>(TracingHook));
+ *CurAddress = uint32_t(PatchOpcodes::PO_BlxIp);
+ CurAddress++;
+ *CurAddress = uint32_t(PatchOpcodes::PO_PopR0Lr);
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint32_t> *>(FirstAddress),
+ uint32_t(PatchOpcodes::PO_PushR0Lr), std::memory_order_release);
+ } else {
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint32_t> *>(FirstAddress),
+ uint32_t(PatchOpcodes::PO_B20), std::memory_order_release);
+ }
+ return true;
+}
+
+bool patchFunctionEntry(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionEntry);
+}
+
+bool patchFunctionExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionExit);
+}
+
+bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // FIXME: In the future we'd need to distinguish between non-tail exits and
+ // tail exits for better information preservation.
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionExit);
+}
+
+} // namespace __xray
diff --git a/lib/xray/xray_buffer_queue.cc b/lib/xray/xray_buffer_queue.cc
new file mode 100644
index 000000000000..7e5462fb8e11
--- /dev/null
+++ b/lib/xray/xray_buffer_queue.cc
@@ -0,0 +1,65 @@
+//===-- xray_buffer_queue.cc -----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Defines the interface for a buffer queue implementation.
+//
+//===----------------------------------------------------------------------===//
+#include "xray_buffer_queue.h"
+#include <cassert>
+#include <cstdlib>
+
+using namespace __xray;
+
+BufferQueue::BufferQueue(std::size_t B, std::size_t N)
+ : BufferSize(B), Buffers(N), Mutex(), OwnedBuffers(), Finalizing(false) {
+ for (auto &Buf : Buffers) {
+ void *Tmp = malloc(BufferSize);
+ Buf.Buffer = Tmp;
+ Buf.Size = B;
+ if (Tmp != 0)
+ OwnedBuffers.insert(Tmp);
+ }
+}
+
+std::error_code BufferQueue::getBuffer(Buffer &Buf) {
+ if (Finalizing.load(std::memory_order_acquire))
+ return std::make_error_code(std::errc::state_not_recoverable);
+ std::lock_guard<std::mutex> Guard(Mutex);
+ if (Buffers.empty())
+ return std::make_error_code(std::errc::not_enough_memory);
+ Buf = Buffers.front();
+ Buffers.pop_front();
+ return {};
+}
+
+std::error_code BufferQueue::releaseBuffer(Buffer &Buf) {
+ if (OwnedBuffers.count(Buf.Buffer) == 0)
+ return std::make_error_code(std::errc::argument_out_of_domain);
+ std::lock_guard<std::mutex> Guard(Mutex);
+ Buffers.push_back(Buf);
+ Buf.Buffer = nullptr;
+ Buf.Size = BufferSize;
+ return {};
+}
+
+std::error_code BufferQueue::finalize() {
+ if (Finalizing.exchange(true, std::memory_order_acq_rel))
+ return std::make_error_code(std::errc::state_not_recoverable);
+ return {};
+}
+
+BufferQueue::~BufferQueue() {
+ for (auto &Buf : Buffers) {
+ free(Buf.Buffer);
+ Buf.Buffer = nullptr;
+ Buf.Size = 0;
+ }
+}
diff --git a/lib/xray/xray_buffer_queue.h b/lib/xray/xray_buffer_queue.h
new file mode 100644
index 000000000000..bf0b7af9df4d
--- /dev/null
+++ b/lib/xray/xray_buffer_queue.h
@@ -0,0 +1,86 @@
+//===-- xray_buffer_queue.h ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Defines the interface for a buffer queue implementation.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_BUFFER_QUEUE_H
+#define XRAY_BUFFER_QUEUE_H
+
+#include <atomic>
+#include <cstdint>
+#include <deque>
+#include <mutex>
+#include <system_error>
+#include <unordered_set>
+
+namespace __xray {
+
+/// BufferQueue implements a circular queue of fixed-sized buffers (much like a
+/// freelist) but is concerned mostly with making it really quick to initialise,
+/// finalise, and get/return buffers to the queue. This is one key component of
+/// the "flight data recorder" (FDR) mode to support ongoing XRay function call
+/// trace collection.
+class BufferQueue {
+public:
+ struct Buffer {
+ void *Buffer = nullptr;
+ std::size_t Size = 0;
+ };
+
+private:
+ std::size_t BufferSize;
+ std::deque<Buffer> Buffers;
+ std::mutex Mutex;
+ std::unordered_set<void *> OwnedBuffers;
+ std::atomic<bool> Finalizing;
+
+public:
+ /// Initialise a queue of size |N| with buffers of size |B|.
+ BufferQueue(std::size_t B, std::size_t N);
+
+  /// Updates |Buf| to contain the pointer to an appropriate buffer. Returns an
+  /// error if there are no available buffers, i.e. when handing out another
+  /// buffer would exceed the upper bound on the total number of buffers.
+ ///
+ /// Requirements:
+ /// - BufferQueue is not finalising.
+ ///
+ /// Returns:
+ /// - std::errc::not_enough_memory on exceeding MaxSize.
+ /// - no error when we find a Buffer.
+ /// - std::errc::state_not_recoverable on finalising BufferQueue.
+ std::error_code getBuffer(Buffer &Buf);
+
+ /// Updates |Buf| to point to nullptr, with size 0.
+ ///
+ /// Returns:
+  ///   - std::errc::argument_out_of_domain if the buffer is not owned by this
+  ///     queue.
+  ///   - no error when the buffer is successfully returned to the queue.
+ std::error_code releaseBuffer(Buffer &Buf);
+
+ bool finalizing() const { return Finalizing.load(std::memory_order_acquire); }
+
+ // Sets the state of the BufferQueue to finalizing, which ensures that:
+ //
+ // - All subsequent attempts to retrieve a Buffer will fail.
+  //   - All releaseBuffer operations will continue to succeed.
+ //
+ // After a call to finalize succeeds, all subsequent calls to finalize will
+ // fail with std::errc::state_not_recoverable.
+ std::error_code finalize();
+
+ // Cleans up allocated buffers.
+ ~BufferQueue();
+};
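+
+// A minimal usage sketch of the queue above (illustrative only):
+//
+//   BufferQueue BQ(/*B=*/4096, /*N=*/8);
+//   BufferQueue::Buffer Buf;
+//   if (!BQ.getBuffer(Buf)) {        // no error means we own a buffer.
+//     // ... fill Buf.Buffer with up to Buf.Size bytes ...
+//     BQ.releaseBuffer(Buf);         // hand it back to the queue.
+//   }
+//   BQ.finalize();                   // later getBuffer() calls will fail.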
+
+} // namespace __xray
+
+#endif // XRAY_BUFFER_QUEUE_H
diff --git a/lib/xray/xray_defs.h b/lib/xray/xray_defs.h
new file mode 100644
index 000000000000..e5c37c0665db
--- /dev/null
+++ b/lib/xray/xray_defs.h
@@ -0,0 +1,22 @@
+//===-- xray_defs.h ---------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Common definitions useful for XRay sources.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_XRAY_DEFS_H
+#define XRAY_XRAY_DEFS_H
+
+#if XRAY_SUPPORTED
+#define XRAY_NEVER_INSTRUMENT __attribute__((xray_never_instrument))
+#else
+#define XRAY_NEVER_INSTRUMENT
+#endif
+
+#endif // XRAY_XRAY_DEFS_H
diff --git a/lib/xray/xray_emulate_tsc.h b/lib/xray/xray_emulate_tsc.h
new file mode 100644
index 000000000000..a3e8b1c92eb4
--- /dev/null
+++ b/lib/xray/xray_emulate_tsc.h
@@ -0,0 +1,40 @@
+//===-- xray_emulate_tsc.h --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_EMULATE_TSC_H
+#define XRAY_EMULATE_TSC_H
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "xray_defs.h"
+#include <cerrno>
+#include <cstdint>
+#include <time.h>
+
+namespace __xray {
+
+static constexpr uint64_t NanosecondsPerSecond = 1000ULL * 1000 * 1000;
+
+ALWAYS_INLINE uint64_t readTSC(uint8_t &CPU) XRAY_NEVER_INSTRUMENT {
+ timespec TS;
+ int result = clock_gettime(CLOCK_REALTIME, &TS);
+ if (result != 0) {
+ Report("clock_gettime(2) returned %d, errno=%d.", result, int(errno));
+ TS.tv_sec = 0;
+ TS.tv_nsec = 0;
+ }
+ CPU = 0;
+ return TS.tv_sec * NanosecondsPerSecond + TS.tv_nsec;
+}
+}
+
+#endif // XRAY_EMULATE_TSC_H
diff --git a/lib/xray/xray_flags.cc b/lib/xray/xray_flags.cc
new file mode 100644
index 000000000000..338c2378b8cd
--- /dev/null
+++ b/lib/xray/xray_flags.cc
@@ -0,0 +1,62 @@
+//===-- xray_flags.cc -------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// XRay flag parsing logic.
+//===----------------------------------------------------------------------===//
+
+#include "xray_flags.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "xray_defs.h"
+
+using namespace __sanitizer;
+
+namespace __xray {
+
+Flags xray_flags_dont_use_directly; // use via flags().
+
+void Flags::SetDefaults() XRAY_NEVER_INSTRUMENT {
+#define XRAY_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "xray_flags.inc"
+#undef XRAY_FLAG
+}
+
+static void RegisterXRayFlags(FlagParser *P, Flags *F) XRAY_NEVER_INSTRUMENT {
+#define XRAY_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(P, #Name, Description, &F->Name);
+#include "xray_flags.inc"
+#undef XRAY_FLAG
+}
+
+void InitializeFlags() XRAY_NEVER_INSTRUMENT {
+ SetCommonFlagsDefaults();
+ auto *F = flags();
+ F->SetDefaults();
+
+ FlagParser XRayParser;
+ RegisterXRayFlags(&XRayParser, F);
+ RegisterCommonFlags(&XRayParser);
+
+ // Override from command line.
+ XRayParser.ParseString(GetEnv("XRAY_OPTIONS"));
+
+ InitializeCommonFlags();
+
+ if (Verbosity())
+ ReportUnrecognizedFlags();
+
+ if (common_flags()->help) {
+ XRayParser.PrintFlagDescriptions();
+ }
+}
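+
+// For reference, flags are supplied through the XRAY_OPTIONS environment
+// variable using the common sanitizer "name=value" syntax, for example
+// (illustrative):
+//
+//   XRAY_OPTIONS="patch_premain=false xray_logfile_base=/tmp/xray-" ./a.out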
+
+} // namespace __xray
diff --git a/lib/xray/xray_flags.h b/lib/xray/xray_flags.h
new file mode 100644
index 000000000000..2ecf5fb9ba1d
--- /dev/null
+++ b/lib/xray/xray_flags.h
@@ -0,0 +1,37 @@
+//===-- xray_flags.h -------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// XRay runtime flags.
+//===----------------------------------------------------------------------===//
+
+#ifndef XRAY_FLAGS_H
+#define XRAY_FLAGS_H
+
+#include "sanitizer_common/sanitizer_flag_parser.h"
+
+namespace __xray {
+
+struct Flags {
+#define XRAY_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "xray_flags.inc"
+#undef XRAY_FLAG
+
+ void SetDefaults();
+};
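+
+// For illustration, with the definitions in xray_flags.inc the struct above
+// expands roughly to:
+//
+//   struct Flags {
+//     bool patch_premain;
+//     bool xray_naive_log;
+//     const char *xray_logfile_base;
+//     void SetDefaults();
+//   };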
+
+extern Flags xray_flags_dont_use_directly;
+inline Flags *flags() { return &xray_flags_dont_use_directly; }
+
+void InitializeFlags();
+
+} // namespace __xray
+
+#endif // XRAY_FLAGS_H
diff --git a/lib/xray/xray_flags.inc b/lib/xray/xray_flags.inc
new file mode 100644
index 000000000000..0f6ced8ead0c
--- /dev/null
+++ b/lib/xray/xray_flags.inc
@@ -0,0 +1,22 @@
+//===-- xray_flags.inc ------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// XRay runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_FLAG
+#error "Define XRAY_FLAG prior to including this file!"
+#endif
+
+XRAY_FLAG(bool, patch_premain, true,
+ "Whether to patch instrumentation points before main.")
+XRAY_FLAG(bool, xray_naive_log, true,
+ "Whether to install the naive log implementation.")
+XRAY_FLAG(const char *, xray_logfile_base, "xray-log.",
+ "Filename base for the xray logfile.")
diff --git a/lib/xray/xray_init.cc b/lib/xray/xray_init.cc
new file mode 100644
index 000000000000..eb86182910cf
--- /dev/null
+++ b/lib/xray/xray_init.cc
@@ -0,0 +1,70 @@
+//===-- xray_init.cc --------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// XRay initialisation logic.
+//===----------------------------------------------------------------------===//
+
+#include <atomic>
+#include <fcntl.h>
+#include <strings.h>
+#include <unistd.h>
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "xray_defs.h"
+#include "xray_flags.h"
+#include "xray_interface_internal.h"
+
+extern "C" {
+void __xray_init();
+extern const XRaySledEntry __start_xray_instr_map[] __attribute__((weak));
+extern const XRaySledEntry __stop_xray_instr_map[] __attribute__((weak));
+}
+
+using namespace __sanitizer;
+using namespace __xray;
+
+// When set to 'true' this means the XRay runtime has been initialised. We use
+// the weak symbols defined above (__start_xray_instr_map and
+// __stop_xray_instr_map) to initialise the instrumentation map that XRay uses
+// for runtime patching/unpatching of instrumentation points.
+//
+// FIXME: Support DSO instrumentation maps too. The current solution only works
+// for statically linked executables.
+std::atomic<bool> XRayInitialized{false};
+
+// This should always be updated before XRayInitialized is updated.
+std::atomic<__xray::XRaySledMap> XRayInstrMap{};
+
+// __xray_init() will do the actual loading of the current process' memory map
+// and then proceed to look for the .xray_instr_map section/segment.
+void __xray_init() XRAY_NEVER_INSTRUMENT {
+ InitializeFlags();
+ if (__start_xray_instr_map == nullptr) {
+ Report("XRay instrumentation map missing. Not initializing XRay.\n");
+ return;
+ }
+
+ // Now initialize the XRayInstrMap global struct with the address of the
+ // entries, reinterpreted as an array of XRaySledEntry objects. We use the
+ // virtual pointer we have from the section to provide us the correct
+ // information.
+ __xray::XRaySledMap SledMap{};
+ SledMap.Sleds = __start_xray_instr_map;
+ SledMap.Entries = __stop_xray_instr_map - __start_xray_instr_map;
+ XRayInstrMap.store(SledMap, std::memory_order_release);
+ XRayInitialized.store(true, std::memory_order_release);
+
+ if (flags()->patch_premain)
+ __xray_patch();
+}
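+
+// When the patch_premain flag is false, the sleds stay unpatched until the
+// application enables instrumentation itself, for example (illustrative;
+// MyHandler is a hypothetical user-supplied function):
+//
+//   #include "xray/xray_interface.h"
+//
+//   void MyHandler(int32_t FuncId, XRayEntryType Type);
+//
+//   int main() {
+//     __xray_set_handler(MyHandler);  // start receiving events.
+//     __xray_patch();                 // patch the sleds at runtime.
+//     // ... instrumented code ...
+//   }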
+
+__attribute__((section(".preinit_array"),
+ used)) void (*__local_xray_preinit)(void) = __xray_init;
diff --git a/lib/xray/xray_inmemory_log.cc b/lib/xray/xray_inmemory_log.cc
new file mode 100644
index 000000000000..7ec56f486707
--- /dev/null
+++ b/lib/xray/xray_inmemory_log.cc
@@ -0,0 +1,185 @@
+//===-- xray_inmemory_log.cc ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Implementation of a simple in-memory log of XRay events. This defines a
+// logging function that's compatible with the XRay handler interface, and
+// routines for exporting data to files.
+//
+//===----------------------------------------------------------------------===//
+
+#include <cassert>
+#include <cstdint>
+#include <cstdio>
+#include <fcntl.h>
+#include <mutex>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <thread>
+#include <unistd.h>
+
+#if defined(__x86_64__)
+#include "xray_x86_64.h"
+#elif defined(__arm__) || defined(__aarch64__)
+#include "xray_emulate_tsc.h"
+#else
+#error "Unsupported CPU Architecture"
+#endif /* Architecture-specific inline intrinsics */
+
+#include "sanitizer_common/sanitizer_libc.h"
+#include "xray/xray_records.h"
+#include "xray_defs.h"
+#include "xray_flags.h"
+#include "xray_interface_internal.h"
+
+// __xray_InMemoryRawLog uses a thread-local aligned buffer capped to a certain
+// size (32kb by default), treating it as a circular buffer for events. We
+// store simple fixed-sized entries in the log for external analysis.
+
+extern "C" {
+void __xray_InMemoryRawLog(int32_t FuncId,
+ XRayEntryType Type) XRAY_NEVER_INSTRUMENT;
+}
+
+namespace __xray {
+
+std::mutex LogMutex;
+
+static void retryingWriteAll(int Fd, char *Begin,
+ char *End) XRAY_NEVER_INSTRUMENT {
+ if (Begin == End)
+ return;
+ auto TotalBytes = std::distance(Begin, End);
+ while (auto Written = write(Fd, Begin, TotalBytes)) {
+ if (Written < 0) {
+ if (errno == EINTR)
+ continue; // Try again.
+ Report("Failed to write; errno = %d\n", errno);
+ return;
+ }
+ TotalBytes -= Written;
+ if (TotalBytes == 0)
+ break;
+ Begin += Written;
+ }
+}
+
+class ThreadExitFlusher {
+ int Fd;
+ XRayRecord *Start;
+ size_t &Offset;
+
+public:
+ explicit ThreadExitFlusher(int Fd, XRayRecord *Start,
+ size_t &Offset) XRAY_NEVER_INSTRUMENT
+ : Fd(Fd),
+ Start(Start),
+ Offset(Offset) {}
+
+ ~ThreadExitFlusher() XRAY_NEVER_INSTRUMENT {
+ std::lock_guard<std::mutex> L(LogMutex);
+ if (Fd > 0 && Start != nullptr) {
+ retryingWriteAll(Fd, reinterpret_cast<char *>(Start),
+ reinterpret_cast<char *>(Start + Offset));
+      // Because this thread's exit could be the last one trying to write to
+      // the file, and because we're not able to close out the file properly,
+      // we sync instead and hope that the pending writes are flushed as the
+      // thread exits.
+ fsync(Fd);
+ }
+ }
+};
+
+} // namespace __xray
+
+using namespace __xray;
+
+void PrintToStdErr(const char *Buffer) XRAY_NEVER_INSTRUMENT {
+ fprintf(stderr, "%s", Buffer);
+}
+
+static int __xray_OpenLogFile() XRAY_NEVER_INSTRUMENT {
+ // FIXME: Figure out how to make this less stderr-dependent.
+ SetPrintfAndReportCallback(PrintToStdErr);
+ // Open a temporary file once for the log.
+ static char TmpFilename[256] = {};
+ static char TmpWildcardPattern[] = "XXXXXX";
+ auto E = internal_strncat(TmpFilename, flags()->xray_logfile_base,
+ sizeof(TmpFilename) - 10);
+ if (static_cast<size_t>((E + 6) - TmpFilename) > (sizeof(TmpFilename) - 1)) {
+ Report("XRay log file base too long: %s\n", flags()->xray_logfile_base);
+ return -1;
+ }
+ internal_strncat(TmpFilename, TmpWildcardPattern,
+ sizeof(TmpWildcardPattern) - 1);
+ int Fd = mkstemp(TmpFilename);
+ if (Fd == -1) {
+ Report("XRay: Failed opening temporary file '%s'; not logging events.\n",
+ TmpFilename);
+ return -1;
+ }
+ if (Verbosity())
+ fprintf(stderr, "XRay: Log file in '%s'\n", TmpFilename);
+
+ // Since we're here, we get to write the header. We set it up so that the
+ // header will only be written once, at the start, and let the threads
+ // logging do writes which just append.
+ XRayFileHeader Header;
+ Header.Version = 1;
+ Header.Type = FileTypes::NAIVE_LOG;
+ Header.CycleFrequency = __xray::cycleFrequency();
+
+ // FIXME: Actually check whether we have 'constant_tsc' and 'nonstop_tsc'
+ // before setting the values in the header.
+ Header.ConstantTSC = 1;
+ Header.NonstopTSC = 1;
+ retryingWriteAll(Fd, reinterpret_cast<char *>(&Header),
+ reinterpret_cast<char *>(&Header) + sizeof(Header));
+ return Fd;
+}
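+
+// With the default flags this produces a file named "xray-log.XXXXXX" in the
+// current working directory, with the XXXXXX suffix filled in by mkstemp(3).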
+
+void __xray_InMemoryRawLog(int32_t FuncId,
+ XRayEntryType Type) XRAY_NEVER_INSTRUMENT {
+ using Buffer =
+ std::aligned_storage<sizeof(XRayRecord), alignof(XRayRecord)>::type;
+ static constexpr size_t BuffLen = 1024;
+ thread_local static Buffer InMemoryBuffer[BuffLen] = {};
+ thread_local static size_t Offset = 0;
+ static int Fd = __xray_OpenLogFile();
+ if (Fd == -1)
+ return;
+ thread_local __xray::ThreadExitFlusher Flusher(
+ Fd, reinterpret_cast<__xray::XRayRecord *>(InMemoryBuffer), Offset);
+ thread_local pid_t TId = syscall(SYS_gettid);
+
+ // First we get the useful data, and stuff it into the already aligned buffer
+ // through a pointer offset.
+ auto &R = reinterpret_cast<__xray::XRayRecord *>(InMemoryBuffer)[Offset];
+ R.RecordType = RecordTypes::NORMAL;
+ R.TSC = __xray::readTSC(R.CPU);
+ R.TId = TId;
+ R.Type = Type;
+ R.FuncId = FuncId;
+ ++Offset;
+ if (Offset == BuffLen) {
+ std::lock_guard<std::mutex> L(LogMutex);
+ auto RecordBuffer = reinterpret_cast<__xray::XRayRecord *>(InMemoryBuffer);
+ retryingWriteAll(Fd, reinterpret_cast<char *>(RecordBuffer),
+ reinterpret_cast<char *>(RecordBuffer + Offset));
+ Offset = 0;
+ }
+}
+
+static auto Unused = [] {
+ if (flags()->xray_naive_log)
+ __xray_set_handler(__xray_InMemoryRawLog);
+ return true;
+}();
diff --git a/lib/xray/xray_interface.cc b/lib/xray/xray_interface.cc
new file mode 100644
index 000000000000..20a2b66c4401
--- /dev/null
+++ b/lib/xray/xray_interface.cc
@@ -0,0 +1,207 @@
+//===-- xray_interface.cc ---------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Implementation of the API functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "xray_interface_internal.h"
+
+#include <atomic>
+#include <cstdint>
+#include <cstdio>
+#include <errno.h>
+#include <limits>
+#include <sys/mman.h>
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "xray_defs.h"
+
+namespace __xray {
+
+#if defined(__x86_64__)
+// FIXME: The actual length is 11 bytes. Why was length 12 passed to
+// mprotect()?
+static const int16_t cSledLength = 12;
+#elif defined(__aarch64__)
+static const int16_t cSledLength = 32;
+#elif defined(__arm__)
+static const int16_t cSledLength = 28;
+#else
+#error "Unsupported CPU Architecture"
+#endif /* CPU architecture */
+
+// This is the function to call when we encounter the entry or exit sleds.
+std::atomic<void (*)(int32_t, XRayEntryType)> XRayPatchedFunction{nullptr};
+
+// MProtectHelper is an RAII wrapper for calls to mprotect(...) that will undo
+// any successful mprotect(...) changes. It is used to make a page writeable
+// and executable; on destruction, if the change succeeded, it restores the
+// page to read-only and executable permissions.
+//
+// This is only used specifically for runtime-patching of the XRay
+// instrumentation points. This assumes that the executable pages are originally
+// read-and-execute only.
+class MProtectHelper {
+ void *PageAlignedAddr;
+ std::size_t MProtectLen;
+ bool MustCleanup;
+
+public:
+ explicit MProtectHelper(void *PageAlignedAddr,
+ std::size_t MProtectLen) XRAY_NEVER_INSTRUMENT
+ : PageAlignedAddr(PageAlignedAddr),
+ MProtectLen(MProtectLen),
+ MustCleanup(false) {}
+
+ int MakeWriteable() XRAY_NEVER_INSTRUMENT {
+ auto R = mprotect(PageAlignedAddr, MProtectLen,
+ PROT_READ | PROT_WRITE | PROT_EXEC);
+ if (R != -1)
+ MustCleanup = true;
+ return R;
+ }
+
+ ~MProtectHelper() XRAY_NEVER_INSTRUMENT {
+ if (MustCleanup) {
+ mprotect(PageAlignedAddr, MProtectLen, PROT_READ | PROT_EXEC);
+ }
+ }
+};
+
+} // namespace __xray
+
+extern std::atomic<bool> XRayInitialized;
+extern std::atomic<__xray::XRaySledMap> XRayInstrMap;
+
+int __xray_set_handler(void (*entry)(int32_t,
+ XRayEntryType)) XRAY_NEVER_INSTRUMENT {
+ if (XRayInitialized.load(std::memory_order_acquire)) {
+ __xray::XRayPatchedFunction.store(entry, std::memory_order_release);
+ return 1;
+ }
+ return 0;
+}
+
+int __xray_remove_handler() XRAY_NEVER_INSTRUMENT {
+ return __xray_set_handler(nullptr);
+}
+
+std::atomic<bool> XRayPatching{false};
+
+using namespace __xray;
+
+// FIXME: Figure out whether we can move this class to sanitizer_common instead
+// as a generic "scope guard".
+template <class Function> class CleanupInvoker {
+ Function Fn;
+
+public:
+ explicit CleanupInvoker(Function Fn) XRAY_NEVER_INSTRUMENT : Fn(Fn) {}
+ CleanupInvoker(const CleanupInvoker &) XRAY_NEVER_INSTRUMENT = default;
+ CleanupInvoker(CleanupInvoker &&) XRAY_NEVER_INSTRUMENT = default;
+ CleanupInvoker &
+ operator=(const CleanupInvoker &) XRAY_NEVER_INSTRUMENT = delete;
+ CleanupInvoker &operator=(CleanupInvoker &&) XRAY_NEVER_INSTRUMENT = delete;
+ ~CleanupInvoker() XRAY_NEVER_INSTRUMENT { Fn(); }
+};
+
+template <class Function>
+CleanupInvoker<Function> ScopeCleanup(Function Fn) XRAY_NEVER_INSTRUMENT {
+ return CleanupInvoker<Function>{Fn};
+}
+
+// ControlPatching implements the common internals of the patching/unpatching
+// implementation. |Enable| defines whether we're enabling or disabling the
+// runtime XRay instrumentation.
+XRayPatchingStatus ControlPatching(bool Enable) XRAY_NEVER_INSTRUMENT {
+ if (!XRayInitialized.load(std::memory_order_acquire))
+ return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized.
+
+ static bool NotPatching = false;
+ if (!XRayPatching.compare_exchange_strong(NotPatching, true,
+ std::memory_order_acq_rel,
+ std::memory_order_acquire)) {
+ return XRayPatchingStatus::ONGOING; // Already patching.
+ }
+
+ bool PatchingSuccess = false;
+ auto XRayPatchingStatusResetter = ScopeCleanup([&PatchingSuccess] {
+ if (!PatchingSuccess) {
+ XRayPatching.store(false, std::memory_order_release);
+ }
+ });
+
+ // Step 1: Compute the function id, as a unique identifier per function in the
+ // instrumentation map.
+ XRaySledMap InstrMap = XRayInstrMap.load(std::memory_order_acquire);
+ if (InstrMap.Entries == 0)
+ return XRayPatchingStatus::NOT_INITIALIZED;
+
+ const uint64_t PageSize = GetPageSizeCached();
+ if ((PageSize == 0) || ((PageSize & (PageSize - 1)) != 0)) {
+ Report("System page size is not a power of two: %lld\n", PageSize);
+ return XRayPatchingStatus::FAILED;
+ }
+
+ uint32_t FuncId = 1;
+ uint64_t CurFun = 0;
+ for (std::size_t I = 0; I < InstrMap.Entries; I++) {
+ auto Sled = InstrMap.Sleds[I];
+ auto F = Sled.Function;
+ if (CurFun == 0)
+ CurFun = F;
+ if (F != CurFun) {
+ ++FuncId;
+ CurFun = F;
+ }
+
+ // While we're here, we should patch the nop sled. To do that we mprotect
+ // the page containing the function to be writeable.
+ void *PageAlignedAddr =
+ reinterpret_cast<void *>(Sled.Address & ~(PageSize - 1));
+ std::size_t MProtectLen = (Sled.Address + cSledLength) -
+ reinterpret_cast<uint64_t>(PageAlignedAddr);
+ MProtectHelper Protector(PageAlignedAddr, MProtectLen);
+ if (Protector.MakeWriteable() == -1) {
+ printf("Failed mprotect: %d\n", errno);
+ return XRayPatchingStatus::FAILED;
+ }
+
+ bool Success = false;
+ switch (Sled.Kind) {
+ case XRayEntryType::ENTRY:
+ Success = patchFunctionEntry(Enable, FuncId, Sled);
+ break;
+ case XRayEntryType::EXIT:
+ Success = patchFunctionExit(Enable, FuncId, Sled);
+ break;
+ case XRayEntryType::TAIL:
+ Success = patchFunctionTailExit(Enable, FuncId, Sled);
+ break;
+ default:
+ Report("Unsupported sled kind: %d\n", int(Sled.Kind));
+ continue;
+ }
+ (void)Success;
+ }
+ XRayPatching.store(false, std::memory_order_release);
+ PatchingSuccess = true;
+ return XRayPatchingStatus::SUCCESS;
+}
+
+XRayPatchingStatus __xray_patch() XRAY_NEVER_INSTRUMENT {
+ return ControlPatching(true);
+}
+
+XRayPatchingStatus __xray_unpatch() XRAY_NEVER_INSTRUMENT {
+ return ControlPatching(false);
+}
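+
+// Illustrative caller-side check (the XRayPatchingStatus values come from
+// include/xray/xray_interface.h):
+//
+//   if (__xray_patch() != XRayPatchingStatus::SUCCESS) {
+//     // Not patched: the runtime was not initialized, another patching
+//     // attempt is in flight, or patching failed.
+//   }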
diff --git a/lib/xray/xray_interface_internal.h b/lib/xray/xray_interface_internal.h
new file mode 100644
index 000000000000..a8434a699f86
--- /dev/null
+++ b/lib/xray/xray_interface_internal.h
@@ -0,0 +1,69 @@
+//===-- xray_interface_internal.h -------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Implementation of the API functions. See also include/xray/xray_interface.h.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_INTERFACE_INTERNAL_H
+#define XRAY_INTERFACE_INTERNAL_H
+
+#include "sanitizer_common/sanitizer_platform.h"
+#include "xray/xray_interface.h"
+#include <cstddef>
+#include <cstdint>
+
+extern "C" {
+
+struct XRaySledEntry {
+#if SANITIZER_WORDSIZE == 64
+ uint64_t Address;
+ uint64_t Function;
+ unsigned char Kind;
+ unsigned char AlwaysInstrument;
+ unsigned char Padding[14]; // Need 32 bytes
+#elif SANITIZER_WORDSIZE == 32
+ uint32_t Address;
+ uint32_t Function;
+ unsigned char Kind;
+ unsigned char AlwaysInstrument;
+ unsigned char Padding[6]; // Need 16 bytes
+#else
+#error "Unsupported word size."
+#endif
+};
+}
+
+namespace __xray {
+
+struct XRaySledMap {
+ const XRaySledEntry *Sleds;
+ size_t Entries;
+};
+
+uint64_t cycleFrequency();
+
+bool patchFunctionEntry(bool Enable, uint32_t FuncId,
+ const XRaySledEntry &Sled);
+bool patchFunctionExit(bool Enable, uint32_t FuncId, const XRaySledEntry &Sled);
+bool patchFunctionTailExit(bool Enable, uint32_t FuncId,
+ const XRaySledEntry &Sled);
+
+} // namespace __xray
+
+extern "C" {
+// The following functions have to be defined in assembler, on a per-platform
+// basis. See xray_trampoline_*.S files for implementations.
+extern void __xray_FunctionEntry();
+extern void __xray_FunctionExit();
+extern void __xray_FunctionTailExit();
+}
+
+#endif
diff --git a/lib/xray/xray_trampoline_AArch64.S b/lib/xray/xray_trampoline_AArch64.S
new file mode 100644
index 000000000000..f1a471c041f9
--- /dev/null
+++ b/lib/xray/xray_trampoline_AArch64.S
@@ -0,0 +1,89 @@
+ .text
+ /* The variable containing the handler function pointer */
+ .global _ZN6__xray19XRayPatchedFunctionE
+ /* Word-aligned function entry point */
+ .p2align 2
+ /* Let C/C++ see the symbol */
+ .global __xray_FunctionEntry
+ .type __xray_FunctionEntry, %function
+ /* In C++ it is void extern "C" __xray_FunctionEntry(uint32_t FuncId) with
+ FuncId passed in W0 register. */
+__xray_FunctionEntry:
+ /* Move the return address beyond the end of sled data. The 12 bytes of
+ data are inserted in the code of the runtime patch, between the call
+ instruction and the instruction returned into. The data contains 32
+ bits of instrumented function ID and 64 bits of the address of
+ the current trampoline. */
+ ADD X30, X30, #12
+ /* Push the registers which may be modified by the handler function */
+ STP X1, X2, [SP, #-16]!
+ STP X3, X4, [SP, #-16]!
+ STP X5, X6, [SP, #-16]!
+ STP X7, X30, [SP, #-16]!
+ STP Q0, Q1, [SP, #-32]!
+ STP Q2, Q3, [SP, #-32]!
+ STP Q4, Q5, [SP, #-32]!
+ STP Q6, Q7, [SP, #-32]!
+ /* Load the address of _ZN6__xray19XRayPatchedFunctionE into X1 */
+ LDR X1, =_ZN6__xray19XRayPatchedFunctionE
+ /* Load the handler function pointer into X2 */
+ LDR X2, [X1]
+ /* Handler address is nullptr if handler is not set */
+ CMP X2, #0
+ BEQ FunctionEntry_restore
+ /* Function ID is already in W0 (the first parameter).
+ X1=0 means that we are tracing an entry event */
+ MOV X1, #0
+ /* Call the handler with 2 parameters in W0 and X1 */
+ BLR X2
+FunctionEntry_restore:
+ /* Pop the saved registers */
+ LDP Q6, Q7, [SP], #32
+ LDP Q4, Q5, [SP], #32
+ LDP Q2, Q3, [SP], #32
+ LDP Q0, Q1, [SP], #32
+ LDP X7, X30, [SP], #16
+ LDP X5, X6, [SP], #16
+ LDP X3, X4, [SP], #16
+ LDP X1, X2, [SP], #16
+ RET
+
+ /* Word-aligned function entry point */
+ .p2align 2
+ /* Let C/C++ see the symbol */
+ .global __xray_FunctionExit
+ .type __xray_FunctionExit, %function
+ /* In C++ it is void extern "C" __xray_FunctionExit(uint32_t FuncId) with
+ FuncId passed in W0 register. */
+__xray_FunctionExit:
+ /* Move the return address beyond the end of sled data. The 12 bytes of
+ data are inserted in the code of the runtime patch, between the call
+ instruction and the instruction returned into. The data contains 32
+ bits of instrumented function ID and 64 bits of the address of
+ the current trampoline. */
+ ADD X30, X30, #12
+ /* Push the registers which may be modified by the handler function */
+ STP X1, X2, [SP, #-16]!
+ STP X3, X4, [SP, #-16]!
+ STP X5, X6, [SP, #-16]!
+ STP X7, X30, [SP, #-16]!
+ STR Q0, [SP, #-16]!
+ /* Load the address of _ZN6__xray19XRayPatchedFunctionE into X1 */
+ LDR X1, =_ZN6__xray19XRayPatchedFunctionE
+ /* Load the handler function pointer into X2 */
+ LDR X2, [X1]
+ /* Handler address is nullptr if handler is not set */
+ CMP X2, #0
+ BEQ FunctionExit_restore
+ /* Function ID is already in W0 (the first parameter).
+ X1=1 means that we are tracing an exit event */
+ MOV X1, #1
+ /* Call the handler with 2 parameters in W0 and X1 */
+ BLR X2
+FunctionExit_restore:
+ LDR Q0, [SP], #16
+ LDP X7, X30, [SP], #16
+ LDP X5, X6, [SP], #16
+ LDP X3, X4, [SP], #16
+ LDP X1, X2, [SP], #16
+ RET
diff --git a/lib/xray/xray_trampoline_arm.S b/lib/xray/xray_trampoline_arm.S
new file mode 100644
index 000000000000..5d87c971364d
--- /dev/null
+++ b/lib/xray/xray_trampoline_arm.S
@@ -0,0 +1,65 @@
+ .syntax unified
+ .arch armv6t2
+ .fpu vfpv2
+ .code 32
+ .global _ZN6__xray19XRayPatchedFunctionE
+ @ Word-aligned function entry point
+ .p2align 2
+ @ Let C/C++ see the symbol
+ .global __xray_FunctionEntry
+ @ It preserves all registers except r0, r12(ip), r14(lr) and r15(pc)
+ @ Assume that "q" part of the floating-point registers is not used
+ @ for passing parameters to C/C++ functions.
+ .type __xray_FunctionEntry, %function
+ @ In C++ it is void extern "C" __xray_FunctionEntry(uint32_t FuncId) with
+ @ FuncId passed in r0 register.
+__xray_FunctionEntry:
+ PUSH {r1-r3,lr}
+ @ Save floating-point parameters of the instrumented function
+ VPUSH {d0-d7}
+ MOVW r1,#:lower16:_ZN6__xray19XRayPatchedFunctionE
+ MOVT r1,#:upper16:_ZN6__xray19XRayPatchedFunctionE
+ LDR r2, [r1]
+ @ Handler address is nullptr if handler is not set
+ CMP r2, #0
+ BEQ FunctionEntry_restore
+ @ Function ID is already in r0 (the first parameter).
+ @ r1=0 means that we are tracing an entry event
+ MOV r1, #0
+ @ Call the handler with 2 parameters in r0 and r1
+ BLX r2
+FunctionEntry_restore:
+ @ Restore floating-point parameters of the instrumented function
+ VPOP {d0-d7}
+ POP {r1-r3,pc}
+
+ @ Word-aligned function entry point
+ .p2align 2
+ @ Let C/C++ see the symbol
+ .global __xray_FunctionExit
+ @ Assume that d1-d7 are not used for the return value.
+ @ Assume that "q" part of the floating-point registers is not used for the
+ @ return value in C/C++.
+ .type __xray_FunctionExit, %function
+ @ In C++ it is extern "C" void __xray_FunctionExit(uint32_t FuncId) with
+ @ FuncId passed in r0 register.
+__xray_FunctionExit:
+ PUSH {r1-r3,lr}
+ @ Save the floating-point return value of the instrumented function
+ VPUSH {d0}
+ @ Load the handler address
+ MOVW r1,#:lower16:_ZN6__xray19XRayPatchedFunctionE
+ MOVT r1,#:upper16:_ZN6__xray19XRayPatchedFunctionE
+ LDR r2, [r1]
+ @ Handler address is nullptr if handler is not set
+ CMP r2, #0
+ BEQ FunctionExit_restore
+ @ Function ID is already in r0 (the first parameter).
+ @ 1 means that we are tracing an exit event
+ MOV r1, #1
+ @ Call the handler with 2 parameters in r0 and r1
+ BLX r2
+FunctionExit_restore:
+ @ Restore the floating-point return value of the instrumented function
+ VPOP {d0}
+ POP {r1-r3,pc}
diff --git a/lib/xray/xray_trampoline_x86_64.S b/lib/xray/xray_trampoline_x86_64.S
new file mode 100644
index 000000000000..d90c30cd98e9
--- /dev/null
+++ b/lib/xray/xray_trampoline_x86_64.S
@@ -0,0 +1,147 @@
+//===-- xray_trampoline_x86.s -----------------------------------*- ASM -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// This implements the X86-specific assembler for the trampolines.
+//
+//===----------------------------------------------------------------------===//
+
+.macro SAVE_REGISTERS
+ subq $200, %rsp
+ movupd %xmm0, 184(%rsp)
+ movupd %xmm1, 168(%rsp)
+ movupd %xmm2, 152(%rsp)
+ movupd %xmm3, 136(%rsp)
+ movupd %xmm4, 120(%rsp)
+ movupd %xmm5, 104(%rsp)
+ movupd %xmm6, 88(%rsp)
+ movupd %xmm7, 72(%rsp)
+ movq %rdi, 64(%rsp)
+ movq %rax, 56(%rsp)
+ movq %rdx, 48(%rsp)
+ movq %rsi, 40(%rsp)
+ movq %rcx, 32(%rsp)
+ movq %r8, 24(%rsp)
+ movq %r9, 16(%rsp)
+.endm
+
+.macro RESTORE_REGISTERS
+ movupd 184(%rsp), %xmm0
+ movupd 168(%rsp), %xmm1
+ movupd 152(%rsp), %xmm2
+ movupd 136(%rsp), %xmm3
+ movupd 120(%rsp), %xmm4
+ movupd 104(%rsp), %xmm5
+ movupd 88(%rsp) , %xmm6
+ movupd 72(%rsp) , %xmm7
+ movq 64(%rsp), %rdi
+ movq 56(%rsp), %rax
+ movq 48(%rsp), %rdx
+ movq 40(%rsp), %rsi
+ movq 32(%rsp), %rcx
+ movq 24(%rsp), %r8
+ movq 16(%rsp), %r9
+ addq $200, %rsp
+.endm
+
+ .text
+ .file "xray_trampoline_x86.S"
+ .globl __xray_FunctionEntry
+ .align 16, 0x90
+ .type __xray_FunctionEntry,@function
+
+__xray_FunctionEntry:
+ .cfi_startproc
+ pushq %rbp
+ .cfi_def_cfa_offset 16
+ SAVE_REGISTERS
+
+ // This load has to be atomic, it's concurrent with __xray_patch().
+ // On x86/amd64, a simple (type-aligned) MOV instruction is enough.
+ movq _ZN6__xray19XRayPatchedFunctionE(%rip), %rax
+ testq %rax, %rax
+ je .Ltmp0
+
+ // The patched function prolog puts its xray_instr_map index into %r10d.
+ movl %r10d, %edi
+ xor %esi,%esi
+ callq *%rax
+.Ltmp0:
+ RESTORE_REGISTERS
+ popq %rbp
+ retq
+.Ltmp1:
+ .size __xray_FunctionEntry, .Ltmp1-__xray_FunctionEntry
+ .cfi_endproc
+
+ .globl __xray_FunctionExit
+ .align 16, 0x90
+ .type __xray_FunctionExit,@function
+__xray_FunctionExit:
+ .cfi_startproc
+ // Save the important registers first. Since we're assuming that this
+ // function is only jumped into, we only preserve the registers for
+ // returning.
+ pushq %rbp
+ .cfi_def_cfa_offset 16
+ subq $56, %rsp
+ .cfi_def_cfa_offset 32
+ movupd %xmm0, 40(%rsp)
+ movupd %xmm1, 24(%rsp)
+ movq %rax, 16(%rsp)
+ movq %rdx, 8(%rsp)
+ movq _ZN6__xray19XRayPatchedFunctionE(%rip), %rax
+ testq %rax,%rax
+ je .Ltmp2
+
+ movl %r10d, %edi
+ movl $1, %esi
+ callq *%rax
+.Ltmp2:
+ // Restore the important registers.
+ movupd 40(%rsp), %xmm0
+ movupd 24(%rsp), %xmm1
+ movq 16(%rsp), %rax
+ movq 8(%rsp), %rdx
+ addq $56, %rsp
+ popq %rbp
+ retq
+.Ltmp3:
+ .size __xray_FunctionExit, .Ltmp3-__xray_FunctionExit
+ .cfi_endproc
+
+ .global __xray_FunctionTailExit
+ .align 16, 0x90
+ .type __xray_FunctionTailExit,@function
+__xray_FunctionTailExit:
+ .cfi_startproc
+ // Save the important registers as in the entry trampoline, but indicate that
+ // this is an exit. In the future, we will introduce a new entry type that
+ // differentiates between a normal exit and a tail exit, but we'd have to do
+ // this and increment the version number for the header.
+ pushq %rbp
+ .cfi_def_cfa_offset 16
+ SAVE_REGISTERS
+
+ movq _ZN6__xray19XRayPatchedFunctionE(%rip), %rax
+ testq %rax,%rax
+ je .Ltmp4
+
+ movl %r10d, %edi
+ movl $1, %esi
+ callq *%rax
+
+.Ltmp4:
+ RESTORE_REGISTERS
+ popq %rbp
+ retq
+.Ltmp5:
+ .size __xray_FunctionTailExit, .Ltmp5-__xray_FunctionTailExit
+ .cfi_endproc
diff --git a/lib/xray/xray_x86_64.cc b/lib/xray/xray_x86_64.cc
new file mode 100644
index 000000000000..3ee91896c6e0
--- /dev/null
+++ b/lib/xray/xray_x86_64.cc
@@ -0,0 +1,202 @@
+#include "sanitizer_common/sanitizer_common.h"
+#include "xray_defs.h"
+#include "xray_interface_internal.h"
+#include <atomic>
+#include <cstdint>
+#include <errno.h>
+#include <fcntl.h>
+#include <iterator>
+#include <limits>
+#include <tuple>
+#include <unistd.h>
+
+namespace __xray {
+
+static std::pair<ssize_t, bool>
+retryingReadSome(int Fd, char *Begin, char *End) XRAY_NEVER_INSTRUMENT {
+ auto BytesToRead = std::distance(Begin, End);
+ ssize_t BytesRead;
+ ssize_t TotalBytesRead = 0;
+ while (BytesToRead && (BytesRead = read(Fd, Begin, BytesToRead))) {
+ if (BytesRead == -1) {
+ if (errno == EINTR)
+ continue;
+ Report("Read error; errno = %d\n", errno);
+ return std::make_pair(TotalBytesRead, false);
+ }
+
+ TotalBytesRead += BytesRead;
+ BytesToRead -= BytesRead;
+ Begin += BytesRead;
+ }
+ return std::make_pair(TotalBytesRead, true);
+}
+
+static bool readValueFromFile(const char *Filename,
+ long long *Value) XRAY_NEVER_INSTRUMENT {
+ int Fd = open(Filename, O_RDONLY | O_CLOEXEC);
+ if (Fd == -1)
+ return false;
+ static constexpr size_t BufSize = 256;
+ char Line[BufSize] = {};
+ ssize_t BytesRead;
+ bool Success;
+ std::tie(BytesRead, Success) = retryingReadSome(Fd, Line, Line + BufSize);
+ if (!Success)
+ return false;
+ close(Fd);
+ char *End = nullptr;
+ long long Tmp = internal_simple_strtoll(Line, &End, 10);
+ bool Result = false;
+ if (Line[0] != '\0' && (*End == '\n' || *End == '\0')) {
+ *Value = Tmp;
+ Result = true;
+ }
+ return Result;
+}
+
+uint64_t cycleFrequency() XRAY_NEVER_INSTRUMENT {
+ long long CPUFrequency = -1;
+ if (readValueFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz",
+ &CPUFrequency)) {
+ CPUFrequency *= 1000;
+ } else if (readValueFromFile(
+ "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
+ &CPUFrequency)) {
+ CPUFrequency *= 1000;
+ } else {
+ Report("Unable to determine CPU frequency for TSC accounting.\n");
+ }
+ return CPUFrequency == -1 ? 0 : static_cast<uint64_t>(CPUFrequency);
+}
+
+static constexpr uint8_t CallOpCode = 0xe8;
+static constexpr uint16_t MovR10Seq = 0xba41;
+static constexpr uint16_t Jmp9Seq = 0x09eb;
+static constexpr uint8_t JmpOpCode = 0xe9;
+static constexpr uint8_t RetOpCode = 0xc3;
+
+static constexpr int64_t MinOffset{std::numeric_limits<int32_t>::min()};
+static constexpr int64_t MaxOffset{std::numeric_limits<int32_t>::max()};
+
+bool patchFunctionEntry(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // Here we do the dance of replacing the following sled:
+ //
+ // xray_sled_n:
+ // jmp +9
+ // <9 byte nop>
+ //
+ // With the following:
+ //
+ // mov r10d, <function id>
+ // call <relative 32bit offset to entry trampoline>
+ //
+ // We need to do this in the following order:
+ //
+ // 1. Put the function id first, 2 bytes from the start of the sled (just
+ // after the 2-byte jmp instruction).
+ // 2. Put the call opcode 6 bytes from the start of the sled.
+ // 3. Put the relative offset 7 bytes from the start of the sled.
+ // 4. Do an atomic write over the jmp instruction for the "mov r10d"
+ // opcode and first operand.
+ //
+ // Prerequisite is to compute the relative offset to the
+ // __xray_FunctionEntry function's address.
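+  //
+  // For illustration, the resulting 11-byte patched sequence is laid out as:
+  //
+  //   offset 0-1:  0x41 0xba   ("mov r10d, imm32": REX.B prefix + opcode)
+  //   offset 2-5:  <FuncId>    (32-bit immediate, little-endian)
+  //   offset 6:    0xe8        ("call rel32")
+  //   offset 7-10: <rel32>     (displacement relative to Sled.Address + 11)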
+ int64_t TrampolineOffset = reinterpret_cast<int64_t>(__xray_FunctionEntry) -
+ (static_cast<int64_t>(Sled.Address) + 11);
+ if (TrampolineOffset < MinOffset || TrampolineOffset > MaxOffset) {
+ Report("XRay Entry trampoline (%p) too far from sled (%p)\n",
+ __xray_FunctionEntry, reinterpret_cast<void *>(Sled.Address));
+ return false;
+ }
+ if (Enable) {
+ *reinterpret_cast<uint32_t *>(Sled.Address + 2) = FuncId;
+ *reinterpret_cast<uint8_t *>(Sled.Address + 6) = CallOpCode;
+ *reinterpret_cast<uint32_t *>(Sled.Address + 7) = TrampolineOffset;
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), MovR10Seq,
+ std::memory_order_release);
+ } else {
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), Jmp9Seq,
+ std::memory_order_release);
+ // FIXME: Write out the nops still?
+ }
+ return true;
+}
+
+bool patchFunctionExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // Here we do the dance of replacing the following sled:
+ //
+ // xray_sled_n:
+ // ret
+ // <10 byte nop>
+ //
+ // With the following:
+ //
+ // mov r10d, <function id>
+ // jmp <relative 32bit offset to exit trampoline>
+ //
+ // 1. Put the function id first, 2 bytes from the start of the sled (just
+ // after the 1-byte ret instruction).
+ // 2. Put the jmp opcode 6 bytes from the start of the sled.
+ // 3. Put the relative offset 7 bytes from the start of the sled.
+ // 4. Do an atomic write over the jmp instruction for the "mov r10d"
+ // opcode and first operand.
+ //
+  // Prerequisite is to compute the relative offset of the
+ // __xray_FunctionExit function's address.
+ int64_t TrampolineOffset = reinterpret_cast<int64_t>(__xray_FunctionExit) -
+ (static_cast<int64_t>(Sled.Address) + 11);
+ if (TrampolineOffset < MinOffset || TrampolineOffset > MaxOffset) {
+ Report("XRay Exit trampoline (%p) too far from sled (%p)\n",
+ __xray_FunctionExit, reinterpret_cast<void *>(Sled.Address));
+ return false;
+ }
+ if (Enable) {
+ *reinterpret_cast<uint32_t *>(Sled.Address + 2) = FuncId;
+ *reinterpret_cast<uint8_t *>(Sled.Address + 6) = JmpOpCode;
+ *reinterpret_cast<uint32_t *>(Sled.Address + 7) = TrampolineOffset;
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), MovR10Seq,
+ std::memory_order_release);
+ } else {
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint8_t> *>(Sled.Address), RetOpCode,
+ std::memory_order_release);
+ // FIXME: Write out the nops still?
+ }
+ return true;
+}
+
+bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // Here we do the dance of replacing the tail call sled with a similar
+ // sequence as the entry sled, but calls the tail exit sled instead.
+ int64_t TrampolineOffset =
+ reinterpret_cast<int64_t>(__xray_FunctionTailExit) -
+ (static_cast<int64_t>(Sled.Address) + 11);
+ if (TrampolineOffset < MinOffset || TrampolineOffset > MaxOffset) {
+ Report("XRay Exit trampoline (%p) too far from sled (%p)\n",
+ __xray_FunctionExit, reinterpret_cast<void *>(Sled.Address));
+ return false;
+ }
+ if (Enable) {
+ *reinterpret_cast<uint32_t *>(Sled.Address + 2) = FuncId;
+ *reinterpret_cast<uint8_t *>(Sled.Address + 6) = CallOpCode;
+ *reinterpret_cast<uint32_t *>(Sled.Address + 7) = TrampolineOffset;
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), MovR10Seq,
+ std::memory_order_release);
+ } else {
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), Jmp9Seq,
+ std::memory_order_release);
+ // FIXME: Write out the nops still?
+ }
+ return true;
+}
+
+} // namespace __xray
diff --git a/lib/xray/xray_x86_64.h b/lib/xray/xray_x86_64.h
new file mode 100644
index 000000000000..52d2dea8f0d9
--- /dev/null
+++ b/lib/xray/xray_x86_64.h
@@ -0,0 +1,32 @@
+//===-- xray_x86_64.h -------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_X86_64_H
+#define XRAY_X86_64_H
+
+#include <cstdint>
+#include <x86intrin.h>
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "xray_defs.h"
+
+namespace __xray {
+
+ALWAYS_INLINE uint64_t readTSC(uint8_t &CPU) XRAY_NEVER_INSTRUMENT {
+ unsigned LongCPU;
+ uint64_t TSC = __rdtscp(&LongCPU);
+ CPU = LongCPU;
+ return TSC;
+}
+}
+
+#endif // XRAY_X86_64_H