aboutsummaryrefslogtreecommitdiff
path: root/compiler-rt
diff options
context:
space:
mode:
authorDimitry Andric <dim@FreeBSD.org>2023-12-09 13:28:42 +0000
committerDimitry Andric <dim@FreeBSD.org>2023-12-09 13:28:42 +0000
commitb1c73532ee8997fe5dfbeb7d223027bdf99758a0 (patch)
tree7d6e51c294ab6719475d660217aa0c0ad0526292 /compiler-rt
parent7fa27ce4a07f19b07799a767fc29416f3b625afb (diff)
downloadsrc-b1c73532ee8997fe5dfbeb7d223027bdf99758a0.tar.gz
src-b1c73532ee8997fe5dfbeb7d223027bdf99758a0.zip
Diffstat (limited to 'compiler-rt')
-rw-r--r--compiler-rt/include/fuzzer/FuzzedDataProvider.h2
-rw-r--r--compiler-rt/include/profile/InstrProfData.inc57
-rw-r--r--compiler-rt/include/sanitizer/allocator_interface.h129
-rw-r--r--compiler-rt/include/sanitizer/asan_interface.h96
-rw-r--r--compiler-rt/include/sanitizer/common_interface_defs.h149
-rw-r--r--compiler-rt/include/sanitizer/coverage_interface.h19
-rw-r--r--compiler-rt/include/sanitizer/dfsan_interface.h97
-rw-r--r--compiler-rt/include/sanitizer/hwasan_interface.h158
-rw-r--r--compiler-rt/include/sanitizer/lsan_interface.h106
-rw-r--r--compiler-rt/include/sanitizer/memprof_interface.h19
-rw-r--r--compiler-rt/include/sanitizer/msan_interface.h211
-rw-r--r--compiler-rt/include/sanitizer/scudo_interface.h28
-rw-r--r--compiler-rt/include/sanitizer/tsan_interface.h126
-rw-r--r--compiler-rt/include/sanitizer/tsan_interface_atomic.h273
-rw-r--r--compiler-rt/include/sanitizer/ubsan_interface.h6
-rw-r--r--compiler-rt/lib/asan/asan_allocator.cpp2
-rw-r--r--compiler-rt/lib/asan/asan_allocator.h77
-rw-r--r--compiler-rt/lib/asan/asan_descriptions.cpp70
-rw-r--r--compiler-rt/lib/asan/asan_errors.cpp22
-rw-r--r--compiler-rt/lib/asan/asan_fake_stack.cpp40
-rw-r--r--compiler-rt/lib/asan/asan_globals.cpp56
-rw-r--r--compiler-rt/lib/asan/asan_interceptors.cpp174
-rw-r--r--compiler-rt/lib/asan/asan_interceptors.h10
-rw-r--r--compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp4
-rw-r--r--compiler-rt/lib/asan/asan_internal.h5
-rw-r--r--compiler-rt/lib/asan/asan_lock.h0
-rw-r--r--compiler-rt/lib/asan/asan_mac.cpp5
-rw-r--r--compiler-rt/lib/asan/asan_malloc_linux.cpp7
-rw-r--r--compiler-rt/lib/asan/asan_malloc_mac.cpp80
-rw-r--r--compiler-rt/lib/asan/asan_malloc_win.cpp10
-rw-r--r--compiler-rt/lib/asan/asan_poisoning.cpp9
-rw-r--r--compiler-rt/lib/asan/asan_posix.cpp6
-rw-r--r--compiler-rt/lib/asan/asan_report.cpp6
-rw-r--r--compiler-rt/lib/asan/asan_report.h3
-rw-r--r--compiler-rt/lib/asan/asan_rtl.cpp40
-rw-r--r--compiler-rt/lib/asan/asan_rtl_x86_64.S28
-rw-r--r--compiler-rt/lib/asan/asan_stack.cpp2
-rw-r--r--compiler-rt/lib/asan/asan_stats.cpp4
-rw-r--r--compiler-rt/lib/asan/asan_thread.cpp40
-rw-r--r--compiler-rt/lib/asan/asan_thread.h38
-rw-r--r--compiler-rt/lib/asan/asan_win.cpp18
-rw-r--r--compiler-rt/lib/asan/asan_win_dll_thunk.cpp2
-rw-r--r--compiler-rt/lib/asan_abi/asan_abi.cpp4
-rw-r--r--compiler-rt/lib/asan_abi/asan_abi.h11
-rw-r--r--compiler-rt/lib/asan_abi/asan_abi_shim.cpp66
-rw-r--r--compiler-rt/lib/asan_abi/asan_abi_tbd.txt12
-rw-r--r--compiler-rt/lib/builtins/README.txt21
-rw-r--r--compiler-rt/lib/builtins/aarch64/lse.S40
-rw-r--r--compiler-rt/lib/builtins/aarch64/sme-abi-init.c52
-rw-r--r--compiler-rt/lib/builtins/aarch64/sme-abi.S176
-rw-r--r--compiler-rt/lib/builtins/absvti2.c2
-rw-r--r--compiler-rt/lib/builtins/arm/udivsi3.S2
-rw-r--r--compiler-rt/lib/builtins/ashldi3.c3
-rw-r--r--compiler-rt/lib/builtins/ashlti3.c3
-rw-r--r--compiler-rt/lib/builtins/ashrdi3.c3
-rw-r--r--compiler-rt/lib/builtins/ashrti3.c3
-rw-r--r--compiler-rt/lib/builtins/assembly.h5
-rw-r--r--compiler-rt/lib/builtins/clear_cache.c2
-rw-r--r--compiler-rt/lib/builtins/cpu_model.c155
-rw-r--r--compiler-rt/lib/builtins/divmoddi4.c4
-rw-r--r--compiler-rt/lib/builtins/divmodsi4.c4
-rw-r--r--compiler-rt/lib/builtins/divmodti4.c4
-rw-r--r--compiler-rt/lib/builtins/divtc3.c51
-rw-r--r--compiler-rt/lib/builtins/divxc3.c8
-rw-r--r--compiler-rt/lib/builtins/extenddftf2.c4
-rw-r--r--compiler-rt/lib/builtins/extendhftf2.c4
-rw-r--r--compiler-rt/lib/builtins/extendsftf2.c4
-rw-r--r--compiler-rt/lib/builtins/extendxftf2.c24
-rw-r--r--compiler-rt/lib/builtins/fixunsxfdi.c4
-rw-r--r--compiler-rt/lib/builtins/fixunsxfsi.c4
-rw-r--r--compiler-rt/lib/builtins/fixunsxfti.c4
-rw-r--r--compiler-rt/lib/builtins/fixxfdi.c4
-rw-r--r--compiler-rt/lib/builtins/fixxfti.c4
-rw-r--r--compiler-rt/lib/builtins/floatdidf.c52
-rw-r--r--compiler-rt/lib/builtins/floatdisf.c51
-rw-r--r--compiler-rt/lib/builtins/floatdixf.c4
-rw-r--r--compiler-rt/lib/builtins/floatsidf.c7
-rw-r--r--compiler-rt/lib/builtins/floatsisf.c11
-rw-r--r--compiler-rt/lib/builtins/floatsitf.c2
-rw-r--r--compiler-rt/lib/builtins/floattidf.c52
-rw-r--r--compiler-rt/lib/builtins/floattisf.c51
-rw-r--r--compiler-rt/lib/builtins/floattitf.c55
-rw-r--r--compiler-rt/lib/builtins/floattixf.c4
-rw-r--r--compiler-rt/lib/builtins/floatundidf.c49
-rw-r--r--compiler-rt/lib/builtins/floatundisf.c48
-rw-r--r--compiler-rt/lib/builtins/floatundixf.c4
-rw-r--r--compiler-rt/lib/builtins/floatuntidf.c49
-rw-r--r--compiler-rt/lib/builtins/floatuntisf.c48
-rw-r--r--compiler-rt/lib/builtins/floatuntitf.c52
-rw-r--r--compiler-rt/lib/builtins/floatuntixf.c4
-rw-r--r--compiler-rt/lib/builtins/fp_extend.h101
-rw-r--r--compiler-rt/lib/builtins/fp_extend_impl.inc83
-rw-r--r--compiler-rt/lib/builtins/fp_fixint_impl.inc2
-rw-r--r--compiler-rt/lib/builtins/fp_lib.h50
-rw-r--r--compiler-rt/lib/builtins/fp_trunc.h87
-rw-r--r--compiler-rt/lib/builtins/fp_trunc_impl.inc119
-rw-r--r--compiler-rt/lib/builtins/i386/chkstk.S24
-rw-r--r--compiler-rt/lib/builtins/i386/chkstk2.S41
-rw-r--r--compiler-rt/lib/builtins/i386/floatdixf.S2
-rw-r--r--compiler-rt/lib/builtins/i386/floatundixf.S2
-rw-r--r--compiler-rt/lib/builtins/int_lib.h2
-rw-r--r--compiler-rt/lib/builtins/int_math.h10
-rw-r--r--compiler-rt/lib/builtins/int_to_fp.h82
-rw-r--r--compiler-rt/lib/builtins/int_to_fp_impl.inc72
-rw-r--r--compiler-rt/lib/builtins/int_types.h82
-rw-r--r--compiler-rt/lib/builtins/multc3.c53
-rw-r--r--compiler-rt/lib/builtins/mulxc3.c12
-rw-r--r--compiler-rt/lib/builtins/negdi2.c2
-rw-r--r--compiler-rt/lib/builtins/negti2.c2
-rw-r--r--compiler-rt/lib/builtins/negvti2.c2
-rw-r--r--compiler-rt/lib/builtins/powitf2.c4
-rw-r--r--compiler-rt/lib/builtins/powixf2.c4
-rw-r--r--compiler-rt/lib/builtins/trunctfdf2.c2
-rw-r--r--compiler-rt/lib/builtins/trunctfhf2.c4
-rw-r--r--compiler-rt/lib/builtins/trunctfsf2.c2
-rw-r--r--compiler-rt/lib/builtins/trunctfxf2.c23
-rw-r--r--compiler-rt/lib/builtins/x86_64/chkstk.S2
-rw-r--r--compiler-rt/lib/builtins/x86_64/chkstk2.S43
-rw-r--r--compiler-rt/lib/builtins/x86_64/floatdixf.c4
-rw-r--r--compiler-rt/lib/builtins/x86_64/floatundixf.S2
-rw-r--r--compiler-rt/lib/cfi/cfi.cpp4
-rw-r--r--compiler-rt/lib/dfsan/dfsan.cpp12
-rw-r--r--compiler-rt/lib/dfsan/dfsan_custom.cpp352
-rw-r--r--compiler-rt/lib/dfsan/done_abilist.txt4
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerCommand.h1
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerCorpus.h8
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerDriver.cpp9
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerFlags.def2
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerLoop.cpp2
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerUtil.h2
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerUtilDarwin.cpp5
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp4
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerUtilLinux.cpp8
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp5
-rw-r--r--compiler-rt/lib/gwp_asan/guarded_pool_allocator.h9
-rw-r--r--compiler-rt/lib/hwasan/hwasan.cpp10
-rw-r--r--compiler-rt/lib/hwasan/hwasan_allocator.cpp38
-rw-r--r--compiler-rt/lib/hwasan/hwasan_flags.inc7
-rw-r--r--compiler-rt/lib/hwasan/hwasan_interceptors.cpp71
-rw-r--r--compiler-rt/lib/hwasan/hwasan_linux.cpp48
-rw-r--r--compiler-rt/lib/hwasan/hwasan_platform_interceptors.h20
-rw-r--r--compiler-rt/lib/hwasan/hwasan_report.cpp856
-rw-r--r--compiler-rt/lib/hwasan/hwasan_report.h2
-rw-r--r--compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S8
-rw-r--r--compiler-rt/lib/interception/interception.h10
-rw-r--r--compiler-rt/lib/interception/interception_win.cpp37
-rw-r--r--compiler-rt/lib/lsan/lsan_allocator.h24
-rw-r--r--compiler-rt/lib/lsan/lsan_common.cpp4
-rw-r--r--compiler-rt/lib/lsan/lsan_common_fuchsia.cpp3
-rw-r--r--compiler-rt/lib/lsan/lsan_interceptors.cpp4
-rw-r--r--compiler-rt/lib/memprof/memprof_allocator.cpp10
-rw-r--r--compiler-rt/lib/memprof/memprof_descriptions.cpp8
-rw-r--r--compiler-rt/lib/memprof/memprof_interface_internal.h1
-rw-r--r--compiler-rt/lib/msan/msan.cpp4
-rw-r--r--compiler-rt/lib/msan/msan_interceptors.cpp138
-rw-r--r--compiler-rt/lib/msan/msan_report.cpp2
-rw-r--r--compiler-rt/lib/orc/bitmask_enum.h151
-rw-r--r--compiler-rt/lib/orc/elfnix_tls.ppc64.S33
-rw-r--r--compiler-rt/lib/orc/macho_platform.cpp273
-rw-r--r--compiler-rt/lib/orc/simple_packed_serialization.h45
-rw-r--r--compiler-rt/lib/orc/stl_extras.h12
-rw-r--r--compiler-rt/lib/orc/tests/unit/bitmask_enum_test.cpp143
-rw-r--r--compiler-rt/lib/orc/tests/unit/simple_packed_serialization_test.cpp5
-rw-r--r--compiler-rt/lib/profile/InstrProfiling.c4
-rw-r--r--compiler-rt/lib/profile/InstrProfiling.h28
-rw-r--r--compiler-rt/lib/profile/InstrProfilingBuffer.c47
-rw-r--r--compiler-rt/lib/profile/InstrProfilingFile.c84
-rw-r--r--compiler-rt/lib/profile/InstrProfilingInternal.h8
-rw-r--r--compiler-rt/lib/profile/InstrProfilingMerge.c81
-rw-r--r--compiler-rt/lib/profile/InstrProfilingPlatformAIX.c7
-rw-r--r--compiler-rt/lib/profile/InstrProfilingPlatformDarwin.c9
-rw-r--r--compiler-rt/lib/profile/InstrProfilingPlatformLinux.c10
-rw-r--r--compiler-rt/lib/profile/InstrProfilingPlatformOther.c4
-rw-r--r--compiler-rt/lib/profile/InstrProfilingPlatformWindows.c7
-rw-r--r--compiler-rt/lib/profile/InstrProfilingWriter.c47
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp8
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h7
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_asm.h6
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_common.cpp5
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_common.h10
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc113
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_format.inc31
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_common_interface_posix.inc1
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp4
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_coverage_fuchsia.cpp2
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_dl.cpp37
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_dl.h26
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_file.h2
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.cpp7
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.h4
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_flags.cpp4
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_flags.inc10
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp7
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h5
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_libc.cpp16
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_libc.h3
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp1614
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_linux.h114
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp459
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_linux_s390.cpp164
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_mallinfo.h4
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_malloc_mac.inc2
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_platform.h24
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h4
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.cpp0
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.h0
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_printf.cpp9
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_redefine_builtins.h16
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_ring_buffer.h4
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cpp30
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.cpp129
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.h135
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_sparc.cpp6
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp6
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.cpp5
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.h7
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h9
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.cpp2
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp20
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp5
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp224
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.h79
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup_constants.h (renamed from compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_fuchsia.h)17
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup_fuchsia.cpp85
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp60
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp18
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report_fuchsia.cpp33
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp6
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_unwind_fuchsia.cpp66
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_unwind_linux_libcdep.cpp6
-rw-r--r--compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp54
-rwxr-xr-xcompiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh95
-rw-r--r--compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt19
-rw-r--r--compiler-rt/lib/sanitizer_common/weak_symbols.txt1
-rw-r--r--compiler-rt/lib/scudo/standalone/allocator_common.h85
-rw-r--r--compiler-rt/lib/scudo/standalone/allocator_config.h53
-rw-r--r--compiler-rt/lib/scudo/standalone/atomic_helpers.h8
-rw-r--r--compiler-rt/lib/scudo/standalone/chunk.h13
-rw-r--r--compiler-rt/lib/scudo/standalone/combined.h217
-rw-r--r--compiler-rt/lib/scudo/standalone/common.cpp18
-rw-r--r--compiler-rt/lib/scudo/standalone/common.h29
-rw-r--r--compiler-rt/lib/scudo/standalone/condition_variable.h60
-rw-r--r--compiler-rt/lib/scudo/standalone/condition_variable_base.h56
-rw-r--r--compiler-rt/lib/scudo/standalone/condition_variable_linux.cpp52
-rw-r--r--compiler-rt/lib/scudo/standalone/condition_variable_linux.h38
-rw-r--r--compiler-rt/lib/scudo/standalone/flags.cpp3
-rw-r--r--compiler-rt/lib/scudo/standalone/flags.inc12
-rw-r--r--compiler-rt/lib/scudo/standalone/flags_parser.cpp24
-rw-r--r--compiler-rt/lib/scudo/standalone/flags_parser.h3
-rw-r--r--compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp9
-rw-r--r--compiler-rt/lib/scudo/standalone/include/scudo/interface.h17
-rw-r--r--compiler-rt/lib/scudo/standalone/linux.cpp50
-rw-r--r--compiler-rt/lib/scudo/standalone/local_cache.h125
-rw-r--r--compiler-rt/lib/scudo/standalone/mem_map.h5
-rw-r--r--compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp12
-rw-r--r--compiler-rt/lib/scudo/standalone/mem_map_linux.cpp153
-rw-r--r--compiler-rt/lib/scudo/standalone/mem_map_linux.h67
-rw-r--r--compiler-rt/lib/scudo/standalone/mutex.h19
-rw-r--r--compiler-rt/lib/scudo/standalone/options.h2
-rw-r--r--compiler-rt/lib/scudo/standalone/platform.h14
-rw-r--r--compiler-rt/lib/scudo/standalone/primary32.h294
-rw-r--r--compiler-rt/lib/scudo/standalone/primary64.h541
-rw-r--r--compiler-rt/lib/scudo/standalone/release.cpp3
-rw-r--r--compiler-rt/lib/scudo/standalone/release.h171
-rw-r--r--compiler-rt/lib/scudo/standalone/report.cpp33
-rw-r--r--compiler-rt/lib/scudo/standalone/report.h8
-rw-r--r--compiler-rt/lib/scudo/standalone/report_linux.cpp58
-rw-r--r--compiler-rt/lib/scudo/standalone/report_linux.h34
-rw-r--r--compiler-rt/lib/scudo/standalone/rss_limit_checker.cpp37
-rw-r--r--compiler-rt/lib/scudo/standalone/rss_limit_checker.h63
-rw-r--r--compiler-rt/lib/scudo/standalone/secondary.h190
-rw-r--r--compiler-rt/lib/scudo/standalone/size_class_map.h26
-rw-r--r--compiler-rt/lib/scudo/standalone/stack_depot.h5
-rw-r--r--compiler-rt/lib/scudo/standalone/trusty.cpp6
-rw-r--r--compiler-rt/lib/scudo/standalone/tsd.h17
-rw-r--r--compiler-rt/lib/scudo/standalone/tsd_shared.h5
-rw-r--r--compiler-rt/lib/scudo/standalone/vector.h51
-rw-r--r--compiler-rt/lib/scudo/standalone/wrappers_c.cpp3
-rw-r--r--compiler-rt/lib/scudo/standalone/wrappers_c.inc108
-rw-r--r--compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp21
-rw-r--r--compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp66
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan.syms.extra1
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_debugging.cpp4
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp4
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_interface.h8
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp22
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp24
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_platform.h117
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp50
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_report.cpp12
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_report.h3
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_rtl.cpp2
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_rtl.h4
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_rtl_riscv64.S203
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_suppressions.cpp1
-rw-r--r--compiler-rt/lib/ubsan/ubsan_diag.cpp81
-rw-r--r--compiler-rt/lib/ubsan/ubsan_monitor.cpp3
-rw-r--r--compiler-rt/lib/xray/xray_utils.cpp2
298 files changed, 9211 insertions, 5167 deletions
diff --git a/compiler-rt/include/fuzzer/FuzzedDataProvider.h b/compiler-rt/include/fuzzer/FuzzedDataProvider.h
index 8a8214bd99fe..5903ed837917 100644
--- a/compiler-rt/include/fuzzer/FuzzedDataProvider.h
+++ b/compiler-rt/include/fuzzer/FuzzedDataProvider.h
@@ -158,7 +158,7 @@ FuzzedDataProvider::ConsumeRandomLengthString(size_t max_length) {
// picking its contents.
std::string result;
- // Reserve the anticipated capaticity to prevent several reallocations.
+ // Reserve the anticipated capacity to prevent several reallocations.
result.reserve(std::min(max_length, remaining_bytes_));
for (size_t i = 0; i < max_length && remaining_bytes_ != 0; ++i) {
char next = ConvertUnsignedToSigned<char>(data_ptr_[0]);
diff --git a/compiler-rt/include/profile/InstrProfData.inc b/compiler-rt/include/profile/InstrProfData.inc
index 94261f4705b9..44a449800923 100644
--- a/compiler-rt/include/profile/InstrProfData.inc
+++ b/compiler-rt/include/profile/InstrProfData.inc
@@ -76,18 +76,21 @@ INSTR_PROF_DATA(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \
ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
Inc->getHash()->getZExtValue()))
INSTR_PROF_DATA(const IntPtrT, IntPtrTy, CounterPtr, RelativeCounterPtr)
+INSTR_PROF_DATA(const IntPtrT, IntPtrTy, BitmapPtr, RelativeBitmapPtr)
/* This is used to map function pointers for the indirect call targets to
* function name hashes during the conversion from raw to merged profile
* data.
*/
-INSTR_PROF_DATA(const IntPtrT, llvm::Type::getInt8PtrTy(Ctx), FunctionPointer, \
+INSTR_PROF_DATA(const IntPtrT, llvm::PointerType::getUnqual(Ctx), FunctionPointer, \
FunctionAddr)
-INSTR_PROF_DATA(IntPtrT, llvm::Type::getInt8PtrTy(Ctx), Values, \
+INSTR_PROF_DATA(IntPtrT, llvm::PointerType::getUnqual(Ctx), Values, \
ValuesPtrExpr)
INSTR_PROF_DATA(const uint32_t, llvm::Type::getInt32Ty(Ctx), NumCounters, \
ConstantInt::get(llvm::Type::getInt32Ty(Ctx), NumCounters))
INSTR_PROF_DATA(const uint16_t, Int16ArrayTy, NumValueSites[IPVK_Last+1], \
- ConstantArray::get(Int16ArrayTy, Int16ArrayVals))
+ ConstantArray::get(Int16ArrayTy, Int16ArrayVals)) \
+INSTR_PROF_DATA(const uint32_t, llvm::Type::getInt32Ty(Ctx), NumBitmapBytes, \
+ ConstantInt::get(llvm::Type::getInt32Ty(Ctx), NumBitmapBytes))
#undef INSTR_PROF_DATA
/* INSTR_PROF_DATA end. */
@@ -113,7 +116,7 @@ INSTR_PROF_VALUE_NODE(uint64_t, llvm::Type::getInt64Ty(Ctx), Value, \
ConstantInt::get(llvm::Type::GetInt64Ty(Ctx), 0))
INSTR_PROF_VALUE_NODE(uint64_t, llvm::Type::getInt64Ty(Ctx), Count, \
ConstantInt::get(llvm::Type::GetInt64Ty(Ctx), 0))
-INSTR_PROF_VALUE_NODE(PtrToNodeT, llvm::Type::getInt8PtrTy(Ctx), Next, \
+INSTR_PROF_VALUE_NODE(PtrToNodeT, llvm::PointerType::getUnqual(Ctx), Next, \
ConstantInt::get(llvm::Type::GetInt8PtrTy(Ctx), 0))
#undef INSTR_PROF_VALUE_NODE
/* INSTR_PROF_VALUE_NODE end. */
@@ -128,15 +131,17 @@ INSTR_PROF_VALUE_NODE(PtrToNodeT, llvm::Type::getInt8PtrTy(Ctx), Next, \
INSTR_PROF_RAW_HEADER(uint64_t, Magic, __llvm_profile_get_magic())
INSTR_PROF_RAW_HEADER(uint64_t, Version, __llvm_profile_get_version())
INSTR_PROF_RAW_HEADER(uint64_t, BinaryIdsSize, __llvm_write_binary_ids(NULL))
-/* FIXME: A more accurate name is NumData */
-INSTR_PROF_RAW_HEADER(uint64_t, DataSize, DataSize)
+INSTR_PROF_RAW_HEADER(uint64_t, NumData, NumData)
INSTR_PROF_RAW_HEADER(uint64_t, PaddingBytesBeforeCounters, PaddingBytesBeforeCounters)
-/* FIXME: A more accurate name is NumCounters */
-INSTR_PROF_RAW_HEADER(uint64_t, CountersSize, CountersSize)
+INSTR_PROF_RAW_HEADER(uint64_t, NumCounters, NumCounters)
INSTR_PROF_RAW_HEADER(uint64_t, PaddingBytesAfterCounters, PaddingBytesAfterCounters)
+INSTR_PROF_RAW_HEADER(uint64_t, NumBitmapBytes, NumBitmapBytes)
+INSTR_PROF_RAW_HEADER(uint64_t, PaddingBytesAfterBitmapBytes, PaddingBytesAfterBitmapBytes)
INSTR_PROF_RAW_HEADER(uint64_t, NamesSize, NamesSize)
INSTR_PROF_RAW_HEADER(uint64_t, CountersDelta,
(uintptr_t)CountersBegin - (uintptr_t)DataBegin)
+INSTR_PROF_RAW_HEADER(uint64_t, BitmapDelta,
+ (uintptr_t)BitmapBegin - (uintptr_t)DataBegin)
INSTR_PROF_RAW_HEADER(uint64_t, NamesDelta, (uintptr_t)NamesBegin)
INSTR_PROF_RAW_HEADER(uint64_t, ValueKindLast, IPVK_Last)
#undef INSTR_PROF_RAW_HEADER
@@ -155,7 +160,7 @@ INSTR_PROF_RAW_HEADER(uint64_t, ValueKindLast, IPVK_Last)
#endif
VALUE_PROF_FUNC_PARAM(uint64_t, TargetValue, Type::getInt64Ty(Ctx)) \
INSTR_PROF_COMMA
-VALUE_PROF_FUNC_PARAM(void *, Data, Type::getInt8PtrTy(Ctx)) INSTR_PROF_COMMA
+VALUE_PROF_FUNC_PARAM(void *, Data, PointerType::getUnqual(Ctx)) INSTR_PROF_COMMA
VALUE_PROF_FUNC_PARAM(uint32_t, CounterIndex, Type::getInt32Ty(Ctx))
#undef VALUE_PROF_FUNC_PARAM
#undef INSTR_PROF_COMMA
@@ -208,9 +213,9 @@ VALUE_PROF_KIND(IPVK_Last, IPVK_MemOPSize, "last")
#define INSTR_PROF_DATA_DEFINED
#endif
#ifdef COVMAP_V1
-COVMAP_FUNC_RECORD(const IntPtrT, llvm::Type::getInt8PtrTy(Ctx), \
+COVMAP_FUNC_RECORD(const IntPtrT, llvm::PointerType::getUnqual(Ctx), \
NamePtr, llvm::ConstantExpr::getBitCast(NamePtr, \
- llvm::Type::getInt8PtrTy(Ctx)))
+ llvm::PointerType::getUnqual(Ctx)))
COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), NameSize, \
llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), \
NameValue.size()))
@@ -269,6 +274,9 @@ INSTR_PROF_SECT_ENTRY(IPSK_data, \
INSTR_PROF_SECT_ENTRY(IPSK_cnts, \
INSTR_PROF_QUOTE(INSTR_PROF_CNTS_COMMON), \
INSTR_PROF_CNTS_COFF, "__DATA,")
+INSTR_PROF_SECT_ENTRY(IPSK_bitmap, \
+ INSTR_PROF_QUOTE(INSTR_PROF_BITS_COMMON), \
+ INSTR_PROF_BITS_COFF, "__DATA,")
INSTR_PROF_SECT_ENTRY(IPSK_name, \
INSTR_PROF_QUOTE(INSTR_PROF_NAME_COMMON), \
INSTR_PROF_NAME_COFF, "__DATA,")
@@ -342,7 +350,7 @@ typedef struct ValueProfRecord {
* Do byte swap for this instance. \c Old is the original order before
* the swap, and \c New is the New byte order.
*/
- void swapBytes(support::endianness Old, support::endianness New);
+ void swapBytes(llvm::endianness Old, llvm::endianness New);
#endif
} ValueProfRecord;
@@ -397,15 +405,15 @@ typedef struct ValueProfData {
static Expected<std::unique_ptr<ValueProfData>>
getValueProfData(const unsigned char *SrcBuffer,
const unsigned char *const SrcBufferEnd,
- support::endianness SrcDataEndianness);
+ llvm::endianness SrcDataEndianness);
/*!
* Swap byte order from \c Endianness order to host byte order.
*/
- void swapBytesToHost(support::endianness Endianness);
+ void swapBytesToHost(llvm::endianness Endianness);
/*!
* Swap byte order from host byte order to \c Endianness order.
*/
- void swapBytesFromHost(support::endianness Endianness);
+ void swapBytesFromHost(llvm::endianness Endianness);
/*!
* Return the total size of \c ValueProfileData.
*/
@@ -646,17 +654,16 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
(uint64_t)'p' << 40 | (uint64_t)'r' << 32 | (uint64_t)'o' << 24 | \
(uint64_t)'f' << 16 | (uint64_t)'R' << 8 | (uint64_t)129
-/* FIXME: Please remedy the fixme in the header before bumping the version. */
/* Raw profile format version (start from 1). */
-#define INSTR_PROF_RAW_VERSION 8
+#define INSTR_PROF_RAW_VERSION 9
/* Indexed profile format version (start from 1). */
-#define INSTR_PROF_INDEX_VERSION 10
+#define INSTR_PROF_INDEX_VERSION 11
/* Coverage mapping format version (start from 0). */
-#define INSTR_PROF_COVMAP_VERSION 5
+#define INSTR_PROF_COVMAP_VERSION 6
-/* Profile version is always of type uint64_t. Reserve the upper 8 bits in the
- * version for other variants of profile. We set the lowest bit of the upper 8
- * bits (i.e. bit 56) to 1 to indicate if this is an IR-level instrumentation
+/* Profile version is always of type uint64_t. Reserve the upper 32 bits in the
+ * version for other variants of profile. We set the 8th most significant bit
+ * (i.e. bit 56) to 1 to indicate if this is an IR-level instrumentation
* generated profile, and 0 if this is a Clang FE generated profile.
* 1 in bit 57 indicates there are context-sensitive records in the profile.
* The 59th bit indicates whether to use debug info to correlate profiles.
@@ -665,7 +672,7 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
* The 62nd bit indicates whether memory profile information is present.
* The 63rd bit indicates if this is a temporal profile.
*/
-#define VARIANT_MASKS_ALL 0xff00000000000000ULL
+#define VARIANT_MASKS_ALL 0xffffffff00000000ULL
#define GET_VERSION(V) ((V) & ~VARIANT_MASKS_ALL)
#define VARIANT_MASK_IR_PROF (0x1ULL << 56)
#define VARIANT_MASK_CSIR_PROF (0x1ULL << 57)
@@ -689,6 +696,7 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
#define INSTR_PROF_DATA_COMMON __llvm_prf_data
#define INSTR_PROF_NAME_COMMON __llvm_prf_names
#define INSTR_PROF_CNTS_COMMON __llvm_prf_cnts
+#define INSTR_PROF_BITS_COMMON __llvm_prf_bits
#define INSTR_PROF_VALS_COMMON __llvm_prf_vals
#define INSTR_PROF_VNODES_COMMON __llvm_prf_vnds
#define INSTR_PROF_COVMAP_COMMON __llvm_covmap
@@ -700,6 +708,7 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
#define INSTR_PROF_DATA_COFF ".lprfd$M"
#define INSTR_PROF_NAME_COFF ".lprfn$M"
#define INSTR_PROF_CNTS_COFF ".lprfc$M"
+#define INSTR_PROF_BITS_COFF ".lprfb$M"
#define INSTR_PROF_VALS_COFF ".lprfv$M"
#define INSTR_PROF_VNODES_COFF ".lprfnd$M"
#define INSTR_PROF_COVMAP_COFF ".lcovmap$M"
@@ -711,6 +720,7 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
#define INSTR_PROF_DATA_SECT_NAME INSTR_PROF_DATA_COFF
#define INSTR_PROF_NAME_SECT_NAME INSTR_PROF_NAME_COFF
#define INSTR_PROF_CNTS_SECT_NAME INSTR_PROF_CNTS_COFF
+#define INSTR_PROF_BITS_SECT_NAME INSTR_PROF_BITS_COFF
/* Array of pointers. Each pointer points to a list
* of value nodes associated with one value site.
*/
@@ -725,6 +735,7 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
#define INSTR_PROF_DATA_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_DATA_COMMON)
#define INSTR_PROF_NAME_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_NAME_COMMON)
#define INSTR_PROF_CNTS_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_CNTS_COMMON)
+#define INSTR_PROF_BITS_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_BITS_COMMON)
/* Array of pointers. Each pointer points to a list
* of value nodes associated with one value site.
*/
diff --git a/compiler-rt/include/sanitizer/allocator_interface.h b/compiler-rt/include/sanitizer/allocator_interface.h
index 367e6409258f..a792e9f0136e 100644
--- a/compiler-rt/include/sanitizer/allocator_interface.h
+++ b/compiler-rt/include/sanitizer/allocator_interface.h
@@ -11,86 +11,89 @@
#ifndef SANITIZER_ALLOCATOR_INTERFACE_H
#define SANITIZER_ALLOCATOR_INTERFACE_H
+#include <sanitizer/common_interface_defs.h>
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
- /* Returns the estimated number of bytes that will be reserved by allocator
- for request of "size" bytes. If allocator can't allocate that much
- memory, returns the maximal possible allocation size, otherwise returns
- "size". */
- size_t __sanitizer_get_estimated_allocated_size(size_t size);
+/* Returns the estimated number of bytes that will be reserved by allocator
+ for request of "size" bytes. If allocator can't allocate that much
+ memory, returns the maximal possible allocation size, otherwise returns
+ "size". */
+size_t SANITIZER_CDECL __sanitizer_get_estimated_allocated_size(size_t size);
- /* Returns true if p was returned by the allocator and
- is not yet freed. */
- int __sanitizer_get_ownership(const volatile void *p);
+/* Returns true if p was returned by the allocator and
+ is not yet freed. */
+int SANITIZER_CDECL __sanitizer_get_ownership(const volatile void *p);
- /* If a pointer lies within an allocation, it will return the start address
- of the allocation. Otherwise, it returns nullptr. */
- const void *__sanitizer_get_allocated_begin(const void *p);
+/* If a pointer lies within an allocation, it will return the start address
+ of the allocation. Otherwise, it returns nullptr. */
+const void *SANITIZER_CDECL __sanitizer_get_allocated_begin(const void *p);
- /* Returns the number of bytes reserved for the pointer p.
- Requires (get_ownership(p) == true) or (p == 0). */
- size_t __sanitizer_get_allocated_size(const volatile void *p);
+/* Returns the number of bytes reserved for the pointer p.
+ Requires (get_ownership(p) == true) or (p == 0). */
+size_t SANITIZER_CDECL __sanitizer_get_allocated_size(const volatile void *p);
- /* Returns the number of bytes reserved for the pointer p.
- Requires __sanitizer_get_allocated_begin(p) == p. */
- size_t __sanitizer_get_allocated_size_fast(const volatile void *p);
+/* Returns the number of bytes reserved for the pointer p.
+ Requires __sanitizer_get_allocated_begin(p) == p. */
+size_t SANITIZER_CDECL
+__sanitizer_get_allocated_size_fast(const volatile void *p);
- /* Number of bytes, allocated and not yet freed by the application. */
- size_t __sanitizer_get_current_allocated_bytes(void);
+/* Number of bytes, allocated and not yet freed by the application. */
+size_t SANITIZER_CDECL __sanitizer_get_current_allocated_bytes(void);
- /* Number of bytes, mmaped by the allocator to fulfill allocation requests.
- Generally, for request of X bytes, allocator can reserve and add to free
- lists a large number of chunks of size X to use them for future requests.
- All these chunks count toward the heap size. Currently, allocator never
- releases memory to OS (instead, it just puts freed chunks to free
- lists). */
- size_t __sanitizer_get_heap_size(void);
+/* Number of bytes, mmaped by the allocator to fulfill allocation requests.
+ Generally, for request of X bytes, allocator can reserve and add to free
+ lists a large number of chunks of size X to use them for future requests.
+ All these chunks count toward the heap size. Currently, allocator never
+ releases memory to OS (instead, it just puts freed chunks to free
+ lists). */
+size_t SANITIZER_CDECL __sanitizer_get_heap_size(void);
- /* Number of bytes, mmaped by the allocator, which can be used to fulfill
- allocation requests. When a user program frees memory chunk, it can first
- fall into quarantine and will count toward __sanitizer_get_free_bytes()
- later. */
- size_t __sanitizer_get_free_bytes(void);
+/* Number of bytes, mmaped by the allocator, which can be used to fulfill
+ allocation requests. When a user program frees memory chunk, it can first
+ fall into quarantine and will count toward __sanitizer_get_free_bytes()
+ later. */
+size_t SANITIZER_CDECL __sanitizer_get_free_bytes(void);
- /* Number of bytes in unmapped pages, that are released to OS. Currently,
- always returns 0. */
- size_t __sanitizer_get_unmapped_bytes(void);
+/* Number of bytes in unmapped pages, that are released to OS. Currently,
+ always returns 0. */
+size_t SANITIZER_CDECL __sanitizer_get_unmapped_bytes(void);
- /* Malloc hooks that may be optionally provided by user.
- __sanitizer_malloc_hook(ptr, size) is called immediately after
- allocation of "size" bytes, which returned "ptr".
- __sanitizer_free_hook(ptr) is called immediately before
- deallocation of "ptr". */
- void __sanitizer_malloc_hook(const volatile void *ptr, size_t size);
- void __sanitizer_free_hook(const volatile void *ptr);
+/* Malloc hooks that may be optionally provided by user.
+ __sanitizer_malloc_hook(ptr, size) is called immediately after
+ allocation of "size" bytes, which returned "ptr".
+ __sanitizer_free_hook(ptr) is called immediately before
+ deallocation of "ptr". */
+void SANITIZER_CDECL __sanitizer_malloc_hook(const volatile void *ptr,
+ size_t size);
+void SANITIZER_CDECL __sanitizer_free_hook(const volatile void *ptr);
- /* Installs a pair of hooks for malloc/free.
- Several (currently, 5) hook pairs may be installed, they are executed
- in the order they were installed and after calling
- __sanitizer_malloc_hook/__sanitizer_free_hook.
- Unlike __sanitizer_malloc_hook/__sanitizer_free_hook these hooks can be
- chained and do not rely on weak symbols working on the platform, but
- require __sanitizer_install_malloc_and_free_hooks to be called at startup
- and thus will not be called on malloc/free very early in the process.
- Returns the number of hooks currently installed or 0 on failure.
- Not thread-safe, should be called in the main thread before starting
- other threads.
- */
- int __sanitizer_install_malloc_and_free_hooks(
- void (*malloc_hook)(const volatile void *, size_t),
- void (*free_hook)(const volatile void *));
+/* Installs a pair of hooks for malloc/free.
+ Several (currently, 5) hook pairs may be installed, they are executed
+ in the order they were installed and after calling
+ __sanitizer_malloc_hook/__sanitizer_free_hook.
+ Unlike __sanitizer_malloc_hook/__sanitizer_free_hook these hooks can be
+ chained and do not rely on weak symbols working on the platform, but
+ require __sanitizer_install_malloc_and_free_hooks to be called at startup
+ and thus will not be called on malloc/free very early in the process.
+ Returns the number of hooks currently installed or 0 on failure.
+ Not thread-safe, should be called in the main thread before starting
+ other threads.
+*/
+int SANITIZER_CDECL __sanitizer_install_malloc_and_free_hooks(
+ void(SANITIZER_CDECL *malloc_hook)(const volatile void *, size_t),
+ void(SANITIZER_CDECL *free_hook)(const volatile void *));
- /* Drains allocator quarantines (calling thread's and global ones), returns
- freed memory back to OS and releases other non-essential internal allocator
- resources in attempt to reduce process RSS.
- Currently available with ASan only.
- */
- void __sanitizer_purge_allocator(void);
+/* Drains allocator quarantines (calling thread's and global ones), returns
+ freed memory back to OS and releases other non-essential internal allocator
+ resources in attempt to reduce process RSS.
+ Currently available with ASan only.
+*/
+void SANITIZER_CDECL __sanitizer_purge_allocator(void);
#ifdef __cplusplus
-} // extern "C"
+} // extern "C"
#endif
#endif
diff --git a/compiler-rt/include/sanitizer/asan_interface.h b/compiler-rt/include/sanitizer/asan_interface.h
index 9bff21c117b3..37b6d08f4db1 100644
--- a/compiler-rt/include/sanitizer/asan_interface.h
+++ b/compiler-rt/include/sanitizer/asan_interface.h
@@ -31,7 +31,8 @@ extern "C" {
///
/// \param addr Start of memory region.
/// \param size Size of memory region.
-void __asan_poison_memory_region(void const volatile *addr, size_t size);
+void SANITIZER_CDECL __asan_poison_memory_region(void const volatile *addr,
+ size_t size);
/// Marks a memory region (<c>[addr, addr+size)</c>) as addressable.
///
@@ -45,10 +46,19 @@ void __asan_poison_memory_region(void const volatile *addr, size_t size);
///
/// \param addr Start of memory region.
/// \param size Size of memory region.
-void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
+void SANITIZER_CDECL __asan_unpoison_memory_region(void const volatile *addr,
+ size_t size);
// Macros provided for convenience.
-#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
+#ifdef __has_feature
+#if __has_feature(address_sanitizer)
+#define ASAN_DEFINE_REGION_MACROS
+#endif
+#elif defined(__SANITIZE_ADDRESS__)
+#define ASAN_DEFINE_REGION_MACROS
+#endif
+
+#ifdef ASAN_DEFINE_REGION_MACROS
/// Marks a memory region as unaddressable.
///
/// \note Macro provided for convenience; defined as a no-op if ASan is not
@@ -56,7 +66,7 @@ void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
///
/// \param addr Start of memory region.
/// \param size Size of memory region.
-#define ASAN_POISON_MEMORY_REGION(addr, size) \
+#define ASAN_POISON_MEMORY_REGION(addr, size) \
__asan_poison_memory_region((addr), (size))
/// Marks a memory region as addressable.
@@ -66,14 +76,13 @@ void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
///
/// \param addr Start of memory region.
/// \param size Size of memory region.
-#define ASAN_UNPOISON_MEMORY_REGION(addr, size) \
+#define ASAN_UNPOISON_MEMORY_REGION(addr, size) \
__asan_unpoison_memory_region((addr), (size))
#else
-#define ASAN_POISON_MEMORY_REGION(addr, size) \
- ((void)(addr), (void)(size))
-#define ASAN_UNPOISON_MEMORY_REGION(addr, size) \
- ((void)(addr), (void)(size))
+#define ASAN_POISON_MEMORY_REGION(addr, size) ((void)(addr), (void)(size))
+#define ASAN_UNPOISON_MEMORY_REGION(addr, size) ((void)(addr), (void)(size))
#endif
+#undef ASAN_DEFINE_REGION_MACROS
/// Checks if an address is poisoned.
///
@@ -85,7 +94,7 @@ void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
///
/// \retval 1 Address is poisoned.
/// \retval 0 Address is not poisoned.
-int __asan_address_is_poisoned(void const volatile *addr);
+int SANITIZER_CDECL __asan_address_is_poisoned(void const volatile *addr);
/// Checks if a region is poisoned.
///
@@ -95,14 +104,14 @@ int __asan_address_is_poisoned(void const volatile *addr);
/// \param beg Start of memory region.
/// \param size Start of memory region.
/// \returns Address of first poisoned byte.
-void *__asan_region_is_poisoned(void *beg, size_t size);
+void *SANITIZER_CDECL __asan_region_is_poisoned(void *beg, size_t size);
/// Describes an address (useful for calling from the debugger).
///
/// Prints the description of <c><i>addr</i></c>.
///
/// \param addr Address to describe.
-void __asan_describe_address(void *addr);
+void SANITIZER_CDECL __asan_describe_address(void *addr);
/// Checks if an error has been or is being reported (useful for calling from
/// the debugger to get information about an ASan error).
@@ -111,7 +120,7 @@ void __asan_describe_address(void *addr);
///
/// \returns 1 if an error has been (or is being) reported. Otherwise returns
/// 0.
-int __asan_report_present(void);
+int SANITIZER_CDECL __asan_report_present(void);
/// Gets the PC (program counter) register value of an ASan error (useful for
/// calling from the debugger).
@@ -120,7 +129,7 @@ int __asan_report_present(void);
/// Otherwise returns 0.
///
/// \returns PC value.
-void *__asan_get_report_pc(void);
+void *SANITIZER_CDECL __asan_get_report_pc(void);
/// Gets the BP (base pointer) register value of an ASan error (useful for
/// calling from the debugger).
@@ -129,7 +138,7 @@ void *__asan_get_report_pc(void);
/// Otherwise returns 0.
///
/// \returns BP value.
-void *__asan_get_report_bp(void);
+void *SANITIZER_CDECL __asan_get_report_bp(void);
/// Gets the SP (stack pointer) register value of an ASan error (useful for
/// calling from the debugger).
@@ -138,7 +147,7 @@ void *__asan_get_report_bp(void);
/// Otherwise returns 0.
///
/// \returns SP value.
-void *__asan_get_report_sp(void);
+void *SANITIZER_CDECL __asan_get_report_sp(void);
/// Gets the address of the report buffer of an ASan error (useful for calling
/// from the debugger).
@@ -147,7 +156,7 @@ void *__asan_get_report_sp(void);
/// reported. Otherwise returns 0.
///
/// \returns Address of report buffer.
-void *__asan_get_report_address(void);
+void *SANITIZER_CDECL __asan_get_report_address(void);
/// Gets access type of an ASan error (useful for calling from the debugger).
///
@@ -155,7 +164,7 @@ void *__asan_get_report_address(void);
/// reported. Otherwise returns 0.
///
/// \returns Access type (0 = read, 1 = write).
-int __asan_get_report_access_type(void);
+int SANITIZER_CDECL __asan_get_report_access_type(void);
/// Gets access size of an ASan error (useful for calling from the debugger).
///
@@ -163,7 +172,7 @@ int __asan_get_report_access_type(void);
/// returns 0.
///
/// \returns Access size in bytes.
-size_t __asan_get_report_access_size(void);
+size_t SANITIZER_CDECL __asan_get_report_access_size(void);
/// Gets the bug description of an ASan error (useful for calling from a
/// debugger).
@@ -171,7 +180,7 @@ size_t __asan_get_report_access_size(void);
/// \returns Returns a bug description if an error has been (or is being)
/// reported - for example, "heap-use-after-free". Otherwise returns an empty
/// string.
-const char *__asan_get_report_description(void);
+const char *SANITIZER_CDECL __asan_get_report_description(void);
/// Gets information about a pointer (useful for calling from the debugger).
///
@@ -192,8 +201,10 @@ const char *__asan_get_report_description(void);
/// \param[out] region_size Size of the region in bytes.
///
/// \returns Returns the category of the given pointer as a constant string.
-const char *__asan_locate_address(void *addr, char *name, size_t name_size,
- void **region_address, size_t *region_size);
+const char *SANITIZER_CDECL __asan_locate_address(void *addr, char *name,
+ size_t name_size,
+ void **region_address,
+ size_t *region_size);
/// Gets the allocation stack trace and thread ID for a heap address (useful
/// for calling from the debugger).
@@ -207,8 +218,8 @@ const char *__asan_locate_address(void *addr, char *name, size_t name_size,
/// \param[out] thread_id The thread ID of the address.
///
/// \returns Returns the number of stored frames or 0 on error.
-size_t __asan_get_alloc_stack(void *addr, void **trace, size_t size,
- int *thread_id);
+size_t SANITIZER_CDECL __asan_get_alloc_stack(void *addr, void **trace,
+ size_t size, int *thread_id);
/// Gets the free stack trace and thread ID for a heap address (useful for
/// calling from the debugger).
@@ -222,15 +233,16 @@ size_t __asan_get_alloc_stack(void *addr, void **trace, size_t size,
/// \param[out] thread_id The thread ID of the address.
///
/// \returns Returns the number of stored frames or 0 on error.
-size_t __asan_get_free_stack(void *addr, void **trace, size_t size,
- int *thread_id);
+size_t SANITIZER_CDECL __asan_get_free_stack(void *addr, void **trace,
+ size_t size, int *thread_id);
/// Gets the current shadow memory mapping (useful for calling from the
/// debugger).
///
/// \param[out] shadow_scale Shadow scale value.
/// \param[out] shadow_offset Offset value.
-void __asan_get_shadow_mapping(size_t *shadow_scale, size_t *shadow_offset);
+void SANITIZER_CDECL __asan_get_shadow_mapping(size_t *shadow_scale,
+ size_t *shadow_offset);
/// This is an internal function that is called to report an error. However,
/// it is still a part of the interface because you might want to set a
@@ -242,29 +254,31 @@ void __asan_get_shadow_mapping(size_t *shadow_scale, size_t *shadow_offset);
/// \param addr Address of the ASan error.
/// \param is_write True if the error is a write error; false otherwise.
/// \param access_size Size of the memory access of the ASan error.
-void __asan_report_error(void *pc, void *bp, void *sp,
- void *addr, int is_write, size_t access_size);
+void SANITIZER_CDECL __asan_report_error(void *pc, void *bp, void *sp,
+ void *addr, int is_write,
+ size_t access_size);
// Deprecated. Call __sanitizer_set_death_callback instead.
-void __asan_set_death_callback(void (*callback)(void));
+void SANITIZER_CDECL __asan_set_death_callback(void (*callback)(void));
/// Sets the callback function to be called during ASan error reporting.
///
/// The callback provides a string pointer to the report.
///
/// \param callback User-provided function.
-void __asan_set_error_report_callback(void (*callback)(const char *));
+void SANITIZER_CDECL
+__asan_set_error_report_callback(void (*callback)(const char *));
/// User-provided callback on ASan errors.
///
/// You can provide a function that would be called immediately when ASan
/// detects an error. This is useful in cases when ASan detects an error but
/// your program crashes before the ASan report is printed.
-void __asan_on_error(void);
+void SANITIZER_CDECL __asan_on_error(void);
/// Prints accumulated statistics to <c>stderr</c> (useful for calling from the
/// debugger).
-void __asan_print_accumulated_stats(void);
+void SANITIZER_CDECL __asan_print_accumulated_stats(void);
/// User-provided default option settings.
///
@@ -273,7 +287,7 @@ void __asan_print_accumulated_stats(void);
/// <c>verbosity=1:halt_on_error=0</c>).
///
/// \returns Default options string.
-const char* __asan_default_options(void);
+const char *SANITIZER_CDECL __asan_default_options(void);
// The following two functions facilitate garbage collection in presence of
// ASan's fake stack.
@@ -285,7 +299,7 @@ const char* __asan_default_options(void);
/// does not have a fake stack.
///
/// \returns An opaque handler to the fake stack or NULL.
-void *__asan_get_current_fake_stack(void);
+void *SANITIZER_CDECL __asan_get_current_fake_stack(void);
/// Checks if an address belongs to a given fake stack.
///
@@ -305,22 +319,22 @@ void *__asan_get_current_fake_stack(void);
/// \param[out] beg Beginning of fake frame.
/// \param[out] end End of fake frame.
/// \returns Stack address or NULL.
-void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
- void **end);
+void *SANITIZER_CDECL __asan_addr_is_in_fake_stack(void *fake_stack, void *addr,
+ void **beg, void **end);
/// Performs shadow memory cleanup of the current thread's stack before a
/// function marked with the <c>[[noreturn]]</c> attribute is called.
///
/// To avoid false positives on the stack, must be called before no-return
/// functions like <c>_exit()</c> and <c>execl()</c>.
-void __asan_handle_no_return(void);
+void SANITIZER_CDECL __asan_handle_no_return(void);
/// Update allocation stack trace for the given allocation to the current stack
/// trace. Returns 1 if successful, 0 if not.
-int __asan_update_allocation_context(void* addr);
+int SANITIZER_CDECL __asan_update_allocation_context(void *addr);
#ifdef __cplusplus
-} // extern "C"
+} // extern "C"
#endif
-#endif // SANITIZER_ASAN_INTERFACE_H
+#endif // SANITIZER_ASAN_INTERFACE_H
diff --git a/compiler-rt/include/sanitizer/common_interface_defs.h b/compiler-rt/include/sanitizer/common_interface_defs.h
index 983df7cea16e..f9fce595b37b 100644
--- a/compiler-rt/include/sanitizer/common_interface_defs.h
+++ b/compiler-rt/include/sanitizer/common_interface_defs.h
@@ -15,9 +15,12 @@
#include <stddef.h>
#include <stdint.h>
-// GCC does not understand __has_feature.
-#if !defined(__has_feature)
-#define __has_feature(x) 0
+// Windows allows a user to set their default calling convention, but we always
+// use __cdecl
+#ifdef _WIN32
+#define SANITIZER_CDECL __cdecl
+#else
+#define SANITIZER_CDECL
#endif
#ifdef __cplusplus
@@ -39,71 +42,73 @@ typedef struct {
} __sanitizer_sandbox_arguments;
// Tell the tools to write their reports to "path.<pid>" instead of stderr.
-void __sanitizer_set_report_path(const char *path);
+void SANITIZER_CDECL __sanitizer_set_report_path(const char *path);
// Tell the tools to write their reports to the provided file descriptor
// (casted to void *).
-void __sanitizer_set_report_fd(void *fd);
+void SANITIZER_CDECL __sanitizer_set_report_fd(void *fd);
// Get the current full report file path, if a path was specified by
// an earlier call to __sanitizer_set_report_path. Returns null otherwise.
-const char *__sanitizer_get_report_path();
+const char *SANITIZER_CDECL __sanitizer_get_report_path();
// Notify the tools that the sandbox is going to be turned on. The reserved
// parameter will be used in the future to hold a structure with functions
// that the tools may call to bypass the sandbox.
-void __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
+void SANITIZER_CDECL
+__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
// This function is called by the tool when it has just finished reporting
// an error. 'error_summary' is a one-line string that summarizes
// the error message. This function can be overridden by the client.
-void __sanitizer_report_error_summary(const char *error_summary);
+void SANITIZER_CDECL
+__sanitizer_report_error_summary(const char *error_summary);
// Some of the sanitizers (for example ASan/TSan) could miss bugs that happen
// in unaligned loads/stores. To find such bugs reliably, you need to replace
// plain unaligned loads/stores with these calls.
/// Loads a 16-bit unaligned value.
-///
+//
/// \param p Pointer to unaligned memory.
///
/// \returns Loaded value.
-uint16_t __sanitizer_unaligned_load16(const void *p);
+uint16_t SANITIZER_CDECL __sanitizer_unaligned_load16(const void *p);
/// Loads a 32-bit unaligned value.
///
/// \param p Pointer to unaligned memory.
///
/// \returns Loaded value.
-uint32_t __sanitizer_unaligned_load32(const void *p);
+uint32_t SANITIZER_CDECL __sanitizer_unaligned_load32(const void *p);
/// Loads a 64-bit unaligned value.
///
/// \param p Pointer to unaligned memory.
///
/// \returns Loaded value.
-uint64_t __sanitizer_unaligned_load64(const void *p);
+uint64_t SANITIZER_CDECL __sanitizer_unaligned_load64(const void *p);
/// Stores a 16-bit unaligned value.
///
/// \param p Pointer to unaligned memory.
/// \param x 16-bit value to store.
-void __sanitizer_unaligned_store16(void *p, uint16_t x);
+void SANITIZER_CDECL __sanitizer_unaligned_store16(void *p, uint16_t x);
/// Stores a 32-bit unaligned value.
///
/// \param p Pointer to unaligned memory.
/// \param x 32-bit value to store.
-void __sanitizer_unaligned_store32(void *p, uint32_t x);
+void SANITIZER_CDECL __sanitizer_unaligned_store32(void *p, uint32_t x);
/// Stores a 64-bit unaligned value.
///
/// \param p Pointer to unaligned memory.
/// \param x 64-bit value to store.
-void __sanitizer_unaligned_store64(void *p, uint64_t x);
+void SANITIZER_CDECL __sanitizer_unaligned_store64(void *p, uint64_t x);
// Returns 1 on the first call, then returns 0 thereafter. Called by the tool
// to ensure only one report is printed when multiple errors occur
// simultaneously.
-int __sanitizer_acquire_crash_state();
+int SANITIZER_CDECL __sanitizer_acquire_crash_state();
/// Annotates the current state of a contiguous container, such as
/// <c>std::vector</c>, <c>std::string</c>, or similar.
@@ -151,10 +156,8 @@ int __sanitizer_acquire_crash_state();
/// \param end End of memory region.
/// \param old_mid Old middle of memory region.
/// \param new_mid New middle of memory region.
-void __sanitizer_annotate_contiguous_container(const void *beg,
- const void *end,
- const void *old_mid,
- const void *new_mid);
+void SANITIZER_CDECL __sanitizer_annotate_contiguous_container(
+ const void *beg, const void *end, const void *old_mid, const void *new_mid);
/// Similar to <c>__sanitizer_annotate_contiguous_container</c>.
///
@@ -185,7 +188,7 @@ void __sanitizer_annotate_contiguous_container(const void *beg,
/// \param old_container_end End of used region.
/// \param new_container_beg New beginning of used region.
/// \param new_container_end New end of used region.
-void __sanitizer_annotate_double_ended_contiguous_container(
+void SANITIZER_CDECL __sanitizer_annotate_double_ended_contiguous_container(
const void *storage_beg, const void *storage_end,
const void *old_container_beg, const void *old_container_end,
const void *new_container_beg, const void *new_container_end);
@@ -206,8 +209,9 @@ void __sanitizer_annotate_double_ended_contiguous_container(
///
/// \returns True if the contiguous container <c>[beg, end)</c> is properly
/// poisoned.
-int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
- const void *end);
+int SANITIZER_CDECL __sanitizer_verify_contiguous_container(const void *beg,
+ const void *mid,
+ const void *end);
/// Returns true if the double ended contiguous
/// container <c>[storage_beg, storage_end)</c> is properly poisoned.
@@ -230,7 +234,7 @@ int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
/// \returns True if the double-ended contiguous container <c>[storage_beg,
/// container_beg, container_end, end)</c> is properly poisoned - only
/// [container_beg; container_end) is addressable.
-int __sanitizer_verify_double_ended_contiguous_container(
+int SANITIZER_CDECL __sanitizer_verify_double_ended_contiguous_container(
const void *storage_beg, const void *container_beg,
const void *container_end, const void *storage_end);
@@ -244,9 +248,8 @@ int __sanitizer_verify_double_ended_contiguous_container(
/// \param end Old end of memory region.
///
/// \returns The bad address or NULL.
-const void *__sanitizer_contiguous_container_find_bad_address(const void *beg,
- const void *mid,
- const void *end);
+const void *SANITIZER_CDECL __sanitizer_contiguous_container_find_bad_address(
+ const void *beg, const void *mid, const void *end);
/// returns the address of the first improperly poisoned byte.
///
@@ -258,13 +261,14 @@ const void *__sanitizer_contiguous_container_find_bad_address(const void *beg,
/// \param storage_end End of memory region.
///
/// \returns The bad address or NULL.
-const void *__sanitizer_double_ended_contiguous_container_find_bad_address(
+const void *SANITIZER_CDECL
+__sanitizer_double_ended_contiguous_container_find_bad_address(
const void *storage_beg, const void *container_beg,
const void *container_end, const void *storage_end);
/// Prints the stack trace leading to this call (useful for calling from the
/// debugger).
-void __sanitizer_print_stack_trace(void);
+void SANITIZER_CDECL __sanitizer_print_stack_trace(void);
// Symbolizes the supplied 'pc' using the format string 'fmt'.
// Outputs at most 'out_buf_size' bytes into 'out_buf'.
@@ -276,17 +280,20 @@ void __sanitizer_print_stack_trace(void);
// Inlined frames can be removed with 'symbolize_inline_frames=0'.
// The format syntax is described in
// lib/sanitizer_common/sanitizer_stacktrace_printer.h.
-void __sanitizer_symbolize_pc(void *pc, const char *fmt, char *out_buf,
- size_t out_buf_size);
+void SANITIZER_CDECL __sanitizer_symbolize_pc(void *pc, const char *fmt,
+ char *out_buf,
+ size_t out_buf_size);
// Same as __sanitizer_symbolize_pc, but for data section (i.e. globals).
-void __sanitizer_symbolize_global(void *data_ptr, const char *fmt,
- char *out_buf, size_t out_buf_size);
+void SANITIZER_CDECL __sanitizer_symbolize_global(void *data_ptr,
+ const char *fmt,
+ char *out_buf,
+ size_t out_buf_size);
// Determine the return address.
#if !defined(_MSC_VER) || defined(__clang__)
#define __sanitizer_return_address() \
__builtin_extract_return_addr(__builtin_return_address(0))
#else
-extern "C" void *_ReturnAddress(void);
+void *_ReturnAddress(void);
#pragma intrinsic(_ReturnAddress)
#define __sanitizer_return_address() _ReturnAddress()
#endif
@@ -296,8 +303,7 @@ extern "C" void *_ReturnAddress(void);
/// Passing 0 will unset the callback.
///
/// \param callback User-provided callback.
-void __sanitizer_set_death_callback(void (*callback)(void));
-
+void SANITIZER_CDECL __sanitizer_set_death_callback(void (*callback)(void));
// Interceptor hooks.
// Whenever a libc function interceptor is called, it checks if the
@@ -313,8 +319,10 @@ void __sanitizer_set_death_callback(void (*callback)(void));
/// \param s2 Pointer to block of memory.
/// \param n Number of bytes to compare.
/// \param result Value returned by the intercepted function.
-void __sanitizer_weak_hook_memcmp(void *called_pc, const void *s1,
- const void *s2, size_t n, int result);
+void SANITIZER_CDECL __sanitizer_weak_hook_memcmp(void *called_pc,
+ const void *s1,
+ const void *s2, size_t n,
+ int result);
/// Interceptor hook for <c>strncmp()</c>.
///
@@ -323,8 +331,10 @@ void __sanitizer_weak_hook_memcmp(void *called_pc, const void *s1,
/// \param s2 Pointer to block of memory.
/// \param n Number of bytes to compare.
/// \param result Value returned by the intercepted function.
-void __sanitizer_weak_hook_strncmp(void *called_pc, const char *s1,
- const char *s2, size_t n, int result);
+void SANITIZER_CDECL __sanitizer_weak_hook_strncmp(void *called_pc,
+ const char *s1,
+ const char *s2, size_t n,
+ int result);
/// Interceptor hook for <c>strncasecmp()</c>.
///
@@ -333,8 +343,10 @@ void __sanitizer_weak_hook_strncmp(void *called_pc, const char *s1,
/// \param s2 Pointer to block of memory.
/// \param n Number of bytes to compare.
/// \param result Value returned by the intercepted function.
-void __sanitizer_weak_hook_strncasecmp(void *called_pc, const char *s1,
- const char *s2, size_t n, int result);
+void SANITIZER_CDECL __sanitizer_weak_hook_strncasecmp(void *called_pc,
+ const char *s1,
+ const char *s2, size_t n,
+ int result);
/// Interceptor hook for <c>strcmp()</c>.
///
@@ -342,8 +354,9 @@ void __sanitizer_weak_hook_strncasecmp(void *called_pc, const char *s1,
/// \param s1 Pointer to block of memory.
/// \param s2 Pointer to block of memory.
/// \param result Value returned by the intercepted function.
-void __sanitizer_weak_hook_strcmp(void *called_pc, const char *s1,
- const char *s2, int result);
+void SANITIZER_CDECL __sanitizer_weak_hook_strcmp(void *called_pc,
+ const char *s1,
+ const char *s2, int result);
/// Interceptor hook for <c>strcasecmp()</c>.
///
@@ -351,8 +364,10 @@ void __sanitizer_weak_hook_strcmp(void *called_pc, const char *s1,
/// \param s1 Pointer to block of memory.
/// \param s2 Pointer to block of memory.
/// \param result Value returned by the intercepted function.
-void __sanitizer_weak_hook_strcasecmp(void *called_pc, const char *s1,
- const char *s2, int result);
+void SANITIZER_CDECL __sanitizer_weak_hook_strcasecmp(void *called_pc,
+ const char *s1,
+ const char *s2,
+ int result);
/// Interceptor hook for <c>strstr()</c>.
///
@@ -360,23 +375,27 @@ void __sanitizer_weak_hook_strcasecmp(void *called_pc, const char *s1,
/// \param s1 Pointer to block of memory.
/// \param s2 Pointer to block of memory.
/// \param result Value returned by the intercepted function.
-void __sanitizer_weak_hook_strstr(void *called_pc, const char *s1,
- const char *s2, char *result);
+void SANITIZER_CDECL __sanitizer_weak_hook_strstr(void *called_pc,
+ const char *s1,
+ const char *s2, char *result);
-void __sanitizer_weak_hook_strcasestr(void *called_pc, const char *s1,
- const char *s2, char *result);
+void SANITIZER_CDECL __sanitizer_weak_hook_strcasestr(void *called_pc,
+ const char *s1,
+ const char *s2,
+ char *result);
-void __sanitizer_weak_hook_memmem(void *called_pc,
- const void *s1, size_t len1,
- const void *s2, size_t len2, void *result);
+void SANITIZER_CDECL __sanitizer_weak_hook_memmem(void *called_pc,
+ const void *s1, size_t len1,
+ const void *s2, size_t len2,
+ void *result);
// Prints stack traces for all live heap allocations ordered by total
// allocation size until top_percent of total live heap is shown. top_percent
// should be between 1 and 100. At most max_number_of_contexts contexts
// (stack traces) are printed.
// Experimental feature currently available only with ASan on Linux/x86_64.
-void __sanitizer_print_memory_profile(size_t top_percent,
- size_t max_number_of_contexts);
+void SANITIZER_CDECL __sanitizer_print_memory_profile(
+ size_t top_percent, size_t max_number_of_contexts);
/// Notify ASan that a fiber switch has started (required only if implementing
/// your own fiber library).
@@ -405,8 +424,9 @@ void __sanitizer_print_memory_profile(size_t top_percent,
/// \param[out] fake_stack_save Fake stack save location.
/// \param bottom Bottom address of stack.
/// \param size Size of stack in bytes.
-void __sanitizer_start_switch_fiber(void **fake_stack_save,
- const void *bottom, size_t size);
+void SANITIZER_CDECL __sanitizer_start_switch_fiber(void **fake_stack_save,
+ const void *bottom,
+ size_t size);
/// Notify ASan that a fiber switch has completed (required only if
/// implementing your own fiber library).
@@ -419,18 +439,17 @@ void __sanitizer_start_switch_fiber(void **fake_stack_save,
/// \param fake_stack_save Fake stack save location.
/// \param[out] bottom_old Bottom address of old stack.
/// \param[out] size_old Size of old stack in bytes.
-void __sanitizer_finish_switch_fiber(void *fake_stack_save,
- const void **bottom_old,
- size_t *size_old);
+void SANITIZER_CDECL __sanitizer_finish_switch_fiber(void *fake_stack_save,
+ const void **bottom_old,
+ size_t *size_old);
// Get full module name and calculate pc offset within it.
// Returns 1 if pc belongs to some module, 0 if module was not found.
-int __sanitizer_get_module_and_offset_for_pc(void *pc, char *module_path,
- size_t module_path_len,
- void **pc_offset);
+int SANITIZER_CDECL __sanitizer_get_module_and_offset_for_pc(
+ void *pc, char *module_path, size_t module_path_len, void **pc_offset);
#ifdef __cplusplus
-} // extern "C"
+} // extern "C"
#endif
-#endif // SANITIZER_COMMON_INTERFACE_DEFS_H
+#endif // SANITIZER_COMMON_INTERFACE_DEFS_H
diff --git a/compiler-rt/include/sanitizer/coverage_interface.h b/compiler-rt/include/sanitizer/coverage_interface.h
index c063cfe60c5b..6235dfc2d4ba 100644
--- a/compiler-rt/include/sanitizer/coverage_interface.h
+++ b/compiler-rt/include/sanitizer/coverage_interface.h
@@ -18,18 +18,19 @@
extern "C" {
#endif
- // Record and dump coverage info.
- void __sanitizer_cov_dump(void);
+// Record and dump coverage info.
+void SANITIZER_CDECL __sanitizer_cov_dump(void);
- // Clear collected coverage info.
- void __sanitizer_cov_reset(void);
+// Clear collected coverage info.
+void SANITIZER_CDECL __sanitizer_cov_reset(void);
- // Dump collected coverage info. Sorts pcs by module into individual .sancov
- // files.
- void __sanitizer_dump_coverage(const uintptr_t *pcs, uintptr_t len);
+// Dump collected coverage info. Sorts pcs by module into individual .sancov
+// files.
+void SANITIZER_CDECL __sanitizer_dump_coverage(const uintptr_t *pcs,
+ uintptr_t len);
#ifdef __cplusplus
-} // extern "C"
+} // extern "C"
#endif
-#endif // SANITIZER_COVERAG_INTERFACE_H
+#endif // SANITIZER_COVERAG_INTERFACE_H
diff --git a/compiler-rt/include/sanitizer/dfsan_interface.h b/compiler-rt/include/sanitizer/dfsan_interface.h
index 519bfffa9a20..4e52e1b54cd8 100644
--- a/compiler-rt/include/sanitizer/dfsan_interface.h
+++ b/compiler-rt/include/sanitizer/dfsan_interface.h
@@ -13,9 +13,9 @@
#ifndef DFSAN_INTERFACE_H
#define DFSAN_INTERFACE_H
+#include <sanitizer/common_interface_defs.h>
#include <stddef.h>
#include <stdint.h>
-#include <sanitizer/common_interface_defs.h>
#ifdef __cplusplus
extern "C" {
@@ -25,29 +25,30 @@ typedef uint8_t dfsan_label;
typedef uint32_t dfsan_origin;
/// Signature of the callback argument to dfsan_set_write_callback().
-typedef void (*dfsan_write_callback_t)(int fd, const void *buf, size_t count);
+typedef void(SANITIZER_CDECL *dfsan_write_callback_t)(int fd, const void *buf,
+ size_t count);
/// Signature of the callback argument to dfsan_set_conditional_callback().
-typedef void (*dfsan_conditional_callback_t)(dfsan_label label,
- dfsan_origin origin);
+typedef void(SANITIZER_CDECL *dfsan_conditional_callback_t)(
+ dfsan_label label, dfsan_origin origin);
/// Signature of the callback argument to dfsan_set_reaches_function_callback().
/// The description is intended to hold the name of the variable.
-typedef void (*dfsan_reaches_function_callback_t)(dfsan_label label,
- dfsan_origin origin,
- const char *file,
- unsigned int line,
- const char *function);
+typedef void(SANITIZER_CDECL *dfsan_reaches_function_callback_t)(
+ dfsan_label label, dfsan_origin origin, const char *file, unsigned int line,
+ const char *function);
/// Computes the union of \c l1 and \c l2, resulting in a union label.
-dfsan_label dfsan_union(dfsan_label l1, dfsan_label l2);
+dfsan_label SANITIZER_CDECL dfsan_union(dfsan_label l1, dfsan_label l2);
/// Sets the label for each address in [addr,addr+size) to \c label.
-void dfsan_set_label(dfsan_label label, void *addr, size_t size);
+void SANITIZER_CDECL dfsan_set_label(dfsan_label label, void *addr,
+ size_t size);
/// Sets the label for each address in [addr,addr+size) to the union of the
/// current label for that address and \c label.
-void dfsan_add_label(dfsan_label label, void *addr, size_t size);
+void SANITIZER_CDECL dfsan_add_label(dfsan_label label, void *addr,
+ size_t size);
/// Retrieves the label associated with the given data.
///
@@ -55,23 +56,24 @@ void dfsan_add_label(dfsan_label label, void *addr, size_t size);
/// which can be truncated or extended (implicitly or explicitly) as necessary.
/// The truncation/extension operations will preserve the label of the original
/// value.
-dfsan_label dfsan_get_label(long data);
+dfsan_label SANITIZER_CDECL dfsan_get_label(long data);
/// Retrieves the immediate origin associated with the given data. The returned
/// origin may point to another origin.
///
/// The type of 'data' is arbitrary.
-dfsan_origin dfsan_get_origin(long data);
+dfsan_origin SANITIZER_CDECL dfsan_get_origin(long data);
/// Retrieves the label associated with the data at the given address.
-dfsan_label dfsan_read_label(const void *addr, size_t size);
+dfsan_label SANITIZER_CDECL dfsan_read_label(const void *addr, size_t size);
/// Return the origin associated with the first taint byte in the size bytes
/// from the address addr.
-dfsan_origin dfsan_read_origin_of_first_taint(const void *addr, size_t size);
+dfsan_origin SANITIZER_CDECL dfsan_read_origin_of_first_taint(const void *addr,
+ size_t size);
-/// Returns whether the given label label contains the label elem.
-int dfsan_has_label(dfsan_label label, dfsan_label elem);
+/// Returns whether the given label contains the label elem.
+int SANITIZER_CDECL dfsan_has_label(dfsan_label label, dfsan_label elem);
/// Flushes the DFSan shadow, i.e. forgets about all labels currently associated
/// with the application memory. Use this call to start over the taint tracking
@@ -79,37 +81,39 @@ int dfsan_has_label(dfsan_label label, dfsan_label elem);
///
/// Note: If another thread is working with tainted data during the flush, that
/// taint could still be written to shadow after the flush.
-void dfsan_flush(void);
+void SANITIZER_CDECL dfsan_flush(void);
/// Sets a callback to be invoked on calls to write(). The callback is invoked
/// before the write is done. The write is not guaranteed to succeed when the
/// callback executes. Pass in NULL to remove any callback.
-void dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback);
+void SANITIZER_CDECL
+dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback);
/// Sets a callback to be invoked on any conditional expressions which have a
/// taint label set. This can be used to find where tainted data influences
/// the behavior of the program.
/// These callbacks will only be added when -dfsan-conditional-callbacks=true.
-void dfsan_set_conditional_callback(dfsan_conditional_callback_t callback);
+void SANITIZER_CDECL
+dfsan_set_conditional_callback(dfsan_conditional_callback_t callback);
/// Conditional expressions occur during signal handlers.
/// Making callbacks that handle signals well is tricky, so when
/// -dfsan-conditional-callbacks=true, conditional expressions used in signal
/// handlers will add the labels they see into a global (bitwise-or together).
/// This function returns all label bits seen in signal handler conditions.
-dfsan_label dfsan_get_labels_in_signal_conditional();
+dfsan_label SANITIZER_CDECL dfsan_get_labels_in_signal_conditional();
/// Sets a callback to be invoked when tainted data reaches a function.
/// This could occur at function entry, or at a load instruction.
/// These callbacks will only be added if -dfsan-reaches-function-callbacks=1.
-void dfsan_set_reaches_function_callback(
- dfsan_reaches_function_callback_t callback);
+void SANITIZER_CDECL
+dfsan_set_reaches_function_callback(dfsan_reaches_function_callback_t callback);
/// Making callbacks that handle signals well is tricky, so when
/// -dfsan-reaches-function-callbacks=true, functions reached in signal
/// handlers will add the labels they see into a global (bitwise-or together).
/// This function returns all label bits seen during signal handlers.
-dfsan_label dfsan_get_labels_in_signal_reaches_function();
+dfsan_label SANITIZER_CDECL dfsan_get_labels_in_signal_reaches_function();
/// Interceptor hooks.
/// Whenever a dfsan's custom function is called the corresponding
@@ -117,20 +121,25 @@ dfsan_label dfsan_get_labels_in_signal_reaches_function();
/// The primary use case is taint-guided fuzzing, where the fuzzer
/// needs to see the parameters of the function and the labels.
/// FIXME: implement more hooks.
-void dfsan_weak_hook_memcmp(void *caller_pc, const void *s1, const void *s2,
- size_t n, dfsan_label s1_label,
- dfsan_label s2_label, dfsan_label n_label);
-void dfsan_weak_hook_strncmp(void *caller_pc, const char *s1, const char *s2,
- size_t n, dfsan_label s1_label,
- dfsan_label s2_label, dfsan_label n_label);
+void SANITIZER_CDECL dfsan_weak_hook_memcmp(void *caller_pc, const void *s1,
+ const void *s2, size_t n,
+ dfsan_label s1_label,
+ dfsan_label s2_label,
+ dfsan_label n_label);
+void SANITIZER_CDECL dfsan_weak_hook_strncmp(void *caller_pc, const char *s1,
+ const char *s2, size_t n,
+ dfsan_label s1_label,
+ dfsan_label s2_label,
+ dfsan_label n_label);
/// Prints the origin trace of the label at the address addr to stderr. It also
/// prints description at the beginning of the trace. If origin tracking is not
/// on, or the address is not labeled, it prints nothing.
-void dfsan_print_origin_trace(const void *addr, const char *description);
+void SANITIZER_CDECL dfsan_print_origin_trace(const void *addr,
+ const char *description);
/// As above, but use an origin id from dfsan_get_origin() instead of address.
/// Does not include header line with taint label and address information.
-void dfsan_print_origin_id_trace(dfsan_origin origin);
+void SANITIZER_CDECL dfsan_print_origin_id_trace(dfsan_origin origin);
/// Prints the origin trace of the label at the address \p addr to a
/// pre-allocated output buffer. If origin tracking is not on, or the address is
@@ -166,12 +175,15 @@ void dfsan_print_origin_id_trace(dfsan_origin origin);
/// \returns The number of symbols that should have been written to \p out_buf
/// (not including trailing null byte '\0'). Thus, the string is truncated iff
/// return value is not less than \p out_buf_size.
-size_t dfsan_sprint_origin_trace(const void *addr, const char *description,
- char *out_buf, size_t out_buf_size);
+size_t SANITIZER_CDECL dfsan_sprint_origin_trace(const void *addr,
+ const char *description,
+ char *out_buf,
+ size_t out_buf_size);
/// As above, but use an origin id from dfsan_get_origin() instead of address.
/// Does not include header line with taint label and address information.
-size_t dfsan_sprint_origin_id_trace(dfsan_origin origin, char *out_buf,
- size_t out_buf_size);
+size_t SANITIZER_CDECL dfsan_sprint_origin_id_trace(dfsan_origin origin,
+ char *out_buf,
+ size_t out_buf_size);
/// Prints the stack trace leading to this call to a pre-allocated output
/// buffer.
@@ -184,19 +196,20 @@ size_t dfsan_sprint_origin_id_trace(dfsan_origin origin, char *out_buf,
/// \returns The number of symbols that should have been written to \p out_buf
/// (not including trailing null byte '\0'). Thus, the string is truncated iff
/// return value is not less than \p out_buf_size.
-size_t dfsan_sprint_stack_trace(char *out_buf, size_t out_buf_size);
+size_t SANITIZER_CDECL dfsan_sprint_stack_trace(char *out_buf,
+ size_t out_buf_size);
/// Retrieves the very first origin associated with the data at the given
/// address.
-dfsan_origin dfsan_get_init_origin(const void *addr);
+dfsan_origin SANITIZER_CDECL dfsan_get_init_origin(const void *addr);
/// Returns the value of -dfsan-track-origins.
/// * 0: do not track origins.
/// * 1: track origins at memory store operations.
/// * 2: track origins at memory load and store operations.
-int dfsan_get_track_origins(void);
+int SANITIZER_CDECL dfsan_get_track_origins(void);
#ifdef __cplusplus
-} // extern "C"
+} // extern "C"
template <typename T> void dfsan_set_label(dfsan_label label, T &data) {
dfsan_set_label(label, (void *)&data, sizeof(T));
@@ -204,4 +217,4 @@ template <typename T> void dfsan_set_label(dfsan_label label, T &data) {
#endif
-#endif // DFSAN_INTERFACE_H
+#endif // DFSAN_INTERFACE_H
diff --git a/compiler-rt/include/sanitizer/hwasan_interface.h b/compiler-rt/include/sanitizer/hwasan_interface.h
index ee742c7f3031..abe310c06669 100644
--- a/compiler-rt/include/sanitizer/hwasan_interface.h
+++ b/compiler-rt/include/sanitizer/hwasan_interface.h
@@ -18,82 +18,88 @@
#ifdef __cplusplus
extern "C" {
#endif
- // Libc hook for program startup in statically linked executables.
- // Initializes enough of the runtime to run instrumented code. This function
- // should only be called in statically linked executables because it modifies
- // the GOT, which won't work in regular binaries because RELRO will already
- // have been applied by the time the function is called. This also means that
- // the function should be called before libc applies RELRO.
- // Does not call libc unless there is an error.
- // Can be called multiple times.
- void __hwasan_init_static(void);
-
- // This function may be optionally provided by user and should return
- // a string containing HWASan runtime options. See asan_flags.h for details.
- const char* __hwasan_default_options(void);
-
- void __hwasan_enable_allocator_tagging(void);
- void __hwasan_disable_allocator_tagging(void);
-
- // Mark region of memory with the given tag. Both address and size need to be
- // 16-byte aligned.
- void __hwasan_tag_memory(const volatile void *p, unsigned char tag,
- size_t size);
-
- /// Set pointer tag. Previous tag is lost.
- void *__hwasan_tag_pointer(const volatile void *p, unsigned char tag);
-
- // Set memory tag from the current SP address to the given address to zero.
- // This is meant to annotate longjmp and other non-local jumps.
- // This function needs to know the (almost) exact destination frame address;
- // clearing shadow for the entire thread stack like __asan_handle_no_return
- // does would cause false reports.
- void __hwasan_handle_longjmp(const void *sp_dst);
-
- // Set memory tag for the part of the current thread stack below sp_dst to
- // zero. Call this in vfork() before returning in the parent process.
- void __hwasan_handle_vfork(const void *sp_dst);
-
- // Libc hook for thread creation. Should be called in the child thread before
- // any instrumented code.
- void __hwasan_thread_enter();
-
- // Libc hook for thread destruction. No instrumented code should run after
- // this call.
- void __hwasan_thread_exit();
-
- // Print shadow and origin for the memory range to stderr in a human-readable
- // format.
- void __hwasan_print_shadow(const volatile void *x, size_t size);
-
- // Print one-line report about the memory usage of the current process.
- void __hwasan_print_memory_usage();
-
- /* Returns the offset of the first byte in the memory range that can not be
- * accessed through the pointer in x, or -1 if the whole range is good. */
- intptr_t __hwasan_test_shadow(const volatile void *x, size_t size);
-
- /* Sets the callback function to be called during HWASan error reporting. */
- void __hwasan_set_error_report_callback(void (*callback)(const char *));
-
- int __sanitizer_posix_memalign(void **memptr, size_t alignment, size_t size);
- void * __sanitizer_memalign(size_t alignment, size_t size);
- void * __sanitizer_aligned_alloc(size_t alignment, size_t size);
- void * __sanitizer___libc_memalign(size_t alignment, size_t size);
- void * __sanitizer_valloc(size_t size);
- void * __sanitizer_pvalloc(size_t size);
- void __sanitizer_free(void *ptr);
- void __sanitizer_cfree(void *ptr);
- size_t __sanitizer_malloc_usable_size(const void *ptr);
- struct mallinfo __sanitizer_mallinfo();
- int __sanitizer_mallopt(int cmd, int value);
- void __sanitizer_malloc_stats(void);
- void * __sanitizer_calloc(size_t nmemb, size_t size);
- void * __sanitizer_realloc(void *ptr, size_t size);
- void * __sanitizer_reallocarray(void *ptr, size_t nmemb, size_t size);
- void * __sanitizer_malloc(size_t size);
+// Libc hook for program startup in statically linked executables.
+// Initializes enough of the runtime to run instrumented code. This function
+// should only be called in statically linked executables because it modifies
+// the GOT, which won't work in regular binaries because RELRO will already
+// have been applied by the time the function is called. This also means that
+// the function should be called before libc applies RELRO.
+// Does not call libc unless there is an error.
+// Can be called multiple times.
+void SANITIZER_CDECL __hwasan_init_static(void);
+
+// This function may be optionally provided by user and should return
+// a string containing HWASan runtime options. See asan_flags.h for details.
+const char *SANITIZER_CDECL __hwasan_default_options(void);
+
+void SANITIZER_CDECL __hwasan_enable_allocator_tagging(void);
+void SANITIZER_CDECL __hwasan_disable_allocator_tagging(void);
+
+// Mark region of memory with the given tag. Both address and size need to be
+// 16-byte aligned.
+void SANITIZER_CDECL __hwasan_tag_memory(const volatile void *p,
+ unsigned char tag, size_t size);
+
+/// Set pointer tag. Previous tag is lost.
+void *SANITIZER_CDECL __hwasan_tag_pointer(const volatile void *p,
+ unsigned char tag);
+
+// Set memory tag from the current SP address to the given address to zero.
+// This is meant to annotate longjmp and other non-local jumps.
+// This function needs to know the (almost) exact destination frame address;
+// clearing shadow for the entire thread stack like __asan_handle_no_return
+// does would cause false reports.
+void SANITIZER_CDECL __hwasan_handle_longjmp(const void *sp_dst);
+
+// Set memory tag for the part of the current thread stack below sp_dst to
+// zero. Call this in vfork() before returning in the parent process.
+void SANITIZER_CDECL __hwasan_handle_vfork(const void *sp_dst);
+
+// Libc hook for thread creation. Should be called in the child thread before
+// any instrumented code.
+void SANITIZER_CDECL __hwasan_thread_enter();
+
+// Libc hook for thread destruction. No instrumented code should run after
+// this call.
+void SANITIZER_CDECL __hwasan_thread_exit();
+
+// Print shadow and origin for the memory range to stderr in a human-readable
+// format.
+void SANITIZER_CDECL __hwasan_print_shadow(const volatile void *x, size_t size);
+
+// Print one-line report about the memory usage of the current process.
+void SANITIZER_CDECL __hwasan_print_memory_usage();
+
+/* Returns the offset of the first byte in the memory range that can not be
+ * accessed through the pointer in x, or -1 if the whole range is good. */
+intptr_t SANITIZER_CDECL __hwasan_test_shadow(const volatile void *x,
+ size_t size);
+
+/* Sets the callback function to be called during HWASan error reporting. */
+void SANITIZER_CDECL
+__hwasan_set_error_report_callback(void (*callback)(const char *));
+
+int SANITIZER_CDECL __sanitizer_posix_memalign(void **memptr, size_t alignment,
+ size_t size);
+void *SANITIZER_CDECL __sanitizer_memalign(size_t alignment, size_t size);
+void *SANITIZER_CDECL __sanitizer_aligned_alloc(size_t alignment, size_t size);
+void *SANITIZER_CDECL __sanitizer___libc_memalign(size_t alignment,
+ size_t size);
+void *SANITIZER_CDECL __sanitizer_valloc(size_t size);
+void *SANITIZER_CDECL __sanitizer_pvalloc(size_t size);
+void SANITIZER_CDECL __sanitizer_free(void *ptr);
+void SANITIZER_CDECL __sanitizer_cfree(void *ptr);
+size_t SANITIZER_CDECL __sanitizer_malloc_usable_size(const void *ptr);
+struct mallinfo SANITIZER_CDECL __sanitizer_mallinfo();
+int SANITIZER_CDECL __sanitizer_mallopt(int cmd, int value);
+void SANITIZER_CDECL __sanitizer_malloc_stats(void);
+void *SANITIZER_CDECL __sanitizer_calloc(size_t nmemb, size_t size);
+void *SANITIZER_CDECL __sanitizer_realloc(void *ptr, size_t size);
+void *SANITIZER_CDECL __sanitizer_reallocarray(void *ptr, size_t nmemb,
+ size_t size);
+void *SANITIZER_CDECL __sanitizer_malloc(size_t size);
#ifdef __cplusplus
-} // extern "C"
+} // extern "C"
#endif
-#endif // SANITIZER_HWASAN_INTERFACE_H
+#endif // SANITIZER_HWASAN_INTERFACE_H
diff --git a/compiler-rt/include/sanitizer/lsan_interface.h b/compiler-rt/include/sanitizer/lsan_interface.h
index 2bb992672f2e..18f3456a126c 100644
--- a/compiler-rt/include/sanitizer/lsan_interface.h
+++ b/compiler-rt/include/sanitizer/lsan_interface.h
@@ -18,72 +18,72 @@
#ifdef __cplusplus
extern "C" {
#endif
- // Allocations made between calls to __lsan_disable() and __lsan_enable() will
- // be treated as non-leaks. Disable/enable pairs may be nested.
- void __lsan_disable(void);
- void __lsan_enable(void);
+// Allocations made between calls to __lsan_disable() and __lsan_enable() will
+// be treated as non-leaks. Disable/enable pairs may be nested.
+void SANITIZER_CDECL __lsan_disable(void);
+void SANITIZER_CDECL __lsan_enable(void);
- // The heap object into which p points will be treated as a non-leak.
- void __lsan_ignore_object(const void *p);
+// The heap object into which p points will be treated as a non-leak.
+void SANITIZER_CDECL __lsan_ignore_object(const void *p);
- // Memory regions registered through this interface will be treated as sources
- // of live pointers during leak checking. Useful if you store pointers in
- // mapped memory.
- // Points of note:
- // - __lsan_unregister_root_region() must be called with the same pointer and
- // size that have earlier been passed to __lsan_register_root_region()
- // - LSan will skip any inaccessible memory when scanning a root region. E.g.,
- // if you map memory within a larger region that you have mprotect'ed, you can
- // register the entire large region.
- // - the implementation is not optimized for performance. This interface is
- // intended to be used for a small number of relatively static regions.
- void __lsan_register_root_region(const void *p, size_t size);
- void __lsan_unregister_root_region(const void *p, size_t size);
+// Memory regions registered through this interface will be treated as sources
+// of live pointers during leak checking. Useful if you store pointers in
+// mapped memory.
+// Points of note:
+// - __lsan_unregister_root_region() must be called with the same pointer and
+// size that have earlier been passed to __lsan_register_root_region()
+// - LSan will skip any inaccessible memory when scanning a root region. E.g.,
+// if you map memory within a larger region that you have mprotect'ed, you can
+// register the entire large region.
+// - the implementation is not optimized for performance. This interface is
+// intended to be used for a small number of relatively static regions.
+void SANITIZER_CDECL __lsan_register_root_region(const void *p, size_t size);
+void SANITIZER_CDECL __lsan_unregister_root_region(const void *p, size_t size);
- // Check for leaks now. This function behaves identically to the default
- // end-of-process leak check. In particular, it will terminate the process if
- // leaks are found and the exitcode runtime flag is non-zero.
- // Subsequent calls to this function will have no effect and end-of-process
- // leak check will not run. Effectively, end-of-process leak check is moved to
- // the time of first invocation of this function.
- // By calling this function early during process shutdown, you can instruct
- // LSan to ignore shutdown-only leaks which happen later on.
- void __lsan_do_leak_check(void);
+// Check for leaks now. This function behaves identically to the default
+// end-of-process leak check. In particular, it will terminate the process if
+// leaks are found and the exitcode runtime flag is non-zero.
+// Subsequent calls to this function will have no effect and end-of-process
+// leak check will not run. Effectively, end-of-process leak check is moved to
+// the time of first invocation of this function.
+// By calling this function early during process shutdown, you can instruct
+// LSan to ignore shutdown-only leaks which happen later on.
+void SANITIZER_CDECL __lsan_do_leak_check(void);
- // Check for leaks now. Returns zero if no leaks have been found or if leak
- // detection is disabled, non-zero otherwise.
- // This function may be called repeatedly, e.g. to periodically check a
- // long-running process. It prints a leak report if appropriate, but does not
- // terminate the process. It does not affect the behavior of
- // __lsan_do_leak_check() or the end-of-process leak check, and is not
- // affected by them.
- int __lsan_do_recoverable_leak_check(void);
+// Check for leaks now. Returns zero if no leaks have been found or if leak
+// detection is disabled, non-zero otherwise.
+// This function may be called repeatedly, e.g. to periodically check a
+// long-running process. It prints a leak report if appropriate, but does not
+// terminate the process. It does not affect the behavior of
+// __lsan_do_leak_check() or the end-of-process leak check, and is not
+// affected by them.
+int SANITIZER_CDECL __lsan_do_recoverable_leak_check(void);
- // The user may optionally provide this function to disallow leak checking
- // for the program it is linked into (if the return value is non-zero). This
- // function must be defined as returning a constant value; any behavior beyond
- // that is unsupported.
- // To avoid dead stripping, you may need to define this function with
- // __attribute__((used))
- int __lsan_is_turned_off(void);
+// The user may optionally provide this function to disallow leak checking
+// for the program it is linked into (if the return value is non-zero). This
+// function must be defined as returning a constant value; any behavior beyond
+// that is unsupported.
+// To avoid dead stripping, you may need to define this function with
+// __attribute__((used))
+int SANITIZER_CDECL __lsan_is_turned_off(void);
- // This function may be optionally provided by user and should return
- // a string containing LSan runtime options. See lsan_flags.inc for details.
- const char *__lsan_default_options(void);
+// This function may be optionally provided by user and should return
+// a string containing LSan runtime options. See lsan_flags.inc for details.
+const char *SANITIZER_CDECL __lsan_default_options(void);
- // This function may be optionally provided by the user and should return
- // a string containing LSan suppressions.
- const char *__lsan_default_suppressions(void);
+// This function may be optionally provided by the user and should return
+// a string containing LSan suppressions.
+const char *SANITIZER_CDECL __lsan_default_suppressions(void);
#ifdef __cplusplus
-} // extern "C"
+} // extern "C"
namespace __lsan {
class ScopedDisabler {
- public:
+public:
ScopedDisabler() { __lsan_disable(); }
~ScopedDisabler() { __lsan_enable(); }
};
-} // namespace __lsan
+} // namespace __lsan
#endif
-#endif // SANITIZER_LSAN_INTERFACE_H
+#endif // SANITIZER_LSAN_INTERFACE_H
diff --git a/compiler-rt/include/sanitizer/memprof_interface.h b/compiler-rt/include/sanitizer/memprof_interface.h
index 76031de4014c..4660a7818c92 100644
--- a/compiler-rt/include/sanitizer/memprof_interface.h
+++ b/compiler-rt/include/sanitizer/memprof_interface.h
@@ -24,25 +24,26 @@ extern "C" {
///
/// \param addr Start of memory region.
/// \param size Size of memory region.
-void __memprof_record_access_range(void const volatile *addr, size_t size);
+void SANITIZER_CDECL __memprof_record_access_range(void const volatile *addr,
+ size_t size);
/// Records access to a memory address <c><i>addr</i></c>.
///
/// This memory must be previously allocated by your program.
///
/// \param addr Accessed memory address
-void __memprof_record_access(void const volatile *addr);
+void SANITIZER_CDECL __memprof_record_access(void const volatile *addr);
/// User-provided callback on MemProf errors.
///
/// You can provide a function that would be called immediately when MemProf
/// detects an error. This is useful in cases when MemProf detects an error but
/// your program crashes before the MemProf report is printed.
-void __memprof_on_error(void);
+void SANITIZER_CDECL __memprof_on_error(void);
/// Prints accumulated statistics to <c>stderr</c> (useful for calling from the
/// debugger).
-void __memprof_print_accumulated_stats(void);
+void SANITIZER_CDECL __memprof_print_accumulated_stats(void);
/// User-provided default option settings.
///
@@ -51,12 +52,18 @@ void __memprof_print_accumulated_stats(void);
/// <c>verbosity=1:print_stats=1</c>).
///
/// \returns Default options string.
-const char *__memprof_default_options(void);
+const char *SANITIZER_CDECL __memprof_default_options(void);
/// Prints the memory profile to the current profile file.
///
/// \returns 0 on success.
-int __memprof_profile_dump(void);
+int SANITIZER_CDECL __memprof_profile_dump(void);
+
+/// Closes the existing file descriptor, if it is valid and not stdout or
+/// stderr, and resets the internal state such that the profile filename is
+/// reopened on the next profile dump attempt. This can be used to enable
+/// multiple rounds of profiling on the same binary.
+void SANITIZER_CDECL __memprof_profile_reset(void);
#ifdef __cplusplus
} // extern "C"
diff --git a/compiler-rt/include/sanitizer/msan_interface.h b/compiler-rt/include/sanitizer/msan_interface.h
index 854b12cda36e..6fedc0312545 100644
--- a/compiler-rt/include/sanitizer/msan_interface.h
+++ b/compiler-rt/include/sanitizer/msan_interface.h
@@ -18,109 +18,118 @@
#ifdef __cplusplus
extern "C" {
#endif
- /* Set raw origin for the memory range. */
- void __msan_set_origin(const volatile void *a, size_t size, uint32_t origin);
-
- /* Get raw origin for an address. */
- uint32_t __msan_get_origin(const volatile void *a);
-
- /* Test that this_id is a descendant of prev_id (or they are simply equal).
- * "descendant" here means they are part of the same chain, created with
- * __msan_chain_origin. */
- int __msan_origin_is_descendant_or_same(uint32_t this_id, uint32_t prev_id);
-
- /* Returns non-zero if tracking origins. */
- int __msan_get_track_origins(void);
-
- /* Returns the origin id of the latest UMR in the calling thread. */
- uint32_t __msan_get_umr_origin(void);
-
- /* Make memory region fully initialized (without changing its contents). */
- void __msan_unpoison(const volatile void *a, size_t size);
-
- /* Make a null-terminated string fully initialized (without changing its
- contents). */
- void __msan_unpoison_string(const volatile char *a);
-
- /* Make first n parameters of the next function call fully initialized. */
- void __msan_unpoison_param(size_t n);
-
- /* Make memory region fully uninitialized (without changing its contents).
- This is a legacy interface that does not update origin information. Use
- __msan_allocated_memory() instead. */
- void __msan_poison(const volatile void *a, size_t size);
-
- /* Make memory region partially uninitialized (without changing its contents).
- */
- void __msan_partial_poison(const volatile void *data, void *shadow,
- size_t size);
-
- /* Returns the offset of the first (at least partially) poisoned byte in the
- memory range, or -1 if the whole range is good. */
- intptr_t __msan_test_shadow(const volatile void *x, size_t size);
-
- /* Checks that memory range is fully initialized, and reports an error if it
- * is not. */
- void __msan_check_mem_is_initialized(const volatile void *x, size_t size);
-
- /* For testing:
- __msan_set_expect_umr(1);
- ... some buggy code ...
- __msan_set_expect_umr(0);
- The last line will verify that a UMR happened. */
- void __msan_set_expect_umr(int expect_umr);
-
- /* Change the value of keep_going flag. Non-zero value means don't terminate
- program execution when an error is detected. This will not affect error in
- modules that were compiled without the corresponding compiler flag. */
- void __msan_set_keep_going(int keep_going);
-
- /* Print shadow and origin for the memory range to stderr in a human-readable
- format. */
- void __msan_print_shadow(const volatile void *x, size_t size);
-
- /* Print shadow for the memory range to stderr in a minimalistic
- human-readable format. */
- void __msan_dump_shadow(const volatile void *x, size_t size);
-
- /* Returns true if running under a dynamic tool (DynamoRio-based). */
- int __msan_has_dynamic_component(void);
-
- /* Tell MSan about newly allocated memory (ex.: custom allocator).
- Memory will be marked uninitialized, with origin at the call site. */
- void __msan_allocated_memory(const volatile void* data, size_t size);
-
- /* Tell MSan about newly destroyed memory. Mark memory as uninitialized. */
- void __sanitizer_dtor_callback(const volatile void* data, size_t size);
- void __sanitizer_dtor_callback_fields(const volatile void *data, size_t size);
- void __sanitizer_dtor_callback_vptr(const volatile void *data);
-
- /* This function may be optionally provided by user and should return
- a string containing Msan runtime options. See msan_flags.h for details. */
- const char* __msan_default_options(void);
-
- /* Deprecated. Call __sanitizer_set_death_callback instead. */
- void __msan_set_death_callback(void (*callback)(void));
-
- /* Update shadow for the application copy of size bytes from src to dst.
- Src and dst are application addresses. This function does not copy the
- actual application memory, it only updates shadow and origin for such
- copy. Source and destination regions can overlap. */
- void __msan_copy_shadow(const volatile void *dst, const volatile void *src,
- size_t size);
-
- /* Disables uninitialized memory checks in interceptors. */
- void __msan_scoped_disable_interceptor_checks(void);
-
- /* Re-enables uninitialized memory checks in interceptors after a previous
- call to __msan_scoped_disable_interceptor_checks. */
- void __msan_scoped_enable_interceptor_checks(void);
-
- void __msan_start_switch_fiber(const void *bottom, size_t size);
- void __msan_finish_switch_fiber(const void **bottom_old, size_t *size_old);
+/* Set raw origin for the memory range. */
+void SANITIZER_CDECL __msan_set_origin(const volatile void *a, size_t size,
+ uint32_t origin);
+
+/* Get raw origin for an address. */
+uint32_t SANITIZER_CDECL __msan_get_origin(const volatile void *a);
+
+/* Test that this_id is a descendant of prev_id (or they are simply equal).
+ * "descendant" here means they are part of the same chain, created with
+ * __msan_chain_origin. */
+int SANITIZER_CDECL __msan_origin_is_descendant_or_same(uint32_t this_id,
+ uint32_t prev_id);
+
+/* Returns non-zero if tracking origins. */
+int SANITIZER_CDECL __msan_get_track_origins(void);
+
+/* Returns the origin id of the latest UMR in the calling thread. */
+uint32_t SANITIZER_CDECL __msan_get_umr_origin(void);
+
+/* Make memory region fully initialized (without changing its contents). */
+void SANITIZER_CDECL __msan_unpoison(const volatile void *a, size_t size);
+
+/* Make a null-terminated string fully initialized (without changing its
+ contents). */
+void SANITIZER_CDECL __msan_unpoison_string(const volatile char *a);
+
+/* Make first n parameters of the next function call fully initialized. */
+void SANITIZER_CDECL __msan_unpoison_param(size_t n);
+
+/* Make memory region fully uninitialized (without changing its contents).
+ This is a legacy interface that does not update origin information. Use
+ __msan_allocated_memory() instead. */
+void SANITIZER_CDECL __msan_poison(const volatile void *a, size_t size);
+
+/* Make memory region partially uninitialized (without changing its contents).
+ */
+void SANITIZER_CDECL __msan_partial_poison(const volatile void *data,
+ void *shadow, size_t size);
+
+/* Returns the offset of the first (at least partially) poisoned byte in the
+ memory range, or -1 if the whole range is good. */
+intptr_t SANITIZER_CDECL __msan_test_shadow(const volatile void *x,
+ size_t size);
+
+/* Checks that memory range is fully initialized, and reports an error if it
+ * is not. */
+void SANITIZER_CDECL __msan_check_mem_is_initialized(const volatile void *x,
+ size_t size);
+
+/* For testing:
+ __msan_set_expect_umr(1);
+ ... some buggy code ...
+ __msan_set_expect_umr(0);
+ The last line will verify that a UMR happened. */
+void SANITIZER_CDECL __msan_set_expect_umr(int expect_umr);
+
+/* Change the value of keep_going flag. Non-zero value means don't terminate
+ program execution when an error is detected. This will not affect error in
+ modules that were compiled without the corresponding compiler flag. */
+void SANITIZER_CDECL __msan_set_keep_going(int keep_going);
+
+/* Print shadow and origin for the memory range to stderr in a human-readable
+ format. */
+void SANITIZER_CDECL __msan_print_shadow(const volatile void *x, size_t size);
+
+/* Print shadow for the memory range to stderr in a minimalistic
+ human-readable format. */
+void SANITIZER_CDECL __msan_dump_shadow(const volatile void *x, size_t size);
+
+/* Returns true if running under a dynamic tool (DynamoRio-based). */
+int SANITIZER_CDECL __msan_has_dynamic_component(void);
+
+/* Tell MSan about newly allocated memory (ex.: custom allocator).
+ Memory will be marked uninitialized, with origin at the call site. */
+void SANITIZER_CDECL __msan_allocated_memory(const volatile void *data,
+ size_t size);
+
+/* Tell MSan about newly destroyed memory. Mark memory as uninitialized. */
+void SANITIZER_CDECL __sanitizer_dtor_callback(const volatile void *data,
+ size_t size);
+void SANITIZER_CDECL __sanitizer_dtor_callback_fields(const volatile void *data,
+ size_t size);
+void SANITIZER_CDECL __sanitizer_dtor_callback_vptr(const volatile void *data);
+
+/* This function may be optionally provided by user and should return
+ a string containing Msan runtime options. See msan_flags.h for details. */
+const char *SANITIZER_CDECL __msan_default_options(void);
+
+/* Deprecated. Call __sanitizer_set_death_callback instead. */
+void SANITIZER_CDECL
+__msan_set_death_callback(void(SANITIZER_CDECL *callback)(void));
+
+/* Update shadow for the application copy of size bytes from src to dst.
+ Src and dst are application addresses. This function does not copy the
+ actual application memory, it only updates shadow and origin for such
+ copy. Source and destination regions can overlap. */
+void SANITIZER_CDECL __msan_copy_shadow(const volatile void *dst,
+ const volatile void *src, size_t size);
+
+/* Disables uninitialized memory checks in interceptors. */
+void SANITIZER_CDECL __msan_scoped_disable_interceptor_checks(void);
+
+/* Re-enables uninitialized memory checks in interceptors after a previous
+ call to __msan_scoped_disable_interceptor_checks. */
+void SANITIZER_CDECL __msan_scoped_enable_interceptor_checks(void);
+
+void SANITIZER_CDECL __msan_start_switch_fiber(const void *bottom, size_t size);
+void SANITIZER_CDECL __msan_finish_switch_fiber(const void **bottom_old,
+ size_t *size_old);
#ifdef __cplusplus
-} // extern "C"
+} // extern "C"
#endif
#endif
diff --git a/compiler-rt/include/sanitizer/scudo_interface.h b/compiler-rt/include/sanitizer/scudo_interface.h
index dd522c1efc21..37fd7bfeccf0 100644
--- a/compiler-rt/include/sanitizer/scudo_interface.h
+++ b/compiler-rt/include/sanitizer/scudo_interface.h
@@ -17,22 +17,22 @@
#ifdef __cplusplus
extern "C" {
#endif
- // This function may be optionally provided by a user and should return
- // a string containing Scudo runtime options. See scudo_flags.h for details.
- const char* __scudo_default_options(void);
+// This function may be optionally provided by a user and should return
+// a string containing Scudo runtime options. See scudo_flags.h for details.
+const char *SANITIZER_CDECL __scudo_default_options(void);
- // This function allows to set the RSS limit at runtime. This can be either
- // the hard limit (HardLimit=1) or the soft limit (HardLimit=0). The limit
- // can be removed by setting LimitMb to 0. This function's parameters should
- // be fully trusted to avoid security mishaps.
- void __scudo_set_rss_limit(size_t LimitMb, int HardLimit);
+// This function allows to set the RSS limit at runtime. This can be either
+// the hard limit (HardLimit=1) or the soft limit (HardLimit=0). The limit
+// can be removed by setting LimitMb to 0. This function's parameters should
+// be fully trusted to avoid security mishaps.
+void SANITIZER_CDECL __scudo_set_rss_limit(size_t LimitMb, int HardLimit);
- // This function outputs various allocator statistics for both the Primary
- // and Secondary allocators, including memory usage, number of allocations
- // and deallocations.
- void __scudo_print_stats(void);
+// This function outputs various allocator statistics for both the Primary
+// and Secondary allocators, including memory usage, number of allocations
+// and deallocations.
+void SANITIZER_CDECL __scudo_print_stats(void);
#ifdef __cplusplus
-} // extern "C"
+} // extern "C"
#endif
-#endif // SANITIZER_SCUDO_INTERFACE_H_
+#endif // SANITIZER_SCUDO_INTERFACE_H_
diff --git a/compiler-rt/include/sanitizer/tsan_interface.h b/compiler-rt/include/sanitizer/tsan_interface.h
index f19c79d79ba6..e11a4175cd8e 100644
--- a/compiler-rt/include/sanitizer/tsan_interface.h
+++ b/compiler-rt/include/sanitizer/tsan_interface.h
@@ -21,8 +21,8 @@ extern "C" {
// __tsan_release establishes a happens-before relation with a preceding
// __tsan_acquire on the same address.
-void __tsan_acquire(void *addr);
-void __tsan_release(void *addr);
+void SANITIZER_CDECL __tsan_acquire(void *addr);
+void SANITIZER_CDECL __tsan_release(void *addr);
// Annotations for custom mutexes.
// The annotations allow to get better reports (with sets of locked mutexes),
@@ -52,16 +52,16 @@ static const unsigned __tsan_mutex_not_static = 1 << 8;
// Mutex operation flags:
// Denotes read lock operation.
-static const unsigned __tsan_mutex_read_lock = 1 << 3;
+static const unsigned __tsan_mutex_read_lock = 1 << 3;
// Denotes try lock operation.
-static const unsigned __tsan_mutex_try_lock = 1 << 4;
+static const unsigned __tsan_mutex_try_lock = 1 << 4;
// Denotes that a try lock operation has failed to acquire the mutex.
-static const unsigned __tsan_mutex_try_lock_failed = 1 << 5;
+static const unsigned __tsan_mutex_try_lock_failed = 1 << 5;
// Denotes that the lock operation acquires multiple recursion levels.
// Number of levels is passed in recursion parameter.
// This is useful for annotation of e.g. Java builtin monitors,
// for which wait operation releases all recursive acquisitions of the mutex.
-static const unsigned __tsan_mutex_recursive_lock = 1 << 6;
+static const unsigned __tsan_mutex_recursive_lock = 1 << 6;
// Denotes that the unlock operation releases all recursion levels.
// Number of released levels is returned and later must be passed to
// the corresponding __tsan_mutex_post_lock annotation.
@@ -75,20 +75,20 @@ static const unsigned __tsan_mutex_try_read_lock_failed =
// Annotate creation of a mutex.
// Supported flags: mutex creation flags.
-void __tsan_mutex_create(void *addr, unsigned flags);
+void SANITIZER_CDECL __tsan_mutex_create(void *addr, unsigned flags);
// Annotate destruction of a mutex.
// Supported flags:
// - __tsan_mutex_linker_init
// - __tsan_mutex_not_static
-void __tsan_mutex_destroy(void *addr, unsigned flags);
+void SANITIZER_CDECL __tsan_mutex_destroy(void *addr, unsigned flags);
// Annotate start of lock operation.
// Supported flags:
// - __tsan_mutex_read_lock
// - __tsan_mutex_try_lock
// - all mutex creation flags
-void __tsan_mutex_pre_lock(void *addr, unsigned flags);
+void SANITIZER_CDECL __tsan_mutex_pre_lock(void *addr, unsigned flags);
// Annotate end of lock operation.
// Supported flags:
@@ -97,23 +97,24 @@ void __tsan_mutex_pre_lock(void *addr, unsigned flags);
// - __tsan_mutex_try_lock_failed
// - __tsan_mutex_recursive_lock
// - all mutex creation flags
-void __tsan_mutex_post_lock(void *addr, unsigned flags, int recursion);
+void SANITIZER_CDECL __tsan_mutex_post_lock(void *addr, unsigned flags,
+ int recursion);
// Annotate start of unlock operation.
// Supported flags:
// - __tsan_mutex_read_lock
// - __tsan_mutex_recursive_unlock
-int __tsan_mutex_pre_unlock(void *addr, unsigned flags);
+int SANITIZER_CDECL __tsan_mutex_pre_unlock(void *addr, unsigned flags);
// Annotate end of unlock operation.
// Supported flags:
// - __tsan_mutex_read_lock (must match __tsan_mutex_pre_unlock)
-void __tsan_mutex_post_unlock(void *addr, unsigned flags);
+void SANITIZER_CDECL __tsan_mutex_post_unlock(void *addr, unsigned flags);
// Annotate start/end of notify/signal/broadcast operation.
// Supported flags: none.
-void __tsan_mutex_pre_signal(void *addr, unsigned flags);
-void __tsan_mutex_post_signal(void *addr, unsigned flags);
+void SANITIZER_CDECL __tsan_mutex_pre_signal(void *addr, unsigned flags);
+void SANITIZER_CDECL __tsan_mutex_post_signal(void *addr, unsigned flags);
// Annotate start/end of a region of code where lock/unlock/signal operation
// diverts to do something else unrelated to the mutex. This can be used to
@@ -123,8 +124,12 @@ void __tsan_mutex_post_signal(void *addr, unsigned flags);
// __tsan_mutex_pre/post_lock, __tsan_mutex_pre/post_unlock,
// __tsan_mutex_pre/post_signal regions.
// Supported flags: none.
-void __tsan_mutex_pre_divert(void *addr, unsigned flags);
-void __tsan_mutex_post_divert(void *addr, unsigned flags);
+void SANITIZER_CDECL __tsan_mutex_pre_divert(void *addr, unsigned flags);
+void SANITIZER_CDECL __tsan_mutex_post_divert(void *addr, unsigned flags);
+
+// Check that the current thread does not hold any mutexes,
+// report a bug report otherwise.
+void SANITIZER_CDECL __tsan_check_no_mutexes_held();
// External race detection API.
// Can be used by non-instrumented libraries to detect when their objects are
@@ -136,11 +141,14 @@ void __tsan_mutex_post_divert(void *addr, unsigned flags);
// - __tsan_external_register_tag registers a 'tag' with the specified name,
// which is later used in read/write annotations to denote the object type
// - __tsan_external_assign_tag can optionally mark a heap object with a tag
-void *__tsan_external_register_tag(const char *object_type);
-void __tsan_external_register_header(void *tag, const char *header);
-void __tsan_external_assign_tag(void *addr, void *tag);
-void __tsan_external_read(void *addr, void *caller_pc, void *tag);
-void __tsan_external_write(void *addr, void *caller_pc, void *tag);
+void *SANITIZER_CDECL __tsan_external_register_tag(const char *object_type);
+void SANITIZER_CDECL __tsan_external_register_header(void *tag,
+ const char *header);
+void SANITIZER_CDECL __tsan_external_assign_tag(void *addr, void *tag);
+void SANITIZER_CDECL __tsan_external_read(void *addr, void *caller_pc,
+ void *tag);
+void SANITIZER_CDECL __tsan_external_write(void *addr, void *caller_pc,
+ void *tag);
// Fiber switching API.
// - TSAN context for fiber can be created by __tsan_create_fiber
@@ -150,33 +158,33 @@ void __tsan_external_write(void *addr, void *caller_pc, void *tag);
// - __tsan_switch_to_fiber should be called immediately before switch
// to fiber, such as call of swapcontext.
// - Fiber name can be set by __tsan_set_fiber_name.
-void *__tsan_get_current_fiber(void);
-void *__tsan_create_fiber(unsigned flags);
-void __tsan_destroy_fiber(void *fiber);
-void __tsan_switch_to_fiber(void *fiber, unsigned flags);
-void __tsan_set_fiber_name(void *fiber, const char *name);
+void *SANITIZER_CDECL __tsan_get_current_fiber(void);
+void *SANITIZER_CDECL __tsan_create_fiber(unsigned flags);
+void SANITIZER_CDECL __tsan_destroy_fiber(void *fiber);
+void SANITIZER_CDECL __tsan_switch_to_fiber(void *fiber, unsigned flags);
+void SANITIZER_CDECL __tsan_set_fiber_name(void *fiber, const char *name);
// Flags for __tsan_switch_to_fiber:
// Do not establish a happens-before relation between fibers
static const unsigned __tsan_switch_to_fiber_no_sync = 1 << 0;
// User-provided callback invoked on TSan initialization.
-void __tsan_on_initialize();
+void SANITIZER_CDECL __tsan_on_initialize();
// User-provided callback invoked on TSan shutdown.
// `failed` - Nonzero if TSan did detect issues, zero otherwise.
// Return `0` if TSan should exit as if no issues were detected. Return nonzero
// if TSan should exit as if issues were detected.
-int __tsan_on_finalize(int failed);
+int SANITIZER_CDECL __tsan_on_finalize(int failed);
// Release TSan internal memory in a best-effort manner.
-void __tsan_flush_memory();
+void SANITIZER_CDECL __tsan_flush_memory();
// User-provided default TSAN options.
-const char* __tsan_default_options(void);
+const char *SANITIZER_CDECL __tsan_default_options(void);
// User-provided default TSAN suppressions.
-const char* __tsan_default_suppressions(void);
+const char *SANITIZER_CDECL __tsan_default_suppressions(void);
/// Returns a report's description.
///
@@ -198,11 +206,10 @@ const char* __tsan_default_suppressions(void);
/// call.
/// \param trace_size Size in bytes of the trace buffer.
/// \returns Returns 1 if successful, 0 if not.
-int __tsan_get_report_data(void *report, const char **description, int *count,
- int *stack_count, int *mop_count, int *loc_count,
- int *mutex_count, int *thread_count,
- int *unique_tid_count, void **sleep_trace,
- unsigned long trace_size);
+int SANITIZER_CDECL __tsan_get_report_data(
+ void *report, const char **description, int *count, int *stack_count,
+ int *mop_count, int *loc_count, int *mutex_count, int *thread_count,
+ int *unique_tid_count, void **sleep_trace, unsigned long trace_size);
/// Returns information about stack traces included in the report.
///
@@ -211,8 +218,9 @@ int __tsan_get_report_data(void *report, const char **description, int *count,
/// \param trace A buffer to store the stack trace.
/// \param trace_size Size in bytes of the trace buffer.
/// \returns Returns 1 if successful, 0 if not.
-int __tsan_get_report_stack(void *report, unsigned long idx, void **trace,
- unsigned long trace_size);
+int SANITIZER_CDECL __tsan_get_report_stack(void *report, unsigned long idx,
+ void **trace,
+ unsigned long trace_size);
/// Returns information about memory operations included in the report.
///
@@ -226,9 +234,10 @@ int __tsan_get_report_stack(void *report, unsigned long idx, void **trace,
/// \param trace A buffer to store the stack trace.
/// \param trace_size Size in bytes of the trace buffer.
/// \returns Returns 1 if successful, 0 if not.
-int __tsan_get_report_mop(void *report, unsigned long idx, int *tid,
- void **addr, int *size, int *write, int *atomic,
- void **trace, unsigned long trace_size);
+int SANITIZER_CDECL __tsan_get_report_mop(void *report, unsigned long idx,
+ int *tid, void **addr, int *size,
+ int *write, int *atomic, void **trace,
+ unsigned long trace_size);
/// Returns information about locations included in the report.
///
@@ -244,10 +253,12 @@ int __tsan_get_report_mop(void *report, unsigned long idx, int *tid,
/// \param trace A buffer to store the stack trace.
/// \param trace_size Size in bytes of the trace buffer.
/// \returns Returns 1 if successful, 0 if not.
-int __tsan_get_report_loc(void *report, unsigned long idx, const char **type,
- void **addr, void **start, unsigned long *size,
- int *tid, int *fd, int *suppressable, void **trace,
- unsigned long trace_size);
+int SANITIZER_CDECL __tsan_get_report_loc(void *report, unsigned long idx,
+ const char **type, void **addr,
+ void **start, unsigned long *size,
+ int *tid, int *fd, int *suppressable,
+ void **trace,
+ unsigned long trace_size);
/// Returns information about mutexes included in the report.
///
@@ -259,9 +270,10 @@ int __tsan_get_report_loc(void *report, unsigned long idx, const char **type,
/// \param trace A buffer to store the stack trace.
/// \param trace_size Size in bytes of the trace buffer.
/// \returns Returns 1 if successful, 0 if not.
-int __tsan_get_report_mutex(void *report, unsigned long idx, uint64_t *mutex_id,
- void **addr, int *destroyed, void **trace,
- unsigned long trace_size);
+int SANITIZER_CDECL __tsan_get_report_mutex(void *report, unsigned long idx,
+ uint64_t *mutex_id, void **addr,
+ int *destroyed, void **trace,
+ unsigned long trace_size);
/// Returns information about threads included in the report.
///
@@ -275,10 +287,11 @@ int __tsan_get_report_mutex(void *report, unsigned long idx, uint64_t *mutex_id,
/// \param trace A buffer to store the stack trace.
/// \param trace_size Size in bytes of the trace buffer.
/// \returns Returns 1 if successful, 0 if not.
-int __tsan_get_report_thread(void *report, unsigned long idx, int *tid,
- uint64_t *os_id, int *running, const char **name,
- int *parent_tid, void **trace,
- unsigned long trace_size);
+int SANITIZER_CDECL __tsan_get_report_thread(void *report, unsigned long idx,
+ int *tid, uint64_t *os_id,
+ int *running, const char **name,
+ int *parent_tid, void **trace,
+ unsigned long trace_size);
/// Returns information about unique thread IDs included in the report.
///
@@ -286,17 +299,18 @@ int __tsan_get_report_thread(void *report, unsigned long idx, int *tid,
/// \param idx Index to the report's unique thread IDs.
/// \param[out] tid Unique thread ID of the report.
/// \returns Returns 1 if successful, 0 if not.
-int __tsan_get_report_unique_tid(void *report, unsigned long idx, int *tid);
+int SANITIZER_CDECL __tsan_get_report_unique_tid(void *report,
+ unsigned long idx, int *tid);
/// Returns the current report.
///
/// If TSan is currently reporting a detected issue on the current thread,
/// returns an opaque pointer to the current report. Otherwise returns NULL.
/// \returns An opaque pointer to the current report. Otherwise returns NULL.
-void *__tsan_get_current_report();
+void *SANITIZER_CDECL __tsan_get_current_report();
#ifdef __cplusplus
-} // extern "C"
+} // extern "C"
#endif
-#endif // SANITIZER_TSAN_INTERFACE_H
+#endif // SANITIZER_TSAN_INTERFACE_H
diff --git a/compiler-rt/include/sanitizer/tsan_interface_atomic.h b/compiler-rt/include/sanitizer/tsan_interface_atomic.h
index 5e41e2256c30..de3a1c393609 100644
--- a/compiler-rt/include/sanitizer/tsan_interface_atomic.h
+++ b/compiler-rt/include/sanitizer/tsan_interface_atomic.h
@@ -13,6 +13,8 @@
#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H
+#include <sanitizer/common_interface_defs.h>
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -21,12 +23,12 @@ typedef char __tsan_atomic8;
typedef short __tsan_atomic16;
typedef int __tsan_atomic32;
typedef long __tsan_atomic64;
-#if defined(__SIZEOF_INT128__) \
- || (__clang_major__ * 100 + __clang_minor__ >= 302)
+#if defined(__SIZEOF_INT128__) || \
+ (__clang_major__ * 100 + __clang_minor__ >= 302)
__extension__ typedef __int128 __tsan_atomic128;
-# define __TSAN_HAS_INT128 1
+#define __TSAN_HAS_INT128 1
#else
-# define __TSAN_HAS_INT128 0
+#define __TSAN_HAS_INT128 0
#endif
// Part of ABI, do not change.
@@ -40,182 +42,187 @@ typedef enum {
__tsan_memory_order_seq_cst
} __tsan_memory_order;
-__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
- __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
- __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
- __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
- __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_load(const volatile __tsan_atomic8 *a, __tsan_memory_order mo);
+__tsan_atomic16 SANITIZER_CDECL
+__tsan_atomic16_load(const volatile __tsan_atomic16 *a, __tsan_memory_order mo);
+__tsan_atomic32 SANITIZER_CDECL
+__tsan_atomic32_load(const volatile __tsan_atomic32 *a, __tsan_memory_order mo);
+__tsan_atomic64 SANITIZER_CDECL
+__tsan_atomic64_load(const volatile __tsan_atomic64 *a, __tsan_memory_order mo);
#if __TSAN_HAS_INT128
-__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
- __tsan_memory_order mo);
+__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_load(
+ const volatile __tsan_atomic128 *a, __tsan_memory_order mo);
#endif
-void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
- __tsan_memory_order mo);
-void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
- __tsan_memory_order mo);
-void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
- __tsan_memory_order mo);
-void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
- __tsan_memory_order mo);
+void SANITIZER_CDECL __tsan_atomic8_store(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v,
+ __tsan_memory_order mo);
+void SANITIZER_CDECL __tsan_atomic16_store(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v,
+ __tsan_memory_order mo);
+void SANITIZER_CDECL __tsan_atomic32_store(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v,
+ __tsan_memory_order mo);
+void SANITIZER_CDECL __tsan_atomic64_store(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v,
+ __tsan_memory_order mo);
#if __TSAN_HAS_INT128
-void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
- __tsan_memory_order mo);
+void SANITIZER_CDECL __tsan_atomic128_store(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v,
+ __tsan_memory_order mo);
#endif
-__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_exchange(
+ volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_exchange(
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_exchange(
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_exchange(
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
#if __TSAN_HAS_INT128
-__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo);
+__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_exchange(
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
#endif
-__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_add(
+ volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_add(
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_add(
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_add(
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
#if __TSAN_HAS_INT128
-__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo);
+__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_add(
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
#endif
-__tsan_atomic8 __tsan_atomic8_fetch_sub(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_sub(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_sub(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_sub(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_sub(
+ volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_sub(
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_sub(
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_sub(
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
#if __TSAN_HAS_INT128
-__tsan_atomic128 __tsan_atomic128_fetch_sub(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo);
+__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_sub(
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
#endif
-__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_and(
+ volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_and(
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_and(
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_and(
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
#if __TSAN_HAS_INT128
-__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo);
+__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_and(
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
#endif
-__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_or(
+ volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_or(
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_or(
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_or(
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
#if __TSAN_HAS_INT128
-__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo);
+__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_or(
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
#endif
-__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_xor(
+ volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_xor(
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_xor(
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_xor(
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
#if __TSAN_HAS_INT128
-__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo);
+__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_xor(
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
#endif
-__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_nand(
+ volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_nand(
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_nand(
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_nand(
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
#if __TSAN_HAS_INT128
-__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo);
+__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_nand(
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
#endif
-int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
- __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
-int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
- __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
-int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
- __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
-int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
- __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
+int SANITIZER_CDECL __tsan_atomic8_compare_exchange_weak(
+ volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+int SANITIZER_CDECL __tsan_atomic16_compare_exchange_weak(
+ volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+int SANITIZER_CDECL __tsan_atomic32_compare_exchange_weak(
+ volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+int SANITIZER_CDECL __tsan_atomic64_compare_exchange_weak(
+ volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
#if __TSAN_HAS_INT128
-int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
- __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
+int SANITIZER_CDECL __tsan_atomic128_compare_exchange_weak(
+ volatile __tsan_atomic128 *a, __tsan_atomic128 *c, __tsan_atomic128 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
#endif
-int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
- __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
-int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
- __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
-int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
- __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
-int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
- __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
+int SANITIZER_CDECL __tsan_atomic8_compare_exchange_strong(
+ volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+int SANITIZER_CDECL __tsan_atomic16_compare_exchange_strong(
+ volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+int SANITIZER_CDECL __tsan_atomic32_compare_exchange_strong(
+ volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+int SANITIZER_CDECL __tsan_atomic64_compare_exchange_strong(
+ volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
#if __TSAN_HAS_INT128
-int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
- __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
+int SANITIZER_CDECL __tsan_atomic128_compare_exchange_strong(
+ volatile __tsan_atomic128 *a, __tsan_atomic128 *c, __tsan_atomic128 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
#endif
-__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
+__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_compare_exchange_val(
volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
-__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
+__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_compare_exchange_val(
volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
-__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
+__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_compare_exchange_val(
volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
-__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
+__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_compare_exchange_val(
volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
#if __TSAN_HAS_INT128
-__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
+__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_compare_exchange_val(
volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
#endif
-void __tsan_atomic_thread_fence(__tsan_memory_order mo);
-void __tsan_atomic_signal_fence(__tsan_memory_order mo);
+void SANITIZER_CDECL __tsan_atomic_thread_fence(__tsan_memory_order mo);
+void SANITIZER_CDECL __tsan_atomic_signal_fence(__tsan_memory_order mo);
#ifdef __cplusplus
-} // extern "C"
+} // extern "C"
#endif
-#endif // TSAN_INTERFACE_ATOMIC_H
+#endif // TSAN_INTERFACE_ATOMIC_H
diff --git a/compiler-rt/include/sanitizer/ubsan_interface.h b/compiler-rt/include/sanitizer/ubsan_interface.h
index 59fc6c3c184c..435eb1ae332c 100644
--- a/compiler-rt/include/sanitizer/ubsan_interface.h
+++ b/compiler-rt/include/sanitizer/ubsan_interface.h
@@ -23,10 +23,10 @@ extern "C" {
/// <c>verbosity=1:halt_on_error=0</c>).
///
/// \returns Default options string.
-const char* __ubsan_default_options(void);
+const char *SANITIZER_CDECL __ubsan_default_options(void);
#ifdef __cplusplus
-} // extern "C"
+} // extern "C"
#endif
-#endif // SANITIZER_UBSAN_INTERFACE_H
+#endif // SANITIZER_UBSAN_INTERFACE_H
diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp
index 15eefcb96063..22dcf6132707 100644
--- a/compiler-rt/lib/asan/asan_allocator.cpp
+++ b/compiler-rt/lib/asan/asan_allocator.cpp
@@ -527,7 +527,7 @@ struct Allocator {
// -------------------- Allocation/Deallocation routines ---------------
void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
AllocType alloc_type, bool can_fill) {
- if (UNLIKELY(!asan_inited))
+ if (UNLIKELY(!AsanInited()))
AsanInitFromRtl();
if (UNLIKELY(IsRssLimitExceeded())) {
if (AllocatorMayReturnNull())
diff --git a/compiler-rt/lib/asan/asan_allocator.h b/compiler-rt/lib/asan/asan_allocator.h
index ffeedadf0772..c3c4fae85b12 100644
--- a/compiler-rt/lib/asan/asan_allocator.h
+++ b/compiler-rt/lib/asan/asan_allocator.h
@@ -120,27 +120,92 @@ struct AsanMapUnmapCallback {
#if SANITIZER_CAN_USE_ALLOCATOR64
# if SANITIZER_FUCHSIA
+// This is a sentinel indicating we do not want the primary allocator arena to
+// be placed at a fixed address. It will be anonymously mmap'd.
const uptr kAllocatorSpace = ~(uptr)0;
-const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+# if SANITIZER_RISCV64
+
+// These are sanitizer tunings that allow all bringup tests for RISCV-64 Sv39 +
+// Fuchsia to run with asan-instrumented. That is, we can run bringup, e2e,
+// libc, and scudo tests with this configuration.
+//
+// TODO: This is specifically tuned for Sv39. 48/57 will likely require other
+// tunings, or possibly use the same tunings Fuchsia uses for other archs. The
+// VMA size isn't technically tied to the Fuchsia System ABI, so once 48/57 is
+// supported, we'd need a way of dynamically checking what the VMA size is and
+// determining optimal configuration.
+
+// This indicates the total amount of space dedicated for the primary allocator
+// during initialization. This is roughly proportional to the size set by the
+// FuchsiaConfig for scudo (~11.25GB == ~2^33.49). Requesting any more could
+// lead to some failures in sanitized bringup tests where we can't allocate new
+// vmars because there wouldn't be enough contiguous space. We could try 2^34 if
+// we re-evaluate the SizeClassMap settings.
+const uptr kAllocatorSize = UINT64_C(1) << 33; // 8GB
+
+// This is roughly equivalent to the configuration for the VeryDenseSizeClassMap
+// but has fewer size classes (ideally at most 32). Fewer class sizes means the
+// region size for each class is larger, thus less chances of running out of
+// space for each region. The main differences are the MidSizeLog (which is
+// smaller) and the MaxSizeLog (which is larger).
+//
+// - The MaxSizeLog is higher to allow some of the largest allocations I've
+// observed to be placed in the primary allocator's arena as opposed to being
+// mmap'd by the secondary allocator. This helps reduce fragmentation from
+// large classes. A huge example of this the scudo allocator tests (and its
+// testing infrastructure) which malloc's/new's objects on the order of
+// hundreds of kilobytes which normally would not be in the primary allocator
+// arena with the default VeryDenseSizeClassMap.
+// - The MidSizeLog is reduced to help shrink the number of size classes and
+// increase region size. Without this, we'd see ASan complain many times about
+// a region running out of available space.
+//
+// This differs a bit from the fuchsia config in scudo, mainly from the NumBits,
+// MaxSizeLog, and NumCachedHintT. This should place the number of size classes
+// for scudo at 45 and some large objects allocated by this config would be
+// placed in the arena whereas scudo would mmap them. The asan allocator needs
+// to have a number of classes that are a power of 2 for various internal things
+// to work, so we can't match the scudo settings to a tee. The sanitizer
+// allocator is slightly slower than scudo's but this is enough to get
+// memory-intensive scudo tests to run with asan instrumentation.
+typedef SizeClassMap</*kNumBits=*/2,
+ /*kMinSizeLog=*/5,
+ /*kMidSizeLog=*/8,
+ /*kMaxSizeLog=*/18,
+ /*kNumCachedHintT=*/8,
+ /*kMaxBytesCachedLog=*/10>
+ SizeClassMap;
+static_assert(SizeClassMap::kNumClassesRounded <= 32,
+ "The above tunings were specifically selected to ensure there "
+ "would be at most 32 size classes. This restriction could be "
+ "loosened to 64 size classes if we can find a configuration of "
+ "allocator size and SizeClassMap tunings that allows us to "
+ "reliably run all bringup tests in a sanitized environment.");
+
+# else
+// These are the default allocator tunings for non-RISCV environments where the
+// VMA is usually 48 bits and we have lots of space.
+const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
typedef DefaultSizeClassMap SizeClassMap;
-# elif defined(__powerpc64__)
+# endif
+# elif defined(__powerpc64__)
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
typedef DefaultSizeClassMap SizeClassMap;
-# elif defined(__aarch64__) && SANITIZER_ANDROID
+# elif defined(__aarch64__) && SANITIZER_ANDROID
// Android needs to support 39, 42 and 48 bit VMA.
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
typedef VeryCompactSizeClassMap SizeClassMap;
-#elif SANITIZER_RISCV64
+# elif SANITIZER_RISCV64
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
typedef VeryDenseSizeClassMap SizeClassMap;
-#elif defined(__sparc__)
+# elif defined(__sparc__)
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
typedef DefaultSizeClassMap SizeClassMap;
-# elif SANITIZER_WINDOWS
+# elif SANITIZER_WINDOWS
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x8000000000ULL; // 500G
typedef DefaultSizeClassMap SizeClassMap;
diff --git a/compiler-rt/lib/asan/asan_descriptions.cpp b/compiler-rt/lib/asan/asan_descriptions.cpp
index fbe92572b55b..ef6f3e0a096f 100644
--- a/compiler-rt/lib/asan/asan_descriptions.cpp
+++ b/compiler-rt/lib/asan/asan_descriptions.cpp
@@ -49,14 +49,14 @@ void DescribeThread(AsanThreadContext *context) {
}
context->announced = true;
InternalScopedString str;
- str.append("Thread %s", AsanThreadIdAndName(context).c_str());
+ str.AppendF("Thread %s", AsanThreadIdAndName(context).c_str());
if (context->parent_tid == kInvalidTid) {
- str.append(" created by unknown thread\n");
+ str.Append(" created by unknown thread\n");
Printf("%s", str.data());
return;
}
- str.append(" created by %s here:\n",
- AsanThreadIdAndName(context->parent_tid).c_str());
+ str.AppendF(" created by %s here:\n",
+ AsanThreadIdAndName(context->parent_tid).c_str());
Printf("%s", str.data());
StackDepotGet(context->stack_id).Print();
// Recursively described parent thread if needed.
@@ -126,29 +126,29 @@ static void GetAccessToHeapChunkInformation(ChunkAccess *descr,
static void PrintHeapChunkAccess(uptr addr, const ChunkAccess &descr) {
Decorator d;
InternalScopedString str;
- str.append("%s", d.Location());
+ str.Append(d.Location());
switch (descr.access_type) {
case kAccessTypeLeft:
- str.append("%p is located %zd bytes before",
- (void *)descr.bad_addr, descr.offset);
+ str.AppendF("%p is located %zd bytes before", (void *)descr.bad_addr,
+ descr.offset);
break;
case kAccessTypeRight:
- str.append("%p is located %zd bytes after",
- (void *)descr.bad_addr, descr.offset);
+ str.AppendF("%p is located %zd bytes after", (void *)descr.bad_addr,
+ descr.offset);
break;
case kAccessTypeInside:
- str.append("%p is located %zd bytes inside of", (void *)descr.bad_addr,
- descr.offset);
+ str.AppendF("%p is located %zd bytes inside of", (void *)descr.bad_addr,
+ descr.offset);
break;
case kAccessTypeUnknown:
- str.append(
+ str.AppendF(
"%p is located somewhere around (this is AddressSanitizer bug!)",
(void *)descr.bad_addr);
}
- str.append(" %zu-byte region [%p,%p)\n", descr.chunk_size,
- (void *)descr.chunk_begin,
- (void *)(descr.chunk_begin + descr.chunk_size));
- str.append("%s", d.Default());
+ str.AppendF(" %zu-byte region [%p,%p)\n", descr.chunk_size,
+ (void *)descr.chunk_begin,
+ (void *)(descr.chunk_begin + descr.chunk_size));
+ str.Append(d.Default());
Printf("%s", str.data());
}
@@ -243,24 +243,24 @@ static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr,
pos_descr = "underflows";
}
InternalScopedString str;
- str.append(" [%zd, %zd)", var.beg, var_end);
+ str.AppendF(" [%zd, %zd)", var.beg, var_end);
// Render variable name.
- str.append(" '");
+ str.AppendF(" '");
for (uptr i = 0; i < var.name_len; ++i) {
- str.append("%c", var.name_pos[i]);
+ str.AppendF("%c", var.name_pos[i]);
}
- str.append("'");
+ str.AppendF("'");
if (var.line > 0) {
- str.append(" (line %zd)", var.line);
+ str.AppendF(" (line %zd)", var.line);
}
if (pos_descr) {
Decorator d;
// FIXME: we may want to also print the size of the access here,
// but in case of accesses generated by memset it may be confusing.
- str.append("%s <== Memory access at offset %zd %s this variable%s\n",
- d.Location(), addr, pos_descr, d.Default());
+ str.AppendF("%s <== Memory access at offset %zd %s this variable%s\n",
+ d.Location(), addr, pos_descr, d.Default());
} else {
- str.append("\n");
+ str.AppendF("\n");
}
Printf("%s", str.data());
}
@@ -277,23 +277,23 @@ static void DescribeAddressRelativeToGlobal(uptr addr, uptr access_size,
const __asan_global &g) {
InternalScopedString str;
Decorator d;
- str.append("%s", d.Location());
+ str.Append(d.Location());
if (addr < g.beg) {
- str.append("%p is located %zd bytes before", (void *)addr,
- g.beg - addr);
+ str.AppendF("%p is located %zd bytes before", (void *)addr, g.beg - addr);
} else if (addr + access_size > g.beg + g.size) {
if (addr < g.beg + g.size) addr = g.beg + g.size;
- str.append("%p is located %zd bytes after", (void *)addr,
- addr - (g.beg + g.size));
+ str.AppendF("%p is located %zd bytes after", (void *)addr,
+ addr - (g.beg + g.size));
} else {
// Can it happen?
- str.append("%p is located %zd bytes inside of", (void *)addr, addr - g.beg);
+ str.AppendF("%p is located %zd bytes inside of", (void *)addr,
+ addr - g.beg);
}
- str.append(" global variable '%s' defined in '",
- MaybeDemangleGlobalName(g.name));
- PrintGlobalLocation(&str, g);
- str.append("' (0x%zx) of size %zu\n", g.beg, g.size);
- str.append("%s", d.Default());
+ str.AppendF(" global variable '%s' defined in '",
+ MaybeDemangleGlobalName(g.name));
+ PrintGlobalLocation(&str, g, /*print_module_name=*/false);
+ str.AppendF("' (0x%zx) of size %zu\n", g.beg, g.size);
+ str.Append(d.Default());
PrintGlobalNameIfASCII(&str, g);
Printf("%s", str.data());
}
diff --git a/compiler-rt/lib/asan/asan_errors.cpp b/compiler-rt/lib/asan/asan_errors.cpp
index cc8dc26f5b75..3f2d13e31464 100644
--- a/compiler-rt/lib/asan/asan_errors.cpp
+++ b/compiler-rt/lib/asan/asan_errors.cpp
@@ -362,8 +362,8 @@ void ErrorODRViolation::Print() {
Printf("%s", d.Default());
InternalScopedString g1_loc;
InternalScopedString g2_loc;
- PrintGlobalLocation(&g1_loc, global1);
- PrintGlobalLocation(&g2_loc, global2);
+ PrintGlobalLocation(&g1_loc, global1, /*print_module_name=*/true);
+ PrintGlobalLocation(&g2_loc, global2, /*print_module_name=*/true);
Printf(" [1] size=%zd '%s' %s\n", global1.size,
MaybeDemangleGlobalName(global1.name), g1_loc.data());
Printf(" [2] size=%zd '%s' %s\n", global2.size,
@@ -379,8 +379,8 @@ void ErrorODRViolation::Print() {
"HINT: if you don't care about these errors you may set "
"ASAN_OPTIONS=detect_odr_violation=0\n");
InternalScopedString error_msg;
- error_msg.append("%s: global '%s' at %s", scariness.GetDescription(),
- MaybeDemangleGlobalName(global1.name), g1_loc.data());
+ error_msg.AppendF("%s: global '%s' at %s", scariness.GetDescription(),
+ MaybeDemangleGlobalName(global1.name), g1_loc.data());
ReportErrorSummary(error_msg.data());
}
@@ -517,15 +517,15 @@ static void PrintShadowByte(InternalScopedString *str, const char *before,
}
static void PrintLegend(InternalScopedString *str) {
- str->append(
+ str->AppendF(
"Shadow byte legend (one shadow byte represents %d "
"application bytes):\n",
(int)ASAN_SHADOW_GRANULARITY);
PrintShadowByte(str, " Addressable: ", 0);
- str->append(" Partially addressable: ");
+ str->AppendF(" Partially addressable: ");
for (u8 i = 1; i < ASAN_SHADOW_GRANULARITY; i++)
PrintShadowByte(str, "", i, " ");
- str->append("\n");
+ str->AppendF("\n");
PrintShadowByte(str, " Heap left redzone: ",
kAsanHeapLeftRedzoneMagic);
PrintShadowByte(str, " Freed heap region: ", kAsanHeapFreeMagic);
@@ -559,8 +559,8 @@ static void PrintShadowBytes(InternalScopedString *str, const char *before,
u8 *bytes, u8 *guilty, uptr n) {
Decorator d;
if (before)
- str->append("%s%p:", before,
- (void *)ShadowToMem(reinterpret_cast<uptr>(bytes)));
+ str->AppendF("%s%p:", before,
+ (void *)ShadowToMem(reinterpret_cast<uptr>(bytes)));
for (uptr i = 0; i < n; i++) {
u8 *p = bytes + i;
const char *before =
@@ -568,7 +568,7 @@ static void PrintShadowBytes(InternalScopedString *str, const char *before,
const char *after = p == guilty ? "]" : "";
PrintShadowByte(str, before, *p, after);
}
- str->append("\n");
+ str->AppendF("\n");
}
static void PrintShadowMemoryForAddress(uptr addr) {
@@ -577,7 +577,7 @@ static void PrintShadowMemoryForAddress(uptr addr) {
const uptr n_bytes_per_row = 16;
uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1);
InternalScopedString str;
- str.append("Shadow bytes around the buggy address:\n");
+ str.AppendF("Shadow bytes around the buggy address:\n");
for (int i = -5; i <= 5; i++) {
uptr row_shadow_addr = aligned_shadow + i * n_bytes_per_row;
// Skip rows that would be outside the shadow range. This can happen when
diff --git a/compiler-rt/lib/asan/asan_fake_stack.cpp b/compiler-rt/lib/asan/asan_fake_stack.cpp
index 74a039b65798..7443ff166984 100644
--- a/compiler-rt/lib/asan/asan_fake_stack.cpp
+++ b/compiler-rt/lib/asan/asan_fake_stack.cpp
@@ -68,8 +68,8 @@ void FakeStack::Destroy(int tid) {
if (Verbosity() >= 2) {
InternalScopedString str;
for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
- str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
- NumberOfFrames(stack_size_log(), class_id));
+ str.AppendF("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
+ NumberOfFrames(stack_size_log(), class_id));
Report("T%d: FakeStack destroyed: %s\n", tid, str.data());
}
uptr size = RequiredSize(stack_size_log_);
@@ -133,6 +133,12 @@ void FakeStack::HandleNoReturn() {
needs_gc_ = true;
}
+// Hack: The statement below is not true if we take into account sigaltstack or
+// makecontext. It should be possible to make GC to discard wrong stack frame if
+// we use these tools. For now, let's support the simplest case and allow GC to
+// discard only frames from the default stack, assuming there is no buffer on
+// the stack which is used for makecontext or sigaltstack.
+//
// When throw, longjmp or some such happens we don't call OnFree() and
// as the result may leak one or more fake frames, but the good news is that
// we are notified about all such events by HandleNoReturn().
@@ -140,6 +146,14 @@ void FakeStack::HandleNoReturn() {
// We do it based on their 'real_stack' values -- everything that is lower
// than the current real_stack is garbage.
NOINLINE void FakeStack::GC(uptr real_stack) {
+ AsanThread *curr_thread = GetCurrentThread();
+ if (!curr_thread)
+ return; // Try again when we have a thread.
+ auto top = curr_thread->stack_top();
+ auto bottom = curr_thread->stack_bottom();
+ if (real_stack < bottom || real_stack > top)
+ return; // Not the default stack.
+
for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
u8 *flags = GetFlags(stack_size_log(), class_id);
for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
@@ -147,8 +161,12 @@ NOINLINE void FakeStack::GC(uptr real_stack) {
if (flags[i] == 0) continue; // not allocated.
FakeFrame *ff = reinterpret_cast<FakeFrame *>(
GetFrame(stack_size_log(), class_id, i));
- if (ff->real_stack < real_stack) {
+ // GC only on the default stack.
+ if (bottom < ff->real_stack && ff->real_stack < real_stack) {
flags[i] = 0;
+ // Poison the frame, so the any access will be reported as UAR.
+ SetShadow(reinterpret_cast<uptr>(ff), BytesInSizeClass(class_id),
+ class_id, kMagic8);
}
}
}
@@ -205,11 +223,12 @@ static FakeStack *GetFakeStackFastAlways() {
static ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
FakeStack *fs = GetFakeStackFast();
- if (!fs) return 0;
- uptr local_stack;
- uptr real_stack = reinterpret_cast<uptr>(&local_stack);
- FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
- if (!ff) return 0; // Out of fake stack.
+ if (!fs)
+ return 0;
+ FakeFrame *ff =
+ fs->Allocate(fs->stack_size_log(), class_id, GET_CURRENT_FRAME());
+ if (!ff)
+ return 0; // Out of fake stack.
uptr ptr = reinterpret_cast<uptr>(ff);
SetShadow(ptr, size, class_id, 0);
return ptr;
@@ -219,9 +238,8 @@ static ALWAYS_INLINE uptr OnMallocAlways(uptr class_id, uptr size) {
FakeStack *fs = GetFakeStackFastAlways();
if (!fs)
return 0;
- uptr local_stack;
- uptr real_stack = reinterpret_cast<uptr>(&local_stack);
- FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
+ FakeFrame *ff =
+ fs->Allocate(fs->stack_size_log(), class_id, GET_CURRENT_FRAME());
if (!ff)
return 0; // Out of fake stack.
uptr ptr = reinterpret_cast<uptr>(ff);
diff --git a/compiler-rt/lib/asan/asan_globals.cpp b/compiler-rt/lib/asan/asan_globals.cpp
index 4d391cb2a885..6ac64c4b776b 100644
--- a/compiler-rt/lib/asan/asan_globals.cpp
+++ b/compiler-rt/lib/asan/asan_globals.cpp
@@ -36,7 +36,6 @@ struct ListOfGlobals {
};
static Mutex mu_for_globals;
-static LowLevelAllocator allocator_for_globals;
static ListOfGlobals *list_of_all_globals;
static const int kDynamicInitGlobalsInitialCapacity = 512;
@@ -81,18 +80,19 @@ static bool IsAddressNearGlobal(uptr addr, const __asan_global &g) {
}
static void ReportGlobal(const Global &g, const char *prefix) {
+ DataInfo info;
+ bool symbolized = Symbolizer::GetOrInit()->SymbolizeData(g.beg, &info);
Report(
- "%s Global[%p]: beg=%p size=%zu/%zu name=%s module=%s dyn_init=%zu "
+ "%s Global[%p]: beg=%p size=%zu/%zu name=%s source=%s module=%s "
+ "dyn_init=%zu "
"odr_indicator=%p\n",
prefix, (void *)&g, (void *)g.beg, g.size, g.size_with_redzone, g.name,
- g.module_name, g.has_dynamic_init, (void *)g.odr_indicator);
+ g.module_name, (symbolized ? info.module : "?"), g.has_dynamic_init,
+ (void *)g.odr_indicator);
- DataInfo info;
- Symbolizer::GetOrInit()->SymbolizeData(g.beg, &info);
- if (info.line != 0) {
+ if (symbolized && info.line != 0) {
Report(" location: name=%s, %d\n", info.file, static_cast<int>(info.line));
- }
- else if (g.gcc_location != 0) {
+ } else if (g.gcc_location != 0) {
// Fallback to Global::gcc_location
Report(" location: name=%s, %d\n", g.gcc_location->filename, g.gcc_location->line_no);
}
@@ -199,7 +199,7 @@ static inline bool UseODRIndicator(const Global *g) {
// This function may be called more than once for every global
// so we store the globals in a map.
static void RegisterGlobal(const Global *g) {
- CHECK(asan_inited);
+ CHECK(AsanInited());
if (flags()->report_globals >= 2)
ReportGlobal(*g, "Added");
CHECK(flags()->report_globals);
@@ -225,13 +225,13 @@ static void RegisterGlobal(const Global *g) {
}
if (CanPoisonMemory())
PoisonRedZones(*g);
- ListOfGlobals *l = new(allocator_for_globals) ListOfGlobals;
+ ListOfGlobals *l = new (GetGlobalLowLevelAllocator()) ListOfGlobals;
l->g = g;
l->next = list_of_all_globals;
list_of_all_globals = l;
if (g->has_dynamic_init) {
if (!dynamic_init_globals) {
- dynamic_init_globals = new (allocator_for_globals) VectorOfGlobals;
+ dynamic_init_globals = new (GetGlobalLowLevelAllocator()) VectorOfGlobals;
dynamic_init_globals->reserve(kDynamicInitGlobalsInitialCapacity);
}
DynInitGlobal dyn_global = { *g, false };
@@ -240,7 +240,7 @@ static void RegisterGlobal(const Global *g) {
}
static void UnregisterGlobal(const Global *g) {
- CHECK(asan_inited);
+ CHECK(AsanInited());
if (flags()->report_globals >= 2)
ReportGlobal(*g, "Removed");
CHECK(flags()->report_globals);
@@ -296,24 +296,28 @@ void PrintGlobalNameIfASCII(InternalScopedString *str, const __asan_global &g) {
if (c == '\0' || !IsASCII(c)) return;
}
if (*(char *)(g.beg + g.size - 1) != '\0') return;
- str->append(" '%s' is ascii string '%s'\n", MaybeDemangleGlobalName(g.name),
- (char *)g.beg);
+ str->AppendF(" '%s' is ascii string '%s'\n", MaybeDemangleGlobalName(g.name),
+ (char *)g.beg);
}
-void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g) {
+void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g,
+ bool print_module_name) {
DataInfo info;
- Symbolizer::GetOrInit()->SymbolizeData(g.beg, &info);
-
- if (info.line != 0) {
- str->append("%s:%d", info.file, static_cast<int>(info.line));
+ if (Symbolizer::GetOrInit()->SymbolizeData(g.beg, &info) && info.line != 0) {
+ str->AppendF("%s:%d", info.file, static_cast<int>(info.line));
} else if (g.gcc_location != 0) {
// Fallback to Global::gcc_location
- str->append("%s", g.gcc_location->filename ? g.gcc_location->filename : g.module_name);
- if (g.gcc_location->line_no) str->append(":%d", g.gcc_location->line_no);
- if (g.gcc_location->column_no) str->append(":%d", g.gcc_location->column_no);
+ str->AppendF("%s", g.gcc_location->filename ? g.gcc_location->filename
+ : g.module_name);
+ if (g.gcc_location->line_no)
+ str->AppendF(":%d", g.gcc_location->line_no);
+ if (g.gcc_location->column_no)
+ str->AppendF(":%d", g.gcc_location->column_no);
} else {
- str->append("%s", g.module_name);
+ str->AppendF("%s", g.module_name);
}
+ if (print_module_name && info.module)
+ str->AppendF(" in %s", info.module);
}
} // namespace __asan
@@ -367,7 +371,7 @@ void __asan_register_globals(__asan_global *globals, uptr n) {
Lock lock(&mu_for_globals);
if (!global_registration_site_vector) {
global_registration_site_vector =
- new (allocator_for_globals) GlobalRegistrationSiteVector;
+ new (GetGlobalLowLevelAllocator()) GlobalRegistrationSiteVector;
global_registration_site_vector->reserve(128);
}
GlobalRegistrationSite site = {stack_id, &globals[0], &globals[n - 1]};
@@ -429,7 +433,7 @@ void __asan_before_dynamic_init(const char *module_name) {
return;
bool strict_init_order = flags()->strict_init_order;
CHECK(module_name);
- CHECK(asan_inited);
+ CHECK(AsanInited());
Lock lock(&mu_for_globals);
if (flags()->report_globals >= 3)
Printf("DynInitPoison module: %s\n", module_name);
@@ -453,7 +457,7 @@ void __asan_after_dynamic_init() {
!CanPoisonMemory() ||
!dynamic_init_globals)
return;
- CHECK(asan_inited);
+ CHECK(AsanInited());
Lock lock(&mu_for_globals);
// FIXME: Optionally report that we're unpoisoning globals from a module.
for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) {
diff --git a/compiler-rt/lib/asan/asan_interceptors.cpp b/compiler-rt/lib/asan/asan_interceptors.cpp
index b9b82564b330..4de2fa356374 100644
--- a/compiler-rt/lib/asan/asan_interceptors.cpp
+++ b/compiler-rt/lib/asan/asan_interceptors.cpp
@@ -96,15 +96,17 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
ASAN_WRITE_RANGE(ctx, ptr, size)
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
ASAN_READ_RANGE(ctx, ptr, size)
-#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
- ASAN_INTERCEPTOR_ENTER(ctx, func); \
- do { \
- if (asan_init_is_running) \
- return REAL(func)(__VA_ARGS__); \
- if (SANITIZER_APPLE && UNLIKELY(!asan_inited)) \
- return REAL(func)(__VA_ARGS__); \
- ENSURE_ASAN_INITED(); \
- } while (false)
+# define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
+ ASAN_INTERCEPTOR_ENTER(ctx, func); \
+ do { \
+ if constexpr (SANITIZER_APPLE) { \
+ if (UNLIKELY(!AsanInited())) \
+ return REAL(func)(__VA_ARGS__); \
+ } else { \
+ if (!TryAsanInitFromRtl()) \
+ return REAL(func)(__VA_ARGS__); \
+ } \
+ } while (false)
#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
do { \
} while (false)
@@ -138,7 +140,7 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
# define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
# define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle)
# define COMMON_INTERCEPTOR_LIBRARY_UNLOADED()
-# define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited)
+# define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!AsanInited())
# define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
if (AsanThread *t = GetCurrentThread()) { \
*begin = t->tls_begin(); \
@@ -194,10 +196,13 @@ static int munmap_interceptor(Munmap real_munmap, void *addr, SIZE_T length) {
__lsan::ScopedInterceptorDisabler disabler
#endif
-#define SIGNAL_INTERCEPTOR_ENTER() ENSURE_ASAN_INITED()
+# define SIGNAL_INTERCEPTOR_ENTER() \
+ do { \
+ AsanInitFromRtl(); \
+ } while (false)
-#include "sanitizer_common/sanitizer_common_interceptors.inc"
-#include "sanitizer_common/sanitizer_signal_interceptors.inc"
+# include "sanitizer_common/sanitizer_common_interceptors.inc"
+# include "sanitizer_common/sanitizer_signal_interceptors.inc"
// Syscall interceptors don't have contexts, we don't support suppressions
// for them.
@@ -222,9 +227,17 @@ static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
SetCurrentThread(t);
auto self = GetThreadSelf();
auto args = asanThreadArgRetval().GetArgs(self);
- thread_return_t retval = t->ThreadStart(GetTid());
+ t->ThreadStart(GetTid());
+
+# if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
+ SANITIZER_SOLARIS
+ __sanitizer_sigset_t sigset;
+ t->GetStartData(sigset);
+ SetSigProcMask(&sigset, nullptr);
+# endif
+
+ thread_return_t retval = (*args.routine)(args.arg_retval);
asanThreadArgRetval().Finish(self, retval);
- CHECK_EQ(args.arg_retval, t->get_arg());
return retval;
}
@@ -242,8 +255,14 @@ INTERCEPTOR(int, pthread_create, void *thread, void *attr,
}();
u32 current_tid = GetCurrentTidOrInvalid();
- AsanThread *t =
- AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
+
+ __sanitizer_sigset_t sigset = {};
+# if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
+ SANITIZER_SOLARIS
+ ScopedBlockSignals block(&sigset);
+# endif
+
+ AsanThread *t = AsanThread::Create(sigset, current_tid, &stack, detached);
int result;
{
@@ -286,9 +305,9 @@ INTERCEPTOR(int, pthread_detach, void *thread) {
return result;
}
-INTERCEPTOR(int, pthread_exit, void *retval) {
+INTERCEPTOR(void, pthread_exit, void *retval) {
asanThreadArgRetval().Finish(GetThreadSelf(), retval);
- return REAL(pthread_exit)(retval);
+ REAL(pthread_exit)(retval);
}
# if ASAN_INTERCEPT_TRYJOIN
@@ -480,7 +499,7 @@ DEFINE_REAL(char*, index, const char *string, int c)
INTERCEPTOR(char *, strcat, char *to, const char *from) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strcat);
- ENSURE_ASAN_INITED();
+ AsanInitFromRtl();
if (flags()->replace_str) {
uptr from_length = internal_strlen(from);
ASAN_READ_RANGE(ctx, from, from_length + 1);
@@ -501,7 +520,7 @@ DEFINE_REAL(char*, index, const char *string, int c)
INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strncat);
- ENSURE_ASAN_INITED();
+ AsanInitFromRtl();
if (flags()->replace_str) {
uptr from_length = MaybeRealStrnlen(from, size);
uptr copy_length = Min(size, from_length + 1);
@@ -520,16 +539,16 @@ INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) {
INTERCEPTOR(char *, strcpy, char *to, const char *from) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strcpy);
-#if SANITIZER_APPLE
- if (UNLIKELY(!asan_inited))
- return REAL(strcpy)(to, from);
-#endif
- // strcpy is called from malloc_default_purgeable_zone()
- // in __asan::ReplaceSystemAlloc() on Mac.
- if (asan_init_is_running) {
- return REAL(strcpy)(to, from);
+ if constexpr (SANITIZER_APPLE) {
+ // strcpy is called from malloc_default_purgeable_zone()
+ // in __asan::ReplaceSystemAlloc() on Mac.
+ if (UNLIKELY(!AsanInited()))
+ return REAL(strcpy)(to, from);
+ } else {
+ if (!TryAsanInitFromRtl())
+ return REAL(strcpy)(to, from);
}
- ENSURE_ASAN_INITED();
+
if (flags()->replace_str) {
uptr from_size = internal_strlen(from) + 1;
CHECK_RANGES_OVERLAP("strcpy", to, from_size, from, from_size);
@@ -542,8 +561,8 @@ INTERCEPTOR(char *, strcpy, char *to, const char *from) {
INTERCEPTOR(char*, strdup, const char *s) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strdup);
- if (UNLIKELY(!asan_inited)) return internal_strdup(s);
- ENSURE_ASAN_INITED();
+ if (UNLIKELY(!TryAsanInitFromRtl()))
+ return internal_strdup(s);
uptr length = internal_strlen(s);
if (flags()->replace_str) {
ASAN_READ_RANGE(ctx, s, length + 1);
@@ -560,8 +579,8 @@ INTERCEPTOR(char*, strdup, const char *s) {
INTERCEPTOR(char*, __strdup, const char *s) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strdup);
- if (UNLIKELY(!asan_inited)) return internal_strdup(s);
- ENSURE_ASAN_INITED();
+ if (UNLIKELY(!TryAsanInitFromRtl()))
+ return internal_strdup(s);
uptr length = internal_strlen(s);
if (flags()->replace_str) {
ASAN_READ_RANGE(ctx, s, length + 1);
@@ -578,7 +597,7 @@ INTERCEPTOR(char*, __strdup, const char *s) {
INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strncpy);
- ENSURE_ASAN_INITED();
+ AsanInitFromRtl();
if (flags()->replace_str) {
uptr from_size = Min(size, MaybeRealStrnlen(from, size) + 1);
CHECK_RANGES_OVERLAP("strncpy", to, from_size, from, from_size);
@@ -588,26 +607,40 @@ INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) {
return REAL(strncpy)(to, from, size);
}
-INTERCEPTOR(long, strtol, const char *nptr, char **endptr, int base) {
- void *ctx;
- ASAN_INTERCEPTOR_ENTER(ctx, strtol);
- ENSURE_ASAN_INITED();
- if (!flags()->replace_str) {
- return REAL(strtol)(nptr, endptr, base);
- }
+template <typename Fn>
+static ALWAYS_INLINE auto StrtolImpl(void *ctx, Fn real, const char *nptr,
+ char **endptr, int base)
+ -> decltype(real(nullptr, nullptr, 0)) {
+ if (!flags()->replace_str)
+ return real(nptr, endptr, base);
char *real_endptr;
- long result = REAL(strtol)(nptr, &real_endptr, base);
+ auto res = real(nptr, &real_endptr, base);
StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
- return result;
+ return res;
}
+# define INTERCEPTOR_STRTO_BASE(ret_type, func) \
+ INTERCEPTOR(ret_type, func, const char *nptr, char **endptr, int base) { \
+ void *ctx; \
+ ASAN_INTERCEPTOR_ENTER(ctx, func); \
+ AsanInitFromRtl(); \
+ return StrtolImpl(ctx, REAL(func), nptr, endptr, base); \
+ }
+
+INTERCEPTOR_STRTO_BASE(long, strtol)
+INTERCEPTOR_STRTO_BASE(long long, strtoll)
+
+# if SANITIZER_GLIBC
+INTERCEPTOR_STRTO_BASE(long, __isoc23_strtol)
+INTERCEPTOR_STRTO_BASE(long long, __isoc23_strtoll)
+# endif
+
INTERCEPTOR(int, atoi, const char *nptr) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, atoi);
-#if SANITIZER_APPLE
- if (UNLIKELY(!asan_inited)) return REAL(atoi)(nptr);
-#endif
- ENSURE_ASAN_INITED();
+ if (SANITIZER_APPLE && UNLIKELY(!AsanInited()))
+ return REAL(atoi)(nptr);
+ AsanInitFromRtl();
if (!flags()->replace_str) {
return REAL(atoi)(nptr);
}
@@ -625,10 +658,9 @@ INTERCEPTOR(int, atoi, const char *nptr) {
INTERCEPTOR(long, atol, const char *nptr) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, atol);
-#if SANITIZER_APPLE
- if (UNLIKELY(!asan_inited)) return REAL(atol)(nptr);
-#endif
- ENSURE_ASAN_INITED();
+ if (SANITIZER_APPLE && UNLIKELY(!AsanInited()))
+ return REAL(atol)(nptr);
+ AsanInitFromRtl();
if (!flags()->replace_str) {
return REAL(atol)(nptr);
}
@@ -639,24 +671,10 @@ INTERCEPTOR(long, atol, const char *nptr) {
return result;
}
-#if ASAN_INTERCEPT_ATOLL_AND_STRTOLL
-INTERCEPTOR(long long, strtoll, const char *nptr, char **endptr, int base) {
- void *ctx;
- ASAN_INTERCEPTOR_ENTER(ctx, strtoll);
- ENSURE_ASAN_INITED();
- if (!flags()->replace_str) {
- return REAL(strtoll)(nptr, endptr, base);
- }
- char *real_endptr;
- long long result = REAL(strtoll)(nptr, &real_endptr, base);
- StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
- return result;
-}
-
INTERCEPTOR(long long, atoll, const char *nptr) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, atoll);
- ENSURE_ASAN_INITED();
+ AsanInitFromRtl();
if (!flags()->replace_str) {
return REAL(atoll)(nptr);
}
@@ -666,7 +684,6 @@ INTERCEPTOR(long long, atoll, const char *nptr) {
ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1);
return result;
}
-#endif // ASAN_INTERCEPT_ATOLL_AND_STRTOLL
#if ASAN_INTERCEPT___CXA_ATEXIT || ASAN_INTERCEPT_ATEXIT
static void AtCxaAtexit(void *unused) {
@@ -678,11 +695,10 @@ static void AtCxaAtexit(void *unused) {
#if ASAN_INTERCEPT___CXA_ATEXIT
INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
void *dso_handle) {
-#if SANITIZER_APPLE
- if (UNLIKELY(!asan_inited)) return REAL(__cxa_atexit)(func, arg, dso_handle);
-#endif
- ENSURE_ASAN_INITED();
-#if CAN_SANITIZE_LEAKS
+ if (SANITIZER_APPLE && UNLIKELY(!AsanInited()))
+ return REAL(__cxa_atexit)(func, arg, dso_handle);
+ AsanInitFromRtl();
+# if CAN_SANITIZE_LEAKS
__lsan::ScopedInterceptorDisabler disabler;
#endif
int res = REAL(__cxa_atexit)(func, arg, dso_handle);
@@ -693,8 +709,8 @@ INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
#if ASAN_INTERCEPT_ATEXIT
INTERCEPTOR(int, atexit, void (*func)()) {
- ENSURE_ASAN_INITED();
-#if CAN_SANITIZE_LEAKS
+ AsanInitFromRtl();
+# if CAN_SANITIZE_LEAKS
__lsan::ScopedInterceptorDisabler disabler;
#endif
// Avoid calling real atexit as it is unreachable on at least on Linux.
@@ -751,11 +767,13 @@ void InitializeAsanInterceptors() {
ASAN_INTERCEPT_FUNC(atoi);
ASAN_INTERCEPT_FUNC(atol);
- ASAN_INTERCEPT_FUNC(strtol);
-#if ASAN_INTERCEPT_ATOLL_AND_STRTOLL
ASAN_INTERCEPT_FUNC(atoll);
+ ASAN_INTERCEPT_FUNC(strtol);
ASAN_INTERCEPT_FUNC(strtoll);
-#endif
+# if SANITIZER_GLIBC
+ ASAN_INTERCEPT_FUNC(__isoc23_strtol);
+ ASAN_INTERCEPT_FUNC(__isoc23_strtoll);
+# endif
// Intecept jump-related functions.
ASAN_INTERCEPT_FUNC(longjmp);
diff --git a/compiler-rt/lib/asan/asan_interceptors.h b/compiler-rt/lib/asan/asan_interceptors.h
index 268096fea5e7..826b45f5ada8 100644
--- a/compiler-rt/lib/asan/asan_interceptors.h
+++ b/compiler-rt/lib/asan/asan_interceptors.h
@@ -24,14 +24,6 @@ namespace __asan {
void InitializeAsanInterceptors();
void InitializePlatformInterceptors();
-#define ENSURE_ASAN_INITED() \
- do { \
- CHECK(!asan_init_is_running); \
- if (UNLIKELY(!asan_inited)) { \
- AsanInitFromRtl(); \
- } \
- } while (0)
-
} // namespace __asan
// There is no general interception at all on Fuchsia.
@@ -42,12 +34,10 @@ void InitializePlatformInterceptors();
// Use macro to describe if specific function should be
// intercepted on a given platform.
#if !SANITIZER_WINDOWS
-# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 1
# define ASAN_INTERCEPT__LONGJMP 1
# define ASAN_INTERCEPT_INDEX 1
# define ASAN_INTERCEPT_PTHREAD_CREATE 1
#else
-# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0
# define ASAN_INTERCEPT__LONGJMP 0
# define ASAN_INTERCEPT_INDEX 0
# define ASAN_INTERCEPT_PTHREAD_CREATE 0
diff --git a/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp b/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp
index 4e4ea7191d32..bdf328f89206 100644
--- a/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp
+++ b/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp
@@ -33,7 +33,7 @@ using namespace __asan;
} \
ASAN_READ_RANGE(ctx, from, size); \
ASAN_WRITE_RANGE(ctx, to, size); \
- } else if (UNLIKELY(!asan_inited)) { \
+ } else if (UNLIKELY(!AsanInited())) { \
return internal_memcpy(to, from, size); \
} \
return REAL(memcpy)(to, from, size); \
@@ -44,7 +44,7 @@ using namespace __asan;
do { \
if (LIKELY(replace_intrin_cached)) { \
ASAN_WRITE_RANGE(ctx, block, size); \
- } else if (UNLIKELY(!asan_inited)) { \
+ } else if (UNLIKELY(!AsanInited())) { \
return internal_memset(block, c, size); \
} \
return REAL(memset)(block, c, size); \
diff --git a/compiler-rt/lib/asan/asan_internal.h b/compiler-rt/lib/asan/asan_internal.h
index a5348e35b297..5b97e77882cd 100644
--- a/compiler-rt/lib/asan/asan_internal.h
+++ b/compiler-rt/lib/asan/asan_internal.h
@@ -60,6 +60,7 @@ class AsanThread;
using __sanitizer::StackTrace;
void AsanInitFromRtl();
+bool TryAsanInitFromRtl();
// asan_win.cpp
void InitializePlatformExceptionHandlers();
@@ -130,9 +131,7 @@ void InstallAtExitCheckLeaks();
if (&__asan_on_error) \
__asan_on_error()
-extern int asan_inited;
-// Used to avoid infinite recursion in __asan_init().
-extern bool asan_init_is_running;
+bool AsanInited();
extern bool replace_intrin_cached;
extern void (*death_callback)(void);
// These magic values are written to shadow for better error
diff --git a/compiler-rt/lib/asan/asan_lock.h b/compiler-rt/lib/asan/asan_lock.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/compiler-rt/lib/asan/asan_lock.h
+++ /dev/null
diff --git a/compiler-rt/lib/asan/asan_mac.cpp b/compiler-rt/lib/asan/asan_mac.cpp
index 0c0750061447..1b0e9b3fe006 100644
--- a/compiler-rt/lib/asan/asan_mac.cpp
+++ b/compiler-rt/lib/asan/asan_mac.cpp
@@ -139,9 +139,11 @@ typedef void (*dispatch_mach_handler_function_t)(void *context,
dispatch_mach_reason reason,
dispatch_mach_msg_t message,
mach_error_t error);
+# if !defined(MISSING_BLOCKS_SUPPORT)
typedef void (^dispatch_mach_handler_t)(dispatch_mach_reason reason,
dispatch_mach_msg_t message,
mach_error_t error);
+# endif
// A wrapper for the ObjC blocks used to support libdispatch.
typedef struct {
@@ -154,8 +156,7 @@ ALWAYS_INLINE
void asan_register_worker_thread(int parent_tid, StackTrace *stack) {
AsanThread *t = GetCurrentThread();
if (!t) {
- t = AsanThread::Create(/* start_routine */ nullptr, /* arg */ nullptr,
- parent_tid, stack, /* detached */ true);
+ t = AsanThread::Create(parent_tid, stack, /* detached */ true);
t->Init();
asanThreadRegistry().StartThread(t->tid(), GetTid(), ThreadType::Worker,
nullptr);
diff --git a/compiler-rt/lib/asan/asan_malloc_linux.cpp b/compiler-rt/lib/asan/asan_malloc_linux.cpp
index bab80b96f584..d426b923c94e 100644
--- a/compiler-rt/lib/asan/asan_malloc_linux.cpp
+++ b/compiler-rt/lib/asan/asan_malloc_linux.cpp
@@ -31,7 +31,7 @@
using namespace __asan;
struct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {
- static bool UseImpl() { return asan_init_is_running; }
+ static bool UseImpl() { return !TryAsanInitFromRtl(); }
static void OnAllocate(const void *ptr, uptr size) {
# if CAN_SANITIZE_LEAKS
// Suppress leaks from dlerror(). Previously dlsym hack on global array was
@@ -65,7 +65,6 @@ INTERCEPTOR(void, cfree, void *ptr) {
INTERCEPTOR(void*, malloc, uptr size) {
if (DlsymAlloc::Use())
return DlsymAlloc::Allocate(size);
- ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
return asan_malloc(size, &stack);
}
@@ -73,7 +72,6 @@ INTERCEPTOR(void*, malloc, uptr size) {
INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
if (DlsymAlloc::Use())
return DlsymAlloc::Callocate(nmemb, size);
- ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
return asan_calloc(nmemb, size, &stack);
}
@@ -81,14 +79,13 @@ INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
INTERCEPTOR(void*, realloc, void *ptr, uptr size) {
if (DlsymAlloc::Use() || DlsymAlloc::PointerIsMine(ptr))
return DlsymAlloc::Realloc(ptr, size);
- ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
return asan_realloc(ptr, size, &stack);
}
#if SANITIZER_INTERCEPT_REALLOCARRAY
INTERCEPTOR(void*, reallocarray, void *ptr, uptr nmemb, uptr size) {
- ENSURE_ASAN_INITED();
+ AsanInitFromRtl();
GET_STACK_TRACE_MALLOC;
return asan_reallocarray(ptr, nmemb, size, &stack);
}
diff --git a/compiler-rt/lib/asan/asan_malloc_mac.cpp b/compiler-rt/lib/asan/asan_malloc_mac.cpp
index 924d1f12640a..f25d7e190153 100644
--- a/compiler-rt/lib/asan/asan_malloc_mac.cpp
+++ b/compiler-rt/lib/asan/asan_malloc_mac.cpp
@@ -22,46 +22,48 @@
using namespace __asan;
#define COMMON_MALLOC_ZONE_NAME "asan"
-#define COMMON_MALLOC_ENTER() ENSURE_ASAN_INITED()
-#define COMMON_MALLOC_SANITIZER_INITIALIZED asan_inited
-#define COMMON_MALLOC_FORCE_LOCK() asan_mz_force_lock()
-#define COMMON_MALLOC_FORCE_UNLOCK() asan_mz_force_unlock()
-#define COMMON_MALLOC_MEMALIGN(alignment, size) \
- GET_STACK_TRACE_MALLOC; \
- void *p = asan_memalign(alignment, size, &stack, FROM_MALLOC)
-#define COMMON_MALLOC_MALLOC(size) \
- GET_STACK_TRACE_MALLOC; \
- void *p = asan_malloc(size, &stack)
-#define COMMON_MALLOC_REALLOC(ptr, size) \
- GET_STACK_TRACE_MALLOC; \
- void *p = asan_realloc(ptr, size, &stack);
-#define COMMON_MALLOC_CALLOC(count, size) \
- GET_STACK_TRACE_MALLOC; \
- void *p = asan_calloc(count, size, &stack);
-#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \
- GET_STACK_TRACE_MALLOC; \
- int res = asan_posix_memalign(memptr, alignment, size, &stack);
-#define COMMON_MALLOC_VALLOC(size) \
- GET_STACK_TRACE_MALLOC; \
- void *p = asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC);
-#define COMMON_MALLOC_FREE(ptr) \
- GET_STACK_TRACE_FREE; \
- asan_free(ptr, &stack, FROM_MALLOC);
-#define COMMON_MALLOC_SIZE(ptr) \
- uptr size = asan_mz_size(ptr);
-#define COMMON_MALLOC_FILL_STATS(zone, stats) \
- AsanMallocStats malloc_stats; \
- FillMallocStatistics(&malloc_stats); \
- CHECK(sizeof(malloc_statistics_t) == sizeof(AsanMallocStats)); \
- internal_memcpy(stats, &malloc_stats, sizeof(malloc_statistics_t));
-#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
- GET_STACK_TRACE_FREE; \
- ReportMacMzReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
-#define COMMON_MALLOC_NAMESPACE __asan
-#define COMMON_MALLOC_HAS_ZONE_ENUMERATOR 0
-#define COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT 1
+# define COMMON_MALLOC_ENTER() \
+ do { \
+ AsanInitFromRtl(); \
+ } while (false)
+# define COMMON_MALLOC_SANITIZER_INITIALIZED AsanInited()
+# define COMMON_MALLOC_FORCE_LOCK() asan_mz_force_lock()
+# define COMMON_MALLOC_FORCE_UNLOCK() asan_mz_force_unlock()
+# define COMMON_MALLOC_MEMALIGN(alignment, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = asan_memalign(alignment, size, &stack, FROM_MALLOC)
+# define COMMON_MALLOC_MALLOC(size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = asan_malloc(size, &stack)
+# define COMMON_MALLOC_REALLOC(ptr, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = asan_realloc(ptr, size, &stack);
+# define COMMON_MALLOC_CALLOC(count, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = asan_calloc(count, size, &stack);
+# define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \
+ GET_STACK_TRACE_MALLOC; \
+ int res = asan_posix_memalign(memptr, alignment, size, &stack);
+# define COMMON_MALLOC_VALLOC(size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC);
+# define COMMON_MALLOC_FREE(ptr) \
+ GET_STACK_TRACE_FREE; \
+ asan_free(ptr, &stack, FROM_MALLOC);
+# define COMMON_MALLOC_SIZE(ptr) uptr size = asan_mz_size(ptr);
+# define COMMON_MALLOC_FILL_STATS(zone, stats) \
+ AsanMallocStats malloc_stats; \
+ FillMallocStatistics(&malloc_stats); \
+ CHECK(sizeof(malloc_statistics_t) == sizeof(AsanMallocStats)); \
+ internal_memcpy(stats, &malloc_stats, sizeof(malloc_statistics_t));
+# define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
+ GET_STACK_TRACE_FREE; \
+ ReportMacMzReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
+# define COMMON_MALLOC_NAMESPACE __asan
+# define COMMON_MALLOC_HAS_ZONE_ENUMERATOR 0
+# define COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT 1
-#include "sanitizer_common/sanitizer_malloc_mac.inc"
+# include "sanitizer_common/sanitizer_malloc_mac.inc"
namespace COMMON_MALLOC_NAMESPACE {
diff --git a/compiler-rt/lib/asan/asan_malloc_win.cpp b/compiler-rt/lib/asan/asan_malloc_win.cpp
index ff78d7646a90..7e1d04c36dd5 100644
--- a/compiler-rt/lib/asan/asan_malloc_win.cpp
+++ b/compiler-rt/lib/asan/asan_malloc_win.cpp
@@ -211,7 +211,7 @@ INTERCEPTOR_WINAPI(size_t, HeapSize, HANDLE hHeap, DWORD dwFlags,
// interception takes place, so if it is not owned by the RTL heap we can
// pass it to the ASAN heap for inspection.
if (flags()->windows_hook_rtl_allocators) {
- if (!asan_inited || OWNED_BY_RTL(hHeap, lpMem))
+ if (!AsanInited() || OWNED_BY_RTL(hHeap, lpMem))
return REAL(HeapSize)(hHeap, dwFlags, lpMem);
} else {
CHECK(dwFlags == 0 && "unsupported heap flags");
@@ -226,7 +226,7 @@ INTERCEPTOR_WINAPI(LPVOID, HeapAlloc, HANDLE hHeap, DWORD dwFlags,
// If the ASAN runtime is not initialized, or we encounter an unsupported
// flag, fall back to the original allocator.
if (flags()->windows_hook_rtl_allocators) {
- if (UNLIKELY(!asan_inited ||
+ if (UNLIKELY(!AsanInited() ||
(dwFlags & HEAP_ALLOCATE_UNSUPPORTED_FLAGS) != 0)) {
return REAL(HeapAlloc)(hHeap, dwFlags, dwBytes);
}
@@ -297,7 +297,7 @@ void *SharedReAlloc(ReAllocFunction reallocFunc, SizeFunction heapSizeFunc,
// If this heap block which was allocated before the ASAN
// runtime came up, use the real HeapFree function.
- if (UNLIKELY(!asan_inited)) {
+ if (UNLIKELY(!AsanInited())) {
return reallocFunc(hHeap, dwFlags, lpMem, dwBytes);
}
bool only_asan_supported_flags =
@@ -420,7 +420,7 @@ size_t RtlSizeHeap(void* HeapHandle, DWORD Flags, void* BaseAddress);
INTERCEPTOR_WINAPI(size_t, RtlSizeHeap, HANDLE HeapHandle, DWORD Flags,
void* BaseAddress) {
if (!flags()->windows_hook_rtl_allocators ||
- UNLIKELY(!asan_inited || OWNED_BY_RTL(HeapHandle, BaseAddress))) {
+ UNLIKELY(!AsanInited() || OWNED_BY_RTL(HeapHandle, BaseAddress))) {
return REAL(RtlSizeHeap)(HeapHandle, Flags, BaseAddress);
}
GET_CURRENT_PC_BP_SP;
@@ -448,7 +448,7 @@ INTERCEPTOR_WINAPI(void*, RtlAllocateHeap, HANDLE HeapHandle, DWORD Flags,
// If the ASAN runtime is not initialized, or we encounter an unsupported
// flag, fall back to the original allocator.
if (!flags()->windows_hook_rtl_allocators ||
- UNLIKELY(!asan_inited ||
+ UNLIKELY(!AsanInited() ||
(Flags & HEAP_ALLOCATE_UNSUPPORTED_FLAGS) != 0)) {
return REAL(RtlAllocateHeap)(HeapHandle, Flags, Size);
}
diff --git a/compiler-rt/lib/asan/asan_poisoning.cpp b/compiler-rt/lib/asan/asan_poisoning.cpp
index e99b91d9c0a7..746ad61813c6 100644
--- a/compiler-rt/lib/asan/asan_poisoning.cpp
+++ b/compiler-rt/lib/asan/asan_poisoning.cpp
@@ -160,10 +160,6 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
return;
}
CHECK_LT(beg.chunk, end.chunk);
- if (beg.offset > 0) {
- *beg.chunk = 0;
- beg.chunk++;
- }
REAL(memset)(beg.chunk, 0, end.chunk - beg.chunk);
if (end.offset > 0 && end.value != 0) {
*end.chunk = Max(end.value, end.offset);
@@ -450,8 +446,11 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
// https://github.com/google/sanitizers/issues/258.
// if (d1 != d2)
// DCHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
- if (a + granularity <= d1)
+ //
+ // NOTE: curly brackets for the "if" below to silence a MSVC warning.
+ if (a + granularity <= d1) {
DCHECK_EQ(*(u8 *)MemToShadow(a), 0);
+ }
// if (d2 + granularity <= c && c <= end)
// DCHECK_EQ(*(u8 *)MemToShadow(c - granularity),
// kAsanContiguousContainerOOBMagic);
diff --git a/compiler-rt/lib/asan/asan_posix.cpp b/compiler-rt/lib/asan/asan_posix.cpp
index 765f4a26cd7a..e1f66641617c 100644
--- a/compiler-rt/lib/asan/asan_posix.cpp
+++ b/compiler-rt/lib/asan/asan_posix.cpp
@@ -138,6 +138,12 @@ void PlatformTSDDtor(void *tsd) {
CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
return;
}
+# if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
+ SANITIZER_SOLARIS
+ // After this point it's unsafe to execute signal handlers which may be
+ // instrumented. It's probably not just a Linux issue.
+ BlockSignals();
+# endif
AsanThread::TSDDtor(tsd);
}
#endif
diff --git a/compiler-rt/lib/asan/asan_report.cpp b/compiler-rt/lib/asan/asan_report.cpp
index f2c04342e778..7603e8131154 100644
--- a/compiler-rt/lib/asan/asan_report.cpp
+++ b/compiler-rt/lib/asan/asan_report.cpp
@@ -60,9 +60,9 @@ void AppendToErrorMessageBuffer(const char *buffer) {
void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
bool in_shadow, const char *after) {
Decorator d;
- str->append("%s%s%x%x%s%s", before,
- in_shadow ? d.ShadowByte(byte) : d.MemoryByte(), byte >> 4,
- byte & 15, d.Default(), after);
+ str->AppendF("%s%s%x%x%s%s", before,
+ in_shadow ? d.ShadowByte(byte) : d.MemoryByte(), byte >> 4,
+ byte & 15, d.Default(), after);
}
static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
diff --git a/compiler-rt/lib/asan/asan_report.h b/compiler-rt/lib/asan/asan_report.h
index 248e30dd42bd..3540b3b4b1bf 100644
--- a/compiler-rt/lib/asan/asan_report.h
+++ b/compiler-rt/lib/asan/asan_report.h
@@ -35,7 +35,8 @@ int GetGlobalsForAddress(uptr addr, __asan_global *globals, u32 *reg_sites,
const char *MaybeDemangleGlobalName(const char *name);
void PrintGlobalNameIfASCII(InternalScopedString *str, const __asan_global &g);
-void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g);
+void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g,
+ bool print_module_name);
void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
bool in_shadow, const char *after = "\n");
diff --git a/compiler-rt/lib/asan/asan_rtl.cpp b/compiler-rt/lib/asan/asan_rtl.cpp
index 853083182b48..04ecd20821fa 100644
--- a/compiler-rt/lib/asan/asan_rtl.cpp
+++ b/compiler-rt/lib/asan/asan_rtl.cpp
@@ -71,8 +71,17 @@ static void CheckUnwind() {
}
// -------------------------- Globals --------------------- {{{1
-int asan_inited;
-bool asan_init_is_running;
+static int asan_inited = 0;
+static int asan_init_is_running = 0;
+
+static void SetAsanInited() { asan_inited = 1; }
+
+static void SetAsanInitIsRunning(u32 val) { asan_init_is_running = val; }
+
+bool AsanInited() { return asan_inited == 1; }
+
+static bool AsanInitIsRunning() { return asan_init_is_running == 1; }
+
bool replace_intrin_cached;
#if !ASAN_FIXED_MAPPING
@@ -382,10 +391,11 @@ void PrintAddressSpaceLayout() {
}
static void AsanInitInternal() {
- if (LIKELY(asan_inited)) return;
+ if (LIKELY(AsanInited()))
+ return;
SanitizerToolName = "AddressSanitizer";
- CHECK(!asan_init_is_running && "ASan init calls itself!");
- asan_init_is_running = true;
+ CHECK(!AsanInitIsRunning() && "ASan init calls itself!");
+ SetAsanInitIsRunning(1);
CacheBinaryName();
@@ -398,7 +408,7 @@ static void AsanInitInternal() {
// Stop performing init at this point if we are being loaded via
// dlopen() and the platform supports it.
if (SANITIZER_SUPPORTS_INIT_FOR_DLOPEN && UNLIKELY(HandleDlopenInit())) {
- asan_init_is_running = false;
+ SetAsanInitIsRunning(0);
VReport(1, "AddressSanitizer init is being performed for dlopen().\n");
return;
}
@@ -460,8 +470,8 @@ static void AsanInitInternal() {
// On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited
// should be set to 1 prior to initializing the threads.
replace_intrin_cached = flags()->replace_intrin;
- asan_inited = 1;
- asan_init_is_running = false;
+ SetAsanInited();
+ SetAsanInitIsRunning(0);
if (flags()->atexit)
Atexit(asan_atexit);
@@ -510,7 +520,17 @@ static void AsanInitInternal() {
// Initialize as requested from some part of ASan runtime library (interceptors,
// allocator, etc).
void AsanInitFromRtl() {
- AsanInitInternal();
+ CHECK(!AsanInitIsRunning());
+ if (UNLIKELY(!AsanInited()))
+ AsanInitInternal();
+}
+
+bool TryAsanInitFromRtl() {
+ if (UNLIKELY(AsanInitIsRunning()))
+ return false;
+ if (UNLIKELY(!AsanInited()))
+ AsanInitInternal();
+ return true;
}
#if ASAN_DYNAMIC
@@ -583,7 +603,7 @@ static void UnpoisonFakeStack() {
using namespace __asan;
void NOINLINE __asan_handle_no_return() {
- if (asan_init_is_running)
+ if (AsanInitIsRunning())
return;
if (!PlatformUnpoisonStacks())
diff --git a/compiler-rt/lib/asan/asan_rtl_x86_64.S b/compiler-rt/lib/asan/asan_rtl_x86_64.S
index d93b5ed2a7fe..0b7363018f42 100644
--- a/compiler-rt/lib/asan/asan_rtl_x86_64.S
+++ b/compiler-rt/lib/asan/asan_rtl_x86_64.S
@@ -35,35 +35,29 @@ RLABEL(reg, op, s, add): ;\
#define ASAN_MEMORY_ACCESS_EXTRA_CHECK_1(reg, op, i) \
CLABEL(reg, op, 1, i): ;\
- push %rcx ;\
- mov %##reg,%rcx ;\
- and $0x7,%ecx ;\
- cmp %r10d,%ecx ;\
- pop %rcx ;\
+ mov %##reg,%r11 ;\
+ and $0x7,%r11d ;\
+ cmp %r10d,%r11d ;\
jl RLABEL(reg, op, 1, i);\
mov %##reg,%rdi ;\
jmp __asan_report_##op##1_asm ;\
#define ASAN_MEMORY_ACCESS_EXTRA_CHECK_2(reg, op, i) \
CLABEL(reg, op, 2, i): ;\
- push %rcx ;\
- mov %##reg,%rcx ;\
- and $0x7,%ecx ;\
- add $0x1,%ecx ;\
- cmp %r10d,%ecx ;\
- pop %rcx ;\
+ mov %##reg,%r11 ;\
+ and $0x7,%r11d ;\
+ add $0x1,%r11d ;\
+ cmp %r10d,%r11d ;\
jl RLABEL(reg, op, 2, i);\
mov %##reg,%rdi ;\
jmp __asan_report_##op##2_asm ;\
#define ASAN_MEMORY_ACCESS_EXTRA_CHECK_4(reg, op, i) \
CLABEL(reg, op, 4, i): ;\
- push %rcx ;\
- mov %##reg,%rcx ;\
- and $0x7,%ecx ;\
- add $0x3,%ecx ;\
- cmp %r10d,%ecx ;\
- pop %rcx ;\
+ mov %##reg,%r11 ;\
+ and $0x7,%r11d ;\
+ add $0x3,%r11d ;\
+ cmp %r10d,%r11d ;\
jl RLABEL(reg, op, 4, i);\
mov %##reg,%rdi ;\
jmp __asan_report_##op##4_asm ;\
diff --git a/compiler-rt/lib/asan/asan_stack.cpp b/compiler-rt/lib/asan/asan_stack.cpp
index 048295d6928a..764c6ac193fb 100644
--- a/compiler-rt/lib/asan/asan_stack.cpp
+++ b/compiler-rt/lib/asan/asan_stack.cpp
@@ -57,7 +57,7 @@ void __sanitizer::BufferedStackTrace::UnwindImpl(
uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) {
using namespace __asan;
size = 0;
- if (UNLIKELY(!asan_inited))
+ if (UNLIKELY(!AsanInited()))
return;
request_fast = StackTrace::WillUseFastUnwind(request_fast);
AsanThread *t = GetCurrentThread();
diff --git a/compiler-rt/lib/asan/asan_stats.cpp b/compiler-rt/lib/asan/asan_stats.cpp
index 9a715ea76fee..78cb30ec763d 100644
--- a/compiler-rt/lib/asan/asan_stats.cpp
+++ b/compiler-rt/lib/asan/asan_stats.cpp
@@ -142,7 +142,7 @@ uptr __sanitizer_get_current_allocated_bytes() {
uptr freed = stats.freed;
// Return sane value if malloced < freed due to racy
// way we update accumulated stats.
- return (malloced > freed) ? malloced - freed : 1;
+ return (malloced > freed) ? malloced - freed : 0;
}
uptr __sanitizer_get_heap_size() {
@@ -161,7 +161,7 @@ uptr __sanitizer_get_free_bytes() {
+ stats.malloced_redzones;
// Return sane value if total_free < total_used due to racy
// way we update accumulated stats.
- return (total_free > total_used) ? total_free - total_used : 1;
+ return (total_free > total_used) ? total_free - total_used : 0;
}
uptr __sanitizer_get_unmapped_bytes() {
diff --git a/compiler-rt/lib/asan/asan_thread.cpp b/compiler-rt/lib/asan/asan_thread.cpp
index f718adf5e1f7..8798968947e8 100644
--- a/compiler-rt/lib/asan/asan_thread.cpp
+++ b/compiler-rt/lib/asan/asan_thread.cpp
@@ -44,11 +44,10 @@ static ThreadRegistry *asan_thread_registry;
static ThreadArgRetval *thread_data;
static Mutex mu_for_thread_context;
-static LowLevelAllocator allocator_for_thread_context;
static ThreadContextBase *GetAsanThreadContext(u32 tid) {
Lock lock(&mu_for_thread_context);
- return new (allocator_for_thread_context) AsanThreadContext(tid);
+ return new (GetGlobalLowLevelAllocator()) AsanThreadContext(tid);
}
static void InitThreads() {
@@ -91,20 +90,27 @@ AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {
// AsanThread implementation.
-AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg,
+AsanThread *AsanThread::Create(const void *start_data, uptr data_size,
u32 parent_tid, StackTrace *stack,
bool detached) {
uptr PageSize = GetPageSizeCached();
uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
AsanThread *thread = (AsanThread *)MmapOrDie(size, __func__);
- thread->start_routine_ = start_routine;
- thread->arg_ = arg;
+ if (data_size) {
+ uptr availible_size = (uptr)thread + size - (uptr)(thread->start_data_);
+ CHECK_LE(data_size, availible_size);
+ internal_memcpy(thread->start_data_, start_data, data_size);
+ }
AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
asanThreadRegistry().CreateThread(0, detached, parent_tid, &args);
return thread;
}
+void AsanThread::GetStartData(void *out, uptr out_size) const {
+ internal_memcpy(out, start_data_, out_size);
+}
+
void AsanThread::TSDDtor(void *tsd) {
AsanThreadContext *context = (AsanThreadContext *)tsd;
VReport(1, "T%d TSDDtor\n", context->tid);
@@ -273,37 +279,17 @@ void AsanThread::Init(const InitOptions *options) {
// asan_fuchsia.c definies CreateMainThread and SetThreadStackAndTls.
#if !SANITIZER_FUCHSIA
-thread_return_t AsanThread::ThreadStart(tid_t os_id) {
+void AsanThread::ThreadStart(tid_t os_id) {
Init();
asanThreadRegistry().StartThread(tid(), os_id, ThreadType::Regular, nullptr);
if (common_flags()->use_sigaltstack)
SetAlternateSignalStack();
-
- if (!start_routine_) {
- // start_routine_ == 0 if we're on the main thread or on one of the
- // OS X libdispatch worker threads. But nobody is supposed to call
- // ThreadStart() for the worker threads.
- CHECK_EQ(tid(), 0);
- return 0;
- }
-
- thread_return_t res = start_routine_(arg_);
-
- // On POSIX systems we defer this to the TSD destructor. LSan will consider
- // the thread's memory as non-live from the moment we call Destroy(), even
- // though that memory might contain pointers to heap objects which will be
- // cleaned up by a user-defined TSD destructor. Thus, calling Destroy() before
- // the TSD destructors have run might cause false positives in LSan.
- if (!SANITIZER_POSIX)
- this->Destroy();
-
- return res;
}
AsanThread *CreateMainThread() {
AsanThread *main_thread = AsanThread::Create(
- /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ kMainTid,
+ /* parent_tid */ kMainTid,
/* stack */ nullptr, /* detached */ true);
SetCurrentThread(main_thread);
main_thread->ThreadStart(internal_getpid());
diff --git a/compiler-rt/lib/asan/asan_thread.h b/compiler-rt/lib/asan/asan_thread.h
index c131dd40d864..62f1b5337fe4 100644
--- a/compiler-rt/lib/asan/asan_thread.h
+++ b/compiler-rt/lib/asan/asan_thread.h
@@ -56,18 +56,32 @@ class AsanThreadContext final : public ThreadContextBase {
// AsanThreadContext objects are never freed, so we need many of them.
COMPILER_CHECK(sizeof(AsanThreadContext) <= 256);
+#if defined(_MSC_VER) && !defined(__clang__)
+// MSVC raises a warning about a nonstandard extension being used for the 0
+// sized element in this array. Disable this for warn-as-error builds.
+# pragma warning(push)
+# pragma warning(disable : 4200)
+#endif
+
// AsanThread are stored in TSD and destroyed when the thread dies.
class AsanThread {
public:
- static AsanThread *Create(thread_callback_t start_routine, void *arg,
- u32 parent_tid, StackTrace *stack, bool detached);
+ template <typename T>
+ static AsanThread *Create(const T &data, u32 parent_tid, StackTrace *stack,
+ bool detached) {
+ return Create(&data, sizeof(data), parent_tid, stack, detached);
+ }
+ static AsanThread *Create(u32 parent_tid, StackTrace *stack, bool detached) {
+ return Create(nullptr, 0, parent_tid, stack, detached);
+ }
static void TSDDtor(void *tsd);
void Destroy();
struct InitOptions;
void Init(const InitOptions *options = nullptr);
- thread_return_t ThreadStart(tid_t os_id);
+ void ThreadStart(tid_t os_id);
+ thread_return_t RunThread();
uptr stack_top();
uptr stack_bottom();
@@ -130,12 +144,18 @@ class AsanThread {
void *extra_spill_area() { return &extra_spill_area_; }
- void *get_arg() const { return arg_; }
+ template <typename T>
+ void GetStartData(T &data) const {
+ GetStartData(&data, sizeof(data));
+ }
private:
// NOTE: There is no AsanThread constructor. It is allocated
// via mmap() and *must* be valid in zero-initialized state.
+ static AsanThread *Create(const void *start_data, uptr data_size,
+ u32 parent_tid, StackTrace *stack, bool detached);
+
void SetThreadStackAndTls(const InitOptions *options);
void ClearShadowForThreadStackAndTLS();
@@ -147,9 +167,9 @@ class AsanThread {
};
StackBounds GetStackBounds() const;
+ void GetStartData(void *out, uptr out_size) const;
+
AsanThreadContext *context_;
- thread_callback_t start_routine_;
- void *arg_;
uptr stack_top_;
uptr stack_bottom_;
@@ -168,8 +188,14 @@ class AsanThread {
AsanStats stats_;
bool unwinding_;
uptr extra_spill_area_;
+
+ char start_data_[];
};
+#if defined(_MSC_VER) && !defined(__clang__)
+# pragma warning(pop)
+#endif
+
// Returns a single instance of registry.
ThreadRegistry &asanThreadRegistry();
ThreadArgRetval &asanThreadArgRetval();
diff --git a/compiler-rt/lib/asan/asan_win.cpp b/compiler-rt/lib/asan/asan_win.cpp
index 25f2e6cd551f..d5a30f471e2b 100644
--- a/compiler-rt/lib/asan/asan_win.cpp
+++ b/compiler-rt/lib/asan/asan_win.cpp
@@ -131,10 +131,22 @@ INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
}
#endif
+struct ThreadStartParams {
+ thread_callback_t start_routine;
+ void *arg;
+};
+
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
AsanThread *t = (AsanThread *)arg;
SetCurrentThread(t);
- return t->ThreadStart(GetTid());
+ t->ThreadStart(GetTid());
+
+ ThreadStartParams params;
+ t->GetStartData(params);
+
+ auto res = (*params.start_routine)(params.arg);
+ t->Destroy(); // POSIX calls this from TSD destructor.
+ return res;
}
INTERCEPTOR_WINAPI(HANDLE, CreateThread, LPSECURITY_ATTRIBUTES security,
@@ -148,8 +160,8 @@ INTERCEPTOR_WINAPI(HANDLE, CreateThread, LPSECURITY_ATTRIBUTES security,
// one. This is a bandaid fix for PR22025.
bool detached = false; // FIXME: how can we determine it on Windows?
u32 current_tid = GetCurrentTidOrInvalid();
- AsanThread *t =
- AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
+ ThreadStartParams params = {start_routine, arg};
+ AsanThread *t = AsanThread::Create(params, current_tid, &stack, detached);
return REAL(CreateThread)(security, stack_size, asan_thread_start, t,
thr_flags, tid);
}
diff --git a/compiler-rt/lib/asan/asan_win_dll_thunk.cpp b/compiler-rt/lib/asan/asan_win_dll_thunk.cpp
index e3a90f18ed81..0fa636bec0d0 100644
--- a/compiler-rt/lib/asan/asan_win_dll_thunk.cpp
+++ b/compiler-rt/lib/asan/asan_win_dll_thunk.cpp
@@ -65,6 +65,7 @@ INTERCEPT_WRAP_W_W(_expand_dbg)
INTERCEPT_LIBRARY_FUNCTION(atoi);
INTERCEPT_LIBRARY_FUNCTION(atol);
+INTERCEPT_LIBRARY_FUNCTION(atoll);
INTERCEPT_LIBRARY_FUNCTION(frexp);
INTERCEPT_LIBRARY_FUNCTION(longjmp);
#if SANITIZER_INTERCEPT_MEMCHR
@@ -91,6 +92,7 @@ INTERCEPT_LIBRARY_FUNCTION(strspn);
INTERCEPT_LIBRARY_FUNCTION(strstr);
INTERCEPT_LIBRARY_FUNCTION(strtok);
INTERCEPT_LIBRARY_FUNCTION(strtol);
+INTERCEPT_LIBRARY_FUNCTION(strtoll);
INTERCEPT_LIBRARY_FUNCTION(wcslen);
INTERCEPT_LIBRARY_FUNCTION(wcsnlen);
diff --git a/compiler-rt/lib/asan_abi/asan_abi.cpp b/compiler-rt/lib/asan_abi/asan_abi.cpp
index 769fde47a33b..cf8663024eb7 100644
--- a/compiler-rt/lib/asan_abi/asan_abi.cpp
+++ b/compiler-rt/lib/asan_abi/asan_abi.cpp
@@ -12,6 +12,10 @@ extern "C" {
// Functions concerning instrumented global variables:
void __asan_abi_register_image_globals(void) {}
void __asan_abi_unregister_image_globals(void) {}
+void __asan_abi_register_elf_globals(bool *flag, void *start, void *stop) {}
+void __asan_abi_unregister_elf_globals(bool *flag, void *start, void *stop) {}
+void __asan_abi_register_globals(void *globals, size_t n) {}
+void __asan_abi_unregister_globals(void *globals, size_t n) {}
// Functions concerning dynamic library initialization
void __asan_abi_before_dynamic_init(const char *module_name) {}
diff --git a/compiler-rt/lib/asan_abi/asan_abi.h b/compiler-rt/lib/asan_abi/asan_abi.h
index 562a552662b3..8702bcd13391 100644
--- a/compiler-rt/lib/asan_abi/asan_abi.h
+++ b/compiler-rt/lib/asan_abi/asan_abi.h
@@ -11,12 +11,18 @@
#include <stdbool.h>
#include <stddef.h>
+#include <sys/cdefs.h>
#include <sys/types.h>
-extern "C" {
+__BEGIN_DECLS
+
// Functions concerning instrumented global variables:
void __asan_abi_register_image_globals();
void __asan_abi_unregister_image_globals();
+void __asan_abi_register_elf_globals(bool *flag, void *start, void *stop);
+void __asan_abi_unregister_elf_globals(bool *flag, void *start, void *stop);
+void __asan_abi_register_globals(void *globals, size_t n);
+void __asan_abi_unregister_globals(void *globals, size_t n);
// Functions concerning dynamic library initialization
void __asan_abi_before_dynamic_init(const char *module_name);
@@ -80,5 +86,6 @@ void *__asan_abi_stack_malloc_always_n(size_t scale, size_t size);
// Functions concerning fake stack free
void __asan_abi_stack_free_n(int scale, void *p, size_t n);
-}
+
+__END_DECLS
#endif // ASAN_ABI_H
diff --git a/compiler-rt/lib/asan_abi/asan_abi_shim.cpp b/compiler-rt/lib/asan_abi/asan_abi_shim.cpp
index 61c45db4bb9d..35c45dff96f6 100644
--- a/compiler-rt/lib/asan_abi/asan_abi_shim.cpp
+++ b/compiler-rt/lib/asan_abi/asan_abi_shim.cpp
@@ -15,14 +15,25 @@ extern "C" {
void __asan_register_image_globals(uptr *flag) {
__asan_abi_register_image_globals();
}
-
void __asan_unregister_image_globals(uptr *flag) {
__asan_abi_unregister_image_globals();
}
-void __asan_register_elf_globals(uptr *flag, void *start, void *stop) {}
-void __asan_unregister_elf_globals(uptr *flag, void *start, void *stop) {}
-void __asan_register_globals(__asan_global *globals, uptr n) {}
-void __asan_unregister_globals(__asan_global *globals, uptr n) {}
+void __asan_register_elf_globals(uptr *flag, void *start, void *stop) {
+ bool bFlag = *flag;
+ __asan_abi_register_elf_globals(&bFlag, start, stop);
+ *flag = bFlag;
+}
+void __asan_unregister_elf_globals(uptr *flag, void *start, void *stop) {
+ bool bFlag = *flag;
+ __asan_abi_unregister_elf_globals(&bFlag, start, stop);
+ *flag = bFlag;
+}
+void __asan_register_globals(__asan_global *globals, uptr n) {
+ __asan_abi_register_globals(globals, n);
+}
+void __asan_unregister_globals(__asan_global *globals, uptr n) {
+ __asan_abi_unregister_globals(globals, n);
+}
// Functions concerning dynamic library initialization
void __asan_before_dynamic_init(const char *module_name) {
@@ -49,13 +60,13 @@ void __asan_init(void) {
__asan_abi_init();
}
-void __asan_version_mismatch_check_v8(void) {}
+
void __asan_handle_no_return(void) { __asan_abi_handle_no_return(); }
// Variables concerning RTL state. These provisionally exist for completeness
// but will likely move into the Stable ABI implementation and not in the shim.
-uptr __asan_shadow_memory_dynamic_address = (uptr)0xdeaddeaddeadbeaf;
-int __asan_option_detect_stack_use_after_return = 0;
+uptr __asan_shadow_memory_dynamic_address = (uptr)0L;
+int __asan_option_detect_stack_use_after_return = 1;
// Functions concerning memory load and store reporting
void __asan_report_load1(uptr addr) {
@@ -331,25 +342,28 @@ void __asan_unpoison_stack_memory(uptr addr, uptr size) {
}
// Functions concerning redzone poisoning
-void __asan_poison_intra_object_redzone(uptr p, uptr size) {}
-void __asan_unpoison_intra_object_redzone(uptr p, uptr size) {}
+void __asan_poison_intra_object_redzone(uptr p, uptr size) {
+ __asan_abi_poison_intra_object_redzone((void *)p, size);
+}
+void __asan_unpoison_intra_object_redzone(uptr p, uptr size) {
+ __asan_abi_unpoison_intra_object_redzone((void *)p, size);
+}
// Functions concerning array cookie poisoning
-void __asan_poison_cxx_array_cookie(uptr p) {}
+void __asan_poison_cxx_array_cookie(uptr p) {
+ __asan_abi_poison_cxx_array_cookie((void *)p);
+}
uptr __asan_load_cxx_array_cookie(uptr *p) {
- // TBD: Fail here
- return (uptr)0;
+ return (uptr)__asan_abi_load_cxx_array_cookie((void **)p);
}
// Functions concerning fake stacks
void *__asan_get_current_fake_stack(void) {
- // TBD: Fail here
- return (void *)0;
+ return __asan_abi_get_current_fake_stack();
}
void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
void **end) {
- // TBD: Fail here
- return (void *)0;
+ return __asan_abi_addr_is_in_fake_stack(fake_stack, addr, beg, end);
}
// Functions concerning poisoning and unpoisoning fake stack alloca
@@ -464,22 +478,4 @@ void __asan_stack_free_9(uptr ptr, uptr size) {
void __asan_stack_free_10(uptr ptr, uptr size) {
__asan_abi_stack_free_n(10, (void *)ptr, size);
}
-
-// Functions concerning introspection (including lldb support)
-uptr __asan_get_alloc_stack(uptr addr, uptr *trace, uptr size, u32 *thread_id) {
- // TBD: Fail here
- return (uptr)0;
-}
-void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
- uptr access_size, u32 exp) {}
-void __asan_set_error_report_callback(void (*callback)(const char *)) {}
-void __asan_describe_address(uptr addr) {}
-int __asan_report_present(void) { return (int)0; }
-uptr __asan_get_report_pc(void) { return (uptr)0; }
-uptr __asan_get_report_bp(void) { return (uptr)0; }
-uptr __asan_get_report_sp(void) { return (uptr)0; }
-uptr __asan_get_report_address(void) { return (uptr)0; }
-int __asan_get_report_access_type(void) { return (int)0; }
-uptr __asan_get_report_access_size(void) { return (uptr)0; }
-const char *__asan_get_report_description(void) { return (const char *)0; }
}
diff --git a/compiler-rt/lib/asan_abi/asan_abi_tbd.txt b/compiler-rt/lib/asan_abi/asan_abi_tbd.txt
index 2022c0b94283..a712093d7b21 100644
--- a/compiler-rt/lib/asan_abi/asan_abi_tbd.txt
+++ b/compiler-rt/lib/asan_abi/asan_abi_tbd.txt
@@ -8,3 +8,15 @@ __asan_on_error
__asan_print_accumulated_stats
__asan_set_death_callback
__asan_update_allocation_context
+__asan_describe_address
+__asan_get_alloc_stack
+__asan_get_report_access_size
+__asan_get_report_access_type
+__asan_get_report_address
+__asan_get_report_bp
+__asan_get_report_description
+__asan_get_report_pc
+__asan_get_report_sp
+__asan_report_error
+__asan_report_present
+__asan_set_error_report_callback
diff --git a/compiler-rt/lib/builtins/README.txt b/compiler-rt/lib/builtins/README.txt
index 5637183cc3b4..2d213d95f333 100644
--- a/compiler-rt/lib/builtins/README.txt
+++ b/compiler-rt/lib/builtins/README.txt
@@ -137,49 +137,54 @@ si_int __ucmpti2(tu_int a, tu_int b);
di_int __fixsfdi( float a);
di_int __fixdfdi( double a);
di_int __fixxfdi(long double a);
+di_int __fixtfdi( tf_float a);
ti_int __fixsfti( float a);
ti_int __fixdfti( double a);
ti_int __fixxfti(long double a);
-uint64_t __fixtfdi(long double input); // ppc only, doesn't match documentation
+ti_int __fixtfti( tf_float a);
su_int __fixunssfsi( float a);
su_int __fixunsdfsi( double a);
su_int __fixunsxfsi(long double a);
+su_int __fixunstfsi( tf_float a);
du_int __fixunssfdi( float a);
du_int __fixunsdfdi( double a);
du_int __fixunsxfdi(long double a);
+du_int __fixunstfdi( tf_float a);
tu_int __fixunssfti( float a);
tu_int __fixunsdfti( double a);
tu_int __fixunsxfti(long double a);
-uint64_t __fixunstfdi(long double input); // ppc only
+tu_int __fixunstfti( tf_float a);
float __floatdisf(di_int a);
double __floatdidf(di_int a);
long double __floatdixf(di_int a);
-long double __floatditf(int64_t a); // ppc only
+tf_float __floatditf(int64_t a);
float __floattisf(ti_int a);
double __floattidf(ti_int a);
long double __floattixf(ti_int a);
+tf_float __floattitf(ti_int a);
float __floatundisf(du_int a);
double __floatundidf(du_int a);
long double __floatundixf(du_int a);
-long double __floatunditf(uint64_t a); // ppc only
+tf_float __floatunditf(du_int a);
float __floatuntisf(tu_int a);
double __floatuntidf(tu_int a);
long double __floatuntixf(tu_int a);
+tf_float __floatuntixf(tu_int a);
// Floating point raised to integer power
float __powisf2( float a, int b); // a ^ b
double __powidf2( double a, int b); // a ^ b
long double __powixf2(long double a, int b); // a ^ b
-long double __powitf2(long double a, int b); // ppc only, a ^ b
+tf_float __powitf2( tf_float a, int b); // a ^ b
// Complex arithmetic
@@ -189,8 +194,7 @@ long double __powitf2(long double a, int b); // ppc only, a ^ b
double _Complex __muldc3(double a, double b, double c, double d);
long double _Complex __mulxc3(long double a, long double b,
long double c, long double d);
-long double _Complex __multc3(long double a, long double b,
- long double c, long double d); // ppc only
+ tf_float _Complex __multc3(tf_float a, tf_float b, tf_float c, tf_float d);
// (a + ib) / (c + id)
@@ -198,8 +202,7 @@ long double _Complex __multc3(long double a, long double b,
double _Complex __divdc3(double a, double b, double c, double d);
long double _Complex __divxc3(long double a, long double b,
long double c, long double d);
-long double _Complex __divtc3(long double a, long double b,
- long double c, long double d); // ppc only
+ tf_float _Complex __divtc3(tf_float a, tf_float b, tf_float c, tf_float d);
// Runtime support
diff --git a/compiler-rt/lib/builtins/aarch64/lse.S b/compiler-rt/lib/builtins/aarch64/lse.S
index 5dc0d5320b5a..1fe18f4a4681 100644
--- a/compiler-rt/lib/builtins/aarch64/lse.S
+++ b/compiler-rt/lib/builtins/aarch64/lse.S
@@ -7,7 +7,7 @@
// Out-of-line LSE atomics helpers. Ported from libgcc library.
// N = {1, 2, 4, 8}
// M = {1, 2, 4, 8, 16}
-// ORDER = {'relax', 'acq', 'rel', 'acq_rel'}
+// ORDER = {'relax', 'acq', 'rel', 'acq_rel', 'sync'}
// Routines implemented:
//
// iM __aarch64_casM_ORDER(iM expected, iM desired, iM *ptr)
@@ -35,8 +35,8 @@ HIDDEN(___aarch64_have_lse_atomics)
#endif
// Generate mnemonics for
-// L_cas: SIZE: 1,2,4,8,16 MODEL: 1,2,3,4
-// L_swp L_ldadd L_ldclr L_ldeor L_ldset: SIZE: 1,2,4,8 MODEL: 1,2,3,4
+// L_cas: SIZE: 1,2,4,8,16 MODEL: 1,2,3,4,5
+// L_swp L_ldadd L_ldclr L_ldeor L_ldset: SIZE: 1,2,4,8 MODEL: 1,2,3,4,5
#if SIZE == 1
#define S b
@@ -64,24 +64,44 @@ HIDDEN(___aarch64_have_lse_atomics)
#define L
#define M 0x000000
#define N 0x000000
+#define BARRIER
#elif MODEL == 2
#define SUFF _acq
#define A a
#define L
#define M 0x400000
#define N 0x800000
+#define BARRIER
#elif MODEL == 3
#define SUFF _rel
#define A
#define L l
#define M 0x008000
#define N 0x400000
+#define BARRIER
#elif MODEL == 4
#define SUFF _acq_rel
#define A a
#define L l
#define M 0x408000
#define N 0xc00000
+#define BARRIER
+#elif MODEL == 5
+#define SUFF _sync
+#ifdef L_swp
+// swp has _acq semantics.
+#define A a
+#define L
+#define M 0x400000
+#define N 0x800000
+#else
+// All other _sync functions have _seq semantics.
+#define A a
+#define L l
+#define M 0x408000
+#define N 0xc00000
+#endif
+#define BARRIER dmb ish
#else
#error
#endif // MODEL
@@ -96,7 +116,12 @@ HIDDEN(___aarch64_have_lse_atomics)
#endif
#define NAME(BASE) GLUE4(__aarch64_, BASE, SIZE, SUFF)
+#if MODEL == 5
+// Drop A for _sync functions.
+#define LDXR GLUE3(ld, xr, S)
+#else
#define LDXR GLUE4(ld, A, xr, S)
+#endif
#define STXR GLUE4(st, L, xr, S)
// Define temporary registers.
@@ -136,9 +161,15 @@ DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(NAME(cas))
STXR w(tmp1), s(1), [x2]
cbnz w(tmp1), 0b
1:
+ BARRIER
ret
#else
+#if MODEL == 5
+// Drop A for _sync functions.
+#define LDXP GLUE2(ld, xp)
+#else
#define LDXP GLUE3(ld, A, xp)
+#endif
#define STXP GLUE3(st, L, xp)
#ifdef HAS_ASM_LSE
#define CASP GLUE3(casp, A, L) x0, x1, x2, x3, [x4]
@@ -159,6 +190,7 @@ DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(NAME(cas))
STXP w(tmp2), x2, x3, [x4]
cbnz w(tmp2), 0b
1:
+ BARRIER
ret
#endif
END_COMPILERRT_OUTLINE_FUNCTION(NAME(cas))
@@ -180,6 +212,7 @@ DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(NAME(swp))
LDXR s(0), [x1]
STXR w(tmp1), s(tmp0), [x1]
cbnz w(tmp1), 0b
+ BARRIER
ret
END_COMPILERRT_OUTLINE_FUNCTION(NAME(swp))
#endif // L_swp
@@ -224,6 +257,7 @@ DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(NAME(LDNM))
OP s(tmp1), s(0), s(tmp0)
STXR w(tmp2), s(tmp1), [x1]
cbnz w(tmp2), 0b
+ BARRIER
ret
END_COMPILERRT_OUTLINE_FUNCTION(NAME(LDNM))
#endif // L_ldadd L_ldclr L_ldeor L_ldset
diff --git a/compiler-rt/lib/builtins/aarch64/sme-abi-init.c b/compiler-rt/lib/builtins/aarch64/sme-abi-init.c
new file mode 100644
index 000000000000..b6ee12170d56
--- /dev/null
+++ b/compiler-rt/lib/builtins/aarch64/sme-abi-init.c
@@ -0,0 +1,52 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+__attribute__((visibility("hidden"), nocommon))
+_Bool __aarch64_has_sme_and_tpidr2_el0;
+
+// We have multiple ways to check that the function has SME, depending on our
+// target.
+// * For Linux we can use __getauxval().
+// * For newlib we can use __aarch64_sme_accessible().
+
+#if defined(__linux__)
+
+#ifndef AT_HWCAP2
+#define AT_HWCAP2 26
+#endif
+
+#ifndef HWCAP2_SME
+#define HWCAP2_SME (1 << 23)
+#endif
+
+extern unsigned long int __getauxval (unsigned long int);
+
+static _Bool has_sme(void) {
+ return __getauxval(AT_HWCAP2) & HWCAP2_SME;
+}
+
+#else // defined(__linux__)
+
+#if defined(COMPILER_RT_SHARED_LIB)
+__attribute__((weak))
+#endif
+extern _Bool __aarch64_sme_accessible(void);
+
+static _Bool has_sme(void) {
+#if defined(COMPILER_RT_SHARED_LIB)
+ if (!__aarch64_sme_accessible)
+ return 0;
+#endif
+ return __aarch64_sme_accessible();
+}
+
+#endif // defined(__linux__)
+
+#if __GNUC__ >= 9
+#pragma GCC diagnostic ignored "-Wprio-ctor-dtor"
+#endif
+__attribute__((constructor(90)))
+static void init_aarch64_has_sme(void) {
+ __aarch64_has_sme_and_tpidr2_el0 = has_sme();
+}
diff --git a/compiler-rt/lib/builtins/aarch64/sme-abi.S b/compiler-rt/lib/builtins/aarch64/sme-abi.S
new file mode 100644
index 000000000000..d470ecaf7aaa
--- /dev/null
+++ b/compiler-rt/lib/builtins/aarch64/sme-abi.S
@@ -0,0 +1,176 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+// This patch implements the support routines for the SME ABI,
+// described here:
+// https://github.com/ARM-software/abi-aa/blob/main/aapcs64/aapcs64.rst#sme-support-routines
+
+#include "../assembly.h"
+
+
+#if !defined(__APPLE__)
+#define TPIDR2_SYMBOL SYMBOL_NAME(__aarch64_has_sme_and_tpidr2_el0)
+#define TPIDR2_SYMBOL_OFFSET :lo12:SYMBOL_NAME(__aarch64_has_sme_and_tpidr2_el0)
+#else
+// MachO requires @page/@pageoff directives because the global is defined
+// in a different file. Otherwise this file may fail to build.
+#define TPIDR2_SYMBOL SYMBOL_NAME(__aarch64_has_sme_and_tpidr2_el0)@page
+#define TPIDR2_SYMBOL_OFFSET SYMBOL_NAME(__aarch64_has_sme_and_tpidr2_el0)@pageoff
+#endif
+
+.arch armv9-a+sme
+
+// Utility function which calls a system's abort() routine. Because the function
+// is streaming-compatible it should disable streaming-SVE mode before calling
+// abort(). Note that there is no need to preserve any state before the call,
+// because the function does not return.
+DEFINE_COMPILERRT_PRIVATE_FUNCTION(do_abort)
+.cfi_startproc
+ .variant_pcs SYMBOL_NAME(do_abort)
+ stp x29, x30, [sp, #-32]!
+ cntd x0
+ // Store VG to a stack location that we describe with .cfi_offset
+ str x0, [sp, #16]
+ .cfi_def_cfa_offset 32
+ .cfi_offset w30, -24
+ .cfi_offset w29, -32
+ .cfi_offset 46, -16
+ bl __arm_sme_state
+ tbz x0, #0, 2f
+1:
+ smstop sm
+2:
+ // We can't make this into a tail-call because the unwinder would
+ // need to restore the value of VG.
+ bl SYMBOL_NAME(abort)
+.cfi_endproc
+END_COMPILERRT_FUNCTION(do_abort)
+
+// __arm_sme_state fills the result registers based on a local
+// that is set as part of the compiler-rt startup code.
+// __aarch64_has_sme_and_tpidr2_el0
+DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(__arm_sme_state)
+ .variant_pcs __arm_sme_state
+ mov x0, xzr
+ mov x1, xzr
+
+ adrp x16, TPIDR2_SYMBOL
+ ldrb w16, [x16, TPIDR2_SYMBOL_OFFSET]
+ cbz w16, 1f
+0:
+ orr x0, x0, #0xC000000000000000
+ mrs x16, SVCR
+ bfxil x0, x16, #0, #2
+ mrs x1, TPIDR2_EL0
+1:
+ ret
+END_COMPILERRT_OUTLINE_FUNCTION(__arm_sme_state)
+
+DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(__arm_tpidr2_restore)
+ .variant_pcs __arm_tpidr2_restore
+ // If TPIDR2_EL0 is nonnull, the subroutine aborts in some platform-specific
+ // manner.
+ mrs x14, TPIDR2_EL0
+ cbnz x14, 2f
+
+ // If any of the reserved bytes in the first 16 bytes of BLK are nonzero,
+ // the subroutine [..] aborts in some platform-defined manner.
+ ldrh w14, [x0, #10]
+ cbnz w14, 2f
+ ldr w14, [x0, #12]
+ cbnz w14, 2f
+
+ // If BLK.za_save_buffer is NULL, the subroutine does nothing.
+ ldr x16, [x0]
+ cbz x16, 1f
+
+ // If BLK.num_za_save_slices is zero, the subroutine does nothing.
+ ldrh w14, [x0, #8]
+ cbz x14, 1f
+
+ mov x15, xzr
+0:
+ ldr za[w15,0], [x16]
+ addsvl x16, x16, #1
+ add x15, x15, #1
+ cmp x14, x15
+ b.ne 0b
+1:
+ ret
+2:
+ b SYMBOL_NAME(do_abort)
+END_COMPILERRT_OUTLINE_FUNCTION(__arm_tpidr2_restore)
+
+DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(__arm_tpidr2_save)
+ .variant_pcs __arm_tpidr2_restore
+ // If the current thread does not have access to TPIDR2_EL0, the subroutine
+ // does nothing.
+ adrp x14, TPIDR2_SYMBOL
+ ldrb w14, [x14, TPIDR2_SYMBOL_OFFSET]
+ cbz w14, 1f
+
+ // If TPIDR2_EL0 is null, the subroutine does nothing.
+ mrs x16, TPIDR2_EL0
+ cbz x16, 1f
+
+ // If any of the reserved bytes in the first 16 bytes of the TPIDR2 block are
+ // nonzero, the subroutine [..] aborts in some platform-defined manner.
+ ldrh w14, [x16, #10]
+ cbnz w14, 2f
+ ldr w14, [x16, #12]
+ cbnz w14, 2f
+
+ // If num_za_save_slices is zero, the subroutine does nothing.
+ ldrh w14, [x16, #8]
+ cbz x14, 1f
+
+ // If za_save_buffer is NULL, the subroutine does nothing.
+ ldr x16, [x16]
+ cbz x16, 1f
+
+ mov x15, xzr
+0:
+ str za[w15,0], [x16]
+ addsvl x16, x16, #1
+ add x15, x15, #1
+ cmp x14, x15
+ b.ne 0b
+1:
+ ret
+2:
+ b SYMBOL_NAME(do_abort)
+END_COMPILERRT_OUTLINE_FUNCTION(__arm_tpidr2_save)
+
+DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(__arm_za_disable)
+ .variant_pcs __arm_tpidr2_restore
+ // If the current thread does not have access to SME, the subroutine does
+ // nothing.
+ adrp x14, TPIDR2_SYMBOL
+ ldrb w14, [x14, TPIDR2_SYMBOL_OFFSET]
+ cbz w14, 0f
+
+ // Otherwise, the subroutine behaves as if it did the following:
+ // * Call __arm_tpidr2_save.
+ stp x29, x30, [sp, #-16]!
+ .cfi_def_cfa_offset 16
+ mov x29, sp
+ .cfi_def_cfa w29, 16
+ .cfi_offset w30, -8
+ .cfi_offset w29, -16
+ bl __arm_tpidr2_save
+
+ // * Set TPIDR2_EL0 to null.
+ msr TPIDR2_EL0, xzr
+
+ // * Set PSTATE.ZA to 0.
+ smstop za
+
+ .cfi_def_cfa wsp, 16
+ ldp x29, x30, [sp], #16
+ .cfi_def_cfa_offset 0
+ .cfi_restore w30
+ .cfi_restore w29
+0:
+ ret
+END_COMPILERRT_OUTLINE_FUNCTION(__arm_za_disable)
diff --git a/compiler-rt/lib/builtins/absvti2.c b/compiler-rt/lib/builtins/absvti2.c
index 491d99d7ce0f..bc6933bd2a1c 100644
--- a/compiler-rt/lib/builtins/absvti2.c
+++ b/compiler-rt/lib/builtins/absvti2.c
@@ -20,7 +20,7 @@
COMPILER_RT_ABI ti_int __absvti2(ti_int a) {
const int N = (int)(sizeof(ti_int) * CHAR_BIT);
- if (a == ((ti_int)1 << (N - 1)))
+ if (a == (ti_int)((tu_int)1 << (N - 1)))
compilerrt_abort();
const ti_int s = a >> (N - 1);
return (a ^ s) - s;
diff --git a/compiler-rt/lib/builtins/arm/udivsi3.S b/compiler-rt/lib/builtins/arm/udivsi3.S
index 0567ab4ab765..16528e8bbd82 100644
--- a/compiler-rt/lib/builtins/arm/udivsi3.S
+++ b/compiler-rt/lib/builtins/arm/udivsi3.S
@@ -204,7 +204,7 @@ LOCAL_LABEL(divby0):
LOCAL_LABEL(block_skip_##shift) :; \
adcs r3, r3 // same as ((r3 << 1) | Carry). Carry is set if r0 >= r2.
- // TODO: if current location counter is not not word aligned, we don't
+ // TODO: if current location counter is not word aligned, we don't
// need the .p2align and nop
// Label div0block must be word-aligned. First align block 31
.p2align 2
diff --git a/compiler-rt/lib/builtins/ashldi3.c b/compiler-rt/lib/builtins/ashldi3.c
index 04f22228f11d..7b835da865d7 100644
--- a/compiler-rt/lib/builtins/ashldi3.c
+++ b/compiler-rt/lib/builtins/ashldi3.c
@@ -28,7 +28,8 @@ COMPILER_RT_ABI di_int __ashldi3(di_int a, int b) {
if (b == 0)
return a;
result.s.low = input.s.low << b;
- result.s.high = (input.s.high << b) | (input.s.low >> (bits_in_word - b));
+ result.s.high =
+ ((su_int)input.s.high << b) | (input.s.low >> (bits_in_word - b));
}
return result.all;
}
diff --git a/compiler-rt/lib/builtins/ashlti3.c b/compiler-rt/lib/builtins/ashlti3.c
index 99a133ffa22f..2bebf10401d3 100644
--- a/compiler-rt/lib/builtins/ashlti3.c
+++ b/compiler-rt/lib/builtins/ashlti3.c
@@ -30,7 +30,8 @@ COMPILER_RT_ABI ti_int __ashlti3(ti_int a, int b) {
if (b == 0)
return a;
result.s.low = input.s.low << b;
- result.s.high = (input.s.high << b) | (input.s.low >> (bits_in_dword - b));
+ result.s.high =
+ ((du_int)input.s.high << b) | (input.s.low >> (bits_in_dword - b));
}
return result.all;
}
diff --git a/compiler-rt/lib/builtins/ashrdi3.c b/compiler-rt/lib/builtins/ashrdi3.c
index 934a5c47fd69..c0879b8b252d 100644
--- a/compiler-rt/lib/builtins/ashrdi3.c
+++ b/compiler-rt/lib/builtins/ashrdi3.c
@@ -29,7 +29,8 @@ COMPILER_RT_ABI di_int __ashrdi3(di_int a, int b) {
if (b == 0)
return a;
result.s.high = input.s.high >> b;
- result.s.low = (input.s.high << (bits_in_word - b)) | (input.s.low >> b);
+ result.s.low =
+ ((su_int)input.s.high << (bits_in_word - b)) | (input.s.low >> b);
}
return result.all;
}
diff --git a/compiler-rt/lib/builtins/ashrti3.c b/compiler-rt/lib/builtins/ashrti3.c
index b306051df028..d6b1ad9192bf 100644
--- a/compiler-rt/lib/builtins/ashrti3.c
+++ b/compiler-rt/lib/builtins/ashrti3.c
@@ -31,7 +31,8 @@ COMPILER_RT_ABI ti_int __ashrti3(ti_int a, int b) {
if (b == 0)
return a;
result.s.high = input.s.high >> b;
- result.s.low = (input.s.high << (bits_in_dword - b)) | (input.s.low >> b);
+ result.s.low =
+ ((du_int)input.s.high << (bits_in_dword - b)) | (input.s.low >> b);
}
return result.all;
}
diff --git a/compiler-rt/lib/builtins/assembly.h b/compiler-rt/lib/builtins/assembly.h
index 169d49683f50..8c42fc773483 100644
--- a/compiler-rt/lib/builtins/assembly.h
+++ b/compiler-rt/lib/builtins/assembly.h
@@ -260,9 +260,10 @@
.globl name SEPARATOR \
SYMBOL_IS_FUNC(name) SEPARATOR \
DECLARE_SYMBOL_VISIBILITY_UNMANGLED(name) SEPARATOR \
- CFI_START SEPARATOR \
DECLARE_FUNC_ENCODING \
- name: SEPARATOR BTI_C
+ name: \
+ SEPARATOR CFI_START \
+ SEPARATOR BTI_C
#define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target) \
.globl SYMBOL_NAME(name) SEPARATOR \
diff --git a/compiler-rt/lib/builtins/clear_cache.c b/compiler-rt/lib/builtins/clear_cache.c
index 54cbda059315..2ac99b25c243 100644
--- a/compiler-rt/lib/builtins/clear_cache.c
+++ b/compiler-rt/lib/builtins/clear_cache.c
@@ -113,7 +113,7 @@ void __clear_cache(void *start, void *end) {
#elif defined(__linux__) || defined(__OpenBSD__)
// Pre-R6 may not be globalized. And some implementations may give strange
// synci_step. So, let's use libc call for it.
- cacheflush(start, end_int - start_int, BCACHE);
+ _flush_cache(start, end_int - start_int, BCACHE);
#else
(void)start_int;
(void)end_int;
diff --git a/compiler-rt/lib/builtins/cpu_model.c b/compiler-rt/lib/builtins/cpu_model.c
index 36eb696c39ee..b0ec5e51e96d 100644
--- a/compiler-rt/lib/builtins/cpu_model.c
+++ b/compiler-rt/lib/builtins/cpu_model.c
@@ -79,6 +79,7 @@ enum ProcessorTypes {
ZHAOXIN_FAM7H,
INTEL_SIERRAFOREST,
INTEL_GRANDRIDGE,
+ INTEL_CLEARWATERFOREST,
CPU_TYPE_MAX
};
@@ -114,6 +115,9 @@ enum ProcessorSubtypes {
AMDFAM19H_ZNVER4,
INTEL_COREI7_GRANITERAPIDS,
INTEL_COREI7_GRANITERAPIDS_D,
+ INTEL_COREI7_ARROWLAKE,
+ INTEL_COREI7_ARROWLAKE_S,
+ INTEL_COREI7_PANTHERLAKE,
CPU_SUBTYPE_MAX
};
@@ -156,6 +160,19 @@ enum ProcessorFeatures {
FEATURE_AVX512BITALG,
FEATURE_AVX512BF16,
FEATURE_AVX512VP2INTERSECT,
+
+ FEATURE_CMPXCHG16B = 46,
+ FEATURE_F16C = 49,
+ FEATURE_LAHF_LM = 54,
+ FEATURE_LM,
+ FEATURE_WP,
+ FEATURE_LZCNT,
+ FEATURE_MOVBE,
+
+ FEATURE_X86_64_BASELINE = 95,
+ FEATURE_X86_64_V2,
+ FEATURE_X86_64_V3,
+ FEATURE_X86_64_V4,
CPU_FEATURE_MAX
};
@@ -449,14 +466,41 @@ getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
case 0x9a:
// Raptorlake:
case 0xb7:
+ case 0xba:
+ case 0xbf:
// Meteorlake:
case 0xaa:
case 0xac:
+ // Gracemont:
+ case 0xbe:
CPU = "alderlake";
*Type = INTEL_COREI7;
*Subtype = INTEL_COREI7_ALDERLAKE;
break;
+ // Arrowlake:
+ case 0xc5:
+ CPU = "arrowlake";
+ *Type = INTEL_COREI7;
+ *Subtype = INTEL_COREI7_ARROWLAKE;
+ break;
+
+ // Arrowlake S:
+ case 0xc6:
+ // Lunarlake:
+ case 0xbd:
+ CPU = "arrowlake-s";
+ *Type = INTEL_COREI7;
+ *Subtype = INTEL_COREI7_ARROWLAKE_S;
+ break;
+
+ // Pantherlake:
+ case 0xcc:
+ CPU = "pantherlake";
+ *Type = INTEL_COREI7;
+ *Subtype = INTEL_COREI7_PANTHERLAKE;
+ break;
+
// Icelake Xeon:
case 0x6a:
case 0x6c:
@@ -518,6 +562,9 @@ getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
*Type = INTEL_GOLDMONT_PLUS;
break;
case 0x86:
+ case 0x8a: // Lakefield
+ case 0x96: // Elkhart Lake
+ case 0x9c: // Jasper Lake
CPU = "tremont";
*Type = INTEL_TREMONT;
break;
@@ -534,6 +581,13 @@ getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
*Type = INTEL_GRANDRIDGE;
break;
+ // Clearwaterforest:
+ case 0xdd:
+ CPU = "clearwaterforest";
+ *Type = INTEL_COREI7;
+ *Subtype = INTEL_CLEARWATERFOREST;
+ break;
+
case 0x57:
CPU = "knl";
*Type = INTEL_KNL;
@@ -655,6 +709,7 @@ static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf,
unsigned *Features) {
unsigned EAX = 0, EBX = 0;
+#define hasFeature(F) ((Features[F / 32] >> (F % 32)) & 1)
#define setFeature(F) \
Features[F / 32] |= 1U << (F % 32)
@@ -675,14 +730,20 @@ static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf,
setFeature(FEATURE_SSSE3);
if ((ECX >> 12) & 1)
setFeature(FEATURE_FMA);
+ if ((ECX >> 13) & 1)
+ setFeature(FEATURE_CMPXCHG16B);
if ((ECX >> 19) & 1)
setFeature(FEATURE_SSE4_1);
if ((ECX >> 20) & 1)
setFeature(FEATURE_SSE4_2);
+ if ((ECX >> 22) & 1)
+ setFeature(FEATURE_MOVBE);
if ((ECX >> 23) & 1)
setFeature(FEATURE_POPCNT);
if ((ECX >> 25) & 1)
setFeature(FEATURE_AES);
+ if ((ECX >> 29) & 1)
+ setFeature(FEATURE_F16C);
// If CPUID indicates support for XSAVE, XRESTORE and AVX, and XGETBV
// indicates that the AVX registers will be saved and restored on context
@@ -751,8 +812,11 @@ static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf,
if (HasLeaf7 && ((EDX >> 8) & 1) && HasAVX512Save)
setFeature(FEATURE_AVX512VP2INTERSECT);
+ // EAX from subleaf 0 is the maximum subleaf supported. Some CPUs don't
+ // return all 0s for invalid subleaves so check the limit.
bool HasLeaf7Subleaf1 =
- MaxLeaf >= 0x7 && !getX86CpuIDAndInfoEx(0x7, 0x1, &EAX, &EBX, &ECX, &EDX);
+ HasLeaf7 && EAX >= 1 &&
+ !getX86CpuIDAndInfoEx(0x7, 0x1, &EAX, &EBX, &ECX, &EDX);
if (HasLeaf7Subleaf1 && ((EAX >> 5) & 1) && HasAVX512Save)
setFeature(FEATURE_AVX512BF16);
@@ -761,12 +825,39 @@ static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf,
bool HasExtLeaf1 = MaxExtLevel >= 0x80000001 &&
!getX86CpuIDAndInfo(0x80000001, &EAX, &EBX, &ECX, &EDX);
- if (HasExtLeaf1 && ((ECX >> 6) & 1))
- setFeature(FEATURE_SSE4_A);
- if (HasExtLeaf1 && ((ECX >> 11) & 1))
- setFeature(FEATURE_XOP);
- if (HasExtLeaf1 && ((ECX >> 16) & 1))
- setFeature(FEATURE_FMA4);
+ if (HasExtLeaf1) {
+ if (ECX & 1)
+ setFeature(FEATURE_LAHF_LM);
+ if ((ECX >> 5) & 1)
+ setFeature(FEATURE_LZCNT);
+ if (((ECX >> 6) & 1))
+ setFeature(FEATURE_SSE4_A);
+ if (((ECX >> 11) & 1))
+ setFeature(FEATURE_XOP);
+ if (((ECX >> 16) & 1))
+ setFeature(FEATURE_FMA4);
+ if (((EDX >> 29) & 1))
+ setFeature(FEATURE_LM);
+ }
+
+ if (hasFeature(FEATURE_LM) && hasFeature(FEATURE_SSE2)) {
+ setFeature(FEATURE_X86_64_BASELINE);
+ if (hasFeature(FEATURE_CMPXCHG16B) && hasFeature(FEATURE_POPCNT) &&
+ hasFeature(FEATURE_LAHF_LM) && hasFeature(FEATURE_SSE4_2)) {
+ setFeature(FEATURE_X86_64_V2);
+ if (hasFeature(FEATURE_AVX2) && hasFeature(FEATURE_BMI) &&
+ hasFeature(FEATURE_BMI2) && hasFeature(FEATURE_F16C) &&
+ hasFeature(FEATURE_FMA) && hasFeature(FEATURE_LZCNT) &&
+ hasFeature(FEATURE_MOVBE)) {
+ setFeature(FEATURE_X86_64_V3);
+ if (hasFeature(FEATURE_AVX512BW) && hasFeature(FEATURE_AVX512CD) &&
+ hasFeature(FEATURE_AVX512DQ) && hasFeature(FEATURE_AVX512VL))
+ setFeature(FEATURE_X86_64_V4);
+ }
+ }
+ }
+
+#undef hasFeature
#undef setFeature
}
@@ -788,7 +879,7 @@ struct __processor_model {
#ifndef _WIN32
__attribute__((visibility("hidden")))
#endif
-unsigned int __cpu_features2 = 0;
+unsigned __cpu_features2[(CPU_FEATURE_MAX - 1) / 32];
// A constructor function that is sets __cpu_model and __cpu_features2 with
// the right values. This needs to run only once. This constructor is
@@ -802,6 +893,8 @@ int CONSTRUCTOR_ATTRIBUTE __cpu_indicator_init(void) {
unsigned Vendor;
unsigned Model, Family;
unsigned Features[(CPU_FEATURE_MAX + 31) / 32] = {0};
+ static_assert(sizeof(Features) / sizeof(Features[0]) == 4, "");
+ static_assert(sizeof(__cpu_features2) / sizeof(__cpu_features2[0]) == 3, "");
// This function needs to run just once.
if (__cpu_model.__cpu_vendor)
@@ -819,9 +912,10 @@ int CONSTRUCTOR_ATTRIBUTE __cpu_indicator_init(void) {
// Find available features.
getAvailableFeatures(ECX, EDX, MaxLeaf, &Features[0]);
- assert((sizeof(Features)/sizeof(Features[0])) == 2);
__cpu_model.__cpu_features[0] = Features[0];
- __cpu_features2 = Features[1];
+ __cpu_features2[0] = Features[1];
+ __cpu_features2[1] = Features[2];
+ __cpu_features2[2] = Features[3];
if (Vendor == SIG_INTEL) {
// Get CPU type.
@@ -1043,10 +1137,10 @@ typedef struct __ifunc_arg_t {
#define HWCAP2_WFXT (1UL << 31)
#endif
#ifndef HWCAP2_EBF16
-#define HWCAP2_EBF16 (1UL << 32)
+#define HWCAP2_EBF16 (1ULL << 32)
#endif
#ifndef HWCAP2_SVE_EBF16
-#define HWCAP2_SVE_EBF16 (1UL << 33)
+#define HWCAP2_SVE_EBF16 (1ULL << 33)
#endif
// Detect Exynos 9810 CPU
@@ -1151,7 +1245,11 @@ enum CPUFeatures {
FEAT_SME_F64,
FEAT_SME_I64,
FEAT_SME2,
- FEAT_MAX
+ FEAT_RCPC3,
+ FEAT_MAX,
+ FEAT_EXT = 62, // Reserved to indicate presence of additional features field
+ // in __aarch64_cpu_features
+ FEAT_INIT // Used as flag of features initialization completion
};
// Architecture features used
@@ -1161,13 +1259,12 @@ struct {
// As features grows new fields could be added
} __aarch64_cpu_features __attribute__((visibility("hidden"), nocommon));
-void init_cpu_features_resolver(unsigned long hwcap, const __ifunc_arg_t *arg) {
+static void __init_cpu_features_constructor(unsigned long hwcap,
+ const __ifunc_arg_t *arg) {
#define setCPUFeature(F) __aarch64_cpu_features.features |= 1ULL << F
#define getCPUFeature(id, ftr) __asm__("mrs %0, " #id : "=r"(ftr))
#define extractBits(val, start, number) \
(val & ((1ULL << number) - 1ULL) << start) >> start
- if (__aarch64_cpu_features.features)
- return;
unsigned long hwcap2 = 0;
if (hwcap & _IFUNC_ARG_HWCAP)
hwcap2 = arg->_hwcap2;
@@ -1311,6 +1408,9 @@ void init_cpu_features_resolver(unsigned long hwcap, const __ifunc_arg_t *arg) {
// ID_AA64ISAR1_EL1.LRCPC != 0b0000
if (extractBits(ftr, 20, 4) != 0x0)
setCPUFeature(FEAT_RCPC);
+ // ID_AA64ISAR1_EL1.LRCPC == 0b0011
+ if (extractBits(ftr, 20, 4) == 0x3)
+ setCPUFeature(FEAT_RCPC3);
// ID_AA64ISAR1_EL1.SPECRES == 0b0001
if (extractBits(ftr, 40, 4) == 0x2)
setCPUFeature(FEAT_PREDRES);
@@ -1346,10 +1446,27 @@ void init_cpu_features_resolver(unsigned long hwcap, const __ifunc_arg_t *arg) {
if (hwcap & HWCAP_SHA3)
setCPUFeature(FEAT_SHA3);
}
- setCPUFeature(FEAT_MAX);
+ setCPUFeature(FEAT_INIT);
+}
+
+void __init_cpu_features_resolver(unsigned long hwcap,
+ const __ifunc_arg_t *arg) {
+ if (__aarch64_cpu_features.features)
+ return;
+#if defined(__ANDROID__)
+ // ifunc resolvers don't have hwcaps in arguments on Android API lower
+ // than 30. If so, set feature detection done and keep all CPU features
+ // unsupported (zeros). To detect this case in runtime we check existence
+ // of memfd_create function from Standard C library which was introduced in
+ // Android API 30.
+ int memfd_create(const char *, unsigned int) __attribute__((weak));
+ if (!memfd_create)
+ return;
+#endif // defined(__ANDROID__)
+ __init_cpu_features_constructor(hwcap, arg);
}
-void CONSTRUCTOR_ATTRIBUTE init_cpu_features(void) {
+void CONSTRUCTOR_ATTRIBUTE __init_cpu_features(void) {
unsigned long hwcap;
unsigned long hwcap2;
// CPU features already initialized.
@@ -1374,7 +1491,7 @@ void CONSTRUCTOR_ATTRIBUTE init_cpu_features(void) {
arg._size = sizeof(__ifunc_arg_t);
arg._hwcap = hwcap;
arg._hwcap2 = hwcap2;
- init_cpu_features_resolver(hwcap | _IFUNC_ARG_HWCAP, &arg);
+ __init_cpu_features_constructor(hwcap | _IFUNC_ARG_HWCAP, &arg);
#undef extractBits
#undef getCPUFeature
#undef setCPUFeature
diff --git a/compiler-rt/lib/builtins/divmoddi4.c b/compiler-rt/lib/builtins/divmoddi4.c
index e7cbbb1aaa30..64bbb6934601 100644
--- a/compiler-rt/lib/builtins/divmoddi4.c
+++ b/compiler-rt/lib/builtins/divmoddi4.c
@@ -18,8 +18,8 @@ COMPILER_RT_ABI di_int __divmoddi4(di_int a, di_int b, di_int *rem) {
const int bits_in_dword_m1 = (int)(sizeof(di_int) * CHAR_BIT) - 1;
di_int s_a = a >> bits_in_dword_m1; // s_a = a < 0 ? -1 : 0
di_int s_b = b >> bits_in_dword_m1; // s_b = b < 0 ? -1 : 0
- a = (a ^ s_a) - s_a; // negate if s_a == -1
- b = (b ^ s_b) - s_b; // negate if s_b == -1
+ a = (du_int)(a ^ s_a) - s_a; // negate if s_a == -1
+ b = (du_int)(b ^ s_b) - s_b; // negate if s_b == -1
s_b ^= s_a; // sign of quotient
du_int r;
di_int q = (__udivmoddi4(a, b, &r) ^ s_b) - s_b; // negate if s_b == -1
diff --git a/compiler-rt/lib/builtins/divmodsi4.c b/compiler-rt/lib/builtins/divmodsi4.c
index a85e2993b4e9..193f81053568 100644
--- a/compiler-rt/lib/builtins/divmodsi4.c
+++ b/compiler-rt/lib/builtins/divmodsi4.c
@@ -19,8 +19,8 @@ COMPILER_RT_ABI si_int __divmodsi4(si_int a, si_int b, si_int *rem) {
const int bits_in_word_m1 = (int)(sizeof(si_int) * CHAR_BIT) - 1;
si_int s_a = a >> bits_in_word_m1; // s_a = a < 0 ? -1 : 0
si_int s_b = b >> bits_in_word_m1; // s_b = b < 0 ? -1 : 0
- a = (a ^ s_a) - s_a; // negate if s_a == -1
- b = (b ^ s_b) - s_b; // negate if s_b == -1
+ a = (su_int)(a ^ s_a) - s_a; // negate if s_a == -1
+ b = (su_int)(b ^ s_b) - s_b; // negate if s_b == -1
s_b ^= s_a; // sign of quotient
su_int r;
si_int q = (__udivmodsi4(a, b, &r) ^ s_b) - s_b; // negate if s_b == -1
diff --git a/compiler-rt/lib/builtins/divmodti4.c b/compiler-rt/lib/builtins/divmodti4.c
index b243ba4ef853..185d3d47f365 100644
--- a/compiler-rt/lib/builtins/divmodti4.c
+++ b/compiler-rt/lib/builtins/divmodti4.c
@@ -20,8 +20,8 @@ COMPILER_RT_ABI ti_int __divmodti4(ti_int a, ti_int b, ti_int *rem) {
const int bits_in_tword_m1 = (int)(sizeof(ti_int) * CHAR_BIT) - 1;
ti_int s_a = a >> bits_in_tword_m1; // s_a = a < 0 ? -1 : 0
ti_int s_b = b >> bits_in_tword_m1; // s_b = b < 0 ? -1 : 0
- a = (a ^ s_a) - s_a; // negate if s_a == -1
- b = (b ^ s_b) - s_b; // negate if s_b == -1
+ a = (tu_int)(a ^ s_a) - s_a; // negate if s_a == -1
+ b = (tu_int)(b ^ s_b) - s_b; // negate if s_b == -1
s_b ^= s_a; // sign of quotient
tu_int r;
ti_int q = (__udivmodti4(a, b, &r) ^ s_b) - s_b; // negate if s_b == -1
diff --git a/compiler-rt/lib/builtins/divtc3.c b/compiler-rt/lib/builtins/divtc3.c
index 0e4799295f32..e970cef574b2 100644
--- a/compiler-rt/lib/builtins/divtc3.c
+++ b/compiler-rt/lib/builtins/divtc3.c
@@ -12,44 +12,45 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#include "int_lib.h"
-#include "int_math.h"
+
+#if defined(CRT_HAS_TF_MODE)
// Returns: the quotient of (a + ib) / (c + id)
-COMPILER_RT_ABI Lcomplex __divtc3(long double __a, long double __b,
- long double __c, long double __d) {
+COMPILER_RT_ABI Qcomplex __divtc3(fp_t __a, fp_t __b, fp_t __c, fp_t __d) {
int __ilogbw = 0;
- long double __logbw =
- __compiler_rt_logbl(__compiler_rt_fmaxl(crt_fabsl(__c), crt_fabsl(__d)));
+ fp_t __logbw = __compiler_rt_logbtf(
+ __compiler_rt_fmaxtf(crt_fabstf(__c), crt_fabstf(__d)));
if (crt_isfinite(__logbw)) {
__ilogbw = (int)__logbw;
- __c = __compiler_rt_scalbnl(__c, -__ilogbw);
- __d = __compiler_rt_scalbnl(__d, -__ilogbw);
+ __c = __compiler_rt_scalbntf(__c, -__ilogbw);
+ __d = __compiler_rt_scalbntf(__d, -__ilogbw);
}
- long double __denom = __c * __c + __d * __d;
- Lcomplex z;
- COMPLEX_REAL(z) =
- __compiler_rt_scalbnl((__a * __c + __b * __d) / __denom, -__ilogbw);
- COMPLEX_IMAGINARY(z) =
- __compiler_rt_scalbnl((__b * __c - __a * __d) / __denom, -__ilogbw);
- if (crt_isnan(COMPLEX_REAL(z)) && crt_isnan(COMPLEX_IMAGINARY(z))) {
+ fp_t __denom = __c * __c + __d * __d;
+ Qcomplex z;
+ COMPLEXTF_REAL(z) =
+ __compiler_rt_scalbntf((__a * __c + __b * __d) / __denom, -__ilogbw);
+ COMPLEXTF_IMAGINARY(z) =
+ __compiler_rt_scalbntf((__b * __c - __a * __d) / __denom, -__ilogbw);
+ if (crt_isnan(COMPLEXTF_REAL(z)) && crt_isnan(COMPLEXTF_IMAGINARY(z))) {
if ((__denom == 0.0) && (!crt_isnan(__a) || !crt_isnan(__b))) {
- COMPLEX_REAL(z) = crt_copysignl(CRT_INFINITY, __c) * __a;
- COMPLEX_IMAGINARY(z) = crt_copysignl(CRT_INFINITY, __c) * __b;
+ COMPLEXTF_REAL(z) = crt_copysigntf(CRT_INFINITY, __c) * __a;
+ COMPLEXTF_IMAGINARY(z) = crt_copysigntf(CRT_INFINITY, __c) * __b;
} else if ((crt_isinf(__a) || crt_isinf(__b)) && crt_isfinite(__c) &&
crt_isfinite(__d)) {
- __a = crt_copysignl(crt_isinf(__a) ? 1.0 : 0.0, __a);
- __b = crt_copysignl(crt_isinf(__b) ? 1.0 : 0.0, __b);
- COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c + __b * __d);
- COMPLEX_IMAGINARY(z) = CRT_INFINITY * (__b * __c - __a * __d);
+ __a = crt_copysigntf(crt_isinf(__a) ? (fp_t)1.0 : (fp_t)0.0, __a);
+ __b = crt_copysigntf(crt_isinf(__b) ? (fp_t)1.0 : (fp_t)0.0, __b);
+ COMPLEXTF_REAL(z) = CRT_INFINITY * (__a * __c + __b * __d);
+ COMPLEXTF_IMAGINARY(z) = CRT_INFINITY * (__b * __c - __a * __d);
} else if (crt_isinf(__logbw) && __logbw > 0.0 && crt_isfinite(__a) &&
crt_isfinite(__b)) {
- __c = crt_copysignl(crt_isinf(__c) ? 1.0 : 0.0, __c);
- __d = crt_copysignl(crt_isinf(__d) ? 1.0 : 0.0, __d);
- COMPLEX_REAL(z) = 0.0 * (__a * __c + __b * __d);
- COMPLEX_IMAGINARY(z) = 0.0 * (__b * __c - __a * __d);
+ __c = crt_copysigntf(crt_isinf(__c) ? (fp_t)1.0 : (fp_t)0.0, __c);
+ __d = crt_copysigntf(crt_isinf(__d) ? (fp_t)1.0 : (fp_t)0.0, __d);
+ COMPLEXTF_REAL(z) = 0.0 * (__a * __c + __b * __d);
+ COMPLEXTF_IMAGINARY(z) = 0.0 * (__b * __c - __a * __d);
}
}
return z;
}
+
+#endif
diff --git a/compiler-rt/lib/builtins/divxc3.c b/compiler-rt/lib/builtins/divxc3.c
index 97ffd2eac211..3423334f2006 100644
--- a/compiler-rt/lib/builtins/divxc3.c
+++ b/compiler-rt/lib/builtins/divxc3.c
@@ -17,16 +17,16 @@
// Returns: the quotient of (a + ib) / (c + id)
-COMPILER_RT_ABI Lcomplex __divxc3(long double __a, long double __b,
- long double __c, long double __d) {
+COMPILER_RT_ABI Lcomplex __divxc3(xf_float __a, xf_float __b, xf_float __c,
+ xf_float __d) {
int __ilogbw = 0;
- long double __logbw = crt_logbl(crt_fmaxl(crt_fabsl(__c), crt_fabsl(__d)));
+ xf_float __logbw = crt_logbl(crt_fmaxl(crt_fabsl(__c), crt_fabsl(__d)));
if (crt_isfinite(__logbw)) {
__ilogbw = (int)__logbw;
__c = crt_scalbnl(__c, -__ilogbw);
__d = crt_scalbnl(__d, -__ilogbw);
}
- long double __denom = __c * __c + __d * __d;
+ xf_float __denom = __c * __c + __d * __d;
Lcomplex z;
COMPLEX_REAL(z) = crt_scalbnl((__a * __c + __b * __d) / __denom, -__ilogbw);
COMPLEX_IMAGINARY(z) =
diff --git a/compiler-rt/lib/builtins/extenddftf2.c b/compiler-rt/lib/builtins/extenddftf2.c
index 835076be1f20..a61ef53147ab 100644
--- a/compiler-rt/lib/builtins/extenddftf2.c
+++ b/compiler-rt/lib/builtins/extenddftf2.c
@@ -14,8 +14,6 @@
#define DST_QUAD
#include "fp_extend_impl.inc"
-COMPILER_RT_ABI fp_t __extenddftf2(double a) {
- return __extendXfYf2__(a);
-}
+COMPILER_RT_ABI dst_t __extenddftf2(src_t a) { return __extendXfYf2__(a); }
#endif
diff --git a/compiler-rt/lib/builtins/extendhftf2.c b/compiler-rt/lib/builtins/extendhftf2.c
index a2cb0f771ee9..7609db6f06e4 100644
--- a/compiler-rt/lib/builtins/extendhftf2.c
+++ b/compiler-rt/lib/builtins/extendhftf2.c
@@ -15,8 +15,6 @@
#define DST_QUAD
#include "fp_extend_impl.inc"
-COMPILER_RT_ABI long double __extendhftf2(_Float16 a) {
- return __extendXfYf2__(a);
-}
+COMPILER_RT_ABI dst_t __extendhftf2(src_t a) { return __extendXfYf2__(a); }
#endif
diff --git a/compiler-rt/lib/builtins/extendsftf2.c b/compiler-rt/lib/builtins/extendsftf2.c
index 0739859bcbc1..4ab2982ce514 100644
--- a/compiler-rt/lib/builtins/extendsftf2.c
+++ b/compiler-rt/lib/builtins/extendsftf2.c
@@ -14,8 +14,6 @@
#define DST_QUAD
#include "fp_extend_impl.inc"
-COMPILER_RT_ABI fp_t __extendsftf2(float a) {
- return __extendXfYf2__(a);
-}
+COMPILER_RT_ABI dst_t __extendsftf2(src_t a) { return __extendXfYf2__(a); }
#endif
diff --git a/compiler-rt/lib/builtins/extendxftf2.c b/compiler-rt/lib/builtins/extendxftf2.c
new file mode 100644
index 000000000000..c1d97b5cfa15
--- /dev/null
+++ b/compiler-rt/lib/builtins/extendxftf2.c
@@ -0,0 +1,24 @@
+//===-- lib/extendxftf2.c - long double -> quad conversion --------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Assumption: long double is a IEEE 80 bit floating point type padded to 128
+// bits.
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+
+#if defined(CRT_HAS_TF_MODE) && __LDBL_MANT_DIG__ == 64 && defined(__x86_64__)
+#define SRC_80
+#define DST_QUAD
+#include "fp_extend_impl.inc"
+
+COMPILER_RT_ABI tf_float __extendxftf2(xf_float a) {
+ return __extendXfYf2__(a);
+}
+
+#endif
diff --git a/compiler-rt/lib/builtins/fixunsxfdi.c b/compiler-rt/lib/builtins/fixunsxfdi.c
index c8a8061b2cf0..957c263aa3c5 100644
--- a/compiler-rt/lib/builtins/fixunsxfdi.c
+++ b/compiler-rt/lib/builtins/fixunsxfdi.c
@@ -32,8 +32,8 @@
#pragma warning(disable : 4700)
#endif
-COMPILER_RT_ABI du_int __fixunsxfdi(long double a) {
- long_double_bits fb;
+COMPILER_RT_ABI du_int __fixunsxfdi(xf_float a) {
+ xf_bits fb;
fb.f = a;
int e = (fb.u.high.s.low & 0x00007FFF) - 16383;
if (e < 0 || (fb.u.high.s.low & 0x00008000))
diff --git a/compiler-rt/lib/builtins/fixunsxfsi.c b/compiler-rt/lib/builtins/fixunsxfsi.c
index 154abcbd35e7..a0abb82b7917 100644
--- a/compiler-rt/lib/builtins/fixunsxfsi.c
+++ b/compiler-rt/lib/builtins/fixunsxfsi.c
@@ -32,8 +32,8 @@
#pragma warning(disable : 4700)
#endif
-COMPILER_RT_ABI su_int __fixunsxfsi(long double a) {
- long_double_bits fb;
+COMPILER_RT_ABI su_int __fixunsxfsi(xf_float a) {
+ xf_bits fb;
fb.f = a;
int e = (fb.u.high.s.low & 0x00007FFF) - 16383;
if (e < 0 || (fb.u.high.s.low & 0x00008000))
diff --git a/compiler-rt/lib/builtins/fixunsxfti.c b/compiler-rt/lib/builtins/fixunsxfti.c
index 508554e4f8f6..be3f75f04f7f 100644
--- a/compiler-rt/lib/builtins/fixunsxfti.c
+++ b/compiler-rt/lib/builtins/fixunsxfti.c
@@ -25,8 +25,8 @@
// eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
// mmmm mmmm mmmm
-COMPILER_RT_ABI tu_int __fixunsxfti(long double a) {
- long_double_bits fb;
+COMPILER_RT_ABI tu_int __fixunsxfti(xf_float a) {
+ xf_bits fb;
fb.f = a;
int e = (fb.u.high.s.low & 0x00007FFF) - 16383;
if (e < 0 || (fb.u.high.s.low & 0x00008000))
diff --git a/compiler-rt/lib/builtins/fixxfdi.c b/compiler-rt/lib/builtins/fixxfdi.c
index 86cf3767b75d..35d7083f56b0 100644
--- a/compiler-rt/lib/builtins/fixxfdi.c
+++ b/compiler-rt/lib/builtins/fixxfdi.c
@@ -31,10 +31,10 @@
#pragma warning(disable : 4700)
#endif
-COMPILER_RT_ABI di_int __fixxfdi(long double a) {
+COMPILER_RT_ABI di_int __fixxfdi(xf_float a) {
const di_int di_max = (di_int)((~(du_int)0) / 2);
const di_int di_min = -di_max - 1;
- long_double_bits fb;
+ xf_bits fb;
fb.f = a;
int e = (fb.u.high.s.low & 0x00007FFF) - 16383;
if (e < 0)
diff --git a/compiler-rt/lib/builtins/fixxfti.c b/compiler-rt/lib/builtins/fixxfti.c
index 90e03116e7ca..95038dfafd5d 100644
--- a/compiler-rt/lib/builtins/fixxfti.c
+++ b/compiler-rt/lib/builtins/fixxfti.c
@@ -24,10 +24,10 @@
// eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
// mmmm mmmm mmmm
-COMPILER_RT_ABI ti_int __fixxfti(long double a) {
+COMPILER_RT_ABI ti_int __fixxfti(xf_float a) {
const ti_int ti_max = (ti_int)((~(tu_int)0) / 2);
const ti_int ti_min = -ti_max - 1;
- long_double_bits fb;
+ xf_bits fb;
fb.f = a;
int e = (fb.u.high.s.low & 0x00007FFF) - 16383;
if (e < 0)
diff --git a/compiler-rt/lib/builtins/floatdidf.c b/compiler-rt/lib/builtins/floatdidf.c
index c994aad3f079..6da81f7a05bf 100644
--- a/compiler-rt/lib/builtins/floatdidf.c
+++ b/compiler-rt/lib/builtins/floatdidf.c
@@ -45,53 +45,11 @@ COMPILER_RT_ABI double __floatdidf(di_int a) {
// flags to set, and we don't want to code-gen to an unknown soft-float
// implementation.
-COMPILER_RT_ABI double __floatdidf(di_int a) {
- if (a == 0)
- return 0.0;
- const unsigned N = sizeof(di_int) * CHAR_BIT;
- const di_int s = a >> (N - 1);
- a = (du_int)(a ^ s) - s;
- int sd = N - __builtin_clzll(a); // number of significant digits
- int e = sd - 1; // exponent
- if (sd > DBL_MANT_DIG) {
- // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
- // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
- // 12345678901234567890123456
- // 1 = msb 1 bit
- // P = bit DBL_MANT_DIG-1 bits to the right of 1
- // Q = bit DBL_MANT_DIG bits to the right of 1
- // R = "or" of all bits to the right of Q
- switch (sd) {
- case DBL_MANT_DIG + 1:
- a <<= 1;
- break;
- case DBL_MANT_DIG + 2:
- break;
- default:
- a = ((du_int)a >> (sd - (DBL_MANT_DIG + 2))) |
- ((a & ((du_int)(-1) >> ((N + DBL_MANT_DIG + 2) - sd))) != 0);
- };
- // finish:
- a |= (a & 4) != 0; // Or P into R
- ++a; // round - this step may add a significant bit
- a >>= 2; // dump Q and R
- // a is now rounded to DBL_MANT_DIG or DBL_MANT_DIG+1 bits
- if (a & ((du_int)1 << DBL_MANT_DIG)) {
- a >>= 1;
- ++e;
- }
- // a is now rounded to DBL_MANT_DIG bits
- } else {
- a <<= (DBL_MANT_DIG - sd);
- // a is now rounded to DBL_MANT_DIG bits
- }
- double_bits fb;
- fb.u.s.high = ((su_int)s & 0x80000000) | // sign
- ((su_int)(e + 1023) << 20) | // exponent
- ((su_int)(a >> 32) & 0x000FFFFF); // mantissa-high
- fb.u.s.low = (su_int)a; // mantissa-low
- return fb.f;
-}
+#define SRC_I64
+#define DST_DOUBLE
+#include "int_to_fp_impl.inc"
+
+COMPILER_RT_ABI double __floatdidf(di_int a) { return __floatXiYf__(a); }
#endif
#if defined(__ARM_EABI__)
diff --git a/compiler-rt/lib/builtins/floatdisf.c b/compiler-rt/lib/builtins/floatdisf.c
index 0b62ed8689bc..0bb88c5c518e 100644
--- a/compiler-rt/lib/builtins/floatdisf.c
+++ b/compiler-rt/lib/builtins/floatdisf.c
@@ -19,52 +19,11 @@
#include "int_lib.h"
-COMPILER_RT_ABI float __floatdisf(di_int a) {
- if (a == 0)
- return 0.0F;
- const unsigned N = sizeof(di_int) * CHAR_BIT;
- const di_int s = a >> (N - 1);
- a = (du_int)(a ^ s) - s;
- int sd = N - __builtin_clzll(a); // number of significant digits
- si_int e = sd - 1; // exponent
- if (sd > FLT_MANT_DIG) {
- // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
- // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
- // 12345678901234567890123456
- // 1 = msb 1 bit
- // P = bit FLT_MANT_DIG-1 bits to the right of 1
- // Q = bit FLT_MANT_DIG bits to the right of 1
- // R = "or" of all bits to the right of Q
- switch (sd) {
- case FLT_MANT_DIG + 1:
- a <<= 1;
- break;
- case FLT_MANT_DIG + 2:
- break;
- default:
- a = ((du_int)a >> (sd - (FLT_MANT_DIG + 2))) |
- ((a & ((du_int)(-1) >> ((N + FLT_MANT_DIG + 2) - sd))) != 0);
- };
- // finish:
- a |= (a & 4) != 0; // Or P into R
- ++a; // round - this step may add a significant bit
- a >>= 2; // dump Q and R
- // a is now rounded to FLT_MANT_DIG or FLT_MANT_DIG+1 bits
- if (a & ((du_int)1 << FLT_MANT_DIG)) {
- a >>= 1;
- ++e;
- }
- // a is now rounded to FLT_MANT_DIG bits
- } else {
- a <<= (FLT_MANT_DIG - sd);
- // a is now rounded to FLT_MANT_DIG bits
- }
- float_bits fb;
- fb.u = ((su_int)s & 0x80000000) | // sign
- ((e + 127) << 23) | // exponent
- ((su_int)a & 0x007FFFFF); // mantissa
- return fb.f;
-}
+#define SRC_I64
+#define DST_SINGLE
+#include "int_to_fp_impl.inc"
+
+COMPILER_RT_ABI float __floatdisf(di_int a) { return __floatXiYf__(a); }
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
diff --git a/compiler-rt/lib/builtins/floatdixf.c b/compiler-rt/lib/builtins/floatdixf.c
index ad5deb2d4bf5..3d9e664e4814 100644
--- a/compiler-rt/lib/builtins/floatdixf.c
+++ b/compiler-rt/lib/builtins/floatdixf.c
@@ -23,7 +23,7 @@
// eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
// mmmm mmmm mmmm
-COMPILER_RT_ABI long double __floatdixf(di_int a) {
+COMPILER_RT_ABI xf_float __floatdixf(di_int a) {
if (a == 0)
return 0.0;
const unsigned N = sizeof(di_int) * CHAR_BIT;
@@ -31,7 +31,7 @@ COMPILER_RT_ABI long double __floatdixf(di_int a) {
a = (a ^ s) - s;
int clz = __builtin_clzll(a);
int e = (N - 1) - clz; // exponent
- long_double_bits fb;
+ xf_bits fb;
fb.u.high.s.low = ((su_int)s & 0x00008000) | // sign
(e + 16383); // exponent
fb.u.low.all = a << clz; // mantissa
diff --git a/compiler-rt/lib/builtins/floatsidf.c b/compiler-rt/lib/builtins/floatsidf.c
index 28cf32f6388b..a23b31e7bc7e 100644
--- a/compiler-rt/lib/builtins/floatsidf.c
+++ b/compiler-rt/lib/builtins/floatsidf.c
@@ -27,20 +27,21 @@ COMPILER_RT_ABI fp_t __floatsidf(si_int a) {
// All other cases begin by extracting the sign and absolute value of a
rep_t sign = 0;
+ su_int aAbs = (su_int)a;
if (a < 0) {
sign = signBit;
- a = -a;
+ aAbs = -aAbs;
}
// Exponent of (fp_t)a is the width of abs(a).
- const int exponent = (aWidth - 1) - clzsi(a);
+ const int exponent = (aWidth - 1) - clzsi(aAbs);
rep_t result;
// Shift a into the significand field and clear the implicit bit. Extra
// cast to unsigned int is necessary to get the correct behavior for
// the input INT_MIN.
const int shift = significandBits - exponent;
- result = (rep_t)(su_int)a << shift ^ implicitBit;
+ result = (rep_t)aAbs << shift ^ implicitBit;
// Insert the exponent
result += (rep_t)(exponent + exponentBias) << significandBits;
diff --git a/compiler-rt/lib/builtins/floatsisf.c b/compiler-rt/lib/builtins/floatsisf.c
index c01f81e41e8e..5ede30b703e0 100644
--- a/compiler-rt/lib/builtins/floatsisf.c
+++ b/compiler-rt/lib/builtins/floatsisf.c
@@ -27,23 +27,24 @@ COMPILER_RT_ABI fp_t __floatsisf(si_int a) {
// All other cases begin by extracting the sign and absolute value of a
rep_t sign = 0;
+ su_int aAbs = (su_int)a;
if (a < 0) {
sign = signBit;
- a = -a;
+ aAbs = -aAbs;
}
// Exponent of (fp_t)a is the width of abs(a).
- const int exponent = (aWidth - 1) - clzsi(a);
+ const int exponent = (aWidth - 1) - clzsi(aAbs);
rep_t result;
// Shift a into the significand field, rounding if it is a right-shift
if (exponent <= significandBits) {
const int shift = significandBits - exponent;
- result = (rep_t)a << shift ^ implicitBit;
+ result = (rep_t)aAbs << shift ^ implicitBit;
} else {
const int shift = exponent - significandBits;
- result = (rep_t)a >> shift ^ implicitBit;
- rep_t round = (rep_t)a << (typeWidth - shift);
+ result = (rep_t)aAbs >> shift ^ implicitBit;
+ rep_t round = (rep_t)aAbs << (typeWidth - shift);
if (round > signBit)
result++;
if (round == signBit)
diff --git a/compiler-rt/lib/builtins/floatsitf.c b/compiler-rt/lib/builtins/floatsitf.c
index 4d5b52f4ed91..314a8a7bbdfe 100644
--- a/compiler-rt/lib/builtins/floatsitf.c
+++ b/compiler-rt/lib/builtins/floatsitf.c
@@ -29,7 +29,7 @@ COMPILER_RT_ABI fp_t __floatsitf(si_int a) {
su_int aAbs = (su_int)a;
if (a < 0) {
sign = signBit;
- aAbs = ~(su_int)a + (su_int)1U;
+ aAbs = -aAbs;
}
// Exponent of (fp_t)a is the width of abs(a).
diff --git a/compiler-rt/lib/builtins/floattidf.c b/compiler-rt/lib/builtins/floattidf.c
index 7bfe87f53aa0..ef8fe180e2f5 100644
--- a/compiler-rt/lib/builtins/floattidf.c
+++ b/compiler-rt/lib/builtins/floattidf.c
@@ -14,6 +14,10 @@
#ifdef CRT_HAS_128BIT
+#define SRC_I128
+#define DST_DOUBLE
+#include "int_to_fp_impl.inc"
+
// Returns: convert a to a double, rounding toward even.
// Assumption: double is a IEEE 64 bit floating point type
@@ -22,52 +26,6 @@
// seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm
// mmmm
-COMPILER_RT_ABI double __floattidf(ti_int a) {
- if (a == 0)
- return 0.0;
- const unsigned N = sizeof(ti_int) * CHAR_BIT;
- const ti_int s = a >> (N - 1);
- a = (a ^ s) - s;
- int sd = N - __clzti2(a); // number of significant digits
- si_int e = sd - 1; // exponent
- if (sd > DBL_MANT_DIG) {
- // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
- // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
- // 12345678901234567890123456
- // 1 = msb 1 bit
- // P = bit DBL_MANT_DIG-1 bits to the right of 1
- // Q = bit DBL_MANT_DIG bits to the right of 1
- // R = "or" of all bits to the right of Q
- switch (sd) {
- case DBL_MANT_DIG + 1:
- a <<= 1;
- break;
- case DBL_MANT_DIG + 2:
- break;
- default:
- a = ((tu_int)a >> (sd - (DBL_MANT_DIG + 2))) |
- ((a & ((tu_int)(-1) >> ((N + DBL_MANT_DIG + 2) - sd))) != 0);
- };
- // finish:
- a |= (a & 4) != 0; // Or P into R
- ++a; // round - this step may add a significant bit
- a >>= 2; // dump Q and R
- // a is now rounded to DBL_MANT_DIG or DBL_MANT_DIG+1 bits
- if (a & ((tu_int)1 << DBL_MANT_DIG)) {
- a >>= 1;
- ++e;
- }
- // a is now rounded to DBL_MANT_DIG bits
- } else {
- a <<= (DBL_MANT_DIG - sd);
- // a is now rounded to DBL_MANT_DIG bits
- }
- double_bits fb;
- fb.u.s.high = ((su_int)s & 0x80000000) | // sign
- ((e + 1023) << 20) | // exponent
- ((su_int)(a >> 32) & 0x000FFFFF); // mantissa-high
- fb.u.s.low = (su_int)a; // mantissa-low
- return fb.f;
-}
+COMPILER_RT_ABI double __floattidf(ti_int a) { return __floatXiYf__(a); }
#endif // CRT_HAS_128BIT
diff --git a/compiler-rt/lib/builtins/floattisf.c b/compiler-rt/lib/builtins/floattisf.c
index 717cb361f075..77589902f544 100644
--- a/compiler-rt/lib/builtins/floattisf.c
+++ b/compiler-rt/lib/builtins/floattisf.c
@@ -14,6 +14,10 @@
#ifdef CRT_HAS_128BIT
+#define SRC_I128
+#define DST_SINGLE
+#include "int_to_fp_impl.inc"
+
// Returns: convert a to a float, rounding toward even.
// Assumption: float is a IEEE 32 bit floating point type
@@ -21,51 +25,6 @@
// seee eeee emmm mmmm mmmm mmmm mmmm mmmm
-COMPILER_RT_ABI float __floattisf(ti_int a) {
- if (a == 0)
- return 0.0F;
- const unsigned N = sizeof(ti_int) * CHAR_BIT;
- const ti_int s = a >> (N - 1);
- a = (a ^ s) - s;
- int sd = N - __clzti2(a); // number of significant digits
- si_int e = sd - 1; // exponent
- if (sd > FLT_MANT_DIG) {
- // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
- // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
- // 12345678901234567890123456
- // 1 = msb 1 bit
- // P = bit FLT_MANT_DIG-1 bits to the right of 1
- // Q = bit FLT_MANT_DIG bits to the right of 1
- // R = "or" of all bits to the right of Q
- switch (sd) {
- case FLT_MANT_DIG + 1:
- a <<= 1;
- break;
- case FLT_MANT_DIG + 2:
- break;
- default:
- a = ((tu_int)a >> (sd - (FLT_MANT_DIG + 2))) |
- ((a & ((tu_int)(-1) >> ((N + FLT_MANT_DIG + 2) - sd))) != 0);
- };
- // finish:
- a |= (a & 4) != 0; // Or P into R
- ++a; // round - this step may add a significant bit
- a >>= 2; // dump Q and R
- // a is now rounded to FLT_MANT_DIG or FLT_MANT_DIG+1 bits
- if (a & ((tu_int)1 << FLT_MANT_DIG)) {
- a >>= 1;
- ++e;
- }
- // a is now rounded to FLT_MANT_DIG bits
- } else {
- a <<= (FLT_MANT_DIG - sd);
- // a is now rounded to FLT_MANT_DIG bits
- }
- float_bits fb;
- fb.u = ((su_int)s & 0x80000000) | // sign
- ((e + 127) << 23) | // exponent
- ((su_int)a & 0x007FFFFF); // mantissa
- return fb.f;
-}
+COMPILER_RT_ABI float __floattisf(ti_int a) { return __floatXiYf__(a); }
#endif // CRT_HAS_128BIT
diff --git a/compiler-rt/lib/builtins/floattitf.c b/compiler-rt/lib/builtins/floattitf.c
index fff0755c3bb4..5dffe22fdb4e 100644
--- a/compiler-rt/lib/builtins/floattitf.c
+++ b/compiler-rt/lib/builtins/floattitf.c
@@ -16,6 +16,11 @@
#include "fp_lib.h"
#include "int_lib.h"
+#if defined(CRT_HAS_TF_MODE)
+#define SRC_I128
+#define DST_QUAD
+#include "int_to_fp_impl.inc"
+
// Returns: convert a ti_int to a fp_t, rounding toward even.
// Assumption: fp_t is a IEEE 128 bit floating point type
@@ -25,54 +30,6 @@
// mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
// mmmm mmmm mmmm
-#if defined(CRT_HAS_TF_MODE)
-COMPILER_RT_ABI fp_t __floattitf(ti_int a) {
- if (a == 0)
- return 0.0;
- const unsigned N = sizeof(ti_int) * CHAR_BIT;
- const ti_int s = a >> (N - 1);
- a = (a ^ s) - s;
- int sd = N - __clzti2(a); // number of significant digits
- int e = sd - 1; // exponent
- if (sd > TF_MANT_DIG) {
- // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
- // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
- // 12345678901234567890123456
- // 1 = msb 1 bit
- // P = bit LDBL_MANT_DIG-1 bits to the right of 1
- // Q = bit LDBL_MANT_DIG bits to the right of 1
- // R = "or" of all bits to the right of Q
- switch (sd) {
- case TF_MANT_DIG + 1:
- a <<= 1;
- break;
- case TF_MANT_DIG + 2:
- break;
- default:
- a = ((tu_int)a >> (sd - (TF_MANT_DIG + 2))) |
- ((a & ((tu_int)(-1) >> ((N + TF_MANT_DIG + 2) - sd))) != 0);
- };
- // finish:
- a |= (a & 4) != 0; // Or P into R
- ++a; // round - this step may add a significant bit
- a >>= 2; // dump Q and R
- // a is now rounded to LDBL_MANT_DIG or LDBL_MANT_DIG+1 bits
- if (a & ((tu_int)1 << TF_MANT_DIG)) {
- a >>= 1;
- ++e;
- }
- // a is now rounded to LDBL_MANT_DIG bits
- } else {
- a <<= (TF_MANT_DIG - sd);
- // a is now rounded to LDBL_MANT_DIG bits
- }
-
- long_double_bits fb;
- fb.u.high.all = (s & 0x8000000000000000LL) // sign
- | (du_int)(e + 16383) << 48 // exponent
- | ((a >> 64) & 0x0000ffffffffffffLL); // significand
- fb.u.low.all = (du_int)(a);
- return fb.f;
-}
+COMPILER_RT_ABI fp_t __floattitf(ti_int a) { return __floatXiYf__(a); }
#endif
diff --git a/compiler-rt/lib/builtins/floattixf.c b/compiler-rt/lib/builtins/floattixf.c
index 23796f1bb56f..c80bc714459c 100644
--- a/compiler-rt/lib/builtins/floattixf.c
+++ b/compiler-rt/lib/builtins/floattixf.c
@@ -23,7 +23,7 @@
// eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
// mmmm mmmm mmmm
-COMPILER_RT_ABI long double __floattixf(ti_int a) {
+COMPILER_RT_ABI xf_float __floattixf(ti_int a) {
if (a == 0)
return 0.0;
const unsigned N = sizeof(ti_int) * CHAR_BIT;
@@ -63,7 +63,7 @@ COMPILER_RT_ABI long double __floattixf(ti_int a) {
a <<= (LDBL_MANT_DIG - sd);
// a is now rounded to LDBL_MANT_DIG bits
}
- long_double_bits fb;
+ xf_bits fb;
fb.u.high.s.low = ((su_int)s & 0x8000) | // sign
(e + 16383); // exponent
fb.u.low.all = (du_int)a; // mantissa
diff --git a/compiler-rt/lib/builtins/floatundidf.c b/compiler-rt/lib/builtins/floatundidf.c
index 2ec802cdc134..9743e96ec679 100644
--- a/compiler-rt/lib/builtins/floatundidf.c
+++ b/compiler-rt/lib/builtins/floatundidf.c
@@ -51,50 +51,11 @@ COMPILER_RT_ABI double __floatundidf(du_int a) {
// flags to set, and we don't want to code-gen to an unknown soft-float
// implementation.
-COMPILER_RT_ABI double __floatundidf(du_int a) {
- if (a == 0)
- return 0.0;
- const unsigned N = sizeof(du_int) * CHAR_BIT;
- int sd = N - __builtin_clzll(a); // number of significant digits
- int e = sd - 1; // exponent
- if (sd > DBL_MANT_DIG) {
- // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
- // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
- // 12345678901234567890123456
- // 1 = msb 1 bit
- // P = bit DBL_MANT_DIG-1 bits to the right of 1
- // Q = bit DBL_MANT_DIG bits to the right of 1
- // R = "or" of all bits to the right of Q
- switch (sd) {
- case DBL_MANT_DIG + 1:
- a <<= 1;
- break;
- case DBL_MANT_DIG + 2:
- break;
- default:
- a = (a >> (sd - (DBL_MANT_DIG + 2))) |
- ((a & ((du_int)(-1) >> ((N + DBL_MANT_DIG + 2) - sd))) != 0);
- };
- // finish:
- a |= (a & 4) != 0; // Or P into R
- ++a; // round - this step may add a significant bit
- a >>= 2; // dump Q and R
- // a is now rounded to DBL_MANT_DIG or DBL_MANT_DIG+1 bits
- if (a & ((du_int)1 << DBL_MANT_DIG)) {
- a >>= 1;
- ++e;
- }
- // a is now rounded to DBL_MANT_DIG bits
- } else {
- a <<= (DBL_MANT_DIG - sd);
- // a is now rounded to DBL_MANT_DIG bits
- }
- double_bits fb;
- fb.u.s.high = ((su_int)(e + 1023) << 20) | // exponent
- ((su_int)(a >> 32) & 0x000FFFFF); // mantissa-high
- fb.u.s.low = (su_int)a; // mantissa-low
- return fb.f;
-}
+#define SRC_U64
+#define DST_DOUBLE
+#include "int_to_fp_impl.inc"
+
+COMPILER_RT_ABI double __floatundidf(du_int a) { return __floatXiYf__(a); }
#endif
#if defined(__ARM_EABI__)
diff --git a/compiler-rt/lib/builtins/floatundisf.c b/compiler-rt/lib/builtins/floatundisf.c
index 2a4157dc5e4b..d4b418efd406 100644
--- a/compiler-rt/lib/builtins/floatundisf.c
+++ b/compiler-rt/lib/builtins/floatundisf.c
@@ -19,49 +19,11 @@
#include "int_lib.h"
-COMPILER_RT_ABI float __floatundisf(du_int a) {
- if (a == 0)
- return 0.0F;
- const unsigned N = sizeof(du_int) * CHAR_BIT;
- int sd = N - __builtin_clzll(a); // number of significant digits
- si_int e = sd - 1; // 8 exponent
- if (sd > FLT_MANT_DIG) {
- // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
- // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
- // 12345678901234567890123456
- // 1 = msb 1 bit
- // P = bit FLT_MANT_DIG-1 bits to the right of 1
- // Q = bit FLT_MANT_DIG bits to the right of 1
- // R = "or" of all bits to the right of Q
- switch (sd) {
- case FLT_MANT_DIG + 1:
- a <<= 1;
- break;
- case FLT_MANT_DIG + 2:
- break;
- default:
- a = (a >> (sd - (FLT_MANT_DIG + 2))) |
- ((a & ((du_int)(-1) >> ((N + FLT_MANT_DIG + 2) - sd))) != 0);
- };
- // finish:
- a |= (a & 4) != 0; // Or P into R
- ++a; // round - this step may add a significant bit
- a >>= 2; // dump Q and R
- // a is now rounded to FLT_MANT_DIG or FLT_MANT_DIG+1 bits
- if (a & ((du_int)1 << FLT_MANT_DIG)) {
- a >>= 1;
- ++e;
- }
- // a is now rounded to FLT_MANT_DIG bits
- } else {
- a <<= (FLT_MANT_DIG - sd);
- // a is now rounded to FLT_MANT_DIG bits
- }
- float_bits fb;
- fb.u = ((e + 127) << 23) | // exponent
- ((su_int)a & 0x007FFFFF); // mantissa
- return fb.f;
-}
+#define SRC_U64
+#define DST_SINGLE
+#include "int_to_fp_impl.inc"
+
+COMPILER_RT_ABI float __floatundisf(du_int a) { return __floatXiYf__(a); }
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
diff --git a/compiler-rt/lib/builtins/floatundixf.c b/compiler-rt/lib/builtins/floatundixf.c
index 85264adac197..3e3c6556d65b 100644
--- a/compiler-rt/lib/builtins/floatundixf.c
+++ b/compiler-rt/lib/builtins/floatundixf.c
@@ -22,13 +22,13 @@
// gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee
// eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
// mmmm mmmm mmmm
-COMPILER_RT_ABI long double __floatundixf(du_int a) {
+COMPILER_RT_ABI xf_float __floatundixf(du_int a) {
if (a == 0)
return 0.0;
const unsigned N = sizeof(du_int) * CHAR_BIT;
int clz = __builtin_clzll(a);
int e = (N - 1) - clz; // exponent
- long_double_bits fb;
+ xf_bits fb;
fb.u.high.s.low = (e + 16383); // exponent
fb.u.low.all = a << clz; // mantissa
return fb.f;
diff --git a/compiler-rt/lib/builtins/floatuntidf.c b/compiler-rt/lib/builtins/floatuntidf.c
index 4dfca8e49309..9abeacc30c3c 100644
--- a/compiler-rt/lib/builtins/floatuntidf.c
+++ b/compiler-rt/lib/builtins/floatuntidf.c
@@ -14,6 +14,10 @@
#ifdef CRT_HAS_128BIT
+#define SRC_U128
+#define DST_DOUBLE
+#include "int_to_fp_impl.inc"
+
// Returns: convert a to a double, rounding toward even.
// Assumption: double is a IEEE 64 bit floating point type
@@ -22,49 +26,6 @@
// seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm
// mmmm
-COMPILER_RT_ABI double __floatuntidf(tu_int a) {
- if (a == 0)
- return 0.0;
- const unsigned N = sizeof(tu_int) * CHAR_BIT;
- int sd = N - __clzti2(a); // number of significant digits
- si_int e = sd - 1; // exponent
- if (sd > DBL_MANT_DIG) {
- // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
- // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
- // 12345678901234567890123456
- // 1 = msb 1 bit
- // P = bit DBL_MANT_DIG-1 bits to the right of 1
- // Q = bit DBL_MANT_DIG bits to the right of 1
- // R = "or" of all bits to the right of Q
- switch (sd) {
- case DBL_MANT_DIG + 1:
- a <<= 1;
- break;
- case DBL_MANT_DIG + 2:
- break;
- default:
- a = (a >> (sd - (DBL_MANT_DIG + 2))) |
- ((a & ((tu_int)(-1) >> ((N + DBL_MANT_DIG + 2) - sd))) != 0);
- };
- // finish:
- a |= (a & 4) != 0; // Or P into R
- ++a; // round - this step may add a significant bit
- a >>= 2; // dump Q and R
- // a is now rounded to DBL_MANT_DIG or DBL_MANT_DIG+1 bits
- if (a & ((tu_int)1 << DBL_MANT_DIG)) {
- a >>= 1;
- ++e;
- }
- // a is now rounded to DBL_MANT_DIG bits
- } else {
- a <<= (DBL_MANT_DIG - sd);
- // a is now rounded to DBL_MANT_DIG bits
- }
- double_bits fb;
- fb.u.s.high = ((e + 1023) << 20) | // exponent
- ((su_int)(a >> 32) & 0x000FFFFF); // mantissa-high
- fb.u.s.low = (su_int)a; // mantissa-low
- return fb.f;
-}
+COMPILER_RT_ABI double __floatuntidf(tu_int a) { return __floatXiYf__(a); }
#endif // CRT_HAS_128BIT
diff --git a/compiler-rt/lib/builtins/floatuntisf.c b/compiler-rt/lib/builtins/floatuntisf.c
index a53659cd1fca..997c1569acd6 100644
--- a/compiler-rt/lib/builtins/floatuntisf.c
+++ b/compiler-rt/lib/builtins/floatuntisf.c
@@ -14,6 +14,10 @@
#ifdef CRT_HAS_128BIT
+#define SRC_U128
+#define DST_SINGLE
+#include "int_to_fp_impl.inc"
+
// Returns: convert a to a float, rounding toward even.
// Assumption: float is a IEEE 32 bit floating point type
@@ -21,48 +25,6 @@
// seee eeee emmm mmmm mmmm mmmm mmmm mmmm
-COMPILER_RT_ABI float __floatuntisf(tu_int a) {
- if (a == 0)
- return 0.0F;
- const unsigned N = sizeof(tu_int) * CHAR_BIT;
- int sd = N - __clzti2(a); // number of significant digits
- si_int e = sd - 1; // exponent
- if (sd > FLT_MANT_DIG) {
- // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
- // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
- // 12345678901234567890123456
- // 1 = msb 1 bit
- // P = bit FLT_MANT_DIG-1 bits to the right of 1
- // Q = bit FLT_MANT_DIG bits to the right of 1
- // R = "or" of all bits to the right of Q
- switch (sd) {
- case FLT_MANT_DIG + 1:
- a <<= 1;
- break;
- case FLT_MANT_DIG + 2:
- break;
- default:
- a = (a >> (sd - (FLT_MANT_DIG + 2))) |
- ((a & ((tu_int)(-1) >> ((N + FLT_MANT_DIG + 2) - sd))) != 0);
- };
- // finish:
- a |= (a & 4) != 0; // Or P into R
- ++a; // round - this step may add a significant bit
- a >>= 2; // dump Q and R
- // a is now rounded to FLT_MANT_DIG or FLT_MANT_DIG+1 bits
- if (a & ((tu_int)1 << FLT_MANT_DIG)) {
- a >>= 1;
- ++e;
- }
- // a is now rounded to FLT_MANT_DIG bits
- } else {
- a <<= (FLT_MANT_DIG - sd);
- // a is now rounded to FLT_MANT_DIG bits
- }
- float_bits fb;
- fb.u = ((e + 127) << 23) | // exponent
- ((su_int)a & 0x007FFFFF); // mantissa
- return fb.f;
-}
+COMPILER_RT_ABI float __floatuntisf(tu_int a) { return __floatXiYf__(a); }
#endif // CRT_HAS_128BIT
diff --git a/compiler-rt/lib/builtins/floatuntitf.c b/compiler-rt/lib/builtins/floatuntitf.c
index 33a81b34eeb1..1c5998a40b9f 100644
--- a/compiler-rt/lib/builtins/floatuntitf.c
+++ b/compiler-rt/lib/builtins/floatuntitf.c
@@ -16,6 +16,11 @@
#include "fp_lib.h"
#include "int_lib.h"
+#if defined(CRT_HAS_TF_MODE)
+#define SRC_U128
+#define DST_QUAD
+#include "int_to_fp_impl.inc"
+
// Returns: convert a tu_int to a fp_t, rounding toward even.
// Assumption: fp_t is a IEEE 128 bit floating point type
@@ -25,51 +30,6 @@
// mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
// mmmm mmmm mmmm
-#if defined(CRT_HAS_TF_MODE)
-COMPILER_RT_ABI fp_t __floatuntitf(tu_int a) {
- if (a == 0)
- return 0.0;
- const unsigned N = sizeof(tu_int) * CHAR_BIT;
- int sd = N - __clzti2(a); // number of significant digits
- int e = sd - 1; // exponent
- if (sd > TF_MANT_DIG) {
- // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
- // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
- // 12345678901234567890123456
- // 1 = msb 1 bit
- // P = bit TF_MANT_DIG-1 bits to the right of 1
- // Q = bit TF_MANT_DIG bits to the right of 1
- // R = "or" of all bits to the right of Q
- switch (sd) {
- case TF_MANT_DIG + 1:
- a <<= 1;
- break;
- case TF_MANT_DIG + 2:
- break;
- default:
- a = (a >> (sd - (TF_MANT_DIG + 2))) |
- ((a & ((tu_int)(-1) >> ((N + TF_MANT_DIG + 2) - sd))) != 0);
- };
- // finish:
- a |= (a & 4) != 0; // Or P into R
- ++a; // round - this step may add a significant bit
- a >>= 2; // dump Q and R
- // a is now rounded to TF_MANT_DIG or TF_MANT_DIG+1 bits
- if (a & ((tu_int)1 << TF_MANT_DIG)) {
- a >>= 1;
- ++e;
- }
- // a is now rounded to TF_MANT_DIG bits
- } else {
- a <<= (TF_MANT_DIG - sd);
- // a is now rounded to TF_MANT_DIG bits
- }
-
- long_double_bits fb;
- fb.u.high.all = (du_int)(e + 16383) << 48 // exponent
- | ((a >> 64) & 0x0000ffffffffffffLL); // significand
- fb.u.low.all = (du_int)(a);
- return fb.f;
-}
+COMPILER_RT_ABI fp_t __floatuntitf(tu_int a) { return __floatXiYf__(a); }
#endif
diff --git a/compiler-rt/lib/builtins/floatuntixf.c b/compiler-rt/lib/builtins/floatuntixf.c
index efd8a27a0875..4c53775229ea 100644
--- a/compiler-rt/lib/builtins/floatuntixf.c
+++ b/compiler-rt/lib/builtins/floatuntixf.c
@@ -23,7 +23,7 @@
// eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
// mmmm mmmm mmmm
-COMPILER_RT_ABI long double __floatuntixf(tu_int a) {
+COMPILER_RT_ABI xf_float __floatuntixf(tu_int a) {
if (a == 0)
return 0.0;
const unsigned N = sizeof(tu_int) * CHAR_BIT;
@@ -61,7 +61,7 @@ COMPILER_RT_ABI long double __floatuntixf(tu_int a) {
a <<= (LDBL_MANT_DIG - sd);
// a is now rounded to LDBL_MANT_DIG bits
}
- long_double_bits fb;
+ xf_bits fb;
fb.u.high.s.low = (e + 16383); // exponent
fb.u.low.all = (du_int)a; // mantissa
return fb.f;
diff --git a/compiler-rt/lib/builtins/fp_extend.h b/compiler-rt/lib/builtins/fp_extend.h
index eee4722bf90e..95ea2a7ac4b2 100644
--- a/compiler-rt/lib/builtins/fp_extend.h
+++ b/compiler-rt/lib/builtins/fp_extend.h
@@ -20,15 +20,24 @@
typedef float src_t;
typedef uint32_t src_rep_t;
#define SRC_REP_C UINT32_C
-static const int srcSigBits = 23;
+static const int srcBits = sizeof(src_t) * CHAR_BIT;
+static const int srcSigFracBits = 23;
+// -1 accounts for the sign bit.
+// srcBits - srcSigFracBits - 1
+static const int srcExpBits = 8;
#define src_rep_t_clz clzsi
#elif defined SRC_DOUBLE
typedef double src_t;
typedef uint64_t src_rep_t;
#define SRC_REP_C UINT64_C
-static const int srcSigBits = 52;
-static __inline int src_rep_t_clz(src_rep_t a) {
+static const int srcBits = sizeof(src_t) * CHAR_BIT;
+static const int srcSigFracBits = 52;
+// -1 accounts for the sign bit.
+// srcBits - srcSigFracBits - 1
+static const int srcExpBits = 11;
+
+static inline int src_rep_t_clz_impl(src_rep_t a) {
#if defined __LP64__
return __builtin_clzl(a);
#else
@@ -38,6 +47,19 @@ static __inline int src_rep_t_clz(src_rep_t a) {
return 32 + clzsi(a & REP_C(0xffffffff));
#endif
}
+#define src_rep_t_clz src_rep_t_clz_impl
+
+#elif defined SRC_80
+typedef xf_float src_t;
+typedef __uint128_t src_rep_t;
+#define SRC_REP_C (__uint128_t)
+// sign bit, exponent and significand occupy the lower 80 bits.
+static const int srcBits = 80;
+static const int srcSigFracBits = 63;
+// -1 accounts for the sign bit.
+// -1 accounts for the explicitly stored integer bit.
+// srcBits - srcSigFracBits - 1 - 1
+static const int srcExpBits = 15;
#elif defined SRC_HALF
#ifdef COMPILER_RT_HAS_FLOAT16
@@ -47,8 +69,17 @@ typedef uint16_t src_t;
#endif
typedef uint16_t src_rep_t;
#define SRC_REP_C UINT16_C
-static const int srcSigBits = 10;
-#define src_rep_t_clz __builtin_clz
+static const int srcBits = sizeof(src_t) * CHAR_BIT;
+static const int srcSigFracBits = 10;
+// -1 accounts for the sign bit.
+// srcBits - srcSigFracBits - 1
+static const int srcExpBits = 5;
+
+static inline int src_rep_t_clz_impl(src_rep_t a) {
+ return __builtin_clz(a) - 16;
+}
+
+#define src_rep_t_clz src_rep_t_clz_impl
#else
#error Source should be half, single, or double precision!
@@ -58,28 +89,72 @@ static const int srcSigBits = 10;
typedef float dst_t;
typedef uint32_t dst_rep_t;
#define DST_REP_C UINT32_C
-static const int dstSigBits = 23;
+static const int dstBits = sizeof(dst_t) * CHAR_BIT;
+static const int dstSigFracBits = 23;
+// -1 accounts for the sign bit.
+// dstBits - dstSigFracBits - 1
+static const int dstExpBits = 8;
#elif defined DST_DOUBLE
typedef double dst_t;
typedef uint64_t dst_rep_t;
#define DST_REP_C UINT64_C
-static const int dstSigBits = 52;
+static const int dstBits = sizeof(dst_t) * CHAR_BIT;
+static const int dstSigFracBits = 52;
+// -1 accounts for the sign bit.
+// dstBits - dstSigFracBits - 1
+static const int dstExpBits = 11;
#elif defined DST_QUAD
-typedef long double dst_t;
+typedef tf_float dst_t;
typedef __uint128_t dst_rep_t;
#define DST_REP_C (__uint128_t)
-static const int dstSigBits = 112;
+static const int dstBits = sizeof(dst_t) * CHAR_BIT;
+static const int dstSigFracBits = 112;
+// -1 accounts for the sign bit.
+// dstBits - dstSigFracBits - 1
+static const int dstExpBits = 15;
#else
#error Destination should be single, double, or quad precision!
#endif // end destination precision
-// End of specialization parameters. Two helper routines for conversion to and
-// from the representation of floating-point data as integer values follow.
+// End of specialization parameters.
+
+// TODO: These helper routines should be placed into fp_lib.h
+// Currently they depend on macros/constants defined above.
+
+static inline src_rep_t extract_sign_from_src(src_rep_t x) {
+ const src_rep_t srcSignMask = SRC_REP_C(1) << (srcBits - 1);
+ return (x & srcSignMask) >> (srcBits - 1);
+}
+
+static inline src_rep_t extract_exp_from_src(src_rep_t x) {
+ const int srcSigBits = srcBits - 1 - srcExpBits;
+ const src_rep_t srcExpMask = ((SRC_REP_C(1) << srcExpBits) - 1) << srcSigBits;
+ return (x & srcExpMask) >> srcSigBits;
+}
+
+static inline src_rep_t extract_sig_frac_from_src(src_rep_t x) {
+ const src_rep_t srcSigFracMask = (SRC_REP_C(1) << srcSigFracBits) - 1;
+ return x & srcSigFracMask;
+}
+
+#ifdef src_rep_t_clz
+static inline int clz_in_sig_frac(src_rep_t sigFrac) {
+ const int skip = 1 + srcExpBits;
+ return src_rep_t_clz(sigFrac) - skip;
+}
+#endif
+
+static inline dst_rep_t construct_dst_rep(dst_rep_t sign, dst_rep_t exp, dst_rep_t sigFrac) {
+ return (sign << (dstBits - 1)) | (exp << (dstBits - 1 - dstExpBits)) | sigFrac;
+}
+
+// Two helper routines for conversion to and from the representation of
+// floating-point data as integer values follow.
-static __inline src_rep_t srcToRep(src_t x) {
+static inline src_rep_t srcToRep(src_t x) {
const union {
src_t f;
src_rep_t i;
@@ -87,7 +162,7 @@ static __inline src_rep_t srcToRep(src_t x) {
return rep.i;
}
-static __inline dst_t dstFromRep(dst_rep_t x) {
+static inline dst_t dstFromRep(dst_rep_t x) {
const union {
dst_t f;
dst_rep_t i;
diff --git a/compiler-rt/lib/builtins/fp_extend_impl.inc b/compiler-rt/lib/builtins/fp_extend_impl.inc
index d1c9c02a00c5..e16b55d150d2 100644
--- a/compiler-rt/lib/builtins/fp_extend_impl.inc
+++ b/compiler-rt/lib/builtins/fp_extend_impl.inc
@@ -37,71 +37,72 @@
#include "fp_extend.h"
+// The source type may use a usual IEEE-754 interchange format or Intel 80-bit
+// format. In particular, for the source type srcSigFracBits may be not equal to
+// srcSigBits. The destination type is assumed to be one of IEEE-754 standard
+// types.
static __inline dst_t __extendXfYf2__(src_t a) {
// Various constants whose values follow from the type parameters.
// Any reasonable optimizer will fold and propagate all of these.
- const int srcBits = sizeof(src_t) * CHAR_BIT;
- const int srcExpBits = srcBits - srcSigBits - 1;
const int srcInfExp = (1 << srcExpBits) - 1;
const int srcExpBias = srcInfExp >> 1;
- const src_rep_t srcMinNormal = SRC_REP_C(1) << srcSigBits;
- const src_rep_t srcInfinity = (src_rep_t)srcInfExp << srcSigBits;
- const src_rep_t srcSignMask = SRC_REP_C(1) << (srcSigBits + srcExpBits);
- const src_rep_t srcAbsMask = srcSignMask - 1;
- const src_rep_t srcQNaN = SRC_REP_C(1) << (srcSigBits - 1);
- const src_rep_t srcNaNCode = srcQNaN - 1;
-
- const int dstBits = sizeof(dst_t) * CHAR_BIT;
- const int dstExpBits = dstBits - dstSigBits - 1;
const int dstInfExp = (1 << dstExpBits) - 1;
const int dstExpBias = dstInfExp >> 1;
- const dst_rep_t dstMinNormal = DST_REP_C(1) << dstSigBits;
-
// Break a into a sign and representation of the absolute value.
const src_rep_t aRep = srcToRep(a);
- const src_rep_t aAbs = aRep & srcAbsMask;
- const src_rep_t sign = aRep & srcSignMask;
- dst_rep_t absResult;
+ const src_rep_t srcSign = extract_sign_from_src(aRep);
+ const src_rep_t srcExp = extract_exp_from_src(aRep);
+ const src_rep_t srcSigFrac = extract_sig_frac_from_src(aRep);
+
+ dst_rep_t dstSign = srcSign;
+ dst_rep_t dstExp;
+ dst_rep_t dstSigFrac;
- // If sizeof(src_rep_t) < sizeof(int), the subtraction result is promoted
- // to (signed) int. To avoid that, explicitly cast to src_rep_t.
- if ((src_rep_t)(aAbs - srcMinNormal) < srcInfinity - srcMinNormal) {
+ if (srcExp >= 1 && srcExp < srcInfExp) {
// a is a normal number.
- // Extend to the destination type by shifting the significand and
- // exponent into the proper position and rebiasing the exponent.
- absResult = (dst_rep_t)aAbs << (dstSigBits - srcSigBits);
- absResult += (dst_rep_t)(dstExpBias - srcExpBias) << dstSigBits;
+ dstExp = (dst_rep_t)srcExp + (dst_rep_t)(dstExpBias - srcExpBias);
+ dstSigFrac = (dst_rep_t)srcSigFrac << (dstSigFracBits - srcSigFracBits);
}
- else if (aAbs >= srcInfinity) {
+ else if (srcExp == srcInfExp) {
// a is NaN or infinity.
- // Conjure the result by beginning with infinity, then setting the qNaN
- // bit (if needed) and right-aligning the rest of the trailing NaN
- // payload field.
- absResult = (dst_rep_t)dstInfExp << dstSigBits;
- absResult |= (dst_rep_t)(aAbs & srcQNaN) << (dstSigBits - srcSigBits);
- absResult |= (dst_rep_t)(aAbs & srcNaNCode) << (dstSigBits - srcSigBits);
+ dstExp = dstInfExp;
+ dstSigFrac = (dst_rep_t)srcSigFrac << (dstSigFracBits - srcSigFracBits);
}
- else if (aAbs) {
+ else if (srcSigFrac) {
// a is denormal.
- // renormalize the significand and clear the leading bit, then insert
- // the correct adjusted exponent in the destination type.
- const int scale = src_rep_t_clz(aAbs) - src_rep_t_clz(srcMinNormal);
- absResult = (dst_rep_t)aAbs << (dstSigBits - srcSigBits + scale);
- absResult ^= dstMinNormal;
- const int resultExponent = dstExpBias - srcExpBias - scale + 1;
- absResult |= (dst_rep_t)resultExponent << dstSigBits;
+ if (srcExpBits == dstExpBits) {
+ // The exponent fields are identical and this is a denormal number, so all
+ // the non-significand bits are zero. In particular, this branch is always
+ // taken when we extend a denormal F80 to F128.
+ dstExp = 0;
+ dstSigFrac = ((dst_rep_t)srcSigFrac) << (dstSigFracBits - srcSigFracBits);
+ } else {
+#ifndef src_rep_t_clz
+ // If src_rep_t_clz is not defined this branch must be unreachable.
+ __builtin_unreachable();
+#else
+ // Renormalize the significand and clear the leading bit.
+ // For F80 -> F128 this codepath is unused.
+ const int scale = clz_in_sig_frac(srcSigFrac) + 1;
+ dstExp = dstExpBias - srcExpBias - scale + 1;
+ dstSigFrac = (dst_rep_t)srcSigFrac
+ << (dstSigFracBits - srcSigFracBits + scale);
+ const dst_rep_t dstMinNormal = DST_REP_C(1) << (dstBits - 1 - dstExpBits);
+ dstSigFrac ^= dstMinNormal;
+#endif
+ }
}
else {
// a is zero.
- absResult = 0;
+ dstExp = 0;
+ dstSigFrac = 0;
}
- // Apply the signbit to the absolute value.
- const dst_rep_t result = absResult | (dst_rep_t)sign << (dstBits - srcBits);
+ const dst_rep_t result = construct_dst_rep(dstSign, dstExp, dstSigFrac);
return dstFromRep(result);
}
diff --git a/compiler-rt/lib/builtins/fp_fixint_impl.inc b/compiler-rt/lib/builtins/fp_fixint_impl.inc
index 2196d712f05f..3556bad9990b 100644
--- a/compiler-rt/lib/builtins/fp_fixint_impl.inc
+++ b/compiler-rt/lib/builtins/fp_fixint_impl.inc
@@ -36,5 +36,5 @@ static __inline fixint_t __fixint(fp_t a) {
if (exponent < significandBits)
return sign * (significand >> (significandBits - exponent));
else
- return sign * ((fixint_t)significand << (exponent - significandBits));
+ return sign * ((fixuint_t)significand << (exponent - significandBits));
}
diff --git a/compiler-rt/lib/builtins/fp_lib.h b/compiler-rt/lib/builtins/fp_lib.h
index 58eb45fcc729..43bbdd5f8736 100644
--- a/compiler-rt/lib/builtins/fp_lib.h
+++ b/compiler-rt/lib/builtins/fp_lib.h
@@ -105,18 +105,11 @@ static __inline void wideMultiply(rep_t a, rep_t b, rep_t *hi, rep_t *lo) {
COMPILER_RT_ABI fp_t __adddf3(fp_t a, fp_t b);
#elif defined QUAD_PRECISION
-#if __LDBL_MANT_DIG__ == 113 && defined(__SIZEOF_INT128__)
-// TODO: Availability of the *tf functions should not depend on long double
-// being IEEE 128, but instead on being able to use a 128-bit floating-point
-// type, which includes __float128.
-// Right now this (incorrectly) stops the builtins from being used for x86.
-#define CRT_LDBL_128BIT
-#define CRT_HAS_TF_MODE
-#define TF_C(c) c##L
+#if defined(CRT_HAS_TF_MODE)
typedef uint64_t half_rep_t;
typedef __uint128_t rep_t;
typedef __int128_t srep_t;
-typedef long double fp_t;
+typedef tf_float fp_t;
#define HALF_REP_C UINT64_C
#define REP_C (__uint128_t)
// Note: Since there is no explicit way to tell compiler the constant is a
@@ -207,13 +200,13 @@ static __inline void wideMultiply(rep_t a, rep_t b, rep_t *hi, rep_t *lo) {
#undef Word_HiMask
#undef Word_LoMask
#undef Word_FullMask
-#endif // __LDBL_MANT_DIG__ == 113 && __SIZEOF_INT128__
+#endif // defined(CRT_HAS_TF_MODE)
#else
#error SINGLE_PRECISION, DOUBLE_PRECISION or QUAD_PRECISION must be defined.
#endif
#if defined(SINGLE_PRECISION) || defined(DOUBLE_PRECISION) || \
- defined(CRT_LDBL_128BIT)
+ (defined(QUAD_PRECISION) && defined(CRT_HAS_TF_MODE))
#define typeWidth (sizeof(rep_t) * CHAR_BIT)
#define exponentBits (typeWidth - significandBits - 1)
#define maxExponent ((1 << exponentBits) - 1)
@@ -393,31 +386,40 @@ static __inline fp_t __compiler_rt_fmax(fp_t x, fp_t y) {
#endif
}
-#elif defined(QUAD_PRECISION)
-
-#if defined(CRT_LDBL_128BIT)
-static __inline fp_t __compiler_rt_logbl(fp_t x) {
+#elif defined(QUAD_PRECISION) && defined(CRT_HAS_TF_MODE)
+// The generic implementation only works for ieee754 floating point. For other
+// floating point types, continue to rely on the libm implementation for now.
+#if defined(CRT_HAS_IEEE_TF)
+static __inline tf_float __compiler_rt_logbtf(tf_float x) {
return __compiler_rt_logbX(x);
}
-static __inline fp_t __compiler_rt_scalbnl(fp_t x, int y) {
+static __inline tf_float __compiler_rt_scalbntf(tf_float x, int y) {
return __compiler_rt_scalbnX(x, y);
}
-static __inline fp_t __compiler_rt_fmaxl(fp_t x, fp_t y) {
+static __inline tf_float __compiler_rt_fmaxtf(tf_float x, tf_float y) {
return __compiler_rt_fmaxX(x, y);
}
-#else
-// The generic implementation only works for ieee754 floating point. For other
-// floating point types, continue to rely on the libm implementation for now.
-static __inline long double __compiler_rt_logbl(long double x) {
+#define __compiler_rt_logbl __compiler_rt_logbtf
+#define __compiler_rt_scalbnl __compiler_rt_scalbntf
+#define __compiler_rt_fmaxl __compiler_rt_fmaxtf
+#define crt_fabstf crt_fabsf128
+#define crt_copysigntf crt_copysignf128
+#elif defined(CRT_LDBL_128BIT)
+static __inline tf_float __compiler_rt_logbtf(tf_float x) {
return crt_logbl(x);
}
-static __inline long double __compiler_rt_scalbnl(long double x, int y) {
+static __inline tf_float __compiler_rt_scalbntf(tf_float x, int y) {
return crt_scalbnl(x, y);
}
-static __inline long double __compiler_rt_fmaxl(long double x, long double y) {
+static __inline tf_float __compiler_rt_fmaxtf(tf_float x, tf_float y) {
return crt_fmaxl(x, y);
}
-#endif // CRT_LDBL_128BIT
+#define __compiler_rt_logbl crt_logbl
+#define __compiler_rt_scalbnl crt_scalbnl
+#define __compiler_rt_fmaxl crt_fmaxl
+#else
+#error Unsupported TF mode type
+#endif
#endif // *_PRECISION
diff --git a/compiler-rt/lib/builtins/fp_trunc.h b/compiler-rt/lib/builtins/fp_trunc.h
index 91f614528ab3..141fe63e132d 100644
--- a/compiler-rt/lib/builtins/fp_trunc.h
+++ b/compiler-rt/lib/builtins/fp_trunc.h
@@ -19,19 +19,31 @@
typedef float src_t;
typedef uint32_t src_rep_t;
#define SRC_REP_C UINT32_C
-static const int srcSigBits = 23;
+static const int srcBits = sizeof(src_t) * CHAR_BIT;
+static const int srcSigFracBits = 23;
+// -1 accounts for the sign bit.
+// srcBits - srcSigFracBits - 1
+static const int srcExpBits = 8;
#elif defined SRC_DOUBLE
typedef double src_t;
typedef uint64_t src_rep_t;
#define SRC_REP_C UINT64_C
-static const int srcSigBits = 52;
+static const int srcBits = sizeof(src_t) * CHAR_BIT;
+static const int srcSigFracBits = 52;
+// -1 accounts for the sign bit.
+// srcBits - srcSigFracBits - 1
+static const int srcExpBits = 11;
#elif defined SRC_QUAD
-typedef long double src_t;
+typedef tf_float src_t;
typedef __uint128_t src_rep_t;
#define SRC_REP_C (__uint128_t)
-static const int srcSigBits = 112;
+static const int srcBits = sizeof(src_t) * CHAR_BIT;
+static const int srcSigFracBits = 112;
+// -1 accounts for the sign bit.
+// srcBits - srcSigFracBits - 1
+static const int srcExpBits = 15;
#else
#error Source should be double precision or quad precision!
@@ -41,13 +53,32 @@ static const int srcSigBits = 112;
typedef double dst_t;
typedef uint64_t dst_rep_t;
#define DST_REP_C UINT64_C
-static const int dstSigBits = 52;
+static const int dstBits = sizeof(dst_t) * CHAR_BIT;
+static const int dstSigFracBits = 52;
+// -1 accounts for the sign bit.
+// dstBits - dstSigFracBits - 1
+static const int dstExpBits = 11;
+
+#elif defined DST_80
+typedef xf_float dst_t;
+typedef __uint128_t dst_rep_t;
+#define DST_REP_C (__uint128_t)
+static const int dstBits = 80;
+static const int dstSigFracBits = 63;
+// -1 accounts for the sign bit.
+// -1 accounts for the explicitly stored integer bit.
+// dstBits - dstSigFracBits - 1 - 1
+static const int dstExpBits = 15;
#elif defined DST_SINGLE
typedef float dst_t;
typedef uint32_t dst_rep_t;
#define DST_REP_C UINT32_C
-static const int dstSigBits = 23;
+static const int dstBits = sizeof(dst_t) * CHAR_BIT;
+static const int dstSigFracBits = 23;
+// -1 accounts for the sign bit.
+// dstBits - dstSigFracBits - 1
+static const int dstExpBits = 8;
#elif defined DST_HALF
#ifdef COMPILER_RT_HAS_FLOAT16
@@ -57,22 +88,58 @@ typedef uint16_t dst_t;
#endif
typedef uint16_t dst_rep_t;
#define DST_REP_C UINT16_C
-static const int dstSigBits = 10;
+static const int dstBits = sizeof(dst_t) * CHAR_BIT;
+static const int dstSigFracBits = 10;
+// -1 accounts for the sign bit.
+// dstBits - dstSigFracBits - 1
+static const int dstExpBits = 5;
#elif defined DST_BFLOAT
typedef __bf16 dst_t;
typedef uint16_t dst_rep_t;
#define DST_REP_C UINT16_C
-static const int dstSigBits = 7;
+static const int dstBits = sizeof(dst_t) * CHAR_BIT;
+static const int dstSigFracBits = 7;
+// -1 accounts for the sign bit.
+// dstBits - dstSigFracBits - 1
+static const int dstExpBits = 8;
#else
#error Destination should be single precision or double precision!
#endif // end destination precision
+// TODO: These helper routines should be placed into fp_lib.h
+// Currently they depend on macros/constants defined above.
+
+static inline src_rep_t extract_sign_from_src(src_rep_t x) {
+ const src_rep_t srcSignMask = SRC_REP_C(1) << (srcBits - 1);
+ return (x & srcSignMask) >> (srcBits - 1);
+}
+
+static inline src_rep_t extract_exp_from_src(src_rep_t x) {
+ const int srcSigBits = srcBits - 1 - srcExpBits;
+ const src_rep_t srcExpMask = ((SRC_REP_C(1) << srcExpBits) - 1) << srcSigBits;
+ return (x & srcExpMask) >> srcSigBits;
+}
+
+static inline src_rep_t extract_sig_frac_from_src(src_rep_t x) {
+ const src_rep_t srcSigFracMask = (SRC_REP_C(1) << srcSigFracBits) - 1;
+ return x & srcSigFracMask;
+}
+
+static inline dst_rep_t construct_dst_rep(dst_rep_t sign, dst_rep_t exp, dst_rep_t sigFrac) {
+ dst_rep_t result = (sign << (dstBits - 1)) | (exp << (dstBits - 1 - dstExpBits)) | sigFrac;
+ // Set the explicit integer bit in F80 if present.
+ if (dstBits == 80 && exp) {
+ result |= (DST_REP_C(1) << dstSigFracBits);
+ }
+ return result;
+}
+
// End of specialization parameters. Two helper routines for conversion to and
// from the representation of floating-point data as integer values follow.
-static __inline src_rep_t srcToRep(src_t x) {
+static inline src_rep_t srcToRep(src_t x) {
const union {
src_t f;
src_rep_t i;
@@ -80,7 +147,7 @@ static __inline src_rep_t srcToRep(src_t x) {
return rep.i;
}
-static __inline dst_t dstFromRep(dst_rep_t x) {
+static inline dst_t dstFromRep(dst_rep_t x) {
const union {
dst_t f;
dst_rep_t i;
diff --git a/compiler-rt/lib/builtins/fp_trunc_impl.inc b/compiler-rt/lib/builtins/fp_trunc_impl.inc
index 6662be7607e7..f68492495697 100644
--- a/compiler-rt/lib/builtins/fp_trunc_impl.inc
+++ b/compiler-rt/lib/builtins/fp_trunc_impl.inc
@@ -38,95 +38,118 @@
#include "fp_trunc.h"
+// The destination type may use a usual IEEE-754 interchange format or Intel
+// 80-bit format. In particular, for the destination type dstSigFracBits may
+// not be equal to dstSigBits. The source type is assumed to be one of IEEE-754
+// standard types.
static __inline dst_t __truncXfYf2__(src_t a) {
// Various constants whose values follow from the type parameters.
// Any reasonable optimizer will fold and propagate all of these.
- const int srcBits = sizeof(src_t) * CHAR_BIT;
- const int srcExpBits = srcBits - srcSigBits - 1;
const int srcInfExp = (1 << srcExpBits) - 1;
const int srcExpBias = srcInfExp >> 1;
- const src_rep_t srcMinNormal = SRC_REP_C(1) << srcSigBits;
- const src_rep_t srcSignificandMask = srcMinNormal - 1;
- const src_rep_t srcInfinity = (src_rep_t)srcInfExp << srcSigBits;
- const src_rep_t srcSignMask = SRC_REP_C(1) << (srcSigBits + srcExpBits);
- const src_rep_t srcAbsMask = srcSignMask - 1;
- const src_rep_t roundMask = (SRC_REP_C(1) << (srcSigBits - dstSigBits)) - 1;
- const src_rep_t halfway = SRC_REP_C(1) << (srcSigBits - dstSigBits - 1);
- const src_rep_t srcQNaN = SRC_REP_C(1) << (srcSigBits - 1);
+ const src_rep_t srcMinNormal = SRC_REP_C(1) << srcSigFracBits;
+ const src_rep_t roundMask =
+ (SRC_REP_C(1) << (srcSigFracBits - dstSigFracBits)) - 1;
+ const src_rep_t halfway = SRC_REP_C(1)
+ << (srcSigFracBits - dstSigFracBits - 1);
+ const src_rep_t srcQNaN = SRC_REP_C(1) << (srcSigFracBits - 1);
const src_rep_t srcNaNCode = srcQNaN - 1;
- const int dstBits = sizeof(dst_t) * CHAR_BIT;
- const int dstExpBits = dstBits - dstSigBits - 1;
const int dstInfExp = (1 << dstExpBits) - 1;
const int dstExpBias = dstInfExp >> 1;
-
- const int underflowExponent = srcExpBias + 1 - dstExpBias;
const int overflowExponent = srcExpBias + dstInfExp - dstExpBias;
- const src_rep_t underflow = (src_rep_t)underflowExponent << srcSigBits;
- const src_rep_t overflow = (src_rep_t)overflowExponent << srcSigBits;
- const dst_rep_t dstQNaN = DST_REP_C(1) << (dstSigBits - 1);
+ const dst_rep_t dstQNaN = DST_REP_C(1) << (dstSigFracBits - 1);
const dst_rep_t dstNaNCode = dstQNaN - 1;
- // Break a into a sign and representation of the absolute value.
const src_rep_t aRep = srcToRep(a);
- const src_rep_t aAbs = aRep & srcAbsMask;
- const src_rep_t sign = aRep & srcSignMask;
- dst_rep_t absResult;
+ const src_rep_t srcSign = extract_sign_from_src(aRep);
+ const src_rep_t srcExp = extract_exp_from_src(aRep);
+ const src_rep_t srcSigFrac = extract_sig_frac_from_src(aRep);
+
+ dst_rep_t dstSign = srcSign;
+ dst_rep_t dstExp;
+ dst_rep_t dstSigFrac;
+
+ // Same size exponents and a's significand tail is 0.
+ // The significand can be truncated and the exponent can be copied over.
+ const int sigFracTailBits = srcSigFracBits - dstSigFracBits;
+ if (srcExpBits == dstExpBits &&
+ ((aRep >> sigFracTailBits) << sigFracTailBits) == aRep) {
+ dstExp = srcExp;
+ dstSigFrac = (dst_rep_t)(srcSigFrac >> sigFracTailBits);
+ return dstFromRep(construct_dst_rep(dstSign, dstExp, dstSigFrac));
+ }
- if (aAbs - underflow < aAbs - overflow) {
+ const int dstExpCandidate = ((int)srcExp - srcExpBias) + dstExpBias;
+ if (dstExpCandidate >= 1 && dstExpCandidate < dstInfExp) {
// The exponent of a is within the range of normal numbers in the
- // destination format. We can convert by simply right-shifting with
+ // destination format. We can convert by simply right-shifting with
// rounding and adjusting the exponent.
- absResult = aAbs >> (srcSigBits - dstSigBits);
- absResult -= (dst_rep_t)(srcExpBias - dstExpBias) << dstSigBits;
+ dstExp = dstExpCandidate;
+ dstSigFrac = (dst_rep_t)(srcSigFrac >> sigFracTailBits);
- const src_rep_t roundBits = aAbs & roundMask;
+ const src_rep_t roundBits = srcSigFrac & roundMask;
// Round to nearest.
if (roundBits > halfway)
- absResult++;
+ dstSigFrac++;
// Tie to even.
else if (roundBits == halfway)
- absResult += absResult & 1;
- } else if (aAbs > srcInfinity) {
+ dstSigFrac += dstSigFrac & 1;
+
+ // Rounding has changed the exponent.
+ if (dstSigFrac >= (DST_REP_C(1) << dstSigFracBits)) {
+ dstExp += 1;
+ dstSigFrac ^= (DST_REP_C(1) << dstSigFracBits);
+ }
+ } else if (srcExp == srcInfExp && srcSigFrac) {
// a is NaN.
// Conjure the result by beginning with infinity, setting the qNaN
// bit and inserting the (truncated) trailing NaN field.
- absResult = (dst_rep_t)dstInfExp << dstSigBits;
- absResult |= dstQNaN;
- absResult |=
- ((aAbs & srcNaNCode) >> (srcSigBits - dstSigBits)) & dstNaNCode;
- } else if (aAbs >= overflow) {
- // a overflows to infinity.
- absResult = (dst_rep_t)dstInfExp << dstSigBits;
+ dstExp = dstInfExp;
+ dstSigFrac = dstQNaN;
+ dstSigFrac |= ((srcSigFrac & srcNaNCode) >> sigFracTailBits) & dstNaNCode;
+ } else if ((int)srcExp >= overflowExponent) {
+ dstExp = dstInfExp;
+ dstSigFrac = 0;
} else {
// a underflows on conversion to the destination type or is an exact
// zero. The result may be a denormal or zero. Extract the exponent
// to get the shift amount for the denormalization.
- const int aExp = aAbs >> srcSigBits;
- const int shift = srcExpBias - dstExpBias - aExp + 1;
+ src_rep_t significand = srcSigFrac;
+ int shift = srcExpBias - dstExpBias - srcExp;
- const src_rep_t significand = (aRep & srcSignificandMask) | srcMinNormal;
+ if (srcExp) {
+ // Set the implicit integer bit if the source is a normal number.
+ significand |= srcMinNormal;
+ shift += 1;
+ }
// Right shift by the denormalization amount with sticky.
- if (shift > srcSigBits) {
- absResult = 0;
+ if (shift > srcSigFracBits) {
+ dstExp = 0;
+ dstSigFrac = 0;
} else {
- const bool sticky = (significand << (srcBits - shift)) != 0;
+ dstExp = 0;
+ const bool sticky = shift && ((significand << (srcBits - shift)) != 0);
src_rep_t denormalizedSignificand = significand >> shift | sticky;
- absResult = denormalizedSignificand >> (srcSigBits - dstSigBits);
+ dstSigFrac = denormalizedSignificand >> sigFracTailBits;
const src_rep_t roundBits = denormalizedSignificand & roundMask;
// Round to nearest
if (roundBits > halfway)
- absResult++;
+ dstSigFrac++;
// Ties to even
else if (roundBits == halfway)
- absResult += absResult & 1;
+ dstSigFrac += dstSigFrac & 1;
+
+ // Rounding has changed the exponent.
+ if (dstSigFrac >= (DST_REP_C(1) << dstSigFracBits)) {
+ dstExp += 1;
+ dstSigFrac ^= (DST_REP_C(1) << dstSigFracBits);
+ }
}
}
- // Apply the signbit to the absolute value.
- const dst_rep_t result = absResult | sign >> (srcBits - dstBits);
- return dstFromRep(result);
+ return dstFromRep(construct_dst_rep(dstSign, dstExp, dstSigFrac));
}
diff --git a/compiler-rt/lib/builtins/i386/chkstk.S b/compiler-rt/lib/builtins/i386/chkstk.S
index f0bea2187457..a84bb0ee3007 100644
--- a/compiler-rt/lib/builtins/i386/chkstk.S
+++ b/compiler-rt/lib/builtins/i386/chkstk.S
@@ -4,19 +4,20 @@
#include "../assembly.h"
-// _chkstk routine
+#ifdef __i386__
+
+// _chkstk (_alloca) routine - probe stack between %esp and (%esp-%eax) in 4k increments,
+// then decrement %esp by %eax. Preserves all registers except %esp and flags.
// This routine is windows specific
// http://msdn.microsoft.com/en-us/library/ms648426.aspx
-#ifdef __i386__
-
.text
.balign 4
-DEFINE_COMPILERRT_FUNCTION(__chkstk_ms)
+DEFINE_COMPILERRT_FUNCTION(_alloca) // _chkstk and _alloca are the same function
+DEFINE_COMPILERRT_FUNCTION(_chkstk)
push %ecx
- push %eax
cmp $0x1000,%eax
- lea 12(%esp),%ecx
+ lea 8(%esp),%ecx // esp before calling this routine -> ecx
jb 1f
2:
sub $0x1000,%ecx
@@ -27,9 +28,14 @@ DEFINE_COMPILERRT_FUNCTION(__chkstk_ms)
1:
sub %eax,%ecx
test %ecx,(%ecx)
- pop %eax
- pop %ecx
+
+ lea 4(%esp),%eax // load pointer to the return address into eax
+ mov %ecx,%esp // install the new top of stack pointer into esp
+ mov -4(%eax),%ecx // restore ecx
+ push (%eax) // push return address onto the stack
+ sub %esp,%eax // restore the original value in eax
ret
-END_COMPILERRT_FUNCTION(__chkstk_ms)
+END_COMPILERRT_FUNCTION(_chkstk)
+END_COMPILERRT_FUNCTION(_alloca)
#endif // __i386__
diff --git a/compiler-rt/lib/builtins/i386/chkstk2.S b/compiler-rt/lib/builtins/i386/chkstk2.S
deleted file mode 100644
index 5d6cbdfa5c99..000000000000
--- a/compiler-rt/lib/builtins/i386/chkstk2.S
+++ /dev/null
@@ -1,41 +0,0 @@
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-
-#include "../assembly.h"
-
-#ifdef __i386__
-
-// _chkstk (_alloca) routine - probe stack between %esp and (%esp-%eax) in 4k increments,
-// then decrement %esp by %eax. Preserves all registers except %esp and flags.
-// This routine is windows specific
-// http://msdn.microsoft.com/en-us/library/ms648426.aspx
-
-.text
-.balign 4
-DEFINE_COMPILERRT_FUNCTION(_alloca) // _chkstk and _alloca are the same function
-DEFINE_COMPILERRT_FUNCTION(__chkstk)
- push %ecx
- cmp $0x1000,%eax
- lea 8(%esp),%ecx // esp before calling this routine -> ecx
- jb 1f
-2:
- sub $0x1000,%ecx
- test %ecx,(%ecx)
- sub $0x1000,%eax
- cmp $0x1000,%eax
- ja 2b
-1:
- sub %eax,%ecx
- test %ecx,(%ecx)
-
- lea 4(%esp),%eax // load pointer to the return address into eax
- mov %ecx,%esp // install the new top of stack pointer into esp
- mov -4(%eax),%ecx // restore ecx
- push (%eax) // push return address onto the stack
- sub %esp,%eax // restore the original value in eax
- ret
-END_COMPILERRT_FUNCTION(__chkstk)
-END_COMPILERRT_FUNCTION(_alloca)
-
-#endif // __i386__
diff --git a/compiler-rt/lib/builtins/i386/floatdixf.S b/compiler-rt/lib/builtins/i386/floatdixf.S
index 19dd0835a9c5..486e3b004fa3 100644
--- a/compiler-rt/lib/builtins/i386/floatdixf.S
+++ b/compiler-rt/lib/builtins/i386/floatdixf.S
@@ -4,7 +4,7 @@
#include "../assembly.h"
-// long double __floatdixf(di_int a);
+// xf_float __floatdixf(di_int a);
#ifdef __i386__
diff --git a/compiler-rt/lib/builtins/i386/floatundixf.S b/compiler-rt/lib/builtins/i386/floatundixf.S
index 30b4d9f4b96c..778c3dc0cc76 100644
--- a/compiler-rt/lib/builtins/i386/floatundixf.S
+++ b/compiler-rt/lib/builtins/i386/floatundixf.S
@@ -4,7 +4,7 @@
#include "../assembly.h"
-// long double __floatundixf(du_int a);16
+// xf_float __floatundixf(du_int a);
#ifdef __i386__
diff --git a/compiler-rt/lib/builtins/int_lib.h b/compiler-rt/lib/builtins/int_lib.h
index fb791ebc42eb..04ea2d910574 100644
--- a/compiler-rt/lib/builtins/int_lib.h
+++ b/compiler-rt/lib/builtins/int_lib.h
@@ -49,7 +49,7 @@
#define SYMBOL_NAME(name) XSTR(__USER_LABEL_PREFIX__) #name
#if defined(__ELF__) || defined(__MINGW32__) || defined(__wasm__) || \
- defined(_AIX)
+ defined(_AIX) || defined(__CYGWIN__)
#define COMPILER_RT_ALIAS(name, aliasname) \
COMPILER_RT_ABI __typeof(name) aliasname __attribute__((__alias__(#name)));
#elif defined(__APPLE__)
diff --git a/compiler-rt/lib/builtins/int_math.h b/compiler-rt/lib/builtins/int_math.h
index 48b9580f5961..74d3e311db5e 100644
--- a/compiler-rt/lib/builtins/int_math.h
+++ b/compiler-rt/lib/builtins/int_math.h
@@ -65,6 +65,11 @@
#define crt_copysign(x, y) __builtin_copysign((x), (y))
#define crt_copysignf(x, y) __builtin_copysignf((x), (y))
#define crt_copysignl(x, y) __builtin_copysignl((x), (y))
+#if __has_builtin(__builtin_copysignf128)
+#define crt_copysignf128(x, y) __builtin_copysignf128((x), (y))
+#elif __has_builtin(__builtin_copysignq) || (defined(__GNUC__) && __GNUC__ >= 7)
+#define crt_copysignf128(x, y) __builtin_copysignq((x), (y))
+#endif
#endif
#if defined(_MSC_VER) && !defined(__clang__)
@@ -75,6 +80,11 @@
#define crt_fabs(x) __builtin_fabs((x))
#define crt_fabsf(x) __builtin_fabsf((x))
#define crt_fabsl(x) __builtin_fabsl((x))
+#if __has_builtin(__builtin_fabsf128)
+#define crt_fabsf128(x) __builtin_fabsf128((x))
+#elif __has_builtin(__builtin_fabsq) || (defined(__GNUC__) && __GNUC__ >= 7)
+#define crt_fabsf128(x) __builtin_fabsq((x))
+#endif
#endif
#if defined(_MSC_VER) && !defined(__clang__)
diff --git a/compiler-rt/lib/builtins/int_to_fp.h b/compiler-rt/lib/builtins/int_to_fp.h
new file mode 100644
index 000000000000..2c1218f1e89c
--- /dev/null
+++ b/compiler-rt/lib/builtins/int_to_fp.h
@@ -0,0 +1,82 @@
+//===-- int_to_fp.h - integer to floating point conversion ----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Set source and destination defines in order to use a correctly
+// parameterised floatXiYf implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef INT_TO_FP_H
+#define INT_TO_FP_H
+
+#include "int_lib.h"
+
+#if defined SRC_I64
+typedef int64_t src_t;
+typedef uint64_t usrc_t;
+static __inline int clzSrcT(usrc_t x) { return __builtin_clzll(x); }
+
+#elif defined SRC_U64
+typedef uint64_t src_t;
+typedef uint64_t usrc_t;
+static __inline int clzSrcT(usrc_t x) { return __builtin_clzll(x); }
+
+#elif defined SRC_I128
+typedef __int128_t src_t;
+typedef __uint128_t usrc_t;
+static __inline int clzSrcT(usrc_t x) { return __clzti2(x); }
+
+#elif defined SRC_U128
+typedef __uint128_t src_t;
+typedef __uint128_t usrc_t;
+static __inline int clzSrcT(usrc_t x) { return __clzti2(x); }
+
+#else
+#error Source should be a handled integer type.
+#endif
+
+#if defined DST_SINGLE
+typedef float dst_t;
+typedef uint32_t dst_rep_t;
+#define DST_REP_C UINT32_C
+
+enum {
+ dstSigBits = 23,
+};
+
+#elif defined DST_DOUBLE
+typedef double dst_t;
+typedef uint64_t dst_rep_t;
+#define DST_REP_C UINT64_C
+
+enum {
+ dstSigBits = 52,
+};
+
+#elif defined DST_QUAD
+typedef tf_float dst_t;
+typedef __uint128_t dst_rep_t;
+#define DST_REP_C (__uint128_t)
+
+enum {
+ dstSigBits = 112,
+};
+
+#else
+#error Destination should be a handled floating point type
+#endif
+
+static __inline dst_t dstFromRep(dst_rep_t x) {
+ const union {
+ dst_t f;
+ dst_rep_t i;
+ } rep = {.i = x};
+ return rep.f;
+}
+
+#endif // INT_TO_FP_H
diff --git a/compiler-rt/lib/builtins/int_to_fp_impl.inc b/compiler-rt/lib/builtins/int_to_fp_impl.inc
new file mode 100644
index 000000000000..51f76fd76d80
--- /dev/null
+++ b/compiler-rt/lib/builtins/int_to_fp_impl.inc
@@ -0,0 +1,72 @@
+//===-- int_to_fp_impl.inc - integer to floating point conversion ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a generic conversion from an integer type to an
+// IEEE-754 floating point type, allowing a common implementation to be shared
+// without copy and paste.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_to_fp.h"
+
+static __inline dst_t __floatXiYf__(src_t a) {
+ if (a == 0)
+ return 0.0;
+
+ enum {
+ dstMantDig = dstSigBits + 1,
+ srcBits = sizeof(src_t) * CHAR_BIT,
+ srcIsSigned = ((src_t)-1) < 0,
+ };
+
+ const src_t s = srcIsSigned ? a >> (srcBits - 1) : 0;
+
+ a = (usrc_t)(a ^ s) - s;
+ int sd = srcBits - clzSrcT(a); // number of significant digits
+ int e = sd - 1; // exponent
+ if (sd > dstMantDig) {
+ // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
+ // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+ // 12345678901234567890123456
+ // 1 = msb 1 bit
+ // P = bit dstMantDig-1 bits to the right of 1
+ // Q = bit dstMantDig bits to the right of 1
+ // R = "or" of all bits to the right of Q
+ if (sd == dstMantDig + 1) {
+ a <<= 1;
+ } else if (sd == dstMantDig + 2) {
+ // Do nothing.
+ } else {
+ a = ((usrc_t)a >> (sd - (dstMantDig + 2))) |
+ ((a & ((usrc_t)(-1) >> ((srcBits + dstMantDig + 2) - sd))) != 0);
+ }
+ // finish:
+ a |= (a & 4) != 0; // Or P into R
+ ++a; // round - this step may add a significant bit
+ a >>= 2; // dump Q and R
+ // a is now rounded to dstMantDig or dstMantDig+1 bits
+ if (a & ((usrc_t)1 << dstMantDig)) {
+ a >>= 1;
+ ++e;
+ }
+ // a is now rounded to dstMantDig bits
+ } else {
+ a <<= (dstMantDig - sd);
+ // a is now rounded to dstMantDig bits
+ }
+ const int dstBits = sizeof(dst_t) * CHAR_BIT;
+ const dst_rep_t dstSignMask = DST_REP_C(1) << (dstBits - 1);
+ const int dstExpBits = dstBits - dstSigBits - 1;
+ const int dstExpBias = (1 << (dstExpBits - 1)) - 1;
+ const dst_rep_t dstSignificandMask = (DST_REP_C(1) << dstSigBits) - 1;
+ // Combine sign, exponent, and mantissa.
+ const dst_rep_t result = ((dst_rep_t)s & dstSignMask) |
+ ((dst_rep_t)(e + dstExpBias) << dstSigBits) |
+ ((dst_rep_t)(a) & dstSignificandMask);
+ return dstFromRep(result);
+}
diff --git a/compiler-rt/lib/builtins/int_types.h b/compiler-rt/lib/builtins/int_types.h
index e94d3154c6d4..18bf0a7f3bf9 100644
--- a/compiler-rt/lib/builtins/int_types.h
+++ b/compiler-rt/lib/builtins/int_types.h
@@ -165,16 +165,80 @@ typedef struct {
#define HAS_80_BIT_LONG_DOUBLE 0
#endif
-#if CRT_HAS_FLOATING_POINT
+#if HAS_80_BIT_LONG_DOUBLE
+typedef long double xf_float;
typedef union {
uqwords u;
- long double f;
-} long_double_bits;
+ xf_float f;
+} xf_bits;
+#endif
+
+#ifdef __powerpc64__
+// From https://gcc.gnu.org/wiki/Ieee128PowerPC:
+// PowerPC64 uses the following suffixes:
+// IFmode: IBM extended double
+// KFmode: IEEE 128-bit floating point
+// TFmode: Matches the default for long double. With -mabi=ieeelongdouble,
+// it is IEEE 128-bit, with -mabi=ibmlongdouble IBM extended double
+// Since compiler-rt only implements the tf set of libcalls, we use long double
+// for the tf_float typedef.
+typedef long double tf_float;
+#define CRT_LDBL_128BIT
+#define CRT_HAS_F128
+#if __LDBL_MANT_DIG__ == 113 && !defined(__LONG_DOUBLE_IBM128__)
+#define CRT_HAS_IEEE_TF
+#define CRT_LDBL_IEEE_F128
+#endif
+#define TF_C(x) x##L
+#elif __LDBL_MANT_DIG__ == 113
+// Use long double instead of __float128 if it matches the IEEE 128-bit format.
+#define CRT_LDBL_128BIT
+#define CRT_HAS_F128
+#define CRT_HAS_IEEE_TF
+#define CRT_LDBL_IEEE_F128
+typedef long double tf_float;
+#define TF_C(x) x##L
+#elif defined(__FLOAT128__) || defined(__SIZEOF_FLOAT128__)
+#define CRT_HAS___FLOAT128_KEYWORD
+#define CRT_HAS_F128
+// NB: we assume the __float128 type uses IEEE representation.
+#define CRT_HAS_IEEE_TF
+typedef __float128 tf_float;
+#define TF_C(x) x##Q
+#endif
+
+#ifdef CRT_HAS_F128
+typedef union {
+ uqwords u;
+ tf_float f;
+} tf_bits;
+#endif
+// __(u)int128_t is currently needed to compile the *tf builtins as we would
+// otherwise need to manually expand the bit manipulation on two 64-bit value.
+#if defined(CRT_HAS_128BIT) && defined(CRT_HAS_F128)
+#define CRT_HAS_TF_MODE
+#endif
+
+#if CRT_HAS_FLOATING_POINT
#if __STDC_VERSION__ >= 199901L
typedef float _Complex Fcomplex;
typedef double _Complex Dcomplex;
typedef long double _Complex Lcomplex;
+#if defined(CRT_LDBL_128BIT)
+typedef Lcomplex Qcomplex;
+#define CRT_HAS_NATIVE_COMPLEX_F128
+#elif defined(CRT_HAS___FLOAT128_KEYWORD)
+#if defined(__clang_major__) && __clang_major__ > 10
+// Clang prior to 11 did not support __float128 _Complex.
+typedef __float128 _Complex Qcomplex;
+#define CRT_HAS_NATIVE_COMPLEX_F128
+#elif defined(__GNUC__) && __GNUC__ >= 7
+// GCC does not allow __float128 _Complex, but accepts _Float128 _Complex.
+typedef _Float128 _Complex Qcomplex;
+#define CRT_HAS_NATIVE_COMPLEX_F128
+#endif
+#endif
#define COMPLEX_REAL(x) __real__(x)
#define COMPLEX_IMAGINARY(x) __imag__(x)
@@ -194,5 +258,17 @@ typedef struct {
#define COMPLEX_REAL(x) (x).real
#define COMPLEX_IMAGINARY(x) (x).imaginary
#endif
+
+#ifdef CRT_HAS_NATIVE_COMPLEX_F128
+#define COMPLEXTF_REAL(x) __real__(x)
+#define COMPLEXTF_IMAGINARY(x) __imag__(x)
+#elif defined(CRT_HAS_F128)
+typedef struct {
+ tf_float real, imaginary;
+} Qcomplex;
+#define COMPLEXTF_REAL(x) (x).real
+#define COMPLEXTF_IMAGINARY(x) (x).imaginary
+#endif
+
#endif
#endif // INT_TYPES_H
diff --git a/compiler-rt/lib/builtins/multc3.c b/compiler-rt/lib/builtins/multc3.c
index bb7f6aabfe2c..f20e53ccbf23 100644
--- a/compiler-rt/lib/builtins/multc3.c
+++ b/compiler-rt/lib/builtins/multc3.c
@@ -10,56 +10,61 @@
//
//===----------------------------------------------------------------------===//
+#define QUAD_PRECISION
+#include "fp_lib.h"
#include "int_lib.h"
#include "int_math.h"
+#if defined(CRT_HAS_TF_MODE)
+
// Returns: the product of a + ib and c + id
-COMPILER_RT_ABI long double _Complex __multc3(long double a, long double b,
- long double c, long double d) {
- long double ac = a * c;
- long double bd = b * d;
- long double ad = a * d;
- long double bc = b * c;
- long double _Complex z;
- __real__ z = ac - bd;
- __imag__ z = ad + bc;
- if (crt_isnan(__real__ z) && crt_isnan(__imag__ z)) {
+COMPILER_RT_ABI Qcomplex __multc3(fp_t a, fp_t b, fp_t c, fp_t d) {
+ fp_t ac = a * c;
+ fp_t bd = b * d;
+ fp_t ad = a * d;
+ fp_t bc = b * c;
+ Qcomplex z;
+ COMPLEXTF_REAL(z) = ac - bd;
+ COMPLEXTF_IMAGINARY(z) = ad + bc;
+ if (crt_isnan(COMPLEXTF_REAL(z)) && crt_isnan(COMPLEXTF_IMAGINARY(z))) {
int recalc = 0;
if (crt_isinf(a) || crt_isinf(b)) {
- a = crt_copysignl(crt_isinf(a) ? 1 : 0, a);
- b = crt_copysignl(crt_isinf(b) ? 1 : 0, b);
+ a = crt_copysigntf(crt_isinf(a) ? 1 : 0, a);
+ b = crt_copysigntf(crt_isinf(b) ? 1 : 0, b);
if (crt_isnan(c))
- c = crt_copysignl(0, c);
+ c = crt_copysigntf(0, c);
if (crt_isnan(d))
- d = crt_copysignl(0, d);
+ d = crt_copysigntf(0, d);
recalc = 1;
}
if (crt_isinf(c) || crt_isinf(d)) {
- c = crt_copysignl(crt_isinf(c) ? 1 : 0, c);
- d = crt_copysignl(crt_isinf(d) ? 1 : 0, d);
+ c = crt_copysigntf(crt_isinf(c) ? 1 : 0, c);
+ d = crt_copysigntf(crt_isinf(d) ? 1 : 0, d);
if (crt_isnan(a))
- a = crt_copysignl(0, a);
+ a = crt_copysigntf(0, a);
if (crt_isnan(b))
- b = crt_copysignl(0, b);
+ b = crt_copysigntf(0, b);
recalc = 1;
}
if (!recalc &&
(crt_isinf(ac) || crt_isinf(bd) || crt_isinf(ad) || crt_isinf(bc))) {
if (crt_isnan(a))
- a = crt_copysignl(0, a);
+ a = crt_copysigntf(0, a);
if (crt_isnan(b))
- b = crt_copysignl(0, b);
+ b = crt_copysigntf(0, b);
if (crt_isnan(c))
- c = crt_copysignl(0, c);
+ c = crt_copysigntf(0, c);
if (crt_isnan(d))
- d = crt_copysignl(0, d);
+ d = crt_copysigntf(0, d);
recalc = 1;
}
if (recalc) {
- __real__ z = CRT_INFINITY * (a * c - b * d);
- __imag__ z = CRT_INFINITY * (a * d + b * c);
+ COMPLEXTF_REAL(z) = CRT_INFINITY * (a * c - b * d);
+ COMPLEXTF_IMAGINARY(z) = CRT_INFINITY * (a * d + b * c);
}
}
return z;
}
+
+#endif
diff --git a/compiler-rt/lib/builtins/mulxc3.c b/compiler-rt/lib/builtins/mulxc3.c
index 2f7f14c28453..66b5b58190f7 100644
--- a/compiler-rt/lib/builtins/mulxc3.c
+++ b/compiler-rt/lib/builtins/mulxc3.c
@@ -17,12 +17,12 @@
// Returns: the product of a + ib and c + id
-COMPILER_RT_ABI Lcomplex __mulxc3(long double __a, long double __b,
- long double __c, long double __d) {
- long double __ac = __a * __c;
- long double __bd = __b * __d;
- long double __ad = __a * __d;
- long double __bc = __b * __c;
+COMPILER_RT_ABI Lcomplex __mulxc3(xf_float __a, xf_float __b, xf_float __c,
+ xf_float __d) {
+ xf_float __ac = __a * __c;
+ xf_float __bd = __b * __d;
+ xf_float __ad = __a * __d;
+ xf_float __bc = __b * __c;
Lcomplex z;
COMPLEX_REAL(z) = __ac - __bd;
COMPLEX_IMAGINARY(z) = __ad + __bc;
diff --git a/compiler-rt/lib/builtins/negdi2.c b/compiler-rt/lib/builtins/negdi2.c
index 5a525d4b0e55..714ac8ca66d3 100644
--- a/compiler-rt/lib/builtins/negdi2.c
+++ b/compiler-rt/lib/builtins/negdi2.c
@@ -17,5 +17,5 @@
COMPILER_RT_ABI di_int __negdi2(di_int a) {
// Note: this routine is here for API compatibility; any sane compiler
// should expand it inline.
- return -a;
+ return -(du_int)a;
}
diff --git a/compiler-rt/lib/builtins/negti2.c b/compiler-rt/lib/builtins/negti2.c
index d52ba4e13a46..ab6e09ded819 100644
--- a/compiler-rt/lib/builtins/negti2.c
+++ b/compiler-rt/lib/builtins/negti2.c
@@ -19,7 +19,7 @@
COMPILER_RT_ABI ti_int __negti2(ti_int a) {
// Note: this routine is here for API compatibility; any sane compiler
// should expand it inline.
- return -a;
+ return -(tu_int)a;
}
#endif // CRT_HAS_128BIT
diff --git a/compiler-rt/lib/builtins/negvti2.c b/compiler-rt/lib/builtins/negvti2.c
index 8f92e1046d0c..fc1484015a8b 100644
--- a/compiler-rt/lib/builtins/negvti2.c
+++ b/compiler-rt/lib/builtins/negvti2.c
@@ -19,7 +19,7 @@
// Effects: aborts if -a overflows
COMPILER_RT_ABI ti_int __negvti2(ti_int a) {
- const ti_int MIN = (ti_int)1 << ((int)(sizeof(ti_int) * CHAR_BIT) - 1);
+ const ti_int MIN = (tu_int)1 << ((int)(sizeof(ti_int) * CHAR_BIT) - 1);
if (a == MIN)
compilerrt_abort();
return -a;
diff --git a/compiler-rt/lib/builtins/powitf2.c b/compiler-rt/lib/builtins/powitf2.c
index 74fe707a4e8c..e02db40767ac 100644
--- a/compiler-rt/lib/builtins/powitf2.c
+++ b/compiler-rt/lib/builtins/powitf2.c
@@ -17,9 +17,9 @@
// Returns: a ^ b
-COMPILER_RT_ABI long double __powitf2(long double a, int b) {
+COMPILER_RT_ABI fp_t __powitf2(fp_t a, int b) {
const int recip = b < 0;
- long double r = 1;
+ fp_t r = 1;
while (1) {
if (b & 1)
r *= a;
diff --git a/compiler-rt/lib/builtins/powixf2.c b/compiler-rt/lib/builtins/powixf2.c
index 3edfe9fd7af5..ab8c694ada2a 100644
--- a/compiler-rt/lib/builtins/powixf2.c
+++ b/compiler-rt/lib/builtins/powixf2.c
@@ -16,9 +16,9 @@
// Returns: a ^ b
-COMPILER_RT_ABI long double __powixf2(long double a, int b) {
+COMPILER_RT_ABI xf_float __powixf2(xf_float a, int b) {
const int recip = b < 0;
- long double r = 1;
+ xf_float r = 1;
while (1) {
if (b & 1)
r *= a;
diff --git a/compiler-rt/lib/builtins/trunctfdf2.c b/compiler-rt/lib/builtins/trunctfdf2.c
index f0d2e4141f3b..a5bdded53751 100644
--- a/compiler-rt/lib/builtins/trunctfdf2.c
+++ b/compiler-rt/lib/builtins/trunctfdf2.c
@@ -14,6 +14,6 @@
#define DST_DOUBLE
#include "fp_trunc_impl.inc"
-COMPILER_RT_ABI double __trunctfdf2(long double a) { return __truncXfYf2__(a); }
+COMPILER_RT_ABI dst_t __trunctfdf2(src_t a) { return __truncXfYf2__(a); }
#endif
diff --git a/compiler-rt/lib/builtins/trunctfhf2.c b/compiler-rt/lib/builtins/trunctfhf2.c
index f7776327251c..3f031e0f8445 100644
--- a/compiler-rt/lib/builtins/trunctfhf2.c
+++ b/compiler-rt/lib/builtins/trunctfhf2.c
@@ -15,8 +15,6 @@
#define DST_HALF
#include "fp_trunc_impl.inc"
-COMPILER_RT_ABI _Float16 __trunctfhf2(long double a) {
- return __truncXfYf2__(a);
-}
+COMPILER_RT_ABI dst_t __trunctfhf2(src_t a) { return __truncXfYf2__(a); }
#endif
diff --git a/compiler-rt/lib/builtins/trunctfsf2.c b/compiler-rt/lib/builtins/trunctfsf2.c
index 242735f738c1..b65b5af2fc00 100644
--- a/compiler-rt/lib/builtins/trunctfsf2.c
+++ b/compiler-rt/lib/builtins/trunctfsf2.c
@@ -14,6 +14,6 @@
#define DST_SINGLE
#include "fp_trunc_impl.inc"
-COMPILER_RT_ABI float __trunctfsf2(long double a) { return __truncXfYf2__(a); }
+COMPILER_RT_ABI dst_t __trunctfsf2(src_t a) { return __truncXfYf2__(a); }
#endif
diff --git a/compiler-rt/lib/builtins/trunctfxf2.c b/compiler-rt/lib/builtins/trunctfxf2.c
new file mode 100644
index 000000000000..49bd32d42aac
--- /dev/null
+++ b/compiler-rt/lib/builtins/trunctfxf2.c
@@ -0,0 +1,23 @@
+//===-- lib/trunctfxf2.c - quad -> 80-bit extended conversion -----*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Assumption: long double is an IEEE 80-bit floating point type padded to 128
+// bits.
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+
+#if defined(CRT_HAS_TF_MODE) && __LDBL_MANT_DIG__ == 64 && defined(__x86_64__)
+
+#define SRC_QUAD
+#define DST_80
+#include "fp_trunc_impl.inc"
+
+COMPILER_RT_ABI xf_float __trunctfxf2(tf_float a) { return __truncXfYf2__(a); }
+
+#endif
diff --git a/compiler-rt/lib/builtins/x86_64/chkstk.S b/compiler-rt/lib/builtins/x86_64/chkstk.S
index ad7953a116ac..494ee261193b 100644
--- a/compiler-rt/lib/builtins/x86_64/chkstk.S
+++ b/compiler-rt/lib/builtins/x86_64/chkstk.S
@@ -18,6 +18,7 @@
.text
.balign 4
DEFINE_COMPILERRT_FUNCTION(___chkstk_ms)
+DEFINE_COMPILERRT_FUNCTION(__chkstk)
push %rcx
push %rax
cmp $0x1000,%rax
@@ -35,6 +36,7 @@ DEFINE_COMPILERRT_FUNCTION(___chkstk_ms)
pop %rax
pop %rcx
ret
+END_COMPILERRT_FUNCTION(__chkstk)
END_COMPILERRT_FUNCTION(___chkstk_ms)
#endif // __x86_64__
diff --git a/compiler-rt/lib/builtins/x86_64/chkstk2.S b/compiler-rt/lib/builtins/x86_64/chkstk2.S
deleted file mode 100644
index 33d10d5b63be..000000000000
--- a/compiler-rt/lib/builtins/x86_64/chkstk2.S
+++ /dev/null
@@ -1,43 +0,0 @@
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-
-#include "../assembly.h"
-
-#ifdef __x86_64__
-
-// _chkstk (_alloca) routine - probe stack between %rsp and (%rsp-%rax) in 4k increments,
-// then decrement %rsp by %rax. Preserves all registers except %rsp and flags.
-// This routine is windows specific
-// http://msdn.microsoft.com/en-us/library/ms648426.aspx
-
-.text
-.balign 4
-DEFINE_COMPILERRT_FUNCTION(__alloca)
- mov %rcx,%rax // x64 _alloca is a normal function with parameter in rcx
- // fallthrough
-DEFINE_COMPILERRT_FUNCTION(___chkstk)
- push %rcx
- cmp $0x1000,%rax
- lea 16(%rsp),%rcx // rsp before calling this routine -> rcx
- jb 1f
-2:
- sub $0x1000,%rcx
- test %rcx,(%rcx)
- sub $0x1000,%rax
- cmp $0x1000,%rax
- ja 2b
-1:
- sub %rax,%rcx
- test %rcx,(%rcx)
-
- lea 8(%rsp),%rax // load pointer to the return address into rax
- mov %rcx,%rsp // install the new top of stack pointer into rsp
- mov -8(%rax),%rcx // restore rcx
- push (%rax) // push return address onto the stack
- sub %rsp,%rax // restore the original value in rax
- ret
-END_COMPILERRT_FUNCTION(___chkstk)
-END_COMPILERRT_FUNCTION(__alloca)
-
-#endif // __x86_64__
diff --git a/compiler-rt/lib/builtins/x86_64/floatdixf.c b/compiler-rt/lib/builtins/x86_64/floatdixf.c
index cf8450ce6f42..54636e283a0e 100644
--- a/compiler-rt/lib/builtins/x86_64/floatdixf.c
+++ b/compiler-rt/lib/builtins/x86_64/floatdixf.c
@@ -2,12 +2,12 @@
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-// long double __floatdixf(di_int a);
+// xf_float __floatdixf(di_int a);
#ifdef __x86_64__
#include "../int_lib.h"
-long double __floatdixf(int64_t a) { return (long double)a; }
+xf_float __floatdixf(int64_t a) { return (xf_float)a; }
#endif // __i386__
diff --git a/compiler-rt/lib/builtins/x86_64/floatundixf.S b/compiler-rt/lib/builtins/x86_64/floatundixf.S
index 9e3bcedcb7e4..cf7286f0d6c0 100644
--- a/compiler-rt/lib/builtins/x86_64/floatundixf.S
+++ b/compiler-rt/lib/builtins/x86_64/floatundixf.S
@@ -4,7 +4,7 @@
#include "../assembly.h"
-// long double __floatundixf(du_int a);
+// xf_float __floatundixf(du_int a);
#ifdef __x86_64__
diff --git a/compiler-rt/lib/cfi/cfi.cpp b/compiler-rt/lib/cfi/cfi.cpp
index 22f0b175dd87..ad1c91623514 100644
--- a/compiler-rt/lib/cfi/cfi.cpp
+++ b/compiler-rt/lib/cfi/cfi.cpp
@@ -51,7 +51,11 @@ using namespace __sanitizer;
namespace __cfi {
+#if SANITIZER_LOONGARCH64
+#define kCfiShadowLimitsStorageSize 16384 // 16KiB on loongarch64 per page
+#else
#define kCfiShadowLimitsStorageSize 4096 // 1 page
+#endif
// Lets hope that the data segment is mapped with 4K pages.
// The pointer to the cfi shadow region is stored at the start of this page.
// The rest of the page is unused and re-mapped read-only.
diff --git a/compiler-rt/lib/dfsan/dfsan.cpp b/compiler-rt/lib/dfsan/dfsan.cpp
index a579c9e53444..5e85c8fda3e2 100644
--- a/compiler-rt/lib/dfsan/dfsan.cpp
+++ b/compiler-rt/lib/dfsan/dfsan.cpp
@@ -823,12 +823,12 @@ bool PrintOriginTraceFramesToStr(Origin o, InternalScopedString *out) {
dfsan_origin origin_id = o.raw_id();
o = o.getNextChainedOrigin(&stack);
if (o.isChainedOrigin())
- out->append(
+ out->AppendF(
" %sOrigin value: 0x%x, Taint value was stored to memory at%s\n",
d.Origin(), origin_id, d.Default());
else
- out->append(" %sOrigin value: 0x%x, Taint value was created at%s\n",
- d.Origin(), origin_id, d.Default());
+ out->AppendF(" %sOrigin value: 0x%x, Taint value was created at%s\n",
+ d.Origin(), origin_id, d.Default());
// Includes a trailing newline, so no need to add it again.
stack.PrintTo(out);
@@ -849,9 +849,9 @@ bool PrintOriginTraceToStr(const void *addr, const char *description,
const dfsan_origin origin = *__dfsan::origin_for(addr);
- out->append(" %sTaint value 0x%x (at %p) origin tracking (%s)%s\n",
- d.Origin(), label, addr, description ? description : "",
- d.Default());
+ out->AppendF(" %sTaint value 0x%x (at %p) origin tracking (%s)%s\n",
+ d.Origin(), label, addr, description ? description : "",
+ d.Default());
Origin o = Origin::FromRawId(origin);
return PrintOriginTraceFramesToStr(o, out);
diff --git a/compiler-rt/lib/dfsan/dfsan_custom.cpp b/compiler-rt/lib/dfsan/dfsan_custom.cpp
index f41dd50617fb..38371d353368 100644
--- a/compiler-rt/lib/dfsan/dfsan_custom.cpp
+++ b/compiler-rt/lib/dfsan/dfsan_custom.cpp
@@ -2240,8 +2240,14 @@ typedef int dfsan_label_va;
// '%.3f').
struct Formatter {
Formatter(char *str_, const char *fmt_, size_t size_)
- : str(str_), str_off(0), size(size_), fmt_start(fmt_), fmt_cur(fmt_),
- width(-1) {}
+ : str(str_),
+ str_off(0),
+ size(size_),
+ fmt_start(fmt_),
+ fmt_cur(fmt_),
+ width(-1),
+ num_scanned(-1),
+ skip(false) {}
int format() {
char *tmp_fmt = build_format_string();
@@ -2266,12 +2272,50 @@ struct Formatter {
return retval;
}
- char *build_format_string() {
+ int scan() {
+ char *tmp_fmt = build_format_string(true);
+ int read_count = 0;
+ int retval = sscanf(str + str_off, tmp_fmt, &read_count);
+ if (retval > 0) {
+ if (-1 == num_scanned)
+ num_scanned = 0;
+ num_scanned += retval;
+ }
+ free(tmp_fmt);
+ return read_count;
+ }
+
+ template <typename T>
+ int scan(T arg) {
+ char *tmp_fmt = build_format_string(true);
+ int read_count = 0;
+ int retval = sscanf(str + str_off, tmp_fmt, arg, &read_count);
+ if (retval > 0) {
+ if (-1 == num_scanned)
+ num_scanned = 0;
+ num_scanned += retval;
+ }
+ free(tmp_fmt);
+ return read_count;
+ }
+
+ // with_n -> toggles adding %n on/off; off by default
+ char *build_format_string(bool with_n = false) {
size_t fmt_size = fmt_cur - fmt_start + 1;
- char *new_fmt = (char *)malloc(fmt_size + 1);
+ size_t add_size = 0;
+ if (with_n)
+ add_size = 2;
+ char *new_fmt = (char *)malloc(fmt_size + 1 + add_size);
assert(new_fmt);
internal_memcpy(new_fmt, fmt_start, fmt_size);
- new_fmt[fmt_size] = '\0';
+ if (!with_n) {
+ new_fmt[fmt_size] = '\0';
+ } else {
+ new_fmt[fmt_size] = '%';
+ new_fmt[fmt_size + 1] = 'n';
+ new_fmt[fmt_size + 2] = '\0';
+ }
+
return new_fmt;
}
@@ -2303,6 +2347,8 @@ struct Formatter {
const char *fmt_start;
const char *fmt_cur;
int width;
+ int num_scanned;
+ bool skip;
};
// Formats the input and propagates the input labels to the output. The output
@@ -2495,6 +2541,249 @@ static int format_buffer(char *str, size_t size, const char *fmt,
return formatter.str_off;
}
+// This function is an inverse of format_buffer: we take the input buffer,
+// scan it in search for format strings and store the results in the varargs.
+// The labels are propagated from the input buffer to the varargs.
+static int scan_buffer(char *str, size_t size, const char *fmt,
+ dfsan_label *va_labels, dfsan_label *ret_label,
+ dfsan_origin *str_origin, dfsan_origin *ret_origin,
+ va_list ap) {
+ Formatter formatter(str, fmt, size);
+ while (*formatter.fmt_cur) {
+ formatter.fmt_start = formatter.fmt_cur;
+ formatter.width = -1;
+ formatter.skip = false;
+ int read_count = 0;
+ void *dst_ptr = 0;
+ size_t write_size = 0;
+ if (*formatter.fmt_cur != '%') {
+ // Ordinary character. Consume all the characters until a '%' or the end
+ // of the string.
+ for (; *(formatter.fmt_cur + 1) && *(formatter.fmt_cur + 1) != '%';
+ ++formatter.fmt_cur) {
+ }
+ read_count = formatter.scan();
+ dfsan_set_label(0, formatter.str_cur(),
+ formatter.num_written_bytes(read_count));
+ } else {
+ // Conversion directive. Consume all the characters until a conversion
+ // specifier or the end of the string.
+ bool end_fmt = false;
+ for (; *formatter.fmt_cur && !end_fmt;) {
+ switch (*++formatter.fmt_cur) {
+ case 'd':
+ case 'i':
+ case 'o':
+ case 'u':
+ case 'x':
+ case 'X':
+ if (formatter.skip) {
+ read_count = formatter.scan();
+ } else {
+ switch (*(formatter.fmt_cur - 1)) {
+ case 'h':
+ // Also covers the 'hh' case (since the size of the arg is still
+ // an int).
+ dst_ptr = va_arg(ap, int *);
+ read_count = formatter.scan((int *)dst_ptr);
+ write_size = sizeof(int);
+ break;
+ case 'l':
+ if (formatter.fmt_cur - formatter.fmt_start >= 2 &&
+ *(formatter.fmt_cur - 2) == 'l') {
+ dst_ptr = va_arg(ap, long long int *);
+ read_count = formatter.scan((long long int *)dst_ptr);
+ write_size = sizeof(long long int);
+ } else {
+ dst_ptr = va_arg(ap, long int *);
+ read_count = formatter.scan((long int *)dst_ptr);
+ write_size = sizeof(long int);
+ }
+ break;
+ case 'q':
+ dst_ptr = va_arg(ap, long long int *);
+ read_count = formatter.scan((long long int *)dst_ptr);
+ write_size = sizeof(long long int);
+ break;
+ case 'j':
+ dst_ptr = va_arg(ap, intmax_t *);
+ read_count = formatter.scan((intmax_t *)dst_ptr);
+ write_size = sizeof(intmax_t);
+ break;
+ case 'z':
+ case 't':
+ dst_ptr = va_arg(ap, size_t *);
+ read_count = formatter.scan((size_t *)dst_ptr);
+ write_size = sizeof(size_t);
+ break;
+ default:
+ dst_ptr = va_arg(ap, int *);
+ read_count = formatter.scan((int *)dst_ptr);
+ write_size = sizeof(int);
+ }
+ // get the label associated with the string at the corresponding
+ // place
+ dfsan_label l = dfsan_read_label(
+ formatter.str_cur(), formatter.num_written_bytes(read_count));
+ dfsan_set_label(l, dst_ptr, write_size);
+ if (str_origin != nullptr) {
+ dfsan_set_label(l, dst_ptr, write_size);
+ size_t scan_count = formatter.num_written_bytes(read_count);
+ size_t size = scan_count > write_size ? write_size : scan_count;
+ dfsan_mem_origin_transfer(dst_ptr, formatter.str_cur(), size);
+ }
+ }
+ end_fmt = true;
+
+ break;
+
+ case 'a':
+ case 'A':
+ case 'e':
+ case 'E':
+ case 'f':
+ case 'F':
+ case 'g':
+ case 'G':
+ if (formatter.skip) {
+ read_count = formatter.scan();
+ } else {
+ if (*(formatter.fmt_cur - 1) == 'L') {
+ dst_ptr = va_arg(ap, long double *);
+ read_count = formatter.scan((long double *)dst_ptr);
+ write_size = sizeof(long double);
+ } else if (*(formatter.fmt_cur - 1) == 'l') {
+ dst_ptr = va_arg(ap, double *);
+ read_count = formatter.scan((double *)dst_ptr);
+ write_size = sizeof(double);
+ } else {
+ dst_ptr = va_arg(ap, float *);
+ read_count = formatter.scan((float *)dst_ptr);
+ write_size = sizeof(float);
+ }
+ dfsan_label l = dfsan_read_label(
+ formatter.str_cur(), formatter.num_written_bytes(read_count));
+ dfsan_set_label(l, dst_ptr, write_size);
+ if (str_origin != nullptr) {
+ dfsan_set_label(l, dst_ptr, write_size);
+ size_t scan_count = formatter.num_written_bytes(read_count);
+ size_t size = scan_count > write_size ? write_size : scan_count;
+ dfsan_mem_origin_transfer(dst_ptr, formatter.str_cur(), size);
+ }
+ }
+ end_fmt = true;
+ break;
+
+ case 'c':
+ if (formatter.skip) {
+ read_count = formatter.scan();
+ } else {
+ dst_ptr = va_arg(ap, char *);
+ read_count = formatter.scan((char *)dst_ptr);
+ write_size = sizeof(char);
+ dfsan_label l = dfsan_read_label(
+ formatter.str_cur(), formatter.num_written_bytes(read_count));
+ dfsan_set_label(l, dst_ptr, write_size);
+ if (str_origin != nullptr) {
+ size_t scan_count = formatter.num_written_bytes(read_count);
+ size_t size = scan_count > write_size ? write_size : scan_count;
+ dfsan_mem_origin_transfer(dst_ptr, formatter.str_cur(), size);
+ }
+ }
+ end_fmt = true;
+ break;
+
+ case 's': {
+ if (formatter.skip) {
+ read_count = formatter.scan();
+ } else {
+ dst_ptr = va_arg(ap, char *);
+ read_count = formatter.scan((char *)dst_ptr);
+ if (1 == read_count) {
+ // special case: we have parsed a single string and we need to
+ // update read_count with the string size
+ read_count = strlen((char *)dst_ptr);
+ }
+ if (str_origin)
+ dfsan_mem_origin_transfer(dst_ptr, formatter.str_cur(),
+ formatter.num_written_bytes(read_count));
+ va_labels++;
+ dfsan_mem_shadow_transfer(dst_ptr, formatter.str_cur(),
+ formatter.num_written_bytes(read_count));
+ }
+ end_fmt = true;
+ break;
+ }
+
+ case 'p':
+ if (formatter.skip) {
+ read_count = formatter.scan();
+ } else {
+ dst_ptr = va_arg(ap, void *);
+ read_count =
+ formatter.scan((int *)dst_ptr); // note: changing void* to int*
+ // since we need to call sizeof
+ write_size = sizeof(int);
+
+ dfsan_label l = dfsan_read_label(
+ formatter.str_cur(), formatter.num_written_bytes(read_count));
+ dfsan_set_label(l, dst_ptr, write_size);
+ if (str_origin != nullptr) {
+ dfsan_set_label(l, dst_ptr, write_size);
+ size_t scan_count = formatter.num_written_bytes(read_count);
+ size_t size = scan_count > write_size ? write_size : scan_count;
+ dfsan_mem_origin_transfer(dst_ptr, formatter.str_cur(), size);
+ }
+ }
+ end_fmt = true;
+ break;
+
+ case 'n': {
+ if (!formatter.skip) {
+ int *ptr = va_arg(ap, int *);
+ *ptr = (int)formatter.str_off;
+ *va_labels++ = 0;
+ dfsan_set_label(0, ptr, sizeof(*ptr));
+ if (str_origin != nullptr)
+ *str_origin++ = 0;
+ }
+ end_fmt = true;
+ break;
+ }
+
+ case '%':
+ read_count = formatter.scan();
+ end_fmt = true;
+ break;
+
+ case '*':
+ formatter.skip = true;
+ break;
+
+ default:
+ break;
+ }
+ }
+ }
+
+ if (read_count < 0) {
+ // There was an error.
+ return read_count;
+ }
+
+ formatter.fmt_cur++;
+ formatter.str_off += read_count;
+ }
+
+ (void)va_labels; // Silence unused-but-set-parameter warning
+ *ret_label = 0;
+ if (ret_origin)
+ *ret_origin = 0;
+
+ // Number of items scanned in total.
+ return formatter.num_scanned;
+}
+
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
int __dfsw_sprintf(char *str, const char *format, dfsan_label str_label,
@@ -2502,6 +2791,7 @@ int __dfsw_sprintf(char *str, const char *format, dfsan_label str_label,
dfsan_label *ret_label, ...) {
va_list ap;
va_start(ap, ret_label);
+
int ret = format_buffer(str, ~0ul, format, va_labels, ret_label, nullptr,
nullptr, ap);
va_end(ap);
@@ -2550,6 +2840,58 @@ int __dfso_snprintf(char *str, size_t size, const char *format,
return ret;
}
+SANITIZER_INTERFACE_ATTRIBUTE
+int __dfsw_sscanf(char *str, const char *format, dfsan_label str_label,
+ dfsan_label format_label, dfsan_label *va_labels,
+ dfsan_label *ret_label, ...) {
+ va_list ap;
+ va_start(ap, ret_label);
+ int ret = scan_buffer(str, ~0ul, format, va_labels, ret_label, nullptr,
+ nullptr, ap);
+ va_end(ap);
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __dfso_sscanf(char *str, const char *format, dfsan_label str_label,
+ dfsan_label format_label, dfsan_label *va_labels,
+ dfsan_label *ret_label, dfsan_origin str_origin,
+ dfsan_origin format_origin, dfsan_origin *va_origins,
+ dfsan_origin *ret_origin, ...) {
+ va_list ap;
+ va_start(ap, ret_origin);
+ int ret = scan_buffer(str, ~0ul, format, va_labels, ret_label, &str_origin,
+ ret_origin, ap);
+ va_end(ap);
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __dfsw___isoc99_sscanf(char *str, const char *format, dfsan_label str_label,
+ dfsan_label format_label, dfsan_label *va_labels,
+ dfsan_label *ret_label, ...) {
+ va_list ap;
+ va_start(ap, ret_label);
+ int ret = scan_buffer(str, ~0ul, format, va_labels, ret_label, nullptr,
+ nullptr, ap);
+ va_end(ap);
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __dfso___isoc99_sscanf(char *str, const char *format, dfsan_label str_label,
+ dfsan_label format_label, dfsan_label *va_labels,
+ dfsan_label *ret_label, dfsan_origin str_origin,
+ dfsan_origin format_origin, dfsan_origin *va_origins,
+ dfsan_origin *ret_origin, ...) {
+ va_list ap;
+ va_start(ap, ret_origin);
+ int ret = scan_buffer(str, ~0ul, format, va_labels, ret_label, &str_origin,
+ ret_origin, ap);
+ va_end(ap);
+ return ret;
+}
+
static void BeforeFork() {
StackDepotLockAll();
GetChainedOriginDepot()->LockAll();
diff --git a/compiler-rt/lib/dfsan/done_abilist.txt b/compiler-rt/lib/dfsan/done_abilist.txt
index 84d1b5188401..c582584d77e4 100644
--- a/compiler-rt/lib/dfsan/done_abilist.txt
+++ b/compiler-rt/lib/dfsan/done_abilist.txt
@@ -308,6 +308,10 @@ fun:gettimeofday=custom
fun:sprintf=custom
fun:snprintf=custom
+# scanf-like
+fun:sscanf=custom
+fun:__isoc99_sscanf=custom
+
# TODO: custom
fun:asprintf=discard
fun:qsort=discard
diff --git a/compiler-rt/lib/fuzzer/FuzzerCommand.h b/compiler-rt/lib/fuzzer/FuzzerCommand.h
index eb68be9a65b6..718d7e951fb1 100644
--- a/compiler-rt/lib/fuzzer/FuzzerCommand.h
+++ b/compiler-rt/lib/fuzzer/FuzzerCommand.h
@@ -19,6 +19,7 @@
#include <sstream>
#include <string>
#include <vector>
+#include <thread>
namespace fuzzer {
diff --git a/compiler-rt/lib/fuzzer/FuzzerCorpus.h b/compiler-rt/lib/fuzzer/FuzzerCorpus.h
index 912082be8fba..48b5a2cff02e 100644
--- a/compiler-rt/lib/fuzzer/FuzzerCorpus.h
+++ b/compiler-rt/lib/fuzzer/FuzzerCorpus.h
@@ -18,6 +18,7 @@
#include "FuzzerSHA1.h"
#include "FuzzerTracePC.h"
#include <algorithm>
+#include <bitset>
#include <chrono>
#include <numeric>
#include <random>
@@ -382,6 +383,7 @@ public:
}
// Remove most abundant rare feature.
+ IsRareFeature[Delete] = false;
RareFeatures[Delete] = RareFeatures.back();
RareFeatures.pop_back();
@@ -397,6 +399,7 @@ public:
// Add rare feature, handle collisions, and update energy.
RareFeatures.push_back(Idx);
+ IsRareFeature[Idx] = true;
GlobalFeatureFreqs[Idx] = 0;
for (auto II : Inputs) {
II->DeleteFeatureFreq(Idx);
@@ -450,9 +453,7 @@ public:
uint16_t Freq = GlobalFeatureFreqs[Idx32]++;
// Skip if abundant.
- if (Freq > FreqOfMostAbundantRareFeature ||
- std::find(RareFeatures.begin(), RareFeatures.end(), Idx32) ==
- RareFeatures.end())
+ if (Freq > FreqOfMostAbundantRareFeature || !IsRareFeature[Idx32])
return;
// Update global frequencies.
@@ -581,6 +582,7 @@ private:
uint16_t FreqOfMostAbundantRareFeature = 0;
uint16_t GlobalFeatureFreqs[kFeatureSetSize] = {};
std::vector<uint32_t> RareFeatures;
+ std::bitset<kFeatureSetSize> IsRareFeature;
std::string OutputCorpus;
};
diff --git a/compiler-rt/lib/fuzzer/FuzzerDriver.cpp b/compiler-rt/lib/fuzzer/FuzzerDriver.cpp
index 8c8c95392c7e..8674d788932f 100644
--- a/compiler-rt/lib/fuzzer/FuzzerDriver.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerDriver.cpp
@@ -293,9 +293,12 @@ static int RunInMultipleProcesses(const std::vector<std::string> &Args,
std::vector<std::thread> V;
std::thread Pulse(PulseThread);
Pulse.detach();
- for (unsigned i = 0; i < NumWorkers; i++)
- V.push_back(std::thread(WorkerThread, std::ref(Cmd), &Counter, NumJobs,
- &HasErrors));
+ V.resize(NumWorkers);
+ for (unsigned i = 0; i < NumWorkers; i++) {
+ V[i] = std::thread(WorkerThread, std::ref(Cmd), &Counter, NumJobs,
+ &HasErrors);
+ SetThreadName(V[i], "FuzzerWorker");
+ }
for (auto &T : V)
T.join();
return HasErrors ? 1 : 0;
diff --git a/compiler-rt/lib/fuzzer/FuzzerFlags.def b/compiler-rt/lib/fuzzer/FuzzerFlags.def
index 11815349b014..fc3b3aa8c98a 100644
--- a/compiler-rt/lib/fuzzer/FuzzerFlags.def
+++ b/compiler-rt/lib/fuzzer/FuzzerFlags.def
@@ -167,7 +167,7 @@ FUZZER_FLAG_INT(purge_allocator_interval, 1, "Purge allocator caches and "
"purge_allocator_interval=-1 to disable this functionality.")
FUZZER_FLAG_INT(trace_malloc, 0, "If >= 1 will print all mallocs/frees. "
"If >= 2 will also print stack traces.")
-FUZZER_FLAG_INT(rss_limit_mb, 2048, "If non-zero, the fuzzer will exit upon"
+FUZZER_FLAG_INT(rss_limit_mb, 2048, "If non-zero, the fuzzer will exit upon "
"reaching this limit of RSS memory usage.")
FUZZER_FLAG_INT(malloc_limit_mb, 0, "If non-zero, the fuzzer will exit "
"if the target tries to allocate this number of Mb with one malloc call. "
diff --git a/compiler-rt/lib/fuzzer/FuzzerLoop.cpp b/compiler-rt/lib/fuzzer/FuzzerLoop.cpp
index 8b430c5428d8..935dd2342e18 100644
--- a/compiler-rt/lib/fuzzer/FuzzerLoop.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerLoop.cpp
@@ -799,7 +799,7 @@ void Fuzzer::ReadAndExecuteSeedCorpora(std::vector<SizedFile> &CorporaFiles) {
TotalSize += File.Size;
}
if (Options.MaxLen == 0)
- SetMaxInputLen(std::min(std::max(kMinDefaultLen, MaxSize), kMaxSaneLen));
+ SetMaxInputLen(std::clamp(MaxSize, kMinDefaultLen, kMaxSaneLen));
assert(MaxInputLen > 0);
// Test the callback with empty input and never try it again.
diff --git a/compiler-rt/lib/fuzzer/FuzzerUtil.h b/compiler-rt/lib/fuzzer/FuzzerUtil.h
index 5296e7784b3f..554567e1b8fc 100644
--- a/compiler-rt/lib/fuzzer/FuzzerUtil.h
+++ b/compiler-rt/lib/fuzzer/FuzzerUtil.h
@@ -59,6 +59,8 @@ size_t GetPeakRSSMb();
int ExecuteCommand(const Command &Cmd);
bool ExecuteCommand(const Command &Cmd, std::string *CmdOutput);
+void SetThreadName(std::thread &thread, const std::string &name);
+
// Fuchsia does not have popen/pclose.
FILE *OpenProcessPipe(const char *Command, const char *Mode);
int CloseProcessPipe(FILE *F);
diff --git a/compiler-rt/lib/fuzzer/FuzzerUtilDarwin.cpp b/compiler-rt/lib/fuzzer/FuzzerUtilDarwin.cpp
index a5bed658a446..6c3ece30f67b 100644
--- a/compiler-rt/lib/fuzzer/FuzzerUtilDarwin.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerUtilDarwin.cpp
@@ -165,6 +165,11 @@ void DiscardOutput(int Fd) {
fclose(Temp);
}
+void SetThreadName(std::thread &thread, const std::string &name) {
+ // TODO ?
+ // Darwin allows to set the name only on the current thread it seems
+}
+
} // namespace fuzzer
#endif // LIBFUZZER_APPLE
diff --git a/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp b/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp
index 6a56505fbf1a..cfb81cd3f780 100644
--- a/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp
@@ -605,6 +605,10 @@ size_t PageSize() {
return PageSizeCached;
}
+void SetThreadName(std::thread &thread, const std::string &name) {
+ // TODO ?
+}
+
} // namespace fuzzer
#endif // LIBFUZZER_FUCHSIA
diff --git a/compiler-rt/lib/fuzzer/FuzzerUtilLinux.cpp b/compiler-rt/lib/fuzzer/FuzzerUtilLinux.cpp
index 717af11bc79f..5729448b0beb 100644
--- a/compiler-rt/lib/fuzzer/FuzzerUtilLinux.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerUtilLinux.cpp
@@ -40,6 +40,14 @@ void DiscardOutput(int Fd) {
fclose(Temp);
}
+void SetThreadName(std::thread &thread, const std::string &name) {
+#if LIBFUZZER_LINUX || LIBFUZZER_FREEBSD
+ (void)pthread_setname_np(thread.native_handle(), name.c_str());
+#elif LIBFUZZER_NETBSD
+ (void)pthread_set_name_np(thread.native_handle(), "%s", name.c_str());
+#endif
+}
+
} // namespace fuzzer
#endif
diff --git a/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp b/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp
index 6d9bc766c695..71770166805f 100644
--- a/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp
@@ -233,6 +233,11 @@ size_t PageSize() {
return PageSizeCached;
}
+void SetThreadName(std::thread &thread, const std::string &name) {
+ // TODO ?
+ // to UTF-8 then SetThreadDescription ?
+}
+
} // namespace fuzzer
#endif // LIBFUZZER_WINDOWS
diff --git a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
index de07b6798c19..a02a35321c2b 100644
--- a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
+++ b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
@@ -20,6 +20,15 @@
#include <stddef.h>
#include <stdint.h>
// IWYU pragma: no_include <__stddef_max_align_t.h>
+// IWYU pragma: no_include <__stddef_null.h>
+// IWYU pragma: no_include <__stddef_nullptr_t.h>
+// IWYU pragma: no_include <__stddef_offsetof.h>
+// IWYU pragma: no_include <__stddef_ptrdiff_t.h>
+// IWYU pragma: no_include <__stddef_rsize_t.h>
+// IWYU pragma: no_include <__stddef_size_t.h>
+// IWYU pragma: no_include <__stddef_unreachable.h>
+// IWYU pragma: no_include <__stddef_wchar_t.h>
+// IWYU pragma: no_include <__stddef_wint_t.h>
namespace gwp_asan {
// This class is the primary implementation of the allocator portion of GWP-
diff --git a/compiler-rt/lib/hwasan/hwasan.cpp b/compiler-rt/lib/hwasan/hwasan.cpp
index 000c0f76c1da..2f6cb10caf1b 100644
--- a/compiler-rt/lib/hwasan/hwasan.cpp
+++ b/compiler-rt/lib/hwasan/hwasan.cpp
@@ -86,9 +86,11 @@ static void InitializeFlags() {
cf.clear_shadow_mmap_threshold = 4096 * (SANITIZER_ANDROID ? 2 : 8);
// Sigtrap is used in error reporting.
cf.handle_sigtrap = kHandleSignalExclusive;
- // For now only tested on Linux. Other plantforms can be turned on as they
- // become ready.
- cf.detect_leaks = cf.detect_leaks && SANITIZER_LINUX && !SANITIZER_ANDROID;
+ // For now only tested on Linux and Fuchsia. Other plantforms can be turned
+ // on as they become ready.
+ constexpr bool can_detect_leaks =
+ (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA;
+ cf.detect_leaks = cf.detect_leaks && can_detect_leaks;
#if SANITIZER_ANDROID
// Let platform handle other signals. It is better at reporting them then we
@@ -170,7 +172,7 @@ static void HwasanFormatMemoryUsage(InternalScopedString &s) {
auto sds = StackDepotGetStats();
AllocatorStatCounters asc;
GetAllocatorStats(asc);
- s.append(
+ s.AppendF(
"HWASAN pid: %d rss: %zd threads: %zd stacks: %zd"
" thr_aux: %zd stack_depot: %zd uniq_stacks: %zd"
" heap: %zd",
diff --git a/compiler-rt/lib/hwasan/hwasan_allocator.cpp b/compiler-rt/lib/hwasan/hwasan_allocator.cpp
index 48f1873ed682..d21ba024a20e 100644
--- a/compiler-rt/lib/hwasan/hwasan_allocator.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_allocator.cpp
@@ -234,28 +234,23 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
}
void *user_ptr = allocated;
- // Tagging can only be skipped when both tag_in_malloc and tag_in_free are
- // false. When tag_in_malloc = false and tag_in_free = true malloc needs to
- // retag to 0.
if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)) &&
- (flags()->tag_in_malloc || flags()->tag_in_free) &&
- atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
- if (flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
- tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
- uptr tag_size = orig_size ? orig_size : 1;
- uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
- user_ptr =
- (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
- if (full_granule_size != tag_size) {
- u8 *short_granule =
- reinterpret_cast<u8 *>(allocated) + full_granule_size;
- TagMemoryAligned((uptr)short_granule, kShadowAlignment,
- tag_size % kShadowAlignment);
- short_granule[kShadowAlignment - 1] = tag;
- }
- } else {
- user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
+ atomic_load_relaxed(&hwasan_allocator_tagging_enabled) &&
+ flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
+ tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
+ uptr tag_size = orig_size ? orig_size : 1;
+ uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
+ user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
+ if (full_granule_size != tag_size) {
+ u8 *short_granule = reinterpret_cast<u8 *>(allocated) + full_granule_size;
+ TagMemoryAligned((uptr)short_granule, kShadowAlignment,
+ tag_size % kShadowAlignment);
+ short_granule[kShadowAlignment - 1] = tag;
}
+ } else {
+ // Tagging can not be completely skipped. If it's disabled, we need to tag
+ // with zeros.
+ user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
}
Metadata *meta =
@@ -345,7 +340,8 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
}
if (in_taggable_region && flags()->tag_in_free && malloc_bisect(stack, 0) &&
- atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
+ atomic_load_relaxed(&hwasan_allocator_tagging_enabled) &&
+ allocator.FromPrimary(untagged_ptr) /* Secondary 0-tag and unmap.*/) {
// Always store full 8-bit tags on free to maximize UAF detection.
tag_t tag;
if (t) {
diff --git a/compiler-rt/lib/hwasan/hwasan_flags.inc b/compiler-rt/lib/hwasan/hwasan_flags.inc
index 978fa46b705c..058a0457b9e7 100644
--- a/compiler-rt/lib/hwasan/hwasan_flags.inc
+++ b/compiler-rt/lib/hwasan/hwasan_flags.inc
@@ -84,3 +84,10 @@ HWASAN_FLAG(bool, malloc_bisect_dump, false,
// are untagged before the call.
HWASAN_FLAG(bool, fail_without_syscall_abi, true,
"Exit if fail to request relaxed syscall ABI.")
+
+HWASAN_FLAG(
+ uptr, fixed_shadow_base, -1,
+ "If not -1, HWASan will attempt to allocate the shadow at this address, "
+ "instead of choosing one dynamically."
+ "Tip: this can be combined with the compiler option, "
+ "-hwasan-mapping-offset, to optimize the instrumentation.")
diff --git a/compiler-rt/lib/hwasan/hwasan_interceptors.cpp b/compiler-rt/lib/hwasan/hwasan_interceptors.cpp
index 1a49320b0719..96df4dd0c24d 100644
--- a/compiler-rt/lib/hwasan/hwasan_interceptors.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_interceptors.cpp
@@ -19,6 +19,7 @@
#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_checks.h"
+#include "hwasan_mapping.h"
#include "hwasan_platform_interceptors.h"
#include "hwasan_thread.h"
#include "hwasan_thread_list.h"
@@ -31,6 +32,21 @@
using namespace __hwasan;
+struct HWAsanInterceptorContext {
+ const char *interceptor_name;
+};
+
+# define ACCESS_MEMORY_RANGE(offset, size, access) \
+ do { \
+ __hwasan::CheckAddressSized<ErrorAction::Recover, access>((uptr)offset, \
+ size); \
+ } while (0)
+
+# define HWASAN_READ_RANGE(offset, size) \
+ ACCESS_MEMORY_RANGE(offset, size, AccessType::Load)
+# define HWASAN_WRITE_RANGE(offset, size) \
+ ACCESS_MEMORY_RANGE(offset, size, AccessType::Store)
+
# if !SANITIZER_APPLE
# define HWASAN_INTERCEPT_FUNC(name) \
do { \
@@ -58,9 +74,8 @@ using namespace __hwasan;
# if HWASAN_WITH_INTERCEPTORS
-# define COMMON_SYSCALL_PRE_READ_RANGE(p, s) __hwasan_loadN((uptr)p, (uptr)s)
-# define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
- __hwasan_storeN((uptr)p, (uptr)s)
+# define COMMON_SYSCALL_PRE_READ_RANGE(p, s) HWASAN_READ_RANGE(p, s)
+# define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) HWASAN_WRITE_RANGE(p, s)
# define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
do { \
(void)(p); \
@@ -75,17 +90,14 @@ using namespace __hwasan;
# include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
# define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
- do { \
- } while (false)
+ HWASAN_WRITE_RANGE(ptr, size)
# define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
- do { \
- (void)(ctx); \
- (void)(ptr); \
- (void)(size); \
- } while (false)
+ HWASAN_READ_RANGE(ptr, size)
# define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
+ HWAsanInterceptorContext _ctx = {#func}; \
+ ctx = (void *)&_ctx; \
do { \
(void)(ctx); \
(void)(func); \
@@ -134,29 +146,16 @@ using namespace __hwasan;
(void)(name); \
} while (false)
-# define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \
- do { \
- (void)(ctx); \
- (void)(to); \
- (void)(from); \
- (void)(size); \
- } while (false)
-
-# define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \
- do { \
- (void)(ctx); \
- (void)(to); \
- (void)(from); \
- (void)(size); \
- } while (false)
-
-# define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \
- do { \
- (void)(ctx); \
- (void)(block); \
- (void)(c); \
- (void)(size); \
- } while (false)
+# define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
+ return internal_memset(dst, v, size); \
+ COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size); \
+ if (MemIsApp(UntagAddr(reinterpret_cast<uptr>(dst))) && \
+ common_flags()->intercept_intrin) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ return REAL(memset)(dst, v, size); \
+ }
# define COMMON_INTERCEPTOR_STRERROR() \
do { \
@@ -309,9 +308,9 @@ INTERCEPTOR(int, pthread_detach, void *thread) {
return result;
}
-INTERCEPTOR(int, pthread_exit, void *retval) {
+INTERCEPTOR(void, pthread_exit, void *retval) {
hwasanThreadArgRetval().Finish(GetThreadSelf(), retval);
- return REAL(pthread_exit)(retval);
+ REAL(pthread_exit)(retval);
}
# if SANITIZER_GLIBC
@@ -520,12 +519,12 @@ void InitializeInterceptors() {
static int inited = 0;
CHECK_EQ(inited, 0);
+# if HWASAN_WITH_INTERCEPTORS
InitializeCommonInterceptors();
(void)(read_iovec);
(void)(write_iovec);
-# if HWASAN_WITH_INTERCEPTORS
# if defined(__linux__)
INTERCEPT_FUNCTION(__libc_longjmp);
INTERCEPT_FUNCTION(longjmp);
diff --git a/compiler-rt/lib/hwasan/hwasan_linux.cpp b/compiler-rt/lib/hwasan/hwasan_linux.cpp
index 6f5e9432974e..f01fa4276413 100644
--- a/compiler-rt/lib/hwasan/hwasan_linux.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_linux.cpp
@@ -106,8 +106,12 @@ static uptr GetHighMemEnd() {
}
static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
- __hwasan_shadow_memory_dynamic_address =
- FindDynamicShadowStart(shadow_size_bytes);
+ if (flags()->fixed_shadow_base != (uptr)-1) {
+ __hwasan_shadow_memory_dynamic_address = flags()->fixed_shadow_base;
+ } else {
+ __hwasan_shadow_memory_dynamic_address =
+ FindDynamicShadowStart(shadow_size_bytes);
+ }
}
static void MaybeDieIfNoTaggingAbi(const char *message) {
@@ -294,25 +298,6 @@ void InstallAtExitHandler() { atexit(HwasanAtExit); }
// ---------------------- TSD ---------------- {{{1
-extern "C" void __hwasan_thread_enter() {
- hwasanThreadList().CreateCurrentThread()->EnsureRandomStateInited();
-}
-
-extern "C" void __hwasan_thread_exit() {
- Thread *t = GetCurrentThread();
- // Make sure that signal handler can not see a stale current thread pointer.
- atomic_signal_fence(memory_order_seq_cst);
- if (t) {
- // Block async signals on the thread as the handler can be instrumented.
- // After this point instrumented code can't access essential data from TLS
- // and will crash.
- // Bionic already calls __hwasan_thread_exit with blocked signals.
- if (SANITIZER_GLIBC)
- BlockSignals();
- hwasanThreadList().ReleaseThread(t);
- }
-}
-
# if HWASAN_WITH_INTERCEPTORS
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;
@@ -561,4 +546,25 @@ void InstallAtExitCheckLeaks() {
} // namespace __hwasan
+using namespace __hwasan;
+
+extern "C" void __hwasan_thread_enter() {
+ hwasanThreadList().CreateCurrentThread()->EnsureRandomStateInited();
+}
+
+extern "C" void __hwasan_thread_exit() {
+ Thread *t = GetCurrentThread();
+ // Make sure that signal handler can not see a stale current thread pointer.
+ atomic_signal_fence(memory_order_seq_cst);
+ if (t) {
+ // Block async signals on the thread as the handler can be instrumented.
+ // After this point instrumented code can't access essential data from TLS
+ // and will crash.
+ // Bionic already calls __hwasan_thread_exit with blocked signals.
+ if (SANITIZER_GLIBC)
+ BlockSignals();
+ hwasanThreadList().ReleaseThread(t);
+ }
+}
+
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
diff --git a/compiler-rt/lib/hwasan/hwasan_platform_interceptors.h b/compiler-rt/lib/hwasan/hwasan_platform_interceptors.h
index 33ae70a4ded9..d92b51052194 100644
--- a/compiler-rt/lib/hwasan/hwasan_platform_interceptors.h
+++ b/compiler-rt/lib/hwasan/hwasan_platform_interceptors.h
@@ -56,20 +56,20 @@
#undef SANITIZER_INTERCEPT_STRCASECMP
#define SANITIZER_INTERCEPT_STRCASECMP 0
-#undef SANITIZER_INTERCEPT_MEMSET
-#define SANITIZER_INTERCEPT_MEMSET 0
+// #undef SANITIZER_INTERCEPT_MEMSET
+// #define SANITIZER_INTERCEPT_MEMSET 0
-#undef SANITIZER_INTERCEPT_MEMMOVE
-#define SANITIZER_INTERCEPT_MEMMOVE 0
+// #undef SANITIZER_INTERCEPT_MEMMOVE
+// #define SANITIZER_INTERCEPT_MEMMOVE 0
-#undef SANITIZER_INTERCEPT_MEMCPY
-#define SANITIZER_INTERCEPT_MEMCPY 0
+// #undef SANITIZER_INTERCEPT_MEMCPY
+// #define SANITIZER_INTERCEPT_MEMCPY 0
-#undef SANITIZER_INTERCEPT_MEMCMP
-#define SANITIZER_INTERCEPT_MEMCMP 0
+// #undef SANITIZER_INTERCEPT_MEMCMP
+// #define SANITIZER_INTERCEPT_MEMCMP 0
-#undef SANITIZER_INTERCEPT_BCMP
-#define SANITIZER_INTERCEPT_BCMP 0
+// #undef SANITIZER_INTERCEPT_BCMP
+// #define SANITIZER_INTERCEPT_BCMP 0
#undef SANITIZER_INTERCEPT_STRNDUP
#define SANITIZER_INTERCEPT_STRNDUP 0
diff --git a/compiler-rt/lib/hwasan/hwasan_report.cpp b/compiler-rt/lib/hwasan/hwasan_report.cpp
index efe6f5770491..5e8aa315801b 100644
--- a/compiler-rt/lib/hwasan/hwasan_report.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_report.cpp
@@ -25,6 +25,7 @@
#include "sanitizer_common/sanitizer_array_ref.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
@@ -37,7 +38,7 @@ namespace __hwasan {
class ScopedReport {
public:
- ScopedReport(bool fatal = false) : error_message_(1), fatal(fatal) {
+ explicit ScopedReport(bool fatal) : fatal(fatal) {
Lock lock(&error_message_lock_);
error_message_ptr_ = fatal ? &error_message_ : nullptr;
++hwasan_report_count;
@@ -65,11 +66,7 @@ class ScopedReport {
Lock lock(&error_message_lock_);
if (!error_message_ptr_)
return;
- uptr len = internal_strlen(msg);
- uptr old_size = error_message_ptr_->size();
- error_message_ptr_->resize(old_size + len);
- // overwrite old trailing '\0', keep new trailing '\0' untouched.
- internal_memcpy(&(*error_message_ptr_)[old_size - 1], msg, len);
+ error_message_ptr_->Append(msg);
}
static void SetErrorReportCallback(void (*callback)(const char *)) {
@@ -78,17 +75,17 @@ class ScopedReport {
}
private:
- ScopedErrorReportLock error_report_lock_;
- InternalMmapVector<char> error_message_;
+ InternalScopedString error_message_;
bool fatal;
- static InternalMmapVector<char> *error_message_ptr_;
static Mutex error_message_lock_;
+ static InternalScopedString *error_message_ptr_
+ SANITIZER_GUARDED_BY(error_message_lock_);
static void (*error_report_callback_)(const char *);
};
-InternalMmapVector<char> *ScopedReport::error_message_ptr_;
Mutex ScopedReport::error_message_lock_;
+InternalScopedString *ScopedReport::error_message_ptr_;
void (*ScopedReport::error_report_callback_)(const char *);
// If there is an active ScopedReport, append to its error message.
@@ -112,29 +109,45 @@ static void MaybePrintAndroidHelpUrl() {
#endif
}
+namespace {
// A RAII object that holds a copy of the current thread stack ring buffer.
// The actual stack buffer may change while we are iterating over it (for
// example, Printf may call syslog() which can itself be built with hwasan).
class SavedStackAllocations {
public:
- SavedStackAllocations(StackAllocationsRingBuffer *rb) {
+ SavedStackAllocations() = default;
+
+ explicit SavedStackAllocations(Thread *t) { CopyFrom(t); }
+
+ void CopyFrom(Thread *t) {
+ StackAllocationsRingBuffer *rb = t->stack_allocations();
uptr size = rb->size() * sizeof(uptr);
void *storage =
MmapAlignedOrDieOnFatalError(size, size * 2, "saved stack allocations");
new (&rb_) StackAllocationsRingBuffer(*rb, storage);
+ thread_id_ = t->unique_id();
}
~SavedStackAllocations() {
- StackAllocationsRingBuffer *rb = get();
- UnmapOrDie(rb->StartOfStorage(), rb->size() * sizeof(uptr));
+ if (rb_) {
+ StackAllocationsRingBuffer *rb = get();
+ UnmapOrDie(rb->StartOfStorage(), rb->size() * sizeof(uptr));
+ }
+ }
+
+ const StackAllocationsRingBuffer *get() const {
+ return (const StackAllocationsRingBuffer *)&rb_;
}
StackAllocationsRingBuffer *get() {
return (StackAllocationsRingBuffer *)&rb_;
}
+ u32 thread_id() const { return thread_id_; }
+
private:
- uptr rb_;
+ uptr rb_ = 0;
+ u32 thread_id_;
};
class Decorator: public __sanitizer::SanitizerCommonDecorator {
@@ -147,6 +160,7 @@ class Decorator: public __sanitizer::SanitizerCommonDecorator {
const char *Location() { return Green(); }
const char *Thread() { return Green(); }
};
+} // namespace
static bool FindHeapAllocation(HeapAllocationsRingBuffer *rb, uptr tagged_addr,
HeapAllocationRecord *har, uptr *ring_index,
@@ -187,7 +201,7 @@ static bool FindHeapAllocation(HeapAllocationsRingBuffer *rb, uptr tagged_addr,
return false;
}
-static void PrintStackAllocations(StackAllocationsRingBuffer *sa,
+static void PrintStackAllocations(const StackAllocationsRingBuffer *sa,
tag_t addr_tag, uptr untagged_addr) {
uptr frames = Min((uptr)flags()->stack_history_size, sa->size());
bool found_local = false;
@@ -243,12 +257,13 @@ static void PrintStackAllocations(StackAllocationsRingBuffer *sa,
break;
uptr pc_mask = (1ULL << 48) - 1;
uptr pc = record & pc_mask;
- frame_desc.append(" record_addr:0x%zx record:0x%zx",
- reinterpret_cast<uptr>(record_addr), record);
+ frame_desc.AppendF(" record_addr:0x%zx record:0x%zx",
+ reinterpret_cast<uptr>(record_addr), record);
if (SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc)) {
- RenderFrame(&frame_desc, " %F %L", 0, frame->info.address, &frame->info,
- common_flags()->symbolize_vs_style,
- common_flags()->strip_path_prefix);
+ StackTracePrinter::GetOrInit()->RenderFrame(
+ &frame_desc, " %F %L", 0, frame->info.address, &frame->info,
+ common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
frame->ClearAll();
}
Printf("%s\n", frame_desc.data());
@@ -306,22 +321,342 @@ static uptr GetGlobalSizeFromDescriptor(uptr ptr) {
return 0;
}
-static void ShowHeapOrGlobalCandidate(uptr untagged_addr, tag_t *candidate,
- tag_t *left, tag_t *right) {
- Decorator d;
- uptr mem = ShadowToMem(reinterpret_cast<uptr>(candidate));
- HwasanChunkView chunk = FindHeapChunkByAddress(mem);
+void ReportStats() {}
+
+constexpr uptr kDumpWidth = 16;
+constexpr uptr kShadowLines = 17;
+constexpr uptr kShadowDumpSize = kShadowLines * kDumpWidth;
+
+constexpr uptr kShortLines = 3;
+constexpr uptr kShortDumpSize = kShortLines * kDumpWidth;
+constexpr uptr kShortDumpOffset = (kShadowLines - kShortLines) / 2 * kDumpWidth;
+
+static uptr GetPrintTagStart(uptr addr) {
+ addr = MemToShadow(addr);
+ addr = RoundDownTo(addr, kDumpWidth);
+ addr -= kDumpWidth * (kShadowLines / 2);
+ return addr;
+}
+
+template <typename PrintTag>
+static void PrintTagInfoAroundAddr(uptr addr, uptr num_rows,
+ InternalScopedString &s,
+ PrintTag print_tag) {
+ uptr center_row_beg = RoundDownTo(addr, kDumpWidth);
+ uptr beg_row = center_row_beg - kDumpWidth * (num_rows / 2);
+ uptr end_row = center_row_beg + kDumpWidth * ((num_rows + 1) / 2);
+ for (uptr row = beg_row; row < end_row; row += kDumpWidth) {
+ s.Append(row == center_row_beg ? "=>" : " ");
+ s.AppendF("%p:", (void *)ShadowToMem(row));
+ for (uptr i = 0; i < kDumpWidth; i++) {
+ s.Append(row + i == addr ? "[" : " ");
+ print_tag(s, row + i);
+ s.Append(row + i == addr ? "]" : " ");
+ }
+ s.AppendF("\n");
+ }
+}
+
+template <typename GetTag, typename GetShortTag>
+static void PrintTagsAroundAddr(uptr addr, GetTag get_tag,
+ GetShortTag get_short_tag) {
+ InternalScopedString s;
+ addr = MemToShadow(addr);
+ s.AppendF(
+ "Memory tags around the buggy address (one tag corresponds to %zd "
+ "bytes):\n",
+ kShadowAlignment);
+ PrintTagInfoAroundAddr(addr, kShadowLines, s,
+ [&](InternalScopedString &s, uptr tag_addr) {
+ tag_t tag = get_tag(tag_addr);
+ s.AppendF("%02x", tag);
+ });
+
+ s.AppendF(
+ "Tags for short granules around the buggy address (one tag corresponds "
+ "to %zd bytes):\n",
+ kShadowAlignment);
+ PrintTagInfoAroundAddr(addr, kShortLines, s,
+ [&](InternalScopedString &s, uptr tag_addr) {
+ tag_t tag = get_tag(tag_addr);
+ if (tag >= 1 && tag <= kShadowAlignment) {
+ tag_t short_tag = get_short_tag(tag_addr);
+ s.AppendF("%02x", short_tag);
+ } else {
+ s.AppendF("..");
+ }
+ });
+ s.AppendF(
+ "See "
+ "https://clang.llvm.org/docs/"
+ "HardwareAssistedAddressSanitizerDesign.html#short-granules for a "
+ "description of short granule tags\n");
+ Printf("%s", s.data());
+}
+
+static uptr GetTopPc(const StackTrace *stack) {
+ return stack->size ? StackTrace::GetPreviousInstructionPc(stack->trace[0])
+ : 0;
+}
+
+namespace {
+class BaseReport {
+ public:
+ BaseReport(StackTrace *stack, bool fatal, uptr tagged_addr, uptr access_size)
+ : scoped_report(fatal),
+ stack(stack),
+ tagged_addr(tagged_addr),
+ access_size(access_size),
+ untagged_addr(UntagAddr(tagged_addr)),
+ ptr_tag(GetTagFromPointer(tagged_addr)),
+ mismatch_offset(FindMismatchOffset()),
+ heap(CopyHeapChunk()),
+ allocations(CopyAllocations()),
+ candidate(FindBufferOverflowCandidate()),
+ shadow(CopyShadow()) {}
+
+ protected:
+ struct OverflowCandidate {
+ uptr untagged_addr = 0;
+ bool after = false;
+ bool is_close = false;
+
+ struct {
+ uptr begin = 0;
+ uptr end = 0;
+ u32 thread_id = 0;
+ u32 stack_id = 0;
+ bool is_allocated = false;
+ } heap;
+ };
+
+ struct HeapAllocation {
+ HeapAllocationRecord har = {};
+ uptr ring_index = 0;
+ uptr num_matching_addrs = 0;
+ uptr num_matching_addrs_4b = 0;
+ u32 free_thread_id = 0;
+ };
+
+ struct Allocations {
+ ArrayRef<SavedStackAllocations> stack;
+ ArrayRef<HeapAllocation> heap;
+ };
+
+ struct HeapChunk {
+ uptr begin = 0;
+ uptr size = 0;
+ u32 stack_id = 0;
+ bool from_small_heap = false;
+ bool is_allocated = false;
+ };
+
+ struct Shadow {
+ uptr addr = 0;
+ tag_t tags[kShadowDumpSize] = {};
+ tag_t short_tags[kShortDumpSize] = {};
+ };
+
+ sptr FindMismatchOffset() const;
+ Shadow CopyShadow() const;
+ tag_t GetTagCopy(uptr addr) const;
+ tag_t GetShortTagCopy(uptr addr) const;
+ HeapChunk CopyHeapChunk() const;
+ Allocations CopyAllocations();
+ OverflowCandidate FindBufferOverflowCandidate() const;
+ void PrintAddressDescription() const;
+ void PrintHeapOrGlobalCandidate() const;
+ void PrintTags(uptr addr) const;
+
+ SavedStackAllocations stack_allocations_storage[16];
+ HeapAllocation heap_allocations_storage[256];
+
+ const ScopedReport scoped_report;
+ const StackTrace *stack = nullptr;
+ const uptr tagged_addr = 0;
+ const uptr access_size = 0;
+ const uptr untagged_addr = 0;
+ const tag_t ptr_tag = 0;
+ const sptr mismatch_offset = 0;
+
+ const HeapChunk heap;
+ const Allocations allocations;
+ const OverflowCandidate candidate;
+
+ const Shadow shadow;
+};
+
+sptr BaseReport::FindMismatchOffset() const {
+ if (!access_size)
+ return 0;
+ sptr offset =
+ __hwasan_test_shadow(reinterpret_cast<void *>(tagged_addr), access_size);
+ CHECK_GE(offset, 0);
+ CHECK_LT(offset, static_cast<sptr>(access_size));
+ tag_t *tag_ptr =
+ reinterpret_cast<tag_t *>(MemToShadow(untagged_addr + offset));
+ tag_t mem_tag = *tag_ptr;
+
+ if (mem_tag && mem_tag < kShadowAlignment) {
+ tag_t *granule_ptr = reinterpret_cast<tag_t *>((untagged_addr + offset) &
+ ~(kShadowAlignment - 1));
+ // If offset is 0, (untagged_addr + offset) is not aligned to granules.
+ // This is the offset of the leftmost accessed byte within the bad granule.
+ u8 in_granule_offset = (untagged_addr + offset) & (kShadowAlignment - 1);
+ tag_t short_tag = granule_ptr[kShadowAlignment - 1];
+ // The first mismatch was a short granule that matched the ptr_tag.
+ if (short_tag == ptr_tag) {
+ // If the access starts after the end of the short granule, then the first
+ // bad byte is the first byte of the access; otherwise it is the first
+ // byte past the end of the short granule
+ if (mem_tag > in_granule_offset) {
+ offset += mem_tag - in_granule_offset;
+ }
+ }
+ }
+ return offset;
+}
+
+BaseReport::Shadow BaseReport::CopyShadow() const {
+ Shadow result;
+ if (!MemIsApp(untagged_addr))
+ return result;
+
+ result.addr = GetPrintTagStart(untagged_addr + mismatch_offset);
+ uptr tag_addr = result.addr;
+ uptr short_end = kShortDumpOffset + ARRAY_SIZE(shadow.short_tags);
+ for (uptr i = 0; i < ARRAY_SIZE(result.tags); ++i, ++tag_addr) {
+ if (!MemIsShadow(tag_addr))
+ continue;
+ result.tags[i] = *reinterpret_cast<tag_t *>(tag_addr);
+ if (i < kShortDumpOffset || i >= short_end)
+ continue;
+ uptr granule_addr = ShadowToMem(tag_addr);
+ if (1 <= result.tags[i] && result.tags[i] <= kShadowAlignment &&
+ IsAccessibleMemoryRange(granule_addr, kShadowAlignment)) {
+ result.short_tags[i - kShortDumpOffset] =
+ *reinterpret_cast<tag_t *>(granule_addr + kShadowAlignment - 1);
+ }
+ }
+ return result;
+}
+
+tag_t BaseReport::GetTagCopy(uptr addr) const {
+ CHECK_GE(addr, shadow.addr);
+ uptr idx = addr - shadow.addr;
+ CHECK_LT(idx, ARRAY_SIZE(shadow.tags));
+ return shadow.tags[idx];
+}
+
+tag_t BaseReport::GetShortTagCopy(uptr addr) const {
+ CHECK_GE(addr, shadow.addr + kShortDumpOffset);
+ uptr idx = addr - shadow.addr - kShortDumpOffset;
+ CHECK_LT(idx, ARRAY_SIZE(shadow.short_tags));
+ return shadow.short_tags[idx];
+}
+
+BaseReport::HeapChunk BaseReport::CopyHeapChunk() const {
+ HeapChunk result = {};
+ if (MemIsShadow(untagged_addr))
+ return result;
+ HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
+ result.begin = chunk.Beg();
+ if (result.begin) {
+ result.size = chunk.ActualSize();
+ result.from_small_heap = chunk.FromSmallHeap();
+ result.is_allocated = chunk.IsAllocated();
+ result.stack_id = chunk.GetAllocStackId();
+ }
+ return result;
+}
+
+BaseReport::Allocations BaseReport::CopyAllocations() {
+ if (MemIsShadow(untagged_addr))
+ return {};
+ uptr stack_allocations_count = 0;
+ uptr heap_allocations_count = 0;
+ hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
+ if (stack_allocations_count < ARRAY_SIZE(stack_allocations_storage) &&
+ t->AddrIsInStack(untagged_addr)) {
+ stack_allocations_storage[stack_allocations_count++].CopyFrom(t);
+ }
+
+ if (heap_allocations_count < ARRAY_SIZE(heap_allocations_storage)) {
+ // Scan all threads' ring buffers to find if it's a heap-use-after-free.
+ HeapAllocationRecord har;
+ uptr ring_index, num_matching_addrs, num_matching_addrs_4b;
+ if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har,
+ &ring_index, &num_matching_addrs,
+ &num_matching_addrs_4b)) {
+ auto &ha = heap_allocations_storage[heap_allocations_count++];
+ ha.har = har;
+ ha.ring_index = ring_index;
+ ha.num_matching_addrs = num_matching_addrs;
+ ha.num_matching_addrs_4b = num_matching_addrs_4b;
+ ha.free_thread_id = t->unique_id();
+ }
+ }
+ });
+
+ return {{stack_allocations_storage, stack_allocations_count},
+ {heap_allocations_storage, heap_allocations_count}};
+}
+
+BaseReport::OverflowCandidate BaseReport::FindBufferOverflowCandidate() const {
+ OverflowCandidate result = {};
+ if (MemIsShadow(untagged_addr))
+ return result;
+ // Check if this looks like a heap buffer overflow by scanning
+ // the shadow left and right and looking for the first adjacent
+ // object with a different memory tag. If that tag matches ptr_tag,
+ // check the allocator if it has a live chunk there.
+ tag_t *tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr));
+ tag_t *candidate_tag_ptr = nullptr, *left = tag_ptr, *right = tag_ptr;
+ uptr candidate_distance = 0;
+ for (; candidate_distance < 1000; candidate_distance++) {
+ if (MemIsShadow(reinterpret_cast<uptr>(left)) && TagsEqual(ptr_tag, left)) {
+ candidate_tag_ptr = left;
+ break;
+ }
+ --left;
+ if (MemIsShadow(reinterpret_cast<uptr>(right)) &&
+ TagsEqual(ptr_tag, right)) {
+ candidate_tag_ptr = right;
+ break;
+ }
+ ++right;
+ }
+
+ constexpr auto kCloseCandidateDistance = 1;
+ result.is_close = candidate_distance <= kCloseCandidateDistance;
+
+ result.after = candidate_tag_ptr == left;
+ result.untagged_addr = ShadowToMem(reinterpret_cast<uptr>(candidate_tag_ptr));
+ HwasanChunkView chunk = FindHeapChunkByAddress(result.untagged_addr);
if (chunk.IsAllocated()) {
+ result.heap.is_allocated = true;
+ result.heap.begin = chunk.Beg();
+ result.heap.end = chunk.End();
+ result.heap.thread_id = chunk.GetAllocThreadId();
+ result.heap.stack_id = chunk.GetAllocStackId();
+ }
+ return result;
+}
+
+void BaseReport::PrintHeapOrGlobalCandidate() const {
+ Decorator d;
+ if (candidate.heap.is_allocated) {
uptr offset;
const char *whence;
- if (untagged_addr < chunk.End() && untagged_addr >= chunk.Beg()) {
- offset = untagged_addr - chunk.Beg();
+ if (candidate.heap.begin <= untagged_addr &&
+ untagged_addr < candidate.heap.end) {
+ offset = untagged_addr - candidate.heap.begin;
whence = "inside";
- } else if (candidate == left) {
- offset = untagged_addr - chunk.End();
+ } else if (candidate.after) {
+ offset = untagged_addr - candidate.heap.end;
whence = "after";
} else {
- offset = chunk.Beg() - untagged_addr;
+ offset = candidate.heap.begin - untagged_addr;
whence = "before";
}
Printf("%s", d.Error());
@@ -329,12 +664,13 @@ static void ShowHeapOrGlobalCandidate(uptr untagged_addr, tag_t *candidate,
Printf("%s", d.Default());
Printf("%s", d.Location());
Printf("%p is located %zd bytes %s a %zd-byte region [%p,%p)\n",
- untagged_addr, offset, whence, chunk.UsedSize(), chunk.Beg(),
- chunk.End());
+ untagged_addr, offset, whence,
+ candidate.heap.end - candidate.heap.begin, candidate.heap.begin,
+ candidate.heap.end);
Printf("%s", d.Allocation());
- Printf("allocated by thread T%u here:\n", chunk.GetAllocThreadId());
+ Printf("allocated by thread T%u here:\n", candidate.heap.thread_id);
Printf("%s", d.Default());
- GetStackTraceFromId(chunk.GetAllocStackId()).Print();
+ GetStackTraceFromId(candidate.heap.stack_id).Print();
return;
}
// Check whether the address points into a loaded library. If so, this is
@@ -342,47 +678,45 @@ static void ShowHeapOrGlobalCandidate(uptr untagged_addr, tag_t *candidate,
const char *module_name;
uptr module_address;
Symbolizer *sym = Symbolizer::GetOrInit();
- if (sym->GetModuleNameAndOffsetForPC(mem, &module_name, &module_address)) {
+ if (sym->GetModuleNameAndOffsetForPC(candidate.untagged_addr, &module_name,
+ &module_address)) {
Printf("%s", d.Error());
Printf("\nCause: global-overflow\n");
Printf("%s", d.Default());
DataInfo info;
Printf("%s", d.Location());
- if (sym->SymbolizeData(mem, &info) && info.start) {
+ if (sym->SymbolizeData(candidate.untagged_addr, &info) && info.start) {
Printf(
"%p is located %zd bytes %s a %zd-byte global variable "
"%s [%p,%p) in %s\n",
untagged_addr,
- candidate == left ? untagged_addr - (info.start + info.size)
- : info.start - untagged_addr,
- candidate == left ? "after" : "before", info.size, info.name,
+ candidate.after ? untagged_addr - (info.start + info.size)
+ : info.start - untagged_addr,
+ candidate.after ? "after" : "before", info.size, info.name,
info.start, info.start + info.size, module_name);
} else {
- uptr size = GetGlobalSizeFromDescriptor(mem);
+ uptr size = GetGlobalSizeFromDescriptor(candidate.untagged_addr);
if (size == 0)
// We couldn't find the size of the global from the descriptors.
Printf(
"%p is located %s a global variable in "
"\n #0 0x%x (%s+0x%x)\n",
- untagged_addr, candidate == left ? "after" : "before", mem,
- module_name, module_address);
+ untagged_addr, candidate.after ? "after" : "before",
+ candidate.untagged_addr, module_name, module_address);
else
Printf(
"%p is located %s a %zd-byte global variable in "
"\n #0 0x%x (%s+0x%x)\n",
- untagged_addr, candidate == left ? "after" : "before", size, mem,
- module_name, module_address);
+ untagged_addr, candidate.after ? "after" : "before", size,
+ candidate.untagged_addr, module_name, module_address);
}
Printf("%s", d.Default());
}
}
-void PrintAddressDescription(
- uptr tagged_addr, uptr access_size,
- StackAllocationsRingBuffer *current_stack_allocations) {
+void BaseReport::PrintAddressDescription() const {
Decorator d;
int num_descriptions_printed = 0;
- uptr untagged_addr = UntagAddr(tagged_addr);
if (MemIsShadow(untagged_addr)) {
Printf("%s%p is HWAsan shadow memory.\n%s", d.Location(), untagged_addr,
@@ -391,113 +725,80 @@ void PrintAddressDescription(
}
// Print some very basic information about the address, if it's a heap.
- HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
- if (uptr beg = chunk.Beg()) {
- uptr size = chunk.ActualSize();
- Printf("%s[%p,%p) is a %s %s heap chunk; "
- "size: %zd offset: %zd\n%s",
- d.Location(),
- beg, beg + size,
- chunk.FromSmallHeap() ? "small" : "large",
- chunk.IsAllocated() ? "allocated" : "unallocated",
- size, untagged_addr - beg,
- d.Default());
+ if (heap.begin) {
+ Printf(
+ "%s[%p,%p) is a %s %s heap chunk; "
+ "size: %zd offset: %zd\n%s",
+ d.Location(), heap.begin, heap.begin + heap.size,
+ heap.from_small_heap ? "small" : "large",
+ heap.is_allocated ? "allocated" : "unallocated", heap.size,
+ untagged_addr - heap.begin, d.Default());
}
- tag_t addr_tag = GetTagFromPointer(tagged_addr);
+ auto announce_by_id = [](u32 thread_id) {
+ hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
+ if (thread_id == t->unique_id())
+ t->Announce();
+ });
+ };
- bool on_stack = false;
// Check stack first. If the address is on the stack of a live thread, we
// know it cannot be a heap / global overflow.
- hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
- if (t->AddrIsInStack(untagged_addr)) {
- on_stack = true;
- // TODO(fmayer): figure out how to distinguish use-after-return and
- // stack-buffer-overflow.
- Printf("%s", d.Error());
- Printf("\nCause: stack tag-mismatch\n");
- Printf("%s", d.Location());
- Printf("Address %p is located in stack of thread T%zd\n", untagged_addr,
- t->unique_id());
- Printf("%s", d.Default());
- t->Announce();
-
- auto *sa = (t == GetCurrentThread() && current_stack_allocations)
- ? current_stack_allocations
- : t->stack_allocations();
- PrintStackAllocations(sa, addr_tag, untagged_addr);
- num_descriptions_printed++;
- }
- });
+ for (const auto &sa : allocations.stack) {
+ // TODO(fmayer): figure out how to distinguish use-after-return and
+ // stack-buffer-overflow.
+ Printf("%s", d.Error());
+ Printf("\nCause: stack tag-mismatch\n");
+ Printf("%s", d.Location());
+ Printf("Address %p is located in stack of thread T%zd\n", untagged_addr,
+ sa.thread_id());
+ Printf("%s", d.Default());
+ announce_by_id(sa.thread_id());
+ PrintStackAllocations(sa.get(), ptr_tag, untagged_addr);
+ num_descriptions_printed++;
+ }
- // Check if this looks like a heap buffer overflow by scanning
- // the shadow left and right and looking for the first adjacent
- // object with a different memory tag. If that tag matches addr_tag,
- // check the allocator if it has a live chunk there.
- tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
- tag_t *candidate = nullptr, *left = tag_ptr, *right = tag_ptr;
- uptr candidate_distance = 0;
- for (; candidate_distance < 1000; candidate_distance++) {
- if (MemIsShadow(reinterpret_cast<uptr>(left)) &&
- TagsEqual(addr_tag, left)) {
- candidate = left;
- break;
- }
- --left;
- if (MemIsShadow(reinterpret_cast<uptr>(right)) &&
- TagsEqual(addr_tag, right)) {
- candidate = right;
- break;
- }
- ++right;
+ if (allocations.stack.empty() && candidate.untagged_addr &&
+ candidate.is_close) {
+ PrintHeapOrGlobalCandidate();
+ num_descriptions_printed++;
}
- constexpr auto kCloseCandidateDistance = 1;
+ for (const auto &ha : allocations.heap) {
+ const HeapAllocationRecord har = ha.har;
+
+ Printf("%s", d.Error());
+ Printf("\nCause: use-after-free\n");
+ Printf("%s", d.Location());
+ Printf("%p is located %zd bytes inside a %zd-byte region [%p,%p)\n",
+ untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
+ har.requested_size, UntagAddr(har.tagged_addr),
+ UntagAddr(har.tagged_addr) + har.requested_size);
+ Printf("%s", d.Allocation());
+ Printf("freed by thread T%u here:\n", ha.free_thread_id);
+ Printf("%s", d.Default());
+ GetStackTraceFromId(har.free_context_id).Print();
- if (!on_stack && candidate && candidate_distance <= kCloseCandidateDistance) {
- ShowHeapOrGlobalCandidate(untagged_addr, candidate, left, right);
+ Printf("%s", d.Allocation());
+ Printf("previously allocated by thread T%u here:\n", har.alloc_thread_id);
+ Printf("%s", d.Default());
+ GetStackTraceFromId(har.alloc_context_id).Print();
+
+ // Print a developer note: the index of this heap object
+ // in the thread's deallocation ring buffer.
+ Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ha.ring_index + 1,
+ flags()->heap_history_size);
+ Printf("hwasan_dev_note_num_matching_addrs: %zd\n", ha.num_matching_addrs);
+ Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n",
+ ha.num_matching_addrs_4b);
+
+ announce_by_id(ha.free_thread_id);
+ // TODO: announce_by_id(har.alloc_thread_id);
num_descriptions_printed++;
}
- hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
- // Scan all threads' ring buffers to find if it's a heap-use-after-free.
- HeapAllocationRecord har;
- uptr ring_index, num_matching_addrs, num_matching_addrs_4b;
- if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har,
- &ring_index, &num_matching_addrs,
- &num_matching_addrs_4b)) {
- Printf("%s", d.Error());
- Printf("\nCause: use-after-free\n");
- Printf("%s", d.Location());
- Printf("%p is located %zd bytes inside a %zd-byte region [%p,%p)\n",
- untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
- har.requested_size, UntagAddr(har.tagged_addr),
- UntagAddr(har.tagged_addr) + har.requested_size);
- Printf("%s", d.Allocation());
- Printf("freed by thread T%u here:\n", t->unique_id());
- Printf("%s", d.Default());
- GetStackTraceFromId(har.free_context_id).Print();
-
- Printf("%s", d.Allocation());
- Printf("previously allocated by thread T%u here:\n", har.alloc_thread_id);
- Printf("%s", d.Default());
- GetStackTraceFromId(har.alloc_context_id).Print();
-
- // Print a developer note: the index of this heap object
- // in the thread's deallocation ring buffer.
- Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ring_index + 1,
- flags()->heap_history_size);
- Printf("hwasan_dev_note_num_matching_addrs: %zd\n", num_matching_addrs);
- Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n",
- num_matching_addrs_4b);
-
- t->Announce();
- num_descriptions_printed++;
- }
- });
-
- if (candidate && num_descriptions_printed == 0) {
- ShowHeapOrGlobalCandidate(untagged_addr, candidate, left, right);
+ if (candidate.untagged_addr && num_descriptions_printed == 0) {
+ PrintHeapOrGlobalCandidate();
num_descriptions_printed++;
}
@@ -516,77 +817,24 @@ void PrintAddressDescription(
}
}
-void ReportStats() {}
-
-static void PrintTagInfoAroundAddr(tag_t *tag_ptr, uptr num_rows,
- void (*print_tag)(InternalScopedString &s,
- tag_t *tag)) {
- const uptr row_len = 16; // better be power of two.
- tag_t *center_row_beg = reinterpret_cast<tag_t *>(
- RoundDownTo(reinterpret_cast<uptr>(tag_ptr), row_len));
- tag_t *beg_row = center_row_beg - row_len * (num_rows / 2);
- tag_t *end_row = center_row_beg + row_len * ((num_rows + 1) / 2);
- InternalScopedString s;
- for (tag_t *row = beg_row; row < end_row; row += row_len) {
- s.append("%s", row == center_row_beg ? "=>" : " ");
- s.append("%p:", (void *)ShadowToMem(reinterpret_cast<uptr>(row)));
- for (uptr i = 0; i < row_len; i++) {
- s.append("%s", row + i == tag_ptr ? "[" : " ");
- print_tag(s, &row[i]);
- s.append("%s", row + i == tag_ptr ? "]" : " ");
- }
- s.append("\n");
+void BaseReport::PrintTags(uptr addr) const {
+ if (shadow.addr) {
+ PrintTagsAroundAddr(
+ addr, [&](uptr addr) { return GetTagCopy(addr); },
+ [&](uptr addr) { return GetShortTagCopy(addr); });
}
- Printf("%s", s.data());
}
-static void PrintTagsAroundAddr(tag_t *tag_ptr) {
- Printf(
- "Memory tags around the buggy address (one tag corresponds to %zd "
- "bytes):\n", kShadowAlignment);
- PrintTagInfoAroundAddr(tag_ptr, 17, [](InternalScopedString &s, tag_t *tag) {
- s.append("%02x", *tag);
- });
-
- Printf(
- "Tags for short granules around the buggy address (one tag corresponds "
- "to %zd bytes):\n",
- kShadowAlignment);
- PrintTagInfoAroundAddr(tag_ptr, 3, [](InternalScopedString &s, tag_t *tag) {
- if (*tag >= 1 && *tag <= kShadowAlignment) {
- uptr granule_addr = ShadowToMem(reinterpret_cast<uptr>(tag));
- s.append("%02x",
- *reinterpret_cast<u8 *>(granule_addr + kShadowAlignment - 1));
- } else {
- s.append("..");
- }
- });
- Printf(
- "See "
- "https://clang.llvm.org/docs/"
- "HardwareAssistedAddressSanitizerDesign.html#short-granules for a "
- "description of short granule tags\n");
-}
+class InvalidFreeReport : public BaseReport {
+ public:
+ InvalidFreeReport(StackTrace *stack, uptr tagged_addr)
+ : BaseReport(stack, flags()->halt_on_error, tagged_addr, 0) {}
+ ~InvalidFreeReport();
-uptr GetTopPc(StackTrace *stack) {
- return stack->size ? StackTrace::GetPreviousInstructionPc(stack->trace[0])
- : 0;
-}
+ private:
+};
-void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
- ScopedReport R(flags()->halt_on_error);
-
- uptr untagged_addr = UntagAddr(tagged_addr);
- tag_t ptr_tag = GetTagFromPointer(tagged_addr);
- tag_t *tag_ptr = nullptr;
- tag_t mem_tag = 0;
- if (MemIsApp(untagged_addr)) {
- tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr));
- if (MemIsShadow(reinterpret_cast<uptr>(tag_ptr)))
- mem_tag = *tag_ptr;
- else
- tag_ptr = nullptr;
- }
+InvalidFreeReport::~InvalidFreeReport() {
Decorator d;
Printf("%s", d.Error());
uptr pc = GetTopPc(stack);
@@ -600,36 +848,49 @@ void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
SanitizerToolName, bug_type, untagged_addr, pc);
}
Printf("%s", d.Access());
- if (tag_ptr)
- Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag, mem_tag);
+ if (shadow.addr) {
+ Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag,
+ GetTagCopy(MemToShadow(untagged_addr)));
+ }
Printf("%s", d.Default());
stack->Print();
- PrintAddressDescription(tagged_addr, 0, nullptr);
-
- if (tag_ptr)
- PrintTagsAroundAddr(tag_ptr);
-
+ PrintAddressDescription();
+ PrintTags(untagged_addr);
MaybePrintAndroidHelpUrl();
ReportErrorSummary(bug_type, stack);
}
-void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
- const u8 *expected) {
- uptr tail_size = kShadowAlignment - (orig_size % kShadowAlignment);
- u8 actual_expected[kShadowAlignment];
- internal_memcpy(actual_expected, expected, tail_size);
- tag_t ptr_tag = GetTagFromPointer(tagged_addr);
- // Short granule is stashed in the last byte of the magic string. To avoid
- // confusion, make the expected magic string contain the short granule tag.
- if (orig_size % kShadowAlignment != 0) {
- actual_expected[tail_size - 1] = ptr_tag;
+class TailOverwrittenReport : public BaseReport {
+ public:
+ explicit TailOverwrittenReport(StackTrace *stack, uptr tagged_addr,
+ uptr orig_size, const u8 *expected)
+ : BaseReport(stack, flags()->halt_on_error, tagged_addr, 0),
+ orig_size(orig_size),
+ tail_size(kShadowAlignment - (orig_size % kShadowAlignment)) {
+ CHECK_GT(tail_size, 0U);
+ CHECK_LT(tail_size, kShadowAlignment);
+ internal_memcpy(tail_copy,
+ reinterpret_cast<u8 *>(untagged_addr + orig_size),
+ tail_size);
+ internal_memcpy(actual_expected, expected, tail_size);
+ // Short granule is stashed in the last byte of the magic string. To avoid
+ // confusion, make the expected magic string contain the short granule tag.
+ if (orig_size % kShadowAlignment != 0)
+ actual_expected[tail_size - 1] = ptr_tag;
}
+ ~TailOverwrittenReport();
- ScopedReport R(flags()->halt_on_error);
+ private:
+ const uptr orig_size = 0;
+ const uptr tail_size = 0;
+ u8 actual_expected[kShadowAlignment] = {};
+ u8 tail_copy[kShadowAlignment] = {};
+};
+
+TailOverwrittenReport::~TailOverwrittenReport() {
Decorator d;
- uptr untagged_addr = UntagAddr(tagged_addr);
Printf("%s", d.Error());
const char *bug_type = "allocation-tail-overwritten";
Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName,
@@ -642,61 +903,62 @@ void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
Printf("deallocated here:\n");
Printf("%s", d.Default());
stack->Print();
- HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
- if (chunk.Beg()) {
+ if (heap.begin) {
Printf("%s", d.Allocation());
Printf("allocated here:\n");
Printf("%s", d.Default());
- GetStackTraceFromId(chunk.GetAllocStackId()).Print();
+ GetStackTraceFromId(heap.stack_id).Print();
}
InternalScopedString s;
- CHECK_GT(tail_size, 0U);
- CHECK_LT(tail_size, kShadowAlignment);
- u8 *tail = reinterpret_cast<u8*>(untagged_addr + orig_size);
- s.append("Tail contains: ");
- for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
- s.append(".. ");
+ u8 *tail = tail_copy;
+ s.AppendF("Tail contains: ");
+ for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.AppendF(".. ");
+ for (uptr i = 0; i < tail_size; i++) s.AppendF("%02x ", tail[i]);
+ s.AppendF("\n");
+ s.AppendF("Expected: ");
+ for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.AppendF(".. ");
+ for (uptr i = 0; i < tail_size; i++) s.AppendF("%02x ", actual_expected[i]);
+ s.AppendF("\n");
+ s.AppendF(" ");
+ for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.AppendF(" ");
for (uptr i = 0; i < tail_size; i++)
- s.append("%02x ", tail[i]);
- s.append("\n");
- s.append("Expected: ");
- for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
- s.append(".. ");
- for (uptr i = 0; i < tail_size; i++) s.append("%02x ", actual_expected[i]);
- s.append("\n");
- s.append(" ");
- for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
- s.append(" ");
- for (uptr i = 0; i < tail_size; i++)
- s.append("%s ", actual_expected[i] != tail[i] ? "^^" : " ");
-
- s.append("\nThis error occurs when a buffer overflow overwrites memory\n"
- "after a heap object, but within the %zd-byte granule, e.g.\n"
- " char *x = new char[20];\n"
- " x[25] = 42;\n"
- "%s does not detect such bugs in uninstrumented code at the time of write,"
- "\nbut can detect them at the time of free/delete.\n"
- "To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0\n",
- kShadowAlignment, SanitizerToolName);
+ s.AppendF("%s ", actual_expected[i] != tail[i] ? "^^" : " ");
+
+ s.AppendF(
+ "\nThis error occurs when a buffer overflow overwrites memory\n"
+ "after a heap object, but within the %zd-byte granule, e.g.\n"
+ " char *x = new char[20];\n"
+ " x[25] = 42;\n"
+ "%s does not detect such bugs in uninstrumented code at the time of "
+ "write,"
+ "\nbut can detect them at the time of free/delete.\n"
+ "To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0\n",
+ kShadowAlignment, SanitizerToolName);
Printf("%s", s.data());
GetCurrentThread()->Announce();
-
- tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
- PrintTagsAroundAddr(tag_ptr);
-
+ PrintTags(untagged_addr);
MaybePrintAndroidHelpUrl();
ReportErrorSummary(bug_type, stack);
}
-void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
- bool is_store, bool fatal, uptr *registers_frame) {
- ScopedReport R(fatal);
- SavedStackAllocations current_stack_allocations(
- GetCurrentThread()->stack_allocations());
+class TagMismatchReport : public BaseReport {
+ public:
+ explicit TagMismatchReport(StackTrace *stack, uptr tagged_addr,
+ uptr access_size, bool is_store, bool fatal,
+ uptr *registers_frame)
+ : BaseReport(stack, fatal, tagged_addr, access_size),
+ is_store(is_store),
+ registers_frame(registers_frame) {}
+ ~TagMismatchReport();
+
+ private:
+ const bool is_store;
+ const uptr *registers_frame;
+};
+TagMismatchReport::~TagMismatchReport() {
Decorator d;
- uptr untagged_addr = UntagAddr(tagged_addr);
// TODO: when possible, try to print heap-use-after-free, etc.
const char *bug_type = "tag-mismatch";
uptr pc = GetTopPc(stack);
@@ -706,32 +968,12 @@ void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
Thread *t = GetCurrentThread();
- sptr offset =
- __hwasan_test_shadow(reinterpret_cast<void *>(tagged_addr), access_size);
- CHECK_GE(offset, 0);
- CHECK_LT(offset, static_cast<sptr>(access_size));
- tag_t ptr_tag = GetTagFromPointer(tagged_addr);
- tag_t *tag_ptr =
- reinterpret_cast<tag_t *>(MemToShadow(untagged_addr + offset));
- tag_t mem_tag = *tag_ptr;
+ tag_t mem_tag = GetTagCopy(MemToShadow(untagged_addr + mismatch_offset));
Printf("%s", d.Access());
if (mem_tag && mem_tag < kShadowAlignment) {
- tag_t *granule_ptr = reinterpret_cast<tag_t *>((untagged_addr + offset) &
- ~(kShadowAlignment - 1));
- // If offset is 0, (untagged_addr + offset) is not aligned to granules.
- // This is the offset of the leftmost accessed byte within the bad granule.
- u8 in_granule_offset = (untagged_addr + offset) & (kShadowAlignment - 1);
- tag_t short_tag = granule_ptr[kShadowAlignment - 1];
- // The first mismatch was a short granule that matched the ptr_tag.
- if (short_tag == ptr_tag) {
- // If the access starts after the end of the short granule, then the first
- // bad byte is the first byte of the access; otherwise it is the first
- // byte past the end of the short granule
- if (mem_tag > in_granule_offset) {
- offset += mem_tag - in_granule_offset;
- }
- }
+ tag_t short_tag =
+ GetShortTagCopy(MemToShadow(untagged_addr + mismatch_offset));
Printf(
"%s of size %zu at %p tags: %02x/%02x(%02x) (ptr/mem) in thread T%zd\n",
is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
@@ -741,17 +983,16 @@ void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
mem_tag, t->unique_id());
}
- if (offset != 0)
- Printf("Invalid access starting at offset %zu\n", offset);
+ if (mismatch_offset)
+ Printf("Invalid access starting at offset %zu\n", mismatch_offset);
Printf("%s", d.Default());
stack->Print();
- PrintAddressDescription(tagged_addr, access_size,
- current_stack_allocations.get());
+ PrintAddressDescription();
t->Announce();
- PrintTagsAroundAddr(tag_ptr);
+ PrintTags(untagged_addr + mismatch_offset);
if (registers_frame)
ReportRegisters(registers_frame, pc);
@@ -759,10 +1000,26 @@ void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
MaybePrintAndroidHelpUrl();
ReportErrorSummary(bug_type, stack);
}
+} // namespace
+
+void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
+ InvalidFreeReport R(stack, tagged_addr);
+}
+
+void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
+ const u8 *expected) {
+ TailOverwrittenReport R(stack, tagged_addr, orig_size, expected);
+}
+
+void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
+ bool is_store, bool fatal, uptr *registers_frame) {
+ TagMismatchReport R(stack, tagged_addr, access_size, is_store, fatal,
+ registers_frame);
+}
// See the frame breakdown defined in __hwasan_tag_mismatch (from
// hwasan_tag_mismatch_{aarch64,riscv64}.S).
-void ReportRegisters(uptr *frame, uptr pc) {
+void ReportRegisters(const uptr *frame, uptr pc) {
Printf("Registers where the failure occurred (pc %p):\n", pc);
// We explicitly print a single line (4 registers/line) each iteration to
@@ -774,7 +1031,8 @@ void ReportRegisters(uptr *frame, uptr pc) {
frame[0], frame[1], frame[2], frame[3]);
#elif SANITIZER_RISCV64
Printf(" sp %016llx x1 %016llx x2 %016llx x3 %016llx\n",
- reinterpret_cast<u8 *>(frame) + 256, frame[1], frame[2], frame[3]);
+ reinterpret_cast<const u8 *>(frame) + 256, frame[1], frame[2],
+ frame[3]);
#endif
Printf(" x4 %016llx x5 %016llx x6 %016llx x7 %016llx\n",
frame[4], frame[5], frame[6], frame[7]);
@@ -792,7 +1050,7 @@ void ReportRegisters(uptr *frame, uptr pc) {
// passes it to this function.
#if defined(__aarch64__)
Printf(" x28 %016llx x29 %016llx x30 %016llx sp %016llx\n", frame[28],
- frame[29], frame[30], reinterpret_cast<u8 *>(frame) + 256);
+ frame[29], frame[30], reinterpret_cast<const u8 *>(frame) + 256);
#elif SANITIZER_RISCV64
Printf(" x28 %016llx x29 %016llx x30 %016llx x31 %016llx\n", frame[28],
frame[29], frame[30], frame[31]);
diff --git a/compiler-rt/lib/hwasan/hwasan_report.h b/compiler-rt/lib/hwasan/hwasan_report.h
index de86c38fc01f..bb9492a18cf9 100644
--- a/compiler-rt/lib/hwasan/hwasan_report.h
+++ b/compiler-rt/lib/hwasan/hwasan_report.h
@@ -26,7 +26,7 @@ void ReportTagMismatch(StackTrace *stack, uptr addr, uptr access_size,
void ReportInvalidFree(StackTrace *stack, uptr addr);
void ReportTailOverwritten(StackTrace *stack, uptr addr, uptr orig_size,
const u8 *expected);
-void ReportRegisters(uptr *registers_frame, uptr pc);
+void ReportRegisters(const uptr *registers_frame, uptr pc);
void ReportAtExitStatistics();
diff --git a/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S b/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S
index bcb0df420190..fd060c51cd8e 100644
--- a/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S
+++ b/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S
@@ -89,16 +89,16 @@ __hwasan_tag_mismatch:
ubfx x16, x0, #4, #52
ldrb w16, [x9, x16]
cmp w16, #0xf
- b.hi __hwasan_tag_mismatch_v2
+ b.hi mismatch
cmp w16, w17
- b.lo __hwasan_tag_mismatch_v2
+ b.lo mismatch
// Load the real tag from the last byte of the granule and compare against
// the pointer tag.
orr x16, x0, #0xf
ldrb w16, [x16]
cmp x16, x0, lsr #56
- b.ne __hwasan_tag_mismatch_v2
+ b.ne mismatch
// Restore x0, x1 and sp to their values from before the __hwasan_tag_mismatch
// call and resume execution.
@@ -108,6 +108,8 @@ __hwasan_tag_mismatch:
.global __hwasan_tag_mismatch_v2
.type __hwasan_tag_mismatch_v2, %function
__hwasan_tag_mismatch_v2:
+// Avoid using global label, to prevent "relocation out of range".
+mismatch:
CFI_STARTPROC
BTI_J
diff --git a/compiler-rt/lib/interception/interception.h b/compiler-rt/lib/interception/interception.h
index 078d33b61be3..9d8b60b2eef5 100644
--- a/compiler-rt/lib/interception/interception.h
+++ b/compiler-rt/lib/interception/interception.h
@@ -181,10 +181,15 @@ const interpose_substitution substitution_##func_name[] \
// FreeBSD's dynamic linker (incompliantly) gives non-weak symbols higher
// priority than weak ones so weak aliases won't work for indirect calls
// in position-independent (-fPIC / -fPIE) mode.
-# define __ASM_WEAK_WRAPPER(func)
+# define __ASM_WEAK_WRAPPER(func) ".globl " #func "\n"
# else
# define __ASM_WEAK_WRAPPER(func) ".weak " #func "\n"
# endif // SANITIZER_FREEBSD || SANITIZER_NETBSD
+# if defined(__arm__) || defined(__aarch64__)
+# define ASM_TYPE_FUNCTION_STR "%function"
+# else
+# define ASM_TYPE_FUNCTION_STR "@function"
+# endif
// Keep trampoline implementation in sync with sanitizer_common/sanitizer_asm.h
# define DECLARE_WRAPPER(ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__); \
@@ -196,7 +201,8 @@ const interpose_substitution substitution_##func_name[] \
__ASM_WEAK_WRAPPER(func) \
".set " #func ", " SANITIZER_STRINGIFY(TRAMPOLINE(func)) "\n" \
".globl " SANITIZER_STRINGIFY(TRAMPOLINE(func)) "\n" \
- ".type " SANITIZER_STRINGIFY(TRAMPOLINE(func)) ", %function\n" \
+ ".type " SANITIZER_STRINGIFY(TRAMPOLINE(func)) ", " \
+ ASM_TYPE_FUNCTION_STR "\n" \
SANITIZER_STRINGIFY(TRAMPOLINE(func)) ":\n" \
SANITIZER_STRINGIFY(CFI_STARTPROC) "\n" \
SANITIZER_STRINGIFY(ASM_TAIL_CALL) " __interceptor_" \
diff --git a/compiler-rt/lib/interception/interception_win.cpp b/compiler-rt/lib/interception/interception_win.cpp
index 00c317510e42..f1a549b938e2 100644
--- a/compiler-rt/lib/interception/interception_win.cpp
+++ b/compiler-rt/lib/interception/interception_win.cpp
@@ -1,4 +1,4 @@
-//===-- interception_linux.cpp ----------------------------------*- C++ -*-===//
+//===-- interception_win.cpp ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -431,7 +431,8 @@ static uptr AllocateMemoryForTrampoline(uptr image_address, size_t size) {
// The following prologues cannot be patched because of the short jump
// jumping to the patching region.
-#if SANITIZER_WINDOWS64
+// Short jump patterns below are only for x86_64.
+# if SANITIZER_WINDOWS_x64
// ntdll!wcslen in Win11
// 488bc1 mov rax,rcx
// 0fb710 movzx edx,word ptr [rax]
@@ -457,7 +458,12 @@ static const u8 kPrologueWithShortJump2[] = {
// Returns 0 on error.
static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
-#if SANITIZER_WINDOWS64
+#if SANITIZER_ARM64
+ // An ARM64 instruction is 4 bytes long.
+ return 4;
+#endif
+
+# if SANITIZER_WINDOWS_x64
if (memcmp((u8*)address, kPrologueWithShortJump1,
sizeof(kPrologueWithShortJump1)) == 0 ||
memcmp((u8*)address, kPrologueWithShortJump2,
@@ -539,7 +545,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
return 7;
}
-#if SANITIZER_WINDOWS64
+# if SANITIZER_WINDOWS_x64
switch (*(u8*)address) {
case 0xA1: // A1 XX XX XX XX XX XX XX XX :
// movabs eax, dword ptr ds:[XXXXXXXX]
@@ -572,6 +578,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0x018a: // mov al, byte ptr [rcx]
return 2;
+ case 0x058A: // 8A 05 XX XX XX XX : mov al, byte ptr [XX XX XX XX]
case 0x058B: // 8B 05 XX XX XX XX : mov eax, dword ptr [XX XX XX XX]
if (rel_offset)
*rel_offset = 2;
@@ -619,7 +626,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
// mov rax, QWORD PTR [rip + XXXXXXXX]
case 0x25ff48: // 48 ff 25 XX XX XX XX :
// rex.W jmp QWORD PTR [rip + XXXXXXXX]
-
+ case 0x158D4C: // 4c 8d 15 XX XX XX XX : lea r10, [rip + XX]
// Instructions having offset relative to 'rip' need offset adjustment.
if (rel_offset)
*rel_offset = 3;
@@ -721,16 +728,22 @@ static bool CopyInstructions(uptr to, uptr from, size_t size) {
size_t instruction_size = GetInstructionSize(from + cursor, &rel_offset);
if (!instruction_size)
return false;
- _memcpy((void*)(to + cursor), (void*)(from + cursor),
+ _memcpy((void *)(to + cursor), (void *)(from + cursor),
(size_t)instruction_size);
if (rel_offset) {
- uptr delta = to - from;
- uptr relocated_offset = *(u32*)(to + cursor + rel_offset) - delta;
-#if SANITIZER_WINDOWS64
- if (relocated_offset + 0x80000000U >= 0xFFFFFFFFU)
+# if SANITIZER_WINDOWS64
+ // we want to make sure that the new relative offset still fits in 32-bits
+ // this will be untrue if relocated_offset \notin [-2**31, 2**31)
+ s64 delta = to - from;
+ s64 relocated_offset = *(s32 *)(to + cursor + rel_offset) - delta;
+ if (-0x8000'0000ll > relocated_offset || relocated_offset > 0x7FFF'FFFFll)
return false;
-#endif
- *(u32*)(to + cursor + rel_offset) = relocated_offset;
+# else
+ // on 32-bit, the relative offset will always be correct
+ s32 delta = to - from;
+ s32 relocated_offset = *(s32 *)(to + cursor + rel_offset) - delta;
+# endif
+ *(s32 *)(to + cursor + rel_offset) = relocated_offset;
}
cursor += instruction_size;
}
diff --git a/compiler-rt/lib/lsan/lsan_allocator.h b/compiler-rt/lib/lsan/lsan_allocator.h
index 84cce4c6baeb..5eed0cbdb309 100644
--- a/compiler-rt/lib/lsan/lsan_allocator.h
+++ b/compiler-rt/lib/lsan/lsan_allocator.h
@@ -68,20 +68,42 @@ using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
#else
# if SANITIZER_FUCHSIA || defined(__powerpc64__)
const uptr kAllocatorSpace = ~(uptr)0;
+# if SANITIZER_RISCV64
+// See the comments in compiler-rt/lib/asan/asan_allocator.h for why these
+// values were chosen.
+const uptr kAllocatorSize = UINT64_C(1) << 33; // 8GB
+using LSanSizeClassMap = SizeClassMap</*kNumBits=*/2,
+ /*kMinSizeLog=*/5,
+ /*kMidSizeLog=*/8,
+ /*kMaxSizeLog=*/18,
+ /*kNumCachedHintT=*/8,
+ /*kMaxBytesCachedLog=*/10>;
+static_assert(LSanSizeClassMap::kNumClassesRounded <= 32,
+ "32 size classes is the optimal number to ensure tests run "
+                  "efficiently on Fuchsia.");
+# else
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+using LSanSizeClassMap = DefaultSizeClassMap;
+# endif
+# elif SANITIZER_RISCV64
+const uptr kAllocatorSpace = ~(uptr)0;
+const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
+using LSanSizeClassMap = DefaultSizeClassMap;
# elif SANITIZER_APPLE
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+using LSanSizeClassMap = DefaultSizeClassMap;
# else
const uptr kAllocatorSpace = 0x500000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+using LSanSizeClassMap = DefaultSizeClassMap;
# endif
template <typename AddressSpaceViewTy>
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = kAllocatorSpace;
static const uptr kSpaceSize = kAllocatorSize;
static const uptr kMetadataSize = sizeof(ChunkMetadata);
- typedef DefaultSizeClassMap SizeClassMap;
+ using SizeClassMap = LSanSizeClassMap;
typedef NoOpMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
using AddressSpaceView = AddressSpaceViewTy;
diff --git a/compiler-rt/lib/lsan/lsan_common.cpp b/compiler-rt/lib/lsan/lsan_common.cpp
index 9b73ddbdc756..8b1af5b629fb 100644
--- a/compiler-rt/lib/lsan/lsan_common.cpp
+++ b/compiler-rt/lib/lsan/lsan_common.cpp
@@ -950,8 +950,8 @@ void LeakReport::PrintSummary() {
allocations += leaks_[i].hit_count;
}
InternalScopedString summary;
- summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
- allocations);
+ summary.AppendF("%zu byte(s) leaked in %zu allocation(s).", bytes,
+ allocations);
ReportErrorSummary(summary.data());
}
diff --git a/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp b/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp
index bcad1c205fc7..cb3fe1f859f7 100644
--- a/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp
+++ b/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp
@@ -119,7 +119,8 @@ void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
auto i = __sanitizer::InternalLowerBound(params->allocator_caches, begin);
if (i < params->allocator_caches.size() &&
params->allocator_caches[i] >= begin &&
- end - params->allocator_caches[i] <= sizeof(AllocatorCache)) {
+ params->allocator_caches[i] <= end &&
+ end - params->allocator_caches[i] >= sizeof(AllocatorCache)) {
// Split the range in two and omit the allocator cache within.
ScanRangeForPointers(begin, params->allocator_caches[i],
&params->argument->frontier, "TLS", kReachable);
diff --git a/compiler-rt/lib/lsan/lsan_interceptors.cpp b/compiler-rt/lib/lsan/lsan_interceptors.cpp
index fac6133ddf21..885f7ad5ddba 100644
--- a/compiler-rt/lib/lsan/lsan_interceptors.cpp
+++ b/compiler-rt/lib/lsan/lsan_interceptors.cpp
@@ -490,9 +490,9 @@ INTERCEPTOR(int, pthread_detach, void *thread) {
return result;
}
-INTERCEPTOR(int, pthread_exit, void *retval) {
+INTERCEPTOR(void, pthread_exit, void *retval) {
GetThreadArgRetval().Finish(GetThreadSelf(), retval);
- return REAL(pthread_exit)(retval);
+ REAL(pthread_exit)(retval);
}
# if SANITIZER_INTERCEPT_TRYJOIN
diff --git a/compiler-rt/lib/memprof/memprof_allocator.cpp b/compiler-rt/lib/memprof/memprof_allocator.cpp
index efdfa5ad04a6..af46ffdb248e 100644
--- a/compiler-rt/lib/memprof/memprof_allocator.cpp
+++ b/compiler-rt/lib/memprof/memprof_allocator.cpp
@@ -738,3 +738,13 @@ int __memprof_profile_dump() {
// detected during the dumping process.
return 0;
}
+
+void __memprof_profile_reset() {
+ if (report_file.fd != kInvalidFd && report_file.fd != kStdoutFd &&
+ report_file.fd != kStderrFd) {
+ CloseFile(report_file.fd);
+ // Setting the file descriptor to kInvalidFd ensures that we will reopen the
+ // file when invoking Write again.
+ report_file.fd = kInvalidFd;
+ }
+}
diff --git a/compiler-rt/lib/memprof/memprof_descriptions.cpp b/compiler-rt/lib/memprof/memprof_descriptions.cpp
index 669b1acd8c71..48b74b6bc87f 100644
--- a/compiler-rt/lib/memprof/memprof_descriptions.cpp
+++ b/compiler-rt/lib/memprof/memprof_descriptions.cpp
@@ -49,14 +49,14 @@ void DescribeThread(MemprofThreadContext *context) {
}
context->announced = true;
InternalScopedString str;
- str.append("Thread %s", MemprofThreadIdAndName(context).c_str());
+ str.AppendF("Thread %s", MemprofThreadIdAndName(context).c_str());
if (context->parent_tid == kInvalidTid) {
- str.append(" created by unknown thread\n");
+ str.AppendF(" created by unknown thread\n");
Printf("%s", str.data());
return;
}
- str.append(" created by %s here:\n",
- MemprofThreadIdAndName(context->parent_tid).c_str());
+ str.AppendF(" created by %s here:\n",
+ MemprofThreadIdAndName(context->parent_tid).c_str());
Printf("%s", str.data());
StackDepotGet(context->stack_id).Print();
// Recursively described parent thread if needed.
diff --git a/compiler-rt/lib/memprof/memprof_interface_internal.h b/compiler-rt/lib/memprof/memprof_interface_internal.h
index 0aca4afc9afa..318bc4104405 100644
--- a/compiler-rt/lib/memprof/memprof_interface_internal.h
+++ b/compiler-rt/lib/memprof/memprof_interface_internal.h
@@ -49,6 +49,7 @@ extern uptr __memprof_shadow_memory_dynamic_address;
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE extern char
__memprof_profile_filename[1];
SANITIZER_INTERFACE_ATTRIBUTE int __memprof_profile_dump();
+SANITIZER_INTERFACE_ATTRIBUTE void __memprof_profile_reset();
SANITIZER_INTERFACE_ATTRIBUTE void __memprof_load(uptr p);
SANITIZER_INTERFACE_ATTRIBUTE void __memprof_store(uptr p);
diff --git a/compiler-rt/lib/msan/msan.cpp b/compiler-rt/lib/msan/msan.cpp
index 987c5560825d..c4f47dea1104 100644
--- a/compiler-rt/lib/msan/msan.cpp
+++ b/compiler-rt/lib/msan/msan.cpp
@@ -138,8 +138,8 @@ static void RegisterMsanFlags(FlagParser *parser, Flags *f) {
#include "msan_flags.inc"
#undef MSAN_FLAG
- FlagHandlerKeepGoing *fh_keep_going =
- new (FlagParser::Alloc) FlagHandlerKeepGoing(&f->halt_on_error);
+ FlagHandlerKeepGoing *fh_keep_going = new (GetGlobalLowLevelAllocator())
+ FlagHandlerKeepGoing(&f->halt_on_error);
parser->RegisterHandler("keep_going", fh_keep_going,
"deprecated, use halt_on_error");
}
diff --git a/compiler-rt/lib/msan/msan_interceptors.cpp b/compiler-rt/lib/msan/msan_interceptors.cpp
index f5e0d3cb9a67..c2d740e7762b 100644
--- a/compiler-rt/lib/msan/msan_interceptors.cpp
+++ b/compiler-rt/lib/msan/msan_interceptors.cpp
@@ -243,23 +243,37 @@ INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
#define MSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE
#endif
+#if (!SANITIZER_FREEBSD && !SANITIZER_NETBSD) || __GLIBC_PREREQ(2, 33)
+template <class T>
+static NOINLINE void clear_mallinfo(T *sret) {
+ ENSURE_MSAN_INITED();
+ internal_memset(sret, 0, sizeof(*sret));
+ __msan_unpoison(sret, sizeof(*sret));
+}
+#endif
+
#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
-// This function actually returns a struct by value, but we can't unpoison a
-// temporary! The following is equivalent on all supported platforms but
-// aarch64 (which uses a different register for sret value). We have a test
-// to confirm that.
-INTERCEPTOR(void, mallinfo, __sanitizer_struct_mallinfo *sret) {
-#ifdef __aarch64__
- uptr r8;
- asm volatile("mov %0,x8" : "=r" (r8));
- sret = reinterpret_cast<__sanitizer_struct_mallinfo*>(r8);
+// Interceptors use NRVO and assume that sret will be pre-allocated in
+// caller frame.
+INTERCEPTOR(__sanitizer_struct_mallinfo, mallinfo) {
+ __sanitizer_struct_mallinfo sret;
+ clear_mallinfo(&sret);
+ return sret;
+}
+# define MSAN_MAYBE_INTERCEPT_MALLINFO INTERCEPT_FUNCTION(mallinfo)
+#else
+# define MSAN_MAYBE_INTERCEPT_MALLINFO
#endif
- REAL(memset)(sret, 0, sizeof(*sret));
- __msan_unpoison(sret, sizeof(*sret));
+
+#if __GLIBC_PREREQ(2, 33)
+INTERCEPTOR(__sanitizer_struct_mallinfo2, mallinfo2) {
+ __sanitizer_struct_mallinfo2 sret;
+ clear_mallinfo(&sret);
+ return sret;
}
-#define MSAN_MAYBE_INTERCEPT_MALLINFO INTERCEPT_FUNCTION(mallinfo)
+# define MSAN_MAYBE_INTERCEPT_MALLINFO2 INTERCEPT_FUNCTION(mallinfo2)
#else
-#define MSAN_MAYBE_INTERCEPT_MALLINFO
+# define MSAN_MAYBE_INTERCEPT_MALLINFO2
#endif
#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
@@ -401,11 +415,25 @@ INTERCEPTOR(char *, strncat, char *dest, const char *src, SIZE_T n) {
__msan_unpoison(endptr, sizeof(*endptr)); \
return res;
+// On s390x, long double return values are passed via implicit reference,
+// which needs to be unpoisoned. We make the implicit pointer explicit.
+#define INTERCEPTOR_STRTO_SRET_BODY(func, sret, ...) \
+ ENSURE_MSAN_INITED(); \
+ REAL(func)(sret, __VA_ARGS__); \
+ __msan_unpoison(sret, sizeof(*sret)); \
+ __msan_unpoison(endptr, sizeof(*endptr));
+
#define INTERCEPTOR_STRTO(ret_type, func, char_type) \
INTERCEPTOR(ret_type, func, const char_type *nptr, char_type **endptr) { \
INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr); \
}
+#define INTERCEPTOR_STRTO_SRET(ret_type, func, char_type) \
+ INTERCEPTOR(void, func, ret_type *sret, const char_type *nptr, \
+ char_type **endptr) { \
+ INTERCEPTOR_STRTO_SRET_BODY(func, sret, nptr, endptr); \
+ }
+
#define INTERCEPTOR_STRTO_BASE(ret_type, func, char_type) \
INTERCEPTOR(ret_type, func, const char_type *nptr, char_type **endptr, \
int base) { \
@@ -418,6 +446,12 @@ INTERCEPTOR(char *, strncat, char *dest, const char *src, SIZE_T n) {
INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr, loc); \
}
+#define INTERCEPTOR_STRTO_SRET_LOC(ret_type, func, char_type) \
+ INTERCEPTOR(void, func, ret_type *sret, const char_type *nptr, \
+ char_type **endptr, void *loc) { \
+ INTERCEPTOR_STRTO_SRET_BODY(func, sret, nptr, endptr, loc); \
+ }
+
#define INTERCEPTOR_STRTO_BASE_LOC(ret_type, func, char_type) \
INTERCEPTOR(ret_type, func, const char_type *nptr, char_type **endptr, \
int base, void *loc) { \
@@ -429,6 +463,10 @@ INTERCEPTOR(char *, strncat, char *dest, const char *src, SIZE_T n) {
INTERCEPTOR_STRTO(ret_type, func, char_type) \
INTERCEPTOR_STRTO_LOC(ret_type, func##_l, char_type)
+#define INTERCEPTORS_STRTO_SRET(ret_type, func, char_type) \
+ INTERCEPTOR_STRTO_SRET(ret_type, func, char_type) \
+ INTERCEPTOR_STRTO_SRET_LOC(ret_type, func##_l, char_type)
+
#define INTERCEPTORS_STRTO_BASE(ret_type, func, char_type) \
INTERCEPTOR_STRTO_BASE(ret_type, func, char_type) \
INTERCEPTOR_STRTO_BASE_LOC(ret_type, func##_l, char_type)
@@ -440,6 +478,12 @@ INTERCEPTOR(char *, strncat, char *dest, const char *src, SIZE_T n) {
INTERCEPTOR_STRTO_LOC(ret_type, __##func##_l, char_type) \
INTERCEPTOR_STRTO_LOC(ret_type, __##func##_internal, char_type)
+#define INTERCEPTORS_STRTO_SRET(ret_type, func, char_type) \
+ INTERCEPTOR_STRTO_SRET(ret_type, func, char_type) \
+ INTERCEPTOR_STRTO_SRET_LOC(ret_type, func##_l, char_type) \
+ INTERCEPTOR_STRTO_SRET_LOC(ret_type, __##func##_l, char_type) \
+ INTERCEPTOR_STRTO_SRET_LOC(ret_type, __##func##_internal, char_type)
+
#define INTERCEPTORS_STRTO_BASE(ret_type, func, char_type) \
INTERCEPTOR_STRTO_BASE(ret_type, func, char_type) \
INTERCEPTOR_STRTO_BASE_LOC(ret_type, func##_l, char_type) \
@@ -449,7 +493,11 @@ INTERCEPTOR(char *, strncat, char *dest, const char *src, SIZE_T n) {
INTERCEPTORS_STRTO(double, strtod, char)
INTERCEPTORS_STRTO(float, strtof, char)
+#ifdef __s390x__
+INTERCEPTORS_STRTO_SRET(long double, strtold, char)
+#else
INTERCEPTORS_STRTO(long double, strtold, char)
+#endif
INTERCEPTORS_STRTO_BASE(long, strtol, char)
INTERCEPTORS_STRTO_BASE(long long, strtoll, char)
INTERCEPTORS_STRTO_BASE(unsigned long, strtoul, char)
@@ -458,12 +506,43 @@ INTERCEPTORS_STRTO_BASE(u64, strtouq, char)
INTERCEPTORS_STRTO(double, wcstod, wchar_t)
INTERCEPTORS_STRTO(float, wcstof, wchar_t)
+#ifdef __s390x__
+INTERCEPTORS_STRTO_SRET(long double, wcstold, wchar_t)
+#else
INTERCEPTORS_STRTO(long double, wcstold, wchar_t)
+#endif
INTERCEPTORS_STRTO_BASE(long, wcstol, wchar_t)
INTERCEPTORS_STRTO_BASE(long long, wcstoll, wchar_t)
INTERCEPTORS_STRTO_BASE(unsigned long, wcstoul, wchar_t)
INTERCEPTORS_STRTO_BASE(unsigned long long, wcstoull, wchar_t)
+#if SANITIZER_GLIBC
+INTERCEPTORS_STRTO(double, __isoc23_strtod, char)
+INTERCEPTORS_STRTO(float, __isoc23_strtof, char)
+#ifdef __s390x__
+INTERCEPTORS_STRTO_SRET(long double, __isoc23_strtold, char)
+#else
+INTERCEPTORS_STRTO(long double, __isoc23_strtold, char)
+#endif
+INTERCEPTORS_STRTO_BASE(long, __isoc23_strtol, char)
+INTERCEPTORS_STRTO_BASE(long long, __isoc23_strtoll, char)
+INTERCEPTORS_STRTO_BASE(unsigned long, __isoc23_strtoul, char)
+INTERCEPTORS_STRTO_BASE(unsigned long long, __isoc23_strtoull, char)
+INTERCEPTORS_STRTO_BASE(u64, __isoc23_strtouq, char)
+
+INTERCEPTORS_STRTO(double, __isoc23_wcstod, wchar_t)
+INTERCEPTORS_STRTO(float, __isoc23_wcstof, wchar_t)
+#ifdef __s390x__
+INTERCEPTORS_STRTO_SRET(long double, __isoc23_wcstold, wchar_t)
+#else
+INTERCEPTORS_STRTO(long double, __isoc23_wcstold, wchar_t)
+#endif
+INTERCEPTORS_STRTO_BASE(long, __isoc23_wcstol, wchar_t)
+INTERCEPTORS_STRTO_BASE(long long, __isoc23_wcstoll, wchar_t)
+INTERCEPTORS_STRTO_BASE(unsigned long, __isoc23_wcstoul, wchar_t)
+INTERCEPTORS_STRTO_BASE(unsigned long long, __isoc23_wcstoull, wchar_t)
+#endif
+
#if SANITIZER_NETBSD
#define INTERCEPT_STRTO(func) \
INTERCEPT_FUNCTION(func); \
@@ -474,6 +553,12 @@ INTERCEPTORS_STRTO_BASE(unsigned long long, wcstoull, wchar_t)
INTERCEPT_FUNCTION(func##_l); \
INTERCEPT_FUNCTION(__##func##_l); \
INTERCEPT_FUNCTION(__##func##_internal);
+
+#define INTERCEPT_STRTO_VER(func, ver) \
+ INTERCEPT_FUNCTION_VER(func, ver); \
+ INTERCEPT_FUNCTION_VER(func##_l, ver); \
+ INTERCEPT_FUNCTION_VER(__##func##_l, ver); \
+ INTERCEPT_FUNCTION_VER(__##func##_internal, ver);
#endif
@@ -1713,6 +1798,7 @@ void InitializeInterceptors() {
MSAN_MAYBE_INTERCEPT_CFREE;
MSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE;
MSAN_MAYBE_INTERCEPT_MALLINFO;
+ MSAN_MAYBE_INTERCEPT_MALLINFO2;
MSAN_MAYBE_INTERCEPT_MALLOPT;
MSAN_MAYBE_INTERCEPT_MALLOC_STATS;
INTERCEPT_FUNCTION(fread);
@@ -1735,7 +1821,11 @@ void InitializeInterceptors() {
INTERCEPT_FUNCTION(strncat);
INTERCEPT_STRTO(strtod);
INTERCEPT_STRTO(strtof);
+#ifdef SANITIZER_NLDBL_VERSION
+ INTERCEPT_STRTO_VER(strtold, SANITIZER_NLDBL_VERSION);
+#else
INTERCEPT_STRTO(strtold);
+#endif
INTERCEPT_STRTO(strtol);
INTERCEPT_STRTO(strtoul);
INTERCEPT_STRTO(strtoll);
@@ -1743,11 +1833,33 @@ void InitializeInterceptors() {
INTERCEPT_STRTO(strtouq);
INTERCEPT_STRTO(wcstod);
INTERCEPT_STRTO(wcstof);
+#ifdef SANITIZER_NLDBL_VERSION
+ INTERCEPT_STRTO_VER(wcstold, SANITIZER_NLDBL_VERSION);
+#else
INTERCEPT_STRTO(wcstold);
+#endif
INTERCEPT_STRTO(wcstol);
INTERCEPT_STRTO(wcstoul);
INTERCEPT_STRTO(wcstoll);
INTERCEPT_STRTO(wcstoull);
+#if SANITIZER_GLIBC
+ INTERCEPT_STRTO(__isoc23_strtod);
+ INTERCEPT_STRTO(__isoc23_strtof);
+ INTERCEPT_STRTO(__isoc23_strtold);
+ INTERCEPT_STRTO(__isoc23_strtol);
+ INTERCEPT_STRTO(__isoc23_strtoul);
+ INTERCEPT_STRTO(__isoc23_strtoll);
+ INTERCEPT_STRTO(__isoc23_strtoull);
+ INTERCEPT_STRTO(__isoc23_strtouq);
+ INTERCEPT_STRTO(__isoc23_wcstod);
+ INTERCEPT_STRTO(__isoc23_wcstof);
+ INTERCEPT_STRTO(__isoc23_wcstold);
+ INTERCEPT_STRTO(__isoc23_wcstol);
+ INTERCEPT_STRTO(__isoc23_wcstoul);
+ INTERCEPT_STRTO(__isoc23_wcstoll);
+ INTERCEPT_STRTO(__isoc23_wcstoull);
+#endif
+
#ifdef SANITIZER_NLDBL_VERSION
INTERCEPT_FUNCTION_VER(vswprintf, SANITIZER_NLDBL_VERSION);
INTERCEPT_FUNCTION_VER(swprintf, SANITIZER_NLDBL_VERSION);
diff --git a/compiler-rt/lib/msan/msan_report.cpp b/compiler-rt/lib/msan/msan_report.cpp
index 90164e50ca3a..99bf81f66dc9 100644
--- a/compiler-rt/lib/msan/msan_report.cpp
+++ b/compiler-rt/lib/msan/msan_report.cpp
@@ -269,7 +269,7 @@ void DescribeMemoryRange(const void *x, uptr size) {
void ReportUMRInsideAddressRange(const char *function, const void *start,
uptr size, uptr offset) {
- function = StripFunctionName(function);
+ function = StackTracePrinter::GetOrInit()->StripFunctionName(function);
Decorator d;
Printf("%s", d.Warning());
Printf("%sUninitialized bytes in %s%s%s at offset %zu inside [%p, %zu)%s\n",
diff --git a/compiler-rt/lib/orc/bitmask_enum.h b/compiler-rt/lib/orc/bitmask_enum.h
new file mode 100644
index 000000000000..b9fb776bdf23
--- /dev/null
+++ b/compiler-rt/lib/orc/bitmask_enum.h
@@ -0,0 +1,151 @@
+//===---- bitmask_enum.h - Enable bitmask operations on enums ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime support library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ORC_RT_BITMASK_ENUM_H
+#define ORC_RT_BITMASK_ENUM_H
+
+#include "stl_extras.h"
+
+#include <cassert>
+#include <type_traits>
+
+namespace __orc_rt {
+
+/// ORC_RT_MARK_AS_BITMASK_ENUM lets you opt in an individual enum type so you
+/// can perform bitwise operations on it without putting static_cast everywhere.
+///
+/// \code
+/// enum MyEnum {
+/// E1 = 1, E2 = 2, E3 = 4, E4 = 8,
+/// ORC_RT_MARK_AS_BITMASK_ENUM(/* LargestValue = */ E4)
+/// };
+///
+/// void Foo() {
+/// MyEnum A = (E1 | E2) & E3 ^ ~E4; // Look, ma: No static_cast!
+/// }
+/// \endcode
+///
+/// Normally when you do a bitwise operation on an enum value, you get back an
+/// instance of the underlying type (e.g. int). But using this macro, bitwise
+/// ops on your enum will return you back instances of the enum. This is
+/// particularly useful for enums which represent a combination of flags.
+///
+/// The parameter to ORC_RT_MARK_AS_BITMASK_ENUM should be the largest
+/// individual value in your enum.
+///
+/// All of the enum's values must be non-negative.
+#define ORC_RT_MARK_AS_BITMASK_ENUM(LargestValue) \
+ ORC_RT_BITMASK_LARGEST_ENUMERATOR = LargestValue
+
+/// ORC_RT_DECLARE_ENUM_AS_BITMASK can be used to declare an enum type as a bit
+/// set, so that bitwise operation on such enum does not require static_cast.
+///
+/// \code
+/// enum MyEnum { E1 = 1, E2 = 2, E3 = 4, E4 = 8 };
+/// ORC_RT_DECLARE_ENUM_AS_BITMASK(MyEnum, E4);
+///
+/// void Foo() {
+/// MyEnum A = (E1 | E2) & E3 ^ ~E4; // No static_cast
+/// }
+/// \endcode
+///
+/// The second parameter to ORC_RT_DECLARE_ENUM_AS_BITMASK specifies the largest
+/// bit value of the enum type.
+///
+/// ORC_RT_DECLARE_ENUM_AS_BITMASK should be used in __orc_rt namespace.
+///
+/// This a non-intrusive alternative for ORC_RT_MARK_AS_BITMASK_ENUM. It allows
+/// declaring more than one non-scoped enumerations as bitmask types in the same
+/// scope. Otherwise it provides the same functionality as
+/// ORC_RT_MARK_AS_BITMASK_ENUM.
+#define ORC_RT_DECLARE_ENUM_AS_BITMASK(Enum, LargestValue) \
+ template <> struct is_bitmask_enum<Enum> : std::true_type {}; \
+ template <> struct largest_bitmask_enum_bit<Enum> { \
+ static constexpr std::underlying_type_t<Enum> value = LargestValue; \
+ }
+
+/// Traits class to determine whether an enum has been declared as a bitwise
+/// enum via ORC_RT_DECLARE_ENUM_AS_BITMASK.
+template <typename E, typename Enable = void>
+struct is_bitmask_enum : std::false_type {};
+
+template <typename E>
+struct is_bitmask_enum<
+ E, std::enable_if_t<sizeof(E::ORC_RT_BITMASK_LARGEST_ENUMERATOR) >= 0>>
+ : std::true_type {};
+
+template <typename E>
+inline constexpr bool is_bitmask_enum_v = is_bitmask_enum<E>::value;
+
+/// Traits class to determine bitmask enum largest bit.
+template <typename E, typename Enable = void> struct largest_bitmask_enum_bit;
+
+template <typename E>
+struct largest_bitmask_enum_bit<
+ E, std::enable_if_t<sizeof(E::ORC_RT_BITMASK_LARGEST_ENUMERATOR) >= 0>> {
+ using UnderlyingTy = std::underlying_type_t<E>;
+ static constexpr UnderlyingTy value =
+ static_cast<UnderlyingTy>(E::ORC_RT_BITMASK_LARGEST_ENUMERATOR);
+};
+
+template <typename E> constexpr std::underlying_type_t<E> Mask() {
+ return bit_ceil(largest_bitmask_enum_bit<E>::value) - 1;
+}
+
+template <typename E> constexpr std::underlying_type_t<E> Underlying(E Val) {
+ auto U = static_cast<std::underlying_type_t<E>>(Val);
+ assert(U >= 0 && "Negative enum values are not allowed");
+ assert(U <= Mask<E>() && "Enum value too large (or langest val too small");
+ return U;
+}
+
+template <typename E, typename = std::enable_if_t<is_bitmask_enum_v<E>>>
+constexpr E operator~(E Val) {
+ return static_cast<E>(~Underlying(Val) & Mask<E>());
+}
+
+template <typename E, typename = std::enable_if_t<is_bitmask_enum_v<E>>>
+constexpr E operator|(E LHS, E RHS) {
+ return static_cast<E>(Underlying(LHS) | Underlying(RHS));
+}
+
+template <typename E, typename = std::enable_if_t<is_bitmask_enum_v<E>>>
+constexpr E operator&(E LHS, E RHS) {
+ return static_cast<E>(Underlying(LHS) & Underlying(RHS));
+}
+
+template <typename E, typename = std::enable_if_t<is_bitmask_enum_v<E>>>
+constexpr E operator^(E LHS, E RHS) {
+ return static_cast<E>(Underlying(LHS) ^ Underlying(RHS));
+}
+
+template <typename E, typename = std::enable_if_t<is_bitmask_enum_v<E>>>
+E &operator|=(E &LHS, E RHS) {
+ LHS = LHS | RHS;
+ return LHS;
+}
+
+template <typename E, typename = std::enable_if_t<is_bitmask_enum_v<E>>>
+E &operator&=(E &LHS, E RHS) {
+ LHS = LHS & RHS;
+ return LHS;
+}
+
+template <typename E, typename = std::enable_if_t<is_bitmask_enum_v<E>>>
+E &operator^=(E &LHS, E RHS) {
+ LHS = LHS ^ RHS;
+ return LHS;
+}
+
+} // end namespace __orc_rt
+
+#endif // ORC_RT_BITMASK_ENUM_H
diff --git a/compiler-rt/lib/orc/elfnix_tls.ppc64.S b/compiler-rt/lib/orc/elfnix_tls.ppc64.S
new file mode 100644
index 000000000000..84854795dba1
--- /dev/null
+++ b/compiler-rt/lib/orc/elfnix_tls.ppc64.S
@@ -0,0 +1,33 @@
+//===-- orc_rt_elfnix_tls.ppc64.s -------------------------------*- ASM -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime support library.
+//
+//===----------------------------------------------------------------------===//
+
+// The content of this file is PowerPC64 only.
+#if defined(__powerpc64__)
+
+ .text
+ // TODO: add fast-path for repeat access.
+ // See https://github.com/llvm/llvm-project/issues/51162.
+ .global ___orc_rt_elfnix_tls_get_addr
+___orc_rt_elfnix_tls_get_addr:
+ addis 2, 12, .TOC.-___orc_rt_elfnix_tls_get_addr@ha
+ addi 2, 2, .TOC.-___orc_rt_elfnix_tls_get_addr@l
+ mflr 0
+ std 0, 16(1)
+ stdu 1, -32(1)
+ bl __orc_rt_elfnix_tls_get_addr_impl
+ nop
+ addi 1, 1, 32
+ ld 0, 16(1)
+ mtlr 0
+ blr
+
+#endif
diff --git a/compiler-rt/lib/orc/macho_platform.cpp b/compiler-rt/lib/orc/macho_platform.cpp
index cb248aae0666..e3a1cdf3c4fc 100644
--- a/compiler-rt/lib/orc/macho_platform.cpp
+++ b/compiler-rt/lib/orc/macho_platform.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "macho_platform.h"
+#include "bitmask_enum.h"
#include "common.h"
#include "debug.h"
#include "error.h"
@@ -34,7 +35,7 @@ using namespace __orc_rt::macho;
// Declare function tags for functions in the JIT process.
ORC_RT_JIT_DISPATCH_TAG(__orc_rt_macho_push_initializers_tag)
-ORC_RT_JIT_DISPATCH_TAG(__orc_rt_macho_symbol_lookup_tag)
+ORC_RT_JIT_DISPATCH_TAG(__orc_rt_macho_push_symbols_tag)
struct objc_image_info;
struct mach_header;
@@ -148,6 +149,16 @@ struct TLVDescriptor {
};
class MachOPlatformRuntimeState {
+public:
+ // Used internally by MachOPlatformRuntimeState, but made public to enable
+ // serialization.
+ enum class MachOExecutorSymbolFlags : uint8_t {
+ None = 0,
+ Weak = 1U << 0,
+ Callable = 1U << 1,
+ ORC_RT_MARK_AS_BITMASK_ENUM(/* LargestValue = */ Callable)
+ };
+
private:
struct AtExitEntry {
void (*Func)(void *);
@@ -256,11 +267,17 @@ private:
IntervalMap<char *, UnwindSections, IntervalCoalescing::Disabled>;
struct JITDylibState {
+
+ using SymbolTableMap =
+ std::unordered_map<std::string_view,
+ std::pair<ExecutorAddr, MachOExecutorSymbolFlags>>;
+
std::string Name;
void *Header = nullptr;
bool Sealed = false;
size_t LinkedAgainstRefCount = 0;
size_t DlRefCount = 0;
+ SymbolTableMap SymbolTable;
std::vector<JITDylibState *> Deps;
AtExitsVector AtExits;
const objc_image_info *ObjCImageInfo = nullptr;
@@ -296,6 +313,14 @@ public:
Error deregisterJITDylib(void *Header);
Error registerThreadDataSection(span<const char> ThreadDataSection);
Error deregisterThreadDataSection(span<const char> ThreadDataSection);
+ Error registerObjectSymbolTable(
+ ExecutorAddr HeaderAddr,
+ const std::vector<std::tuple<ExecutorAddr, ExecutorAddr,
+ MachOExecutorSymbolFlags>> &Entries);
+ Error deregisterObjectSymbolTable(
+ ExecutorAddr HeaderAddr,
+ const std::vector<std::tuple<ExecutorAddr, ExecutorAddr,
+ MachOExecutorSymbolFlags>> &Entries);
Error registerObjectPlatformSections(
ExecutorAddr HeaderAddr, std::optional<UnwindSectionInfo> UnwindSections,
std::vector<std::pair<std::string_view, ExecutorAddrRange>> Secs);
@@ -306,7 +331,7 @@ public:
const char *dlerror();
void *dlopen(std::string_view Name, int Mode);
int dlclose(void *DSOHandle);
- void *dlsym(void *DSOHandle, std::string_view Symbol);
+ void *dlsym(void *DSOHandle, const char *Symbol);
int registerAtExit(void (*F)(void *), void *Arg, void *DSOHandle);
void runAtExits(std::unique_lock<std::mutex> &JDStatesLock,
@@ -321,8 +346,19 @@ private:
JITDylibState *getJITDylibStateByHeader(void *DSOHandle);
JITDylibState *getJITDylibStateByName(std::string_view Path);
- Expected<ExecutorAddr> lookupSymbolInJITDylib(void *DSOHandle,
- std::string_view Symbol);
+ /// Requests materialization of the given symbols. For each pair, the bool
+ /// element indicates whether the symbol is required (true) or weakly
+ /// referenced (false).
+ Error requestPushSymbols(JITDylibState &JDS,
+ span<std::pair<std::string_view, bool>> Symbols);
+
+ /// Attempts to look up the given symbols locally, requesting a push from the
+ /// remote if they're not found. Results are written to the Result span, which
+ /// must have the same size as the Symbols span.
+ Error
+ lookupSymbols(JITDylibState &JDS, std::unique_lock<std::mutex> &JDStatesLock,
+ span<std::pair<ExecutorAddr, MachOExecutorSymbolFlags>> Result,
+ span<std::pair<std::string_view, bool>> Symbols);
bool lookupUnwindSections(void *Addr, unw_dynamic_unwind_sections &Info);
@@ -366,6 +402,47 @@ private:
std::map<const char *, size_t> ThreadDataSections;
};
+} // anonymous namespace
+
+namespace __orc_rt {
+
+class SPSMachOExecutorSymbolFlags;
+
+template <>
+class SPSSerializationTraits<
+ SPSMachOExecutorSymbolFlags,
+ MachOPlatformRuntimeState::MachOExecutorSymbolFlags> {
+private:
+ using UT = std::underlying_type_t<
+ MachOPlatformRuntimeState::MachOExecutorSymbolFlags>;
+
+public:
+ static size_t
+ size(const MachOPlatformRuntimeState::MachOExecutorSymbolFlags &SF) {
+ return sizeof(UT);
+ }
+
+ static bool
+ serialize(SPSOutputBuffer &OB,
+ const MachOPlatformRuntimeState::MachOExecutorSymbolFlags &SF) {
+ return SPSArgList<UT>::serialize(OB, static_cast<UT>(SF));
+ }
+
+ static bool
+ deserialize(SPSInputBuffer &IB,
+ MachOPlatformRuntimeState::MachOExecutorSymbolFlags &SF) {
+ UT Tmp;
+ if (!SPSArgList<UT>::deserialize(IB, Tmp))
+ return false;
+ SF = static_cast<MachOPlatformRuntimeState::MachOExecutorSymbolFlags>(Tmp);
+ return true;
+ }
+};
+
+} // namespace __orc_rt
+
+namespace {
+
MachOPlatformRuntimeState *MachOPlatformRuntimeState::MOPS = nullptr;
Error MachOPlatformRuntimeState::create() {
@@ -492,6 +569,48 @@ Error MachOPlatformRuntimeState::deregisterThreadDataSection(
return Error::success();
}
+Error MachOPlatformRuntimeState::registerObjectSymbolTable(
+ ExecutorAddr HeaderAddr,
+ const std::vector<std::tuple<ExecutorAddr, ExecutorAddr,
+ MachOExecutorSymbolFlags>> &Entries) {
+
+ std::lock_guard<std::mutex> Lock(JDStatesMutex);
+ auto *JDS = getJITDylibStateByHeader(HeaderAddr.toPtr<void *>());
+ if (!JDS) {
+ std::ostringstream ErrStream;
+ ErrStream << "Could not register object platform sections for "
+ "unrecognized header "
+ << HeaderAddr.toPtr<void *>();
+ return make_error<StringError>(ErrStream.str());
+ }
+
+ for (auto &[NameAddr, SymAddr, Flags] : Entries)
+ JDS->SymbolTable[NameAddr.toPtr<const char *>()] = {SymAddr, Flags};
+
+ return Error::success();
+}
+
+Error MachOPlatformRuntimeState::deregisterObjectSymbolTable(
+ ExecutorAddr HeaderAddr,
+ const std::vector<std::tuple<ExecutorAddr, ExecutorAddr,
+ MachOExecutorSymbolFlags>> &Entries) {
+
+ std::lock_guard<std::mutex> Lock(JDStatesMutex);
+ auto *JDS = getJITDylibStateByHeader(HeaderAddr.toPtr<void *>());
+ if (!JDS) {
+ std::ostringstream ErrStream;
+ ErrStream << "Could not register object platform sections for "
+ "unrecognized header "
+ << HeaderAddr.toPtr<void *>();
+ return make_error<StringError>(ErrStream.str());
+ }
+
+ for (auto &[NameAddr, SymAddr, Flags] : Entries)
+ JDS->SymbolTable.erase(NameAddr.toPtr<const char *>());
+
+ return Error::success();
+}
+
Error MachOPlatformRuntimeState::registerObjectPlatformSections(
ExecutorAddr HeaderAddr, std::optional<UnwindSectionInfo> UnwindInfo,
std::vector<std::pair<std::string_view, ExecutorAddrRange>> Secs) {
@@ -577,7 +696,7 @@ Error MachOPlatformRuntimeState::deregisterObjectPlatformSections(
// TODO: Add a JITDylib prepare-for-teardown operation that clears all
// registered sections, causing this function to take the fast-path.
ORC_RT_DEBUG({
- printdbg("MachOPlatform: Registering object sections for %p.\n",
+ printdbg("MachOPlatform: Deregistering object sections for %p.\n",
HeaderAddr.toPtr<void *>());
});
@@ -687,15 +806,26 @@ int MachOPlatformRuntimeState::dlclose(void *DSOHandle) {
return 0;
}
-void *MachOPlatformRuntimeState::dlsym(void *DSOHandle,
- std::string_view Symbol) {
- auto Addr = lookupSymbolInJITDylib(DSOHandle, Symbol);
- if (!Addr) {
- DLFcnError = toString(Addr.takeError());
- return 0;
+void *MachOPlatformRuntimeState::dlsym(void *DSOHandle, const char *Symbol) {
+ std::unique_lock<std::mutex> Lock(JDStatesMutex);
+ auto *JDS = getJITDylibStateByHeader(DSOHandle);
+ if (!JDS) {
+ std::ostringstream ErrStream;
+ ErrStream << "In call to dlsym, unrecognized header address " << DSOHandle;
+ DLFcnError = ErrStream.str();
+ return nullptr;
}
- return Addr->toPtr<void *>();
+ std::string MangledName = std::string("_") + Symbol;
+ std::pair<std::string_view, bool> Lookup(MangledName, false);
+ std::pair<ExecutorAddr, MachOExecutorSymbolFlags> Result;
+
+ if (auto Err = lookupSymbols(*JDS, Lock, {&Result, 1}, {&Lookup, 1})) {
+ DLFcnError = toString(std::move(Err));
+ return nullptr;
+ }
+
+ return Result.first.toPtr<void *>();
}
int MachOPlatformRuntimeState::registerAtExit(void (*F)(void *), void *Arg,
@@ -774,17 +904,84 @@ MachOPlatformRuntimeState::getJITDylibStateByName(std::string_view Name) {
return nullptr;
}
-Expected<ExecutorAddr>
-MachOPlatformRuntimeState::lookupSymbolInJITDylib(void *DSOHandle,
- std::string_view Sym) {
- Expected<ExecutorAddr> Result((ExecutorAddr()));
- if (auto Err = WrapperFunction<SPSExpected<SPSExecutorAddr>(
- SPSExecutorAddr, SPSString)>::call(&__orc_rt_macho_symbol_lookup_tag,
- Result,
- ExecutorAddr::fromPtr(DSOHandle),
- Sym))
+Error MachOPlatformRuntimeState::requestPushSymbols(
+ JITDylibState &JDS, span<std::pair<std::string_view, bool>> Symbols) {
+ Error OpErr = Error::success();
+ if (auto Err = WrapperFunction<SPSError(
+ SPSExecutorAddr, SPSSequence<SPSTuple<SPSString, bool>>)>::
+ call(&__orc_rt_macho_push_symbols_tag, OpErr,
+ ExecutorAddr::fromPtr(JDS.Header), Symbols)) {
+ cantFail(std::move(OpErr));
return std::move(Err);
- return Result;
+ }
+ return OpErr;
+}
+
+Error MachOPlatformRuntimeState::lookupSymbols(
+ JITDylibState &JDS, std::unique_lock<std::mutex> &JDStatesLock,
+ span<std::pair<ExecutorAddr, MachOExecutorSymbolFlags>> Result,
+ span<std::pair<std::string_view, bool>> Symbols) {
+ assert(JDStatesLock.owns_lock() &&
+ "JDStatesLock should be locked at call-site");
+ assert(Result.size() == Symbols.size() &&
+ "Results and Symbols span sizes should match");
+
+ // Make an initial pass over the local symbol table.
+ std::vector<size_t> MissingSymbolIndexes;
+ for (size_t Idx = 0; Idx != Symbols.size(); ++Idx) {
+ auto I = JDS.SymbolTable.find(Symbols[Idx].first);
+ if (I != JDS.SymbolTable.end())
+ Result[Idx] = I->second;
+ else
+ MissingSymbolIndexes.push_back(Idx);
+ }
+
+ // If everything has been resolved already then bail out early.
+ if (MissingSymbolIndexes.empty())
+ return Error::success();
+
+ // Otherwise call back to the controller to try to request that the symbol
+ // be materialized.
+ std::vector<std::pair<std::string_view, bool>> MissingSymbols;
+ MissingSymbols.reserve(MissingSymbolIndexes.size());
+ ORC_RT_DEBUG({
+ printdbg("requesting push of %i missing symbols...\n",
+ MissingSymbolIndexes.size());
+ });
+ for (auto MissingIdx : MissingSymbolIndexes)
+ MissingSymbols.push_back(Symbols[MissingIdx]);
+
+ JDStatesLock.unlock();
+ if (auto Err = requestPushSymbols(
+ JDS, {MissingSymbols.data(), MissingSymbols.size()}))
+ return Err;
+ JDStatesLock.lock();
+
+ // Try to resolve the previously missing symbols locally.
+ std::vector<size_t> MissingRequiredSymbols;
+ for (auto MissingIdx : MissingSymbolIndexes) {
+ auto I = JDS.SymbolTable.find(Symbols[MissingIdx].first);
+ if (I != JDS.SymbolTable.end())
+ Result[MissingIdx] = I->second;
+ else {
+ if (Symbols[MissingIdx].second)
+ MissingRequiredSymbols.push_back(MissingIdx);
+ else
+ Result[MissingIdx] = {ExecutorAddr(), {}};
+ }
+ }
+
+ // Error out if any missing symbols could not be resolved.
+ if (!MissingRequiredSymbols.empty()) {
+ std::ostringstream ErrStream;
+ ErrStream << "Lookup could not find required symbols: [ ";
+ for (auto MissingIdx : MissingRequiredSymbols)
+ ErrStream << "\"" << Symbols[MissingIdx].first << "\" ";
+ ErrStream << "]";
+ return make_error<StringError>(ErrStream.str());
+ }
+
+ return Error::success();
}
// eh-frame registration functions.
@@ -1194,6 +1391,38 @@ __orc_rt_macho_register_object_platform_sections(char *ArgData,
}
ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
+__orc_rt_macho_register_object_symbol_table(char *ArgData, size_t ArgSize) {
+ using SymtabContainer = std::vector<
+ std::tuple<ExecutorAddr, ExecutorAddr,
+ MachOPlatformRuntimeState::MachOExecutorSymbolFlags>>;
+ return WrapperFunction<SPSError(
+ SPSExecutorAddr, SPSSequence<SPSTuple<SPSExecutorAddr, SPSExecutorAddr,
+ SPSMachOExecutorSymbolFlags>>)>::
+ handle(ArgData, ArgSize,
+ [](ExecutorAddr HeaderAddr, SymtabContainer &Symbols) {
+ return MachOPlatformRuntimeState::get()
+ .registerObjectSymbolTable(HeaderAddr, Symbols);
+ })
+ .release();
+}
+
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
+__orc_rt_macho_deregister_object_symbol_table(char *ArgData, size_t ArgSize) {
+ using SymtabContainer = std::vector<
+ std::tuple<ExecutorAddr, ExecutorAddr,
+ MachOPlatformRuntimeState::MachOExecutorSymbolFlags>>;
+ return WrapperFunction<SPSError(
+ SPSExecutorAddr, SPSSequence<SPSTuple<SPSExecutorAddr, SPSExecutorAddr,
+ SPSMachOExecutorSymbolFlags>>)>::
+ handle(ArgData, ArgSize,
+ [](ExecutorAddr HeaderAddr, SymtabContainer &Symbols) {
+ return MachOPlatformRuntimeState::get()
+ .deregisterObjectSymbolTable(HeaderAddr, Symbols);
+ })
+ .release();
+}
+
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_macho_deregister_object_platform_sections(char *ArgData,
size_t ArgSize) {
return WrapperFunction<SPSError(SPSExecutorAddr,
diff --git a/compiler-rt/lib/orc/simple_packed_serialization.h b/compiler-rt/lib/orc/simple_packed_serialization.h
index 9cebbeadee02..488d2407ddd4 100644
--- a/compiler-rt/lib/orc/simple_packed_serialization.h
+++ b/compiler-rt/lib/orc/simple_packed_serialization.h
@@ -281,6 +281,13 @@ public:
static constexpr bool available = true;
};
+/// Trivial span<T> -> SPSSequence<SPSElementTagT> serialization.
+template <typename SPSElementTagT, typename T>
+class TrivialSPSSequenceSerialization<SPSElementTagT, span<T>> {
+public:
+ static constexpr bool available = true;
+};
+
/// Trivial SPSSequence<SPSElementTagT> -> std::vector<T> deserialization.
template <typename SPSElementTagT, typename T>
class TrivialSPSSequenceDeserialization<SPSElementTagT, std::vector<T>> {
@@ -385,6 +392,44 @@ public:
}
};
+/// SPSTuple serialization for std::tuple.
+template <typename... SPSTagTs, typename... Ts>
+class SPSSerializationTraits<SPSTuple<SPSTagTs...>, std::tuple<Ts...>> {
+private:
+ using TupleArgList = typename SPSTuple<SPSTagTs...>::AsArgList;
+ using ArgIndices = std::make_index_sequence<sizeof...(Ts)>;
+
+ template <std::size_t... I>
+ static size_t size(const std::tuple<Ts...> &T, std::index_sequence<I...>) {
+ return TupleArgList::size(std::get<I>(T)...);
+ }
+
+ template <std::size_t... I>
+ static bool serialize(SPSOutputBuffer &OB, const std::tuple<Ts...> &T,
+ std::index_sequence<I...>) {
+ return TupleArgList::serialize(OB, std::get<I>(T)...);
+ }
+
+ template <std::size_t... I>
+ static bool deserialize(SPSInputBuffer &IB, std::tuple<Ts...> &T,
+ std::index_sequence<I...>) {
+ return TupleArgList::deserialize(IB, std::get<I>(T)...);
+ }
+
+public:
+ static size_t size(const std::tuple<Ts...> &T) {
+ return size(T, ArgIndices{});
+ }
+
+ static bool serialize(SPSOutputBuffer &OB, const std::tuple<Ts...> &T) {
+ return serialize(OB, T, ArgIndices{});
+ }
+
+ static bool deserialize(SPSInputBuffer &IB, std::tuple<Ts...> &T) {
+ return deserialize(IB, T, ArgIndices{});
+ }
+};
+
/// SPSTuple serialization for std::pair.
template <typename SPSTagT1, typename SPSTagT2, typename T1, typename T2>
class SPSSerializationTraits<SPSTuple<SPSTagT1, SPSTagT2>, std::pair<T1, T2>> {
diff --git a/compiler-rt/lib/orc/stl_extras.h b/compiler-rt/lib/orc/stl_extras.h
index 33c877b193c5..80a6cd13ac28 100644
--- a/compiler-rt/lib/orc/stl_extras.h
+++ b/compiler-rt/lib/orc/stl_extras.h
@@ -13,6 +13,7 @@
#ifndef ORC_RT_STL_EXTRAS_H
#define ORC_RT_STL_EXTRAS_H
+#include <cstdint>
#include <utility>
#include <tuple>
@@ -28,6 +29,17 @@ template <class Ty> struct identity {
const Ty &operator()(const Ty &self) const { return self; }
};
+/// Substitute for std::bit_ceil.
+constexpr uint64_t bit_ceil(uint64_t Val) noexcept {
+ Val |= (Val >> 1);
+ Val |= (Val >> 2);
+ Val |= (Val >> 4);
+ Val |= (Val >> 8);
+ Val |= (Val >> 16);
+ Val |= (Val >> 32);
+ return Val + 1;
+}
+
} // namespace __orc_rt
#endif // ORC_RT_STL_EXTRAS
diff --git a/compiler-rt/lib/orc/tests/unit/bitmask_enum_test.cpp b/compiler-rt/lib/orc/tests/unit/bitmask_enum_test.cpp
new file mode 100644
index 000000000000..4c27d54fb4a9
--- /dev/null
+++ b/compiler-rt/lib/orc/tests/unit/bitmask_enum_test.cpp
@@ -0,0 +1,143 @@
+//===-- bitmask_enum_test.cpp ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "bitmask_enum.h"
+#include "gtest/gtest.h"
+
+#include <sstream>
+#include <string>
+
+using namespace __orc_rt;
+
+namespace {
+
+enum Flags { F0 = 0, F1 = 1, F2 = 2, F3 = 4, F4 = 8 };
+
+} // namespace
+
+namespace __orc_rt {
+ORC_RT_DECLARE_ENUM_AS_BITMASK(Flags, F4);
+} // namespace __orc_rt
+
+static_assert(is_bitmask_enum<Flags>::value != 0);
+static_assert(largest_bitmask_enum_bit<Flags>::value == Flags::F4);
+
+namespace {
+
+static_assert(is_bitmask_enum<Flags>::value != 0);
+static_assert(largest_bitmask_enum_bit<Flags>::value == Flags::F4);
+
+TEST(BitmaskEnumTest, BitwiseOr) {
+ Flags f = F1 | F2;
+ EXPECT_EQ(3, f);
+
+ f = f | F3;
+ EXPECT_EQ(7, f);
+}
+
+TEST(BitmaskEnumTest, BitwiseOrEquals) {
+ Flags f = F1;
+ f |= F3;
+ EXPECT_EQ(5, f);
+
+ // |= should return a reference to the LHS.
+ f = F2;
+ (f |= F3) = F1;
+ EXPECT_EQ(F1, f);
+}
+
+TEST(BitmaskEnumTest, BitwiseAnd) {
+ Flags f = static_cast<Flags>(3) & F2;
+ EXPECT_EQ(F2, f);
+
+ f = (f | F3) & (F1 | F2 | F3);
+ EXPECT_EQ(6, f);
+}
+
+TEST(BitmaskEnumTest, BitwiseAndEquals) {
+ Flags f = F1 | F2 | F3;
+ f &= F1 | F2;
+ EXPECT_EQ(3, f);
+
+ // &= should return a reference to the LHS.
+ (f &= F1) = F3;
+ EXPECT_EQ(F3, f);
+}
+
+TEST(BitmaskEnumTest, BitwiseXor) {
+ Flags f = (F1 | F2) ^ (F2 | F3);
+ EXPECT_EQ(5, f);
+
+ f = f ^ F1;
+ EXPECT_EQ(4, f);
+}
+
+TEST(BitmaskEnumTest, BitwiseXorEquals) {
+ Flags f = (F1 | F2);
+ f ^= (F2 | F4);
+ EXPECT_EQ(9, f);
+
+ // ^= should return a reference to the LHS.
+ (f ^= F4) = F3;
+ EXPECT_EQ(F3, f);
+}
+
+TEST(BitmaskEnumTest, ConstantExpression) {
+ constexpr Flags f1 = ~F1;
+ constexpr Flags f2 = F1 | F2;
+ constexpr Flags f3 = F1 & F2;
+ constexpr Flags f4 = F1 ^ F2;
+ EXPECT_EQ(f1, ~F1);
+ EXPECT_EQ(f2, F1 | F2);
+ EXPECT_EQ(f3, F1 & F2);
+ EXPECT_EQ(f4, F1 ^ F2);
+}
+
+TEST(BitmaskEnumTest, BitwiseNot) {
+ Flags f = ~F1;
+ EXPECT_EQ(14, f); // Largest value for f is 15.
+ EXPECT_EQ(15, ~F0);
+}
+
+enum class FlagsClass {
+ F0 = 0,
+ F1 = 1,
+ F2 = 2,
+ F3 = 4,
+ ORC_RT_MARK_AS_BITMASK_ENUM(F3)
+};
+
+TEST(BitmaskEnumTest, ScopedEnum) {
+ FlagsClass f = (FlagsClass::F1 & ~FlagsClass::F0) | FlagsClass::F2;
+ f |= FlagsClass::F3;
+ EXPECT_EQ(7, static_cast<int>(f));
+}
+
+struct Container {
+ enum Flags {
+ F0 = 0,
+ F1 = 1,
+ F2 = 2,
+ F3 = 4,
+ ORC_RT_MARK_AS_BITMASK_ENUM(F3)
+ };
+
+ static Flags getFlags() {
+ Flags f = F0 | F1;
+ f |= F2;
+ return f;
+ }
+};
+
+TEST(BitmaskEnumTest, EnumInStruct) { EXPECT_EQ(3, Container::getFlags()); }
+
+} // namespace
diff --git a/compiler-rt/lib/orc/tests/unit/simple_packed_serialization_test.cpp b/compiler-rt/lib/orc/tests/unit/simple_packed_serialization_test.cpp
index 5577ef919fc6..e7a78062b210 100644
--- a/compiler-rt/lib/orc/tests/unit/simple_packed_serialization_test.cpp
+++ b/compiler-rt/lib/orc/tests/unit/simple_packed_serialization_test.cpp
@@ -154,6 +154,11 @@ TEST(SimplePackedSerializationTest, SpanSerialization) {
EXPECT_EQ(InS.data(), Buffer.get() + sizeof(uint64_t));
}
+TEST(SimplePackedSerializationTest, StdTupleSerialization) {
+ std::tuple<int32_t, std::string, bool> P(42, "foo", true);
+ blobSerializationRoundTrip<SPSTuple<int32_t, SPSString, bool>>(P);
+}
+
TEST(SimplePackedSerializationTest, StdPairSerialization) {
std::pair<int32_t, std::string> P(42, "foo");
blobSerializationRoundTrip<SPSTuple<int32_t, SPSString>,
diff --git a/compiler-rt/lib/profile/InstrProfiling.c b/compiler-rt/lib/profile/InstrProfiling.c
index 0dd5ff5ae633..da04d8ebdec9 100644
--- a/compiler-rt/lib/profile/InstrProfiling.c
+++ b/compiler-rt/lib/profile/InstrProfiling.c
@@ -60,6 +60,10 @@ COMPILER_RT_VISIBILITY void __llvm_profile_reset_counters(void) {
(__llvm_profile_get_version() & VARIANT_MASK_BYTE_COVERAGE) ? 0xFF : 0;
memset(I, ResetValue, E - I);
+ I = __llvm_profile_begin_bitmap();
+ E = __llvm_profile_end_bitmap();
+ memset(I, 0x0, E - I);
+
const __llvm_profile_data *DataBegin = __llvm_profile_begin_data();
const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
const __llvm_profile_data *DI;
diff --git a/compiler-rt/lib/profile/InstrProfiling.h b/compiler-rt/lib/profile/InstrProfiling.h
index 4433d7bd4887..c5b0b34f2d8a 100644
--- a/compiler-rt/lib/profile/InstrProfiling.h
+++ b/compiler-rt/lib/profile/InstrProfiling.h
@@ -88,6 +88,8 @@ const char *__llvm_profile_begin_names(void);
const char *__llvm_profile_end_names(void);
char *__llvm_profile_begin_counters(void);
char *__llvm_profile_end_counters(void);
+char *__llvm_profile_begin_bitmap(void);
+char *__llvm_profile_end_bitmap(void);
ValueProfNode *__llvm_profile_begin_vnodes();
ValueProfNode *__llvm_profile_end_vnodes();
uint32_t *__llvm_profile_begin_orderfile();
@@ -101,11 +103,11 @@ void __llvm_profile_reset_counters(void);
/*!
* \brief Merge profile data from buffer.
*
- * Read profile data form buffer \p Profile and merge with in-process profile
- * counters. The client is expected to have checked or already knows the profile
- * data in the buffer matches the in-process counter structure before calling
- * it. Returns 0 (success) if the profile data is valid. Upon reading
- * invalid/corrupted profile data, returns 1 (failure).
+ * Read profile data from buffer \p Profile and merge with in-process profile
+ * counters and bitmaps. The client is expected to have checked or already
+ * know the profile data in the buffer matches the in-process counter
+ * structure before calling it. Returns 0 (success) if the profile data is
+ * valid. Upon reading invalid/corrupted profile data, returns 1 (failure).
*/
int __llvm_profile_merge_from_buffer(const char *Profile, uint64_t Size);
@@ -113,8 +115,8 @@ int __llvm_profile_merge_from_buffer(const char *Profile, uint64_t Size);
*
* Returns 0 (success) if the profile data in buffer \p Profile with size
* \p Size was generated by the same binary and therefore matches
- * structurally the in-process counters. If the profile data in buffer is
- * not compatible, the interface returns 1 (failure).
+ * structurally the in-process counters and bitmaps. If the profile data in
+ * buffer is not compatible, the interface returns 1 (failure).
*/
int __llvm_profile_check_compatibility(const char *Profile,
uint64_t Size);
@@ -276,6 +278,13 @@ uint64_t __llvm_profile_get_num_counters(const char *Begin, const char *End);
/*! \brief Get the size of the profile counters section in bytes. */
uint64_t __llvm_profile_get_counters_size(const char *Begin, const char *End);
+/*! \brief Get the number of bytes in the profile bitmap section. */
+uint64_t __llvm_profile_get_num_bitmap_bytes(const char *Begin,
+ const char *End);
+
+/*! \brief Get the size of the profile name section in bytes. */
+uint64_t __llvm_profile_get_name_size(const char *Begin, const char *End);
+
/* ! \brief Given the sizes of the data and counter information, return the
* number of padding bytes before and after the counters, and after the names,
* in the raw profile.
@@ -286,8 +295,9 @@ uint64_t __llvm_profile_get_counters_size(const char *Begin, const char *End);
* needed to achieve that.
*/
void __llvm_profile_get_padding_sizes_for_counters(
- uint64_t DataSize, uint64_t CountersSize, uint64_t NamesSize,
- uint64_t *PaddingBytesBeforeCounters, uint64_t *PaddingBytesAfterCounters,
+ uint64_t DataSize, uint64_t CountersSize, uint64_t NumBitmapBytes,
+ uint64_t NamesSize, uint64_t *PaddingBytesBeforeCounters,
+ uint64_t *PaddingBytesAfterCounters, uint64_t *PaddingBytesAfterBitmap,
uint64_t *PaddingBytesAfterNames);
/*!
diff --git a/compiler-rt/lib/profile/InstrProfilingBuffer.c b/compiler-rt/lib/profile/InstrProfilingBuffer.c
index 61ac5d9c0285..cd1f067bd188 100644
--- a/compiler-rt/lib/profile/InstrProfilingBuffer.c
+++ b/compiler-rt/lib/profile/InstrProfilingBuffer.c
@@ -43,11 +43,14 @@ uint64_t __llvm_profile_get_size_for_buffer(void) {
const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
const char *CountersBegin = __llvm_profile_begin_counters();
const char *CountersEnd = __llvm_profile_end_counters();
+ const char *BitmapBegin = __llvm_profile_begin_bitmap();
+ const char *BitmapEnd = __llvm_profile_end_bitmap();
const char *NamesBegin = __llvm_profile_begin_names();
const char *NamesEnd = __llvm_profile_end_names();
return __llvm_profile_get_size_for_buffer_internal(
- DataBegin, DataEnd, CountersBegin, CountersEnd, NamesBegin, NamesEnd);
+ DataBegin, DataEnd, CountersBegin, CountersEnd, BitmapBegin, BitmapEnd,
+ NamesBegin, NamesEnd);
}
COMPILER_RT_VISIBILITY
@@ -83,6 +86,17 @@ uint64_t __llvm_profile_get_counters_size(const char *Begin, const char *End) {
__llvm_profile_counter_entry_size();
}
+COMPILER_RT_VISIBILITY
+uint64_t __llvm_profile_get_num_bitmap_bytes(const char *Begin,
+ const char *End) {
+ return (End - Begin);
+}
+
+COMPILER_RT_VISIBILITY
+uint64_t __llvm_profile_get_name_size(const char *Begin, const char *End) {
+ return End - Begin;
+}
+
/// Calculate the number of padding bytes needed to add to \p Offset in order
/// for (\p Offset + Padding) to be page-aligned.
static uint64_t calculateBytesNeededToPageAlign(uint64_t Offset) {
@@ -102,13 +116,16 @@ static int needsCounterPadding(void) {
COMPILER_RT_VISIBILITY
void __llvm_profile_get_padding_sizes_for_counters(
- uint64_t DataSize, uint64_t CountersSize, uint64_t NamesSize,
- uint64_t *PaddingBytesBeforeCounters, uint64_t *PaddingBytesAfterCounters,
+ uint64_t DataSize, uint64_t CountersSize, uint64_t NumBitmapBytes,
+ uint64_t NamesSize, uint64_t *PaddingBytesBeforeCounters,
+ uint64_t *PaddingBytesAfterCounters, uint64_t *PaddingBytesAfterBitmapBytes,
uint64_t *PaddingBytesAfterNames) {
if (!needsCounterPadding()) {
*PaddingBytesBeforeCounters = 0;
*PaddingBytesAfterCounters =
__llvm_profile_get_num_padding_bytes(CountersSize);
+ *PaddingBytesAfterBitmapBytes =
+ __llvm_profile_get_num_padding_bytes(NumBitmapBytes);
*PaddingBytesAfterNames = __llvm_profile_get_num_padding_bytes(NamesSize);
return;
}
@@ -118,31 +135,37 @@ void __llvm_profile_get_padding_sizes_for_counters(
*PaddingBytesBeforeCounters =
calculateBytesNeededToPageAlign(sizeof(__llvm_profile_header) + DataSize);
*PaddingBytesAfterCounters = calculateBytesNeededToPageAlign(CountersSize);
+ *PaddingBytesAfterBitmapBytes =
+ calculateBytesNeededToPageAlign(NumBitmapBytes);
*PaddingBytesAfterNames = calculateBytesNeededToPageAlign(NamesSize);
}
COMPILER_RT_VISIBILITY
uint64_t __llvm_profile_get_size_for_buffer_internal(
const __llvm_profile_data *DataBegin, const __llvm_profile_data *DataEnd,
- const char *CountersBegin, const char *CountersEnd, const char *NamesBegin,
- const char *NamesEnd) {
+ const char *CountersBegin, const char *CountersEnd, const char *BitmapBegin,
+ const char *BitmapEnd, const char *NamesBegin, const char *NamesEnd) {
/* Match logic in __llvm_profile_write_buffer(). */
const uint64_t NamesSize = (NamesEnd - NamesBegin) * sizeof(char);
uint64_t DataSize = __llvm_profile_get_data_size(DataBegin, DataEnd);
uint64_t CountersSize =
__llvm_profile_get_counters_size(CountersBegin, CountersEnd);
+ const uint64_t NumBitmapBytes =
+ __llvm_profile_get_num_bitmap_bytes(BitmapBegin, BitmapEnd);
/* Determine how much padding is needed before/after the counters and after
* the names. */
uint64_t PaddingBytesBeforeCounters, PaddingBytesAfterCounters,
- PaddingBytesAfterNames;
+ PaddingBytesAfterNames, PaddingBytesAfterBitmapBytes;
__llvm_profile_get_padding_sizes_for_counters(
- DataSize, CountersSize, NamesSize, &PaddingBytesBeforeCounters,
- &PaddingBytesAfterCounters, &PaddingBytesAfterNames);
+ DataSize, CountersSize, NumBitmapBytes, NamesSize,
+ &PaddingBytesBeforeCounters, &PaddingBytesAfterCounters,
+ &PaddingBytesAfterBitmapBytes, &PaddingBytesAfterNames);
return sizeof(__llvm_profile_header) + __llvm_write_binary_ids(NULL) +
DataSize + PaddingBytesBeforeCounters + CountersSize +
- PaddingBytesAfterCounters + NamesSize + PaddingBytesAfterNames;
+ PaddingBytesAfterCounters + NumBitmapBytes +
+ PaddingBytesAfterBitmapBytes + NamesSize + PaddingBytesAfterNames;
}
COMPILER_RT_VISIBILITY
@@ -160,9 +183,11 @@ COMPILER_RT_VISIBILITY int __llvm_profile_write_buffer(char *Buffer) {
COMPILER_RT_VISIBILITY int __llvm_profile_write_buffer_internal(
char *Buffer, const __llvm_profile_data *DataBegin,
const __llvm_profile_data *DataEnd, const char *CountersBegin,
- const char *CountersEnd, const char *NamesBegin, const char *NamesEnd) {
+ const char *CountersEnd, const char *BitmapBegin, const char *BitmapEnd,
+ const char *NamesBegin, const char *NamesEnd) {
ProfDataWriter BufferWriter;
initBufferWriter(&BufferWriter, Buffer);
return lprofWriteDataImpl(&BufferWriter, DataBegin, DataEnd, CountersBegin,
- CountersEnd, 0, NamesBegin, NamesEnd, 0);
+ CountersEnd, BitmapBegin, BitmapEnd, 0, NamesBegin,
+ NamesEnd, 0);
}
diff --git a/compiler-rt/lib/profile/InstrProfilingFile.c b/compiler-rt/lib/profile/InstrProfilingFile.c
index 54e3030d5899..1685b30b9492 100644
--- a/compiler-rt/lib/profile/InstrProfilingFile.c
+++ b/compiler-rt/lib/profile/InstrProfilingFile.c
@@ -108,14 +108,18 @@ static int mmapForContinuousMode(uint64_t CurrentFileOffset, FILE *File) {
const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
const char *CountersBegin = __llvm_profile_begin_counters();
const char *CountersEnd = __llvm_profile_end_counters();
+ const char *BitmapBegin = __llvm_profile_begin_bitmap();
+ const char *BitmapEnd = __llvm_profile_end_bitmap();
const char *NamesBegin = __llvm_profile_begin_names();
const char *NamesEnd = __llvm_profile_end_names();
const uint64_t NamesSize = (NamesEnd - NamesBegin) * sizeof(char);
uint64_t DataSize = __llvm_profile_get_data_size(DataBegin, DataEnd);
uint64_t CountersSize =
__llvm_profile_get_counters_size(CountersBegin, CountersEnd);
+ uint64_t NumBitmapBytes =
+ __llvm_profile_get_num_bitmap_bytes(BitmapBegin, BitmapEnd);
- /* Check that the counter and data sections in this image are
+ /* Check that the counter, bitmap, and data sections in this image are
* page-aligned. */
unsigned PageSize = getpagesize();
if ((intptr_t)CountersBegin % PageSize != 0) {
@@ -123,6 +127,11 @@ static int mmapForContinuousMode(uint64_t CurrentFileOffset, FILE *File) {
CountersBegin, PageSize);
return 1;
}
+ if ((intptr_t)BitmapBegin % PageSize != 0) {
+ PROF_ERR("Bitmap section not page-aligned (start = %p, pagesz = %u).\n",
+ BitmapBegin, PageSize);
+ return 1;
+ }
if ((intptr_t)DataBegin % PageSize != 0) {
PROF_ERR("Data section not page-aligned (start = %p, pagesz = %u).\n",
DataBegin, PageSize);
@@ -132,10 +141,11 @@ static int mmapForContinuousMode(uint64_t CurrentFileOffset, FILE *File) {
/* Determine how much padding is needed before/after the counters and
* after the names. */
uint64_t PaddingBytesBeforeCounters, PaddingBytesAfterCounters,
- PaddingBytesAfterNames;
+ PaddingBytesAfterNames, PaddingBytesAfterBitmapBytes;
__llvm_profile_get_padding_sizes_for_counters(
- DataSize, CountersSize, NamesSize, &PaddingBytesBeforeCounters,
- &PaddingBytesAfterCounters, &PaddingBytesAfterNames);
+ DataSize, CountersSize, NumBitmapBytes, NamesSize,
+ &PaddingBytesBeforeCounters, &PaddingBytesAfterCounters,
+ &PaddingBytesAfterBitmapBytes, &PaddingBytesAfterNames);
uint64_t PageAlignedCountersLength = CountersSize + PaddingBytesAfterCounters;
uint64_t FileOffsetToCounters = CurrentFileOffset +
@@ -155,6 +165,31 @@ static int mmapForContinuousMode(uint64_t CurrentFileOffset, FILE *File) {
FileOffsetToCounters);
return 1;
}
+
+ /* Also mmap MCDC bitmap bytes. If there aren't any bitmap bytes, mmap()
+ * will fail with EINVAL. */
+ if (NumBitmapBytes == 0)
+ return 0;
+
+ uint64_t PageAlignedBitmapLength =
+ NumBitmapBytes + PaddingBytesAfterBitmapBytes;
+ uint64_t FileOffsetToBitmap =
+ CurrentFileOffset + sizeof(__llvm_profile_header) + DataSize +
+ PaddingBytesBeforeCounters + CountersSize + PaddingBytesAfterCounters;
+ void *BitmapMmap =
+ mmap((void *)BitmapBegin, PageAlignedBitmapLength, PROT_READ | PROT_WRITE,
+ MAP_FIXED | MAP_SHARED, Fileno, FileOffsetToBitmap);
+ if (BitmapMmap != BitmapBegin) {
+ PROF_ERR(
+ "Continuous counter sync mode is enabled, but mmap() failed (%s).\n"
+ " - BitmapBegin: %p\n"
+ " - PageAlignedBitmapLength: %" PRIu64 "\n"
+ " - Fileno: %d\n"
+ " - FileOffsetToBitmap: %" PRIu64 "\n",
+ strerror(errno), BitmapBegin, PageAlignedBitmapLength, Fileno,
+ FileOffsetToBitmap);
+ return 1;
+ }
return 0;
}
#elif defined(__ELF__) || defined(_WIN32)
@@ -197,6 +232,8 @@ static int mmapForContinuousMode(uint64_t CurrentFileOffset, FILE *File) {
const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
const char *CountersBegin = __llvm_profile_begin_counters();
const char *CountersEnd = __llvm_profile_end_counters();
+ const char *BitmapBegin = __llvm_profile_begin_bitmap();
+ const char *BitmapEnd = __llvm_profile_end_bitmap();
uint64_t DataSize = __llvm_profile_get_data_size(DataBegin, DataEnd);
/* Get the file size. */
uint64_t FileSize = 0;
@@ -218,6 +255,11 @@ static int mmapForContinuousMode(uint64_t CurrentFileOffset, FILE *File) {
/* Return the memory allocated for counters to OS. */
lprofReleaseMemoryPagesToOS((uintptr_t)CountersBegin, (uintptr_t)CountersEnd);
+
+ /* BIAS MODE not supported yet for Bitmap (MCDC). */
+
+ /* Return the memory allocated for counters to OS. */
+ lprofReleaseMemoryPagesToOS((uintptr_t)BitmapBegin, (uintptr_t)BitmapEnd);
return 0;
}
#else
@@ -426,7 +468,12 @@ static void createProfileDir(const char *Filename) {
static FILE *openFileForMerging(const char *ProfileFileName, int *MergeDone) {
FILE *ProfileFile = getProfileFile();
int rc;
-
+ // initializeProfileForContinuousMode will lock the profile, but if
+ // ProfileFile is set by user via __llvm_profile_set_file_object, it's assumed
+ // unlocked at this point.
+ if (ProfileFile && !__llvm_profile_is_continuous_mode_enabled()) {
+ lprofLockFileHandle(ProfileFile);
+ }
if (!ProfileFile) {
createProfileDir(ProfileFileName);
ProfileFile = lprofOpenFileEx(ProfileFileName);
@@ -478,6 +525,9 @@ static int writeFile(const char *OutputName) {
if (OutputFile == getProfileFile()) {
fflush(OutputFile);
+ if (doMerging() && !__llvm_profile_is_continuous_mode_enabled()) {
+ lprofUnlockFileHandle(OutputFile);
+ }
} else {
fclose(OutputFile);
}
@@ -1026,10 +1076,14 @@ int __llvm_profile_write_file(void) {
int rc, Length;
const char *Filename;
char *FilenameBuf;
- int PDeathSig = 0;
+
+ // Temporarily suspend getting SIGKILL when the parent exits.
+ int PDeathSig = lprofSuspendSigKill();
if (lprofProfileDumped() || __llvm_profile_is_continuous_mode_enabled()) {
PROF_NOTE("Profile data not written to file: %s.\n", "already written");
+ if (PDeathSig == 1)
+ lprofRestoreSigKill();
return 0;
}
@@ -1040,6 +1094,8 @@ int __llvm_profile_write_file(void) {
/* Check the filename. */
if (!Filename) {
PROF_ERR("Failed to write file : %s\n", "Filename not set");
+ if (PDeathSig == 1)
+ lprofRestoreSigKill();
return -1;
}
@@ -1049,12 +1105,11 @@ int __llvm_profile_write_file(void) {
"expected %d, but get %d\n",
INSTR_PROF_RAW_VERSION,
(int)GET_VERSION(__llvm_profile_get_version()));
+ if (PDeathSig == 1)
+ lprofRestoreSigKill();
return -1;
}
- // Temporarily suspend getting SIGKILL when the parent exits.
- PDeathSig = lprofSuspendSigKill();
-
/* Write profile data to the file. */
rc = writeFile(Filename);
if (rc)
@@ -1087,7 +1142,9 @@ int __llvm_orderfile_write_file(void) {
int rc, Length, LengthBeforeAppend, SuffixLength;
const char *Filename;
char *FilenameBuf;
- int PDeathSig = 0;
+
+ // Temporarily suspend getting SIGKILL when the parent exits.
+ int PDeathSig = lprofSuspendSigKill();
SuffixLength = strlen(OrderFileSuffix);
Length = getCurFilenameLength() + SuffixLength;
@@ -1097,6 +1154,8 @@ int __llvm_orderfile_write_file(void) {
/* Check the filename. */
if (!Filename) {
PROF_ERR("Failed to write file : %s\n", "Filename not set");
+ if (PDeathSig == 1)
+ lprofRestoreSigKill();
return -1;
}
@@ -1111,12 +1170,11 @@ int __llvm_orderfile_write_file(void) {
"expected %d, but get %d\n",
INSTR_PROF_RAW_VERSION,
(int)GET_VERSION(__llvm_profile_get_version()));
+ if (PDeathSig == 1)
+ lprofRestoreSigKill();
return -1;
}
- // Temporarily suspend getting SIGKILL when the parent exits.
- PDeathSig = lprofSuspendSigKill();
-
/* Write order data to the file. */
rc = writeOrderFile(Filename);
if (rc)
diff --git a/compiler-rt/lib/profile/InstrProfilingInternal.h b/compiler-rt/lib/profile/InstrProfilingInternal.h
index 360165e32ab3..03ed67fcfa76 100644
--- a/compiler-rt/lib/profile/InstrProfilingInternal.h
+++ b/compiler-rt/lib/profile/InstrProfilingInternal.h
@@ -21,8 +21,8 @@
*/
uint64_t __llvm_profile_get_size_for_buffer_internal(
const __llvm_profile_data *DataBegin, const __llvm_profile_data *DataEnd,
- const char *CountersBegin, const char *CountersEnd, const char *NamesBegin,
- const char *NamesEnd);
+ const char *CountersBegin, const char *CountersEnd, const char *BitmapBegin,
+ const char *BitmapEnd, const char *NamesBegin, const char *NamesEnd);
/*!
* \brief Write instrumentation data to the given buffer, given explicit
@@ -36,7 +36,8 @@ uint64_t __llvm_profile_get_size_for_buffer_internal(
int __llvm_profile_write_buffer_internal(
char *Buffer, const __llvm_profile_data *DataBegin,
const __llvm_profile_data *DataEnd, const char *CountersBegin,
- const char *CountersEnd, const char *NamesBegin, const char *NamesEnd);
+ const char *CountersEnd, const char *BitmapBegin, const char *BitmapEnd,
+ const char *NamesBegin, const char *NamesEnd);
/*!
* The data structure describing the data to be written by the
@@ -153,6 +154,7 @@ int lprofWriteDataImpl(ProfDataWriter *Writer,
const __llvm_profile_data *DataBegin,
const __llvm_profile_data *DataEnd,
const char *CountersBegin, const char *CountersEnd,
+ const char *BitmapBegin, const char *BitmapEnd,
VPDataReaderType *VPDataReader, const char *NamesBegin,
const char *NamesEnd, int SkipNameDataWrite);
diff --git a/compiler-rt/lib/profile/InstrProfilingMerge.c b/compiler-rt/lib/profile/InstrProfilingMerge.c
index 432e824955f8..e8ae2189cccd 100644
--- a/compiler-rt/lib/profile/InstrProfilingMerge.c
+++ b/compiler-rt/lib/profile/InstrProfilingMerge.c
@@ -47,13 +47,12 @@ uint64_t lprofGetLoadModuleSignature(void) {
COMPILER_RT_VISIBILITY
int __llvm_profile_check_compatibility(const char *ProfileData,
uint64_t ProfileSize) {
- /* Check profile header only for now */
__llvm_profile_header *Header = (__llvm_profile_header *)ProfileData;
__llvm_profile_data *SrcDataStart, *SrcDataEnd, *SrcData, *DstData;
SrcDataStart =
(__llvm_profile_data *)(ProfileData + sizeof(__llvm_profile_header) +
Header->BinaryIdsSize);
- SrcDataEnd = SrcDataStart + Header->DataSize;
+ SrcDataEnd = SrcDataStart + Header->NumData;
if (ProfileSize < sizeof(__llvm_profile_header))
return 1;
@@ -61,21 +60,26 @@ int __llvm_profile_check_compatibility(const char *ProfileData,
/* Check the header first. */
if (Header->Magic != __llvm_profile_get_magic() ||
Header->Version != __llvm_profile_get_version() ||
- Header->DataSize !=
+ Header->NumData !=
__llvm_profile_get_num_data(__llvm_profile_begin_data(),
__llvm_profile_end_data()) ||
- Header->CountersSize !=
+ Header->NumCounters !=
__llvm_profile_get_num_counters(__llvm_profile_begin_counters(),
__llvm_profile_end_counters()) ||
- Header->NamesSize != (uint64_t)(__llvm_profile_end_names() -
- __llvm_profile_begin_names()) ||
+ Header->NumBitmapBytes !=
+ __llvm_profile_get_num_bitmap_bytes(__llvm_profile_begin_bitmap(),
+ __llvm_profile_end_bitmap()) ||
+ Header->NamesSize !=
+ __llvm_profile_get_name_size(__llvm_profile_begin_names(),
+ __llvm_profile_end_names()) ||
Header->ValueKindLast != IPVK_Last)
return 1;
if (ProfileSize <
sizeof(__llvm_profile_header) + Header->BinaryIdsSize +
- Header->DataSize * sizeof(__llvm_profile_data) + Header->NamesSize +
- Header->CountersSize * __llvm_profile_counter_entry_size())
+ Header->NumData * sizeof(__llvm_profile_data) + Header->NamesSize +
+ Header->NumCounters * __llvm_profile_counter_entry_size() +
+ Header->NumBitmapBytes)
return 1;
for (SrcData = SrcDataStart,
@@ -83,7 +87,8 @@ int __llvm_profile_check_compatibility(const char *ProfileData,
SrcData < SrcDataEnd; ++SrcData, ++DstData) {
if (SrcData->NameRef != DstData->NameRef ||
SrcData->FuncHash != DstData->FuncHash ||
- SrcData->NumCounters != DstData->NumCounters)
+ SrcData->NumCounters != DstData->NumCounters ||
+ SrcData->NumBitmapBytes != DstData->NumBitmapBytes)
return 1;
}
@@ -102,34 +107,55 @@ static uintptr_t signextIfWin64(void *V) {
COMPILER_RT_VISIBILITY
int __llvm_profile_merge_from_buffer(const char *ProfileData,
uint64_t ProfileSize) {
- if (__llvm_profile_get_version() & VARIANT_MASK_DBG_CORRELATE) {
- PROF_ERR(
- "%s\n",
- "Debug info correlation does not support profile merging at runtime. "
- "Instead, merge raw profiles using the llvm-profdata tool.");
+ if (__llvm_profile_get_version() & VARIANT_MASK_TEMPORAL_PROF) {
+ PROF_ERR("%s\n",
+ "Temporal profiles do not support profile merging at runtime. "
+ "Instead, merge raw profiles using the llvm-profdata tool.");
return 1;
}
__llvm_profile_data *SrcDataStart, *SrcDataEnd, *SrcData, *DstData;
__llvm_profile_header *Header = (__llvm_profile_header *)ProfileData;
- char *SrcCountersStart;
+ char *SrcCountersStart, *DstCounter;
+ const char *SrcCountersEnd, *SrcCounter;
+ const char *SrcBitmapStart;
const char *SrcNameStart;
const char *SrcValueProfDataStart, *SrcValueProfData;
uintptr_t CountersDelta = Header->CountersDelta;
+ uintptr_t BitmapDelta = Header->BitmapDelta;
SrcDataStart =
(__llvm_profile_data *)(ProfileData + sizeof(__llvm_profile_header) +
Header->BinaryIdsSize);
- SrcDataEnd = SrcDataStart + Header->DataSize;
+ SrcDataEnd = SrcDataStart + Header->NumData;
SrcCountersStart = (char *)SrcDataEnd;
- SrcNameStart = SrcCountersStart +
- Header->CountersSize * __llvm_profile_counter_entry_size();
+ SrcCountersEnd = SrcCountersStart +
+ Header->NumCounters * __llvm_profile_counter_entry_size();
+ SrcBitmapStart = SrcCountersEnd;
+ SrcNameStart = SrcBitmapStart + Header->NumBitmapBytes;
SrcValueProfDataStart =
SrcNameStart + Header->NamesSize +
__llvm_profile_get_num_padding_bytes(Header->NamesSize);
- if (SrcNameStart < SrcCountersStart)
+ if (SrcNameStart < SrcCountersStart || SrcNameStart < SrcBitmapStart)
return 1;
+ // Merge counters by iterating the entire counter section when data section is
+ // empty due to correlation.
+ if (Header->NumData == 0) {
+ for (SrcCounter = SrcCountersStart,
+ DstCounter = __llvm_profile_begin_counters();
+ SrcCounter < SrcCountersEnd;) {
+ if (__llvm_profile_get_version() & VARIANT_MASK_BYTE_COVERAGE) {
+ *DstCounter &= *SrcCounter;
+ } else {
+ *(uint64_t *)DstCounter += *(uint64_t *)SrcCounter;
+ }
+ SrcCounter += __llvm_profile_counter_entry_size();
+ DstCounter += __llvm_profile_counter_entry_size();
+ }
+ return 0;
+ }
+
for (SrcData = SrcDataStart,
DstData = (__llvm_profile_data *)__llvm_profile_begin_data(),
SrcValueProfData = SrcValueProfDataStart;
@@ -140,6 +166,8 @@ int __llvm_profile_merge_from_buffer(const char *ProfileData,
// extend CounterPtr to get the original value.
char *DstCounters =
(char *)((uintptr_t)DstData + signextIfWin64(DstData->CounterPtr));
+ char *DstBitmap =
+ (char *)((uintptr_t)DstData + signextIfWin64(DstData->BitmapPtr));
unsigned NVK = 0;
// SrcData is a serialized representation of the memory image. We need to
@@ -169,6 +197,21 @@ int __llvm_profile_merge_from_buffer(const char *ProfileData,
}
}
+ const char *SrcBitmap =
+ SrcBitmapStart + ((uintptr_t)SrcData->BitmapPtr - BitmapDelta);
+ // BitmapDelta also needs to be decreased as we advance to the next data
+ // record.
+ BitmapDelta -= sizeof(*SrcData);
+ unsigned NB = SrcData->NumBitmapBytes;
+ // NumBitmapBytes may legitimately be 0. Just keep going.
+ if (NB != 0) {
+ if (SrcBitmap < SrcBitmapStart || (SrcBitmap + NB) > SrcNameStart)
+ return 1;
+ // Merge Src and Dst Bitmap bytes by simply ORing them together.
+ for (unsigned I = 0; I < NB; I++)
+ DstBitmap[I] |= SrcBitmap[I];
+ }
+
/* Now merge value profile data. */
if (!VPMergeHook)
continue;
diff --git a/compiler-rt/lib/profile/InstrProfilingPlatformAIX.c b/compiler-rt/lib/profile/InstrProfilingPlatformAIX.c
index 63219da18ae3..9f46a98d78ac 100644
--- a/compiler-rt/lib/profile/InstrProfilingPlatformAIX.c
+++ b/compiler-rt/lib/profile/InstrProfilingPlatformAIX.c
@@ -187,6 +187,8 @@ void __llvm_profile_register_names_function(void *NamesStart,
// define these zero length variables in each of the above 4 sections.
static int dummy_cnts[0] COMPILER_RT_SECTION(
COMPILER_RT_SEG INSTR_PROF_CNTS_SECT_NAME);
+static int dummy_bits[0] COMPILER_RT_SECTION(
+ COMPILER_RT_SEG INSTR_PROF_BITS_SECT_NAME);
static int dummy_data[0] COMPILER_RT_SECTION(
COMPILER_RT_SEG INSTR_PROF_DATA_SECT_NAME);
static const int dummy_name[0] COMPILER_RT_SECTION(
@@ -202,8 +204,9 @@ static int dummy_vnds[0] COMPILER_RT_SECTION(
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
COMPILER_RT_VISIBILITY
-void *__llvm_profile_keep[] = {(void *)&dummy_cnts, (void *)&dummy_data,
- (void *)&dummy_name, (void *)&dummy_vnds};
+void *__llvm_profile_keep[] = {(void *)&dummy_cnts, (void *)&dummy_bits,
+ (void *)&dummy_data, (void *)&dummy_name,
+ (void *)&dummy_vnds};
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
diff --git a/compiler-rt/lib/profile/InstrProfilingPlatformDarwin.c b/compiler-rt/lib/profile/InstrProfilingPlatformDarwin.c
index d9f2a113f5b0..2154d242a817 100644
--- a/compiler-rt/lib/profile/InstrProfilingPlatformDarwin.c
+++ b/compiler-rt/lib/profile/InstrProfilingPlatformDarwin.c
@@ -31,6 +31,11 @@ extern char
COMPILER_RT_VISIBILITY
extern char CountersEnd __asm("section$end$__DATA$" INSTR_PROF_CNTS_SECT_NAME);
COMPILER_RT_VISIBILITY
+extern char
+ BitmapStart __asm("section$start$__DATA$" INSTR_PROF_BITS_SECT_NAME);
+COMPILER_RT_VISIBILITY
+extern char BitmapEnd __asm("section$end$__DATA$" INSTR_PROF_BITS_SECT_NAME);
+COMPILER_RT_VISIBILITY
extern uint32_t
OrderFileStart __asm("section$start$__DATA$" INSTR_PROF_ORDERFILE_SECT_NAME);
@@ -56,6 +61,10 @@ char *__llvm_profile_begin_counters(void) { return &CountersStart; }
COMPILER_RT_VISIBILITY
char *__llvm_profile_end_counters(void) { return &CountersEnd; }
COMPILER_RT_VISIBILITY
+char *__llvm_profile_begin_bitmap(void) { return &BitmapStart; }
+COMPILER_RT_VISIBILITY
+char *__llvm_profile_end_bitmap(void) { return &BitmapEnd; }
+COMPILER_RT_VISIBILITY
uint32_t *__llvm_profile_begin_orderfile(void) { return &OrderFileStart; }
COMPILER_RT_VISIBILITY
diff --git a/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c b/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c
index 2cce0a4b2c48..d0c42462e5e3 100644
--- a/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c
+++ b/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c
@@ -35,6 +35,8 @@
#define PROF_NAME_STOP INSTR_PROF_SECT_STOP(INSTR_PROF_NAME_COMMON)
#define PROF_CNTS_START INSTR_PROF_SECT_START(INSTR_PROF_CNTS_COMMON)
#define PROF_CNTS_STOP INSTR_PROF_SECT_STOP(INSTR_PROF_CNTS_COMMON)
+#define PROF_BITS_START INSTR_PROF_SECT_START(INSTR_PROF_BITS_COMMON)
+#define PROF_BITS_STOP INSTR_PROF_SECT_STOP(INSTR_PROF_BITS_COMMON)
#define PROF_ORDERFILE_START INSTR_PROF_SECT_START(INSTR_PROF_ORDERFILE_COMMON)
#define PROF_VNODES_START INSTR_PROF_SECT_START(INSTR_PROF_VNODES_COMMON)
#define PROF_VNODES_STOP INSTR_PROF_SECT_STOP(INSTR_PROF_VNODES_COMMON)
@@ -48,6 +50,8 @@ extern __llvm_profile_data PROF_DATA_STOP COMPILER_RT_VISIBILITY
COMPILER_RT_WEAK;
extern char PROF_CNTS_START COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
extern char PROF_CNTS_STOP COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
+extern char PROF_BITS_START COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
+extern char PROF_BITS_STOP COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
extern uint32_t PROF_ORDERFILE_START COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
extern char PROF_NAME_START COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
extern char PROF_NAME_STOP COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
@@ -74,6 +78,12 @@ COMPILER_RT_VISIBILITY char *__llvm_profile_begin_counters(void) {
COMPILER_RT_VISIBILITY char *__llvm_profile_end_counters(void) {
return &PROF_CNTS_STOP;
}
+COMPILER_RT_VISIBILITY char *__llvm_profile_begin_bitmap(void) {
+ return &PROF_BITS_START;
+}
+COMPILER_RT_VISIBILITY char *__llvm_profile_end_bitmap(void) {
+ return &PROF_BITS_STOP;
+}
COMPILER_RT_VISIBILITY uint32_t *__llvm_profile_begin_orderfile(void) {
return &PROF_ORDERFILE_START;
}
diff --git a/compiler-rt/lib/profile/InstrProfilingPlatformOther.c b/compiler-rt/lib/profile/InstrProfilingPlatformOther.c
index c7b6e842c9fa..5319ca813b43 100644
--- a/compiler-rt/lib/profile/InstrProfilingPlatformOther.c
+++ b/compiler-rt/lib/profile/InstrProfilingPlatformOther.c
@@ -88,6 +88,10 @@ COMPILER_RT_VISIBILITY
char *__llvm_profile_begin_counters(void) { return CountersFirst; }
COMPILER_RT_VISIBILITY
char *__llvm_profile_end_counters(void) { return CountersLast; }
+COMPILER_RT_VISIBILITY
+char *__llvm_profile_begin_bitmap(void) { return BitmapFirst; }
+COMPILER_RT_VISIBILITY
+char *__llvm_profile_end_bitmap(void) { return BitmapLast; }
/* TODO: correctly set up OrderFileFirst. */
COMPILER_RT_VISIBILITY
uint32_t *__llvm_profile_begin_orderfile(void) { return OrderFileFirst; }
diff --git a/compiler-rt/lib/profile/InstrProfilingPlatformWindows.c b/compiler-rt/lib/profile/InstrProfilingPlatformWindows.c
index dd576b2f8357..9dbd702865fd 100644
--- a/compiler-rt/lib/profile/InstrProfilingPlatformWindows.c
+++ b/compiler-rt/lib/profile/InstrProfilingPlatformWindows.c
@@ -14,6 +14,7 @@
#if defined(_MSC_VER)
/* Merge read-write sections into .data. */
#pragma comment(linker, "/MERGE:.lprfc=.data")
+#pragma comment(linker, "/MERGE:.lprfb=.data")
#pragma comment(linker, "/MERGE:.lprfd=.data")
#pragma comment(linker, "/MERGE:.lprfv=.data")
#pragma comment(linker, "/MERGE:.lprfnd=.data")
@@ -30,6 +31,8 @@
#pragma section(".lprfd$Z", read, write)
#pragma section(".lprfc$A", read, write)
#pragma section(".lprfc$Z", read, write)
+#pragma section(".lprfb$A", read, write)
+#pragma section(".lprfb$Z", read, write)
#pragma section(".lorderfile$A", read, write)
#pragma section(".lprfnd$A", read, write)
#pragma section(".lprfnd$Z", read, write)
@@ -43,6 +46,8 @@ const char COMPILER_RT_SECTION(".lprfn$Z") NamesEnd = '\0';
char COMPILER_RT_SECTION(".lprfc$A") CountersStart;
char COMPILER_RT_SECTION(".lprfc$Z") CountersEnd;
+char COMPILER_RT_SECTION(".lprfb$A") BitmapStart;
+char COMPILER_RT_SECTION(".lprfb$Z") BitmapEnd;
uint32_t COMPILER_RT_SECTION(".lorderfile$A") OrderFileStart;
ValueProfNode COMPILER_RT_SECTION(".lprfnd$A") VNodesStart;
@@ -58,6 +63,8 @@ const char *__llvm_profile_end_names(void) { return &NamesEnd; }
char *__llvm_profile_begin_counters(void) { return &CountersStart + 1; }
char *__llvm_profile_end_counters(void) { return &CountersEnd; }
+char *__llvm_profile_begin_bitmap(void) { return &BitmapStart + 1; }
+char *__llvm_profile_end_bitmap(void) { return &BitmapEnd; }
uint32_t *__llvm_profile_begin_orderfile(void) { return &OrderFileStart; }
ValueProfNode *__llvm_profile_begin_vnodes(void) { return &VNodesStart + 1; }
diff --git a/compiler-rt/lib/profile/InstrProfilingWriter.c b/compiler-rt/lib/profile/InstrProfilingWriter.c
index 4a392984fe6b..4d767d138514 100644
--- a/compiler-rt/lib/profile/InstrProfilingWriter.c
+++ b/compiler-rt/lib/profile/InstrProfilingWriter.c
@@ -246,32 +246,33 @@ COMPILER_RT_VISIBILITY int lprofWriteData(ProfDataWriter *Writer,
const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
const char *CountersBegin = __llvm_profile_begin_counters();
const char *CountersEnd = __llvm_profile_end_counters();
+ const char *BitmapBegin = __llvm_profile_begin_bitmap();
+ const char *BitmapEnd = __llvm_profile_end_bitmap();
const char *NamesBegin = __llvm_profile_begin_names();
const char *NamesEnd = __llvm_profile_end_names();
return lprofWriteDataImpl(Writer, DataBegin, DataEnd, CountersBegin,
- CountersEnd, VPDataReader, NamesBegin, NamesEnd,
- SkipNameDataWrite);
+ CountersEnd, BitmapBegin, BitmapEnd, VPDataReader,
+ NamesBegin, NamesEnd, SkipNameDataWrite);
}
COMPILER_RT_VISIBILITY int
lprofWriteDataImpl(ProfDataWriter *Writer, const __llvm_profile_data *DataBegin,
const __llvm_profile_data *DataEnd,
const char *CountersBegin, const char *CountersEnd,
+ const char *BitmapBegin, const char *BitmapEnd,
VPDataReaderType *VPDataReader, const char *NamesBegin,
const char *NamesEnd, int SkipNameDataWrite) {
- int DebugInfoCorrelate =
- (__llvm_profile_get_version() & VARIANT_MASK_DBG_CORRELATE) != 0ULL;
-
/* Calculate size of sections. */
const uint64_t DataSectionSize =
- DebugInfoCorrelate ? 0 : __llvm_profile_get_data_size(DataBegin, DataEnd);
- const uint64_t NumData =
- DebugInfoCorrelate ? 0 : __llvm_profile_get_num_data(DataBegin, DataEnd);
+ __llvm_profile_get_data_size(DataBegin, DataEnd);
+ const uint64_t NumData = __llvm_profile_get_num_data(DataBegin, DataEnd);
const uint64_t CountersSectionSize =
__llvm_profile_get_counters_size(CountersBegin, CountersEnd);
const uint64_t NumCounters =
__llvm_profile_get_num_counters(CountersBegin, CountersEnd);
- const uint64_t NamesSize = DebugInfoCorrelate ? 0 : NamesEnd - NamesBegin;
+ const uint64_t NumBitmapBytes =
+ __llvm_profile_get_num_bitmap_bytes(BitmapBegin, BitmapEnd);
+ const uint64_t NamesSize = __llvm_profile_get_name_size(NamesBegin, NamesEnd);
/* Create the header. */
__llvm_profile_header Header;
@@ -279,18 +280,13 @@ lprofWriteDataImpl(ProfDataWriter *Writer, const __llvm_profile_data *DataBegin,
/* Determine how much padding is needed before/after the counters and after
* the names. */
uint64_t PaddingBytesBeforeCounters, PaddingBytesAfterCounters,
- PaddingBytesAfterNames;
+ PaddingBytesAfterNames, PaddingBytesAfterBitmapBytes;
__llvm_profile_get_padding_sizes_for_counters(
- DataSectionSize, CountersSectionSize, NamesSize,
+ DataSectionSize, CountersSectionSize, NumBitmapBytes, NamesSize,
&PaddingBytesBeforeCounters, &PaddingBytesAfterCounters,
- &PaddingBytesAfterNames);
+ &PaddingBytesAfterBitmapBytes, &PaddingBytesAfterNames);
{
- // TODO: Unfortunately the header's fields are named DataSize and
- // CountersSize when they should be named NumData and NumCounters,
- // respectively.
- const uint64_t CountersSize = NumCounters;
- const uint64_t DataSize = NumData;
/* Initialize header structure. */
#define INSTR_PROF_RAW_HEADER(Type, Name, Init) Header.Name = Init;
#include "profile/InstrProfData.inc"
@@ -300,10 +296,11 @@ lprofWriteDataImpl(ProfDataWriter *Writer, const __llvm_profile_data *DataBegin,
* CountersDelta to match. */
#ifdef _WIN64
Header.CountersDelta = (uint32_t)Header.CountersDelta;
+ Header.BitmapDelta = (uint32_t)Header.BitmapDelta;
#endif
/* The data and names sections are omitted in lightweight mode. */
- if (DebugInfoCorrelate) {
+ if (NumData == 0 && NamesSize == 0) {
Header.CountersDelta = 0;
Header.NamesDelta = 0;
}
@@ -319,19 +316,21 @@ lprofWriteDataImpl(ProfDataWriter *Writer, const __llvm_profile_data *DataBegin,
/* Write the profile data. */
ProfDataIOVec IOVecData[] = {
- {DebugInfoCorrelate ? NULL : DataBegin, sizeof(uint8_t), DataSectionSize,
- 0},
+ {DataBegin, sizeof(uint8_t), DataSectionSize, 0},
{NULL, sizeof(uint8_t), PaddingBytesBeforeCounters, 1},
{CountersBegin, sizeof(uint8_t), CountersSectionSize, 0},
{NULL, sizeof(uint8_t), PaddingBytesAfterCounters, 1},
- {(SkipNameDataWrite || DebugInfoCorrelate) ? NULL : NamesBegin,
- sizeof(uint8_t), NamesSize, 0},
+ {BitmapBegin, sizeof(uint8_t), NumBitmapBytes, 0},
+ {NULL, sizeof(uint8_t), PaddingBytesAfterBitmapBytes, 1},
+ {SkipNameDataWrite ? NULL : NamesBegin, sizeof(uint8_t), NamesSize, 0},
{NULL, sizeof(uint8_t), PaddingBytesAfterNames, 1}};
if (Writer->Write(Writer, IOVecData, sizeof(IOVecData) / sizeof(*IOVecData)))
return -1;
- /* Value profiling is not yet supported in continuous mode. */
- if (__llvm_profile_is_continuous_mode_enabled())
+ /* Value profiling is not yet supported in continuous mode and profile
+ * correlation mode. */
+ if (__llvm_profile_is_continuous_mode_enabled() ||
+ (NumData == 0 && NamesSize == 0))
return 0;
return writeValueProfData(Writer, VPDataReader, DataBegin, DataEnd);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
index 03392b61503b..0513ae36fbc7 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
@@ -138,14 +138,20 @@ void InternalAllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
// LowLevelAllocator
constexpr uptr kLowLevelAllocatorDefaultAlignment = 8;
+constexpr uptr kMinNumPagesRounded = 16;
+constexpr uptr kMinRoundedSize = 65536;
static uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment;
static LowLevelAllocateCallback low_level_alloc_callback;
+static LowLevelAllocator Alloc;
+LowLevelAllocator &GetGlobalLowLevelAllocator() { return Alloc; }
+
void *LowLevelAllocator::Allocate(uptr size) {
// Align allocation size.
size = RoundUpTo(size, low_level_alloc_min_alignment);
if (allocated_end_ - allocated_current_ < (sptr)size) {
- uptr size_to_allocate = RoundUpTo(size, GetPageSizeCached());
+ uptr size_to_allocate = RoundUpTo(
+ size, Min(GetPageSizeCached() * kMinNumPagesRounded, kMinRoundedSize));
allocated_current_ = (char *)MmapOrDie(size_to_allocate, __func__);
allocated_end_ = allocated_current_ + size_to_allocate;
if (low_level_alloc_callback) {
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
index fa43ac50c61e..34a64f26478f 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -636,15 +636,16 @@ class SizeClassAllocator64 {
}
uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
// kRegionSize should be able to satisfy the largest size class.
- static_assert(kRegionSize >= SizeClassMap::kMaxSize);
+ static_assert(kRegionSize >= SizeClassMap::kMaxSize,
+ "Region size exceed largest size");
// kRegionSize must be <= 2^36, see CompactPtrT.
COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
// Call mmap for user memory with at least this size.
- static const uptr kUserMapSize = 1 << 16;
+ static const uptr kUserMapSize = 1 << 18;
// Call mmap for metadata memory with at least this size.
static const uptr kMetaMapSize = 1 << 16;
// Call mmap for free array memory with at least this size.
- static const uptr kFreeArrayMapSize = 1 << 16;
+ static const uptr kFreeArrayMapSize = 1 << 18;
atomic_sint32_t release_to_os_interval_ms_;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_asm.h b/compiler-rt/lib/sanitizer_common/sanitizer_asm.h
index 3c9bbdc9678b..bbb18cfbdf15 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_asm.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_asm.h
@@ -62,7 +62,11 @@
#if !defined(__APPLE__)
# define ASM_HIDDEN(symbol) .hidden symbol
-# define ASM_TYPE_FUNCTION(symbol) .type symbol, %function
+# if defined(__arm__) || defined(__aarch64__)
+# define ASM_TYPE_FUNCTION(symbol) .type symbol, %function
+# else
+# define ASM_TYPE_FUNCTION(symbol) .type symbol, @function
+# endif
# define ASM_SIZE(symbol) .size symbol, .-symbol
# define ASM_SYMBOL(symbol) symbol
# define ASM_SYMBOL_INTERCEPTOR(symbol) symbol
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp
index 79b7748b8f6e..5efdd864295b 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp
@@ -115,8 +115,9 @@ void ReportErrorSummary(const char *error_message, const char *alt_tool_name) {
if (!common_flags()->print_summary)
return;
InternalScopedString buff;
- buff.append("SUMMARY: %s: %s",
- alt_tool_name ? alt_tool_name : SanitizerToolName, error_message);
+ buff.AppendF("SUMMARY: %s: %s",
+ alt_tool_name ? alt_tool_name : SanitizerToolName,
+ error_message);
__sanitizer_report_error_summary(buff.data());
}
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common.h b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
index e7e4b8cb506d..6b327a4aa16f 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
@@ -208,6 +208,11 @@ void ParseUnixMemoryProfile(fill_profile_f cb, uptr *stats, char *smaps,
// Simple low-level (mmap-based) allocator for internal use. Doesn't have
// constructor, so all instances of LowLevelAllocator should be
// linker initialized.
+//
+// NOTE: Users should instead use the singleton provided via
+// `GetGlobalLowLevelAllocator()` rather than create a new one. This way, the
+// number of mmap fragments can be reduced and use the same contiguous mmap
+// provided by this singleton.
class LowLevelAllocator {
public:
// Requires an external lock.
@@ -224,6 +229,8 @@ typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
+LowLevelAllocator &GetGlobalLowLevelAllocator();
+
// IO
void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
@@ -636,7 +643,8 @@ class InternalScopedString {
buffer_.resize(1);
buffer_[0] = '\0';
}
- void append(const char *format, ...) FORMAT(2, 3);
+ void Append(const char *str);
+ void AppendF(const char *format, ...) FORMAT(2, 3);
const char *data() const { return buffer_.data(); }
char *data() { return buffer_.data(); }
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
index 299561b3ad3a..ba4670751697 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
@@ -33,16 +33,17 @@
// COMMON_INTERCEPTOR_STRERROR
//===----------------------------------------------------------------------===//
+#include <stdarg.h>
+
#include "interception/interception.h"
#include "sanitizer_addrhashmap.h"
+#include "sanitizer_dl.h"
#include "sanitizer_errno.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_platform_interceptors.h"
#include "sanitizer_symbolizer.h"
#include "sanitizer_tls_get_addr.h"
-#include <stdarg.h>
-
#if SANITIZER_INTERCEPTOR_HOOKS
#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...) f(__VA_ARGS__);
#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...) \
@@ -445,11 +446,13 @@ INTERCEPTOR(char*, textdomain, const char *domainname) {
#define INIT_TEXTDOMAIN
#endif
-#if SANITIZER_INTERCEPT_STRCMP
+#if SANITIZER_INTERCEPT_STRCMP || SANITIZER_INTERCEPT_MEMCMP
static inline int CharCmpX(unsigned char c1, unsigned char c2) {
return (c1 == c2) ? 0 : (c1 < c2) ? -1 : 1;
}
+#endif
+#if SANITIZER_INTERCEPT_STRCMP
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcmp, uptr called_pc,
const char *s1, const char *s2, int result)
@@ -1491,6 +1494,16 @@ VSCANF_INTERCEPTOR_IMPL(__isoc99_vsscanf, false, str, format, ap)
INTERCEPTOR(int, __isoc99_vfscanf, void *stream, const char *format, va_list ap)
VSCANF_INTERCEPTOR_IMPL(__isoc99_vfscanf, false, stream, format, ap)
+
+INTERCEPTOR(int, __isoc23_vscanf, const char *format, va_list ap)
+VSCANF_INTERCEPTOR_IMPL(__isoc23_vscanf, false, format, ap)
+
+INTERCEPTOR(int, __isoc23_vsscanf, const char *str, const char *format,
+ va_list ap)
+VSCANF_INTERCEPTOR_IMPL(__isoc23_vsscanf, false, str, format, ap)
+
+INTERCEPTOR(int, __isoc23_vfscanf, void *stream, const char *format, va_list ap)
+VSCANF_INTERCEPTOR_IMPL(__isoc23_vfscanf, false, stream, format, ap)
#endif // SANITIZER_INTERCEPT_ISOC99_SCANF
INTERCEPTOR(int, scanf, const char *format, ...)
@@ -1511,6 +1524,15 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_fscanf, __isoc99_vfscanf, stream, format)
INTERCEPTOR(int, __isoc99_sscanf, const char *str, const char *format, ...)
FORMAT_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
+
+INTERCEPTOR(int, __isoc23_scanf, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc23_scanf, __isoc23_vscanf, format)
+
+INTERCEPTOR(int, __isoc23_fscanf, void *stream, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc23_fscanf, __isoc23_vfscanf, stream, format)
+
+INTERCEPTOR(int, __isoc23_sscanf, const char *str, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc23_sscanf, __isoc23_vsscanf, str, format)
#endif
#endif
@@ -1534,7 +1556,13 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
COMMON_INTERCEPT_FUNCTION(__isoc99_fscanf); \
COMMON_INTERCEPT_FUNCTION(__isoc99_vscanf); \
COMMON_INTERCEPT_FUNCTION(__isoc99_vsscanf); \
- COMMON_INTERCEPT_FUNCTION(__isoc99_vfscanf);
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vfscanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc23_scanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc23_sscanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc23_fscanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc23_vscanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc23_vsscanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc23_vfscanf);
#else
#define INIT_ISOC99_SCANF
#endif
@@ -3539,30 +3567,26 @@ UNUSED static inline void StrtolFixAndCheck(void *ctx, const char *nptr,
(real_endptr - nptr) + 1 : 0);
}
-
#if SANITIZER_INTERCEPT_STRTOIMAX
-INTERCEPTOR(INTMAX_T, strtoimax, const char *nptr, char **endptr, int base) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, strtoimax, nptr, endptr, base);
- // FIXME: under ASan the call below may write to freed memory and corrupt
- // its metadata. See
- // https://github.com/google/sanitizers/issues/321.
+template <typename Fn>
+static ALWAYS_INLINE auto StrtoimaxImpl(void *ctx, Fn real, const char *nptr,
+ char **endptr, int base)
+ -> decltype(real(nullptr, nullptr, 0)) {
char *real_endptr;
- INTMAX_T res = REAL(strtoimax)(nptr, &real_endptr, base);
+ auto res = real(nptr, &real_endptr, base);
StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
return res;
}
+INTERCEPTOR(INTMAX_T, strtoimax, const char *nptr, char **endptr, int base) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strtoimax, nptr, endptr, base);
+ return StrtoimaxImpl(ctx, REAL(strtoimax), nptr, endptr, base);
+}
INTERCEPTOR(UINTMAX_T, strtoumax, const char *nptr, char **endptr, int base) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strtoumax, nptr, endptr, base);
- // FIXME: under ASan the call below may write to freed memory and corrupt
- // its metadata. See
- // https://github.com/google/sanitizers/issues/321.
- char *real_endptr;
- UINTMAX_T res = REAL(strtoumax)(nptr, &real_endptr, base);
- StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
- return res;
+ return StrtoimaxImpl(ctx, REAL(strtoumax), nptr, endptr, base);
}
#define INIT_STRTOIMAX \
@@ -3572,6 +3596,25 @@ INTERCEPTOR(UINTMAX_T, strtoumax, const char *nptr, char **endptr, int base) {
#define INIT_STRTOIMAX
#endif
+#if SANITIZER_INTERCEPT_STRTOIMAX && SANITIZER_GLIBC
+INTERCEPTOR(INTMAX_T, __isoc23_strtoimax, const char *nptr, char **endptr, int base) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __isoc23_strtoimax, nptr, endptr, base);
+ return StrtoimaxImpl(ctx, REAL(__isoc23_strtoimax), nptr, endptr, base);
+}
+INTERCEPTOR(UINTMAX_T, __isoc23_strtoumax, const char *nptr, char **endptr, int base) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __isoc23_strtoumax, nptr, endptr, base);
+ return StrtoimaxImpl(ctx, REAL(__isoc23_strtoumax), nptr, endptr, base);
+}
+
+# define INIT_STRTOIMAX_C23 \
+ COMMON_INTERCEPT_FUNCTION(__isoc23_strtoimax); \
+ COMMON_INTERCEPT_FUNCTION(__isoc23_strtoumax);
+#else
+# define INIT_STRTOIMAX_C23
+#endif
+
#if SANITIZER_INTERCEPT_MBSTOWCS
INTERCEPTOR(SIZE_T, mbstowcs, wchar_t *dest, const char *src, SIZE_T len) {
void *ctx;
@@ -6265,7 +6308,36 @@ INTERCEPTOR(int, fclose, __sanitizer_FILE *fp) {
INTERCEPTOR(void*, dlopen, const char *filename, int flag) {
void *ctx;
COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, dlopen, filename, flag);
- if (filename) COMMON_INTERCEPTOR_READ_STRING(ctx, filename, 0);
+
+ if (filename) {
+ COMMON_INTERCEPTOR_READ_STRING(ctx, filename, 0);
+
+# if !SANITIZER_DYNAMIC
+ // We care about a very specific use-case: dladdr on
+ // statically-linked ASan may return <main program>
+ // instead of the library.
+ // We therefore only take effect if the sanitizer is statically
+ // linked, and we don't bother canonicalizing paths because
+ // dladdr should return the same address both times (we assume
+ // the user did not canonicalize the result from dladdr).
+ if (common_flags()->test_only_replace_dlopen_main_program) {
+ VPrintf(1, "dlopen interceptor: filename: %s\n", filename);
+
+ const char *SelfFName = DladdrSelfFName();
+ VPrintf(1, "dlopen interceptor: DladdrSelfFName: %p %s\n",
+ (void *)SelfFName, SelfFName);
+
+ if (SelfFName && internal_strcmp(SelfFName, filename) == 0) {
+ // It's possible they copied the string from dladdr, so
+ // we do a string comparison rather than pointer comparison.
+ VPrintf(1, "dlopen interceptor: replacing %s because it matches %s\n",
+ filename, SelfFName);
+ filename = (char *)0; // RTLD_DEFAULT
+ }
+ }
+# endif // !SANITIZER_DYNAMIC
+ }
+
void *res = COMMON_INTERCEPTOR_DLOPEN(filename, flag);
Symbolizer::GetOrInit()->InvalidateModuleList();
COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res);
@@ -10304,6 +10376,7 @@ static void InitializeCommonInterceptors() {
INIT_GETCWD;
INIT_GET_CURRENT_DIR_NAME;
INIT_STRTOIMAX;
+ INIT_STRTOIMAX_C23;
INIT_MBSTOWCS;
INIT_MBSNRTOWCS;
INIT_WCSTOMBS;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_format.inc b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_format.inc
index 220abb89c3be..24e5dc0fb22f 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_format.inc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_format.inc
@@ -340,11 +340,19 @@ static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
size = 0;
}
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
- // For %ms/%mc, write the allocated output buffer as well.
+ // For %mc/%mC/%ms/%m[/%mS, write the allocated output buffer as well.
if (dir.allocate) {
- char *buf = *(char **)argp;
- if (buf)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, internal_strlen(buf) + 1);
+ if (char *buf = *(char **)argp) {
+ if (dir.convSpecifier == 'c')
+ size = 1;
+ else if (dir.convSpecifier == 'C')
+ size = sizeof(wchar_t);
+ else if (dir.convSpecifier == 'S')
+ size = (internal_wcslen((wchar_t *)buf) + 1) * sizeof(wchar_t);
+ else // 's' or '['
+ size = internal_strlen(buf) + 1;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, size);
+ }
}
}
}
@@ -539,24 +547,25 @@ static void printf_common(void *ctx, const char *format, va_list aq) {
continue;
} else if (size == FSS_STRLEN) {
if (void *argp = va_arg(aq, void *)) {
+ uptr len;
if (dir.starredPrecision) {
// FIXME: properly support starred precision for strings.
- size = 0;
+ len = 0;
} else if (dir.fieldPrecision > 0) {
// Won't read more than "precision" symbols.
- size = internal_strnlen((const char *)argp, dir.fieldPrecision);
- if (size < dir.fieldPrecision) size++;
+ len = internal_strnlen((const char *)argp, dir.fieldPrecision);
+ if (len < (uptr)dir.fieldPrecision)
+ len++;
} else {
// Whole string will be accessed.
- size = internal_strlen((const char *)argp) + 1;
+ len = internal_strlen((const char *)argp) + 1;
}
- COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, size);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, len);
}
} else if (size == FSS_WCSLEN) {
if (void *argp = va_arg(aq, void *)) {
// FIXME: Properly support wide-character strings (via wcsrtombs).
- size = 0;
- COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, size);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, 0);
}
} else {
// Skip non-pointer args
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_interface_posix.inc b/compiler-rt/lib/sanitizer_common/sanitizer_common_interface_posix.inc
index a5259be9335a..6b567edc97a8 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common_interface_posix.inc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_interface_posix.inc
@@ -9,6 +9,7 @@
//===----------------------------------------------------------------------===//
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_code)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_data)
+INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_frame)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_demangle)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_flush)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_set_demangle)
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp
index 895763ac6b6f..7b74bb1a7e0f 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp
@@ -119,8 +119,10 @@ void MaybeStartBackgroudThread() {}
#endif
void WriteToSyslog(const char *msg) {
+ if (!msg)
+ return;
InternalScopedString msg_copy;
- msg_copy.append("%s", msg);
+ msg_copy.Append(msg);
const char *p = msg_copy.data();
// Print one line at a time.
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_coverage_fuchsia.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_coverage_fuchsia.cpp
index 35c325359148..73668a56218c 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_coverage_fuchsia.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_coverage_fuchsia.cpp
@@ -35,7 +35,7 @@
#include "sanitizer_common.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
-#include "sanitizer_symbolizer_fuchsia.h"
+# include "sanitizer_symbolizer_markup_constants.h"
using namespace __sanitizer;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_dl.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_dl.cpp
new file mode 100644
index 000000000000..e957d529c2fe
--- /dev/null
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_dl.cpp
@@ -0,0 +1,37 @@
+//===-- sanitizer_dl.cpp --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file has helper functions that depend on libc's dynamic loading
+// introspection.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_dl.h"
+
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if SANITIZER_GLIBC
+# include <dlfcn.h>
+#endif
+
+namespace __sanitizer {
+extern const char *SanitizerToolName;
+
+const char *DladdrSelfFName(void) {
+#if SANITIZER_GLIBC
+ Dl_info info;
+ int ret = dladdr((void *)&SanitizerToolName, &info);
+ if (ret) {
+ return info.dli_fname;
+ }
+#endif
+
+ return nullptr;
+}
+
+} // namespace __sanitizer
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_dl.h b/compiler-rt/lib/sanitizer_common/sanitizer_dl.h
new file mode 100644
index 000000000000..ecde0664eb04
--- /dev/null
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_dl.h
@@ -0,0 +1,26 @@
+//===-- sanitizer_dl.h ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file has helper functions that depend on libc's dynamic loading
+// introspection.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_DL_H
+#define SANITIZER_DL_H
+
+namespace __sanitizer {
+
+// Returns the path to the shared object or - in the case of statically linked
+// sanitizers
+// - the main program itself, that contains the sanitizer.
+const char* DladdrSelfFName(void);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_DL_H
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_file.h b/compiler-rt/lib/sanitizer_common/sanitizer_file.h
index 9459c6b00acc..bef2c842d9f2 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_file.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_file.h
@@ -84,7 +84,7 @@ bool IsPathSeparator(const char c);
bool IsAbsolutePath(const char *path);
// Returns true on success, false on failure.
bool CreateDir(const char *pathname);
-// Starts a subprocess and returs its pid.
+// Starts a subprocess and returns its pid.
// If *_fd parameters are not kInvalidFd their corresponding input/output
// streams will be redirect to the file. The files will always be closed
// in parent process even in case of an error.
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.cpp
index c620da7f220a..ca37df348580 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.cpp
@@ -19,8 +19,6 @@
namespace __sanitizer {
-LowLevelAllocator FlagParser::Alloc;
-
class UnknownFlags {
static const int kMaxUnknownFlags = 20;
const char *unknown_flags_[kMaxUnknownFlags];
@@ -49,7 +47,7 @@ void ReportUnrecognizedFlags() {
char *FlagParser::ll_strndup(const char *s, uptr n) {
uptr len = internal_strnlen(s, n);
- char *s2 = (char*)Alloc.Allocate(len + 1);
+ char *s2 = (char *)GetGlobalLowLevelAllocator().Allocate(len + 1);
internal_memcpy(s2, s, len);
s2[len] = 0;
return s2;
@@ -185,7 +183,8 @@ void FlagParser::RegisterHandler(const char *name, FlagHandlerBase *handler,
}
FlagParser::FlagParser() : n_flags_(0), buf_(nullptr), pos_(0) {
- flags_ = (Flag *)Alloc.Allocate(sizeof(Flag) * kMaxFlags);
+ flags_ =
+ (Flag *)GetGlobalLowLevelAllocator().Allocate(sizeof(Flag) * kMaxFlags);
}
} // namespace __sanitizer
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.h b/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.h
index ae49294dde95..dccdee4da2bd 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.h
@@ -178,8 +178,6 @@ class FlagParser {
bool ParseFile(const char *path, bool ignore_missing);
void PrintFlagDescriptions();
- static LowLevelAllocator Alloc;
-
private:
void fatal_error(const char *err);
bool is_space(char c);
@@ -193,7 +191,7 @@ class FlagParser {
template <typename T>
static void RegisterFlag(FlagParser *parser, const char *name, const char *desc,
T *var) {
- FlagHandler<T> *fh = new (FlagParser::Alloc) FlagHandler<T>(var);
+ FlagHandler<T> *fh = new (GetGlobalLowLevelAllocator()) FlagHandler<T>(var);
parser->RegisterHandler(name, fh, desc);
}
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_flags.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_flags.cpp
index d52e96a7c381..849a122386a4 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_flags.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_flags.cpp
@@ -108,11 +108,11 @@ class FlagHandlerInclude final : public FlagHandlerBase {
};
void RegisterIncludeFlags(FlagParser *parser, CommonFlags *cf) {
- FlagHandlerInclude *fh_include = new (FlagParser::Alloc)
+ FlagHandlerInclude *fh_include = new (GetGlobalLowLevelAllocator())
FlagHandlerInclude(parser, /*ignore_missing*/ false);
parser->RegisterHandler("include", fh_include,
"read more options from the given file");
- FlagHandlerInclude *fh_include_if_exists = new (FlagParser::Alloc)
+ FlagHandlerInclude *fh_include_if_exists = new (GetGlobalLowLevelAllocator())
FlagHandlerInclude(parser, /*ignore_missing*/ true);
parser->RegisterHandler(
"include_if_exists", fh_include_if_exists,
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc b/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc
index 6148ae56067c..7836347d233a 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc
@@ -269,3 +269,13 @@ COMMON_FLAG(bool, detect_write_exec, false,
COMMON_FLAG(bool, test_only_emulate_no_memorymap, false,
"TEST ONLY fail to read memory mappings to emulate sanitized "
"\"init\"")
+// With static linking, dladdr((void*)pthread_join) or similar will return the
+// path to the main program. This flag will replace dlopen(<main program,...>
+// with dlopen(NULL,...), which is the correct way to get a handle to the main
+// program.
+COMMON_FLAG(bool, test_only_replace_dlopen_main_program, false,
+ "TEST ONLY replace dlopen(<main program>,...) with dlopen(NULL)")
+
+COMMON_FLAG(bool, enable_symbolizer_markup, SANITIZER_FUCHSIA,
+ "Use sanitizer symbolizer markup, available on Linux "
+ "and always set true for Fuchsia.")
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
index 1e25265c00a2..0245164403c5 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
@@ -226,13 +226,14 @@ static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size,
const char *name) {
- return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, name_,
- false);
+ return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
+ name ? name : name_, false);
}
uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size,
const char *name) {
- return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, name_, true);
+ return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
+ name ? name : name_, true);
}
void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h b/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h
index 552d65067944..3809669dd48b 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h
@@ -15,6 +15,11 @@
#include "sanitizer_platform.h"
#include "sanitizer_redefine_builtins.h"
+// GCC does not understand __has_feature.
+#if !defined(__has_feature)
+#define __has_feature(x) 0
+#endif
+
#ifndef SANITIZER_DEBUG
# define SANITIZER_DEBUG 0
#endif
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_libc.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_libc.cpp
index 4a6fa5e8dbac..9318066afed2 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_libc.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_libc.cpp
@@ -199,6 +199,14 @@ char *internal_strncat(char *dst, const char *src, uptr n) {
return dst;
}
+wchar_t *internal_wcscpy(wchar_t *dst, const wchar_t *src) {
+ wchar_t *dst_it = dst;
+ do {
+ *dst_it++ = *src++;
+ } while (*src);
+ return dst;
+}
+
uptr internal_strlcpy(char *dst, const char *src, uptr maxlen) {
const uptr srclen = internal_strlen(src);
if (srclen < maxlen) {
@@ -218,6 +226,14 @@ char *internal_strncpy(char *dst, const char *src, uptr n) {
return dst;
}
+wchar_t *internal_wcsncpy(wchar_t *dst, const wchar_t *src, uptr n) {
+ uptr i;
+ for (i = 0; i < n && src[i]; ++i)
+ dst[i] = src[i];
+ internal_memset(dst + i, 0, (n - i) * sizeof(wchar_t));
+ return dst;
+}
+
uptr internal_strnlen(const char *s, uptr maxlen) {
uptr i = 0;
while (i < maxlen && s[i]) i++;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_libc.h b/compiler-rt/lib/sanitizer_common/sanitizer_libc.h
index e881db207908..1906569e2a5f 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_libc.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_libc.h
@@ -71,7 +71,8 @@ int internal_snprintf(char *buffer, uptr length, const char *format, ...)
FORMAT(3, 4);
uptr internal_wcslen(const wchar_t *s);
uptr internal_wcsnlen(const wchar_t *s, uptr maxlen);
-
+wchar_t *internal_wcscpy(wchar_t *dst, const wchar_t *src);
+wchar_t *internal_wcsncpy(wchar_t *dst, const wchar_t *src, uptr maxlen);
// Return true if all bytes in [mem, mem+size) are zero.
// Optimized for the case when the result is true.
bool mem_is_zero(const char *mem, uptr size);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
index d2b3b63f3a7a..841d7c096292 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
@@ -16,101 +16,101 @@
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
SANITIZER_SOLARIS
-#include "sanitizer_common.h"
-#include "sanitizer_flags.h"
-#include "sanitizer_getauxval.h"
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_libc.h"
-#include "sanitizer_linux.h"
-#include "sanitizer_mutex.h"
-#include "sanitizer_placement_new.h"
-#include "sanitizer_procmaps.h"
-
-#if SANITIZER_LINUX && !SANITIZER_GO
-#include <asm/param.h>
-#endif
+# include "sanitizer_common.h"
+# include "sanitizer_flags.h"
+# include "sanitizer_getauxval.h"
+# include "sanitizer_internal_defs.h"
+# include "sanitizer_libc.h"
+# include "sanitizer_linux.h"
+# include "sanitizer_mutex.h"
+# include "sanitizer_placement_new.h"
+# include "sanitizer_procmaps.h"
+
+# if SANITIZER_LINUX && !SANITIZER_GO
+# include <asm/param.h>
+# endif
// For mips64, syscall(__NR_stat) fills the buffer in the 'struct kernel_stat'
// format. Struct kernel_stat is defined as 'struct stat' in asm/stat.h. To
// access stat from asm/stat.h, without conflicting with definition in
// sys/stat.h, we use this trick.
-#if SANITIZER_MIPS64
-#include <asm/unistd.h>
-#include <sys/types.h>
-#define stat kernel_stat
-#if SANITIZER_GO
-#undef st_atime
-#undef st_mtime
-#undef st_ctime
-#define st_atime st_atim
-#define st_mtime st_mtim
-#define st_ctime st_ctim
-#endif
-#include <asm/stat.h>
-#undef stat
-#endif
+# if SANITIZER_MIPS64
+# include <asm/unistd.h>
+# include <sys/types.h>
+# define stat kernel_stat
+# if SANITIZER_GO
+# undef st_atime
+# undef st_mtime
+# undef st_ctime
+# define st_atime st_atim
+# define st_mtime st_mtim
+# define st_ctime st_ctim
+# endif
+# include <asm/stat.h>
+# undef stat
+# endif
-#include <dlfcn.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <link.h>
-#include <pthread.h>
-#include <sched.h>
-#include <signal.h>
-#include <sys/mman.h>
-#include <sys/param.h>
-#if !SANITIZER_SOLARIS
-#include <sys/ptrace.h>
-#endif
-#include <sys/resource.h>
-#include <sys/stat.h>
-#include <sys/syscall.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <ucontext.h>
-#include <unistd.h>
-
-#if SANITIZER_LINUX
-#include <sys/utsname.h>
-#endif
+# include <dlfcn.h>
+# include <errno.h>
+# include <fcntl.h>
+# include <link.h>
+# include <pthread.h>
+# include <sched.h>
+# include <signal.h>
+# include <sys/mman.h>
+# include <sys/param.h>
+# if !SANITIZER_SOLARIS
+# include <sys/ptrace.h>
+# endif
+# include <sys/resource.h>
+# include <sys/stat.h>
+# include <sys/syscall.h>
+# include <sys/time.h>
+# include <sys/types.h>
+# include <ucontext.h>
+# include <unistd.h>
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
-#include <sys/personality.h>
-#endif
+# if SANITIZER_LINUX
+# include <sys/utsname.h>
+# endif
-#if SANITIZER_LINUX && defined(__loongarch__)
-# include <sys/sysmacros.h>
-#endif
+# if SANITIZER_LINUX && !SANITIZER_ANDROID
+# include <sys/personality.h>
+# endif
+
+# if SANITIZER_LINUX && defined(__loongarch__)
+# include <sys/sysmacros.h>
+# endif
-#if SANITIZER_FREEBSD
-#include <sys/exec.h>
-#include <sys/procctl.h>
-#include <sys/sysctl.h>
-#include <machine/atomic.h>
+# if SANITIZER_FREEBSD
+# include <machine/atomic.h>
+# include <sys/exec.h>
+# include <sys/procctl.h>
+# include <sys/sysctl.h>
extern "C" {
// <sys/umtx.h> must be included after <errno.h> and <sys/types.h> on
// FreeBSD 9.2 and 10.0.
-#include <sys/umtx.h>
+# include <sys/umtx.h>
}
-#include <sys/thr.h>
-#endif // SANITIZER_FREEBSD
+# include <sys/thr.h>
+# endif // SANITIZER_FREEBSD
-#if SANITIZER_NETBSD
-#include <limits.h> // For NAME_MAX
-#include <sys/sysctl.h>
-#include <sys/exec.h>
+# if SANITIZER_NETBSD
+# include <limits.h> // For NAME_MAX
+# include <sys/exec.h>
+# include <sys/sysctl.h>
extern struct ps_strings *__ps_strings;
-#endif // SANITIZER_NETBSD
+# endif // SANITIZER_NETBSD
-#if SANITIZER_SOLARIS
-#include <stdlib.h>
-#include <thread.h>
-#define environ _environ
-#endif
+# if SANITIZER_SOLARIS
+# include <stdlib.h>
+# include <thread.h>
+# define environ _environ
+# endif
extern char **environ;
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
// <linux/time.h>
struct kernel_timeval {
long tv_sec;
@@ -123,36 +123,36 @@ const int FUTEX_WAKE = 1;
const int FUTEX_PRIVATE_FLAG = 128;
const int FUTEX_WAIT_PRIVATE = FUTEX_WAIT | FUTEX_PRIVATE_FLAG;
const int FUTEX_WAKE_PRIVATE = FUTEX_WAKE | FUTEX_PRIVATE_FLAG;
-#endif // SANITIZER_LINUX
+# endif // SANITIZER_LINUX
// Are we using 32-bit or 64-bit Linux syscalls?
// x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32
// but it still needs to use 64-bit syscalls.
-#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__powerpc64__) || \
- SANITIZER_WORDSIZE == 64 || \
- (defined(__mips__) && _MIPS_SIM == _ABIN32))
-# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 1
-#else
-# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0
-#endif
+# if SANITIZER_LINUX && (defined(__x86_64__) || defined(__powerpc64__) || \
+ SANITIZER_WORDSIZE == 64 || \
+ (defined(__mips__) && _MIPS_SIM == _ABIN32))
+# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 1
+# else
+# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0
+# endif
// Note : FreeBSD had implemented both
// Linux apis, available from
// future 12.x version most likely
-#if SANITIZER_LINUX && defined(__NR_getrandom)
-# if !defined(GRND_NONBLOCK)
-# define GRND_NONBLOCK 1
-# endif
-# define SANITIZER_USE_GETRANDOM 1
-#else
-# define SANITIZER_USE_GETRANDOM 0
-#endif // SANITIZER_LINUX && defined(__NR_getrandom)
-
-#if SANITIZER_FREEBSD && __FreeBSD_version >= 1200000
-# define SANITIZER_USE_GETENTROPY 1
-#else
-# define SANITIZER_USE_GETENTROPY 0
-#endif
+# if SANITIZER_LINUX && defined(__NR_getrandom)
+# if !defined(GRND_NONBLOCK)
+# define GRND_NONBLOCK 1
+# endif
+# define SANITIZER_USE_GETRANDOM 1
+# else
+# define SANITIZER_USE_GETRANDOM 0
+# endif // SANITIZER_LINUX && defined(__NR_getrandom)
+
+# if SANITIZER_FREEBSD && __FreeBSD_version >= 1200000
+# define SANITIZER_USE_GETENTROPY 1
+# else
+# define SANITIZER_USE_GETENTROPY 0
+# endif
namespace __sanitizer {
@@ -203,33 +203,33 @@ ScopedBlockSignals::~ScopedBlockSignals() { SetSigProcMask(&saved_, nullptr); }
# endif
// --------------- sanitizer_libc.h
-#if !SANITIZER_SOLARIS && !SANITIZER_NETBSD
-#if !SANITIZER_S390
+# if !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+# if !SANITIZER_S390
uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
u64 offset) {
-#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
+# if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
return internal_syscall(SYSCALL(mmap), (uptr)addr, length, prot, flags, fd,
offset);
-#else
+# else
// mmap2 specifies file offset in 4096-byte units.
CHECK(IsAligned(offset, 4096));
return internal_syscall(SYSCALL(mmap2), addr, length, prot, flags, fd,
offset / 4096);
-#endif
+# endif
}
-#endif // !SANITIZER_S390
+# endif // !SANITIZER_S390
uptr internal_munmap(void *addr, uptr length) {
return internal_syscall(SYSCALL(munmap), (uptr)addr, length);
}
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
uptr internal_mremap(void *old_address, uptr old_size, uptr new_size, int flags,
void *new_address) {
return internal_syscall(SYSCALL(mremap), (uptr)old_address, old_size,
new_size, flags, (uptr)new_address);
}
-#endif
+# endif
int internal_mprotect(void *addr, uptr length, int prot) {
return internal_syscall(SYSCALL(mprotect), (uptr)addr, length, prot);
@@ -239,25 +239,23 @@ int internal_madvise(uptr addr, uptr length, int advice) {
return internal_syscall(SYSCALL(madvise), addr, length, advice);
}
-uptr internal_close(fd_t fd) {
- return internal_syscall(SYSCALL(close), fd);
-}
+uptr internal_close(fd_t fd) { return internal_syscall(SYSCALL(close), fd); }
uptr internal_open(const char *filename, int flags) {
# if SANITIZER_LINUX
return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags);
-#else
+# else
return internal_syscall(SYSCALL(open), (uptr)filename, flags);
-#endif
+# endif
}
uptr internal_open(const char *filename, int flags, u32 mode) {
# if SANITIZER_LINUX
return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags,
mode);
-#else
+# else
return internal_syscall(SYSCALL(open), (uptr)filename, flags, mode);
-#endif
+# endif
}
uptr internal_read(fd_t fd, void *buf, uptr count) {
@@ -276,12 +274,13 @@ uptr internal_write(fd_t fd, const void *buf, uptr count) {
uptr internal_ftruncate(fd_t fd, uptr size) {
sptr res;
- HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(ftruncate), fd,
- (OFF_T)size));
+ HANDLE_EINTR(res,
+ (sptr)internal_syscall(SYSCALL(ftruncate), fd, (OFF_T)size));
return res;
}
-#if (!SANITIZER_LINUX_USES_64BIT_SYSCALLS || SANITIZER_SPARC) && SANITIZER_LINUX
+# if (!SANITIZER_LINUX_USES_64BIT_SYSCALLS || SANITIZER_SPARC) && \
+ SANITIZER_LINUX
static void stat64_to_stat(struct stat64 *in, struct stat *out) {
internal_memset(out, 0, sizeof(*out));
out->st_dev = in->st_dev;
@@ -298,9 +297,9 @@ static void stat64_to_stat(struct stat64 *in, struct stat *out) {
out->st_mtime = in->st_mtime;
out->st_ctime = in->st_ctime;
}
-#endif
+# endif
-#if SANITIZER_LINUX && defined(__loongarch__)
+# if SANITIZER_LINUX && defined(__loongarch__)
static void statx_to_stat(struct statx *in, struct stat *out) {
internal_memset(out, 0, sizeof(*out));
out->st_dev = makedev(in->stx_dev_major, in->stx_dev_minor);
@@ -320,26 +319,26 @@ static void statx_to_stat(struct statx *in, struct stat *out) {
out->st_ctime = in->stx_ctime.tv_sec;
out->st_ctim.tv_nsec = in->stx_ctime.tv_nsec;
}
-#endif
+# endif
-#if SANITIZER_MIPS64
+# if SANITIZER_MIPS64
// Undefine compatibility macros from <sys/stat.h>
// so that they would not clash with the kernel_stat
// st_[a|m|c]time fields
-#if !SANITIZER_GO
-#undef st_atime
-#undef st_mtime
-#undef st_ctime
-#endif
-#if defined(SANITIZER_ANDROID)
+# if !SANITIZER_GO
+# undef st_atime
+# undef st_mtime
+# undef st_ctime
+# endif
+# if defined(SANITIZER_ANDROID)
// Bionic sys/stat.h defines additional macros
// for compatibility with the old NDKs and
// they clash with the kernel_stat structure
// st_[a|m|c]time_nsec fields.
-#undef st_atime_nsec
-#undef st_mtime_nsec
-#undef st_ctime_nsec
-#endif
+# undef st_atime_nsec
+# undef st_mtime_nsec
+# undef st_ctime_nsec
+# endif
static void kernel_stat_to_stat(struct kernel_stat *in, struct stat *out) {
internal_memset(out, 0, sizeof(*out));
out->st_dev = in->st_dev;
@@ -352,91 +351,90 @@ static void kernel_stat_to_stat(struct kernel_stat *in, struct stat *out) {
out->st_size = in->st_size;
out->st_blksize = in->st_blksize;
out->st_blocks = in->st_blocks;
-#if defined(__USE_MISC) || \
- defined(__USE_XOPEN2K8) || \
- defined(SANITIZER_ANDROID)
+# if defined(__USE_MISC) || defined(__USE_XOPEN2K8) || \
+ defined(SANITIZER_ANDROID)
out->st_atim.tv_sec = in->st_atime;
out->st_atim.tv_nsec = in->st_atime_nsec;
out->st_mtim.tv_sec = in->st_mtime;
out->st_mtim.tv_nsec = in->st_mtime_nsec;
out->st_ctim.tv_sec = in->st_ctime;
out->st_ctim.tv_nsec = in->st_ctime_nsec;
-#else
+# else
out->st_atime = in->st_atime;
out->st_atimensec = in->st_atime_nsec;
out->st_mtime = in->st_mtime;
out->st_mtimensec = in->st_mtime_nsec;
out->st_ctime = in->st_ctime;
out->st_atimensec = in->st_ctime_nsec;
-#endif
+# endif
}
-#endif
+# endif
uptr internal_stat(const char *path, void *buf) {
-# if SANITIZER_FREEBSD
+# if SANITIZER_FREEBSD
return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf, 0);
-# elif SANITIZER_LINUX
-# if defined(__loongarch__)
+# elif SANITIZER_LINUX
+# if defined(__loongarch__)
struct statx bufx;
int res = internal_syscall(SYSCALL(statx), AT_FDCWD, (uptr)path,
AT_NO_AUTOMOUNT, STATX_BASIC_STATS, (uptr)&bufx);
statx_to_stat(&bufx, (struct stat *)buf);
return res;
-# elif (SANITIZER_WORDSIZE == 64 || SANITIZER_X32 || \
- (defined(__mips__) && _MIPS_SIM == _ABIN32)) && \
- !SANITIZER_SPARC
+# elif (SANITIZER_WORDSIZE == 64 || SANITIZER_X32 || \
+ (defined(__mips__) && _MIPS_SIM == _ABIN32)) && \
+ !SANITIZER_SPARC
return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf,
0);
-# else
+# else
struct stat64 buf64;
int res = internal_syscall(SYSCALL(fstatat64), AT_FDCWD, (uptr)path,
(uptr)&buf64, 0);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
-# endif
-# else
+# endif
+# else
struct stat64 buf64;
int res = internal_syscall(SYSCALL(stat64), path, &buf64);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
-# endif
+# endif
}
uptr internal_lstat(const char *path, void *buf) {
-# if SANITIZER_FREEBSD
+# if SANITIZER_FREEBSD
return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf,
AT_SYMLINK_NOFOLLOW);
-# elif SANITIZER_LINUX
-# if defined(__loongarch__)
+# elif SANITIZER_LINUX
+# if defined(__loongarch__)
struct statx bufx;
int res = internal_syscall(SYSCALL(statx), AT_FDCWD, (uptr)path,
AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT,
STATX_BASIC_STATS, (uptr)&bufx);
statx_to_stat(&bufx, (struct stat *)buf);
return res;
-# elif (defined(_LP64) || SANITIZER_X32 || \
- (defined(__mips__) && _MIPS_SIM == _ABIN32)) && \
- !SANITIZER_SPARC
+# elif (defined(_LP64) || SANITIZER_X32 || \
+ (defined(__mips__) && _MIPS_SIM == _ABIN32)) && \
+ !SANITIZER_SPARC
return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf,
AT_SYMLINK_NOFOLLOW);
-# else
+# else
struct stat64 buf64;
int res = internal_syscall(SYSCALL(fstatat64), AT_FDCWD, (uptr)path,
(uptr)&buf64, AT_SYMLINK_NOFOLLOW);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
-# endif
-# else
+# endif
+# else
struct stat64 buf64;
int res = internal_syscall(SYSCALL(lstat64), path, &buf64);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
-# endif
+# endif
}
uptr internal_fstat(fd_t fd, void *buf) {
-#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
-#if SANITIZER_MIPS64
+# if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
+# if SANITIZER_MIPS64
// For mips64, fstat syscall fills buffer in the format of kernel_stat
struct kernel_stat kbuf;
int res = internal_syscall(SYSCALL(fstat), fd, &kbuf);
@@ -451,12 +449,12 @@ uptr internal_fstat(fd_t fd, void *buf) {
# else
return internal_syscall(SYSCALL(fstat), fd, (uptr)buf);
# endif
-#else
+# else
struct stat64 buf64;
int res = internal_syscall(SYSCALL(fstat64), fd, &buf64);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
-#endif
+# endif
}
uptr internal_filesize(fd_t fd) {
@@ -466,50 +464,46 @@ uptr internal_filesize(fd_t fd) {
return (uptr)st.st_size;
}
-uptr internal_dup(int oldfd) {
- return internal_syscall(SYSCALL(dup), oldfd);
-}
+uptr internal_dup(int oldfd) { return internal_syscall(SYSCALL(dup), oldfd); }
uptr internal_dup2(int oldfd, int newfd) {
# if SANITIZER_LINUX
return internal_syscall(SYSCALL(dup3), oldfd, newfd, 0);
-#else
+# else
return internal_syscall(SYSCALL(dup2), oldfd, newfd);
-#endif
+# endif
}
uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
# if SANITIZER_LINUX
return internal_syscall(SYSCALL(readlinkat), AT_FDCWD, (uptr)path, (uptr)buf,
bufsize);
-#else
+# else
return internal_syscall(SYSCALL(readlink), (uptr)path, (uptr)buf, bufsize);
-#endif
+# endif
}
uptr internal_unlink(const char *path) {
# if SANITIZER_LINUX
return internal_syscall(SYSCALL(unlinkat), AT_FDCWD, (uptr)path, 0);
-#else
+# else
return internal_syscall(SYSCALL(unlink), (uptr)path);
-#endif
+# endif
}
uptr internal_rename(const char *oldpath, const char *newpath) {
-# if (defined(__riscv) || defined(__loongarch__)) && defined(__linux__)
+# if (defined(__riscv) || defined(__loongarch__)) && defined(__linux__)
return internal_syscall(SYSCALL(renameat2), AT_FDCWD, (uptr)oldpath, AT_FDCWD,
(uptr)newpath, 0);
-# elif SANITIZER_LINUX
+# elif SANITIZER_LINUX
return internal_syscall(SYSCALL(renameat), AT_FDCWD, (uptr)oldpath, AT_FDCWD,
(uptr)newpath);
-# else
+# else
return internal_syscall(SYSCALL(rename), (uptr)oldpath, (uptr)newpath);
-# endif
+# endif
}
-uptr internal_sched_yield() {
- return internal_syscall(SYSCALL(sched_yield));
-}
+uptr internal_sched_yield() { return internal_syscall(SYSCALL(sched_yield)); }
void internal_usleep(u64 useconds) {
struct timespec ts;
@@ -523,18 +517,18 @@ uptr internal_execve(const char *filename, char *const argv[],
return internal_syscall(SYSCALL(execve), (uptr)filename, (uptr)argv,
(uptr)envp);
}
-#endif // !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+# endif // !SANITIZER_SOLARIS && !SANITIZER_NETBSD
-#if !SANITIZER_NETBSD
+# if !SANITIZER_NETBSD
void internal__exit(int exitcode) {
-#if SANITIZER_FREEBSD || SANITIZER_SOLARIS
+# if SANITIZER_FREEBSD || SANITIZER_SOLARIS
internal_syscall(SYSCALL(exit), exitcode);
-#else
+# else
internal_syscall(SYSCALL(exit_group), exitcode);
-#endif
+# endif
Die(); // Unreachable.
}
-#endif // !SANITIZER_NETBSD
+# endif // !SANITIZER_NETBSD
// ----------------- sanitizer_common.h
bool FileExists(const char *filename) {
@@ -556,30 +550,30 @@ bool DirExists(const char *path) {
# if !SANITIZER_NETBSD
tid_t GetTid() {
-#if SANITIZER_FREEBSD
+# if SANITIZER_FREEBSD
long Tid;
thr_self(&Tid);
return Tid;
-#elif SANITIZER_SOLARIS
+# elif SANITIZER_SOLARIS
return thr_self();
-#else
+# else
return internal_syscall(SYSCALL(gettid));
-#endif
+# endif
}
int TgKill(pid_t pid, tid_t tid, int sig) {
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
return internal_syscall(SYSCALL(tgkill), pid, tid, sig);
-#elif SANITIZER_FREEBSD
+# elif SANITIZER_FREEBSD
return internal_syscall(SYSCALL(thr_kill2), pid, tid, sig);
-#elif SANITIZER_SOLARIS
+# elif SANITIZER_SOLARIS
(void)pid;
return thr_kill(tid, sig);
-#endif
+# endif
}
-#endif
+# endif
-#if SANITIZER_GLIBC
+# if SANITIZER_GLIBC
u64 NanoTime() {
kernel_timeval tv;
internal_memset(&tv, 0, sizeof(tv));
@@ -590,19 +584,19 @@ u64 NanoTime() {
uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp) {
return internal_syscall(SYSCALL(clock_gettime), clk_id, tp);
}
-#elif !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+# elif !SANITIZER_SOLARIS && !SANITIZER_NETBSD
u64 NanoTime() {
struct timespec ts;
clock_gettime(CLOCK_REALTIME, &ts);
return (u64)ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec;
}
-#endif
+# endif
// Like getenv, but reads env directly from /proc (on Linux) or parses the
// 'environ' array (on some others) and does not use libc. This function
// should be called first inside __asan_init.
const char *GetEnv(const char *name) {
-#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_SOLARIS
+# if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_SOLARIS
if (::environ != 0) {
uptr NameLen = internal_strlen(name);
for (char **Env = ::environ; *Env != 0; Env++) {
@@ -611,7 +605,7 @@ const char *GetEnv(const char *name) {
}
}
return 0; // Not found.
-#elif SANITIZER_LINUX
+# elif SANITIZER_LINUX
static char *environ;
static uptr len;
static bool inited;
@@ -621,13 +615,13 @@ const char *GetEnv(const char *name) {
if (!ReadFileToBuffer("/proc/self/environ", &environ, &environ_size, &len))
environ = nullptr;
}
- if (!environ || len == 0) return nullptr;
+ if (!environ || len == 0)
+ return nullptr;
uptr namelen = internal_strlen(name);
const char *p = environ;
while (*p != '\0') { // will happen at the \0\0 that terminates the buffer
// proc file has the format NAME=value\0NAME=value\0NAME=value\0...
- const char* endp =
- (char*)internal_memchr(p, '\0', len - (p - environ));
+ const char *endp = (char *)internal_memchr(p, '\0', len - (p - environ));
if (!endp) // this entry isn't NUL terminated
return nullptr;
else if (!internal_memcmp(p, name, namelen) && p[namelen] == '=') // Match.
@@ -635,18 +629,18 @@ const char *GetEnv(const char *name) {
p = endp + 1;
}
return nullptr; // Not found.
-#else
-#error "Unsupported platform"
-#endif
+# else
+# error "Unsupported platform"
+# endif
}
-#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD && !SANITIZER_GO
+# if !SANITIZER_FREEBSD && !SANITIZER_NETBSD && !SANITIZER_GO
extern "C" {
SANITIZER_WEAK_ATTRIBUTE extern void *__libc_stack_end;
}
-#endif
+# endif
-#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+# if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
static void ReadNullSepFileToArray(const char *path, char ***arr,
int arr_size) {
char *buff;
@@ -659,20 +653,21 @@ static void ReadNullSepFileToArray(const char *path, char ***arr,
}
(*arr)[0] = buff;
int count, i;
- for (count = 1, i = 1; ; i++) {
+ for (count = 1, i = 1;; i++) {
if (buff[i] == 0) {
- if (buff[i+1] == 0) break;
- (*arr)[count] = &buff[i+1];
+ if (buff[i + 1] == 0)
+ break;
+ (*arr)[count] = &buff[i + 1];
CHECK_LE(count, arr_size - 1); // FIXME: make this more flexible.
count++;
}
}
(*arr)[count] = nullptr;
}
-#endif
+# endif
static void GetArgsAndEnv(char ***argv, char ***envp) {
-#if SANITIZER_FREEBSD
+# if SANITIZER_FREEBSD
// On FreeBSD, retrieving the argument and environment arrays is done via the
// kern.ps_strings sysctl, which returns a pointer to a structure containing
// this information. See also <sys/exec.h>.
@@ -684,30 +679,30 @@ static void GetArgsAndEnv(char ***argv, char ***envp) {
}
*argv = pss->ps_argvstr;
*envp = pss->ps_envstr;
-#elif SANITIZER_NETBSD
+# elif SANITIZER_NETBSD
*argv = __ps_strings->ps_argvstr;
*envp = __ps_strings->ps_envstr;
-#else // SANITIZER_FREEBSD
-#if !SANITIZER_GO
+# else // SANITIZER_FREEBSD
+# if !SANITIZER_GO
if (&__libc_stack_end) {
- uptr* stack_end = (uptr*)__libc_stack_end;
+ uptr *stack_end = (uptr *)__libc_stack_end;
// Normally argc can be obtained from *stack_end, however, on ARM glibc's
// _start clobbers it:
// https://sourceware.org/git/?p=glibc.git;a=blob;f=sysdeps/arm/start.S;hb=refs/heads/release/2.31/master#l75
// Do not special-case ARM and infer argc from argv everywhere.
int argc = 0;
while (stack_end[argc + 1]) argc++;
- *argv = (char**)(stack_end + 1);
- *envp = (char**)(stack_end + argc + 2);
+ *argv = (char **)(stack_end + 1);
+ *envp = (char **)(stack_end + argc + 2);
} else {
-#endif // !SANITIZER_GO
+# endif // !SANITIZER_GO
static const int kMaxArgv = 2000, kMaxEnvp = 2000;
ReadNullSepFileToArray("/proc/self/cmdline", argv, kMaxArgv);
ReadNullSepFileToArray("/proc/self/environ", envp, kMaxEnvp);
-#if !SANITIZER_GO
+# if !SANITIZER_GO
}
-#endif // !SANITIZER_GO
-#endif // SANITIZER_FREEBSD
+# endif // !SANITIZER_GO
+# endif // SANITIZER_FREEBSD
}
char **GetArgv() {
@@ -722,12 +717,12 @@ char **GetEnviron() {
return envp;
}
-#if !SANITIZER_SOLARIS
+# if !SANITIZER_SOLARIS
void FutexWait(atomic_uint32_t *p, u32 cmp) {
# if SANITIZER_FREEBSD
_umtx_op(p, UMTX_OP_WAIT_UINT, cmp, 0, 0);
# elif SANITIZER_NETBSD
- sched_yield(); /* No userspace futex-like synchronization */
+ sched_yield(); /* No userspace futex-like synchronization */
# else
internal_syscall(SYSCALL(futex), (uptr)p, FUTEX_WAIT_PRIVATE, cmp, 0, 0, 0);
# endif
@@ -737,7 +732,7 @@ void FutexWake(atomic_uint32_t *p, u32 count) {
# if SANITIZER_FREEBSD
_umtx_op(p, UMTX_OP_WAKE, count, 0, 0);
# elif SANITIZER_NETBSD
- /* No userspace futex-like synchronization */
+ /* No userspace futex-like synchronization */
# else
internal_syscall(SYSCALL(futex), (uptr)p, FUTEX_WAKE_PRIVATE, count, 0, 0, 0);
# endif
@@ -749,26 +744,26 @@ void FutexWake(atomic_uint32_t *p, u32 count) {
// The actual size of this structure is specified by d_reclen.
// Note that getdents64 uses a different structure format. We only provide the
// 32-bit syscall here.
-#if SANITIZER_NETBSD
+# if SANITIZER_NETBSD
// Not used
-#else
+# else
struct linux_dirent {
# if SANITIZER_X32 || SANITIZER_LINUX
u64 d_ino;
u64 d_off;
# else
- unsigned long d_ino;
- unsigned long d_off;
+ unsigned long d_ino;
+ unsigned long d_off;
# endif
- unsigned short d_reclen;
+ unsigned short d_reclen;
# if SANITIZER_LINUX
- unsigned char d_type;
+ unsigned char d_type;
# endif
- char d_name[256];
+ char d_name[256];
};
-#endif
+# endif
-#if !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+# if !SANITIZER_SOLARIS && !SANITIZER_NETBSD
// Syscall wrappers.
uptr internal_ptrace(int request, int pid, void *addr, void *data) {
return internal_syscall(SYSCALL(ptrace), request, pid, (uptr)addr,
@@ -780,24 +775,20 @@ uptr internal_waitpid(int pid, int *status, int options) {
0 /* rusage */);
}
-uptr internal_getpid() {
- return internal_syscall(SYSCALL(getpid));
-}
+uptr internal_getpid() { return internal_syscall(SYSCALL(getpid)); }
-uptr internal_getppid() {
- return internal_syscall(SYSCALL(getppid));
-}
+uptr internal_getppid() { return internal_syscall(SYSCALL(getppid)); }
int internal_dlinfo(void *handle, int request, void *p) {
-#if SANITIZER_FREEBSD
+# if SANITIZER_FREEBSD
return dlinfo(handle, request, p);
-#else
+# else
UNIMPLEMENTED();
-#endif
+# endif
}
uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count) {
-#if SANITIZER_FREEBSD
+# if SANITIZER_FREEBSD
return internal_syscall(SYSCALL(getdirentries), fd, (uptr)dirp, count, NULL);
# elif SANITIZER_LINUX
return internal_syscall(SYSCALL(getdents64), fd, (uptr)dirp, count);
@@ -810,7 +801,7 @@ uptr internal_lseek(fd_t fd, OFF_T offset, int whence) {
return internal_syscall(SYSCALL(lseek), fd, offset, whence);
}
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5) {
return internal_syscall(SYSCALL(prctl), option, arg2, arg3, arg4, arg5);
}
@@ -839,7 +830,7 @@ int internal_fork() {
# endif
}
-#if SANITIZER_FREEBSD
+# if SANITIZER_FREEBSD
int internal_sysctl(const int *name, unsigned int namelen, void *oldp,
uptr *oldlenp, const void *newp, uptr newlen) {
return internal_syscall(SYSCALL(__sysctl), name, namelen, oldp,
@@ -854,11 +845,11 @@ int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,
// followed by sysctl(). To avoid calling the intercepted version and
// asserting if this happens during startup, call the real sysctlnametomib()
// followed by internal_sysctl() if the syscall is not available.
-#ifdef SYS___sysctlbyname
+# ifdef SYS___sysctlbyname
return internal_syscall(SYSCALL(__sysctlbyname), sname,
internal_strlen(sname), oldp, (size_t *)oldlenp, newp,
(size_t)newlen);
-#else
+# else
static decltype(sysctlnametomib) *real_sysctlnametomib = nullptr;
if (!real_sysctlnametomib)
real_sysctlnametomib =
@@ -870,12 +861,12 @@ int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,
if (real_sysctlnametomib(sname, oid, &len) == -1)
return (-1);
return internal_sysctl(oid, len, oldp, oldlenp, newp, newlen);
-#endif
+# endif
}
-#endif
+# endif
-#if SANITIZER_LINUX
-#define SA_RESTORER 0x04000000
+# if SANITIZER_LINUX
+# define SA_RESTORER 0x04000000
// Doesn't set sa_restorer if the caller did not set it, so use with caution
//(see below).
int internal_sigaction_norestorer(int signum, const void *act, void *oldact) {
@@ -899,15 +890,15 @@ int internal_sigaction_norestorer(int signum, const void *act, void *oldact) {
// rt_sigaction, so we need to do the same (we'll need to reimplement the
// restorers; for x86_64 the restorer address can be obtained from
// oldact->sa_restorer upon a call to sigaction(xxx, NULL, oldact).
-#if !SANITIZER_ANDROID || !SANITIZER_MIPS32
+# if !SANITIZER_ANDROID || !SANITIZER_MIPS32
k_act.sa_restorer = u_act->sa_restorer;
-#endif
+# endif
}
uptr result = internal_syscall(SYSCALL(rt_sigaction), (uptr)signum,
- (uptr)(u_act ? &k_act : nullptr),
- (uptr)(u_oldact ? &k_oldact : nullptr),
- (uptr)sizeof(__sanitizer_kernel_sigset_t));
+ (uptr)(u_act ? &k_act : nullptr),
+ (uptr)(u_oldact ? &k_oldact : nullptr),
+ (uptr)sizeof(__sanitizer_kernel_sigset_t));
if ((result == 0) && u_oldact) {
u_oldact->handler = k_oldact.handler;
@@ -915,24 +906,24 @@ int internal_sigaction_norestorer(int signum, const void *act, void *oldact) {
internal_memcpy(&u_oldact->sa_mask, &k_oldact.sa_mask,
sizeof(__sanitizer_kernel_sigset_t));
u_oldact->sa_flags = k_oldact.sa_flags;
-#if !SANITIZER_ANDROID || !SANITIZER_MIPS32
+# if !SANITIZER_ANDROID || !SANITIZER_MIPS32
u_oldact->sa_restorer = k_oldact.sa_restorer;
-#endif
+# endif
}
return result;
}
-#endif // SANITIZER_LINUX
+# endif // SANITIZER_LINUX
uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
__sanitizer_sigset_t *oldset) {
-#if SANITIZER_FREEBSD
+# if SANITIZER_FREEBSD
return internal_syscall(SYSCALL(sigprocmask), how, set, oldset);
-#else
+# else
__sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
__sanitizer_kernel_sigset_t *k_oldset = (__sanitizer_kernel_sigset_t *)oldset;
return internal_syscall(SYSCALL(rt_sigprocmask), (uptr)how, (uptr)k_set,
(uptr)k_oldset, sizeof(__sanitizer_kernel_sigset_t));
-#endif
+# endif
}
void internal_sigfillset(__sanitizer_sigset_t *set) {
@@ -943,7 +934,7 @@ void internal_sigemptyset(__sanitizer_sigset_t *set) {
internal_memset(set, 0, sizeof(*set));
}
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
void internal_sigdelset(__sanitizer_sigset_t *set, int signum) {
signum -= 1;
CHECK_GE(signum, 0);
@@ -963,7 +954,7 @@ bool internal_sigismember(__sanitizer_sigset_t *set, int signum) {
const uptr bit = signum % (sizeof(k_set->sig[0]) * 8);
return k_set->sig[idx] & ((uptr)1 << bit);
}
-#elif SANITIZER_FREEBSD
+# elif SANITIZER_FREEBSD
uptr internal_procctl(int type, int id, int cmd, void *data) {
return internal_syscall(SYSCALL(procctl), type, id, cmd, data);
}
@@ -977,10 +968,10 @@ bool internal_sigismember(__sanitizer_sigset_t *set, int signum) {
sigset_t *rset = reinterpret_cast<sigset_t *>(set);
return sigismember(rset, signum);
}
-#endif
-#endif // !SANITIZER_SOLARIS
+# endif
+# endif // !SANITIZER_SOLARIS
-#if !SANITIZER_NETBSD
+# if !SANITIZER_NETBSD
// ThreadLister implementation.
ThreadLister::ThreadLister(pid_t pid) : pid_(pid), buffer_(4096) {
char task_directory_path[80];
@@ -1067,25 +1058,26 @@ ThreadLister::~ThreadLister() {
if (!internal_iserror(descriptor_))
internal_close(descriptor_);
}
-#endif
+# endif
-#if SANITIZER_WORDSIZE == 32
+# if SANITIZER_WORDSIZE == 32
// Take care of unusable kernel area in top gigabyte.
static uptr GetKernelAreaSize() {
-#if SANITIZER_LINUX && !SANITIZER_X32
+# if SANITIZER_LINUX && !SANITIZER_X32
const uptr gbyte = 1UL << 30;
// Firstly check if there are writable segments
// mapped to top gigabyte (e.g. stack).
- MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
if (proc_maps.Error())
return 0;
MemoryMappedSegment segment;
while (proc_maps.Next(&segment)) {
- if ((segment.end >= 3 * gbyte) && segment.IsWritable()) return 0;
+ if ((segment.end >= 3 * gbyte) && segment.IsWritable())
+ return 0;
}
-#if !SANITIZER_ANDROID
+# if !SANITIZER_ANDROID
// Even if nothing is mapped, top Gb may still be accessible
// if we are running on 64-bit kernel.
// Uname may report misleading results if personality type
@@ -1095,21 +1087,21 @@ static uptr GetKernelAreaSize() {
if (!(pers & PER_MASK) && internal_uname(&uname_info) == 0 &&
internal_strstr(uname_info.machine, "64"))
return 0;
-#endif // SANITIZER_ANDROID
+# endif // SANITIZER_ANDROID
// Top gigabyte is reserved for kernel.
return gbyte;
-#else
+# else
return 0;
-#endif // SANITIZER_LINUX && !SANITIZER_X32
+# endif // SANITIZER_LINUX && !SANITIZER_X32
}
-#endif // SANITIZER_WORDSIZE == 32
+# endif // SANITIZER_WORDSIZE == 32
uptr GetMaxVirtualAddress() {
-#if SANITIZER_NETBSD && defined(__x86_64__)
+# if SANITIZER_NETBSD && defined(__x86_64__)
return 0x7f7ffffff000ULL; // (0x00007f8000000000 - PAGE_SIZE)
-#elif SANITIZER_WORDSIZE == 64
-# if defined(__powerpc64__) || defined(__aarch64__) || defined(__loongarch__)
+# elif SANITIZER_WORDSIZE == 64
+# if defined(__powerpc64__) || defined(__aarch64__) || defined(__loongarch__)
// On PowerPC64 we have two different address space layouts: 44- and 46-bit.
// We somehow need to figure out which one we are using now and choose
// one of 0x00000fffffffffffUL and 0x00003fffffffffffUL.
@@ -1119,96 +1111,97 @@ uptr GetMaxVirtualAddress() {
// Similarly, aarch64 has multiple address space layouts: 39, 42 and 47-bit.
// loongarch64 also has multiple address space layouts: default is 47-bit.
return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1;
-#elif SANITIZER_RISCV64
+# elif SANITIZER_RISCV64
return (1ULL << 38) - 1;
-# elif SANITIZER_MIPS64
+# elif SANITIZER_MIPS64
return (1ULL << 40) - 1; // 0x000000ffffffffffUL;
-# elif defined(__s390x__)
+# elif defined(__s390x__)
return (1ULL << 53) - 1; // 0x001fffffffffffffUL;
-#elif defined(__sparc__)
+# elif defined(__sparc__)
return ~(uptr)0;
-# else
+# else
return (1ULL << 47) - 1; // 0x00007fffffffffffUL;
-# endif
-#else // SANITIZER_WORDSIZE == 32
-# if defined(__s390__)
+# endif
+# else // SANITIZER_WORDSIZE == 32
+# if defined(__s390__)
return (1ULL << 31) - 1; // 0x7fffffff;
-# else
+# else
return (1ULL << 32) - 1; // 0xffffffff;
-# endif
-#endif // SANITIZER_WORDSIZE
+# endif
+# endif // SANITIZER_WORDSIZE
}
uptr GetMaxUserVirtualAddress() {
uptr addr = GetMaxVirtualAddress();
-#if SANITIZER_WORDSIZE == 32 && !defined(__s390__)
+# if SANITIZER_WORDSIZE == 32 && !defined(__s390__)
if (!common_flags()->full_address_space)
addr -= GetKernelAreaSize();
CHECK_LT(reinterpret_cast<uptr>(&addr), addr);
-#endif
+# endif
return addr;
}
-#if !SANITIZER_ANDROID
+# if !SANITIZER_ANDROID
uptr GetPageSize() {
-#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__i386__)) && \
- defined(EXEC_PAGESIZE)
+# if SANITIZER_LINUX && (defined(__x86_64__) || defined(__i386__)) && \
+ defined(EXEC_PAGESIZE)
return EXEC_PAGESIZE;
-#elif SANITIZER_FREEBSD || SANITIZER_NETBSD
-// Use sysctl as sysconf can trigger interceptors internally.
+# elif SANITIZER_FREEBSD || SANITIZER_NETBSD
+ // Use sysctl as sysconf can trigger interceptors internally.
int pz = 0;
uptr pzl = sizeof(pz);
int mib[2] = {CTL_HW, HW_PAGESIZE};
int rv = internal_sysctl(mib, 2, &pz, &pzl, nullptr, 0);
CHECK_EQ(rv, 0);
return (uptr)pz;
-#elif SANITIZER_USE_GETAUXVAL
+# elif SANITIZER_USE_GETAUXVAL
return getauxval(AT_PAGESZ);
-#else
+# else
return sysconf(_SC_PAGESIZE); // EXEC_PAGESIZE may not be trustworthy.
-#endif
+# endif
}
-#endif // !SANITIZER_ANDROID
+# endif // !SANITIZER_ANDROID
-uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
-#if SANITIZER_SOLARIS
+uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) {
+# if SANITIZER_SOLARIS
const char *default_module_name = getexecname();
CHECK_NE(default_module_name, NULL);
return internal_snprintf(buf, buf_len, "%s", default_module_name);
-#else
-#if SANITIZER_FREEBSD || SANITIZER_NETBSD
-#if SANITIZER_FREEBSD
+# else
+# if SANITIZER_FREEBSD || SANITIZER_NETBSD
+# if SANITIZER_FREEBSD
const int Mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1};
-#else
+# else
const int Mib[4] = {CTL_KERN, KERN_PROC_ARGS, -1, KERN_PROC_PATHNAME};
-#endif
+# endif
const char *default_module_name = "kern.proc.pathname";
uptr Size = buf_len;
bool IsErr =
(internal_sysctl(Mib, ARRAY_SIZE(Mib), buf, &Size, NULL, 0) != 0);
int readlink_error = IsErr ? errno : 0;
uptr module_name_len = Size;
-#else
+# else
const char *default_module_name = "/proc/self/exe";
- uptr module_name_len = internal_readlink(
- default_module_name, buf, buf_len);
+ uptr module_name_len = internal_readlink(default_module_name, buf, buf_len);
int readlink_error;
bool IsErr = internal_iserror(module_name_len, &readlink_error);
-#endif // SANITIZER_SOLARIS
+# endif // SANITIZER_SOLARIS
if (IsErr) {
// We can't read binary name for some reason, assume it's unknown.
- Report("WARNING: reading executable name failed with errno %d, "
- "some stack frames may not be symbolized\n", readlink_error);
- module_name_len = internal_snprintf(buf, buf_len, "%s",
- default_module_name);
+ Report(
+ "WARNING: reading executable name failed with errno %d, "
+ "some stack frames may not be symbolized\n",
+ readlink_error);
+ module_name_len =
+ internal_snprintf(buf, buf_len, "%s", default_module_name);
CHECK_LT(module_name_len, buf_len);
}
return module_name_len;
-#endif
+# endif
}
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
char *tmpbuf;
uptr tmpsize;
uptr tmplen;
@@ -1218,7 +1211,7 @@ uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
UnmapOrDie(tmpbuf, tmpsize);
return internal_strlen(buf);
}
-#endif
+# endif
return ReadBinaryName(buf, buf_len);
}
@@ -1228,20 +1221,22 @@ bool LibraryNameIs(const char *full_name, const char *base_name) {
// Strip path.
while (*name != '\0') name++;
while (name > full_name && *name != '/') name--;
- if (*name == '/') name++;
+ if (*name == '/')
+ name++;
uptr base_name_length = internal_strlen(base_name);
- if (internal_strncmp(name, base_name, base_name_length)) return false;
+ if (internal_strncmp(name, base_name, base_name_length))
+ return false;
return (name[base_name_length] == '-' || name[base_name_length] == '.');
}
-#if !SANITIZER_ANDROID
+# if !SANITIZER_ANDROID
// Call cb for each region mapped by map.
void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) {
CHECK_NE(map, nullptr);
-#if !SANITIZER_FREEBSD
+# if !SANITIZER_FREEBSD
typedef ElfW(Phdr) Elf_Phdr;
typedef ElfW(Ehdr) Elf_Ehdr;
-#endif // !SANITIZER_FREEBSD
+# endif // !SANITIZER_FREEBSD
char *base = (char *)map->l_addr;
Elf_Ehdr *ehdr = (Elf_Ehdr *)base;
char *phdrs = base + ehdr->e_phoff;
@@ -1273,10 +1268,10 @@ void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) {
}
}
}
-#endif
+# endif
-#if SANITIZER_LINUX
-#if defined(__x86_64__)
+# if SANITIZER_LINUX
+# if defined(__x86_64__)
// We cannot use glibc's clone wrapper, because it messes with the child
// task's TLS. It writes the PID and TID of the child task to its thread
// descriptor, but in our case the child task shares the thread descriptor with
@@ -1295,50 +1290,46 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
register void *r8 __asm__("r8") = newtls;
register int *r10 __asm__("r10") = child_tidptr;
__asm__ __volatile__(
- /* %rax = syscall(%rax = SYSCALL(clone),
- * %rdi = flags,
- * %rsi = child_stack,
- * %rdx = parent_tidptr,
- * %r8 = new_tls,
- * %r10 = child_tidptr)
- */
- "syscall\n"
-
- /* if (%rax != 0)
- * return;
- */
- "testq %%rax,%%rax\n"
- "jnz 1f\n"
-
- /* In the child. Terminate unwind chain. */
- // XXX: We should also terminate the CFI unwind chain
- // here. Unfortunately clang 3.2 doesn't support the
- // necessary CFI directives, so we skip that part.
- "xorq %%rbp,%%rbp\n"
-
- /* Call "fn(arg)". */
- "popq %%rax\n"
- "popq %%rdi\n"
- "call *%%rax\n"
-
- /* Call _exit(%rax). */
- "movq %%rax,%%rdi\n"
- "movq %2,%%rax\n"
- "syscall\n"
-
- /* Return to parent. */
- "1:\n"
- : "=a" (res)
- : "a"(SYSCALL(clone)), "i"(SYSCALL(exit)),
- "S"(child_stack),
- "D"(flags),
- "d"(parent_tidptr),
- "r"(r8),
- "r"(r10)
- : "memory", "r11", "rcx");
+ /* %rax = syscall(%rax = SYSCALL(clone),
+ * %rdi = flags,
+ * %rsi = child_stack,
+ * %rdx = parent_tidptr,
+ * %r8 = new_tls,
+ * %r10 = child_tidptr)
+ */
+ "syscall\n"
+
+ /* if (%rax != 0)
+ * return;
+ */
+ "testq %%rax,%%rax\n"
+ "jnz 1f\n"
+
+ /* In the child. Terminate unwind chain. */
+ // XXX: We should also terminate the CFI unwind chain
+ // here. Unfortunately clang 3.2 doesn't support the
+ // necessary CFI directives, so we skip that part.
+ "xorq %%rbp,%%rbp\n"
+
+ /* Call "fn(arg)". */
+ "popq %%rax\n"
+ "popq %%rdi\n"
+ "call *%%rax\n"
+
+ /* Call _exit(%rax). */
+ "movq %%rax,%%rdi\n"
+ "movq %2,%%rax\n"
+ "syscall\n"
+
+ /* Return to parent. */
+ "1:\n"
+ : "=a"(res)
+ : "a"(SYSCALL(clone)), "i"(SYSCALL(exit)), "S"(child_stack), "D"(flags),
+ "d"(parent_tidptr), "r"(r8), "r"(r10)
+ : "memory", "r11", "rcx");
return res;
}
-#elif defined(__mips__)
+# elif defined(__mips__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
long long res;
@@ -1353,68 +1344,63 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
// We don't have proper CFI directives here because it requires alot of code
// for very marginal benefits.
__asm__ __volatile__(
- /* $v0 = syscall($v0 = __NR_clone,
- * $a0 = flags,
- * $a1 = child_stack,
- * $a2 = parent_tidptr,
- * $a3 = new_tls,
- * $a4 = child_tidptr)
- */
- ".cprestore 16;\n"
- "move $4,%1;\n"
- "move $5,%2;\n"
- "move $6,%3;\n"
- "move $7,%4;\n"
- /* Store the fifth argument on stack
- * if we are using 32-bit abi.
- */
-#if SANITIZER_WORDSIZE == 32
- "lw %5,16($29);\n"
-#else
- "move $8,%5;\n"
-#endif
- "li $2,%6;\n"
- "syscall;\n"
-
- /* if ($v0 != 0)
- * return;
- */
- "bnez $2,1f;\n"
-
- /* Call "fn(arg)". */
-#if SANITIZER_WORDSIZE == 32
-#ifdef __BIG_ENDIAN__
- "lw $25,4($29);\n"
- "lw $4,12($29);\n"
-#else
- "lw $25,0($29);\n"
- "lw $4,8($29);\n"
-#endif
-#else
- "ld $25,0($29);\n"
- "ld $4,8($29);\n"
-#endif
- "jal $25;\n"
-
- /* Call _exit($v0). */
- "move $4,$2;\n"
- "li $2,%7;\n"
- "syscall;\n"
-
- /* Return to parent. */
- "1:\n"
- : "=r" (res)
- : "r"(flags),
- "r"(child_stack),
- "r"(parent_tidptr),
- "r"(a3),
- "r"(a4),
- "i"(__NR_clone),
- "i"(__NR_exit)
- : "memory", "$29" );
+ /* $v0 = syscall($v0 = __NR_clone,
+ * $a0 = flags,
+ * $a1 = child_stack,
+ * $a2 = parent_tidptr,
+ * $a3 = new_tls,
+ * $a4 = child_tidptr)
+ */
+ ".cprestore 16;\n"
+ "move $4,%1;\n"
+ "move $5,%2;\n"
+ "move $6,%3;\n"
+ "move $7,%4;\n"
+ /* Store the fifth argument on stack
+ * if we are using 32-bit abi.
+ */
+# if SANITIZER_WORDSIZE == 32
+ "lw %5,16($29);\n"
+# else
+ "move $8,%5;\n"
+# endif
+ "li $2,%6;\n"
+ "syscall;\n"
+
+ /* if ($v0 != 0)
+ * return;
+ */
+ "bnez $2,1f;\n"
+
+ /* Call "fn(arg)". */
+# if SANITIZER_WORDSIZE == 32
+# ifdef __BIG_ENDIAN__
+ "lw $25,4($29);\n"
+ "lw $4,12($29);\n"
+# else
+ "lw $25,0($29);\n"
+ "lw $4,8($29);\n"
+# endif
+# else
+ "ld $25,0($29);\n"
+ "ld $4,8($29);\n"
+# endif
+ "jal $25;\n"
+
+ /* Call _exit($v0). */
+ "move $4,$2;\n"
+ "li $2,%7;\n"
+ "syscall;\n"
+
+ /* Return to parent. */
+ "1:\n"
+ : "=r"(res)
+ : "r"(flags), "r"(child_stack), "r"(parent_tidptr), "r"(a3), "r"(a4),
+ "i"(__NR_clone), "i"(__NR_exit)
+ : "memory", "$29");
return res;
}
-#elif SANITIZER_RISCV64
+# elif SANITIZER_RISCV64
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
if (!fn || !child_stack)
@@ -1455,7 +1441,7 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
: "memory");
return res;
}
-#elif defined(__aarch64__)
+# elif defined(__aarch64__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
register long long res __asm__("x0");
@@ -1466,47 +1452,45 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
((unsigned long long *)child_stack)[0] = (uptr)fn;
((unsigned long long *)child_stack)[1] = (uptr)arg;
- register int (*__fn)(void *) __asm__("x0") = fn;
+ register int (*__fn)(void *) __asm__("x0") = fn;
register void *__stack __asm__("x1") = child_stack;
- register int __flags __asm__("x2") = flags;
- register void *__arg __asm__("x3") = arg;
- register int *__ptid __asm__("x4") = parent_tidptr;
- register void *__tls __asm__("x5") = newtls;
- register int *__ctid __asm__("x6") = child_tidptr;
+ register int __flags __asm__("x2") = flags;
+ register void *__arg __asm__("x3") = arg;
+ register int *__ptid __asm__("x4") = parent_tidptr;
+ register void *__tls __asm__("x5") = newtls;
+ register int *__ctid __asm__("x6") = child_tidptr;
__asm__ __volatile__(
- "mov x0,x2\n" /* flags */
- "mov x2,x4\n" /* ptid */
- "mov x3,x5\n" /* tls */
- "mov x4,x6\n" /* ctid */
- "mov x8,%9\n" /* clone */
-
- "svc 0x0\n"
-
- /* if (%r0 != 0)
- * return %r0;
- */
- "cmp x0, #0\n"
- "bne 1f\n"
-
- /* In the child, now. Call "fn(arg)". */
- "ldp x1, x0, [sp], #16\n"
- "blr x1\n"
-
- /* Call _exit(%r0). */
- "mov x8, %10\n"
- "svc 0x0\n"
- "1:\n"
-
- : "=r" (res)
- : "i"(-EINVAL),
- "r"(__fn), "r"(__stack), "r"(__flags), "r"(__arg),
- "r"(__ptid), "r"(__tls), "r"(__ctid),
- "i"(__NR_clone), "i"(__NR_exit)
- : "x30", "memory");
+ "mov x0,x2\n" /* flags */
+ "mov x2,x4\n" /* ptid */
+ "mov x3,x5\n" /* tls */
+ "mov x4,x6\n" /* ctid */
+ "mov x8,%9\n" /* clone */
+
+ "svc 0x0\n"
+
+ /* if (%r0 != 0)
+ * return %r0;
+ */
+ "cmp x0, #0\n"
+ "bne 1f\n"
+
+ /* In the child, now. Call "fn(arg)". */
+ "ldp x1, x0, [sp], #16\n"
+ "blr x1\n"
+
+ /* Call _exit(%r0). */
+ "mov x8, %10\n"
+ "svc 0x0\n"
+ "1:\n"
+
+ : "=r"(res)
+ : "i"(-EINVAL), "r"(__fn), "r"(__stack), "r"(__flags), "r"(__arg),
+ "r"(__ptid), "r"(__tls), "r"(__ctid), "i"(__NR_clone), "i"(__NR_exit)
+ : "x30", "memory");
return res;
}
-#elif SANITIZER_LOONGARCH64
+# elif SANITIZER_LOONGARCH64
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
if (!fn || !child_stack)
@@ -1544,119 +1528,110 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
: "=r"(res)
: "0"(__flags), "r"(__stack), "r"(__ptid), "r"(__ctid), "r"(__tls),
"r"(__fn), "r"(__arg), "r"(nr_clone), "i"(__NR_exit)
- : "memory", "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8");
+ : "memory", "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7",
+ "$t8");
return res;
}
-#elif defined(__powerpc64__)
+# elif defined(__powerpc64__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
- int *parent_tidptr, void *newtls, int *child_tidptr) {
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
long long res;
// Stack frame structure.
-#if SANITIZER_PPC64V1
-// Back chain == 0 (SP + 112)
-// Frame (112 bytes):
-// Parameter save area (SP + 48), 8 doublewords
-// TOC save area (SP + 40)
-// Link editor doubleword (SP + 32)
-// Compiler doubleword (SP + 24)
-// LR save area (SP + 16)
-// CR save area (SP + 8)
-// Back chain (SP + 0)
-# define FRAME_SIZE 112
-# define FRAME_TOC_SAVE_OFFSET 40
-#elif SANITIZER_PPC64V2
-// Back chain == 0 (SP + 32)
-// Frame (32 bytes):
-// TOC save area (SP + 24)
-// LR save area (SP + 16)
-// CR save area (SP + 8)
-// Back chain (SP + 0)
-# define FRAME_SIZE 32
-# define FRAME_TOC_SAVE_OFFSET 24
-#else
-# error "Unsupported PPC64 ABI"
-#endif
+# if SANITIZER_PPC64V1
+ // Back chain == 0 (SP + 112)
+ // Frame (112 bytes):
+ // Parameter save area (SP + 48), 8 doublewords
+ // TOC save area (SP + 40)
+ // Link editor doubleword (SP + 32)
+ // Compiler doubleword (SP + 24)
+ // LR save area (SP + 16)
+ // CR save area (SP + 8)
+ // Back chain (SP + 0)
+# define FRAME_SIZE 112
+# define FRAME_TOC_SAVE_OFFSET 40
+# elif SANITIZER_PPC64V2
+ // Back chain == 0 (SP + 32)
+ // Frame (32 bytes):
+ // TOC save area (SP + 24)
+ // LR save area (SP + 16)
+ // CR save area (SP + 8)
+ // Back chain (SP + 0)
+# define FRAME_SIZE 32
+# define FRAME_TOC_SAVE_OFFSET 24
+# else
+# error "Unsupported PPC64 ABI"
+# endif
if (!fn || !child_stack)
return -EINVAL;
CHECK_EQ(0, (uptr)child_stack % 16);
register int (*__fn)(void *) __asm__("r3") = fn;
- register void *__cstack __asm__("r4") = child_stack;
- register int __flags __asm__("r5") = flags;
- register void *__arg __asm__("r6") = arg;
- register int *__ptidptr __asm__("r7") = parent_tidptr;
- register void *__newtls __asm__("r8") = newtls;
- register int *__ctidptr __asm__("r9") = child_tidptr;
-
- __asm__ __volatile__(
- /* fn and arg are saved across the syscall */
- "mr 28, %5\n\t"
- "mr 27, %8\n\t"
-
- /* syscall
- r0 == __NR_clone
- r3 == flags
- r4 == child_stack
- r5 == parent_tidptr
- r6 == newtls
- r7 == child_tidptr */
- "mr 3, %7\n\t"
- "mr 5, %9\n\t"
- "mr 6, %10\n\t"
- "mr 7, %11\n\t"
- "li 0, %3\n\t"
- "sc\n\t"
-
- /* Test if syscall was successful */
- "cmpdi cr1, 3, 0\n\t"
- "crandc cr1*4+eq, cr1*4+eq, cr0*4+so\n\t"
- "bne- cr1, 1f\n\t"
-
- /* Set up stack frame */
- "li 29, 0\n\t"
- "stdu 29, -8(1)\n\t"
- "stdu 1, -%12(1)\n\t"
- /* Do the function call */
- "std 2, %13(1)\n\t"
-#if SANITIZER_PPC64V1
- "ld 0, 0(28)\n\t"
- "ld 2, 8(28)\n\t"
- "mtctr 0\n\t"
-#elif SANITIZER_PPC64V2
- "mr 12, 28\n\t"
- "mtctr 12\n\t"
-#else
-# error "Unsupported PPC64 ABI"
-#endif
- "mr 3, 27\n\t"
- "bctrl\n\t"
- "ld 2, %13(1)\n\t"
-
- /* Call _exit(r3) */
- "li 0, %4\n\t"
- "sc\n\t"
-
- /* Return to parent */
- "1:\n\t"
- "mr %0, 3\n\t"
- : "=r" (res)
- : "0" (-1),
- "i" (EINVAL),
- "i" (__NR_clone),
- "i" (__NR_exit),
- "r" (__fn),
- "r" (__cstack),
- "r" (__flags),
- "r" (__arg),
- "r" (__ptidptr),
- "r" (__newtls),
- "r" (__ctidptr),
- "i" (FRAME_SIZE),
- "i" (FRAME_TOC_SAVE_OFFSET)
- : "cr0", "cr1", "memory", "ctr", "r0", "r27", "r28", "r29");
+ register void *__cstack __asm__("r4") = child_stack;
+ register int __flags __asm__("r5") = flags;
+ register void *__arg __asm__("r6") = arg;
+ register int *__ptidptr __asm__("r7") = parent_tidptr;
+ register void *__newtls __asm__("r8") = newtls;
+ register int *__ctidptr __asm__("r9") = child_tidptr;
+
+ __asm__ __volatile__(
+ /* fn and arg are saved across the syscall */
+ "mr 28, %5\n\t"
+ "mr 27, %8\n\t"
+
+ /* syscall
+ r0 == __NR_clone
+ r3 == flags
+ r4 == child_stack
+ r5 == parent_tidptr
+ r6 == newtls
+ r7 == child_tidptr */
+ "mr 3, %7\n\t"
+ "mr 5, %9\n\t"
+ "mr 6, %10\n\t"
+ "mr 7, %11\n\t"
+ "li 0, %3\n\t"
+ "sc\n\t"
+
+ /* Test if syscall was successful */
+ "cmpdi cr1, 3, 0\n\t"
+ "crandc cr1*4+eq, cr1*4+eq, cr0*4+so\n\t"
+ "bne- cr1, 1f\n\t"
+
+ /* Set up stack frame */
+ "li 29, 0\n\t"
+ "stdu 29, -8(1)\n\t"
+ "stdu 1, -%12(1)\n\t"
+ /* Do the function call */
+ "std 2, %13(1)\n\t"
+# if SANITIZER_PPC64V1
+ "ld 0, 0(28)\n\t"
+ "ld 2, 8(28)\n\t"
+ "mtctr 0\n\t"
+# elif SANITIZER_PPC64V2
+ "mr 12, 28\n\t"
+ "mtctr 12\n\t"
+# else
+# error "Unsupported PPC64 ABI"
+# endif
+ "mr 3, 27\n\t"
+ "bctrl\n\t"
+ "ld 2, %13(1)\n\t"
+
+ /* Call _exit(r3) */
+ "li 0, %4\n\t"
+ "sc\n\t"
+
+ /* Return to parent */
+ "1:\n\t"
+ "mr %0, 3\n\t"
+ : "=r"(res)
+ : "0"(-1), "i"(EINVAL), "i"(__NR_clone), "i"(__NR_exit), "r"(__fn),
+ "r"(__cstack), "r"(__flags), "r"(__arg), "r"(__ptidptr), "r"(__newtls),
+ "r"(__ctidptr), "i"(FRAME_SIZE), "i"(FRAME_TOC_SAVE_OFFSET)
+ : "cr0", "cr1", "memory", "ctr", "r0", "r27", "r28", "r29");
return res;
}
-#elif defined(__i386__)
+# elif defined(__i386__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
int res;
@@ -1669,59 +1644,56 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
((unsigned int *)child_stack)[2] = (uptr)fn;
((unsigned int *)child_stack)[3] = (uptr)arg;
__asm__ __volatile__(
- /* %eax = syscall(%eax = SYSCALL(clone),
- * %ebx = flags,
- * %ecx = child_stack,
- * %edx = parent_tidptr,
- * %esi = new_tls,
- * %edi = child_tidptr)
- */
-
- /* Obtain flags */
- "movl (%%ecx), %%ebx\n"
- /* Do the system call */
- "pushl %%ebx\n"
- "pushl %%esi\n"
- "pushl %%edi\n"
- /* Remember the flag value. */
- "movl %%ebx, (%%ecx)\n"
- "int $0x80\n"
- "popl %%edi\n"
- "popl %%esi\n"
- "popl %%ebx\n"
-
- /* if (%eax != 0)
- * return;
- */
-
- "test %%eax,%%eax\n"
- "jnz 1f\n"
-
- /* terminate the stack frame */
- "xorl %%ebp,%%ebp\n"
- /* Call FN. */
- "call *%%ebx\n"
-#ifdef PIC
- "call here\n"
- "here:\n"
- "popl %%ebx\n"
- "addl $_GLOBAL_OFFSET_TABLE_+[.-here], %%ebx\n"
-#endif
- /* Call exit */
- "movl %%eax, %%ebx\n"
- "movl %2, %%eax\n"
- "int $0x80\n"
- "1:\n"
- : "=a" (res)
- : "a"(SYSCALL(clone)), "i"(SYSCALL(exit)),
- "c"(child_stack),
- "d"(parent_tidptr),
- "S"(newtls),
- "D"(child_tidptr)
- : "memory");
+ /* %eax = syscall(%eax = SYSCALL(clone),
+ * %ebx = flags,
+ * %ecx = child_stack,
+ * %edx = parent_tidptr,
+ * %esi = new_tls,
+ * %edi = child_tidptr)
+ */
+
+ /* Obtain flags */
+ "movl (%%ecx), %%ebx\n"
+ /* Do the system call */
+ "pushl %%ebx\n"
+ "pushl %%esi\n"
+ "pushl %%edi\n"
+ /* Remember the flag value. */
+ "movl %%ebx, (%%ecx)\n"
+ "int $0x80\n"
+ "popl %%edi\n"
+ "popl %%esi\n"
+ "popl %%ebx\n"
+
+ /* if (%eax != 0)
+ * return;
+ */
+
+ "test %%eax,%%eax\n"
+ "jnz 1f\n"
+
+ /* terminate the stack frame */
+ "xorl %%ebp,%%ebp\n"
+ /* Call FN. */
+ "call *%%ebx\n"
+# ifdef PIC
+ "call here\n"
+ "here:\n"
+ "popl %%ebx\n"
+ "addl $_GLOBAL_OFFSET_TABLE_+[.-here], %%ebx\n"
+# endif
+ /* Call exit */
+ "movl %%eax, %%ebx\n"
+ "movl %2, %%eax\n"
+ "int $0x80\n"
+ "1:\n"
+ : "=a"(res)
+ : "a"(SYSCALL(clone)), "i"(SYSCALL(exit)), "c"(child_stack),
+ "d"(parent_tidptr), "S"(newtls), "D"(child_tidptr)
+ : "memory");
return res;
}
-#elif defined(__arm__)
+# elif defined(__arm__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
unsigned int res;
@@ -1737,70 +1709,68 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
register int *r4 __asm__("r4") = child_tidptr;
register int r7 __asm__("r7") = __NR_clone;
-#if __ARM_ARCH > 4 || defined (__ARM_ARCH_4T__)
-# define ARCH_HAS_BX
-#endif
-#if __ARM_ARCH > 4
-# define ARCH_HAS_BLX
-#endif
+# if __ARM_ARCH > 4 || defined(__ARM_ARCH_4T__)
+# define ARCH_HAS_BX
+# endif
+# if __ARM_ARCH > 4
+# define ARCH_HAS_BLX
+# endif
-#ifdef ARCH_HAS_BX
-# ifdef ARCH_HAS_BLX
-# define BLX(R) "blx " #R "\n"
-# else
-# define BLX(R) "mov lr, pc; bx " #R "\n"
-# endif
-#else
-# define BLX(R) "mov lr, pc; mov pc," #R "\n"
-#endif
+# ifdef ARCH_HAS_BX
+# ifdef ARCH_HAS_BLX
+# define BLX(R) "blx " #R "\n"
+# else
+# define BLX(R) "mov lr, pc; bx " #R "\n"
+# endif
+# else
+# define BLX(R) "mov lr, pc; mov pc," #R "\n"
+# endif
__asm__ __volatile__(
- /* %r0 = syscall(%r7 = SYSCALL(clone),
- * %r0 = flags,
- * %r1 = child_stack,
- * %r2 = parent_tidptr,
- * %r3 = new_tls,
- * %r4 = child_tidptr)
- */
-
- /* Do the system call */
- "swi 0x0\n"
-
- /* if (%r0 != 0)
- * return %r0;
- */
- "cmp r0, #0\n"
- "bne 1f\n"
-
- /* In the child, now. Call "fn(arg)". */
- "ldr r0, [sp, #4]\n"
- "ldr ip, [sp], #8\n"
- BLX(ip)
- /* Call _exit(%r0). */
- "mov r7, %7\n"
- "swi 0x0\n"
- "1:\n"
- "mov %0, r0\n"
- : "=r"(res)
- : "r"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4), "r"(r7),
- "i"(__NR_exit)
- : "memory");
+ /* %r0 = syscall(%r7 = SYSCALL(clone),
+ * %r0 = flags,
+ * %r1 = child_stack,
+ * %r2 = parent_tidptr,
+ * %r3 = new_tls,
+ * %r4 = child_tidptr)
+ */
+
+ /* Do the system call */
+ "swi 0x0\n"
+
+ /* if (%r0 != 0)
+ * return %r0;
+ */
+ "cmp r0, #0\n"
+ "bne 1f\n"
+
+ /* In the child, now. Call "fn(arg)". */
+ "ldr r0, [sp, #4]\n"
+ "ldr ip, [sp], #8\n" BLX(ip)
+ /* Call _exit(%r0). */
+ "mov r7, %7\n"
+ "swi 0x0\n"
+ "1:\n"
+ "mov %0, r0\n"
+ : "=r"(res)
+ : "r"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4), "r"(r7), "i"(__NR_exit)
+ : "memory");
return res;
}
-#endif
-#endif // SANITIZER_LINUX
+# endif
+# endif // SANITIZER_LINUX
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
int internal_uname(struct utsname *buf) {
return internal_syscall(SYSCALL(uname), buf);
}
-#endif
+# endif
-#if SANITIZER_ANDROID
-#if __ANDROID_API__ < 21
+# if SANITIZER_ANDROID
+# if __ANDROID_API__ < 21
extern "C" __attribute__((weak)) int dl_iterate_phdr(
int (*)(struct dl_phdr_info *, size_t, void *), void *);
-#endif
+# endif
static int dl_iterate_phdr_test_cb(struct dl_phdr_info *info, size_t size,
void *data) {
@@ -1817,40 +1787,41 @@ static int dl_iterate_phdr_test_cb(struct dl_phdr_info *info, size_t size,
static atomic_uint32_t android_api_level;
static AndroidApiLevel AndroidDetectApiLevelStatic() {
-#if __ANDROID_API__ <= 19
+# if __ANDROID_API__ <= 19
return ANDROID_KITKAT;
-#elif __ANDROID_API__ <= 22
+# elif __ANDROID_API__ <= 22
return ANDROID_LOLLIPOP_MR1;
-#else
+# else
return ANDROID_POST_LOLLIPOP;
-#endif
+# endif
}
static AndroidApiLevel AndroidDetectApiLevel() {
if (!&dl_iterate_phdr)
- return ANDROID_KITKAT; // K or lower
+ return ANDROID_KITKAT; // K or lower
bool base_name_seen = false;
dl_iterate_phdr(dl_iterate_phdr_test_cb, &base_name_seen);
if (base_name_seen)
- return ANDROID_LOLLIPOP_MR1; // L MR1
+ return ANDROID_LOLLIPOP_MR1; // L MR1
return ANDROID_POST_LOLLIPOP; // post-L
// Plain L (API level 21) is completely broken wrt ASan and not very
// interesting to detect.
}
-extern "C" __attribute__((weak)) void* _DYNAMIC;
+extern "C" __attribute__((weak)) void *_DYNAMIC;
AndroidApiLevel AndroidGetApiLevel() {
AndroidApiLevel level =
(AndroidApiLevel)atomic_load(&android_api_level, memory_order_relaxed);
- if (level) return level;
+ if (level)
+ return level;
level = &_DYNAMIC == nullptr ? AndroidDetectApiLevelStatic()
: AndroidDetectApiLevel();
atomic_store(&android_api_level, level, memory_order_relaxed);
return level;
}
-#endif
+# endif
static HandleSignalMode GetHandleSignalModeImpl(int signum) {
switch (signum) {
@@ -1877,7 +1848,7 @@ HandleSignalMode GetHandleSignalMode(int signum) {
return result;
}
-#if !SANITIZER_GO
+# if !SANITIZER_GO
void *internal_start_thread(void *(*func)(void *arg), void *arg) {
if (&real_pthread_create == 0)
return nullptr;
@@ -1892,13 +1863,13 @@ void internal_join_thread(void *th) {
if (&real_pthread_join)
real_pthread_join(th, nullptr);
}
-#else
+# else
void *internal_start_thread(void *(*func)(void *), void *arg) { return 0; }
void internal_join_thread(void *th) {}
-#endif
+# endif
-#if SANITIZER_LINUX && defined(__aarch64__)
+# if SANITIZER_LINUX && defined(__aarch64__)
// Android headers in the older NDK releases miss this definition.
struct __sanitizer_esr_context {
struct _aarch64_ctx head;
@@ -1910,7 +1881,8 @@ static bool Aarch64GetESR(ucontext_t *ucontext, u64 *esr) {
u8 *aux = reinterpret_cast<u8 *>(ucontext->uc_mcontext.__reserved);
while (true) {
_aarch64_ctx *ctx = (_aarch64_ctx *)aux;
- if (ctx->size == 0) break;
+ if (ctx->size == 0)
+ break;
if (ctx->magic == kEsrMagic) {
*esr = ((__sanitizer_esr_context *)ctx)->esr;
return true;
@@ -1919,31 +1891,29 @@ static bool Aarch64GetESR(ucontext_t *ucontext, u64 *esr) {
}
return false;
}
-#elif SANITIZER_FREEBSD && defined(__aarch64__)
+# elif SANITIZER_FREEBSD && defined(__aarch64__)
// FreeBSD doesn't provide ESR in the ucontext.
-static bool Aarch64GetESR(ucontext_t *ucontext, u64 *esr) {
- return false;
-}
-#endif
+static bool Aarch64GetESR(ucontext_t *ucontext, u64 *esr) { return false; }
+# endif
using Context = ucontext_t;
SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
Context *ucontext = (Context *)context;
-#if defined(__x86_64__) || defined(__i386__)
+# if defined(__x86_64__) || defined(__i386__)
static const uptr PF_WRITE = 1U << 1;
-#if SANITIZER_FREEBSD
+# if SANITIZER_FREEBSD
uptr err = ucontext->uc_mcontext.mc_err;
-#elif SANITIZER_NETBSD
+# elif SANITIZER_NETBSD
uptr err = ucontext->uc_mcontext.__gregs[_REG_ERR];
-#elif SANITIZER_SOLARIS && defined(__i386__)
+# elif SANITIZER_SOLARIS && defined(__i386__)
const int Err = 13;
uptr err = ucontext->uc_mcontext.gregs[Err];
-#else
+# else
uptr err = ucontext->uc_mcontext.gregs[REG_ERR];
-#endif // SANITIZER_FREEBSD
+# endif // SANITIZER_FREEBSD
return err & PF_WRITE ? Write : Read;
-#elif defined(__mips__)
+# elif defined(__mips__)
uint32_t *exception_source;
uint32_t faulty_instruction;
uint32_t op_code;
@@ -1959,12 +1929,12 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
case 0x29: // sh
case 0x2b: // sw
case 0x3f: // sd
-#if __mips_isa_rev < 6
+# if __mips_isa_rev < 6
case 0x2c: // sdl
case 0x2d: // sdr
case 0x2a: // swl
case 0x2e: // swr
-#endif
+# endif
return SignalContext::Write;
case 0x20: // lb
@@ -1974,14 +1944,14 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
case 0x23: // lw
case 0x27: // lwu
case 0x37: // ld
-#if __mips_isa_rev < 6
+# if __mips_isa_rev < 6
case 0x1a: // ldl
case 0x1b: // ldr
case 0x22: // lwl
case 0x26: // lwr
-#endif
+# endif
return SignalContext::Read;
-#if __mips_isa_rev == 6
+# if __mips_isa_rev == 6
case 0x3b: // pcrel
op_code = (faulty_instruction >> 19) & 0x3;
switch (op_code) {
@@ -1989,50 +1959,51 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
case 0x2: // lwupc
return SignalContext::Read;
}
-#endif
+# endif
}
return SignalContext::Unknown;
-#elif defined(__arm__)
+# elif defined(__arm__)
static const uptr FSR_WRITE = 1U << 11;
uptr fsr = ucontext->uc_mcontext.error_code;
return fsr & FSR_WRITE ? Write : Read;
-#elif defined(__aarch64__)
+# elif defined(__aarch64__)
static const u64 ESR_ELx_WNR = 1U << 6;
u64 esr;
- if (!Aarch64GetESR(ucontext, &esr)) return Unknown;
+ if (!Aarch64GetESR(ucontext, &esr))
+ return Unknown;
return esr & ESR_ELx_WNR ? Write : Read;
-#elif defined(__loongarch__)
+# elif defined(__loongarch__)
u32 flags = ucontext->uc_mcontext.__flags;
if (flags & SC_ADDRERR_RD)
return SignalContext::Read;
if (flags & SC_ADDRERR_WR)
return SignalContext::Write;
return SignalContext::Unknown;
-#elif defined(__sparc__)
+# elif defined(__sparc__)
// Decode the instruction to determine the access type.
// From OpenSolaris $SRC/uts/sun4/os/trap.c (get_accesstype).
-#if SANITIZER_SOLARIS
+# if SANITIZER_SOLARIS
uptr pc = ucontext->uc_mcontext.gregs[REG_PC];
-#else
+# else
// Historical BSDism here.
struct sigcontext *scontext = (struct sigcontext *)context;
-#if defined(__arch64__)
+# if defined(__arch64__)
uptr pc = scontext->sigc_regs.tpc;
-#else
+# else
uptr pc = scontext->si_regs.pc;
-#endif
-#endif
+# endif
+# endif
u32 instr = *(u32 *)pc;
- return (instr >> 21) & 1 ? Write: Read;
-#elif defined(__riscv)
-#if SANITIZER_FREEBSD
+ return (instr >> 21) & 1 ? Write : Read;
+# elif defined(__riscv)
+# if SANITIZER_FREEBSD
unsigned long pc = ucontext->uc_mcontext.mc_gpregs.gp_sepc;
-#else
+# else
unsigned long pc = ucontext->uc_mcontext.__gregs[REG_PC];
-#endif
+# endif
unsigned faulty_instruction = *(uint16_t *)pc;
-#if defined(__riscv_compressed)
+# if defined(__riscv_compressed)
if ((faulty_instruction & 0x3) != 0x3) { // it's a compressed instruction
// set op_bits to the instruction bits [1, 0, 15, 14, 13]
unsigned op_bits =
@@ -2040,38 +2011,38 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
unsigned rd = faulty_instruction & 0xF80; // bits 7-11, inclusive
switch (op_bits) {
case 0b10'010: // c.lwsp (rd != x0)
-#if __riscv_xlen == 64
+# if __riscv_xlen == 64
case 0b10'011: // c.ldsp (rd != x0)
-#endif
+# endif
return rd ? SignalContext::Read : SignalContext::Unknown;
case 0b00'010: // c.lw
-#if __riscv_flen >= 32 && __riscv_xlen == 32
+# if __riscv_flen >= 32 && __riscv_xlen == 32
case 0b10'011: // c.flwsp
-#endif
-#if __riscv_flen >= 32 || __riscv_xlen == 64
+# endif
+# if __riscv_flen >= 32 || __riscv_xlen == 64
case 0b00'011: // c.flw / c.ld
-#endif
-#if __riscv_flen == 64
+# endif
+# if __riscv_flen == 64
case 0b00'001: // c.fld
case 0b10'001: // c.fldsp
-#endif
+# endif
return SignalContext::Read;
case 0b00'110: // c.sw
case 0b10'110: // c.swsp
-#if __riscv_flen >= 32 || __riscv_xlen == 64
+# if __riscv_flen >= 32 || __riscv_xlen == 64
case 0b00'111: // c.fsw / c.sd
case 0b10'111: // c.fswsp / c.sdsp
-#endif
-#if __riscv_flen == 64
+# endif
+# if __riscv_flen == 64
case 0b00'101: // c.fsd
case 0b10'101: // c.fsdsp
-#endif
+# endif
return SignalContext::Write;
default:
return SignalContext::Unknown;
}
}
-#endif
+# endif
unsigned opcode = faulty_instruction & 0x7f; // lower 7 bits
unsigned funct3 = (faulty_instruction >> 12) & 0x7; // bits 12-14, inclusive
@@ -2081,9 +2052,9 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
case 0b000: // lb
case 0b001: // lh
case 0b010: // lw
-#if __riscv_xlen == 64
+# if __riscv_xlen == 64
case 0b011: // ld
-#endif
+# endif
case 0b100: // lbu
case 0b101: // lhu
return SignalContext::Read;
@@ -2095,20 +2066,20 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
case 0b000: // sb
case 0b001: // sh
case 0b010: // sw
-#if __riscv_xlen == 64
+# if __riscv_xlen == 64
case 0b011: // sd
-#endif
+# endif
return SignalContext::Write;
default:
return SignalContext::Unknown;
}
-#if __riscv_flen >= 32
+# if __riscv_flen >= 32
case 0b0000111: // floating-point loads
switch (funct3) {
case 0b010: // flw
-#if __riscv_flen == 64
+# if __riscv_flen == 64
case 0b011: // fld
-#endif
+# endif
return SignalContext::Read;
default:
return SignalContext::Unknown;
@@ -2116,21 +2087,21 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
case 0b0100111: // floating-point stores
switch (funct3) {
case 0b010: // fsw
-#if __riscv_flen == 64
+# if __riscv_flen == 64
case 0b011: // fsd
-#endif
+# endif
return SignalContext::Write;
default:
return SignalContext::Unknown;
}
-#endif
+# endif
default:
return SignalContext::Unknown;
}
-#else
+# else
(void)ucontext;
return Unknown; // FIXME: Implement.
-#endif
+# endif
}
bool SignalContext::IsTrueFaultingAddress() const {
@@ -2144,124 +2115,124 @@ void SignalContext::DumpAllRegisters(void *context) {
}
static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
-#if SANITIZER_NETBSD
+# if SANITIZER_NETBSD
// This covers all NetBSD architectures
ucontext_t *ucontext = (ucontext_t *)context;
*pc = _UC_MACHINE_PC(ucontext);
*bp = _UC_MACHINE_FP(ucontext);
*sp = _UC_MACHINE_SP(ucontext);
-#elif defined(__arm__)
- ucontext_t *ucontext = (ucontext_t*)context;
+# elif defined(__arm__)
+ ucontext_t *ucontext = (ucontext_t *)context;
*pc = ucontext->uc_mcontext.arm_pc;
*bp = ucontext->uc_mcontext.arm_fp;
*sp = ucontext->uc_mcontext.arm_sp;
-#elif defined(__aarch64__)
-# if SANITIZER_FREEBSD
- ucontext_t *ucontext = (ucontext_t*)context;
+# elif defined(__aarch64__)
+# if SANITIZER_FREEBSD
+ ucontext_t *ucontext = (ucontext_t *)context;
*pc = ucontext->uc_mcontext.mc_gpregs.gp_elr;
*bp = ucontext->uc_mcontext.mc_gpregs.gp_x[29];
*sp = ucontext->uc_mcontext.mc_gpregs.gp_sp;
-# else
- ucontext_t *ucontext = (ucontext_t*)context;
+# else
+ ucontext_t *ucontext = (ucontext_t *)context;
*pc = ucontext->uc_mcontext.pc;
*bp = ucontext->uc_mcontext.regs[29];
*sp = ucontext->uc_mcontext.sp;
-# endif
-#elif defined(__hppa__)
- ucontext_t *ucontext = (ucontext_t*)context;
+# endif
+# elif defined(__hppa__)
+ ucontext_t *ucontext = (ucontext_t *)context;
*pc = ucontext->uc_mcontext.sc_iaoq[0];
/* GCC uses %r3 whenever a frame pointer is needed. */
*bp = ucontext->uc_mcontext.sc_gr[3];
*sp = ucontext->uc_mcontext.sc_gr[30];
-#elif defined(__x86_64__)
-# if SANITIZER_FREEBSD
- ucontext_t *ucontext = (ucontext_t*)context;
+# elif defined(__x86_64__)
+# if SANITIZER_FREEBSD
+ ucontext_t *ucontext = (ucontext_t *)context;
*pc = ucontext->uc_mcontext.mc_rip;
*bp = ucontext->uc_mcontext.mc_rbp;
*sp = ucontext->uc_mcontext.mc_rsp;
-# else
- ucontext_t *ucontext = (ucontext_t*)context;
+# else
+ ucontext_t *ucontext = (ucontext_t *)context;
*pc = ucontext->uc_mcontext.gregs[REG_RIP];
*bp = ucontext->uc_mcontext.gregs[REG_RBP];
*sp = ucontext->uc_mcontext.gregs[REG_RSP];
-# endif
-#elif defined(__i386__)
-# if SANITIZER_FREEBSD
- ucontext_t *ucontext = (ucontext_t*)context;
+# endif
+# elif defined(__i386__)
+# if SANITIZER_FREEBSD
+ ucontext_t *ucontext = (ucontext_t *)context;
*pc = ucontext->uc_mcontext.mc_eip;
*bp = ucontext->uc_mcontext.mc_ebp;
*sp = ucontext->uc_mcontext.mc_esp;
-# else
- ucontext_t *ucontext = (ucontext_t*)context;
-# if SANITIZER_SOLARIS
+# else
+ ucontext_t *ucontext = (ucontext_t *)context;
+# if SANITIZER_SOLARIS
/* Use the numeric values: the symbolic ones are undefined by llvm
include/llvm/Support/Solaris.h. */
-# ifndef REG_EIP
-# define REG_EIP 14 // REG_PC
-# endif
-# ifndef REG_EBP
-# define REG_EBP 6 // REG_FP
-# endif
-# ifndef REG_UESP
-# define REG_UESP 17 // REG_SP
-# endif
-# endif
+# ifndef REG_EIP
+# define REG_EIP 14 // REG_PC
+# endif
+# ifndef REG_EBP
+# define REG_EBP 6 // REG_FP
+# endif
+# ifndef REG_UESP
+# define REG_UESP 17 // REG_SP
+# endif
+# endif
*pc = ucontext->uc_mcontext.gregs[REG_EIP];
*bp = ucontext->uc_mcontext.gregs[REG_EBP];
*sp = ucontext->uc_mcontext.gregs[REG_UESP];
-# endif
-#elif defined(__powerpc__) || defined(__powerpc64__)
+# endif
+# elif defined(__powerpc__) || defined(__powerpc64__)
# if SANITIZER_FREEBSD
ucontext_t *ucontext = (ucontext_t *)context;
*pc = ucontext->uc_mcontext.mc_srr0;
*sp = ucontext->uc_mcontext.mc_frame[1];
*bp = ucontext->uc_mcontext.mc_frame[31];
# else
- ucontext_t *ucontext = (ucontext_t*)context;
+ ucontext_t *ucontext = (ucontext_t *)context;
*pc = ucontext->uc_mcontext.regs->nip;
*sp = ucontext->uc_mcontext.regs->gpr[PT_R1];
// The powerpc{,64}-linux ABIs do not specify r31 as the frame
// pointer, but GCC always uses r31 when we need a frame pointer.
*bp = ucontext->uc_mcontext.regs->gpr[PT_R31];
# endif
-#elif defined(__sparc__)
-#if defined(__arch64__) || defined(__sparcv9)
-#define STACK_BIAS 2047
-#else
-#define STACK_BIAS 0
-# endif
-# if SANITIZER_SOLARIS
+# elif defined(__sparc__)
+# if defined(__arch64__) || defined(__sparcv9)
+# define STACK_BIAS 2047
+# else
+# define STACK_BIAS 0
+# endif
+# if SANITIZER_SOLARIS
ucontext_t *ucontext = (ucontext_t *)context;
*pc = ucontext->uc_mcontext.gregs[REG_PC];
*sp = ucontext->uc_mcontext.gregs[REG_O6] + STACK_BIAS;
-#else
+# else
// Historical BSDism here.
struct sigcontext *scontext = (struct sigcontext *)context;
-#if defined(__arch64__)
+# if defined(__arch64__)
*pc = scontext->sigc_regs.tpc;
*sp = scontext->sigc_regs.u_regs[14] + STACK_BIAS;
-#else
+# else
*pc = scontext->si_regs.pc;
*sp = scontext->si_regs.u_regs[14];
-#endif
-# endif
+# endif
+# endif
*bp = (uptr)((uhwptr *)*sp)[14] + STACK_BIAS;
-#elif defined(__mips__)
- ucontext_t *ucontext = (ucontext_t*)context;
+# elif defined(__mips__)
+ ucontext_t *ucontext = (ucontext_t *)context;
*pc = ucontext->uc_mcontext.pc;
*bp = ucontext->uc_mcontext.gregs[30];
*sp = ucontext->uc_mcontext.gregs[29];
-#elif defined(__s390__)
- ucontext_t *ucontext = (ucontext_t*)context;
-# if defined(__s390x__)
+# elif defined(__s390__)
+ ucontext_t *ucontext = (ucontext_t *)context;
+# if defined(__s390x__)
*pc = ucontext->uc_mcontext.psw.addr;
-# else
+# else
*pc = ucontext->uc_mcontext.psw.addr & 0x7fffffff;
-# endif
+# endif
*bp = ucontext->uc_mcontext.gregs[11];
*sp = ucontext->uc_mcontext.gregs[15];
-#elif defined(__riscv)
- ucontext_t *ucontext = (ucontext_t*)context;
+# elif defined(__riscv)
+ ucontext_t *ucontext = (ucontext_t *)context;
# if SANITIZER_FREEBSD
*pc = ucontext->uc_mcontext.mc_gpregs.gp_sepc;
*bp = ucontext->uc_mcontext.mc_gpregs.gp_s[0];
@@ -2293,7 +2264,7 @@ void InitializePlatformEarly() {
}
void CheckASLR() {
-#if SANITIZER_NETBSD
+# if SANITIZER_NETBSD
int mib[3];
int paxflags;
uptr len = sizeof(paxflags);
@@ -2308,12 +2279,13 @@ void CheckASLR() {
}
if (UNLIKELY(paxflags & CTL_PROC_PAXFLAGS_ASLR)) {
- Printf("This sanitizer is not compatible with enabled ASLR.\n"
- "To disable ASLR, please run \"paxctl +a %s\" and try again.\n",
- GetArgv()[0]);
+ Printf(
+ "This sanitizer is not compatible with enabled ASLR.\n"
+ "To disable ASLR, please run \"paxctl +a %s\" and try again.\n",
+ GetArgv()[0]);
Die();
}
-#elif SANITIZER_FREEBSD
+# elif SANITIZER_FREEBSD
int aslr_status;
int r = internal_procctl(P_PID, 0, PROC_ASLR_STATUS, &aslr_status);
if (UNLIKELY(r == -1)) {
@@ -2323,9 +2295,13 @@ void CheckASLR() {
return;
}
if ((aslr_status & PROC_ASLR_ACTIVE) != 0) {
- Printf("This sanitizer is not compatible with enabled ASLR "
- "and binaries compiled with PIE\n");
- Die();
+ VReport(1,
+ "This sanitizer is not compatible with enabled ASLR "
+ "and binaries compiled with PIE\n"
+ "ASLR will be disabled and the program re-executed.\n");
+ int aslr_ctl = PROC_ASLR_FORCE_DISABLE;
+ CHECK_NE(internal_procctl(P_PID, 0, PROC_ASLR_CTL, &aslr_ctl), -1);
+ ReExec();
}
# elif SANITIZER_PPC64V2
// Disable ASLR for Linux PPC64LE.
@@ -2345,7 +2321,7 @@ void CheckASLR() {
}
void CheckMPROTECT() {
-#if SANITIZER_NETBSD
+# if SANITIZER_NETBSD
int mib[3];
int paxflags;
uptr len = sizeof(paxflags);
@@ -2363,13 +2339,13 @@ void CheckMPROTECT() {
Printf("This sanitizer is not compatible with enabled MPROTECT\n");
Die();
}
-#else
+# else
// Do nothing
-#endif
+# endif
}
void CheckNoDeepBind(const char *filename, int flag) {
-#ifdef RTLD_DEEPBIND
+# ifdef RTLD_DEEPBIND
if (flag & RTLD_DEEPBIND) {
Report(
"You are trying to dlopen a %s shared library with RTLD_DEEPBIND flag"
@@ -2380,7 +2356,7 @@ void CheckNoDeepBind(const char *filename, int flag) {
filename, filename);
Die();
}
-#endif
+# endif
}
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
@@ -2393,16 +2369,16 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
bool GetRandom(void *buffer, uptr length, bool blocking) {
if (!buffer || !length || length > 256)
return false;
-#if SANITIZER_USE_GETENTROPY
+# if SANITIZER_USE_GETENTROPY
uptr rnd = getentropy(buffer, length);
int rverrno = 0;
if (internal_iserror(rnd, &rverrno) && rverrno == EFAULT)
return false;
else if (rnd == 0)
return true;
-#endif // SANITIZER_USE_GETENTROPY
+# endif // SANITIZER_USE_GETENTROPY
-#if SANITIZER_USE_GETRANDOM
+# if SANITIZER_USE_GETRANDOM
static atomic_uint8_t skip_getrandom_syscall;
if (!atomic_load_relaxed(&skip_getrandom_syscall)) {
// Up to 256 bytes, getrandom will not be interrupted.
@@ -2414,7 +2390,7 @@ bool GetRandom(void *buffer, uptr length, bool blocking) {
else if (res == length)
return true;
}
-#endif // SANITIZER_USE_GETRANDOM
+# endif // SANITIZER_USE_GETRANDOM
// Up to 256 bytes, a read off /dev/urandom will not be interrupted.
// blocking is moot here, O_NONBLOCK has no effect when opening /dev/urandom.
uptr fd = internal_open("/dev/urandom", O_RDONLY);
@@ -2427,6 +2403,6 @@ bool GetRandom(void *buffer, uptr length, bool blocking) {
return true;
}
-} // namespace __sanitizer
+} // namespace __sanitizer
#endif
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux.h b/compiler-rt/lib/sanitizer_common/sanitizer_linux.h
index 7454369fa419..c30f0326793d 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux.h
@@ -13,15 +13,15 @@
#define SANITIZER_LINUX_H
#include "sanitizer_platform.h"
-#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
SANITIZER_SOLARIS
-#include "sanitizer_common.h"
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_platform_limits_freebsd.h"
-#include "sanitizer_platform_limits_netbsd.h"
-#include "sanitizer_platform_limits_posix.h"
-#include "sanitizer_platform_limits_solaris.h"
-#include "sanitizer_posix.h"
+# include "sanitizer_common.h"
+# include "sanitizer_internal_defs.h"
+# include "sanitizer_platform_limits_freebsd.h"
+# include "sanitizer_platform_limits_netbsd.h"
+# include "sanitizer_platform_limits_posix.h"
+# include "sanitizer_platform_limits_solaris.h"
+# include "sanitizer_posix.h"
struct link_map; // Opaque type returned by dlopen().
struct utsname;
@@ -46,9 +46,9 @@ void ReadProcMaps(ProcSelfMapsBuff *proc_maps);
// Syscall wrappers.
uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count);
-uptr internal_sigaltstack(const void* ss, void* oss);
+uptr internal_sigaltstack(const void *ss, void *oss);
uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
- __sanitizer_sigset_t *oldset);
+ __sanitizer_sigset_t *oldset);
void SetSigProcMask(__sanitizer_sigset_t *set, __sanitizer_sigset_t *oldset);
void BlockSignals(__sanitizer_sigset_t *oldset = nullptr);
@@ -65,10 +65,10 @@ struct ScopedBlockSignals {
# if SANITIZER_GLIBC
uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp);
-#endif
+# endif
// Linux-only syscalls.
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);
# if defined(__x86_64__)
uptr internal_arch_prctl(int option, uptr arg2);
@@ -83,15 +83,15 @@ void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
defined(__arm__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr);
-#endif
+# endif
int internal_uname(struct utsname *buf);
-#elif SANITIZER_FREEBSD
+# elif SANITIZER_FREEBSD
uptr internal_procctl(int type, int id, int cmd, void *data);
void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
-#elif SANITIZER_NETBSD
+# elif SANITIZER_NETBSD
void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg);
-#endif // SANITIZER_LINUX
+# endif // SANITIZER_LINUX
// This class reads thread IDs from /proc/<pid>/task using only syscalls.
class ThreadLister {
@@ -135,36 +135,60 @@ inline void ReleaseMemoryPagesToOSAndZeroFill(uptr beg, uptr end) {
ReleaseMemoryPagesToOS(beg, end);
}
-#if SANITIZER_ANDROID
-
-#if defined(__aarch64__)
-# define __get_tls() \
- ({ void** __v; __asm__("mrs %0, tpidr_el0" : "=r"(__v)); __v; })
-#elif defined(__arm__)
-# define __get_tls() \
- ({ void** __v; __asm__("mrc p15, 0, %0, c13, c0, 3" : "=r"(__v)); __v; })
-#elif defined(__mips__)
+# if SANITIZER_ANDROID
+
+# if defined(__aarch64__)
+# define __get_tls() \
+ ({ \
+ void **__v; \
+ __asm__("mrs %0, tpidr_el0" : "=r"(__v)); \
+ __v; \
+ })
+# elif defined(__arm__)
+# define __get_tls() \
+ ({ \
+ void **__v; \
+ __asm__("mrc p15, 0, %0, c13, c0, 3" : "=r"(__v)); \
+ __v; \
+ })
+# elif defined(__mips__)
// On mips32r1, this goes via a kernel illegal instruction trap that's
// optimized for v1.
-# define __get_tls() \
- ({ register void** __v asm("v1"); \
- __asm__(".set push\n" \
- ".set mips32r2\n" \
- "rdhwr %0,$29\n" \
- ".set pop\n" : "=r"(__v)); \
- __v; })
-#elif defined (__riscv)
-# define __get_tls() \
- ({ void** __v; __asm__("mv %0, tp" : "=r"(__v)); __v; })
-#elif defined(__i386__)
-# define __get_tls() \
- ({ void** __v; __asm__("movl %%gs:0, %0" : "=r"(__v)); __v; })
-#elif defined(__x86_64__)
-# define __get_tls() \
- ({ void** __v; __asm__("mov %%fs:0, %0" : "=r"(__v)); __v; })
-#else
-#error "Unsupported architecture."
-#endif
+# define __get_tls() \
+ ({ \
+ register void **__v asm("v1"); \
+ __asm__( \
+ ".set push\n" \
+ ".set mips32r2\n" \
+ "rdhwr %0,$29\n" \
+ ".set pop\n" \
+ : "=r"(__v)); \
+ __v; \
+ })
+# elif defined(__riscv)
+# define __get_tls() \
+ ({ \
+ void **__v; \
+ __asm__("mv %0, tp" : "=r"(__v)); \
+ __v; \
+ })
+# elif defined(__i386__)
+# define __get_tls() \
+ ({ \
+ void **__v; \
+ __asm__("movl %%gs:0, %0" : "=r"(__v)); \
+ __v; \
+ })
+# elif defined(__x86_64__)
+# define __get_tls() \
+ ({ \
+ void **__v; \
+ __asm__("mov %%fs:0, %0" : "=r"(__v)); \
+ __v; \
+ })
+# else
+# error "Unsupported architecture."
+# endif
// The Android Bionic team has allocated a TLS slot for sanitizers starting
// with Q, given that Android currently doesn't support ELF TLS. It is used to
@@ -175,7 +199,7 @@ ALWAYS_INLINE uptr *get_android_tls_ptr() {
return reinterpret_cast<uptr *>(&__get_tls()[TLS_SLOT_SANITIZER]);
}
-#endif // SANITIZER_ANDROID
+# endif // SANITIZER_ANDROID
} // namespace __sanitizer
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
index 42013f471870..8e942b69e6a7 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
@@ -16,87 +16,89 @@
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
SANITIZER_SOLARIS
-#include "sanitizer_allocator_internal.h"
-#include "sanitizer_atomic.h"
-#include "sanitizer_common.h"
-#include "sanitizer_file.h"
-#include "sanitizer_flags.h"
-#include "sanitizer_freebsd.h"
-#include "sanitizer_getauxval.h"
-#include "sanitizer_glibc_version.h"
-#include "sanitizer_linux.h"
-#include "sanitizer_placement_new.h"
-#include "sanitizer_procmaps.h"
-#include "sanitizer_solaris.h"
-
-#if SANITIZER_NETBSD
-#define _RTLD_SOURCE // for __lwp_gettcb_fast() / __lwp_getprivate_fast()
-#endif
+# include "sanitizer_allocator_internal.h"
+# include "sanitizer_atomic.h"
+# include "sanitizer_common.h"
+# include "sanitizer_file.h"
+# include "sanitizer_flags.h"
+# include "sanitizer_freebsd.h"
+# include "sanitizer_getauxval.h"
+# include "sanitizer_glibc_version.h"
+# include "sanitizer_linux.h"
+# include "sanitizer_placement_new.h"
+# include "sanitizer_procmaps.h"
+# include "sanitizer_solaris.h"
+
+# if SANITIZER_NETBSD
+# define _RTLD_SOURCE // for __lwp_gettcb_fast() / __lwp_getprivate_fast()
+# endif
-#include <dlfcn.h> // for dlsym()
-#include <link.h>
-#include <pthread.h>
-#include <signal.h>
-#include <sys/mman.h>
-#include <sys/resource.h>
-#include <syslog.h>
+# include <dlfcn.h> // for dlsym()
+# include <link.h>
+# include <pthread.h>
+# include <signal.h>
+# include <sys/mman.h>
+# include <sys/resource.h>
+# include <syslog.h>
-#if !defined(ElfW)
-#define ElfW(type) Elf_##type
-#endif
+# if !defined(ElfW)
+# define ElfW(type) Elf_##type
+# endif
-#if SANITIZER_FREEBSD
-#include <pthread_np.h>
-#include <osreldate.h>
-#include <sys/sysctl.h>
-#define pthread_getattr_np pthread_attr_get_np
+# if SANITIZER_FREEBSD
+# include <osreldate.h>
+# include <pthread_np.h>
+# include <sys/auxv.h>
+# include <sys/sysctl.h>
+# define pthread_getattr_np pthread_attr_get_np
// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
// that, it was never implemented. So just define it to zero.
-#undef MAP_NORESERVE
-#define MAP_NORESERVE 0
-#endif
+# undef MAP_NORESERVE
+# define MAP_NORESERVE 0
+extern const Elf_Auxinfo *__elf_aux_vector;
+# endif
-#if SANITIZER_NETBSD
-#include <sys/sysctl.h>
-#include <sys/tls.h>
-#include <lwp.h>
-#endif
+# if SANITIZER_NETBSD
+# include <lwp.h>
+# include <sys/sysctl.h>
+# include <sys/tls.h>
+# endif
-#if SANITIZER_SOLARIS
-#include <stddef.h>
-#include <stdlib.h>
-#include <thread.h>
-#endif
+# if SANITIZER_SOLARIS
+# include <stddef.h>
+# include <stdlib.h>
+# include <thread.h>
+# endif
-#if SANITIZER_ANDROID
-#include <android/api-level.h>
-#if !defined(CPU_COUNT) && !defined(__aarch64__)
-#include <dirent.h>
-#include <fcntl.h>
+# if SANITIZER_ANDROID
+# include <android/api-level.h>
+# if !defined(CPU_COUNT) && !defined(__aarch64__)
+# include <dirent.h>
+# include <fcntl.h>
struct __sanitizer::linux_dirent {
- long d_ino;
- off_t d_off;
+ long d_ino;
+ off_t d_off;
unsigned short d_reclen;
- char d_name[];
+ char d_name[];
};
-#endif
-#endif
+# endif
+# endif
-#if !SANITIZER_ANDROID
-#include <elf.h>
-#include <unistd.h>
-#endif
+# if !SANITIZER_ANDROID
+# include <elf.h>
+# include <unistd.h>
+# endif
namespace __sanitizer {
-SANITIZER_WEAK_ATTRIBUTE int
-real_sigaction(int signum, const void *act, void *oldact);
+SANITIZER_WEAK_ATTRIBUTE int real_sigaction(int signum, const void *act,
+ void *oldact);
int internal_sigaction(int signum, const void *act, void *oldact) {
-#if !SANITIZER_GO
+# if !SANITIZER_GO
if (&real_sigaction)
return real_sigaction(signum, act, oldact);
-#endif
+# endif
return sigaction(signum, (const struct sigaction *)act,
(struct sigaction *)oldact);
}
@@ -111,7 +113,7 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);
// Find the mapping that contains a stack variable.
- MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
if (proc_maps.Error()) {
*stack_top = *stack_bottom = 0;
return;
@@ -119,7 +121,8 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
MemoryMappedSegment segment;
uptr prev_end = 0;
while (proc_maps.Next(&segment)) {
- if ((uptr)&rl < segment.end) break;
+ if ((uptr)&rl < segment.end)
+ break;
prev_end = segment.end;
}
CHECK((uptr)&rl >= segment.start && (uptr)&rl < segment.end);
@@ -127,7 +130,8 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
// Get stacksize from rlimit, but clip it so that it does not overlap
// with other mappings.
uptr stacksize = rl.rlim_cur;
- if (stacksize > segment.end - prev_end) stacksize = segment.end - prev_end;
+ if (stacksize > segment.end - prev_end)
+ stacksize = segment.end - prev_end;
// When running with unlimited stack size, we still want to set some limit.
// The unlimited stack size is caused by 'ulimit -s unlimited'.
// Also, for some reason, GNU make spawns subprocesses with unlimited stack.
@@ -139,39 +143,39 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
}
uptr stacksize = 0;
void *stackaddr = nullptr;
-#if SANITIZER_SOLARIS
+# if SANITIZER_SOLARIS
stack_t ss;
CHECK_EQ(thr_stksegment(&ss), 0);
stacksize = ss.ss_size;
stackaddr = (char *)ss.ss_sp - stacksize;
-#else // !SANITIZER_SOLARIS
+# else // !SANITIZER_SOLARIS
pthread_attr_t attr;
pthread_attr_init(&attr);
CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
internal_pthread_attr_getstack(&attr, &stackaddr, &stacksize);
pthread_attr_destroy(&attr);
-#endif // SANITIZER_SOLARIS
+# endif // SANITIZER_SOLARIS
*stack_top = (uptr)stackaddr + stacksize;
*stack_bottom = (uptr)stackaddr;
}
-#if !SANITIZER_GO
+# if !SANITIZER_GO
bool SetEnv(const char *name, const char *value) {
void *f = dlsym(RTLD_NEXT, "setenv");
if (!f)
return false;
- typedef int(*setenv_ft)(const char *name, const char *value, int overwrite);
+ typedef int (*setenv_ft)(const char *name, const char *value, int overwrite);
setenv_ft setenv_f;
CHECK_EQ(sizeof(setenv_f), sizeof(f));
internal_memcpy(&setenv_f, &f, sizeof(f));
return setenv_f(name, value, 1) == 0;
}
-#endif
+# endif
__attribute__((unused)) static bool GetLibcVersion(int *major, int *minor,
int *patch) {
-#ifdef _CS_GNU_LIBC_VERSION
+# ifdef _CS_GNU_LIBC_VERSION
char buf[64];
uptr len = confstr(_CS_GNU_LIBC_VERSION, buf, sizeof(buf));
if (len >= sizeof(buf))
@@ -185,9 +189,9 @@ __attribute__((unused)) static bool GetLibcVersion(int *major, int *minor,
*minor = (*p == '.') ? internal_simple_strtoll(p + 1, &p, 10) : 0;
*patch = (*p == '.') ? internal_simple_strtoll(p + 1, &p, 10) : 0;
return true;
-#else
+# else
return false;
-#endif
+# endif
}
// True if we can use dlpi_tls_data. glibc before 2.25 may leave NULL (BZ
@@ -198,42 +202,42 @@ __attribute__((unused)) static bool GetLibcVersion(int *major, int *minor,
// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=254774
__attribute__((unused)) static int g_use_dlpi_tls_data;
-#if SANITIZER_GLIBC && !SANITIZER_GO
+# if SANITIZER_GLIBC && !SANITIZER_GO
__attribute__((unused)) static size_t g_tls_size;
void InitTlsSize() {
int major, minor, patch;
g_use_dlpi_tls_data =
GetLibcVersion(&major, &minor, &patch) && major == 2 && minor >= 25;
-#if defined(__aarch64__) || defined(__x86_64__) || defined(__powerpc64__) || \
- defined(__loongarch__)
+# if defined(__aarch64__) || defined(__x86_64__) || \
+ defined(__powerpc64__) || defined(__loongarch__)
void *get_tls_static_info = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
size_t tls_align;
((void (*)(size_t *, size_t *))get_tls_static_info)(&g_tls_size, &tls_align);
-#endif
+# endif
}
-#else
-void InitTlsSize() { }
-#endif // SANITIZER_GLIBC && !SANITIZER_GO
+# else
+void InitTlsSize() {}
+# endif // SANITIZER_GLIBC && !SANITIZER_GO
// On glibc x86_64, ThreadDescriptorSize() needs to be precise due to the usage
// of g_tls_size. On other targets, ThreadDescriptorSize() is only used by lsan
// to get the pointer to thread-specific data keys in the thread control block.
-#if (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_SOLARIS) && \
- !SANITIZER_ANDROID && !SANITIZER_GO
+# if (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_SOLARIS) && \
+ !SANITIZER_ANDROID && !SANITIZER_GO
// sizeof(struct pthread) from glibc.
static atomic_uintptr_t thread_descriptor_size;
static uptr ThreadDescriptorSizeFallback() {
uptr val = 0;
-#if defined(__x86_64__) || defined(__i386__) || defined(__arm__)
+# if defined(__x86_64__) || defined(__i386__) || defined(__arm__)
int major;
int minor;
int patch;
if (GetLibcVersion(&major, &minor, &patch) && major == 2) {
/* sizeof(struct pthread) values from various glibc versions. */
if (SANITIZER_X32)
- val = 1728; // Assume only one particular version for x32.
+ val = 1728; // Assume only one particular version for x32.
// For ARM sizeof(struct pthread) changed in Glibc 2.23.
else if (SANITIZER_ARM)
val = minor <= 22 ? 1120 : 1216;
@@ -256,19 +260,19 @@ static uptr ThreadDescriptorSizeFallback() {
else // minor == 32
val = FIRST_32_SECOND_64(1344, 2496);
}
-#elif defined(__s390__) || defined(__sparc__)
+# elif defined(__s390__) || defined(__sparc__)
// The size of a prefix of TCB including pthread::{specific_1stblock,specific}
// suffices. Just return offsetof(struct pthread, specific_used), which hasn't
// changed since 2007-05. Technically this applies to i386/x86_64 as well but
// we call _dl_get_tls_static_info and need the precise size of struct
// pthread.
return FIRST_32_SECOND_64(524, 1552);
-#elif defined(__mips__)
+# elif defined(__mips__)
// TODO(sagarthakur): add more values as per different glibc versions.
val = FIRST_32_SECOND_64(1152, 1776);
-#elif SANITIZER_LOONGARCH64
- val = 1856; // from glibc 2.36
-#elif SANITIZER_RISCV64
+# elif SANITIZER_LOONGARCH64
+ val = 1856; // from glibc 2.36
+# elif SANITIZER_RISCV64
int major;
int minor;
int patch;
@@ -283,12 +287,12 @@ static uptr ThreadDescriptorSizeFallback() {
val = 1936; // tested against glibc 2.32
}
-#elif defined(__aarch64__)
+# elif defined(__aarch64__)
// The sizeof (struct pthread) is the same from GLIBC 2.17 to 2.22.
val = 1776;
-#elif defined(__powerpc64__)
- val = 1776; // from glibc.ppc64le 2.20-8.fc21
-#endif
+# elif defined(__powerpc64__)
+ val = 1776; // from glibc.ppc64le 2.20-8.fc21
+# endif
return val;
}
@@ -307,26 +311,26 @@ uptr ThreadDescriptorSize() {
return val;
}
-#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64 || \
- SANITIZER_LOONGARCH64
+# if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64 || \
+ SANITIZER_LOONGARCH64
// TlsPreTcbSize includes size of struct pthread_descr and size of tcb
// head structure. It lies before the static tls blocks.
static uptr TlsPreTcbSize() {
-#if defined(__mips__)
- const uptr kTcbHead = 16; // sizeof (tcbhead_t)
-#elif defined(__powerpc64__)
- const uptr kTcbHead = 88; // sizeof (tcbhead_t)
-#elif SANITIZER_RISCV64
+# if defined(__mips__)
const uptr kTcbHead = 16; // sizeof (tcbhead_t)
-#elif SANITIZER_LOONGARCH64
+# elif defined(__powerpc64__)
+ const uptr kTcbHead = 88; // sizeof (tcbhead_t)
+# elif SANITIZER_RISCV64
const uptr kTcbHead = 16; // sizeof (tcbhead_t)
-#endif
+# elif SANITIZER_LOONGARCH64
+ const uptr kTcbHead = 16; // sizeof (tcbhead_t)
+# endif
const uptr kTlsAlign = 16;
const uptr kTlsPreTcbSize =
RoundUpTo(ThreadDescriptorSize() + kTcbHead, kTlsAlign);
return kTlsPreTcbSize;
}
-#endif
+# endif
namespace {
struct TlsBlock {
@@ -336,7 +340,7 @@ struct TlsBlock {
};
} // namespace
-#ifdef __s390__
+# ifdef __s390__
extern "C" uptr __tls_get_offset(void *arg);
static uptr TlsGetOffset(uptr ti_module, uptr ti_offset) {
@@ -354,16 +358,16 @@ static uptr TlsGetOffset(uptr ti_module, uptr ti_offset) {
: "memory", "cc", "0", "1", "3", "4", "5", "14");
return r2;
}
-#else
+# else
extern "C" void *__tls_get_addr(size_t *);
-#endif
+# endif
static size_t main_tls_modid;
static int CollectStaticTlsBlocks(struct dl_phdr_info *info, size_t size,
void *data) {
size_t tls_modid;
-#if SANITIZER_SOLARIS
+# if SANITIZER_SOLARIS
// dlpi_tls_modid is only available since Solaris 11.4 SRU 10. Use
// dlinfo(RTLD_DI_LINKMAP) instead which works on all of Solaris 11.3,
// 11.4, and Illumos. The tlsmodid of the executable was changed to 1 in
@@ -376,27 +380,26 @@ static int CollectStaticTlsBlocks(struct dl_phdr_info *info, size_t size,
Rt_map *map;
dlinfo(RTLD_SELF, RTLD_DI_LINKMAP, &map);
tls_modid = map->rt_tlsmodid;
-#else
+# else
main_tls_modid = 1;
tls_modid = info->dlpi_tls_modid;
-#endif
+# endif
if (tls_modid < main_tls_modid)
return 0;
uptr begin;
-#if !SANITIZER_SOLARIS
+# if !SANITIZER_SOLARIS
begin = (uptr)info->dlpi_tls_data;
-#endif
+# endif
if (!g_use_dlpi_tls_data) {
// Call __tls_get_addr as a fallback. This forces TLS allocation on glibc
// and FreeBSD.
-#ifdef __s390__
- begin = (uptr)__builtin_thread_pointer() +
- TlsGetOffset(tls_modid, 0);
-#else
+# ifdef __s390__
+ begin = (uptr)__builtin_thread_pointer() + TlsGetOffset(tls_modid, 0);
+# else
size_t mod_and_off[2] = {tls_modid, 0};
begin = (uptr)__tls_get_addr(mod_and_off);
-#endif
+# endif
}
for (unsigned i = 0; i != info->dlpi_phnum; ++i)
if (info->dlpi_phdr[i].p_type == PT_TLS) {
@@ -439,23 +442,21 @@ __attribute__((unused)) static void GetStaticTlsBoundary(uptr *addr, uptr *size,
*addr = ranges[l].begin;
*size = ranges[r - 1].end - ranges[l].begin;
}
-#endif // (x86_64 || i386 || mips || ...) && (SANITIZER_FREEBSD ||
- // SANITIZER_LINUX) && !SANITIZER_ANDROID && !SANITIZER_GO
+# endif // (x86_64 || i386 || mips || ...) && (SANITIZER_FREEBSD ||
+ // SANITIZER_LINUX) && !SANITIZER_ANDROID && !SANITIZER_GO
-#if SANITIZER_NETBSD
-static struct tls_tcb * ThreadSelfTlsTcb() {
+# if SANITIZER_NETBSD
+static struct tls_tcb *ThreadSelfTlsTcb() {
struct tls_tcb *tcb = nullptr;
-#ifdef __HAVE___LWP_GETTCB_FAST
+# ifdef __HAVE___LWP_GETTCB_FAST
tcb = (struct tls_tcb *)__lwp_gettcb_fast();
-#elif defined(__HAVE___LWP_GETPRIVATE_FAST)
+# elif defined(__HAVE___LWP_GETPRIVATE_FAST)
tcb = (struct tls_tcb *)__lwp_getprivate_fast();
-#endif
+# endif
return tcb;
}
-uptr ThreadSelf() {
- return (uptr)ThreadSelfTlsTcb()->tcb_pthread;
-}
+uptr ThreadSelf() { return (uptr)ThreadSelfTlsTcb()->tcb_pthread; }
int GetSizeFromHdr(struct dl_phdr_info *info, size_t size, void *data) {
const Elf_Phdr *hdr = info->dlpi_phdr;
@@ -463,23 +464,23 @@ int GetSizeFromHdr(struct dl_phdr_info *info, size_t size, void *data) {
for (; hdr != last_hdr; ++hdr) {
if (hdr->p_type == PT_TLS && info->dlpi_tls_modid == 1) {
- *(uptr*)data = hdr->p_memsz;
+ *(uptr *)data = hdr->p_memsz;
break;
}
}
return 0;
}
-#endif // SANITIZER_NETBSD
+# endif // SANITIZER_NETBSD
-#if SANITIZER_ANDROID
+# if SANITIZER_ANDROID
// Bionic provides this API since S.
extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_get_static_tls_bounds(void **,
void **);
-#endif
+# endif
-#if !SANITIZER_GO
+# if !SANITIZER_GO
static void GetTls(uptr *addr, uptr *size) {
-#if SANITIZER_ANDROID
+# if SANITIZER_ANDROID
if (&__libc_get_static_tls_bounds) {
void *start_addr;
void *end_addr;
@@ -491,48 +492,48 @@ static void GetTls(uptr *addr, uptr *size) {
*addr = 0;
*size = 0;
}
-#elif SANITIZER_GLIBC && defined(__x86_64__)
+# elif SANITIZER_GLIBC && defined(__x86_64__)
// For aarch64 and x86-64, use an O(1) approach which requires relatively
// precise ThreadDescriptorSize. g_tls_size was initialized in InitTlsSize.
-# if SANITIZER_X32
+# if SANITIZER_X32
asm("mov %%fs:8,%0" : "=r"(*addr));
-# else
+# else
asm("mov %%fs:16,%0" : "=r"(*addr));
-# endif
+# endif
*size = g_tls_size;
*addr -= *size;
*addr += ThreadDescriptorSize();
-#elif SANITIZER_GLIBC && defined(__aarch64__)
+# elif SANITIZER_GLIBC && defined(__aarch64__)
*addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
ThreadDescriptorSize();
*size = g_tls_size + ThreadDescriptorSize();
-#elif SANITIZER_GLIBC && defined(__loongarch__)
-# ifdef __clang__
+# elif SANITIZER_GLIBC && defined(__loongarch__)
+# ifdef __clang__
*addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
ThreadDescriptorSize();
-# else
+# else
asm("or %0,$tp,$zero" : "=r"(*addr));
*addr -= ThreadDescriptorSize();
-# endif
+# endif
*size = g_tls_size + ThreadDescriptorSize();
-#elif SANITIZER_GLIBC && defined(__powerpc64__)
+# elif SANITIZER_GLIBC && defined(__powerpc64__)
// Workaround for glibc<2.25(?). 2.27 is known to not need this.
uptr tp;
asm("addi %0,13,-0x7000" : "=r"(tp));
const uptr pre_tcb_size = TlsPreTcbSize();
*addr = tp - pre_tcb_size;
*size = g_tls_size + pre_tcb_size;
-#elif SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_SOLARIS
+# elif SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_SOLARIS
uptr align;
GetStaticTlsBoundary(addr, size, &align);
-#if defined(__x86_64__) || defined(__i386__) || defined(__s390__) || \
- defined(__sparc__)
+# if defined(__x86_64__) || defined(__i386__) || defined(__s390__) || \
+ defined(__sparc__)
if (SANITIZER_GLIBC) {
-#if defined(__x86_64__) || defined(__i386__)
+# if defined(__x86_64__) || defined(__i386__)
align = Max<uptr>(align, 64);
-#else
+# else
align = Max<uptr>(align, 16);
-#endif
+# endif
}
const uptr tp = RoundUpTo(*addr + *size, align);
@@ -551,26 +552,26 @@ static void GetTls(uptr *addr, uptr *size) {
// because the number of bytes after pthread::specific is larger.
*addr = tp - RoundUpTo(*size, align);
*size = tp - *addr + ThreadDescriptorSize();
-#else
+# else
if (SANITIZER_GLIBC)
*size += 1664;
else if (SANITIZER_FREEBSD)
*size += 128; // RTLD_STATIC_TLS_EXTRA
-#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
+# if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
const uptr pre_tcb_size = TlsPreTcbSize();
*addr -= pre_tcb_size;
*size += pre_tcb_size;
-#else
+# else
// arm and aarch64 reserve two words at TP, so this underestimates the range.
// However, this is sufficient for the purpose of finding the pointers to
// thread-specific data keys.
const uptr tcb_size = ThreadDescriptorSize();
*addr -= tcb_size;
*size += tcb_size;
-#endif
-#endif
-#elif SANITIZER_NETBSD
- struct tls_tcb * const tcb = ThreadSelfTlsTcb();
+# endif
+# endif
+# elif SANITIZER_NETBSD
+ struct tls_tcb *const tcb = ThreadSelfTlsTcb();
*addr = 0;
*size = 0;
if (tcb != 0) {
@@ -583,31 +584,31 @@ static void GetTls(uptr *addr, uptr *size) {
*addr = (uptr)tcb->tcb_dtv[1];
}
}
-#else
-#error "Unknown OS"
-#endif
+# else
+# error "Unknown OS"
+# endif
}
-#endif
+# endif
-#if !SANITIZER_GO
+# if !SANITIZER_GO
uptr GetTlsSize() {
-#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
- SANITIZER_SOLARIS
+# if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
+ SANITIZER_SOLARIS
uptr addr, size;
GetTls(&addr, &size);
return size;
-#else
+# else
return 0;
-#endif
+# endif
}
-#endif
+# endif
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
uptr *tls_addr, uptr *tls_size) {
-#if SANITIZER_GO
+# if SANITIZER_GO
// Stub implementation for Go.
*stk_addr = *stk_size = *tls_addr = *tls_size = 0;
-#else
+# else
GetTls(tls_addr, tls_size);
uptr stack_top, stack_bottom;
@@ -623,16 +624,16 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
*stk_size = *tls_addr - *stk_addr;
}
}
-#endif
+# endif
}
-#if !SANITIZER_FREEBSD
+# if !SANITIZER_FREEBSD
typedef ElfW(Phdr) Elf_Phdr;
-#elif SANITIZER_WORDSIZE == 32 && __FreeBSD_version <= 902001 // v9.2
-#define Elf_Phdr XElf32_Phdr
-#define dl_phdr_info xdl_phdr_info
-#define dl_iterate_phdr(c, b) xdl_iterate_phdr((c), (b))
-#endif // !SANITIZER_FREEBSD
+# elif SANITIZER_WORDSIZE == 32 && __FreeBSD_version <= 902001 // v9.2
+# define Elf_Phdr XElf32_Phdr
+# define dl_phdr_info xdl_phdr_info
+# define dl_iterate_phdr(c, b) xdl_iterate_phdr((c), (b))
+# endif // !SANITIZER_FREEBSD
struct DlIteratePhdrData {
InternalMmapVectorNoCtor<LoadedModule> *modules;
@@ -652,8 +653,7 @@ static int AddModuleSegments(const char *module_name, dl_phdr_info *info,
uptr cur_end = cur_beg + phdr->p_memsz;
bool executable = phdr->p_flags & PF_X;
bool writable = phdr->p_flags & PF_W;
- cur_module.addAddressRange(cur_beg, cur_end, executable,
- writable);
+ cur_module.addAddressRange(cur_beg, cur_end, executable, writable);
} else if (phdr->p_type == PT_NOTE) {
# ifdef NT_GNU_BUILD_ID
uptr off = 0;
@@ -698,33 +698,30 @@ static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
return AddModuleSegments(module_name.data(), info, data->modules);
}
- if (info->dlpi_name) {
- InternalScopedString module_name;
- module_name.append("%s", info->dlpi_name);
- return AddModuleSegments(module_name.data(), info, data->modules);
- }
+ if (info->dlpi_name)
+ return AddModuleSegments(info->dlpi_name, info, data->modules);
return 0;
}
-#if SANITIZER_ANDROID && __ANDROID_API__ < 21
+# if SANITIZER_ANDROID && __ANDROID_API__ < 21
extern "C" __attribute__((weak)) int dl_iterate_phdr(
int (*)(struct dl_phdr_info *, size_t, void *), void *);
-#endif
+# endif
static bool requiresProcmaps() {
-#if SANITIZER_ANDROID && __ANDROID_API__ <= 22
+# if SANITIZER_ANDROID && __ANDROID_API__ <= 22
// Fall back to /proc/maps if dl_iterate_phdr is unavailable or broken.
// The runtime check allows the same library to work with
// both K and L (and future) Android releases.
return AndroidGetApiLevel() <= ANDROID_LOLLIPOP_MR1;
-#else
+# else
return false;
-#endif
+# endif
}
static void procmapsInit(InternalMmapVectorNoCtor<LoadedModule> *modules) {
- MemoryMappingLayout memory_mapping(/*cache_enabled*/true);
+ MemoryMappingLayout memory_mapping(/*cache_enabled*/ true);
memory_mapping.DumpListOfModules(modules);
}
@@ -776,22 +773,19 @@ uptr GetRSS() {
// We need the second number which is RSS in pages.
char *pos = buf;
// Skip the first number.
- while (*pos >= '0' && *pos <= '9')
- pos++;
+ while (*pos >= '0' && *pos <= '9') pos++;
// Skip whitespaces.
- while (!(*pos >= '0' && *pos <= '9') && *pos != 0)
- pos++;
+ while (!(*pos >= '0' && *pos <= '9') && *pos != 0) pos++;
// Read the number.
uptr rss = 0;
- while (*pos >= '0' && *pos <= '9')
- rss = rss * 10 + *pos++ - '0';
+ while (*pos >= '0' && *pos <= '9') rss = rss * 10 + *pos++ - '0';
return rss * GetPageSizeCached();
}
// sysconf(_SC_NPROCESSORS_{CONF,ONLN}) cannot be used on most platforms as
// they allocate memory.
u32 GetNumberOfCPUs() {
-#if SANITIZER_FREEBSD || SANITIZER_NETBSD
+# if SANITIZER_FREEBSD || SANITIZER_NETBSD
u32 ncpu;
int req[2];
uptr len = sizeof(ncpu);
@@ -799,7 +793,7 @@ u32 GetNumberOfCPUs() {
req[1] = HW_NCPU;
CHECK_EQ(internal_sysctl(req, 2, &ncpu, &len, NULL, 0), 0);
return ncpu;
-#elif SANITIZER_ANDROID && !defined(CPU_COUNT) && !defined(__aarch64__)
+# elif SANITIZER_ANDROID && !defined(CPU_COUNT) && !defined(__aarch64__)
// Fall back to /sys/devices/system/cpu on Android when cpu_set_t doesn't
// exist in sched.h. That is the case for toolchains generated with older
// NDKs.
@@ -827,26 +821,26 @@ u32 GetNumberOfCPUs() {
break;
if (entry->d_ino != 0 && *d_type == DT_DIR) {
if (entry->d_name[0] == 'c' && entry->d_name[1] == 'p' &&
- entry->d_name[2] == 'u' &&
- entry->d_name[3] >= '0' && entry->d_name[3] <= '9')
+ entry->d_name[2] == 'u' && entry->d_name[3] >= '0' &&
+ entry->d_name[3] <= '9')
n_cpus++;
}
entry = (struct linux_dirent *)(((u8 *)entry) + entry->d_reclen);
}
internal_close(fd);
return n_cpus;
-#elif SANITIZER_SOLARIS
+# elif SANITIZER_SOLARIS
return sysconf(_SC_NPROCESSORS_ONLN);
-#else
+# else
cpu_set_t CPUs;
CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
return CPU_COUNT(&CPUs);
-#endif
+# endif
}
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
-#if SANITIZER_ANDROID
+# if SANITIZER_ANDROID
static atomic_uint8_t android_log_initialized;
void AndroidLogInit() {
@@ -858,13 +852,15 @@ static bool ShouldLogAfterPrintf() {
return atomic_load(&android_log_initialized, memory_order_acquire);
}
-extern "C" SANITIZER_WEAK_ATTRIBUTE
-int async_safe_write_log(int pri, const char* tag, const char* msg);
-extern "C" SANITIZER_WEAK_ATTRIBUTE
-int __android_log_write(int prio, const char* tag, const char* msg);
+extern "C" SANITIZER_WEAK_ATTRIBUTE int async_safe_write_log(int pri,
+ const char *tag,
+ const char *msg);
+extern "C" SANITIZER_WEAK_ATTRIBUTE int __android_log_write(int prio,
+ const char *tag,
+ const char *msg);
// ANDROID_LOG_INFO is 4, but can't be resolved at runtime.
-#define SANITIZER_ANDROID_LOG_INFO 4
+# define SANITIZER_ANDROID_LOG_INFO 4
// async_safe_write_log is a new public version of __libc_write_log that is
// used behind syslog. It is preferable to syslog as it will not do any dynamic
@@ -883,14 +879,14 @@ void WriteOneLineToSyslog(const char *s) {
}
}
-extern "C" SANITIZER_WEAK_ATTRIBUTE
-void android_set_abort_message(const char *);
+extern "C" SANITIZER_WEAK_ATTRIBUTE void android_set_abort_message(
+ const char *);
void SetAbortMessage(const char *str) {
if (&android_set_abort_message)
android_set_abort_message(str);
}
-#else
+# else
void AndroidLogInit() {}
static bool ShouldLogAfterPrintf() { return true; }
@@ -898,16 +894,16 @@ static bool ShouldLogAfterPrintf() { return true; }
void WriteOneLineToSyslog(const char *s) { syslog(LOG_INFO, "%s", s); }
void SetAbortMessage(const char *str) {}
-#endif // SANITIZER_ANDROID
+# endif // SANITIZER_ANDROID
void LogMessageOnPrintf(const char *str) {
if (common_flags()->log_to_syslog && ShouldLogAfterPrintf())
WriteToSyslog(str);
}
-#endif // SANITIZER_LINUX
+# endif // SANITIZER_LINUX
-#if SANITIZER_GLIBC && !SANITIZER_GO
+# if SANITIZER_GLIBC && !SANITIZER_GO
// glibc crashes when using clock_gettime from a preinit_array function as the
// vDSO function pointers haven't been initialized yet. __progname is
// initialized after the vDSO function pointers, so if it exists, is not null
@@ -918,8 +914,8 @@ inline bool CanUseVDSO() { return &__progname && __progname && *__progname; }
// MonotonicNanoTime is a timing function that can leverage the vDSO by calling
// clock_gettime. real_clock_gettime only exists if clock_gettime is
// intercepted, so define it weakly and use it if available.
-extern "C" SANITIZER_WEAK_ATTRIBUTE
-int real_clock_gettime(u32 clk_id, void *tp);
+extern "C" SANITIZER_WEAK_ATTRIBUTE int real_clock_gettime(u32 clk_id,
+ void *tp);
u64 MonotonicNanoTime() {
timespec ts;
if (CanUseVDSO()) {
@@ -932,19 +928,26 @@ u64 MonotonicNanoTime() {
}
return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
}
-#else
+# else
// Non-glibc & Go always use the regular function.
u64 MonotonicNanoTime() {
timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
}
-#endif // SANITIZER_GLIBC && !SANITIZER_GO
+# endif // SANITIZER_GLIBC && !SANITIZER_GO
void ReExec() {
const char *pathname = "/proc/self/exe";
-#if SANITIZER_NETBSD
+# if SANITIZER_FREEBSD
+ for (const auto *aux = __elf_aux_vector; aux->a_type != AT_NULL; aux++) {
+ if (aux->a_type == AT_EXECPATH) {
+ pathname = static_cast<const char *>(aux->a_un.a_ptr);
+ break;
+ }
+ }
+# elif SANITIZER_NETBSD
static const int name[] = {
CTL_KERN,
KERN_PROC_ARGS,
@@ -957,14 +960,14 @@ void ReExec() {
len = sizeof(path);
if (internal_sysctl(name, ARRAY_SIZE(name), path, &len, NULL, 0) != -1)
pathname = path;
-#elif SANITIZER_SOLARIS
+# elif SANITIZER_SOLARIS
pathname = getexecname();
CHECK_NE(pathname, NULL);
-#elif SANITIZER_USE_GETAUXVAL
+# elif SANITIZER_USE_GETAUXVAL
// Calling execve with /proc/self/exe sets that as $EXEC_ORIGIN. Binaries that
// rely on that will fail to load shared libraries. Query AT_EXECFN instead.
pathname = reinterpret_cast<const char *>(getauxval(AT_EXECFN));
-#endif
+# endif
uptr rv = internal_execve(pathname, GetArgv(), GetEnviron());
int rverrno;
@@ -1016,14 +1019,14 @@ static uptr MmapSharedNoReserve(uptr addr, uptr size) {
static uptr MremapCreateAlias(uptr base_addr, uptr alias_addr,
uptr alias_size) {
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
return internal_mremap(reinterpret_cast<void *>(base_addr), 0, alias_size,
MREMAP_MAYMOVE | MREMAP_FIXED,
reinterpret_cast<void *>(alias_addr));
-#else
+# else
CHECK(false && "mremap is not supported outside of Linux");
return 0;
-#endif
+# endif
}
static void CreateAliases(uptr start_addr, uptr alias_size, uptr num_aliases) {
@@ -1068,12 +1071,12 @@ uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
}
void InitializePlatformCommonFlags(CommonFlags *cf) {
-#if SANITIZER_ANDROID
+# if SANITIZER_ANDROID
if (&__libc_get_static_tls_bounds == nullptr)
cf->detect_leaks = false;
-#endif
+# endif
}
-} // namespace __sanitizer
+} // namespace __sanitizer
#endif
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux_s390.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_linux_s390.cpp
index 74db831b0aad..8523b540f2e5 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux_s390.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux_s390.cpp
@@ -15,14 +15,14 @@
#if SANITIZER_LINUX && SANITIZER_S390
-#include <dlfcn.h>
-#include <errno.h>
-#include <sys/syscall.h>
-#include <sys/utsname.h>
-#include <unistd.h>
+# include <dlfcn.h>
+# include <errno.h>
+# include <sys/syscall.h>
+# include <sys/utsname.h>
+# include <unistd.h>
-#include "sanitizer_libc.h"
-#include "sanitizer_linux.h"
+# include "sanitizer_libc.h"
+# include "sanitizer_linux.h"
namespace __sanitizer {
@@ -37,22 +37,19 @@ uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
unsigned long fd;
unsigned long offset;
} params = {
- (unsigned long)addr,
- (unsigned long)length,
- (unsigned long)prot,
- (unsigned long)flags,
- (unsigned long)fd,
-# ifdef __s390x__
- (unsigned long)offset,
-# else
+ (unsigned long)addr, (unsigned long)length, (unsigned long)prot,
+ (unsigned long)flags, (unsigned long)fd,
+# ifdef __s390x__
+ (unsigned long)offset,
+# else
(unsigned long)(offset / 4096),
-# endif
+# endif
};
-# ifdef __s390x__
+# ifdef __s390x__
return syscall(__NR_mmap, &params);
-# else
+# else
return syscall(__NR_mmap2, &params);
-# endif
+# endif
}
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
@@ -63,58 +60,54 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
}
CHECK_EQ(0, (uptr)child_stack % 16);
// Minimum frame size.
-#ifdef __s390x__
+# ifdef __s390x__
child_stack = (char *)child_stack - 160;
-#else
+# else
child_stack = (char *)child_stack - 96;
-#endif
+# endif
// Terminate unwind chain.
((unsigned long *)child_stack)[0] = 0;
// And pass parameters.
((unsigned long *)child_stack)[1] = (uptr)fn;
((unsigned long *)child_stack)[2] = (uptr)arg;
register uptr res __asm__("r2");
- register void *__cstack __asm__("r2") = child_stack;
- register long __flags __asm__("r3") = flags;
- register int * __ptidptr __asm__("r4") = parent_tidptr;
- register int * __ctidptr __asm__("r5") = child_tidptr;
- register void * __newtls __asm__("r6") = newtls;
+ register void *__cstack __asm__("r2") = child_stack;
+ register long __flags __asm__("r3") = flags;
+ register int *__ptidptr __asm__("r4") = parent_tidptr;
+ register int *__ctidptr __asm__("r5") = child_tidptr;
+ register void *__newtls __asm__("r6") = newtls;
__asm__ __volatile__(
- /* Clone. */
- "svc %1\n"
-
- /* if (%r2 != 0)
- * return;
- */
-#ifdef __s390x__
- "cghi %%r2, 0\n"
-#else
- "chi %%r2, 0\n"
-#endif
- "jne 1f\n"
-
- /* Call "fn(arg)". */
-#ifdef __s390x__
- "lmg %%r1, %%r2, 8(%%r15)\n"
-#else
- "lm %%r1, %%r2, 4(%%r15)\n"
-#endif
- "basr %%r14, %%r1\n"
-
- /* Call _exit(%r2). */
- "svc %2\n"
-
- /* Return to parent. */
- "1:\n"
- : "=r" (res)
- : "i"(__NR_clone), "i"(__NR_exit),
- "r"(__cstack),
- "r"(__flags),
- "r"(__ptidptr),
- "r"(__ctidptr),
- "r"(__newtls)
- : "memory", "cc");
+ /* Clone. */
+ "svc %1\n"
+
+ /* if (%r2 != 0)
+ * return;
+ */
+# ifdef __s390x__
+ "cghi %%r2, 0\n"
+# else
+ "chi %%r2, 0\n"
+# endif
+ "jne 1f\n"
+
+ /* Call "fn(arg)". */
+# ifdef __s390x__
+ "lmg %%r1, %%r2, 8(%%r15)\n"
+# else
+ "lm %%r1, %%r2, 4(%%r15)\n"
+# endif
+ "basr %%r14, %%r1\n"
+
+ /* Call _exit(%r2). */
+ "svc %2\n"
+
+ /* Return to parent. */
+ "1:\n"
+ : "=r"(res)
+ : "i"(__NR_clone), "i"(__NR_exit), "r"(__cstack), "r"(__flags),
+ "r"(__ptidptr), "r"(__ctidptr), "r"(__newtls)
+ : "memory", "cc");
if (res >= (uptr)-4095) {
errno = -res;
return -1;
@@ -122,7 +115,7 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
return res;
}
-#if SANITIZER_S390_64
+# if SANITIZER_S390_64
static bool FixedCVE_2016_2143() {
// Try to determine if the running kernel has a fix for CVE-2016-2143,
// return false if in doubt (better safe than sorry). Distros may want to
@@ -137,20 +130,20 @@ static bool FixedCVE_2016_2143() {
// At least first 2 should be matched.
if (ptr[0] != '.')
return false;
- minor = internal_simple_strtoll(ptr+1, &ptr, 10);
+ minor = internal_simple_strtoll(ptr + 1, &ptr, 10);
// Third is optional.
if (ptr[0] == '.')
- patch = internal_simple_strtoll(ptr+1, &ptr, 10);
+ patch = internal_simple_strtoll(ptr + 1, &ptr, 10);
if (major < 3) {
if (major == 2 && minor == 6 && patch == 32 && ptr[0] == '-' &&
internal_strstr(ptr, ".el6")) {
// Check RHEL6
- int r1 = internal_simple_strtoll(ptr+1, &ptr, 10);
- if (r1 >= 657) // 2.6.32-657.el6 or later
+ int r1 = internal_simple_strtoll(ptr + 1, &ptr, 10);
+ if (r1 >= 657) // 2.6.32-657.el6 or later
return true;
if (r1 == 642 && ptr[0] == '.') {
- int r2 = internal_simple_strtoll(ptr+1, &ptr, 10);
- if (r2 >= 9) // 2.6.32-642.9.1.el6 or later
+ int r2 = internal_simple_strtoll(ptr + 1, &ptr, 10);
+ if (r2 >= 9) // 2.6.32-642.9.1.el6 or later
return true;
}
}
@@ -166,12 +159,12 @@ static bool FixedCVE_2016_2143() {
if (minor == 10 && patch == 0 && ptr[0] == '-' &&
internal_strstr(ptr, ".el7")) {
// Check RHEL7
- int r1 = internal_simple_strtoll(ptr+1, &ptr, 10);
- if (r1 >= 426) // 3.10.0-426.el7 or later
+ int r1 = internal_simple_strtoll(ptr + 1, &ptr, 10);
+ if (r1 >= 426) // 3.10.0-426.el7 or later
return true;
if (r1 == 327 && ptr[0] == '.') {
- int r2 = internal_simple_strtoll(ptr+1, &ptr, 10);
- if (r2 >= 27) // 3.10.0-327.27.1.el7 or later
+ int r2 = internal_simple_strtoll(ptr + 1, &ptr, 10);
+ if (r2 >= 27) // 3.10.0-327.27.1.el7 or later
return true;
}
}
@@ -187,8 +180,8 @@ static bool FixedCVE_2016_2143() {
if (minor == 4 && patch == 0 && ptr[0] == '-' &&
internal_strstr(buf.version, "Ubuntu")) {
// Check Ubuntu 16.04
- int r1 = internal_simple_strtoll(ptr+1, &ptr, 10);
- if (r1 >= 13) // 4.4.0-13 or later
+ int r1 = internal_simple_strtoll(ptr + 1, &ptr, 10);
+ if (r1 >= 13) // 4.4.0-13 or later
return true;
}
// Otherwise, OK if 4.5+.
@@ -211,18 +204,19 @@ void AvoidCVE_2016_2143() {
if (GetEnv("SANITIZER_IGNORE_CVE_2016_2143"))
return;
Report(
- "ERROR: Your kernel seems to be vulnerable to CVE-2016-2143. Using ASan,\n"
- "MSan, TSan, DFSan or LSan with such kernel can and will crash your\n"
- "machine, or worse.\n"
- "\n"
- "If you are certain your kernel is not vulnerable (you have compiled it\n"
- "yourself, or are using an unrecognized distribution kernel), you can\n"
- "override this safety check by exporting SANITIZER_IGNORE_CVE_2016_2143\n"
- "with any value.\n");
+ "ERROR: Your kernel seems to be vulnerable to CVE-2016-2143. Using "
+ "ASan,\n"
+ "MSan, TSan, DFSan or LSan with such kernel can and will crash your\n"
+ "machine, or worse.\n"
+ "\n"
+ "If you are certain your kernel is not vulnerable (you have compiled it\n"
+ "yourself, or are using an unrecognized distribution kernel), you can\n"
+ "override this safety check by exporting SANITIZER_IGNORE_CVE_2016_2143\n"
+ "with any value.\n");
Die();
}
-#endif
+# endif
-} // namespace __sanitizer
+} // namespace __sanitizer
-#endif // SANITIZER_LINUX && SANITIZER_S390
+#endif // SANITIZER_LINUX && SANITIZER_S390
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mallinfo.h b/compiler-rt/lib/sanitizer_common/sanitizer_mallinfo.h
index 4e58c02df835..1c07e68e55a7 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_mallinfo.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mallinfo.h
@@ -31,6 +31,10 @@ struct __sanitizer_struct_mallinfo {
int v[10];
};
+struct __sanitizer_struct_mallinfo2 {
+ uptr v[10];
+};
+
#endif
} // namespace __sanitizer
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_malloc_mac.inc b/compiler-rt/lib/sanitizer_common/sanitizer_malloc_mac.inc
index fe76b3f8aa05..6343eb284afb 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_malloc_mac.inc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_malloc_mac.inc
@@ -123,7 +123,7 @@ INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) {
COMMON_MALLOC_ENTER();
InternalScopedString new_name;
if (name && zone->introspect == sanitizer_zone.introspect) {
- new_name.append(COMMON_MALLOC_ZONE_NAME "-%s", name);
+ new_name.AppendF(COMMON_MALLOC_ZONE_NAME "-%s", name);
name = new_name.data();
}
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform.h b/compiler-rt/lib/sanitizer_common/sanitizer_platform.h
index 764996e57355..596528155505 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform.h
@@ -260,6 +260,17 @@
# define SANITIZER_ARM64 0
#endif
+#if SANITIZER_WINDOWS64 && SANITIZER_ARM64
+# define SANITIZER_WINDOWS_ARM64 1
+# define SANITIZER_WINDOWS_x64 0
+#elif SANITIZER_WINDOWS64 && !SANITIZER_ARM64
+# define SANITIZER_WINDOWS_ARM64 0
+# define SANITIZER_WINDOWS_x64 1
+#else
+# define SANITIZER_WINDOWS_ARM64 0
+# define SANITIZER_WINDOWS_x64 0
+#endif
+
#if SANITIZER_SOLARIS && SANITIZER_WORDSIZE == 32
# define SANITIZER_SOLARIS32 1
#else
@@ -284,7 +295,8 @@
// For such platforms build this code with -DSANITIZER_CAN_USE_ALLOCATOR64=0 or
// change the definition of SANITIZER_CAN_USE_ALLOCATOR64 here.
#ifndef SANITIZER_CAN_USE_ALLOCATOR64
-# if SANITIZER_RISCV64 || SANITIZER_IOS
+# if (SANITIZER_RISCV64 && !SANITIZER_FUCHSIA) || SANITIZER_IOS || \
+ SANITIZER_DRIVERKIT
# define SANITIZER_CAN_USE_ALLOCATOR64 0
# elif defined(__mips64) || defined(__hexagon__)
# define SANITIZER_CAN_USE_ALLOCATOR64 0
@@ -303,7 +315,15 @@
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40)
# endif
#elif SANITIZER_RISCV64
-# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 38)
+// FIXME: Rather than hardcoding the VMA here, we should rely on
+// GetMaxUserVirtualAddress(). This will require some refactoring though since
+// many places either hardcode some value or SANITIZER_MMAP_RANGE_SIZE is
+// assumed to be some constant integer.
+# if SANITIZER_FUCHSIA
+# define SANITIZER_MMAP_RANGE_SIZE (1ULL << 38)
+# else
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
+# endif
#elif defined(__aarch64__)
# if SANITIZER_APPLE
# if SANITIZER_OSX || SANITIZER_IOSSIM
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h b/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h
index c740778b6228..8c7c00de6d12 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h
@@ -575,12 +575,12 @@
#define SANITIZER_INTERCEPT_SL_INIT (SI_FREEBSD || SI_NETBSD)
#define SANITIZER_INTERCEPT_GETRANDOM \
- ((SI_LINUX && __GLIBC_PREREQ(2, 25)) || SI_FREEBSD)
+ ((SI_LINUX && __GLIBC_PREREQ(2, 25)) || SI_FREEBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT___CXA_ATEXIT SI_NETBSD
#define SANITIZER_INTERCEPT_ATEXIT SI_NETBSD
#define SANITIZER_INTERCEPT_PTHREAD_ATFORK SI_NETBSD
#define SANITIZER_INTERCEPT_GETENTROPY \
- ((SI_LINUX && __GLIBC_PREREQ(2, 25)) || SI_FREEBSD)
+ ((SI_LINUX && __GLIBC_PREREQ(2, 25)) || SI_FREEBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_QSORT \
(SI_POSIX && !SI_IOSSIM && !SI_WATCHOS && !SI_TVOS && !SI_ANDROID)
#define SANITIZER_INTERCEPT_QSORT_R SI_GLIBC
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.cpp
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.cpp
+++ /dev/null
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.h b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.h
+++ /dev/null
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_printf.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_printf.cpp
index 3a9e366d2df9..62c1cf4abe42 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_printf.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_printf.cpp
@@ -337,7 +337,14 @@ int internal_snprintf(char *buffer, uptr length, const char *format, ...) {
return needed_length;
}
-void InternalScopedString::append(const char *format, ...) {
+void InternalScopedString::Append(const char *str) {
+ uptr prev_len = length();
+ uptr str_len = internal_strlen(str);
+ buffer_.resize(prev_len + str_len + 1);
+ internal_memcpy(buffer_.data() + prev_len, str, str_len + 1);
+}
+
+void InternalScopedString::AppendF(const char *format, ...) {
uptr prev_len = length();
while (true) {
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_redefine_builtins.h b/compiler-rt/lib/sanitizer_common/sanitizer_redefine_builtins.h
index 6649ff5844f5..d24b179ef320 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_redefine_builtins.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_redefine_builtins.h
@@ -11,16 +11,19 @@
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_NO_REDEFINE_BUILTINS
-#ifndef SANITIZER_REDEFINE_BUILTINS_H
-#define SANITIZER_REDEFINE_BUILTINS_H
+# ifndef SANITIZER_REDEFINE_BUILTINS_H
+# define SANITIZER_REDEFINE_BUILTINS_H
// The asm hack only works with GCC and Clang.
-#if !defined(_WIN32)
+# if !defined(_WIN32)
asm("memcpy = __sanitizer_internal_memcpy");
asm("memmove = __sanitizer_internal_memmove");
asm("memset = __sanitizer_internal_memset");
+# if defined(__cplusplus) && \
+ !defined(SANITIZER_COMMON_REDEFINE_BUILTINS_IN_STD)
+
// The builtins should not be redefined in source files that make use of C++
// standard libraries, in particular where C++STL headers with inline functions
// are used. The redefinition in such cases would lead to ODR violations.
@@ -46,7 +49,8 @@ using unordered_set = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
using vector = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
} // namespace std
-#endif // !_WIN32
+# endif // __cpluplus
+# endif // !_WIN32
-#endif // SANITIZER_REDEFINE_BUILTINS_H
-#endif // SANITIZER_COMMON_NO_REDEFINE_BUILTINS
+# endif // SANITIZER_REDEFINE_BUILTINS_H
+#endif // SANITIZER_COMMON_NO_REDEFINE_BUILTINS
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_ring_buffer.h b/compiler-rt/lib/sanitizer_common/sanitizer_ring_buffer.h
index f22e40cac284..6222a958b116 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_ring_buffer.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_ring_buffer.h
@@ -47,7 +47,9 @@ class RingBuffer {
void push(T t) {
*next_ = t;
next_--;
- // The condition below works only if sizeof(T) is divisible by sizeof(T*).
+ static_assert((sizeof(T) % sizeof(T *)) == 0,
+ "The condition below works only if sizeof(T) is divisible by "
+ "sizeof(T*).");
if (next_ <= reinterpret_cast<T*>(&next_))
next_ = last_;
}
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cpp
index 47983ee7ec71..9a4c80fcfdd1 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cpp
@@ -29,7 +29,8 @@ class StackTraceTextPrinter {
frame_delimiter_(frame_delimiter),
output_(output),
dedup_token_(dedup_token),
- symbolize_(RenderNeedsSymbolization(stack_trace_fmt)) {}
+ symbolize_(StackTracePrinter::GetOrInit()->RenderNeedsSymbolization(
+ stack_trace_fmt)) {}
bool ProcessAddressFrames(uptr pc) {
SymbolizedStack *frames = symbolize_
@@ -40,13 +41,13 @@ class StackTraceTextPrinter {
for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
uptr prev_len = output_->length();
- RenderFrame(output_, stack_trace_fmt_, frame_num_++, cur->info.address,
- symbolize_ ? &cur->info : nullptr,
- common_flags()->symbolize_vs_style,
- common_flags()->strip_path_prefix);
+ StackTracePrinter::GetOrInit()->RenderFrame(
+ output_, stack_trace_fmt_, frame_num_++, cur->info.address,
+ symbolize_ ? &cur->info : nullptr, common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
if (prev_len != output_->length())
- output_->append("%c", frame_delimiter_);
+ output_->AppendF("%c", frame_delimiter_);
ExtendDedupToken(cur);
}
@@ -62,9 +63,9 @@ class StackTraceTextPrinter {
if (dedup_frames_-- > 0) {
if (dedup_token_->length())
- dedup_token_->append("--");
- if (stack->info.function != nullptr)
- dedup_token_->append("%s", stack->info.function);
+ dedup_token_->AppendF("--");
+ if (stack->info.function)
+ dedup_token_->Append(stack->info.function);
}
}
@@ -98,7 +99,7 @@ void StackTrace::PrintTo(InternalScopedString *output) const {
output, &dedup_token);
if (trace == nullptr || size == 0) {
- output->append(" <empty stack>\n\n");
+ output->AppendF(" <empty stack>\n\n");
return;
}
@@ -110,11 +111,11 @@ void StackTrace::PrintTo(InternalScopedString *output) const {
}
// Always add a trailing empty line after stack trace.
- output->append("\n");
+ output->AppendF("\n");
// Append deduplication token, if non-empty.
if (dedup_token.length())
- output->append("DEDUP_TOKEN: %s\n", dedup_token.data());
+ output->AppendF("DEDUP_TOKEN: %s\n", dedup_token.data());
}
uptr StackTrace::PrintTo(char *out_buf, uptr out_buf_size) const {
@@ -197,7 +198,7 @@ void __sanitizer_symbolize_pc(uptr pc, const char *fmt, char *out_buf,
StackTraceTextPrinter printer(fmt, '\0', &output, nullptr);
if (!printer.ProcessAddressFrames(pc)) {
output.clear();
- output.append("<can't symbolize>");
+ output.AppendF("<can't symbolize>");
}
CopyStringToBuffer(output, out_buf, out_buf_size);
}
@@ -210,7 +211,8 @@ void __sanitizer_symbolize_global(uptr data_addr, const char *fmt,
DataInfo DI;
if (!Symbolizer::GetOrInit()->SymbolizeData(data_addr, &DI)) return;
InternalScopedString data_desc;
- RenderData(&data_desc, fmt, &DI, common_flags()->strip_path_prefix);
+ StackTracePrinter::GetOrInit()->RenderData(&data_desc, fmt, &DI,
+ common_flags()->strip_path_prefix);
internal_strncpy(out_buf, data_desc.data(), out_buf_size);
out_buf[out_buf_size - 1] = 0;
}
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.cpp
index 45c480d225c7..748d832ccc21 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.cpp
@@ -12,13 +12,28 @@
#include "sanitizer_stacktrace_printer.h"
+#include "sanitizer_common.h"
#include "sanitizer_file.h"
#include "sanitizer_flags.h"
#include "sanitizer_fuchsia.h"
+#include "sanitizer_symbolizer_markup.h"
namespace __sanitizer {
-const char *StripFunctionName(const char *function) {
+StackTracePrinter *StackTracePrinter::GetOrInit() {
+ static StackTracePrinter *stacktrace_printer;
+ static StaticSpinMutex init_mu;
+ SpinMutexLock l(&init_mu);
+ if (stacktrace_printer)
+ return stacktrace_printer;
+
+ stacktrace_printer = StackTracePrinter::NewStackTracePrinter();
+
+ CHECK(stacktrace_printer);
+ return stacktrace_printer;
+}
+
+const char *StackTracePrinter::StripFunctionName(const char *function) {
if (!common_flags()->demangle)
return function;
if (!function)
@@ -47,6 +62,13 @@ const char *StripFunctionName(const char *function) {
// sanitizer_symbolizer_markup.cpp implements these differently.
#if !SANITIZER_SYMBOLIZER_MARKUP
+StackTracePrinter *StackTracePrinter::NewStackTracePrinter() {
+ if (common_flags()->enable_symbolizer_markup)
+ return new (GetGlobalLowLevelAllocator()) MarkupStackTracePrinter();
+
+ return new (GetGlobalLowLevelAllocator()) FormattedStackTracePrinter();
+}
+
static const char *DemangleFunctionName(const char *function) {
if (!common_flags()->demangle)
return function;
@@ -130,20 +152,23 @@ static void MaybeBuildIdToBuffer(const AddressInfo &info, bool PrefixSpace,
InternalScopedString *buffer) {
if (info.uuid_size) {
if (PrefixSpace)
- buffer->append(" ");
- buffer->append("(BuildId: ");
+ buffer->AppendF(" ");
+ buffer->AppendF("(BuildId: ");
for (uptr i = 0; i < info.uuid_size; ++i) {
- buffer->append("%02x", info.uuid[i]);
+ buffer->AppendF("%02x", info.uuid[i]);
}
- buffer->append(")");
+ buffer->AppendF(")");
}
}
static const char kDefaultFormat[] = " #%n %p %F %L";
-void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
- uptr address, const AddressInfo *info, bool vs_style,
- const char *strip_path_prefix) {
+void FormattedStackTracePrinter::RenderFrame(InternalScopedString *buffer,
+ const char *format, int frame_no,
+ uptr address,
+ const AddressInfo *info,
+ bool vs_style,
+ const char *strip_path_prefix) {
// info will be null in the case where symbolization is not needed for the
// given format. This ensures that the code below will get a hard failure
// rather than print incorrect information in case RenderNeedsSymbolization
@@ -154,56 +179,56 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
format = kDefaultFormat;
for (const char *p = format; *p != '\0'; p++) {
if (*p != '%') {
- buffer->append("%c", *p);
+ buffer->AppendF("%c", *p);
continue;
}
p++;
switch (*p) {
case '%':
- buffer->append("%%");
+ buffer->Append("%");
break;
// Frame number and all fields of AddressInfo structure.
case 'n':
- buffer->append("%u", frame_no);
+ buffer->AppendF("%u", frame_no);
break;
case 'p':
- buffer->append("0x%zx", address);
+ buffer->AppendF("0x%zx", address);
break;
case 'm':
- buffer->append("%s", StripPathPrefix(info->module, strip_path_prefix));
+ buffer->AppendF("%s", StripPathPrefix(info->module, strip_path_prefix));
break;
case 'o':
- buffer->append("0x%zx", info->module_offset);
+ buffer->AppendF("0x%zx", info->module_offset);
break;
case 'b':
MaybeBuildIdToBuffer(*info, /*PrefixSpace=*/false, buffer);
break;
case 'f':
- buffer->append("%s",
- DemangleFunctionName(StripFunctionName(info->function)));
+ buffer->AppendF("%s",
+ DemangleFunctionName(StripFunctionName(info->function)));
break;
case 'q':
- buffer->append("0x%zx", info->function_offset != AddressInfo::kUnknown
- ? info->function_offset
- : 0x0);
+ buffer->AppendF("0x%zx", info->function_offset != AddressInfo::kUnknown
+ ? info->function_offset
+ : 0x0);
break;
case 's':
- buffer->append("%s", StripPathPrefix(info->file, strip_path_prefix));
+ buffer->AppendF("%s", StripPathPrefix(info->file, strip_path_prefix));
break;
case 'l':
- buffer->append("%d", info->line);
+ buffer->AppendF("%d", info->line);
break;
case 'c':
- buffer->append("%d", info->column);
+ buffer->AppendF("%d", info->column);
break;
// Smarter special cases.
case 'F':
// Function name and offset, if file is unknown.
if (info->function) {
- buffer->append("in %s",
- DemangleFunctionName(StripFunctionName(info->function)));
+ buffer->AppendF(
+ "in %s", DemangleFunctionName(StripFunctionName(info->function)));
if (!info->file && info->function_offset != AddressInfo::kUnknown)
- buffer->append("+0x%zx", info->function_offset);
+ buffer->AppendF("+0x%zx", info->function_offset);
}
break;
case 'S':
@@ -224,7 +249,7 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
MaybeBuildIdToBuffer(*info, /*PrefixSpace=*/true, buffer);
#endif
} else {
- buffer->append("(<unknown module>)");
+ buffer->AppendF("(<unknown module>)");
}
break;
case 'M':
@@ -239,7 +264,7 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
MaybeBuildIdToBuffer(*info, /*PrefixSpace=*/true, buffer);
#endif
} else {
- buffer->append("(%p)", (void *)address);
+ buffer->AppendF("(%p)", (void *)address);
}
break;
default:
@@ -250,7 +275,7 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
}
}
-bool RenderNeedsSymbolization(const char *format) {
+bool FormattedStackTracePrinter::RenderNeedsSymbolization(const char *format) {
if (0 == internal_strcmp(format, "DEFAULT"))
format = kDefaultFormat;
for (const char *p = format; *p != '\0'; p++) {
@@ -273,26 +298,28 @@ bool RenderNeedsSymbolization(const char *format) {
return false;
}
-void RenderData(InternalScopedString *buffer, const char *format,
- const DataInfo *DI, const char *strip_path_prefix) {
+void FormattedStackTracePrinter::RenderData(InternalScopedString *buffer,
+ const char *format,
+ const DataInfo *DI,
+ const char *strip_path_prefix) {
for (const char *p = format; *p != '\0'; p++) {
if (*p != '%') {
- buffer->append("%c", *p);
+ buffer->AppendF("%c", *p);
continue;
}
p++;
switch (*p) {
case '%':
- buffer->append("%%");
+ buffer->Append("%");
break;
case 's':
- buffer->append("%s", StripPathPrefix(DI->file, strip_path_prefix));
+ buffer->AppendF("%s", StripPathPrefix(DI->file, strip_path_prefix));
break;
case 'l':
- buffer->append("%zu", DI->line);
+ buffer->AppendF("%zu", DI->line);
break;
case 'g':
- buffer->append("%s", DI->name);
+ buffer->AppendF("%s", DI->name);
break;
default:
Report("Unsupported specifier in stack frame format: %c (%p)!\n", *p,
@@ -304,33 +331,35 @@ void RenderData(InternalScopedString *buffer, const char *format,
#endif // !SANITIZER_SYMBOLIZER_MARKUP
-void RenderSourceLocation(InternalScopedString *buffer, const char *file,
- int line, int column, bool vs_style,
- const char *strip_path_prefix) {
+void StackTracePrinter::RenderSourceLocation(InternalScopedString *buffer,
+ const char *file, int line,
+ int column, bool vs_style,
+ const char *strip_path_prefix) {
if (vs_style && line > 0) {
- buffer->append("%s(%d", StripPathPrefix(file, strip_path_prefix), line);
+ buffer->AppendF("%s(%d", StripPathPrefix(file, strip_path_prefix), line);
if (column > 0)
- buffer->append(",%d", column);
- buffer->append(")");
+ buffer->AppendF(",%d", column);
+ buffer->AppendF(")");
return;
}
- buffer->append("%s", StripPathPrefix(file, strip_path_prefix));
+ buffer->AppendF("%s", StripPathPrefix(file, strip_path_prefix));
if (line > 0) {
- buffer->append(":%d", line);
+ buffer->AppendF(":%d", line);
if (column > 0)
- buffer->append(":%d", column);
+ buffer->AppendF(":%d", column);
}
}
-void RenderModuleLocation(InternalScopedString *buffer, const char *module,
- uptr offset, ModuleArch arch,
- const char *strip_path_prefix) {
- buffer->append("(%s", StripPathPrefix(module, strip_path_prefix));
+void StackTracePrinter::RenderModuleLocation(InternalScopedString *buffer,
+ const char *module, uptr offset,
+ ModuleArch arch,
+ const char *strip_path_prefix) {
+ buffer->AppendF("(%s", StripPathPrefix(module, strip_path_prefix));
if (arch != kModuleArchUnknown) {
- buffer->append(":%s", ModuleArchToString(arch));
+ buffer->AppendF(":%s", ModuleArchToString(arch));
}
- buffer->append("+0x%zx)", offset);
+ buffer->AppendF("+0x%zx)", offset);
}
} // namespace __sanitizer
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.h b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.h
index bf2755a2e8f4..10361a320344 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.h
@@ -13,61 +13,94 @@
#define SANITIZER_STACKTRACE_PRINTER_H
#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
#include "sanitizer_symbolizer.h"
namespace __sanitizer {
-// Strip interceptor prefixes from function name.
-const char *StripFunctionName(const char *function);
-
-// Render the contents of "info" structure, which represents the contents of
-// stack frame "frame_no" and appends it to the "buffer". "format" is a
-// string with placeholders, which is copied to the output with
-// placeholders substituted with the contents of "info". For example,
-// format string
-// " frame %n: function %F at %S"
-// will be turned into
-// " frame 10: function foo::bar() at my/file.cc:10"
-// You may additionally pass "strip_path_prefix" to strip prefixes of paths to
-// source files and modules.
-// Here's the full list of available placeholders:
-// %% - represents a '%' character;
-// %n - frame number (copy of frame_no);
-// %p - PC in hex format;
-// %m - path to module (binary or shared object);
-// %o - offset in the module in hex format;
-// %f - function name;
-// %q - offset in the function in hex format (*if available*);
-// %s - path to source file;
-// %l - line in the source file;
-// %c - column in the source file;
-// %F - if function is known to be <foo>, prints "in <foo>", possibly
-// followed by the offset in this function, but only if source file
-// is unknown;
-// %S - prints file/line/column information;
-// %L - prints location information: file/line/column, if it is known, or
-// module+offset if it is known, or (<unknown module>) string.
-// %M - prints module basename and offset, if it is known, or PC.
-void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
- uptr address, const AddressInfo *info, bool vs_style,
- const char *strip_path_prefix = "");
-
-bool RenderNeedsSymbolization(const char *format);
-
-void RenderSourceLocation(InternalScopedString *buffer, const char *file,
- int line, int column, bool vs_style,
- const char *strip_path_prefix);
-
-void RenderModuleLocation(InternalScopedString *buffer, const char *module,
- uptr offset, ModuleArch arch,
- const char *strip_path_prefix);
-
-// Same as RenderFrame, but for data section (global variables).
-// Accepts %s, %l from above.
-// Also accepts:
-// %g - name of the global variable.
-void RenderData(InternalScopedString *buffer, const char *format,
- const DataInfo *DI, const char *strip_path_prefix = "");
+// StacktracePrinter is an interface that is implemented by
+// classes that can perform rendering of the different parts
+// of a stacktrace.
+class StackTracePrinter {
+ public:
+ static StackTracePrinter *GetOrInit();
+
+ // Strip interceptor prefixes from function name.
+ const char *StripFunctionName(const char *function);
+
+ virtual void RenderFrame(InternalScopedString *buffer, const char *format,
+ int frame_no, uptr address, const AddressInfo *info,
+ bool vs_style,
+ const char *strip_path_prefix = "") = 0;
+
+ virtual bool RenderNeedsSymbolization(const char *format) = 0;
+
+ void RenderSourceLocation(InternalScopedString *buffer, const char *file,
+ int line, int column, bool vs_style,
+ const char *strip_path_prefix);
+
+ void RenderModuleLocation(InternalScopedString *buffer, const char *module,
+ uptr offset, ModuleArch arch,
+ const char *strip_path_prefix);
+ virtual void RenderData(InternalScopedString *buffer, const char *format,
+ const DataInfo *DI,
+ const char *strip_path_prefix = "") = 0;
+
+ private:
+ // To be called from StackTracePrinter::GetOrInit
+ static StackTracePrinter *NewStackTracePrinter();
+
+ protected:
+ ~StackTracePrinter() {}
+};
+
+class FormattedStackTracePrinter : public StackTracePrinter {
+ public:
+ // Render the contents of "info" structure, which represents the contents of
+ // stack frame "frame_no" and appends it to the "buffer". "format" is a
+ // string with placeholders, which is copied to the output with
+ // placeholders substituted with the contents of "info". For example,
+ // format string
+ // " frame %n: function %F at %S"
+ // will be turned into
+ // " frame 10: function foo::bar() at my/file.cc:10"
+ // You may additionally pass "strip_path_prefix" to strip prefixes of paths to
+ // source files and modules.
+ // Here's the full list of available placeholders:
+ // %% - represents a '%' character;
+ // %n - frame number (copy of frame_no);
+ // %p - PC in hex format;
+ // %m - path to module (binary or shared object);
+ // %o - offset in the module in hex format;
+ // %f - function name;
+ // %q - offset in the function in hex format (*if available*);
+ // %s - path to source file;
+ // %l - line in the source file;
+ // %c - column in the source file;
+ // %F - if function is known to be <foo>, prints "in <foo>", possibly
+ // followed by the offset in this function, but only if source file
+ // is unknown;
+ // %S - prints file/line/column information;
+ // %L - prints location information: file/line/column, if it is known, or
+ // module+offset if it is known, or (<unknown module>) string.
+ // %M - prints module basename and offset, if it is known, or PC.
+ void RenderFrame(InternalScopedString *buffer, const char *format,
+ int frame_no, uptr address, const AddressInfo *info,
+ bool vs_style, const char *strip_path_prefix = "") override;
+
+ bool RenderNeedsSymbolization(const char *format) override;
+
+ // Same as RenderFrame, but for data section (global variables).
+ // Accepts %s, %l from above.
+ // Also accepts:
+ // %g - name of the global variable.
+ void RenderData(InternalScopedString *buffer, const char *format,
+ const DataInfo *DI,
+ const char *strip_path_prefix = "") override;
+
+ protected:
+ ~FormattedStackTracePrinter() {}
+};
} // namespace __sanitizer
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_sparc.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_sparc.cpp
index 1e635a66978f..a2000798a390 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_sparc.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_sparc.cpp
@@ -30,13 +30,7 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
// TODO(yln): add arg sanity check for stack_top/stack_bottom
CHECK_GE(max_depth, 2);
const uptr kPageSize = GetPageSizeCached();
-#if defined(__GNUC__)
- // __builtin_return_address returns the address of the call instruction
- // on the SPARC and not the return address, so we need to compensate.
- trace_buffer[0] = GetNextInstructionPc(pc);
-#else
trace_buffer[0] = pc;
-#endif
size = 1;
if (stack_top < 4096) return; // Sanity check for stack top.
// Flush register windows to memory
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp
index 13b90ce9bf51..25c4af708560 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp
@@ -565,7 +565,7 @@ PtraceRegistersStatus SuspendedThreadsListLinux::GetRegistersAndSP(
constexpr uptr uptr_sz = sizeof(uptr);
int pterrno;
#ifdef ARCH_IOVEC_FOR_GETREGSET
- auto append = [&](uptr regset) {
+ auto AppendF = [&](uptr regset) {
uptr size = buffer->size();
// NT_X86_XSTATE requires 64bit alignment.
uptr size_up = RoundUpTo(size, 8 / uptr_sz);
@@ -596,11 +596,11 @@ PtraceRegistersStatus SuspendedThreadsListLinux::GetRegistersAndSP(
};
buffer->clear();
- bool fail = !append(NT_PRSTATUS);
+ bool fail = !AppendF(NT_PRSTATUS);
if (!fail) {
// Accept the first available and do not report errors.
for (uptr regs : kExtraRegs)
- if (regs && append(regs))
+ if (regs && AppendF(regs))
break;
}
#else
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.cpp
index d3cffaa6eeff..519f768f8969 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.cpp
@@ -10,6 +10,8 @@
// run-time libraries.
//===----------------------------------------------------------------------===//
+#include <errno.h>
+
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
@@ -128,7 +130,7 @@ Symbolizer::Symbolizer(IntrusiveList<SymbolizerTool> tools)
start_hook_(0), end_hook_(0) {}
Symbolizer::SymbolizerScope::SymbolizerScope(const Symbolizer *sym)
- : sym_(sym) {
+ : sym_(sym), errno_(errno) {
if (sym_->start_hook_)
sym_->start_hook_();
}
@@ -136,6 +138,7 @@ Symbolizer::SymbolizerScope::SymbolizerScope(const Symbolizer *sym)
Symbolizer::SymbolizerScope::~SymbolizerScope() {
if (sym_->end_hook_)
sym_->end_hook_();
+ errno = errno_;
}
} // namespace __sanitizer
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.h b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.h
index bad4761e345f..82cd9bc22791 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.h
@@ -136,7 +136,7 @@ class Symbolizer final {
// Release internal caches (if any).
void Flush();
- // Attempts to demangle the provided C++ mangled name.
+ // Attempts to demangle the provided C++ mangled name. Never returns nullptr.
const char *Demangle(const char *name);
// Allow user to install hooks that would be called before/after Symbolizer
@@ -154,6 +154,8 @@ class Symbolizer final {
void InvalidateModuleList();
+ const ListOfModules &GetRefreshedListOfModules();
+
private:
// GetModuleNameAndOffsetForPC has to return a string to the caller.
// Since the corresponding module might get unloaded later, we should create
@@ -187,7 +189,7 @@ class Symbolizer final {
// If stale, need to reload the modules before looking up addresses.
bool modules_fresh_;
- // Platform-specific default demangler, must not return nullptr.
+ // Platform-specific default demangler, returns nullptr on failure.
const char *PlatformDemangle(const char *name);
static Symbolizer *symbolizer_;
@@ -212,6 +214,7 @@ class Symbolizer final {
~SymbolizerScope();
private:
const Symbolizer *sym_;
+ int errno_; // Backup errno in case symbolizer change the value.
};
};
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h
index 3ec4d80105a2..2345aee98554 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h
@@ -160,6 +160,15 @@ void ParseSymbolizePCOutput(const char *str, SymbolizedStack *res);
// Used by LLVMSymbolizer and InternalSymbolizer.
void ParseSymbolizeDataOutput(const char *str, DataInfo *info);
+// Parses repeated strings in the following format:
+// <function_name>
+// <var_name>
+// <file_name>:<line_number>[:<column_number>]
+// [<frame_offset>|??] [<size>|??] [<tag_offset>|??]
+// Used by LLVMSymbolizer and InternalSymbolizer.
+void ParseSymbolizeFrameOutput(const char *str,
+ InternalMmapVector<LocalInfo> *locals);
+
} // namespace __sanitizer
#endif // SANITIZER_SYMBOLIZER_INTERNAL_H
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.cpp
index cc02c77bccdc..d78dab93487f 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.cpp
@@ -199,7 +199,7 @@ static char *DemangleAlloc(const char *name, bool always_alloc) {
#endif
if (always_alloc)
return internal_strdup(name);
- return 0;
+ return nullptr;
}
const char *LibbacktraceSymbolizer::Demangle(const char *name) {
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp
index a6f82ced2036..74458028ae8f 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp
@@ -117,7 +117,7 @@ bool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) {
return true;
}
}
- return true;
+ return false;
}
bool Symbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) {
@@ -133,7 +133,7 @@ bool Symbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) {
return true;
}
}
- return true;
+ return false;
}
bool Symbolizer::GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
@@ -159,13 +159,16 @@ void Symbolizer::Flush() {
}
const char *Symbolizer::Demangle(const char *name) {
+ CHECK(name);
Lock l(&mu_);
for (auto &tool : tools_) {
SymbolizerScope sym_scope(this);
if (const char *demangled = tool.Demangle(name))
return demangled;
}
- return PlatformDemangle(name);
+ if (const char *demangled = PlatformDemangle(name))
+ return demangled;
+ return name;
}
bool Symbolizer::FindModuleNameAndOffsetForAddress(uptr address,
@@ -188,6 +191,13 @@ void Symbolizer::RefreshModules() {
modules_fresh_ = true;
}
+const ListOfModules &Symbolizer::GetRefreshedListOfModules() {
+ if (!modules_fresh_)
+ RefreshModules();
+
+ return modules_;
+}
+
static const LoadedModule *SearchForModule(const ListOfModules &modules,
uptr address) {
for (uptr i = 0; i < modules.size(); i++) {
@@ -382,8 +392,8 @@ void ParseSymbolizeDataOutput(const char *str, DataInfo *info) {
str = ExtractUptr(str, "\n", &info->line);
}
-static void ParseSymbolizeFrameOutput(const char *str,
- InternalMmapVector<LocalInfo> *locals) {
+void ParseSymbolizeFrameOutput(const char *str,
+ InternalMmapVector<LocalInfo> *locals) {
if (internal_strncmp(str, "??", 2) == 0)
return;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp
index a9c958b2d100..f1cc0b5e1e8a 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp
@@ -42,7 +42,8 @@ bool DlAddrSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
}
const char *demangled = DemangleSwiftAndCXX(info.dli_sname);
- if (!demangled) return false;
+ if (!demangled)
+ demangled = info.dli_sname;
stack->info.function = internal_strdup(demangled);
return true;
}
@@ -52,6 +53,8 @@ bool DlAddrSymbolizer::SymbolizeData(uptr addr, DataInfo *datainfo) {
int result = dladdr((const void *)addr, &info);
if (!result) return false;
const char *demangled = DemangleSwiftAndCXX(info.dli_sname);
+ if (!demangled)
+ demangled = info.dli_sname;
datainfo->name = internal_strdup(demangled);
datainfo->start = (uptr)info.dli_saddr;
return true;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp
index c8c10de10d03..b2a1069a9a61 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp
@@ -8,143 +8,153 @@
//
// This file is shared between various sanitizers' runtime libraries.
//
-// Implementation of offline markup symbolizer.
+// This generic support for offline symbolizing is based on the
+// Fuchsia port. We don't do any actual symbolization per se.
+// Instead, we emit text containing raw addresses and raw linkage
+// symbol names, embedded in Fuchsia's symbolization markup format.
+// See the spec at:
+// https://llvm.org/docs/SymbolizerMarkupFormat.html
//===----------------------------------------------------------------------===//
-#include "sanitizer_platform.h"
-#if SANITIZER_SYMBOLIZER_MARKUP
-
-#if SANITIZER_FUCHSIA
-#include "sanitizer_symbolizer_fuchsia.h"
-# endif
+#include "sanitizer_symbolizer_markup.h"
-# include <limits.h>
-# include <unwind.h>
-
-# include "sanitizer_stacktrace.h"
-# include "sanitizer_symbolizer.h"
+#include "sanitizer_common.h"
+#include "sanitizer_symbolizer.h"
+#include "sanitizer_symbolizer_markup_constants.h"
namespace __sanitizer {
-// This generic support for offline symbolizing is based on the
-// Fuchsia port. We don't do any actual symbolization per se.
-// Instead, we emit text containing raw addresses and raw linkage
-// symbol names, embedded in Fuchsia's symbolization markup format.
-// Fuchsia's logging infrastructure emits enough information about
-// process memory layout that a post-processing filter can do the
-// symbolization and pretty-print the markup. See the spec at:
-// https://fuchsia.googlesource.com/zircon/+/master/docs/symbolizer_markup.md
-
-// This is used by UBSan for type names, and by ASan for global variable names.
-// It's expected to return a static buffer that will be reused on each call.
-const char *Symbolizer::Demangle(const char *name) {
- static char buffer[kFormatDemangleMax];
- internal_snprintf(buffer, sizeof(buffer), kFormatDemangle, name);
- return buffer;
+void MarkupStackTracePrinter::RenderData(InternalScopedString *buffer,
+ const char *format, const DataInfo *DI,
+ const char *strip_path_prefix) {
+ RenderContext(buffer);
+ buffer->AppendF(kFormatData, DI->start);
}
-// This is used mostly for suppression matching. Making it work
-// would enable "interceptor_via_lib" suppressions. It's also used
-// once in UBSan to say "in module ..." in a message that also
-// includes an address in the module, so post-processing can already
-// pretty-print that so as to indicate the module.
-bool Symbolizer::GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
- uptr *module_address) {
+bool MarkupStackTracePrinter::RenderNeedsSymbolization(const char *format) {
return false;
}
-// This is mainly used by hwasan for online symbolization. This isn't needed
-// since hwasan can always just dump stack frames for offline symbolization.
-bool Symbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) { return false; }
-
-// This is used in some places for suppression checking, which we
-// don't really support for Fuchsia. It's also used in UBSan to
-// identify a PC location to a function name, so we always fill in
-// the function member with a string containing markup around the PC
-// value.
-// TODO(mcgrathr): Under SANITIZER_GO, it's currently used by TSan
-// to render stack frames, but that should be changed to use
-// RenderStackFrame.
-SymbolizedStack *Symbolizer::SymbolizePC(uptr addr) {
- SymbolizedStack *s = SymbolizedStack::New(addr);
+// We don't support the stack_trace_format flag at all.
+void MarkupStackTracePrinter::RenderFrame(InternalScopedString *buffer,
+ const char *format, int frame_no,
+ uptr address, const AddressInfo *info,
+ bool vs_style,
+ const char *strip_path_prefix) {
+ CHECK(!RenderNeedsSymbolization(format));
+ RenderContext(buffer);
+ buffer->AppendF(kFormatFrame, frame_no, address);
+}
+
+bool MarkupSymbolizerTool::SymbolizePC(uptr addr, SymbolizedStack *stack) {
char buffer[kFormatFunctionMax];
internal_snprintf(buffer, sizeof(buffer), kFormatFunction, addr);
- s->info.function = internal_strdup(buffer);
- return s;
+ stack->info.function = internal_strdup(buffer);
+ return true;
}
-// Always claim we succeeded, so that RenderDataInfo will be called.
-bool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) {
+bool MarkupSymbolizerTool::SymbolizeData(uptr addr, DataInfo *info) {
info->Clear();
info->start = addr;
return true;
}
-// We ignore the format argument to __sanitizer_symbolize_global.
-void RenderData(InternalScopedString *buffer, const char *format,
- const DataInfo *DI, const char *strip_path_prefix) {
- buffer->append(kFormatData, DI->start);
+const char *MarkupSymbolizerTool::Demangle(const char *name) {
+ static char buffer[kFormatDemangleMax];
+ internal_snprintf(buffer, sizeof(buffer), kFormatDemangle, name);
+ return buffer;
}
-bool RenderNeedsSymbolization(const char *format) { return false; }
-
-// We don't support the stack_trace_format flag at all.
-void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
- uptr address, const AddressInfo *info, bool vs_style,
- const char *strip_path_prefix) {
- CHECK(!RenderNeedsSymbolization(format));
- buffer->append(kFormatFrame, frame_no, address);
+// Fuchsia's implementation of symbolizer markup doesn't need to emit contextual
+// elements at this point.
+// Fuchsia's logging infrastructure emits enough information about
+// process memory layout that a post-processing filter can do the
+// symbolization and pretty-print the markup.
+#if !SANITIZER_FUCHSIA
+
+static bool ModulesEq(const LoadedModule &module,
+ const RenderedModule &renderedModule) {
+ return module.base_address() == renderedModule.base_address &&
+ internal_memcmp(module.uuid(), renderedModule.uuid,
+ module.uuid_size()) == 0 &&
+ internal_strcmp(module.full_name(), renderedModule.full_name) == 0;
}
-Symbolizer *Symbolizer::PlatformInit() {
- return new (symbolizer_allocator_) Symbolizer({});
+static bool ModuleHasBeenRendered(
+ const LoadedModule &module,
+ const InternalMmapVectorNoCtor<RenderedModule> &renderedModules) {
+ for (const auto &renderedModule : renderedModules)
+ if (ModulesEq(module, renderedModule))
+ return true;
+
+ return false;
}
-void Symbolizer::LateInitialize() { Symbolizer::GetOrInit(); }
-
-void StartReportDeadlySignal() {}
-void ReportDeadlySignal(const SignalContext &sig, u32 tid,
- UnwindSignalStackCallbackType unwind,
- const void *unwind_context) {}
-
-#if SANITIZER_CAN_SLOW_UNWIND
-struct UnwindTraceArg {
- BufferedStackTrace *stack;
- u32 max_depth;
-};
-
-_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
- UnwindTraceArg *arg = static_cast<UnwindTraceArg *>(param);
- CHECK_LT(arg->stack->size, arg->max_depth);
- uptr pc = _Unwind_GetIP(ctx);
- if (pc < PAGE_SIZE) return _URC_NORMAL_STOP;
- arg->stack->trace_buffer[arg->stack->size++] = pc;
- return (arg->stack->size == arg->max_depth ? _URC_NORMAL_STOP
- : _URC_NO_REASON);
+static void RenderModule(InternalScopedString *buffer,
+ const LoadedModule &module, uptr moduleId) {
+ InternalScopedString buildIdBuffer;
+ for (uptr i = 0; i < module.uuid_size(); i++)
+ buildIdBuffer.AppendF("%02x", module.uuid()[i]);
+
+ buffer->AppendF(kFormatModule, moduleId, module.full_name(),
+ buildIdBuffer.data());
+ buffer->Append("\n");
}
-void BufferedStackTrace::UnwindSlow(uptr pc, u32 max_depth) {
- CHECK_GE(max_depth, 2);
- size = 0;
- UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
- _Unwind_Backtrace(Unwind_Trace, &arg);
- CHECK_GT(size, 0);
- // We need to pop a few frames so that pc is on top.
- uptr to_pop = LocatePcInTrace(pc);
- // trace_buffer[0] belongs to the current function so we always pop it,
- // unless there is only 1 frame in the stack trace (1 frame is always better
- // than 0!).
- PopStackFrames(Min(to_pop, static_cast<uptr>(1)));
- trace_buffer[0] = pc;
+static void RenderMmaps(InternalScopedString *buffer,
+ const LoadedModule &module, uptr moduleId) {
+ InternalScopedString accessBuffer;
+
+ // All module mmaps are readable at least
+ for (const auto &range : module.ranges()) {
+ accessBuffer.Append("r");
+ if (range.writable)
+ accessBuffer.Append("w");
+ if (range.executable)
+ accessBuffer.Append("x");
+
+ //{{{mmap:%starting_addr:%size_in_hex:load:%moduleId:r%(w|x):%relative_addr}}}
+
+ // module.base_address == dlpi_addr
+ // range.beg == dlpi_addr + p_vaddr
+ // relative address == p_vaddr == range.beg - module.base_address
+ buffer->AppendF(kFormatMmap, range.beg, range.end - range.beg, moduleId,
+ accessBuffer.data(), range.beg - module.base_address());
+
+ buffer->Append("\n");
+ accessBuffer.clear();
+ }
}
-void BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) {
- CHECK(context);
- CHECK_GE(max_depth, 2);
- UNREACHABLE("signal context doesn't exist");
+void MarkupStackTracePrinter::RenderContext(InternalScopedString *buffer) {
+ if (renderedModules_.size() == 0)
+ buffer->Append("{{{reset}}}\n");
+
+ const auto &modules = Symbolizer::GetOrInit()->GetRefreshedListOfModules();
+
+ for (const auto &module : modules) {
+ if (ModuleHasBeenRendered(module, renderedModules_))
+ continue;
+
+ // symbolizer markup id, used to refer to this modules from other contextual
+ // elements
+ uptr moduleId = renderedModules_.size();
+
+ RenderModule(buffer, module, moduleId);
+ RenderMmaps(buffer, module, moduleId);
+
+ renderedModules_.push_back({
+ internal_strdup(module.full_name()),
+ module.base_address(),
+ {},
+ });
+
+ // kModuleUUIDSize is the size of curModule.uuid
+ CHECK_GE(kModuleUUIDSize, module.uuid_size());
+ internal_memcpy(renderedModules_.back().uuid, module.uuid(),
+ module.uuid_size());
+ }
}
-#endif // SANITIZER_CAN_SLOW_UNWIND
+#endif // !SANITIZER_FUCHSIA
} // namespace __sanitizer
-
-#endif // SANITIZER_SYMBOLIZER_MARKUP
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.h b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.h
new file mode 100644
index 000000000000..bc2ab24d625b
--- /dev/null
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.h
@@ -0,0 +1,79 @@
+//===-- sanitizer_symbolizer_markup.h -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries.
+//
+// Header for the offline markup symbolizer.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_SYMBOLIZER_MARKUP_H
+#define SANITIZER_SYMBOLIZER_MARKUP_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_stacktrace_printer.h"
+#include "sanitizer_symbolizer.h"
+#include "sanitizer_symbolizer_internal.h"
+
+namespace __sanitizer {
+
+// Simplier view of a LoadedModule. It only holds information necessary to
+// identify unique modules.
+struct RenderedModule {
+ char *full_name;
+ uptr base_address;
+ u8 uuid[kModuleUUIDSize]; // BuildId
+};
+
+class MarkupStackTracePrinter : public StackTracePrinter {
+ public:
+ // We don't support the stack_trace_format flag at all.
+ void RenderFrame(InternalScopedString *buffer, const char *format,
+ int frame_no, uptr address, const AddressInfo *info,
+ bool vs_style, const char *strip_path_prefix = "") override;
+
+ bool RenderNeedsSymbolization(const char *format) override;
+
+ // We ignore the format argument to __sanitizer_symbolize_global.
+ void RenderData(InternalScopedString *buffer, const char *format,
+ const DataInfo *DI,
+ const char *strip_path_prefix = "") override;
+
+ private:
+ // Keeps track of the modules that have been rendered to avoid re-rendering
+ // them
+ InternalMmapVector<RenderedModule> renderedModules_;
+ void RenderContext(InternalScopedString *buffer);
+
+ protected:
+ ~MarkupStackTracePrinter() {}
+};
+
+class MarkupSymbolizerTool final : public SymbolizerTool {
+ public:
+ // This is used in some places for suppression checking, which we
+ // don't really support for Fuchsia. It's also used in UBSan to
+ // identify a PC location to a function name, so we always fill in
+ // the function member with a string containing markup around the PC
+ // value.
+ // TODO(mcgrathr): Under SANITIZER_GO, it's currently used by TSan
+ // to render stack frames, but that should be changed to use
+ // RenderStackFrame.
+ bool SymbolizePC(uptr addr, SymbolizedStack *stack) override;
+
+ // Always claim we succeeded, so that RenderDataInfo will be called.
+ bool SymbolizeData(uptr addr, DataInfo *info) override;
+
+ // May return NULL if demangling failed.
+ // This is used by UBSan for type names, and by ASan for global variable
+ // names. It's expected to return a static buffer that will be reused on each
+ // call.
+ const char *Demangle(const char *name) override;
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SYMBOLIZER_MARKUP_H
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_fuchsia.h b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup_constants.h
index c4061e38c6a4..83643504e128 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_fuchsia.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup_constants.h
@@ -1,4 +1,5 @@
-//===-- sanitizer_symbolizer_fuchsia.h -----------------------------------===//
+//===-- sanitizer_symbolizer_markup_constants.h
+//-----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -8,10 +9,10 @@
//
// This file is shared between various sanitizers' runtime libraries.
//
-// Define Fuchsia's string formats and limits for the markup symbolizer.
+// Define string formats and limits for the markup symbolizer.
//===----------------------------------------------------------------------===//
-#ifndef SANITIZER_SYMBOLIZER_FUCHSIA_H
-#define SANITIZER_SYMBOLIZER_FUCHSIA_H
+#ifndef SANITIZER_SYMBOLIZER_MARKUP_CONSTANTS_H
+#define SANITIZER_SYMBOLIZER_MARKUP_CONSTANTS_H
#include "sanitizer_internal_defs.h"
@@ -34,9 +35,15 @@ constexpr const char *kFormatData = "{{{data:%p}}}";
// One frame in a backtrace (printed on a line by itself).
constexpr const char *kFormatFrame = "{{{bt:%u:%p}}}";
+// Module contextual element.
+constexpr const char *kFormatModule = "{{{module:%d:%s:elf:%s}}}";
+
+// mmap for a module segment.
+constexpr const char *kFormatMmap = "{{{mmap:%p:0x%x:load:%d:%s:0x%x}}}";
+
// Dump trigger element.
#define FORMAT_DUMPFILE "{{{dumpfile:%s:%s}}}"
} // namespace __sanitizer
-#endif // SANITIZER_SYMBOLIZER_FUCHSIA_H
+#endif // SANITIZER_SYMBOLIZER_MARKUP_CONSTANTS_H
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup_fuchsia.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup_fuchsia.cpp
new file mode 100644
index 000000000000..08b06c2faf30
--- /dev/null
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup_fuchsia.cpp
@@ -0,0 +1,85 @@
+//===-- sanitizer_symbolizer_markup_fuchsia.cpp ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries.
+//
+// Fuchsia specific implementation of offline markup symbolizer.
+//===----------------------------------------------------------------------===//
+#include "sanitizer_platform.h"
+
+#if SANITIZER_SYMBOLIZER_MARKUP
+
+# include "sanitizer_common.h"
+# include "sanitizer_stacktrace_printer.h"
+# include "sanitizer_symbolizer.h"
+# include "sanitizer_symbolizer_markup.h"
+# include "sanitizer_symbolizer_markup_constants.h"
+
+namespace __sanitizer {
+
+// This is used by UBSan for type names, and by ASan for global variable names.
+// It's expected to return a static buffer that will be reused on each call.
+const char *Symbolizer::Demangle(const char *name) {
+ static char buffer[kFormatDemangleMax];
+ internal_snprintf(buffer, sizeof(buffer), kFormatDemangle, name);
+ return buffer;
+}
+
+// This is used mostly for suppression matching. Making it work
+// would enable "interceptor_via_lib" suppressions. It's also used
+// once in UBSan to say "in module ..." in a message that also
+// includes an address in the module, so post-processing can already
+// pretty-print that so as to indicate the module.
+bool Symbolizer::GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
+ uptr *module_address) {
+ return false;
+}
+
+// This is mainly used by hwasan for online symbolization. This isn't needed
+// since hwasan can always just dump stack frames for offline symbolization.
+bool Symbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) { return false; }
+
+// This is used in some places for suppression checking, which we
+// don't really support for Fuchsia. It's also used in UBSan to
+// identify a PC location to a function name, so we always fill in
+// the function member with a string containing markup around the PC
+// value.
+// TODO(mcgrathr): Under SANITIZER_GO, it's currently used by TSan
+// to render stack frames, but that should be changed to use
+// RenderStackFrame.
+SymbolizedStack *Symbolizer::SymbolizePC(uptr addr) {
+ SymbolizedStack *s = SymbolizedStack::New(addr);
+ char buffer[kFormatFunctionMax];
+ internal_snprintf(buffer, sizeof(buffer), kFormatFunction, addr);
+ s->info.function = internal_strdup(buffer);
+ return s;
+}
+
+// Always claim we succeeded, so that RenderDataInfo will be called.
+bool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) {
+ info->Clear();
+ info->start = addr;
+ return true;
+}
+
+// Fuchsia only uses MarkupStackTracePrinter
+StackTracePrinter *StackTracePrinter::NewStackTracePrinter() {
+ return new (GetGlobalLowLevelAllocator()) MarkupStackTracePrinter();
+}
+
+void MarkupStackTracePrinter::RenderContext(InternalScopedString *) {}
+
+Symbolizer *Symbolizer::PlatformInit() {
+ return new (symbolizer_allocator_) Symbolizer({});
+}
+
+void Symbolizer::LateInitialize() { Symbolizer::GetOrInit(); }
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SYMBOLIZER_MARKUP
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp
index 1a5e38faea88..28f11352a6b5 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
+#include "sanitizer_symbolizer_markup.h"
#if SANITIZER_POSIX
# include <dlfcn.h> // for dlsym()
# include <errno.h>
@@ -56,7 +57,7 @@ const char *DemangleCXXABI(const char *name) {
__cxxabiv1::__cxa_demangle(name, 0, 0, 0))
return demangled_name;
- return name;
+ return nullptr;
}
// As of now, there are no headers for the Swift runtime. Once they are
@@ -324,9 +325,12 @@ __sanitizer_symbolize_code(const char *ModuleName, u64 ModuleOffset,
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool
__sanitizer_symbolize_data(const char *ModuleName, u64 ModuleOffset,
char *Buffer, int MaxLength);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool
+__sanitizer_symbolize_frame(const char *ModuleName, u64 ModuleOffset,
+ char *Buffer, int MaxLength);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_symbolize_flush();
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE int
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool
__sanitizer_symbolize_demangle(const char *Name, char *Buffer, int MaxLength);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool
__sanitizer_symbolize_set_demangle(bool Demangle);
@@ -337,19 +341,20 @@ __sanitizer_symbolize_set_inline_frames(bool InlineFrames);
class InternalSymbolizer final : public SymbolizerTool {
public:
static InternalSymbolizer *get(LowLevelAllocator *alloc) {
- if (__sanitizer_symbolize_set_demangle)
+ if (&__sanitizer_symbolize_set_demangle)
CHECK(__sanitizer_symbolize_set_demangle(common_flags()->demangle));
- if (__sanitizer_symbolize_set_inline_frames)
+ if (&__sanitizer_symbolize_set_inline_frames)
CHECK(__sanitizer_symbolize_set_inline_frames(
common_flags()->symbolize_inline_frames));
- if (__sanitizer_symbolize_code && __sanitizer_symbolize_data)
+ // These are essential, we don't have InternalSymbolizer without them.
+ if (&__sanitizer_symbolize_code && &__sanitizer_symbolize_data)
return new (*alloc) InternalSymbolizer();
return 0;
}
bool SymbolizePC(uptr addr, SymbolizedStack *stack) override {
bool result = __sanitizer_symbolize_code(
- stack->info.module, stack->info.module_offset, buffer_, kBufferSize);
+ stack->info.module, stack->info.module_offset, buffer_, sizeof(buffer_));
if (result)
ParseSymbolizePCOutput(buffer_, stack);
return result;
@@ -357,7 +362,7 @@ class InternalSymbolizer final : public SymbolizerTool {
bool SymbolizeData(uptr addr, DataInfo *info) override {
bool result = __sanitizer_symbolize_data(info->module, info->module_offset,
- buffer_, kBufferSize);
+ buffer_, sizeof(buffer_));
if (result) {
ParseSymbolizeDataOutput(buffer_, info);
info->start += (addr - info->module_offset); // Add the base address.
@@ -365,34 +370,35 @@ class InternalSymbolizer final : public SymbolizerTool {
return result;
}
+ bool SymbolizeFrame(uptr addr, FrameInfo *info) override {
+ if (&__sanitizer_symbolize_frame == nullptr)
+ return false;
+ bool result = __sanitizer_symbolize_frame(info->module, info->module_offset,
+ buffer_, sizeof(buffer_));
+ if (result)
+ ParseSymbolizeFrameOutput(buffer_, &info->locals);
+ return result;
+ }
+
void Flush() override {
- if (__sanitizer_symbolize_flush)
+ if (&__sanitizer_symbolize_flush)
__sanitizer_symbolize_flush();
}
const char *Demangle(const char *name) override {
- if (__sanitizer_symbolize_demangle) {
- for (uptr res_length = 1024;
- res_length <= InternalSizeClassMap::kMaxSize;) {
- char *res_buff = static_cast<char *>(InternalAlloc(res_length));
- uptr req_length =
- __sanitizer_symbolize_demangle(name, res_buff, res_length);
- if (req_length > res_length) {
- res_length = req_length + 1;
- InternalFree(res_buff);
- continue;
- }
- return res_buff;
- }
+ if (&__sanitizer_symbolize_demangle &&
+ __sanitizer_symbolize_demangle(name, buffer_, sizeof(buffer_))) {
+ char *res_buff = nullptr;
+ ExtractToken(buffer_, "", &res_buff);
+ return res_buff;
}
- return name;
+ return nullptr;
}
private:
InternalSymbolizer() {}
- static const int kBufferSize = 16 * 1024;
- char buffer_[kBufferSize];
+ char buffer_[16 * 1024];
};
# else // SANITIZER_SUPPORTS_WEAK_HOOKS
@@ -470,6 +476,12 @@ static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
VReport(2, "Symbolizer is disabled.\n");
return;
}
+ if (common_flags()->enable_symbolizer_markup) {
+ VReport(2, "Using symbolizer markup");
+ SymbolizerTool *tool = new (*allocator) MarkupSymbolizerTool();
+ CHECK(tool);
+ list->push_back(tool);
+ }
if (IsAllocatorOutOfMemory()) {
VReport(2, "Cannot use internal symbolizer: out of memory\n");
} else if (SymbolizerTool *tool = InternalSymbolizer::get(allocator)) {
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp
index 73915715c5ba..3e4417ae3f57 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp
@@ -32,10 +32,10 @@ void ReportErrorSummary(const char *error_type, const AddressInfo &info,
const char *alt_tool_name) {
if (!common_flags()->print_summary) return;
InternalScopedString buff;
- buff.append("%s ", error_type);
- RenderFrame(&buff, "%L %F", 0, info.address, &info,
- common_flags()->symbolize_vs_style,
- common_flags()->strip_path_prefix);
+ buff.AppendF("%s ", error_type);
+ StackTracePrinter::GetOrInit()->RenderFrame(
+ &buff, "%L %F", 0, info.address, &info,
+ common_flags()->symbolize_vs_style, common_flags()->strip_path_prefix);
ReportErrorSummary(buff.data(), alt_tool_name);
}
#endif
@@ -148,22 +148,22 @@ static void MaybeReportNonExecRegion(uptr pc) {
static void PrintMemoryByte(InternalScopedString *str, const char *before,
u8 byte) {
SanitizerCommonDecorator d;
- str->append("%s%s%x%x%s ", before, d.MemoryByte(), byte >> 4, byte & 15,
- d.Default());
+ str->AppendF("%s%s%x%x%s ", before, d.MemoryByte(), byte >> 4, byte & 15,
+ d.Default());
}
static void MaybeDumpInstructionBytes(uptr pc) {
if (!common_flags()->dump_instruction_bytes || (pc < GetPageSizeCached()))
return;
InternalScopedString str;
- str.append("First 16 instruction bytes at pc: ");
+ str.AppendF("First 16 instruction bytes at pc: ");
if (IsAccessibleMemoryRange(pc, 16)) {
for (int i = 0; i < 16; ++i) {
PrintMemoryByte(&str, "", ((u8 *)pc)[i]);
}
- str.append("\n");
+ str.AppendF("\n");
} else {
- str.append("unaccessible\n");
+ str.AppendF("unaccessible\n");
}
Report("%s", str.data());
}
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report_fuchsia.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report_fuchsia.cpp
new file mode 100644
index 000000000000..fb49cfbb3047
--- /dev/null
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report_fuchsia.cpp
@@ -0,0 +1,33 @@
+//===-- sanitizer_symbolizer_report_fuchsia.cpp
+//-----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of the report functions for fuchsia.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_SYMBOLIZER_MARKUP
+
+# include "sanitizer_common.h"
+
+namespace __sanitizer {
+void StartReportDeadlySignal() {}
+
+void ReportDeadlySignal(const SignalContext &sig, u32 tid,
+ UnwindSignalStackCallbackType unwind,
+ const void *unwind_context) {}
+
+void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
+ UnwindSignalStackCallbackType unwind,
+ const void *unwind_context) {}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SYMBOLIZER_MARKUP
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp
index ae2d3be19ef3..aae3e76ea229 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp
@@ -175,9 +175,7 @@ const char *WinSymbolizerTool::Demangle(const char *name) {
return name;
}
-const char *Symbolizer::PlatformDemangle(const char *name) {
- return name;
-}
+const char *Symbolizer::PlatformDemangle(const char *name) { return nullptr; }
namespace {
struct ScopedHandle {
@@ -233,7 +231,7 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() {
CHECK(!internal_strchr(arg, '"') && "quotes in args unsupported");
CHECK(arglen > 0 && arg[arglen - 1] != '\\' &&
"args ending in backslash and empty args unsupported");
- command_line.append("\"%s\" ", arg);
+ command_line.AppendF("\"%s\" ", arg);
}
VReport(3, "Launching symbolizer command: %s\n", command_line.data());
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_unwind_fuchsia.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_unwind_fuchsia.cpp
new file mode 100644
index 000000000000..f3eb8591dcbc
--- /dev/null
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_unwind_fuchsia.cpp
@@ -0,0 +1,66 @@
+//===------------------ sanitizer_unwind_fuchsia.cpp
+//---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// Sanitizer unwind Fuchsia specific functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_FUCHSIA
+
+# include <limits.h>
+# include <unwind.h>
+
+# include "sanitizer_common.h"
+# include "sanitizer_stacktrace.h"
+
+namespace __sanitizer {
+
+# if SANITIZER_CAN_SLOW_UNWIND
+struct UnwindTraceArg {
+ BufferedStackTrace *stack;
+ u32 max_depth;
+};
+
+_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
+ UnwindTraceArg *arg = static_cast<UnwindTraceArg *>(param);
+ CHECK_LT(arg->stack->size, arg->max_depth);
+ uptr pc = _Unwind_GetIP(ctx);
+ if (pc < GetPageSizeCached())
+ return _URC_NORMAL_STOP;
+ arg->stack->trace_buffer[arg->stack->size++] = pc;
+ return (arg->stack->size == arg->max_depth ? _URC_NORMAL_STOP
+ : _URC_NO_REASON);
+}
+
+void BufferedStackTrace::UnwindSlow(uptr pc, u32 max_depth) {
+ CHECK_GE(max_depth, 2);
+ size = 0;
+ UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
+ _Unwind_Backtrace(Unwind_Trace, &arg);
+ CHECK_GT(size, 0);
+ // We need to pop a few frames so that pc is on top.
+ uptr to_pop = LocatePcInTrace(pc);
+ // trace_buffer[0] belongs to the current function so we always pop it,
+ // unless there is only 1 frame in the stack trace (1 frame is always better
+ // than 0!).
+ PopStackFrames(Min(to_pop, static_cast<uptr>(1)));
+ trace_buffer[0] = pc;
+}
+
+void BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) {
+ CHECK(context);
+ CHECK_GE(max_depth, 2);
+ UNREACHABLE("signal context doesn't exist");
+}
+# endif // SANITIZER_CAN_SLOW_UNWIND
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FUCHSIA
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_unwind_linux_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_unwind_linux_libcdep.cpp
index 72f025a7d307..6a8e82e2e213 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_unwind_linux_libcdep.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_unwind_linux_libcdep.cpp
@@ -139,13 +139,7 @@ void BufferedStackTrace::UnwindSlow(uptr pc, u32 max_depth) {
if (to_pop == 0 && size > 1)
to_pop = 1;
PopStackFrames(to_pop);
-#if defined(__GNUC__) && defined(__sparc__)
- // __builtin_return_address returns the address of the call instruction
- // on the SPARC and not the return address, so we need to compensate.
- trace_buffer[0] = GetNextInstructionPc(pc);
-#else
trace_buffer[0] = pc;
-#endif
}
void BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) {
diff --git a/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp b/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp
index 2bc0444050f8..4bdf75332bf3 100644
--- a/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp
+++ b/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp
@@ -17,6 +17,7 @@
#include "llvm/DebugInfo/Symbolize/DIPrinter.h"
#include "llvm/DebugInfo/Symbolize/Symbolize.h"
+#include "llvm/Demangle/Demangle.h"
static llvm::symbolize::LLVMSymbolizer *Symbolizer = nullptr;
static bool Demangle = true;
@@ -27,6 +28,7 @@ static llvm::symbolize::LLVMSymbolizer *getDefaultSymbolizer() {
return Symbolizer;
llvm::symbolize::LLVMSymbolizer::Options Opts;
Opts.Demangle = Demangle;
+ Opts.UntagAddresses = true;
Symbolizer = new llvm::symbolize::LLVMSymbolizer(Opts);
return Symbolizer;
}
@@ -52,8 +54,7 @@ static llvm::symbolize::ErrorHandler symbolize_error_handler(
}
namespace __sanitizer {
-int internal_snprintf(char *buffer, uintptr_t length, const char *format,
- ...);
+int internal_snprintf(char *buffer, uintptr_t length, const char *format, ...);
} // namespace __sanitizer
extern "C" {
@@ -76,13 +77,16 @@ bool __sanitizer_symbolize_code(const char *ModuleName, uint64_t ModuleOffset,
auto ResOrErr = getDefaultSymbolizer()->symbolizeInlinedCode(
ModuleName,
{ModuleOffset, llvm::object::SectionedAddress::UndefSection});
- Printer->print(Request,
- ResOrErr ? ResOrErr.get() : llvm::DIInliningInfo());
+ if (!ResOrErr)
+ return false;
+ Printer->print(Request, ResOrErr.get());
} else {
auto ResOrErr = getDefaultSymbolizer()->symbolizeCode(
ModuleName,
{ModuleOffset, llvm::object::SectionedAddress::UndefSection});
- Printer->print(Request, ResOrErr ? ResOrErr.get() : llvm::DILineInfo());
+ if (!ResOrErr)
+ return false;
+ Printer->print(Request, ResOrErr.get());
}
}
return __sanitizer::internal_snprintf(Buffer, MaxLength, "%s",
@@ -104,7 +108,32 @@ bool __sanitizer_symbolize_data(const char *ModuleName, uint64_t ModuleOffset,
auto ResOrErr = getDefaultSymbolizer()->symbolizeData(
ModuleName,
{ModuleOffset, llvm::object::SectionedAddress::UndefSection});
- Printer->print(Request, ResOrErr ? ResOrErr.get() : llvm::DIGlobal());
+ if (!ResOrErr)
+ return false;
+ Printer->print(Request, ResOrErr.get());
+ }
+ return __sanitizer::internal_snprintf(Buffer, MaxLength, "%s",
+ Result.c_str()) < MaxLength;
+}
+
+bool __sanitizer_symbolize_frame(const char *ModuleName, uint64_t ModuleOffset,
+ char *Buffer, int MaxLength) {
+ std::string Result;
+ {
+ llvm::symbolize::PrinterConfig Config = getDefaultPrinterConfig();
+ llvm::raw_string_ostream OS(Result);
+ llvm::symbolize::Request Request{ModuleName, ModuleOffset};
+ auto Printer = std::make_unique<llvm::symbolize::LLVMPrinter>(
+ OS, symbolize_error_handler(OS), Config);
+
+ // TODO: it is neccessary to set proper SectionIndex here.
+ // object::SectionedAddress::UndefSection works for only absolute addresses.
+ auto ResOrErr = getDefaultSymbolizer()->symbolizeFrame(
+ ModuleName,
+ {ModuleOffset, llvm::object::SectionedAddress::UndefSection});
+ if (!ResOrErr)
+ return false;
+ Printer->print(Request, ResOrErr.get());
}
return __sanitizer::internal_snprintf(Buffer, MaxLength, "%s",
Result.c_str()) < MaxLength;
@@ -115,14 +144,13 @@ void __sanitizer_symbolize_flush() {
Symbolizer->flush();
}
-int __sanitizer_symbolize_demangle(const char *Name, char *Buffer,
- int MaxLength) {
- std::string Result =
- llvm::symbolize::LLVMSymbolizer::DemangleName(Name, nullptr);
+bool __sanitizer_symbolize_demangle(const char *Name, char *Buffer,
+ int MaxLength) {
+ std::string Result;
+ if (!llvm::nonMicrosoftDemangle(Name, Result))
+ return false;
return __sanitizer::internal_snprintf(Buffer, MaxLength, "%s",
- Result.c_str()) < MaxLength
- ? static_cast<int>(Result.size() + 1)
- : 0;
+ Result.c_str()) < MaxLength;
}
bool __sanitizer_symbolize_set_demangle(bool Value) {
diff --git a/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh b/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
index 835685686100..c9e31be61cb7 100755
--- a/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
+++ b/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
@@ -1,29 +1,19 @@
#!/usr/bin/env bash
#
-# Run as: CLANG=bin/clang ZLIB_SRC=src/zlib \
-# build_symbolizer.sh runtime_build/lib/clang/4.0.0/lib/linux/
+# Run as: CLANG=bin/clang build_symbolizer.sh out.o
+# If you want to use a local copy of zlib, set ZLIB_SRC.
# zlib can be downloaded from http://www.zlib.net.
#
-# Script compiles self-contained object file with symbolization code and injects
-# it into the given set of runtime libraries. Script updates only libraries
-# which has unresolved __sanitizer_symbolize_* symbols and matches architecture.
-# Object file is be compiled from LLVM sources with dependencies like libc++ and
-# zlib. Then it internalizes symbols in the file, so that it can be linked
-# into arbitrary programs, avoiding conflicts with the program own symbols and
-# avoiding dependencies on any program symbols. The only acceptable dependencies
-# are libc and __sanitizer::internal_* from sanitizer runtime.
+# Script compiles self-contained object file with symbolization code.
#
# Symbols exported by the object file will be used by Sanitizer runtime
# libraries to symbolize code/data in-process.
#
-# The script will modify the output directory which is given as the first
-# argument to the script.
-#
# FIXME: We should really be using a simpler approach to building this object
# file, and it should be available as a regular cmake rule. Conceptually, we
# want to be doing "ld -r" followed by "objcopy -G" to create a relocatable
# object file with only our entry points exposed. However, this does not work at
-# present, see PR30750.
+# present, see https://github.com/llvm/llvm-project/issues/30098.
set -x
set -e
@@ -31,26 +21,20 @@ set -u
SCRIPT_DIR=$(cd "$(dirname "$0")" && pwd)
SRC_DIR=$(readlink -f $SCRIPT_DIR/..)
-TARGE_DIR=$(readlink -f $1)
-COMPILER_RT_SRC=$(readlink -f ${SCRIPT_DIR}/../../../..)
-LLVM_SRC=${LLVM_SRC:-${COMPILER_RT_SRC}/../llvm}
-LLVM_SRC=$(readlink -f $LLVM_SRC)
-if [[ "$ZLIB_SRC" == "" ||
- ! -x "${ZLIB_SRC}/configure" ||
- ! -f "${ZLIB_SRC}/zlib.h" ]]; then
- echo "Missing or incomplete ZLIB_SRC"
+if [[ $# -ne 1 ]]; then
+ echo "Missing output file"
exit 1
fi
-ZLIB_SRC=$(readlink -f $ZLIB_SRC)
+
+OUTPUT=$(readlink -f $1)
+COMPILER_RT_SRC=$(readlink -f ${SCRIPT_DIR}/../../../..)
+LLVM_SRC=${LLVM_SRC:-${COMPILER_RT_SRC}/../llvm}
+LLVM_SRC=$(readlink -f $LLVM_SRC)
CLANG="${CLANG:-`which clang`}"
CLANG_DIR=$(readlink -f $(dirname "$CLANG"))
-BUILD_DIR=$(readlink -f ./symbolizer)
-mkdir -p $BUILD_DIR
-cd $BUILD_DIR
-
CC=$CLANG_DIR/clang
CXX=$CLANG_DIR/clang++
TBLGEN=$CLANG_DIR/llvm-tblgen
@@ -65,37 +49,57 @@ for F in $CC $CXX $TBLGEN $LINK $OPT $AR; do
fi
done
+BUILD_DIR=${PWD}/symbolizer
+mkdir -p $BUILD_DIR
+cd $BUILD_DIR
+
ZLIB_BUILD=${BUILD_DIR}/zlib
LIBCXX_BUILD=${BUILD_DIR}/libcxx
LLVM_BUILD=${BUILD_DIR}/llvm
SYMBOLIZER_BUILD=${BUILD_DIR}/symbolizer
FLAGS=${FLAGS:-}
+ZLIB_SRC=${ZLIB_SRC:-}
TARGET_TRIPLE=$($CC -print-target-triple $FLAGS)
if [[ "$FLAGS" =~ "-m32" ]] ; then
# Avoid new wrappers.
FLAGS+=" -U_FILE_OFFSET_BITS"
fi
FLAGS+=" -fPIC -flto -Oz -g0 -DNDEBUG -target $TARGET_TRIPLE -Wno-unused-command-line-argument"
+FLAGS+=" -include ${SRC_DIR}/../sanitizer_redefine_builtins.h -DSANITIZER_COMMON_REDEFINE_BUILTINS_IN_STD -Wno-language-extension-token"
+
LINKFLAGS="-fuse-ld=lld -target $TARGET_TRIPLE"
# Build zlib.
-mkdir -p ${ZLIB_BUILD}
+if [[ ! -d ${ZLIB_BUILD} ]]; then
+ if [[ -z "${ZLIB_SRC}" ]]; then
+ git clone https://github.com/madler/zlib ${ZLIB_BUILD}
+ else
+ ZLIB_SRC=$(readlink -f $ZLIB_SRC)
+ mkdir -p ${ZLIB_BUILD}
+ cp -r ${ZLIB_SRC}/* ${ZLIB_BUILD}/
+ fi
+fi
+
cd ${ZLIB_BUILD}
-cp -r ${ZLIB_SRC}/* .
AR="${AR}" CC="${CC}" CFLAGS="$FLAGS -Wno-deprecated-non-prototype" RANLIB=/bin/true ./configure --static
make -j libz.a
# Build and install libcxxabi and libcxx.
-if [[ ! -d ${LIBCXX_BUILD} ]]; then
+if [[ ! -f ${LLVM_BUILD}/build.ninja ]]; then
+ rm -rf ${LIBCXX_BUILD}
mkdir -p ${LIBCXX_BUILD}
cd ${LIBCXX_BUILD}
LIBCXX_FLAGS="${FLAGS} -Wno-macro-redefined"
cmake -GNinja \
-DLLVM_ENABLE_RUNTIMES="libcxx;libcxxabi" \
-DCMAKE_BUILD_TYPE=Release \
+ -DCMAKE_C_COMPILER_WORKS=ON \
+ -DCMAKE_CXX_COMPILER_WORKS=ON \
-DCMAKE_C_COMPILER=$CC \
-DCMAKE_CXX_COMPILER=$CXX \
+ -DLIBCXX_ABI_NAMESPACE=__InternalSymbolizer \
+ '-DLIBCXX_EXTRA_SITE_DEFINES=_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS;_LIBCXXABI_DISABLE_VISIBILITY_ANNOTATIONS' \
-DCMAKE_C_FLAGS_RELEASE="${LIBCXX_FLAGS}" \
-DCMAKE_CXX_FLAGS_RELEASE="${LIBCXX_FLAGS}" \
-DLIBCXXABI_ENABLE_ASSERTIONS=OFF \
@@ -104,6 +108,8 @@ if [[ ! -d ${LIBCXX_BUILD} ]]; then
-DLIBCXX_ENABLE_EXCEPTIONS=OFF \
-DLIBCXX_ENABLE_RTTI=OFF \
-DCMAKE_SHARED_LINKER_FLAGS="$LINKFLAGS" \
+ -DLIBCXX_ENABLE_SHARED=OFF \
+ -DLIBCXXABI_ENABLE_SHARED=OFF \
$LLVM_SRC/../runtimes
fi
cd ${LIBCXX_BUILD}
@@ -114,18 +120,24 @@ LLVM_CFLAGS="${FLAGS} -Wno-global-constructors"
LLVM_CXXFLAGS="${LLVM_CFLAGS} -nostdinc++ -I${ZLIB_BUILD} -isystem ${LIBCXX_BUILD}/include -isystem ${LIBCXX_BUILD}/include/c++/v1"
# Build LLVM.
-if [[ ! -d ${LLVM_BUILD} ]]; then
+if [[ ! -f ${LLVM_BUILD}/build.ninja ]]; then
+ rm -rf ${LLVM_BUILD}
mkdir -p ${LLVM_BUILD}
cd ${LLVM_BUILD}
cmake -GNinja \
-DCMAKE_BUILD_TYPE=Release \
+ -DCMAKE_C_COMPILER_WORKS=ON \
+ -DCMAKE_CXX_COMPILER_WORKS=ON \
-DCMAKE_C_COMPILER=$CC \
-DCMAKE_CXX_COMPILER=$CXX \
- -DCMAKE_C_FLAGS="${LLVM_CFLAGS}" \
- -DCMAKE_CXX_FLAGS="${LLVM_CXXFLAGS}" \
+ -DLLVM_ENABLE_LIBCXX=ON \
+ -DCMAKE_C_FLAGS_RELEASE="${LLVM_CFLAGS}" \
+ -DCMAKE_CXX_FLAGS_RELEASE="${LLVM_CXXFLAGS}" \
-DCMAKE_EXE_LINKER_FLAGS="$LINKFLAGS -stdlib=libc++ -L${LIBCXX_BUILD}/lib" \
-DLLVM_TABLEGEN=$TBLGEN \
+ -DLLVM_INCLUDE_TESTS=OFF \
-DLLVM_ENABLE_ZLIB=ON \
+ -DLLVM_ENABLE_ZSTD=OFF \
-DLLVM_ENABLE_TERMINFO=OFF \
-DLLVM_ENABLE_THREADS=OFF \
$LLVM_SRC
@@ -145,6 +157,7 @@ $AR rc symbolizer.a sanitizer_symbolize.o sanitizer_wrappers.o
SYMBOLIZER_API_LIST=__sanitizer_symbolize_code
SYMBOLIZER_API_LIST+=,__sanitizer_symbolize_data
+SYMBOLIZER_API_LIST+=,__sanitizer_symbolize_frame
SYMBOLIZER_API_LIST+=,__sanitizer_symbolize_flush
SYMBOLIZER_API_LIST+=,__sanitizer_symbolize_demangle
SYMBOLIZER_API_LIST+=,__sanitizer_symbolize_set_demangle
@@ -181,20 +194,6 @@ nm -f posix -g symbolizer.o | cut -f 1,2 -d \ | LC_COLLATE=C sort -u > undefine
(diff -u $SCRIPT_DIR/global_symbols.txt undefined.new | grep -E "^\+[^+]") && \
(echo "Failed: unexpected symbols"; exit 1)
-arch() {
- objdump -f $1 | grep -m1 -Po "(?<=file format ).*$"
-}
-
-SYMBOLIZER_FORMAT=$(arch symbolizer.o)
-echo "Injecting $SYMBOLIZER_FORMAT symbolizer..."
-for A in $TARGE_DIR/libclang_rt.*san*.a; do
- A_FORMAT=$(arch $A)
- if [[ "$A_FORMAT" != "$SYMBOLIZER_FORMAT" ]] ; then
- continue
- fi
- (nm -u $A 2>/dev/null | grep -E "__sanitizer_symbolize_code" >/dev/null) || continue
- echo "$A"
- $AR rcs $A symbolizer.o
-done
+cp -f symbolizer.o $OUTPUT
echo "Success!"
diff --git a/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt b/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt
index 509e3f19fe38..0a4bc6989a0d 100644
--- a/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt
+++ b/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt
@@ -11,6 +11,13 @@ _ZN11__sanitizer16internal_iserrorEjPi U
_ZN11__sanitizer16internal_iserrorEmPi U
_ZN11__sanitizer17internal_snprintfEPcjPKcz U
_ZN11__sanitizer17internal_snprintfEPcmPKcz U
+__aarch64_cas8_acq_rel U
+__aarch64_ldadd4_acq_rel U
+__aarch64_ldadd8_acq_rel U
+__aarch64_ldadd8_relax U
+__aarch64_swp8_acq_rel U
+__ashldi3 U
+__ashrdi3 U
__ctype_b_loc U
__ctype_get_mb_cur_max U
__cxa_atexit U
@@ -34,13 +41,25 @@ __interceptor_pthread_setspecific w
__interceptor_read w
__interceptor_realpath w
__isinf U
+__isoc23_sscanf U
+__isoc23_strtol U
+__isoc23_strtoll U
+__isoc23_strtoll_l U
+__isoc23_strtoull U
+__isoc23_strtoull_l U
+__isoc23_vsscanf U
__isoc99_sscanf U
__isoc99_vsscanf U
+__lshrdi3 U
__moddi3 U
+__sanitizer_internal_memcpy U
+__sanitizer_internal_memmove U
+__sanitizer_internal_memset U
__sanitizer_symbolize_code T
__sanitizer_symbolize_data T
__sanitizer_symbolize_demangle T
__sanitizer_symbolize_flush T
+__sanitizer_symbolize_frame T
__sanitizer_symbolize_set_demangle T
__sanitizer_symbolize_set_inline_frames T
__strdup U
diff --git a/compiler-rt/lib/sanitizer_common/weak_symbols.txt b/compiler-rt/lib/sanitizer_common/weak_symbols.txt
index d07f81bc8c12..1eb1ce8d6b9c 100644
--- a/compiler-rt/lib/sanitizer_common/weak_symbols.txt
+++ b/compiler-rt/lib/sanitizer_common/weak_symbols.txt
@@ -4,6 +4,7 @@ ___sanitizer_report_error_summary
___sanitizer_sandbox_on_notify
___sanitizer_symbolize_code
___sanitizer_symbolize_data
+___sanitizer_symbolize_frame
___sanitizer_symbolize_demangle
___sanitizer_symbolize_flush
___sanitizer_symbolize_set_demangle
diff --git a/compiler-rt/lib/scudo/standalone/allocator_common.h b/compiler-rt/lib/scudo/standalone/allocator_common.h
new file mode 100644
index 000000000000..95f4776ac596
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/allocator_common.h
@@ -0,0 +1,85 @@
+//===-- allocator_common.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ALLOCATOR_COMMON_H_
+#define SCUDO_ALLOCATOR_COMMON_H_
+
+#include "common.h"
+#include "list.h"
+
+namespace scudo {
+
+template <class SizeClassAllocator> struct TransferBatch {
+ typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
+ typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;
+
+ static const u16 MaxNumCached = SizeClassMap::MaxNumCachedHint;
+ void setFromArray(CompactPtrT *Array, u16 N) {
+ DCHECK_LE(N, MaxNumCached);
+ Count = N;
+ memcpy(Batch, Array, sizeof(Batch[0]) * Count);
+ }
+ void appendFromArray(CompactPtrT *Array, u16 N) {
+ DCHECK_LE(N, MaxNumCached - Count);
+ memcpy(Batch + Count, Array, sizeof(Batch[0]) * N);
+ // u16 will be promoted to int by arithmetic type conversion.
+ Count = static_cast<u16>(Count + N);
+ }
+ void appendFromTransferBatch(TransferBatch *B, u16 N) {
+ DCHECK_LE(N, MaxNumCached - Count);
+ DCHECK_GE(B->Count, N);
+ // Append from the back of `B`.
+ memcpy(Batch + Count, B->Batch + (B->Count - N), sizeof(Batch[0]) * N);
+ // u16 will be promoted to int by arithmetic type conversion.
+ Count = static_cast<u16>(Count + N);
+ B->Count = static_cast<u16>(B->Count - N);
+ }
+ void clear() { Count = 0; }
+ void add(CompactPtrT P) {
+ DCHECK_LT(Count, MaxNumCached);
+ Batch[Count++] = P;
+ }
+ void moveToArray(CompactPtrT *Array) {
+ memcpy(Array, Batch, sizeof(Batch[0]) * Count);
+ clear();
+ }
+ u16 getCount() const { return Count; }
+ bool isEmpty() const { return Count == 0U; }
+ CompactPtrT get(u16 I) const {
+ DCHECK_LE(I, Count);
+ return Batch[I];
+ }
+ TransferBatch *Next;
+
+private:
+ CompactPtrT Batch[MaxNumCached];
+ u16 Count;
+};
+
+// A BatchGroup is used to collect blocks. Each group has a group id to
+// identify the group kind of contained blocks.
+template <class SizeClassAllocator> struct BatchGroup {
+ // `Next` is used by IntrusiveList.
+ BatchGroup *Next;
+ // The compact base address of each group
+ uptr CompactPtrGroupBase;
+ // Cache value of SizeClassAllocatorLocalCache::getMaxCached()
+ u16 MaxCachedPerBatch;
+ // Number of blocks pushed into this group. This is an increment-only
+ // counter.
+ uptr PushedBlocks;
+ // This is used to track how many bytes are not in-use since last time we
+ // tried to release pages.
+ uptr BytesInBGAtLastCheckpoint;
+ // Blocks are managed by TransferBatch in a list.
+ SinglyLinkedList<TransferBatch<SizeClassAllocator>> Batches;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_ALLOCATOR_COMMON_H_
diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.h b/compiler-rt/lib/scudo/standalone/allocator_config.h
index 315a04f7635d..3c6aa3acb0e4 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config.h
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.h
@@ -11,6 +11,7 @@
#include "combined.h"
#include "common.h"
+#include "condition_variable.h"
#include "flags.h"
#include "primary32.h"
#include "primary64.h"
@@ -82,6 +83,14 @@ namespace scudo {
// // Defines the minimal & maximal release interval that can be set.
// static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
// static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+//
+// // Use condition variable to shorten the waiting time of refillment of
+// // freelist. Note that this depends on the implementation of condition
+// // variable on each platform and the performance may vary so that it
+// // doesn't guarantee a performance benefit.
+// // Note that both variables have to be defined to enable it.
+// static const bool UseConditionVariable = true;
+// using ConditionVariableT = ConditionVariableLinux;
// };
// // Defines the type of Primary allocator to use.
// template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
@@ -195,50 +204,6 @@ struct AndroidConfig {
template <typename Config> using SecondaryT = MapAllocator<Config>;
};
-struct AndroidSvelteConfig {
- static const bool MaySupportMemoryTagging = false;
- template <class A>
- using TSDRegistryT = TSDRegistrySharedT<A, 2U, 1U>; // Shared, max 2 TSDs.
-
- struct Primary {
- using SizeClassMap = SvelteSizeClassMap;
-#if SCUDO_CAN_USE_PRIMARY64
- static const uptr RegionSizeLog = 27U;
- typedef u32 CompactPtrT;
- static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
- static const uptr GroupSizeLog = 18U;
- static const bool EnableRandomOffset = true;
- static const uptr MapSizeIncrement = 1UL << 18;
-#else
- static const uptr RegionSizeLog = 16U;
- static const uptr GroupSizeLog = 16U;
- typedef uptr CompactPtrT;
-#endif
- static const s32 MinReleaseToOsIntervalMs = 1000;
- static const s32 MaxReleaseToOsIntervalMs = 1000;
- };
-
-#if SCUDO_CAN_USE_PRIMARY64
- template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
-#else
- template <typename Config> using PrimaryT = SizeClassAllocator32<Config>;
-#endif
-
- struct Secondary {
- struct Cache {
- static const u32 EntriesArraySize = 16U;
- static const u32 QuarantineSize = 32U;
- static const u32 DefaultMaxEntriesCount = 4U;
- static const uptr DefaultMaxEntrySize = 1UL << 18;
- static const s32 MinReleaseToOsIntervalMs = 0;
- static const s32 MaxReleaseToOsIntervalMs = 0;
- };
- template <typename Config> using CacheT = MapAllocatorCache<Config>;
- };
-
- template <typename Config> using SecondaryT = MapAllocator<Config>;
-};
-
#if SCUDO_CAN_USE_PRIMARY64
struct FuchsiaConfig {
static const bool MaySupportMemoryTagging = false;
diff --git a/compiler-rt/lib/scudo/standalone/atomic_helpers.h b/compiler-rt/lib/scudo/standalone/atomic_helpers.h
index d88f5d7be642..a68ffd16291c 100644
--- a/compiler-rt/lib/scudo/standalone/atomic_helpers.h
+++ b/compiler-rt/lib/scudo/standalone/atomic_helpers.h
@@ -133,10 +133,10 @@ inline void atomic_store_relaxed(volatile T *A, typename T::Type V) {
}
template <typename T>
-inline typename T::Type atomic_compare_exchange(volatile T *A,
- typename T::Type Cmp,
- typename T::Type Xchg) {
- atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
+inline typename T::Type
+atomic_compare_exchange_strong(volatile T *A, typename T::Type Cmp,
+ typename T::Type Xchg, memory_order MO) {
+ atomic_compare_exchange_strong(A, &Cmp, Xchg, MO);
return Cmp;
}
diff --git a/compiler-rt/lib/scudo/standalone/chunk.h b/compiler-rt/lib/scudo/standalone/chunk.h
index 32874a8df642..9228df047189 100644
--- a/compiler-rt/lib/scudo/standalone/chunk.h
+++ b/compiler-rt/lib/scudo/standalone/chunk.h
@@ -128,19 +128,6 @@ inline void loadHeader(u32 Cookie, const void *Ptr,
reportHeaderCorruption(const_cast<void *>(Ptr));
}
-inline void compareExchangeHeader(u32 Cookie, void *Ptr,
- UnpackedHeader *NewUnpackedHeader,
- UnpackedHeader *OldUnpackedHeader) {
- NewUnpackedHeader->Checksum =
- computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader);
- PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
- PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
- if (UNLIKELY(!atomic_compare_exchange_strong(
- getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader,
- memory_order_relaxed)))
- reportHeaderRace(Ptr);
-}
-
inline bool isValid(u32 Cookie, const void *Ptr,
UnpackedHeader *NewUnpackedHeader) {
PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr));
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index b17acc71f892..65ddc488370a 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -14,11 +14,11 @@
#include "flags.h"
#include "flags_parser.h"
#include "local_cache.h"
+#include "mem_map.h"
#include "memtag.h"
#include "options.h"
#include "quarantine.h"
#include "report.h"
-#include "rss_limit_checker.h"
#include "secondary.h"
#include "stack_depot.h"
#include "string_utils.h"
@@ -68,14 +68,13 @@ public:
if (UNLIKELY(Header.State != Chunk::State::Quarantined))
reportInvalidChunkState(AllocatorAction::Recycling, Ptr);
- Chunk::UnpackedHeader NewHeader = Header;
- NewHeader.State = Chunk::State::Available;
- Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
+ Header.State = Chunk::State::Available;
+ Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
if (allocatorSupportsMemoryTagging<Config>())
Ptr = untagPointer(Ptr);
- void *BlockBegin = Allocator::getBlockBegin(Ptr, &NewHeader);
- Cache.deallocate(NewHeader.ClassId, BlockBegin);
+ void *BlockBegin = Allocator::getBlockBegin(Ptr, &Header);
+ Cache.deallocate(Header.ClassId, BlockBegin);
}
// We take a shortcut when allocating a quarantine batch by working with the
@@ -118,9 +117,8 @@ public:
DCHECK_EQ(Header.Offset, 0);
DCHECK_EQ(Header.SizeOrUnusedBytes, sizeof(QuarantineBatch));
- Chunk::UnpackedHeader NewHeader = Header;
- NewHeader.State = Chunk::State::Available;
- Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
+ Header.State = Chunk::State::Available;
+ Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
Cache.deallocate(QuarantineClassId,
reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
Chunk::getHeaderSize()));
@@ -149,9 +147,6 @@ public:
initFlags();
reportUnrecognizedFlags();
- RssChecker.init(scudo::getFlags()->soft_rss_limit_mb,
- scudo::getFlags()->hard_rss_limit_mb);
-
// Store some flags locally.
if (getFlags()->may_return_null)
Primary.Options.set(OptionBit::MayReturnNull);
@@ -251,12 +246,14 @@ public:
// - unlinking the local stats from the global ones (destroying the cache does
// the last two items).
void commitBack(TSD<ThisT> *TSD) {
+ TSD->assertLocked(/*BypassCheck=*/true);
Quarantine.drain(&TSD->getQuarantineCache(),
QuarantineCallback(*this, TSD->getCache()));
TSD->getCache().destroy(&Stats);
}
void drainCache(TSD<ThisT> *TSD) {
+ TSD->assertLocked(/*BypassCheck=*/true);
Quarantine.drainAndRecycle(&TSD->getQuarantineCache(),
QuarantineCallback(*this, TSD->getCache()));
TSD->getCache().drain();
@@ -299,7 +296,7 @@ public:
#endif
}
- uptr computeOddEvenMaskForPointerMaybe(Options Options, uptr Ptr,
+ uptr computeOddEvenMaskForPointerMaybe(const Options &Options, uptr Ptr,
uptr ClassId) {
if (!Options.get(OptionBit::UseOddEvenTags))
return 0;
@@ -329,8 +326,6 @@ public:
#ifdef GWP_ASAN_HOOKS
if (UNLIKELY(GuardedAlloc.shouldSample())) {
if (void *Ptr = GuardedAlloc.allocate(Size, Alignment)) {
- if (UNLIKELY(&__scudo_allocate_hook))
- __scudo_allocate_hook(Ptr, Size);
Stats.lock();
Stats.add(StatAllocated, GuardedAllocSlotSize);
Stats.sub(StatFree, GuardedAllocSlotSize);
@@ -363,19 +358,6 @@ public:
}
DCHECK_LE(Size, NeededSize);
- switch (RssChecker.getRssLimitExceeded()) {
- case RssLimitChecker::Neither:
- break;
- case RssLimitChecker::Soft:
- if (Options.get(OptionBit::MayReturnNull))
- return nullptr;
- reportSoftRSSLimit(RssChecker.getSoftRssLimit());
- break;
- case RssLimitChecker::Hard:
- reportHardRSSLimit(RssChecker.getHardRssLimit());
- break;
- }
-
void *Block = nullptr;
uptr ClassId = 0;
uptr SecondaryBlockEnd = 0;
@@ -384,11 +366,11 @@ public:
DCHECK_NE(ClassId, 0U);
bool UnlockRequired;
auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
+ TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
Block = TSD->getCache().allocate(ClassId);
- // If the allocation failed, the most likely reason with a 32-bit primary
- // is the region being full. In that event, retry in each successively
- // larger class until it fits. If it fails to fit in the largest class,
- // fallback to the Secondary.
+ // If the allocation failed, retry in each successively larger class until
+ // it fits. If it fails to fit in the largest class, fallback to the
+ // Secondary.
if (UNLIKELY(!Block)) {
while (ClassId < SizeClassMap::LargestClassId && !Block)
Block = TSD->getCache().allocate(++ClassId);
@@ -406,6 +388,7 @@ public:
if (UNLIKELY(!Block)) {
if (Options.get(OptionBit::MayReturnNull))
return nullptr;
+ printStats();
reportOutOfMemory(NeededSize);
}
@@ -535,14 +518,14 @@ public:
Chunk::SizeOrUnusedBytesMask;
Chunk::storeHeader(Cookie, Ptr, &Header);
- if (UNLIKELY(&__scudo_allocate_hook))
- __scudo_allocate_hook(TaggedPtr, Size);
-
return TaggedPtr;
}
NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
UNUSED uptr Alignment = MinAlignment) {
+ if (UNLIKELY(!Ptr))
+ return;
+
// For a deallocation, we only ensure minimal initialization, meaning thread
// local data will be left uninitialized for now (when using ELF TLS). The
// fallback cache will be used instead. This is a workaround for a situation
@@ -551,12 +534,6 @@ public:
// being destroyed properly. Any other heap operation will do a full init.
initThreadMaybe(/*MinimalInit=*/true);
- if (UNLIKELY(&__scudo_deallocate_hook))
- __scudo_deallocate_hook(Ptr);
-
- if (UNLIKELY(!Ptr))
- return;
-
#ifdef GWP_ASAN_HOOKS
if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
GuardedAlloc.deallocate(Ptr);
@@ -635,47 +612,46 @@ public:
if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);
- Chunk::UnpackedHeader OldHeader;
- Chunk::loadHeader(Cookie, OldPtr, &OldHeader);
+ Chunk::UnpackedHeader Header;
+ Chunk::loadHeader(Cookie, OldPtr, &Header);
- if (UNLIKELY(OldHeader.State != Chunk::State::Allocated))
+ if (UNLIKELY(Header.State != Chunk::State::Allocated))
reportInvalidChunkState(AllocatorAction::Reallocating, OldPtr);
// Pointer has to be allocated with a malloc-type function. Some
// applications think that it is OK to realloc a memalign'ed pointer, which
// will trigger this check. It really isn't.
if (Options.get(OptionBit::DeallocTypeMismatch)) {
- if (UNLIKELY(OldHeader.OriginOrWasZeroed != Chunk::Origin::Malloc))
+ if (UNLIKELY(Header.OriginOrWasZeroed != Chunk::Origin::Malloc))
reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
- OldHeader.OriginOrWasZeroed,
+ Header.OriginOrWasZeroed,
Chunk::Origin::Malloc);
}
- void *BlockBegin = getBlockBegin(OldTaggedPtr, &OldHeader);
+ void *BlockBegin = getBlockBegin(OldTaggedPtr, &Header);
uptr BlockEnd;
uptr OldSize;
- const uptr ClassId = OldHeader.ClassId;
+ const uptr ClassId = Header.ClassId;
if (LIKELY(ClassId)) {
BlockEnd = reinterpret_cast<uptr>(BlockBegin) +
SizeClassMap::getSizeByClassId(ClassId);
- OldSize = OldHeader.SizeOrUnusedBytes;
+ OldSize = Header.SizeOrUnusedBytes;
} else {
BlockEnd = SecondaryT::getBlockEnd(BlockBegin);
OldSize = BlockEnd - (reinterpret_cast<uptr>(OldTaggedPtr) +
- OldHeader.SizeOrUnusedBytes);
+ Header.SizeOrUnusedBytes);
}
// If the new chunk still fits in the previously allocated block (with a
// reasonable delta), we just keep the old block, and update the chunk
// header to reflect the size change.
if (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize <= BlockEnd) {
if (NewSize > OldSize || (OldSize - NewSize) < getPageSizeCached()) {
- Chunk::UnpackedHeader NewHeader = OldHeader;
- NewHeader.SizeOrUnusedBytes =
+ Header.SizeOrUnusedBytes =
(ClassId ? NewSize
: BlockEnd -
(reinterpret_cast<uptr>(OldTaggedPtr) + NewSize)) &
Chunk::SizeOrUnusedBytesMask;
- Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
+ Chunk::storeHeader(Cookie, OldPtr, &Header);
if (UNLIKELY(useMemoryTagging<Config>(Options))) {
if (ClassId) {
resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
@@ -697,9 +673,7 @@ public:
void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
if (LIKELY(NewPtr)) {
memcpy(NewPtr, OldTaggedPtr, Min(NewSize, OldSize));
- if (UNLIKELY(&__scudo_deallocate_hook))
- __scudo_deallocate_hook(OldTaggedPtr);
- quarantineOrDeallocateChunk(Options, OldTaggedPtr, &OldHeader, OldSize);
+ quarantineOrDeallocateChunk(Options, OldTaggedPtr, &Header, OldSize);
}
return NewPtr;
}
@@ -754,6 +728,13 @@ public:
Str.output();
}
+ void printFragmentationInfo() {
+ ScopedString Str;
+ Primary.getFragmentationInfo(&Str);
+ // Secondary allocator dumps the fragmentation data in getStats().
+ Str.output();
+ }
+
void releaseToOS(ReleaseToOS ReleaseType) {
initThreadMaybe();
if (ReleaseType == ReleaseToOS::ForceAll)
@@ -847,10 +828,15 @@ public:
// for it, which then forces realloc to copy the usable size of a chunk as
// opposed to its actual size.
uptr getUsableSize(const void *Ptr) {
- initThreadMaybe();
if (UNLIKELY(!Ptr))
return 0;
+ return getAllocSize(Ptr);
+ }
+
+ uptr getAllocSize(const void *Ptr) {
+ initThreadMaybe();
+
#ifdef GWP_ASAN_HOOKS
if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
return GuardedAlloc.getSize(Ptr);
@@ -859,9 +845,11 @@ public:
Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
Chunk::UnpackedHeader Header;
Chunk::loadHeader(Cookie, Ptr, &Header);
- // Getting the usable size of a chunk only makes sense if it's allocated.
+
+ // Getting the alloc size of a chunk only makes sense if it's allocated.
if (UNLIKELY(Header.State != Chunk::State::Allocated))
reportInvalidChunkState(AllocatorAction::Sizing, const_cast<void *>(Ptr));
+
return getSize(Ptr, &Header);
}
@@ -887,13 +875,6 @@ public:
Header.State == Chunk::State::Allocated;
}
- void setRssLimitsTestOnly(int SoftRssLimitMb, int HardRssLimitMb,
- bool MayReturnNull) {
- RssChecker.init(SoftRssLimitMb, HardRssLimitMb);
- if (MayReturnNull)
- Primary.Options.set(OptionBit::MayReturnNull);
- }
-
bool useMemoryTaggingTestOnly() const {
return useMemoryTagging<Config>(Primary.Options.load());
}
@@ -913,7 +894,7 @@ public:
void setTrackAllocationStacks(bool Track) {
initThreadMaybe();
- if (getFlags()->allocation_ring_buffer_size == 0) {
+ if (getFlags()->allocation_ring_buffer_size <= 0) {
DCHECK(!Primary.Options.load().get(OptionBit::TrackAllocationStacks));
return;
}
@@ -955,8 +936,7 @@ public:
uptr getRingBufferSize() {
initThreadMaybe();
- auto *RingBuffer = getRingBuffer();
- return RingBuffer ? ringBufferSizeInBytes(RingBuffer->Size) : 0;
+ return RingBufferElements ? ringBufferSizeInBytes(RingBufferElements) : 0;
}
static bool setRingBufferSizeForBuffer(char *Buffer, size_t Size) {
@@ -986,8 +966,9 @@ public:
static void getErrorInfo(struct scudo_error_info *ErrorInfo,
uintptr_t FaultAddr, const char *DepotPtr,
const char *RegionInfoPtr, const char *RingBufferPtr,
- const char *Memory, const char *MemoryTags,
- uintptr_t MemoryAddr, size_t MemorySize) {
+ size_t RingBufferSize, const char *Memory,
+ const char *MemoryTags, uintptr_t MemoryAddr,
+ size_t MemorySize) {
*ErrorInfo = {};
if (!allocatorSupportsMemoryTagging<Config>() ||
MemoryAddr + MemorySize < MemoryAddr)
@@ -1006,7 +987,7 @@ public:
// Check the ring buffer. For primary allocations this will only find UAF;
// for secondary allocations we can find either UAF or OOB.
getRingBufferErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
- RingBufferPtr);
+ RingBufferPtr, RingBufferSize);
// Check for OOB in the 28 blocks surrounding the 3 we checked earlier.
// Beyond that we are likely to hit false positives.
@@ -1053,7 +1034,6 @@ private:
QuarantineT Quarantine;
TSDRegistryT TSDRegistry;
pthread_once_t PostInitNonce = PTHREAD_ONCE_INIT;
- RssLimitChecker RssChecker;
#ifdef GWP_ASAN_HOOKS
gwp_asan::GuardedPoolAllocator GuardedAlloc;
@@ -1073,13 +1053,14 @@ private:
};
atomic_uptr Pos;
- u32 Size;
// An array of Size (at least one) elements of type Entry is immediately
// following to this struct.
};
// Pointer to memory mapped area starting with AllocationRingBuffer struct,
// and immediately followed by Size elements of type Entry.
char *RawRingBuffer = {};
+ u32 RingBufferElements = 0;
+ MemMapT RawRingBufferMap;
// The following might get optimized out by the compiler.
NOINLINE void performSanityChecks() {
@@ -1134,35 +1115,34 @@ private:
reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
}
- void quarantineOrDeallocateChunk(Options Options, void *TaggedPtr,
+ void quarantineOrDeallocateChunk(const Options &Options, void *TaggedPtr,
Chunk::UnpackedHeader *Header,
uptr Size) NO_THREAD_SAFETY_ANALYSIS {
void *Ptr = getHeaderTaggedPointer(TaggedPtr);
- Chunk::UnpackedHeader NewHeader = *Header;
// If the quarantine is disabled, the actual size of a chunk is 0 or larger
// than the maximum allowed, we return a chunk directly to the backend.
// This purposefully underflows for Size == 0.
const bool BypassQuarantine = !Quarantine.getCacheSize() ||
((Size - 1) >= QuarantineMaxChunkSize) ||
- !NewHeader.ClassId;
+ !Header->ClassId;
if (BypassQuarantine)
- NewHeader.State = Chunk::State::Available;
+ Header->State = Chunk::State::Available;
else
- NewHeader.State = Chunk::State::Quarantined;
- NewHeader.OriginOrWasZeroed = useMemoryTagging<Config>(Options) &&
- NewHeader.ClassId &&
- !TSDRegistry.getDisableMemInit();
- Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
+ Header->State = Chunk::State::Quarantined;
+ Header->OriginOrWasZeroed = useMemoryTagging<Config>(Options) &&
+ Header->ClassId &&
+ !TSDRegistry.getDisableMemInit();
+ Chunk::storeHeader(Cookie, Ptr, Header);
if (UNLIKELY(useMemoryTagging<Config>(Options))) {
u8 PrevTag = extractTag(reinterpret_cast<uptr>(TaggedPtr));
storeDeallocationStackMaybe(Options, Ptr, PrevTag, Size);
- if (NewHeader.ClassId) {
+ if (Header->ClassId) {
if (!TSDRegistry.getDisableMemInit()) {
uptr TaggedBegin, TaggedEnd;
const uptr OddEvenMask = computeOddEvenMaskForPointerMaybe(
- Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, &NewHeader)),
- NewHeader.ClassId);
+ Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, Header)),
+ Header->ClassId);
// Exclude the previous tag so that immediate use after free is
// detected 100% of the time.
setRandomTag(Ptr, Size, OddEvenMask | (1UL << PrevTag), &TaggedBegin,
@@ -1173,11 +1153,12 @@ private:
if (BypassQuarantine) {
if (allocatorSupportsMemoryTagging<Config>())
Ptr = untagPointer(Ptr);
- void *BlockBegin = getBlockBegin(Ptr, &NewHeader);
- const uptr ClassId = NewHeader.ClassId;
+ void *BlockBegin = getBlockBegin(Ptr, Header);
+ const uptr ClassId = Header->ClassId;
if (LIKELY(ClassId)) {
bool UnlockRequired;
auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
+ TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
const bool CacheDrained =
TSD->getCache().deallocate(ClassId, BlockBegin);
if (UnlockRequired)
@@ -1197,6 +1178,7 @@ private:
} else {
bool UnlockRequired;
auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
+ TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
Quarantine.put(&TSD->getQuarantineCache(),
QuarantineCallback(*this, TSD->getCache()), Ptr, Size);
if (UnlockRequired)
@@ -1273,7 +1255,7 @@ private:
storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
}
- void storePrimaryAllocationStackMaybe(Options Options, void *Ptr) {
+ void storePrimaryAllocationStackMaybe(const Options &Options, void *Ptr) {
if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
return;
auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
@@ -1286,7 +1268,7 @@ private:
u32 DeallocationTid) {
uptr Pos = atomic_fetch_add(&getRingBuffer()->Pos, 1, memory_order_relaxed);
typename AllocationRingBuffer::Entry *Entry =
- getRingBufferEntry(RawRingBuffer, Pos % getRingBuffer()->Size);
+ getRingBufferEntry(RawRingBuffer, Pos % RingBufferElements);
// First invalidate our entry so that we don't attempt to interpret a
// partially written state in getSecondaryErrorInfo(). The fences below
@@ -1305,7 +1287,7 @@ private:
atomic_store_relaxed(&Entry->Ptr, reinterpret_cast<uptr>(Ptr));
}
- void storeSecondaryAllocationStackMaybe(Options Options, void *Ptr,
+ void storeSecondaryAllocationStackMaybe(const Options &Options, void *Ptr,
uptr Size) {
if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
return;
@@ -1320,8 +1302,8 @@ private:
storeRingBufferEntry(untagPointer(Ptr), Trace, Tid, Size, 0, 0);
}
- void storeDeallocationStackMaybe(Options Options, void *Ptr, u8 PrevTag,
- uptr Size) {
+ void storeDeallocationStackMaybe(const Options &Options, void *Ptr,
+ u8 PrevTag, uptr Size) {
if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
return;
@@ -1427,17 +1409,19 @@ private:
size_t &NextErrorReport,
uintptr_t FaultAddr,
const StackDepot *Depot,
- const char *RingBufferPtr) {
+ const char *RingBufferPtr,
+ size_t RingBufferSize) {
auto *RingBuffer =
reinterpret_cast<const AllocationRingBuffer *>(RingBufferPtr);
- if (!RingBuffer || RingBuffer->Size == 0)
+ size_t RingBufferElements = ringBufferElementsFromBytes(RingBufferSize);
+ if (!RingBuffer || RingBufferElements == 0)
return;
uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);
- for (uptr I = Pos - 1;
- I != Pos - 1 - RingBuffer->Size && NextErrorReport != NumErrorReports;
+ for (uptr I = Pos - 1; I != Pos - 1 - RingBufferElements &&
+ NextErrorReport != NumErrorReports;
--I) {
- auto *Entry = getRingBufferEntry(RingBufferPtr, I % RingBuffer->Size);
+ auto *Entry = getRingBufferEntry(RingBufferPtr, I % RingBufferElements);
uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr);
if (!EntryPtr)
continue;
@@ -1516,17 +1500,19 @@ private:
}
void mapAndInitializeRingBuffer() {
+ if (getFlags()->allocation_ring_buffer_size <= 0)
+ return;
u32 AllocationRingBufferSize =
static_cast<u32>(getFlags()->allocation_ring_buffer_size);
- if (AllocationRingBufferSize < 1)
- return;
- RawRingBuffer = static_cast<char *>(
- map(/*Addr=*/nullptr,
- roundUp(ringBufferSizeInBytes(AllocationRingBufferSize),
- getPageSizeCached()),
- "AllocatorRingBuffer"));
- auto *RingBuffer = reinterpret_cast<AllocationRingBuffer *>(RawRingBuffer);
- RingBuffer->Size = AllocationRingBufferSize;
+ MemMapT MemMap;
+ MemMap.map(
+ /*Addr=*/0U,
+ roundUp(ringBufferSizeInBytes(AllocationRingBufferSize),
+ getPageSizeCached()),
+ "scudo:ring_buffer");
+ RawRingBuffer = reinterpret_cast<char *>(MemMap.getBase());
+ RawRingBufferMap = MemMap;
+ RingBufferElements = AllocationRingBufferSize;
static_assert(sizeof(AllocationRingBuffer) %
alignof(typename AllocationRingBuffer::Entry) ==
0,
@@ -1534,14 +1520,25 @@ private:
}
void unmapRingBuffer() {
- unmap(RawRingBuffer, roundUp(getRingBufferSize(), getPageSizeCached()));
+ auto *RingBuffer = getRingBuffer();
+ if (RingBuffer != nullptr) {
+ RawRingBufferMap.unmap(RawRingBufferMap.getBase(),
+ RawRingBufferMap.getCapacity());
+ }
RawRingBuffer = nullptr;
}
- static constexpr size_t ringBufferSizeInBytes(u32 AllocationRingBufferSize) {
+ static constexpr size_t ringBufferSizeInBytes(u32 RingBufferElements) {
return sizeof(AllocationRingBuffer) +
- AllocationRingBufferSize *
- sizeof(typename AllocationRingBuffer::Entry);
+ RingBufferElements * sizeof(typename AllocationRingBuffer::Entry);
+ }
+
+ static constexpr size_t ringBufferElementsFromBytes(size_t Bytes) {
+ if (Bytes < sizeof(AllocationRingBuffer)) {
+ return 0;
+ }
+ return (Bytes - sizeof(AllocationRingBuffer)) /
+ sizeof(typename AllocationRingBuffer::Entry);
}
inline AllocationRingBuffer *getRingBuffer() {
diff --git a/compiler-rt/lib/scudo/standalone/common.cpp b/compiler-rt/lib/scudo/standalone/common.cpp
index 9f14faeef283..06e930638f6f 100644
--- a/compiler-rt/lib/scudo/standalone/common.cpp
+++ b/compiler-rt/lib/scudo/standalone/common.cpp
@@ -21,22 +21,4 @@ uptr getPageSizeSlow() {
return PageSizeCached;
}
-// Fatal internal map() or unmap() error (potentially OOM related).
-void NORETURN dieOnMapUnmapError(uptr SizeIfOOM) {
- char Error[128] = "Scudo ERROR: internal map or unmap failure\n";
- if (SizeIfOOM) {
- formatString(
- Error, sizeof(Error),
- "Scudo ERROR: internal map failure (NO MEMORY) requesting %zuKB\n",
- SizeIfOOM >> 10);
- }
- outputRaw(Error);
- setAbortMessage(Error);
- die();
-}
-
-#if !SCUDO_LINUX
-uptr GetRSS() { return 0; }
-#endif
-
} // namespace scudo
diff --git a/compiler-rt/lib/scudo/standalone/common.h b/compiler-rt/lib/scudo/standalone/common.h
index 82e6cf4aee61..3581c946d160 100644
--- a/compiler-rt/lib/scudo/standalone/common.h
+++ b/compiler-rt/lib/scudo/standalone/common.h
@@ -17,6 +17,7 @@
#include <stddef.h>
#include <string.h>
+#include <unistd.h>
namespace scudo {
@@ -111,29 +112,15 @@ template <typename T> inline void shuffle(T *A, u32 N, u32 *RandState) {
*RandState = State;
}
-// Hardware specific inlinable functions.
-
-inline void yieldProcessor(UNUSED u8 Count) {
-#if defined(__i386__) || defined(__x86_64__)
- __asm__ __volatile__("" ::: "memory");
- for (u8 I = 0; I < Count; I++)
- __asm__ __volatile__("pause");
-#elif defined(__aarch64__) || defined(__arm__)
- __asm__ __volatile__("" ::: "memory");
- for (u8 I = 0; I < Count; I++)
- __asm__ __volatile__("yield");
-#endif
- __asm__ __volatile__("" ::: "memory");
-}
-
// Platform specific functions.
extern uptr PageSizeCached;
uptr getPageSizeSlow();
inline uptr getPageSizeCached() {
- // Bionic uses a hardcoded value.
- if (SCUDO_ANDROID)
- return 4096U;
+#if SCUDO_ANDROID && defined(PAGE_SIZE)
+ // Most Android builds have a build-time constant page size.
+ return PAGE_SIZE;
+#endif
if (LIKELY(PageSizeCached))
return PageSizeCached;
return getPageSizeSlow();
@@ -144,8 +131,6 @@ u32 getNumberOfCPUs();
const char *getEnv(const char *Name);
-uptr GetRSS();
-
u64 getMonotonicTime();
// Gets the time faster but with less accuracy. Can call getMonotonicTime
// if no fast version is available.
@@ -190,10 +175,6 @@ void setMemoryPermission(uptr Addr, uptr Size, uptr Flags,
void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
MapPlatformData *Data = nullptr);
-// Internal map & unmap fatal error. This must not call map(). SizeIfOOM shall
-// hold the requested size on an out-of-memory error, 0 otherwise.
-void NORETURN dieOnMapUnmapError(uptr SizeIfOOM = 0);
-
// Logging related functions.
void setAbortMessage(const char *Message);
diff --git a/compiler-rt/lib/scudo/standalone/condition_variable.h b/compiler-rt/lib/scudo/standalone/condition_variable.h
new file mode 100644
index 000000000000..549f6e9f787b
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/condition_variable.h
@@ -0,0 +1,60 @@
+//===-- condition_variable.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CONDITION_VARIABLE_H_
+#define SCUDO_CONDITION_VARIABLE_H_
+
+#include "condition_variable_base.h"
+
+#include "common.h"
+#include "platform.h"
+
+#include "condition_variable_linux.h"
+
+namespace scudo {
+
+// A default implementation of the condition variable. It doesn't do a real
+// `wait`; instead, it only spins for a short amount of time.
+class ConditionVariableDummy
+ : public ConditionVariableBase<ConditionVariableDummy> {
+public:
+ void notifyAllImpl(UNUSED HybridMutex &M) REQUIRES(M) {}
+
+ void waitImpl(UNUSED HybridMutex &M) REQUIRES(M) {
+ M.unlock();
+
+ constexpr u32 SpinTimes = 64;
+ volatile u32 V = 0;
+ for (u32 I = 0; I < SpinTimes; ++I) {
+ u32 Tmp = V + 1;
+ V = Tmp;
+ }
+
+ M.lock();
+ }
+};
+
+template <typename Config, typename = const bool>
+struct ConditionVariableState {
+ static constexpr bool enabled() { return false; }
+ // This is only used for compilation purposes so that we won't end up having
+ // many conditional compilations. If you want to use `ConditionVariableDummy`,
+ // define `ConditionVariableT` in your allocator configuration. See
+ // allocator_config.h for more details.
+ using ConditionVariableT = ConditionVariableDummy;
+};
+
+template <typename Config>
+struct ConditionVariableState<Config, decltype(Config::UseConditionVariable)> {
+ static constexpr bool enabled() { return true; }
+ using ConditionVariableT = typename Config::ConditionVariableT;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_CONDITION_VARIABLE_H_
diff --git a/compiler-rt/lib/scudo/standalone/condition_variable_base.h b/compiler-rt/lib/scudo/standalone/condition_variable_base.h
new file mode 100644
index 000000000000..416c327fed49
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/condition_variable_base.h
@@ -0,0 +1,56 @@
+//===-- condition_variable_base.h -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CONDITION_VARIABLE_BASE_H_
+#define SCUDO_CONDITION_VARIABLE_BASE_H_
+
+#include "mutex.h"
+#include "thread_annotations.h"
+
+namespace scudo {
+
+template <typename Derived> class ConditionVariableBase {
+public:
+ constexpr ConditionVariableBase() = default;
+
+ void bindTestOnly(HybridMutex &Mutex) {
+#if SCUDO_DEBUG
+ boundMutex = &Mutex;
+#else
+ (void)Mutex;
+#endif
+ }
+
+ void notifyAll(HybridMutex &M) REQUIRES(M) {
+#if SCUDO_DEBUG
+ CHECK_EQ(&M, boundMutex);
+#endif
+ getDerived()->notifyAllImpl(M);
+ }
+
+ void wait(HybridMutex &M) REQUIRES(M) {
+#if SCUDO_DEBUG
+ CHECK_EQ(&M, boundMutex);
+#endif
+ getDerived()->waitImpl(M);
+ }
+
+protected:
+ Derived *getDerived() { return static_cast<Derived *>(this); }
+
+#if SCUDO_DEBUG
+ // Because thread-safety analysis doesn't support pointer aliasing, we are not
+ // able to mark the proper annotations without false positives. Instead, we
+ // pass the lock and do the same-lock check separately.
+ HybridMutex *boundMutex = nullptr;
+#endif
+};
+
+} // namespace scudo
+
+#endif // SCUDO_CONDITION_VARIABLE_BASE_H_
diff --git a/compiler-rt/lib/scudo/standalone/condition_variable_linux.cpp b/compiler-rt/lib/scudo/standalone/condition_variable_linux.cpp
new file mode 100644
index 000000000000..e6d9bd1771a4
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/condition_variable_linux.cpp
@@ -0,0 +1,52 @@
+//===-- condition_variable_linux.cpp ----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+#if SCUDO_LINUX
+
+#include "condition_variable_linux.h"
+
+#include "atomic_helpers.h"
+
+#include <limits.h>
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+namespace scudo {
+
+void ConditionVariableLinux::notifyAllImpl(UNUSED HybridMutex &M) {
+ const u32 V = atomic_load_relaxed(&Counter);
+ atomic_store_relaxed(&Counter, V + 1);
+
+ // TODO(chiahungduan): Move the waiters from the futex waiting queue
+ // `Counter` to the futex waiting queue `M` so that the awoken threads won't
+ // be blocked again because `M` is still locked by the current thread.
+ if (LastNotifyAll != V) {
+ syscall(SYS_futex, reinterpret_cast<uptr>(&Counter), FUTEX_WAKE_PRIVATE,
+ INT_MAX, nullptr, nullptr, 0);
+ }
+
+ LastNotifyAll = V + 1;
+}
+
+void ConditionVariableLinux::waitImpl(HybridMutex &M) {
+ const u32 V = atomic_load_relaxed(&Counter) + 1;
+ atomic_store_relaxed(&Counter, V);
+
+ // TODO: Use ScopedUnlock when it's supported.
+ M.unlock();
+ syscall(SYS_futex, reinterpret_cast<uptr>(&Counter), FUTEX_WAIT_PRIVATE, V,
+ nullptr, nullptr, 0);
+ M.lock();
+}
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX
diff --git a/compiler-rt/lib/scudo/standalone/condition_variable_linux.h b/compiler-rt/lib/scudo/standalone/condition_variable_linux.h
new file mode 100644
index 000000000000..cd073287326d
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/condition_variable_linux.h
@@ -0,0 +1,38 @@
+//===-- condition_variable_linux.h ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CONDITION_VARIABLE_LINUX_H_
+#define SCUDO_CONDITION_VARIABLE_LINUX_H_
+
+#include "platform.h"
+
+#if SCUDO_LINUX
+
+#include "atomic_helpers.h"
+#include "condition_variable_base.h"
+#include "thread_annotations.h"
+
+namespace scudo {
+
+class ConditionVariableLinux
+ : public ConditionVariableBase<ConditionVariableLinux> {
+public:
+ void notifyAllImpl(HybridMutex &M) REQUIRES(M);
+
+ void waitImpl(HybridMutex &M) REQUIRES(M);
+
+private:
+ u32 LastNotifyAll = 0;
+ atomic_u32 Counter = {};
+};
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX
+
+#endif // SCUDO_CONDITION_VARIABLE_LINUX_H_
diff --git a/compiler-rt/lib/scudo/standalone/flags.cpp b/compiler-rt/lib/scudo/standalone/flags.cpp
index de5153b288b1..f498edfbd326 100644
--- a/compiler-rt/lib/scudo/standalone/flags.cpp
+++ b/compiler-rt/lib/scudo/standalone/flags.cpp
@@ -68,6 +68,9 @@ void initFlags() {
Parser.parseString(getCompileDefinitionScudoDefaultOptions());
Parser.parseString(getScudoDefaultOptions());
Parser.parseString(getEnv("SCUDO_OPTIONS"));
+ if (const char *V = getEnv("SCUDO_ALLOCATION_RING_BUFFER_SIZE")) {
+ Parser.parseStringPair("allocation_ring_buffer_size", V);
+ }
}
} // namespace scudo
diff --git a/compiler-rt/lib/scudo/standalone/flags.inc b/compiler-rt/lib/scudo/standalone/flags.inc
index c1f153bafdd9..f5a2bab5057a 100644
--- a/compiler-rt/lib/scudo/standalone/flags.inc
+++ b/compiler-rt/lib/scudo/standalone/flags.inc
@@ -46,14 +46,6 @@ SCUDO_FLAG(int, release_to_os_interval_ms, SCUDO_ANDROID ? INT32_MIN : 5000,
"Interval (in milliseconds) at which to attempt release of unused "
"memory to the OS. Negative values disable the feature.")
-SCUDO_FLAG(int, hard_rss_limit_mb, 0,
- "Hard RSS Limit in Mb. If non-zero, once the limit is achieved, "
- "abort the process")
-
-SCUDO_FLAG(int, soft_rss_limit_mb, 0,
- "Soft RSS Limit in Mb. If non-zero, once the limit is reached, all "
- "subsequent calls will fail or return NULL until the RSS goes below "
- "the soft limit")
-
SCUDO_FLAG(int, allocation_ring_buffer_size, 32768,
- "Entries to keep in the allocation ring buffer for scudo.")
+ "Entries to keep in the allocation ring buffer for scudo. "
+ "Values less or equal to zero disable the buffer.")
diff --git a/compiler-rt/lib/scudo/standalone/flags_parser.cpp b/compiler-rt/lib/scudo/standalone/flags_parser.cpp
index be39fcd4f887..6f9b23ea90e2 100644
--- a/compiler-rt/lib/scudo/standalone/flags_parser.cpp
+++ b/compiler-rt/lib/scudo/standalone/flags_parser.cpp
@@ -10,6 +10,7 @@
#include "common.h"
#include "report.h"
+#include <limits.h>
#include <stdlib.h>
#include <string.h>
@@ -80,7 +81,7 @@ void FlagParser::parseFlag() {
++Pos;
Value = Buffer + ValueStart;
}
- if (!runHandler(Name, Value))
+ if (!runHandler(Name, Value, '='))
reportError("flag parsing failed.");
}
@@ -122,10 +123,16 @@ inline bool parseBool(const char *Value, bool *b) {
return false;
}
-bool FlagParser::runHandler(const char *Name, const char *Value) {
+void FlagParser::parseStringPair(const char *Name, const char *Value) {
+ if (!runHandler(Name, Value, '\0'))
+ reportError("flag parsing failed.");
+}
+
+bool FlagParser::runHandler(const char *Name, const char *Value,
+ const char Sep) {
for (u32 I = 0; I < NumberOfFlags; ++I) {
const uptr Len = strlen(Flags[I].Name);
- if (strncmp(Name, Flags[I].Name, Len) != 0 || Name[Len] != '=')
+ if (strncmp(Name, Flags[I].Name, Len) != 0 || Name[Len] != Sep)
continue;
bool Ok = false;
switch (Flags[I].Type) {
@@ -136,8 +143,15 @@ bool FlagParser::runHandler(const char *Name, const char *Value) {
break;
case FlagType::FT_int:
char *ValueEnd;
- *reinterpret_cast<int *>(Flags[I].Var) =
- static_cast<int>(strtol(Value, &ValueEnd, 10));
+ long V = strtol(Value, &ValueEnd, 10);
+ // strtol returns LONG_MAX on overflow and LONG_MIN on underflow.
+ // This is why we compare-equal here (and lose INT_MIN and INT_MAX as a
+ // value, but that's okay)
+ if (V >= INT_MAX || V <= INT_MIN) {
+ reportInvalidFlag("int", Value);
+ return false;
+ }
+ *reinterpret_cast<int *>(Flags[I].Var) = static_cast<int>(V);
Ok =
*ValueEnd == '"' || *ValueEnd == '\'' || isSeparatorOrNull(*ValueEnd);
if (!Ok)
diff --git a/compiler-rt/lib/scudo/standalone/flags_parser.h b/compiler-rt/lib/scudo/standalone/flags_parser.h
index ba832adbd909..ded496fda3b9 100644
--- a/compiler-rt/lib/scudo/standalone/flags_parser.h
+++ b/compiler-rt/lib/scudo/standalone/flags_parser.h
@@ -27,6 +27,7 @@ public:
void *Var);
void parseString(const char *S);
void printFlagDescriptions();
+ void parseStringPair(const char *Name, const char *Value);
private:
static const u32 MaxFlags = 20;
@@ -45,7 +46,7 @@ private:
void skipWhitespace();
void parseFlags();
void parseFlag();
- bool runHandler(const char *Name, const char *Value);
+ bool runHandler(const char *Name, const char *Value, char Sep);
};
void reportUnrecognizedFlags();
diff --git a/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp b/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
index 74456450a476..5b01ebe11c09 100644
--- a/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
+++ b/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
@@ -46,14 +46,11 @@ extern "C" int LLVMFuzzerTestOneInput(uint8_t *Data, size_t Size) {
}
std::string RingBufferBytes = FDP.ConsumeRemainingBytesAsString();
- // RingBuffer is too short.
- if (!AllocatorT::setRingBufferSizeForBuffer(RingBufferBytes.data(),
- RingBufferBytes.size()))
- return 0;
scudo_error_info ErrorInfo;
AllocatorT::getErrorInfo(&ErrorInfo, FaultAddr, StackDepot.data(),
- RegionInfo.data(), RingBufferBytes.data(), Memory,
- MemoryTags, MemoryAddr, MemorySize);
+ RegionInfo.data(), RingBufferBytes.data(),
+ RingBufferBytes.size(), Memory, MemoryTags,
+ MemoryAddr, MemorySize);
return 0;
}
diff --git a/compiler-rt/lib/scudo/standalone/include/scudo/interface.h b/compiler-rt/lib/scudo/standalone/include/scudo/interface.h
index 6c0c521f8d82..a2dedea910cc 100644
--- a/compiler-rt/lib/scudo/standalone/include/scudo/interface.h
+++ b/compiler-rt/lib/scudo/standalone/include/scudo/interface.h
@@ -17,10 +17,22 @@ extern "C" {
__attribute__((weak)) const char *__scudo_default_options(void);
// Post-allocation & pre-deallocation hooks.
-// They must be thread-safe and not use heap related functions.
__attribute__((weak)) void __scudo_allocate_hook(void *ptr, size_t size);
__attribute__((weak)) void __scudo_deallocate_hook(void *ptr);
+// `realloc` involves both deallocation and allocation but they are not reported
+// atomically. In one specific case, a profiler may keep taking snapshots and
+// catch one right in the middle of `realloc`, between the report of the
+// deallocation and that of the allocation, which may confuse the user by
+// making the memory from `realloc` appear missing. To alleviate that case,
+// define the two `realloc` hooks below to learn that the two hook calls are
+// bundled. These hooks are optional and should only be used when a hooks user
+//
+// See more details in the comment of `realloc` in wrapper_c.inc.
+__attribute__((weak)) void
+__scudo_realloc_allocate_hook(void *old_ptr, void *new_ptr, size_t size);
+__attribute__((weak)) void __scudo_realloc_deallocate_hook(void *old_ptr);
+
void __scudo_print_stats(void);
typedef void (*iterate_callback)(uintptr_t base, size_t size, void *arg);
@@ -73,7 +85,8 @@ typedef void (*iterate_callback)(uintptr_t base, size_t size, void *arg);
// pointer.
void __scudo_get_error_info(struct scudo_error_info *error_info,
uintptr_t fault_addr, const char *stack_depot,
- const char *region_info, const char *ring_buffer,
+ size_t stack_depot_size, const char *region_info,
+ const char *ring_buffer, size_t ring_buffer_size,
const char *memory, const char *memory_tags,
uintptr_t memory_addr, size_t memory_size);
diff --git a/compiler-rt/lib/scudo/standalone/linux.cpp b/compiler-rt/lib/scudo/standalone/linux.cpp
index e285d8a3d2d2..274695108109 100644
--- a/compiler-rt/lib/scudo/standalone/linux.cpp
+++ b/compiler-rt/lib/scudo/standalone/linux.cpp
@@ -14,6 +14,7 @@
#include "internal_defs.h"
#include "linux.h"
#include "mutex.h"
+#include "report_linux.h"
#include "string_utils.h"
#include <errno.h>
@@ -43,6 +44,7 @@ uptr getPageSize() { return static_cast<uptr>(sysconf(_SC_PAGESIZE)); }
void NORETURN die() { abort(); }
+// TODO: Will be deprecated. Use the interfaces in MemMapLinux instead.
void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
UNUSED MapPlatformData *Data) {
int MmapFlags = MAP_PRIVATE | MAP_ANONYMOUS;
@@ -65,7 +67,7 @@ void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
void *P = mmap(Addr, Size, MmapProt, MmapFlags, -1, 0);
if (P == MAP_FAILED) {
if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
- dieOnMapUnmapError(errno == ENOMEM ? Size : 0);
+ reportMapError(errno == ENOMEM ? Size : 0);
return nullptr;
}
#if SCUDO_ANDROID
@@ -75,19 +77,22 @@ void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
return P;
}
+// TODO: Will be deprecated. Use the interfaces in MemMapLinux instead.
void unmap(void *Addr, uptr Size, UNUSED uptr Flags,
UNUSED MapPlatformData *Data) {
if (munmap(Addr, Size) != 0)
- dieOnMapUnmapError();
+ reportUnmapError(reinterpret_cast<uptr>(Addr), Size);
}
+// TODO: Will be deprecated. Use the interfaces in MemMapLinux instead.
void setMemoryPermission(uptr Addr, uptr Size, uptr Flags,
UNUSED MapPlatformData *Data) {
int Prot = (Flags & MAP_NOACCESS) ? PROT_NONE : (PROT_READ | PROT_WRITE);
if (mprotect(reinterpret_cast<void *>(Addr), Size, Prot) != 0)
- dieOnMapUnmapError();
+ reportProtectError(Addr, Size, Prot);
}
+// TODO: Will be deprecated. Use the interfaces in MemMapLinux instead.
void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
UNUSED MapPlatformData *Data) {
void *Addr = reinterpret_cast<void *>(BaseAddress + Offset);
@@ -104,12 +109,14 @@ enum State : u32 { Unlocked = 0, Locked = 1, Sleeping = 2 };
}
bool HybridMutex::tryLock() {
- return atomic_compare_exchange(&M, Unlocked, Locked) == Unlocked;
+ return atomic_compare_exchange_strong(&M, Unlocked, Locked,
+ memory_order_acquire) == Unlocked;
}
// The following is based on https://akkadia.org/drepper/futex.pdf.
void HybridMutex::lockSlow() {
- u32 V = atomic_compare_exchange(&M, Unlocked, Locked);
+ u32 V = atomic_compare_exchange_strong(&M, Unlocked, Locked,
+ memory_order_acquire);
if (V == Unlocked)
return;
if (V != Sleeping)
@@ -197,39 +204,6 @@ bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) {
extern "C" WEAK int async_safe_write_log(int pri, const char *tag,
const char *msg);
-static uptr GetRSSFromBuffer(const char *Buf) {
- // The format of the file is:
- // 1084 89 69 11 0 79 0
- // We need the second number which is RSS in pages.
- const char *Pos = Buf;
- // Skip the first number.
- while (*Pos >= '0' && *Pos <= '9')
- Pos++;
- // Skip whitespaces.
- while (!(*Pos >= '0' && *Pos <= '9') && *Pos != 0)
- Pos++;
- // Read the number.
- u64 Rss = 0;
- for (; *Pos >= '0' && *Pos <= '9'; Pos++)
- Rss = Rss * 10 + static_cast<u64>(*Pos) - '0';
- return static_cast<uptr>(Rss * getPageSizeCached());
-}
-
-uptr GetRSS() {
- // TODO: We currently use sanitizer_common's GetRSS which reads the
- // RSS from /proc/self/statm by default. We might want to
- // call getrusage directly, even if it's less accurate.
- auto Fd = open("/proc/self/statm", O_RDONLY);
- char Buf[64];
- s64 Len = read(Fd, Buf, sizeof(Buf) - 1);
- close(Fd);
- if (Len <= 0)
- return 0;
- Buf[Len] = 0;
-
- return GetRSSFromBuffer(Buf);
-}
-
void outputRaw(const char *Buffer) {
if (&async_safe_write_log) {
constexpr s32 AndroidLogInfo = 4;
diff --git a/compiler-rt/lib/scudo/standalone/local_cache.h b/compiler-rt/lib/scudo/standalone/local_cache.h
index 1095eb5f186d..46d6affdc033 100644
--- a/compiler-rt/lib/scudo/standalone/local_cache.h
+++ b/compiler-rt/lib/scudo/standalone/local_cache.h
@@ -22,80 +22,13 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;
- struct TransferBatch {
- static const u16 MaxNumCached = SizeClassMap::MaxNumCachedHint;
- void setFromArray(CompactPtrT *Array, u16 N) {
- DCHECK_LE(N, MaxNumCached);
- Count = N;
- memcpy(Batch, Array, sizeof(Batch[0]) * Count);
- }
- void appendFromArray(CompactPtrT *Array, u16 N) {
- DCHECK_LE(N, MaxNumCached - Count);
- memcpy(Batch + Count, Array, sizeof(Batch[0]) * N);
- // u16 will be promoted to int by arithmetic type conversion.
- Count = static_cast<u16>(Count + N);
- }
- void appendFromTransferBatch(TransferBatch *B, u16 N) {
- DCHECK_LE(N, MaxNumCached - Count);
- DCHECK_GE(B->Count, N);
- // Append from the back of `B`.
- memcpy(Batch + Count, B->Batch + (B->Count - N), sizeof(Batch[0]) * N);
- // u16 will be promoted to int by arithmetic type conversion.
- Count = static_cast<u16>(Count + N);
- B->Count = static_cast<u16>(B->Count - N);
- }
- void clear() { Count = 0; }
- void add(CompactPtrT P) {
- DCHECK_LT(Count, MaxNumCached);
- Batch[Count++] = P;
- }
- void copyToArray(CompactPtrT *Array) const {
- memcpy(Array, Batch, sizeof(Batch[0]) * Count);
- }
- u16 getCount() const { return Count; }
- bool isEmpty() const { return Count == 0U; }
- CompactPtrT get(u16 I) const {
- DCHECK_LE(I, Count);
- return Batch[I];
- }
- static u16 getMaxCached(uptr Size) {
- return Min(MaxNumCached, SizeClassMap::getMaxCachedHint(Size));
- }
- TransferBatch *Next;
-
- private:
- CompactPtrT Batch[MaxNumCached];
- u16 Count;
- };
-
- // A BatchGroup is used to collect blocks. Each group has a group id to
- // identify the group kind of contained blocks.
- struct BatchGroup {
- // `Next` is used by IntrusiveList.
- BatchGroup *Next;
- // The compact base address of each group
- uptr CompactPtrGroupBase;
- // Cache value of TransferBatch::getMaxCached()
- u16 MaxCachedPerBatch;
- // Number of blocks pushed into this group. This is an increment-only
- // counter.
- uptr PushedBlocks;
- // This is used to track how many bytes are not in-use since last time we
- // tried to release pages.
- uptr BytesInBGAtLastCheckpoint;
- // Blocks are managed by TransferBatch in a list.
- SinglyLinkedList<TransferBatch> Batches;
- };
-
- static_assert(sizeof(BatchGroup) <= sizeof(TransferBatch),
- "BatchGroup uses the same class size as TransferBatch");
-
void init(GlobalStats *S, SizeClassAllocator *A) {
DCHECK(isEmpty());
Stats.init();
if (LIKELY(S))
S->link(&Stats);
Allocator = A;
+ initCache();
}
void destroy(GlobalStats *S) {
@@ -108,7 +41,9 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
DCHECK_LT(ClassId, NumClasses);
PerClass *C = &PerClassArray[ClassId];
if (C->Count == 0) {
- if (UNLIKELY(!refill(C, ClassId)))
+ // Refill half of the number of max cached.
+ DCHECK_GT(C->MaxCount / 2, 0U);
+ if (UNLIKELY(!refill(C, ClassId, C->MaxCount / 2)))
return nullptr;
DCHECK_GT(C->Count, 0);
}
@@ -125,9 +60,6 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
bool deallocate(uptr ClassId, void *P) {
CHECK_LT(ClassId, NumClasses);
PerClass *C = &PerClassArray[ClassId];
- // We still have to initialize the cache in the event that the first heap
- // operation in a thread is a deallocation.
- initCacheMaybe(C);
// If the cache is full, drain half of blocks back to the main allocator.
const bool NeedToDrainCache = C->Count == C->MaxCount;
@@ -151,7 +83,7 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
}
void drain() {
- // Drain BatchClassId last as createBatch can refill it.
+ // Drain BatchClassId last as it may be needed while draining normal blocks.
for (uptr I = 0; I < NumClasses; ++I) {
if (I == BatchClassId)
continue;
@@ -163,19 +95,11 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
DCHECK(isEmpty());
}
- TransferBatch *createBatch(uptr ClassId, void *B) {
- if (ClassId != BatchClassId)
- B = allocate(BatchClassId);
+ void *getBatchClassBlock() {
+ void *B = allocate(BatchClassId);
if (UNLIKELY(!B))
reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
- return reinterpret_cast<TransferBatch *>(B);
- }
-
- BatchGroup *createGroup() {
- void *Ptr = allocate(BatchClassId);
- if (UNLIKELY(!Ptr))
- reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
- return reinterpret_cast<BatchGroup *>(Ptr);
+ return B;
}
LocalStats &getStats() { return Stats; }
@@ -203,6 +127,11 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
Str->append(" No block is cached.\n");
}
+ static u16 getMaxCached(uptr Size) {
+ return Min(SizeClassMap::MaxNumCachedHint,
+ SizeClassMap::getMaxCachedHint(Size));
+ }
+
private:
static const uptr NumClasses = SizeClassMap::NumClasses;
static const uptr BatchClassId = SizeClassMap::BatchClassId;
@@ -211,24 +140,17 @@ private:
u16 MaxCount;
// Note: ClassSize is zero for the transfer batch.
uptr ClassSize;
- CompactPtrT Chunks[2 * TransferBatch::MaxNumCached];
+ CompactPtrT Chunks[2 * SizeClassMap::MaxNumCachedHint];
};
PerClass PerClassArray[NumClasses] = {};
LocalStats Stats;
SizeClassAllocator *Allocator = nullptr;
- ALWAYS_INLINE void initCacheMaybe(PerClass *C) {
- if (LIKELY(C->MaxCount))
- return;
- initCache();
- DCHECK_NE(C->MaxCount, 0U);
- }
-
NOINLINE void initCache() {
for (uptr I = 0; I < NumClasses; I++) {
PerClass *P = &PerClassArray[I];
const uptr Size = SizeClassAllocator::getSizeByClassId(I);
- P->MaxCount = static_cast<u16>(2 * TransferBatch::getMaxCached(Size));
+ P->MaxCount = static_cast<u16>(2 * getMaxCached(Size));
if (I != BatchClassId) {
P->ClassSize = Size;
} else {
@@ -244,17 +166,12 @@ private:
deallocate(BatchClassId, B);
}
- NOINLINE bool refill(PerClass *C, uptr ClassId) {
- initCacheMaybe(C);
- TransferBatch *B = Allocator->popBatch(this, ClassId);
- if (UNLIKELY(!B))
- return false;
- DCHECK_GT(B->getCount(), 0);
- C->Count = B->getCount();
- B->copyToArray(C->Chunks);
- B->clear();
- destroyBatch(ClassId, B);
- return true;
+ NOINLINE bool refill(PerClass *C, uptr ClassId, u16 MaxRefill) {
+ const u16 NumBlocksRefilled =
+ Allocator->popBlocks(this, ClassId, C->Chunks, MaxRefill);
+ DCHECK_LE(NumBlocksRefilled, MaxRefill);
+ C->Count = static_cast<u16>(C->Count + NumBlocksRefilled);
+ return NumBlocksRefilled != 0;
}
NOINLINE void drain(PerClass *C, uptr ClassId) {
diff --git a/compiler-rt/lib/scudo/standalone/mem_map.h b/compiler-rt/lib/scudo/standalone/mem_map.h
index 409e4dbbe04b..b92216cf271d 100644
--- a/compiler-rt/lib/scudo/standalone/mem_map.h
+++ b/compiler-rt/lib/scudo/standalone/mem_map.h
@@ -22,6 +22,7 @@
#include "trusty.h"
#include "mem_map_fuchsia.h"
+#include "mem_map_linux.h"
namespace scudo {
@@ -73,10 +74,10 @@ private:
};
#if SCUDO_LINUX
-using ReservedMemoryT = ReservedMemoryDefault;
+using ReservedMemoryT = ReservedMemoryLinux;
using MemMapT = ReservedMemoryT::MemMapT;
#elif SCUDO_FUCHSIA
-using ReservedMemoryT = ReservedMemoryDefault;
+using ReservedMemoryT = ReservedMemoryFuchsia;
using MemMapT = ReservedMemoryT::MemMapT;
#elif SCUDO_TRUSTY
using ReservedMemoryT = ReservedMemoryDefault;
diff --git a/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp b/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp
index 9ace1fef7ad4..0566ab065526 100644
--- a/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp
+++ b/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp
@@ -41,7 +41,7 @@ static void setVmoName(zx_handle_t Vmo, const char *Name) {
static uptr getRootVmarBase() {
static atomic_uptr CachedResult = {0};
- uptr Result = atomic_load_relaxed(&CachedResult);
+ uptr Result = atomic_load(&CachedResult, memory_order_acquire);
if (UNLIKELY(!Result)) {
zx_info_vmar_t VmarInfo;
zx_status_t Status =
@@ -50,7 +50,7 @@ static uptr getRootVmarBase() {
CHECK_EQ(Status, ZX_OK);
CHECK_NE(VmarInfo.base, 0);
- atomic_store_relaxed(&CachedResult, VmarInfo.base);
+ atomic_store(&CachedResult, VmarInfo.base, memory_order_release);
Result = VmarInfo.base;
}
@@ -61,7 +61,7 @@ static uptr getRootVmarBase() {
static zx_handle_t getPlaceholderVmo() {
static atomic_u32 StoredVmo = {ZX_HANDLE_INVALID};
- zx_handle_t Vmo = atomic_load_relaxed(&StoredVmo);
+ zx_handle_t Vmo = atomic_load(&StoredVmo, memory_order_acquire);
if (UNLIKELY(Vmo == ZX_HANDLE_INVALID)) {
// Create a zero-sized placeholder VMO.
zx_status_t Status = _zx_vmo_create(0, 0, &Vmo);
@@ -72,9 +72,9 @@ static zx_handle_t getPlaceholderVmo() {
// Atomically store its handle. If some other thread wins the race, use its
// handle and discard ours.
- zx_handle_t OldValue =
- atomic_compare_exchange(&StoredVmo, ZX_HANDLE_INVALID, Vmo);
- if (OldValue != ZX_HANDLE_INVALID) {
+ zx_handle_t OldValue = atomic_compare_exchange_strong(
+ &StoredVmo, ZX_HANDLE_INVALID, Vmo, memory_order_acq_rel);
+ if (UNLIKELY(OldValue != ZX_HANDLE_INVALID)) {
Status = _zx_handle_close(Vmo);
CHECK_EQ(Status, ZX_OK);
diff --git a/compiler-rt/lib/scudo/standalone/mem_map_linux.cpp b/compiler-rt/lib/scudo/standalone/mem_map_linux.cpp
new file mode 100644
index 000000000000..783c4f0d9ab0
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/mem_map_linux.cpp
@@ -0,0 +1,153 @@
+//===-- mem_map_linux.cpp ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+#if SCUDO_LINUX
+
+#include "mem_map_linux.h"
+
+#include "common.h"
+#include "internal_defs.h"
+#include "linux.h"
+#include "mutex.h"
+#include "report_linux.h"
+#include "string_utils.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/futex.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <time.h>
+#include <unistd.h>
+
+#if SCUDO_ANDROID
+// TODO(chiahungduan): Review if we still need the following macros.
+#include <sys/prctl.h>
+// Definitions of prctl arguments to set a vma name in Android kernels.
+#define ANDROID_PR_SET_VMA 0x53564d41
+#define ANDROID_PR_SET_VMA_ANON_NAME 0
+#endif
+
+namespace scudo {
+
+static void *mmapWrapper(uptr Addr, uptr Size, const char *Name, uptr Flags) {
+ int MmapFlags = MAP_PRIVATE | MAP_ANONYMOUS;
+ int MmapProt;
+ if (Flags & MAP_NOACCESS) {
+ MmapFlags |= MAP_NORESERVE;
+ MmapProt = PROT_NONE;
+ } else {
+ MmapProt = PROT_READ | PROT_WRITE;
+ }
+#if defined(__aarch64__)
+#ifndef PROT_MTE
+#define PROT_MTE 0x20
+#endif
+ if (Flags & MAP_MEMTAG)
+ MmapProt |= PROT_MTE;
+#endif
+ if (Addr)
+ MmapFlags |= MAP_FIXED;
+ void *P =
+ mmap(reinterpret_cast<void *>(Addr), Size, MmapProt, MmapFlags, -1, 0);
+ if (P == MAP_FAILED) {
+ if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
+ reportMapError(errno == ENOMEM ? Size : 0);
+ return nullptr;
+ }
+#if SCUDO_ANDROID
+ if (Name)
+ prctl(ANDROID_PR_SET_VMA, ANDROID_PR_SET_VMA_ANON_NAME, P, Size, Name);
+#else
+ (void)Name;
+#endif
+
+ return P;
+}
+
+bool MemMapLinux::mapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags) {
+ void *P = mmapWrapper(Addr, Size, Name, Flags);
+ if (P == nullptr)
+ return false;
+
+ MapBase = reinterpret_cast<uptr>(P);
+ MapCapacity = Size;
+ return true;
+}
+
+void MemMapLinux::unmapImpl(uptr Addr, uptr Size) {
+ // If we unmap all the pages, also mark `MapBase` to 0 to indicate invalid
+ // status.
+ if (Size == MapCapacity) {
+ MapBase = MapCapacity = 0;
+ } else {
+ // This is partial unmap and is unmapping the pages from the beginning,
+ // shift `MapBase` to the new base.
+ if (MapBase == Addr)
+ MapBase = Addr + Size;
+ MapCapacity -= Size;
+ }
+
+ if (munmap(reinterpret_cast<void *>(Addr), Size) != 0)
+ reportUnmapError(Addr, Size);
+}
+
+bool MemMapLinux::remapImpl(uptr Addr, uptr Size, const char *Name,
+ uptr Flags) {
+ void *P = mmapWrapper(Addr, Size, Name, Flags);
+ if (reinterpret_cast<uptr>(P) != Addr)
+ reportMapError();
+ return true;
+}
+
+void MemMapLinux::setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags) {
+ int Prot = (Flags & MAP_NOACCESS) ? PROT_NONE : (PROT_READ | PROT_WRITE);
+ if (mprotect(reinterpret_cast<void *>(Addr), Size, Prot) != 0)
+ reportProtectError(Addr, Size, Prot);
+}
+
+void MemMapLinux::releaseAndZeroPagesToOSImpl(uptr From, uptr Size) {
+ void *Addr = reinterpret_cast<void *>(From);
+
+ while (madvise(Addr, Size, MADV_DONTNEED) == -1 && errno == EAGAIN) {
+ }
+}
+
+bool ReservedMemoryLinux::createImpl(uptr Addr, uptr Size, const char *Name,
+ uptr Flags) {
+ ReservedMemoryLinux::MemMapT MemMap;
+ if (!MemMap.map(Addr, Size, Name, Flags | MAP_NOACCESS))
+ return false;
+
+ MapBase = MemMap.getBase();
+ MapCapacity = MemMap.getCapacity();
+
+ return true;
+}
+
+void ReservedMemoryLinux::releaseImpl() {
+ if (munmap(reinterpret_cast<void *>(getBase()), getCapacity()) != 0)
+ reportUnmapError(getBase(), getCapacity());
+}
+
+ReservedMemoryLinux::MemMapT ReservedMemoryLinux::dispatchImpl(uptr Addr,
+ uptr Size) {
+ return ReservedMemoryLinux::MemMapT(Addr, Size);
+}
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX
diff --git a/compiler-rt/lib/scudo/standalone/mem_map_linux.h b/compiler-rt/lib/scudo/standalone/mem_map_linux.h
new file mode 100644
index 000000000000..7a89b3bff5ed
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/mem_map_linux.h
@@ -0,0 +1,67 @@
+//===-- mem_map_linux.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_MEM_MAP_LINUX_H_
+#define SCUDO_MEM_MAP_LINUX_H_
+
+#include "platform.h"
+
+#if SCUDO_LINUX
+
+#include "common.h"
+#include "mem_map_base.h"
+
+namespace scudo {
+
+class MemMapLinux final : public MemMapBase<MemMapLinux> {
+public:
+ constexpr MemMapLinux() = default;
+ MemMapLinux(uptr Base, uptr Capacity)
+ : MapBase(Base), MapCapacity(Capacity) {}
+
+ // Impls for base functions.
+ bool mapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags = 0);
+ void unmapImpl(uptr Addr, uptr Size);
+ bool remapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags = 0);
+ void setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags);
+ void releasePagesToOSImpl(uptr From, uptr Size) {
+ return releaseAndZeroPagesToOSImpl(From, Size);
+ }
+ void releaseAndZeroPagesToOSImpl(uptr From, uptr Size);
+ uptr getBaseImpl() { return MapBase; }
+ uptr getCapacityImpl() { return MapCapacity; }
+
+private:
+ uptr MapBase = 0;
+ uptr MapCapacity = 0;
+};
+
+// This will be deprecated when every allocator has been supported by each
+// platform's `MemMap` implementation.
+class ReservedMemoryLinux final
+ : public ReservedMemory<ReservedMemoryLinux, MemMapLinux> {
+public:
+  // The following two are the Impls for the functions in `MemMapBase`.
+ uptr getBaseImpl() { return MapBase; }
+ uptr getCapacityImpl() { return MapCapacity; }
+
+  // These three are specific to `ReservedMemory`.
+ bool createImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
+ void releaseImpl();
+ MemMapT dispatchImpl(uptr Addr, uptr Size);
+
+private:
+ uptr MapBase = 0;
+ uptr MapCapacity = 0;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX
+
+#endif // SCUDO_MEM_MAP_LINUX_H_
diff --git a/compiler-rt/lib/scudo/standalone/mutex.h b/compiler-rt/lib/scudo/standalone/mutex.h
index 05340de3e12d..4caa945219b5 100644
--- a/compiler-rt/lib/scudo/standalone/mutex.h
+++ b/compiler-rt/lib/scudo/standalone/mutex.h
@@ -35,7 +35,7 @@ public:
#pragma nounroll
#endif
for (u8 I = 0U; I < NumberOfTries; I++) {
- yieldProcessor(NumberOfYields);
+ delayLoop();
if (tryLock())
return;
}
@@ -53,10 +53,23 @@ public:
}
private:
+ void delayLoop() {
+ // The value comes from the average time spent in accessing caches (which
+ // are the fastest operations) so that we are unlikely to wait too long for
+ // fast operations.
+ constexpr u32 SpinTimes = 16;
+ volatile u32 V = 0;
+ for (u32 I = 0; I < SpinTimes; ++I) {
+ u32 Tmp = V + 1;
+ V = Tmp;
+ }
+ }
+
void assertHeldImpl();
- static constexpr u8 NumberOfTries = 8U;
- static constexpr u8 NumberOfYields = 8U;
+ // TODO(chiahungduan): Adapt this value based on scenarios. E.g., primary and
+ // secondary allocator have different allocation times.
+ static constexpr u8 NumberOfTries = 32U;
#if SCUDO_LINUX
atomic_u32 M = {};
diff --git a/compiler-rt/lib/scudo/standalone/options.h b/compiler-rt/lib/scudo/standalone/options.h
index 4e6786513334..b20142a41590 100644
--- a/compiler-rt/lib/scudo/standalone/options.h
+++ b/compiler-rt/lib/scudo/standalone/options.h
@@ -38,7 +38,7 @@ struct Options {
}
};
-template <typename Config> bool useMemoryTagging(Options Options) {
+template <typename Config> bool useMemoryTagging(const Options &Options) {
return allocatorSupportsMemoryTagging<Config>() &&
Options.get(OptionBit::UseMemoryTagging);
}
diff --git a/compiler-rt/lib/scudo/standalone/platform.h b/compiler-rt/lib/scudo/standalone/platform.h
index 7c7024ff570e..b71a86be7669 100644
--- a/compiler-rt/lib/scudo/standalone/platform.h
+++ b/compiler-rt/lib/scudo/standalone/platform.h
@@ -63,6 +63,20 @@
#define SCUDO_CAN_USE_MTE (SCUDO_LINUX || SCUDO_TRUSTY)
#endif
+// Use smaller table sizes for fuzzing in order to reduce input size.
+// Trusty just has less available memory.
+#ifndef SCUDO_SMALL_STACK_DEPOT
+#if defined(SCUDO_FUZZ) || SCUDO_TRUSTY
+#define SCUDO_SMALL_STACK_DEPOT 1
+#else
+#define SCUDO_SMALL_STACK_DEPOT 0
+#endif
+#endif
+
+#ifndef SCUDO_ENABLE_HOOKS
+#define SCUDO_ENABLE_HOOKS 0
+#endif
+
#ifndef SCUDO_MIN_ALIGNMENT_LOG
// We force malloc-type functions to be aligned to std::max_align_t, but there
// is no reason why the minimum alignment for all other functions can't be 8
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
index 1d8a34ec65d6..8281e02ba164 100644
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -9,6 +9,7 @@
#ifndef SCUDO_PRIMARY32_H_
#define SCUDO_PRIMARY32_H_
+#include "allocator_common.h"
#include "bytemap.h"
#include "common.h"
#include "list.h"
@@ -53,12 +54,15 @@ public:
"");
typedef SizeClassAllocator32<Config> ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
- typedef typename CacheT::TransferBatch TransferBatch;
- typedef typename CacheT::BatchGroup BatchGroup;
+ typedef TransferBatch<ThisT> TransferBatchT;
+ typedef BatchGroup<ThisT> BatchGroupT;
+
+ static_assert(sizeof(BatchGroupT) <= sizeof(TransferBatchT),
+ "BatchGroupT uses the same class size as TransferBatchT");
static uptr getSizeByClassId(uptr ClassId) {
return (ClassId == SizeClassMap::BatchClassId)
- ? sizeof(TransferBatch)
+ ? sizeof(TransferBatchT)
: SizeClassMap::getSizeByClassId(ClassId);
}
@@ -126,7 +130,7 @@ public:
SizeClassInfo *Sci = getSizeClassInfo(I);
ScopedLock L1(Sci->Mutex);
uptr TotalBlocks = 0;
- for (BatchGroup &BG : Sci->FreeListInfo.BlockList) {
+ for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
// `BG::Batches` are `TransferBatches`. +1 for `BatchGroup`.
BatchClassUsedInFreeLists += BG.Batches.size() + 1;
for (const auto &It : BG.Batches)
@@ -141,7 +145,7 @@ public:
SizeClassInfo *Sci = getSizeClassInfo(SizeClassMap::BatchClassId);
ScopedLock L1(Sci->Mutex);
uptr TotalBlocks = 0;
- for (BatchGroup &BG : Sci->FreeListInfo.BlockList) {
+ for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
if (LIKELY(!BG.Batches.empty())) {
for (const auto &It : BG.Batches)
TotalBlocks += It.getCount();
@@ -187,11 +191,30 @@ public:
return BlockSize > PageSize;
}
- TransferBatch *popBatch(CacheT *C, uptr ClassId) {
+  // Note that the `MaxBlockCount` will be used when we support arbitrary block
+  // counts. Now it's the same as the number of blocks stored in the
+  // `TransferBatch`.
+ u16 popBlocks(CacheT *C, uptr ClassId, CompactPtrT *ToArray,
+ UNUSED const u16 MaxBlockCount) {
+ TransferBatchT *B = popBatch(C, ClassId);
+ if (!B)
+ return 0;
+
+ const u16 Count = B->getCount();
+ DCHECK_GT(Count, 0U);
+ B->moveToArray(ToArray);
+
+ if (ClassId != SizeClassMap::BatchClassId)
+ C->deallocate(SizeClassMap::BatchClassId, B);
+
+ return Count;
+ }
+
+ TransferBatchT *popBatch(CacheT *C, uptr ClassId) {
DCHECK_LT(ClassId, NumClasses);
SizeClassInfo *Sci = getSizeClassInfo(ClassId);
ScopedLock L(Sci->Mutex);
- TransferBatch *B = popBatchImpl(C, ClassId, Sci);
+ TransferBatchT *B = popBatchImpl(C, ClassId, Sci);
if (UNLIKELY(!B)) {
if (UNLIKELY(!populateFreeList(C, ClassId, Sci)))
return nullptr;
@@ -311,6 +334,18 @@ public:
}
}
+ void getFragmentationInfo(ScopedString *Str) {
+ Str->append(
+ "Fragmentation Stats: SizeClassAllocator32: page size = %zu bytes\n",
+ getPageSizeCached());
+
+ for (uptr I = 1; I < NumClasses; I++) {
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ ScopedLock L(Sci->Mutex);
+ getSizeClassFragmentationInfo(Sci, I, Str);
+ }
+ }
+
bool setOption(Option O, sptr Value) {
if (O == Option::ReleaseInterval) {
const s32 Interval = Max(Min(static_cast<s32>(Value),
@@ -369,7 +404,7 @@ private:
};
struct BlocksInfo {
- SinglyLinkedList<BatchGroup> BlockList = {};
+ SinglyLinkedList<BatchGroupT> BlockList = {};
uptr PoppedBlocks = 0;
uptr PushedBlocks = 0;
};
@@ -493,11 +528,11 @@ private:
// reusable and don't need additional space for them.
Sci->FreeListInfo.PushedBlocks += Size;
- BatchGroup *BG = Sci->FreeListInfo.BlockList.front();
+ BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
if (BG == nullptr) {
// Construct `BatchGroup` on the last element.
- BG = reinterpret_cast<BatchGroup *>(
+ BG = reinterpret_cast<BatchGroupT *>(
decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
--Size;
BG->Batches.clear();
@@ -508,8 +543,8 @@ private:
// from `CreateGroup` in `pushBlocksImpl`
BG->PushedBlocks = 1;
BG->BytesInBGAtLastCheckpoint = 0;
- BG->MaxCachedPerBatch = TransferBatch::getMaxCached(
- getSizeByClassId(SizeClassMap::BatchClassId));
+ BG->MaxCachedPerBatch =
+ CacheT::getMaxCached(getSizeByClassId(SizeClassMap::BatchClassId));
Sci->FreeListInfo.BlockList.push_front(BG);
}
@@ -522,7 +557,7 @@ private:
// 2. Only 1 block is pushed when the freelist is empty.
if (BG->Batches.empty()) {
// Construct the `TransferBatch` on the last element.
- TransferBatch *TB = reinterpret_cast<TransferBatch *>(
+ TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(
decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
TB->clear();
// As mentioned above, addresses of `TransferBatch` and `BatchGroup` are
@@ -537,14 +572,14 @@ private:
BG->Batches.push_front(TB);
}
- TransferBatch *CurBatch = BG->Batches.front();
+ TransferBatchT *CurBatch = BG->Batches.front();
DCHECK_NE(CurBatch, nullptr);
for (u32 I = 0; I < Size;) {
u16 UnusedSlots =
static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
if (UnusedSlots == 0) {
- CurBatch = reinterpret_cast<TransferBatch *>(
+ CurBatch = reinterpret_cast<TransferBatchT *>(
decompactPtr(SizeClassMap::BatchClassId, Array[I]));
CurBatch->clear();
// Self-contained
@@ -588,24 +623,25 @@ private:
DCHECK_GT(Size, 0U);
auto CreateGroup = [&](uptr CompactPtrGroupBase) {
- BatchGroup *BG = C->createGroup();
+ BatchGroupT *BG =
+ reinterpret_cast<BatchGroupT *>(C->getBatchClassBlock());
BG->Batches.clear();
- TransferBatch *TB = C->createBatch(ClassId, nullptr);
+ TransferBatchT *TB =
+ reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
TB->clear();
BG->CompactPtrGroupBase = CompactPtrGroupBase;
BG->Batches.push_front(TB);
BG->PushedBlocks = 0;
BG->BytesInBGAtLastCheckpoint = 0;
- BG->MaxCachedPerBatch =
- TransferBatch::getMaxCached(getSizeByClassId(ClassId));
+ BG->MaxCachedPerBatch = CacheT::getMaxCached(getSizeByClassId(ClassId));
return BG;
};
- auto InsertBlocks = [&](BatchGroup *BG, CompactPtrT *Array, u32 Size) {
- SinglyLinkedList<TransferBatch> &Batches = BG->Batches;
- TransferBatch *CurBatch = Batches.front();
+ auto InsertBlocks = [&](BatchGroupT *BG, CompactPtrT *Array, u32 Size) {
+ SinglyLinkedList<TransferBatchT> &Batches = BG->Batches;
+ TransferBatchT *CurBatch = Batches.front();
DCHECK_NE(CurBatch, nullptr);
for (u32 I = 0; I < Size;) {
@@ -613,9 +649,8 @@ private:
u16 UnusedSlots =
static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
if (UnusedSlots == 0) {
- CurBatch = C->createBatch(
- ClassId,
- reinterpret_cast<void *>(decompactPtr(ClassId, Array[I])));
+ CurBatch =
+ reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
CurBatch->clear();
Batches.push_front(CurBatch);
UnusedSlots = BG->MaxCachedPerBatch;
@@ -630,11 +665,11 @@ private:
};
Sci->FreeListInfo.PushedBlocks += Size;
- BatchGroup *Cur = Sci->FreeListInfo.BlockList.front();
+ BatchGroupT *Cur = Sci->FreeListInfo.BlockList.front();
// In the following, `Cur` always points to the BatchGroup for blocks that
// will be pushed next. `Prev` is the element right before `Cur`.
- BatchGroup *Prev = nullptr;
+ BatchGroupT *Prev = nullptr;
while (Cur != nullptr &&
compactPtrGroupBase(Array[0]) > Cur->CompactPtrGroupBase) {
@@ -695,22 +730,22 @@ private:
// group id will be considered first.
//
// The region mutex needs to be held while calling this method.
- TransferBatch *popBatchImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
+ TransferBatchT *popBatchImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
REQUIRES(Sci->Mutex) {
if (Sci->FreeListInfo.BlockList.empty())
return nullptr;
- SinglyLinkedList<TransferBatch> &Batches =
+ SinglyLinkedList<TransferBatchT> &Batches =
Sci->FreeListInfo.BlockList.front()->Batches;
if (Batches.empty()) {
DCHECK_EQ(ClassId, SizeClassMap::BatchClassId);
- BatchGroup *BG = Sci->FreeListInfo.BlockList.front();
+ BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
Sci->FreeListInfo.BlockList.pop_front();
// Block used by `BatchGroup` is from BatchClassId. Turn the block into
// `TransferBatch` with single block.
- TransferBatch *TB = reinterpret_cast<TransferBatch *>(BG);
+ TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(BG);
TB->clear();
TB->add(
compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB)));
@@ -718,13 +753,13 @@ private:
return TB;
}
- TransferBatch *B = Batches.front();
+ TransferBatchT *B = Batches.front();
Batches.pop_front();
DCHECK_NE(B, nullptr);
DCHECK_GT(B->getCount(), 0U);
if (Batches.empty()) {
- BatchGroup *BG = Sci->FreeListInfo.BlockList.front();
+ BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
Sci->FreeListInfo.BlockList.pop_front();
// We don't keep BatchGroup with zero blocks to avoid empty-checking while
@@ -763,7 +798,7 @@ private:
}
const uptr Size = getSizeByClassId(ClassId);
- const u16 MaxCount = TransferBatch::getMaxCached(Size);
+ const u16 MaxCount = CacheT::getMaxCached(Size);
DCHECK_GT(MaxCount, 0U);
// The maximum number of blocks we should carve in the region is dictated
// by the maximum number of batches we want to fill, and the amount of
@@ -776,7 +811,7 @@ private:
DCHECK_GT(NumberOfBlocks, 0U);
constexpr u32 ShuffleArraySize =
- MaxNumBatches * TransferBatch::MaxNumCached;
+ MaxNumBatches * TransferBatchT::MaxNumCached;
// Fill the transfer batches and put them in the size-class freelist. We
// need to randomize the blocks for security purposes, so we first fill a
// local array that we then shuffle before populating the batches.
@@ -856,11 +891,56 @@ private:
PushedBytesDelta >> 10);
}
+ void getSizeClassFragmentationInfo(SizeClassInfo *Sci, uptr ClassId,
+ ScopedString *Str) REQUIRES(Sci->Mutex) {
+ const uptr BlockSize = getSizeByClassId(ClassId);
+ const uptr First = Sci->MinRegionIndex;
+ const uptr Last = Sci->MaxRegionIndex;
+ const uptr Base = First * RegionSize;
+ const uptr NumberOfRegions = Last - First + 1U;
+ auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
+ ScopedLock L(ByteMapMutex);
+ return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
+ };
+
+ FragmentationRecorder Recorder;
+ if (!Sci->FreeListInfo.BlockList.empty()) {
+ PageReleaseContext Context =
+ markFreeBlocks(Sci, ClassId, BlockSize, Base, NumberOfRegions,
+ ReleaseToOS::ForceAll);
+ releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
+ }
+
+ const uptr PageSize = getPageSizeCached();
+ const uptr TotalBlocks = Sci->AllocatedUser / BlockSize;
+ const uptr InUseBlocks =
+ Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
+ uptr AllocatedPagesCount = 0;
+ if (TotalBlocks != 0U) {
+ for (uptr I = 0; I < NumberOfRegions; ++I) {
+ if (SkipRegion(I))
+ continue;
+ AllocatedPagesCount += RegionSize / PageSize;
+ }
+
+ DCHECK_NE(AllocatedPagesCount, 0U);
+ }
+
+ DCHECK_GE(AllocatedPagesCount, Recorder.getReleasedPagesCount());
+ const uptr InUsePages =
+ AllocatedPagesCount - Recorder.getReleasedPagesCount();
+ const uptr InUseBytes = InUsePages * PageSize;
+
+ Str->append(" %02zu (%6zu): inuse/total blocks: %6zu/%6zu inuse/total "
+ "pages: %6zu/%6zu inuse bytes: %6zuK\n",
+ ClassId, BlockSize, InUseBlocks, TotalBlocks, InUsePages,
+ AllocatedPagesCount, InUseBytes >> 10);
+ }
+
NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
ReleaseToOS ReleaseType = ReleaseToOS::Normal)
REQUIRES(Sci->Mutex) {
const uptr BlockSize = getSizeByClassId(ClassId);
- const uptr PageSize = getPageSizeCached();
DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
const uptr BytesInFreeList =
@@ -871,6 +951,60 @@ private:
if (UNLIKELY(BytesInFreeList == 0))
return 0;
+ // ====================================================================== //
+ // 1. Check if we have enough free blocks and if it's worth doing a page
+ // release.
+ // ====================================================================== //
+ if (ReleaseType != ReleaseToOS::ForceAll &&
+ !hasChanceToReleasePages(Sci, BlockSize, BytesInFreeList,
+ ReleaseType)) {
+ return 0;
+ }
+
+ const uptr First = Sci->MinRegionIndex;
+ const uptr Last = Sci->MaxRegionIndex;
+ DCHECK_NE(Last, 0U);
+ DCHECK_LE(First, Last);
+ uptr TotalReleasedBytes = 0;
+ const uptr Base = First * RegionSize;
+ const uptr NumberOfRegions = Last - First + 1U;
+
+ // ==================================================================== //
+ // 2. Mark the free blocks and we can tell which pages are in-use by
+ // querying `PageReleaseContext`.
+ // ==================================================================== //
+ PageReleaseContext Context = markFreeBlocks(Sci, ClassId, BlockSize, Base,
+ NumberOfRegions, ReleaseType);
+ if (!Context.hasBlockMarked())
+ return 0;
+
+ // ==================================================================== //
+ // 3. Release the unused physical pages back to the OS.
+ // ==================================================================== //
+ ReleaseRecorder Recorder(Base);
+ auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
+ ScopedLock L(ByteMapMutex);
+ return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
+ };
+ releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
+
+ if (Recorder.getReleasedRangesCount() > 0) {
+ Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
+ Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
+ Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
+ TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
+ }
+ Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();
+
+ return TotalReleasedBytes;
+ }
+
+ bool hasChanceToReleasePages(SizeClassInfo *Sci, uptr BlockSize,
+ uptr BytesInFreeList, ReleaseToOS ReleaseType)
+ REQUIRES(Sci->Mutex) {
+ DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
+ const uptr PageSize = getPageSizeCached();
+
if (BytesInFreeList <= Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint)
Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
@@ -892,22 +1026,20 @@ private:
// (BytesInFreeListAtLastCheckpoint - BytesInFreeList).
const uptr PushedBytesDelta =
BytesInFreeList - Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
- if (PushedBytesDelta < PageSize && ReleaseType != ReleaseToOS::ForceAll)
- return 0;
+ if (PushedBytesDelta < PageSize)
+ return false;
- const bool CheckDensity =
- isSmallBlock(BlockSize) && ReleaseType != ReleaseToOS::ForceAll;
// Releasing smaller blocks is expensive, so we want to make sure that a
// significant amount of bytes are free, and that there has been a good
// amount of batches pushed to the freelist before attempting to release.
- if (CheckDensity && ReleaseType == ReleaseToOS::Normal)
+ if (isSmallBlock(BlockSize) && ReleaseType == ReleaseToOS::Normal)
if (PushedBytesDelta < Sci->AllocatedUser / 16U)
- return 0;
+ return false;
if (ReleaseType == ReleaseToOS::Normal) {
const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
if (IntervalMs < 0)
- return 0;
+ return false;
// The constant 8 here is selected from profiling some apps and the number
// of unreleased pages in the large size classes is around 16 pages or
@@ -920,30 +1052,31 @@ private:
static_cast<u64>(IntervalMs) * 1000000 >
getMonotonicTimeFast()) {
// Memory was returned recently.
- return 0;
+ return false;
}
}
} // if (ReleaseType == ReleaseToOS::Normal)
- const uptr First = Sci->MinRegionIndex;
- const uptr Last = Sci->MaxRegionIndex;
- DCHECK_NE(Last, 0U);
- DCHECK_LE(First, Last);
- uptr TotalReleasedBytes = 0;
- const uptr Base = First * RegionSize;
- const uptr NumberOfRegions = Last - First + 1U;
- const uptr GroupSize = (1U << GroupSizeLog);
+ return true;
+ }
+
+ PageReleaseContext markFreeBlocks(SizeClassInfo *Sci, const uptr ClassId,
+ const uptr BlockSize, const uptr Base,
+ const uptr NumberOfRegions,
+ ReleaseToOS ReleaseType)
+ REQUIRES(Sci->Mutex) {
+ const uptr PageSize = getPageSizeCached();
+ const uptr GroupSize = (1UL << GroupSizeLog);
const uptr CurGroupBase =
compactPtrGroupBase(compactPtr(ClassId, Sci->CurrentRegion));
- ReleaseRecorder Recorder(Base);
PageReleaseContext Context(BlockSize, NumberOfRegions,
/*ReleaseSize=*/RegionSize);
auto DecompactPtr = [](CompactPtrT CompactPtr) {
return reinterpret_cast<uptr>(CompactPtr);
};
- for (BatchGroup &BG : Sci->FreeListInfo.BlockList) {
+ for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
const uptr GroupBase = decompactGroupBase(BG.CompactPtrGroupBase);
// The `GroupSize` may not be divided by `BlockSize`, which means there is
// an unused space at the end of Region. Exclude that space to avoid
@@ -960,25 +1093,27 @@ private:
BG.Batches.front()->getCount();
const uptr BytesInBG = NumBlocks * BlockSize;
- if (ReleaseType != ReleaseToOS::ForceAll &&
- BytesInBG <= BG.BytesInBGAtLastCheckpoint) {
- BG.BytesInBGAtLastCheckpoint = BytesInBG;
- continue;
- }
- const uptr PushedBytesDelta = BytesInBG - BG.BytesInBGAtLastCheckpoint;
- if (ReleaseType != ReleaseToOS::ForceAll && PushedBytesDelta < PageSize)
- continue;
+ if (ReleaseType != ReleaseToOS::ForceAll) {
+ if (BytesInBG <= BG.BytesInBGAtLastCheckpoint) {
+ BG.BytesInBGAtLastCheckpoint = BytesInBG;
+ continue;
+ }
- // Given the randomness property, we try to release the pages only if the
- // bytes used by free blocks exceed certain proportion of allocated
- // spaces.
- if (CheckDensity && (BytesInBG * 100U) / AllocatedGroupSize <
- (100U - 1U - BlockSize / 16U)) {
- continue;
+ const uptr PushedBytesDelta = BytesInBG - BG.BytesInBGAtLastCheckpoint;
+ if (PushedBytesDelta < PageSize)
+ continue;
+
+ // Given the randomness property, we try to release the pages only if
+ // the bytes used by free blocks exceed certain proportion of allocated
+ // spaces.
+ if (isSmallBlock(BlockSize) && (BytesInBG * 100U) / AllocatedGroupSize <
+ (100U - 1U - BlockSize / 16U)) {
+ continue;
+ }
}
// TODO: Consider updating this after page release if `ReleaseRecorder`
- // can tell the releasd bytes in each group.
+ // can tell the released bytes in each group.
BG.BytesInBGAtLastCheckpoint = BytesInBG;
const uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
@@ -1006,27 +1141,10 @@ private:
// We may not be able to do the page release In a rare case that we may
// fail on PageMap allocation.
if (UNLIKELY(!Context.hasBlockMarked()))
- return 0;
- }
-
- if (!Context.hasBlockMarked())
- return 0;
-
- auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
- ScopedLock L(ByteMapMutex);
- return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
- };
- releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
-
- if (Recorder.getReleasedRangesCount() > 0) {
- Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
- Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
- Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
- TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
+ break;
}
- Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();
- return TotalReleasedBytes;
+ return Context;
}
SizeClassInfo SizeClassInfoArray[NumClasses] = {};
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index fd7a1f9e80cd..d1929ff7212f 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -9,6 +9,7 @@
#ifndef SCUDO_PRIMARY64_H_
#define SCUDO_PRIMARY64_H_
+#include "allocator_common.h"
#include "bytemap.h"
#include "common.h"
#include "list.h"
@@ -21,6 +22,8 @@
#include "string_utils.h"
#include "thread_annotations.h"
+#include "condition_variable.h"
+
namespace scudo {
// SizeClassAllocator64 is an allocator tuned for 64-bit address space.
@@ -47,27 +50,39 @@ template <typename Config> class SizeClassAllocator64 {
public:
typedef typename Config::Primary::CompactPtrT CompactPtrT;
typedef typename Config::Primary::SizeClassMap SizeClassMap;
+ typedef typename ConditionVariableState<
+ typename Config::Primary>::ConditionVariableT ConditionVariableT;
static const uptr CompactPtrScale = Config::Primary::CompactPtrScale;
+ static const uptr RegionSizeLog = Config::Primary::RegionSizeLog;
static const uptr GroupSizeLog = Config::Primary::GroupSizeLog;
+ static_assert(RegionSizeLog >= GroupSizeLog,
+ "Group size shouldn't be greater than the region size");
static const uptr GroupScale = GroupSizeLog - CompactPtrScale;
typedef SizeClassAllocator64<Config> ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
- typedef typename CacheT::TransferBatch TransferBatch;
- typedef typename CacheT::BatchGroup BatchGroup;
+ typedef TransferBatch<ThisT> TransferBatchT;
+ typedef BatchGroup<ThisT> BatchGroupT;
+
+ static_assert(sizeof(BatchGroupT) <= sizeof(TransferBatchT),
+ "BatchGroupT uses the same class size as TransferBatchT");
static uptr getSizeByClassId(uptr ClassId) {
return (ClassId == SizeClassMap::BatchClassId)
- ? roundUp(sizeof(TransferBatch), 1U << CompactPtrScale)
+ ? roundUp(sizeof(TransferBatchT), 1U << CompactPtrScale)
: SizeClassMap::getSizeByClassId(ClassId);
}
static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
+ static bool conditionVariableEnabled() {
+ return ConditionVariableState<typename Config::Primary>::enabled();
+ }
+
void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
const uptr PageSize = getPageSizeCached();
- const uptr GroupSize = (1U << GroupSizeLog);
+ const uptr GroupSize = (1UL << GroupSizeLog);
const uptr PagesInGroup = GroupSize / PageSize;
const uptr MinSizeClass = getSizeByClassId(1);
// When trying to release pages back to memory, visiting smaller size
@@ -117,13 +132,13 @@ public:
for (uptr I = 0; I < NumClasses; I++) {
RegionInfo *Region = getRegionInfo(I);
+
// The actual start of a region is offset by a random number of pages
// when PrimaryEnableRandomOffset is set.
- Region->RegionBeg =
- (PrimaryBase + (I << Config::Primary::RegionSizeLog)) +
- (Config::Primary::EnableRandomOffset
- ? ((getRandomModN(&Seed, 16) + 1) * PageSize)
- : 0);
+ Region->RegionBeg = (PrimaryBase + (I << RegionSizeLog)) +
+ (Config::Primary::EnableRandomOffset
+ ? ((getRandomModN(&Seed, 16) + 1) * PageSize)
+ : 0);
Region->RandState = getRandomU32(&Seed);
// Releasing small blocks is expensive, set a higher threshold to avoid
// frequent page releases.
@@ -134,11 +149,16 @@ public:
Region->ReleaseInfo.LastReleaseAtNs = Time;
Region->MemMapInfo.MemMap = ReservedMemory.dispatch(
- PrimaryBase + (I << Config::Primary::RegionSizeLog), RegionSize);
+ PrimaryBase + (I << RegionSizeLog), RegionSize);
CHECK(Region->MemMapInfo.MemMap.isAllocated());
}
shuffle(RegionInfoArray, NumClasses, &Seed);
+ // The binding should be done after region shuffling so that it won't bind
+ // the FLLock from the wrong region.
+ for (uptr I = 0; I < NumClasses; I++)
+ getRegionInfo(I)->FLLockCV.bindTestOnly(getRegionInfo(I)->FLLock);
+
setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
}
@@ -165,7 +185,7 @@ public:
ScopedLock FL(Region->FLLock);
const uptr BlockSize = getSizeByClassId(I);
uptr TotalBlocks = 0;
- for (BatchGroup &BG : Region->FreeListInfo.BlockList) {
+ for (BatchGroupT &BG : Region->FreeListInfo.BlockList) {
// `BG::Batches` are `TransferBatches`. +1 for `BatchGroup`.
BatchClassUsedInFreeLists += BG.Batches.size() + 1;
for (const auto &It : BG.Batches)
@@ -182,7 +202,7 @@ public:
ScopedLock FL(Region->FLLock);
const uptr BlockSize = getSizeByClassId(SizeClassMap::BatchClassId);
uptr TotalBlocks = 0;
- for (BatchGroup &BG : Region->FreeListInfo.BlockList) {
+ for (BatchGroupT &BG : Region->FreeListInfo.BlockList) {
if (LIKELY(!BG.Batches.empty())) {
for (const auto &It : BG.Batches)
TotalBlocks += It.getCount();
@@ -201,51 +221,64 @@ public:
DCHECK_EQ(BlocksInUse, BatchClassUsedInFreeLists);
}
- TransferBatch *popBatch(CacheT *C, uptr ClassId) {
+  // Note that the `MaxBlockCount` will be used when we support arbitrary block
+  // counts. Now it's the same as the number of blocks stored in the
+  // `TransferBatch`.
+ u16 popBlocks(CacheT *C, uptr ClassId, CompactPtrT *ToArray,
+ UNUSED const u16 MaxBlockCount) {
+ TransferBatchT *B = popBatch(C, ClassId);
+ if (!B)
+ return 0;
+
+ const u16 Count = B->getCount();
+ DCHECK_GT(Count, 0U);
+ B->moveToArray(ToArray);
+
+ if (ClassId != SizeClassMap::BatchClassId)
+ C->deallocate(SizeClassMap::BatchClassId, B);
+
+ return Count;
+ }
+
+ TransferBatchT *popBatch(CacheT *C, uptr ClassId) {
DCHECK_LT(ClassId, NumClasses);
RegionInfo *Region = getRegionInfo(ClassId);
{
ScopedLock L(Region->FLLock);
- TransferBatch *B = popBatchImpl(C, ClassId, Region);
+ TransferBatchT *B = popBatchImpl(C, ClassId, Region);
if (LIKELY(B))
return B;
}
- bool PrintStats = false;
- TransferBatch *B = nullptr;
+ bool ReportRegionExhausted = false;
+ TransferBatchT *B = nullptr;
- while (true) {
- // When two threads compete for `Region->MMLock`, we only want one of them
- // does the populateFreeListAndPopBatch(). To avoid both of them doing
- // that, always check the freelist before mapping new pages.
- //
- // TODO(chiahungduan): Use a condition variable so that we don't need to
- // hold `Region->MMLock` here.
- ScopedLock ML(Region->MMLock);
- {
- ScopedLock FL(Region->FLLock);
- B = popBatchImpl(C, ClassId, Region);
- if (LIKELY(B))
- return B;
- }
+ if (conditionVariableEnabled()) {
+ B = popBatchWithCV(C, ClassId, Region, ReportRegionExhausted);
+ } else {
+ while (true) {
+ // When two threads compete for `Region->MMLock`, we only want one of
+ // them to call populateFreeListAndPopBatch(). To avoid both of them
+ // doing that, always check the freelist before mapping new pages.
+ ScopedLock ML(Region->MMLock);
+ {
+ ScopedLock FL(Region->FLLock);
+ if ((B = popBatchImpl(C, ClassId, Region)))
+ break;
+ }
- const bool RegionIsExhausted = Region->Exhausted;
- if (!RegionIsExhausted)
- B = populateFreeListAndPopBatch(C, ClassId, Region);
- PrintStats = !RegionIsExhausted && Region->Exhausted;
- break;
+ const bool RegionIsExhausted = Region->Exhausted;
+ if (!RegionIsExhausted)
+ B = populateFreeListAndPopBatch(C, ClassId, Region);
+ ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;
+ break;
+ }
}
- // Note that `getStats()` requires locking each region so we can't call it
- // while locking the Region->Mutex in the above.
- if (UNLIKELY(PrintStats)) {
- ScopedString Str;
- getStats(&Str);
- Str.append(
- "Scudo OOM: The process has exhausted %zuM for size class %zu.\n",
- RegionSize >> 20, getSizeByClassId(ClassId));
- Str.output();
+ if (UNLIKELY(ReportRegionExhausted)) {
+ Printf("Can't populate more pages for size class %zu.\n",
+ getSizeByClassId(ClassId));
// Theoretically, BatchClass shouldn't be used up. Abort immediately when
// it happens.
@@ -265,30 +298,36 @@ public:
if (ClassId == SizeClassMap::BatchClassId) {
ScopedLock L(Region->FLLock);
pushBatchClassBlocks(Region, Array, Size);
+ if (conditionVariableEnabled())
+ Region->FLLockCV.notifyAll(Region->FLLock);
return;
}
// TODO(chiahungduan): Consider not doing grouping if the group size is not
// greater than the block size with a certain scale.
- // Sort the blocks so that blocks belonging to the same group can be pushed
- // together.
bool SameGroup = true;
- for (u32 I = 1; I < Size; ++I) {
- if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I]))
- SameGroup = false;
- CompactPtrT Cur = Array[I];
- u32 J = I;
- while (J > 0 && compactPtrGroup(Cur) < compactPtrGroup(Array[J - 1])) {
- Array[J] = Array[J - 1];
- --J;
+ if (GroupSizeLog < RegionSizeLog) {
+ // Sort the blocks so that blocks belonging to the same group can be
+ // pushed together.
+ for (u32 I = 1; I < Size; ++I) {
+ if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I]))
+ SameGroup = false;
+ CompactPtrT Cur = Array[I];
+ u32 J = I;
+ while (J > 0 && compactPtrGroup(Cur) < compactPtrGroup(Array[J - 1])) {
+ Array[J] = Array[J - 1];
+ --J;
+ }
+ Array[J] = Cur;
}
- Array[J] = Cur;
}
{
ScopedLock L(Region->FLLock);
pushBlocksImpl(C, ClassId, Region, Array, Size, SameGroup);
+ if (conditionVariableEnabled())
+ Region->FLLockCV.notifyAll(Region->FLLock);
}
}
@@ -363,6 +402,18 @@ public:
}
}
+ void getFragmentationInfo(ScopedString *Str) {
+ Str->append(
+ "Fragmentation Stats: SizeClassAllocator64: page size = %zu bytes\n",
+ getPageSizeCached());
+
+ for (uptr I = 1; I < NumClasses; I++) {
+ RegionInfo *Region = getRegionInfo(I);
+ ScopedLock L(Region->MMLock);
+ getRegionFragmentationInfo(Region, I, Str);
+ }
+ }
+
bool setOption(Option O, sptr Value) {
if (O == Option::ReleaseInterval) {
const s32 Interval = Max(Min(static_cast<s32>(Value),
@@ -477,7 +528,7 @@ public:
AtomicOptions Options;
private:
- static const uptr RegionSize = 1UL << Config::Primary::RegionSizeLog;
+ static const uptr RegionSize = 1UL << RegionSizeLog;
static const uptr NumClasses = SizeClassMap::NumClasses;
static const uptr PrimarySize = RegionSize * NumClasses;
@@ -493,7 +544,7 @@ private:
};
struct BlocksInfo {
- SinglyLinkedList<BatchGroup> BlockList = {};
+ SinglyLinkedList<BatchGroupT> BlockList = {};
uptr PoppedBlocks = 0;
uptr PushedBlocks = 0;
};
@@ -509,6 +560,7 @@ private:
struct UnpaddedRegionInfo {
// Mutex for operations on freelist
HybridMutex FLLock;
+ ConditionVariableT FLLockCV GUARDED_BY(FLLock);
// Mutex for memmap operations
HybridMutex MMLock ACQUIRED_BEFORE(FLLock);
// `RegionBeg` is initialized before thread creation and won't be changed.
@@ -520,6 +572,7 @@ private:
uptr TryReleaseThreshold GUARDED_BY(MMLock) = 0;
ReleaseToOsInfo ReleaseInfo GUARDED_BY(MMLock) = {};
bool Exhausted GUARDED_BY(MMLock) = false;
+ bool isPopulatingFreeList GUARDED_BY(FLLock) = false;
};
struct RegionInfo : UnpaddedRegionInfo {
char Padding[SCUDO_CACHE_LINE_SIZE -
@@ -605,11 +658,11 @@ private:
// reusable and don't need additional space for them.
Region->FreeListInfo.PushedBlocks += Size;
- BatchGroup *BG = Region->FreeListInfo.BlockList.front();
+ BatchGroupT *BG = Region->FreeListInfo.BlockList.front();
if (BG == nullptr) {
// Construct `BatchGroup` on the last element.
- BG = reinterpret_cast<BatchGroup *>(
+ BG = reinterpret_cast<BatchGroupT *>(
decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
--Size;
BG->Batches.clear();
@@ -620,8 +673,8 @@ private:
// from `CreateGroup` in `pushBlocksImpl`
BG->PushedBlocks = 1;
BG->BytesInBGAtLastCheckpoint = 0;
- BG->MaxCachedPerBatch = TransferBatch::getMaxCached(
- getSizeByClassId(SizeClassMap::BatchClassId));
+ BG->MaxCachedPerBatch =
+ CacheT::getMaxCached(getSizeByClassId(SizeClassMap::BatchClassId));
Region->FreeListInfo.BlockList.push_front(BG);
}
@@ -634,7 +687,7 @@ private:
// 2. Only 1 block is pushed when the freelist is empty.
if (BG->Batches.empty()) {
// Construct the `TransferBatch` on the last element.
- TransferBatch *TB = reinterpret_cast<TransferBatch *>(
+ TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(
decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
TB->clear();
// As mentioned above, addresses of `TransferBatch` and `BatchGroup` are
@@ -649,14 +702,14 @@ private:
BG->Batches.push_front(TB);
}
- TransferBatch *CurBatch = BG->Batches.front();
+ TransferBatchT *CurBatch = BG->Batches.front();
DCHECK_NE(CurBatch, nullptr);
for (u32 I = 0; I < Size;) {
u16 UnusedSlots =
static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
if (UnusedSlots == 0) {
- CurBatch = reinterpret_cast<TransferBatch *>(
+ CurBatch = reinterpret_cast<TransferBatchT *>(
decompactPtr(SizeClassMap::BatchClassId, Array[I]));
CurBatch->clear();
// Self-contained
@@ -699,24 +752,25 @@ private:
DCHECK_GT(Size, 0U);
auto CreateGroup = [&](uptr CompactPtrGroupBase) {
- BatchGroup *BG = C->createGroup();
+ BatchGroupT *BG =
+ reinterpret_cast<BatchGroupT *>(C->getBatchClassBlock());
BG->Batches.clear();
- TransferBatch *TB = C->createBatch(ClassId, nullptr);
+ TransferBatchT *TB =
+ reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
TB->clear();
BG->CompactPtrGroupBase = CompactPtrGroupBase;
BG->Batches.push_front(TB);
BG->PushedBlocks = 0;
BG->BytesInBGAtLastCheckpoint = 0;
- BG->MaxCachedPerBatch =
- TransferBatch::getMaxCached(getSizeByClassId(ClassId));
+ BG->MaxCachedPerBatch = CacheT::getMaxCached(getSizeByClassId(ClassId));
return BG;
};
- auto InsertBlocks = [&](BatchGroup *BG, CompactPtrT *Array, u32 Size) {
- SinglyLinkedList<TransferBatch> &Batches = BG->Batches;
- TransferBatch *CurBatch = Batches.front();
+ auto InsertBlocks = [&](BatchGroupT *BG, CompactPtrT *Array, u32 Size) {
+ SinglyLinkedList<TransferBatchT> &Batches = BG->Batches;
+ TransferBatchT *CurBatch = Batches.front();
DCHECK_NE(CurBatch, nullptr);
for (u32 I = 0; I < Size;) {
@@ -724,9 +778,8 @@ private:
u16 UnusedSlots =
static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
if (UnusedSlots == 0) {
- CurBatch = C->createBatch(
- ClassId,
- reinterpret_cast<void *>(decompactPtr(ClassId, Array[I])));
+ CurBatch =
+ reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
CurBatch->clear();
Batches.push_front(CurBatch);
UnusedSlots = BG->MaxCachedPerBatch;
@@ -741,21 +794,11 @@ private:
};
Region->FreeListInfo.PushedBlocks += Size;
- BatchGroup *Cur = Region->FreeListInfo.BlockList.front();
-
- if (ClassId == SizeClassMap::BatchClassId) {
- if (Cur == nullptr) {
- // Don't need to classify BatchClassId.
- Cur = CreateGroup(/*CompactPtrGroupBase=*/0);
- Region->FreeListInfo.BlockList.push_front(Cur);
- }
- InsertBlocks(Cur, Array, Size);
- return;
- }
+ BatchGroupT *Cur = Region->FreeListInfo.BlockList.front();
// In the following, `Cur` always points to the BatchGroup for blocks that
// will be pushed next. `Prev` is the element right before `Cur`.
- BatchGroup *Prev = nullptr;
+ BatchGroupT *Prev = nullptr;
while (Cur != nullptr &&
compactPtrGroup(Array[0]) > Cur->CompactPtrGroupBase) {
@@ -812,26 +855,96 @@ private:
InsertBlocks(Cur, Array + Size - Count, Count);
}
+ TransferBatchT *popBatchWithCV(CacheT *C, uptr ClassId, RegionInfo *Region,
+ bool &ReportRegionExhausted) {
+ TransferBatchT *B = nullptr;
+
+ while (true) {
+ // We only expect one thread doing the freelist refillment and other
+ // threads will be waiting for either the completion of the
+ // `populateFreeListAndPopBatch()` or `pushBlocks()` called by other
+ // threads.
+ bool PopulateFreeList = false;
+ {
+ ScopedLock FL(Region->FLLock);
+ if (!Region->isPopulatingFreeList) {
+ Region->isPopulatingFreeList = true;
+ PopulateFreeList = true;
+ }
+ }
+
+ if (PopulateFreeList) {
+ ScopedLock ML(Region->MMLock);
+
+ const bool RegionIsExhausted = Region->Exhausted;
+ if (!RegionIsExhausted)
+ B = populateFreeListAndPopBatch(C, ClassId, Region);
+ ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;
+
+ {
+ // Before reacquiring the `FLLock`, the freelist may be used up again
+ // and some threads are waiting for the freelist refillment by the
+ // current thread. It's important to set
+ // `Region->isPopulatingFreeList` to false so the threads about to
+ // sleep will notice the status change.
+ ScopedLock FL(Region->FLLock);
+ Region->isPopulatingFreeList = false;
+ Region->FLLockCV.notifyAll(Region->FLLock);
+ }
+
+ break;
+ }
+
+ // At here, there are two preconditions to be met before waiting,
+ // 1. The freelist is empty.
+ // 2. Region->isPopulatingFreeList == true, i.e, someone is still doing
+ // `populateFreeListAndPopBatch()`.
+ //
+ // Note that it has the chance that freelist is empty but
+ // Region->isPopulatingFreeList == false because all the new populated
+ // blocks were used up right after the refillment. Therefore, we have to
+ // check if someone is still populating the freelist.
+ ScopedLock FL(Region->FLLock);
+ if (LIKELY(B = popBatchImpl(C, ClassId, Region)))
+ break;
+
+ if (!Region->isPopulatingFreeList)
+ continue;
+
+ // Now the freelist is empty and someone's doing the refillment. We will
+ // wait until anyone refills the freelist or someone finishes doing
+ // `populateFreeListAndPopBatch()`. The refillment can be done by
+ // `populateFreeListAndPopBatch()`, `pushBlocks()`,
+ // `pushBatchClassBlocks()` and `mergeGroupsToReleaseBack()`.
+ Region->FLLockCV.wait(Region->FLLock);
+
+ if (LIKELY(B = popBatchImpl(C, ClassId, Region)))
+ break;
+ }
+
+ return B;
+ }
+
// Pop one TransferBatch from a BatchGroup. The BatchGroup with the smallest
// group id will be considered first.
//
// The region mutex needs to be held while calling this method.
- TransferBatch *popBatchImpl(CacheT *C, uptr ClassId, RegionInfo *Region)
+ TransferBatchT *popBatchImpl(CacheT *C, uptr ClassId, RegionInfo *Region)
REQUIRES(Region->FLLock) {
if (Region->FreeListInfo.BlockList.empty())
return nullptr;
- SinglyLinkedList<TransferBatch> &Batches =
+ SinglyLinkedList<TransferBatchT> &Batches =
Region->FreeListInfo.BlockList.front()->Batches;
if (Batches.empty()) {
DCHECK_EQ(ClassId, SizeClassMap::BatchClassId);
- BatchGroup *BG = Region->FreeListInfo.BlockList.front();
+ BatchGroupT *BG = Region->FreeListInfo.BlockList.front();
Region->FreeListInfo.BlockList.pop_front();
// Block used by `BatchGroup` is from BatchClassId. Turn the block into
// `TransferBatch` with single block.
- TransferBatch *TB = reinterpret_cast<TransferBatch *>(BG);
+ TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(BG);
TB->clear();
TB->add(
compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB)));
@@ -839,13 +952,13 @@ private:
return TB;
}
- TransferBatch *B = Batches.front();
+ TransferBatchT *B = Batches.front();
Batches.pop_front();
DCHECK_NE(B, nullptr);
DCHECK_GT(B->getCount(), 0U);
if (Batches.empty()) {
- BatchGroup *BG = Region->FreeListInfo.BlockList.front();
+ BatchGroupT *BG = Region->FreeListInfo.BlockList.front();
Region->FreeListInfo.BlockList.pop_front();
// We don't keep BatchGroup with zero blocks to avoid empty-checking while
@@ -863,11 +976,11 @@ private:
}
// Refill the freelist and return one batch.
- NOINLINE TransferBatch *populateFreeListAndPopBatch(CacheT *C, uptr ClassId,
- RegionInfo *Region)
+ NOINLINE TransferBatchT *populateFreeListAndPopBatch(CacheT *C, uptr ClassId,
+ RegionInfo *Region)
REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
const uptr Size = getSizeByClassId(ClassId);
- const u16 MaxCount = TransferBatch::getMaxCached(Size);
+ const u16 MaxCount = CacheT::getMaxCached(Size);
const uptr RegionBeg = Region->RegionBeg;
const uptr MappedUser = Region->MemMapInfo.MappedUser;
@@ -903,7 +1016,7 @@ private:
DCHECK_GT(NumberOfBlocks, 0);
constexpr u32 ShuffleArraySize =
- MaxNumBatches * TransferBatch::MaxNumCached;
+ MaxNumBatches * TransferBatchT::MaxNumCached;
CompactPtrT ShuffleArray[ShuffleArraySize];
DCHECK_LE(NumberOfBlocks, ShuffleArraySize);
@@ -936,7 +1049,7 @@ private:
pushBatchClassBlocks(Region, ShuffleArray, NumberOfBlocks);
}
- TransferBatch *B = popBatchImpl(C, ClassId, Region);
+ TransferBatchT *B = popBatchImpl(C, ClassId, Region);
DCHECK_NE(B, nullptr);
// Note that `PushedBlocks` and `PoppedBlocks` are supposed to only record
@@ -957,10 +1070,10 @@ private:
if (Region->MemMapInfo.MappedUser == 0)
return;
const uptr BlockSize = getSizeByClassId(ClassId);
- const uptr InUse =
+ const uptr InUseBlocks =
Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks;
const uptr BytesInFreeList =
- Region->MemMapInfo.AllocatedUser - InUse * BlockSize;
+ Region->MemMapInfo.AllocatedUser - InUseBlocks * BlockSize;
uptr RegionPushedBytesDelta = 0;
if (BytesInFreeList >=
Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
@@ -972,122 +1085,141 @@ private:
"%s %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
"inuse: %6zu total: %6zu releases: %6zu last "
"released: %6zuK latest pushed bytes: %6zuK region: 0x%zx (0x%zx)\n",
- Region->Exhausted ? "F" : " ", ClassId, getSizeByClassId(ClassId),
+ Region->Exhausted ? "E" : " ", ClassId, getSizeByClassId(ClassId),
Region->MemMapInfo.MappedUser >> 10, Region->FreeListInfo.PoppedBlocks,
- Region->FreeListInfo.PushedBlocks, InUse, TotalChunks,
+ Region->FreeListInfo.PushedBlocks, InUseBlocks, TotalChunks,
Region->ReleaseInfo.RangesReleased,
Region->ReleaseInfo.LastReleasedBytes >> 10,
RegionPushedBytesDelta >> 10, Region->RegionBeg,
getRegionBaseByClassId(ClassId));
}
- NOINLINE uptr releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
- ReleaseToOS ReleaseType = ReleaseToOS::Normal)
- REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
- ScopedLock L(Region->FLLock);
-
+ void getRegionFragmentationInfo(RegionInfo *Region, uptr ClassId,
+ ScopedString *Str) REQUIRES(Region->MMLock) {
const uptr BlockSize = getSizeByClassId(ClassId);
- const uptr BytesInFreeList =
- Region->MemMapInfo.AllocatedUser - (Region->FreeListInfo.PoppedBlocks -
- Region->FreeListInfo.PushedBlocks) *
- BlockSize;
- if (UNLIKELY(BytesInFreeList == 0))
- return false;
-
const uptr AllocatedUserEnd =
Region->MemMapInfo.AllocatedUser + Region->RegionBeg;
- const uptr CompactPtrBase = getCompactPtrBaseByClassId(ClassId);
- // ====================================================================== //
- // 1. Check if we have enough free blocks and if it's worth doing a page
- // release.
- // ====================================================================== //
- if (ReleaseType != ReleaseToOS::ForceAll &&
- !hasChanceToReleasePages(Region, BlockSize, BytesInFreeList,
- ReleaseType)) {
- return 0;
- }
-
- // ====================================================================== //
- // 2. Determine which groups can release the pages. Use a heuristic to
- // gather groups that are candidates for doing a release.
- // ====================================================================== //
- SinglyLinkedList<BatchGroup> GroupsToRelease;
- if (ReleaseType == ReleaseToOS::ForceAll) {
+ SinglyLinkedList<BatchGroupT> GroupsToRelease;
+ {
+ ScopedLock L(Region->FLLock);
GroupsToRelease = Region->FreeListInfo.BlockList;
Region->FreeListInfo.BlockList.clear();
- } else {
- GroupsToRelease = collectGroupsToRelease(
- Region, BlockSize, AllocatedUserEnd, CompactPtrBase);
}
- if (GroupsToRelease.empty())
- return 0;
- // Ideally, we should use a class like `ScopedUnlock`. However, this form of
- // unlocking is not supported by the thread-safety analysis. See
- // https://clang.llvm.org/docs/ThreadSafetyAnalysis.html#no-alias-analysis
- // for more details.
- // Put it as local class so that we can mark the ctor/dtor with proper
- // annotations associated to the target lock. Note that accessing the
- // function variable in local class only works in thread-safety annotations.
- // TODO: Implement general `ScopedUnlock` when it's supported.
- class FLLockScopedUnlock {
- public:
- FLLockScopedUnlock(RegionInfo *Region) RELEASE(Region->FLLock)
- : R(Region) {
- R->FLLock.assertHeld();
- R->FLLock.unlock();
- }
- ~FLLockScopedUnlock() ACQUIRE(Region->FLLock) { R->FLLock.lock(); }
+ FragmentationRecorder Recorder;
+ if (!GroupsToRelease.empty()) {
+ PageReleaseContext Context =
+ markFreeBlocks(Region, BlockSize, AllocatedUserEnd,
+ getCompactPtrBaseByClassId(ClassId), GroupsToRelease);
+ auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
+ releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
- private:
- RegionInfo *R;
- };
+ mergeGroupsToReleaseBack(Region, GroupsToRelease);
+ }
- // Note that we have extracted the `GroupsToRelease` from region freelist.
- // It's safe to let pushBlocks()/popBatches() access the remaining region
- // freelist. In the steps 3 and 4, we will temporarily release the FLLock
- // and lock it again before step 5.
+ ScopedLock L(Region->FLLock);
+ const uptr PageSize = getPageSizeCached();
+ const uptr TotalBlocks = Region->MemMapInfo.AllocatedUser / BlockSize;
+ const uptr InUseBlocks =
+ Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks;
+ const uptr AllocatedPagesCount =
+ roundUp(Region->MemMapInfo.AllocatedUser, PageSize) / PageSize;
+ DCHECK_GE(AllocatedPagesCount, Recorder.getReleasedPagesCount());
+ const uptr InUsePages =
+ AllocatedPagesCount - Recorder.getReleasedPagesCount();
+ const uptr InUseBytes = InUsePages * PageSize;
+
+ Str->append(" %02zu (%6zu): inuse/total blocks: %6zu/%6zu inuse/total "
+ "pages: %6zu/%6zu inuse bytes: %6zuK\n",
+ ClassId, BlockSize, InUseBlocks, TotalBlocks, InUsePages,
+ AllocatedPagesCount, InUseBytes >> 10);
+ }
+
+ NOINLINE uptr releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
+ ReleaseToOS ReleaseType = ReleaseToOS::Normal)
+ REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
+ const uptr BlockSize = getSizeByClassId(ClassId);
+ uptr BytesInFreeList;
+ const uptr AllocatedUserEnd =
+ Region->MemMapInfo.AllocatedUser + Region->RegionBeg;
+ SinglyLinkedList<BatchGroupT> GroupsToRelease;
- uptr ReleasedBytes = 0;
{
- FLLockScopedUnlock UL(Region);
+ ScopedLock L(Region->FLLock);
+
+ BytesInFreeList = Region->MemMapInfo.AllocatedUser -
+ (Region->FreeListInfo.PoppedBlocks -
+ Region->FreeListInfo.PushedBlocks) *
+ BlockSize;
+ if (UNLIKELY(BytesInFreeList == 0))
+ return false;
+
// ==================================================================== //
- // 3. Mark the free blocks in `GroupsToRelease` in the
- // `PageReleaseContext`. Then we can tell which pages are in-use by
- // querying `PageReleaseContext`.
+ // 1. Check if we have enough free blocks and if it's worth doing a page
+ // release.
// ==================================================================== //
- PageReleaseContext Context = markFreeBlocks(
- Region, BlockSize, AllocatedUserEnd, CompactPtrBase, GroupsToRelease);
- if (UNLIKELY(!Context.hasBlockMarked())) {
- ScopedLock L(Region->FLLock);
- mergeGroupsToReleaseBack(Region, GroupsToRelease);
+ if (ReleaseType != ReleaseToOS::ForceAll &&
+ !hasChanceToReleasePages(Region, BlockSize, BytesInFreeList,
+ ReleaseType)) {
return 0;
}
// ==================================================================== //
- // 4. Release the unused physical pages back to the OS.
+ // 2. Determine which groups can release the pages. Use a heuristic to
+ // gather groups that are candidates for doing a release.
// ==================================================================== //
- RegionReleaseRecorder<MemMapT> Recorder(&Region->MemMapInfo.MemMap,
- Region->RegionBeg,
- Context.getReleaseOffset());
- auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
- releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
- if (Recorder.getReleasedRangesCount() > 0) {
- Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
- Region->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
- Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
+ if (ReleaseType == ReleaseToOS::ForceAll) {
+ GroupsToRelease = Region->FreeListInfo.BlockList;
+ Region->FreeListInfo.BlockList.clear();
+ } else {
+ GroupsToRelease =
+ collectGroupsToRelease(Region, BlockSize, AllocatedUserEnd,
+ getCompactPtrBaseByClassId(ClassId));
}
- Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();
- ReleasedBytes = Recorder.getReleasedBytes();
+ if (GroupsToRelease.empty())
+ return 0;
}
+ // Note that we have extracted the `GroupsToRelease` from region freelist.
+ // It's safe to let pushBlocks()/popBatches() access the remaining region
+ // freelist. In the steps 3 and 4, we will temporarily release the FLLock
+ // and lock it again before step 5.
+
+ // ==================================================================== //
+ // 3. Mark the free blocks in `GroupsToRelease` in the `PageReleaseContext`.
+ // Then we can tell which pages are in-use by querying
+ // `PageReleaseContext`.
+ // ==================================================================== //
+ PageReleaseContext Context =
+ markFreeBlocks(Region, BlockSize, AllocatedUserEnd,
+ getCompactPtrBaseByClassId(ClassId), GroupsToRelease);
+ if (UNLIKELY(!Context.hasBlockMarked())) {
+ mergeGroupsToReleaseBack(Region, GroupsToRelease);
+ return 0;
+ }
+
+ // ==================================================================== //
+ // 4. Release the unused physical pages back to the OS.
+ // ==================================================================== //
+ RegionReleaseRecorder<MemMapT> Recorder(&Region->MemMapInfo.MemMap,
+ Region->RegionBeg,
+ Context.getReleaseOffset());
+ auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
+ releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
+ if (Recorder.getReleasedRangesCount() > 0) {
+ Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
+ Region->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
+ Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
+ }
+ Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();
+
// ====================================================================== //
// 5. Merge the `GroupsToRelease` back to the freelist.
// ====================================================================== //
mergeGroupsToReleaseBack(Region, GroupsToRelease);
- return ReleasedBytes;
+ return Recorder.getReleasedBytes();
}
bool hasChanceToReleasePages(RegionInfo *Region, uptr BlockSize,
@@ -1154,13 +1286,13 @@ private:
return true;
}
- SinglyLinkedList<BatchGroup>
+ SinglyLinkedList<BatchGroupT>
collectGroupsToRelease(RegionInfo *Region, const uptr BlockSize,
const uptr AllocatedUserEnd, const uptr CompactPtrBase)
REQUIRES(Region->MMLock, Region->FLLock) {
- const uptr GroupSize = (1U << GroupSizeLog);
+ const uptr GroupSize = (1UL << GroupSizeLog);
const uptr PageSize = getPageSizeCached();
- SinglyLinkedList<BatchGroup> GroupsToRelease;
+ SinglyLinkedList<BatchGroupT> GroupsToRelease;
// We are examining each group and will take the minimum distance to the
// release threshold as the next Region::TryReleaseThreshold(). Note that if
@@ -1169,8 +1301,8 @@ private:
// the comment on `SmallerBlockReleasePageDelta` for more details.
uptr MinDistToThreshold = GroupSize;
- for (BatchGroup *BG = Region->FreeListInfo.BlockList.front(),
- *Prev = nullptr;
+ for (BatchGroupT *BG = Region->FreeListInfo.BlockList.front(),
+ *Prev = nullptr;
BG != nullptr;) {
// Group boundary is always GroupSize-aligned from CompactPtr base. The
// layout of memory groups is like,
@@ -1254,7 +1386,7 @@ private:
}
}
- // If `BG` is the first BatchGroup in the list, we only need to advance
+ // If `BG` is the first BatchGroupT in the list, we only need to advance
// `BG` and call FreeListInfo.BlockList::pop_front(). No update is needed
// for `Prev`.
//
@@ -1290,7 +1422,7 @@ private:
// Note that we need to advance before pushing this BatchGroup to
// GroupsToRelease because it's a destructive operation.
- BatchGroup *Cur = BG;
+ BatchGroupT *Cur = BG;
BG = BG->Next;
// Ideally, we may want to update this only after successful release.
@@ -1323,9 +1455,9 @@ private:
PageReleaseContext
markFreeBlocks(RegionInfo *Region, const uptr BlockSize,
const uptr AllocatedUserEnd, const uptr CompactPtrBase,
- SinglyLinkedList<BatchGroup> &GroupsToRelease)
+ SinglyLinkedList<BatchGroupT> &GroupsToRelease)
REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
- const uptr GroupSize = (1U << GroupSizeLog);
+ const uptr GroupSize = (1UL << GroupSizeLog);
auto DecompactPtr = [CompactPtrBase](CompactPtrT CompactPtr) {
return decompactPtrInternal(CompactPtrBase, CompactPtr);
};
@@ -1352,7 +1484,7 @@ private:
if (UNLIKELY(!Context.ensurePageMapAllocated()))
return Context;
- for (BatchGroup &BG : GroupsToRelease) {
+ for (BatchGroupT &BG : GroupsToRelease) {
const uptr BatchGroupBase =
decompactGroupBase(CompactPtrBase, BG.CompactPtrGroupBase);
const uptr BatchGroupEnd = BatchGroupBase + GroupSize;
@@ -1400,8 +1532,10 @@ private:
}
void mergeGroupsToReleaseBack(RegionInfo *Region,
- SinglyLinkedList<BatchGroup> &GroupsToRelease)
- REQUIRES(Region->MMLock, Region->FLLock) {
+ SinglyLinkedList<BatchGroupT> &GroupsToRelease)
+ REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
+ ScopedLock L(Region->FLLock);
+
// After merging two freelists, we may have redundant `BatchGroup`s that
// need to be recycled. The number of unused `BatchGroup`s is expected to be
// small. Pick a constant which is inferred from real programs.
@@ -1419,8 +1553,8 @@ private:
// Merge GroupsToRelease back to the Region::FreeListInfo.BlockList. Note
// that both `Region->FreeListInfo.BlockList` and `GroupsToRelease` are
// sorted.
- for (BatchGroup *BG = Region->FreeListInfo.BlockList.front(),
- *Prev = nullptr;
+ for (BatchGroupT *BG = Region->FreeListInfo.BlockList.front(),
+ *Prev = nullptr;
;) {
if (BG == nullptr || GroupsToRelease.empty()) {
if (!GroupsToRelease.empty())
@@ -1437,8 +1571,8 @@ private:
continue;
}
- BatchGroup *Cur = GroupsToRelease.front();
- TransferBatch *UnusedTransferBatch = nullptr;
+ BatchGroupT *Cur = GroupsToRelease.front();
+ TransferBatchT *UnusedTransferBatch = nullptr;
GroupsToRelease.pop_front();
if (BG->CompactPtrGroupBase == Cur->CompactPtrGroupBase) {
@@ -1454,7 +1588,7 @@ private:
if (Cur->Batches.front()->getCount() == MaxCachedPerBatch) {
BG->Batches.append_back(&Cur->Batches);
} else {
- TransferBatch *NonFullBatch = Cur->Batches.front();
+ TransferBatchT *NonFullBatch = Cur->Batches.front();
Cur->Batches.pop_front();
const u16 NonFullBatchCount = NonFullBatch->getCount();
// The remaining Batches in `Cur` are full.
@@ -1481,6 +1615,8 @@ private:
if (UNLIKELY(Idx + NeededSlots > MaxUnusedSize)) {
ScopedLock L(BatchClassRegion->FLLock);
pushBatchClassBlocks(BatchClassRegion, Blocks, Idx);
+ if (conditionVariableEnabled())
+ BatchClassRegion->FLLockCV.notifyAll(BatchClassRegion->FLLock);
Idx = 0;
}
Blocks[Idx++] =
@@ -1516,15 +1652,20 @@ private:
if (Idx != 0) {
ScopedLock L(BatchClassRegion->FLLock);
pushBatchClassBlocks(BatchClassRegion, Blocks, Idx);
+ if (conditionVariableEnabled())
+ BatchClassRegion->FLLockCV.notifyAll(BatchClassRegion->FLLock);
}
if (SCUDO_DEBUG) {
- BatchGroup *Prev = Region->FreeListInfo.BlockList.front();
- for (BatchGroup *Cur = Prev->Next; Cur != nullptr;
+ BatchGroupT *Prev = Region->FreeListInfo.BlockList.front();
+ for (BatchGroupT *Cur = Prev->Next; Cur != nullptr;
Prev = Cur, Cur = Cur->Next) {
CHECK_LT(Prev->CompactPtrGroupBase, Cur->CompactPtrGroupBase);
}
}
+
+ if (conditionVariableEnabled())
+ Region->FLLockCV.notifyAll(Region->FLLock);
}
// TODO: `PrimaryBase` can be obtained from ReservedMemory. This needs to be
diff --git a/compiler-rt/lib/scudo/standalone/release.cpp b/compiler-rt/lib/scudo/standalone/release.cpp
index 938bb41faf69..875a2b0c1c57 100644
--- a/compiler-rt/lib/scudo/standalone/release.cpp
+++ b/compiler-rt/lib/scudo/standalone/release.cpp
@@ -10,7 +10,8 @@
namespace scudo {
-BufferPool<RegionPageMap::StaticBufferCount, RegionPageMap::StaticBufferSize>
+BufferPool<RegionPageMap::StaticBufferCount,
+ RegionPageMap::StaticBufferNumElements>
RegionPageMap::Buffers;
} // namespace scudo
diff --git a/compiler-rt/lib/scudo/standalone/release.h b/compiler-rt/lib/scudo/standalone/release.h
index 5bf963d0f26f..b6f76a4d2058 100644
--- a/compiler-rt/lib/scudo/standalone/release.h
+++ b/compiler-rt/lib/scudo/standalone/release.h
@@ -80,20 +80,53 @@ private:
MapPlatformData *Data = nullptr;
};
-// A buffer pool which holds a fixed number of static buffers for fast buffer
-// allocation. If the request size is greater than `StaticBufferSize`, it'll
+class FragmentationRecorder {
+public:
+ FragmentationRecorder() = default;
+
+ uptr getReleasedPagesCount() const { return ReleasedPagesCount; }
+
+ void releasePageRangeToOS(uptr From, uptr To) {
+ DCHECK_EQ((To - From) % getPageSizeCached(), 0U);
+ ReleasedPagesCount += (To - From) / getPageSizeCached();
+ }
+
+private:
+ uptr ReleasedPagesCount = 0;
+};
+
+// A buffer pool which holds a fixed number of static buffers of `uptr` elements
+// for fast buffer allocation. If the request size is greater than
+// `StaticBufferNumElements` or if all the static buffers are in use, it'll
// delegate the allocation to map().
-template <uptr StaticBufferCount, uptr StaticBufferSize> class BufferPool {
+template <uptr StaticBufferCount, uptr StaticBufferNumElements>
+class BufferPool {
public:
// Preserve 1 bit in the `Mask` so that we don't need to do zero-check while
// extracting the least significant bit from the `Mask`.
static_assert(StaticBufferCount < SCUDO_WORDSIZE, "");
- static_assert(isAligned(StaticBufferSize, SCUDO_CACHE_LINE_SIZE), "");
+ static_assert(isAligned(StaticBufferNumElements * sizeof(uptr),
+ SCUDO_CACHE_LINE_SIZE),
+ "");
+
+ struct Buffer {
+ // Pointer to the buffer's memory, or nullptr if no buffer was allocated.
+ uptr *Data = nullptr;
+
+ // The index of the underlying static buffer, or StaticBufferCount if this
+ // buffer was dynamically allocated. This value is initially set to a poison
+ // value to aid debugging.
+ uptr BufferIndex = ~static_cast<uptr>(0);
- // Return a buffer which is at least `BufferSize`.
- uptr *getBuffer(const uptr BufferSize) {
- if (UNLIKELY(BufferSize > StaticBufferSize))
- return getDynamicBuffer(BufferSize);
+ // Only valid if BufferIndex == StaticBufferCount.
+ MemMapT MemMap = {};
+ };
+
+ // Return a zero-initialized buffer which can contain at least the given
+ // number of elements, or nullptr on failure.
+ Buffer getBuffer(const uptr NumElements) {
+ if (UNLIKELY(NumElements > StaticBufferNumElements))
+ return getDynamicBuffer(NumElements);
uptr index;
{
@@ -108,69 +141,55 @@ public:
}
if (index >= StaticBufferCount)
- return getDynamicBuffer(BufferSize);
+ return getDynamicBuffer(NumElements);
- const uptr Offset = index * StaticBufferSize;
- memset(&RawBuffer[Offset], 0, StaticBufferSize);
- return &RawBuffer[Offset];
+ Buffer Buf;
+ Buf.Data = &RawBuffer[index * StaticBufferNumElements];
+ Buf.BufferIndex = index;
+ memset(Buf.Data, 0, StaticBufferNumElements * sizeof(uptr));
+ return Buf;
}
- void releaseBuffer(uptr *Buffer, const uptr BufferSize) {
- const uptr index = getStaticBufferIndex(Buffer, BufferSize);
- if (index < StaticBufferCount) {
+ void releaseBuffer(Buffer Buf) {
+ DCHECK_NE(Buf.Data, nullptr);
+ DCHECK_LE(Buf.BufferIndex, StaticBufferCount);
+ if (Buf.BufferIndex != StaticBufferCount) {
ScopedLock L(Mutex);
- DCHECK_EQ((Mask & (static_cast<uptr>(1) << index)), 0U);
- Mask |= static_cast<uptr>(1) << index;
+ DCHECK_EQ((Mask & (static_cast<uptr>(1) << Buf.BufferIndex)), 0U);
+ Mask |= static_cast<uptr>(1) << Buf.BufferIndex;
} else {
- unmap(reinterpret_cast<void *>(Buffer),
- roundUp(BufferSize, getPageSizeCached()));
+ Buf.MemMap.unmap(Buf.MemMap.getBase(), Buf.MemMap.getCapacity());
}
}
- bool isStaticBufferTestOnly(uptr *Buffer, uptr BufferSize) {
- return getStaticBufferIndex(Buffer, BufferSize) < StaticBufferCount;
+ bool isStaticBufferTestOnly(const Buffer &Buf) {
+ DCHECK_NE(Buf.Data, nullptr);
+ DCHECK_LE(Buf.BufferIndex, StaticBufferCount);
+ return Buf.BufferIndex != StaticBufferCount;
}
private:
- uptr getStaticBufferIndex(uptr *Buffer, uptr BufferSize) {
- if (UNLIKELY(BufferSize > StaticBufferSize))
- return StaticBufferCount;
-
- const uptr BufferBase = reinterpret_cast<uptr>(Buffer);
- const uptr RawBufferBase = reinterpret_cast<uptr>(RawBuffer);
-
- if (BufferBase < RawBufferBase ||
- BufferBase >= RawBufferBase + sizeof(RawBuffer)) {
- return StaticBufferCount;
- }
-
- DCHECK_LE(BufferSize, StaticBufferSize);
- DCHECK_LE(BufferBase + BufferSize, RawBufferBase + sizeof(RawBuffer));
- DCHECK_EQ((BufferBase - RawBufferBase) % StaticBufferSize, 0U);
-
- const uptr index =
- (BufferBase - RawBufferBase) / (StaticBufferSize * sizeof(uptr));
- DCHECK_LT(index, StaticBufferCount);
- return index;
- }
-
- uptr *getDynamicBuffer(const uptr BufferSize) {
+ Buffer getDynamicBuffer(const uptr NumElements) {
// When using a heap-based buffer, precommit the pages backing the
// Vmar by passing |MAP_PRECOMMIT| flag. This allows an optimization
// where page fault exceptions are skipped as the allocated memory
// is accessed. So far, this is only enabled on Fuchsia. It hasn't proven a
// performance benefit on other platforms.
const uptr MmapFlags = MAP_ALLOWNOMEM | (SCUDO_FUCHSIA ? MAP_PRECOMMIT : 0);
- return reinterpret_cast<uptr *>(
- map(nullptr, roundUp(BufferSize, getPageSizeCached()), "scudo:counters",
- MmapFlags, &MapData));
+ const uptr MappedSize =
+ roundUp(NumElements * sizeof(uptr), getPageSizeCached());
+ Buffer Buf;
+ if (Buf.MemMap.map(/*Addr=*/0, MappedSize, "scudo:counters", MmapFlags)) {
+ Buf.Data = reinterpret_cast<uptr *>(Buf.MemMap.getBase());
+ Buf.BufferIndex = StaticBufferCount;
+ }
+ return Buf;
}
HybridMutex Mutex;
// '1' means that buffer index is not used. '0' means the buffer is in use.
uptr Mask GUARDED_BY(Mutex) = ~static_cast<uptr>(0);
- uptr RawBuffer[StaticBufferCount * StaticBufferSize] GUARDED_BY(Mutex);
- [[no_unique_address]] MapPlatformData MapData = {};
+ uptr RawBuffer[StaticBufferCount * StaticBufferNumElements] GUARDED_BY(Mutex);
};
// A Region page map is used to record the usage of pages in the regions. It
@@ -185,23 +204,17 @@ private:
class RegionPageMap {
public:
RegionPageMap()
- : Regions(0),
- NumCounters(0),
- CounterSizeBitsLog(0),
- CounterMask(0),
- PackingRatioLog(0),
- BitOffsetMask(0),
- SizePerRegion(0),
- BufferSize(0),
- Buffer(nullptr) {}
+ : Regions(0), NumCounters(0), CounterSizeBitsLog(0), CounterMask(0),
+ PackingRatioLog(0), BitOffsetMask(0), SizePerRegion(0),
+ BufferNumElements(0) {}
RegionPageMap(uptr NumberOfRegions, uptr CountersPerRegion, uptr MaxValue) {
reset(NumberOfRegions, CountersPerRegion, MaxValue);
}
~RegionPageMap() {
if (!isAllocated())
return;
- Buffers.releaseBuffer(Buffer, BufferSize);
- Buffer = nullptr;
+ Buffers.releaseBuffer(Buffer);
+ Buffer = {};
}
// Lock of `StaticBuffer` is acquired conditionally and there's no easy way to
@@ -216,7 +229,7 @@ public:
Regions = NumberOfRegion;
NumCounters = CountersPerRegion;
- constexpr uptr MaxCounterBits = sizeof(*Buffer) * 8UL;
+ constexpr uptr MaxCounterBits = sizeof(*Buffer.Data) * 8UL;
// Rounding counter storage size up to the power of two allows for using
// bit shifts calculating particular counter's Index and offset.
const uptr CounterSizeBits =
@@ -233,11 +246,11 @@ public:
SizePerRegion =
roundUp(NumCounters, static_cast<uptr>(1U) << PackingRatioLog) >>
PackingRatioLog;
- BufferSize = SizePerRegion * sizeof(*Buffer) * Regions;
- Buffer = Buffers.getBuffer(BufferSize);
+ BufferNumElements = SizePerRegion * Regions;
+ Buffer = Buffers.getBuffer(BufferNumElements);
}
- bool isAllocated() const { return !!Buffer; }
+ bool isAllocated() const { return Buffer.Data != nullptr; }
uptr getCount() const { return NumCounters; }
@@ -246,7 +259,8 @@ public:
DCHECK_LT(I, NumCounters);
const uptr Index = I >> PackingRatioLog;
const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
- return (Buffer[Region * SizePerRegion + Index] >> BitOffset) & CounterMask;
+ return (Buffer.Data[Region * SizePerRegion + Index] >> BitOffset) &
+ CounterMask;
}
void inc(uptr Region, uptr I) const {
@@ -255,8 +269,8 @@ public:
const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
DCHECK_EQ(isAllCounted(Region, I), false);
- Buffer[Region * SizePerRegion + Index] += static_cast<uptr>(1U)
- << BitOffset;
+ Buffer.Data[Region * SizePerRegion + Index] += static_cast<uptr>(1U)
+ << BitOffset;
}
void incN(uptr Region, uptr I, uptr N) const {
@@ -267,7 +281,7 @@ public:
const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
DCHECK_EQ(isAllCounted(Region, I), false);
- Buffer[Region * SizePerRegion + Index] += N << BitOffset;
+ Buffer.Data[Region * SizePerRegion + Index] += N << BitOffset;
}
void incRange(uptr Region, uptr From, uptr To) const {
@@ -286,7 +300,7 @@ public:
const uptr Index = I >> PackingRatioLog;
const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
- Buffer[Region * SizePerRegion + Index] |= CounterMask << BitOffset;
+ Buffer.Data[Region * SizePerRegion + Index] |= CounterMask << BitOffset;
}
void setAsAllCountedRange(uptr Region, uptr From, uptr To) const {
DCHECK_LE(From, To);
@@ -309,9 +323,16 @@ public:
return get(Region, I) == CounterMask;
}
- uptr getBufferSize() const { return BufferSize; }
+ uptr getBufferNumElements() const { return BufferNumElements; }
private:
+ // We may consider making this configurable if there are cases which may
+ // benefit from this.
+ static const uptr StaticBufferCount = 2U;
+ static const uptr StaticBufferNumElements = 512U;
+ using BufferPoolT = BufferPool<StaticBufferCount, StaticBufferNumElements>;
+ static BufferPoolT Buffers;
+
uptr Regions;
uptr NumCounters;
uptr CounterSizeBitsLog;
@@ -320,14 +341,8 @@ private:
uptr BitOffsetMask;
uptr SizePerRegion;
- uptr BufferSize;
- uptr *Buffer;
-
- // We may consider making this configurable if there are cases which may
- // benefit from this.
- static const uptr StaticBufferCount = 2U;
- static const uptr StaticBufferSize = 512U;
- static BufferPool<StaticBufferCount, StaticBufferSize> Buffers;
+ uptr BufferNumElements;
+ BufferPoolT::Buffer Buffer;
};
template <class ReleaseRecorderT> class FreePagesRangeTracker {
diff --git a/compiler-rt/lib/scudo/standalone/report.cpp b/compiler-rt/lib/scudo/standalone/report.cpp
index 81b3dce4e02c..9cef0adc0bb3 100644
--- a/compiler-rt/lib/scudo/standalone/report.cpp
+++ b/compiler-rt/lib/scudo/standalone/report.cpp
@@ -24,11 +24,7 @@ public:
Message.vappend(Format, Args);
va_end(Args);
}
- NORETURN ~ScopedErrorReport() {
- outputRaw(Message.data());
- setAbortMessage(Message.data());
- die();
- }
+ NORETURN ~ScopedErrorReport() { reportRawError(Message.data()); }
private:
ScopedString Message;
@@ -36,18 +32,6 @@ private:
inline void NORETURN trap() { __builtin_trap(); }
-void NORETURN reportSoftRSSLimit(uptr RssLimitMb) {
- ScopedErrorReport Report;
- Report.append("Soft RSS limit of %zu MB exhausted, current RSS is %zu MB\n",
- RssLimitMb, GetRSS() >> 20);
-}
-
-void NORETURN reportHardRSSLimit(uptr RssLimitMb) {
- ScopedErrorReport Report;
- Report.append("Hard RSS limit of %zu MB exhausted, current RSS is %zu MB\n",
- RssLimitMb, GetRSS() >> 20);
-}
-
// This could potentially be called recursively if a CHECK fails in the reports.
void NORETURN reportCheckFailed(const char *File, int Line,
const char *Condition, u64 Value1, u64 Value2) {
@@ -67,6 +51,13 @@ void NORETURN reportError(const char *Message) {
Report.append("%s\n", Message);
}
+// Generic fatal error message without ScopedString.
+void NORETURN reportRawError(const char *Message) {
+ outputRaw(Message);
+ setAbortMessage(Message);
+ die();
+}
+
void NORETURN reportInvalidFlag(const char *FlagType, const char *Value) {
ScopedErrorReport Report;
Report.append("invalid value for %s option: '%s'\n", FlagType, Value);
@@ -79,14 +70,6 @@ void NORETURN reportHeaderCorruption(void *Ptr) {
Report.append("corrupted chunk header at address %p\n", Ptr);
}
-// Two threads have attempted to modify a chunk header at the same time. This is
-// symptomatic of a race-condition in the application code, or general lack of
-// proper locking.
-void NORETURN reportHeaderRace(void *Ptr) {
- ScopedErrorReport Report;
- Report.append("race on chunk header at address %p\n", Ptr);
-}
-
// The allocator was compiled with parameters that conflict with field size
// requirements.
void NORETURN reportSanityCheckError(const char *Field) {
diff --git a/compiler-rt/lib/scudo/standalone/report.h b/compiler-rt/lib/scudo/standalone/report.h
index 3a78ab64b13f..a510fdaebb6d 100644
--- a/compiler-rt/lib/scudo/standalone/report.h
+++ b/compiler-rt/lib/scudo/standalone/report.h
@@ -15,15 +15,17 @@ namespace scudo {
// Reports are *fatal* unless stated otherwise.
-// Generic error.
+// Generic error, adds newline to end of message.
void NORETURN reportError(const char *Message);
+// Generic error, but the message is not modified.
+void NORETURN reportRawError(const char *Message);
+
// Flags related errors.
void NORETURN reportInvalidFlag(const char *FlagType, const char *Value);
// Chunk header related errors.
void NORETURN reportHeaderCorruption(void *Ptr);
-void NORETURN reportHeaderRace(void *Ptr);
// Sanity checks related error.
void NORETURN reportSanityCheckError(const char *Field);
@@ -34,8 +36,6 @@ void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
uptr MaxSize);
void NORETURN reportOutOfBatchClass();
void NORETURN reportOutOfMemory(uptr RequestedSize);
-void NORETURN reportSoftRSSLimit(uptr RssLimitMb);
-void NORETURN reportHardRSSLimit(uptr RssLimitMb);
enum class AllocatorAction : u8 {
Recycling,
Deallocating,
diff --git a/compiler-rt/lib/scudo/standalone/report_linux.cpp b/compiler-rt/lib/scudo/standalone/report_linux.cpp
new file mode 100644
index 000000000000..6a983036e6cd
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/report_linux.cpp
@@ -0,0 +1,58 @@
+//===-- report_linux.cpp ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+#if SCUDO_LINUX || SCUDO_TRUSTY
+
+#include "common.h"
+#include "internal_defs.h"
+#include "report.h"
+#include "report_linux.h"
+#include "string_utils.h"
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+namespace scudo {
+
+// Fatal internal map() error (potentially OOM related).
+void NORETURN reportMapError(uptr SizeIfOOM) {
+ char Error[128] = "Scudo ERROR: internal map failure\n";
+ if (SizeIfOOM) {
+ formatString(
+ Error, sizeof(Error),
+ "Scudo ERROR: internal map failure (NO MEMORY) requesting %zuKB\n",
+ SizeIfOOM >> 10);
+ }
+ reportRawError(Error);
+}
+
+void NORETURN reportUnmapError(uptr Addr, uptr Size) {
+ char Error[128];
+ formatString(Error, sizeof(Error),
+ "Scudo ERROR: internal unmap failure (error desc=%s) Addr 0x%zx "
+ "Size %zu\n",
+ strerror(errno), Addr, Size);
+ reportRawError(Error);
+}
+
+void NORETURN reportProtectError(uptr Addr, uptr Size, int Prot) {
+ char Error[128];
+ formatString(
+ Error, sizeof(Error),
+ "Scudo ERROR: internal protect failure (error desc=%s) Addr 0x%zx "
+ "Size %zu Prot %x\n",
+ strerror(errno), Addr, Size, Prot);
+ reportRawError(Error);
+}
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX || SCUDO_TRUSTY
diff --git a/compiler-rt/lib/scudo/standalone/report_linux.h b/compiler-rt/lib/scudo/standalone/report_linux.h
new file mode 100644
index 000000000000..aa0bb247e672
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/report_linux.h
@@ -0,0 +1,34 @@
+//===-- report_linux.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_REPORT_LINUX_H_
+#define SCUDO_REPORT_LINUX_H_
+
+#include "platform.h"
+
+#if SCUDO_LINUX || SCUDO_TRUSTY
+
+#include "internal_defs.h"
+
+namespace scudo {
+
+// Report a fatal error when a map call fails. SizeIfOOM shall
+// hold the requested size on an out-of-memory error, 0 otherwise.
+void NORETURN reportMapError(uptr SizeIfOOM = 0);
+
+// Report a fatal error when an unmap call fails.
+void NORETURN reportUnmapError(uptr Addr, uptr Size);
+
+// Report a fatal error when a mprotect call fails.
+void NORETURN reportProtectError(uptr Addr, uptr Size, int Prot);
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX || SCUDO_TRUSTY
+
+#endif // SCUDO_REPORT_LINUX_H_
diff --git a/compiler-rt/lib/scudo/standalone/rss_limit_checker.cpp b/compiler-rt/lib/scudo/standalone/rss_limit_checker.cpp
deleted file mode 100644
index f428386b755c..000000000000
--- a/compiler-rt/lib/scudo/standalone/rss_limit_checker.cpp
+++ /dev/null
@@ -1,37 +0,0 @@
-//===-- common.cpp ----------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "rss_limit_checker.h"
-#include "atomic_helpers.h"
-#include "string_utils.h"
-
-namespace scudo {
-
-void RssLimitChecker::check(u64 NextCheck) {
- // The interval for the checks is 250ms.
- static constexpr u64 CheckInterval = 250 * 1000000;
-
- // Early return in case another thread already did the calculation.
- if (!atomic_compare_exchange_strong(&RssNextCheckAtNS, &NextCheck,
- getMonotonicTime() + CheckInterval,
- memory_order_relaxed)) {
- return;
- }
-
- const uptr CurrentRssMb = GetRSS() >> 20;
-
- RssLimitExceeded Result = RssLimitExceeded::Neither;
- if (UNLIKELY(HardRssLimitMb && HardRssLimitMb < CurrentRssMb))
- Result = RssLimitExceeded::Hard;
- else if (UNLIKELY(SoftRssLimitMb && SoftRssLimitMb < CurrentRssMb))
- Result = RssLimitExceeded::Soft;
-
- atomic_store_relaxed(&RssLimitStatus, static_cast<u8>(Result));
-}
-
-} // namespace scudo
diff --git a/compiler-rt/lib/scudo/standalone/rss_limit_checker.h b/compiler-rt/lib/scudo/standalone/rss_limit_checker.h
deleted file mode 100644
index 29dc063f3fc4..000000000000
--- a/compiler-rt/lib/scudo/standalone/rss_limit_checker.h
+++ /dev/null
@@ -1,63 +0,0 @@
-//===-- common.h ------------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_RSS_LIMIT_CHECKER_H_
-#define SCUDO_RSS_LIMIT_CHECKER_H_
-
-#include "atomic_helpers.h"
-#include "common.h"
-#include "internal_defs.h"
-
-namespace scudo {
-
-class RssLimitChecker {
-public:
- enum RssLimitExceeded {
- Neither,
- Soft,
- Hard,
- };
-
- void init(int SoftRssLimitMb, int HardRssLimitMb) {
- CHECK_GE(SoftRssLimitMb, 0);
- CHECK_GE(HardRssLimitMb, 0);
- this->SoftRssLimitMb = static_cast<uptr>(SoftRssLimitMb);
- this->HardRssLimitMb = static_cast<uptr>(HardRssLimitMb);
- }
-
- // Opportunistic RSS limit check. This will update the RSS limit status, if
- // it can, every 250ms, otherwise it will just return the current one.
- RssLimitExceeded getRssLimitExceeded() {
- if (!HardRssLimitMb && !SoftRssLimitMb)
- return RssLimitExceeded::Neither;
-
- u64 NextCheck = atomic_load_relaxed(&RssNextCheckAtNS);
- u64 Now = getMonotonicTime();
-
- if (UNLIKELY(Now >= NextCheck))
- check(NextCheck);
-
- return static_cast<RssLimitExceeded>(atomic_load_relaxed(&RssLimitStatus));
- }
-
- uptr getSoftRssLimit() const { return SoftRssLimitMb; }
- uptr getHardRssLimit() const { return HardRssLimitMb; }
-
-private:
- void check(u64 NextCheck);
-
- uptr SoftRssLimitMb = 0;
- uptr HardRssLimitMb = 0;
-
- atomic_u64 RssNextCheckAtNS = {};
- atomic_u8 RssLimitStatus = {};
-};
-
-} // namespace scudo
-
-#endif // SCUDO_RSS_LIMIT_CHECKER_H_
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index 105b154b5de2..8dc4c87fa7c6 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -72,11 +72,26 @@ static inline void unmap(LargeBlock::Header *H) {
MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
}
+namespace {
+struct CachedBlock {
+ uptr CommitBase = 0;
+ uptr CommitSize = 0;
+ uptr BlockBegin = 0;
+ MemMapT MemMap = {};
+ u64 Time = 0;
+
+ bool isValid() { return CommitBase != 0; }
+
+ void invalidate() { CommitBase = 0; }
+};
+} // namespace
+
template <typename Config> class MapAllocatorNoCache {
public:
void init(UNUSED s32 ReleaseToOsInterval) {}
bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
- UNUSED LargeBlock::Header **H, UNUSED bool *Zeroed) {
+ UNUSED uptr HeadersSize, UNUSED LargeBlock::Header **H,
+ UNUSED bool *Zeroed) {
return false;
}
void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
@@ -102,20 +117,22 @@ public:
static const uptr MaxUnusedCachePages = 4U;
template <typename Config>
-void mapSecondary(Options Options, uptr CommitBase, uptr CommitSize,
+bool mapSecondary(const Options &Options, uptr CommitBase, uptr CommitSize,
uptr AllocPos, uptr Flags, MemMapT &MemMap) {
+ Flags |= MAP_RESIZABLE;
+ Flags |= MAP_ALLOWNOMEM;
+
const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * getPageSizeCached();
if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
- MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary",
- MAP_RESIZABLE | MAP_MEMTAG | Flags);
- MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
- "scudo:secondary", MAP_RESIZABLE | Flags);
+ return MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary",
+ MAP_MEMTAG | Flags) &&
+ MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
+ "scudo:secondary", Flags);
} else {
const uptr RemapFlags =
- MAP_RESIZABLE | (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) |
- Flags;
- MemMap.remap(CommitBase, CommitSize, "scudo:secondary", RemapFlags);
+ (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
+ return MemMap.remap(CommitBase, CommitSize, "scudo:secondary", RemapFlags);
}
}
@@ -138,17 +155,28 @@ public:
void getStats(ScopedString *Str) {
ScopedLock L(Mutex);
+ u32 Integral = 0;
+ u32 Fractional = 0;
+ if (CallsToRetrieve != 0) {
+ Integral = SuccessfulRetrieves * 100 / CallsToRetrieve;
+ Fractional = (((SuccessfulRetrieves * 100) % CallsToRetrieve) * 100 +
+ CallsToRetrieve / 2) /
+ CallsToRetrieve;
+ }
Str->append("Stats: MapAllocatorCache: EntriesCount: %d, "
"MaxEntriesCount: %u, MaxEntrySize: %zu\n",
EntriesCount, atomic_load_relaxed(&MaxEntriesCount),
atomic_load_relaxed(&MaxEntrySize));
+ Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
+ "(%u.%02u%%)\n",
+ SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
for (CachedBlock Entry : Entries) {
- if (!Entry.CommitBase)
+ if (!Entry.isValid())
continue;
Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
- "BlockSize: %zu\n",
+ "BlockSize: %zu %s\n",
Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
- Entry.CommitSize);
+ Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
}
}
@@ -166,7 +194,7 @@ public:
setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
}
- void store(Options Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
+ void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
if (!canCache(H->CommitSize))
return unmap(H);
@@ -195,7 +223,7 @@ public:
MAP_NOACCESS);
}
} else if (Interval == 0) {
- Entry.MemMap.releasePagesToOS(Entry.CommitBase, Entry.CommitSize);
+ Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
Entry.Time = 0;
}
do {
@@ -210,7 +238,7 @@ public:
if (CacheConfig::QuarantineSize && useMemoryTagging<Config>(Options)) {
QuarantinePos =
(QuarantinePos + 1) % Max(CacheConfig::QuarantineSize, 1u);
- if (!Quarantine[QuarantinePos].CommitBase) {
+ if (!Quarantine[QuarantinePos].isValid()) {
Quarantine[QuarantinePos] = Entry;
return;
}
@@ -225,7 +253,7 @@ public:
EmptyCache = true;
} else {
for (u32 I = 0; I < MaxCount; I++) {
- if (Entries[I].CommitBase)
+ if (Entries[I].isValid())
continue;
if (I != 0)
Entries[I] = Entries[0];
@@ -246,26 +274,31 @@ public:
Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
}
- bool retrieve(Options Options, uptr Size, uptr Alignment,
+ bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
const uptr PageSize = getPageSizeCached();
const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
+ // 10% of the requested size proved to be the optimal choice for
+ // retrieving cached blocks after testing several options.
+ constexpr u32 FragmentedBytesDivisor = 10;
bool Found = false;
CachedBlock Entry;
- uptr HeaderPos = 0;
+ uptr EntryHeaderPos = 0;
{
ScopedLock L(Mutex);
+ CallsToRetrieve++;
if (EntriesCount == 0)
return false;
+ u32 OptimalFitIndex = 0;
+ uptr MinDiff = UINTPTR_MAX;
for (u32 I = 0; I < MaxCount; I++) {
- const uptr CommitBase = Entries[I].CommitBase;
- if (!CommitBase)
+ if (!Entries[I].isValid())
continue;
+ const uptr CommitBase = Entries[I].CommitBase;
const uptr CommitSize = Entries[I].CommitSize;
const uptr AllocPos =
roundDown(CommitBase + CommitSize - Size, Alignment);
- HeaderPos =
- AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
+ const uptr HeaderPos = AllocPos - HeadersSize;
if (HeaderPos > CommitBase + CommitSize)
continue;
if (HeaderPos < CommitBase ||
@@ -273,17 +306,36 @@ public:
continue;
}
Found = true;
- Entry = Entries[I];
- Entries[I].CommitBase = 0;
+ const uptr Diff = HeaderPos - CommitBase;
+ // immediately use a cached block if it's size is close enough to the
+ // requested size.
+ const uptr MaxAllowedFragmentedBytes =
+ (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
+ if (Diff <= MaxAllowedFragmentedBytes) {
+ OptimalFitIndex = I;
+ EntryHeaderPos = HeaderPos;
+ break;
+ }
+ // keep track of the smallest cached block
+ // that is greater than (AllocSize + HeaderSize)
+ if (Diff > MinDiff)
+ continue;
+ OptimalFitIndex = I;
+ MinDiff = Diff;
+ EntryHeaderPos = HeaderPos;
+ }
+ if (Found) {
+ Entry = Entries[OptimalFitIndex];
+ Entries[OptimalFitIndex].invalidate();
EntriesCount--;
- break;
+ SuccessfulRetrieves++;
}
}
if (!Found)
return false;
*H = reinterpret_cast<LargeBlock::Header *>(
- LargeBlock::addHeaderTag<Config>(HeaderPos));
+ LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
*Zeroed = Entry.Time == 0;
if (useMemoryTagging<Config>(Options))
Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
@@ -295,8 +347,7 @@ public:
} else if (Entry.BlockBegin < NewBlockBegin) {
storeTags(Entry.BlockBegin, NewBlockBegin);
} else {
- storeTags(untagPointer(NewBlockBegin),
- untagPointer(Entry.BlockBegin));
+ storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
}
}
(*H)->CommitBase = Entry.CommitBase;
@@ -338,15 +389,15 @@ public:
void disableMemoryTagging() EXCLUDES(Mutex) {
ScopedLock L(Mutex);
for (u32 I = 0; I != CacheConfig::QuarantineSize; ++I) {
- if (Quarantine[I].CommitBase) {
+ if (Quarantine[I].isValid()) {
MemMapT &MemMap = Quarantine[I].MemMap;
MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
- Quarantine[I].CommitBase = 0;
+ Quarantine[I].invalidate();
}
}
const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
for (u32 I = 0; I < MaxCount; I++) {
- if (Entries[I].CommitBase) {
+ if (Entries[I].isValid()) {
Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
Entries[I].CommitSize, 0);
}
@@ -367,10 +418,10 @@ private:
{
ScopedLock L(Mutex);
for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++) {
- if (!Entries[I].CommitBase)
+ if (!Entries[I].isValid())
continue;
MapInfo[N] = Entries[I].MemMap;
- Entries[I].CommitBase = 0;
+ Entries[I].invalidate();
N++;
}
EntriesCount = 0;
@@ -382,23 +433,15 @@ private:
}
}
- struct CachedBlock {
- uptr CommitBase = 0;
- uptr CommitSize = 0;
- uptr BlockBegin = 0;
- MemMapT MemMap = {};
- u64 Time = 0;
- };
-
void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
- if (!Entry.CommitBase || !Entry.Time)
+ if (!Entry.isValid() || !Entry.Time)
return;
if (Entry.Time > Time) {
if (OldestTime == 0 || Entry.Time < OldestTime)
OldestTime = Entry.Time;
return;
}
- Entry.MemMap.releasePagesToOS(Entry.CommitBase, Entry.CommitSize);
+ Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
Entry.Time = 0;
}
@@ -421,6 +464,8 @@ private:
u64 OldestTime GUARDED_BY(Mutex) = 0;
u32 IsFullEvents GUARDED_BY(Mutex) = 0;
atomic_s32 ReleaseToOsIntervalMs = {};
+ u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
+ u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;
CachedBlock Entries[CacheConfig::EntriesArraySize] GUARDED_BY(Mutex) = {};
NonZeroLengthArray<CachedBlock, CacheConfig::QuarantineSize>
@@ -439,11 +484,11 @@ public:
S->link(&Stats);
}
- void *allocate(Options Options, uptr Size, uptr AlignmentHint = 0,
+ void *allocate(const Options &Options, uptr Size, uptr AlignmentHint = 0,
uptr *BlockEnd = nullptr,
FillContentsMode FillContents = NoFill);
- void deallocate(Options Options, void *Ptr);
+ void deallocate(const Options &Options, void *Ptr);
static uptr getBlockEnd(void *Ptr) {
auto *B = LargeBlock::getHeader<Config>(Ptr);
@@ -454,6 +499,10 @@ public:
return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
}
+ static constexpr uptr getHeadersSize() {
+ return Chunk::getHeaderSize() + LargeBlock::getHeaderSize();
+ }
+
void disable() NO_THREAD_SAFETY_ANALYSIS {
Mutex.lock();
Cache.disable();
@@ -494,6 +543,7 @@ private:
DoublyLinkedList<LargeBlock::Header> InUseBlocks GUARDED_BY(Mutex);
uptr AllocatedBytes GUARDED_BY(Mutex) = 0;
uptr FreedBytes GUARDED_BY(Mutex) = 0;
+ uptr FragmentedBytes GUARDED_BY(Mutex) = 0;
uptr LargestSize GUARDED_BY(Mutex) = 0;
u32 NumberOfAllocs GUARDED_BY(Mutex) = 0;
u32 NumberOfFrees GUARDED_BY(Mutex) = 0;
@@ -512,24 +562,23 @@ private:
// the committed memory will amount to something close to Size - AlignmentHint
// (pending rounding and headers).
template <typename Config>
-void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
- uptr *BlockEndPtr,
+void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
+ uptr Alignment, uptr *BlockEndPtr,
FillContentsMode FillContents) {
if (Options.get(OptionBit::AddLargeAllocationSlack))
Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
const uptr PageSize = getPageSizeCached();
- uptr RoundedSize =
- roundUp(roundUp(Size, Alignment) + LargeBlock::getHeaderSize() +
- Chunk::getHeaderSize(),
- PageSize);
- if (Alignment > PageSize)
- RoundedSize += Alignment - PageSize;
- if (Alignment < PageSize && Cache.canCache(RoundedSize)) {
+ // Note that cached blocks may have aligned address already. Thus we simply
+ // pass the required size (`Size` + `getHeadersSize()`) to do cache look up.
+ const uptr MinNeededSizeForCache = roundUp(Size + getHeadersSize(), PageSize);
+
+ if (Alignment < PageSize && Cache.canCache(MinNeededSizeForCache)) {
LargeBlock::Header *H;
bool Zeroed;
- if (Cache.retrieve(Options, Size, Alignment, &H, &Zeroed)) {
+ if (Cache.retrieve(Options, Size, Alignment, getHeadersSize(), &H,
+ &Zeroed)) {
const uptr BlockEnd = H->CommitBase + H->CommitSize;
if (BlockEndPtr)
*BlockEndPtr = BlockEnd;
@@ -545,6 +594,7 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
ScopedLock L(Mutex);
InUseBlocks.push_back(H);
AllocatedBytes += H->CommitSize;
+ FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize;
NumberOfAllocs++;
Stats.add(StatAllocated, H->CommitSize);
Stats.add(StatMapped, H->MemMap.getCapacity());
@@ -553,16 +603,22 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
}
}
+ uptr RoundedSize =
+ roundUp(roundUp(Size, Alignment) + getHeadersSize(), PageSize);
+ if (Alignment > PageSize)
+ RoundedSize += Alignment - PageSize;
+
ReservedMemoryT ReservedMemory;
const uptr MapSize = RoundedSize + 2 * PageSize;
- ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr, MAP_ALLOWNOMEM);
+ if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr,
+ MAP_ALLOWNOMEM))) {
+ return nullptr;
+ }
// Take the entire ownership of reserved region.
MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
ReservedMemory.getCapacity());
uptr MapBase = MemMap.getBase();
- if (UNLIKELY(!MapBase))
- return nullptr;
uptr CommitBase = MapBase + PageSize;
uptr MapEnd = MapBase + MapSize;
@@ -592,9 +648,12 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
const uptr CommitSize = MapEnd - PageSize - CommitBase;
const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
- mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0, MemMap);
- const uptr HeaderPos =
- AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
+ if (!mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0,
+ MemMap)) {
+ MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+ return nullptr;
+ }
+ const uptr HeaderPos = AllocPos - getHeadersSize();
LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
LargeBlock::addHeaderTag<Config>(HeaderPos));
if (useMemoryTagging<Config>(Options))
@@ -609,6 +668,7 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
ScopedLock L(Mutex);
InUseBlocks.push_back(H);
AllocatedBytes += CommitSize;
+ FragmentedBytes += H->MemMap.getCapacity() - CommitSize;
if (LargestSize < CommitSize)
LargestSize = CommitSize;
NumberOfAllocs++;
@@ -619,7 +679,7 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
}
template <typename Config>
-void MapAllocator<Config>::deallocate(Options Options, void *Ptr)
+void MapAllocator<Config>::deallocate(const Options &Options, void *Ptr)
EXCLUDES(Mutex) {
LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
const uptr CommitSize = H->CommitSize;
@@ -627,6 +687,7 @@ void MapAllocator<Config>::deallocate(Options Options, void *Ptr)
ScopedLock L(Mutex);
InUseBlocks.remove(H);
FreedBytes += CommitSize;
+ FragmentedBytes -= H->MemMap.getCapacity() - CommitSize;
NumberOfFrees++;
Stats.sub(StatAllocated, CommitSize);
Stats.sub(StatMapped, H->MemMap.getCapacity());
@@ -638,10 +699,11 @@ template <typename Config>
void MapAllocator<Config>::getStats(ScopedString *Str) EXCLUDES(Mutex) {
ScopedLock L(Mutex);
Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times "
- "(%zuK), remains %u (%zuK) max %zuM\n",
+ "(%zuK), remains %u (%zuK) max %zuM, Fragmented %zuK\n",
NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
- (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20);
+ (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20,
+ FragmentedBytes >> 10);
Cache.getStats(Str);
}
diff --git a/compiler-rt/lib/scudo/standalone/size_class_map.h b/compiler-rt/lib/scudo/standalone/size_class_map.h
index 2a6e298f9366..4138885de338 100644
--- a/compiler-rt/lib/scudo/standalone/size_class_map.h
+++ b/compiler-rt/lib/scudo/standalone/size_class_map.h
@@ -254,7 +254,7 @@ struct AndroidSizeClassConfig {
static const u16 MaxNumCachedHint = 13;
static const uptr MaxBytesCachedLog = 13;
- static constexpr u32 Classes[] = {
+ static constexpr uptr Classes[] = {
0x00020, 0x00030, 0x00040, 0x00050, 0x00060, 0x00070, 0x00090, 0x000b0,
0x000c0, 0x000e0, 0x00120, 0x00160, 0x001c0, 0x00250, 0x00320, 0x00450,
0x00670, 0x00830, 0x00a10, 0x00c30, 0x01010, 0x01210, 0x01bd0, 0x02210,
@@ -269,7 +269,7 @@ struct AndroidSizeClassConfig {
static const u16 MaxNumCachedHint = 14;
static const uptr MaxBytesCachedLog = 13;
- static constexpr u32 Classes[] = {
+ static constexpr uptr Classes[] = {
0x00020, 0x00030, 0x00040, 0x00050, 0x00060, 0x00070, 0x00080, 0x00090,
0x000a0, 0x000b0, 0x000c0, 0x000e0, 0x000f0, 0x00110, 0x00120, 0x00130,
0x00150, 0x00160, 0x00170, 0x00190, 0x001d0, 0x00210, 0x00240, 0x002a0,
@@ -289,28 +289,6 @@ typedef TableSizeClassMap<AndroidSizeClassConfig> AndroidSizeClassMap;
static_assert(AndroidSizeClassMap::usesCompressedLSBFormat(), "");
#endif
-struct SvelteSizeClassConfig {
-#if SCUDO_WORDSIZE == 64U
- static const uptr NumBits = 4;
- static const uptr MinSizeLog = 4;
- static const uptr MidSizeLog = 8;
- static const uptr MaxSizeLog = 14;
- static const u16 MaxNumCachedHint = 13;
- static const uptr MaxBytesCachedLog = 10;
- static const uptr SizeDelta = Chunk::getHeaderSize();
-#else
- static const uptr NumBits = 4;
- static const uptr MinSizeLog = 3;
- static const uptr MidSizeLog = 7;
- static const uptr MaxSizeLog = 14;
- static const u16 MaxNumCachedHint = 14;
- static const uptr MaxBytesCachedLog = 10;
- static const uptr SizeDelta = Chunk::getHeaderSize();
-#endif
-};
-
-typedef FixedSizeClassMap<SvelteSizeClassConfig> SvelteSizeClassMap;
-
struct TrustySizeClassConfig {
static const uptr NumBits = 1;
static const uptr MinSizeLog = 5;
diff --git a/compiler-rt/lib/scudo/standalone/stack_depot.h b/compiler-rt/lib/scudo/standalone/stack_depot.h
index 458198fcb7aa..12c35eb2a4f3 100644
--- a/compiler-rt/lib/scudo/standalone/stack_depot.h
+++ b/compiler-rt/lib/scudo/standalone/stack_depot.h
@@ -62,8 +62,7 @@ class StackDepot {
// This is achieved by re-checking the hash of the stack trace before
// returning the trace.
-#ifdef SCUDO_FUZZ
- // Use smaller table sizes for fuzzing in order to reduce input size.
+#if SCUDO_SMALL_STACK_DEPOT
static const uptr TabBits = 4;
#else
static const uptr TabBits = 16;
@@ -72,7 +71,7 @@ class StackDepot {
static const uptr TabMask = TabSize - 1;
atomic_u32 Tab[TabSize] = {};
-#ifdef SCUDO_FUZZ
+#if SCUDO_SMALL_STACK_DEPOT
static const uptr RingBits = 4;
#else
static const uptr RingBits = 19;
diff --git a/compiler-rt/lib/scudo/standalone/trusty.cpp b/compiler-rt/lib/scudo/standalone/trusty.cpp
index 3191091e1b96..26b349c6e506 100644
--- a/compiler-rt/lib/scudo/standalone/trusty.cpp
+++ b/compiler-rt/lib/scudo/standalone/trusty.cpp
@@ -12,6 +12,7 @@
#include "common.h"
#include "mutex.h"
+#include "report_linux.h"
#include "trusty.h"
#include <errno.h> // for errno
@@ -50,7 +51,8 @@ void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
if (IS_ERR(P)) {
errno = lk_err_to_errno(PTR_ERR(P));
- dieOnMapUnmapError(Size);
+ if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
+ reportMapError(Size);
return nullptr;
}
@@ -60,7 +62,7 @@ void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
void unmap(UNUSED void *Addr, UNUSED uptr Size, UNUSED uptr Flags,
UNUSED MapPlatformData *Data) {
if (_trusty_munmap(Addr, Size) != 0)
- dieOnMapUnmapError();
+ reportUnmapError(reinterpret_cast<uptr>(Addr), Size);
}
void setMemoryPermission(UNUSED uptr Addr, UNUSED uptr Size, UNUSED uptr Flags,
diff --git a/compiler-rt/lib/scudo/standalone/tsd.h b/compiler-rt/lib/scudo/standalone/tsd.h
index f4fa545de5e0..b2108a01900b 100644
--- a/compiler-rt/lib/scudo/standalone/tsd.h
+++ b/compiler-rt/lib/scudo/standalone/tsd.h
@@ -53,8 +53,14 @@ template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
inline void unlock() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }
inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
- void commitBack(Allocator *Instance) ASSERT_CAPABILITY(Mutex) {
- Instance->commitBack(this);
+ void commitBack(Allocator *Instance) { Instance->commitBack(this); }
+
+ // As the comments attached to `getCache()`, the TSD doesn't always need to be
+ // locked. In that case, we would only skip the check before we have all TSDs
+ // locked in all paths.
+ void assertLocked(bool BypassCheck) ASSERT_CAPABILITY(Mutex) {
+ if (SCUDO_DEBUG && !BypassCheck)
+ Mutex.assertHeld();
}
// Ideally, we may want to assert that all the operations on
@@ -66,11 +72,8 @@ template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
// TODO(chiahungduan): Ideally, we want to do `Mutex.assertHeld` but acquiring
// TSD doesn't always require holding the lock. Add this assertion while the
// lock is always acquired.
- typename Allocator::CacheT &getCache() ASSERT_CAPABILITY(Mutex) {
- return Cache;
- }
- typename Allocator::QuarantineCacheT &getQuarantineCache()
- ASSERT_CAPABILITY(Mutex) {
+ typename Allocator::CacheT &getCache() REQUIRES(Mutex) { return Cache; }
+ typename Allocator::QuarantineCacheT &getQuarantineCache() REQUIRES(Mutex) {
return QuarantineCache;
}
diff --git a/compiler-rt/lib/scudo/standalone/tsd_shared.h b/compiler-rt/lib/scudo/standalone/tsd_shared.h
index dcb0948ad78f..1bca578ee14b 100644
--- a/compiler-rt/lib/scudo/standalone/tsd_shared.h
+++ b/compiler-rt/lib/scudo/standalone/tsd_shared.h
@@ -120,6 +120,11 @@ struct TSDRegistrySharedT {
TSDsArraySize);
for (uptr I = 0; I < NumberOfTSDs; ++I) {
TSDs[I].lock();
+ // Theoretically, we want to mark TSD::lock()/TSD::unlock() with proper
+ // thread annotations. However, given the TSD is only locked on shared
+ // path, do the assertion in a separate path to avoid confusing the
+ // analyzer.
+ TSDs[I].assertLocked(/*BypassCheck=*/true);
Str->append(" Shared TSD[%zu]:\n", I);
TSDs[I].getCache().getStats(Str);
TSDs[I].unlock();
diff --git a/compiler-rt/lib/scudo/standalone/vector.h b/compiler-rt/lib/scudo/standalone/vector.h
index 9f2c200958fe..c0f1ba0eddfa 100644
--- a/compiler-rt/lib/scudo/standalone/vector.h
+++ b/compiler-rt/lib/scudo/standalone/vector.h
@@ -9,26 +9,20 @@
#ifndef SCUDO_VECTOR_H_
#define SCUDO_VECTOR_H_
-#include "common.h"
+#include "mem_map.h"
#include <string.h>
namespace scudo {
-// A low-level vector based on map. May incur a significant memory overhead for
-// small vectors. The current implementation supports only POD types.
+// A low-level vector based on map. It stores the contents inline up to a fixed
+// capacity, or in an external memory buffer if it grows bigger than that. May
+// incur a significant memory overhead for small vectors. The current
+// implementation supports only POD types.
+//
+// NOTE: This class is not meant to be used directly, use Vector<T> instead.
template <typename T> class VectorNoCtor {
public:
- constexpr void init(uptr InitialCapacity = 0) {
- Data = &LocalData[0];
- CapacityBytes = sizeof(LocalData);
- if (InitialCapacity > capacity())
- reserve(InitialCapacity);
- }
- void destroy() {
- if (Data != &LocalData[0])
- unmap(Data, CapacityBytes, 0, &MapData);
- }
T &operator[](uptr I) {
DCHECK_LT(I, Size);
return Data[I];
@@ -78,24 +72,43 @@ public:
const T *end() const { return data() + size(); }
T *end() { return data() + size(); }
+protected:
+ constexpr void init(uptr InitialCapacity = 0) {
+ Data = &LocalData[0];
+ CapacityBytes = sizeof(LocalData);
+ if (InitialCapacity > capacity())
+ reserve(InitialCapacity);
+ }
+ void destroy() {
+ if (Data != &LocalData[0])
+ ExternalBuffer.unmap(ExternalBuffer.getBase(),
+ ExternalBuffer.getCapacity());
+ }
+
private:
void reallocate(uptr NewCapacity) {
DCHECK_GT(NewCapacity, 0);
DCHECK_LE(Size, NewCapacity);
+
+ MemMapT NewExternalBuffer;
NewCapacity = roundUp(NewCapacity * sizeof(T), getPageSizeCached());
- T *NewData = reinterpret_cast<T *>(
- map(nullptr, NewCapacity, "scudo:vector", 0, &MapData));
- memcpy(NewData, Data, Size * sizeof(T));
+ NewExternalBuffer.map(/*Addr=*/0U, NewCapacity, "scudo:vector");
+ T *NewExternalData = reinterpret_cast<T *>(NewExternalBuffer.getBase());
+
+ memcpy(NewExternalData, Data, Size * sizeof(T));
destroy();
- Data = NewData;
+
+ Data = NewExternalData;
CapacityBytes = NewCapacity;
+ ExternalBuffer = NewExternalBuffer;
}
T *Data = nullptr;
- T LocalData[256 / sizeof(T)] = {};
uptr CapacityBytes = 0;
uptr Size = 0;
- [[no_unique_address]] MapPlatformData MapData = {};
+
+ T LocalData[256 / sizeof(T)] = {};
+ MemMapT ExternalBuffer;
};
template <typename T> class Vector : public VectorNoCtor<T> {
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c.cpp b/compiler-rt/lib/scudo/standalone/wrappers_c.cpp
index b4d51be716cc..60014a0f66bf 100644
--- a/compiler-rt/lib/scudo/standalone/wrappers_c.cpp
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c.cpp
@@ -12,6 +12,9 @@
#if !SCUDO_ANDROID || !_BIONIC
#include "allocator_config.h"
+#include "internal_defs.h"
+#include "platform.h"
+#include "scudo/interface.h"
#include "wrappers_c.h"
#include "wrappers_c_checks.h"
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c.inc b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
index 2c8e382dba0b..56d8ef20156e 100644
--- a/compiler-rt/lib/scudo/standalone/wrappers_c.inc
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
@@ -17,6 +17,35 @@
#define SCUDO_MALLOC_ALIGNMENT FIRST_32_SECOND_64(8U, 16U)
#endif
+static void reportAllocation(void *ptr, size_t size) {
+ if (SCUDO_ENABLE_HOOKS)
+ if (__scudo_allocate_hook && ptr)
+ __scudo_allocate_hook(ptr, size);
+}
+static void reportDeallocation(void *ptr) {
+ if (SCUDO_ENABLE_HOOKS)
+ if (__scudo_deallocate_hook)
+ __scudo_deallocate_hook(ptr);
+}
+static void reportReallocAllocation(void *old_ptr, void *new_ptr, size_t size) {
+ DCHECK_NE(new_ptr, nullptr);
+
+ if (SCUDO_ENABLE_HOOKS) {
+ if (__scudo_realloc_allocate_hook)
+ __scudo_realloc_allocate_hook(old_ptr, new_ptr, size);
+ else if (__scudo_allocate_hook)
+ __scudo_allocate_hook(new_ptr, size);
+ }
+}
+static void reportReallocDeallocation(void *old_ptr) {
+ if (SCUDO_ENABLE_HOOKS) {
+ if (__scudo_realloc_deallocate_hook)
+ __scudo_realloc_deallocate_hook(old_ptr);
+ else if (__scudo_deallocate_hook)
+ __scudo_deallocate_hook(old_ptr);
+ }
+}
+
extern "C" {
INTERFACE WEAK void *SCUDO_PREFIX(calloc)(size_t nmemb, size_t size) {
@@ -28,11 +57,14 @@ INTERFACE WEAK void *SCUDO_PREFIX(calloc)(size_t nmemb, size_t size) {
}
scudo::reportCallocOverflow(nmemb, size);
}
- return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
- Product, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT, true));
+ void *Ptr = SCUDO_ALLOCATOR.allocate(Product, scudo::Chunk::Origin::Malloc,
+ SCUDO_MALLOC_ALIGNMENT, true);
+ reportAllocation(Ptr, Product);
+ return scudo::setErrnoOnNull(Ptr);
}
INTERFACE WEAK void SCUDO_PREFIX(free)(void *ptr) {
+ reportDeallocation(ptr);
SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
}
@@ -75,8 +107,10 @@ INTERFACE WEAK struct __scudo_mallinfo2 SCUDO_PREFIX(mallinfo2)(void) {
#endif
INTERFACE WEAK void *SCUDO_PREFIX(malloc)(size_t size) {
- return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
- size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
+ void *Ptr = SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc,
+ SCUDO_MALLOC_ALIGNMENT);
+ reportAllocation(Ptr, size);
+ return scudo::setErrnoOnNull(Ptr);
}
#if SCUDO_ANDROID
@@ -105,8 +139,10 @@ INTERFACE WEAK void *SCUDO_PREFIX(memalign)(size_t alignment, size_t size) {
scudo::reportAlignmentNotPowerOfTwo(alignment);
}
}
- return SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign,
- alignment);
+ void *Ptr =
+ SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
+ reportAllocation(Ptr, size);
+ return Ptr;
}
INTERFACE WEAK int SCUDO_PREFIX(posix_memalign)(void **memptr, size_t alignment,
@@ -120,6 +156,8 @@ INTERFACE WEAK int SCUDO_PREFIX(posix_memalign)(void **memptr, size_t alignment,
SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
if (UNLIKELY(!Ptr))
return ENOMEM;
+ reportAllocation(Ptr, size);
+
*memptr = Ptr;
return 0;
}
@@ -134,26 +172,57 @@ INTERFACE WEAK void *SCUDO_PREFIX(pvalloc)(size_t size) {
scudo::reportPvallocOverflow(size);
}
// pvalloc(0) should allocate one page.
- return scudo::setErrnoOnNull(
+ void *Ptr =
SCUDO_ALLOCATOR.allocate(size ? scudo::roundUp(size, PageSize) : PageSize,
- scudo::Chunk::Origin::Memalign, PageSize));
+ scudo::Chunk::Origin::Memalign, PageSize);
+ reportAllocation(Ptr, scudo::roundUp(size, PageSize));
+
+ return scudo::setErrnoOnNull(Ptr);
}
INTERFACE WEAK void *SCUDO_PREFIX(realloc)(void *ptr, size_t size) {
- if (!ptr)
- return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
- size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
+ if (!ptr) {
+ void *Ptr = SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc,
+ SCUDO_MALLOC_ALIGNMENT);
+ reportAllocation(Ptr, size);
+ return scudo::setErrnoOnNull(Ptr);
+ }
if (size == 0) {
+ reportDeallocation(ptr);
SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
return nullptr;
}
- return scudo::setErrnoOnNull(
- SCUDO_ALLOCATOR.reallocate(ptr, size, SCUDO_MALLOC_ALIGNMENT));
+
+ // Given that the reporting of deallocation and allocation are not atomic, we
+ // always pretend the old pointer will be released so that the user doesn't
+ // need to worry about the false double-use case from the view of hooks.
+ //
+ // For example, assume that `realloc` releases the old pointer and allocates a
+ // new pointer. Before the reporting of both operations has been done, another
+ // thread may get the old pointer from `malloc`. It may be misinterpreted as
+ // double-use if it's not handled properly on the hook side.
+ reportReallocDeallocation(ptr);
+ void *NewPtr = SCUDO_ALLOCATOR.reallocate(ptr, size, SCUDO_MALLOC_ALIGNMENT);
+ if (NewPtr != nullptr) {
+ // Note that even if NewPtr == ptr, the size has changed. We still need to
+ // report the new size.
+ reportReallocAllocation(/*OldPtr=*/ptr, NewPtr, size);
+ } else {
+ // If `realloc` fails, the old pointer is not released. Report the old
+ // pointer as allocated again.
+ reportReallocAllocation(/*OldPtr=*/ptr, /*NewPtr=*/ptr,
+ SCUDO_ALLOCATOR.getAllocSize(ptr));
+ }
+
+ return scudo::setErrnoOnNull(NewPtr);
}
INTERFACE WEAK void *SCUDO_PREFIX(valloc)(size_t size) {
- return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
- size, scudo::Chunk::Origin::Memalign, scudo::getPageSizeCached()));
+ void *Ptr = SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign,
+ scudo::getPageSizeCached());
+ reportAllocation(Ptr, size);
+
+ return scudo::setErrnoOnNull(Ptr);
}
INTERFACE WEAK int SCUDO_PREFIX(malloc_iterate)(
@@ -198,6 +267,7 @@ INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, int value) {
return 1;
} else if (param == M_LOG_STATS) {
SCUDO_ALLOCATOR.printStats();
+ SCUDO_ALLOCATOR.printFragmentationInfo();
return 1;
} else {
scudo::Option option;
@@ -233,8 +303,12 @@ INTERFACE WEAK void *SCUDO_PREFIX(aligned_alloc)(size_t alignment,
}
scudo::reportInvalidAlignedAllocAlignment(alignment, size);
}
- return scudo::setErrnoOnNull(
- SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc, alignment));
+
+ void *Ptr =
+ SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc, alignment);
+ reportAllocation(Ptr, size);
+
+ return scudo::setErrnoOnNull(Ptr);
}
INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(UNUSED int options, FILE *stream) {
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp b/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
index 1b9fe67d920c..21694c3f17fe 100644
--- a/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
@@ -12,6 +12,9 @@
#if SCUDO_ANDROID && _BIONIC
#include "allocator_config.h"
+#include "internal_defs.h"
+#include "platform.h"
+#include "scudo/interface.h"
#include "wrappers_c.h"
#include "wrappers_c_checks.h"
@@ -24,7 +27,7 @@
extern "C" void SCUDO_PREFIX(malloc_postinit)();
SCUDO_REQUIRE_CONSTANT_INITIALIZATION
-static scudo::Allocator<scudo::AndroidConfig, SCUDO_PREFIX(malloc_postinit)>
+static scudo::Allocator<scudo::Config, SCUDO_PREFIX(malloc_postinit)>
SCUDO_ALLOCATOR;
#include "wrappers_c.inc"
@@ -35,15 +38,15 @@ static scudo::Allocator<scudo::AndroidConfig, SCUDO_PREFIX(malloc_postinit)>
// TODO(kostyak): support both allocators.
INTERFACE void __scudo_print_stats(void) { Allocator.printStats(); }
-INTERFACE void
-__scudo_get_error_info(struct scudo_error_info *error_info,
- uintptr_t fault_addr, const char *stack_depot,
- const char *region_info, const char *ring_buffer,
- const char *memory, const char *memory_tags,
- uintptr_t memory_addr, size_t memory_size) {
+INTERFACE void __scudo_get_error_info(
+ struct scudo_error_info *error_info, uintptr_t fault_addr,
+ const char *stack_depot, size_t stack_depot_size, const char *region_info,
+ const char *ring_buffer, size_t ring_buffer_size, const char *memory,
+ const char *memory_tags, uintptr_t memory_addr, size_t memory_size) {
+ (void)(stack_depot_size);
Allocator.getErrorInfo(error_info, fault_addr, stack_depot, region_info,
- ring_buffer, memory, memory_tags, memory_addr,
- memory_size);
+ ring_buffer, ring_buffer_size, memory, memory_tags,
+ memory_addr, memory_size);
}
INTERFACE const char *__scudo_get_stack_depot_addr() {
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp b/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp
index 374e36d72b3d..098d4f71acc4 100644
--- a/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp
+++ b/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp
@@ -12,6 +12,9 @@
#if !SCUDO_ANDROID || !_BIONIC
#include "allocator_config.h"
+#include "internal_defs.h"
+#include "platform.h"
+#include "scudo/interface.h"
#include "wrappers_c.h"
#include <stdint.h>
@@ -21,86 +24,125 @@ struct nothrow_t {};
enum class align_val_t : size_t {};
} // namespace std
+static void reportAllocation(void *ptr, size_t size) {
+ if (SCUDO_ENABLE_HOOKS)
+ if (__scudo_allocate_hook && ptr)
+ __scudo_allocate_hook(ptr, size);
+}
+static void reportDeallocation(void *ptr) {
+ if (SCUDO_ENABLE_HOOKS)
+ if (__scudo_deallocate_hook)
+ __scudo_deallocate_hook(ptr);
+}
+
INTERFACE WEAK void *operator new(size_t size) {
- return Allocator.allocate(size, scudo::Chunk::Origin::New);
+ void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::New);
+ reportAllocation(Ptr, size);
+ return Ptr;
}
INTERFACE WEAK void *operator new[](size_t size) {
- return Allocator.allocate(size, scudo::Chunk::Origin::NewArray);
+ void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::NewArray);
+ reportAllocation(Ptr, size);
+ return Ptr;
}
INTERFACE WEAK void *operator new(size_t size,
std::nothrow_t const &) NOEXCEPT {
- return Allocator.allocate(size, scudo::Chunk::Origin::New);
+ void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::New);
+ reportAllocation(Ptr, size);
+ return Ptr;
}
INTERFACE WEAK void *operator new[](size_t size,
std::nothrow_t const &) NOEXCEPT {
- return Allocator.allocate(size, scudo::Chunk::Origin::NewArray);
+ void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::NewArray);
+ reportAllocation(Ptr, size);
+ return Ptr;
}
INTERFACE WEAK void *operator new(size_t size, std::align_val_t align) {
- return Allocator.allocate(size, scudo::Chunk::Origin::New,
- static_cast<scudo::uptr>(align));
+ void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::New,
+ static_cast<scudo::uptr>(align));
+ reportAllocation(Ptr, size);
+ return Ptr;
}
INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align) {
- return Allocator.allocate(size, scudo::Chunk::Origin::NewArray,
- static_cast<scudo::uptr>(align));
+ void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::NewArray,
+ static_cast<scudo::uptr>(align));
+ reportAllocation(Ptr, size);
+ return Ptr;
}
INTERFACE WEAK void *operator new(size_t size, std::align_val_t align,
std::nothrow_t const &) NOEXCEPT {
- return Allocator.allocate(size, scudo::Chunk::Origin::New,
- static_cast<scudo::uptr>(align));
+ void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::New,
+ static_cast<scudo::uptr>(align));
+ reportAllocation(Ptr, size);
+ return Ptr;
}
INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align,
std::nothrow_t const &) NOEXCEPT {
- return Allocator.allocate(size, scudo::Chunk::Origin::NewArray,
- static_cast<scudo::uptr>(align));
+ void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::NewArray,
+ static_cast<scudo::uptr>(align));
+ reportAllocation(Ptr, size);
+ return Ptr;
}
INTERFACE WEAK void operator delete(void *ptr) NOEXCEPT {
+ reportDeallocation(ptr);
Allocator.deallocate(ptr, scudo::Chunk::Origin::New);
}
INTERFACE WEAK void operator delete[](void *ptr) NOEXCEPT {
+ reportDeallocation(ptr);
Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray);
}
INTERFACE WEAK void operator delete(void *ptr,
std::nothrow_t const &) NOEXCEPT {
+ reportDeallocation(ptr);
Allocator.deallocate(ptr, scudo::Chunk::Origin::New);
}
INTERFACE WEAK void operator delete[](void *ptr,
std::nothrow_t const &) NOEXCEPT {
+ reportDeallocation(ptr);
Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray);
}
INTERFACE WEAK void operator delete(void *ptr, size_t size) NOEXCEPT {
+ reportDeallocation(ptr);
Allocator.deallocate(ptr, scudo::Chunk::Origin::New, size);
}
INTERFACE WEAK void operator delete[](void *ptr, size_t size) NOEXCEPT {
+ reportDeallocation(ptr);
Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, size);
}
INTERFACE WEAK void operator delete(void *ptr,
std::align_val_t align) NOEXCEPT {
+ reportDeallocation(ptr);
Allocator.deallocate(ptr, scudo::Chunk::Origin::New, 0,
static_cast<scudo::uptr>(align));
}
INTERFACE WEAK void operator delete[](void *ptr,
std::align_val_t align) NOEXCEPT {
+ reportDeallocation(ptr);
Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
static_cast<scudo::uptr>(align));
}
INTERFACE WEAK void operator delete(void *ptr, std::align_val_t align,
std::nothrow_t const &) NOEXCEPT {
+ reportDeallocation(ptr);
Allocator.deallocate(ptr, scudo::Chunk::Origin::New, 0,
static_cast<scudo::uptr>(align));
}
INTERFACE WEAK void operator delete[](void *ptr, std::align_val_t align,
std::nothrow_t const &) NOEXCEPT {
+ reportDeallocation(ptr);
Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
static_cast<scudo::uptr>(align));
}
INTERFACE WEAK void operator delete(void *ptr, size_t size,
std::align_val_t align) NOEXCEPT {
+ reportDeallocation(ptr);
Allocator.deallocate(ptr, scudo::Chunk::Origin::New, size,
static_cast<scudo::uptr>(align));
}
INTERFACE WEAK void operator delete[](void *ptr, size_t size,
std::align_val_t align) NOEXCEPT {
+ reportDeallocation(ptr);
Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, size,
static_cast<scudo::uptr>(align));
}
diff --git a/compiler-rt/lib/tsan/rtl/tsan.syms.extra b/compiler-rt/lib/tsan/rtl/tsan.syms.extra
index a5bd17176b12..6416e5d47fc4 100644
--- a/compiler-rt/lib/tsan/rtl/tsan.syms.extra
+++ b/compiler-rt/lib/tsan/rtl/tsan.syms.extra
@@ -22,6 +22,7 @@ __tsan_mutex_pre_signal
__tsan_mutex_post_signal
__tsan_mutex_pre_divert
__tsan_mutex_post_divert
+__tsan_check_no_mutexes_held
__tsan_get_current_fiber
__tsan_create_fiber
__tsan_destroy_fiber
diff --git a/compiler-rt/lib/tsan/rtl/tsan_debugging.cpp b/compiler-rt/lib/tsan/rtl/tsan_debugging.cpp
index 1e61c31c5a97..41fa293dbaaa 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_debugging.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_debugging.cpp
@@ -35,7 +35,9 @@ static const char *ReportTypeDescription(ReportType typ) {
case ReportTypeSignalUnsafe: return "signal-unsafe-call";
case ReportTypeErrnoInSignal: return "errno-in-signal-handler";
case ReportTypeDeadlock: return "lock-order-inversion";
- // No default case so compiler warns us if we miss one
+ case ReportTypeMutexHeldWrongContext:
+ return "mutex-held-in-wrong-context";
+ // No default case so compiler warns us if we miss one
}
UNREACHABLE("missing case");
}
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
index 177e338bf282..80f86ca98ed9 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
@@ -81,6 +81,8 @@ struct ucontext_t {
#define PTHREAD_ABI_BASE "GLIBC_2.17"
#elif SANITIZER_LOONGARCH64
#define PTHREAD_ABI_BASE "GLIBC_2.36"
+#elif SANITIZER_RISCV64
+# define PTHREAD_ABI_BASE "GLIBC_2.27"
#endif
extern "C" int pthread_attr_init(void *attr);
@@ -2565,7 +2567,7 @@ int sigaction_impl(int sig, const __sanitizer_sigaction *act,
// Copy act into sigactions[sig].
// Can't use struct copy, because compiler can emit call to memcpy.
// Can't use internal_memcpy, because it copies byte-by-byte,
- // and signal handler reads the handler concurrently. It it can read
+ // and signal handler reads the handler concurrently. It can read
// some bytes from old value and some bytes from new value.
// Use volatile to prevent insertion of memcpy.
sigactions[sig].handler =
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface.h b/compiler-rt/lib/tsan/rtl/tsan_interface.h
index d53c1e3935df..3731c90d4591 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface.h
@@ -419,6 +419,14 @@ void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
index 6bd72e18d942..5154662034c5 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
@@ -435,4 +435,26 @@ void __tsan_mutex_post_divert(void *addr, unsigned flagz) {
ThreadIgnoreBegin(thr, 0);
ThreadIgnoreSyncBegin(thr, 0);
}
+
+static void ReportMutexHeldWrongContext(ThreadState *thr, uptr pc) {
+ ThreadRegistryLock l(&ctx->thread_registry);
+ ScopedReport rep(ReportTypeMutexHeldWrongContext);
+ for (uptr i = 0; i < thr->mset.Size(); ++i) {
+ MutexSet::Desc desc = thr->mset.Get(i);
+ rep.AddMutex(desc.addr, desc.stack_id);
+ }
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ rep.AddStack(trace, true);
+ OutputReport(thr, rep);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_check_no_mutexes_held() {
+ SCOPED_ANNOTATION(__tsan_check_no_mutexes_held);
+ if (thr->mset.Size() == 0) {
+ return;
+ }
+ ReportMutexHeldWrongContext(thr, pc);
+}
} // extern "C"
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
index f794a2fcdd0d..2b5a2c6ef79b 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
@@ -895,6 +895,30 @@ void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
}
SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(FetchAnd, *(a32 *)(a + 16), *(a32 **)a, *(a32 *)(a + 8),
+ mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(FetchAnd, *(a64 *)(a + 16), *(a64 **)a, *(a64 *)(a + 8),
+ mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(FetchOr, *(a32 *)(a + 16), *(a32 **)a, *(a32 *)(a + 8),
+ mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(FetchOr, *(a64 *)(a + 16), *(a64 **)a, *(a64 *)(a + 8),
+ mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}
diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform.h b/compiler-rt/lib/tsan/rtl/tsan_platform.h
index f0cdaf48eaa3..70b9ae09a990 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_platform.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_platform.h
@@ -46,17 +46,16 @@ enum {
/*
C/C++ on linux/x86_64 and freebsd/x86_64
-0000 0000 1000 - 0080 0000 0000: main binary and/or MAP_32BIT mappings (512GB)
-0040 0000 0000 - 0100 0000 0000: -
-0100 0000 0000 - 1000 0000 0000: shadow
-1000 0000 0000 - 3000 0000 0000: -
-3000 0000 0000 - 3400 0000 0000: metainfo (memory blocks and sync objects)
-3400 0000 0000 - 5500 0000 0000: -
-5500 0000 0000 - 5680 0000 0000: pie binaries without ASLR or on 4.1+ kernels
-5680 0000 0000 - 7d00 0000 0000: -
-7b00 0000 0000 - 7c00 0000 0000: heap
-7c00 0000 0000 - 7e80 0000 0000: -
-7e80 0000 0000 - 8000 0000 0000: modules and main thread stack
+0000 0000 1000 - 0200 0000 0000: main binary and/or MAP_32BIT mappings (2TB)
+0200 0000 0000 - 1000 0000 0000: -
+1000 0000 0000 - 3000 0000 0000: shadow (32TB)
+3000 0000 0000 - 3800 0000 0000: metainfo (memory blocks and sync objects; 8TB)
+3800 0000 0000 - 5500 0000 0000: -
+5500 0000 0000 - 5a00 0000 0000: pie binaries without ASLR or on 4.1+ kernels
+5a00 0000 0000 - 7200 0000 0000: -
+7200 0000 0000 - 7300 0000 0000: heap (1TB)
+7300 0000 0000 - 7a00 0000 0000: -
+7a00 0000 0000 - 8000 0000 0000: modules and main thread stack (6TB)
C/C++ on netbsd/amd64 can reuse the same mapping:
* The address space starts from 0x1000 (option with 0x0) and ends with
@@ -72,20 +71,20 @@ C/C++ on netbsd/amd64 can reuse the same mapping:
*/
struct Mapping48AddressSpace {
static const uptr kMetaShadowBeg = 0x300000000000ull;
- static const uptr kMetaShadowEnd = 0x340000000000ull;
- static const uptr kShadowBeg = 0x010000000000ull;
- static const uptr kShadowEnd = 0x100000000000ull;
- static const uptr kHeapMemBeg = 0x7b0000000000ull;
- static const uptr kHeapMemEnd = 0x7c0000000000ull;
+ static const uptr kMetaShadowEnd = 0x380000000000ull;
+ static const uptr kShadowBeg = 0x100000000000ull;
+ static const uptr kShadowEnd = 0x300000000000ull;
+ static const uptr kHeapMemBeg = 0x720000000000ull;
+ static const uptr kHeapMemEnd = 0x730000000000ull;
static const uptr kLoAppMemBeg = 0x000000001000ull;
- static const uptr kLoAppMemEnd = 0x008000000000ull;
+ static const uptr kLoAppMemEnd = 0x020000000000ull;
static const uptr kMidAppMemBeg = 0x550000000000ull;
- static const uptr kMidAppMemEnd = 0x568000000000ull;
- static const uptr kHiAppMemBeg = 0x7e8000000000ull;
+ static const uptr kMidAppMemEnd = 0x5a0000000000ull;
+ static const uptr kHiAppMemBeg = 0x7a0000000000ull;
static const uptr kHiAppMemEnd = 0x800000000000ull;
- static const uptr kShadowMsk = 0x780000000000ull;
- static const uptr kShadowXor = 0x040000000000ull;
- static const uptr kShadowAdd = 0x000000000000ull;
+ static const uptr kShadowMsk = 0x700000000000ull;
+ static const uptr kShadowXor = 0x000000000000ull;
+ static const uptr kShadowAdd = 0x100000000000ull;
static const uptr kVdsoBeg = 0xf000000000000000ull;
};
@@ -378,6 +377,71 @@ struct MappingPPC64_47 {
};
/*
+C/C++ on linux/riscv64 (39-bit VMA)
+0000 0010 00 - 0200 0000 00: main binary ( 8 GB)
+0200 0000 00 - 1000 0000 00: -
+1000 0000 00 - 2000 0000 00: shadow memory (64 GB)
+2000 0000 00 - 2400 0000 00: metainfo (16 GB)
+2400 0000 00 - 2aaa aaa0 00: -
+2aaa aaa0 00 - 2c00 0000 00: main binary (PIE) (~5 GB)
+2c00 0000 00 - 3c00 0000 00: -
+3c00 0000 00 - 3fff ffff ff: libraries and main thread stack (16 GB)
+
+mmap by default allocates from top downwards
+VDSO sits below loader and above dynamic libraries, within HiApp region.
+Heap starts after program region whose position depends on pie or non-pie.
+Disable tracking them since their locations are not fixed.
+*/
+struct MappingRiscv64_39 {
+ static const uptr kLoAppMemBeg = 0x0000001000ull;
+ static const uptr kLoAppMemEnd = 0x0200000000ull;
+ static const uptr kShadowBeg = 0x1000000000ull;
+ static const uptr kShadowEnd = 0x2000000000ull;
+ static const uptr kMetaShadowBeg = 0x2000000000ull;
+ static const uptr kMetaShadowEnd = 0x2400000000ull;
+ static const uptr kMidAppMemBeg = 0x2aaaaaa000ull;
+ static const uptr kMidAppMemEnd = 0x2c00000000ull;
+ static const uptr kHeapMemBeg = 0x2c00000000ull;
+ static const uptr kHeapMemEnd = 0x2c00000000ull;
+ static const uptr kHiAppMemBeg = 0x3c00000000ull;
+ static const uptr kHiAppMemEnd = 0x3fffffffffull;
+ static const uptr kShadowMsk = 0x3800000000ull;
+ static const uptr kShadowXor = 0x0800000000ull;
+ static const uptr kShadowAdd = 0x0000000000ull;
+ static const uptr kVdsoBeg = 0x4000000000ull;
+};
+
+/*
+C/C++ on linux/riscv64 (48-bit VMA)
+0000 0000 1000 - 0400 0000 0000: main binary ( 4 TB)
+0400 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 4000 0000 0000: shadow memory (32 TB)
+4000 0000 0000 - 4800 0000 0000: metainfo ( 8 TB)
+4800 0000 0000 - 5555 5555 5000: -
+5555 5555 5000 - 5a00 0000 0000: main binary (PIE) (~5 TB)
+5a00 0000 0000 - 7a00 0000 0000: -
+7a00 0000 0000 - 7fff ffff ffff: libraries and main thread stack ( 6 TB)
+*/
+struct MappingRiscv64_48 {
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x040000000000ull;
+ static const uptr kShadowBeg = 0x200000000000ull;
+ static const uptr kShadowEnd = 0x400000000000ull;
+ static const uptr kMetaShadowBeg = 0x400000000000ull;
+ static const uptr kMetaShadowEnd = 0x480000000000ull;
+ static const uptr kMidAppMemBeg = 0x555555555000ull;
+ static const uptr kMidAppMemEnd = 0x5a0000000000ull;
+ static const uptr kHeapMemBeg = 0x5a0000000000ull;
+ static const uptr kHeapMemEnd = 0x5a0000000000ull;
+ static const uptr kHiAppMemBeg = 0x7a0000000000ull;
+ static const uptr kHiAppMemEnd = 0x7fffffffffffull;
+ static const uptr kShadowMsk = 0x700000000000ull;
+ static const uptr kShadowXor = 0x100000000000ull;
+ static const uptr kShadowAdd = 0x000000000000ull;
+ static const uptr kVdsoBeg = 0x800000000000ull;
+};
+
+/*
C/C++ on linux/s390x
While the kernel provides a 64-bit address space, we have to restrict ourselves
to 48 bits due to how e.g. SyncVar::GetId() works.
@@ -665,6 +729,13 @@ ALWAYS_INLINE auto SelectMapping(Arg arg) {
}
# elif defined(__mips64)
return Func::template Apply<MappingMips64_40>(arg);
+# elif SANITIZER_RISCV64
+ switch (vmaSize) {
+ case 39:
+ return Func::template Apply<MappingRiscv64_39>(arg);
+ case 48:
+ return Func::template Apply<MappingRiscv64_48>(arg);
+ }
# elif defined(__s390x__)
return Func::template Apply<MappingS390x>(arg);
# else
@@ -686,6 +757,8 @@ void ForEachMapping() {
Func::template Apply<MappingPPC64_44>();
Func::template Apply<MappingPPC64_46>();
Func::template Apply<MappingPPC64_47>();
+ Func::template Apply<MappingRiscv64_39>();
+ Func::template Apply<MappingRiscv64_48>();
Func::template Apply<MappingS390x>();
Func::template Apply<MappingGo48>();
Func::template Apply<MappingGoWindows>();
diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp b/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp
index 384a443c16b0..369509ed0a60 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp
@@ -152,7 +152,7 @@ void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
#if !SANITIZER_GO
// Mark shadow for .rodata sections with the special Shadow::kRodata marker.
// Accesses to .rodata can't race, so this saves time, memory and trace space.
-static void MapRodata() {
+static NOINLINE void MapRodata(char* buffer, uptr size) {
// First create temp file.
const char *tmpdir = GetEnv("TMPDIR");
if (tmpdir == 0)
@@ -163,13 +163,12 @@ static void MapRodata() {
#endif
if (tmpdir == 0)
return;
- char name[256];
- internal_snprintf(name, sizeof(name), "%s/tsan.rodata.%d",
+ internal_snprintf(buffer, size, "%s/tsan.rodata.%d",
tmpdir, (int)internal_getpid());
- uptr openrv = internal_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
+ uptr openrv = internal_open(buffer, O_RDWR | O_CREAT | O_EXCL, 0600);
if (internal_iserror(openrv))
return;
- internal_unlink(name); // Unlink it now, so that we can reuse the buffer.
+ internal_unlink(buffer); // Unlink it now, so that we can reuse the buffer.
fd_t fd = openrv;
// Fill the file with Shadow::kRodata.
const uptr kMarkerSize = 512 * 1024 / sizeof(RawShadow);
@@ -188,8 +187,8 @@ static void MapRodata() {
}
// Map the file into shadow of .rodata sections.
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
- // Reusing the buffer 'name'.
- MemoryMappedSegment segment(name, ARRAY_SIZE(name));
+ // Reusing the buffer 'buffer'.
+ MemoryMappedSegment segment(buffer, size);
while (proc_maps.Next(&segment)) {
if (segment.filename[0] != 0 && segment.filename[0] != '[' &&
segment.IsReadable() && segment.IsExecutable() &&
@@ -209,7 +208,8 @@ static void MapRodata() {
}
void InitializeShadowMemoryPlatform() {
- MapRodata();
+ char buffer[256]; // Keep in a different frame.
+ MapRodata(buffer, sizeof(buffer));
}
#endif // #if !SANITIZER_GO
@@ -267,7 +267,17 @@ void InitializePlatformEarly() {
Die();
}
# endif
-#endif
+# elif SANITIZER_RISCV64
+ // the bottom half of vma is allocated for userspace
+ vmaSize = vmaSize + 1;
+# if !SANITIZER_GO
+ if (vmaSize != 39 && vmaSize != 48) {
+ Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
+ Printf("FATAL: Found %zd - Supported 39 and 48\n", vmaSize);
+ Die();
+ }
+# endif
+# endif
}
void InitializePlatform() {
@@ -399,13 +409,15 @@ static uptr UnmangleLongJmpSp(uptr mangled_sp) {
return mangled_sp ^ xor_key;
#elif defined(__mips__)
return mangled_sp;
-#elif defined(__s390x__)
+# elif SANITIZER_RISCV64
+ return mangled_sp;
+# elif defined(__s390x__)
// tcbhead_t.stack_guard
uptr xor_key = ((uptr *)__builtin_thread_pointer())[5];
return mangled_sp ^ xor_key;
-#else
- #error "Unknown platform"
-#endif
+# else
+# error "Unknown platform"
+# endif
}
#if SANITIZER_NETBSD
@@ -429,11 +441,13 @@ static uptr UnmangleLongJmpSp(uptr mangled_sp) {
# define LONG_JMP_SP_ENV_SLOT 1
# elif defined(__mips64)
# define LONG_JMP_SP_ENV_SLOT 1
-# elif defined(__s390x__)
-# define LONG_JMP_SP_ENV_SLOT 9
-# else
-# define LONG_JMP_SP_ENV_SLOT 6
-# endif
+# elif SANITIZER_RISCV64
+# define LONG_JMP_SP_ENV_SLOT 13
+# elif defined(__s390x__)
+# define LONG_JMP_SP_ENV_SLOT 9
+# else
+# define LONG_JMP_SP_ENV_SLOT 6
+# endif
#endif
uptr ExtractLongJmpSp(uptr *env) {
diff --git a/compiler-rt/lib/tsan/rtl/tsan_report.cpp b/compiler-rt/lib/tsan/rtl/tsan_report.cpp
index 3ae666e1212f..35cb6710a54f 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_report.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_report.cpp
@@ -93,7 +93,9 @@ static const char *ReportTypeString(ReportType typ, uptr tag) {
return "signal handler spoils errno";
case ReportTypeDeadlock:
return "lock-order-inversion (potential deadlock)";
- // No default case so compiler warns us if we miss one
+ case ReportTypeMutexHeldWrongContext:
+ return "mutex held in the wrong context";
+ // No default case so compiler warns us if we miss one
}
UNREACHABLE("missing case");
}
@@ -106,10 +108,10 @@ void PrintStack(const ReportStack *ent) {
SymbolizedStack *frame = ent->frames;
for (int i = 0; frame && frame->info.address; frame = frame->next, i++) {
InternalScopedString res;
- RenderFrame(&res, common_flags()->stack_trace_format, i,
- frame->info.address, &frame->info,
- common_flags()->symbolize_vs_style,
- common_flags()->strip_path_prefix);
+ StackTracePrinter::GetOrInit()->RenderFrame(
+ &res, common_flags()->stack_trace_format, i, frame->info.address,
+ &frame->info, common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
Printf("%s\n", res.data());
}
Printf("\n");
diff --git a/compiler-rt/lib/tsan/rtl/tsan_report.h b/compiler-rt/lib/tsan/rtl/tsan_report.h
index 3c88864af147..bfe470797f8f 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_report.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_report.h
@@ -34,7 +34,8 @@ enum ReportType {
ReportTypeMutexBadReadUnlock,
ReportTypeSignalUnsafe,
ReportTypeErrnoInSignal,
- ReportTypeDeadlock
+ ReportTypeDeadlock,
+ ReportTypeMutexHeldWrongContext
};
struct ReportStack {
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
index 6b1ec1d04fdb..fd9441dfcb53 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
@@ -446,7 +446,7 @@ static bool InitializeMemoryProfiler() {
ctx->memprof_fd = 2;
} else {
InternalScopedString filename;
- filename.append("%s.%d", fname, (int)internal_getpid());
+ filename.AppendF("%s.%d", fname, (int)internal_getpid());
ctx->memprof_fd = OpenFile(filename.data(), WrOnly);
if (ctx->memprof_fd == kInvalidFd) {
Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
index a5606dbc7f88..de4ea0bb5f48 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
@@ -56,8 +56,8 @@ namespace __tsan {
#if !SANITIZER_GO
struct MapUnmapCallback;
-#if defined(__mips64) || defined(__aarch64__) || defined(__loongarch__) || \
- defined(__powerpc__)
+# if defined(__mips64) || defined(__aarch64__) || defined(__loongarch__) || \
+ defined(__powerpc__) || SANITIZER_RISCV64
struct AP32 {
static const uptr kSpaceBeg = 0;
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_riscv64.S b/compiler-rt/lib/tsan/rtl/tsan_rtl_riscv64.S
new file mode 100644
index 000000000000..8e6b9b9432ef
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_riscv64.S
@@ -0,0 +1,203 @@
+#include "sanitizer_common/sanitizer_asm.h"
+
+.section .text
+
+.comm _ZN14__interception11real_setjmpE,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(setjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(setjmp))
+ASM_SYMBOL_INTERCEPTOR(setjmp):
+ CFI_STARTPROC
+
+ // Save frame pointer and return address register
+ addi sp, sp, -32
+ sd ra, 24(sp)
+ sd s0, 16(sp)
+ CFI_DEF_CFA_OFFSET (32)
+ CFI_OFFSET (1, -8)
+ CFI_OFFSET (8, -16)
+
+ // Adjust the SP for previous frame
+ addi s0, sp, 32
+ CFI_DEF_CFA_REGISTER (8)
+
+ // Save env parameter
+ sd a0, 8(sp)
+ CFI_OFFSET (10, -24)
+
+ // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)`
+ addi a0, s0, 0
+
+ // call tsan interceptor
+ call ASM_SYMBOL(__tsan_setjmp)
+
+ // Restore env parameter
+ ld a0, 8(sp)
+ CFI_RESTORE (10)
+
+ // Restore frame/link register
+ ld s0, 16(sp)
+ ld ra, 24(sp)
+ addi sp, sp, 32
+ CFI_RESTORE (8)
+ CFI_RESTORE (1)
+ CFI_DEF_CFA (2, 0)
+
+ // tail jump to libc setjmp
+ la t1, _ZN14__interception11real_setjmpE
+ ld t1, 0(t1)
+ jr t1
+
+ CFI_ENDPROC
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(setjmp))
+
+.comm _ZN14__interception12real__setjmpE,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(_setjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(_setjmp))
+ASM_SYMBOL_INTERCEPTOR(_setjmp):
+ CFI_STARTPROC
+
+ // Save frame pointer and return address register
+ addi sp, sp, -32
+ sd ra, 24(sp)
+ sd s0, 16(sp)
+ CFI_DEF_CFA_OFFSET (32)
+ CFI_OFFSET (1, -8)
+ CFI_OFFSET (8, -16)
+
+ // Adjust the SP for previous frame
+ addi s0, sp, 32
+ CFI_DEF_CFA_REGISTER (8)
+
+ // Save env parameter
+ sd a0, 8(sp)
+ CFI_OFFSET (10, -24)
+
+ // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)`
+ addi a0, s0, 0
+
+ // call tsan interceptor
+ call ASM_SYMBOL(__tsan_setjmp)
+
+ // Restore env parameter
+ ld a0, 8(sp)
+ CFI_RESTORE (10)
+
+ // Restore frame/link register
+ ld s0, 16(sp)
+ ld ra, 24(sp)
+ addi sp, sp, 32
+ CFI_RESTORE (8)
+ CFI_RESTORE (1)
+ CFI_DEF_CFA (2, 0)
+
+ // tail jump to libc setjmp
+ la t1, _ZN14__interception12real__setjmpE
+ ld t1, 0(t1)
+ jr t1
+
+ CFI_ENDPROC
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(_setjmp))
+
+.comm _ZN14__interception14real_sigsetjmpE,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
+ASM_SYMBOL_INTERCEPTOR(sigsetjmp):
+ CFI_STARTPROC
+
+ // Save frame pointer and return address register
+ addi sp, sp, -32
+ sd ra, 24(sp)
+ sd s0, 16(sp)
+ CFI_DEF_CFA_OFFSET (32)
+ CFI_OFFSET (1, -8)
+ CFI_OFFSET (8, -16)
+
+ // Adjust the SP for previous frame
+ addi s0, sp, 32
+ CFI_DEF_CFA_REGISTER (8)
+
+ // Save env parameter
+ sd a0, 8(sp)
+ sd a1, 0(sp)
+ CFI_OFFSET (10, -24)
+ CFI_OFFSET (11, -32)
+
+ // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)`
+ addi a0, s0, 0
+
+ // call tsan interceptor
+ call ASM_SYMBOL(__tsan_setjmp)
+
+ // Restore env parameter
+ ld a0, 8(sp)
+ ld a1, 0(sp)
+ CFI_RESTORE (10)
+ CFI_RESTORE (11)
+
+ // Restore frame/link register
+ ld s0, 16(sp)
+ ld ra, 24(sp)
+ addi sp, sp, 32
+ CFI_RESTORE (8)
+ CFI_RESTORE (1)
+ CFI_DEF_CFA (2, 0)
+
+ // tail jump to libc setjmp
+ la t1, _ZN14__interception14real_sigsetjmpE
+ ld t1, 0(t1)
+ jr t1
+
+ CFI_ENDPROC
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
+
+.comm _ZN14__interception16real___sigsetjmpE,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(__sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp))
+ASM_SYMBOL_INTERCEPTOR(__sigsetjmp):
+ CFI_STARTPROC
+
+ // Save frame pointer and return address register
+ addi sp, sp, -32
+ sd ra, 24(sp)
+ sd s0, 16(sp)
+ CFI_DEF_CFA_OFFSET (32)
+ CFI_OFFSET (1, -8)
+ CFI_OFFSET (8, -16)
+
+ // Adjust the SP for previous frame
+ addi s0, sp, 32
+ CFI_DEF_CFA_REGISTER (8)
+
+ // Save env parameter
+ sd a0, 8(sp)
+ sd a1, 0(sp)
+ CFI_OFFSET (10, -24)
+ CFI_OFFSET (11, -32)
+
+ // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)`
+ addi a0, s0, 0
+
+ // call tsan interceptor
+ call ASM_SYMBOL(__tsan_setjmp)
+
+ // Restore env parameter
+ ld a0, 8(sp)
+ ld a1, 0(sp)
+ CFI_RESTORE (10)
+ CFI_RESTORE (11)
+
+ // Restore frame/link register
+ ld s0, 16(sp)
+ ld ra, 24(sp)
+ addi sp, sp, 32
+ CFI_RESTORE (8)
+ CFI_RESTORE (1)
+ CFI_DEF_CFA (2, 0)
+
+ // tail jump to libc setjmp
+ la t1, _ZN14__interception16real___sigsetjmpE
+ ld t1, 0(t1)
+ jr t1
+
+ CFI_ENDPROC
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp))
diff --git a/compiler-rt/lib/tsan/rtl/tsan_suppressions.cpp b/compiler-rt/lib/tsan/rtl/tsan_suppressions.cpp
index 9cdfa32a9343..70642124990d 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_suppressions.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_suppressions.cpp
@@ -81,6 +81,7 @@ static const char *conv(ReportType typ) {
case ReportTypeMutexBadUnlock:
case ReportTypeMutexBadReadLock:
case ReportTypeMutexBadReadUnlock:
+ case ReportTypeMutexHeldWrongContext:
return kSuppressionMutex;
case ReportTypeSignalUnsafe:
case ReportTypeErrnoInSignal:
diff --git a/compiler-rt/lib/ubsan/ubsan_diag.cpp b/compiler-rt/lib/ubsan/ubsan_diag.cpp
index dd99613abbe3..aac270415318 100644
--- a/compiler-rt/lib/ubsan/ubsan_diag.cpp
+++ b/compiler-rt/lib/ubsan/ubsan_diag.cpp
@@ -134,9 +134,9 @@ Diag &Diag::operator<<(const Value &V) {
/// Hexadecimal printing for numbers too large for Printf to handle directly.
static void RenderHex(InternalScopedString *Buffer, UIntMax Val) {
#if HAVE_INT128_T
- Buffer->append("0x%08x%08x%08x%08x", (unsigned int)(Val >> 96),
- (unsigned int)(Val >> 64), (unsigned int)(Val >> 32),
- (unsigned int)(Val));
+ Buffer->AppendF("0x%08x%08x%08x%08x", (unsigned int)(Val >> 96),
+ (unsigned int)(Val >> 64), (unsigned int)(Val >> 32),
+ (unsigned int)(Val));
#else
UNREACHABLE("long long smaller than 64 bits?");
#endif
@@ -147,31 +147,34 @@ static void RenderLocation(InternalScopedString *Buffer, Location Loc) {
case Location::LK_Source: {
SourceLocation SLoc = Loc.getSourceLocation();
if (SLoc.isInvalid())
- Buffer->append("<unknown>");
+ Buffer->AppendF("<unknown>");
else
- RenderSourceLocation(Buffer, SLoc.getFilename(), SLoc.getLine(),
- SLoc.getColumn(), common_flags()->symbolize_vs_style,
- common_flags()->strip_path_prefix);
+ StackTracePrinter::GetOrInit()->RenderSourceLocation(
+ Buffer, SLoc.getFilename(), SLoc.getLine(), SLoc.getColumn(),
+ common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
return;
}
case Location::LK_Memory:
- Buffer->append("%p", reinterpret_cast<void *>(Loc.getMemoryLocation()));
+ Buffer->AppendF("%p", reinterpret_cast<void *>(Loc.getMemoryLocation()));
return;
case Location::LK_Symbolized: {
const AddressInfo &Info = Loc.getSymbolizedStack()->info;
if (Info.file)
- RenderSourceLocation(Buffer, Info.file, Info.line, Info.column,
- common_flags()->symbolize_vs_style,
- common_flags()->strip_path_prefix);
+ StackTracePrinter::GetOrInit()->RenderSourceLocation(
+ Buffer, Info.file, Info.line, Info.column,
+ common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
else if (Info.module)
- RenderModuleLocation(Buffer, Info.module, Info.module_offset,
- Info.module_arch, common_flags()->strip_path_prefix);
+ StackTracePrinter::GetOrInit()->RenderModuleLocation(
+ Buffer, Info.module, Info.module_offset, Info.module_arch,
+ common_flags()->strip_path_prefix);
else
- Buffer->append("%p", reinterpret_cast<void *>(Info.address));
+ Buffer->AppendF("%p", reinterpret_cast<void *>(Info.address));
return;
}
case Location::LK_Null:
- Buffer->append("<unknown>");
+ Buffer->AppendF("<unknown>");
return;
}
}
@@ -180,32 +183,32 @@ static void RenderText(InternalScopedString *Buffer, const char *Message,
const Diag::Arg *Args) {
for (const char *Msg = Message; *Msg; ++Msg) {
if (*Msg != '%') {
- Buffer->append("%c", *Msg);
+ Buffer->AppendF("%c", *Msg);
continue;
}
const Diag::Arg &A = Args[*++Msg - '0'];
switch (A.Kind) {
case Diag::AK_String:
- Buffer->append("%s", A.String);
+ Buffer->AppendF("%s", A.String);
break;
case Diag::AK_TypeName: {
if (SANITIZER_WINDOWS)
// The Windows implementation demangles names early.
- Buffer->append("'%s'", A.String);
+ Buffer->AppendF("'%s'", A.String);
else
- Buffer->append("'%s'", Symbolizer::GetOrInit()->Demangle(A.String));
+ Buffer->AppendF("'%s'", Symbolizer::GetOrInit()->Demangle(A.String));
break;
}
case Diag::AK_SInt:
// 'long long' is guaranteed to be at least 64 bits wide.
if (A.SInt >= INT64_MIN && A.SInt <= INT64_MAX)
- Buffer->append("%lld", (long long)A.SInt);
+ Buffer->AppendF("%lld", (long long)A.SInt);
else
RenderHex(Buffer, A.SInt);
break;
case Diag::AK_UInt:
if (A.UInt <= UINT64_MAX)
- Buffer->append("%llu", (unsigned long long)A.UInt);
+ Buffer->AppendF("%llu", (unsigned long long)A.UInt);
else
RenderHex(Buffer, A.UInt);
break;
@@ -223,11 +226,11 @@ static void RenderText(InternalScopedString *Buffer, const char *Message,
#else
snprintf(FloatBuffer, sizeof(FloatBuffer), "%Lg", (long double)A.Float);
#endif
- Buffer->append("%s", FloatBuffer);
+ Buffer->Append(FloatBuffer);
break;
}
case Diag::AK_Pointer:
- Buffer->append("%p", A.Pointer);
+ Buffer->AppendF("%p", A.Pointer);
break;
}
}
@@ -284,12 +287,12 @@ static void PrintMemorySnippet(const Decorator &Decor, MemoryLocation Loc,
InternalScopedString Buffer;
for (uptr P = Min; P != Max; ++P) {
unsigned char C = *reinterpret_cast<const unsigned char*>(P);
- Buffer.append("%s%02x", (P % 8 == 0) ? " " : " ", C);
+ Buffer.AppendF("%s%02x", (P % 8 == 0) ? " " : " ", C);
}
- Buffer.append("\n");
+ Buffer.AppendF("\n");
// Emit highlights.
- Buffer.append("%s", Decor.Highlight());
+ Buffer.Append(Decor.Highlight());
Range *InRange = upperBound(Min, Ranges, NumRanges);
for (uptr P = Min; P != Max; ++P) {
char Pad = ' ', Byte = ' ';
@@ -302,12 +305,12 @@ static void PrintMemorySnippet(const Decorator &Decor, MemoryLocation Loc,
if (InRange && InRange->getStart().getMemoryLocation() <= P)
Byte = '~';
if (P % 8 == 0)
- Buffer.append("%c", Pad);
- Buffer.append("%c", Pad);
- Buffer.append("%c", P == Loc ? '^' : Byte);
- Buffer.append("%c", Byte);
+ Buffer.AppendF("%c", Pad);
+ Buffer.AppendF("%c", Pad);
+ Buffer.AppendF("%c", P == Loc ? '^' : Byte);
+ Buffer.AppendF("%c", Byte);
}
- Buffer.append("%s\n", Decor.Default());
+ Buffer.AppendF("%s\n", Decor.Default());
// Go over the line again, and print names for the ranges.
InRange = 0;
@@ -322,9 +325,9 @@ static void PrintMemorySnippet(const Decorator &Decor, MemoryLocation Loc,
if (InRange && InRange->getStart().getMemoryLocation() == P) {
while (Spaces--)
- Buffer.append(" ");
+ Buffer.AppendF(" ");
RenderText(&Buffer, InRange->getText(), Args);
- Buffer.append("\n");
+ Buffer.AppendF("\n");
// FIXME: We only support naming one range for now!
break;
}
@@ -358,24 +361,24 @@ Diag::~Diag() {
Buffer.clear();
}
- Buffer.append("%s", Decor.Bold());
+ Buffer.Append(Decor.Bold());
RenderLocation(&Buffer, Loc);
- Buffer.append(":");
+ Buffer.AppendF(":");
switch (Level) {
case DL_Error:
- Buffer.append("%s runtime error: %s%s", Decor.Warning(), Decor.Default(),
- Decor.Bold());
+ Buffer.AppendF("%s runtime error: %s%s", Decor.Warning(), Decor.Default(),
+ Decor.Bold());
break;
case DL_Note:
- Buffer.append("%s note: %s", Decor.Note(), Decor.Default());
+ Buffer.AppendF("%s note: %s", Decor.Note(), Decor.Default());
break;
}
RenderText(&Buffer, Message, Args);
- Buffer.append("%s\n", Decor.Default());
+ Buffer.AppendF("%s\n", Decor.Default());
Printf("%s", Buffer.data());
if (Loc.isMemoryLocation())
diff --git a/compiler-rt/lib/ubsan/ubsan_monitor.cpp b/compiler-rt/lib/ubsan/ubsan_monitor.cpp
index 69dd986f9bdf..caed9726d48b 100644
--- a/compiler-rt/lib/ubsan/ubsan_monitor.cpp
+++ b/compiler-rt/lib/ubsan/ubsan_monitor.cpp
@@ -23,7 +23,8 @@ UndefinedBehaviorReport::UndefinedBehaviorReport(const char *IssueKind,
RegisterUndefinedBehaviorReport(this);
// Make a copy of the diagnostic.
- Buffer.append("%s", Msg.data());
+ if (Msg.length())
+ Buffer.Append(Msg.data());
// Let the monitor know that a report is available.
__ubsan_on_report();
diff --git a/compiler-rt/lib/xray/xray_utils.cpp b/compiler-rt/lib/xray/xray_utils.cpp
index befbabfe4532..5d51df9937c2 100644
--- a/compiler-rt/lib/xray/xray_utils.cpp
+++ b/compiler-rt/lib/xray/xray_utils.cpp
@@ -28,7 +28,7 @@
#include <utility>
#if SANITIZER_FUCHSIA
-#include "sanitizer_common/sanitizer_symbolizer_fuchsia.h"
+#include "sanitizer_common/sanitizer_symbolizer_markup_constants.h"
#include <inttypes.h>
#include <zircon/process.h>