author     Dimitry Andric <dim@FreeBSD.org>  2023-09-02 21:17:18 +0000
committer  Dimitry Andric <dim@FreeBSD.org>  2023-12-08 17:34:50 +0000
commit     06c3fb2749bda94cb5201f81ffdb8fa6c3161b2e (patch)
tree       62f873df87c7c675557a179e0c4c83fe9f3087bc /contrib/llvm-project/compiler-rt
parent     cf037972ea8863e2bab7461d77345367d2c1e054 (diff)
parent     7fa27ce4a07f19b07799a767fc29416f3b625afb (diff)
Diffstat (limited to 'contrib/llvm-project/compiler-rt')
-rw-r--r-- contrib/llvm-project/compiler-rt/include/fuzzer/FuzzedDataProvider.h | 4
-rw-r--r-- contrib/llvm-project/compiler-rt/include/orc_rt/c_api.h | 76
-rw-r--r-- contrib/llvm-project/compiler-rt/include/profile/InstrProfData.inc | 5
-rw-r--r-- contrib/llvm-project/compiler-rt/include/profile/MemProfData.inc | 26
-rw-r--r-- contrib/llvm-project/compiler-rt/include/sanitizer/allocator_interface.h | 8
-rw-r--r-- contrib/llvm-project/compiler-rt/include/sanitizer/common_interface_defs.h | 21
-rw-r--r-- contrib/llvm-project/compiler-rt/include/sanitizer/hwasan_interface.h | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/include/sanitizer/tsan_interface.h | 123
-rw-r--r-- contrib/llvm-project/compiler-rt/include/xray/xray_interface.h | 5
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.cpp | 171
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.h | 9
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/asan/asan_globals.cpp | 9
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.cpp | 163
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.h | 18
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp | 63
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_memintrinsics.h | 37
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/asan/asan_interface_internal.h | 7
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/asan/asan_mac.cpp | 50
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_win.cpp | 8
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/asan/asan_memory_profile.cpp | 19
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/asan/asan_poisoning.cpp | 6
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/asan/asan_stack.h | 32
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/asan/asan_thread.cpp | 153
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/asan/asan_thread.h | 6
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/asan/asan_win.cpp | 5
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi.cpp | 85
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi.h | 84
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi_shim.cpp | 485
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi_tbd.txt | 10
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/README.txt | 12
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/aarch64/lse.S | 40
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/absvdi2.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/absvsi2.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/addtf3.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/arm/aeabi_cdcmp.S | 4
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/arm/aeabi_cfcmp.S | 4
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/arm/divsi3.S | 3
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/arm/udivsi3.S | 3
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/ashlti3.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/ashrti3.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/assembly.h | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/comparetf2.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/cpu_model.c | 40
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/crtbegin.c (renamed from contrib/llvm-project/compiler-rt/lib/crt/crtbegin.c) | 48
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/crtend.c (renamed from contrib/llvm-project/compiler-rt/lib/crt/crtend.c) | 0
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/divtf3.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/extenddftf2.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/extendhftf2.c | 3
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/extendsftf2.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/fixtfdi.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/fixtfsi.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/fixtfti.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/fixunstfdi.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/fixunstfsi.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/fixunstfti.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/floatdidf.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/floatdisf.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/floatditf.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/floatsitf.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/floattidf.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/floattisf.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/floattitf.c | 16
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/floatunditf.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/floatunsitf.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/floatuntidf.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/floatuntisf.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/floatuntitf.c | 26
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/fp_lib.h | 7
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/gcc_personality_v0.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/int_mulo_impl.inc | 4
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/int_mulv_impl.inc | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/lshrti3.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/mulodi4.c | 1
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/mulosi4.c | 1
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/muloti4.c | 1
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/multf3.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/mulvdi3.c | 1
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/mulvsi3.c | 1
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/mulvti3.c | 1
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/negvdi2.c | 3
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/negvsi2.c | 3
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/os_version_check.c | 8
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/powitf2.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/riscv/fp_mode.c | 4
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/subtf3.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/trunctfdf2.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/trunctfhf2.c | 3
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/builtins/trunctfsf2.c | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.cpp | 9
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_allocator.cpp | 33
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_custom.cpp | 112
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/dfsan/done_abilist.txt | 8
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerCommand.h | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerCorpus.h | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.cpp | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDriver.cpp | 11
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.cpp | 7
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.h | 1
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerInternal.h | 7
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerLoop.cpp | 15
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMerge.cpp | 8
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMutate.cpp | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerTracePC.cpp | 4
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtil.h | 3
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp | 58
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp | 5
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp | 9
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler_posix.cpp | 22
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.cpp | 176
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.h | 20
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocation_functions.cpp | 30
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.cpp | 143
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.h | 38
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_checks.h | 101
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_exceptions.cpp | 3
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_flags.inc | 3
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.cpp | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.h | 1
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interceptors.cpp | 399
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interface_internal.h | 59
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_linux.cpp | 11
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_memintrinsics.cpp | 30
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_new_delete.cpp | 24
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_platform_interceptors.h | 1001
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_registers.h | 56
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.cpp | 13
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_aarch64.S | 45
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_riscv64.S | 31
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_x86_64.S | 31
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.cpp | 58
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.h | 3
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.cpp | 19
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.h | 9
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/interception/interception.h | 200
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/interception/interception_linux.cpp | 16
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/interception/interception_linux.h | 18
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/interception/interception_win.cpp | 63
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/interception/interception_win.h | 5
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/lsan/lsan.cpp | 4
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.cpp | 64
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.h | 11
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.cpp | 148
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.h | 32
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_mac.cpp | 35
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/lsan/lsan_fuchsia.cpp | 3
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/lsan/lsan_interceptors.cpp | 117
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/lsan/lsan_linux.cpp | 9
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/lsan/lsan_mac.cpp | 21
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/lsan/lsan_posix.cpp | 3
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.cpp | 58
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.h | 14
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.cpp | 42
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.h | 4
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.cpp | 25
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.h | 6
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.cpp | 63
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.h | 39
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/memprof/memprof_internal.h | 1
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/memprof/memprof_rawprofile.cpp | 48
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/memprof/memprof_rawprofile.h | 5
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/memprof/memprof_rtl.cpp | 9
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/memprof/tests/rawprofile.cpp | 75
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/msan/msan.cpp | 27
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/msan/msan.h | 51
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/msan/msan_allocator.cpp | 77
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/msan/msan_allocator.h | 1
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/msan/msan_dl.cpp | 65
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/msan/msan_dl.h | 30
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/msan/msan_interceptors.cpp | 69
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/msan/msan_poisoning.cpp | 7
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/msan/msan_report.cpp | 11
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/msan/msan_report.h | 4
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/msan/msan_thread.cpp | 1
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/orc/coff_platform.cpp | 12
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/orc/common.h | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/orc/debug.h | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/orc/dlfcn_wrapper.cpp | 6
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/orc/elfnix_platform.cpp | 45
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/orc/macho_platform.cpp | 217
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/orc/run_program_wrapper.cpp | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/orc/tests/unit/c_api_test.cpp | 86
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/orc/tests/unit/wrapper_function_utils_test.cpp | 24
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/orc/wrapper_function_utils.h | 40
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.c | 11
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingBuffer.c | 3
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingFile.c | 24
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingInternal.h | 8
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingMerge.c | 9
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformAIX.c | 210
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c | 64
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingValue.c | 7
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingWriter.c | 21
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp | 6
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h | 7
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h | 6
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_interface.h | 4
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h | 1
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h | 4
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h | 4
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_stats.h | 27
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_array_ref.h | 123
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_asm.h | 46
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp | 20
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.h | 28
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc | 395
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc | 244
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S | 4
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_arm.inc.S | 4
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S | 4
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_loongarch64.inc.S | 4
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S | 4
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S | 6
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interface.inc | 6
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc | 11
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cpp | 9
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_file.h | 1
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.cpp | 4
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.h | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flat_map.h | 17
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp | 6
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h | 18
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libc.cpp | 15
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libc.h | 24
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp | 12
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.h | 4
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp | 132
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mallinfo.h | 38
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h | 13
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp | 9
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h | 19
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp | 27
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h | 19
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp | 12
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.h | 5
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp | 4
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps.h | 24
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_mac.cpp | 16
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h | 25
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_range.cpp | 62
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_range.h | 40
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_redefine_builtins.h | 52
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_signal_interceptors.inc | 3
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h | 4
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.cpp | 56
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.h | 9
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_mac.cpp | 6
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_suppressions.cpp | 1
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.cpp | 4
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp | 20
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp | 38
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp | 3
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp | 14
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_arg_retval.cpp | 94
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_arg_retval.h | 116
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.cpp | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp | 29
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.h | 26
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp | 18
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win_dll_thunk.h | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp | 18
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt | 9
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config.h | 355
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/chunk.h | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/combined.h | 139
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.h | 32
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.cpp | 33
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/include/scudo/interface.h | 9
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.cpp | 16
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/local_cache.h | 54
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map.cpp | 84
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map.h | 91
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_base.h | 129
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp | 252
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.h | 75
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/memtag.h | 10
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/mutex.h | 28
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/platform.h | 10
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary32.h | 672
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary64.h | 1367
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/quarantine.h | 30
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.cpp | 4
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.h | 440
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.cpp | 7
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.h | 1
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/secondary.h | 312
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/size_class_map.h | 8
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/stats.h | 17
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.cpp | 28
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.h | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/thread_annotations.h | 70
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/timing.cpp | 29
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/timing.h | 221
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/trusty.cpp | 64
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd.h | 43
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_exclusive.h | 42
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_shared.h | 57
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/vector.h | 4
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.inc | 22
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp | 15
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_external.cpp | 31
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_flags.cpp | 10
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors.h | 34
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_libdispatch.cpp | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp | 1
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_memintrinsics.cpp | 43
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp | 152
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface.h | 5
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_malloc_mac.cpp | 28
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mman.cpp | 43
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform.h | 4
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp | 13
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_report.cpp | 9
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_aarch64.S | 37
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp | 10
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_spinlock_defs_mac.h | 45
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_suppressions.cpp | 5
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_diag.cpp | 7
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers.cpp | 35
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers.h | 11
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers_cxx.cpp | 44
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers_cxx.h | 16
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_interface.inc | 4
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_signals_standalone.cpp | 5
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/xray/xray_AArch64.cpp | 38
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/xray/xray_fdr_logging.cpp | 15
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/xray/xray_interface.cpp | 45
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/xray/xray_interface_internal.h | 8
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/xray/xray_loongarch64.cpp | 160
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/xray/xray_profiling.cpp | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_AArch64.S | 286
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_loongarch64.S | 124
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_x86_64.S | 28
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/xray/xray_tsc.h | 2
-rw-r--r-- contrib/llvm-project/compiler-rt/lib/xray/xray_x86_64.cpp | 33
343 files changed, 11289 insertions, 3772 deletions
diff --git a/contrib/llvm-project/compiler-rt/include/fuzzer/FuzzedDataProvider.h b/contrib/llvm-project/compiler-rt/include/fuzzer/FuzzedDataProvider.h
index 71cb427ec4a9..8a8214bd99fe 100644
--- a/contrib/llvm-project/compiler-rt/include/fuzzer/FuzzedDataProvider.h
+++ b/contrib/llvm-project/compiler-rt/include/fuzzer/FuzzedDataProvider.h
@@ -209,7 +209,7 @@ T FuzzedDataProvider::ConsumeIntegralInRange(T min, T max) {
abort();
// Use the biggest type possible to hold the range and the result.
- uint64_t range = static_cast<uint64_t>(max) - min;
+ uint64_t range = static_cast<uint64_t>(max) - static_cast<uint64_t>(min);
uint64_t result = 0;
size_t offset = 0;
@@ -230,7 +230,7 @@ T FuzzedDataProvider::ConsumeIntegralInRange(T min, T max) {
if (range != std::numeric_limits<decltype(range)>::max())
result = result % (range + 1);
- return static_cast<T>(min + result);
+ return static_cast<T>(static_cast<uint64_t>(min) + result);
}
// Returns a floating point value in the range [Type's lowest, Type's max] by
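The explicit cast on min matters when T is signed: both operands now visibly
convert to uint64_t, where wraparound is well defined, so the subtraction
yields the exact width of [min, max] even for a negative min and the final
addition wraps back into range. A minimal sketch of the arithmetic (values
chosen for illustration, not taken from the diff):

#include <cassert>
#include <cstdint>

int main() {
  int min = -5, max = 5;
  // Two's-complement wrap: static_cast<uint64_t>(-5) == 2^64 - 5, so the
  // subtraction yields exactly max - min == 10 despite the sign change.
  uint64_t range = static_cast<uint64_t>(max) - static_cast<uint64_t>(min);
  assert(range == 10);
  // Adding a result in [0, range] wraps back into [min, max]: -5 + 7 == 2.
  assert(static_cast<int>(static_cast<uint64_t>(min) + 7) == 2);
  return 0;
}

The behavior is identical to the implicit conversions the old code relied on;
the explicit casts simply make the sign-changing conversions visible.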
diff --git a/contrib/llvm-project/compiler-rt/include/orc_rt/c_api.h b/contrib/llvm-project/compiler-rt/include/orc_rt/c_api.h
index 96d01df15e86..628c5cd10676 100644
--- a/contrib/llvm-project/compiler-rt/include/orc_rt/c_api.h
+++ b/contrib/llvm-project/compiler-rt/include/orc_rt/c_api.h
@@ -48,17 +48,17 @@ ORC_RT_C_EXTERN_C_BEGIN
typedef union {
char *ValuePtr;
char Value[sizeof(char *)];
-} __orc_rt_CWrapperFunctionResultDataUnion;
+} orc_rt_CWrapperFunctionResultDataUnion;
/**
- * __orc_rt_CWrapperFunctionResult is a kind of C-SmallVector with an
+ * orc_rt_CWrapperFunctionResult is a kind of C-SmallVector with an
* out-of-band error state.
*
* If Size == 0 and Data.ValuePtr is non-zero then the value is in the
* 'out-of-band error' state, and Data.ValuePtr points at a malloc-allocated,
* null-terminated string error message.
*
- * If Size <= sizeof(__orc_rt_CWrapperFunctionResultData) then the value is in
+ * If Size <= sizeof(orc_rt_CWrapperFunctionResultData) then the value is in
* the 'small' state and the content is held in the first Size bytes of
* Data.Value.
*
@@ -68,29 +68,29 @@ typedef union {
* malloc, and will be freed with free when this value is destroyed.
*/
typedef struct {
- __orc_rt_CWrapperFunctionResultDataUnion Data;
+ orc_rt_CWrapperFunctionResultDataUnion Data;
size_t Size;
-} __orc_rt_CWrapperFunctionResult;
+} orc_rt_CWrapperFunctionResult;
-typedef struct __orc_rt_CSharedOpaqueJITProcessControl
- *__orc_rt_SharedJITProcessControlRef;
+typedef struct orc_rt_CSharedOpaqueJITProcessControl
+ *orc_rt_SharedJITProcessControlRef;
/**
- * Zero-initialize an __orc_rt_CWrapperFunctionResult.
+ * Zero-initialize an orc_rt_CWrapperFunctionResult.
*/
static inline void
-__orc_rt_CWrapperFunctionResultInit(__orc_rt_CWrapperFunctionResult *R) {
+orc_rt_CWrapperFunctionResultInit(orc_rt_CWrapperFunctionResult *R) {
R->Size = 0;
R->Data.ValuePtr = 0;
}
/**
- * Create an __orc_rt_CWrapperFunctionResult with an uninitialized buffer of
+ * Create an orc_rt_CWrapperFunctionResult with an uninitialized buffer of
* size Size. The buffer is returned via the DataPtr argument.
*/
-static inline __orc_rt_CWrapperFunctionResult
-__orc_rt_CWrapperFunctionResultAllocate(size_t Size) {
- __orc_rt_CWrapperFunctionResult R;
+static inline orc_rt_CWrapperFunctionResult
+orc_rt_CWrapperFunctionResultAllocate(size_t Size) {
+ orc_rt_CWrapperFunctionResult R;
R.Size = Size;
// If Size is 0 ValuePtr must be 0 or it is considered an out-of-band error.
R.Data.ValuePtr = 0;
@@ -100,11 +100,11 @@ __orc_rt_CWrapperFunctionResultAllocate(size_t Size) {
}
/**
- * Create an __orc_rt_WrapperFunctionResult from the given data range.
+ * Create an orc_rt_WrapperFunctionResult from the given data range.
*/
-static inline __orc_rt_CWrapperFunctionResult
-__orc_rt_CreateCWrapperFunctionResultFromRange(const char *Data, size_t Size) {
- __orc_rt_CWrapperFunctionResult R;
+static inline orc_rt_CWrapperFunctionResult
+orc_rt_CreateCWrapperFunctionResultFromRange(const char *Data, size_t Size) {
+ orc_rt_CWrapperFunctionResult R;
R.Size = Size;
if (R.Size > sizeof(R.Data.Value)) {
char *Tmp = (char *)malloc(Size);
@@ -116,28 +116,28 @@ __orc_rt_CreateCWrapperFunctionResultFromRange(const char *Data, size_t Size) {
}
/**
- * Create an __orc_rt_CWrapperFunctionResult by copying the given string,
+ * Create an orc_rt_CWrapperFunctionResult by copying the given string,
* including the null-terminator.
*
* This function copies the input string. The client is responsible for freeing
* the ErrMsg arg.
*/
-static inline __orc_rt_CWrapperFunctionResult
-__orc_rt_CreateCWrapperFunctionResultFromString(const char *Source) {
- return __orc_rt_CreateCWrapperFunctionResultFromRange(Source,
- strlen(Source) + 1);
+static inline orc_rt_CWrapperFunctionResult
+orc_rt_CreateCWrapperFunctionResultFromString(const char *Source) {
+ return orc_rt_CreateCWrapperFunctionResultFromRange(Source,
+ strlen(Source) + 1);
}
/**
- * Create an __orc_rt_CWrapperFunctionResult representing an out-of-band
+ * Create an orc_rt_CWrapperFunctionResult representing an out-of-band
* error.
*
* This function copies the input string. The client is responsible for freeing
* the ErrMsg arg.
*/
-static inline __orc_rt_CWrapperFunctionResult
-__orc_rt_CreateCWrapperFunctionResultFromOutOfBandError(const char *ErrMsg) {
- __orc_rt_CWrapperFunctionResult R;
+static inline orc_rt_CWrapperFunctionResult
+orc_rt_CreateCWrapperFunctionResultFromOutOfBandError(const char *ErrMsg) {
+ orc_rt_CWrapperFunctionResult R;
R.Size = 0;
char *Tmp = (char *)malloc(strlen(ErrMsg) + 1);
strcpy(Tmp, ErrMsg);
@@ -146,11 +146,11 @@ __orc_rt_CreateCWrapperFunctionResultFromOutOfBandError(const char *ErrMsg) {
}
/**
- * This should be called to destroy __orc_rt_CWrapperFunctionResult values
+ * This should be called to destroy orc_rt_CWrapperFunctionResult values
* regardless of their state.
*/
static inline void
-__orc_rt_DisposeCWrapperFunctionResult(__orc_rt_CWrapperFunctionResult *R) {
+orc_rt_DisposeCWrapperFunctionResult(orc_rt_CWrapperFunctionResult *R) {
if (R->Size > sizeof(R->Data.Value) ||
(R->Size == 0 && R->Data.ValuePtr))
free(R->Data.ValuePtr);
@@ -158,22 +158,22 @@ __orc_rt_DisposeCWrapperFunctionResult(__orc_rt_CWrapperFunctionResult *R) {
/**
* Get a pointer to the data contained in the given
- * __orc_rt_CWrapperFunctionResult.
+ * orc_rt_CWrapperFunctionResult.
*/
static inline char *
-__orc_rt_CWrapperFunctionResultData(__orc_rt_CWrapperFunctionResult *R) {
+orc_rt_CWrapperFunctionResultData(orc_rt_CWrapperFunctionResult *R) {
assert((R->Size != 0 || R->Data.ValuePtr == NULL) &&
"Cannot get data for out-of-band error value");
return R->Size > sizeof(R->Data.Value) ? R->Data.ValuePtr : R->Data.Value;
}
/**
- * Safely get the size of the given __orc_rt_CWrapperFunctionResult.
+ * Safely get the size of the given orc_rt_CWrapperFunctionResult.
*
* Asserts that we're not trying to access the size of an error value.
*/
static inline size_t
-__orc_rt_CWrapperFunctionResultSize(const __orc_rt_CWrapperFunctionResult *R) {
+orc_rt_CWrapperFunctionResultSize(const orc_rt_CWrapperFunctionResult *R) {
assert((R->Size != 0 || R->Data.ValuePtr == NULL) &&
"Cannot get size for out-of-band error value");
return R->Size;
@@ -181,22 +181,22 @@ __orc_rt_CWrapperFunctionResultSize(const __orc_rt_CWrapperFunctionResult *R) {
/**
* Returns 1 if this value is equivalent to a value just initialized by
- * __orc_rt_CWrapperFunctionResultInit, 0 otherwise.
+ * orc_rt_CWrapperFunctionResultInit, 0 otherwise.
*/
static inline size_t
-__orc_rt_CWrapperFunctionResultEmpty(const __orc_rt_CWrapperFunctionResult *R) {
+orc_rt_CWrapperFunctionResultEmpty(const orc_rt_CWrapperFunctionResult *R) {
return R->Size == 0 && R->Data.ValuePtr == 0;
}
/**
* Returns a pointer to the out-of-band error string for this
- * __orc_rt_CWrapperFunctionResult, or null if there is no error.
+ * orc_rt_CWrapperFunctionResult, or null if there is no error.
*
- * The __orc_rt_CWrapperFunctionResult retains ownership of the error
+ * The orc_rt_CWrapperFunctionResult retains ownership of the error
* string, so it should be copied if the caller wishes to preserve it.
*/
-static inline const char *__orc_rt_CWrapperFunctionResultGetOutOfBandError(
- const __orc_rt_CWrapperFunctionResult *R) {
+static inline const char *orc_rt_CWrapperFunctionResultGetOutOfBandError(
+ const orc_rt_CWrapperFunctionResult *R) {
return R->Size == 0 ? R->Data.ValuePtr : 0;
}
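A minimal sketch of the renamed API in use, assuming only the declarations
shown above; it exercises the small-buffer state and the
dispose-in-any-state contract:

#include <assert.h>
#include <string.h>
/* assumes orc_rt/c_api.h (shown above) is included */

static void example(void) {
  orc_rt_CWrapperFunctionResult R =
      orc_rt_CreateCWrapperFunctionResultFromString("hello");
  // Size is nonzero, so this is not an out-of-band error value.
  assert(orc_rt_CWrapperFunctionResultGetOutOfBandError(&R) == NULL);
  assert(orc_rt_CWrapperFunctionResultSize(&R) == 6);  // includes the '\0'
  assert(strcmp(orc_rt_CWrapperFunctionResultData(&R), "hello") == 0);
  // Safe regardless of small/heap/error state.
  orc_rt_DisposeCWrapperFunctionResult(&R);
}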
diff --git a/contrib/llvm-project/compiler-rt/include/profile/InstrProfData.inc b/contrib/llvm-project/compiler-rt/include/profile/InstrProfData.inc
index 05419bf01f52..94261f4705b9 100644
--- a/contrib/llvm-project/compiler-rt/include/profile/InstrProfData.inc
+++ b/contrib/llvm-project/compiler-rt/include/profile/InstrProfData.inc
@@ -650,7 +650,7 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
/* Raw profile format version (start from 1). */
#define INSTR_PROF_RAW_VERSION 8
/* Indexed profile format version (start from 1). */
-#define INSTR_PROF_INDEX_VERSION 9
+#define INSTR_PROF_INDEX_VERSION 10
/* Coverage mapping format version (start from 0). */
#define INSTR_PROF_COVMAP_VERSION 5
@@ -663,6 +663,7 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
* The 60th bit indicates single byte coverage instrumentation.
* The 61st bit indicates function entry instrumentation only.
* The 62nd bit indicates whether memory profile information is present.
+ * The 63rd bit indicates if this is a temporal profile.
*/
#define VARIANT_MASKS_ALL 0xff00000000000000ULL
#define GET_VERSION(V) ((V) & ~VARIANT_MASKS_ALL)
@@ -673,9 +674,11 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
#define VARIANT_MASK_BYTE_COVERAGE (0x1ULL << 60)
#define VARIANT_MASK_FUNCTION_ENTRY_ONLY (0x1ULL << 61)
#define VARIANT_MASK_MEMPROF (0x1ULL << 62)
+#define VARIANT_MASK_TEMPORAL_PROF (0x1ULL << 63)
#define INSTR_PROF_RAW_VERSION_VAR __llvm_profile_raw_version
#define INSTR_PROF_PROFILE_RUNTIME_VAR __llvm_profile_runtime
#define INSTR_PROF_PROFILE_COUNTER_BIAS_VAR __llvm_profile_counter_bias
+#define INSTR_PROF_PROFILE_SET_TIMESTAMP __llvm_profile_set_timestamp
/* The variable that holds the name of the profile data
* specified via command line. */
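A sketch of how a reader splits the version word using the macros above; the
helper names are illustrative, not part of InstrProfData.inc:

#include <stdint.h>

/* Illustrative helpers, assuming the macros above are in scope. */
static inline uint64_t raw_format_version(uint64_t V) {
  return GET_VERSION(V);  /* clears all VARIANT_MASKS_ALL feature bits */
}
static inline int is_temporal_profile(uint64_t V) {
  return (V & VARIANT_MASK_TEMPORAL_PROF) != 0;  /* the new 63rd-bit flag */
}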
diff --git a/contrib/llvm-project/compiler-rt/include/profile/MemProfData.inc b/contrib/llvm-project/compiler-rt/include/profile/MemProfData.inc
index c533073da751..b82a4baf6dd7 100644
--- a/contrib/llvm-project/compiler-rt/include/profile/MemProfData.inc
+++ b/contrib/llvm-project/compiler-rt/include/profile/MemProfData.inc
@@ -19,6 +19,7 @@
* synced up.
*
\*===----------------------------------------------------------------------===*/
+#include <string.h>
#ifdef _MSC_VER
#define PACKED(...) __pragma(pack(push,1)) __VA_ARGS__ __pragma(pack(pop))
@@ -32,7 +33,9 @@
(uint64_t)'o' << 24 | (uint64_t)'f' << 16 | (uint64_t)'r' << 8 | (uint64_t)129)
// The version number of the raw binary format.
-#define MEMPROF_RAW_VERSION 2ULL
+#define MEMPROF_RAW_VERSION 3ULL
+
+#define MEMPROF_BUILDID_MAX_SIZE 32ULL
namespace llvm {
namespace memprof {
@@ -46,37 +49,40 @@ PACKED(struct Header {
uint64_t StackOffset;
});
-
// A struct describing the information necessary to describe a /proc/maps
// segment entry for a particular binary/library identified by its build id.
PACKED(struct SegmentEntry {
uint64_t Start;
uint64_t End;
uint64_t Offset;
- // This field is unused until sanitizer procmaps support for build ids for
- // Linux-Elf is implemented.
- uint8_t BuildId[32] = {0};
+ uint64_t BuildIdSize;
+ uint8_t BuildId[MEMPROF_BUILDID_MAX_SIZE] = {0};
- SegmentEntry(uint64_t S, uint64_t E, uint64_t O) :
- Start(S), End(E), Offset(O) {}
+ // This constructor is only used in tests so don't set the BuildId.
+ SegmentEntry(uint64_t S, uint64_t E, uint64_t O)
+ : Start(S), End(E), Offset(O), BuildIdSize(0) {}
SegmentEntry(const SegmentEntry& S) {
Start = S.Start;
End = S.End;
Offset = S.Offset;
+ BuildIdSize = S.BuildIdSize;
+ memcpy(BuildId, S.BuildId, S.BuildIdSize);
}
SegmentEntry& operator=(const SegmentEntry& S) {
Start = S.Start;
End = S.End;
Offset = S.Offset;
+ BuildIdSize = S.BuildIdSize;
+ memcpy(BuildId, S.BuildId, S.BuildIdSize);
return *this;
}
bool operator==(const SegmentEntry& S) const {
- return Start == S.Start &&
- End == S.End &&
- Offset == S.Offset;
+ return Start == S.Start && End == S.End && Offset == S.Offset &&
+ BuildIdSize == S.BuildIdSize &&
+ memcmp(BuildId, S.BuildId, S.BuildIdSize) == 0;
}
});
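The copy operations now have to carry the build id explicitly because
operator== compares only the first BuildIdSize bytes of BuildId. A
hypothetical, test-style construction (assumes the header above is included):

#include <stdint.h>
#include <string.h>

llvm::memprof::SegmentEntry MakeEntry() {
  // Test-style constructor per the comment above: BuildIdSize starts at 0.
  llvm::memprof::SegmentEntry E(/*S=*/0x1000, /*E=*/0x2000, /*O=*/0);
  const uint8_t Id[] = {0xde, 0xad, 0xbe, 0xef};
  E.BuildIdSize = sizeof(Id);
  memcpy(E.BuildId, Id, sizeof(Id));
  llvm::memprof::SegmentEntry Copy = E;  // copy ctor now clones the build id
  // Copy == E holds because BuildIdSize and the used BuildId prefix match.
  return Copy;
}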
diff --git a/contrib/llvm-project/compiler-rt/include/sanitizer/allocator_interface.h b/contrib/llvm-project/compiler-rt/include/sanitizer/allocator_interface.h
index 6226135ef84b..367e6409258f 100644
--- a/contrib/llvm-project/compiler-rt/include/sanitizer/allocator_interface.h
+++ b/contrib/llvm-project/compiler-rt/include/sanitizer/allocator_interface.h
@@ -26,10 +26,18 @@ extern "C" {
is not yet freed. */
int __sanitizer_get_ownership(const volatile void *p);
+ /* If a pointer lies within an allocation, it will return the start address
+ of the allocation. Otherwise, it returns nullptr. */
+ const void *__sanitizer_get_allocated_begin(const void *p);
+
/* Returns the number of bytes reserved for the pointer p.
Requires (get_ownership(p) == true) or (p == 0). */
size_t __sanitizer_get_allocated_size(const volatile void *p);
+ /* Returns the number of bytes reserved for the pointer p.
+ Requires __sanitizer_get_allocated_begin(p) == p. */
+ size_t __sanitizer_get_allocated_size_fast(const volatile void *p);
+
/* Number of bytes, allocated and not yet freed by the application. */
size_t __sanitizer_get_current_allocated_bytes(void);
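A hedged sketch of the two new queries under an ASan-style runtime, where
get_allocated_size reports the requested size (behavior per the comments
above):

#include <assert.h>
#include <stdlib.h>
#include <sanitizer/allocator_interface.h>

void probe(void) {
  char *p = (char *)malloc(32);
  // Interior pointers resolve to the allocation start; pointers outside any
  // allocation would yield nullptr instead.
  assert(__sanitizer_get_allocated_begin(p + 8) == p);
  // The _fast variant requires the begin-pointer precondition above.
  assert(__sanitizer_get_allocated_size_fast(p) == 32);
  free(p);
}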
diff --git a/contrib/llvm-project/compiler-rt/include/sanitizer/common_interface_defs.h b/contrib/llvm-project/compiler-rt/include/sanitizer/common_interface_defs.h
index 2f415bd9e854..983df7cea16e 100644
--- a/contrib/llvm-project/compiler-rt/include/sanitizer/common_interface_defs.h
+++ b/contrib/llvm-project/compiler-rt/include/sanitizer/common_interface_defs.h
@@ -129,26 +129,23 @@ int __sanitizer_acquire_crash_state();
/// state <c>mid == end</c>, so that should be the final state when the
/// container is destroyed or when the container reallocates the storage.
///
-/// For ASan, <c><i>beg</i></c> should be 8-aligned and <c><i>end</i></c>
-/// should be either 8-aligned or it should point to the end of a separate
-/// heap-, stack-, or global-allocated buffer. So the following example will
-/// not work:
+/// For ASan, <c><i>beg</i></c> no longer needs to be 8-aligned,
+/// first and last granule may be shared with other objects
+/// and therefore the function can be used for any allocator.
///
-/// \code
-/// int64_t x[2]; // 16 bytes, 8-aligned
-/// char *beg = (char *)&x[0];
-/// char *end = beg + 12; // Not 8-aligned, not the end of the buffer
-/// \endcode
+/// The following example shows how to use the function:
///
-/// The following, however, will work:
/// \code
-/// int32_t x[3]; // 12 bytes, but 8-aligned under ASan.
+/// int32_t x[3]; // 12 bytes
/// char *beg = (char*)&x[0];
-/// char *end = beg + 12; // Not 8-aligned, but is the end of the buffer
+/// char *end = beg + 12;
+/// __sanitizer_annotate_contiguous_container(beg, end, beg, end);
/// \endcode
///
/// \note Use this function with caution and do not use for anything other
/// than vector-like classes.
+/// \note Unaligned <c><i>beg</i></c> or <c><i>end</i></c> may miss bugs in
+/// these granules.
///
/// \param beg Beginning of memory region.
/// \param end End of memory region.
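With the alignment requirement dropped, a shrink annotation for an arbitrary
buffer now looks like this (a sketch; beg and end no longer need to be
8-aligned):

#include <sanitizer/common_interface_defs.h>

// Mark [new_mid, end) unaddressable after a vector-like container shrinks
// from old_mid down to new_mid; [beg, new_mid) stays accessible.
void on_shrink(char *beg, char *end, char *old_mid, char *new_mid) {
  __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid);
}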
diff --git a/contrib/llvm-project/compiler-rt/include/sanitizer/hwasan_interface.h b/contrib/llvm-project/compiler-rt/include/sanitizer/hwasan_interface.h
index 14035c05c635..ee742c7f3031 100644
--- a/contrib/llvm-project/compiler-rt/include/sanitizer/hwasan_interface.h
+++ b/contrib/llvm-project/compiler-rt/include/sanitizer/hwasan_interface.h
@@ -1,4 +1,4 @@
-//===-- sanitizer/asan_interface.h ------------------------------*- C++ -*-===//
+//===-- sanitizer/hwasan_interface.h ----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/compiler-rt/include/sanitizer/tsan_interface.h b/contrib/llvm-project/compiler-rt/include/sanitizer/tsan_interface.h
index 2782e61fb8c7..f19c79d79ba6 100644
--- a/contrib/llvm-project/compiler-rt/include/sanitizer/tsan_interface.h
+++ b/contrib/llvm-project/compiler-rt/include/sanitizer/tsan_interface.h
@@ -172,6 +172,129 @@ int __tsan_on_finalize(int failed);
// Release TSan internal memory in a best-effort manner.
void __tsan_flush_memory();
+// User-provided default TSAN options.
+const char* __tsan_default_options(void);
+
+// User-provided default TSAN suppressions.
+const char* __tsan_default_suppressions(void);
+
+/// Returns a report's description.
+///
+/// Returns a report's description (issue type), number of duplicate issues
+/// found, counts of array data (stack traces, memory operations, locations,
+/// mutexes, threads, unique thread IDs) and a stack trace of a <c>sleep()</c>
+/// call (if one was involved in the issue).
+///
+/// \param report Opaque pointer to the current report.
+/// \param[out] description Report type description.
+/// \param[out] count Count of duplicate issues.
+/// \param[out] stack_count Count of stack traces.
+/// \param[out] mop_count Count of memory operations.
+/// \param[out] loc_count Count of locations.
+/// \param[out] mutex_count Count of mutexes.
+/// \param[out] thread_count Count of threads.
+/// \param[out] unique_tid_count Count of unique thread IDs.
+/// \param sleep_trace A buffer to store the stack trace of a <c>sleep()</c>
+/// call.
+/// \param trace_size Size in bytes of the trace buffer.
+/// \returns Returns 1 if successful, 0 if not.
+int __tsan_get_report_data(void *report, const char **description, int *count,
+ int *stack_count, int *mop_count, int *loc_count,
+ int *mutex_count, int *thread_count,
+ int *unique_tid_count, void **sleep_trace,
+ unsigned long trace_size);
+
+/// Returns information about stack traces included in the report.
+///
+/// \param report Opaque pointer to the current report.
+/// \param idx Index to the report's stacks.
+/// \param trace A buffer to store the stack trace.
+/// \param trace_size Size in bytes of the trace buffer.
+/// \returns Returns 1 if successful, 0 if not.
+int __tsan_get_report_stack(void *report, unsigned long idx, void **trace,
+ unsigned long trace_size);
+
+/// Returns information about memory operations included in the report.
+///
+/// \param report Opaque pointer to the current report.
+/// \param idx Index to the report's memory operations.
+/// \param[out] tid Thread ID of the memory operation.
+/// \param[out] addr Address of the memory operation.
+/// \param[out] size Size of the memory operation.
+/// \param[out] write Write flag of the memory operation.
+/// \param[out] atomic Atomicity flag of the memory operation.
+/// \param trace A buffer to store the stack trace.
+/// \param trace_size Size in bytes of the trace buffer.
+/// \returns Returns 1 if successful, 0 if not.
+int __tsan_get_report_mop(void *report, unsigned long idx, int *tid,
+ void **addr, int *size, int *write, int *atomic,
+ void **trace, unsigned long trace_size);
+
+/// Returns information about locations included in the report.
+///
+/// \param report Opaque pointer to the current report.
+/// \param idx Index to the report's locations.
+/// \param[out] type Type of the location.
+/// \param[out] addr Address of the location.
+/// \param[out] start Start of the location.
+/// \param[out] size Size of the location.
+/// \param[out] tid Thread ID of the location.
+/// \param[out] fd File descriptor of the location.
+/// \param[out] suppressable Suppressable flag.
+/// \param trace A buffer to store the stack trace.
+/// \param trace_size Size in bytes of the trace buffer.
+/// \returns Returns 1 if successful, 0 if not.
+int __tsan_get_report_loc(void *report, unsigned long idx, const char **type,
+ void **addr, void **start, unsigned long *size,
+ int *tid, int *fd, int *suppressable, void **trace,
+ unsigned long trace_size);
+
+/// Returns information about mutexes included in the report.
+///
+/// \param report Opaque pointer to the current report.
+/// \param idx Index to the report's mutexes.
+/// \param[out] mutex_id Id of the mutex.
+/// \param[out] addr Address of the mutex.
+/// \param[out] destroyed Destroyed mutex flag.
+/// \param trace A buffer to store the stack trace.
+/// \param trace_size Size in bytes of the trace buffer.
+/// \returns Returns 1 if successful, 0 if not.
+int __tsan_get_report_mutex(void *report, unsigned long idx, uint64_t *mutex_id,
+ void **addr, int *destroyed, void **trace,
+ unsigned long trace_size);
+
+/// Returns information about threads included in the report.
+///
+/// \param report Opaque pointer to the current report.
+/// \param idx Index to the report's threads.
+/// \param[out] tid Thread ID of the thread.
+/// \param[out] os_id Operating system's ID of the thread.
+/// \param[out] running Running flag of the thread.
+/// \param[out] name Name of the thread.
+/// \param[out] parent_tid ID of the parent thread.
+/// \param trace A buffer to store the stack trace.
+/// \param trace_size Size in bytes of the trace buffer.
+/// \returns Returns 1 if successful, 0 if not.
+int __tsan_get_report_thread(void *report, unsigned long idx, int *tid,
+ uint64_t *os_id, int *running, const char **name,
+ int *parent_tid, void **trace,
+ unsigned long trace_size);
+
+/// Returns information about unique thread IDs included in the report.
+///
+/// \param report Opaque pointer to the current report.
+/// \param idx Index to the report's unique thread IDs.
+/// \param[out] tid Unique thread ID of the report.
+/// \returns Returns 1 if successful, 0 if not.
+int __tsan_get_report_unique_tid(void *report, unsigned long idx, int *tid);
+
+/// Returns the current report.
+///
+/// If TSan is currently reporting a detected issue on the current thread,
+/// returns an opaque pointer to the current report. Otherwise returns NULL.
+/// \returns An opaque pointer to the current report. Otherwise returns NULL.
+void *__tsan_get_current_report();
+
#ifdef __cplusplus
} // extern "C"
#endif
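A sketch of walking the new introspection entry points, for example from
inside a report callback; the buffer size is illustrative:

#include <stdio.h>
#include <sanitizer/tsan_interface.h>

void dump_current_report(void) {
  void *report = __tsan_get_current_report();
  if (!report)
    return;  // not currently reporting on this thread
  const char *description;
  int count, stack_count, mop_count, loc_count;
  int mutex_count, thread_count, unique_tid_count;
  void *sleep_trace[16] = {0};
  if (__tsan_get_report_data(report, &description, &count, &stack_count,
                             &mop_count, &loc_count, &mutex_count,
                             &thread_count, &unique_tid_count, sleep_trace,
                             sizeof(sleep_trace))) {
    printf("%s: %d stack(s), %d memory op(s), %d thread(s)\n", description,
           stack_count, mop_count, thread_count);
  }
}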
diff --git a/contrib/llvm-project/compiler-rt/include/xray/xray_interface.h b/contrib/llvm-project/compiler-rt/include/xray/xray_interface.h
index 410515d2373a..727431c04e4f 100644
--- a/contrib/llvm-project/compiler-rt/include/xray/xray_interface.h
+++ b/contrib/llvm-project/compiler-rt/include/xray/xray_interface.h
@@ -76,8 +76,9 @@ extern int __xray_remove_customevent_handler();
/// Set a handler for xray typed event logging. The first parameter is a type
/// identifier, the second is a payload, and the third is the payload size.
-extern int __xray_set_typedevent_handler(void (*entry)(uint16_t, const void *,
- std::size_t));
+/// NOTE: fdrLoggingHandleTypedEvent only supports uint16_t event type.
+extern int __xray_set_typedevent_handler(void (*entry)(size_t, const void *,
+ size_t));
/// Removes the currently set typed event handler.
/// Returns 1 on success, 0 on error.
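Under the new signature a handler takes a size_t type id, of which FDR mode
records only the low 16 bits per the note above. A hypothetical registration
(handler name is illustrative):

#include <cstddef>
#include "xray/xray_interface.h"

// Illustrative handler matching the new (size_t, const void *, size_t)
// signature; FDR logging truncates `type` to uint16_t.
static void my_typed_handler(std::size_t type, const void *payload,
                             std::size_t payload_size) {
  (void)type;
  (void)payload;
  (void)payload_size;
}

void install_handler() {
  __xray_set_typedevent_handler(my_typed_handler);
}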
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.cpp
index 74183fcd2427..15eefcb96063 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.cpp
@@ -16,6 +16,7 @@
#include "asan_allocator.h"
+#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
@@ -24,6 +25,7 @@
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
@@ -190,28 +192,56 @@ class LargeChunkHeader {
}
};
+static void FillChunk(AsanChunk *m) {
+ // FIXME: Use ReleaseMemoryPagesToOS.
+ Flags &fl = *flags();
+
+ if (fl.max_free_fill_size > 0) {
+ // We have to skip the chunk header, it contains free_context_id.
+ uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
+ if (m->UsedSize() >= kChunkHeader2Size) { // Skip Header2 in user area.
+ uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
+ size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
+ REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
+ }
+ }
+}
+
struct QuarantineCallback {
QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
: cache_(cache),
stack_(stack) {
}
- void Recycle(AsanChunk *m) {
+ void PreQuarantine(AsanChunk *m) const {
+ FillChunk(m);
+ // Poison the region.
+ PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
+ kAsanHeapFreeMagic);
+ }
+
+ void Recycle(AsanChunk *m) const {
void *p = get_allocator().GetBlockBegin(m);
- if (p != m) {
- // Clear the magic value, as allocator internals may overwrite the
- // contents of deallocated chunk, confusing GetAsanChunk lookup.
- reinterpret_cast<LargeChunkHeader *>(p)->Set(nullptr);
- }
- u8 old_chunk_state = CHUNK_QUARANTINE;
- if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
- CHUNK_INVALID, memory_order_acquire)) {
- CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
- }
+ // The secondary will immediately unpoison and unmap the memory, so this
+ // branch is unnecessary.
+ if (get_allocator().FromPrimary(p)) {
+ if (p != m) {
+ // Clear the magic value, as allocator internals may overwrite the
+ // contents of deallocated chunk, confusing GetAsanChunk lookup.
+ reinterpret_cast<LargeChunkHeader *>(p)->Set(nullptr);
+ }
- PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
- kAsanHeapLeftRedzoneMagic);
+ u8 old_chunk_state = CHUNK_QUARANTINE;
+ if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
+ CHUNK_INVALID,
+ memory_order_acquire)) {
+ CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
+ }
+
+ PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
+ kAsanHeapLeftRedzoneMagic);
+ }
// Statistics.
AsanStats &thread_stats = GetCurrentThreadStats();
@@ -221,7 +251,17 @@ struct QuarantineCallback {
get_allocator().Deallocate(cache_, p);
}
- void *Allocate(uptr size) {
+ void RecyclePassThrough(AsanChunk *m) const {
+ // Recycle for the secondary will immediately unpoison and unmap the
+ // memory, so quarantine preparation is unnecessary.
+ if (get_allocator().FromPrimary(m)) {
+ // The primary allocation may need pattern fill if enabled.
+ FillChunk(m);
+ }
+ Recycle(m);
+ }
+
+ void *Allocate(uptr size) const {
void *res = get_allocator().Allocate(cache_, size, 1);
// TODO(alekseys): Consider making quarantine OOM-friendly.
if (UNLIKELY(!res))
@@ -229,9 +269,7 @@ struct QuarantineCallback {
return res;
}
- void Deallocate(void *p) {
- get_allocator().Deallocate(cache_, p);
- }
+ void Deallocate(void *p) const { get_allocator().Deallocate(cache_, p); }
private:
AllocatorCache* const cache_;
@@ -248,6 +286,22 @@ void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
thread_stats.mmaps++;
thread_stats.mmaped += size;
}
+
+void AsanMapUnmapCallback::OnMapSecondary(uptr p, uptr size, uptr user_begin,
+ uptr user_size) const {
+ uptr user_end = RoundDownTo(user_begin + user_size, ASAN_SHADOW_GRANULARITY);
+ user_begin = RoundUpTo(user_begin, ASAN_SHADOW_GRANULARITY);
+ // The secondary mapping will be immediately returned to user, no value
+ // poisoning that with non-zero just before unpoisoning by Allocate(). So just
+ // poison head/tail invisible to Allocate().
+ PoisonShadow(p, user_begin - p, kAsanHeapLeftRedzoneMagic);
+ PoisonShadow(user_end, size - (user_end - p), kAsanHeapLeftRedzoneMagic);
+ // Statistics.
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.mmaps++;
+ thread_stats.mmaped += size;
+}
+
void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
PoisonShadow(p, size, 0);
// We are about to unmap a chunk of user memory.
@@ -387,8 +441,9 @@ struct Allocator {
}
void GetOptions(AllocatorOptions *options) const {
- options->quarantine_size_mb = quarantine.GetSize() >> 20;
- options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
+ options->quarantine_size_mb = quarantine.GetMaxSize() >> 20;
+ options->thread_local_quarantine_size_kb =
+ quarantine.GetMaxCacheSize() >> 10;
options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
options->may_return_null = AllocatorMayReturnNull();
@@ -502,9 +557,10 @@ struct Allocator {
uptr needed_size = rounded_size + rz_size;
if (alignment > min_alignment)
needed_size += alignment;
+ bool from_primary = PrimaryAllocator::CanAllocate(needed_size, alignment);
// If we are allocating from the secondary allocator, there will be no
// automatic right redzone, so add the right redzone manually.
- if (!PrimaryAllocator::CanAllocate(needed_size, alignment))
+ if (!from_primary)
needed_size += rz_size;
CHECK(IsAligned(needed_size, min_alignment));
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
@@ -536,15 +592,6 @@ struct Allocator {
ReportOutOfMemory(size, stack);
}
- if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
- // Heap poisoning is enabled, but the allocator provides an unpoisoned
- // chunk. This is possible if CanPoisonMemory() was false for some
- // time, for example, due to flags()->start_disabled.
- // Anyway, poison the block before using it for anything else.
- uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
- PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
- }
-
uptr alloc_beg = reinterpret_cast<uptr>(allocated);
uptr alloc_end = alloc_beg + needed_size;
uptr user_beg = alloc_beg + rz_size;
@@ -561,6 +608,17 @@ struct Allocator {
m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
+ if (!from_primary || *(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0) {
+ // The allocator provides an unpoisoned chunk. This is possible for the
+ // secondary allocator, or if CanPoisonMemory() was false for some time,
+ // for example, due to flags()->start_disabled. Anyway, poison left and
+ // right of the block before using it for anything else.
+ uptr tail_beg = RoundUpTo(user_end, ASAN_SHADOW_GRANULARITY);
+ uptr tail_end = alloc_beg + allocator.GetActuallyAllocatedSize(allocated);
+ PoisonShadow(alloc_beg, user_beg - alloc_beg, kAsanHeapLeftRedzoneMagic);
+ PoisonShadow(tail_beg, tail_end - tail_beg, kAsanHeapLeftRedzoneMagic);
+ }
+
uptr size_rounded_down_to_granularity =
RoundDownTo(size, ASAN_SHADOW_GRANULARITY);
// Unpoison the bulk of the memory region.
@@ -628,25 +686,6 @@ struct Allocator {
AsanThread *t = GetCurrentThread();
m->SetFreeContext(t ? t->tid() : 0, StackDepotPut(*stack));
- Flags &fl = *flags();
- if (fl.max_free_fill_size > 0) {
- // We have to skip the chunk header, it contains free_context_id.
- uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
- if (m->UsedSize() >= kChunkHeader2Size) { // Skip Header2 in user area.
- uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
- size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
- REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
- }
- }
-
- // Poison the region.
- PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
- kAsanHeapFreeMagic);
-
- AsanStats &thread_stats = GetCurrentThreadStats();
- thread_stats.frees++;
- thread_stats.freed += m->UsedSize();
-
// Push into quarantine.
if (t) {
AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
@@ -699,6 +738,10 @@ struct Allocator {
}
}
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.frees++;
+ thread_stats.freed += m->UsedSize();
+
QuarantineChunk(m, ptr, stack);
}
@@ -798,6 +841,10 @@ struct Allocator {
return m->UsedSize();
}
+ uptr AllocationSizeFast(uptr p) {
+ return reinterpret_cast<AsanChunk *>(p - kChunkHeaderSize)->UsedSize();
+ }
+
AsanChunkView FindHeapChunkByAddress(uptr addr) {
AsanChunk *m1 = GetAsanChunkByAddr(addr);
sptr offset = 0;
@@ -1100,6 +1147,10 @@ uptr GetUserBegin(uptr chunk) {
return m ? m->Beg() : 0;
}
+uptr GetUserAddr(uptr chunk) {
+ return chunk;
+}
+
LsanMetadata::LsanMetadata(uptr chunk) {
metadata_ = chunk ? reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize)
: nullptr;
@@ -1140,7 +1191,7 @@ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
__asan::get_allocator().ForEachChunk(callback, arg);
}
-IgnoreObjectResult IgnoreObjectLocked(const void *p) {
+IgnoreObjectResult IgnoreObject(const void *p) {
uptr addr = reinterpret_cast<uptr>(p);
__asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
if (!m ||
@@ -1160,6 +1211,17 @@ IgnoreObjectResult IgnoreObjectLocked(const void *p) {
// ---------------------- Interface ---------------- {{{1
using namespace __asan;
+static const void *AllocationBegin(const void *p) {
+ AsanChunk *m = __asan::instance.GetAsanChunkByAddr((uptr)p);
+ if (!m)
+ return nullptr;
+ if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
+ return nullptr;
+ if (m->UsedSize() == 0)
+ return nullptr;
+ return (const void *)(m->Beg());
+}
+
// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
@@ -1183,6 +1245,17 @@ uptr __sanitizer_get_allocated_size(const void *p) {
return allocated_size;
}
+uptr __sanitizer_get_allocated_size_fast(const void *p) {
+ DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
+ uptr ret = instance.AllocationSizeFast(reinterpret_cast<uptr>(p));
+ DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
+ return ret;
+}
+
+const void *__sanitizer_get_allocated_begin(const void *p) {
+ return AllocationBegin(p);
+}
+
void __sanitizer_purge_allocator() {
GET_STACK_TRACE_MALLOC;
instance.Purge(&stack);
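The two new entry points are part of the public allocator interface (declared in sanitizer/allocator_interface.h in this import). A small usage sketch, assuming a clang built against this runtime:

// Build with: clang++ -fsanitize=address demo.cpp
#include <sanitizer/allocator_interface.h>
#include <cassert>
#include <cstdio>

int main() {
  char *buf = new char[64];
  // Interior pointers map back to the chunk begin; non-heap pointers yield null.
  const void *begin = __sanitizer_get_allocated_begin(buf + 17);
  assert(begin == buf);
  // The _fast variant skips the lookup checks and requires the chunk begin.
  size_t n = __sanitizer_get_allocated_size_fast(begin);
  printf("chunk at %p, usable size %zu\n", begin, n);
  delete[] buf;
  return 0;
}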
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.h
index 0b4dbf03bb9d..ffeedadf0772 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.h
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.h
@@ -114,6 +114,7 @@ class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
struct AsanMapUnmapCallback {
void OnMap(uptr p, uptr size) const;
+ void OnMapSecondary(uptr p, uptr size, uptr user_begin, uptr user_size) const;
void OnUnmap(uptr p, uptr size) const;
};
@@ -143,11 +144,15 @@ typedef DefaultSizeClassMap SizeClassMap;
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x8000000000ULL; // 500G
typedef DefaultSizeClassMap SizeClassMap;
-# else
+# elif SANITIZER_APPLE
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
typedef DefaultSizeClassMap SizeClassMap;
-# endif
+# else
+const uptr kAllocatorSpace = 0x500000000000ULL;
+const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+typedef DefaultSizeClassMap SizeClassMap;
+# endif
template <typename AddressSpaceViewTy>
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = kAllocatorSpace;
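The size comments on the constants above can be checked mechanically: 0x40000000000 is 2^42 bytes, i.e. exactly 4 TiB, for both the Apple region and the new non-Apple default at 0x500000000000. A standalone check:

#include <cstdint>

constexpr uint64_t kAllocatorSize = 0x40000000000ULL;
static_assert(kAllocatorSize == (1ULL << 42), "0x40000000000 == 2^42");
static_assert(kAllocatorSize == 4 * (1ULL << 40), "i.e. 4 TiB, as the comment says");

int main() { return 0; }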
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_globals.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_globals.cpp
index b780128c9adb..4d391cb2a885 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_globals.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_globals.cpp
@@ -92,6 +92,10 @@ static void ReportGlobal(const Global &g, const char *prefix) {
if (info.line != 0) {
Report(" location: name=%s, %d\n", info.file, static_cast<int>(info.line));
}
+  else if (g.gcc_location != 0) {
+    // Fall back to Global::gcc_location.
+    Report(" location: name=%s, %d\n", g.gcc_location->filename,
+           g.gcc_location->line_no);
+  }
}
static u32 FindRegistrationSite(const Global *g) {
@@ -302,6 +306,11 @@ void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g) {
if (info.line != 0) {
str->append("%s:%d", info.file, static_cast<int>(info.line));
+ } else if (g.gcc_location != 0) {
+    // Fall back to Global::gcc_location.
+    str->append("%s", g.gcc_location->filename ? g.gcc_location->filename
+                                               : g.module_name);
+ if (g.gcc_location->line_no) str->append(":%d", g.gcc_location->line_no);
+ if (g.gcc_location->column_no) str->append(":%d", g.gcc_location->column_no);
} else {
str->append("%s", g.module_name);
}
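With the fallback above, a global instrumented by GCC now reports a location such as a.c:12:7, degrading to the module name when the record carries no filename. A reduced sketch of the same formatting logic, with illustrative types:

#include <cstdio>

struct Loc { const char *filename; int line_no, column_no; };

static void PrintLocation(const Loc *gcc_location, const char *module_name) {
  if (!gcc_location) { printf("%s\n", module_name); return; }
  printf("%s", gcc_location->filename ? gcc_location->filename : module_name);
  if (gcc_location->line_no) printf(":%d", gcc_location->line_no);
  if (gcc_location->column_no) printf(":%d", gcc_location->column_no);
  printf("\n");
}

int main() {
  Loc l{"a.c", 12, 7};
  PrintLocation(&l, "libfoo.so");       // a.c:12:7
  PrintLocation(nullptr, "libfoo.so");  // libfoo.so
  return 0;
}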
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.cpp
index 776f512d08a0..b9b82564b330 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "asan_interceptors.h"
+
#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_mapping.h"
@@ -20,7 +21,10 @@
#include "asan_stack.h"
#include "asan_stats.h"
#include "asan_suppressions.h"
+#include "asan_thread.h"
#include "lsan/lsan_common.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_libc.h"
// There is no general interception at all on Fuchsia.
@@ -84,12 +88,6 @@ using namespace __asan;
DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr)
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
-#define ASAN_INTERCEPTOR_ENTER(ctx, func) \
- AsanInterceptorContext _ctx = {#func}; \
- ctx = (void *)&_ctx; \
- (void) ctx; \
-
-#define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name)
#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
ASAN_INTERCEPT_FUNC_VER(name, ver)
#define COMMON_INTERCEPT_FUNCTION_VER_UNVERSIONED_FALLBACK(name, ver) \
@@ -149,22 +147,46 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
*begin = *end = 0; \
}
-#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \
- do { \
- ASAN_INTERCEPTOR_ENTER(ctx, memmove); \
- ASAN_MEMMOVE_IMPL(ctx, to, from, size); \
- } while (false)
+template <class Mmap>
+static void* mmap_interceptor(Mmap real_mmap, void *addr, SIZE_T length,
+ int prot, int flags, int fd, OFF64_T offset) {
+ void *res = real_mmap(addr, length, prot, flags, fd, offset);
+ if (length && res != (void *)-1) {
+ const uptr beg = reinterpret_cast<uptr>(res);
+ DCHECK(IsAligned(beg, GetPageSize()));
+ SIZE_T rounded_length = RoundUpTo(length, GetPageSize());
+    // Only unpoison the shadow if it is an ASan-managed address.
+    if (AddrIsInMem(beg) && AddrIsInMem(beg + rounded_length - 1))
+      PoisonShadow(beg, rounded_length, 0);
+ }
+ return res;
+}
-#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \
- do { \
- ASAN_INTERCEPTOR_ENTER(ctx, memcpy); \
- ASAN_MEMCPY_IMPL(ctx, to, from, size); \
+template <class Munmap>
+static int munmap_interceptor(Munmap real_munmap, void *addr, SIZE_T length) {
+  // We should not clear the shadow if munmap fails, but it is too late to
+  // do so after real_munmap, as the pages could already be mmapped by
+  // another thread.
+ const uptr beg = reinterpret_cast<uptr>(addr);
+ if (length && IsAligned(beg, GetPageSize())) {
+ SIZE_T rounded_length = RoundUpTo(length, GetPageSize());
+ // Protect from unmapping the shadow.
+ if (AddrIsInMem(beg) && AddrIsInMem(beg + rounded_length - 1))
+ PoisonShadow(beg, rounded_length, 0);
+ }
+ return real_munmap(addr, length);
+}
+
+# define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, length, prot, flags, \
+ fd, offset) \
+ do { \
+ (void)(ctx); \
+    return mmap_interceptor(REAL(mmap), addr, length, prot, flags, fd,     \
+                            offset);                                       \
} while (false)
-#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \
- do { \
- ASAN_INTERCEPTOR_ENTER(ctx, memset); \
- ASAN_MEMSET_IMPL(ctx, block, c, size); \
+# define COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, length) \
+ do { \
+ (void)(ctx); \
+    return munmap_interceptor(REAL(munmap), addr, length);                 \
} while (false)
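The observable effect of the two interceptors: freshly mapped pages are addressable to ASan right away, and pages lose any stale poison when unmapped, so a later mapping at the same address starts clean. A sketch of that behavior (POSIX mmap, built with -fsanitize=address):

// Build with: clang++ -fsanitize=address mmap_demo.cpp
#include <sanitizer/asan_interface.h>
#include <sys/mman.h>
#include <cassert>

int main() {
  const size_t len = 4096;
  void *p = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(p != MAP_FAILED);
  // mmap_interceptor unpoisoned the shadow for the page-rounded length.
  assert(__asan_region_is_poisoned(p, len) == nullptr);
  __asan_poison_memory_region(p, len);  // simulate stale manual poison
  munmap(p, len);                       // munmap_interceptor clears it again
  return 0;
}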
#if CAN_SANITIZE_LEAKS
@@ -172,6 +194,8 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
__lsan::ScopedInterceptorDisabler disabler
#endif
+#define SIGNAL_INTERCEPTOR_ENTER() ENSURE_ASAN_INITED()
+
#include "sanitizer_common/sanitizer_common_interceptors.inc"
#include "sanitizer_common/sanitizer_signal_interceptors.inc"
@@ -196,19 +220,26 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
AsanThread *t = (AsanThread *)arg;
SetCurrentThread(t);
- return t->ThreadStart(GetTid());
+ auto self = GetThreadSelf();
+ auto args = asanThreadArgRetval().GetArgs(self);
+ thread_return_t retval = t->ThreadStart(GetTid());
+ asanThreadArgRetval().Finish(self, retval);
+ CHECK_EQ(args.arg_retval, t->get_arg());
+ return retval;
}
-INTERCEPTOR(int, pthread_create, void *thread,
- void *attr, void *(*start_routine)(void*), void *arg) {
+INTERCEPTOR(int, pthread_create, void *thread, void *attr,
+ void *(*start_routine)(void *), void *arg) {
EnsureMainThreadIDIsCorrect();
// Strict init-order checking is thread-hostile.
if (flags()->strict_init_order)
StopInitOrderChecking();
GET_STACK_TRACE_THREAD;
- int detached = 0;
- if (attr)
- REAL(pthread_attr_getdetachstate)(attr, &detached);
+ bool detached = [attr]() {
+ int d = 0;
+ return attr && !REAL(pthread_attr_getdetachstate)(attr, &d) &&
+ IsStateDetached(d);
+ }();
u32 current_tid = GetCurrentTidOrInvalid();
AsanThread *t =
@@ -220,10 +251,13 @@ INTERCEPTOR(int, pthread_create, void *thread,
// stored by pthread for future reuse even after thread destruction, and
// the linked list it's stored in doesn't even hold valid pointers to the
// objects, the latter are calculated by obscure pointer arithmetic.
-#if CAN_SANITIZE_LEAKS
+# if CAN_SANITIZE_LEAKS
__lsan::ScopedInterceptorDisabler disabler;
-#endif
- result = REAL(pthread_create)(thread, attr, asan_thread_start, t);
+# endif
+ asanThreadArgRetval().Create(detached, {start_routine, arg}, [&]() -> uptr {
+ result = REAL(pthread_create)(thread, attr, asan_thread_start, t);
+ return result ? 0 : *(uptr *)(thread);
+ });
}
if (result != 0) {
// If the thread didn't start delete the AsanThread to avoid leaking it.
@@ -234,9 +268,51 @@ INTERCEPTOR(int, pthread_create, void *thread,
return result;
}
-INTERCEPTOR(int, pthread_join, void *t, void **arg) {
- return real_pthread_join(t, arg);
+INTERCEPTOR(int, pthread_join, void *thread, void **retval) {
+ int result;
+ asanThreadArgRetval().Join((uptr)thread, [&]() {
+ result = REAL(pthread_join)(thread, retval);
+ return !result;
+ });
+ return result;
+}
+
+INTERCEPTOR(int, pthread_detach, void *thread) {
+ int result;
+ asanThreadArgRetval().Detach((uptr)thread, [&]() {
+ result = REAL(pthread_detach)(thread);
+ return !result;
+ });
+ return result;
+}
+
+INTERCEPTOR(int, pthread_exit, void *retval) {
+ asanThreadArgRetval().Finish(GetThreadSelf(), retval);
+ return REAL(pthread_exit)(retval);
+}
+
+# if ASAN_INTERCEPT_TRYJOIN
+INTERCEPTOR(int, pthread_tryjoin_np, void *thread, void **ret) {
+ int result;
+ asanThreadArgRetval().Join((uptr)thread, [&]() {
+ result = REAL(pthread_tryjoin_np)(thread, ret);
+ return !result;
+ });
+ return result;
+}
+# endif
+
+# if ASAN_INTERCEPT_TIMEDJOIN
+INTERCEPTOR(int, pthread_timedjoin_np, void *thread, void **ret,
+ const struct timespec *abstime) {
+ int result;
+ asanThreadArgRetval().Join((uptr)thread, [&]() {
+ result = REAL(pthread_timedjoin_np)(thread, ret, abstime);
+ return !result;
+ });
+ return result;
}
+# endif
DEFINE_REAL_PTHREAD_FUNCTIONS
#endif // ASAN_INTERCEPT_PTHREAD_CREATE
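All of the join-family interceptors above follow one pattern: wrap the real pthread call in ThreadArgRetval bookkeeping so LSan keeps seeing the start argument and return value of any thread that can still be joined. ThreadArgRetval itself lives in sanitizer_common; the reduction below is a hypothetical sketch of the contract, not the real class:

#include <mutex>
#include <unordered_map>

// Hypothetical reduction: keep {arg, retval} reachable for every thread
// that has not yet been joined or detached, so they are not leak roots.
struct ArgRetvalRegistry {
  struct Entry { void *arg, *retval; bool detached, done; };
  std::mutex mu;
  std::unordered_map<unsigned long, Entry> live;

  void Create(unsigned long tid, void *arg, bool detached) {
    std::lock_guard<std::mutex> l(mu);
    live[tid] = {arg, nullptr, detached, false};
  }
  void Finish(unsigned long tid, void *retval) {  // at thread exit
    std::lock_guard<std::mutex> l(mu);
    auto it = live.find(tid);
    if (it == live.end()) return;
    if (it->second.detached) {
      live.erase(it);              // nobody will ever join
    } else {
      it->second.retval = retval;  // keep until joined
      it->second.done = true;
    }
  }
  void Join(unsigned long tid) {   // after a successful join
    std::lock_guard<std::mutex> l(mu);
    live.erase(tid);
  }
};

int main() {
  ArgRetvalRegistry r;
  r.Create(1, nullptr, /*detached=*/false);
  r.Finish(1, nullptr);
  r.Join(1);
  return 0;
}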
@@ -388,7 +464,7 @@ INTERCEPTOR(_Unwind_Reason_Code, _Unwind_SjLj_RaiseException,
#if ASAN_INTERCEPT_INDEX
# if ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX
INTERCEPTOR(char*, index, const char *string, int c)
- ALIAS(WRAPPER_NAME(strchr));
+ ALIAS(WRAP(strchr));
# else
# if SANITIZER_APPLE
DECLARE_REAL(char*, index, const char *string, int c)
@@ -474,7 +550,9 @@ INTERCEPTOR(char*, strdup, const char *s) {
}
GET_STACK_TRACE_MALLOC;
void *new_mem = asan_malloc(length + 1, &stack);
- REAL(memcpy)(new_mem, s, length + 1);
+ if (new_mem) {
+ REAL(memcpy)(new_mem, s, length + 1);
+ }
return reinterpret_cast<char*>(new_mem);
}
@@ -490,7 +568,9 @@ INTERCEPTOR(char*, __strdup, const char *s) {
}
GET_STACK_TRACE_MALLOC;
void *new_mem = asan_malloc(length + 1, &stack);
- REAL(memcpy)(new_mem, s, length + 1);
+ if (new_mem) {
+ REAL(memcpy)(new_mem, s, length + 1);
+ }
return reinterpret_cast<char*>(new_mem);
}
#endif // ASAN_INTERCEPT___STRDUP
@@ -652,6 +732,7 @@ void InitializeAsanInterceptors() {
static bool was_called_once;
CHECK(!was_called_once);
was_called_once = true;
+ InitializePlatformInterceptors();
InitializeCommonInterceptors();
InitializeSignalInterceptors();
@@ -702,11 +783,11 @@ void InitializeAsanInterceptors() {
#endif
// Indirectly intercept std::rethrow_exception.
#if ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION
- INTERCEPT_FUNCTION(_Unwind_RaiseException);
+ ASAN_INTERCEPT_FUNC(_Unwind_RaiseException);
#endif
// Indirectly intercept std::rethrow_exception.
#if ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION
- INTERCEPT_FUNCTION(_Unwind_SjLj_RaiseException);
+ ASAN_INTERCEPT_FUNC(_Unwind_SjLj_RaiseException);
#endif
// Intercept threading-related functions
@@ -718,6 +799,16 @@ void InitializeAsanInterceptors() {
ASAN_INTERCEPT_FUNC(pthread_create);
#endif
ASAN_INTERCEPT_FUNC(pthread_join);
+ ASAN_INTERCEPT_FUNC(pthread_detach);
+ ASAN_INTERCEPT_FUNC(pthread_exit);
+#endif
+
+#if ASAN_INTERCEPT_TIMEDJOIN
+  ASAN_INTERCEPT_FUNC(pthread_timedjoin_np);
+#endif
+
+#if ASAN_INTERCEPT_TRYJOIN
+ ASAN_INTERCEPT_FUNC(pthread_tryjoin_np);
#endif
// Intercept atexit function.
@@ -737,8 +828,6 @@ void InitializeAsanInterceptors() {
ASAN_INTERCEPT_FUNC(vfork);
#endif
- InitializePlatformInterceptors();
-
VReport(1, "AddressSanitizer: libc interceptors initialized\n");
}
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.h
index c4bf087ea17f..268096fea5e7 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.h
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.h
@@ -78,8 +78,8 @@ void InitializePlatformInterceptors();
# define ASAN_INTERCEPT___LONGJMP_CHK 0
#endif
-#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && !SANITIZER_SOLARIS && \
- !SANITIZER_NETBSD
+#if ASAN_HAS_EXCEPTIONS && !SANITIZER_SOLARIS && !SANITIZER_NETBSD && \
+ (!SANITIZER_WINDOWS || (defined(__MINGW32__) && defined(__i386__)))
# define ASAN_INTERCEPT___CXA_THROW 1
# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
# if defined(_GLIBCXX_SJLJ_EXCEPTIONS) || (SANITIZER_IOS && defined(__arm__))
@@ -112,6 +112,14 @@ void InitializePlatformInterceptors();
# define ASAN_INTERCEPT___STRDUP 0
#endif
+#if SANITIZER_GLIBC && ASAN_INTERCEPT_PTHREAD_CREATE
+# define ASAN_INTERCEPT_TIMEDJOIN 1
+# define ASAN_INTERCEPT_TRYJOIN 1
+#else
+# define ASAN_INTERCEPT_TIMEDJOIN 0
+# define ASAN_INTERCEPT_TRYJOIN 0
+#endif
+
#if SANITIZER_LINUX && \
(defined(__arm__) || defined(__aarch64__) || defined(__i386__) || \
defined(__x86_64__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64)
@@ -158,6 +166,12 @@ DECLARE_REAL(char*, strstr, const char *s1, const char *s2)
# define ASAN_INTERCEPT_FUNC(name)
# endif // SANITIZER_APPLE
+#define ASAN_INTERCEPTOR_ENTER(ctx, func) \
+ AsanInterceptorContext _ctx = {#func}; \
+ ctx = (void *)&_ctx; \
+ (void) ctx;
+#define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name)
+
#endif // !SANITIZER_FUCHSIA
#endif // ASAN_INTERCEPTORS_H
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp
index 9c316bb95749..4e4ea7191d32 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp
@@ -11,13 +11,54 @@
// ASan versions of memcpy, memmove, and memset.
//===---------------------------------------------------------------------===//
+#define SANITIZER_COMMON_NO_REDEFINE_BUILTINS
+
#include "asan_interceptors_memintrinsics.h"
+
+#include "asan_interceptors.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_suppressions.h"
using namespace __asan;
+// memcpy is called during __asan_init() from the internals of printf(...).
+// We do not treat memcpy with to==from as a bug.
+// See http://llvm.org/bugs/show_bug.cgi?id=11763.
+#define ASAN_MEMCPY_IMPL(ctx, to, from, size) \
+ do { \
+ if (LIKELY(replace_intrin_cached)) { \
+ if (LIKELY(to != from)) { \
+ CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); \
+ } \
+ ASAN_READ_RANGE(ctx, from, size); \
+ ASAN_WRITE_RANGE(ctx, to, size); \
+ } else if (UNLIKELY(!asan_inited)) { \
+ return internal_memcpy(to, from, size); \
+ } \
+ return REAL(memcpy)(to, from, size); \
+ } while (0)
+
+// memset is called inside Printf.
+#define ASAN_MEMSET_IMPL(ctx, block, c, size) \
+ do { \
+ if (LIKELY(replace_intrin_cached)) { \
+ ASAN_WRITE_RANGE(ctx, block, size); \
+ } else if (UNLIKELY(!asan_inited)) { \
+ return internal_memset(block, c, size); \
+ } \
+ return REAL(memset)(block, c, size); \
+ } while (0)
+
+#define ASAN_MEMMOVE_IMPL(ctx, to, from, size) \
+ do { \
+ if (LIKELY(replace_intrin_cached)) { \
+ ASAN_READ_RANGE(ctx, from, size); \
+ ASAN_WRITE_RANGE(ctx, to, size); \
+ } \
+ return internal_memmove(to, from, size); \
+ } while (0)
+
void *__asan_memcpy(void *to, const void *from, uptr size) {
ASAN_MEMCPY_IMPL(nullptr, to, from, size);
}
@@ -40,4 +81,26 @@ extern "C" decltype(__asan_memcpy) memcpy[[gnu::alias("__asan_memcpy")]];
extern "C" decltype(__asan_memmove) memmove[[gnu::alias("__asan_memmove")]];
extern "C" decltype(__asan_memset) memset[[gnu::alias("__asan_memset")]];
+#else // SANITIZER_FUCHSIA
+
+#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \
+ do { \
+ ASAN_INTERCEPTOR_ENTER(ctx, memmove); \
+ ASAN_MEMMOVE_IMPL(ctx, to, from, size); \
+ } while (false)
+
+#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \
+ do { \
+ ASAN_INTERCEPTOR_ENTER(ctx, memcpy); \
+ ASAN_MEMCPY_IMPL(ctx, to, from, size); \
+ } while (false)
+
+#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \
+ do { \
+ ASAN_INTERCEPTOR_ENTER(ctx, memset); \
+ ASAN_MEMSET_IMPL(ctx, block, c, size); \
+ } while (false)
+
+#include "sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc"
+
#endif // SANITIZER_FUCHSIA
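CHECK_RANGES_OVERLAP in the memcpy path above is the standard half-open interval test (with the to == from case tolerated separately, as the comment explains). A minimal sketch:

#include <cassert>
#include <cstddef>
#include <cstdint>

// [a, a+an) and [b, b+bn) overlap unless one ends before the other begins.
static bool RangesOverlap(uintptr_t a, size_t an, uintptr_t b, size_t bn) {
  return !(a + an <= b || b + bn <= a);
}

int main() {
  char buf[16];
  uintptr_t p = reinterpret_cast<uintptr_t>(buf);
  assert(!RangesOverlap(p, 8, p + 8, 8));  // adjacent halves: legal memcpy
  assert(RangesOverlap(p, 9, p + 8, 8));   // one shared byte: reported
  return 0;
}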
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_memintrinsics.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_memintrinsics.h
index bbc5390ceaa4..eb44f8f2f729 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_memintrinsics.h
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_memintrinsics.h
@@ -79,43 +79,6 @@ struct AsanInterceptorContext {
} \
} while (0)
-// memcpy is called during __asan_init() from the internals of printf(...).
-// We do not treat memcpy with to==from as a bug.
-// See http://llvm.org/bugs/show_bug.cgi?id=11763.
-#define ASAN_MEMCPY_IMPL(ctx, to, from, size) \
- do { \
- if (LIKELY(replace_intrin_cached)) { \
- if (LIKELY(to != from)) { \
- CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); \
- } \
- ASAN_READ_RANGE(ctx, from, size); \
- ASAN_WRITE_RANGE(ctx, to, size); \
- } else if (UNLIKELY(!asan_inited)) { \
- return internal_memcpy(to, from, size); \
- } \
- return REAL(memcpy)(to, from, size); \
- } while (0)
-
-// memset is called inside Printf.
-#define ASAN_MEMSET_IMPL(ctx, block, c, size) \
- do { \
- if (LIKELY(replace_intrin_cached)) { \
- ASAN_WRITE_RANGE(ctx, block, size); \
- } else if (UNLIKELY(!asan_inited)) { \
- return internal_memset(block, c, size); \
- } \
- return REAL(memset)(block, c, size); \
- } while (0)
-
-#define ASAN_MEMMOVE_IMPL(ctx, to, from, size) \
- do { \
- if (LIKELY(replace_intrin_cached)) { \
- ASAN_READ_RANGE(ctx, from, size); \
- ASAN_WRITE_RANGE(ctx, to, size); \
- } \
- return internal_memmove(to, from, size); \
- } while (0)
-
#define ASAN_READ_RANGE(ctx, offset, size) \
ACCESS_MEMORY_RANGE(ctx, offset, size, false)
#define ASAN_WRITE_RANGE(ctx, offset, size) \
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_interface_internal.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_interface_internal.h
index 987f855c0f9c..a99826378022 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_interface_internal.h
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_interface_internal.h
@@ -53,9 +53,10 @@ extern "C" {
const char *module_name; // Module name as a C string. This pointer is a
// unique identifier of a module.
uptr has_dynamic_init; // Non-zero if the global has dynamic initializer.
- uptr windows_padding; // TODO: Figure out how to remove this padding
- // that's simply here to make the MSVC incremental
- // linker happy...
+ __asan_global_source_location *gcc_location; // Source location of a global,
+ // used by GCC compiler. LLVM uses
+ // llvm-symbolizer that relies
+ // on DWARF debugging info.
uptr odr_indicator; // The address of the ODR indicator symbol.
};
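The swap above is layout-compatible: windows_padding was pointer-sized and gcc_location is a pointer, so previously instrumented objects keep lining up. A hedged sketch of the invariant (abbreviated fields, not the full __asan_global):

#include <cstdint>

struct SourceLocation { const char *filename; int line_no, column_no; };

struct GlobalOld { uintptr_t windows_padding; uintptr_t odr_indicator; };
struct GlobalNew { SourceLocation *gcc_location; uintptr_t odr_indicator; };

static_assert(sizeof(GlobalOld) == sizeof(GlobalNew),
              "a pointer field replaces pointer-sized padding; layout is unchanged");

int main() { return 0; }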
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_mac.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_mac.cpp
index c9bd5fb8e1a8..0c0750061447 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_mac.cpp
@@ -130,6 +130,18 @@ typedef void* dispatch_source_t;
typedef u64 dispatch_time_t;
typedef void (*dispatch_function_t)(void *block);
typedef void* (*worker_t)(void *block);
+typedef unsigned long dispatch_mach_reason;
+typedef void *dispatch_mach_msg_t;
+typedef int mach_error_t;
+typedef void *dispatch_mach_t;
+
+typedef void (*dispatch_mach_handler_function_t)(void *context,
+ dispatch_mach_reason reason,
+ dispatch_mach_msg_t message,
+ mach_error_t error);
+typedef void (^dispatch_mach_handler_t)(dispatch_mach_reason reason,
+ dispatch_mach_msg_t message,
+ mach_error_t error);
// A wrapper for the ObjC blocks used to support libdispatch.
typedef struct {
@@ -160,7 +172,7 @@ void asan_dispatch_call_block_and_release(void *block) {
VReport(2,
"asan_dispatch_call_block_and_release(): "
"context: %p, pthread_self: %p\n",
- block, pthread_self());
+ block, (void*)pthread_self());
asan_register_worker_thread(context->parent_tid, &stack);
// Call the original dispatcher for the block.
context->func(context->block);
@@ -193,7 +205,7 @@ asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); \
if (Verbosity() >= 2) { \
Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \
- asan_ctxt, pthread_self()); \
+ (void*)asan_ctxt, (void*)pthread_self()); \
PRINT_CURRENT_STACK(); \
} \
return REAL(dispatch_x_f)(dq, (void*)asan_ctxt, \
@@ -210,7 +222,7 @@ INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
GET_STACK_TRACE_THREAD;
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
if (Verbosity() >= 2) {
- Report("dispatch_after_f: %p\n", asan_ctxt);
+ Report("dispatch_after_f: %p\n", (void*)asan_ctxt);
PRINT_CURRENT_STACK();
}
return REAL(dispatch_after_f)(when, dq, (void*)asan_ctxt,
@@ -224,7 +236,7 @@ INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
if (Verbosity() >= 2) {
Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n",
- asan_ctxt, pthread_self());
+ (void*)asan_ctxt, (void*)pthread_self());
PRINT_CURRENT_STACK();
}
REAL(dispatch_group_async_f)(group, dq, (void*)asan_ctxt,
@@ -241,6 +253,8 @@ void dispatch_after(dispatch_time_t when, dispatch_queue_t queue,
void dispatch_source_set_cancel_handler(dispatch_source_t ds,
void(^work)(void));
void dispatch_source_set_event_handler(dispatch_source_t ds, void(^work)(void));
+dispatch_mach_t dispatch_mach_create(const char *label, dispatch_queue_t queue,
+ dispatch_mach_handler_t handler);
}
#define GET_ASAN_BLOCK(work) \
@@ -290,6 +304,34 @@ INTERCEPTOR(void, dispatch_source_set_event_handler,
GET_ASAN_BLOCK(work);
REAL(dispatch_source_set_event_handler)(ds, asan_block);
}
+
+INTERCEPTOR(void *, dispatch_mach_create, const char *label,
+ dispatch_queue_t dq, dispatch_mach_handler_t handler) {
+ int parent_tid = GetCurrentTidOrInvalid();
+ return REAL(dispatch_mach_create)(
+ label, dq,
+ ^(dispatch_mach_reason reason, dispatch_mach_msg_t message,
+ mach_error_t error) {
+ GET_STACK_TRACE_THREAD;
+ asan_register_worker_thread(parent_tid, &stack);
+ handler(reason, message, error);
+ });
+}
+
+INTERCEPTOR(void *, dispatch_mach_create_f, const char *label,
+ dispatch_queue_t dq, void *ctxt,
+ dispatch_mach_handler_function_t handler) {
+ int parent_tid = GetCurrentTidOrInvalid();
+ return REAL(dispatch_mach_create)(
+ label, dq,
+ ^(dispatch_mach_reason reason, dispatch_mach_msg_t message,
+ mach_error_t error) {
+ GET_STACK_TRACE_THREAD;
+ asan_register_worker_thread(parent_tid, &stack);
+ handler(ctxt, reason, message, error);
+ });
+}
+
#endif
#endif // SANITIZER_APPLE
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_win.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_win.cpp
index 4b76d4ebd3eb..ff78d7646a90 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_win.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_win.cpp
@@ -508,10 +508,10 @@ void ReplaceSystemMalloc() {
TryToOverrideFunction("_expand_base", (uptr)_expand);
if (flags()->windows_hook_rtl_allocators) {
- INTERCEPT_FUNCTION(HeapSize);
- INTERCEPT_FUNCTION(HeapFree);
- INTERCEPT_FUNCTION(HeapReAlloc);
- INTERCEPT_FUNCTION(HeapAlloc);
+ ASAN_INTERCEPT_FUNC(HeapSize);
+ ASAN_INTERCEPT_FUNC(HeapFree);
+ ASAN_INTERCEPT_FUNC(HeapReAlloc);
+ ASAN_INTERCEPT_FUNC(HeapAlloc);
// Undocumented functions must be intercepted by name, not by symbol.
__interception::OverrideFunction("RtlSizeHeap", (uptr)WRAP(RtlSizeHeap),
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_memory_profile.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_memory_profile.cpp
index 4fcd5600ed1a..3396fc2bab94 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_memory_profile.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_memory_profile.cpp
@@ -11,12 +11,11 @@
// This file implements __sanitizer_print_memory_profile.
//===----------------------------------------------------------------------===//
+#include "asan/asan_allocator.h"
+#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
-#include "sanitizer_common/sanitizer_stoptheworld.h"
-#include "lsan/lsan_common.h"
-#include "asan/asan_allocator.h"
#if CAN_SANITIZE_LEAKS
@@ -100,17 +99,16 @@ static void ChunkCallback(uptr chunk, void *arg) {
FindHeapChunkByAllocBeg(chunk));
}
-static void MemoryProfileCB(const SuspendedThreadsList &suspended_threads_list,
- void *argument) {
+static void MemoryProfileCB(uptr top_percent, uptr max_number_of_contexts) {
HeapProfile hp;
+ __lsan::LockAllocator();
__lsan::ForEachChunk(ChunkCallback, &hp);
- uptr *Arg = reinterpret_cast<uptr*>(argument);
- hp.Print(Arg[0], Arg[1]);
+ __lsan::UnlockAllocator();
+ hp.Print(top_percent, max_number_of_contexts);
if (Verbosity())
__asan_print_accumulated_stats();
}
-
} // namespace __asan
#endif // CAN_SANITIZE_LEAKS
@@ -120,10 +118,7 @@ SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_memory_profile(uptr top_percent,
uptr max_number_of_contexts) {
#if CAN_SANITIZE_LEAKS
- uptr Arg[2];
- Arg[0] = top_percent;
- Arg[1] = max_number_of_contexts;
- __sanitizer::StopTheWorld(__asan::MemoryProfileCB, Arg);
+ __asan::MemoryProfileCB(top_percent, max_number_of_contexts);
#endif // CAN_SANITIZE_LEAKS
}
} // extern "C"
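Since the profile no longer stops the world, it can be triggered from the running process at any point; it locks the allocator, walks the chunks, and prints. A usage sketch:

// Build with: clang++ -fsanitize=address profile_demo.cpp
#include <sanitizer/common_interface_defs.h>
#include <cstdlib>

int main() {
  for (int i = 0; i < 100; ++i)
    (void)malloc(1024);  // deliberately not freed so the profile has content
  // Chunks covering the top 90% of the live heap, at most 16 stack contexts.
  __sanitizer_print_memory_profile(90, 16);
  return 0;
}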
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_poisoning.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_poisoning.cpp
index 5164b7d860f4..e99b91d9c0a7 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_poisoning.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_poisoning.cpp
@@ -449,11 +449,11 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
// FIXME: Two of these three checks are disabled until we fix
// https://github.com/google/sanitizers/issues/258.
// if (d1 != d2)
- // CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
+ // DCHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
if (a + granularity <= d1)
- CHECK_EQ(*(u8 *)MemToShadow(a), 0);
+ DCHECK_EQ(*(u8 *)MemToShadow(a), 0);
// if (d2 + granularity <= c && c <= end)
- // CHECK_EQ(*(u8 *)MemToShadow(c - granularity),
+ // DCHECK_EQ(*(u8 *)MemToShadow(c - granularity),
// kAsanContiguousContainerOOBMagic);
uptr b1 = RoundDownTo(new_end, granularity);
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_stack.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_stack.h
index b9575d2f427e..02a76af847ae 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_stack.h
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_stack.h
@@ -32,24 +32,24 @@ u32 GetMallocContextSize();
// as early as possible (in functions exposed to the user), as we generally
// don't want stack trace to contain functions from ASan internals.
-#define GET_STACK_TRACE(max_size, fast) \
- BufferedStackTrace stack; \
- if (max_size <= 2) { \
- stack.size = max_size; \
- if (max_size > 0) { \
- stack.top_frame_bp = GET_CURRENT_FRAME(); \
- stack.trace_buffer[0] = StackTrace::GetCurrentPc(); \
- if (max_size > 1) stack.trace_buffer[1] = GET_CALLER_PC(); \
- } \
- } else { \
- stack.Unwind(StackTrace::GetCurrentPc(), \
- GET_CURRENT_FRAME(), nullptr, fast, max_size); \
+#define GET_STACK_TRACE(max_size, fast) \
+ UNINITIALIZED BufferedStackTrace stack; \
+ if (max_size <= 2) { \
+ stack.size = max_size; \
+ if (max_size > 0) { \
+ stack.top_frame_bp = GET_CURRENT_FRAME(); \
+ stack.trace_buffer[0] = StackTrace::GetCurrentPc(); \
+ if (max_size > 1) \
+ stack.trace_buffer[1] = GET_CALLER_PC(); \
+ } \
+ } else { \
+ stack.Unwind(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), nullptr, \
+ fast, max_size); \
}
-#define GET_STACK_TRACE_FATAL(pc, bp) \
- BufferedStackTrace stack; \
- stack.Unwind(pc, bp, nullptr, \
- common_flags()->fast_unwind_on_fatal)
+#define GET_STACK_TRACE_FATAL(pc, bp) \
+ UNINITIALIZED BufferedStackTrace stack; \
+ stack.Unwind(pc, bp, nullptr, common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_FATAL_HERE \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_thread.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_thread.cpp
index 003cd2b9eee8..f718adf5e1f7 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_thread.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_thread.cpp
@@ -10,24 +10,25 @@
//
// Thread-related code.
//===----------------------------------------------------------------------===//
+#include "asan_thread.h"
+
#include "asan_allocator.h"
#include "asan_interceptors.h"
+#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_stack.h"
-#include "asan_thread.h"
-#include "asan_mapping.h"
+#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
-#include "lsan/lsan_common.h"
namespace __asan {
// AsanThreadContext implementation.
void AsanThreadContext::OnCreated(void *arg) {
- CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg);
+ CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs *>(arg);
if (args->stack)
stack_id = StackDepotPut(*args->stack);
thread = args->thread;
@@ -39,34 +40,50 @@ void AsanThreadContext::OnFinished() {
thread = nullptr;
}
-// MIPS requires aligned address
-static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadRegistry *asan_thread_registry;
+static ThreadArgRetval *thread_data;
static Mutex mu_for_thread_context;
static LowLevelAllocator allocator_for_thread_context;
static ThreadContextBase *GetAsanThreadContext(u32 tid) {
Lock lock(&mu_for_thread_context);
- return new(allocator_for_thread_context) AsanThreadContext(tid);
+ return new (allocator_for_thread_context) AsanThreadContext(tid);
}
-ThreadRegistry &asanThreadRegistry() {
+static void InitThreads() {
static bool initialized;
// Don't worry about thread_safety - this should be called when there is
// a single thread.
- if (!initialized) {
- // Never reuse ASan threads: we store pointer to AsanThreadContext
- // in TSD and can't reliably tell when no more TSD destructors will
- // be called. It would be wrong to reuse AsanThreadContext for another
- // thread before all TSD destructors will be called for it.
- asan_thread_registry =
- new (thread_registry_placeholder) ThreadRegistry(GetAsanThreadContext);
- initialized = true;
- }
+ if (LIKELY(initialized))
+ return;
+ // Never reuse ASan threads: we store pointer to AsanThreadContext
+ // in TSD and can't reliably tell when no more TSD destructors will
+ // be called. It would be wrong to reuse AsanThreadContext for another
+ // thread before all TSD destructors will be called for it.
+
+ // MIPS requires aligned address
+ static ALIGNED(alignof(
+ ThreadRegistry)) char thread_registry_placeholder[sizeof(ThreadRegistry)];
+ static ALIGNED(alignof(
+ ThreadArgRetval)) char thread_data_placeholder[sizeof(ThreadArgRetval)];
+
+ asan_thread_registry =
+ new (thread_registry_placeholder) ThreadRegistry(GetAsanThreadContext);
+ thread_data = new (thread_data_placeholder) ThreadArgRetval();
+ initialized = true;
+}
+
+ThreadRegistry &asanThreadRegistry() {
+ InitThreads();
return *asan_thread_registry;
}
+ThreadArgRetval &asanThreadArgRetval() {
+ InitThreads();
+ return *thread_data;
+}
+
AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {
return static_cast<AsanThreadContext *>(
asanThreadRegistry().GetThreadLocked(tid));
@@ -79,7 +96,7 @@ AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg,
bool detached) {
uptr PageSize = GetPageSizeCached();
uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
- AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__);
+ AsanThread *thread = (AsanThread *)MmapOrDie(size, __func__);
thread->start_routine_ = start_routine;
thread->arg_ = arg;
AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
@@ -89,7 +106,7 @@ AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg,
}
void AsanThread::TSDDtor(void *tsd) {
- AsanThreadContext *context = (AsanThreadContext*)tsd;
+ AsanThreadContext *context = (AsanThreadContext *)tsd;
VReport(1, "T%d TSDDtor\n", context->tid);
if (context->thread)
context->thread->Destroy();
@@ -143,8 +160,7 @@ void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
current_fake_stack->Destroy(this->tid());
}
-void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
- uptr *bottom_old,
+void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save, uptr *bottom_old,
uptr *size_old) {
if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
Report("ERROR: finishing a fiber switch that has not started\n");
@@ -170,7 +186,8 @@ void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
if (!atomic_load(&stack_switching_, memory_order_acquire)) {
// Make sure the stack bounds are fully initialized.
- if (stack_bottom_ >= stack_top_) return {0, 0};
+ if (stack_bottom_ >= stack_top_)
+ return {0, 0};
return {stack_bottom_, stack_top_};
}
char local;
@@ -183,13 +200,9 @@ inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
return {stack_bottom_, stack_top_};
}
-uptr AsanThread::stack_top() {
- return GetStackBounds().top;
-}
+uptr AsanThread::stack_top() { return GetStackBounds().top; }
-uptr AsanThread::stack_bottom() {
- return GetStackBounds().bottom;
-}
+uptr AsanThread::stack_bottom() { return GetStackBounds().bottom; }
uptr AsanThread::stack_size() {
const auto bounds = GetStackBounds();
@@ -210,8 +223,8 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
// This CAS checks if the state was 0 and if so changes it to state 1,
// if that was successful, it initializes the pointer.
if (atomic_compare_exchange_strong(
- reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
- memory_order_relaxed)) {
+ reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
+ memory_order_relaxed)) {
uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size));
CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
stack_size_log =
@@ -264,7 +277,8 @@ thread_return_t AsanThread::ThreadStart(tid_t os_id) {
Init();
asanThreadRegistry().StartThread(tid(), os_id, ThreadType::Regular, nullptr);
- if (common_flags()->use_sigaltstack) SetAlternateSignalStack();
+ if (common_flags()->use_sigaltstack)
+ SetAlternateSignalStack();
if (!start_routine_) {
// start_routine_ == 0 if we're on the main thread or on one of the
@@ -306,6 +320,7 @@ void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
GetThreadStackAndTls(tid() == kMainTid, &stack_bottom_, &stack_size,
&tls_begin_, &tls_size);
stack_top_ = RoundDownTo(stack_bottom_ + stack_size, ASAN_SHADOW_GRANULARITY);
+ stack_bottom_ = RoundDownTo(stack_bottom_, ASAN_SHADOW_GRANULARITY);
tls_end_ = tls_begin_ + tls_size;
dtls_ = DTLS_Get();
@@ -339,14 +354,14 @@ bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
bottom = fake_stack->AddrIsInFakeStack(addr);
CHECK(bottom);
access->offset = addr - bottom;
- access->frame_pc = ((uptr*)bottom)[2];
- access->frame_descr = (const char *)((uptr*)bottom)[1];
+ access->frame_pc = ((uptr *)bottom)[2];
+ access->frame_descr = (const char *)((uptr *)bottom)[1];
return true;
}
uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8); // align addr.
uptr mem_ptr = RoundDownTo(aligned_addr, ASAN_SHADOW_GRANULARITY);
- u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
- u8 *shadow_bottom = (u8*)MemToShadow(bottom);
+ u8 *shadow_ptr = (u8 *)MemToShadow(aligned_addr);
+ u8 *shadow_bottom = (u8 *)MemToShadow(bottom);
while (shadow_ptr >= shadow_bottom &&
*shadow_ptr != kAsanStackLeftRedzoneMagic) {
@@ -368,7 +383,7 @@ bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
CHECK(ptr[0] == kCurrentStackFrameMagic);
access->offset = addr - (uptr)ptr;
access->frame_pc = ptr[2];
- access->frame_descr = (const char*)ptr[1];
+ access->frame_descr = (const char *)ptr[1];
return true;
}
@@ -386,8 +401,8 @@ uptr AsanThread::GetStackVariableShadowStart(uptr addr) {
}
uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8); // align addr.
- u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
- u8 *shadow_bottom = (u8*)MemToShadow(bottom);
+ u8 *shadow_ptr = (u8 *)MemToShadow(aligned_addr);
+ u8 *shadow_bottom = (u8 *)MemToShadow(bottom);
while (shadow_ptr >= shadow_bottom &&
(*shadow_ptr != kAsanStackLeftRedzoneMagic &&
@@ -471,16 +486,23 @@ void EnsureMainThreadIDIsCorrect() {
__asan::AsanThread *GetAsanThreadByOsIDLocked(tid_t os_id) {
__asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
__asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
- if (!context) return nullptr;
+ if (!context)
+ return nullptr;
return context->thread;
}
-} // namespace __asan
+} // namespace __asan
// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
-void LockThreadRegistry() { __asan::asanThreadRegistry().Lock(); }
+void LockThreads() {
+ __asan::asanThreadRegistry().Lock();
+ __asan::asanThreadArgRetval().Lock();
+}
-void UnlockThreadRegistry() { __asan::asanThreadRegistry().Unlock(); }
+void UnlockThreads() {
+ __asan::asanThreadArgRetval().Unlock();
+ __asan::asanThreadRegistry().Unlock();
+}
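Note the ordering: LockThreads takes the registry lock before the arg/retval lock, and UnlockThreads releases them in exactly the reverse order; any other path that needs both locks must do the same or risk deadlock. The discipline in miniature (plain mutexes, illustrative names):

#include <mutex>

std::mutex registry_mu, arg_retval_mu;  // stand-ins for the two ASan locks

void LockThreads() {    // acquire in one fixed order...
  registry_mu.lock();
  arg_retval_mu.lock();
}
void UnlockThreads() {  // ...release in exactly the reverse order
  arg_retval_mu.unlock();
  registry_mu.unlock();
}

int main() { LockThreads(); UnlockThreads(); return 0; }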
static ThreadRegistry *GetAsanThreadRegistryLocked() {
__asan::asanThreadRegistry().CheckLocked();
@@ -493,7 +515,8 @@ bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
uptr *cache_end, DTLS **dtls) {
__asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
- if (!t) return false;
+ if (!t)
+ return false;
*stack_begin = t->stack_bottom();
*stack_end = t->stack_top();
*tls_begin = t->tls_begin();
@@ -534,33 +557,7 @@ void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {
}
void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
- GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
- [](ThreadContextBase *tctx, void *ptrs) {
- // Look for the arg pointer of threads that have been created or are
- // running. This is necessary to prevent false positive leaks due to the
- // AsanThread holding the only live reference to a heap object. This
- // can happen because the `pthread_create()` interceptor doesn't wait
- // for the child thread to start before returning and thus loosing the
- // the only live reference to the heap object on the stack.
-
- __asan::AsanThreadContext *atctx =
- static_cast<__asan::AsanThreadContext *>(tctx);
-
- // Note ThreadStatusRunning is required because there is a small window
- // where the thread status switches to `ThreadStatusRunning` but the
- // `arg` pointer still isn't on the stack yet.
- if (atctx->status != ThreadStatusCreated &&
- atctx->status != ThreadStatusRunning)
- return;
-
- uptr thread_arg = reinterpret_cast<uptr>(atctx->thread->get_arg());
- if (!thread_arg)
- return;
-
- auto ptrsVec = reinterpret_cast<InternalMmapVector<uptr> *>(ptrs);
- ptrsVec->push_back(thread_arg);
- },
- ptrs);
+ __asan::asanThreadArgRetval().GetAllPtrsLocked(ptrs);
}
void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
@@ -573,11 +570,7 @@ void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
threads);
}
-void FinishThreadLocked(u32 tid) {
- GetAsanThreadRegistryLocked()->FinishThread(tid);
-}
-
-} // namespace __lsan
+} // namespace __lsan
// ---------------------- Interface ---------------- {{{1
using namespace __asan;
@@ -591,20 +584,18 @@ void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom,
VReport(1, "__asan_start_switch_fiber called from unknown thread\n");
return;
}
- t->StartSwitchFiber((FakeStack**)fakestacksave, (uptr)bottom, size);
+ t->StartSwitchFiber((FakeStack **)fakestacksave, (uptr)bottom, size);
}
SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_finish_switch_fiber(void* fakestack,
- const void **bottom_old,
+void __sanitizer_finish_switch_fiber(void *fakestack, const void **bottom_old,
uptr *size_old) {
AsanThread *t = GetCurrentThread();
if (!t) {
VReport(1, "__asan_finish_switch_fiber called from unknown thread\n");
return;
}
- t->FinishSwitchFiber((FakeStack*)fakestack,
- (uptr*)bottom_old,
- (uptr*)size_old);
+ t->FinishSwitchFiber((FakeStack *)fakestack, (uptr *)bottom_old,
+ (uptr *)size_old);
}
}
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_thread.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_thread.h
index 801a3960ec6c..c131dd40d864 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_thread.h
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_thread.h
@@ -15,11 +15,12 @@
#define ASAN_THREAD_H
#include "asan_allocator.h"
-#include "asan_internal.h"
#include "asan_fake_stack.h"
+#include "asan_internal.h"
#include "asan_stats.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_thread_arg_retval.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
namespace __sanitizer {
@@ -129,7 +130,7 @@ class AsanThread {
void *extra_spill_area() { return &extra_spill_area_; }
- void *get_arg() { return arg_; }
+ void *get_arg() const { return arg_; }
private:
// NOTE: There is no AsanThread constructor. It is allocated
@@ -171,6 +172,7 @@ class AsanThread {
// Returns a single instance of registry.
ThreadRegistry &asanThreadRegistry();
+ThreadArgRetval &asanThreadArgRetval();
// Must be called under ThreadRegistryLock.
AsanThreadContext *GetThreadContextByTidLocked(u32 tid);
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_win.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_win.cpp
index 7dbd7ab98a17..25f2e6cd551f 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_win.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_win.cpp
@@ -159,6 +159,8 @@ INTERCEPTOR_WINAPI(HANDLE, CreateThread, LPSECURITY_ATTRIBUTES security,
namespace __asan {
void InitializePlatformInterceptors() {
+ __interception::SetErrorReportCallback(Report);
+
// The interceptors were not designed to be removable, so we have to keep this
// module alive for the life of the process.
HMODULE pinned;
@@ -194,9 +196,12 @@ void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
}
void FlushUnneededASanShadowMemory(uptr p, uptr size) {
+ // Only asan on 64-bit Windows supports committing shadow memory on demand.
+#if SANITIZER_WINDOWS64
// Since asan's mapping is compacting, the shadow chunk may be
// not page-aligned, so we only flush the page-aligned portion.
ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
+#endif
}
// ---------------------- TSD ---------------- {{{
diff --git a/contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi.cpp b/contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi.cpp
new file mode 100644
index 000000000000..769fde47a33b
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi.cpp
@@ -0,0 +1,85 @@
+//===-asan_abi.cpp - ASan Stable ABI---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "asan_abi.h"
+
+extern "C" {
+// Functions concerning instrumented global variables:
+void __asan_abi_register_image_globals(void) {}
+void __asan_abi_unregister_image_globals(void) {}
+
+// Functions concerning dynamic library initialization
+void __asan_abi_before_dynamic_init(const char *module_name) {}
+void __asan_abi_after_dynamic_init(void) {}
+
+// Functions concerning block memory destinations
+void *__asan_abi_memcpy(void *d, const void *s, size_t n) { return NULL; }
+void *__asan_abi_memmove(void *d, const void *s, size_t n) { return NULL; }
+void *__asan_abi_memset(void *p, int c, size_t n) { return NULL; }
+
+// Functions concerning RTL startup and initialization
+void __asan_abi_init(void) {}
+void __asan_abi_handle_no_return(void) {}
+
+// Functions concerning memory load and store reporting
+void __asan_abi_report_load_n(void *p, size_t n, bool abort) {}
+void __asan_abi_report_exp_load_n(void *p, size_t n, int exp, bool abort) {}
+void __asan_abi_report_store_n(void *p, size_t n, bool abort) {}
+void __asan_abi_report_exp_store_n(void *p, size_t n, int exp, bool abort) {}
+
+// Functions concerning memory load and store
+void __asan_abi_load_n(void *p, size_t n, bool abort) {}
+void __asan_abi_exp_load_n(void *p, size_t n, int exp, bool abort) {}
+void __asan_abi_store_n(void *p, size_t n, bool abort) {}
+void __asan_abi_exp_store_n(void *p, size_t n, int exp, bool abort) {}
+
+// Functions concerning query about whether memory is poisoned
+int __asan_abi_address_is_poisoned(void const volatile *p) { return 0; }
+void *__asan_abi_region_is_poisoned(void const volatile *p, size_t size) {
+ return NULL;
+}
+
+// Functions concerning the poisoning of memory
+void __asan_abi_poison_memory_region(void const volatile *p, size_t n) {}
+void __asan_abi_unpoison_memory_region(void const volatile *p, size_t n) {}
+
+// Functions concerning the partial poisoning of memory
+void __asan_abi_set_shadow_xx_n(void *p, unsigned char xx, size_t n) {}
+
+// Functions concerning stack poisoning
+void __asan_abi_poison_stack_memory(void *p, size_t n) {}
+void __asan_abi_unpoison_stack_memory(void *p, size_t n) {}
+
+// Functions concerning redzone poisoning
+void __asan_abi_poison_intra_object_redzone(void *p, size_t size) {}
+void __asan_abi_unpoison_intra_object_redzone(void *p, size_t size) {}
+
+// Functions concerning array cookie poisoning
+void __asan_abi_poison_cxx_array_cookie(void *p) {}
+void *__asan_abi_load_cxx_array_cookie(void **p) { return NULL; }
+
+// Functions concerning fake stacks
+void *__asan_abi_get_current_fake_stack(void) { return NULL; }
+void *__asan_abi_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
+ void **end) {
+ return NULL;
+}
+
+// Functions concerning poisoning and unpoisoning fake stack alloca
+void __asan_abi_alloca_poison(void *addr, size_t size) {}
+void __asan_abi_allocas_unpoison(void *top, void *bottom) {}
+
+// Functions concerning fake stack malloc
+void *__asan_abi_stack_malloc_n(size_t scale, size_t size) { return NULL; }
+void *__asan_abi_stack_malloc_always_n(size_t scale, size_t size) {
+ return NULL;
+}
+
+// Functions concerning fake stack free
+void __asan_abi_stack_free_n(int scale, void *p, size_t n) {}
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi.h b/contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi.h
new file mode 100644
index 000000000000..562a552662b3
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi.h
@@ -0,0 +1,84 @@
+//===-asan_abi.h - ASan Stable ABI Interface-------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ASAN_ABI_H
+#define ASAN_ABI_H
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <sys/types.h>
+
+extern "C" {
+// Functions concerning instrumented global variables:
+void __asan_abi_register_image_globals();
+void __asan_abi_unregister_image_globals();
+
+// Functions concerning dynamic library initialization
+void __asan_abi_before_dynamic_init(const char *module_name);
+void __asan_abi_after_dynamic_init();
+
+// Functions concerning block memory destinations
+void *__asan_abi_memcpy(void *d, const void *s, size_t n);
+void *__asan_abi_memmove(void *d, const void *s, size_t n);
+void *__asan_abi_memset(void *p, int c, size_t n);
+
+// Functions concerning RTL startup and initialization
+void __asan_abi_init();
+void __asan_abi_handle_no_return();
+
+// Functions concerning memory load and store reporting
+void __asan_abi_report_load_n(void *p, size_t n, bool abort);
+void __asan_abi_report_exp_load_n(void *p, size_t n, int exp, bool abort);
+void __asan_abi_report_store_n(void *p, size_t n, bool abort);
+void __asan_abi_report_exp_store_n(void *p, size_t n, int exp, bool abort);
+
+// Functions concerning memory load and store
+void __asan_abi_load_n(void *p, size_t n, bool abort);
+void __asan_abi_exp_load_n(void *p, size_t n, int exp, bool abort);
+void __asan_abi_store_n(void *p, size_t n, bool abort);
+void __asan_abi_exp_store_n(void *p, size_t n, int exp, bool abort);
+
+// Functions concerning query about whether memory is poisoned
+int __asan_abi_address_is_poisoned(void const volatile *p);
+void *__asan_abi_region_is_poisoned(void const volatile *p, size_t size);
+
+// Functions concerning the poisoning of memory
+void __asan_abi_unpoison_memory_region(void const volatile *p, size_t n);
+void __asan_abi_poison_memory_region(void const volatile *p, size_t n);
+
+// Functions concerning the partial poisoning of memory
+void __asan_abi_set_shadow_xx_n(void *p, unsigned char xx, size_t n);
+
+// Functions concerning stack poisoning
+void __asan_abi_poison_stack_memory(void *p, size_t n);
+void __asan_abi_unpoison_stack_memory(void *p, size_t n);
+
+// Functions concerning redzone poisoning
+void __asan_abi_poison_intra_object_redzone(void *p, size_t size);
+void __asan_abi_unpoison_intra_object_redzone(void *p, size_t size);
+
+// Functions concerning array cookie poisoning
+void __asan_abi_poison_cxx_array_cookie(void *p);
+void *__asan_abi_load_cxx_array_cookie(void **p);
+
+// Functions concerning fake stacks
+void *__asan_abi_get_current_fake_stack();
+void *__asan_abi_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
+ void **end);
+// Functions concerning poisoning and unpoisoning fake stack alloca
+void __asan_abi_alloca_poison(void *addr, size_t size);
+void __asan_abi_allocas_unpoison(void *top, void *bottom);
+
+// Functions concerning fake stack malloc
+void *__asan_abi_stack_malloc_n(size_t scale, size_t size);
+void *__asan_abi_stack_malloc_always_n(size_t scale, size_t size);
+
+// Functions concerning fake stack free
+void __asan_abi_stack_free_n(int scale, void *p, size_t n);
+}
+#endif // ASAN_ABI_H
diff --git a/contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi_shim.cpp b/contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi_shim.cpp
new file mode 100644
index 000000000000..61c45db4bb9d
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi_shim.cpp
@@ -0,0 +1,485 @@
+//===-- asan_abi_shim.cpp - ASan Stable ABI Shim --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../asan/asan_interface_internal.h"
+#include "asan_abi.h"
+#include <assert.h>
+
+extern "C" {
+// Functions concerning instrumented global variables
+void __asan_register_image_globals(uptr *flag) {
+ __asan_abi_register_image_globals();
+}
+
+void __asan_unregister_image_globals(uptr *flag) {
+ __asan_abi_unregister_image_globals();
+}
+void __asan_register_elf_globals(uptr *flag, void *start, void *stop) {}
+void __asan_unregister_elf_globals(uptr *flag, void *start, void *stop) {}
+void __asan_register_globals(__asan_global *globals, uptr n) {}
+void __asan_unregister_globals(__asan_global *globals, uptr n) {}
+
+// Functions concerning dynamic library initialization
+void __asan_before_dynamic_init(const char *module_name) {
+ __asan_abi_before_dynamic_init(module_name);
+}
+void __asan_after_dynamic_init(void) { __asan_abi_after_dynamic_init(); }
+
+// Functions concerning block memory destinations
+void *__asan_memcpy(void *dst, const void *src, uptr size) {
+ return __asan_abi_memcpy(dst, src, size);
+}
+void *__asan_memset(void *s, int c, uptr n) {
+ return __asan_abi_memset(s, c, n);
+}
+void *__asan_memmove(void *dest, const void *src, uptr n) {
+ return __asan_abi_memmove(dest, src, n);
+}
+
+// Functions concerning RTL startup and initialization
+void __asan_init(void) {
+ static_assert(sizeof(uptr) == 8);
+ static_assert(sizeof(u64) == 8);
+ static_assert(sizeof(u32) == 4);
+
+ __asan_abi_init();
+}
+void __asan_version_mismatch_check_v8(void) {}
+void __asan_handle_no_return(void) { __asan_abi_handle_no_return(); }
+
+// Variables concerning RTL state. These exist provisionally for completeness
+// but will likely move into the Stable ABI implementation, not the shim.
+uptr __asan_shadow_memory_dynamic_address = (uptr)0xdeaddeaddeadbeaf;
+int __asan_option_detect_stack_use_after_return = 0;
+
+// Functions concerning memory load and store reporting
+void __asan_report_load1(uptr addr) {
+ __asan_abi_report_load_n((void *)addr, 1, true);
+}
+void __asan_report_load2(uptr addr) {
+ __asan_abi_report_load_n((void *)addr, 2, true);
+}
+void __asan_report_load4(uptr addr) {
+ __asan_abi_report_load_n((void *)addr, 4, true);
+}
+void __asan_report_load8(uptr addr) {
+ __asan_abi_report_load_n((void *)addr, 8, true);
+}
+void __asan_report_load16(uptr addr) {
+ __asan_abi_report_load_n((void *)addr, 16, true);
+}
+void __asan_report_load_n(uptr addr, uptr size) {
+ __asan_abi_report_load_n((void *)addr, size, true);
+}
+void __asan_report_store1(uptr addr) {
+ __asan_abi_report_store_n((void *)addr, 1, true);
+}
+void __asan_report_store2(uptr addr) {
+ __asan_abi_report_store_n((void *)addr, 2, true);
+}
+void __asan_report_store4(uptr addr) {
+ __asan_abi_report_store_n((void *)addr, 4, true);
+}
+void __asan_report_store8(uptr addr) {
+ __asan_abi_report_store_n((void *)addr, 8, true);
+}
+void __asan_report_store16(uptr addr) {
+ __asan_abi_report_store_n((void *)addr, 16, true);
+}
+void __asan_report_store_n(uptr addr, uptr size) {
+ __asan_abi_report_store_n((void *)addr, size, true);
+}
+
+// Functions concerning memory load and store reporting (experimental variants)
+void __asan_report_exp_load1(uptr addr, u32 exp) {
+  __asan_abi_report_exp_load_n((void *)addr, 1, exp, true);
+}
+void __asan_report_exp_load2(uptr addr, u32 exp) {
+  __asan_abi_report_exp_load_n((void *)addr, 2, exp, true);
+}
+void __asan_report_exp_load4(uptr addr, u32 exp) {
+  __asan_abi_report_exp_load_n((void *)addr, 4, exp, true);
+}
+void __asan_report_exp_load8(uptr addr, u32 exp) {
+  __asan_abi_report_exp_load_n((void *)addr, 8, exp, true);
+}
+void __asan_report_exp_load16(uptr addr, u32 exp) {
+  __asan_abi_report_exp_load_n((void *)addr, 16, exp, true);
+}
+void __asan_report_exp_load_n(uptr addr, uptr size, u32 exp) {
+ __asan_abi_report_exp_load_n((void *)addr, size, exp, true);
+}
+void __asan_report_exp_store1(uptr addr, u32 exp) {
+  __asan_abi_report_exp_store_n((void *)addr, 1, exp, true);
+}
+void __asan_report_exp_store2(uptr addr, u32 exp) {
+  __asan_abi_report_exp_store_n((void *)addr, 2, exp, true);
+}
+void __asan_report_exp_store4(uptr addr, u32 exp) {
+  __asan_abi_report_exp_store_n((void *)addr, 4, exp, true);
+}
+void __asan_report_exp_store8(uptr addr, u32 exp) {
+  __asan_abi_report_exp_store_n((void *)addr, 8, exp, true);
+}
+void __asan_report_exp_store16(uptr addr, u32 exp) {
+  __asan_abi_report_exp_store_n((void *)addr, 16, exp, true);
+}
+void __asan_report_exp_store_n(uptr addr, uptr size, u32 exp) {
+ __asan_abi_report_exp_store_n((void *)addr, size, exp, true);
+}
+
+// Functions concerning memory load and store reporting (noabort variants)
+void __asan_report_load1_noabort(uptr addr) {
+ __asan_abi_report_load_n((void *)addr, 1, false);
+}
+void __asan_report_load2_noabort(uptr addr) {
+ __asan_abi_report_load_n((void *)addr, 2, false);
+}
+void __asan_report_load4_noabort(uptr addr) {
+ __asan_abi_report_load_n((void *)addr, 4, false);
+}
+void __asan_report_load8_noabort(uptr addr) {
+ __asan_abi_report_load_n((void *)addr, 8, false);
+}
+void __asan_report_load16_noabort(uptr addr) {
+ __asan_abi_report_load_n((void *)addr, 16, false);
+}
+void __asan_report_load_n_noabort(uptr addr, uptr size) {
+ __asan_abi_report_load_n((void *)addr, size, false);
+}
+void __asan_report_store1_noabort(uptr addr) {
+ __asan_abi_report_store_n((void *)addr, 1, false);
+}
+void __asan_report_store2_noabort(uptr addr) {
+ __asan_abi_report_store_n((void *)addr, 2, false);
+}
+void __asan_report_store4_noabort(uptr addr) {
+ __asan_abi_report_store_n((void *)addr, 4, false);
+}
+void __asan_report_store8_noabort(uptr addr) {
+ __asan_abi_report_store_n((void *)addr, 8, false);
+}
+void __asan_report_store16_noabort(uptr addr) {
+ __asan_abi_report_store_n((void *)addr, 16, false);
+}
+void __asan_report_store_n_noabort(uptr addr, uptr size) {
+ __asan_abi_report_store_n((void *)addr, size, false);
+}
+
+// Functions concerning memory load and store
+void __asan_load1(uptr addr) { __asan_abi_load_n((void *)addr, 1, true); }
+void __asan_load2(uptr addr) { __asan_abi_load_n((void *)addr, 2, true); }
+void __asan_load4(uptr addr) { __asan_abi_load_n((void *)addr, 4, true); }
+void __asan_load8(uptr addr) { __asan_abi_load_n((void *)addr, 8, true); }
+void __asan_load16(uptr addr) { __asan_abi_load_n((void *)addr, 16, true); }
+void __asan_loadN(uptr addr, uptr size) {
+ __asan_abi_load_n((void *)addr, size, true);
+}
+void __asan_store1(uptr addr) { __asan_abi_store_n((void *)addr, 1, true); }
+void __asan_store2(uptr addr) { __asan_abi_store_n((void *)addr, 2, true); }
+void __asan_store4(uptr addr) { __asan_abi_store_n((void *)addr, 4, true); }
+void __asan_store8(uptr addr) { __asan_abi_store_n((void *)addr, 8, true); }
+void __asan_store16(uptr addr) { __asan_abi_store_n((void *)addr, 16, true); }
+void __asan_storeN(uptr addr, uptr size) {
+ __asan_abi_store_n((void *)addr, size, true);
+}
+
+// Functions concerning memory load and store (experimental variants)
+void __asan_exp_load1(uptr addr, u32 exp) {
+ __asan_abi_exp_load_n((void *)addr, 1, exp, true);
+}
+void __asan_exp_load2(uptr addr, u32 exp) {
+ __asan_abi_exp_load_n((void *)addr, 2, exp, true);
+}
+void __asan_exp_load4(uptr addr, u32 exp) {
+ __asan_abi_exp_load_n((void *)addr, 4, exp, true);
+}
+void __asan_exp_load8(uptr addr, u32 exp) {
+ __asan_abi_exp_load_n((void *)addr, 8, exp, true);
+}
+void __asan_exp_load16(uptr addr, u32 exp) {
+ __asan_abi_exp_load_n((void *)addr, 16, exp, true);
+}
+void __asan_exp_loadN(uptr addr, uptr size, u32 exp) {
+ __asan_abi_exp_load_n((void *)addr, size, exp, true);
+}
+void __asan_exp_store1(uptr addr, u32 exp) {
+ __asan_abi_exp_store_n((void *)addr, 1, exp, true);
+}
+void __asan_exp_store2(uptr addr, u32 exp) {
+ __asan_abi_exp_store_n((void *)addr, 2, exp, true);
+}
+void __asan_exp_store4(uptr addr, u32 exp) {
+ __asan_abi_exp_store_n((void *)addr, 4, exp, true);
+}
+void __asan_exp_store8(uptr addr, u32 exp) {
+ __asan_abi_exp_store_n((void *)addr, 8, exp, true);
+}
+void __asan_exp_store16(uptr addr, u32 exp) {
+ __asan_abi_exp_store_n((void *)addr, 16, exp, true);
+}
+void __asan_exp_storeN(uptr addr, uptr size, u32 exp) {
+ __asan_abi_exp_store_n((void *)addr, size, exp, true);
+}
+
+// Functions concerning memory load and store (noabort variants)
+void __asan_load1_noabort(uptr addr) {
+ __asan_abi_load_n((void *)addr, 1, false);
+}
+void __asan_load2_noabort(uptr addr) {
+ __asan_abi_load_n((void *)addr, 2, false);
+}
+void __asan_load4_noabort(uptr addr) {
+ __asan_abi_load_n((void *)addr, 4, false);
+}
+void __asan_load8_noabort(uptr addr) {
+ __asan_abi_load_n((void *)addr, 8, false);
+}
+void __asan_load16_noabort(uptr addr) {
+ __asan_abi_load_n((void *)addr, 16, false);
+}
+void __asan_loadN_noabort(uptr addr, uptr size) {
+ __asan_abi_load_n((void *)addr, size, false);
+}
+void __asan_store1_noabort(uptr addr) {
+ __asan_abi_store_n((void *)addr, 1, false);
+}
+void __asan_store2_noabort(uptr addr) {
+ __asan_abi_store_n((void *)addr, 2, false);
+}
+void __asan_store4_noabort(uptr addr) {
+ __asan_abi_store_n((void *)addr, 4, false);
+}
+void __asan_store8_noabort(uptr addr) {
+ __asan_abi_store_n((void *)addr, 8, false);
+}
+void __asan_store16_noabort(uptr addr) {
+ __asan_abi_store_n((void *)addr, 16, false);
+}
+void __asan_storeN_noabort(uptr addr, uptr size) {
+ __asan_abi_store_n((void *)addr, size, false);
+}
+
+// Functions concerning query about whether memory is poisoned
+int __asan_address_is_poisoned(void const volatile *addr) {
+ return __asan_abi_address_is_poisoned(addr);
+}
+uptr __asan_region_is_poisoned(uptr beg, uptr size) {
+ return (uptr)__asan_abi_region_is_poisoned((void *)beg, size);
+}
+
+// Functions concerning the poisoning of memory
+void __asan_poison_memory_region(void const volatile *addr, uptr size) {
+ __asan_abi_poison_memory_region(addr, size);
+}
+void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
+ __asan_abi_unpoison_memory_region(addr, size);
+}
+
+// Functions concerning the partial poisoning of memory
+void __asan_set_shadow_00(uptr addr, uptr size) {
+ __asan_abi_set_shadow_xx_n((void *)addr, 0x00, size);
+}
+void __asan_set_shadow_01(uptr addr, uptr size) {
+ __asan_abi_set_shadow_xx_n((void *)addr, 0x01, size);
+}
+void __asan_set_shadow_02(uptr addr, uptr size) {
+ __asan_abi_set_shadow_xx_n((void *)addr, 0x02, size);
+}
+void __asan_set_shadow_03(uptr addr, uptr size) {
+ __asan_abi_set_shadow_xx_n((void *)addr, 0x03, size);
+}
+void __asan_set_shadow_04(uptr addr, uptr size) {
+ __asan_abi_set_shadow_xx_n((void *)addr, 0x04, size);
+}
+void __asan_set_shadow_05(uptr addr, uptr size) {
+ __asan_abi_set_shadow_xx_n((void *)addr, 0x05, size);
+}
+void __asan_set_shadow_06(uptr addr, uptr size) {
+ __asan_abi_set_shadow_xx_n((void *)addr, 0x06, size);
+}
+void __asan_set_shadow_07(uptr addr, uptr size) {
+ __asan_abi_set_shadow_xx_n((void *)addr, 0x07, size);
+}
+void __asan_set_shadow_f1(uptr addr, uptr size) {
+ __asan_abi_set_shadow_xx_n((void *)addr, 0xf1, size);
+}
+void __asan_set_shadow_f2(uptr addr, uptr size) {
+ __asan_abi_set_shadow_xx_n((void *)addr, 0xf2, size);
+}
+void __asan_set_shadow_f3(uptr addr, uptr size) {
+ __asan_abi_set_shadow_xx_n((void *)addr, 0xf3, size);
+}
+void __asan_set_shadow_f5(uptr addr, uptr size) {
+ __asan_abi_set_shadow_xx_n((void *)addr, 0xf5, size);
+}
+void __asan_set_shadow_f8(uptr addr, uptr size) {
+ __asan_abi_set_shadow_xx_n((void *)addr, 0xf8, size);
+}
+
+// Functions concerning stack poisoning
+void __asan_poison_stack_memory(uptr addr, uptr size) {
+ __asan_abi_poison_stack_memory((void *)addr, size);
+}
+void __asan_unpoison_stack_memory(uptr addr, uptr size) {
+ __asan_abi_unpoison_stack_memory((void *)addr, size);
+}
+
+// Functions concerning redzone poisoning
+void __asan_poison_intra_object_redzone(uptr p, uptr size) {}
+void __asan_unpoison_intra_object_redzone(uptr p, uptr size) {}
+
+// Functions concerning array cookie poisoning
+void __asan_poison_cxx_array_cookie(uptr p) {}
+uptr __asan_load_cxx_array_cookie(uptr *p) {
+ // TBD: Fail here
+ return (uptr)0;
+}
+
+// Functions concerning fake stacks
+void *__asan_get_current_fake_stack(void) {
+ // TBD: Fail here
+ return (void *)0;
+}
+void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
+ void **end) {
+ // TBD: Fail here
+ return (void *)0;
+}
+
+// Functions concerning poisoning and unpoisoning fake stack alloca
+void __asan_alloca_poison(uptr addr, uptr size) {
+ __asan_abi_alloca_poison((void *)addr, size);
+}
+void __asan_allocas_unpoison(uptr top, uptr bottom) {
+ __asan_abi_allocas_unpoison((void *)top, (void *)bottom);
+}
+
+// Functions concerning fake stack malloc
+uptr __asan_stack_malloc_0(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_n(0, size);
+}
+uptr __asan_stack_malloc_1(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_n(1, size);
+}
+uptr __asan_stack_malloc_2(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_n(2, size);
+}
+uptr __asan_stack_malloc_3(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_n(3, size);
+}
+uptr __asan_stack_malloc_4(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_n(4, size);
+}
+uptr __asan_stack_malloc_5(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_n(5, size);
+}
+uptr __asan_stack_malloc_6(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_n(6, size);
+}
+uptr __asan_stack_malloc_7(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_n(7, size);
+}
+uptr __asan_stack_malloc_8(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_n(8, size);
+}
+uptr __asan_stack_malloc_9(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_n(9, size);
+}
+uptr __asan_stack_malloc_10(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_n(10, size);
+}
+
+// Functions concerning fake stack malloc (always variants)
+uptr __asan_stack_malloc_always_0(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_always_n(0, size);
+}
+uptr __asan_stack_malloc_always_1(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_always_n(1, size);
+}
+uptr __asan_stack_malloc_always_2(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_always_n(2, size);
+}
+uptr __asan_stack_malloc_always_3(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_always_n(3, size);
+}
+uptr __asan_stack_malloc_always_4(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_always_n(4, size);
+}
+uptr __asan_stack_malloc_always_5(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_always_n(5, size);
+}
+uptr __asan_stack_malloc_always_6(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_always_n(6, size);
+}
+uptr __asan_stack_malloc_always_7(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_always_n(7, size);
+}
+uptr __asan_stack_malloc_always_8(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_always_n(8, size);
+}
+uptr __asan_stack_malloc_always_9(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_always_n(9, size);
+}
+uptr __asan_stack_malloc_always_10(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_always_n(10, size);
+}
+
+// Functions concerning fake stack free
+void __asan_stack_free_0(uptr ptr, uptr size) {
+ __asan_abi_stack_free_n(0, (void *)ptr, size);
+}
+void __asan_stack_free_1(uptr ptr, uptr size) {
+ __asan_abi_stack_free_n(1, (void *)ptr, size);
+}
+void __asan_stack_free_2(uptr ptr, uptr size) {
+ __asan_abi_stack_free_n(2, (void *)ptr, size);
+}
+void __asan_stack_free_3(uptr ptr, uptr size) {
+ __asan_abi_stack_free_n(3, (void *)ptr, size);
+}
+void __asan_stack_free_4(uptr ptr, uptr size) {
+ __asan_abi_stack_free_n(4, (void *)ptr, size);
+}
+void __asan_stack_free_5(uptr ptr, uptr size) {
+ __asan_abi_stack_free_n(5, (void *)ptr, size);
+}
+void __asan_stack_free_6(uptr ptr, uptr size) {
+ __asan_abi_stack_free_n(6, (void *)ptr, size);
+}
+void __asan_stack_free_7(uptr ptr, uptr size) {
+ __asan_abi_stack_free_n(7, (void *)ptr, size);
+}
+void __asan_stack_free_8(uptr ptr, uptr size) {
+ __asan_abi_stack_free_n(8, (void *)ptr, size);
+}
+void __asan_stack_free_9(uptr ptr, uptr size) {
+ __asan_abi_stack_free_n(9, (void *)ptr, size);
+}
+void __asan_stack_free_10(uptr ptr, uptr size) {
+ __asan_abi_stack_free_n(10, (void *)ptr, size);
+}
+
+// Functions concerning introspection (including lldb support)
+uptr __asan_get_alloc_stack(uptr addr, uptr *trace, uptr size, u32 *thread_id) {
+ // TBD: Fail here
+ return (uptr)0;
+}
+void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
+ uptr access_size, u32 exp) {}
+void __asan_set_error_report_callback(void (*callback)(const char *)) {}
+void __asan_describe_address(uptr addr) {}
+int __asan_report_present(void) { return (int)0; }
+uptr __asan_get_report_pc(void) { return (uptr)0; }
+uptr __asan_get_report_bp(void) { return (uptr)0; }
+uptr __asan_get_report_sp(void) { return (uptr)0; }
+uptr __asan_get_report_address(void) { return (uptr)0; }
+int __asan_get_report_access_type(void) { return (int)0; }
+uptr __asan_get_report_access_size(void) { return (uptr)0; }
+const char *__asan_get_report_description(void) { return (const char *)0; }
+}
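
From the instrumented program's point of view nothing changes: user code and compiler instrumentation keep calling the classic __asan_* interface, and the shim forwards to the stable ABI. A minimal caller-side sketch (assumes linking against an ASan runtime):

#include <sanitizer/asan_interface.h>

// Under this shim, the query below is answered by
// __asan_abi_address_is_poisoned(p).
int byte_is_poisoned(const void *p) {
  return __asan_address_is_poisoned(p);
}
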
diff --git a/contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi_tbd.txt b/contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi_tbd.txt
new file mode 100644
index 000000000000..2022c0b94283
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi_tbd.txt
@@ -0,0 +1,10 @@
+__asan_default_options
+__asan_default_suppressions
+__asan_get_free_stack
+__asan_get_shadow_mapping
+__asan_handle_vfork
+__asan_locate_address
+__asan_on_error
+__asan_print_accumulated_stats
+__asan_set_death_callback
+__asan_update_allocation_context
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/README.txt b/contrib/llvm-project/compiler-rt/lib/builtins/README.txt
index 53d656d5086d..5637183cc3b4 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/README.txt
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/README.txt
@@ -35,13 +35,13 @@ typedef uint64_t du_int;
// Integral bit manipulation
-di_int __ashldi3(di_int a, si_int b); // a << b
-ti_int __ashlti3(ti_int a, si_int b); // a << b
+di_int __ashldi3(di_int a, int b); // a << b
+ti_int __ashlti3(ti_int a, int b); // a << b
-di_int __ashrdi3(di_int a, si_int b); // a >> b arithmetic (sign fill)
-ti_int __ashrti3(ti_int a, si_int b); // a >> b arithmetic (sign fill)
-di_int __lshrdi3(di_int a, si_int b); // a >> b logical (zero fill)
-ti_int __lshrti3(ti_int a, si_int b); // a >> b logical (zero fill)
+di_int __ashrdi3(di_int a, int b); // a >> b arithmetic (sign fill)
+ti_int __ashrti3(ti_int a, int b); // a >> b arithmetic (sign fill)
+di_int __lshrdi3(di_int a, int b); // a >> b logical (zero fill)
+ti_int __lshrti3(ti_int a, int b); // a >> b logical (zero fill)
int __clzsi2(si_int a); // count leading zeros
int __clzdi2(di_int a); // count leading zeros
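
These helpers are normally emitted by the compiler for wide shifts, but they are ordinary exported functions and can be called directly. A small sketch, assuming a 64-bit target with __int128 and linking against compiler-rt (or libgcc, which exports the same symbols):

#include <assert.h>

// Provided by the runtime; ti_int is __int128 here, and the diff above
// changes the shift-amount parameter from si_int to plain int.
__int128 __ashlti3(__int128 a, int b);

int main(void) {
  __int128 r = __ashlti3(1, 100); // 1 << 100, beyond 64-bit arithmetic
  assert((r >> 100) == 1);
  return 0;
}
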
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/aarch64/lse.S b/contrib/llvm-project/compiler-rt/lib/builtins/aarch64/lse.S
index 1fe18f4a4681..5dc0d5320b5a 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/aarch64/lse.S
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/aarch64/lse.S
@@ -7,7 +7,7 @@
// Out-of-line LSE atomics helpers. Ported from libgcc library.
// N = {1, 2, 4, 8}
// M = {1, 2, 4, 8, 16}
-// ORDER = {'relax', 'acq', 'rel', 'acq_rel', 'sync'}
+// ORDER = {'relax', 'acq', 'rel', 'acq_rel'}
// Routines implemented:
//
// iM __aarch64_casM_ORDER(iM expected, iM desired, iM *ptr)
@@ -35,8 +35,8 @@ HIDDEN(___aarch64_have_lse_atomics)
#endif
// Generate mnemonics for
-// L_cas: SIZE: 1,2,4,8,16 MODEL: 1,2,3,4,5
-// L_swp L_ldadd L_ldclr L_ldeor L_ldset: SIZE: 1,2,4,8 MODEL: 1,2,3,4,5
+// L_cas: SIZE: 1,2,4,8,16 MODEL: 1,2,3,4
+// L_swp L_ldadd L_ldclr L_ldeor L_ldset: SIZE: 1,2,4,8 MODEL: 1,2,3,4
#if SIZE == 1
#define S b
@@ -64,44 +64,24 @@ HIDDEN(___aarch64_have_lse_atomics)
#define L
#define M 0x000000
#define N 0x000000
-#define BARRIER
#elif MODEL == 2
#define SUFF _acq
#define A a
#define L
#define M 0x400000
#define N 0x800000
-#define BARRIER
#elif MODEL == 3
#define SUFF _rel
#define A
#define L l
#define M 0x008000
#define N 0x400000
-#define BARRIER
#elif MODEL == 4
#define SUFF _acq_rel
#define A a
#define L l
#define M 0x408000
#define N 0xc00000
-#define BARRIER
-#elif MODEL == 5
-#define SUFF _sync
-#ifdef L_swp
-// swp has _acq semantics.
-#define A a
-#define L
-#define M 0x400000
-#define N 0x800000
-#else
-// All other _sync functions have _seq semantics.
-#define A a
-#define L l
-#define M 0x408000
-#define N 0xc00000
-#endif
-#define BARRIER dmb ish
#else
#error
#endif // MODEL
@@ -116,12 +96,7 @@ HIDDEN(___aarch64_have_lse_atomics)
#endif
#define NAME(BASE) GLUE4(__aarch64_, BASE, SIZE, SUFF)
-#if MODEL == 5
-// Drop A for _sync functions.
-#define LDXR GLUE3(ld, xr, S)
-#else
#define LDXR GLUE4(ld, A, xr, S)
-#endif
#define STXR GLUE4(st, L, xr, S)
// Define temporary registers.
@@ -161,15 +136,9 @@ DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(NAME(cas))
STXR w(tmp1), s(1), [x2]
cbnz w(tmp1), 0b
1:
- BARRIER
ret
#else
-#if MODEL == 5
-// Drop A for _sync functions.
-#define LDXP GLUE2(ld, xp)
-#else
#define LDXP GLUE3(ld, A, xp)
-#endif
#define STXP GLUE3(st, L, xp)
#ifdef HAS_ASM_LSE
#define CASP GLUE3(casp, A, L) x0, x1, x2, x3, [x4]
@@ -190,7 +159,6 @@ DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(NAME(cas))
STXP w(tmp2), x2, x3, [x4]
cbnz w(tmp2), 0b
1:
- BARRIER
ret
#endif
END_COMPILERRT_OUTLINE_FUNCTION(NAME(cas))
@@ -212,7 +180,6 @@ DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(NAME(swp))
LDXR s(0), [x1]
STXR w(tmp1), s(tmp0), [x1]
cbnz w(tmp1), 0b
- BARRIER
ret
END_COMPILERRT_OUTLINE_FUNCTION(NAME(swp))
#endif // L_swp
@@ -257,7 +224,6 @@ DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(NAME(LDNM))
OP s(tmp1), s(0), s(tmp0)
STXR w(tmp2), s(tmp1), [x1]
cbnz w(tmp2), 0b
- BARRIER
ret
END_COMPILERRT_OUTLINE_FUNCTION(NAME(LDNM))
#endif // L_ldadd L_ldclr L_ldeor L_ldset
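
These routines back the compiler's -moutline-atomics lowering on AArch64: the compiler calls the named helper, which dispatches on __aarch64_have_lse_atomics between an LSE instruction and an LL/SC loop. A caller-side sketch using the signature documented above (iM __aarch64_casM_ORDER(iM expected, iM desired, iM *ptr)), assuming an AArch64 target:

#include <stdint.h>

// Returns the value observed at *ptr; the store of `desired` happens only
// when that observed value equals `expected`.
uint32_t __aarch64_cas4_acq_rel(uint32_t expected, uint32_t desired,
                                uint32_t *ptr);

int try_lock(uint32_t *lock) {
  // Succeeds when the lock word was 0 and we swapped in 1.
  return __aarch64_cas4_acq_rel(0, 1, lock) == 0;
}
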
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/absvdi2.c b/contrib/llvm-project/compiler-rt/lib/builtins/absvdi2.c
index b9566cd874fe..291ab5f7f91d 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/absvdi2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/absvdi2.c
@@ -18,7 +18,7 @@
COMPILER_RT_ABI di_int __absvdi2(di_int a) {
const int N = (int)(sizeof(di_int) * CHAR_BIT);
- if (a == ((di_int)1 << (N - 1)))
+ if (a == ((di_int)((du_int)1 << (N - 1))))
compilerrt_abort();
const di_int t = a >> (N - 1);
return (a ^ t) - t;
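
The rewritten expression sidesteps a signed-shift pitfall, and the same idiom recurs below in absvsi2.c, negvdi2.c, negvsi2.c, and the int_mul*_impl.inc templates. A standalone sketch of the pattern:

#include <stdint.h>

static int64_t int64_min(void) {
  // (int64_t)1 << 63 shifts a 1 into the sign bit: undefined behavior.
  // Shifting in unsigned arithmetic is well defined; converting the
  // out-of-range result back to int64_t is implementation-defined, and the
  // compilers compiler-rt supports define it as two's-complement.
  return (int64_t)((uint64_t)1 << 63);
}
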
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/absvsi2.c b/contrib/llvm-project/compiler-rt/lib/builtins/absvsi2.c
index 9d5de7e8a3f2..9977c33d8f7e 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/absvsi2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/absvsi2.c
@@ -18,7 +18,7 @@
COMPILER_RT_ABI si_int __absvsi2(si_int a) {
const int N = (int)(sizeof(si_int) * CHAR_BIT);
- if (a == ((si_int)1 << (N - 1)))
+ if (a == ((si_int)((su_int)1 << (N - 1))))
compilerrt_abort();
const si_int t = a >> (N - 1);
return (a ^ t) - t;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/addtf3.c b/contrib/llvm-project/compiler-rt/lib/builtins/addtf3.c
index 86e4f4cfc3fc..2cb3a4d59191 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/addtf3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/addtf3.c
@@ -13,7 +13,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
#include "fp_add_impl.inc"
COMPILER_RT_ABI fp_t __addtf3(fp_t a, fp_t b) {
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/arm/aeabi_cdcmp.S b/contrib/llvm-project/compiler-rt/lib/builtins/arm/aeabi_cdcmp.S
index bd039a0329ea..c7abdb003a68 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/arm/aeabi_cdcmp.S
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/arm/aeabi_cdcmp.S
@@ -8,10 +8,6 @@
#include "../assembly.h"
-#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__
-#error big endian support not implemented
-#endif
-
#define APSR_Z (1 << 30)
#define APSR_C (1 << 29)
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/arm/aeabi_cfcmp.S b/contrib/llvm-project/compiler-rt/lib/builtins/arm/aeabi_cfcmp.S
index a26cb2a3ce16..81c47661c8b5 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/arm/aeabi_cfcmp.S
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/arm/aeabi_cfcmp.S
@@ -8,10 +8,6 @@
#include "../assembly.h"
-#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__
-#error big endian support not implemented
-#endif
-
#define APSR_Z (1 << 30)
#define APSR_C (1 << 29)
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/arm/divsi3.S b/contrib/llvm-project/compiler-rt/lib/builtins/arm/divsi3.S
index 761bf49d3ed0..faf9af917ab6 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/arm/divsi3.S
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/arm/divsi3.S
@@ -37,7 +37,8 @@ DEFINE_COMPILERRT_FUNCTION(__divsi3)
sdiv r0, r0, r1
bx lr
LOCAL_LABEL(divzero):
- mov r0,#0
+ // Use movs for compatibility with v8-m.base.
+ movs r0,#0
bx lr
#else
ESTABLISH_FRAME
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/arm/udivsi3.S b/contrib/llvm-project/compiler-rt/lib/builtins/arm/udivsi3.S
index 9b1b035b33d6..0567ab4ab765 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/arm/udivsi3.S
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/arm/udivsi3.S
@@ -32,7 +32,8 @@ DEFINE_COMPILERRT_FUNCTION(__udivsi3)
bx lr
LOCAL_LABEL(divby0):
- mov r0, #0
+ // Use movs for compatibility with v8-m.base.
+ movs r0, #0
# ifdef __ARM_EABI__
b __aeabi_idiv0
# else
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/ashlti3.c b/contrib/llvm-project/compiler-rt/lib/builtins/ashlti3.c
index 2d7bd4a89380..99a133ffa22f 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/ashlti3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/ashlti3.c
@@ -18,7 +18,7 @@
// Precondition: 0 <= b < bits_in_tword
-COMPILER_RT_ABI ti_int __ashlti3(ti_int a, si_int b) {
+COMPILER_RT_ABI ti_int __ashlti3(ti_int a, int b) {
const int bits_in_dword = (int)(sizeof(di_int) * CHAR_BIT);
twords input;
twords result;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/ashrti3.c b/contrib/llvm-project/compiler-rt/lib/builtins/ashrti3.c
index f573b6d6ccba..b306051df028 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/ashrti3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/ashrti3.c
@@ -18,7 +18,7 @@
// Precondition: 0 <= b < bits_in_tword
-COMPILER_RT_ABI ti_int __ashrti3(ti_int a, si_int b) {
+COMPILER_RT_ABI ti_int __ashrti3(ti_int a, int b) {
const int bits_in_dword = (int)(sizeof(di_int) * CHAR_BIT);
twords input;
twords result;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/assembly.h b/contrib/llvm-project/compiler-rt/lib/builtins/assembly.h
index 69a3d8620f92..169d49683f50 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/assembly.h
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/assembly.h
@@ -267,7 +267,7 @@
#define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target) \
.globl SYMBOL_NAME(name) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
- DECLARE_SYMBOL_VISIBILITY(SYMBOL_NAME(name)) SEPARATOR \
+ DECLARE_SYMBOL_VISIBILITY(name) SEPARATOR \
.set SYMBOL_NAME(name), SYMBOL_NAME(target) SEPARATOR
#if defined(__ARM_EABI__)
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/comparetf2.c b/contrib/llvm-project/compiler-rt/lib/builtins/comparetf2.c
index f1592454138c..be5e9e5e44dd 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/comparetf2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/comparetf2.c
@@ -39,7 +39,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
#include "fp_compare_impl.inc"
COMPILER_RT_ABI CMP_RESULT __letf2(fp_t a, fp_t b) { return __leXf2__(a, b); }
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model.c b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model.c
index f2dcd974008c..36eb696c39ee 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model.c
@@ -113,6 +113,7 @@ enum ProcessorSubtypes {
ZHAOXIN_FAM7H_LUJIAZUI,
AMDFAM19H_ZNVER4,
INTEL_COREI7_GRANITERAPIDS,
+ INTEL_COREI7_GRANITERAPIDS_D,
CPU_SUBTYPE_MAX
};
@@ -474,13 +475,19 @@ getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
break;
// Granite Rapids:
- case 0xae:
case 0xad:
CPU = "graniterapids";
*Type = INTEL_COREI7;
*Subtype = INTEL_COREI7_GRANITERAPIDS;
break;
+ // Granite Rapids D:
+ case 0xae:
+ CPU = "graniterapids-d";
+ *Type = INTEL_COREI7;
+ *Subtype = INTEL_COREI7_GRANITERAPIDS_D;
+ break;
+
case 0x1c: // Most 45 nm Intel Atom processors
case 0x26: // 45 nm Atom Lincroft
case 0x27: // 32 nm Atom Medfield
@@ -646,7 +653,7 @@ getAMDProcessorTypeAndSubtype(unsigned Family, unsigned Model,
static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf,
unsigned *Features) {
- unsigned EAX, EBX;
+ unsigned EAX = 0, EBX = 0;
#define setFeature(F) \
Features[F / 32] |= 1U << (F % 32)
@@ -847,6 +854,17 @@ _Bool __aarch64_have_lse_atomics
#if defined(__has_include)
#if __has_include(<sys/auxv.h>)
#include <sys/auxv.h>
+
+#if __has_include(<sys/ifunc.h>)
+#include <sys/ifunc.h>
+#else
+typedef struct __ifunc_arg_t {
+ unsigned long _size;
+ unsigned long _hwcap;
+ unsigned long _hwcap2;
+} __ifunc_arg_t;
+#endif // __has_include(<sys/ifunc.h>)
+
#if __has_include(<asm/hwcap.h>)
#include <asm/hwcap.h>
@@ -858,6 +876,9 @@ _Bool __aarch64_have_lse_atomics
#include <zircon/syscalls.h>
#endif
+#ifndef _IFUNC_ARG_HWCAP
+#define _IFUNC_ARG_HWCAP (1ULL << 62)
+#endif
#ifndef AT_HWCAP
#define AT_HWCAP 16
#endif
@@ -1140,11 +1161,16 @@ struct {
// As features grows new fields could be added
} __aarch64_cpu_features __attribute__((visibility("hidden"), nocommon));
-void init_cpu_features_resolver(unsigned long hwcap, unsigned long hwcap2) {
+void init_cpu_features_resolver(unsigned long hwcap, const __ifunc_arg_t *arg) {
#define setCPUFeature(F) __aarch64_cpu_features.features |= 1ULL << F
#define getCPUFeature(id, ftr) __asm__("mrs %0, " #id : "=r"(ftr))
#define extractBits(val, start, number) \
(val & ((1ULL << number) - 1ULL) << start) >> start
+ if (__aarch64_cpu_features.features)
+ return;
+ unsigned long hwcap2 = 0;
+ if (hwcap & _IFUNC_ARG_HWCAP)
+ hwcap2 = arg->_hwcap2;
if (hwcap & HWCAP_CRC32)
setCPUFeature(FEAT_CRC);
if (hwcap & HWCAP_PMULL)
@@ -1320,6 +1346,7 @@ void init_cpu_features_resolver(unsigned long hwcap, unsigned long hwcap2) {
if (hwcap & HWCAP_SHA3)
setCPUFeature(FEAT_SHA3);
}
+ setCPUFeature(FEAT_MAX);
}
void CONSTRUCTOR_ATTRIBUTE init_cpu_features(void) {
@@ -1328,7 +1355,6 @@ void CONSTRUCTOR_ATTRIBUTE init_cpu_features(void) {
// CPU features already initialized.
if (__aarch64_cpu_features.features)
return;
- setCPUFeature(FEAT_MAX);
#if defined(__FreeBSD__)
int res = 0;
res = elf_aux_info(AT_HWCAP, &hwcap, sizeof hwcap);
@@ -1344,7 +1370,11 @@ void CONSTRUCTOR_ATTRIBUTE init_cpu_features(void) {
hwcap = getauxval(AT_HWCAP);
hwcap2 = getauxval(AT_HWCAP2);
#endif // defined(__FreeBSD__)
- init_cpu_features_resolver(hwcap, hwcap2);
+ __ifunc_arg_t arg;
+ arg._size = sizeof(__ifunc_arg_t);
+ arg._hwcap = hwcap;
+ arg._hwcap2 = hwcap2;
+ init_cpu_features_resolver(hwcap | _IFUNC_ARG_HWCAP, &arg);
#undef extractBits
#undef getCPUFeature
#undef setCPUFeature
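
The resolver convention shown here matches AArch64 ifunc support on recent glibc and FreeBSD: the first argument carries hwcap, and the _IFUNC_ARG_HWCAP bit says the second argument points at a valid __ifunc_arg_t. A hypothetical resolver following that convention (function names and the feature choice are illustrative only; assumes Linux's <asm/hwcap.h> for HWCAP_CRC32 and the __ifunc_arg_t definitions above):

#include <asm/hwcap.h>

static void crc_generic(void) { /* portable fallback */ }
static void crc_hw(void) { /* CRC32-instruction path */ }

static void (*resolve_crc(unsigned long hwcap,
                          const __ifunc_arg_t *arg))(void) {
  unsigned long hwcap2 = 0;
  if (hwcap & _IFUNC_ARG_HWCAP)
    hwcap2 = arg->_hwcap2; // extended caps, exactly as in the resolver above
  (void)hwcap2;
  return (hwcap & HWCAP_CRC32) ? crc_hw : crc_generic;
}

void crc_update(void) __attribute__((ifunc("resolve_crc")));
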
diff --git a/contrib/llvm-project/compiler-rt/lib/crt/crtbegin.c b/contrib/llvm-project/compiler-rt/lib/builtins/crtbegin.c
index 7b041ff00b6b..a0860ca12ea0 100644
--- a/contrib/llvm-project/compiler-rt/lib/crt/crtbegin.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/crtbegin.c
@@ -50,25 +50,29 @@ __attribute__((section(".init_array"),
used)) static void (*__init)(void) = __do_init;
#elif defined(__i386__) || defined(__x86_64__)
__asm__(".pushsection .init,\"ax\",@progbits\n\t"
- "call " __USER_LABEL_PREFIX__ "__do_init\n\t"
- ".popsection");
+ "call __do_init\n\t"
+ ".popsection");
#elif defined(__riscv)
__asm__(".pushsection .init,\"ax\",%progbits\n\t"
- "call " __USER_LABEL_PREFIX__ "__do_init\n\t"
+ "call __do_init\n\t"
".popsection");
#elif defined(__arm__) || defined(__aarch64__)
__asm__(".pushsection .init,\"ax\",%progbits\n\t"
- "bl " __USER_LABEL_PREFIX__ "__do_init\n\t"
- ".popsection");
+ "bl __do_init\n\t"
+ ".popsection");
+#elif defined(__mips__)
+__asm__(".pushsection .init,\"ax\",@progbits\n\t"
+ "jal __do_init\n\t"
+ ".popsection");
#elif defined(__powerpc__) || defined(__powerpc64__)
__asm__(".pushsection .init,\"ax\",@progbits\n\t"
- "bl " __USER_LABEL_PREFIX__ "__do_init\n\t"
- "nop\n\t"
- ".popsection");
+ "bl __do_init\n\t"
+ "nop\n\t"
+ ".popsection");
#elif defined(__sparc__)
__asm__(".pushsection .init,\"ax\",@progbits\n\t"
- "call " __USER_LABEL_PREFIX__ "__do_init\n\t"
- ".popsection");
+ "call __do_init\n\t"
+ ".popsection");
#else
#error "crtbegin without .init_fini array unimplemented for this architecture"
#endif // CRT_HAS_INITFINI_ARRAY
@@ -103,25 +107,29 @@ __attribute__((section(".fini_array"),
used)) static void (*__fini)(void) = __do_fini;
#elif defined(__i386__) || defined(__x86_64__)
__asm__(".pushsection .fini,\"ax\",@progbits\n\t"
- "call " __USER_LABEL_PREFIX__ "__do_fini\n\t"
- ".popsection");
+ "call __do_fini\n\t"
+ ".popsection");
#elif defined(__arm__) || defined(__aarch64__)
__asm__(".pushsection .fini,\"ax\",%progbits\n\t"
- "bl " __USER_LABEL_PREFIX__ "__do_fini\n\t"
- ".popsection");
+ "bl __do_fini\n\t"
+ ".popsection");
+#elif defined(__mips__)
+__asm__(".pushsection .fini,\"ax\",@progbits\n\t"
+ "jal __do_fini\n\t"
+ ".popsection");
#elif defined(__powerpc__) || defined(__powerpc64__)
__asm__(".pushsection .fini,\"ax\",@progbits\n\t"
- "bl " __USER_LABEL_PREFIX__ "__do_fini\n\t"
- "nop\n\t"
- ".popsection");
+ "bl __do_fini\n\t"
+ "nop\n\t"
+ ".popsection");
#elif defined(__riscv)
__asm__(".pushsection .fini,\"ax\",@progbits\n\t"
- "call " __USER_LABEL_PREFIX__ "__do_fini\n\t"
+ "call __do_fini\n\t"
".popsection");
#elif defined(__sparc__)
__asm__(".pushsection .fini,\"ax\",@progbits\n\t"
- "call " __USER_LABEL_PREFIX__ "__do_fini\n\t"
- ".popsection");
+ "call __do_fini\n\t"
+ ".popsection");
#else
#error "crtbegin without .init_fini array unimplemented for this architecture"
#endif // CRT_HAS_INIT_FINI_ARRAY
diff --git a/contrib/llvm-project/compiler-rt/lib/crt/crtend.c b/contrib/llvm-project/compiler-rt/lib/builtins/crtend.c
index ebcc60b89a10..ebcc60b89a10 100644
--- a/contrib/llvm-project/compiler-rt/lib/crt/crtend.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/crtend.c
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/divtf3.c b/contrib/llvm-project/compiler-rt/lib/builtins/divtf3.c
index 5bcc9a8e4aa1..bd76763b07d3 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/divtf3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/divtf3.c
@@ -14,7 +14,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
#define NUMBER_OF_HALF_ITERATIONS 4
#define NUMBER_OF_FULL_ITERATIONS 1
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/extenddftf2.c b/contrib/llvm-project/compiler-rt/lib/builtins/extenddftf2.c
index ddf470ecd629..835076be1f20 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/extenddftf2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/extenddftf2.c
@@ -9,7 +9,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
#define SRC_DOUBLE
#define DST_QUAD
#include "fp_extend_impl.inc"
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/extendhftf2.c b/contrib/llvm-project/compiler-rt/lib/builtins/extendhftf2.c
index aefe9737d34f..a2cb0f771ee9 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/extendhftf2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/extendhftf2.c
@@ -10,8 +10,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT) && \
- defined(COMPILER_RT_HAS_FLOAT16)
+#if defined(CRT_HAS_TF_MODE) && defined(COMPILER_RT_HAS_FLOAT16)
#define SRC_HALF
#define DST_QUAD
#include "fp_extend_impl.inc"
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/extendsftf2.c b/contrib/llvm-project/compiler-rt/lib/builtins/extendsftf2.c
index cf1fd2face20..0739859bcbc1 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/extendsftf2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/extendsftf2.c
@@ -9,7 +9,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
#define SRC_SINGLE
#define DST_QUAD
#include "fp_extend_impl.inc"
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fixtfdi.c b/contrib/llvm-project/compiler-rt/lib/builtins/fixtfdi.c
index fe570e6b3755..d27a99b6f364 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fixtfdi.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fixtfdi.c
@@ -9,7 +9,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
typedef di_int fixint_t;
typedef du_int fixuint_t;
#include "fp_fixint_impl.inc"
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fixtfsi.c b/contrib/llvm-project/compiler-rt/lib/builtins/fixtfsi.c
index a32bd964caa3..01e352acc592 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fixtfsi.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fixtfsi.c
@@ -9,7 +9,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
typedef si_int fixint_t;
typedef su_int fixuint_t;
#include "fp_fixint_impl.inc"
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fixtfti.c b/contrib/llvm-project/compiler-rt/lib/builtins/fixtfti.c
index 19f84ce38907..491fca502113 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fixtfti.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fixtfti.c
@@ -9,7 +9,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
typedef ti_int fixint_t;
typedef tu_int fixuint_t;
#include "fp_fixint_impl.inc"
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fixunstfdi.c b/contrib/llvm-project/compiler-rt/lib/builtins/fixunstfdi.c
index a0805e63db82..febdb8f5682f 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fixunstfdi.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fixunstfdi.c
@@ -9,7 +9,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
typedef du_int fixuint_t;
#include "fp_fixuint_impl.inc"
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fixunstfsi.c b/contrib/llvm-project/compiler-rt/lib/builtins/fixunstfsi.c
index 3a1320ed3e0a..4efc387df453 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fixunstfsi.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fixunstfsi.c
@@ -9,7 +9,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
typedef su_int fixuint_t;
#include "fp_fixuint_impl.inc"
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fixunstfti.c b/contrib/llvm-project/compiler-rt/lib/builtins/fixunstfti.c
index 23cd1ab615a7..fa9e7aa07108 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fixunstfti.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fixunstfti.c
@@ -9,7 +9,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
typedef tu_int fixuint_t;
#include "fp_fixuint_impl.inc"
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floatdidf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floatdidf.c
index d37c43b1f2f9..c994aad3f079 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floatdidf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floatdidf.c
@@ -50,7 +50,7 @@ COMPILER_RT_ABI double __floatdidf(di_int a) {
return 0.0;
const unsigned N = sizeof(di_int) * CHAR_BIT;
const di_int s = a >> (N - 1);
- a = (a ^ s) - s;
+ a = (du_int)(a ^ s) - s;
int sd = N - __builtin_clzll(a); // number of significant digits
int e = sd - 1; // exponent
if (sd > DBL_MANT_DIG) {
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floatdisf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floatdisf.c
index 5c6316431e39..0b62ed8689bc 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floatdisf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floatdisf.c
@@ -24,7 +24,7 @@ COMPILER_RT_ABI float __floatdisf(di_int a) {
return 0.0F;
const unsigned N = sizeof(di_int) * CHAR_BIT;
const di_int s = a >> (N - 1);
- a = (a ^ s) - s;
+ a = (du_int)(a ^ s) - s;
int sd = N - __builtin_clzll(a); // number of significant digits
si_int e = sd - 1; // exponent
if (sd > FLT_MANT_DIG) {
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floatditf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floatditf.c
index 9b07b65825b8..c6e326a1923a 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floatditf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floatditf.c
@@ -15,7 +15,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
COMPILER_RT_ABI fp_t __floatditf(di_int a) {
const int aWidth = sizeof a * CHAR_BIT;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floatsitf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floatsitf.c
index 80a4ef08fb0e..4d5b52f4ed91 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floatsitf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floatsitf.c
@@ -15,7 +15,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
COMPILER_RT_ABI fp_t __floatsitf(si_int a) {
const int aWidth = sizeof a * CHAR_BIT;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floattidf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floattidf.c
index 0a1c04bec82e..7bfe87f53aa0 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floattidf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floattidf.c
@@ -29,7 +29,7 @@ COMPILER_RT_ABI double __floattidf(ti_int a) {
const ti_int s = a >> (N - 1);
a = (a ^ s) - s;
int sd = N - __clzti2(a); // number of significant digits
- int e = sd - 1; // exponent
+ si_int e = sd - 1; // exponent
if (sd > DBL_MANT_DIG) {
// start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
// finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floattisf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floattisf.c
index a8fcdbe14c07..717cb361f075 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floattisf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floattisf.c
@@ -28,7 +28,7 @@ COMPILER_RT_ABI float __floattisf(ti_int a) {
const ti_int s = a >> (N - 1);
a = (a ^ s) - s;
int sd = N - __clzti2(a); // number of significant digits
- int e = sd - 1; // exponent
+ si_int e = sd - 1; // exponent
if (sd > FLT_MANT_DIG) {
// start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
// finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floattitf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floattitf.c
index 196cbdae14e0..fff0755c3bb4 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floattitf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floattitf.c
@@ -25,7 +25,7 @@
// mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
// mmmm mmmm mmmm
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
COMPILER_RT_ABI fp_t __floattitf(ti_int a) {
if (a == 0)
return 0.0;
@@ -34,7 +34,7 @@ COMPILER_RT_ABI fp_t __floattitf(ti_int a) {
a = (a ^ s) - s;
int sd = N - __clzti2(a); // number of significant digits
int e = sd - 1; // exponent
- if (sd > LDBL_MANT_DIG) {
+ if (sd > TF_MANT_DIG) {
// start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
// finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
// 12345678901234567890123456
@@ -43,27 +43,27 @@ COMPILER_RT_ABI fp_t __floattitf(ti_int a) {
// Q = bit TF_MANT_DIG bits to the right of 1
// R = "or" of all bits to the right of Q
switch (sd) {
- case LDBL_MANT_DIG + 1:
+ case TF_MANT_DIG + 1:
a <<= 1;
break;
- case LDBL_MANT_DIG + 2:
+ case TF_MANT_DIG + 2:
break;
default:
- a = ((tu_int)a >> (sd - (LDBL_MANT_DIG + 2))) |
- ((a & ((tu_int)(-1) >> ((N + LDBL_MANT_DIG + 2) - sd))) != 0);
+ a = ((tu_int)a >> (sd - (TF_MANT_DIG + 2))) |
+ ((a & ((tu_int)(-1) >> ((N + TF_MANT_DIG + 2) - sd))) != 0);
};
// finish:
a |= (a & 4) != 0; // Or P into R
++a; // round - this step may add a significant bit
a >>= 2; // dump Q and R
// a is now rounded to TF_MANT_DIG or TF_MANT_DIG+1 bits
- if (a & ((tu_int)1 << LDBL_MANT_DIG)) {
+ if (a & ((tu_int)1 << TF_MANT_DIG)) {
a >>= 1;
++e;
}
// a is now rounded to TF_MANT_DIG bits
} else {
- a <<= (LDBL_MANT_DIG - sd);
+ a <<= (TF_MANT_DIG - sd);
// a is now rounded to TF_MANT_DIG bits
}
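
The P/Q/R comments above describe a compact round-to-nearest-even: keep the mantissa plus a guard bit Q and a sticky bit R, fold the mantissa LSB P into R so that exact ties round toward even, then add one and discard Q and R. A small standalone demonstration of the same scheme at narrow width (a sketch; the real code does this on tu_int with TF_MANT_DIG):

#include <assert.h>
#include <stdint.h>

// Round an sd-significant-bit value to `mant` bits, nearest-even.
static uint32_t round_to_mant(uint32_t a, int sd, int mant) {
  if (sd > mant + 2)
    a = (a >> (sd - (mant + 2))) |                            // keep mant+2 bits
        ((a & (((uint32_t)1 << (sd - (mant + 2))) - 1)) != 0); // sticky R
  else
    a <<= (mant + 2) - sd;
  a |= (a & 4) != 0; // or P into R: exact ties now round to even
  ++a;               // round
  return a >> 2;     // drop Q and R; a carry out here bumps the exponent
}

int main(void) {
  // 91 = 0b1011011 (7 significant bits) to 4 bits gives 0b1011 (scaled, 88),
  // which is nearer to 91 than 0b1100 (scaled, 96) is.
  assert(round_to_mant(91, 7, 4) == 0xB);
  return 0;
}
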
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floatunditf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floatunditf.c
index 8d310851e179..abe0ca9ed8c5 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floatunditf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floatunditf.c
@@ -15,7 +15,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
COMPILER_RT_ABI fp_t __floatunditf(du_int a) {
const int aWidth = sizeof a * CHAR_BIT;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floatunsitf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floatunsitf.c
index 7ba1fb6000dc..3f0a5249fddd 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floatunsitf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floatunsitf.c
@@ -15,7 +15,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
COMPILER_RT_ABI fp_t __floatunsitf(su_int a) {
const int aWidth = sizeof a * CHAR_BIT;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floatuntidf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floatuntidf.c
index e69e65c1ace4..4dfca8e49309 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floatuntidf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floatuntidf.c
@@ -27,7 +27,7 @@ COMPILER_RT_ABI double __floatuntidf(tu_int a) {
return 0.0;
const unsigned N = sizeof(tu_int) * CHAR_BIT;
int sd = N - __clzti2(a); // number of significant digits
- int e = sd - 1; // exponent
+ si_int e = sd - 1; // exponent
if (sd > DBL_MANT_DIG) {
// start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
// finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floatuntisf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floatuntisf.c
index 9dec0ab5c58f..a53659cd1fca 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floatuntisf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floatuntisf.c
@@ -26,7 +26,7 @@ COMPILER_RT_ABI float __floatuntisf(tu_int a) {
return 0.0F;
const unsigned N = sizeof(tu_int) * CHAR_BIT;
int sd = N - __clzti2(a); // number of significant digits
- int e = sd - 1; // exponent
+ si_int e = sd - 1; // exponent
if (sd > FLT_MANT_DIG) {
// start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
// finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floatuntitf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floatuntitf.c
index d308d3118d03..33a81b34eeb1 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floatuntitf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floatuntitf.c
@@ -25,44 +25,44 @@
// mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
// mmmm mmmm mmmm
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
COMPILER_RT_ABI fp_t __floatuntitf(tu_int a) {
if (a == 0)
return 0.0;
const unsigned N = sizeof(tu_int) * CHAR_BIT;
int sd = N - __clzti2(a); // number of significant digits
int e = sd - 1; // exponent
- if (sd > LDBL_MANT_DIG) {
+ if (sd > TF_MANT_DIG) {
// start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
// finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
// 12345678901234567890123456
// 1 = msb 1 bit
- // P = bit LDBL_MANT_DIG-1 bits to the right of 1
- // Q = bit LDBL_MANT_DIG bits to the right of 1
+ // P = bit TF_MANT_DIG-1 bits to the right of 1
+ // Q = bit TF_MANT_DIG bits to the right of 1
// R = "or" of all bits to the right of Q
switch (sd) {
- case LDBL_MANT_DIG + 1:
+ case TF_MANT_DIG + 1:
a <<= 1;
break;
- case LDBL_MANT_DIG + 2:
+ case TF_MANT_DIG + 2:
break;
default:
- a = (a >> (sd - (LDBL_MANT_DIG + 2))) |
- ((a & ((tu_int)(-1) >> ((N + LDBL_MANT_DIG + 2) - sd))) != 0);
+ a = (a >> (sd - (TF_MANT_DIG + 2))) |
+ ((a & ((tu_int)(-1) >> ((N + TF_MANT_DIG + 2) - sd))) != 0);
};
// finish:
a |= (a & 4) != 0; // Or P into R
++a; // round - this step may add a significant bit
a >>= 2; // dump Q and R
- // a is now rounded to LDBL_MANT_DIG or LDBL_MANT_DIG+1 bits
- if (a & ((tu_int)1 << LDBL_MANT_DIG)) {
+ // a is now rounded to TF_MANT_DIG or TF_MANT_DIG+1 bits
+ if (a & ((tu_int)1 << TF_MANT_DIG)) {
a >>= 1;
++e;
}
- // a is now rounded to LDBL_MANT_DIG bits
+ // a is now rounded to TF_MANT_DIG bits
} else {
- a <<= (LDBL_MANT_DIG - sd);
- // a is now rounded to LDBL_MANT_DIG bits
+ a <<= (TF_MANT_DIG - sd);
+ // a is now rounded to TF_MANT_DIG bits
}
long_double_bits fb;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fp_lib.h b/contrib/llvm-project/compiler-rt/lib/builtins/fp_lib.h
index 3fb13a033a14..58eb45fcc729 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fp_lib.h
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fp_lib.h
@@ -106,7 +106,13 @@ COMPILER_RT_ABI fp_t __adddf3(fp_t a, fp_t b);
#elif defined QUAD_PRECISION
#if __LDBL_MANT_DIG__ == 113 && defined(__SIZEOF_INT128__)
+// TODO: Availability of the *tf functions should not depend on long double
+// being IEEE 128, but instead on being able to use a 128-bit floating-point
+// type, which includes __float128.
+// Right now this (incorrectly) stops the builtins from being used for x86.
#define CRT_LDBL_128BIT
+#define CRT_HAS_TF_MODE
+#define TF_C(c) c##L
typedef uint64_t half_rep_t;
typedef __uint128_t rep_t;
typedef __int128_t srep_t;
@@ -116,6 +122,7 @@ typedef long double fp_t;
// Note: Since there is no explicit way to tell compiler the constant is a
// 128-bit integer, we let the constant be casted to 128-bit integer
#define significandBits 112
+#define TF_MANT_DIG (significandBits + 1)
static __inline int rep_clz(rep_t a) {
const union {
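
A quick sanity check of the new constant, assuming a target where QUAD_PRECISION really does select IEEE binary128 (e.g. aarch64-linux):

#include <float.h>

_Static_assert(LDBL_MANT_DIG == 113,
               "long double is IEEE binary128 on this target");
// TF_MANT_DIG = significandBits + 1 = 112 + 1 = 113: the stored fraction
// plus the implicit leading 1.
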
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/gcc_personality_v0.c b/contrib/llvm-project/compiler-rt/lib/builtins/gcc_personality_v0.c
index a327a83eb9f6..3437205a1cc4 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/gcc_personality_v0.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/gcc_personality_v0.c
@@ -234,7 +234,7 @@ COMPILER_RT_ABI _Unwind_Reason_Code __gcc_personality_v0(
}
// Walk call-site table looking for range that includes current PC.
uint8_t callSiteEncoding = *lsda++;
- uint32_t callSiteTableLength = readULEB128(&lsda);
+ size_t callSiteTableLength = readULEB128(&lsda);
const uint8_t *callSiteTableStart = lsda;
const uint8_t *callSiteTableEnd = callSiteTableStart + callSiteTableLength;
const uint8_t *p = callSiteTableStart;
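
readULEB128 itself is not shown in this hunk, but DWARF's ULEB128 encoding is standard: seven payload bits per byte, least significant group first, high bit as continuation flag. A sketch of a conforming decoder (names are illustrative, not the file's helper):

#include <stddef.h>
#include <stdint.h>

static size_t uleb128_decode(const uint8_t **p) {
  size_t result = 0;
  int shift = 0;
  uint8_t byte;
  do {
    byte = *(*p)++;
    result |= (size_t)(byte & 0x7f) << shift; // low groups arrive first
    shift += 7;
  } while (byte & 0x80); // high bit set means more bytes follow
  return result;
}

Widening callSiteTableLength to size_t matches this return type and avoids truncating a pathologically large table length on LP64 targets.
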
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/int_mulo_impl.inc b/contrib/llvm-project/compiler-rt/lib/builtins/int_mulo_impl.inc
index 567d8b9e6e60..27e7c8c43d60 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/int_mulo_impl.inc
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/int_mulo_impl.inc
@@ -18,10 +18,10 @@
static __inline fixint_t __muloXi4(fixint_t a, fixint_t b, int *overflow) {
const int N = (int)(sizeof(fixint_t) * CHAR_BIT);
- const fixint_t MIN = (fixint_t)1 << (N - 1);
+ const fixint_t MIN = (fixint_t)((fixuint_t)1 << (N - 1));
const fixint_t MAX = ~MIN;
*overflow = 0;
- fixint_t result = a * b;
+ fixint_t result = (fixuint_t)a * b;
if (a == MIN) {
if (b != 0 && b != 1)
*overflow = 1;
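
Instantiated as __mulosi4/__mulodi4/__muloti4, this template reports overflow out of band instead of trapping. A usage sketch for the 64-bit instance, assuming di_int is int64_t and compiler-rt is linked:

#include <stdint.h>
#include <stdio.h>

int64_t __mulodi4(int64_t a, int64_t b, int *overflow);

int main(void) {
  int overflow = 0;
  (void)__mulodi4(INT64_MAX, 2, &overflow);
  printf("overflow=%d\n", overflow); // prints overflow=1
  return 0;
}
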
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/int_mulv_impl.inc b/contrib/llvm-project/compiler-rt/lib/builtins/int_mulv_impl.inc
index 1e920716ec49..06559cf302ea 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/int_mulv_impl.inc
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/int_mulv_impl.inc
@@ -18,7 +18,7 @@
static __inline fixint_t __mulvXi3(fixint_t a, fixint_t b) {
const int N = (int)(sizeof(fixint_t) * CHAR_BIT);
- const fixint_t MIN = (fixint_t)1 << (N - 1);
+ const fixint_t MIN = (fixint_t)((fixuint_t)1 << (N - 1));
const fixint_t MAX = ~MIN;
if (a == MIN) {
if (b == 0 || b == 1)
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/lshrti3.c b/contrib/llvm-project/compiler-rt/lib/builtins/lshrti3.c
index d00a22095993..5dc8a0a2347f 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/lshrti3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/lshrti3.c
@@ -18,7 +18,7 @@
// Precondition: 0 <= b < bits_in_tword
-COMPILER_RT_ABI ti_int __lshrti3(ti_int a, si_int b) {
+COMPILER_RT_ABI ti_int __lshrti3(ti_int a, int b) {
const int bits_in_dword = (int)(sizeof(di_int) * CHAR_BIT);
utwords input;
utwords result;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/mulodi4.c b/contrib/llvm-project/compiler-rt/lib/builtins/mulodi4.c
index 7209676a327e..6ecf92664fb5 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/mulodi4.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/mulodi4.c
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#define fixint_t di_int
+#define fixuint_t du_int
#include "int_mulo_impl.inc"
// Returns: a * b
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/mulosi4.c b/contrib/llvm-project/compiler-rt/lib/builtins/mulosi4.c
index 4e03c24455d6..3fd18a122a46 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/mulosi4.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/mulosi4.c
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#define fixint_t si_int
+#define fixuint_t su_int
#include "int_mulo_impl.inc"
// Returns: a * b
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/muloti4.c b/contrib/llvm-project/compiler-rt/lib/builtins/muloti4.c
index 9a7aa85b022b..9aab6fc3efb3 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/muloti4.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/muloti4.c
@@ -19,6 +19,7 @@
// Effects: sets *overflow to 1 if a * b overflows
#define fixint_t ti_int
+#define fixuint_t tu_int
#include "int_mulo_impl.inc"
COMPILER_RT_ABI ti_int __muloti4(ti_int a, ti_int b, int *overflow) {
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/multf3.c b/contrib/llvm-project/compiler-rt/lib/builtins/multf3.c
index 0626fb8c7fc9..8fd73688712c 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/multf3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/multf3.c
@@ -14,7 +14,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
#include "fp_mul_impl.inc"
COMPILER_RT_ABI fp_t __multf3(fp_t a, fp_t b) { return __mulXf3__(a, b); }
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/mulvdi3.c b/contrib/llvm-project/compiler-rt/lib/builtins/mulvdi3.c
index 1d672c6dc155..d787d297d564 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/mulvdi3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/mulvdi3.c
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#define fixint_t di_int
+#define fixuint_t du_int
#include "int_mulv_impl.inc"
// Returns: a * b
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/mulvsi3.c b/contrib/llvm-project/compiler-rt/lib/builtins/mulvsi3.c
index 00b2e50eeca9..2571881195fc 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/mulvsi3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/mulvsi3.c
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#define fixint_t si_int
+#define fixuint_t su_int
#include "int_mulv_impl.inc"
// Returns: a * b
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/mulvti3.c b/contrib/llvm-project/compiler-rt/lib/builtins/mulvti3.c
index ba355149f9a7..fad9b2ae2765 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/mulvti3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/mulvti3.c
@@ -19,6 +19,7 @@
// Effects: aborts if a * b overflows
#define fixint_t ti_int
+#define fixuint_t tu_int
#include "int_mulv_impl.inc"
COMPILER_RT_ABI ti_int __mulvti3(ti_int a, ti_int b) { return __mulvXi3(a, b); }
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/negvdi2.c b/contrib/llvm-project/compiler-rt/lib/builtins/negvdi2.c
index 5c52b3ec2aa6..8c1cf2fa58d4 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/negvdi2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/negvdi2.c
@@ -17,7 +17,8 @@
// Effects: aborts if -a overflows
COMPILER_RT_ABI di_int __negvdi2(di_int a) {
- const di_int MIN = (di_int)1 << ((int)(sizeof(di_int) * CHAR_BIT) - 1);
+ const di_int MIN =
+ (di_int)((du_int)1 << ((int)(sizeof(di_int) * CHAR_BIT) - 1));
if (a == MIN)
compilerrt_abort();
return -a;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/negvsi2.c b/contrib/llvm-project/compiler-rt/lib/builtins/negvsi2.c
index cccdee6dc5e5..70f214f9761d 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/negvsi2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/negvsi2.c
@@ -17,7 +17,8 @@
// Effects: aborts if -a overflows
COMPILER_RT_ABI si_int __negvsi2(si_int a) {
- const si_int MIN = (si_int)1 << ((int)(sizeof(si_int) * CHAR_BIT) - 1);
+ const si_int MIN =
+ (si_int)((su_int)1 << ((int)(sizeof(si_int) * CHAR_BIT) - 1));
if (a == MIN)
compilerrt_abort();
return -a;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/os_version_check.c b/contrib/llvm-project/compiler-rt/lib/builtins/os_version_check.c
index ebfb2dfc72dd..182eabe7a6ae 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/os_version_check.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/os_version_check.c
@@ -86,6 +86,10 @@ typedef Boolean (*CFStringGetCStringFuncTy)(CFStringRef, char *, CFIndex,
CFStringEncoding);
typedef void (*CFReleaseFuncTy)(CFTypeRef);
+extern __attribute__((weak_import))
+bool _availability_version_check(uint32_t count,
+ dyld_build_version_t versions[]);
+
static void _initializeAvailabilityCheck(bool LoadPlist) {
if (AvailabilityVersionCheck && !LoadPlist) {
// New API is supported and we're not being asked to load the plist,
@@ -94,8 +98,8 @@ static void _initializeAvailabilityCheck(bool LoadPlist) {
}
  // Use the new API if it's available.
- AvailabilityVersionCheck = (AvailabilityVersionCheckFuncTy)dlsym(
- RTLD_DEFAULT, "_availability_version_check");
+ if (_availability_version_check)
+ AvailabilityVersionCheck = &_availability_version_check;
if (AvailabilityVersionCheck && !LoadPlist) {
// New API is supported and we're not being asked to load the plist,
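
The hunk above swaps a runtime dlsym() lookup for a weak import, so the dynamic linker binds _availability_version_check at load time and leaves the reference null on OS versions that do not export it. A hedged standalone sketch of the weak-reference pattern (ELF shown; new_api and call_api are hypothetical names):

    #include <cstdio>

    // Weakly-referenced symbol: resolves to null at load time if nothing
    // linked into the process defines it.
    extern "C" __attribute__((weak)) int new_api(int arg);

    int call_api(int arg) {
      if (&new_api)          // non-null only when the symbol actually exists
        return new_api(arg);
      return -1;             // fall back to the legacy path
    }

    int main() { std::printf("%d\n", call_api(7)); return 0; }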
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/powitf2.c b/contrib/llvm-project/compiler-rt/lib/builtins/powitf2.c
index 8e639a03a3c4..74fe707a4e8c 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/powitf2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/powitf2.c
@@ -13,7 +13,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
// Returns: a ^ b
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/riscv/fp_mode.c b/contrib/llvm-project/compiler-rt/lib/builtins/riscv/fp_mode.c
index c542c34c9cc8..1a5a3de95de9 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/riscv/fp_mode.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/riscv/fp_mode.c
@@ -15,7 +15,7 @@
#define RISCV_INEXACT 0x1
CRT_FE_ROUND_MODE __fe_getround(void) {
-#if defined(__riscv_f)
+#if defined(__riscv_f) || defined(__riscv_zfinx)
int frm;
__asm__ __volatile__("frrm %0" : "=r" (frm));
switch (frm) {
@@ -35,7 +35,7 @@ CRT_FE_ROUND_MODE __fe_getround(void) {
}
int __fe_raise_inexact(void) {
-#if defined(__riscv_f)
+#if defined(__riscv_f) || defined(__riscv_zfinx)
__asm__ __volatile__("csrsi fflags, %0" :: "i" (RISCV_INEXACT));
#endif
return 0;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/subtf3.c b/contrib/llvm-project/compiler-rt/lib/builtins/subtf3.c
index 3364c28f8179..e1b1022034bf 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/subtf3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/subtf3.c
@@ -13,7 +13,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
COMPILER_RT_ABI fp_t __addtf3(fp_t a, fp_t b);
// Subtraction; flip the sign bit of b and add.
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/trunctfdf2.c b/contrib/llvm-project/compiler-rt/lib/builtins/trunctfdf2.c
index 6857ea54d8a5..f0d2e4141f3b 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/trunctfdf2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/trunctfdf2.c
@@ -9,7 +9,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
#define SRC_QUAD
#define DST_DOUBLE
#include "fp_trunc_impl.inc"
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/trunctfhf2.c b/contrib/llvm-project/compiler-rt/lib/builtins/trunctfhf2.c
index e3a2309d954b..f7776327251c 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/trunctfhf2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/trunctfhf2.c
@@ -10,8 +10,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT) && \
- defined(COMPILER_RT_HAS_FLOAT16)
+#if defined(CRT_HAS_TF_MODE) && defined(COMPILER_RT_HAS_FLOAT16)
#define SRC_QUAD
#define DST_HALF
#include "fp_trunc_impl.inc"
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/trunctfsf2.c b/contrib/llvm-project/compiler-rt/lib/builtins/trunctfsf2.c
index 0261b1e90f5d..242735f738c1 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/trunctfsf2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/trunctfsf2.c
@@ -9,7 +9,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
#define SRC_QUAD
#define DST_SINGLE
#include "fp_trunc_impl.inc"
diff --git a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.cpp b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.cpp
index faf5a6619c26..a579c9e53444 100644
--- a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.cpp
@@ -197,8 +197,7 @@ static dfsan_origin GetOriginIfTainted(uptr addr, uptr size) {
#define PRINT_CALLER_STACK_TRACE \
{ \
- GET_CALLER_PC_BP_SP; \
- (void)sp; \
+ GET_CALLER_PC_BP; \
GET_STORE_STACK_TRACE_PC_BP(pc, bp) \
stack.Print(); \
}
@@ -381,8 +380,7 @@ static void SetOrigin(const void *dst, uptr size, u32 origin) {
}
#define RET_CHAIN_ORIGIN(id) \
- GET_CALLER_PC_BP_SP; \
- (void)sp; \
+ GET_CALLER_PC_BP; \
GET_STORE_STACK_TRACE_PC_BP(pc, bp); \
return ChainOrigin(id, &stack);
@@ -567,8 +565,7 @@ void SetShadow(dfsan_label label, void *addr, uptr size, dfsan_origin origin) {
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_maybe_store_origin(
dfsan_label s, void *p, uptr size, dfsan_origin o) {
if (UNLIKELY(s)) {
- GET_CALLER_PC_BP_SP;
- (void)sp;
+ GET_CALLER_PC_BP;
GET_STORE_STACK_TRACE_PC_BP(pc, bp);
SetOrigin(p, size, ChainOrigin(o, &stack));
}
diff --git a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_allocator.cpp
index 5fb8fef213b9..df8be2cf5ae0 100644
--- a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_allocator.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_allocator.cpp
@@ -30,6 +30,10 @@ struct Metadata {
struct DFsanMapUnmapCallback {
void OnMap(uptr p, uptr size) const { dfsan_set_label(0, (void *)p, size); }
+ void OnMapSecondary(uptr p, uptr size, uptr user_begin,
+ uptr user_size) const {
+ OnMap(p, size);
+ }
void OnUnmap(uptr p, uptr size) const { dfsan_set_label(0, (void *)p, size); }
};
@@ -174,6 +178,20 @@ void *DFsanCalloc(uptr nmemb, uptr size) {
return DFsanAllocate(nmemb * size, sizeof(u64), true /*zeroise*/);
}
+static const void *AllocationBegin(const void *p) {
+ if (!p)
+ return nullptr;
+ void *beg = allocator.GetBlockBegin(p);
+ if (!beg)
+ return nullptr;
+ Metadata *b = (Metadata *)allocator.GetMetaData(beg);
+ if (!b)
+ return nullptr;
+ if (b->requested_size == 0)
+ return nullptr;
+ return (const void *)beg;
+}
+
static uptr AllocationSize(const void *p) {
if (!p)
return 0;
@@ -184,6 +202,10 @@ static uptr AllocationSize(const void *p) {
return b->requested_size;
}
+static uptr AllocationSizeFast(const void *p) {
+ return reinterpret_cast<Metadata *>(allocator.GetMetaData(p))->requested_size;
+}
+
void *dfsan_malloc(uptr size) {
return SetErrnoOnNull(DFsanAllocate(size, sizeof(u64), false /*zeroise*/));
}
@@ -294,4 +316,15 @@ uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
+const void *__sanitizer_get_allocated_begin(const void *p) {
+ return AllocationBegin(p);
+}
+
uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
+
+uptr __sanitizer_get_allocated_size_fast(const void *p) {
+ DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
+ uptr ret = AllocationSizeFast(p);
+ DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
+ return ret;
+}
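
For context, the two new exports give tools a way to map an interior pointer back to its allocation and then query the size cheaply; as the DCHECKs above document, the _fast variant is only defined on the begin pointer itself. A usage sketch, assuming a clang build with DFSan so these entry points are present:

    // Build assumption: clang++ -fsanitize=dataflow alloc_query.cpp
    #include <sanitizer/allocator_interface.h>
    #include <cstdio>
    #include <cstdlib>

    int main() {
      char *p = static_cast<char *>(malloc(32));
      // Map an interior pointer back to the start of its allocation.
      const void *begin = __sanitizer_get_allocated_begin(p + 10);
      if (begin) {
        // Valid only on the begin pointer, never on an interior one.
        size_t n = __sanitizer_get_allocated_size_fast(begin);
        std::printf("begin=%p size=%zu\n", begin, n);
      }
      free(p);
      return 0;
    }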
diff --git a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_custom.cpp b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_custom.cpp
index 6f41e225d9e8..f41dd50617fb 100644
--- a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_custom.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_custom.cpp
@@ -204,6 +204,57 @@ SANITIZER_INTERFACE_ATTRIBUTE char *__dfso_strpbrk(
return const_cast<char *>(ret);
}
+SANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strsep(char **s, const char *delim,
+ dfsan_label s_label,
+ dfsan_label delim_label,
+ dfsan_label *ret_label) {
+ dfsan_label base_label = dfsan_read_label(s, sizeof(*s));
+ char *base = *s;
+ char *res = strsep(s, delim);
+ if (res != *s) {
+ char *token_start = res;
+ int token_length = strlen(res);
+    // the delimiter byte has been overwritten with a NUL, so clear its label
+ dfsan_set_label(0, token_start + token_length, 1);
+ }
+
+ if (flags().strict_data_dependencies) {
+ *ret_label = res ? base_label : 0;
+ } else {
+ size_t s_bytes_read = (res ? strlen(res) : strlen(base)) + 1;
+ *ret_label = dfsan_union(
+        dfsan_union(base_label, dfsan_read_label(base, s_bytes_read)),
+ dfsan_union(dfsan_read_label(delim, strlen(delim) + 1),
+ dfsan_union(s_label, delim_label)));
+ }
+
+ return res;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE char *__dfso_strsep(
+ char **s, const char *delim, dfsan_label s_label, dfsan_label delim_label,
+ dfsan_label *ret_label, dfsan_origin s_origin, dfsan_origin delim_origin,
+ dfsan_origin *ret_origin) {
+ dfsan_origin base_origin = dfsan_read_origin_of_first_taint(s, sizeof(*s));
+ char *res = __dfsw_strsep(s, delim, s_label, delim_label, ret_label);
+ if (flags().strict_data_dependencies) {
+ if (res)
+ *ret_origin = base_origin;
+ } else {
+ if (*ret_label) {
+ if (base_origin) {
+ *ret_origin = base_origin;
+ } else {
+ dfsan_origin o =
+ dfsan_read_origin_of_first_taint(delim, strlen(delim) + 1);
+ *ret_origin = o ? o : (s_label ? s_origin : delim_origin);
+ }
+ }
+ }
+
+ return res;
+}
+
static int dfsan_memcmp_bcmp(const void *s1, const void *s2, size_t n,
size_t *bytes_read) {
const char *cs1 = (const char *) s1, *cs2 = (const char *) s2;
@@ -484,6 +535,36 @@ SANITIZER_INTERFACE_ATTRIBUTE size_t __dfso_strlen(const char *s,
return ret;
}
+SANITIZER_INTERFACE_ATTRIBUTE size_t __dfsw_strnlen(const char *s,
+ size_t maxlen,
+ dfsan_label s_label,
+ dfsan_label maxlen_label,
+ dfsan_label *ret_label) {
+ size_t ret = strnlen(s, maxlen);
+ if (flags().strict_data_dependencies) {
+ *ret_label = 0;
+ } else {
+ size_t full_len = strlen(s);
+ size_t covered_len = maxlen > (full_len + 1) ? (full_len + 1) : maxlen;
+ *ret_label = dfsan_union(maxlen_label, dfsan_read_label(s, covered_len));
+ }
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE size_t __dfso_strnlen(
+ const char *s, size_t maxlen, dfsan_label s_label, dfsan_label maxlen_label,
+ dfsan_label *ret_label, dfsan_origin s_origin, dfsan_origin maxlen_origin,
+ dfsan_origin *ret_origin) {
+ size_t ret = __dfsw_strnlen(s, maxlen, s_label, maxlen_label, ret_label);
+ if (!flags().strict_data_dependencies) {
+ size_t full_len = strlen(s);
+ size_t covered_len = maxlen > (full_len + 1) ? (full_len + 1) : maxlen;
+ dfsan_origin o = dfsan_read_origin_of_first_taint(s, covered_len);
+ *ret_origin = o ? o : maxlen_origin;
+ }
+ return ret;
+}
+
static void *dfsan_memmove(void *dest, const void *src, size_t n) {
dfsan_label *sdest = shadow_for(dest);
const dfsan_label *ssrc = shadow_for(src);
@@ -601,6 +682,37 @@ SANITIZER_INTERFACE_ATTRIBUTE char *__dfso_strcat(
return ret;
}
+SANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strncat(
+ char *dest, const char *src, size_t num, dfsan_label dest_label,
+ dfsan_label src_label, dfsan_label num_label, dfsan_label *ret_label) {
+ size_t src_len = strlen(src);
+ src_len = src_len < num ? src_len : num;
+ size_t dest_len = strlen(dest);
+
+ char *ret = strncat(dest, src, num);
+ dfsan_mem_shadow_transfer(dest + dest_len, src, src_len);
+ *ret_label = dest_label;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE char *__dfso_strncat(
+ char *dest, const char *src, size_t num, dfsan_label dest_label,
+ dfsan_label src_label, dfsan_label num_label, dfsan_label *ret_label,
+ dfsan_origin dest_origin, dfsan_origin src_origin, dfsan_origin num_origin,
+ dfsan_origin *ret_origin) {
+ size_t src_len = strlen(src);
+ src_len = src_len < num ? src_len : num;
+ size_t dest_len = strlen(dest);
+
+ char *ret = strncat(dest, src, num);
+
+ dfsan_mem_origin_transfer(dest + dest_len, src, src_len);
+ dfsan_mem_shadow_transfer(dest + dest_len, src, src_len);
+ *ret_label = dest_label;
+ *ret_origin = dest_origin;
+ return ret;
+}
+
SANITIZER_INTERFACE_ATTRIBUTE char *
__dfsw_strdup(const char *s, dfsan_label s_label, dfsan_label *ret_label) {
size_t len = strlen(s);
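
To illustrate what the new strsep wrapper above buys: with the custom function registered, a label placed on the input buffer flows to the returned token. A hedged sketch against the public DFSan interface (requires building with -fsanitize=dataflow):

    #include <sanitizer/dfsan_interface.h>
    #include <cstdio>
    #include <cstring>

    int main() {
      char buf[] = "key=value";
      dfsan_label l = 1;                    // labels are bit flags
      dfsan_set_label(l, buf, sizeof(buf)); // taint the whole buffer
      char *rest = buf;
      char *tok = strsep(&rest, "=");       // wrapper propagates labels
      dfsan_label out = dfsan_read_label(tok, strlen(tok));
      std::printf("token=%s tainted=%d\n", tok, dfsan_has_label(out, l));
      return 0;
    }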
diff --git a/contrib/llvm-project/compiler-rt/lib/dfsan/done_abilist.txt b/contrib/llvm-project/compiler-rt/lib/dfsan/done_abilist.txt
index ff8a37fbf426..84d1b5188401 100644
--- a/contrib/llvm-project/compiler-rt/lib/dfsan/done_abilist.txt
+++ b/contrib/llvm-project/compiler-rt/lib/dfsan/done_abilist.txt
@@ -88,6 +88,7 @@ fun:isalnum=functional
fun:ispunct=functional
fun:isspace=functional
fun:tolower=functional
+fun:_tolower=functional
fun:toupper=functional
# Functions that return a value that is data-dependent on the input.
@@ -268,6 +269,7 @@ fun:strtoll=custom
fun:strtoul=custom
fun:strtoull=custom
fun:strcat=custom
+fun:strncat=custom
# Functions that produce an output that is computed from the input, but is not
# necessarily data dependent.
@@ -278,11 +280,13 @@ fun:strcasecmp=custom
fun:strchr=custom
fun:strcmp=custom
fun:strlen=custom
+fun:strnlen=custom
fun:strncasecmp=custom
fun:strncmp=custom
fun:strpbrk=custom
fun:strrchr=custom
fun:strstr=custom
+fun:strsep=custom
# Functions which take action based on global state, such as running a callback
# set by a separate function.
@@ -447,8 +451,12 @@ fun:__sanitizer_get_estimated_allocated_size=uninstrumented
fun:__sanitizer_get_estimated_allocated_size=discard
fun:__sanitizer_get_ownership=uninstrumented
fun:__sanitizer_get_ownership=discard
+fun:__sanitizer_get_allocated_begin=uninstrumented
+fun:__sanitizer_get_allocated_begin=discard
fun:__sanitizer_get_allocated_size=uninstrumented
fun:__sanitizer_get_allocated_size=discard
+fun:__sanitizer_get_allocated_size_fast=uninstrumented
+fun:__sanitizer_get_allocated_size_fast=discard
fun:__sanitizer_print_stack_trace=uninstrumented
fun:__sanitizer_print_stack_trace=discard
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerCommand.h b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerCommand.h
index f653fe358768..eb68be9a65b6 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerCommand.h
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerCommand.h
@@ -139,7 +139,7 @@ public:
// be the equivalent command line.
std::string toString() const {
std::stringstream SS;
- for (auto arg : getArguments())
+ for (const auto &arg : getArguments())
SS << arg << " ";
if (hasOutputFile())
SS << ">" << getOutputFile() << " ";
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerCorpus.h b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerCorpus.h
index e01891e18fe3..912082be8fba 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerCorpus.h
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerCorpus.h
@@ -77,7 +77,7 @@ struct InputInfo {
SumIncidence = 0.0;
// Apply add-one smoothing to locally discovered features.
- for (auto F : FeatureFreqs) {
+ for (const auto &F : FeatureFreqs) {
double LocalIncidence = F.second + 1;
Energy -= LocalIncidence * log(LocalIncidence);
SumIncidence += LocalIncidence;
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.cpp
index 2f9a4d2d7adc..93bf817a857b 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.cpp
@@ -88,7 +88,7 @@ bool BlockCoverage::AppendCoverage(std::istream &IN) {
// * a function with less frequently executed code gets a bigger weight.
std::vector<double> BlockCoverage::FunctionWeights(size_t NumFunctions) const {
std::vector<double> Res(NumFunctions);
- for (auto It : Functions) {
+ for (const auto &It : Functions) {
auto FunctionID = It.first;
auto Counters = It.second;
assert(FunctionID < NumFunctions);
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDriver.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDriver.cpp
index 6b007f2ad45c..8c8c95392c7e 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDriver.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDriver.cpp
@@ -463,7 +463,7 @@ int MinimizeCrashInput(const std::vector<std::string> &Args,
CurrentFilePath = Flags.exact_artifact_path;
WriteToFile(U, CurrentFilePath);
}
- Printf("CRASH_MIN: failed to minimize beyond %s (%d bytes), exiting\n",
+ Printf("CRASH_MIN: failed to minimize beyond %s (%zu bytes), exiting\n",
CurrentFilePath.c_str(), U.size());
break;
}
@@ -501,7 +501,6 @@ int MinimizeCrashInputInternalStep(Fuzzer *F, InputCorpus *Corpus) {
F->MinimizeCrashLoop(U);
Printf("INFO: Done MinimizeCrashInputInternalStep, no crashes found\n");
exit(0);
- return 0;
}
void Merge(Fuzzer *F, FuzzingOptions &Options,
@@ -535,7 +534,7 @@ void Merge(Fuzzer *F, FuzzingOptions &Options,
int AnalyzeDictionary(Fuzzer *F, const std::vector<Unit> &Dict,
UnitVector &Corpus) {
- Printf("Started dictionary minimization (up to %d tests)\n",
+ Printf("Started dictionary minimization (up to %zu tests)\n",
Dict.size() * Corpus.size() * 2);
// Scores and usage count for each dictionary unit.
@@ -779,7 +778,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {
if (!Options.FocusFunction.empty())
Options.Entropic = false; // FocusFunction overrides entropic scheduling.
if (Options.Entropic)
- Printf("INFO: Running with entropic power schedule (0x%X, %d).\n",
+ Printf("INFO: Running with entropic power schedule (0x%zX, %zu).\n",
Options.EntropicFeatureFrequencyThreshold,
Options.EntropicNumberOfRarestFeatures);
struct EntropicOptions Entropic;
@@ -797,7 +796,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {
if (Flags.verbosity)
Printf("INFO: Seed: %u\n", Seed);
- if (Flags.collect_data_flow && !Flags.fork &&
+ if (Flags.collect_data_flow && Flags.data_flow_trace && !Flags.fork &&
!(Flags.merge || Flags.set_cover_merge)) {
if (RunIndividualFiles)
return CollectDataFlow(Flags.collect_data_flow, Flags.data_flow_trace,
@@ -860,7 +859,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {
RunOneTest(F, Path.c_str(), Options.MaxLen);
auto StopTime = system_clock::now();
auto MS = duration_cast<milliseconds>(StopTime - StartTime).count();
- Printf("Executed %s in %zd ms\n", Path.c_str(), (long)MS);
+ Printf("Executed %s in %ld ms\n", Path.c_str(), (long)MS);
}
Printf("***\n"
"*** NOTE: fuzzing was not performed, you have only\n"
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.cpp
index 0a58c5377b34..54cc4ee54be0 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.cpp
@@ -65,7 +65,7 @@ std::string FileToString(const std::string &Path) {
}
void CopyFileToErr(const std::string &Path) {
- Printf("%s", FileToString(Path).c_str());
+ Puts(FileToString(Path).c_str());
}
void WriteToFile(const Unit &U, const std::string &Path) {
@@ -151,6 +151,11 @@ void CloseStdout() {
DiscardOutput(1);
}
+void Puts(const char *Str) {
+ fputs(Str, OutputFile);
+ fflush(OutputFile);
+}
+
void Printf(const char *Fmt, ...) {
va_list ap;
va_start(ap, Fmt);
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.h b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.h
index 401afa0b4477..874caad1baed 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.h
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.h
@@ -58,6 +58,7 @@ void CloseStdout();
FILE *GetOutputFile();
void SetOutputFile(FILE *NewOutputFile);
+void Puts(const char *Str);
void Printf(const char *Fmt, ...);
void VPrintf(bool Verbose, const char *Fmt, ...);
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerInternal.h b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerInternal.h
index a732ca87b0f3..88504705137a 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerInternal.h
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerInternal.h
@@ -29,12 +29,11 @@ namespace fuzzer {
using namespace std::chrono;
-class Fuzzer {
+class Fuzzer final {
public:
-
Fuzzer(UserCallback CB, InputCorpus &Corpus, MutationDispatcher &MD,
- FuzzingOptions Options);
- ~Fuzzer();
+ const FuzzingOptions &Options);
+ ~Fuzzer() = delete;
void Loop(std::vector<SizedFile> &CorporaFiles);
void ReadAndExecuteSeedCorpora(std::vector<SizedFile> &CorporaFiles);
void MinimizeCrashLoop(const Unit &U);
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerLoop.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerLoop.cpp
index 00f5ed7743b6..8b430c5428d8 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerLoop.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerLoop.cpp
@@ -136,7 +136,7 @@ void Fuzzer::HandleMalloc(size_t Size) {
}
Fuzzer::Fuzzer(UserCallback CB, InputCorpus &Corpus, MutationDispatcher &MD,
- FuzzingOptions Options)
+ const FuzzingOptions &Options)
: CB(CB), Corpus(Corpus), MD(MD), Options(Options) {
if (EF->__sanitizer_set_death_callback)
EF->__sanitizer_set_death_callback(StaticDeathCallback);
@@ -160,8 +160,6 @@ Fuzzer::Fuzzer(UserCallback CB, InputCorpus &Corpus, MutationDispatcher &MD,
memset(BaseSha1, 0, sizeof(BaseSha1));
}
-Fuzzer::~Fuzzer() {}
-
void Fuzzer::AllocateCurrentUnitData() {
if (CurrentUnitData || MaxInputLen == 0)
return;
@@ -301,7 +299,7 @@ void Fuzzer::AlarmCallback() {
Printf(" and the timeout value is %d (use -timeout=N to change)\n",
Options.UnitTimeoutSec);
DumpCurrentUnit("timeout-");
- Printf("==%lu== ERROR: libFuzzer: timeout after %d seconds\n", GetPid(),
+ Printf("==%lu== ERROR: libFuzzer: timeout after %zu seconds\n", GetPid(),
Seconds);
PrintStackTrace();
Printf("SUMMARY: libFuzzer: timeout\n");
@@ -314,9 +312,8 @@ void Fuzzer::RssLimitCallback() {
if (EF->__sanitizer_acquire_crash_state &&
!EF->__sanitizer_acquire_crash_state())
return;
- Printf(
- "==%lu== ERROR: libFuzzer: out-of-memory (used: %zdMb; limit: %zdMb)\n",
- GetPid(), GetPeakRSSMb(), Options.RssLimitMb);
+ Printf("==%lu== ERROR: libFuzzer: out-of-memory (used: %zdMb; limit: %dMb)\n",
+ GetPid(), GetPeakRSSMb(), Options.RssLimitMb);
Printf(" To change the out-of-memory limit use -rss_limit_mb=<N>\n\n");
PrintMemoryProfile();
DumpCurrentUnit("oom-");
@@ -371,7 +368,7 @@ void Fuzzer::PrintFinalStats() {
Printf("stat::number_of_executed_units: %zd\n", TotalNumberOfRuns);
Printf("stat::average_exec_per_sec: %zd\n", ExecPerSec);
Printf("stat::new_units_added: %zd\n", NumberOfNewUnitsAdded);
- Printf("stat::slowest_unit_time_sec: %zd\n", TimeOfLongestUnitInSeconds);
+ Printf("stat::slowest_unit_time_sec: %ld\n", TimeOfLongestUnitInSeconds);
Printf("stat::peak_rss_mb: %zd\n", GetPeakRSSMb());
}
@@ -455,7 +452,7 @@ void Fuzzer::PrintPulseAndReportSlowInput(const uint8_t *Data, size_t Size) {
static_cast<long>(static_cast<double>(TimeOfLongestUnitInSeconds) * 1.1);
if (TimeOfUnit > Threshhold && TimeOfUnit >= Options.ReportSlowUnits) {
TimeOfLongestUnitInSeconds = TimeOfUnit;
- Printf("Slowest unit: %zd s:\n", TimeOfLongestUnitInSeconds);
+ Printf("Slowest unit: %ld s:\n", TimeOfLongestUnitInSeconds);
WriteUnitToFileWithPrefix({Data, Data + Size}, "slow-unit-");
}
}
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMerge.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMerge.cpp
index 24bd11958e80..8c8806e8aafd 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMerge.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMerge.cpp
@@ -77,6 +77,7 @@ bool Merger::Parse(std::istream &IS, bool ParseCoverage) {
size_t ExpectedStartMarker = 0;
const size_t kInvalidStartMarker = -1;
size_t LastSeenStartMarker = kInvalidStartMarker;
+ bool HaveFtMarker = true;
std::vector<uint32_t> TmpFeatures;
std::set<uint32_t> PCs;
while (std::getline(IS, Line, '\n')) {
@@ -93,12 +94,13 @@ bool Merger::Parse(std::istream &IS, bool ParseCoverage) {
LastSeenStartMarker = ExpectedStartMarker;
assert(ExpectedStartMarker < Files.size());
ExpectedStartMarker++;
+ HaveFtMarker = false;
} else if (Marker == "FT") {
// FT FILE_ID COV1 COV2 COV3 ...
size_t CurrentFileIdx = N;
if (CurrentFileIdx != LastSeenStartMarker)
return false;
- LastSeenStartMarker = kInvalidStartMarker;
+ HaveFtMarker = true;
if (ParseCoverage) {
TmpFeatures.clear(); // use a vector from outer scope to avoid resizes.
while (ISS1 >> N)
@@ -108,6 +110,8 @@ bool Merger::Parse(std::istream &IS, bool ParseCoverage) {
}
} else if (Marker == "COV") {
size_t CurrentFileIdx = N;
+ if (CurrentFileIdx != LastSeenStartMarker)
+ return false;
if (ParseCoverage)
while (ISS1 >> N)
if (PCs.insert(N).second)
@@ -116,7 +120,7 @@ bool Merger::Parse(std::istream &IS, bool ParseCoverage) {
return false;
}
}
- if (LastSeenStartMarker != kInvalidStartMarker)
+ if (!HaveFtMarker && LastSeenStartMarker != kInvalidStartMarker)
LastFailure = Files[LastSeenStartMarker].Name;
FirstNotProcessedFile = ExpectedStartMarker;
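
For orientation, Merger::Parse consumes libFuzzer's merge control file; the new HaveFtMarker flag means a crashing input is still identified as the last failure even when a COV line was emitted before the FT line. A sketch of the layout as this parser reads it (the # annotations are ours, not part of the format):

    3                  # total number of input files
    2                  # how many of them form the initial corpus
    corpus/a
    corpus/b
    queue/c
    STARTED 0 1024     # file 0 is about to be executed (size 1024)
    FT 0 11 12 13      # features recorded for file 0
    COV 0 401 402      # newly covered PCs for file 0
    STARTED 1 2048     # file 1 started; no FT follows, so it crashed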
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMutate.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMutate.cpp
index d663900fdc3a..1abce16d70d9 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMutate.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMutate.cpp
@@ -521,7 +521,7 @@ void MutationDispatcher::PrintMutationSequence(bool Verbose) {
std::string MutationDispatcher::MutationSequence() {
std::string MS;
- for (auto M : CurrentMutatorSequence) {
+ for (const auto &M : CurrentMutatorSequence) {
MS += M.Name;
MS += "-";
}
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerTracePC.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerTracePC.cpp
index f12f7aa61bc4..7f4e8ef91c44 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerTracePC.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerTracePC.cpp
@@ -149,8 +149,8 @@ inline ALWAYS_INLINE uintptr_t GetPreviousInstructionPc(uintptr_t PC) {
ALWAYS_INLINE uintptr_t TracePC::GetNextInstructionPc(uintptr_t PC) {
#if defined(__mips__)
return PC + 8;
-#elif defined(__powerpc__) || defined(__sparc__) || defined(__arm__) || \
- defined(__aarch64__)
+#elif defined(__powerpc__) || defined(__sparc__) || defined(__arm__) || \
+ defined(__aarch64__) || defined(__loongarch__)
return PC + 4;
#else
return PC + 1;
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtil.h b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtil.h
index 71d49097e559..5296e7784b3f 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtil.h
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtil.h
@@ -94,7 +94,8 @@ inline size_t Log(size_t X) {
return static_cast<size_t>((sizeof(unsigned long long) * 8) - Clzll(X) - 1);
}
-inline size_t PageSize() { return 4096; }
+size_t PageSize();
+
inline uint8_t *RoundUpByPage(uint8_t *P) {
uintptr_t X = reinterpret_cast<uintptr_t>(P);
size_t Mask = PageSize() - 1;
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp
index d80b80cccb80..6a56505fbf1a 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp
@@ -87,6 +87,7 @@ void AlarmHandler(int Seconds) {
// Alternatively, Fuchsia may in future actually implement basic signal
// handling for the machine trap signals.
#if defined(__x86_64__)
+
#define FOREACH_REGISTER(OP_REG, OP_NUM) \
OP_REG(rax) \
OP_REG(rbx) \
@@ -107,6 +108,7 @@ void AlarmHandler(int Seconds) {
OP_REG(rip)
#elif defined(__aarch64__)
+
#define FOREACH_REGISTER(OP_REG, OP_NUM) \
OP_NUM(0) \
OP_NUM(1) \
@@ -140,6 +142,41 @@ void AlarmHandler(int Seconds) {
OP_NUM(29) \
OP_REG(sp)
+#elif defined(__riscv)
+
+#define FOREACH_REGISTER(OP_REG, OP_NUM) \
+ OP_REG(ra) \
+ OP_REG(sp) \
+ OP_REG(gp) \
+ OP_REG(tp) \
+ OP_REG(t0) \
+ OP_REG(t1) \
+ OP_REG(t2) \
+ OP_REG(s0) \
+ OP_REG(s1) \
+ OP_REG(a0) \
+ OP_REG(a1) \
+ OP_REG(a2) \
+ OP_REG(a3) \
+ OP_REG(a4) \
+ OP_REG(a5) \
+ OP_REG(a6) \
+ OP_REG(a7) \
+ OP_REG(s2) \
+ OP_REG(s3) \
+ OP_REG(s4) \
+ OP_REG(s5) \
+ OP_REG(s6) \
+ OP_REG(s7) \
+ OP_REG(s8) \
+ OP_REG(s9) \
+ OP_REG(s10) \
+ OP_REG(s11) \
+ OP_REG(t3) \
+ OP_REG(t4) \
+ OP_REG(t5) \
+ OP_REG(t6) \
+
#else
#error "Unsupported architecture for fuzzing on Fuchsia"
#endif
@@ -200,6 +237,13 @@ void MakeTrampoline() {
".cfi_offset 30, %c[lr]\n"
"bl %c[StaticCrashHandler]\n"
"brk 1\n"
+#elif defined(__riscv)
+ ".cfi_return_column 64\n"
+ ".cfi_def_cfa sp, 0\n"
+ ".cfi_offset 64, %[pc]\n"
+ FOREACH_REGISTER(CFI_OFFSET_REG, CFI_OFFSET_NUM)
+ "call %c[StaticCrashHandler]\n"
+ "unimp\n"
#else
#error "Unsupported architecture for fuzzing on Fuchsia"
#endif
@@ -209,8 +253,11 @@ void MakeTrampoline() {
".cfi_startproc\n"
: // No outputs
: FOREACH_REGISTER(ASM_OPERAND_REG, ASM_OPERAND_NUM)
+#if defined(__aarch64__) || defined(__riscv)
+ ASM_OPERAND_REG(pc)
+#endif
#if defined(__aarch64__)
- ASM_OPERAND_REG(pc) ASM_OPERAND_REG(lr)
+ ASM_OPERAND_REG(lr)
#endif
[StaticCrashHandler] "i"(StaticCrashHandler));
}
@@ -294,6 +341,7 @@ void CrashHandler() {
// onto the stack and jump into a trampoline with CFI instructions on how
// to restore it.
#if defined(__x86_64__)
+
uintptr_t StackPtr =
(GeneralRegisters.rsp - (128 + sizeof(GeneralRegisters))) &
-(uintptr_t)16;
@@ -302,7 +350,8 @@ void CrashHandler() {
GeneralRegisters.rsp = StackPtr;
GeneralRegisters.rip = reinterpret_cast<zx_vaddr_t>(CrashTrampolineAsm);
-#elif defined(__aarch64__)
+#elif defined(__aarch64__) || defined(__riscv)
+
uintptr_t StackPtr =
(GeneralRegisters.sp - sizeof(GeneralRegisters)) & -(uintptr_t)16;
__unsanitized_memcpy(reinterpret_cast<void *>(StackPtr), &GeneralRegisters,
@@ -551,6 +600,11 @@ void DiscardOutput(int Fd) {
dup2(nullfd, Fd);
}
+size_t PageSize() {
+ static size_t PageSizeCached = _zx_system_get_page_size();
+ return PageSizeCached;
+}
+
} // namespace fuzzer
#endif // LIBFUZZER_FUCHSIA
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp
index 0446d732a9ec..392c1e5be4ee 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp
@@ -183,6 +183,11 @@ std::string SearchRegexCmd(const std::string &Regex) {
return "grep '" + Regex + "'";
}
+size_t PageSize() {
+ static size_t PageSizeCached = sysconf(_SC_PAGESIZE);
+ return PageSizeCached;
+}
+
} // namespace fuzzer
#endif // LIBFUZZER_POSIX
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp
index 3598758dbb4f..6d9bc766c695 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp
@@ -224,6 +224,15 @@ void DiscardOutput(int Fd) {
fclose(Temp);
}
+size_t PageSize() {
+ static size_t PageSizeCached = []() -> size_t {
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ return si.dwPageSize;
+ }();
+ return PageSizeCached;
+}
+
} // namespace fuzzer
#endif // LIBFUZZER_WINDOWS
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler_posix.cpp b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler_posix.cpp
index e012963bffd8..198db5cb074c 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler_posix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler_posix.cpp
@@ -99,6 +99,12 @@ void printHeader(Error E, uintptr_t AccessPtr,
ThreadBuffer);
}
+static bool HasReportedBadPoolAccess = false;
+static const char *kUnknownCrashText =
+ "GWP-ASan cannot provide any more information about this error. This may "
+ "occur due to a wild memory access into the GWP-ASan pool, or an "
+ "overflow/underflow that is > 512B in length.\n";
+
void dumpReport(uintptr_t ErrorPtr, const gwp_asan::AllocatorState *State,
const gwp_asan::AllocationMetadata *Metadata,
SegvBacktrace_t SegvBacktrace, Printf_t Printf,
@@ -117,6 +123,15 @@ void dumpReport(uintptr_t ErrorPtr, const gwp_asan::AllocatorState *State,
const gwp_asan::AllocationMetadata *AllocMeta =
__gwp_asan_get_metadata(State, Metadata, ErrorPtr);
+ if (AllocMeta == nullptr) {
+ if (HasReportedBadPoolAccess) return;
+ HasReportedBadPoolAccess = true;
+ Printf("*** GWP-ASan detected a memory error ***\n");
+ ScopedEndOfReportDecorator Decorator(Printf);
+ Printf(kUnknownCrashText);
+ return;
+ }
+
// It's unusual for a signal handler to be invoked multiple times for the same
// allocation, but it's possible in various scenarios, like:
// 1. A double-free or invalid-free was invoked in one thread at the same
@@ -132,9 +147,7 @@ void dumpReport(uintptr_t ErrorPtr, const gwp_asan::AllocatorState *State,
Error E = __gwp_asan_diagnose_error(State, Metadata, ErrorPtr);
if (E == Error::UNKNOWN) {
- Printf("GWP-ASan cannot provide any more information about this error. "
- "This may occur due to a wild memory access into the GWP-ASan pool, "
- "or an overflow/underflow that is > 512B in length.\n");
+ Printf(kUnknownCrashText);
return;
}
@@ -149,9 +162,6 @@ void dumpReport(uintptr_t ErrorPtr, const gwp_asan::AllocatorState *State,
PrintBacktrace(Trace, TraceLength, Printf);
- if (AllocMeta == nullptr)
- return;
-
// Maybe print the deallocation trace.
if (__gwp_asan_is_deallocated(AllocMeta)) {
uint64_t ThreadID = __gwp_asan_get_deallocation_thread_id(AllocMeta);
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.cpp
index cdf231c2547e..000c0f76c1da 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.cpp
@@ -86,8 +86,9 @@ static void InitializeFlags() {
cf.clear_shadow_mmap_threshold = 4096 * (SANITIZER_ANDROID ? 2 : 8);
// Sigtrap is used in error reporting.
cf.handle_sigtrap = kHandleSignalExclusive;
- // FIXME: enable once all false positives have been fixed.
- cf.detect_leaks = false;
+  // For now only tested on Linux. Other platforms can be turned on as they
+ // become ready.
+ cf.detect_leaks = cf.detect_leaks && SANITIZER_LINUX && !SANITIZER_ANDROID;
#if SANITIZER_ANDROID
  // Let platform handle other signals. It is better at reporting them than we
@@ -129,6 +130,9 @@ static void InitializeFlags() {
// Override from user-specified string.
if (__hwasan_default_options)
parser.ParseString(__hwasan_default_options());
+#if CAN_SANITIZE_LEAKS
+ lsan_parser.ParseString(__lsan_default_options());
+#endif
#if HWASAN_CONTAINS_UBSAN
const char *ubsan_default_options = __ubsan_default_options();
ubsan_parser.ParseString(ubsan_default_options);
@@ -286,14 +290,20 @@ static bool InitializeSingleGlobal(const hwasan_global &global) {
}
static void InitLoadedGlobals() {
- dl_iterate_phdr(
- [](dl_phdr_info *info, size_t /* size */, void * /* data */) -> int {
- for (const hwasan_global &global : HwasanGlobalsFor(
- info->dlpi_addr, info->dlpi_phdr, info->dlpi_phnum))
- InitializeSingleGlobal(global);
- return 0;
- },
- nullptr);
+ // Fuchsia's libc provides a hook (__sanitizer_module_loaded) that runs on
+ // the startup path which calls into __hwasan_library_loaded on all
+ // initially loaded modules, so explicitly registering the globals here
+ // isn't needed.
+ if constexpr (!SANITIZER_FUCHSIA) {
+ dl_iterate_phdr(
+ [](dl_phdr_info *info, size_t /* size */, void * /* data */) -> int {
+ for (const hwasan_global &global : HwasanGlobalsFor(
+ info->dlpi_addr, info->dlpi_phdr, info->dlpi_phnum))
+ InitializeSingleGlobal(global);
+ return 0;
+ },
+ nullptr);
+ }
}
// Prepare to run instrumented code on the main thread.
@@ -360,13 +370,7 @@ __attribute__((constructor(0))) void __hwasan_init() {
DisableCoreDumperIfNecessary();
InitInstrumentation();
- if constexpr (!SANITIZER_FUCHSIA) {
- // Fuchsia's libc provides a hook (__sanitizer_module_loaded) that runs on
- // the startup path which calls into __hwasan_library_loaded on all
- // initially loaded modules, so explicitly registering the globals here
- // isn't needed.
- InitLoadedGlobals();
- }
+ InitLoadedGlobals();
// Needs to be called here because flags()->random_tags might not have been
// initialized when InitInstrumentation() was called.
@@ -397,11 +401,9 @@ __attribute__((constructor(0))) void __hwasan_init() {
__ubsan::InitAsPlugin();
#endif
- if (CAN_SANITIZE_LEAKS) {
+ if (CAN_SANITIZE_LEAKS && common_flags()->detect_leaks) {
__lsan::ScopedInterceptorDisabler disabler;
Symbolizer::LateInitialize();
- } else {
- Symbolizer::LateInitialize();
}
VPrintf(1, "HWAddressSanitizer init done\n");
@@ -443,16 +445,32 @@ void __hwasan_print_shadow(const void *p, uptr sz) {
sptr __hwasan_test_shadow(const void *p, uptr sz) {
if (sz == 0)
return -1;
- tag_t ptr_tag = GetTagFromPointer((uptr)p);
- uptr ptr_raw = UntagAddr(reinterpret_cast<uptr>(p));
+ uptr ptr = reinterpret_cast<uptr>(p);
+ tag_t ptr_tag = GetTagFromPointer(ptr);
+ uptr ptr_raw = UntagAddr(ptr);
uptr shadow_first = MemToShadow(ptr_raw);
- uptr shadow_last = MemToShadow(ptr_raw + sz - 1);
- for (uptr s = shadow_first; s <= shadow_last; ++s)
- if (*(tag_t *)s != ptr_tag) {
- sptr offset = ShadowToMem(s) - ptr_raw;
+ uptr shadow_last = MemToShadow(ptr_raw + sz);
+ for (uptr s = shadow_first; s < shadow_last; ++s) {
+ if (UNLIKELY(*(tag_t *)s != ptr_tag)) {
+ uptr short_size =
+ ShortTagSize(*(tag_t *)s, AddTagToPointer(ShadowToMem(s), ptr_tag));
+ sptr offset = ShadowToMem(s) - ptr_raw + short_size;
return offset < 0 ? 0 : offset;
}
- return -1;
+ }
+
+ uptr end = ptr + sz;
+ uptr tail_sz = end & (kShadowAlignment - 1);
+ if (!tail_sz)
+ return -1;
+
+ uptr short_size =
+ ShortTagSize(*(tag_t *)shadow_last, end & ~(kShadowAlignment - 1));
+ if (LIKELY(tail_sz <= short_size))
+ return -1;
+
+ sptr offset = sz - tail_sz + short_size;
+ return offset < 0 ? 0 : offset;
}
u16 __sanitizer_unaligned_load16(const uu16 *p) {
@@ -512,6 +530,56 @@ void __hwasan_load16_noabort(uptr p) {
CheckAddress<ErrorAction::Recover, AccessType::Load, 4>(p);
}
+void __hwasan_loadN_match_all(uptr p, uptr sz, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddressSized<ErrorAction::Abort, AccessType::Load>(p, sz);
+}
+void __hwasan_load1_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 0>(p);
+}
+void __hwasan_load2_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 1>(p);
+}
+void __hwasan_load4_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 2>(p);
+}
+void __hwasan_load8_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 3>(p);
+}
+void __hwasan_load16_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 4>(p);
+}
+
+void __hwasan_loadN_match_all_noabort(uptr p, uptr sz, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Load>(p, sz);
+}
+void __hwasan_load1_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 0>(p);
+}
+void __hwasan_load2_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 1>(p);
+}
+void __hwasan_load4_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 2>(p);
+}
+void __hwasan_load8_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 3>(p);
+}
+void __hwasan_load16_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 4>(p);
+}
+
void __hwasan_storeN(uptr p, uptr sz) {
CheckAddressSized<ErrorAction::Abort, AccessType::Store>(p, sz);
}
@@ -550,8 +618,58 @@ void __hwasan_store16_noabort(uptr p) {
CheckAddress<ErrorAction::Recover, AccessType::Store, 4>(p);
}
+void __hwasan_storeN_match_all(uptr p, uptr sz, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddressSized<ErrorAction::Abort, AccessType::Store>(p, sz);
+}
+void __hwasan_store1_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 0>(p);
+}
+void __hwasan_store2_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 1>(p);
+}
+void __hwasan_store4_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 2>(p);
+}
+void __hwasan_store8_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 3>(p);
+}
+void __hwasan_store16_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 4>(p);
+}
+
+void __hwasan_storeN_match_all_noabort(uptr p, uptr sz, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Store>(p, sz);
+}
+void __hwasan_store1_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 0>(p);
+}
+void __hwasan_store2_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 1>(p);
+}
+void __hwasan_store4_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 2>(p);
+}
+void __hwasan_store8_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 3>(p);
+}
+void __hwasan_store16_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 4>(p);
+}
+
void __hwasan_tag_memory(uptr p, u8 tag, uptr sz) {
- TagMemoryAligned(p, sz, tag);
+ TagMemoryAligned(UntagAddr(p), sz, tag);
}
uptr __hwasan_tag_pointer(uptr p, u8 tag) {
@@ -561,7 +679,7 @@ uptr __hwasan_tag_pointer(uptr p, u8 tag) {
void __hwasan_handle_longjmp(const void *sp_dst) {
uptr dst = (uptr)sp_dst;
// HWASan does not support tagged SP.
- CHECK(GetTagFromPointer(dst) == 0);
+ CHECK_EQ(GetTagFromPointer(dst), 0);
uptr sp = (uptr)__builtin_frame_address(0);
static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M
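
Background for the __hwasan_test_shadow rewrite above: the last 16-byte granule of an allocation may be a short granule, whose shadow byte stores the number of valid leading bytes rather than the tag; the real tag lives in the granule's final byte. A simplified standalone sketch of that check (granule size and encoding assumed, not taken verbatim from ShortTagSize):

    #include <cstdint>

    constexpr uintptr_t kGranule = 16;

    bool AccessOk(uint8_t shadow, const uint8_t *granule_mem, uint8_t ptr_tag,
                  uintptr_t bytes_used_in_granule) {
      if (shadow == ptr_tag)
        return true;                  // fully tagged granule matches
      if (shadow >= kGranule)
        return false;                 // a full tag, but the wrong one
      // Short granule: only the first `shadow` bytes are addressable and
      // the real tag is stored in the granule's last byte.
      return bytes_used_in_granule <= shadow &&
             granule_mem[kGranule - 1] == ptr_tag;
    }

    int main() {
      uint8_t g[kGranule] = {};
      g[kGranule - 1] = 0x2a;         // stored tag for the short granule
      return AccessOk(/*shadow=*/4, g, /*ptr_tag=*/0x2a, /*bytes=*/3) ? 0 : 1;
    }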
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.h
index c3d71a28142f..37ef48222851 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.h
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.h
@@ -16,6 +16,7 @@
#include "hwasan_flags.h"
#include "hwasan_interface_internal.h"
+#include "hwasan_mapping.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
@@ -78,12 +79,23 @@ const unsigned kRecordFPShift = 48;
const unsigned kRecordFPLShift = 4;
const unsigned kRecordFPModulus = 1 << (64 - kRecordFPShift + kRecordFPLShift);
+static inline bool InTaggableRegion(uptr addr) {
+#if defined(HWASAN_ALIASING_MODE)
+ // Aliases are mapped next to shadow so that the upper bits match the shadow
+ // base.
+ return (addr >> kTaggableRegionCheckShift) ==
+ (__hwasan::GetShadowOffset() >> kTaggableRegionCheckShift);
+#endif
+ return true;
+}
+
static inline tag_t GetTagFromPointer(uptr p) {
- return (p >> kAddressTagShift) & kTagMask;
+ return InTaggableRegion(p) ? ((p >> kAddressTagShift) & kTagMask) : 0;
}
static inline uptr UntagAddr(uptr tagged_addr) {
- return tagged_addr & ~kAddressTagMask;
+ return InTaggableRegion(tagged_addr) ? (tagged_addr & ~kAddressTagMask)
+ : tagged_addr;
}
static inline void *UntagPtr(const void *tagged_ptr) {
@@ -92,7 +104,9 @@ static inline void *UntagPtr(const void *tagged_ptr) {
}
static inline uptr AddTagToPointer(uptr p, tag_t tag) {
- return (p & ~kAddressTagMask) | ((uptr)tag << kAddressTagShift);
+ return InTaggableRegion(p)
+ ? ((p & ~kAddressTagMask) | ((uptr)tag << kAddressTagShift))
+ : p;
}
namespace __hwasan {
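
The tag helpers above all reduce to top-byte arithmetic; InTaggableRegion simply gates them in aliasing mode. A standalone round-trip sketch with assumed AArch64-style constants (tag in the top byte, shift 56):

    #include <cstdint>
    #include <cstdio>

    constexpr unsigned kAddressTagShift = 56;   // assumed: tag in top byte
    constexpr uint64_t kTagMask = 0xff;
    constexpr uint64_t kAddressTagMask = kTagMask << kAddressTagShift;

    uint64_t AddTag(uint64_t p, uint8_t tag) {
      return (p & ~kAddressTagMask) | ((uint64_t)tag << kAddressTagShift);
    }
    uint64_t Untag(uint64_t p) { return p & ~kAddressTagMask; }

    int main() {
      uint64_t p = 0x00007fffdeadbeefULL;
      uint64_t t = AddTag(p, 0x2a);
      std::printf("tagged=%llx untagged=%llx tag=%llx\n",
                  (unsigned long long)t, (unsigned long long)Untag(t),
                  (unsigned long long)(t >> kAddressTagShift));
      return 0;
    }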
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocation_functions.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocation_functions.cpp
index 9cd82dbabd19..75d91ed09ce1 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocation_functions.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocation_functions.cpp
@@ -16,14 +16,25 @@
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_allocator_dlsym.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_mallinfo.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
-#if !SANITIZER_FUCHSIA
-
using namespace __hwasan;
struct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {
static bool UseImpl() { return !hwasan_inited; }
+ static void OnAllocate(const void *ptr, uptr size) {
+# if CAN_SANITIZE_LEAKS
+ // Suppress leaks from dlerror(). Previously dlsym hack on global array was
+ // used by leak sanitizer as a root region.
+ __lsan_register_root_region(ptr, size);
+# endif
+ }
+ static void OnFree(const void *ptr, uptr size) {
+# if CAN_SANITIZE_LEAKS
+ __lsan_unregister_root_region(ptr, size);
+# endif
+ }
};
extern "C" {
@@ -143,12 +154,19 @@ void *__sanitizer_malloc(uptr size) {
} // extern "C"
-#if HWASAN_WITH_INTERCEPTORS
+#if HWASAN_WITH_INTERCEPTORS || SANITIZER_FUCHSIA
+#if SANITIZER_FUCHSIA
+// Fuchsia does not use the WRAP() machinery of the interceptor infrastructure.
+# define INTERCEPTOR_ALIAS(RET, FN, ARGS...) \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE RET FN( \
+ ARGS) ALIAS(__sanitizer_##FN)
+#else
# define INTERCEPTOR_ALIAS(RET, FN, ARGS...) \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE RET WRAP(FN)(ARGS) \
- ALIAS("__sanitizer_" #FN); \
+ ALIAS(__sanitizer_##FN); \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE RET FN( \
- ARGS) ALIAS("__sanitizer_" #FN)
+ ARGS) ALIAS(__sanitizer_##FN)
+#endif
INTERCEPTOR_ALIAS(int, posix_memalign, void **memptr, SIZE_T alignment,
SIZE_T size);
@@ -171,5 +189,3 @@ INTERCEPTOR_ALIAS(int, mallopt, int cmd, int value);
INTERCEPTOR_ALIAS(void, malloc_stats, void);
# endif
#endif // #if HWASAN_WITH_INTERCEPTORS
-
-#endif // SANITIZER_FUCHSIA
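
On the ALIAS change above: the macro now takes the alias target as a token rather than a pre-built string, and on Fuchsia the interceptor name itself is emitted as a weak alias of the __sanitizer_ implementation since no WRAP() wrappers exist there. A small sketch of the underlying GNU alias attribute (ELF assumption; impl and alias_fn are hypothetical names):

    #include <cstdio>

    extern "C" int impl(int x) { return x + 1; }
    // alias_fn becomes a second symbol for the same definition.
    extern "C" int alias_fn(int) __attribute__((alias("impl")));

    int main() { std::printf("%d\n", alias_fn(41)); return 0; }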
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.cpp
index 325675ce122c..48f1873ed682 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.cpp
@@ -45,6 +45,7 @@ enum {
// Initialized in HwasanAllocatorInit, and never changed.
static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];
+static uptr max_malloc_size;
bool HwasanChunkView::IsAllocated() const {
return metadata_ && metadata_->IsAllocated();
@@ -63,6 +64,10 @@ u32 HwasanChunkView::GetAllocStackId() const {
return metadata_->GetAllocStackId();
}
+u32 HwasanChunkView::GetAllocThreadId() const {
+ return metadata_->GetAllocThreadId();
+}
+
uptr HwasanChunkView::ActualSize() const {
return allocator.GetActuallyAllocatedSize(reinterpret_cast<void *>(block_));
}
@@ -94,8 +99,7 @@ inline void Metadata::SetUnallocated() {
}
inline bool Metadata::IsAllocated() const {
- return atomic_load(&chunk_state, memory_order_relaxed) == CHUNK_ALLOCATED &&
- GetRequestedSize();
+ return atomic_load(&chunk_state, memory_order_relaxed) == CHUNK_ALLOCATED;
}
inline u64 Metadata::GetRequestedSize() const {
@@ -106,6 +110,12 @@ inline u32 Metadata::GetAllocStackId() const {
return atomic_load(&alloc_context_id, memory_order_relaxed);
}
+inline u32 Metadata::GetAllocThreadId() const {
+ u64 context = atomic_load(&alloc_context_id, memory_order_relaxed);
+ u32 tid = context >> 32;
+ return tid;
+}
+
void GetAllocatorStats(AllocatorStatCounters s) {
allocator.GetStats(s);
}
@@ -139,18 +149,28 @@ void HwasanAllocatorInit() {
atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
!flags()->disable_allocator_tagging);
SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
- allocator.Init(common_flags()->allocator_release_to_os_interval_ms,
- GetAliasRegionStart());
+ allocator.InitLinkerInitialized(
+ common_flags()->allocator_release_to_os_interval_ms,
+ GetAliasRegionStart());
for (uptr i = 0; i < sizeof(tail_magic); i++)
tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
+ if (common_flags()->max_allocation_size_mb) {
+ max_malloc_size = common_flags()->max_allocation_size_mb << 20;
+ max_malloc_size = Min(max_malloc_size, kMaxAllowedMallocSize);
+ } else {
+ max_malloc_size = kMaxAllowedMallocSize;
+ }
}
void HwasanAllocatorLock() { allocator.ForceLock(); }
void HwasanAllocatorUnlock() { allocator.ForceUnlock(); }
-void AllocatorSwallowThreadLocalCache(AllocatorCache *cache) {
+void AllocatorThreadStart(AllocatorCache *cache) { allocator.InitCache(cache); }
+
+void AllocatorThreadFinish(AllocatorCache *cache) {
allocator.SwallowCache(cache);
+ allocator.DestroyCache(cache);
}
static uptr TaggedSize(uptr size) {
@@ -162,13 +182,16 @@ static uptr TaggedSize(uptr size) {
static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
bool zeroise) {
- if (orig_size > kMaxAllowedMallocSize) {
+ // Keep this consistent with LSAN and ASAN behavior.
+ if (UNLIKELY(orig_size == 0))
+ orig_size = 1;
+ if (UNLIKELY(orig_size > max_malloc_size)) {
if (AllocatorMayReturnNull()) {
Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
orig_size);
return nullptr;
}
- ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
+ ReportAllocationSizeTooBig(orig_size, max_malloc_size, stack);
}
if (UNLIKELY(IsRssLimitExceeded())) {
if (AllocatorMayReturnNull())
@@ -194,7 +217,10 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
ReportOutOfMemory(size, stack);
}
if (zeroise) {
- internal_memset(allocated, 0, size);
+ // The secondary allocator mmaps memory, which should be zero-inited so we
+ // don't need to explicitly clear it.
+ if (allocator.FromPrimary(allocated))
+ internal_memset(allocated, 0, size);
} else if (flags()->max_malloc_fill_size > 0) {
uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
@@ -239,7 +265,7 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
: __lsan::kDirectlyLeaked);
#endif
meta->SetAllocated(StackDepotPut(*stack), orig_size);
- RunMallocHooks(user_ptr, size);
+ RunMallocHooks(user_ptr, orig_size);
return user_ptr;
}
@@ -266,11 +292,7 @@ static bool CheckInvalidFree(StackTrace *stack, void *untagged_ptr,
static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
CHECK(tagged_ptr);
- RunFreeHooks(tagged_ptr);
-
- bool in_taggable_region =
- InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr));
- void *untagged_ptr = in_taggable_region ? UntagPtr(tagged_ptr) : tagged_ptr;
+ void *untagged_ptr = UntagPtr(tagged_ptr);
if (CheckInvalidFree(stack, untagged_ptr, tagged_ptr))
return;
@@ -284,9 +306,16 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
return;
}
+
+ RunFreeHooks(tagged_ptr);
+
uptr orig_size = meta->GetRequestedSize();
u32 free_context_id = StackDepotPut(*stack);
u32 alloc_context_id = meta->GetAllocStackId();
+ u32 alloc_thread_id = meta->GetAllocThreadId();
+
+ bool in_taggable_region =
+ InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr));
// Check tail magic.
uptr tagged_size = TaggedSize(orig_size);
@@ -338,8 +367,9 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
if (t) {
allocator.Deallocate(t->allocator_cache(), aligned_ptr);
if (auto *ha = t->heap_allocations())
- ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
- free_context_id, static_cast<u32>(orig_size)});
+ ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_thread_id,
+ alloc_context_id, free_context_id,
+ static_cast<u32>(orig_size)});
} else {
SpinMutexLock l(&fallback_mutex);
AllocatorCache *cache = &fallback_allocator_cache;
@@ -349,10 +379,7 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
uptr new_size, uptr alignment) {
- void *untagged_ptr_old =
- InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr_old))
- ? UntagPtr(tagged_ptr_old)
- : tagged_ptr_old;
+ void *untagged_ptr_old = UntagPtr(tagged_ptr_old);
if (CheckInvalidFree(stack, untagged_ptr_old, tagged_ptr_old))
return nullptr;
void *tagged_ptr_new =
@@ -360,9 +387,9 @@ static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
if (tagged_ptr_old && tagged_ptr_new) {
Metadata *meta =
reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
- internal_memcpy(
- UntagPtr(tagged_ptr_new), untagged_ptr_old,
- Min(new_size, static_cast<uptr>(meta->GetRequestedSize())));
+ void *untagged_ptr_new = UntagPtr(tagged_ptr_new);
+ internal_memcpy(untagged_ptr_new, untagged_ptr_old,
+ Min(new_size, static_cast<uptr>(meta->GetRequestedSize())));
HwasanDeallocate(stack, tagged_ptr_old);
}
return tagged_ptr_new;
@@ -388,15 +415,42 @@ HwasanChunkView FindHeapChunkByAddress(uptr address) {
return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}
-static uptr AllocationSize(const void *tagged_ptr) {
- const void *untagged_ptr = UntagPtr(tagged_ptr);
+static const void *AllocationBegin(const void *p) {
+ const void *untagged_ptr = UntagPtr(p);
+ if (!untagged_ptr)
+ return nullptr;
+
+ const void *beg = allocator.GetBlockBegin(untagged_ptr);
+ if (!beg)
+ return nullptr;
+
+ Metadata *b = (Metadata *)allocator.GetMetaData(beg);
+ if (b->GetRequestedSize() == 0)
+ return nullptr;
+
+ tag_t tag = GetTagFromPointer((uptr)p);
+ return (const void *)AddTagToPointer((uptr)beg, tag);
+}
+
+static uptr AllocationSize(const void *p) {
+ const void *untagged_ptr = UntagPtr(p);
if (!untagged_ptr) return 0;
const void *beg = allocator.GetBlockBegin(untagged_ptr);
- Metadata *b = (Metadata *)allocator.GetMetaData(untagged_ptr);
- if (beg != untagged_ptr) return 0;
+ if (!beg)
+ return 0;
+ Metadata *b = (Metadata *)allocator.GetMetaData(beg);
return b->GetRequestedSize();
}
+static uptr AllocationSizeFast(const void *p) {
+ const void *untagged_ptr = UntagPtr(p);
+ void *aligned_ptr = reinterpret_cast<void *>(
+ RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
+ Metadata *meta =
+ reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
+ return meta->GetRequestedSize();
+}
+
void *hwasan_malloc(uptr size, StackTrace *stack) {
return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
}
@@ -502,7 +556,7 @@ void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
}
uptr PointsIntoChunk(void *p) {
- p = __hwasan::InTaggableRegion(reinterpret_cast<uptr>(p)) ? UntagPtr(p) : p;
+ p = UntagPtr(p);
uptr addr = reinterpret_cast<uptr>(p);
uptr chunk =
reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBeginFastLocked(p));
@@ -520,8 +574,7 @@ uptr PointsIntoChunk(void *p) {
}
uptr GetUserBegin(uptr chunk) {
- if (__hwasan::InTaggableRegion(chunk))
- CHECK_EQ(UntagAddr(chunk), chunk);
+ CHECK_EQ(UntagAddr(chunk), chunk);
void *block = __hwasan::allocator.GetBlockBeginFastLocked(
reinterpret_cast<void *>(chunk));
if (!block)
@@ -534,9 +587,15 @@ uptr GetUserBegin(uptr chunk) {
return reinterpret_cast<uptr>(block);
}
+uptr GetUserAddr(uptr chunk) {
+ if (!InTaggableRegion(chunk))
+ return chunk;
+ tag_t mem_tag = *(tag_t *)__hwasan::MemToShadow(chunk);
+ return AddTagToPointer(chunk, mem_tag);
+}
+
LsanMetadata::LsanMetadata(uptr chunk) {
- if (__hwasan::InTaggableRegion(chunk))
- CHECK_EQ(UntagAddr(chunk), chunk);
+ CHECK_EQ(UntagAddr(chunk), chunk);
metadata_ =
chunk ? __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk))
: nullptr;
@@ -573,11 +632,10 @@ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
__hwasan::allocator.ForEachChunk(callback, arg);
}
-IgnoreObjectResult IgnoreObjectLocked(const void *p) {
- p = __hwasan::InTaggableRegion(reinterpret_cast<uptr>(p)) ? UntagPtr(p) : p;
+IgnoreObjectResult IgnoreObject(const void *p) {
+ p = UntagPtr(p);
uptr addr = reinterpret_cast<uptr>(p);
- uptr chunk =
- reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBeginFastLocked(p));
+ uptr chunk = reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBegin(p));
if (!chunk)
return kIgnoreObjectInvalid;
__hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
@@ -625,4 +683,17 @@ uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
+const void *__sanitizer_get_allocated_begin(const void *p) {
+ return AllocationBegin(p);
+}
+
uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
+
+uptr __sanitizer_get_allocated_size_fast(const void *p) {
+ DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
+ uptr ret = AllocationSizeFast(p);
+ DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
+ return ret;
+}
+
+void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }
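The introspection entry points above pair with the existing ones declared in <sanitizer/allocator_interface.h>. A minimal usage sketch, assuming a program built with -fsanitize=hwaddress against this release's headers; note the fast variant is only valid for a pointer that is itself the allocation begin, which the DCHECKs above enforce:

#include <sanitizer/allocator_interface.h>
#include <stdio.h>
#include <stdlib.h>

int main() {
  char *p = (char *)malloc(100);
  // An interior pointer maps back to the (tagged) start of the allocation.
  const void *beg = __sanitizer_get_allocated_begin(p + 10);
  // Only allocation-begin pointers may be passed to the fast variant.
  size_t n = __sanitizer_get_allocated_size_fast(beg);
  printf("owned=%d begin=%p size=%zu\n", __sanitizer_get_ownership(p),
         (void *)beg, n);
  free(p);
  return 0;
}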
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.h
index 67982cad2543..2ada2a0b1851 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.h
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.h
@@ -46,6 +46,7 @@ struct Metadata {
inline bool IsAllocated() const;
inline u64 GetRequestedSize() const;
inline u32 GetAllocStackId() const;
+ inline u32 GetAllocThreadId() const;
inline void SetLsanTag(__lsan::ChunkTag tag);
inline __lsan::ChunkTag GetLsanTag() const;
};
@@ -53,6 +54,10 @@ static_assert(sizeof(Metadata) == 16);
struct HwasanMapUnmapCallback {
void OnMap(uptr p, uptr size) const { UpdateMemoryUsage(); }
+ void OnMapSecondary(uptr p, uptr size, uptr user_begin,
+ uptr user_size) const {
+ UpdateMemoryUsage();
+ }
void OnUnmap(uptr p, uptr size) const {
// We are about to unmap a chunk of user memory.
// It can return as user-requested mmap() or another thread stack.
@@ -68,20 +73,27 @@ struct AP64 {
#if defined(HWASAN_ALIASING_MODE)
static const uptr kSpaceSize = 1ULL << kAddressTagShift;
+ typedef __sanitizer::DefaultSizeClassMap SizeClassMap;
+#elif SANITIZER_LINUX && !SANITIZER_ANDROID
+ static const uptr kSpaceSize = 0x40000000000ULL; // 4T.
+ typedef __sanitizer::DefaultSizeClassMap SizeClassMap;
#else
- static const uptr kSpaceSize = 0x2000000000ULL;
+ static const uptr kSpaceSize = 0x2000000000ULL; // 128G.
+ typedef __sanitizer::VeryDenseSizeClassMap SizeClassMap;
#endif
+
static const uptr kMetadataSize = sizeof(Metadata);
- typedef __sanitizer::VeryDenseSizeClassMap SizeClassMap;
using AddressSpaceView = LocalAddressSpaceView;
typedef HwasanMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
+
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;
-void AllocatorSwallowThreadLocalCache(AllocatorCache *cache);
+void AllocatorThreadStart(AllocatorCache *cache);
+void AllocatorThreadFinish(AllocatorCache *cache);
class HwasanChunkView {
public:
@@ -94,6 +106,7 @@ class HwasanChunkView {
uptr UsedSize() const; // Size requested by the user
uptr ActualSize() const; // Size allocated by the allocator.
u32 GetAllocStackId() const;
+ u32 GetAllocThreadId() const;
bool FromSmallHeap() const;
bool AddrIsInside(uptr addr) const;
@@ -107,29 +120,18 @@ HwasanChunkView FindHeapChunkByAddress(uptr address);
// Information about one (de)allocation that happened in the past.
// These are recorded in a thread-local ring buffer.
-// TODO: this is currently 24 bytes (20 bytes + alignment).
-// Compress it to 16 bytes or extend it to be more useful.
struct HeapAllocationRecord {
uptr tagged_addr;
- u32 alloc_context_id;
- u32 free_context_id;
- u32 requested_size;
+ u32 alloc_thread_id;
+ u32 alloc_context_id;
+ u32 free_context_id;
+ u32 requested_size;
};
typedef RingBuffer<HeapAllocationRecord> HeapAllocationsRingBuffer;
void GetAllocatorStats(AllocatorStatCounters s);
-inline bool InTaggableRegion(uptr addr) {
-#if defined(HWASAN_ALIASING_MODE)
- // Aliases are mapped next to shadow so that the upper bits match the shadow
- // base.
- return (addr >> kTaggableRegionCheckShift) ==
- (GetShadowOffset() >> kTaggableRegionCheckShift);
-#endif
- return true;
-}
-
} // namespace __hwasan
#endif // HWASAN_ALLOCATOR_H
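The deleted TODO about compressing HeapAllocationRecord is resolved by the widened layout itself: with the new alloc_thread_id field the record is exactly its 24 bytes of payload on 64-bit targets, with no tail padding. A sketch verifying that layout (mirrors the struct above; assumes 64-bit pointers):

#include <cstdint>
using uptr = uintptr_t;
using u32 = uint32_t;

struct HeapAllocationRecord {
  uptr tagged_addr;      // 8 bytes
  u32 alloc_thread_id;   // +4
  u32 alloc_context_id;  // +4
  u32 free_context_id;   // +4
  u32 requested_size;    // +4 -> 24 bytes, alignment 8, no padding
};
static_assert(sizeof(HeapAllocationRecord) == 24, "record is fully packed");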
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_checks.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_checks.h
index b0b37d7a2e2b..0911af30dcb8 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_checks.h
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_checks.h
@@ -15,17 +15,49 @@
#include "hwasan_allocator.h"
#include "hwasan_mapping.h"
+#include "hwasan_registers.h"
#include "sanitizer_common/sanitizer_common.h"
namespace __hwasan {
-template <unsigned X>
+
+enum class ErrorAction { Abort, Recover };
+enum class AccessType { Load, Store };
+
+// Used when the access size is known.
+constexpr unsigned SigTrapEncoding(ErrorAction EA, AccessType AT,
+ unsigned LogSize) {
+ return 0x20 * (EA == ErrorAction::Recover) +
+ 0x10 * (AT == AccessType::Store) + LogSize;
+}
+
+// Used when the access size varies at runtime.
+constexpr unsigned SigTrapEncoding(ErrorAction EA, AccessType AT) {
+ return SigTrapEncoding(EA, AT, 0xf);
+}
+
+template <ErrorAction EA, AccessType AT, size_t LogSize>
__attribute__((always_inline)) static void SigTrap(uptr p) {
-#if defined(__aarch64__)
+  // Other platforms like Linux can use signals to intercept the exception and
+  // dispatch to HandleTagMismatch. The Fuchsia implementation doesn't use
+  // signals, so we can call it directly here instead.
+#if CAN_GET_REGISTERS && SANITIZER_FUCHSIA
+ auto regs = GetRegisters();
+ size_t size = 2 << LogSize;
+ AccessInfo access_info = {
+ .addr = p,
+ .size = size,
+ .is_store = AT == AccessType::Store,
+ .is_load = AT == AccessType::Load,
+ .recover = EA == ErrorAction::Recover,
+ };
+ HandleTagMismatch(access_info, (uptr)__builtin_return_address(0),
+ (uptr)__builtin_frame_address(0), /*uc=*/nullptr, regs.x);
+#elif defined(__aarch64__)
(void)p;
// 0x900 is added so as not to interfere with the kernel's use of lower brk
// immediate values.
register uptr x0 asm("x0") = p;
- asm("brk %1\n\t" ::"r"(x0), "n"(0x900 + X));
+ asm("brk %1\n\t" ::"r"(x0), "n"(0x900 + SigTrapEncoding(EA, AT, LogSize)));
#elif defined(__x86_64__)
// INT3 + NOP DWORD ptr [EAX + X] to pass X to our signal handler, 5 bytes
// total. The pointer is passed via rdi.
@@ -34,7 +66,7 @@ __attribute__((always_inline)) static void SigTrap(uptr p) {
// different nop command, the three bytes one).
asm volatile(
"int3\n"
- "nopl %c0(%%rax)\n" ::"n"(0x40 + X),
+ "nopl %c0(%%rax)\n" ::"n"(0x40 + SigTrapEncoding(EA, AT, LogSize)),
"D"(p));
#elif SANITIZER_RISCV64
// Put pointer into x10
@@ -44,7 +76,7 @@ __attribute__((always_inline)) static void SigTrap(uptr p) {
asm volatile(
"ebreak\n"
"addiw x0, x0, %1\n" ::"r"(x10),
- "I"(0x40 + X));
+ "I"(0x40 + SigTrapEncoding(EA, AT, LogSize)));
#else
// FIXME: not always sigill.
__builtin_trap();
@@ -53,17 +85,31 @@ __attribute__((always_inline)) static void SigTrap(uptr p) {
}
// Version with access size which is not power of 2
-template <unsigned X>
+template <ErrorAction EA, AccessType AT>
__attribute__((always_inline)) static void SigTrap(uptr p, uptr size) {
-#if defined(__aarch64__)
+  // Other platforms like Linux can use signals to intercept the exception and
+  // dispatch to HandleTagMismatch. The Fuchsia implementation doesn't use
+  // signals, so we can call it directly here instead.
+#if CAN_GET_REGISTERS && SANITIZER_FUCHSIA
+ auto regs = GetRegisters();
+ AccessInfo access_info = {
+ .addr = p,
+ .size = size,
+ .is_store = AT == AccessType::Store,
+ .is_load = AT == AccessType::Load,
+ .recover = EA == ErrorAction::Recover,
+ };
+ HandleTagMismatch(access_info, (uptr)__builtin_return_address(0),
+ (uptr)__builtin_frame_address(0), /*uc=*/nullptr, regs.x);
+#elif defined(__aarch64__)
register uptr x0 asm("x0") = p;
register uptr x1 asm("x1") = size;
- asm("brk %2\n\t" ::"r"(x0), "r"(x1), "n"(0x900 + X));
+ asm("brk %2\n\t" ::"r"(x0), "r"(x1), "n"(0x900 + SigTrapEncoding(EA, AT)));
#elif defined(__x86_64__)
// Size is stored in rsi.
asm volatile(
"int3\n"
- "nopl %c0(%%rax)\n" ::"n"(0x40 + X),
+ "nopl %c0(%%rax)\n" ::"n"(0x40 + SigTrapEncoding(EA, AT)),
"D"(p), "S"(size));
#elif SANITIZER_RISCV64
// Put access size into x11
@@ -72,15 +118,29 @@ __attribute__((always_inline)) static void SigTrap(uptr p, uptr size) {
asm volatile(
"ebreak\n"
"addiw x0, x0, %2\n" ::"r"(x10),
- "r"(x11), "I"(0x40 + X));
+ "r"(x11), "I"(0x40 + SigTrapEncoding(EA, AT)));
#else
__builtin_trap();
#endif
// __builtin_unreachable();
}
-__attribute__((always_inline, nodebug)) static bool PossiblyShortTagMatches(
- tag_t mem_tag, uptr ptr, uptr sz) {
+__attribute__((always_inline, nodebug)) static inline uptr ShortTagSize(
+ tag_t mem_tag, uptr ptr) {
+ DCHECK(IsAligned(ptr, kShadowAlignment));
+ tag_t ptr_tag = GetTagFromPointer(ptr);
+ if (ptr_tag == mem_tag)
+ return kShadowAlignment;
+ if (!mem_tag || mem_tag >= kShadowAlignment)
+ return 0;
+ if (*(u8 *)(ptr | (kShadowAlignment - 1)) != ptr_tag)
+ return 0;
+ return mem_tag;
+}
+
+__attribute__((always_inline, nodebug)) static inline bool
+PossiblyShortTagMatches(tag_t mem_tag, uptr ptr, uptr sz) {
+ DCHECK(IsAligned(ptr, kShadowAlignment));
tag_t ptr_tag = GetTagFromPointer(ptr);
if (ptr_tag == mem_tag)
return true;
@@ -88,15 +148,9 @@ __attribute__((always_inline, nodebug)) static bool PossiblyShortTagMatches(
return false;
if ((ptr & (kShadowAlignment - 1)) + sz > mem_tag)
return false;
-#if !defined(__aarch64__) && !(SANITIZER_RISCV64)
- ptr = UntagAddr(ptr);
-#endif
return *(u8 *)(ptr | (kShadowAlignment - 1)) == ptr_tag;
}
-enum class ErrorAction { Abort, Recover };
-enum class AccessType { Load, Store };
-
template <ErrorAction EA, AccessType AT, unsigned LogSize>
__attribute__((always_inline, nodebug)) static void CheckAddress(uptr p) {
if (!InTaggableRegion(p))
@@ -104,8 +158,7 @@ __attribute__((always_inline, nodebug)) static void CheckAddress(uptr p) {
uptr ptr_raw = p & ~kAddressTagMask;
tag_t mem_tag = *(tag_t *)MemToShadow(ptr_raw);
if (UNLIKELY(!PossiblyShortTagMatches(mem_tag, p, 1 << LogSize))) {
- SigTrap<0x20 * (EA == ErrorAction::Recover) +
- 0x10 * (AT == AccessType::Store) + LogSize>(p);
+ SigTrap<EA, AT, LogSize>(p);
if (EA == ErrorAction::Abort)
__builtin_unreachable();
}
@@ -122,18 +175,16 @@ __attribute__((always_inline, nodebug)) static void CheckAddressSized(uptr p,
tag_t *shadow_last = (tag_t *)MemToShadow(ptr_raw + sz);
for (tag_t *t = shadow_first; t < shadow_last; ++t)
if (UNLIKELY(ptr_tag != *t)) {
- SigTrap<0x20 * (EA == ErrorAction::Recover) +
- 0x10 * (AT == AccessType::Store) + 0xf>(p, sz);
+ SigTrap<EA, AT>(p, sz);
if (EA == ErrorAction::Abort)
__builtin_unreachable();
}
uptr end = p + sz;
- uptr tail_sz = end & 0xf;
+ uptr tail_sz = end & (kShadowAlignment - 1);
if (UNLIKELY(tail_sz != 0 &&
!PossiblyShortTagMatches(
*shadow_last, end & ~(kShadowAlignment - 1), tail_sz))) {
- SigTrap<0x20 * (EA == ErrorAction::Recover) +
- 0x10 * (AT == AccessType::Store) + 0xf>(p, sz);
+ SigTrap<EA, AT>(p, sz);
if (EA == ErrorAction::Abort)
__builtin_unreachable();
}
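For reference, the immediate built by SigTrapEncoding is what the report side decodes out of the brk/int3 instruction: bit 5 selects recover, bit 4 selects store, and the low four bits carry log2 of the access size, with 0xf reserved for sizes passed in a register. A decoder sketch with illustrative names (not part of the runtime):

struct DecodedAccess {
  bool recover;
  bool is_store;
  unsigned log_size;  // 0xf => size was passed in a register instead
};

// Inverse of SigTrapEncoding above.
constexpr DecodedAccess DecodeSigTrap(unsigned imm) {
  return DecodedAccess{(imm & 0x20) != 0, (imm & 0x10) != 0, imm & 0xf};
}

static_assert(DecodeSigTrap(0x20 + 0x10 + 3).log_size == 3, "round-trips");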
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_exceptions.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_exceptions.cpp
index c9968a5e3603..bf700bf56838 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_exceptions.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_exceptions.cpp
@@ -62,7 +62,8 @@ __hwasan_personality_wrapper(int version, _Unwind_Action actions,
#error Unsupported architecture
#endif
uptr sp = get_cfa(context);
- TagMemory(sp, fp - sp, 0);
+ TagMemory(UntagAddr(sp), UntagAddr(fp) - UntagAddr(sp),
+ GetTagFromPointer(sp));
}
return rc;
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_flags.inc b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_flags.inc
index 4a226ee2ab8a..978fa46b705c 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_flags.inc
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_flags.inc
@@ -23,6 +23,9 @@ HWASAN_FLAG(bool, tag_in_free, true, "")
HWASAN_FLAG(bool, print_stats, false, "")
HWASAN_FLAG(bool, halt_on_error, true, "")
HWASAN_FLAG(bool, atexit, false, "")
+HWASAN_FLAG(
+ bool, print_live_threads_info, true,
+ "If set, prints the remaining threads in report as an extra information.")
// Test only flag to disable malloc/realloc/free memory tagging on startup.
// Tagging can be reenabled with __hwasan_enable_allocator_tagging().
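Like the other entries in this file, the new flag follows the standard sanitizer runtime-flag conventions, so it can be toggled at startup without rebuilding, e.g. HWASAN_OPTIONS=print_live_threads_info=0 (standard HWASAN_OPTIONS parsing assumed).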
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.cpp
index d71bcd792e1f..7e0f3df20dd0 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.cpp
@@ -13,6 +13,8 @@
#include "hwasan_globals.h"
+#include "sanitizer_common/sanitizer_array_ref.h"
+
namespace __hwasan {
enum { NT_LLVM_HWASAN_GLOBALS = 3 };
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.h
index fd7adf7a0588..94cd53e1888c 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.h
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.h
@@ -16,6 +16,7 @@
#include <link.h>
+#include "sanitizer_common/sanitizer_array_ref.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interceptors.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interceptors.cpp
index 05bf3f29eca3..1a49320b0719 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interceptors.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interceptors.cpp
@@ -14,42 +14,326 @@
// sanitizer_common/sanitizer_common_interceptors.h
//===----------------------------------------------------------------------===//
-#include "interception/interception.h"
+#define SANITIZER_COMMON_NO_REDEFINE_BUILTINS
+
#include "hwasan.h"
+#include "hwasan_allocator.h"
+#include "hwasan_checks.h"
+#include "hwasan_platform_interceptors.h"
#include "hwasan_thread.h"
+#include "hwasan_thread_list.h"
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#if !SANITIZER_FUCHSIA
using namespace __hwasan;
-#if HWASAN_WITH_INTERCEPTORS
+# if !SANITIZER_APPLE
+# define HWASAN_INTERCEPT_FUNC(name) \
+ do { \
+ if (!INTERCEPT_FUNCTION(name)) \
+ VReport(1, "HWAddressSanitizer: failed to intercept '%s'\n", #name); \
+ } while (0)
+# define HWASAN_INTERCEPT_FUNC_VER(name, ver) \
+ do { \
+ if (!INTERCEPT_FUNCTION_VER(name, ver)) \
+ VReport(1, "HWAddressSanitizer: failed to intercept '%s@@%s'\n", \
+ #name, ver); \
+ } while (0)
+# define HWASAN_INTERCEPT_FUNC_VER_UNVERSIONED_FALLBACK(name, ver) \
+ do { \
+ if (!INTERCEPT_FUNCTION_VER(name, ver) && !INTERCEPT_FUNCTION(name)) \
+ VReport( \
+ 1, "HWAddressSanitizer: failed to intercept '%s@@%s' or '%s'\n", \
+ #name, ver, #name); \
+ } while (0)
+
+# else
+// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION.
+# define HWASAN_INTERCEPT_FUNC(name)
+# endif // SANITIZER_APPLE
+
+# if HWASAN_WITH_INTERCEPTORS
+
+# define COMMON_SYSCALL_PRE_READ_RANGE(p, s) __hwasan_loadN((uptr)p, (uptr)s)
+# define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
+ __hwasan_storeN((uptr)p, (uptr)s)
+# define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
+ do { \
+ (void)(p); \
+ (void)(s); \
+ } while (false)
+# define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
+ do { \
+ (void)(p); \
+ (void)(s); \
+ } while (false)
+# include "sanitizer_common/sanitizer_common_syscalls.inc"
+# include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
+
+# define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
+ do { \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
+ do { \
+ (void)(ctx); \
+ (void)(ptr); \
+ (void)(size); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
+ do { \
+ (void)(ctx); \
+ (void)(func); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
+ do { \
+ (void)(ctx); \
+ (void)(path); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
+ do { \
+ (void)(ctx); \
+ (void)(fd); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
+ do { \
+ (void)(ctx); \
+ (void)(fd); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
+ do { \
+ (void)(ctx); \
+ (void)(fd); \
+ (void)(newfd); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
+ do { \
+ (void)(ctx); \
+ (void)(name); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
+ do { \
+ (void)(ctx); \
+ (void)(thread); \
+ (void)(name); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_BLOCK_REAL(name) \
+ do { \
+ (void)(name); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \
+ do { \
+ (void)(ctx); \
+ (void)(to); \
+ (void)(from); \
+ (void)(size); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \
+ do { \
+ (void)(ctx); \
+ (void)(to); \
+ (void)(from); \
+ (void)(size); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \
+ do { \
+ (void)(ctx); \
+ (void)(block); \
+ (void)(c); \
+ (void)(size); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_STRERROR() \
+ do { \
+ } while (false)
+
+# define COMMON_INTERCEPT_FUNCTION(name) HWASAN_INTERCEPT_FUNC(name)
+
+# define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!hwasan_inited)
+
+// The main purpose of the mmap interceptor is to prevent the user from
+// allocating on top of shadow pages.
+//
+// For compatibility, it does not tag pointers, nor does it allow
+// MAP_FIXED in combination with a tagged pointer. (Since mmap itself
+// will not return a tagged pointer, the tagged pointer must have come
+// from elsewhere, such as the secondary allocator, which makes it a
+// very odd use case.)
+template <class Mmap>
+static void *mmap_interceptor(Mmap real_mmap, void *addr, SIZE_T length,
+ int prot, int flags, int fd, OFF64_T offset) {
+ if (addr) {
+ if (flags & map_fixed) CHECK_EQ(addr, UntagPtr(addr));
+
+ addr = UntagPtr(addr);
+ }
+ SIZE_T rounded_length = RoundUpTo(length, GetPageSize());
+ void *end_addr = (char *)addr + (rounded_length - 1);
+ if (addr && length &&
+ (!MemIsApp(reinterpret_cast<uptr>(addr)) ||
+ !MemIsApp(reinterpret_cast<uptr>(end_addr)))) {
+ // User requested an address that is incompatible with HWASan's
+ // memory layout. Use a different address if allowed, else fail.
+ if (flags & map_fixed) {
+ errno = errno_EINVAL;
+ return (void *)-1;
+ } else {
+ addr = nullptr;
+ }
+ }
+ void *res = real_mmap(addr, length, prot, flags, fd, offset);
+ if (length && res != (void *)-1) {
+ uptr beg = reinterpret_cast<uptr>(res);
+ DCHECK(IsAligned(beg, GetPageSize()));
+ if (!MemIsApp(beg) || !MemIsApp(beg + rounded_length - 1)) {
+ // Application has attempted to map more memory than is supported by
+ // HWASan. Act as if we ran out of memory.
+ internal_munmap(res, length);
+ errno = errno_ENOMEM;
+ return (void *)-1;
+ }
+ __hwasan::TagMemoryAligned(beg, rounded_length, 0);
+ }
+
+ return res;
+}
+
+template <class Munmap>
+static int munmap_interceptor(Munmap real_munmap, void *addr, SIZE_T length) {
+  // We should not tag if munmap fails, but it's too late to tag after
+  // real_munmap, as the pages could be mmapped by another thread.
+ uptr beg = reinterpret_cast<uptr>(addr);
+ if (length && IsAligned(beg, GetPageSize())) {
+ SIZE_T rounded_length = RoundUpTo(length, GetPageSize());
+ // Protect from unmapping the shadow.
+ if (!MemIsApp(beg) || !MemIsApp(beg + rounded_length - 1)) {
+ errno = errno_EINVAL;
+ return -1;
+ }
+ __hwasan::TagMemoryAligned(beg, rounded_length, 0);
+ }
+ return real_munmap(addr, length);
+}
+
+# define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, length, prot, flags, \
+ fd, offset) \
+ do { \
+ (void)(ctx); \
+ return mmap_interceptor(REAL(mmap), addr, sz, prot, flags, fd, off); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, length) \
+ do { \
+ (void)(ctx); \
+ return munmap_interceptor(REAL(munmap), addr, sz); \
+ } while (false)
+
+# include "sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc"
+# include "sanitizer_common/sanitizer_common_interceptors.inc"
struct ThreadStartArg {
- thread_callback_t callback;
- void *param;
+ __sanitizer_sigset_t starting_sigset_;
};
static void *HwasanThreadStartFunc(void *arg) {
__hwasan_thread_enter();
- ThreadStartArg A = *reinterpret_cast<ThreadStartArg*>(arg);
- UnmapOrDie(arg, GetPageSizeCached());
- return A.callback(A.param);
-}
-
-INTERCEPTOR(int, pthread_create, void *th, void *attr, void *(*callback)(void*),
- void * param) {
- ScopedTaggingDisabler disabler;
- ThreadStartArg *A = reinterpret_cast<ThreadStartArg *> (MmapOrDie(
- GetPageSizeCached(), "pthread_create"));
- *A = {callback, param};
- int res = REAL(pthread_create)(th, attr, &HwasanThreadStartFunc, A);
- return res;
+ SetSigProcMask(&reinterpret_cast<ThreadStartArg *>(arg)->starting_sigset_,
+ nullptr);
+ InternalFree(arg);
+ auto self = GetThreadSelf();
+ auto args = hwasanThreadArgRetval().GetArgs(self);
+ void *retval = (*args.routine)(args.arg_retval);
+ hwasanThreadArgRetval().Finish(self, retval);
+ return retval;
+}
+
+extern "C" {
+int pthread_attr_getdetachstate(void *attr, int *v);
+}
+
+INTERCEPTOR(int, pthread_create, void *thread, void *attr,
+ void *(*callback)(void *), void *param) {
+ EnsureMainThreadIDIsCorrect();
+ ScopedTaggingDisabler tagging_disabler;
+ bool detached = [attr]() {
+ int d = 0;
+ return attr && !pthread_attr_getdetachstate(attr, &d) && IsStateDetached(d);
+ }();
+ ThreadStartArg *A = (ThreadStartArg *)InternalAlloc(sizeof(ThreadStartArg));
+ ScopedBlockSignals block(&A->starting_sigset_);
+ // ASAN uses the same approach to disable leaks from pthread_create.
+# if CAN_SANITIZE_LEAKS
+ __lsan::ScopedInterceptorDisabler lsan_disabler;
+# endif
+
+ int result;
+ hwasanThreadArgRetval().Create(detached, {callback, param}, [&]() -> uptr {
+ result = REAL(pthread_create)(thread, attr, &HwasanThreadStartFunc, A);
+ return result ? 0 : *(uptr *)(thread);
+ });
+ if (result != 0)
+ InternalFree(A);
+ return result;
+}
+
+INTERCEPTOR(int, pthread_join, void *thread, void **retval) {
+ int result;
+ hwasanThreadArgRetval().Join((uptr)thread, [&]() {
+ result = REAL(pthread_join)(thread, retval);
+ return !result;
+ });
+ return result;
+}
+
+INTERCEPTOR(int, pthread_detach, void *thread) {
+ int result;
+ hwasanThreadArgRetval().Detach((uptr)thread, [&]() {
+ result = REAL(pthread_detach)(thread);
+ return !result;
+ });
+ return result;
+}
+
+INTERCEPTOR(int, pthread_exit, void *retval) {
+ hwasanThreadArgRetval().Finish(GetThreadSelf(), retval);
+ return REAL(pthread_exit)(retval);
+}
+
+# if SANITIZER_GLIBC
+INTERCEPTOR(int, pthread_tryjoin_np, void *thread, void **ret) {
+ int result;
+ hwasanThreadArgRetval().Join((uptr)thread, [&]() {
+ result = REAL(pthread_tryjoin_np)(thread, ret);
+ return !result;
+ });
+ return result;
}
-INTERCEPTOR(int, pthread_join, void *t, void **arg) {
- return REAL(pthread_join)(t, arg);
+INTERCEPTOR(int, pthread_timedjoin_np, void *thread, void **ret,
+ const struct timespec *abstime) {
+ int result;
+ hwasanThreadArgRetval().Join((uptr)thread, [&]() {
+ result = REAL(pthread_timedjoin_np)(thread, ret, abstime);
+ return !result;
+ });
+ return result;
}
+# endif
DEFINE_REAL_PTHREAD_FUNCTIONS
@@ -59,13 +343,13 @@ DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(int, vfork)
// Get and/or change the set of blocked signals.
extern "C" int sigprocmask(int __how, const __hw_sigset_t *__restrict __set,
__hw_sigset_t *__restrict __oset);
-#define SIG_BLOCK 0
-#define SIG_SETMASK 2
+# define SIG_BLOCK 0
+# define SIG_SETMASK 2
extern "C" int __sigjmp_save(__hw_sigjmp_buf env, int savemask) {
env[0].__magic = kHwJmpBufMagic;
env[0].__mask_was_saved =
- (savemask && sigprocmask(SIG_BLOCK, (__hw_sigset_t *)0,
- &env[0].__saved_mask) == 0);
+ (savemask &&
+ sigprocmask(SIG_BLOCK, (__hw_sigset_t *)0, &env[0].__saved_mask) == 0);
return 0;
}
@@ -94,26 +378,27 @@ InternalLongjmp(__hw_register_buf env, int retval) {
# if defined(__aarch64__)
register long int retval_tmp asm("x1") = retval;
register void *env_address asm("x0") = &env[0];
- asm volatile("ldp x19, x20, [%0, #0<<3];"
- "ldp x21, x22, [%0, #2<<3];"
- "ldp x23, x24, [%0, #4<<3];"
- "ldp x25, x26, [%0, #6<<3];"
- "ldp x27, x28, [%0, #8<<3];"
- "ldp x29, x30, [%0, #10<<3];"
- "ldp d8, d9, [%0, #14<<3];"
- "ldp d10, d11, [%0, #16<<3];"
- "ldp d12, d13, [%0, #18<<3];"
- "ldp d14, d15, [%0, #20<<3];"
- "ldr x5, [%0, #13<<3];"
- "mov sp, x5;"
- // Return the value requested to return through arguments.
- // This should be in x1 given what we requested above.
- "cmp %1, #0;"
- "mov x0, #1;"
- "csel x0, %1, x0, ne;"
- "br x30;"
- : "+r"(env_address)
- : "r"(retval_tmp));
+ asm volatile(
+ "ldp x19, x20, [%0, #0<<3];"
+ "ldp x21, x22, [%0, #2<<3];"
+ "ldp x23, x24, [%0, #4<<3];"
+ "ldp x25, x26, [%0, #6<<3];"
+ "ldp x27, x28, [%0, #8<<3];"
+ "ldp x29, x30, [%0, #10<<3];"
+ "ldp d8, d9, [%0, #14<<3];"
+ "ldp d10, d11, [%0, #16<<3];"
+ "ldp d12, d13, [%0, #18<<3];"
+ "ldp d14, d15, [%0, #20<<3];"
+ "ldr x5, [%0, #13<<3];"
+ "mov sp, x5;"
+ // Return the value requested to return through arguments.
+ // This should be in x1 given what we requested above.
+ "cmp %1, #0;"
+ "mov x0, #1;"
+ "csel x0, %1, x0, ne;"
+ "br x30;"
+ : "+r"(env_address)
+ : "r"(retval_tmp));
# elif defined(__x86_64__)
register long int retval_tmp asm("%rsi") = retval;
register void *env_address asm("%rdi") = &env[0];
@@ -189,8 +474,7 @@ INTERCEPTOR(void, siglongjmp, __hw_sigjmp_buf env, int val) {
if (env[0].__mask_was_saved)
// Restore the saved signal mask.
- (void)sigprocmask(SIG_SETMASK, &env[0].__saved_mask,
- (__hw_sigset_t *)0);
+ (void)sigprocmask(SIG_SETMASK, &env[0].__saved_mask, (__hw_sigset_t *)0);
InternalLongjmp(env[0].__jmpbuf, val);
}
@@ -212,8 +496,8 @@ INTERCEPTOR(void, longjmp, __hw_jmp_buf env, int val) {
}
InternalLongjmp(env[0].__jmpbuf, val);
}
-#undef SIG_BLOCK
-#undef SIG_SETMASK
+# undef SIG_BLOCK
+# undef SIG_SETMASK
# endif // HWASAN_WITH_INTERCEPTORS
@@ -228,7 +512,7 @@ int OnExit() {
return 0;
}
-} // namespace __hwasan
+} // namespace __hwasan
namespace __hwasan {
@@ -236,19 +520,30 @@ void InitializeInterceptors() {
static int inited = 0;
CHECK_EQ(inited, 0);
-#if HWASAN_WITH_INTERCEPTORS
-#if defined(__linux__)
+ InitializeCommonInterceptors();
+
+ (void)(read_iovec);
+ (void)(write_iovec);
+
+# if HWASAN_WITH_INTERCEPTORS
+# if defined(__linux__)
INTERCEPT_FUNCTION(__libc_longjmp);
INTERCEPT_FUNCTION(longjmp);
INTERCEPT_FUNCTION(siglongjmp);
INTERCEPT_FUNCTION(vfork);
-#endif // __linux__
+# endif // __linux__
INTERCEPT_FUNCTION(pthread_create);
INTERCEPT_FUNCTION(pthread_join);
+ INTERCEPT_FUNCTION(pthread_detach);
+ INTERCEPT_FUNCTION(pthread_exit);
+# if SANITIZER_GLIBC
+ INTERCEPT_FUNCTION(pthread_tryjoin_np);
+ INTERCEPT_FUNCTION(pthread_timedjoin_np);
+# endif
# endif
inited = 1;
}
-} // namespace __hwasan
+} // namespace __hwasan
#endif // #if !SANITIZER_FUCHSIA
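The pthread_join, pthread_detach, pthread_tryjoin_np, and pthread_timedjoin_np interceptors above all share one shape: run the real call inside the ThreadArgRetval registry and report success back, so the registry knows whether the thread's record has been consumed. A condensed sketch of that shape (illustrative helper, not part of the runtime):

// The lambda returns true on success, telling the registry the handle
// has been consumed and its arg/retval record can be retired.
template <class RealFn>
static int JoinLike(void *thread, RealFn real_call) {
  int result;
  hwasanThreadArgRetval().Join((uptr)thread, [&]() {
    result = real_call(thread);  // pthread_* returns 0 on success
    return !result;
  });
  return result;
}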
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interface_internal.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interface_internal.h
index d1ecbb592a21..e7804cc49033 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interface_internal.h
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interface_internal.h
@@ -77,6 +77,32 @@ SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_load16_noabort(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_loadN_match_all(uptr, uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load1_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load2_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load4_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load8_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load16_match_all(uptr, u8);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_loadN_match_all_noabort(uptr, uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load1_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load2_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load4_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load8_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load16_match_all_noabort(uptr, u8);
+
+SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_storeN(uptr, uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_store1(uptr);
@@ -103,6 +129,32 @@ SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_store16_noabort(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_storeN_match_all(uptr, uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store1_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store2_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store4_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store8_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store16_match_all(uptr, u8);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_storeN_match_all_noabort(uptr, uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store1_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store2_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store4_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store8_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store16_match_all_noabort(uptr, u8);
+
+SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_tag_memory(uptr p, u8 tag, uptr sz);
SANITIZER_INTERFACE_ATTRIBUTE
@@ -184,6 +236,13 @@ SANITIZER_INTERFACE_ATTRIBUTE
void *__hwasan_memmove(void *dest, const void *src, uptr n);
SANITIZER_INTERFACE_ATTRIBUTE
+void *__hwasan_memcpy_match_all(void *dst, const void *src, uptr size, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__hwasan_memset_match_all(void *s, int c, uptr n, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__hwasan_memmove_match_all(void *dest, const void *src, uptr n, u8);
+
+SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_set_error_report_callback(void (*callback)(const char *));
} // extern "C"
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_linux.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_linux.cpp
index d3e4b5390e82..6f5e9432974e 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_linux.cpp
@@ -283,7 +283,7 @@ void InitThreads() {
bool MemIsApp(uptr p) {
// Memory outside the alias range has non-zero tags.
# if !defined(HWASAN_ALIASING_MODE)
- CHECK(GetTagFromPointer(p) == 0);
+ CHECK_EQ(GetTagFromPointer(p), 0);
# endif
return (p >= kHighMemStart && p <= kHighMemEnd) ||
@@ -302,8 +302,15 @@ extern "C" void __hwasan_thread_exit() {
Thread *t = GetCurrentThread();
// Make sure that signal handler can not see a stale current thread pointer.
atomic_signal_fence(memory_order_seq_cst);
- if (t)
+ if (t) {
+    // Block async signals on the thread, as signal handlers may be
+    // instrumented. After this point instrumented code can't access essential
+    // data from TLS and will crash.
+ // Bionic already calls __hwasan_thread_exit with blocked signals.
+ if (SANITIZER_GLIBC)
+ BlockSignals();
hwasanThreadList().ReleaseThread(t);
+ }
}
# if HWASAN_WITH_INTERCEPTORS
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_memintrinsics.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_memintrinsics.cpp
index ea7f5ce40b07..16d6f9085924 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_memintrinsics.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_memintrinsics.cpp
@@ -42,3 +42,33 @@ void *__hwasan_memmove(void *to, const void *from, uptr size) {
reinterpret_cast<uptr>(from), size);
return memmove(to, from, size);
}
+
+void *__hwasan_memset_match_all(void *block, int c, uptr size,
+ u8 match_all_tag) {
+ if (GetTagFromPointer(reinterpret_cast<uptr>(block)) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
+ reinterpret_cast<uptr>(block), size);
+ return memset(block, c, size);
+}
+
+void *__hwasan_memcpy_match_all(void *to, const void *from, uptr size,
+ u8 match_all_tag) {
+ if (GetTagFromPointer(reinterpret_cast<uptr>(to)) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
+ reinterpret_cast<uptr>(to), size);
+ if (GetTagFromPointer(reinterpret_cast<uptr>(from)) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Load>(
+ reinterpret_cast<uptr>(from), size);
+ return memcpy(to, from, size);
+}
+
+void *__hwasan_memmove_match_all(void *to, const void *from, uptr size,
+ u8 match_all_tag) {
+ if (GetTagFromPointer(reinterpret_cast<uptr>(to)) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
+ reinterpret_cast<uptr>(to), size);
+ if (GetTagFromPointer(reinterpret_cast<uptr>(from)) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Load>(
+ reinterpret_cast<uptr>(from), size);
+ return memmove(to, from, size);
+}
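Each _match_all variant skips the shadow check entirely when the operand's pointer tag equals the caller-supplied match-all tag, and otherwise behaves like the plain entry point. A caller-side sketch, assuming a module configured with a match-all tag of 0 (the tag value comes from the compiler configuration, not the runtime):

#include <cstdint>
using uptr = uintptr_t;  // sanitizer-internal typedefs, spelled out here
using u8 = uint8_t;

extern "C" void *__hwasan_memcpy_match_all(void *to, const void *from,
                                           uptr size, u8 match_all_tag);

void *CopyWithMatchAll(void *dst, const void *src, uptr n) {
  const u8 kMatchAllTag = 0;  // assumption: module built with match-all tag 0
  return __hwasan_memcpy_match_all(dst, src, n, kMatchAllTag);
}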
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_new_delete.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_new_delete.cpp
index 495046a754f1..f0fd3726ef1b 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_new_delete.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_new_delete.cpp
@@ -92,6 +92,14 @@ INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
void *ptr, std::nothrow_t const &) {
OPERATOR_DELETE_BODY;
}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
+ void *ptr, size_t) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
+ void *ptr, size_t) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
#endif // OPERATOR_NEW_BODY
@@ -134,5 +142,21 @@ INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
void *ptr, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
OPERATOR_DELETE_BODY;
}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
+ void *ptr, size_t, std::align_val_t) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
+ void *ptr, size_t, std::align_val_t) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
+ void *ptr, size_t, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
+ void *ptr, size_t, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
#endif // OPERATOR_NEW_ALIGN_BODY
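The added overloads cover C++14 sized deallocation, plus the aligned C++17 forms, which compilers emit when the complete type is known at the delete expression. A small sketch of code that exercises the sized, aligned path (assumes sized deallocation is enabled; GCC turns it on by default in C++14 mode, Clang has historically required -fsized-deallocation):

struct alignas(32) Wide { char buf[64]; };

void Demo() {
  Wide *w = new Wide;  // over-aligned, so the aligned operator new runs
  // With sized deallocation enabled, this may lower to
  // operator delete(ptr, sizeof(Wide), std::align_val_t{32}).
  delete w;
}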
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_platform_interceptors.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_platform_interceptors.h
new file mode 100644
index 000000000000..33ae70a4ded9
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_platform_interceptors.h
@@ -0,0 +1,1001 @@
+#ifndef HWASAN_PLATFORM_INTERCEPTORS_H
+#define HWASAN_PLATFORM_INTERCEPTORS_H
+
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+
+// This file cancels out most of the sanitizer_common interception, thus
+// allowing HWASan to selectively reuse some of the interceptors.
+//
+// To re-enable sanitizer_common's interception of a function, comment out
+// the corresponding '#undef SANITIZER_INTERCEPT_fn' and
+// '#define SANITIZER_INTERCEPT_fn 0':
+// - We prefer to comment out rather than delete the lines, to show that
+// it is deliberate, rather than an accidental omission.
+// - We do not use '#define SANITIZER_INTERCEPT_fn 1', because
+//   interception is usually conditional (e.g., based on SI_POSIX); we let
+//   the condition in sanitizer_platform_interceptors.h take effect.
+
+// Originally generated with:
+// cat ../sanitizer_common/sanitizer_platform_interceptors.h | grep '^#define SANITIZER_INTERCEPT' | cut -d ' ' -f 2 | while read x; do echo "#undef $x"; echo "#define $x 0"; echo; done
+#undef SANITIZER_INTERCEPT_STRLEN
+#define SANITIZER_INTERCEPT_STRLEN 0
+
+#undef SANITIZER_INTERCEPT_STRNLEN
+#define SANITIZER_INTERCEPT_STRNLEN 0
+
+#undef SANITIZER_INTERCEPT_STRCMP
+#define SANITIZER_INTERCEPT_STRCMP 0
+
+#undef SANITIZER_INTERCEPT_STRSTR
+#define SANITIZER_INTERCEPT_STRSTR 0
+
+#undef SANITIZER_INTERCEPT_STRCASESTR
+#define SANITIZER_INTERCEPT_STRCASESTR 0
+
+#undef SANITIZER_INTERCEPT_STRTOK
+#define SANITIZER_INTERCEPT_STRTOK 0
+
+#undef SANITIZER_INTERCEPT_STRCHR
+#define SANITIZER_INTERCEPT_STRCHR 0
+
+#undef SANITIZER_INTERCEPT_STRCHRNUL
+#define SANITIZER_INTERCEPT_STRCHRNUL 0
+
+#undef SANITIZER_INTERCEPT_STRRCHR
+#define SANITIZER_INTERCEPT_STRRCHR 0
+
+#undef SANITIZER_INTERCEPT_STRSPN
+#define SANITIZER_INTERCEPT_STRSPN 0
+
+#undef SANITIZER_INTERCEPT_STRPBRK
+#define SANITIZER_INTERCEPT_STRPBRK 0
+
+#undef SANITIZER_INTERCEPT_TEXTDOMAIN
+#define SANITIZER_INTERCEPT_TEXTDOMAIN 0
+
+#undef SANITIZER_INTERCEPT_STRCASECMP
+#define SANITIZER_INTERCEPT_STRCASECMP 0
+
+#undef SANITIZER_INTERCEPT_MEMSET
+#define SANITIZER_INTERCEPT_MEMSET 0
+
+#undef SANITIZER_INTERCEPT_MEMMOVE
+#define SANITIZER_INTERCEPT_MEMMOVE 0
+
+#undef SANITIZER_INTERCEPT_MEMCPY
+#define SANITIZER_INTERCEPT_MEMCPY 0
+
+#undef SANITIZER_INTERCEPT_MEMCMP
+#define SANITIZER_INTERCEPT_MEMCMP 0
+
+#undef SANITIZER_INTERCEPT_BCMP
+#define SANITIZER_INTERCEPT_BCMP 0
+
+#undef SANITIZER_INTERCEPT_STRNDUP
+#define SANITIZER_INTERCEPT_STRNDUP 0
+
+#undef SANITIZER_INTERCEPT___STRNDUP
+#define SANITIZER_INTERCEPT___STRNDUP 0
+
+#undef SANITIZER_INTERCEPT_MEMMEM
+#define SANITIZER_INTERCEPT_MEMMEM 0
+
+#undef SANITIZER_INTERCEPT_MEMCHR
+#define SANITIZER_INTERCEPT_MEMCHR 0
+
+#undef SANITIZER_INTERCEPT_MEMRCHR
+#define SANITIZER_INTERCEPT_MEMRCHR 0
+
+#undef SANITIZER_INTERCEPT_READ
+#define SANITIZER_INTERCEPT_READ 0
+
+#undef SANITIZER_INTERCEPT_PREAD
+#define SANITIZER_INTERCEPT_PREAD 0
+
+#undef SANITIZER_INTERCEPT_WRITE
+#define SANITIZER_INTERCEPT_WRITE 0
+
+#undef SANITIZER_INTERCEPT_PWRITE
+#define SANITIZER_INTERCEPT_PWRITE 0
+
+#undef SANITIZER_INTERCEPT_FREAD
+#define SANITIZER_INTERCEPT_FREAD 0
+
+#undef SANITIZER_INTERCEPT_FWRITE
+#define SANITIZER_INTERCEPT_FWRITE 0
+
+#undef SANITIZER_INTERCEPT_FGETS
+#define SANITIZER_INTERCEPT_FGETS 0
+
+#undef SANITIZER_INTERCEPT_FPUTS
+#define SANITIZER_INTERCEPT_FPUTS 0
+
+#undef SANITIZER_INTERCEPT_PUTS
+#define SANITIZER_INTERCEPT_PUTS 0
+
+#undef SANITIZER_INTERCEPT_PREAD64
+#define SANITIZER_INTERCEPT_PREAD64 0
+
+#undef SANITIZER_INTERCEPT_PWRITE64
+#define SANITIZER_INTERCEPT_PWRITE64 0
+
+#undef SANITIZER_INTERCEPT_READV
+#define SANITIZER_INTERCEPT_READV 0
+
+#undef SANITIZER_INTERCEPT_WRITEV
+#define SANITIZER_INTERCEPT_WRITEV 0
+
+#undef SANITIZER_INTERCEPT_PREADV
+#define SANITIZER_INTERCEPT_PREADV 0
+
+#undef SANITIZER_INTERCEPT_PWRITEV
+#define SANITIZER_INTERCEPT_PWRITEV 0
+
+#undef SANITIZER_INTERCEPT_PREADV64
+#define SANITIZER_INTERCEPT_PREADV64 0
+
+#undef SANITIZER_INTERCEPT_PWRITEV64
+#define SANITIZER_INTERCEPT_PWRITEV64 0
+
+#undef SANITIZER_INTERCEPT_PRCTL
+#define SANITIZER_INTERCEPT_PRCTL 0
+
+#undef SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS
+#define SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS 0
+
+#undef SANITIZER_INTERCEPT_STRPTIME
+#define SANITIZER_INTERCEPT_STRPTIME 0
+
+#undef SANITIZER_INTERCEPT_SCANF
+#define SANITIZER_INTERCEPT_SCANF 0
+
+#undef SANITIZER_INTERCEPT_ISOC99_SCANF
+#define SANITIZER_INTERCEPT_ISOC99_SCANF 0
+
+#undef SANITIZER_INTERCEPT_PRINTF
+#define SANITIZER_INTERCEPT_PRINTF 0
+
+#undef SANITIZER_INTERCEPT_PRINTF_L
+#define SANITIZER_INTERCEPT_PRINTF_L 0
+
+#undef SANITIZER_INTERCEPT_ISOC99_PRINTF
+#define SANITIZER_INTERCEPT_ISOC99_PRINTF 0
+
+#undef SANITIZER_INTERCEPT___PRINTF_CHK
+#define SANITIZER_INTERCEPT___PRINTF_CHK 0
+
+#undef SANITIZER_INTERCEPT_FREXP
+#define SANITIZER_INTERCEPT_FREXP 0
+
+#undef SANITIZER_INTERCEPT_FREXPF_FREXPL
+#define SANITIZER_INTERCEPT_FREXPF_FREXPL 0
+
+#undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
+#define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS 0
+
+#undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
+#define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS 0
+
+#undef SANITIZER_INTERCEPT_GETPWENT
+#define SANITIZER_INTERCEPT_GETPWENT 0
+
+#undef SANITIZER_INTERCEPT_FGETGRENT_R
+#define SANITIZER_INTERCEPT_FGETGRENT_R 0
+
+#undef SANITIZER_INTERCEPT_FGETPWENT
+#define SANITIZER_INTERCEPT_FGETPWENT 0
+
+#undef SANITIZER_INTERCEPT_GETPWENT_R
+#define SANITIZER_INTERCEPT_GETPWENT_R 0
+
+#undef SANITIZER_INTERCEPT_FGETPWENT_R
+#define SANITIZER_INTERCEPT_FGETPWENT_R 0
+
+#undef SANITIZER_INTERCEPT_SETPWENT
+#define SANITIZER_INTERCEPT_SETPWENT 0
+
+#undef SANITIZER_INTERCEPT_CLOCK_GETTIME
+#define SANITIZER_INTERCEPT_CLOCK_GETTIME 0
+
+#undef SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID
+#define SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID 0
+
+#undef SANITIZER_INTERCEPT_GETITIMER
+#define SANITIZER_INTERCEPT_GETITIMER 0
+
+#undef SANITIZER_INTERCEPT_TIME
+#define SANITIZER_INTERCEPT_TIME 0
+
+#undef SANITIZER_INTERCEPT_GLOB
+#define SANITIZER_INTERCEPT_GLOB 0
+
+#undef SANITIZER_INTERCEPT_GLOB64
+#define SANITIZER_INTERCEPT_GLOB64 0
+
+#undef SANITIZER_INTERCEPT___B64_TO
+#define SANITIZER_INTERCEPT___B64_TO 0
+
+#undef SANITIZER_INTERCEPT_DN_COMP_EXPAND
+#define SANITIZER_INTERCEPT_DN_COMP_EXPAND 0
+
+#undef SANITIZER_INTERCEPT_POSIX_SPAWN
+#define SANITIZER_INTERCEPT_POSIX_SPAWN 0
+
+#undef SANITIZER_INTERCEPT_WAIT
+#define SANITIZER_INTERCEPT_WAIT 0
+
+#undef SANITIZER_INTERCEPT_INET
+#define SANITIZER_INTERCEPT_INET 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM
+#define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM 0
+
+#undef SANITIZER_INTERCEPT_GETADDRINFO
+#define SANITIZER_INTERCEPT_GETADDRINFO 0
+
+#undef SANITIZER_INTERCEPT_GETNAMEINFO
+#define SANITIZER_INTERCEPT_GETNAMEINFO 0
+
+#undef SANITIZER_INTERCEPT_GETSOCKNAME
+#define SANITIZER_INTERCEPT_GETSOCKNAME 0
+
+#undef SANITIZER_INTERCEPT_GETHOSTBYNAME
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME 0
+
+#undef SANITIZER_INTERCEPT_GETHOSTBYNAME2
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME2 0
+
+#undef SANITIZER_INTERCEPT_GETHOSTBYNAME_R
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME_R 0
+
+#undef SANITIZER_INTERCEPT_GETHOSTBYNAME2_R
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME2_R 0
+
+#undef SANITIZER_INTERCEPT_GETHOSTBYADDR_R
+#define SANITIZER_INTERCEPT_GETHOSTBYADDR_R 0
+
+#undef SANITIZER_INTERCEPT_GETHOSTENT_R
+#define SANITIZER_INTERCEPT_GETHOSTENT_R 0
+
+#undef SANITIZER_INTERCEPT_GETSOCKOPT
+#define SANITIZER_INTERCEPT_GETSOCKOPT 0
+
+#undef SANITIZER_INTERCEPT_ACCEPT
+#define SANITIZER_INTERCEPT_ACCEPT 0
+
+#undef SANITIZER_INTERCEPT_ACCEPT4
+#define SANITIZER_INTERCEPT_ACCEPT4 0
+
+#undef SANITIZER_INTERCEPT_PACCEPT
+#define SANITIZER_INTERCEPT_PACCEPT 0
+
+#undef SANITIZER_INTERCEPT_MODF
+#define SANITIZER_INTERCEPT_MODF 0
+
+#undef SANITIZER_INTERCEPT_RECVMSG
+#define SANITIZER_INTERCEPT_RECVMSG 0
+
+#undef SANITIZER_INTERCEPT_SENDMSG
+#define SANITIZER_INTERCEPT_SENDMSG 0
+
+#undef SANITIZER_INTERCEPT_RECVMMSG
+#define SANITIZER_INTERCEPT_RECVMMSG 0
+
+#undef SANITIZER_INTERCEPT_SENDMMSG
+#define SANITIZER_INTERCEPT_SENDMMSG 0
+
+#undef SANITIZER_INTERCEPT_SYSMSG
+#define SANITIZER_INTERCEPT_SYSMSG 0
+
+#undef SANITIZER_INTERCEPT_GETPEERNAME
+#define SANITIZER_INTERCEPT_GETPEERNAME 0
+
+#undef SANITIZER_INTERCEPT_IOCTL
+#define SANITIZER_INTERCEPT_IOCTL 0
+
+#undef SANITIZER_INTERCEPT_INET_ATON
+#define SANITIZER_INTERCEPT_INET_ATON 0
+
+#undef SANITIZER_INTERCEPT_SYSINFO
+#define SANITIZER_INTERCEPT_SYSINFO 0
+
+#undef SANITIZER_INTERCEPT_READDIR
+#define SANITIZER_INTERCEPT_READDIR 0
+
+#undef SANITIZER_INTERCEPT_READDIR64
+#define SANITIZER_INTERCEPT_READDIR64 0
+
+#undef SANITIZER_INTERCEPT_PTRACE
+#define SANITIZER_INTERCEPT_PTRACE 0
+
+#undef SANITIZER_INTERCEPT_SETLOCALE
+#define SANITIZER_INTERCEPT_SETLOCALE 0
+
+#undef SANITIZER_INTERCEPT_GETCWD
+#define SANITIZER_INTERCEPT_GETCWD 0
+
+#undef SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME
+#define SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME 0
+
+#undef SANITIZER_INTERCEPT_STRTOIMAX
+#define SANITIZER_INTERCEPT_STRTOIMAX 0
+
+#undef SANITIZER_INTERCEPT_MBSTOWCS
+#define SANITIZER_INTERCEPT_MBSTOWCS 0
+
+#undef SANITIZER_INTERCEPT_MBSNRTOWCS
+#define SANITIZER_INTERCEPT_MBSNRTOWCS 0
+
+#undef SANITIZER_INTERCEPT_WCSTOMBS
+#define SANITIZER_INTERCEPT_WCSTOMBS 0
+
+#undef SANITIZER_INTERCEPT_STRXFRM
+#define SANITIZER_INTERCEPT_STRXFRM 0
+
+#undef SANITIZER_INTERCEPT___STRXFRM_L
+#define SANITIZER_INTERCEPT___STRXFRM_L 0
+
+#undef SANITIZER_INTERCEPT_WCSXFRM
+#define SANITIZER_INTERCEPT_WCSXFRM 0
+
+#undef SANITIZER_INTERCEPT___WCSXFRM_L
+#define SANITIZER_INTERCEPT___WCSXFRM_L 0
+
+#undef SANITIZER_INTERCEPT_WCSNRTOMBS
+#define SANITIZER_INTERCEPT_WCSNRTOMBS 0
+
+#undef SANITIZER_INTERCEPT_WCRTOMB
+#define SANITIZER_INTERCEPT_WCRTOMB 0
+
+#undef SANITIZER_INTERCEPT_WCTOMB
+#define SANITIZER_INTERCEPT_WCTOMB 0
+
+#undef SANITIZER_INTERCEPT_TCGETATTR
+#define SANITIZER_INTERCEPT_TCGETATTR 0
+
+#undef SANITIZER_INTERCEPT_REALPATH
+#define SANITIZER_INTERCEPT_REALPATH 0
+
+#undef SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME
+#define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME 0
+
+#undef SANITIZER_INTERCEPT_CONFSTR
+#define SANITIZER_INTERCEPT_CONFSTR 0
+
+#undef SANITIZER_INTERCEPT_SCHED_GETAFFINITY
+#define SANITIZER_INTERCEPT_SCHED_GETAFFINITY 0
+
+#undef SANITIZER_INTERCEPT_SCHED_GETPARAM
+#define SANITIZER_INTERCEPT_SCHED_GETPARAM 0
+
+#undef SANITIZER_INTERCEPT_STRERROR
+#define SANITIZER_INTERCEPT_STRERROR 0
+
+#undef SANITIZER_INTERCEPT_STRERROR_R
+#define SANITIZER_INTERCEPT_STRERROR_R 0
+
+#undef SANITIZER_INTERCEPT_XPG_STRERROR_R
+#define SANITIZER_INTERCEPT_XPG_STRERROR_R 0
+
+#undef SANITIZER_INTERCEPT_SCANDIR
+#define SANITIZER_INTERCEPT_SCANDIR 0
+
+#undef SANITIZER_INTERCEPT_SCANDIR64
+#define SANITIZER_INTERCEPT_SCANDIR64 0
+
+#undef SANITIZER_INTERCEPT_GETGROUPS
+#define SANITIZER_INTERCEPT_GETGROUPS 0
+
+#undef SANITIZER_INTERCEPT_POLL
+#define SANITIZER_INTERCEPT_POLL 0
+
+#undef SANITIZER_INTERCEPT_PPOLL
+#define SANITIZER_INTERCEPT_PPOLL 0
+
+#undef SANITIZER_INTERCEPT_WORDEXP
+#define SANITIZER_INTERCEPT_WORDEXP 0
+
+#undef SANITIZER_INTERCEPT_SIGWAIT
+#define SANITIZER_INTERCEPT_SIGWAIT 0
+
+#undef SANITIZER_INTERCEPT_SIGWAITINFO
+#define SANITIZER_INTERCEPT_SIGWAITINFO 0
+
+#undef SANITIZER_INTERCEPT_SIGTIMEDWAIT
+#define SANITIZER_INTERCEPT_SIGTIMEDWAIT 0
+
+#undef SANITIZER_INTERCEPT_SIGSETOPS
+#define SANITIZER_INTERCEPT_SIGSETOPS 0
+
+#undef SANITIZER_INTERCEPT_SIGSET_LOGICOPS
+#define SANITIZER_INTERCEPT_SIGSET_LOGICOPS 0
+
+#undef SANITIZER_INTERCEPT_SIGPENDING
+#define SANITIZER_INTERCEPT_SIGPENDING 0
+
+#undef SANITIZER_INTERCEPT_SIGPROCMASK
+#define SANITIZER_INTERCEPT_SIGPROCMASK 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_SIGMASK
+#define SANITIZER_INTERCEPT_PTHREAD_SIGMASK 0
+
+#undef SANITIZER_INTERCEPT_BACKTRACE
+#define SANITIZER_INTERCEPT_BACKTRACE 0
+
+#undef SANITIZER_INTERCEPT_GETMNTENT
+#define SANITIZER_INTERCEPT_GETMNTENT 0
+
+#undef SANITIZER_INTERCEPT_GETMNTENT_R
+#define SANITIZER_INTERCEPT_GETMNTENT_R 0
+
+#undef SANITIZER_INTERCEPT_STATFS
+#define SANITIZER_INTERCEPT_STATFS 0
+
+#undef SANITIZER_INTERCEPT_STATFS64
+#define SANITIZER_INTERCEPT_STATFS64 0
+
+#undef SANITIZER_INTERCEPT_STATVFS
+#define SANITIZER_INTERCEPT_STATVFS 0
+
+#undef SANITIZER_INTERCEPT_STATVFS64
+#define SANITIZER_INTERCEPT_STATVFS64 0
+
+#undef SANITIZER_INTERCEPT_INITGROUPS
+#define SANITIZER_INTERCEPT_INITGROUPS 0
+
+#undef SANITIZER_INTERCEPT_ETHER_NTOA_ATON
+#define SANITIZER_INTERCEPT_ETHER_NTOA_ATON 0
+
+#undef SANITIZER_INTERCEPT_ETHER_HOST
+#define SANITIZER_INTERCEPT_ETHER_HOST 0
+
+#undef SANITIZER_INTERCEPT_ETHER_R
+#define SANITIZER_INTERCEPT_ETHER_R 0
+
+#undef SANITIZER_INTERCEPT_SHMCTL
+#define SANITIZER_INTERCEPT_SHMCTL 0
+
+#undef SANITIZER_INTERCEPT_RANDOM_R
+#define SANITIZER_INTERCEPT_RANDOM_R 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_ATTR_GET
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_GETAFFINITY_NP
+#define SANITIZER_INTERCEPT_PTHREAD_GETAFFINITY_NP 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPROTOCOL
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPROTOCOL 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPRIOCEILING
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPRIOCEILING 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED
+#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP
+#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED
+#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK
+#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED
+#define SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED 0
+
+#undef SANITIZER_INTERCEPT_TRYJOIN
+#define SANITIZER_INTERCEPT_TRYJOIN 0
+
+#undef SANITIZER_INTERCEPT_TIMEDJOIN
+#define SANITIZER_INTERCEPT_TIMEDJOIN 0
+
+#undef SANITIZER_INTERCEPT_THR_EXIT
+#define SANITIZER_INTERCEPT_THR_EXIT 0
+
+#undef SANITIZER_INTERCEPT_TMPNAM
+#define SANITIZER_INTERCEPT_TMPNAM 0
+
+#undef SANITIZER_INTERCEPT_TMPNAM_R
+#define SANITIZER_INTERCEPT_TMPNAM_R 0
+
+#undef SANITIZER_INTERCEPT_PTSNAME
+#define SANITIZER_INTERCEPT_PTSNAME 0
+
+#undef SANITIZER_INTERCEPT_PTSNAME_R
+#define SANITIZER_INTERCEPT_PTSNAME_R 0
+
+#undef SANITIZER_INTERCEPT_TTYNAME
+#define SANITIZER_INTERCEPT_TTYNAME 0
+
+#undef SANITIZER_INTERCEPT_TTYNAME_R
+#define SANITIZER_INTERCEPT_TTYNAME_R 0
+
+#undef SANITIZER_INTERCEPT_TEMPNAM
+#define SANITIZER_INTERCEPT_TEMPNAM 0
+
+#undef SANITIZER_INTERCEPT_SINCOS
+#define SANITIZER_INTERCEPT_SINCOS 0
+
+#undef SANITIZER_INTERCEPT_REMQUO
+#define SANITIZER_INTERCEPT_REMQUO 0
+
+#undef SANITIZER_INTERCEPT_REMQUOL
+#define SANITIZER_INTERCEPT_REMQUOL 0
+
+#undef SANITIZER_INTERCEPT_LGAMMA
+#define SANITIZER_INTERCEPT_LGAMMA 0
+
+#undef SANITIZER_INTERCEPT_LGAMMAL
+#define SANITIZER_INTERCEPT_LGAMMAL 0
+
+#undef SANITIZER_INTERCEPT_LGAMMA_R
+#define SANITIZER_INTERCEPT_LGAMMA_R 0
+
+#undef SANITIZER_INTERCEPT_LGAMMAL_R
+#define SANITIZER_INTERCEPT_LGAMMAL_R 0
+
+#undef SANITIZER_INTERCEPT_DRAND48_R
+#define SANITIZER_INTERCEPT_DRAND48_R 0
+
+#undef SANITIZER_INTERCEPT_RAND_R
+#define SANITIZER_INTERCEPT_RAND_R 0
+
+#undef SANITIZER_INTERCEPT_ICONV
+#define SANITIZER_INTERCEPT_ICONV 0
+
+#undef SANITIZER_INTERCEPT_TIMES
+#define SANITIZER_INTERCEPT_TIMES 0
+
+#undef SANITIZER_INTERCEPT_GETLINE
+#define SANITIZER_INTERCEPT_GETLINE 0
+
+#undef SANITIZER_INTERCEPT__EXIT
+#define SANITIZER_INTERCEPT__EXIT 0
+
+#undef SANITIZER_INTERCEPT___LIBC_MUTEX
+#define SANITIZER_INTERCEPT___LIBC_MUTEX 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP
+#define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_GETNAME_NP
+#define SANITIZER_INTERCEPT_PTHREAD_GETNAME_NP 0
+
+#undef SANITIZER_INTERCEPT_TLS_GET_ADDR
+#define SANITIZER_INTERCEPT_TLS_GET_ADDR 0
+
+#undef SANITIZER_INTERCEPT_LISTXATTR
+#define SANITIZER_INTERCEPT_LISTXATTR 0
+
+#undef SANITIZER_INTERCEPT_GETXATTR
+#define SANITIZER_INTERCEPT_GETXATTR 0
+
+#undef SANITIZER_INTERCEPT_GETRESID
+#define SANITIZER_INTERCEPT_GETRESID 0
+
+#undef SANITIZER_INTERCEPT_GETIFADDRS
+#define SANITIZER_INTERCEPT_GETIFADDRS 0
+
+#undef SANITIZER_INTERCEPT_IF_INDEXTONAME
+#define SANITIZER_INTERCEPT_IF_INDEXTONAME 0
+
+#undef SANITIZER_INTERCEPT_CAPGET
+#define SANITIZER_INTERCEPT_CAPGET 0
+
+#undef SANITIZER_INTERCEPT_AEABI_MEM
+#define SANITIZER_INTERCEPT_AEABI_MEM 0
+
+#undef SANITIZER_INTERCEPT___BZERO
+#define SANITIZER_INTERCEPT___BZERO 0
+
+#undef SANITIZER_INTERCEPT_BZERO
+#define SANITIZER_INTERCEPT_BZERO 0
+
+#undef SANITIZER_INTERCEPT_FTIME
+#define SANITIZER_INTERCEPT_FTIME 0
+
+#undef SANITIZER_INTERCEPT_XDR
+#define SANITIZER_INTERCEPT_XDR 0
+
+#undef SANITIZER_INTERCEPT_XDRREC
+#define SANITIZER_INTERCEPT_XDRREC 0
+
+#undef SANITIZER_INTERCEPT_TSEARCH
+#define SANITIZER_INTERCEPT_TSEARCH 0
+
+#undef SANITIZER_INTERCEPT_LIBIO_INTERNALS
+#define SANITIZER_INTERCEPT_LIBIO_INTERNALS 0
+
+#undef SANITIZER_INTERCEPT_FOPEN
+#define SANITIZER_INTERCEPT_FOPEN 0
+
+#undef SANITIZER_INTERCEPT_FOPEN64
+#define SANITIZER_INTERCEPT_FOPEN64 0
+
+#undef SANITIZER_INTERCEPT_OPEN_MEMSTREAM
+#define SANITIZER_INTERCEPT_OPEN_MEMSTREAM 0
+
+#undef SANITIZER_INTERCEPT_OBSTACK
+#define SANITIZER_INTERCEPT_OBSTACK 0
+
+#undef SANITIZER_INTERCEPT_FFLUSH
+#define SANITIZER_INTERCEPT_FFLUSH 0
+
+#undef SANITIZER_INTERCEPT_FCLOSE
+#define SANITIZER_INTERCEPT_FCLOSE 0
+
+#undef SANITIZER_INTERCEPT_DLOPEN_DLCLOSE
+#define SANITIZER_INTERCEPT_DLOPEN_DLCLOSE 0
+
+#undef SANITIZER_INTERCEPT_GETPASS
+#define SANITIZER_INTERCEPT_GETPASS 0
+
+#undef SANITIZER_INTERCEPT_TIMERFD
+#define SANITIZER_INTERCEPT_TIMERFD 0
+
+#undef SANITIZER_INTERCEPT_MLOCKX
+#define SANITIZER_INTERCEPT_MLOCKX 0
+
+#undef SANITIZER_INTERCEPT_FOPENCOOKIE
+#define SANITIZER_INTERCEPT_FOPENCOOKIE 0
+
+#undef SANITIZER_INTERCEPT_SEM
+#define SANITIZER_INTERCEPT_SEM 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_SETCANCEL
+#define SANITIZER_INTERCEPT_PTHREAD_SETCANCEL 0
+
+#undef SANITIZER_INTERCEPT_MINCORE
+#define SANITIZER_INTERCEPT_MINCORE 0
+
+#undef SANITIZER_INTERCEPT_PROCESS_VM_READV
+#define SANITIZER_INTERCEPT_PROCESS_VM_READV 0
+
+#undef SANITIZER_INTERCEPT_CTERMID
+#define SANITIZER_INTERCEPT_CTERMID 0
+
+#undef SANITIZER_INTERCEPT_CTERMID_R
+#define SANITIZER_INTERCEPT_CTERMID_R 0
+
+#undef SANITIZER_INTERCEPTOR_HOOKS
+#define SANITIZER_INTERCEPTOR_HOOKS 0
+
+#undef SANITIZER_INTERCEPT_RECV_RECVFROM
+#define SANITIZER_INTERCEPT_RECV_RECVFROM 0
+
+#undef SANITIZER_INTERCEPT_SEND_SENDTO
+#define SANITIZER_INTERCEPT_SEND_SENDTO 0
+
+#undef SANITIZER_INTERCEPT_EVENTFD_READ_WRITE
+#define SANITIZER_INTERCEPT_EVENTFD_READ_WRITE 0
+
+#undef SANITIZER_INTERCEPT_STAT
+#define SANITIZER_INTERCEPT_STAT 0
+
+#undef SANITIZER_INTERCEPT_STAT64
+#define SANITIZER_INTERCEPT_STAT64 0
+
+#undef SANITIZER_INTERCEPT_LSTAT
+#define SANITIZER_INTERCEPT_LSTAT 0
+
+#undef SANITIZER_INTERCEPT___XSTAT
+#define SANITIZER_INTERCEPT___XSTAT 0
+
+#undef SANITIZER_INTERCEPT___XSTAT64
+#define SANITIZER_INTERCEPT___XSTAT64 0
+
+#undef SANITIZER_INTERCEPT___LXSTAT
+#define SANITIZER_INTERCEPT___LXSTAT 0
+
+#undef SANITIZER_INTERCEPT___LXSTAT64
+#define SANITIZER_INTERCEPT___LXSTAT64 0
+
+#undef SANITIZER_INTERCEPT_UTMP
+#define SANITIZER_INTERCEPT_UTMP 0
+
+#undef SANITIZER_INTERCEPT_UTMPX
+#define SANITIZER_INTERCEPT_UTMPX 0
+
+#undef SANITIZER_INTERCEPT_GETLOADAVG
+#define SANITIZER_INTERCEPT_GETLOADAVG 0
+
+// #undef SANITIZER_INTERCEPT_MMAP
+// #define SANITIZER_INTERCEPT_MMAP 0
+
+#undef SANITIZER_INTERCEPT_MMAP64
+#define SANITIZER_INTERCEPT_MMAP64 0
+
+#undef SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
+#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO 0
+
+#undef SANITIZER_INTERCEPT_MEMALIGN
+#define SANITIZER_INTERCEPT_MEMALIGN 0
+
+#undef SANITIZER_INTERCEPT___LIBC_MEMALIGN
+#define SANITIZER_INTERCEPT___LIBC_MEMALIGN 0
+
+#undef SANITIZER_INTERCEPT_PVALLOC
+#define SANITIZER_INTERCEPT_PVALLOC 0
+
+#undef SANITIZER_INTERCEPT_CFREE
+#define SANITIZER_INTERCEPT_CFREE 0
+
+#undef SANITIZER_INTERCEPT_REALLOCARRAY
+#define SANITIZER_INTERCEPT_REALLOCARRAY 0
+
+#undef SANITIZER_INTERCEPT_ALIGNED_ALLOC
+#define SANITIZER_INTERCEPT_ALIGNED_ALLOC 0
+
+#undef SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE
+#define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE 0
+
+#undef SANITIZER_INTERCEPT_MCHECK_MPROBE
+#define SANITIZER_INTERCEPT_MCHECK_MPROBE 0
+
+#undef SANITIZER_INTERCEPT_WCSLEN
+#define SANITIZER_INTERCEPT_WCSLEN 0
+
+#undef SANITIZER_INTERCEPT_WCSCAT
+#define SANITIZER_INTERCEPT_WCSCAT 0
+
+#undef SANITIZER_INTERCEPT_WCSDUP
+#define SANITIZER_INTERCEPT_WCSDUP 0
+
+#undef SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION
+#define SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION 0
+
+#undef SANITIZER_INTERCEPT_BSD_SIGNAL
+#define SANITIZER_INTERCEPT_BSD_SIGNAL 0
+
+#undef SANITIZER_INTERCEPT_ACCT
+#define SANITIZER_INTERCEPT_ACCT 0
+
+#undef SANITIZER_INTERCEPT_USER_FROM_UID
+#define SANITIZER_INTERCEPT_USER_FROM_UID 0
+
+#undef SANITIZER_INTERCEPT_UID_FROM_USER
+#define SANITIZER_INTERCEPT_UID_FROM_USER 0
+
+#undef SANITIZER_INTERCEPT_GROUP_FROM_GID
+#define SANITIZER_INTERCEPT_GROUP_FROM_GID 0
+
+#undef SANITIZER_INTERCEPT_GID_FROM_GROUP
+#define SANITIZER_INTERCEPT_GID_FROM_GROUP 0
+
+#undef SANITIZER_INTERCEPT_ACCESS
+#define SANITIZER_INTERCEPT_ACCESS 0
+
+#undef SANITIZER_INTERCEPT_FACCESSAT
+#define SANITIZER_INTERCEPT_FACCESSAT 0
+
+#undef SANITIZER_INTERCEPT_GETGROUPLIST
+#define SANITIZER_INTERCEPT_GETGROUPLIST 0
+
+#undef SANITIZER_INTERCEPT_STRLCPY
+#define SANITIZER_INTERCEPT_STRLCPY 0
+
+#undef SANITIZER_INTERCEPT_NAME_TO_HANDLE_AT
+#define SANITIZER_INTERCEPT_NAME_TO_HANDLE_AT 0
+
+#undef SANITIZER_INTERCEPT_OPEN_BY_HANDLE_AT
+#define SANITIZER_INTERCEPT_OPEN_BY_HANDLE_AT 0
+
+#undef SANITIZER_INTERCEPT_READLINK
+#define SANITIZER_INTERCEPT_READLINK 0
+
+#undef SANITIZER_INTERCEPT_READLINKAT
+#define SANITIZER_INTERCEPT_READLINKAT 0
+
+#undef SANITIZER_INTERCEPT_DEVNAME
+#define SANITIZER_INTERCEPT_DEVNAME 0
+
+#undef SANITIZER_INTERCEPT_DEVNAME_R
+#define SANITIZER_INTERCEPT_DEVNAME_R 0
+
+#undef SANITIZER_INTERCEPT_FGETLN
+#define SANITIZER_INTERCEPT_FGETLN 0
+
+#undef SANITIZER_INTERCEPT_STRMODE
+#define SANITIZER_INTERCEPT_STRMODE 0
+
+#undef SANITIZER_INTERCEPT_TTYENT
+#define SANITIZER_INTERCEPT_TTYENT 0
+
+#undef SANITIZER_INTERCEPT_TTYENTPATH
+#define SANITIZER_INTERCEPT_TTYENTPATH 0
+
+#undef SANITIZER_INTERCEPT_PROTOENT
+#define SANITIZER_INTERCEPT_PROTOENT 0
+
+#undef SANITIZER_INTERCEPT_PROTOENT_R
+#define SANITIZER_INTERCEPT_PROTOENT_R 0
+
+#undef SANITIZER_INTERCEPT_NETENT
+#define SANITIZER_INTERCEPT_NETENT 0
+
+#undef SANITIZER_INTERCEPT_SETVBUF
+#define SANITIZER_INTERCEPT_SETVBUF 0
+
+#undef SANITIZER_INTERCEPT_GETMNTINFO
+#define SANITIZER_INTERCEPT_GETMNTINFO 0
+
+#undef SANITIZER_INTERCEPT_MI_VECTOR_HASH
+#define SANITIZER_INTERCEPT_MI_VECTOR_HASH 0
+
+#undef SANITIZER_INTERCEPT_GETVFSSTAT
+#define SANITIZER_INTERCEPT_GETVFSSTAT 0
+
+#undef SANITIZER_INTERCEPT_REGEX
+#define SANITIZER_INTERCEPT_REGEX 0
+
+#undef SANITIZER_INTERCEPT_REGEXSUB
+#define SANITIZER_INTERCEPT_REGEXSUB 0
+
+#undef SANITIZER_INTERCEPT_FTS
+#define SANITIZER_INTERCEPT_FTS 0
+
+#undef SANITIZER_INTERCEPT_SYSCTL
+#define SANITIZER_INTERCEPT_SYSCTL 0
+
+#undef SANITIZER_INTERCEPT_ASYSCTL
+#define SANITIZER_INTERCEPT_ASYSCTL 0
+
+#undef SANITIZER_INTERCEPT_SYSCTLGETMIBINFO
+#define SANITIZER_INTERCEPT_SYSCTLGETMIBINFO 0
+
+#undef SANITIZER_INTERCEPT_NL_LANGINFO
+#define SANITIZER_INTERCEPT_NL_LANGINFO 0
+
+#undef SANITIZER_INTERCEPT_MODCTL
+#define SANITIZER_INTERCEPT_MODCTL 0
+
+#undef SANITIZER_INTERCEPT_CAPSICUM
+#define SANITIZER_INTERCEPT_CAPSICUM 0
+
+#undef SANITIZER_INTERCEPT_STRTONUM
+#define SANITIZER_INTERCEPT_STRTONUM 0
+
+#undef SANITIZER_INTERCEPT_FPARSELN
+#define SANITIZER_INTERCEPT_FPARSELN 0
+
+#undef SANITIZER_INTERCEPT_STATVFS1
+#define SANITIZER_INTERCEPT_STATVFS1 0
+
+#undef SANITIZER_INTERCEPT_STRTOI
+#define SANITIZER_INTERCEPT_STRTOI 0
+
+#undef SANITIZER_INTERCEPT_SHA1
+#define SANITIZER_INTERCEPT_SHA1 0
+
+#undef SANITIZER_INTERCEPT_MD4
+#define SANITIZER_INTERCEPT_MD4 0
+
+#undef SANITIZER_INTERCEPT_RMD160
+#define SANITIZER_INTERCEPT_RMD160 0
+
+#undef SANITIZER_INTERCEPT_MD5
+#define SANITIZER_INTERCEPT_MD5 0
+
+#undef SANITIZER_INTERCEPT_FSEEK
+#define SANITIZER_INTERCEPT_FSEEK 0
+
+#undef SANITIZER_INTERCEPT_MD2
+#define SANITIZER_INTERCEPT_MD2 0
+
+#undef SANITIZER_INTERCEPT_SHA2
+#define SANITIZER_INTERCEPT_SHA2 0
+
+#undef SANITIZER_INTERCEPT_CDB
+#define SANITIZER_INTERCEPT_CDB 0
+
+#undef SANITIZER_INTERCEPT_VIS
+#define SANITIZER_INTERCEPT_VIS 0
+
+#undef SANITIZER_INTERCEPT_POPEN
+#define SANITIZER_INTERCEPT_POPEN 0
+
+#undef SANITIZER_INTERCEPT_POPENVE
+#define SANITIZER_INTERCEPT_POPENVE 0
+
+#undef SANITIZER_INTERCEPT_PCLOSE
+#define SANITIZER_INTERCEPT_PCLOSE 0
+
+#undef SANITIZER_INTERCEPT_FUNOPEN
+#define SANITIZER_INTERCEPT_FUNOPEN 0
+
+#undef SANITIZER_INTERCEPT_FUNOPEN2
+#define SANITIZER_INTERCEPT_FUNOPEN2 0
+
+#undef SANITIZER_INTERCEPT_GETFSENT
+#define SANITIZER_INTERCEPT_GETFSENT 0
+
+#undef SANITIZER_INTERCEPT_ARC4RANDOM
+#define SANITIZER_INTERCEPT_ARC4RANDOM 0
+
+#undef SANITIZER_INTERCEPT_FDEVNAME
+#define SANITIZER_INTERCEPT_FDEVNAME 0
+
+#undef SANITIZER_INTERCEPT_GETUSERSHELL
+#define SANITIZER_INTERCEPT_GETUSERSHELL 0
+
+#undef SANITIZER_INTERCEPT_SL_INIT
+#define SANITIZER_INTERCEPT_SL_INIT 0
+
+#undef SANITIZER_INTERCEPT_GETRANDOM
+#define SANITIZER_INTERCEPT_GETRANDOM 0
+
+#undef SANITIZER_INTERCEPT___CXA_ATEXIT
+#define SANITIZER_INTERCEPT___CXA_ATEXIT 0
+
+#undef SANITIZER_INTERCEPT_ATEXIT
+#define SANITIZER_INTERCEPT_ATEXIT 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_ATFORK
+#define SANITIZER_INTERCEPT_PTHREAD_ATFORK 0
+
+#undef SANITIZER_INTERCEPT_GETENTROPY
+#define SANITIZER_INTERCEPT_GETENTROPY 0
+
+#undef SANITIZER_INTERCEPT_QSORT
+#define SANITIZER_INTERCEPT_QSORT 0
+
+#undef SANITIZER_INTERCEPT_QSORT_R
+#define SANITIZER_INTERCEPT_QSORT_R 0
+
+#undef SANITIZER_INTERCEPT_BSEARCH
+#define SANITIZER_INTERCEPT_BSEARCH 0
+
+#undef SANITIZER_INTERCEPT_SIGALTSTACK
+#define SANITIZER_INTERCEPT_SIGALTSTACK 0
+
+#undef SANITIZER_INTERCEPT_UNAME
+#define SANITIZER_INTERCEPT_UNAME 0
+
+#undef SANITIZER_INTERCEPT___XUNAME
+#define SANITIZER_INTERCEPT___XUNAME 0
+
+#undef SANITIZER_INTERCEPT_FLOPEN
+#define SANITIZER_INTERCEPT_FLOPEN 0
+
+#undef SANITIZER_INTERCEPT_PROCCTL
+#define SANITIZER_INTERCEPT_PROCCTL 0
+
+#undef SANITIZER_INTERCEPT_HEXDUMP
+#define SANITIZER_INTERCEPT_HEXDUMP 0
+
+#undef SANITIZER_INTERCEPT_ARGP_PARSE
+#define SANITIZER_INTERCEPT_ARGP_PARSE 0
+
+#endif // HWASAN_PLATFORM_INTERCEPTORS_H
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_registers.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_registers.h
new file mode 100644
index 000000000000..48a140ffc923
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_registers.h
@@ -0,0 +1,56 @@
+//===-- hwasan_registers.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This describes the register state retrieved by hwasan during error reporting.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HWASAN_REGISTERS_H
+#define HWASAN_REGISTERS_H
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if defined(__aarch64__)
+
+# define CAN_GET_REGISTERS 1
+
+struct Registers {
+ uptr x[32];
+};
+
+__attribute__((always_inline, unused)) static Registers GetRegisters() {
+ Registers regs;
+ __asm__ volatile(
+ "stp x0, x1, [%1, #(8 * 0)]\n"
+ "stp x2, x3, [%1, #(8 * 2)]\n"
+ "stp x4, x5, [%1, #(8 * 4)]\n"
+ "stp x6, x7, [%1, #(8 * 6)]\n"
+ "stp x8, x9, [%1, #(8 * 8)]\n"
+ "stp x10, x11, [%1, #(8 * 10)]\n"
+ "stp x12, x13, [%1, #(8 * 12)]\n"
+ "stp x14, x15, [%1, #(8 * 14)]\n"
+ "stp x16, x17, [%1, #(8 * 16)]\n"
+ "stp x18, x19, [%1, #(8 * 18)]\n"
+ "stp x20, x21, [%1, #(8 * 20)]\n"
+ "stp x22, x23, [%1, #(8 * 22)]\n"
+ "stp x24, x25, [%1, #(8 * 24)]\n"
+ "stp x26, x27, [%1, #(8 * 26)]\n"
+ "stp x28, x29, [%1, #(8 * 28)]\n"
+ : "=m"(regs)
+ : "r"(regs.x));
+ regs.x[30] = reinterpret_cast<uintptr_t>(__builtin_return_address(0));
+ regs.x[31] = reinterpret_cast<uintptr_t>(__builtin_frame_address(0));
+ return regs;
+}
+
+#else
+# define CAN_GET_REGISTERS 0
+#endif
+
+#endif // HWASAN_REGISTERS_H
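
A minimal sketch of the intended usage (hypothetical caller and helper names, not part of this patch): the capture has to happen on entry to the failure handler, before the general-purpose registers are clobbered.

    // Hypothetical consumer of hwasan_registers.h; ReportRegisters is assumed.
    #include "hwasan_registers.h"

    void OnTagMismatch(uptr addr) {
    #if CAN_GET_REGISTERS
      // Inlined capture of x0..x29, plus approximations of the return address
      // (x[30]) and frame pointer (x[31]) from the builtins; call this first,
      // before the registers are reused.
      Registers regs = GetRegisters();
      ReportRegisters(regs, addr);  // assumed reporting helper
    #endif
    }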
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.cpp
index 31e190a8ebbc..efe6f5770491 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.cpp
@@ -22,6 +22,7 @@
#include "hwasan_thread.h"
#include "hwasan_thread_list.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_array_ref.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_mutex.h"
@@ -331,7 +332,7 @@ static void ShowHeapOrGlobalCandidate(uptr untagged_addr, tag_t *candidate,
untagged_addr, offset, whence, chunk.UsedSize(), chunk.Beg(),
chunk.End());
Printf("%s", d.Allocation());
- Printf("allocated here:\n");
+ Printf("allocated by thread T%u here:\n", chunk.GetAllocThreadId());
Printf("%s", d.Default());
GetStackTraceFromId(chunk.GetAllocStackId()).Print();
return;
@@ -473,12 +474,12 @@ void PrintAddressDescription(
har.requested_size, UntagAddr(har.tagged_addr),
UntagAddr(har.tagged_addr) + har.requested_size);
Printf("%s", d.Allocation());
- Printf("freed by thread T%zd here:\n", t->unique_id());
+ Printf("freed by thread T%u here:\n", t->unique_id());
Printf("%s", d.Default());
GetStackTraceFromId(har.free_context_id).Print();
Printf("%s", d.Allocation());
- Printf("previously allocated here:\n", t);
+ Printf("previously allocated by thread T%u here:\n", har.alloc_thread_id);
Printf("%s", d.Default());
GetStackTraceFromId(har.alloc_context_id).Print();
@@ -501,7 +502,8 @@ void PrintAddressDescription(
}
  // Print the remaining threads, as extra information, one line per thread.
- hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });
+ if (flags()->print_live_threads_info)
+ hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });
if (!num_descriptions_printed)
// We exhausted our possibilities. Bail out.
@@ -706,7 +708,8 @@ void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
sptr offset =
__hwasan_test_shadow(reinterpret_cast<void *>(tagged_addr), access_size);
- CHECK(offset >= 0 && offset < static_cast<sptr>(access_size));
+ CHECK_GE(offset, 0);
+ CHECK_LT(offset, static_cast<sptr>(access_size));
tag_t ptr_tag = GetTagFromPointer(tagged_addr);
tag_t *tag_ptr =
reinterpret_cast<tag_t *>(MemToShadow(untagged_addr + offset));
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_aarch64.S b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_aarch64.S
index 744748a5101f..0c0abb6de861 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_aarch64.S
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_aarch64.S
@@ -31,33 +31,37 @@
.section .text
.file "hwasan_setjmp_aarch64.S"
-.global __interceptor_setjmp
-ASM_TYPE_FUNCTION(__interceptor_setjmp)
-__interceptor_setjmp:
+.global ASM_WRAPPER_NAME(setjmp)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(setjmp))
+ASM_WRAPPER_NAME(setjmp):
CFI_STARTPROC
BTI_C
mov x1, #0
- b __interceptor_sigsetjmp
+ b ASM_WRAPPER_NAME(sigsetjmp)
CFI_ENDPROC
-ASM_SIZE(__interceptor_setjmp)
+ASM_SIZE(ASM_WRAPPER_NAME(setjmp))
+
+ASM_INTERCEPTOR_TRAMPOLINE(setjmp)
#if SANITIZER_ANDROID
// Bionic also defines a function `setjmp` that calls `sigsetjmp` saving the
// current signal.
-.global __interceptor_setjmp_bionic
-ASM_TYPE_FUNCTION(__interceptor_setjmp_bionic)
-__interceptor_setjmp_bionic:
+.global ASM_WRAPPER_NAME(setjmp_bionic)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(setjmp_bionic))
+ASM_WRAPPER_NAME(setjmp_bionic):
CFI_STARTPROC
BTI_C
mov x1, #1
- b __interceptor_sigsetjmp
+ b ASM_WRAPPER_NAME(sigsetjmp)
CFI_ENDPROC
-ASM_SIZE(__interceptor_setjmp_bionic)
+ASM_SIZE(ASM_WRAPPER_NAME(setjmp_bionic))
+
+ASM_INTERCEPTOR_TRAMPOLINE(setjmp_bionic)
#endif
-.global __interceptor_sigsetjmp
-ASM_TYPE_FUNCTION(__interceptor_sigsetjmp)
-__interceptor_sigsetjmp:
+.global ASM_WRAPPER_NAME(sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(sigsetjmp))
+ASM_WRAPPER_NAME(sigsetjmp):
CFI_STARTPROC
BTI_C
stp x19, x20, [x0, #0<<3]
@@ -77,22 +81,19 @@ __interceptor_sigsetjmp:
// This function is defined in hwasan_interceptors.cc
b __sigjmp_save
CFI_ENDPROC
-ASM_SIZE(__interceptor_sigsetjmp)
+ASM_SIZE(ASM_WRAPPER_NAME(sigsetjmp))
+ASM_INTERCEPTOR_TRAMPOLINE(sigsetjmp)
-.macro WEAK_ALIAS first second
- .weak \second
- .equ \second\(), \first
-.endm
#if SANITIZER_ANDROID
-WEAK_ALIAS __interceptor_sigsetjmp, sigsetjmp
-WEAK_ALIAS __interceptor_setjmp_bionic, setjmp
+ASM_TRAMPOLINE_ALIAS(sigsetjmp, sigsetjmp)
+ASM_TRAMPOLINE_ALIAS(setjmp, setjmp_bionic)
#else
-WEAK_ALIAS __interceptor_sigsetjmp, __sigsetjmp
+ASM_TRAMPOLINE_ALIAS(__sigsetjmp, sigsetjmp)
#endif
-WEAK_ALIAS __interceptor_setjmp, _setjmp
+ASM_TRAMPOLINE_ALIAS(_setjmp, setjmp)
#endif
// We do not need executable stack.
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_riscv64.S b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_riscv64.S
index 43f9c3c26b4e..c01f4e25e8a4 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_riscv64.S
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_riscv64.S
@@ -31,18 +31,18 @@
.section .text
.file "hwasan_setjmp_riscv64.S"
-.global __interceptor_setjmp
-ASM_TYPE_FUNCTION(__interceptor_setjmp)
-__interceptor_setjmp:
+.global ASM_WRAPPER_NAME(setjmp)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(setjmp))
+ASM_WRAPPER_NAME(setjmp):
CFI_STARTPROC
addi x11, x0, 0
- tail __interceptor_sigsetjmp
+ tail ASM_WRAPPER_NAME(sigsetjmp)
CFI_ENDPROC
-ASM_SIZE(__interceptor_setjmp)
+ASM_SIZE(ASM_WRAPPER_NAME(setjmp))
-.global __interceptor_sigsetjmp
-ASM_TYPE_FUNCTION(__interceptor_sigsetjmp)
-__interceptor_sigsetjmp:
+.global ASM_WRAPPER_NAME(sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(sigsetjmp))
+ASM_WRAPPER_NAME(sigsetjmp):
CFI_STARTPROC
sd ra, 0<<3(x10)
sd s0, 1<<3(x10)
@@ -80,17 +80,12 @@ __interceptor_sigsetjmp:
// This function is defined in hwasan_interceptors.cc
tail __sigjmp_save
CFI_ENDPROC
-ASM_SIZE(__interceptor_sigsetjmp)
+ASM_SIZE(ASM_WRAPPER_NAME(sigsetjmp))
-
-.macro WEAK_ALIAS first second
- .weak \second
- .equ \second\(), \first
-.endm
-
-WEAK_ALIAS __interceptor_sigsetjmp, __sigsetjmp
-
-WEAK_ALIAS __interceptor_setjmp, _setjmp
+ASM_INTERCEPTOR_TRAMPOLINE(sigsetjmp)
+ASM_TRAMPOLINE_ALIAS(__sigsetjmp, sigsetjmp)
+ASM_INTERCEPTOR_TRAMPOLINE(setjmp)
+ASM_TRAMPOLINE_ALIAS(_setjmp, setjmp)
#endif
// We do not need executable stack.
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_x86_64.S b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_x86_64.S
index 7566c1ea0a57..9804e8d7ceca 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_x86_64.S
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_x86_64.S
@@ -31,19 +31,20 @@
.section .text
.file "hwasan_setjmp_x86_64.S"
-.global __interceptor_setjmp
-ASM_TYPE_FUNCTION(__interceptor_setjmp)
-__interceptor_setjmp:
+.global ASM_WRAPPER_NAME(setjmp)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(setjmp))
+ASM_WRAPPER_NAME(setjmp):
CFI_STARTPROC
_CET_ENDBR
xorl %esi, %esi
- jmp __interceptor_sigsetjmp
+ jmp .Linterceptor_sigsetjmp
CFI_ENDPROC
-ASM_SIZE(__interceptor_setjmp)
+ASM_SIZE(ASM_WRAPPER_NAME(setjmp))
-.global __interceptor_sigsetjmp
-ASM_TYPE_FUNCTION(__interceptor_sigsetjmp)
-__interceptor_sigsetjmp:
+.global ASM_WRAPPER_NAME(sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(sigsetjmp))
+ASM_WRAPPER_NAME(sigsetjmp):
+.Linterceptor_sigsetjmp:
CFI_STARTPROC
_CET_ENDBR
@@ -66,16 +67,12 @@ __interceptor_sigsetjmp:
jmp __sigjmp_save
CFI_ENDPROC
-ASM_SIZE(__interceptor_sigsetjmp)
+ASM_SIZE(ASM_WRAPPER_NAME(sigsetjmp))
-
-.macro WEAK_ALIAS first second
- .weak \second
- .equ \second\(), \first
-.endm
-
-WEAK_ALIAS __interceptor_sigsetjmp, __sigsetjmp
-WEAK_ALIAS __interceptor_setjmp, _setjmp
+ASM_INTERCEPTOR_TRAMPOLINE(sigsetjmp)
+ASM_TRAMPOLINE_ALIAS(__sigsetjmp, sigsetjmp)
+ASM_INTERCEPTOR_TRAMPOLINE(setjmp)
+ASM_TRAMPOLINE_ALIAS(_setjmp, setjmp)
#endif
// We do not need executable stack.
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.cpp
index 4a78f6062903..ce36547580e6 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.cpp
@@ -44,6 +44,8 @@ void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size,
static atomic_uint64_t unique_id;
unique_id_ = atomic_fetch_add(&unique_id, 1, memory_order_relaxed);
+ if (!IsMainThread())
+ os_id_ = GetTid();
if (auto sz = flags()->heap_history_size)
heap_allocations_ = HeapAllocationsRingBuffer::New(sz);
@@ -56,6 +58,16 @@ void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size,
#endif
InitStackAndTls(state);
dtls_ = DTLS_Get();
+ AllocatorThreadStart(allocator_cache());
+
+ if (flags()->verbose_threads) {
+ if (IsMainThread()) {
+ Printf("sizeof(Thread): %zd sizeof(HeapRB): %zd sizeof(StackRB): %zd\n",
+ sizeof(Thread), heap_allocations_->SizeInBytes(),
+ stack_allocations_->size() * sizeof(uptr));
+ }
+ Print("Creating : ");
+ }
}
void Thread::InitStackRingBuffer(uptr stack_buffer_start,
@@ -77,28 +89,23 @@ void Thread::InitStackRingBuffer(uptr stack_buffer_start,
CHECK(MemIsApp(stack_bottom_));
CHECK(MemIsApp(stack_top_ - 1));
}
-
- if (flags()->verbose_threads) {
- if (IsMainThread()) {
- Printf("sizeof(Thread): %zd sizeof(HeapRB): %zd sizeof(StackRB): %zd\n",
- sizeof(Thread), heap_allocations_->SizeInBytes(),
- stack_allocations_->size() * sizeof(uptr));
- }
- Print("Creating : ");
- }
}
void Thread::ClearShadowForThreadStackAndTLS() {
if (stack_top_ != stack_bottom_)
- TagMemory(stack_bottom_, stack_top_ - stack_bottom_, 0);
+ TagMemory(UntagAddr(stack_bottom_),
+ UntagAddr(stack_top_) - UntagAddr(stack_bottom_),
+ GetTagFromPointer(stack_top_));
if (tls_begin_ != tls_end_)
- TagMemory(tls_begin_, tls_end_ - tls_begin_, 0);
+ TagMemory(UntagAddr(tls_begin_),
+ UntagAddr(tls_end_) - UntagAddr(tls_begin_),
+ GetTagFromPointer(tls_begin_));
}
void Thread::Destroy() {
if (flags()->verbose_threads)
Print("Destroying: ");
- AllocatorSwallowThreadLocalCache(allocator_cache());
+ AllocatorThreadFinish(allocator_cache());
ClearShadowForThreadStackAndTLS();
if (heap_allocations_)
heap_allocations_->Delete();
@@ -149,6 +156,12 @@ tag_t Thread::GenerateRandomTag(uptr num_bits) {
return tag;
}
+void EnsureMainThreadIDIsCorrect() {
+ auto *t = __hwasan::GetCurrentThread();
+ if (t && (t->IsMainThread()))
+ t->set_os_id(GetTid());
+}
+
} // namespace __hwasan
// --- Implementation of LSan-specific functions --- {{{1
@@ -165,16 +178,18 @@ static __hwasan::Thread *GetThreadByOsIDLocked(tid_t os_id) {
[os_id](__hwasan::Thread *t) { return t->os_id() == os_id; });
}
-void LockThreadRegistry() { __hwasan::hwasanThreadList().Lock(); }
-
-void UnlockThreadRegistry() { __hwasan::hwasanThreadList().Unlock(); }
+void LockThreads() {
+ __hwasan::hwasanThreadList().Lock();
+ __hwasan::hwasanThreadArgRetval().Lock();
+}
-void EnsureMainThreadIDIsCorrect() {
- auto *t = __hwasan::GetCurrentThread();
- if (t && (t->IsMainThread()))
- t->set_os_id(GetTid());
+void UnlockThreads() {
+ __hwasan::hwasanThreadArgRetval().Unlock();
+ __hwasan::hwasanThreadList().Unlock();
}
+void EnsureMainThreadIDIsCorrect() { __hwasan::EnsureMainThreadIDIsCorrect(); }
+
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
uptr *cache_end, DTLS **dtls) {
@@ -198,7 +213,10 @@ void GetThreadExtraStackRangesLocked(tid_t os_id,
InternalMmapVector<Range> *ranges) {}
void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {}
-void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {}
+void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
+ __hwasan::hwasanThreadArgRetval().GetAllPtrsLocked(ptrs);
+}
+
void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {}
} // namespace __lsan
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.h
index 9727585ef754..9e1b438e48f7 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.h
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.h
@@ -110,6 +110,9 @@ class Thread {
Thread *GetCurrentThread();
uptr *GetCurrentThreadLongPtr();
+// Used to handle fork().
+void EnsureMainThreadIDIsCorrect();
+
struct ScopedTaggingDisabler {
ScopedTaggingDisabler() { GetCurrentThread()->DisableTagging(); }
~ScopedTaggingDisabler() { GetCurrentThread()->EnableTagging(); }
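
The fork() note above corresponds to the usual pthread_atfork pattern: after fork(), the child inherits the parent's main-thread state but runs under a new OS tid, so a child handler refreshes the recorded id. A sketch (the installer name is hypothetical; the real hook lives elsewhere in the runtime):

    #include <pthread.h>

    static void HwasanAtforkChild() {
      // In the child the recorded os_id is still the parent's tid; fix it up.
      __hwasan::EnsureMainThreadIDIsCorrect();
    }

    void InstallAtforkHandler() {  // hypothetical installer
      pthread_atfork(/*prepare=*/nullptr, /*parent=*/nullptr, HwasanAtforkChild);
    }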
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.cpp
index fa46e658b69d..7df4dd3d7851 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.cpp
@@ -1,15 +1,28 @@
#include "hwasan_thread_list.h"
+#include "sanitizer_common/sanitizer_thread_arg_retval.h"
+
namespace __hwasan {
-static ALIGNED(16) char thread_list_placeholder[sizeof(HwasanThreadList)];
+
static HwasanThreadList *hwasan_thread_list;
+static ThreadArgRetval *thread_data;
HwasanThreadList &hwasanThreadList() { return *hwasan_thread_list; }
+ThreadArgRetval &hwasanThreadArgRetval() { return *thread_data; }
void InitThreadList(uptr storage, uptr size) {
- CHECK(hwasan_thread_list == nullptr);
+ CHECK_EQ(hwasan_thread_list, nullptr);
+
+ static ALIGNED(alignof(
+ HwasanThreadList)) char thread_list_placeholder[sizeof(HwasanThreadList)];
hwasan_thread_list =
new (thread_list_placeholder) HwasanThreadList(storage, size);
+
+ CHECK_EQ(thread_data, nullptr);
+
+ static ALIGNED(alignof(
+ ThreadArgRetval)) char thread_data_placeholder[sizeof(ThreadArgRetval)];
+ thread_data = new (thread_data_placeholder) ThreadArgRetval();
}
-} // namespace __hwasan
+} // namespace __hwasan
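
Both objects above are created with the same runtime idiom: function-local static storage sized and aligned for the type, plus placement new. This avoids C++ static constructors (which sanitizer runtimes cannot rely on) while still guaranteeing alignment. A generic sketch of the pattern, not hwasan code:

    #include <new>

    struct Registry { /* ... */ };
    static Registry *registry;

    void InitRegistry() {
      // Raw storage with the right size and alignment, and no static
      // constructor; placement new constructs the object in it exactly once.
      static alignas(Registry) char placeholder[sizeof(Registry)];
      registry = new (placeholder) Registry();
    }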
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.h
index 97485b195b64..82f6c70a03f8 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.h
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.h
@@ -47,8 +47,8 @@
#include "hwasan_allocator.h"
#include "hwasan_flags.h"
#include "hwasan_thread.h"
-
#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_thread_arg_retval.h"
namespace __hwasan {
@@ -131,9 +131,9 @@ class SANITIZER_MUTEX HwasanThreadList {
void ReleaseThread(Thread *t) SANITIZER_EXCLUDES(free_list_mutex_) {
RemoveThreadStats(t);
+ RemoveThreadFromLiveList(t);
t->Destroy();
DontNeedThread(t);
- RemoveThreadFromLiveList(t);
SpinMutexLock l(&free_list_mutex_);
free_list_.push_back(t);
}
@@ -157,7 +157,7 @@ class SANITIZER_MUTEX HwasanThreadList {
}
template <class CB>
- Thread *FindThreadLocked(CB cb) SANITIZER_CHECK_LOCKED(stats_mutex_) {
+ Thread *FindThreadLocked(CB cb) SANITIZER_CHECK_LOCKED(live_list_mutex_) {
CheckLocked();
for (Thread *t : live_list_)
if (cb(t))
@@ -199,7 +199,7 @@ class SANITIZER_MUTEX HwasanThreadList {
CHECK(IsAligned(free_space_, align));
Thread *t = (Thread *)(free_space_ + ring_buffer_size_);
free_space_ += thread_alloc_size_;
- CHECK(free_space_ <= free_space_end_ && "out of thread memory");
+ CHECK_LE(free_space_, free_space_end_);
return t;
}
@@ -222,5 +222,6 @@ class SANITIZER_MUTEX HwasanThreadList {
void InitThreadList(uptr storage, uptr size);
HwasanThreadList &hwasanThreadList();
+ThreadArgRetval &hwasanThreadArgRetval();
} // namespace __hwasan
diff --git a/contrib/llvm-project/compiler-rt/lib/interception/interception.h b/contrib/llvm-project/compiler-rt/lib/interception/interception.h
index d97974ee9074..078d33b61be3 100644
--- a/contrib/llvm-project/compiler-rt/lib/interception/interception.h
+++ b/contrib/llvm-project/compiler-rt/lib/interception/interception.h
@@ -14,9 +14,10 @@
#ifndef INTERCEPTION_H
#define INTERCEPTION_H
+#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
-#if !SANITIZER_LINUX && !SANITIZER_FREEBSD && !SANITIZER_APPLE && \
+#if !SANITIZER_LINUX && !SANITIZER_FREEBSD && !SANITIZER_APPLE && \
!SANITIZER_NETBSD && !SANITIZER_WINDOWS && !SANITIZER_FUCHSIA && \
!SANITIZER_SOLARIS
# error "Interception doesn't work on this operating system."
@@ -67,24 +68,50 @@ typedef __sanitizer::OFF64_T OFF64_T;
// for more details). To intercept such functions you need to use the
// INTERCEPTOR_WITH_SUFFIX(...) macro.
-// How it works:
-// To replace system functions on Linux we just need to declare functions
-// with same names in our library and then obtain the real function pointers
+// How it works on Linux
+// ---------------------
+//
+// To replace system functions on Linux we just need to declare functions with
+// the same names in our library and then obtain the real function pointers
// using dlsym().
-// There is one complication. A user may also intercept some of the functions
-// we intercept. To resolve this we declare our interceptors with __interceptor_
-// prefix, and then make actual interceptors weak aliases to __interceptor_
-// functions.
//
-// This is not so on Mac OS, where the two-level namespace makes
-// our replacement functions invisible to other libraries. This may be overcomed
-// using the DYLD_FORCE_FLAT_NAMESPACE, but some errors loading the shared
-// libraries in Chromium were noticed when doing so.
+// There is one complication: a user may also intercept some of the functions we
+// intercept. To allow for up to 3 interceptors (including ours) of a given
+// function "func", the interceptor implementation is in ___interceptor_func,
+// which is aliased by a weak function __interceptor_func, which in turn is
+// aliased (via a trampoline) by weak wrapper function "func".
+//
+// Most user interceptors should define a foreign interceptor as follows:
+//
+// - provide a non-weak function "func" that performs interception;
+// - if __interceptor_func exists, call it to perform the real functionality;
+// - if it does not exist, figure out the real function and call it instead.
+//
+// In rare cases, a foreign interceptor (of another dynamic analysis runtime)
+// may be defined as follows (on supported architectures):
+//
+// - provide a non-weak function __interceptor_func that performs interception;
+// - if ___interceptor_func exists, call it to perform the real functionality;
+// - if it does not exist, figure out the real function and call it instead;
+// - provide a weak function "func" that is an alias to __interceptor_func.
+//
+// With this protocol, sanitizer interceptors, foreign user interceptors, and
+// foreign interceptors of other dynamic analysis runtimes, or any combination
+// thereof, may co-exist simultaneously.
+//
+// How it works on Mac OS
+// ----------------------
+//
+// This is not so on Mac OS, where the two-level namespace makes our replacement
+// functions invisible to other libraries. This may be overcome using
+// DYLD_FORCE_FLAT_NAMESPACE, but errors loading the shared libraries in
+// Chromium were noticed when doing so.
+//
// Instead we create a dylib containing a __DATA,__interpose section that
// associates library functions with their wrappers. When this dylib is
-// preloaded before an executable using DYLD_INSERT_LIBRARIES, it routes all
-// the calls to interposed functions done through stubs to the wrapper
-// functions.
+// preloaded before an executable using DYLD_INSERT_LIBRARIES, it routes all the
+// calls to interposed functions done through stubs to the wrapper functions.
+//
// As it's decided at compile time which functions are to be intercepted on Mac,
// INTERCEPT_FUNCTION() is effectively a no-op on this system.
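
The protocol described above can be made concrete with a short sketch: a hypothetical foreign (user) interceptor for close() that cooperates with a sanitizer runtime when one is loaded and falls back to dlsym() otherwise. RTLD_NEXT may require _GNU_SOURCE on glibc; all names besides close/__interceptor_close are illustrative.

    #include <dlfcn.h>  // dlsym, RTLD_NEXT

    extern "C" int __interceptor_close(int fd) __attribute__((weak));

    extern "C" int close(int fd) {  // non-weak: overrides the weak wrapper
      // ... user instrumentation goes here ...
      if (__interceptor_close)           // a sanitizer runtime is present:
        return __interceptor_close(fd);  // let it perform the real work
      // No sanitizer loaded: resolve and call the real close() directly.
      using close_fn = int (*)(int);
      static close_fn real_close =
          reinterpret_cast<close_fn>(dlsym(RTLD_NEXT, "close"));
      return real_close(fd);
    }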
@@ -100,53 +127,102 @@ struct interpose_substitution {
// For a function foo() create a global pair of pointers { wrap_foo, foo } in
// the __DATA,__interpose section.
// As a result all the calls to foo() will be routed to wrap_foo() at runtime.
-#define INTERPOSER(func_name) __attribute__((used)) \
+#define INTERPOSER(func_name) __attribute__((used)) \
const interpose_substitution substitution_##func_name[] \
__attribute__((section("__DATA, __interpose"))) = { \
- { reinterpret_cast<const uptr>(WRAP(func_name)), \
- reinterpret_cast<const uptr>(func_name) } \
+ { reinterpret_cast<const uptr>(WRAP(func_name)), \
+ reinterpret_cast<const uptr>(func_name) } \
}
// For a function foo() and a wrapper function bar() create a global pair
// of pointers { bar, foo } in the __DATA,__interpose section.
// As a result all the calls to foo() will be routed to bar() at runtime.
#define INTERPOSER_2(func_name, wrapper_name) __attribute__((used)) \
-const interpose_substitution substitution_##func_name[] \
- __attribute__((section("__DATA, __interpose"))) = { \
- { reinterpret_cast<const uptr>(wrapper_name), \
- reinterpret_cast<const uptr>(func_name) } \
+const interpose_substitution substitution_##func_name[] \
+ __attribute__((section("__DATA, __interpose"))) = { \
+ { reinterpret_cast<const uptr>(wrapper_name), \
+ reinterpret_cast<const uptr>(func_name) } \
}
# define WRAP(x) wrap_##x
-# define WRAPPER_NAME(x) "wrap_"#x
+# define TRAMPOLINE(x) WRAP(x)
# define INTERCEPTOR_ATTRIBUTE
# define DECLARE_WRAPPER(ret_type, func, ...)
#elif SANITIZER_WINDOWS
# define WRAP(x) __asan_wrap_##x
-# define WRAPPER_NAME(x) "__asan_wrap_"#x
+# define TRAMPOLINE(x) WRAP(x)
# define INTERCEPTOR_ATTRIBUTE __declspec(dllexport)
-# define DECLARE_WRAPPER(ret_type, func, ...) \
+# define DECLARE_WRAPPER(ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__);
-# define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \
+# define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \
extern "C" __declspec(dllimport) ret_type __stdcall func(__VA_ARGS__);
-#elif SANITIZER_FREEBSD || SANITIZER_NETBSD
-# define WRAP(x) __interceptor_ ## x
-# define WRAPPER_NAME(x) "__interceptor_" #x
+#elif !SANITIZER_FUCHSIA // LINUX, FREEBSD, NETBSD, SOLARIS
# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
+# if ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT
+// Weak aliases of weak aliases do not work, therefore we need to set up a
+// trampoline function. The function "func" is a weak alias to the trampoline
+// (so that we may check if "func" was overridden), which calls the weak
+// function __interceptor_func, which in turn aliases the actual interceptor
+// implementation ___interceptor_func:
+//
+// [wrapper "func": weak] --(alias)--> [TRAMPOLINE(func)]
+// |
+// +--------(tail call)-------+
+// |
+// v
+// [__interceptor_func: weak] --(alias)--> [WRAP(func)]
+//
+// We use inline assembly to define most of this, because not all compilers
+// support functions with the "naked" attribute on every architecture.
+# define WRAP(x) ___interceptor_ ## x
+# define TRAMPOLINE(x) __interceptor_trampoline_ ## x
+# if SANITIZER_FREEBSD || SANITIZER_NETBSD
// FreeBSD's dynamic linker (incompliantly) gives non-weak symbols higher
// priority than weak ones so weak aliases won't work for indirect calls
// in position-independent (-fPIC / -fPIE) mode.
-# define DECLARE_WRAPPER(ret_type, func, ...) \
- extern "C" ret_type func(__VA_ARGS__) \
- __attribute__((alias("__interceptor_" #func), visibility("default")));
-#elif !SANITIZER_FUCHSIA
-# define WRAP(x) __interceptor_ ## x
-# define WRAPPER_NAME(x) "__interceptor_" #x
-# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
-# define DECLARE_WRAPPER(ret_type, func, ...) \
- extern "C" ret_type func(__VA_ARGS__) \
- __attribute__((weak, alias("__interceptor_" #func), visibility("default")));
+# define __ASM_WEAK_WRAPPER(func)
+# else
+# define __ASM_WEAK_WRAPPER(func) ".weak " #func "\n"
+# endif // SANITIZER_FREEBSD || SANITIZER_NETBSD
+// Keep trampoline implementation in sync with sanitizer_common/sanitizer_asm.h
+# define DECLARE_WRAPPER(ret_type, func, ...) \
+ extern "C" ret_type func(__VA_ARGS__); \
+ extern "C" ret_type TRAMPOLINE(func)(__VA_ARGS__); \
+ extern "C" ret_type __interceptor_##func(__VA_ARGS__) \
+ INTERCEPTOR_ATTRIBUTE __attribute__((weak)) ALIAS(WRAP(func)); \
+ asm( \
+ ".text\n" \
+ __ASM_WEAK_WRAPPER(func) \
+ ".set " #func ", " SANITIZER_STRINGIFY(TRAMPOLINE(func)) "\n" \
+ ".globl " SANITIZER_STRINGIFY(TRAMPOLINE(func)) "\n" \
+ ".type " SANITIZER_STRINGIFY(TRAMPOLINE(func)) ", %function\n" \
+ SANITIZER_STRINGIFY(TRAMPOLINE(func)) ":\n" \
+ SANITIZER_STRINGIFY(CFI_STARTPROC) "\n" \
+ SANITIZER_STRINGIFY(ASM_TAIL_CALL) " __interceptor_" \
+ SANITIZER_STRINGIFY(ASM_PREEMPTIBLE_SYM(func)) "\n" \
+ SANITIZER_STRINGIFY(CFI_ENDPROC) "\n" \
+ ".size " SANITIZER_STRINGIFY(TRAMPOLINE(func)) ", " \
+ ".-" SANITIZER_STRINGIFY(TRAMPOLINE(func)) "\n" \
+ );
+# else // ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT
+// Some architectures cannot implement efficient interceptor trampolines with
+// just a plain jump due to complexities of resolving a preemptible symbol. In
+// those cases, revert to just this scheme:
+//
+// [wrapper "func": weak] --(alias)--> [WRAP(func)]
+//
+# define WRAP(x) __interceptor_ ## x
+# define TRAMPOLINE(x) WRAP(x)
+# if SANITIZER_FREEBSD || SANITIZER_NETBSD
+# define __ATTRIBUTE_WEAK_WRAPPER
+# else
+# define __ATTRIBUTE_WEAK_WRAPPER __attribute__((weak))
+# endif // SANITIZER_FREEBSD || SANITIZER_NETBSD
+# define DECLARE_WRAPPER(ret_type, func, ...) \
+ extern "C" ret_type func(__VA_ARGS__) \
+ INTERCEPTOR_ATTRIBUTE __ATTRIBUTE_WEAK_WRAPPER ALIAS(WRAP(func));
+# endif // ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT
#endif
#if SANITIZER_FUCHSIA
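
To see what the trampoline scheme produces, here is a rough manual expansion of DECLARE_WRAPPER(int, close, int) for x86-64, with CFI directives elided; ASM_TAIL_CALL and ASM_PREEMPTIBLE_SYM are architecture-dependent, so the jump may instead be emitted through the PLT:

    extern "C" int close(int);
    extern "C" int __interceptor_trampoline_close(int);
    extern "C" int __interceptor_close(int)
        __attribute__((visibility("default"), weak,
                       alias("___interceptor_close")));
    asm(".text\n"
        ".weak close\n"  // __ASM_WEAK_WRAPPER; omitted on FreeBSD/NetBSD
        ".set close, __interceptor_trampoline_close\n"
        ".globl __interceptor_trampoline_close\n"
        ".type __interceptor_trampoline_close, %function\n"
        "__interceptor_trampoline_close:\n"
        "  jmp __interceptor_close\n"  // ASM_TAIL_CALL on x86-64
        ".size __interceptor_trampoline_close, "
        ".-__interceptor_trampoline_close\n");

The interceptor body itself, ___interceptor_close (that is, WRAP(close)), is supplied separately by the INTERCEPTOR() macro.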
@@ -162,10 +238,10 @@ const interpose_substitution substitution_##func_name[] \
# define REAL(x) __interception::PTR_TO_REAL(x)
# define FUNC_TYPE(x) x##_type
-# define DECLARE_REAL(ret_type, func, ...) \
+# define DECLARE_REAL(ret_type, func, ...) \
typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \
- namespace __interception { \
- extern FUNC_TYPE(func) PTR_TO_REAL(func); \
+ namespace __interception { \
+ extern FUNC_TYPE(func) PTR_TO_REAL(func); \
}
# define ASSIGN_REAL(dst, src) REAL(dst) = REAL(src)
#else // SANITIZER_APPLE
@@ -176,14 +252,16 @@ const interpose_substitution substitution_##func_name[] \
#endif // SANITIZER_APPLE
#if !SANITIZER_FUCHSIA
-# define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \
+# define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \
DECLARE_REAL(ret_type, func, __VA_ARGS__) \
+ extern "C" ret_type TRAMPOLINE(func)(__VA_ARGS__); \
extern "C" ret_type WRAP(func)(__VA_ARGS__);
// Declare an interceptor and its wrapper defined in a different translation
// unit (ex. asm).
-# define DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(ret_type, func, ...) \
- extern "C" ret_type WRAP(func)(__VA_ARGS__); \
- extern "C" ret_type func(__VA_ARGS__);
+# define DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(ret_type, func, ...) \
+ extern "C" ret_type TRAMPOLINE(func)(__VA_ARGS__); \
+ extern "C" ret_type WRAP(func)(__VA_ARGS__); \
+ extern "C" ret_type func(__VA_ARGS__);
#else
# define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...)
# define DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(ret_type, func, ...)
@@ -215,12 +293,10 @@ const interpose_substitution substitution_##func_name[] \
#elif !SANITIZER_APPLE
-#define INTERCEPTOR(ret_type, func, ...) \
- DEFINE_REAL(ret_type, func, __VA_ARGS__) \
- DECLARE_WRAPPER(ret_type, func, __VA_ARGS__) \
- extern "C" \
- INTERCEPTOR_ATTRIBUTE \
- ret_type WRAP(func)(__VA_ARGS__)
+#define INTERCEPTOR(ret_type, func, ...) \
+ DEFINE_REAL(ret_type, func, __VA_ARGS__) \
+ DECLARE_WRAPPER(ret_type, func, __VA_ARGS__) \
+ extern "C" INTERCEPTOR_ATTRIBUTE ret_type WRAP(func)(__VA_ARGS__)
// We don't need INTERCEPTOR_WITH_SUFFIX on non-Darwin for now.
#define INTERCEPTOR_WITH_SUFFIX(ret_type, func, ...) \
@@ -228,10 +304,10 @@ const interpose_substitution substitution_##func_name[] \
#else // SANITIZER_APPLE
-#define INTERCEPTOR_ZZZ(suffix, ret_type, func, ...) \
- extern "C" ret_type func(__VA_ARGS__) suffix; \
- extern "C" ret_type WRAP(func)(__VA_ARGS__); \
- INTERPOSER(func); \
+#define INTERCEPTOR_ZZZ(suffix, ret_type, func, ...) \
+ extern "C" ret_type func(__VA_ARGS__) suffix; \
+ extern "C" ret_type WRAP(func)(__VA_ARGS__); \
+ INTERPOSER(func); \
extern "C" INTERCEPTOR_ATTRIBUTE ret_type WRAP(func)(__VA_ARGS__)
#define INTERCEPTOR(ret_type, func, ...) \
@@ -246,14 +322,12 @@ const interpose_substitution substitution_##func_name[] \
#endif
#if SANITIZER_WINDOWS
-# define INTERCEPTOR_WINAPI(ret_type, func, ...) \
+# define INTERCEPTOR_WINAPI(ret_type, func, ...) \
typedef ret_type (__stdcall *FUNC_TYPE(func))(__VA_ARGS__); \
- namespace __interception { \
- FUNC_TYPE(func) PTR_TO_REAL(func); \
- } \
- extern "C" \
- INTERCEPTOR_ATTRIBUTE \
- ret_type __stdcall WRAP(func)(__VA_ARGS__)
+ namespace __interception { \
+ FUNC_TYPE(func) PTR_TO_REAL(func); \
+ } \
+ extern "C" INTERCEPTOR_ATTRIBUTE ret_type __stdcall WRAP(func)(__VA_ARGS__)
#endif
// ISO C++ forbids casting between pointer-to-function and pointer-to-object,
diff --git a/contrib/llvm-project/compiler-rt/lib/interception/interception_linux.cpp b/contrib/llvm-project/compiler-rt/lib/interception/interception_linux.cpp
index 5111a87f0a6c..ef8136eb4fc7 100644
--- a/contrib/llvm-project/compiler-rt/lib/interception/interception_linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/interception/interception_linux.cpp
@@ -33,7 +33,7 @@ static int StrCmp(const char *s1, const char *s2) {
}
#endif
-static void *GetFuncAddr(const char *name, uptr wrapper_addr) {
+static void *GetFuncAddr(const char *name, uptr trampoline) {
#if SANITIZER_NETBSD
// FIXME: Find a better way to handle renames
if (StrCmp(name, "sigaction"))
@@ -50,17 +50,17 @@ static void *GetFuncAddr(const char *name, uptr wrapper_addr) {
// In case `name' is not loaded, dlsym ends up finding the actual wrapper.
// We don't want to intercept the wrapper and have it point to itself.
- if ((uptr)addr == wrapper_addr)
+ if ((uptr)addr == trampoline)
addr = nullptr;
}
return addr;
}
bool InterceptFunction(const char *name, uptr *ptr_to_real, uptr func,
- uptr wrapper) {
- void *addr = GetFuncAddr(name, wrapper);
+ uptr trampoline) {
+ void *addr = GetFuncAddr(name, trampoline);
*ptr_to_real = (uptr)addr;
- return addr && (func == wrapper);
+ return addr && (func == trampoline);
}
// dlvsym is a GNU extension supported by some other platforms.
@@ -70,12 +70,12 @@ static void *GetFuncAddr(const char *name, const char *ver) {
}
bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,
- uptr func, uptr wrapper) {
+ uptr func, uptr trampoline) {
void *addr = GetFuncAddr(name, ver);
*ptr_to_real = (uptr)addr;
- return addr && (func == wrapper);
+ return addr && (func == trampoline);
}
-#endif // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
+# endif // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
} // namespace __interception
diff --git a/contrib/llvm-project/compiler-rt/lib/interception/interception_linux.h b/contrib/llvm-project/compiler-rt/lib/interception/interception_linux.h
index a08f8cb98c40..433a3d9bd7fa 100644
--- a/contrib/llvm-project/compiler-rt/lib/interception/interception_linux.h
+++ b/contrib/llvm-project/compiler-rt/lib/interception/interception_linux.h
@@ -15,7 +15,7 @@
SANITIZER_SOLARIS
#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
-# error "interception_linux.h should be included from interception library only"
+# error interception_linux.h should be included from interception library only
#endif
#ifndef INTERCEPTION_LINUX_H
@@ -23,26 +23,26 @@
namespace __interception {
bool InterceptFunction(const char *name, uptr *ptr_to_real, uptr func,
- uptr wrapper);
+ uptr trampoline);
bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,
- uptr func, uptr wrapper);
+ uptr func, uptr trampoline);
} // namespace __interception
#define INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func) \
::__interception::InterceptFunction( \
#func, \
- (::__interception::uptr *) & REAL(func), \
- (::__interception::uptr) & (func), \
- (::__interception::uptr) & WRAP(func))
+ (::__interception::uptr *)&REAL(func), \
+ (::__interception::uptr)&(func), \
+ (::__interception::uptr)&TRAMPOLINE(func))
// dlvsym is a GNU extension supported by some other platforms.
#if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
::__interception::InterceptFunction( \
#func, symver, \
- (::__interception::uptr *) & REAL(func), \
- (::__interception::uptr) & (func), \
- (::__interception::uptr) & WRAP(func))
+ (::__interception::uptr *)&REAL(func), \
+ (::__interception::uptr)&(func), \
+ (::__interception::uptr)&TRAMPOLINE(func))
#else
#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
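
Putting REAL, WRAP, and TRAMPOLINE together: an interceptor is defined with INTERCEPTOR() and registered at startup, and registration succeeds only when the publicly resolvable symbol is our trampoline, i.e. no foreign interceptor took precedence. A sketch (the init function name is illustrative):

    INTERCEPTOR(int, close, int fd) {
      // ... instrumentation ...
      return REAL(close)(fd);  // forward to the real libc implementation
    }

    void InitializeInterceptors() {  // illustrative init hook
      // Resolves REAL(close) via dlsym(); the result is true only when the
      // visible "close" is TRAMPOLINE(close), i.e. ours was not overridden.
      INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(close);
    }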
diff --git a/contrib/llvm-project/compiler-rt/lib/interception/interception_win.cpp b/contrib/llvm-project/compiler-rt/lib/interception/interception_win.cpp
index faaa8ee15381..00c317510e42 100644
--- a/contrib/llvm-project/compiler-rt/lib/interception/interception_win.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/interception/interception_win.cpp
@@ -141,8 +141,29 @@ static const int kBranchLength =
FIRST_32_SECOND_64(kJumpInstructionLength, kIndirectJumpInstructionLength);
static const int kDirectBranchLength = kBranchLength + kAddressLength;
+# if defined(_MSC_VER)
+# define INTERCEPTION_FORMAT(f, a)
+# else
+# define INTERCEPTION_FORMAT(f, a) __attribute__((format(printf, f, a)))
+# endif
+
+static void (*ErrorReportCallback)(const char *format, ...)
+ INTERCEPTION_FORMAT(1, 2);
+
+void SetErrorReportCallback(void (*callback)(const char *format, ...)) {
+ ErrorReportCallback = callback;
+}
+
+# define ReportError(...) \
+ do { \
+ if (ErrorReportCallback) \
+ ErrorReportCallback(__VA_ARGS__); \
+ } while (0)
+
static void InterceptionFailed() {
- // Do we have a good way to abort with an error message here?
+ ReportError("interception_win: failed due to an unrecoverable error.\n");
+ // This acts like an abort when no debugger is attached. According to an old
+ // comment, calling abort() leads to an infinite recursion in CheckFailed.
__debugbreak();
}
@@ -249,8 +270,13 @@ static void WritePadding(uptr from, uptr size) {
}
static void WriteJumpInstruction(uptr from, uptr target) {
- if (!DistanceIsWithin2Gig(from + kJumpInstructionLength, target))
+ if (!DistanceIsWithin2Gig(from + kJumpInstructionLength, target)) {
+ ReportError(
+ "interception_win: cannot write jmp further than 2GB away, from %p to "
+ "%p.\n",
+ (void *)from, (void *)target);
InterceptionFailed();
+ }
ptrdiff_t offset = target - from - kJumpInstructionLength;
*(u8*)from = 0xE9;
*(u32*)(from + 1) = offset;
@@ -274,6 +300,10 @@ static void WriteIndirectJumpInstruction(uptr from, uptr indirect_target) {
int offset = indirect_target - from - kIndirectJumpInstructionLength;
if (!DistanceIsWithin2Gig(from + kIndirectJumpInstructionLength,
indirect_target)) {
+ ReportError(
+ "interception_win: cannot write indirect jmp with target further than "
+ "2GB away, from %p to %p.\n",
+ (void *)from, (void *)indirect_target);
InterceptionFailed();
}
*(u16*)from = 0x25FF;
@@ -492,6 +522,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0xFF8B: // 8B FF : mov edi, edi
case 0xEC8B: // 8B EC : mov ebp, esp
case 0xc889: // 89 C8 : mov eax, ecx
+ case 0xE589: // 89 E5 : mov ebp, esp
case 0xC18B: // 8B C1 : mov eax, ecx
case 0xC033: // 33 C0 : xor eax, eax
case 0xC933: // 33 C9 : xor ecx, ecx
@@ -641,6 +672,8 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0x24448B: // 8B 44 24 XX : mov eax, dword ptr [esp + XX]
case 0x244C8B: // 8B 4C 24 XX : mov ecx, dword ptr [esp + XX]
case 0x24548B: // 8B 54 24 XX : mov edx, dword ptr [esp + XX]
+ case 0x245C8B: // 8B 5C 24 XX : mov ebx, dword ptr [esp + XX]
+ case 0x246C8B: // 8B 6C 24 XX : mov ebp, dword ptr [esp + XX]
case 0x24748B: // 8B 74 24 XX : mov esi, dword ptr [esp + XX]
case 0x247C8B: // 8B 7C 24 XX : mov edi, dword ptr [esp + XX]
return 4;
@@ -652,12 +685,20 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
}
#endif
- // Unknown instruction!
- // FIXME: Unknown instruction failures might happen when we add a new
- // interceptor or a new compiler version. In either case, they should result
- // in visible and readable error messages. However, merely calling abort()
- // leads to an infinite recursion in CheckFailed.
- InterceptionFailed();
+ // Unknown instruction! This might happen when we add a new interceptor, use
+ // a new compiler version, or if Windows changed how some functions are
+ // compiled. In any case, we print the address and 8 bytes of instructions
+ // to notify the user about the error and to help identify the unknown
+ // instruction. Don't treat this as a fatal error, but break into the
+ // debugger if one is attached.
+ u8 *bytes = (u8 *)address;
+ ReportError(
+ "interception_win: unhandled instruction at %p: %02x %02x %02x %02x %02x "
+ "%02x %02x %02x\n",
+ (void *)address, bytes[0], bytes[1], bytes[2], bytes[3], bytes[4],
+ bytes[5], bytes[6], bytes[7]);
+ if (::IsDebuggerPresent())
+ __debugbreak();
return 0;
}
@@ -678,6 +719,8 @@ static bool CopyInstructions(uptr to, uptr from, size_t size) {
while (cursor != size) {
size_t rel_offset = 0;
size_t instruction_size = GetInstructionSize(from + cursor, &rel_offset);
+ if (!instruction_size)
+ return false;
_memcpy((void*)(to + cursor), (void*)(from + cursor),
(size_t)instruction_size);
if (rel_offset) {
@@ -895,6 +938,10 @@ static void **InterestingDLLsAvailable() {
"msvcr120.dll", // VS2013
"vcruntime140.dll", // VS2015
"ucrtbase.dll", // Universal CRT
+#if (defined(__MINGW32__) && defined(__i386__))
+ "libc++.dll", // libc++
+ "libunwind.dll", // libunwind
+#endif
// NTDLL should go last as it exports some functions that we should
// override in the CRT [presumably only used internally].
"ntdll.dll", NULL};
diff --git a/contrib/llvm-project/compiler-rt/lib/interception/interception_win.h b/contrib/llvm-project/compiler-rt/lib/interception/interception_win.h
index 4590013019e3..f6eca82191cb 100644
--- a/contrib/llvm-project/compiler-rt/lib/interception/interception_win.h
+++ b/contrib/llvm-project/compiler-rt/lib/interception/interception_win.h
@@ -41,6 +41,11 @@ bool OverrideImportedFunction(const char *module_to_patch,
const char *function_name, uptr new_function,
uptr *orig_old_func);
+// Sets a callback to be used for reporting errors by interception_win. The
+// callback will be called with printf-like arguments. Intended to be used with
+// __sanitizer::Report. Pass nullptr to disable error reporting (default).
+void SetErrorReportCallback(void (*callback)(const char *format, ...));
+
#if !SANITIZER_WINDOWS64
// Exposed for unittests
bool OverrideFunctionWithDetour(
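
A plausible initialization sequence on Windows (a sketch; the exact call site differs per runtime, and __sanitizer::Report is the printf-like printer the comment above alludes to):

    // Route interception_win diagnostics through the sanitizer printer.
    void InitWindowsInterception() {  // hypothetical init hook
      __interception::SetErrorReportCallback(__sanitizer::Report);
    }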
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan.cpp
index 489c5ca01fed..6b223603c6a7 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan.cpp
@@ -36,7 +36,7 @@ void __sanitizer::BufferedStackTrace::UnwindImpl(
uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) {
using namespace __lsan;
uptr stack_top = 0, stack_bottom = 0;
- if (ThreadContext *t = CurrentThreadContext()) {
+ if (ThreadContextLsanBase *t = GetCurrentThread()) {
stack_top = t->stack_end();
stack_bottom = t->stack_begin();
}
@@ -97,7 +97,7 @@ extern "C" void __lsan_init() {
ReplaceSystemMalloc();
InitTlsSize();
InitializeInterceptors();
- InitializeThreadRegistry();
+ InitializeThreads();
InstallDeadlySignalHandlers(LsanOnDeadlySignal);
InitializeMainThread();
InstallAtExitCheckLeaks();
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.cpp
index b18d829a1a2a..12d579a9385b 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.cpp
@@ -49,8 +49,11 @@ void InitializeAllocator() {
max_malloc_size = kMaxAllowedMallocSize;
}
+void AllocatorThreadStart() { allocator.InitCache(GetAllocatorCache()); }
+
void AllocatorThreadFinish() {
allocator.SwallowCache(GetAllocatorCache());
+ allocator.DestroyCache(GetAllocatorCache());
}
static ChunkMetadata *Metadata(const void *p) {
@@ -65,12 +68,14 @@ static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
m->stack_trace_id = StackDepotPut(stack);
m->requested_size = size;
atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
+ RunMallocHooks(p, size);
}
static void RegisterDeallocation(void *p) {
if (!p) return;
ChunkMetadata *m = Metadata(p);
CHECK(m);
+ RunFreeHooks(p);
atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}
@@ -104,7 +109,6 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
if (cleared && allocator.FromPrimary(p))
memset(p, 0, size);
RegisterAllocation(stack, p, size);
- RunMallocHooks(p, size);
return p;
}
@@ -119,7 +123,6 @@ static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
}
void Deallocate(void *p) {
- RunFreeHooks(p);
RegisterDeallocation(p);
allocator.Deallocate(GetAllocatorCache(), p);
}
@@ -145,6 +148,22 @@ void GetAllocatorCacheRange(uptr *begin, uptr *end) {
*end = *begin + sizeof(AllocatorCache);
}
+static const void *GetMallocBegin(const void *p) {
+ if (!p)
+ return nullptr;
+ void *beg = allocator.GetBlockBegin(p);
+ if (!beg)
+ return nullptr;
+ ChunkMetadata *m = Metadata(beg);
+ if (!m)
+ return nullptr;
+ if (!m->allocated)
+ return nullptr;
+ if (m->requested_size == 0)
+ return nullptr;
+ return (const void *)beg;
+}
+
uptr GetMallocUsableSize(const void *p) {
if (!p)
return 0;
@@ -153,6 +172,10 @@ uptr GetMallocUsableSize(const void *p) {
return m->requested_size;
}
+uptr GetMallocUsableSizeFast(const void *p) {
+ return Metadata(p)->requested_size;
+}
+
int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
const StackTrace &stack) {
if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
@@ -275,6 +298,10 @@ uptr GetUserBegin(uptr chunk) {
return chunk;
}
+uptr GetUserAddr(uptr chunk) {
+ return chunk;
+}
+
LsanMetadata::LsanMetadata(uptr chunk) {
metadata_ = Metadata(reinterpret_cast<void *>(chunk));
CHECK(metadata_);
@@ -304,7 +331,7 @@ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
allocator.ForEachChunk(callback, arg);
}
-IgnoreObjectResult IgnoreObjectLocked(const void *p) {
+IgnoreObjectResult IgnoreObject(const void *p) {
void *chunk = allocator.GetBlockBegin(p);
if (!chunk || p < chunk) return kIgnoreObjectInvalid;
ChunkMetadata *m = Metadata(chunk);
@@ -319,15 +346,6 @@ IgnoreObjectResult IgnoreObjectLocked(const void *p) {
}
}
-void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
- // This function can be used to treat memory reachable from `tctx` as live.
- // This is useful for threads that have been created but not yet started.
-
- // This is currently a no-op because the LSan `pthread_create()` interceptor
- // blocks until the child thread starts which keeps the thread's `arg` pointer
- // live.
-}
-
} // namespace __lsan
using namespace __lsan;
@@ -348,7 +366,7 @@ uptr __sanitizer_get_heap_size() {
}
SANITIZER_INTERFACE_ATTRIBUTE
-uptr __sanitizer_get_free_bytes() { return 0; }
+uptr __sanitizer_get_free_bytes() { return 1; }
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }
@@ -357,11 +375,29 @@ SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
SANITIZER_INTERFACE_ATTRIBUTE
-int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }
+int __sanitizer_get_ownership(const void *p) {
+ return GetMallocBegin(p) != nullptr;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+const void *__sanitizer_get_allocated_begin(const void *p) {
+ return GetMallocBegin(p);
+}
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
return GetMallocUsableSize(p);
}
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_allocated_size_fast(const void *p) {
+ DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
+ uptr ret = GetMallocUsableSizeFast(p);
+ DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }
+
} // extern "C"
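
The new queries compose as follows; a sketch against the public interface in <sanitizer/allocator_interface.h>, with the commented results assuming an LSan-instrumented binary:

    #include <sanitizer/allocator_interface.h>
    #include <stdlib.h>

    void Demo() {
      char *p = static_cast<char *>(malloc(40));
      // Interior pointers map back to the chunk start...
      const void *beg = __sanitizer_get_allocated_begin(p + 8);  // == p
      // ...and the size queries are defined for the chunk start.
      size_t n = __sanitizer_get_allocated_size(beg);       // == 40
      size_t m = __sanitizer_get_allocated_size_fast(beg);  // == 40; the fast
      (void)n;                    // variant requires beg == chunk start
      (void)m;
      free(p);
    }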
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.h b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.h
index b67d9d7750ef..84cce4c6baeb 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.h
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.h
@@ -32,6 +32,7 @@ template<typename Callable>
void ForEachChunk(const Callable &callback);
void GetAllocatorCacheRange(uptr *begin, uptr *end);
+void AllocatorThreadStart();
void AllocatorThreadFinish();
void InitializeAllocator();
@@ -68,13 +69,13 @@ using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
# if SANITIZER_FUCHSIA || defined(__powerpc64__)
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
-#elif defined(__s390x__)
-const uptr kAllocatorSpace = 0x40000000000ULL;
-const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
-# else
+# elif SANITIZER_APPLE
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
-# endif
+# else
+const uptr kAllocatorSpace = 0x500000000000ULL;
+const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+# endif
template <typename AddressSpaceViewTy>
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = kAllocatorSpace;
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.cpp
index 1b47e83a1056..9b73ddbdc756 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.cpp
@@ -34,15 +34,13 @@
# else
# define OBJC_DATA_MASK 0x00007ffffffffff8UL
# endif
-// https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L139
-# define OBJC_FAST_IS_RW 0x8000000000000000UL
# endif
namespace __lsan {
// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
-Mutex global_mutex;
+static Mutex global_mutex;
Flags lsan_flags;
@@ -173,13 +171,11 @@ static uptr GetCallerPC(const StackTrace &stack) {
}
# if SANITIZER_APPLE
-// Objective-C class data pointers are stored with flags in the low bits, so
-// they need to be transformed back into something that looks like a pointer.
-static inline void *MaybeTransformPointer(void *p) {
+// Several pointers in the Objective-C runtime (method cache and class_rw_t,
+// for example) are tagged with additional bits we need to strip.
+static inline void *TransformPointer(void *p) {
uptr ptr = reinterpret_cast<uptr>(p);
- if ((ptr & OBJC_FAST_IS_RW) == OBJC_FAST_IS_RW)
- ptr &= OBJC_DATA_MASK;
- return reinterpret_cast<void *>(ptr);
+ return reinterpret_cast<void *>(ptr & OBJC_DATA_MASK);
}
# endif
@@ -241,12 +237,6 @@ static LeakSuppressionContext *GetSuppressionContext() {
return suppression_ctx;
}
-static InternalMmapVectorNoCtor<RootRegion> root_regions;
-
-InternalMmapVectorNoCtor<RootRegion> const *GetRootRegions() {
- return &root_regions;
-}
-
void InitCommonLsan() {
if (common_flags()->detect_leaks) {
// Initialization which can fail or print warnings should only be done if
@@ -270,13 +260,22 @@ static inline bool MaybeUserPointer(uptr p) {
if (p < kMinAddress)
return false;
# if defined(__x86_64__)
- // Accept only canonical form user-space addresses.
- return ((p >> 47) == 0);
+ // TODO: support LAM48 and 5 level page tables.
+ // LAM_U57 mask format
+ // * top byte: 0x81 because the format is: [0] [6-bit tag] [0]
+ // * top-1 byte: 0xff because it should be 0
+ // * top-2 byte: 0x80 because Linux uses 128 TB VMA ending at 0x7fffffffffff
+ constexpr uptr kLAM_U57Mask = 0x81ff80;
+ constexpr uptr kPointerMask = kLAM_U57Mask << 40;
+ return ((p & kPointerMask) == 0);
# elif defined(__mips64)
return ((p >> 40) == 0);
# elif defined(__aarch64__)
+ // TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in
+ // address translation and can be used to store a tag.
+ constexpr uptr kPointerMask = 255ULL << 48;
// Accept up to 48 bit VMA.
- return ((p >> 48) == 0);
+ return ((p & kPointerMask) == 0);
# elif defined(__loongarch_lp64)
    // Allow only a 47-bit user-space VMA for now.
return ((p >> 47) == 0);
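The x86_64 check above folds three byte tests into one constant: 0x81ff80 shifted left by 40 covers bit 63 and bit 56 (the [0] tag [0] framing), bits 55:48, and bit 47, while deliberately skipping bits 62:57 where the LAM_U57 tag lives. Spelled out with static_asserts (illustrative only):

constexpr unsigned long long kMask = 0x81ff80ULL << 40;  // same value as kPointerMask above
static_assert((0x00007fffffffffffULL & kMask) == 0, "plain 47-bit user pointer: accepted");
static_assert(((0x3fULL << 57) & kMask) == 0, "LAM_U57 tag bits are ignored");
static_assert(((1ULL << 47) & kMask) != 0, "above the 128 TB user VMA: rejected");
static_assert(((1ULL << 63) & kMask) != 0, "kernel-half address: rejected");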
@@ -303,7 +302,7 @@ void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
for (; pp + sizeof(void *) <= end; pp += alignment) {
void *p = *reinterpret_cast<void **>(pp);
# if SANITIZER_APPLE
- p = MaybeTransformPointer(p);
+ p = TransformPointer(p);
# endif
if (!MaybeUserPointer(reinterpret_cast<uptr>(p)))
continue;
@@ -523,38 +522,52 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
# endif // SANITIZER_FUCHSIA
-void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
- uptr region_begin, uptr region_end, bool is_readable) {
- uptr intersection_begin = Max(root_region.begin, region_begin);
- uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
- if (intersection_begin >= intersection_end)
- return;
- LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
- (void *)root_region.begin,
- (void *)(root_region.begin + root_region.size),
- (void *)region_begin, (void *)region_end,
- is_readable ? "readable" : "unreadable");
- if (is_readable)
- ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
- kReachable);
+// Maps each registered [region_begin, region_end) range to its registration count.
+using RootRegions = DenseMap<detail::DenseMapPair<uptr, uptr>, uptr>;
+
+static RootRegions &GetRootRegionsLocked() {
+ global_mutex.CheckLocked();
+ static RootRegions *regions = nullptr;
+ alignas(RootRegions) static char placeholder[sizeof(RootRegions)];
+ if (!regions)
+ regions = new (placeholder) RootRegions();
+ return *regions;
}
-static void ProcessRootRegion(Frontier *frontier,
- const RootRegion &root_region) {
- MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
- MemoryMappedSegment segment;
- while (proc_maps.Next(&segment)) {
- ScanRootRegion(frontier, root_region, segment.start, segment.end,
- segment.IsReadable());
+bool HasRootRegions() { return !GetRootRegionsLocked().empty(); }
+
+void ScanRootRegions(Frontier *frontier,
+ const InternalMmapVectorNoCtor<Region> &mapped_regions) {
+ if (!flags()->use_root_regions)
+ return;
+
+ InternalMmapVector<Region> regions;
+ GetRootRegionsLocked().forEach([&](const auto &kv) {
+ regions.push_back({kv.first.first, kv.first.second});
+ return true;
+ });
+
+ InternalMmapVector<Region> intersection;
+ Intersect(mapped_regions, regions, intersection);
+
+ for (const Region &r : intersection) {
+ LOG_POINTERS("Root region intersects with mapped region at %p-%p\n",
+ (void *)r.begin, (void *)r.end);
+ ScanRangeForPointers(r.begin, r.end, frontier, "ROOT", kReachable);
}
}
// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
- if (!flags()->use_root_regions)
+ if (!flags()->use_root_regions || !HasRootRegions())
return;
- for (uptr i = 0; i < root_regions.size(); i++)
- ProcessRootRegion(frontier, root_regions[i]);
+ MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
+ MemoryMappedSegment segment;
+ InternalMmapVector<Region> mapped_regions;
+ while (proc_maps.Next(&segment))
+ if (segment.IsReadable())
+ mapped_regions.push_back({segment.start, segment.end});
+ ScanRootRegions(frontier, mapped_regions);
}
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
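Rather than rescanning the memory map once per root region, the rewritten code gathers the registered ranges and the readable mapped segments into two lists and overlaps them in one pass via Intersect() from sanitizer_range.h. A standalone sketch of that pairwise intersection, assuming each list holds disjoint half-open ranges (illustrative, not the in-tree implementation):

#include <algorithm>
#include <cstdint>
#include <vector>

struct Range { uintptr_t begin, end; };  // half-open [begin, end)

std::vector<Range> Intersect(std::vector<Range> a, std::vector<Range> b) {
  auto by_begin = [](const Range &x, const Range &y) { return x.begin < y.begin; };
  std::sort(a.begin(), a.end(), by_begin);
  std::sort(b.begin(), b.end(), by_begin);
  std::vector<Range> out;
  size_t i = 0, j = 0;
  while (i < a.size() && j < b.size()) {
    uintptr_t lo = std::max(a[i].begin, b[j].begin);
    uintptr_t hi = std::min(a[i].end, b[j].end);
    if (lo < hi)
      out.push_back({lo, hi});          // keep the overlapping part, if any
    (a[i].end < b[j].end) ? ++i : ++j;  // advance whichever range ends first
  }
  return out;
}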
@@ -859,7 +872,7 @@ void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) {
leaks_.push_back(leak);
}
if (flags()->report_objects) {
- LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
+ LeakedObject obj = {leaks_[i].id, GetUserAddr(chunk), leaked_size};
leaked_objects_.push_back(obj);
}
}
@@ -944,7 +957,7 @@ void LeakReport::PrintSummary() {
uptr LeakReport::ApplySuppressions() {
LeakSuppressionContext *suppressions = GetSuppressionContext();
- uptr new_suppressions = false;
+ uptr new_suppressions = 0;
for (uptr i = 0; i < leaks_.size(); i++) {
if (suppressions->Suppress(leaks_[i].stack_trace_id, leaks_[i].hit_count,
leaks_[i].total_size)) {
@@ -993,7 +1006,7 @@ void __lsan_ignore_object(const void *p) {
// Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
// locked.
Lock l(&global_mutex);
- IgnoreObjectResult res = IgnoreObjectLocked(p);
+ IgnoreObjectResult res = IgnoreObject(p);
if (res == kIgnoreObjectInvalid)
VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
if (res == kIgnoreObjectAlreadyIgnored)
@@ -1009,36 +1022,37 @@ void __lsan_ignore_object(const void *p) {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
- Lock l(&global_mutex);
- RootRegion region = {reinterpret_cast<uptr>(begin), size};
- root_regions.push_back(region);
VReport(1, "Registered root region at %p of size %zu\n", begin, size);
+ uptr b = reinterpret_cast<uptr>(begin);
+ uptr e = b + size;
+ CHECK_LT(b, e);
+
+ Lock l(&global_mutex);
+ ++GetRootRegionsLocked()[{b, e}];
#endif // CAN_SANITIZE_LEAKS
}
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
- Lock l(&global_mutex);
- bool removed = false;
- for (uptr i = 0; i < root_regions.size(); i++) {
- RootRegion region = root_regions[i];
- if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
- removed = true;
- uptr last_index = root_regions.size() - 1;
- root_regions[i] = root_regions[last_index];
- root_regions.pop_back();
- VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
- break;
+ uptr b = reinterpret_cast<uptr>(begin);
+ uptr e = b + size;
+ CHECK_LT(b, e);
+ VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
+
+ {
+ Lock l(&global_mutex);
+ if (auto *f = GetRootRegionsLocked().find({b, e})) {
+ if (--(f->second) == 0)
+ GetRootRegionsLocked().erase(f);
+ return;
}
}
- if (!removed) {
- Report(
- "__lsan_unregister_root_region(): region at %p of size %zu has not "
- "been registered.\n",
- begin, size);
- Die();
- }
+ Report(
+ "__lsan_unregister_root_region(): region at %p of size %zu has not "
+ "been registered.\n",
+ begin, size);
+ Die();
#endif // CAN_SANITIZE_LEAKS
}
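Root regions are now keyed by their exact [begin, begin + size) range and reference-counted, so registrations and unregistrations must pair up, and unregistering a range that was never registered still aborts. For example (illustrative):

#include <sanitizer/lsan_interface.h>

static char arena[4096];

void root_region_example() {
  __lsan_register_root_region(arena, sizeof(arena));
  __lsan_register_root_region(arena, sizeof(arena));    // same key, count -> 2
  __lsan_unregister_root_region(arena, sizeof(arena));  // count -> 1, still a root
  __lsan_unregister_root_region(arena, sizeof(arena));  // count -> 0, removed
  // One more unregister of this exact range would Report() and Die().
}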
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.h b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.h
index 0d5c00310842..d3e768363e93 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.h
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.h
@@ -18,6 +18,7 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_platform.h"
+#include "sanitizer_common/sanitizer_range.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
@@ -79,11 +80,6 @@ enum IgnoreObjectResult {
kIgnoreObjectInvalid
};
-struct Range {
- uptr begin;
- uptr end;
-};
-
//// --------------------------------------------------------------------------
//// Poisoning prototypes.
//// --------------------------------------------------------------------------
@@ -96,8 +92,8 @@ bool WordIsPoisoned(uptr addr);
//// --------------------------------------------------------------------------
// Wrappers for ThreadRegistry access.
-void LockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
-void UnlockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
+void LockThreads() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
+void UnlockThreads() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
// If called from the main thread, updates the main thread's TID in the thread
// registry. We need this to handle processes that fork() without a subsequent
// exec(), which invalidates the recorded TID. To update it, we must call
@@ -131,6 +127,9 @@ void GetAllocatorGlobalRange(uptr *begin, uptr *end);
uptr PointsIntoChunk(void *p);
// Returns address of user-visible chunk contained in this allocator chunk.
uptr GetUserBegin(uptr chunk);
+// Returns user-visible address for chunk. If memory tagging is used this
+// function will return the tagged address.
+uptr GetUserAddr(uptr chunk);
// Wrapper for chunk metadata operations.
class LsanMetadata {
@@ -151,19 +150,19 @@ class LsanMetadata {
void ForEachChunk(ForEachChunkCallback callback, void *arg);
// Helper for __lsan_ignore_object().
-IgnoreObjectResult IgnoreObjectLocked(const void *p);
+IgnoreObjectResult IgnoreObject(const void *p);
// The rest of the LSan interface which is implemented by library.
struct ScopedStopTheWorldLock {
ScopedStopTheWorldLock() {
- LockThreadRegistry();
+ LockThreads();
LockAllocator();
}
~ScopedStopTheWorldLock() {
UnlockAllocator();
- UnlockThreadRegistry();
+ UnlockThreads();
}
ScopedStopTheWorldLock &operator=(const ScopedStopTheWorldLock &) = delete;
@@ -236,11 +235,6 @@ void InitializePlatformSpecificModules();
void ProcessGlobalRegions(Frontier *frontier);
void ProcessPlatformSpecificAllocations(Frontier *frontier);
-struct RootRegion {
- uptr begin;
- uptr size;
-};
-
// LockStuffAndStopTheWorld can start to use Scan* calls to collect into
// this Frontier vector before the StopTheWorldCallback actually runs.
// This is used when the OS has a unified callback API for suspending
@@ -253,9 +247,11 @@ struct CheckForLeaksParam {
bool success = false;
};
-InternalMmapVectorNoCtor<RootRegion> const *GetRootRegions();
-void ScanRootRegion(Frontier *frontier, RootRegion const &region,
- uptr region_begin, uptr region_end, bool is_readable);
+using Region = Range;
+
+bool HasRootRegions();
+void ScanRootRegions(Frontier *frontier,
+ const InternalMmapVectorNoCtor<Region> &region);
// Run stoptheworld while holding any platform-specific locks, as well as the
// allocator and thread registry locks.
void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_mac.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_mac.cpp
index b6b15095744d..4e5198979b95 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_mac.cpp
@@ -25,6 +25,8 @@
# include "sanitizer_common/sanitizer_allocator_internal.h"
namespace __lsan {
+class ThreadContextLsanBase;
+
enum class SeenRegion {
None = 0,
AllocOnce = 1 << 0,
@@ -50,18 +52,18 @@ struct RegionScanState {
typedef struct {
int disable_counter;
- u32 current_thread_id;
+ ThreadContextLsanBase *current_thread;
AllocatorCache cache;
} thread_local_data_t;
static pthread_key_t key;
static pthread_once_t key_once = PTHREAD_ONCE_INIT;
-// The main thread destructor requires the current thread id,
-// so we can't destroy it until it's been used and reset to invalid tid
+// The main thread destructor requires the current thread,
+// so we can't destroy it until it's been used and reset.
void restore_tid_data(void *ptr) {
thread_local_data_t *data = (thread_local_data_t *)ptr;
- if (data->current_thread_id != kInvalidTid)
+ if (data->current_thread)
pthread_setspecific(key, data);
}
@@ -76,7 +78,7 @@ static thread_local_data_t *get_tls_val(bool alloc) {
if (ptr == NULL && alloc) {
ptr = (thread_local_data_t *)InternalAlloc(sizeof(*ptr));
ptr->disable_counter = 0;
- ptr->current_thread_id = kInvalidTid;
+ ptr->current_thread = nullptr;
ptr->cache = AllocatorCache();
pthread_setspecific(key, ptr);
}
@@ -99,12 +101,14 @@ void EnableInThisThread() {
--*disable_counter;
}
-u32 GetCurrentThread() {
+ThreadContextLsanBase *GetCurrentThread() {
thread_local_data_t *data = get_tls_val(false);
- return data ? data->current_thread_id : kInvalidTid;
+ return data ? data->current_thread : nullptr;
}
-void SetCurrentThread(u32 tid) { get_tls_val(true)->current_thread_id = tid; }
+void SetCurrentThread(ThreadContextLsanBase *tctx) {
+ get_tls_val(true)->current_thread = tctx;
+}
AllocatorCache *GetAllocatorCache() { return &get_tls_val(true)->cache; }
@@ -161,7 +165,8 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) {
vm_address_t address = 0;
kern_return_t err = KERN_SUCCESS;
- InternalMmapVectorNoCtor<RootRegion> const *root_regions = GetRootRegions();
+ InternalMmapVector<Region> mapped_regions;
+ bool use_root_regions = flags()->use_root_regions && HasRootRegions();
RegionScanState scan_state;
while (err == KERN_SUCCESS) {
@@ -199,8 +204,7 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) {
// Recursing over the full memory map is very slow, break out
// early if we don't need the full iteration.
- if (scan_state.seen_regions == SeenRegion::All &&
- !(flags()->use_root_regions && root_regions->size() > 0)) {
+ if (scan_state.seen_regions == SeenRegion::All && !use_root_regions) {
break;
}
@@ -211,15 +215,12 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) {
//
// TODO(fjricci) - remove this once sanitizer_procmaps_mac has the same
// behavior as sanitizer_procmaps_linux and traverses all memory regions
- if (flags()->use_root_regions) {
- for (uptr i = 0; i < root_regions->size(); i++) {
- ScanRootRegion(frontier, (*root_regions)[i], address, end_address,
- info.protection & kProtectionRead);
- }
- }
+ if (use_root_regions && (info.protection & kProtectionRead))
+ mapped_regions.push_back({address, end_address});
address = end_address;
}
+ ScanRootRegions(frontier, mapped_regions);
}
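The Darwin path gets the same restructuring: readable segments are collected while walking the kernel memory map and handed to ScanRootRegions() once at the end, instead of intersecting every root region against every segment inside the loop. The shape of the flow, with a hypothetical NextReadableSegment() standing in for the mach_vm_region_recurse() walk (illustrative):

namespace __lsan {
// Hypothetical stand-in for the kernel memory-map walk above; not a real API.
bool NextReadableSegment(uptr *begin, uptr *end);

static void CollectThenScan(Frontier *frontier) {
  InternalMmapVector<Region> mapped;
  uptr b, e;
  while (NextReadableSegment(&b, &e))
    mapped.push_back({b, e});         // collect readable segments during the walk
  ScanRootRegions(frontier, mapped);  // intersect with root regions, scan once
}
}  // namespace __lsan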
// On darwin, we can intercept _exit gracefully, and return a failing exit code
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_fuchsia.cpp
index 03ac0afbabff..4edac9757a9c 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_fuchsia.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_fuchsia.cpp
@@ -46,6 +46,7 @@ struct OnStartedArgs {
};
void ThreadContext::OnStarted(void *arg) {
+ ThreadContextLsanBase::OnStarted(arg);
auto args = reinterpret_cast<const OnStartedArgs *>(arg);
cache_begin_ = args->cache_begin;
cache_end_ = args->cache_end;
@@ -98,7 +99,7 @@ void *__sanitizer_before_thread_create_hook(thrd_t thread, bool detached,
OnCreatedArgs args;
args.stack_begin = reinterpret_cast<uptr>(stack_base);
args.stack_end = args.stack_begin + stack_size;
- u32 parent_tid = GetCurrentThread();
+ u32 parent_tid = GetCurrentThreadId();
u32 tid = ThreadCreate(parent_tid, detached, &args);
return reinterpret_cast<void *>(static_cast<uptr>(tid));
}
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_interceptors.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_interceptors.cpp
index 3a1b2afdbb74..fac6133ddf21 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_interceptors.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_interceptors.cpp
@@ -197,7 +197,7 @@ INTERCEPTOR(void*, pvalloc, uptr size) {
#endif // SANITIZER_INTERCEPT_PVALLOC
#if SANITIZER_INTERCEPT_CFREE
-INTERCEPTOR(void, cfree, void *p) ALIAS(WRAPPER_NAME(free));
+INTERCEPTOR(void, cfree, void *p) ALIAS(WRAP(free));
#define LSAN_MAYBE_INTERCEPT_CFREE INTERCEPT_FUNCTION(cfree)
#else
#define LSAN_MAYBE_INTERCEPT_CFREE
@@ -415,16 +415,10 @@ INTERCEPTOR(char *, strerror, int errnum) {
#if SANITIZER_POSIX
-struct ThreadParam {
- void *(*callback)(void *arg);
- void *param;
- atomic_uintptr_t tid;
-};
-
-extern "C" void *__lsan_thread_start_func(void *arg) {
- ThreadParam *p = (ThreadParam*)arg;
- void* (*callback)(void *arg) = p->callback;
- void *param = p->param;
+template <bool Detached>
+static void *ThreadStartFunc(void *arg) {
+ u32 parent_tid = (uptr)arg;
+ uptr tid = ThreadCreate(parent_tid, Detached);
// Wait until the last iteration to maximize the chance that we are the last
// destructor to run.
#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD
@@ -433,55 +427,103 @@ extern "C" void *__lsan_thread_start_func(void *arg) {
Report("LeakSanitizer: failed to set thread key.\n");
Die();
}
-#endif
- int tid = 0;
- while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
- internal_sched_yield();
+# endif
ThreadStart(tid, GetTid());
- atomic_store(&p->tid, 0, memory_order_release);
- return callback(param);
+ auto self = GetThreadSelf();
+ auto args = GetThreadArgRetval().GetArgs(self);
+ void *retval = (*args.routine)(args.arg_retval);
+ GetThreadArgRetval().Finish(self, retval);
+ return retval;
}
INTERCEPTOR(int, pthread_create, void *th, void *attr,
void *(*callback)(void *), void *param) {
ENSURE_LSAN_INITED;
EnsureMainThreadIDIsCorrect();
+
+ bool detached = [attr]() {
+ int d = 0;
+ return attr && !pthread_attr_getdetachstate(attr, &d) && IsStateDetached(d);
+ }();
+
__sanitizer_pthread_attr_t myattr;
if (!attr) {
pthread_attr_init(&myattr);
attr = &myattr;
}
AdjustStackSize(attr);
- int detached = 0;
- pthread_attr_getdetachstate(attr, &detached);
- ThreadParam p;
- p.callback = callback;
- p.param = param;
- atomic_store(&p.tid, 0, memory_order_relaxed);
- int res;
+ uptr this_tid = GetCurrentThreadId();
+ int result;
{
// Ignore all allocations made by pthread_create: thread stack/TLS may be
// stored by pthread for future reuse even after thread destruction, and
// the linked list it's stored in doesn't even hold valid pointers to the
// objects, the latter are calculated by obscure pointer arithmetic.
ScopedInterceptorDisabler disabler;
- res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p);
- }
- if (res == 0) {
- int tid = ThreadCreate(GetCurrentThread(), IsStateDetached(detached));
- CHECK_NE(tid, kMainTid);
- atomic_store(&p.tid, tid, memory_order_release);
- while (atomic_load(&p.tid, memory_order_acquire) != 0)
- internal_sched_yield();
+ GetThreadArgRetval().Create(detached, {callback, param}, [&]() -> uptr {
+ result = REAL(pthread_create)(
+ th, attr, detached ? ThreadStartFunc<true> : ThreadStartFunc<false>,
+ (void *)this_tid);
+ return result ? 0 : *(uptr *)(th);
+ });
}
if (attr == &myattr)
pthread_attr_destroy(&myattr);
- return res;
+ return result;
}
-INTERCEPTOR(int, pthread_join, void *t, void **arg) {
- return REAL(pthread_join)(t, arg);
+INTERCEPTOR(int, pthread_join, void *thread, void **retval) {
+ int result;
+ GetThreadArgRetval().Join((uptr)thread, [&]() {
+ result = REAL(pthread_join)(thread, retval);
+ return !result;
+ });
+ return result;
+}
+
+INTERCEPTOR(int, pthread_detach, void *thread) {
+ int result;
+ GetThreadArgRetval().Detach((uptr)thread, [&]() {
+ result = REAL(pthread_detach)(thread);
+ return !result;
+ });
+ return result;
+}
+
+INTERCEPTOR(int, pthread_exit, void *retval) {
+ GetThreadArgRetval().Finish(GetThreadSelf(), retval);
+ return REAL(pthread_exit)(retval);
+}
+
+# if SANITIZER_INTERCEPT_TRYJOIN
+INTERCEPTOR(int, pthread_tryjoin_np, void *thread, void **ret) {
+ int result;
+ GetThreadArgRetval().Join((uptr)thread, [&]() {
+ result = REAL(pthread_tryjoin_np)(thread, ret);
+ return !result;
+ });
+ return result;
+}
+# define LSAN_MAYBE_INTERCEPT_TRYJOIN INTERCEPT_FUNCTION(pthread_tryjoin_np)
+# else
+# define LSAN_MAYBE_INTERCEPT_TRYJOIN
+# endif // SANITIZER_INTERCEPT_TRYJOIN
+
+# if SANITIZER_INTERCEPT_TIMEDJOIN
+INTERCEPTOR(int, pthread_timedjoin_np, void *thread, void **ret,
+ const struct timespec *abstime) {
+ int result;
+ GetThreadArgRetval().Join((uptr)thread, [&]() {
+ result = REAL(pthread_timedjoin_np)(thread, ret, abstime);
+ return !result;
+ });
+ return result;
}
+# define LSAN_MAYBE_INTERCEPT_TIMEDJOIN \
+ INTERCEPT_FUNCTION(pthread_timedjoin_np)
+# else
+# define LSAN_MAYBE_INTERCEPT_TIMEDJOIN
+# endif // SANITIZER_INTERCEPT_TIMEDJOIN
DEFINE_REAL_PTHREAD_FUNCTIONS
@@ -491,6 +533,7 @@ INTERCEPTOR(void, _exit, int status) {
}
#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
+#define SIGNAL_INTERCEPTOR_ENTER() ENSURE_LSAN_INITED
#include "sanitizer_common/sanitizer_signal_interceptors.inc"
#endif // SANITIZER_POSIX
@@ -518,6 +561,10 @@ void InitializeInterceptors() {
LSAN_MAYBE_INTERCEPT_MALLOPT;
INTERCEPT_FUNCTION(pthread_create);
INTERCEPT_FUNCTION(pthread_join);
+ INTERCEPT_FUNCTION(pthread_detach);
+ INTERCEPT_FUNCTION(pthread_exit);
+ LSAN_MAYBE_INTERCEPT_TIMEDJOIN;
+ LSAN_MAYBE_INTERCEPT_TRYJOIN;
INTERCEPT_FUNCTION(_exit);
LSAN_MAYBE_INTERCEPT__LWP_EXIT;
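The rewritten interceptors drop the hand-rolled ThreadParam handshake in favor of the shared ThreadArgRetval table: Create() records {callback, param} under the table's lock while pthread_create runs, the start routine fetches them with GetArgs() and reports the return value via Finish(), and Join()/Detach() drop the entry only when the underlying call succeeds. Keeping args and retvals in that table is also what lets GetAdditionalThreadContextPtrsLocked() treat them as live (see lsan_thread.cpp below). A deliberately simplified standalone sketch of the same idea (illustrative; the real class keys by thread id and integrates with stop-the-world locking):

#include <pthread.h>
#include <map>
#include <mutex>

struct Args { void *(*routine)(void *); void *arg_or_ret; };
static std::mutex mu;
static std::map<pthread_t, Args> table;  // pins args/retvals so a leak check can see them

static void *Trampoline(void *) {
  pthread_t self = pthread_self();
  mu.lock();  // blocks until the creator has inserted our entry
  Args a = table[self];
  mu.unlock();
  void *ret = a.routine(a.arg_or_ret);
  std::lock_guard<std::mutex> l(mu);
  table[self].arg_or_ret = ret;  // like Finish(): keep the retval reachable until join
  return ret;
}

int CreateThread(pthread_t *th, void *(*fn)(void *), void *arg) {
  std::lock_guard<std::mutex> l(mu);  // like Create(): insert before the child can look
  int res = pthread_create(th, nullptr, Trampoline, nullptr);
  if (res == 0) table[*th] = {fn, arg};
  return res;
}

int JoinThread(pthread_t th, void **ret) {
  int res = pthread_join(th, ret);  // like Join(): erase the entry only on success
  if (res == 0) {
    std::lock_guard<std::mutex> l(mu);
    table.erase(th);
  }
  return res;
}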
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_linux.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_linux.cpp
index 47c2f21b5a6b..5074cee1296a 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_linux.cpp
@@ -14,13 +14,14 @@
#if SANITIZER_LINUX || SANITIZER_NETBSD || SANITIZER_FUCHSIA
-#include "lsan_allocator.h"
+# include "lsan_allocator.h"
+# include "lsan_thread.h"
namespace __lsan {
-static THREADLOCAL u32 current_thread_tid = kInvalidTid;
-u32 GetCurrentThread() { return current_thread_tid; }
-void SetCurrentThread(u32 tid) { current_thread_tid = tid; }
+static THREADLOCAL ThreadContextLsanBase *current_thread = nullptr;
+ThreadContextLsanBase *GetCurrentThread() { return current_thread; }
+void SetCurrentThread(ThreadContextLsanBase *tctx) { current_thread = tctx; }
static THREADLOCAL AllocatorCache allocator_cache;
AllocatorCache *GetAllocatorCache() { return &allocator_cache; }
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_mac.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_mac.cpp
index 6964a9ba28df..990954a8b687 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_mac.cpp
@@ -67,10 +67,9 @@ typedef struct {
ALWAYS_INLINE
void lsan_register_worker_thread(int parent_tid) {
- if (GetCurrentThread() == kInvalidTid) {
+ if (GetCurrentThreadId() == kInvalidTid) {
u32 tid = ThreadCreate(parent_tid, true);
ThreadStart(tid, GetTid());
- SetCurrentThread(tid);
}
}
@@ -81,7 +80,7 @@ extern "C" void lsan_dispatch_call_block_and_release(void *block) {
VReport(2,
"lsan_dispatch_call_block_and_release(): "
"context: %p, pthread_self: %p\n",
- block, pthread_self());
+ block, (void*)pthread_self());
lsan_register_worker_thread(context->parent_tid);
// Call the original dispatcher for the block.
context->func(context->block);
@@ -101,7 +100,7 @@ extern "C" lsan_block_context_t *alloc_lsan_context(void *ctxt,
(lsan_block_context_t *)lsan_malloc(sizeof(lsan_block_context_t), stack);
lsan_ctxt->block = ctxt;
lsan_ctxt->func = func;
- lsan_ctxt->parent_tid = GetCurrentThread();
+ lsan_ctxt->parent_tid = GetCurrentThreadId();
return lsan_ctxt;
}
@@ -146,13 +145,13 @@ void dispatch_source_set_event_handler(dispatch_source_t ds,
void (^work)(void));
}
-#define GET_LSAN_BLOCK(work) \
- void (^lsan_block)(void); \
- int parent_tid = GetCurrentThread(); \
- lsan_block = ^(void) { \
- lsan_register_worker_thread(parent_tid); \
- work(); \
- }
+# define GET_LSAN_BLOCK(work) \
+ void (^lsan_block)(void); \
+ int parent_tid = GetCurrentThreadId(); \
+ lsan_block = ^(void) { \
+ lsan_register_worker_thread(parent_tid); \
+ work(); \
+ }
INTERCEPTOR(void, dispatch_async, dispatch_queue_t dq, void (^work)(void)) {
GET_LSAN_BLOCK(work);
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_posix.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_posix.cpp
index 3c7bc15a851a..d99e1cc0105e 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_posix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_posix.cpp
@@ -35,6 +35,7 @@ struct OnStartedArgs {
};
void ThreadContext::OnStarted(void *arg) {
+ ThreadContextLsanBase::OnStarted(arg);
auto args = reinterpret_cast<const OnStartedArgs *>(arg);
stack_begin_ = args->stack_begin;
stack_end_ = args->stack_end;
@@ -88,7 +89,7 @@ static void OnStackUnwind(const SignalContext &sig, const void *,
}
void LsanOnDeadlySignal(int signo, void *siginfo, void *context) {
- HandleDeadlySignal(siginfo, context, GetCurrentThread(), &OnStackUnwind,
+ HandleDeadlySignal(siginfo, context, GetCurrentThreadId(), &OnStackUnwind,
nullptr);
}
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.cpp
index 137c7e4e4f12..8aa3111eecf7 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.cpp
@@ -24,24 +24,41 @@
namespace __lsan {
static ThreadRegistry *thread_registry;
+static ThreadArgRetval *thread_arg_retval;
+
+static Mutex mu_for_thread_context;
+static LowLevelAllocator allocator_for_thread_context;
static ThreadContextBase *CreateThreadContext(u32 tid) {
- void *mem = MmapOrDie(sizeof(ThreadContext), "ThreadContext");
- return new (mem) ThreadContext(tid);
+ Lock lock(&mu_for_thread_context);
+ return new (allocator_for_thread_context) ThreadContext(tid);
}
-void InitializeThreadRegistry() {
- static ALIGNED(64) char thread_registry_placeholder[sizeof(ThreadRegistry)];
+void InitializeThreads() {
+ static ALIGNED(alignof(
+ ThreadRegistry)) char thread_registry_placeholder[sizeof(ThreadRegistry)];
thread_registry =
new (thread_registry_placeholder) ThreadRegistry(CreateThreadContext);
+
+ static ALIGNED(alignof(ThreadArgRetval)) char
+ thread_arg_retval_placeholder[sizeof(ThreadArgRetval)];
+ thread_arg_retval = new (thread_arg_retval_placeholder) ThreadArgRetval();
}
+ThreadArgRetval &GetThreadArgRetval() { return *thread_arg_retval; }
+
ThreadContextLsanBase::ThreadContextLsanBase(int tid)
: ThreadContextBase(tid) {}
+void ThreadContextLsanBase::OnStarted(void *arg) {
+ SetCurrentThread(this);
+ AllocatorThreadStart();
+}
+
void ThreadContextLsanBase::OnFinished() {
AllocatorThreadFinish();
DTLS_Destroy();
+ SetCurrentThread(nullptr);
}
u32 ThreadCreate(u32 parent_tid, bool detached, void *arg) {
@@ -51,26 +68,13 @@ u32 ThreadCreate(u32 parent_tid, bool detached, void *arg) {
void ThreadContextLsanBase::ThreadStart(u32 tid, tid_t os_id,
ThreadType thread_type, void *arg) {
thread_registry->StartThread(tid, os_id, thread_type, arg);
- SetCurrentThread(tid);
-}
-
-void ThreadFinish() {
- thread_registry->FinishThread(GetCurrentThread());
- SetCurrentThread(kInvalidTid);
}
-ThreadContext *CurrentThreadContext() {
- if (!thread_registry)
- return nullptr;
- if (GetCurrentThread() == kInvalidTid)
- return nullptr;
- // No lock needed when getting current thread.
- return (ThreadContext *)thread_registry->GetThreadLocked(GetCurrentThread());
-}
+void ThreadFinish() { thread_registry->FinishThread(GetCurrentThreadId()); }
void EnsureMainThreadIDIsCorrect() {
- if (GetCurrentThread() == kMainTid)
- CurrentThreadContext()->os_id = GetTid();
+ if (GetCurrentThreadId() == kMainTid)
+ GetCurrentThread()->os_id = GetTid();
}
///// Interface to the common LSan module. /////
@@ -79,9 +83,15 @@ void GetThreadExtraStackRangesLocked(tid_t os_id,
InternalMmapVector<Range> *ranges) {}
void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {}
-void LockThreadRegistry() { thread_registry->Lock(); }
+void LockThreads() {
+ thread_registry->Lock();
+ thread_arg_retval->Lock();
+}
-void UnlockThreadRegistry() { thread_registry->Unlock(); }
+void UnlockThreads() {
+ thread_arg_retval->Unlock();
+ thread_registry->Unlock();
+}
ThreadRegistry *GetLsanThreadRegistryLocked() {
thread_registry->CheckLocked();
@@ -99,4 +109,8 @@ void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
threads);
}
+void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
+ GetThreadArgRetval().GetAllPtrsLocked(ptrs);
+}
+
} // namespace __lsan
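InitializeThreads() and GetRootRegionsLocked() (in lsan_common.cpp) rely on the same idiom: placement-new into a static, suitably aligned byte buffer, so initialization never touches the intercepted heap and the objects are never destroyed at exit. The pattern in isolation, with Registry as a stand-in type (illustrative):

#include <new>

struct Registry { /* stand-in for ThreadRegistry / ThreadArgRetval */ };

Registry &GetRegistry() {
  alignas(Registry) static char placeholder[sizeof(Registry)];
  // Constructed exactly once and intentionally never destroyed; the in-tree
  // code guards this with its own mutex or init order instead of a magic static.
  static Registry *instance = new (placeholder) Registry();
  return *instance;
}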
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.h b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.h
index 049c7e203801..222066ee93cd 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.h
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.h
@@ -14,6 +14,7 @@
#ifndef LSAN_THREAD_H
#define LSAN_THREAD_H
+#include "sanitizer_common/sanitizer_thread_arg_retval.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
namespace __lsan {
@@ -21,6 +22,7 @@ namespace __lsan {
class ThreadContextLsanBase : public ThreadContextBase {
public:
explicit ThreadContextLsanBase(int tid);
+ void OnStarted(void *arg) override;
void OnFinished() override;
uptr stack_begin() { return stack_begin_; }
uptr stack_end() { return stack_end_; }
@@ -42,17 +44,21 @@ class ThreadContextLsanBase : public ThreadContextBase {
// This subclass of ThreadContextLsanBase is declared in an OS-specific header.
class ThreadContext;
-void InitializeThreadRegistry();
+void InitializeThreads();
void InitializeMainThread();
ThreadRegistry *GetLsanThreadRegistryLocked();
+ThreadArgRetval &GetThreadArgRetval();
u32 ThreadCreate(u32 tid, bool detached, void *arg = nullptr);
void ThreadFinish();
-u32 GetCurrentThread();
-void SetCurrentThread(u32 tid);
-ThreadContext *CurrentThreadContext();
+ThreadContextLsanBase *GetCurrentThread();
+inline u32 GetCurrentThreadId() {
+ ThreadContextLsanBase *ctx = GetCurrentThread();
+ return ctx ? ctx->tid : kInvalidTid;
+}
+void SetCurrentThread(ThreadContextLsanBase *tctx);
void EnsureMainThreadIDIsCorrect();
} // namespace __lsan
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.cpp
index c21e4e8a5694..efdfa5ad04a6 100644
--- a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.cpp
@@ -23,11 +23,12 @@
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_array_ref.h"
+#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
-#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include <sched.h>
@@ -75,7 +76,7 @@ static int GetCpuId(void) {
// _memprof_preinit is called via the preinit_array, which subsequently calls
// malloc. Since this is before _dl_init calls VDSO_SETUP, sched_getcpu
// will seg fault as the address of __vdso_getcpu will be null.
- if (!memprof_init_done)
+ if (!memprof_inited)
return -1;
return sched_getcpu();
}
@@ -189,6 +190,7 @@ void MemprofMapUnmapCallback::OnMap(uptr p, uptr size) const {
thread_stats.mmaps++;
thread_stats.mmaped += size;
}
+
void MemprofMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
// We are about to unmap a chunk of user memory.
// Mark the corresponding shadow memory as not needed.
@@ -295,8 +297,10 @@ struct Allocator {
// memprof_rawprofile.h.
char *Buffer = nullptr;
- MemoryMappingLayout Layout(/*cache_enabled=*/true);
- u64 BytesSerialized = SerializeToRawProfile(MIBMap, Layout, Buffer);
+ __sanitizer::ListOfModules List;
+ List.init();
+ ArrayRef<LoadedModule> Modules(List.begin(), List.end());
+ u64 BytesSerialized = SerializeToRawProfile(MIBMap, Modules, Buffer);
CHECK(Buffer && BytesSerialized && "could not serialize to buffer");
report_file.Write(Buffer, BytesSerialized);
}
@@ -445,8 +449,7 @@ struct Allocator {
u64 user_requested_size =
atomic_exchange(&m->user_requested_size, 0, memory_order_acquire);
- if (memprof_inited && memprof_init_done &&
- atomic_load_relaxed(&constructed) &&
+ if (memprof_inited && atomic_load_relaxed(&constructed) &&
!atomic_load_relaxed(&destructing)) {
u64 c = GetShadowCount(p, user_requested_size);
long curtime = GetTimestamp();
@@ -554,6 +557,10 @@ struct Allocator {
return user_requested_size;
}
+ uptr AllocationSizeFast(uptr p) {
+ return reinterpret_cast<MemprofChunk *>(p - kChunkHeaderSize)->UsedSize();
+ }
+
void Purge(BufferedStackTrace *stack) { allocator.ForceReleaseToOS(); }
void PrintStats() { allocator.PrintStats(); }
@@ -680,6 +687,18 @@ int memprof_posix_memalign(void **memptr, uptr alignment, uptr size,
return 0;
}
+static const void *memprof_malloc_begin(const void *p) {
+ u64 user_requested_size;
+ MemprofChunk *m =
+ instance.GetMemprofChunkByAddr((uptr)p, user_requested_size);
+ if (!m)
+ return nullptr;
+ if (user_requested_size == 0)
+ return nullptr;
+
+ return (const void *)m->Beg();
+}
+
uptr memprof_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
if (!ptr)
return 0;
@@ -698,10 +717,21 @@ int __sanitizer_get_ownership(const void *p) {
return memprof_malloc_usable_size(p, 0, 0) != 0;
}
+const void *__sanitizer_get_allocated_begin(const void *p) {
+ return memprof_malloc_begin(p);
+}
+
uptr __sanitizer_get_allocated_size(const void *p) {
return memprof_malloc_usable_size(p, 0, 0);
}
+uptr __sanitizer_get_allocated_size_fast(const void *p) {
+ DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
+ uptr ret = instance.AllocationSizeFast(reinterpret_cast<uptr>(p));
+ DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
+ return ret;
+}
+
int __memprof_profile_dump() {
instance.FinishAndWrite();
// In the future we may want to return non-zero if there are any errors
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.h b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.h
index 001502cde08a..14c61c7325e3 100644
--- a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.h
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.h
@@ -39,6 +39,10 @@ void InitializeAllocator();
struct MemprofMapUnmapCallback {
void OnMap(uptr p, uptr size) const;
+ void OnMapSecondary(uptr p, uptr size, uptr user_begin,
+ uptr user_size) const {
+ OnMap(p, size);
+ }
void OnUnmap(uptr p, uptr size) const;
};
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.cpp
index 459ad03e8dfe..8925ec5bbaa3 100644
--- a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.cpp
@@ -52,11 +52,6 @@ using namespace __memprof;
DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr)
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
-#define MEMPROF_INTERCEPTOR_ENTER(ctx, func) \
- ctx = 0; \
- (void)ctx;
-
-#define COMMON_INTERCEPT_FUNCTION(name) MEMPROF_INTERCEPT_FUNC(name)
#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
MEMPROF_INTERCEPT_FUNC_VER(name, ver)
#define COMMON_INTERCEPT_FUNCTION_VER_UNVERSIONED_FALLBACK(name, ver) \
@@ -105,24 +100,6 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
*begin = *end = 0; \
}
-#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \
- do { \
- MEMPROF_INTERCEPTOR_ENTER(ctx, memmove); \
- MEMPROF_MEMMOVE_IMPL(to, from, size); \
- } while (false)
-
-#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \
- do { \
- MEMPROF_INTERCEPTOR_ENTER(ctx, memcpy); \
- MEMPROF_MEMCPY_IMPL(to, from, size); \
- } while (false)
-
-#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \
- do { \
- MEMPROF_INTERCEPTOR_ENTER(ctx, memset); \
- MEMPROF_MEMSET_IMPL(block, c, size); \
- } while (false)
-
#include "sanitizer_common/sanitizer_common_interceptors.inc"
#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) MEMPROF_READ_RANGE(p, s)
@@ -192,7 +169,7 @@ INTERCEPTOR(int, pthread_join, void *t, void **arg) {
DEFINE_REAL_PTHREAD_FUNCTIONS
INTERCEPTOR(char *, index, const char *string, int c)
-ALIAS(WRAPPER_NAME(strchr));
+ALIAS(WRAP(strchr));
// For both strcat() and strncat() we need to check the validity of |to|
// argument irrespective of the |from| length.
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.h b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.h
index 879a1e1061e5..20edef42a515 100644
--- a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.h
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.h
@@ -57,4 +57,10 @@ DECLARE_REAL(char *, strstr, const char *s1, const char *s2)
ver, #name); \
} while (0)
+#define MEMPROF_INTERCEPTOR_ENTER(ctx, func) \
+ ctx = 0; \
+ (void)ctx;
+
+#define COMMON_INTERCEPT_FUNCTION(name) MEMPROF_INTERCEPT_FUNC(name)
+
#endif // MEMPROF_INTERCEPTORS_H
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.cpp
index 4eb409362b57..56bd11614d6a 100644
--- a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.cpp
@@ -11,11 +11,74 @@
// MemProf versions of memcpy, memmove, and memset.
//===---------------------------------------------------------------------===//
+#define SANITIZER_COMMON_NO_REDEFINE_BUILTINS
+
#include "memprof_interceptors_memintrinsics.h"
+
+#include "memprof_interceptors.h"
#include "memprof_stack.h"
using namespace __memprof;
+// memcpy is called during __memprof_init() from the internals of printf(...).
+// We do not treat memcpy with to==from as a bug.
+// See http://llvm.org/bugs/show_bug.cgi?id=11763.
+#define MEMPROF_MEMCPY_IMPL(to, from, size) \
+ do { \
+ if (UNLIKELY(!memprof_inited)) \
+ return internal_memcpy(to, from, size); \
+ if (memprof_init_is_running) { \
+ return REAL(memcpy)(to, from, size); \
+ } \
+ ENSURE_MEMPROF_INITED(); \
+ MEMPROF_READ_RANGE(from, size); \
+ MEMPROF_WRITE_RANGE(to, size); \
+ return REAL(memcpy)(to, from, size); \
+ } while (0)
+
+// memset is called inside Printf.
+#define MEMPROF_MEMSET_IMPL(block, c, size) \
+ do { \
+ if (UNLIKELY(!memprof_inited)) \
+ return internal_memset(block, c, size); \
+ if (memprof_init_is_running) { \
+ return REAL(memset)(block, c, size); \
+ } \
+ ENSURE_MEMPROF_INITED(); \
+ MEMPROF_WRITE_RANGE(block, size); \
+ return REAL(memset)(block, c, size); \
+ } while (0)
+
+#define MEMPROF_MEMMOVE_IMPL(to, from, size) \
+ do { \
+ if (UNLIKELY(!memprof_inited)) \
+ return internal_memmove(to, from, size); \
+ ENSURE_MEMPROF_INITED(); \
+ MEMPROF_READ_RANGE(from, size); \
+ MEMPROF_WRITE_RANGE(to, size); \
+ return internal_memmove(to, from, size); \
+ } while (0)
+
+#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \
+ do { \
+ MEMPROF_INTERCEPTOR_ENTER(ctx, memmove); \
+ MEMPROF_MEMMOVE_IMPL(to, from, size); \
+ } while (false)
+
+#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \
+ do { \
+ MEMPROF_INTERCEPTOR_ENTER(ctx, memcpy); \
+ MEMPROF_MEMCPY_IMPL(to, from, size); \
+ } while (false)
+
+#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \
+ do { \
+ MEMPROF_INTERCEPTOR_ENTER(ctx, memset); \
+ MEMPROF_MEMSET_IMPL(block, c, size); \
+ } while (false)
+
+#include "sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc"
+
void *__memprof_memcpy(void *to, const void *from, uptr size) {
MEMPROF_MEMCPY_IMPL(to, from, size);
}
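Since each *_IMPL macro embeds return statements, it has to expand inside the interceptor body rather than live in a helper. After preprocessing, __memprof_memcpy above is approximately:

void *__memprof_memcpy(void *to, const void *from, uptr size) {
  if (UNLIKELY(!memprof_inited))
    return internal_memcpy(to, from, size);  // too early even for REAL()
  if (memprof_init_is_running)
    return REAL(memcpy)(to, from, size);     // re-entered from __memprof_init()
  ENSURE_MEMPROF_INITED();
  MEMPROF_READ_RANGE(from, size);            // record the source access range
  MEMPROF_WRITE_RANGE(to, size);             // record the destination access range
  return REAL(memcpy)(to, from, size);
}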
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.h b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.h
index 348461d55c41..0b87a6f3522a 100644
--- a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.h
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.h
@@ -32,45 +32,6 @@ namespace __memprof {
__memprof_record_access_range(offset, size); \
} while (0)
-// memcpy is called during __memprof_init() from the internals of printf(...).
-// We do not treat memcpy with to==from as a bug.
-// See http://llvm.org/bugs/show_bug.cgi?id=11763.
-#define MEMPROF_MEMCPY_IMPL(to, from, size) \
- do { \
- if (UNLIKELY(!memprof_inited)) \
- return internal_memcpy(to, from, size); \
- if (memprof_init_is_running) { \
- return REAL(memcpy)(to, from, size); \
- } \
- ENSURE_MEMPROF_INITED(); \
- MEMPROF_READ_RANGE(from, size); \
- MEMPROF_WRITE_RANGE(to, size); \
- return REAL(memcpy)(to, from, size); \
- } while (0)
-
-// memset is called inside Printf.
-#define MEMPROF_MEMSET_IMPL(block, c, size) \
- do { \
- if (UNLIKELY(!memprof_inited)) \
- return internal_memset(block, c, size); \
- if (memprof_init_is_running) { \
- return REAL(memset)(block, c, size); \
- } \
- ENSURE_MEMPROF_INITED(); \
- MEMPROF_WRITE_RANGE(block, size); \
- return REAL(memset)(block, c, size); \
- } while (0)
-
-#define MEMPROF_MEMMOVE_IMPL(to, from, size) \
- do { \
- if (UNLIKELY(!memprof_inited)) \
- return internal_memmove(to, from, size); \
- ENSURE_MEMPROF_INITED(); \
- MEMPROF_READ_RANGE(from, size); \
- MEMPROF_WRITE_RANGE(to, size); \
- return internal_memmove(to, from, size); \
- } while (0)
-
#define MEMPROF_READ_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size)
#define MEMPROF_WRITE_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size)
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_internal.h b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_internal.h
index bba465e60d82..990e62ce1a55 100644
--- a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_internal.h
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_internal.h
@@ -76,7 +76,6 @@ void *MemprofDlSymNext(const char *sym);
extern int memprof_inited;
extern int memprof_timestamp_inited;
-extern int memprof_init_done;
// Used to avoid infinite recursion in __memprof_init().
extern bool memprof_init_is_running;
extern void (*death_callback)(void);
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_rawprofile.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_rawprofile.cpp
index f065e8dbcabc..fa92fa0e4b53 100644
--- a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_rawprofile.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_rawprofile.cpp
@@ -5,6 +5,7 @@
#include "memprof_rawprofile.h"
#include "profile/MemProfData.inc"
#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_array_ref.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_procmaps.h"
@@ -20,7 +21,7 @@ using SegmentEntry = ::llvm::memprof::SegmentEntry;
using Header = ::llvm::memprof::Header;
namespace {
-template <class T> char *WriteBytes(T Pod, char *&Buffer) {
+template <class T> char *WriteBytes(const T &Pod, char *Buffer) {
*(T *)Buffer = Pod;
return Buffer + sizeof(T);
}
@@ -33,12 +34,14 @@ void RecordStackId(const uptr Key, UNUSED LockedMemInfoBlock *const &MIB,
}
} // namespace
-u64 SegmentSizeBytes(MemoryMappingLayoutBase &Layout) {
+u64 SegmentSizeBytes(ArrayRef<LoadedModule> Modules) {
u64 NumSegmentsToRecord = 0;
- MemoryMappedSegment segment;
- for (Layout.Reset(); Layout.Next(&segment);)
- if (segment.IsReadable() && segment.IsExecutable())
- NumSegmentsToRecord++;
+ for (const auto &Module : Modules) {
+ for (const auto &Segment : Module.ranges()) {
+ if (Segment.executable)
+ NumSegmentsToRecord++;
+ }
+ }
return sizeof(u64) // A header which stores the number of records.
+ sizeof(SegmentEntry) * NumSegmentsToRecord;
@@ -51,28 +54,31 @@ u64 SegmentSizeBytes(MemoryMappingLayoutBase &Layout) {
// Start
// End
// Offset
-// BuildID 32B
+// UuidSize
+// Uuid 32B
// ----------
// ...
-void SerializeSegmentsToBuffer(MemoryMappingLayoutBase &Layout,
+void SerializeSegmentsToBuffer(ArrayRef<LoadedModule> Modules,
const u64 ExpectedNumBytes, char *&Buffer) {
char *Ptr = Buffer;
// Reserve space for the final count.
Ptr += sizeof(u64);
u64 NumSegmentsRecorded = 0;
- MemoryMappedSegment segment;
-
- for (Layout.Reset(); Layout.Next(&segment);) {
- if (segment.IsReadable() && segment.IsExecutable()) {
- // TODO: Record segment.uuid when it is implemented for Linux-Elf.
- SegmentEntry Entry(segment.start, segment.end, segment.offset);
- memcpy(Ptr, &Entry, sizeof(SegmentEntry));
- Ptr += sizeof(SegmentEntry);
- NumSegmentsRecorded++;
+
+ for (const auto &Module : Modules) {
+ for (const auto &Segment : Module.ranges()) {
+ if (Segment.executable) {
+ SegmentEntry Entry(Segment.beg, Segment.end, Module.base_address());
+ CHECK(Module.uuid_size() <= MEMPROF_BUILDID_MAX_SIZE);
+ Entry.BuildIdSize = Module.uuid_size();
+ memcpy(Entry.BuildId, Module.uuid(), Module.uuid_size());
+ memcpy(Ptr, &Entry, sizeof(SegmentEntry));
+ Ptr += sizeof(SegmentEntry);
+ NumSegmentsRecorded++;
+ }
}
}
-
// Store the number of segments we recorded in the space we reserved.
*((u64 *)Buffer) = NumSegmentsRecorded;
CHECK(ExpectedNumBytes >= static_cast<u64>(Ptr - Buffer) &&
@@ -198,11 +204,11 @@ void SerializeMIBInfoToBuffer(MIBMapTy &MIBMap, const Vector<u64> &StackIds,
// ----------
// Optional Padding Bytes
// ...
-u64 SerializeToRawProfile(MIBMapTy &MIBMap, MemoryMappingLayoutBase &Layout,
+u64 SerializeToRawProfile(MIBMapTy &MIBMap, ArrayRef<LoadedModule> Modules,
char *&Buffer) {
// Each section size is rounded up to 8b since the first entry in each section
// is a u64 which holds the number of entries in the section by convention.
- const u64 NumSegmentBytes = RoundUpTo(SegmentSizeBytes(Layout), 8);
+ const u64 NumSegmentBytes = RoundUpTo(SegmentSizeBytes(Modules), 8);
Vector<u64> StackIds;
MIBMap.ForEach(RecordStackId, reinterpret_cast<void *>(&StackIds));
@@ -232,7 +238,7 @@ u64 SerializeToRawProfile(MIBMapTy &MIBMap, MemoryMappingLayoutBase &Layout,
sizeof(Header) + NumSegmentBytes + NumMIBInfoBytes};
Ptr = WriteBytes(header, Ptr);
- SerializeSegmentsToBuffer(Layout, NumSegmentBytes, Ptr);
+ SerializeSegmentsToBuffer(Modules, NumSegmentBytes, Ptr);
Ptr += NumSegmentBytes;
SerializeMIBInfoToBuffer(MIBMap, StackIds, NumMIBInfoBytes, Ptr);
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_rawprofile.h b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_rawprofile.h
index 575104e7e34e..e2494175f165 100644
--- a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_rawprofile.h
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_rawprofile.h
@@ -2,12 +2,13 @@
#define MEMPROF_RAWPROFILE_H_
#include "memprof_mibmap.h"
-#include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_array_ref.h"
+#include "sanitizer_common/sanitizer_common.h"
namespace __memprof {
// Serialize the in-memory representation of the memprof profile to the raw
// binary format. The format itself is documented memprof_rawprofile.cpp.
-u64 SerializeToRawProfile(MIBMapTy &BlockCache, MemoryMappingLayoutBase &Layout,
+u64 SerializeToRawProfile(MIBMapTy &BlockCache, ArrayRef<LoadedModule> Modules,
char *&Buffer);
} // namespace __memprof
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_rtl.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_rtl.cpp
index d30b80304f6b..5e2e7bc2be3f 100644
--- a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_rtl.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_rtl.cpp
@@ -65,7 +65,6 @@ static void CheckUnwind() {
// -------------------------- Globals --------------------- {{{1
int memprof_inited;
-int memprof_init_done;
bool memprof_init_is_running;
int memprof_timestamp_inited;
long memprof_init_timestamp_s;
@@ -195,11 +194,6 @@ static void MemprofInitInternal() {
InitializeAllocator();
- // On Linux MemprofThread::ThreadStart() calls malloc() that's why
- // memprof_inited should be set to 1 prior to initializing the threads.
- memprof_inited = 1;
- memprof_init_is_running = false;
-
if (flags()->atexit)
Atexit(memprof_atexit);
@@ -218,7 +212,8 @@ static void MemprofInitInternal() {
VReport(1, "MemProfiler Init done\n");
- memprof_init_done = 1;
+ memprof_init_is_running = false;
+ memprof_inited = 1;
}
void MemprofInitTime() {
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/tests/rawprofile.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/tests/rawprofile.cpp
index 7f6398d1cf86..c5dfdca890be 100644
--- a/contrib/llvm-project/compiler-rt/lib/memprof/tests/rawprofile.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/tests/rawprofile.cpp
@@ -4,6 +4,7 @@
#include <memory>
#include "profile/MemProfData.inc"
+#include "sanitizer_common/sanitizer_array_ref.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
@@ -15,27 +16,14 @@ namespace {
using ::__memprof::MIBMapTy;
using ::__memprof::SerializeToRawProfile;
-using ::__sanitizer::MemoryMappedSegment;
-using ::__sanitizer::MemoryMappingLayoutBase;
using ::__sanitizer::StackDepotPut;
using ::__sanitizer::StackTrace;
using ::llvm::memprof::MemInfoBlock;
-using ::testing::_;
-using ::testing::Action;
-using ::testing::DoAll;
-using ::testing::Return;
-using ::testing::SetArgPointee;
-
-class MockMemoryMappingLayout final : public MemoryMappingLayoutBase {
-public:
- MOCK_METHOD(bool, Next, (MemoryMappedSegment *), (override));
- MOCK_METHOD(void, Reset, (), (override));
-};
-
-uint64_t PopulateFakeMap(const MemInfoBlock &FakeMIB, uint64_t StackPCBegin,
+
+uint64_t PopulateFakeMap(const MemInfoBlock &FakeMIB, uintptr_t StackPCBegin,
MIBMapTy &FakeMap) {
constexpr int kSize = 5;
- uint64_t array[kSize];
+ uintptr_t array[kSize];
for (int i = 0; i < kSize; i++) {
array[i] = StackPCBegin + i;
}
@@ -56,26 +44,13 @@ template <class T = uint64_t> T Read(char *&Buffer) {
}
TEST(MemProf, Basic) {
- MockMemoryMappingLayout Layout;
- MemoryMappedSegment FakeSegment;
- memset(&FakeSegment, 0, sizeof(FakeSegment));
- FakeSegment.start = 0x10;
- FakeSegment.end = 0x20;
- FakeSegment.offset = 0x10;
- uint8_t uuid[__sanitizer::kModuleUUIDSize] = {0xC, 0x0, 0xF, 0xF, 0xE, 0xE};
- memcpy(FakeSegment.uuid, uuid, __sanitizer::kModuleUUIDSize);
- FakeSegment.protection =
- __sanitizer::kProtectionExecute | __sanitizer::kProtectionRead;
-
- const Action<bool(MemoryMappedSegment *)> SetSegment =
- DoAll(SetArgPointee<0>(FakeSegment), Return(true));
- EXPECT_CALL(Layout, Next(_))
- .WillOnce(SetSegment)
- .WillOnce(Return(false))
- .WillOnce(SetSegment)
- .WillRepeatedly(Return(false));
-
- EXPECT_CALL(Layout, Reset).Times(2);
+ __sanitizer::LoadedModule FakeModule;
+ FakeModule.addAddressRange(/*begin=*/0x10, /*end=*/0x20, /*executable=*/true,
+ /*writable=*/false, /*name=*/"");
+ const char uuid[MEMPROF_BUILDID_MAX_SIZE] = {0xC, 0x0, 0xF, 0xF, 0xE, 0xE};
+ FakeModule.setUuid(uuid, MEMPROF_BUILDID_MAX_SIZE);
+ __sanitizer::ArrayRef<__sanitizer::LoadedModule> Modules(&FakeModule,
+ (&FakeModule) + 1);
MIBMapTy FakeMap;
MemInfoBlock FakeMIB;
@@ -90,7 +65,7 @@ TEST(MemProf, Basic) {
FakeIds[1] = PopulateFakeMap(FakeMIB, /*StackPCBegin=*/3, FakeMap);
char *Ptr = nullptr;
- uint64_t NumBytes = SerializeToRawProfile(FakeMap, Layout, Ptr);
+ uint64_t NumBytes = SerializeToRawProfile(FakeMap, Modules, Ptr);
const char *Buffer = Ptr;
ASSERT_GT(NumBytes, 0ULL);
@@ -111,16 +86,16 @@ TEST(MemProf, Basic) {
// Should be equal to the size of the raw profile header.
EXPECT_EQ(SegmentOffset, 48ULL);
- // We expect only 1 segment entry, 8b for the count and 56b for SegmentEntry
+ // We expect only 1 segment entry, 8b for the count and 64b for SegmentEntry
// in memprof_rawprofile.cpp.
- EXPECT_EQ(MIBOffset - SegmentOffset, 64ULL);
+ EXPECT_EQ(MIBOffset - SegmentOffset, 72ULL);
- EXPECT_EQ(MIBOffset, 112ULL);
+ EXPECT_EQ(MIBOffset, 120ULL);
  // We expect 2 MIB entries: 8b for the count, then for each entry
  // sizeof(uint64_t) for the stack id plus sizeof(MemInfoBlock).
EXPECT_EQ(StackOffset - MIBOffset, 8 + 2 * (8 + sizeof(MemInfoBlock)));
- EXPECT_EQ(StackOffset, 336ULL);
+ EXPECT_EQ(StackOffset, 408ULL);
// We expect 2 stack entries, with 5 frames - 8b for total count,
// 2 * (8b for id, 8b for frame count and 5*8b for fake frames).
// Since this is the last section, there may be additional padding at the end
@@ -128,16 +103,15 @@ TEST(MemProf, Basic) {
EXPECT_GE(TotalSize - StackOffset, 8ULL + 2 * (8 + 8 + 5 * 8));
// ============= Check contents.
- // The Uuid field is not yet populated on Linux-Elf by the sanitizer procmaps
- // library, so we expect it to be filled with 0 for now.
- unsigned char ExpectedSegmentBytes[64] = {
- 0x01, 0, 0, 0, 0, 0, 0, 0, // Number of entries
- 0x10, 0, 0, 0, 0, 0, 0, 0, // Start
- 0x20, 0, 0, 0, 0, 0, 0, 0, // End
- 0x10, 0, 0, 0, 0, 0, 0, 0, // Offset
- 0x0, // Uuid
+ unsigned char ExpectedSegmentBytes[72] = {
+ 0x01, 0, 0, 0, 0, 0, 0, 0, // Number of entries
+ 0x10, 0, 0, 0, 0, 0, 0, 0, // Start
+ 0x20, 0, 0, 0, 0, 0, 0, 0, // End
+ 0x0, 0, 0, 0, 0, 0, 0, 0, // Offset
+ 0x20, 0, 0, 0, 0, 0, 0, 0, // UuidSize
+ 0xC, 0x0, 0xF, 0xF, 0xE, 0xE // Uuid
};
- EXPECT_EQ(memcmp(Buffer + SegmentOffset, ExpectedSegmentBytes, 64), 0);
+ EXPECT_EQ(memcmp(Buffer + SegmentOffset, ExpectedSegmentBytes, 72), 0);
// Check that the number of entries is 2.
EXPECT_EQ(*reinterpret_cast<const uint64_t *>(Buffer + MIBOffset), 2ULL);
@@ -193,5 +167,4 @@ TEST(MemProf, Basic) {
sizeof(ExpectedStackBytes[1])),
0);
}
-
} // namespace
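The updated offsets follow mechanically from SegmentEntry growing to 64 bytes (a build-ID size field plus a 32-byte build ID); every section is 8-byte aligned and starts with a u64 count. Worked out using only the numbers asserted above:

// SegmentOffset   = sizeof(Header)                             = 48
// segment section = 8 (count) + 1 * 64 (SegmentEntry)          = 72   (was 8 + 56 = 64)
// MIBOffset       = 48 + 72                                    = 120  (was 112)
// MIB section     = 8 (count) + 2 * (8 + sizeof(MemInfoBlock)) = 288 in this build
// StackOffset     = 120 + 288                                  = 408  (was 336)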
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan.cpp b/contrib/llvm-project/compiler-rt/lib/msan/msan.cpp
index 3e3bc3cb979f..987c5560825d 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan.cpp
@@ -345,8 +345,7 @@ using namespace __msan;
#define MSAN_MAYBE_WARNING(type, size) \
void __msan_maybe_warning_##size(type s, u32 o) { \
- GET_CALLER_PC_BP_SP; \
- (void) sp; \
+ GET_CALLER_PC_BP; \
if (UNLIKELY(s)) { \
PrintWarningWithOrigin(pc, bp, o); \
if (__msan::flags()->halt_on_error) { \
@@ -365,8 +364,7 @@ MSAN_MAYBE_WARNING(u64, 8)
void __msan_maybe_store_origin_##size(type s, void *p, u32 o) { \
if (UNLIKELY(s)) { \
if (__msan_get_track_origins() > 1) { \
- GET_CALLER_PC_BP_SP; \
- (void) sp; \
+ GET_CALLER_PC_BP; \
GET_STORE_STACK_TRACE_PC_BP(pc, bp); \
o = ChainOrigin(o, &stack); \
} \
@@ -380,8 +378,7 @@ MSAN_MAYBE_STORE_ORIGIN(u32, 4)
MSAN_MAYBE_STORE_ORIGIN(u64, 8)
void __msan_warning() {
- GET_CALLER_PC_BP_SP;
- (void)sp;
+ GET_CALLER_PC_BP;
PrintWarningWithOrigin(pc, bp, 0);
if (__msan::flags()->halt_on_error) {
if (__msan::flags()->print_stats)
@@ -392,8 +389,7 @@ void __msan_warning() {
}
void __msan_warning_noreturn() {
- GET_CALLER_PC_BP_SP;
- (void)sp;
+ GET_CALLER_PC_BP;
PrintWarningWithOrigin(pc, bp, 0);
if (__msan::flags()->print_stats)
ReportStats();
@@ -402,8 +398,7 @@ void __msan_warning_noreturn() {
}
void __msan_warning_with_origin(u32 origin) {
- GET_CALLER_PC_BP_SP;
- (void)sp;
+ GET_CALLER_PC_BP;
PrintWarningWithOrigin(pc, bp, origin);
if (__msan::flags()->halt_on_error) {
if (__msan::flags()->print_stats)
@@ -414,8 +409,7 @@ void __msan_warning_with_origin(u32 origin) {
}
void __msan_warning_with_origin_noreturn(u32 origin) {
- GET_CALLER_PC_BP_SP;
- (void)sp;
+ GET_CALLER_PC_BP;
PrintWarningWithOrigin(pc, bp, origin);
if (__msan::flags()->print_stats)
ReportStats();
@@ -513,8 +507,7 @@ void __msan_set_expect_umr(int expect_umr) {
if (expect_umr) {
msan_expected_umr_found = 0;
} else if (!msan_expected_umr_found) {
- GET_CALLER_PC_BP_SP;
- (void)sp;
+ GET_CALLER_PC_BP;
GET_FATAL_STACK_TRACE_PC_BP(pc, bp);
ReportExpectedUMRNotFound(&stack);
Die();
@@ -562,8 +555,7 @@ void __msan_check_mem_is_initialized(const void *x, uptr size) {
if (offset < 0)
return;
- GET_CALLER_PC_BP_SP;
- (void)sp;
+ GET_CALLER_PC_BP;
ReportUMRInsideAddressRange(__func__, x, size, offset);
__msan::PrintWarningWithOrigin(pc, bp,
__msan_get_origin(((const char *)x) + offset));
@@ -622,8 +614,7 @@ void __msan_set_alloca_origin_no_descr(void *a, uptr size, u32 *id_ptr) {
}
u32 __msan_chain_origin(u32 id) {
- GET_CALLER_PC_BP_SP;
- (void)sp;
+ GET_CALLER_PC_BP;
GET_STORE_STACK_TRACE_PC_BP(pc, bp);
return ChainOrigin(id, &stack);
}
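All of these call sites captured a stack pointer only to immediately cast it to void; the diff switches them to the two-value macro. Roughly, the sanitizer_common macro pair looks like the sketch below (the exact definitions live in sanitizer_stacktrace.h):

    // Sketch of the macro pair; the _SP variant additionally materializes a
    // stack pointer, which none of the callers above actually used.
    #define GET_CALLER_PC_BP         \
      uptr bp = GET_CURRENT_FRAME(); \
      uptr pc = GET_CALLER_PC();

    #define GET_CALLER_PC_BP_SP \
      GET_CALLER_PC_BP;         \
      uptr local_stack;         \
      uptr sp = (uptr)&local_stack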
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan.h b/contrib/llvm-project/compiler-rt/lib/msan/msan.h
index 5d8ea52668ab..b3a9c641b4fb 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan.h
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan.h
@@ -89,6 +89,27 @@ const MappingDesc kMemoryLayout[] = {
# define MEM_TO_SHADOW(mem) ((uptr)mem ^ 0xB00000000000ULL)
# define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x200000000000ULL)
+#elif SANITIZER_LINUX && SANITIZER_LOONGARCH64
+// LoongArch64 maps:
+// - 0x000000000000-0x010000000000: the program's own segments
+// - 0x555500000000-0x555600000000: PIE program segments
+// - 0x7fff00000000-0x7fffffffffff: library segments.
+const MappingDesc kMemoryLayout[] = {
+ {0x000000000000ULL, 0x010000000000ULL, MappingDesc::APP, "app-1"},
+ {0x010000000000ULL, 0x100000000000ULL, MappingDesc::SHADOW, "shadow-2"},
+ {0x100000000000ULL, 0x110000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x110000000000ULL, 0x200000000000ULL, MappingDesc::ORIGIN, "origin-2"},
+ {0x200000000000ULL, 0x300000000000ULL, MappingDesc::SHADOW, "shadow-3"},
+ {0x300000000000ULL, 0x400000000000ULL, MappingDesc::ORIGIN, "origin-3"},
+ {0x400000000000ULL, 0x500000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x500000000000ULL, 0x510000000000ULL, MappingDesc::SHADOW, "shadow-1"},
+ {0x510000000000ULL, 0x600000000000ULL, MappingDesc::APP, "app-2"},
+ {0x600000000000ULL, 0x610000000000ULL, MappingDesc::ORIGIN, "origin-1"},
+ {0x610000000000ULL, 0x700000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x700000000000ULL, 0x800000000000ULL, MappingDesc::APP, "app-3"}};
+# define MEM_TO_SHADOW(mem) (((uptr)(mem)) ^ 0x500000000000ULL)
+# define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x100000000000ULL)
+
#elif SANITIZER_LINUX && SANITIZER_PPC64
const MappingDesc kMemoryLayout[] = {
{0x000000000000ULL, 0x000200000000ULL, MappingDesc::APP, "low memory"},
@@ -269,31 +290,33 @@ const int STACK_TRACE_TAG_POISON = StackTrace::TAG_CUSTOM + 1;
const int STACK_TRACE_TAG_FIELDS = STACK_TRACE_TAG_POISON + 1;
const int STACK_TRACE_TAG_VPTR = STACK_TRACE_TAG_FIELDS + 1;
-#define GET_MALLOC_STACK_TRACE \
- BufferedStackTrace stack; \
- if (__msan_get_track_origins() && msan_inited) \
- stack.Unwind(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \
- nullptr, common_flags()->fast_unwind_on_malloc, \
- common_flags()->malloc_context_size)
+#define GET_MALLOC_STACK_TRACE \
+ UNINITIALIZED BufferedStackTrace stack; \
+ if (__msan_get_track_origins() && msan_inited) { \
+ stack.Unwind(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), nullptr, \
+ common_flags()->fast_unwind_on_malloc, \
+ common_flags()->malloc_context_size); \
+ }
// For platforms that only support the slow unwinder, we restrict the store
// context size to 1, basically only storing the current pc. We do this because
// the slow unwinder, which is based on libunwind, is not async-signal-safe and
// causes random freezes in forking applications as well as in signal handlers.
-#define GET_STORE_STACK_TRACE_PC_BP(pc, bp) \
- BufferedStackTrace stack; \
- if (__msan_get_track_origins() > 1 && msan_inited) { \
- int size = flags()->store_context_size; \
- if (!SANITIZER_CAN_FAST_UNWIND) \
- size = Min(size, 1); \
- stack.Unwind(pc, bp, nullptr, common_flags()->fast_unwind_on_malloc, size);\
+#define GET_STORE_STACK_TRACE_PC_BP(pc, bp) \
+ UNINITIALIZED BufferedStackTrace stack; \
+ if (__msan_get_track_origins() > 1 && msan_inited) { \
+ int size = flags()->store_context_size; \
+ if (!SANITIZER_CAN_FAST_UNWIND) \
+ size = Min(size, 1); \
+ stack.Unwind(pc, bp, nullptr, common_flags()->fast_unwind_on_malloc, \
+ size); \
}
#define GET_STORE_STACK_TRACE \
GET_STORE_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME())
#define GET_FATAL_STACK_TRACE_PC_BP(pc, bp) \
- BufferedStackTrace stack; \
+ UNINITIALIZED BufferedStackTrace stack; \
if (msan_inited) { \
stack.Unwind(pc, bp, nullptr, common_flags()->fast_unwind_on_fatal); \
}
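The LoongArch64 layout can be spot-checked by hand: XOR-ing an app address with 0x500000000000 must land in one of the SHADOW ranges, and adding 0x100000000000 to that shadow must land in the matching ORIGIN range. A minimal standalone check (the sample addresses are illustrative picks from each app range):

    #include <cassert>
    #include <cstdint>

    static bool in(uint64_t x, uint64_t lo, uint64_t hi) {
      return lo <= x && x < hi;
    }

    int main() {
      const uint64_t apps[] = {0x000000001000ULL,   // app-1
                               0x555500000000ULL,   // app-2 (PIE)
                               0x7fff00000000ULL};  // app-3 (libraries)
      for (uint64_t mem : apps) {
        uint64_t shadow = mem ^ 0x500000000000ULL;
        uint64_t origin = shadow + 0x100000000000ULL;
        assert(in(shadow, 0x010000000000ULL, 0x100000000000ULL) ||  // shadow-2
               in(shadow, 0x200000000000ULL, 0x300000000000ULL) ||  // shadow-3
               in(shadow, 0x500000000000ULL, 0x510000000000ULL));   // shadow-1
        assert(in(origin, 0x110000000000ULL, 0x200000000000ULL) ||  // origin-2
               in(origin, 0x300000000000ULL, 0x400000000000ULL) ||  // origin-3
               in(origin, 0x600000000000ULL, 0x610000000000ULL));   // origin-1
      }
      return 0;
    }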
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/msan/msan_allocator.cpp
index 3308ee7053a6..c3b0f8512e82 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_allocator.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_allocator.cpp
@@ -11,16 +11,18 @@
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//
+#include "msan_allocator.h"
+
+#include "msan.h"
+#include "msan_interface_internal.h"
+#include "msan_origin.h"
+#include "msan_poisoning.h"
+#include "msan_thread.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
-#include "msan.h"
-#include "msan_allocator.h"
-#include "msan_origin.h"
-#include "msan_thread.h"
-#include "msan_poisoning.h"
namespace __msan {
@@ -30,6 +32,8 @@ struct Metadata {
struct MsanMapUnmapCallback {
void OnMap(uptr p, uptr size) const {}
+ void OnMapSecondary(uptr p, uptr size, uptr user_begin,
+ uptr user_size) const {}
void OnUnmap(uptr p, uptr size) const {
__msan_unpoison((void *)p, size);
@@ -78,6 +82,22 @@ struct AP64 { // Allocator64 parameters. Deliberately using a short name.
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+#elif defined(__loongarch_lp64)
+const uptr kAllocatorSpace = 0x700000000000ULL;
+const uptr kMaxAllowedMallocSize = 8UL << 30;
+
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = kAllocatorSpace;
+ static const uptr kSpaceSize = 0x40000000000; // 4T.
+ static const uptr kMetadataSize = sizeof(Metadata);
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef MsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ using AddressSpaceView = LocalAddressSpaceView;
+};
+
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+
#elif defined(__powerpc64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
@@ -145,8 +165,13 @@ AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}
+void MsanThreadLocalMallocStorage::Init() {
+ allocator.InitCache(GetAllocatorCache(this));
+}
+
void MsanThreadLocalMallocStorage::CommitBack() {
allocator.SwallowCache(GetAllocatorCache(this));
+ allocator.DestroyCache(GetAllocatorCache(this));
}
static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
@@ -183,7 +208,10 @@ static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
meta->requested_size = size;
if (zeroise) {
- __msan_clear_and_unpoison(allocated, size);
+ if (allocator.FromPrimary(allocated))
+ __msan_clear_and_unpoison(allocated, size);
+ else
+ __msan_unpoison(allocated, size); // Mem is already zeroed.
} else if (flags()->poison_in_malloc) {
__msan_poison(allocated, size);
if (__msan_get_track_origins()) {
@@ -206,8 +234,9 @@ void MsanDeallocate(StackTrace *stack, void *p) {
uptr size = meta->requested_size;
meta->requested_size = 0;
// This memory will not be reused by anyone else, so we are free to keep it
- // poisoned.
- if (flags()->poison_in_free) {
+ // poisoned. Secondary allocations are unmapped and unpoisoned via
+ // MsanMapUnmapCallback, so there is no need to poison them here.
+ if (flags()->poison_in_free && allocator.FromPrimary(p)) {
__msan_poison(p, size);
if (__msan_get_track_origins()) {
stack->tag = StackTrace::TAG_DEALLOC;
@@ -260,6 +289,21 @@ static void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
return MsanAllocate(stack, nmemb * size, sizeof(u64), true);
}
+static const void *AllocationBegin(const void *p) {
+ if (!p)
+ return nullptr;
+ void *beg = allocator.GetBlockBegin(p);
+ if (!beg)
+ return nullptr;
+ Metadata *b = (Metadata *)allocator.GetMetaData(beg);
+ if (!b)
+ return nullptr;
+ if (b->requested_size == 0)
+ return nullptr;
+
+ return (const void *)beg;
+}
+
static uptr AllocationSize(const void *p) {
if (!p) return 0;
const void *beg = allocator.GetBlockBegin(p);
@@ -268,6 +312,10 @@ static uptr AllocationSize(const void *p) {
return b->requested_size;
}
+static uptr AllocationSizeFast(const void *p) {
+ return reinterpret_cast<Metadata *>(allocator.GetMetaData(p))->requested_size;
+}
+
void *msan_malloc(uptr size, StackTrace *stack) {
return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
}
@@ -373,4 +421,17 @@ uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
+const void *__sanitizer_get_allocated_begin(const void *p) {
+ return AllocationBegin(p);
+}
+
uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
+
+uptr __sanitizer_get_allocated_size_fast(const void *p) {
+ DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
+ uptr ret = AllocationSizeFast(p);
+ DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
+ return ret;
+}
+
+void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }
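The new entry points round out the allocator-introspection surface for MSan. A hypothetical caller, assuming an MSan-instrumented build; note the _fast variant is only valid on the allocation start, per the DCHECK above:

    #include <sanitizer/allocator_interface.h>
    #include <cassert>
    #include <cstdlib>

    int main() {
      char *p = static_cast<char *>(malloc(64));
      // Map an interior pointer back to the start of its allocation.
      const void *begin = __sanitizer_get_allocated_begin(p + 10);
      assert(begin == p);
      assert(__sanitizer_get_allocated_size_fast(begin) == 64);
      free(p);
    }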
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_allocator.h b/contrib/llvm-project/compiler-rt/lib/msan/msan_allocator.h
index 365af4d0c4dd..364331d96406 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_allocator.h
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_allocator.h
@@ -20,6 +20,7 @@ namespace __msan {
struct MsanThreadLocalMallocStorage {
// Allocator cache contains atomic_uint64_t which must be 8-byte aligned.
ALIGNED(8) uptr allocator_cache[96 * (512 * 8 + 16)]; // Opaque.
+ void Init();
void CommitBack();
private:
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_dl.cpp b/contrib/llvm-project/compiler-rt/lib/msan/msan_dl.cpp
new file mode 100644
index 000000000000..4f9ba52cf47d
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_dl.cpp
@@ -0,0 +1,65 @@
+//===-- msan_dl.cpp -------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemorySanitizer.
+//
+// Helper functions for unpoisoning results of dladdr and dladdr1.
+//===----------------------------------------------------------------------===//
+
+#include "msan_dl.h"
+
+#include <dlfcn.h>
+#include <elf.h>
+#include <link.h>
+
+#include "msan_poisoning.h"
+
+namespace __msan {
+
+void UnpoisonDllAddrInfo(void *info) {
+ Dl_info *ptr = (Dl_info *)(info);
+ __msan_unpoison(ptr, sizeof(*ptr));
+ if (ptr->dli_fname)
+ __msan_unpoison(ptr->dli_fname, internal_strlen(ptr->dli_fname) + 1);
+ if (ptr->dli_sname)
+ __msan_unpoison(ptr->dli_sname, internal_strlen(ptr->dli_sname) + 1);
+}
+
+#if SANITIZER_GLIBC
+void UnpoisonDllAddr1ExtraInfo(void **extra_info, int flags) {
+ if (flags == RTLD_DL_SYMENT) {
+ __msan_unpoison(extra_info, sizeof(void *));
+
+ ElfW(Sym) *s = *((ElfW(Sym) **)(extra_info));
+ __msan_unpoison(s, sizeof(ElfW(Sym)));
+ } else if (flags == RTLD_DL_LINKMAP) {
+ __msan_unpoison(extra_info, sizeof(void *));
+
+ struct link_map *map = *((struct link_map **)(extra_info));
+
+ // Walk forward
+ for (auto *ptr = map; ptr; ptr = ptr->l_next) {
+ __msan_unpoison(ptr, sizeof(struct link_map));
+ if (ptr->l_name)
+ __msan_unpoison(ptr->l_name, internal_strlen(ptr->l_name) + 1);
+ }
+
+ if (!map)
+ return;
+
+ // Walk backward
+ for (auto *ptr = map->l_prev; ptr; ptr = ptr->l_prev) {
+ __msan_unpoison(ptr, sizeof(struct link_map));
+ if (ptr->l_name)
+ __msan_unpoison(ptr->l_name, internal_strlen(ptr->l_name) + 1);
+ }
+ }
+}
+#endif
+
+} // namespace __msan
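With this helper wired into the dladdr interceptor, programs can read Dl_info fields without spurious use-of-uninitialized-value reports. A small sketch (glibc; link with -ldl on older toolchains):

    #include <dlfcn.h>
    #include <cstdio>

    int main() {
      Dl_info info;
      // The interceptor unpoisons info, dli_fname and dli_sname on success.
      if (dladdr(reinterpret_cast<void *>(&printf), &info) != 0 &&
          info.dli_fname)
        printf("printf resolved from %s\n", info.dli_fname);
    }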
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_dl.h b/contrib/llvm-project/compiler-rt/lib/msan/msan_dl.h
new file mode 100644
index 000000000000..c0105ac3c600
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_dl.h
@@ -0,0 +1,30 @@
+//===-- msan_dl.h ---------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemorySanitizer.
+//
+// Helper functions for unpoisoning results of dladdr and dladdr1.
+//===----------------------------------------------------------------------===//
+
+#ifndef MSAN_DL_H
+#define MSAN_DL_H
+
+#include "msan.h"
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __msan {
+
+void UnpoisonDllAddrInfo(void *info);
+
+#if SANITIZER_GLIBC
+void UnpoisonDllAddr1ExtraInfo(void **extra_info, int flags);
+#endif
+
+} // namespace __msan
+
+#endif // MSAN_DL_H
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_interceptors.cpp b/contrib/llvm-project/compiler-rt/lib/msan/msan_interceptors.cpp
index 058c10a19424..f5e0d3cb9a67 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_interceptors.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_interceptors.cpp
@@ -14,9 +14,12 @@
// sanitizer_common/sanitizer_common_interceptors.h
//===----------------------------------------------------------------------===//
+#define SANITIZER_COMMON_NO_REDEFINE_BUILTINS
+
#include "interception/interception.h"
#include "msan.h"
#include "msan_chained_origin_depot.h"
+#include "msan_dl.h"
#include "msan_origin.h"
#include "msan_poisoning.h"
#include "msan_report.h"
@@ -93,8 +96,7 @@ struct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {
if (__msan::IsInSymbolizerOrUnwider()) \
break; \
if (__offset >= 0 && __msan::flags()->report_umrs) { \
- GET_CALLER_PC_BP_SP; \
- (void)sp; \
+ GET_CALLER_PC_BP; \
ReportUMRInsideAddressRange(__func__, x, n, __offset); \
__msan::PrintWarningWithOrigin( \
pc, bp, __msan_get_origin((const char *)x + __offset)); \
@@ -1110,17 +1112,35 @@ INTERCEPTOR(int, pthread_key_create, __sanitizer_pthread_key_t *key,
#if SANITIZER_NETBSD
INTERCEPTOR(int, __libc_thr_keycreate, __sanitizer_pthread_key_t *m,
void (*dtor)(void *value))
-ALIAS(WRAPPER_NAME(pthread_key_create));
+ALIAS(WRAP(pthread_key_create));
#endif
-INTERCEPTOR(int, pthread_join, void *th, void **retval) {
+INTERCEPTOR(int, pthread_join, void *thread, void **retval) {
+ ENSURE_MSAN_INITED();
+ int res = REAL(pthread_join)(thread, retval);
+ if (!res && retval)
+ __msan_unpoison(retval, sizeof(*retval));
+ return res;
+}
+
+#if SANITIZER_GLIBC
+INTERCEPTOR(int, pthread_tryjoin_np, void *thread, void **retval) {
ENSURE_MSAN_INITED();
- int res = REAL(pthread_join)(th, retval);
+ int res = REAL(pthread_tryjoin_np)(thread, retval);
if (!res && retval)
__msan_unpoison(retval, sizeof(*retval));
return res;
}
+INTERCEPTOR(int, pthread_timedjoin_np, void *thread, void **retval,
+ const struct timespec *abstime) {
+ int res = REAL(pthread_timedjoin_np)(thread, retval, abstime);
+ if (!res && retval)
+ __msan_unpoison(retval, sizeof(*retval));
+ return res;
+}
+#endif
+
DEFINE_REAL_PTHREAD_FUNCTIONS
extern char *tzname[2];
@@ -1404,6 +1424,7 @@ int OnExit() {
} while (false)
#include "sanitizer_common/sanitizer_platform_interceptors.h"
+#include "sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc"
#include "sanitizer_common/sanitizer_common_interceptors.inc"
static uptr signal_impl(int signo, uptr cb);
@@ -1420,6 +1441,8 @@ static int sigaction_impl(int signo, const __sanitizer_sigaction *act,
return REAL(func)(signo, handler); \
}
+#define SIGNAL_INTERCEPTOR_ENTER() ENSURE_MSAN_INITED()
+
#include "sanitizer_common/sanitizer_signal_interceptors.inc"
static int sigaction_impl(int signo, const __sanitizer_sigaction *act,
@@ -1500,26 +1523,31 @@ INTERCEPTOR(const char *, strsignal, int sig) {
return res;
}
-struct dlinfo {
- char *dli_fname;
- void *dli_fbase;
- char *dli_sname;
- void *dli_saddr;
-};
-
-INTERCEPTOR(int, dladdr, void *addr, dlinfo *info) {
+INTERCEPTOR(int, dladdr, void *addr, void *info) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, dladdr, addr, info);
int res = REAL(dladdr)(addr, info);
+ if (res != 0)
+ UnpoisonDllAddrInfo(info);
+ return res;
+}
+
+#if SANITIZER_GLIBC
+INTERCEPTOR(int, dladdr1, void *addr, void *info, void **extra_info,
+ int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, dladdr1, addr, info, extra_info, flags);
+ int res = REAL(dladdr1)(addr, info, extra_info, flags);
if (res != 0) {
- __msan_unpoison(info, sizeof(*info));
- if (info->dli_fname)
- __msan_unpoison(info->dli_fname, internal_strlen(info->dli_fname) + 1);
- if (info->dli_sname)
- __msan_unpoison(info->dli_sname, internal_strlen(info->dli_sname) + 1);
+ UnpoisonDllAddrInfo(info);
+ UnpoisonDllAddr1ExtraInfo(extra_info, flags);
}
return res;
}
+# define MSAN_MAYBE_INTERCEPT_DLADDR1 MSAN_INTERCEPT_FUNC(dladdr1)
+#else
+#define MSAN_MAYBE_INTERCEPT_DLADDR1
+#endif
INTERCEPTOR(char *, dlerror, int fake) {
void *ctx;
@@ -1768,6 +1796,7 @@ void InitializeInterceptors() {
MSAN_MAYBE_INTERCEPT_EPOLL_PWAIT;
INTERCEPT_FUNCTION(strsignal);
INTERCEPT_FUNCTION(dladdr);
+ MSAN_MAYBE_INTERCEPT_DLADDR1;
INTERCEPT_FUNCTION(dlerror);
INTERCEPT_FUNCTION(dl_iterate_phdr);
INTERCEPT_FUNCTION(getrusage);
@@ -1778,6 +1807,10 @@ void InitializeInterceptors() {
#endif
INTERCEPT_FUNCTION(pthread_join);
INTERCEPT_FUNCTION(pthread_key_create);
+#if SANITIZER_GLIBC
+ INTERCEPT_FUNCTION(pthread_tryjoin_np);
+ INTERCEPT_FUNCTION(pthread_timedjoin_np);
+#endif
#if SANITIZER_NETBSD
INTERCEPT_FUNCTION(__libc_thr_keycreate);
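A sketch of the newly intercepted glibc-only join; on success the interceptor unpoisons *retval, so MSan will not flag the read of result below (compile with -pthread; pthread_timedjoin_np is a GNU extension):

    #include <pthread.h>
    #include <cstdio>
    #include <ctime>

    static void *worker(void *) { return reinterpret_cast<void *>(42); }

    int main() {
      pthread_t t;
      pthread_create(&t, nullptr, worker, nullptr);
      timespec deadline{};
      clock_gettime(CLOCK_REALTIME, &deadline);
      deadline.tv_sec += 1;  // give the worker up to one second
      void *result = nullptr;
      if (pthread_timedjoin_np(t, &result, &deadline) == 0)
        std::printf("worker returned %ld\n", reinterpret_cast<long>(result));
    }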
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_poisoning.cpp b/contrib/llvm-project/compiler-rt/lib/msan/msan_poisoning.cpp
index af01aa69f78f..1889e980bfc0 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_poisoning.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_poisoning.cpp
@@ -216,6 +216,13 @@ void SetShadow(const void *ptr, uptr size, u8 value) {
}
if (!MmapFixedSuperNoReserve(page_beg, page_end - page_beg))
Die();
+
+ if (__msan_get_track_origins()) {
+ // No need to set origin for zero shadow, but we can release pages.
+ uptr origin_beg = RoundUpTo(MEM_TO_ORIGIN(ptr), PageSize);
+ if (!MmapFixedSuperNoReserve(origin_beg, page_end - page_beg))
+ Die();
+ }
}
}
}
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_report.cpp b/contrib/llvm-project/compiler-rt/lib/msan/msan_report.cpp
index d1ef36d9a322..90164e50ca3a 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_report.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_report.cpp
@@ -11,16 +11,18 @@
// Error reporting.
//===----------------------------------------------------------------------===//
+#include "msan_report.h"
+
#include "msan.h"
#include "msan_chained_origin_depot.h"
#include "msan_origin.h"
-#include "msan_report.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_stacktrace_printer.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
using namespace __sanitizer;
@@ -265,12 +267,13 @@ void DescribeMemoryRange(const void *x, uptr size) {
}
}
-void ReportUMRInsideAddressRange(const char *what, const void *start, uptr size,
- uptr offset) {
+void ReportUMRInsideAddressRange(const char *function, const void *start,
+ uptr size, uptr offset) {
+ function = StripFunctionName(function);
Decorator d;
Printf("%s", d.Warning());
Printf("%sUninitialized bytes in %s%s%s at offset %zu inside [%p, %zu)%s\n",
- d.Warning(), d.Name(), what, d.Warning(), offset, start, size,
+ d.Warning(), d.Name(), function, d.Warning(), offset, start, size,
d.Default());
if (__sanitizer::Verbosity())
DescribeMemoryRange(start, size);
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_report.h b/contrib/llvm-project/compiler-rt/lib/msan/msan_report.h
index 0965b8cb6813..2ad81c37cf60 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_report.h
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_report.h
@@ -25,8 +25,8 @@ void ReportExpectedUMRNotFound(StackTrace *stack);
void ReportStats();
void ReportAtExitStatistics();
void DescribeMemoryRange(const void *x, uptr size);
-void ReportUMRInsideAddressRange(const char *what, const void *start, uptr size,
- uptr offset);
+void ReportUMRInsideAddressRange(const char *function, const void *start,
+ uptr size, uptr offset);
} // namespace __msan
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_thread.cpp b/contrib/llvm-project/compiler-rt/lib/msan/msan_thread.cpp
index 40ad6a5019c4..ff9b90bb81f0 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_thread.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_thread.cpp
@@ -47,6 +47,7 @@ void MsanThread::Init() {
CHECK(MEM_IS_APP(stack_.bottom));
CHECK(MEM_IS_APP(stack_.top - 1));
ClearShadowForThreadStackAndTLS();
+ malloc_storage().Init();
}
void MsanThread::TSDDtor(void *tsd) {
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/coff_platform.cpp b/contrib/llvm-project/compiler-rt/lib/orc/coff_platform.cpp
index 83ce07b1bcf8..78c86c855dcb 100644
--- a/contrib/llvm-project/compiler-rt/lib/orc/coff_platform.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/orc/coff_platform.cpp
@@ -595,19 +595,19 @@ void *COFFPlatformRuntimeState::findJITDylibBaseByPC(uint64_t PC) {
return Range.Header;
}
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_coff_platform_bootstrap(char *ArgData, size_t ArgSize) {
COFFPlatformRuntimeState::initialize();
return WrapperFunctionResult().release();
}
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_coff_platform_shutdown(char *ArgData, size_t ArgSize) {
COFFPlatformRuntimeState::destroy();
return WrapperFunctionResult().release();
}
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_coff_register_jitdylib(char *ArgData, size_t ArgSize) {
return WrapperFunction<SPSError(SPSString, SPSExecutorAddr)>::handle(
ArgData, ArgSize,
@@ -618,7 +618,7 @@ __orc_rt_coff_register_jitdylib(char *ArgData, size_t ArgSize) {
.release();
}
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_coff_deregister_jitdylib(char *ArgData, size_t ArgSize) {
return WrapperFunction<SPSError(SPSExecutorAddr)>::handle(
ArgData, ArgSize,
@@ -629,7 +629,7 @@ __orc_rt_coff_deregister_jitdylib(char *ArgData, size_t ArgSize) {
.release();
}
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_coff_register_object_sections(char *ArgData, size_t ArgSize) {
return WrapperFunction<SPSError(SPSExecutorAddr, SPSCOFFObjectSectionsMap,
bool)>::
@@ -644,7 +644,7 @@ __orc_rt_coff_register_object_sections(char *ArgData, size_t ArgSize) {
.release();
}
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_coff_deregister_object_sections(char *ArgData, size_t ArgSize) {
return WrapperFunction<SPSError(SPSExecutorAddr, SPSCOFFObjectSectionsMap)>::
handle(ArgData, ArgSize,
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/common.h b/contrib/llvm-project/compiler-rt/lib/orc/common.h
index 5e01feee759b..73c5c4a2bd8d 100644
--- a/contrib/llvm-project/compiler-rt/lib/orc/common.h
+++ b/contrib/llvm-project/compiler-rt/lib/orc/common.h
@@ -41,7 +41,7 @@ ORC_RT_IMPORT __orc_rt_Opaque __orc_rt_jit_dispatch_ctx ORC_RT_WEAK_IMPORT;
/// This is declared for use by the runtime, but should be implemented in the
/// executor or provided by a definition added to the JIT before the runtime
/// is loaded.
-ORC_RT_IMPORT __orc_rt_CWrapperFunctionResult
+ORC_RT_IMPORT orc_rt_CWrapperFunctionResult
__orc_rt_jit_dispatch(__orc_rt_Opaque *DispatchCtx, const void *FnTag,
const char *Data, size_t Size) ORC_RT_WEAK_IMPORT;
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/debug.h b/contrib/llvm-project/compiler-rt/lib/orc/debug.h
index 4605d441c7cb..a0bc653d032e 100644
--- a/contrib/llvm-project/compiler-rt/lib/orc/debug.h
+++ b/contrib/llvm-project/compiler-rt/lib/orc/debug.h
@@ -53,4 +53,4 @@ void printdbg(const char *format, ...);
#define ORC_RT_DEBUG(X) ORC_RT_DEBUG_WITH_TYPE(DEBUG_TYPE, X)
-#endif // ORC_RT_COMMON_H
+#endif // ORC_RT_DEBUG_H
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/dlfcn_wrapper.cpp b/contrib/llvm-project/compiler-rt/lib/orc/dlfcn_wrapper.cpp
index c513aae97bb3..ece63da2cb48 100644
--- a/contrib/llvm-project/compiler-rt/lib/orc/dlfcn_wrapper.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/orc/dlfcn_wrapper.cpp
@@ -22,7 +22,7 @@ extern "C" const char *__orc_rt_jit_dlerror();
extern "C" void *__orc_rt_jit_dlopen(const char *path, int mode);
extern "C" int __orc_rt_jit_dlclose(void *dso_handle);
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_jit_dlerror_wrapper(const char *ArgData, size_t ArgSize) {
return WrapperFunction<SPSString()>::handle(
ArgData, ArgSize,
@@ -30,7 +30,7 @@ __orc_rt_jit_dlerror_wrapper(const char *ArgData, size_t ArgSize) {
.release();
}
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_jit_dlopen_wrapper(const char *ArgData, size_t ArgSize) {
return WrapperFunction<SPSExecutorAddr(SPSString, int32_t)>::handle(
ArgData, ArgSize,
@@ -41,7 +41,7 @@ __orc_rt_jit_dlopen_wrapper(const char *ArgData, size_t ArgSize) {
.release();
}
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_jit_dlclose_wrapper(const char *ArgData, size_t ArgSize) {
return WrapperFunction<int32_t(SPSExecutorAddr)>::handle(
ArgData, ArgSize,
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/elfnix_platform.cpp b/contrib/llvm-project/compiler-rt/lib/orc/elfnix_platform.cpp
index 771e21d72e2d..c087e71038f9 100644
--- a/contrib/llvm-project/compiler-rt/lib/orc/elfnix_platform.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/orc/elfnix_platform.cpp
@@ -12,6 +12,7 @@
#include "elfnix_platform.h"
#include "common.h"
+#include "compiler.h"
#include "error.h"
#include "wrapper_function_utils.h"
@@ -33,8 +34,13 @@ ORC_RT_JIT_DISPATCH_TAG(__orc_rt_elfnix_symbol_lookup_tag)
// eh-frame registration functions, made available via aliases
// installed by the Platform
-extern "C" void __orc_rt_register_eh_frame_section(const void *);
-extern "C" void __orc_rt_deregister_eh_frame_section(const void *);
+extern "C" void __register_frame(const void *);
+extern "C" void __deregister_frame(const void *);
+
+extern "C" void
+__unw_add_dynamic_eh_frame_section(const void *) ORC_RT_WEAK_IMPORT;
+extern "C" void
+__unw_remove_dynamic_eh_frame_section(const void *) ORC_RT_WEAK_IMPORT;
namespace {
@@ -96,8 +102,7 @@ public:
static ELFNixPlatformRuntimeState &get();
static void destroy();
- ELFNixPlatformRuntimeState(void *DSOHandle)
- : PlatformJDDSOHandle(DSOHandle) {}
+ ELFNixPlatformRuntimeState(void *DSOHandle);
// Delete copy and move constructors.
ELFNixPlatformRuntimeState(const ELFNixPlatformRuntimeState &) = delete;
@@ -143,6 +148,10 @@ private:
void *PlatformJDDSOHandle;
+ // Frame registration functions:
+ void (*registerEHFrameSection)(const void *) = nullptr;
+ void (*deregisterEHFrameSection)(const void *) = nullptr;
+
// FIXME: Move to thread-state.
std::string DLFcnError;
@@ -171,11 +180,22 @@ void ELFNixPlatformRuntimeState::destroy() {
delete MOPS;
}
+ELFNixPlatformRuntimeState::ELFNixPlatformRuntimeState(void *DSOHandle)
+ : PlatformJDDSOHandle(DSOHandle) {
+ if (__unw_add_dynamic_eh_frame_section &&
+ __unw_remove_dynamic_eh_frame_section) {
+ registerEHFrameSection = __unw_add_dynamic_eh_frame_section;
+ deregisterEHFrameSection = __unw_remove_dynamic_eh_frame_section;
+ } else {
+ registerEHFrameSection = __register_frame;
+ deregisterEHFrameSection = __deregister_frame;
+ }
+}
+
Error ELFNixPlatformRuntimeState::registerObjectSections(
ELFNixPerObjectSectionsToRegister POSR) {
if (POSR.EHFrameSection.Start)
- __orc_rt_register_eh_frame_section(
- POSR.EHFrameSection.Start.toPtr<const char *>());
+ registerEHFrameSection(POSR.EHFrameSection.Start.toPtr<const char *>());
if (POSR.ThreadDataSection.Start) {
if (auto Err = registerThreadDataSection(
@@ -189,8 +209,7 @@ Error ELFNixPlatformRuntimeState::registerObjectSections(
Error ELFNixPlatformRuntimeState::deregisterObjectSections(
ELFNixPerObjectSectionsToRegister POSR) {
if (POSR.EHFrameSection.Start)
- __orc_rt_deregister_eh_frame_section(
- POSR.EHFrameSection.Start.toPtr<const char *>());
+ deregisterEHFrameSection(POSR.EHFrameSection.Start.toPtr<const char *>());
return Error::success();
}
@@ -451,7 +470,7 @@ void destroyELFNixTLVMgr(void *ELFNixTLVMgr) {
// JIT entry points
//------------------------------------------------------------------------------
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_elfnix_platform_bootstrap(char *ArgData, size_t ArgSize) {
return WrapperFunction<void(uint64_t)>::handle(
ArgData, ArgSize,
@@ -462,14 +481,14 @@ __orc_rt_elfnix_platform_bootstrap(char *ArgData, size_t ArgSize) {
.release();
}
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_elfnix_platform_shutdown(char *ArgData, size_t ArgSize) {
ELFNixPlatformRuntimeState::destroy();
return WrapperFunctionResult().release();
}
/// Wrapper function for registering metadata on a per-object basis.
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_elfnix_register_object_sections(char *ArgData, size_t ArgSize) {
return WrapperFunction<SPSError(SPSELFNixPerObjectSectionsToRegister)>::
handle(ArgData, ArgSize,
@@ -481,7 +500,7 @@ __orc_rt_elfnix_register_object_sections(char *ArgData, size_t ArgSize) {
}
/// Wrapper for releasing per-object metadata.
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_elfnix_deregister_object_sections(char *ArgData, size_t ArgSize) {
return WrapperFunction<SPSError(SPSELFNixPerObjectSectionsToRegister)>::
handle(ArgData, ArgSize,
@@ -517,7 +536,7 @@ ORC_RT_INTERFACE ptrdiff_t ___orc_rt_elfnix_tlsdesc_resolver_impl(
return TLVPtr - ThreadPointer;
}
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_elfnix_create_pthread_key(char *ArgData, size_t ArgSize) {
return WrapperFunction<SPSExpected<uint64_t>(void)>::handle(
ArgData, ArgSize,
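The constructor above uses the usual weak-import fallback: if libunwind's dynamic eh-frame hooks were linked in, their addresses are non-null and win; otherwise the libgcc-style __register_frame pair is used. Stripped of the ORC_RT_WEAK_IMPORT macro, the pattern is roughly:

    // Generic shape of the selection logic, under the assumption that
    // ORC_RT_WEAK_IMPORT expands to a weak-symbol attribute.
    extern "C" void __unw_add_dynamic_eh_frame_section(const void *)
        __attribute__((weak));
    extern "C" void __register_frame(const void *);

    using RegisterFn = void (*)(const void *);

    static RegisterFn pickEHFrameRegistrar() {
      return __unw_add_dynamic_eh_frame_section
                 ? __unw_add_dynamic_eh_frame_section
                 : __register_frame;
    }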
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/macho_platform.cpp b/contrib/llvm-project/compiler-rt/lib/orc/macho_platform.cpp
index 9b5b954921c1..cb248aae0666 100644
--- a/contrib/llvm-project/compiler-rt/lib/orc/macho_platform.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/orc/macho_platform.cpp
@@ -36,40 +36,18 @@ using namespace __orc_rt::macho;
ORC_RT_JIT_DISPATCH_TAG(__orc_rt_macho_push_initializers_tag)
ORC_RT_JIT_DISPATCH_TAG(__orc_rt_macho_symbol_lookup_tag)
-// Objective-C types.
-struct objc_class;
struct objc_image_info;
-struct objc_object;
-struct objc_selector;
-
-using Class = objc_class *;
-using id = objc_object *;
-using SEL = objc_selector *;
+struct mach_header;
// Objective-C registration functions.
// These are weakly imported. If the Objective-C runtime has not been loaded
// then code containing Objective-C sections will generate an error.
-extern "C" id objc_msgSend(id, SEL, ...) ORC_RT_WEAK_IMPORT;
-extern "C" Class objc_readClassPair(Class,
- const objc_image_info *) ORC_RT_WEAK_IMPORT;
-extern "C" SEL sel_registerName(const char *) ORC_RT_WEAK_IMPORT;
-
-// Swift types.
-class ProtocolRecord;
-class ProtocolConformanceRecord;
-class TypeMetadataRecord;
-
extern "C" void
-swift_registerProtocols(const ProtocolRecord *begin,
- const ProtocolRecord *end) ORC_RT_WEAK_IMPORT;
+_objc_map_images(unsigned count, const char *const paths[],
+ const mach_header *const mhdrs[]) ORC_RT_WEAK_IMPORT;
-extern "C" void swift_registerProtocolConformances(
- const ProtocolConformanceRecord *begin,
- const ProtocolConformanceRecord *end) ORC_RT_WEAK_IMPORT;
-
-extern "C" void swift_registerTypeMetadataRecords(
- const TypeMetadataRecord *begin,
- const TypeMetadataRecord *end) ORC_RT_WEAK_IMPORT;
+extern "C" void _objc_load_image(const char *path,
+ const mach_header *mh) ORC_RT_WEAK_IMPORT;
// Libunwind prototypes.
struct unw_dynamic_unwind_sections {
@@ -290,11 +268,7 @@ private:
std::unordered_map<void *, size_t> ZeroInitRanges;
UnwindSectionsMap UnwindSections;
RecordSectionsTracker<void (*)()> ModInitsSections;
- RecordSectionsTracker<void *> ObjCClassListSections;
- RecordSectionsTracker<void *> ObjCSelRefsSections;
- RecordSectionsTracker<char> Swift5ProtocolsSections;
- RecordSectionsTracker<char> Swift5ProtocolConformancesSections;
- RecordSectionsTracker<char> Swift5TypesSections;
+ RecordSectionsTracker<char> ObjCRuntimeRegistrationObjects;
bool referenced() const {
return LinkedAgainstRefCount != 0 || DlRefCount != 0;
@@ -357,11 +331,7 @@ private:
static Error registerEHFrames(span<const char> EHFrameSection);
static Error deregisterEHFrames(span<const char> EHFrameSection);
- static Error registerObjCSelectors(JITDylibState &JDS);
- static Error registerObjCClasses(JITDylibState &JDS);
- static Error registerSwift5Protocols(JITDylibState &JDS);
- static Error registerSwift5ProtocolConformances(JITDylibState &JDS);
- static Error registerSwift5Types(JITDylibState &JDS);
+ static Error registerObjCRegistrationObjects(JITDylibState &JDS);
static Error runModInits(std::unique_lock<std::mutex> &JDStatesLock,
JITDylibState &JDS);
@@ -437,8 +407,7 @@ Error MachOPlatformRuntimeState::initialize() {
}
Error MachOPlatformRuntimeState::shutdown() {
- if (__unw_add_find_dynamic_unwind_sections &&
- __unw_remove_find_dynamic_unwind_sections) {
+ if (UseCallbackStyleUnwindInfo) {
if (__unw_remove_find_dynamic_unwind_sections(&findDynamicUnwindSections)) {
ORC_RT_DEBUG(
{ printdbg("__unw_remove_find_dynamic_unwind_sections failed.\n"); });
@@ -581,22 +550,12 @@ Error MachOPlatformRuntimeState::registerObjectPlatformSections(
JDS->DataSectionContent[KV.second.Start.toPtr<char *>()] =
std::vector<char>(S.begin(), S.end());
} else if (KV.first == "__DATA,__common") {
- // fprintf(stderr, "Adding zero-init range %llx -- %llx\n",
- // KV.second.Start.getValue(), KV.second.size());
JDS->ZeroInitRanges[KV.second.Start.toPtr<char *>()] = KV.second.size();
} else if (KV.first == "__DATA,__thread_data") {
if (auto Err = registerThreadDataSection(KV.second.toSpan<const char>()))
return Err;
- } else if (KV.first == "__DATA,__objc_selrefs")
- JDS->ObjCSelRefsSections.add(KV.second.toSpan<void *>());
- else if (KV.first == "__DATA,__objc_classlist")
- JDS->ObjCClassListSections.add(KV.second.toSpan<void *>());
- else if (KV.first == "__TEXT,__swift5_protos")
- JDS->Swift5ProtocolsSections.add(KV.second.toSpan<char>());
- else if (KV.first == "__TEXT,__swift5_proto")
- JDS->Swift5ProtocolConformancesSections.add(KV.second.toSpan<char>());
- else if (KV.first == "__TEXT,__swift5_types")
- JDS->Swift5TypesSections.add(KV.second.toSpan<char>());
+ } else if (KV.first == "__llvm_jitlink_ObjCRuntimeRegistrationObject")
+ JDS->ObjCRuntimeRegistrationObjects.add(KV.second.toSpan<char>());
else if (KV.first == "__DATA,__mod_init_func")
JDS->ModInitsSections.add(KV.second.toSpan<void (*)()>());
else {
@@ -676,16 +635,8 @@ Error MachOPlatformRuntimeState::deregisterObjectPlatformSections(
if (auto Err =
deregisterThreadDataSection(KV.second.toSpan<const char>()))
return Err;
- } else if (KV.first == "__DATA,__objc_selrefs")
- JDS->ObjCSelRefsSections.removeIfPresent(KV.second);
- else if (KV.first == "__DATA,__objc_classlist")
- JDS->ObjCClassListSections.removeIfPresent(KV.second);
- else if (KV.first == "__TEXT,__swift5_protos")
- JDS->Swift5ProtocolsSections.removeIfPresent(KV.second);
- else if (KV.first == "__TEXT,__swift5_proto")
- JDS->Swift5ProtocolConformancesSections.removeIfPresent(KV.second);
- else if (KV.first == "__TEXT,__swift5_types")
- JDS->Swift5TypesSections.removeIfPresent(KV.second);
+ } else if (KV.first == "__llvm_jitlink_ObjCRuntimeRegistrationObject")
+ JDS->ObjCRuntimeRegistrationObjects.removeIfPresent(KV.second);
else if (KV.first == "__DATA,__mod_init_func")
JDS->ModInitsSections.removeIfPresent(KV.second);
else {
@@ -906,115 +857,29 @@ Error MachOPlatformRuntimeState::deregisterEHFrames(
return Error::success();
}
-Error MachOPlatformRuntimeState::registerObjCSelectors(JITDylibState &JDS) {
- if (!JDS.ObjCSelRefsSections.hasNewSections())
- return Error::success();
-
- if (ORC_RT_UNLIKELY(!sel_registerName))
- return make_error<StringError>("sel_registerName is not available");
-
- JDS.ObjCSelRefsSections.processNewSections([](span<void *> SelRefs) {
- for (void *&SelEntry : SelRefs) {
- const char *SelName = reinterpret_cast<const char *>(SelEntry);
- auto Sel = sel_registerName(SelName);
- *reinterpret_cast<SEL *>(&SelEntry) = Sel;
- }
- });
-
- return Error::success();
-}
-
-Error MachOPlatformRuntimeState::registerObjCClasses(JITDylibState &JDS) {
- if (!JDS.ObjCClassListSections.hasNewSections())
- return Error::success();
-
- if (ORC_RT_UNLIKELY(!objc_msgSend))
- return make_error<StringError>("objc_msgSend is not available");
- if (ORC_RT_UNLIKELY(!objc_readClassPair))
- return make_error<StringError>("objc_readClassPair is not available");
-
- struct ObjCClassCompiled {
- void *Metaclass;
- void *Parent;
- void *Cache1;
- void *Cache2;
- void *Data;
- };
-
- auto ClassSelector = sel_registerName("class");
-
- return JDS.ObjCClassListSections.processNewSections(
- [&](span<void *> ClassPtrs) -> Error {
- for (void *ClassPtr : ClassPtrs) {
- auto *Cls = reinterpret_cast<Class>(ClassPtr);
- auto *ClassCompiled = reinterpret_cast<ObjCClassCompiled *>(ClassPtr);
- objc_msgSend(reinterpret_cast<id>(ClassCompiled->Parent),
- ClassSelector);
- auto Registered = objc_readClassPair(Cls, JDS.ObjCImageInfo);
- // FIXME: Improve diagnostic by reporting the failed class's name.
- if (Registered != Cls)
- return make_error<StringError>(
- "Unable to register Objective-C class");
- }
- return Error::success();
- });
-}
-
-Error MachOPlatformRuntimeState::registerSwift5Protocols(JITDylibState &JDS) {
-
- if (!JDS.Swift5ProtocolsSections.hasNewSections())
- return Error::success();
-
- if (ORC_RT_UNLIKELY(!swift_registerProtocols))
- return make_error<StringError>("swift_registerProtocols is not available");
-
- JDS.Swift5ProtocolsSections.processNewSections([](span<char> ProtoSec) {
- swift_registerProtocols(
- reinterpret_cast<const ProtocolRecord *>(ProtoSec.data()),
- reinterpret_cast<const ProtocolRecord *>(ProtoSec.data() +
- ProtoSec.size()));
- });
-
- return Error::success();
-}
-
-Error MachOPlatformRuntimeState::registerSwift5ProtocolConformances(
+Error MachOPlatformRuntimeState::registerObjCRegistrationObjects(
JITDylibState &JDS) {
+ ORC_RT_DEBUG(printdbg("Registering Objective-C / Swift metadata.\n"));
- if (!JDS.Swift5ProtocolConformancesSections.hasNewSections())
- return Error::success();
-
- if (ORC_RT_UNLIKELY(!swift_registerProtocolConformances))
- return make_error<StringError>(
- "swift_registerProtocolConformances is not available");
-
- JDS.Swift5ProtocolConformancesSections.processNewSections(
- [](span<char> ProtoConfSec) {
- swift_registerProtocolConformances(
- reinterpret_cast<const ProtocolConformanceRecord *>(
- ProtoConfSec.data()),
- reinterpret_cast<const ProtocolConformanceRecord *>(
- ProtoConfSec.data() + ProtoConfSec.size()));
- });
-
- return Error::success();
-}
-
-Error MachOPlatformRuntimeState::registerSwift5Types(JITDylibState &JDS) {
+ std::vector<char *> RegObjBases;
+ JDS.ObjCRuntimeRegistrationObjects.processNewSections(
+ [&](span<char> RegObj) { RegObjBases.push_back(RegObj.data()); });
- if (!JDS.Swift5TypesSections.hasNewSections())
+ if (RegObjBases.empty())
return Error::success();
- if (ORC_RT_UNLIKELY(!swift_registerTypeMetadataRecords))
+ if (!_objc_map_images || !_objc_load_image)
return make_error<StringError>(
- "swift_registerTypeMetadataRecords is not available");
+ "Could not register Objective-C / Swift metadata: _objc_map_images / "
+ "_objc_load_image not found");
- JDS.Swift5TypesSections.processNewSections([&](span<char> TypesSec) {
- swift_registerTypeMetadataRecords(
- reinterpret_cast<const TypeMetadataRecord *>(TypesSec.data()),
- reinterpret_cast<const TypeMetadataRecord *>(TypesSec.data() +
- TypesSec.size()));
- });
+ std::vector<char *> Paths;
+ Paths.resize(RegObjBases.size());
+ _objc_map_images(RegObjBases.size(), Paths.data(),
+ reinterpret_cast<mach_header **>(RegObjBases.data()));
+
+ for (void *RegObjBase : RegObjBases)
+ _objc_load_image(nullptr, reinterpret_cast<mach_header *>(RegObjBase));
return Error::success();
}
@@ -1152,15 +1017,7 @@ Error MachOPlatformRuntimeState::dlopenInitialize(
}
// Initialize this JITDylib.
- if (auto Err = registerObjCSelectors(JDS))
- return Err;
- if (auto Err = registerObjCClasses(JDS))
- return Err;
- if (auto Err = registerSwift5Protocols(JDS))
- return Err;
- if (auto Err = registerSwift5ProtocolConformances(JDS))
- return Err;
- if (auto Err = registerSwift5Types(JDS))
+ if (auto Err = registerObjCRegistrationObjects(JDS))
return Err;
if (auto Err = runModInits(JDStatesLock, JDS))
return Err;
@@ -1281,7 +1138,7 @@ Error runWrapperFunctionCalls(std::vector<WrapperFunctionCall> WFCs) {
// JIT entry points
//------------------------------------------------------------------------------
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_macho_platform_bootstrap(char *ArgData, size_t ArgSize) {
return WrapperFunction<SPSError()>::handle(
ArgData, ArgSize,
@@ -1289,7 +1146,7 @@ __orc_rt_macho_platform_bootstrap(char *ArgData, size_t ArgSize) {
.release();
}
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_macho_platform_shutdown(char *ArgData, size_t ArgSize) {
return WrapperFunction<SPSError()>::handle(
ArgData, ArgSize,
@@ -1297,7 +1154,7 @@ __orc_rt_macho_platform_shutdown(char *ArgData, size_t ArgSize) {
.release();
}
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_macho_register_jitdylib(char *ArgData, size_t ArgSize) {
return WrapperFunction<SPSError(SPSString, SPSExecutorAddr)>::handle(
ArgData, ArgSize,
@@ -1308,7 +1165,7 @@ __orc_rt_macho_register_jitdylib(char *ArgData, size_t ArgSize) {
.release();
}
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_macho_deregister_jitdylib(char *ArgData, size_t ArgSize) {
return WrapperFunction<SPSError(SPSExecutorAddr)>::handle(
ArgData, ArgSize,
@@ -1319,7 +1176,7 @@ __orc_rt_macho_deregister_jitdylib(char *ArgData, size_t ArgSize) {
.release();
}
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_macho_register_object_platform_sections(char *ArgData,
size_t ArgSize) {
return WrapperFunction<SPSError(SPSExecutorAddr,
@@ -1336,7 +1193,7 @@ __orc_rt_macho_register_object_platform_sections(char *ArgData,
.release();
}
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_macho_deregister_object_platform_sections(char *ArgData,
size_t ArgSize) {
return WrapperFunction<SPSError(SPSExecutorAddr,
@@ -1353,7 +1210,7 @@ __orc_rt_macho_deregister_object_platform_sections(char *ArgData,
.release();
}
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_macho_run_wrapper_function_calls(char *ArgData, size_t ArgSize) {
return WrapperFunction<SPSError(SPSSequence<SPSWrapperFunctionCall>)>::handle(
ArgData, ArgSize, runWrapperFunctionCalls)
@@ -1379,7 +1236,7 @@ ORC_RT_INTERFACE void *__orc_rt_macho_tlv_get_addr_impl(TLVDescriptor *D) {
reinterpret_cast<char *>(static_cast<uintptr_t>(D->DataAddress)));
}
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_macho_create_pthread_key(char *ArgData, size_t ArgSize) {
return WrapperFunction<SPSExpected<uint64_t>(void)>::handle(
ArgData, ArgSize,
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/run_program_wrapper.cpp b/contrib/llvm-project/compiler-rt/lib/orc/run_program_wrapper.cpp
index bb4edc56655e..24a7b4fc3cbe 100644
--- a/contrib/llvm-project/compiler-rt/lib/orc/run_program_wrapper.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/orc/run_program_wrapper.cpp
@@ -22,7 +22,7 @@ extern "C" int64_t __orc_rt_run_program(const char *JITDylibName,
const char *EntrySymbolName, int argc,
char *argv[]);
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_run_program_wrapper(const char *ArgData, size_t ArgSize) {
return WrapperFunction<int64_t(SPSString, SPSString,
SPSSequence<SPSString>)>::
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/c_api_test.cpp b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/c_api_test.cpp
index ad3f055b8253..497cb937e2af 100644
--- a/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/c_api_test.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/c_api_test.cpp
@@ -14,24 +14,24 @@
#include "gtest/gtest.h"
TEST(CAPITest, CWrapperFunctionResultInit) {
- __orc_rt_CWrapperFunctionResult R;
- __orc_rt_CWrapperFunctionResultInit(&R);
+ orc_rt_CWrapperFunctionResult R;
+ orc_rt_CWrapperFunctionResultInit(&R);
EXPECT_EQ(R.Size, 0U);
EXPECT_EQ(R.Data.ValuePtr, nullptr);
// Check that this value isn't treated as an out-of-band error.
- EXPECT_EQ(__orc_rt_CWrapperFunctionResultGetOutOfBandError(&R), nullptr);
+ EXPECT_EQ(orc_rt_CWrapperFunctionResultGetOutOfBandError(&R), nullptr);
// Check that we can dispose of the value.
- __orc_rt_DisposeCWrapperFunctionResult(&R);
+ orc_rt_DisposeCWrapperFunctionResult(&R);
}
TEST(CAPITest, CWrapperFunctionResultAllocSmall) {
constexpr size_t SmallAllocSize = sizeof(const char *);
- auto R = __orc_rt_CWrapperFunctionResultAllocate(SmallAllocSize);
- char *DataPtr = __orc_rt_CWrapperFunctionResultData(&R);
+ auto R = orc_rt_CWrapperFunctionResultAllocate(SmallAllocSize);
+ char *DataPtr = orc_rt_CWrapperFunctionResultData(&R);
for (size_t I = 0; I != SmallAllocSize; ++I)
DataPtr[I] = 0x55 + I;
@@ -44,24 +44,24 @@ TEST(CAPITest, CWrapperFunctionResultAllocSmall) {
<< "Unexpected value at index " << I;
// Check that this value isn't treated as an out-of-band error.
- EXPECT_EQ(__orc_rt_CWrapperFunctionResultGetOutOfBandError(&R), nullptr);
+ EXPECT_EQ(orc_rt_CWrapperFunctionResultGetOutOfBandError(&R), nullptr);
- // Check that __orc_rt_CWrapperFunctionResult(Data|Result|Size) and
- // __orc_rt_CWrapperFunctionResultGetOutOfBandError behave as expected.
- EXPECT_EQ(__orc_rt_CWrapperFunctionResultData(&R), R.Data.Value);
- EXPECT_EQ(__orc_rt_CWrapperFunctionResultSize(&R), SmallAllocSize);
- EXPECT_FALSE(__orc_rt_CWrapperFunctionResultEmpty(&R));
- EXPECT_EQ(__orc_rt_CWrapperFunctionResultGetOutOfBandError(&R), nullptr);
+ // Check that orc_rt_CWrapperFunctionResult(Data|Result|Size) and
+ // orc_rt_CWrapperFunctionResultGetOutOfBandError behave as expected.
+ EXPECT_EQ(orc_rt_CWrapperFunctionResultData(&R), R.Data.Value);
+ EXPECT_EQ(orc_rt_CWrapperFunctionResultSize(&R), SmallAllocSize);
+ EXPECT_FALSE(orc_rt_CWrapperFunctionResultEmpty(&R));
+ EXPECT_EQ(orc_rt_CWrapperFunctionResultGetOutOfBandError(&R), nullptr);
// Check that we can dispose of the value.
- __orc_rt_DisposeCWrapperFunctionResult(&R);
+ orc_rt_DisposeCWrapperFunctionResult(&R);
}
TEST(CAPITest, CWrapperFunctionResultAllocLarge) {
constexpr size_t LargeAllocSize = sizeof(const char *) + 1;
- auto R = __orc_rt_CWrapperFunctionResultAllocate(LargeAllocSize);
- char *DataPtr = __orc_rt_CWrapperFunctionResultData(&R);
+ auto R = orc_rt_CWrapperFunctionResultAllocate(LargeAllocSize);
+ char *DataPtr = orc_rt_CWrapperFunctionResultData(&R);
for (size_t I = 0; I != LargeAllocSize; ++I)
DataPtr[I] = 0x55 + I;
@@ -75,17 +75,17 @@ TEST(CAPITest, CWrapperFunctionResultAllocLarge) {
<< "Unexpected value at index " << I;
// Check that this value isn't treated as an out-of-band error.
- EXPECT_EQ(__orc_rt_CWrapperFunctionResultGetOutOfBandError(&R), nullptr);
+ EXPECT_EQ(orc_rt_CWrapperFunctionResultGetOutOfBandError(&R), nullptr);
- // Check that __orc_rt_CWrapperFunctionResult(Data|Result|Size) and
- // __orc_rt_CWrapperFunctionResultGetOutOfBandError behave as expected.
- EXPECT_EQ(__orc_rt_CWrapperFunctionResultData(&R), R.Data.ValuePtr);
- EXPECT_EQ(__orc_rt_CWrapperFunctionResultSize(&R), LargeAllocSize);
- EXPECT_FALSE(__orc_rt_CWrapperFunctionResultEmpty(&R));
- EXPECT_EQ(__orc_rt_CWrapperFunctionResultGetOutOfBandError(&R), nullptr);
+ // Check that orc_rt_CWrapperFunctionResult(Data|Result|Size) and
+ // orc_rt_CWrapperFunctionResultGetOutOfBandError behave as expected.
+ EXPECT_EQ(orc_rt_CWrapperFunctionResultData(&R), R.Data.ValuePtr);
+ EXPECT_EQ(orc_rt_CWrapperFunctionResultSize(&R), LargeAllocSize);
+ EXPECT_FALSE(orc_rt_CWrapperFunctionResultEmpty(&R));
+ EXPECT_EQ(orc_rt_CWrapperFunctionResultGetOutOfBandError(&R), nullptr);
// Check that we can dispose of the value.
- __orc_rt_DisposeCWrapperFunctionResult(&R);
+ orc_rt_DisposeCWrapperFunctionResult(&R);
}
TEST(CAPITest, CWrapperFunctionResultFromRangeSmall) {
@@ -95,8 +95,8 @@ TEST(CAPITest, CWrapperFunctionResultFromRangeSmall) {
for (size_t I = 0; I != SmallAllocSize; ++I)
Source[I] = 0x55 + I;
- __orc_rt_CWrapperFunctionResult R =
- __orc_rt_CreateCWrapperFunctionResultFromRange(Source, SmallAllocSize);
+ orc_rt_CWrapperFunctionResult R =
+ orc_rt_CreateCWrapperFunctionResultFromRange(Source, SmallAllocSize);
// Check that the inline storage in R.Data.Value contains the expected
// sequence.
@@ -106,7 +106,7 @@ TEST(CAPITest, CWrapperFunctionResultFromRangeSmall) {
<< "Unexpected value at index " << I;
// Check that we can dispose of the value.
- __orc_rt_DisposeCWrapperFunctionResult(&R);
+ orc_rt_DisposeCWrapperFunctionResult(&R);
}
TEST(CAPITest, CWrapperFunctionResultFromRangeLarge) {
@@ -116,8 +116,8 @@ TEST(CAPITest, CWrapperFunctionResultFromRangeLarge) {
for (size_t I = 0; I != LargeAllocSize; ++I)
Source[I] = 0x55 + I;
- __orc_rt_CWrapperFunctionResult R =
- __orc_rt_CreateCWrapperFunctionResultFromRange(Source, LargeAllocSize);
+ orc_rt_CWrapperFunctionResult R =
+ orc_rt_CreateCWrapperFunctionResultFromRange(Source, LargeAllocSize);
// Check that the inline storage in R.Data.Value contains the expected
// sequence.
@@ -127,7 +127,7 @@ TEST(CAPITest, CWrapperFunctionResultFromRangeLarge) {
<< "Unexpected value at index " << I;
// Check that we can dispose of the value.
- __orc_rt_DisposeCWrapperFunctionResult(&R);
+ orc_rt_DisposeCWrapperFunctionResult(&R);
}
TEST(CAPITest, CWrapperFunctionResultFromStringSmall) {
@@ -138,8 +138,8 @@ TEST(CAPITest, CWrapperFunctionResultFromStringSmall) {
Source[I] = 'a' + I;
Source[SmallAllocSize - 1] = '\0';
- __orc_rt_CWrapperFunctionResult R =
- __orc_rt_CreateCWrapperFunctionResultFromString(Source);
+ orc_rt_CWrapperFunctionResult R =
+ orc_rt_CreateCWrapperFunctionResultFromString(Source);
// Check that the inline storage in R.Data.Value contains the expected
// sequence.
@@ -151,7 +151,7 @@ TEST(CAPITest, CWrapperFunctionResultFromStringSmall) {
<< "Unexpected value at index " << (SmallAllocSize - 1);
// Check that we can dispose of the value.
- __orc_rt_DisposeCWrapperFunctionResult(&R);
+ orc_rt_DisposeCWrapperFunctionResult(&R);
}
TEST(CAPITest, CWrapperFunctionResultFromStringLarge) {
@@ -162,8 +162,8 @@ TEST(CAPITest, CWrapperFunctionResultFromStringLarge) {
Source[I] = 'a' + I;
Source[LargeAllocSize - 1] = '\0';
- __orc_rt_CWrapperFunctionResult R =
- __orc_rt_CreateCWrapperFunctionResultFromString(Source);
+ orc_rt_CWrapperFunctionResult R =
+ orc_rt_CreateCWrapperFunctionResultFromString(Source);
// Check that the inline storage in R.Data.Value contains the expected
// sequence.
@@ -175,26 +175,26 @@ TEST(CAPITest, CWrapperFunctionResultFromStringLarge) {
<< "Unexpected value at index " << (LargeAllocSize - 1);
// Check that we can dispose of the value.
- __orc_rt_DisposeCWrapperFunctionResult(&R);
+ orc_rt_DisposeCWrapperFunctionResult(&R);
}
TEST(CAPITest, CWrapperFunctionResultFromOutOfBandError) {
constexpr const char *ErrMsg = "test error message";
- __orc_rt_CWrapperFunctionResult R =
- __orc_rt_CreateCWrapperFunctionResultFromOutOfBandError(ErrMsg);
+ orc_rt_CWrapperFunctionResult R =
+ orc_rt_CreateCWrapperFunctionResultFromOutOfBandError(ErrMsg);
#ifndef NDEBUG
- EXPECT_DEATH({ __orc_rt_CWrapperFunctionResultData(&R); },
+ EXPECT_DEATH({ orc_rt_CWrapperFunctionResultData(&R); },
"Cannot get data for out-of-band error value");
- EXPECT_DEATH({ __orc_rt_CWrapperFunctionResultSize(&R); },
+ EXPECT_DEATH({ orc_rt_CWrapperFunctionResultSize(&R); },
"Cannot get size for out-of-band error value");
#endif
- EXPECT_FALSE(__orc_rt_CWrapperFunctionResultEmpty(&R));
- const char *OOBErrMsg = __orc_rt_CWrapperFunctionResultGetOutOfBandError(&R);
+ EXPECT_FALSE(orc_rt_CWrapperFunctionResultEmpty(&R));
+ const char *OOBErrMsg = orc_rt_CWrapperFunctionResultGetOutOfBandError(&R);
EXPECT_NE(OOBErrMsg, nullptr);
EXPECT_NE(OOBErrMsg, ErrMsg);
EXPECT_TRUE(strcmp(OOBErrMsg, ErrMsg) == 0);
- __orc_rt_DisposeCWrapperFunctionResult(&R);
+ orc_rt_DisposeCWrapperFunctionResult(&R);
}
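The rename drops the reserved double-underscore prefix from the C API. A minimal round trip through the renamed functions, assuming the orc_rt/c_api.h include path from this commit:

    #include <cassert>
    #include <cstring>

    #include "orc_rt/c_api.h"

    int main() {
      orc_rt_CWrapperFunctionResult R =
          orc_rt_CreateCWrapperFunctionResultFromString("hello");
      assert(!orc_rt_CWrapperFunctionResultEmpty(&R));
      assert(std::strcmp(orc_rt_CWrapperFunctionResultData(&R), "hello") == 0);
      orc_rt_DisposeCWrapperFunctionResult(&R);
    }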
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/wrapper_function_utils_test.cpp b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/wrapper_function_utils_test.cpp
index 8d4b9b3cba2b..f10c5093046d 100644
--- a/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/wrapper_function_utils_test.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/wrapper_function_utils_test.cpp
@@ -27,8 +27,8 @@ TEST(WrapperFunctionUtilsTest, DefaultWrapperFunctionResult) {
}
TEST(WrapperFunctionUtilsTest, WrapperFunctionResultFromCStruct) {
- __orc_rt_CWrapperFunctionResult CR =
- __orc_rt_CreateCWrapperFunctionResultFromString(TestString);
+ orc_rt_CWrapperFunctionResult CR =
+ orc_rt_CreateCWrapperFunctionResultFromString(TestString);
WrapperFunctionResult R(CR);
EXPECT_EQ(R.size(), strlen(TestString) + 1);
EXPECT_TRUE(strcmp(R.data(), TestString) == 0);
@@ -72,13 +72,13 @@ TEST(WrapperFunctionUtilsTest, WrapperFunctionCCallCreateEmpty) {
static void voidNoop() {}
-static __orc_rt_CWrapperFunctionResult voidNoopWrapper(const char *ArgData,
- size_t ArgSize) {
+static orc_rt_CWrapperFunctionResult voidNoopWrapper(const char *ArgData,
+ size_t ArgSize) {
return WrapperFunction<void()>::handle(ArgData, ArgSize, voidNoop).release();
}
-static __orc_rt_CWrapperFunctionResult addWrapper(const char *ArgData,
- size_t ArgSize) {
+static orc_rt_CWrapperFunctionResult addWrapper(const char *ArgData,
+ size_t ArgSize) {
return WrapperFunction<int32_t(int32_t, int32_t)>::handle(
ArgData, ArgSize,
[](int32_t X, int32_t Y) -> int32_t { return X + Y; })
@@ -87,11 +87,11 @@ static __orc_rt_CWrapperFunctionResult addWrapper(const char *ArgData,
extern "C" __orc_rt_Opaque __orc_rt_jit_dispatch_ctx{};
-extern "C" __orc_rt_CWrapperFunctionResult
+extern "C" orc_rt_CWrapperFunctionResult
__orc_rt_jit_dispatch(__orc_rt_Opaque *Ctx, const void *FnTag,
const char *ArgData, size_t ArgSize) {
using WrapperFunctionType =
- __orc_rt_CWrapperFunctionResult (*)(const char *, size_t);
+ orc_rt_CWrapperFunctionResult (*)(const char *, size_t);
return reinterpret_cast<WrapperFunctionType>(const_cast<void *>(FnTag))(
ArgData, ArgSize);
@@ -117,8 +117,8 @@ private:
int32_t X;
};
-static __orc_rt_CWrapperFunctionResult addMethodWrapper(const char *ArgData,
- size_t ArgSize) {
+static orc_rt_CWrapperFunctionResult addMethodWrapper(const char *ArgData,
+ size_t ArgSize) {
return WrapperFunction<int32_t(SPSExecutorAddr, int32_t)>::handle(
ArgData, ArgSize, makeMethodWrapperHandler(&AddClass::addMethod))
.release();
@@ -132,8 +132,8 @@ TEST(WrapperFunctionUtilsTest, WrapperFunctionMethodCallAndHandleRet) {
EXPECT_EQ(Result, (int32_t)3);
}
-static __orc_rt_CWrapperFunctionResult sumArrayWrapper(const char *ArgData,
- size_t ArgSize) {
+static orc_rt_CWrapperFunctionResult sumArrayWrapper(const char *ArgData,
+ size_t ArgSize) {
return WrapperFunction<int8_t(SPSExecutorAddrRange)>::handle(
ArgData, ArgSize,
[](ExecutorAddrRange R) {
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/wrapper_function_utils.h b/contrib/llvm-project/compiler-rt/lib/orc/wrapper_function_utils.h
index b48891b3b750..dcb6d0e6addb 100644
--- a/contrib/llvm-project/compiler-rt/lib/orc/wrapper_function_utils.h
+++ b/contrib/llvm-project/compiler-rt/lib/orc/wrapper_function_utils.h
@@ -27,66 +27,66 @@ namespace __orc_rt {
class WrapperFunctionResult {
public:
/// Create a default WrapperFunctionResult.
- WrapperFunctionResult() { __orc_rt_CWrapperFunctionResultInit(&R); }
+ WrapperFunctionResult() { orc_rt_CWrapperFunctionResultInit(&R); }
/// Create a WrapperFunctionResult from a CWrapperFunctionResult. This
/// instance takes ownership of the result object and will automatically
/// call dispose on the result upon destruction.
- WrapperFunctionResult(__orc_rt_CWrapperFunctionResult R) : R(R) {}
+ WrapperFunctionResult(orc_rt_CWrapperFunctionResult R) : R(R) {}
WrapperFunctionResult(const WrapperFunctionResult &) = delete;
WrapperFunctionResult &operator=(const WrapperFunctionResult &) = delete;
WrapperFunctionResult(WrapperFunctionResult &&Other) {
- __orc_rt_CWrapperFunctionResultInit(&R);
+ orc_rt_CWrapperFunctionResultInit(&R);
std::swap(R, Other.R);
}
WrapperFunctionResult &operator=(WrapperFunctionResult &&Other) {
- __orc_rt_CWrapperFunctionResult Tmp;
- __orc_rt_CWrapperFunctionResultInit(&Tmp);
+ orc_rt_CWrapperFunctionResult Tmp;
+ orc_rt_CWrapperFunctionResultInit(&Tmp);
std::swap(Tmp, Other.R);
std::swap(R, Tmp);
return *this;
}
- ~WrapperFunctionResult() { __orc_rt_DisposeCWrapperFunctionResult(&R); }
+ ~WrapperFunctionResult() { orc_rt_DisposeCWrapperFunctionResult(&R); }
/// Relinquish ownership of and return the
- /// __orc_rt_CWrapperFunctionResult.
- __orc_rt_CWrapperFunctionResult release() {
- __orc_rt_CWrapperFunctionResult Tmp;
- __orc_rt_CWrapperFunctionResultInit(&Tmp);
+ /// orc_rt_CWrapperFunctionResult.
+ orc_rt_CWrapperFunctionResult release() {
+ orc_rt_CWrapperFunctionResult Tmp;
+ orc_rt_CWrapperFunctionResultInit(&Tmp);
std::swap(R, Tmp);
return Tmp;
}
/// Get a pointer to the data contained in this instance.
- char *data() { return __orc_rt_CWrapperFunctionResultData(&R); }
+ char *data() { return orc_rt_CWrapperFunctionResultData(&R); }
/// Returns the size of the data contained in this instance.
- size_t size() const { return __orc_rt_CWrapperFunctionResultSize(&R); }
+ size_t size() const { return orc_rt_CWrapperFunctionResultSize(&R); }
/// Returns true if this value is equivalent to a default-constructed
/// WrapperFunctionResult.
- bool empty() const { return __orc_rt_CWrapperFunctionResultEmpty(&R); }
+ bool empty() const { return orc_rt_CWrapperFunctionResultEmpty(&R); }
/// Create a WrapperFunctionResult with the given size and return a pointer
/// to the underlying memory.
static WrapperFunctionResult allocate(size_t Size) {
WrapperFunctionResult R;
- R.R = __orc_rt_CWrapperFunctionResultAllocate(Size);
+ R.R = orc_rt_CWrapperFunctionResultAllocate(Size);
return R;
}
/// Copy from the given char range.
static WrapperFunctionResult copyFrom(const char *Source, size_t Size) {
- return __orc_rt_CreateCWrapperFunctionResultFromRange(Source, Size);
+ return orc_rt_CreateCWrapperFunctionResultFromRange(Source, Size);
}
/// Copy from the given null-terminated string (includes the null-terminator).
static WrapperFunctionResult copyFrom(const char *Source) {
- return __orc_rt_CreateCWrapperFunctionResultFromString(Source);
+ return orc_rt_CreateCWrapperFunctionResultFromString(Source);
}
/// Copy from the given std::string (includes the null terminator).
@@ -96,7 +96,7 @@ public:
/// Create an out-of-band error by copying the given string.
static WrapperFunctionResult createOutOfBandError(const char *Msg) {
- return __orc_rt_CreateCWrapperFunctionResultFromOutOfBandError(Msg);
+ return orc_rt_CreateCWrapperFunctionResultFromOutOfBandError(Msg);
}
/// Create an out-of-band error by copying the given string.
@@ -117,11 +117,11 @@ public:
/// If this value is an out-of-band error then this returns the error message,
/// otherwise returns nullptr.
const char *getOutOfBandError() const {
- return __orc_rt_CWrapperFunctionResultGetOutOfBandError(&R);
+ return orc_rt_CWrapperFunctionResultGetOutOfBandError(&R);
}
private:
- __orc_rt_CWrapperFunctionResult R;
+ orc_rt_CWrapperFunctionResult R;
};
namespace detail {
@@ -434,7 +434,7 @@ public:
/// Run call returning raw WrapperFunctionResult.
WrapperFunctionResult run() const {
using FnTy =
- __orc_rt_CWrapperFunctionResult(const char *ArgData, size_t ArgSize);
+ orc_rt_CWrapperFunctionResult(const char *ArgData, size_t ArgSize);
return WrapperFunctionResult(
FnAddr.toPtr<FnTy *>()(ArgData.data(), ArgData.size()));
}
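
The renaming above is mechanical, but the class it touches is a reusable pattern: an RAII wrapper that owns a plain C struct and funnels all cleanup through the C API's dispose function. A condensed, self-contained sketch of that pattern, with stand-in names rather than the real orc_rt API:

#include <cstddef>
#include <utility>

struct CResult { char *Data; size_t Size; };  // stand-in for the C struct
static void c_init(CResult *R) { R->Data = nullptr; R->Size = 0; }
static void c_dispose(CResult *R) { delete[] R->Data; c_init(R); }

class Result {
public:
  Result() { c_init(&R); }
  Result(CResult R) : R(R) {}  // takes ownership of the C value
  Result(const Result &) = delete;
  Result &operator=(const Result &) = delete;
  Result(Result &&Other) { c_init(&R); std::swap(R, Other.R); }
  Result &operator=(Result &&Other) {
    c_dispose(&R);          // drop our old value
    std::swap(R, Other.R);  // leave Other empty
    return *this;
  }
  ~Result() { c_dispose(&R); }
  CResult release() {  // relinquish ownership, as in the header above
    CResult Tmp;
    c_init(&Tmp);
    std::swap(R, Tmp);
    return Tmp;
  }
private:
  CResult R;
};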
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.c
index fdb7b7cd806c..0dd5ff5ae633 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.c
@@ -20,6 +20,14 @@
#define INSTR_PROF_VALUE_PROF_DATA
#include "profile/InstrProfData.inc"
+static uint32_t __llvm_profile_global_timestamp = 1;
+
+COMPILER_RT_VISIBILITY
+void INSTR_PROF_PROFILE_SET_TIMESTAMP(uint64_t *Probe) {
+ if (*Probe == 0 || *Probe == (uint64_t)-1)
+ *Probe = __llvm_profile_global_timestamp++;
+}
+
COMPILER_RT_VISIBILITY uint64_t __llvm_profile_get_magic(void) {
return sizeof(void *) == sizeof(uint64_t) ? (INSTR_PROF_RAW_MAGIC_64)
: (INSTR_PROF_RAW_MAGIC_32);
@@ -42,6 +50,9 @@ COMPILER_RT_VISIBILITY uint64_t __llvm_profile_get_version(void) {
}
COMPILER_RT_VISIBILITY void __llvm_profile_reset_counters(void) {
+ if (__llvm_profile_get_version() & VARIANT_MASK_TEMPORAL_PROF)
+ __llvm_profile_global_timestamp = 1;
+
char *I = __llvm_profile_begin_counters();
char *E = __llvm_profile_end_counters();
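
The probe logic added above stamps each counter at most once per run with a monotonically increasing value, and reset restarts the clock. A standalone sketch of that behavior (the harness is hypothetical; the two functions mirror the diff):

#include <stdint.h>
#include <stdio.h>

static uint32_t global_timestamp = 1;

static void set_timestamp(uint64_t *probe) {
  if (*probe == 0 || *probe == (uint64_t)-1)  /* unset or poisoned */
    *probe = global_timestamp++;
}

int main(void) {
  uint64_t a = 0, b = 0;
  set_timestamp(&a);     /* a == 1 */
  set_timestamp(&b);     /* b == 2 */
  set_timestamp(&a);     /* no effect: a is already stamped */
  global_timestamp = 1;  /* what __llvm_profile_reset_counters now does */
  printf("%llu %llu\n", (unsigned long long)a, (unsigned long long)b);
  return 0;
}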
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingBuffer.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingBuffer.c
index 57f8b68919b1..61ac5d9c0285 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingBuffer.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingBuffer.c
@@ -107,7 +107,8 @@ void __llvm_profile_get_padding_sizes_for_counters(
uint64_t *PaddingBytesAfterNames) {
if (!needsCounterPadding()) {
*PaddingBytesBeforeCounters = 0;
- *PaddingBytesAfterCounters = 0;
+ *PaddingBytesAfterCounters =
+ __llvm_profile_get_num_padding_bytes(CountersSize);
*PaddingBytesAfterNames = __llvm_profile_get_num_padding_bytes(NamesSize);
return;
}
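
The fix above makes the no-padding path pad the tail of the counter section the same way the names section already was: out to an 8-byte boundary. A sketch of that rounding rule (assumed to match the semantics of __llvm_profile_get_num_padding_bytes as used here):

#include <stdint.h>

static uint64_t num_padding_bytes(uint64_t size) {
  return (8 - (size % 8)) % 8;  /* 0 when size is already 8-byte aligned */
}
/* num_padding_bytes(24) == 0, num_padding_bytes(25) == 7 */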
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingFile.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingFile.c
index f74be55bcd5e..54e3030d5899 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingFile.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingFile.c
@@ -424,13 +424,10 @@ static void createProfileDir(const char *Filename) {
* its instrumented shared libraries dump profile data into their own data file.
*/
static FILE *openFileForMerging(const char *ProfileFileName, int *MergeDone) {
- FILE *ProfileFile = NULL;
+ FILE *ProfileFile = getProfileFile();
int rc;
- ProfileFile = getProfileFile();
- if (ProfileFile) {
- lprofLockFileHandle(ProfileFile);
- } else {
+ if (!ProfileFile) {
createProfileDir(ProfileFileName);
ProfileFile = lprofOpenFileEx(ProfileFileName);
}
@@ -481,9 +478,6 @@ static int writeFile(const char *OutputName) {
if (OutputFile == getProfileFile()) {
fflush(OutputFile);
- if (doMerging()) {
- lprofUnlockFileHandle(OutputFile);
- }
} else {
fclose(OutputFile);
}
@@ -655,7 +649,14 @@ static void initializeProfileForContinuousMode(void) {
static const char *DefaultProfileName = "default.profraw";
static void resetFilenameToDefault(void) {
if (lprofCurFilename.FilenamePat && lprofCurFilename.OwnsFilenamePat) {
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
free((void *)lprofCurFilename.FilenamePat);
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
}
memset(&lprofCurFilename, 0, sizeof(lprofCurFilename));
lprofCurFilename.FilenamePat = DefaultProfileName;
@@ -697,6 +698,10 @@ static int parseFilenamePattern(const char *FilenamePat,
int MergingEnabled = 0;
int FilenamePatLen = strlen(FilenamePat);
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
/* Clean up cached prefix and filename. */
if (lprofCurFilename.ProfilePathPrefix)
free((void *)lprofCurFilename.ProfilePathPrefix);
@@ -704,6 +709,9 @@ static int parseFilenamePattern(const char *FilenamePat,
if (lprofCurFilename.FilenamePat && lprofCurFilename.OwnsFilenamePat) {
free((void *)lprofCurFilename.FilenamePat);
}
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
memset(&lprofCurFilename, 0, sizeof(lprofCurFilename));
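
Both hunks above use the same idiom: lprofCurFilename stores its strings behind const pointers, so handing one to free() requires a const-stripping cast, which GCC's -Wcast-qual flags. The pragmas scope the suppression to just that cast. The idiom in isolation, with hypothetical names:

#include <stdlib.h>

static const char *Pat;  /* owned iff OwnsPat is set */
static int OwnsPat;

static void reset_pattern(void) {
  if (Pat && OwnsPat) {
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
    free((void *)Pat);  /* the cast -Wcast-qual would warn about */
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
  }
  Pat = NULL;
  OwnsPat = 0;
}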
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingInternal.h b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingInternal.h
index b2ce11067abd..360165e32ab3 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingInternal.h
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingInternal.h
@@ -198,4 +198,12 @@ extern void (*VPMergeHook)(struct ValueProfData *, __llvm_profile_data *);
*/
int __llvm_write_binary_ids(ProfDataWriter *Writer);
+/*
+ * Write binary id length and then its data, because binary id does not
+ * have a fixed length.
+ */
+int lprofWriteOneBinaryId(ProfDataWriter *Writer, uint64_t BinaryIdLen,
+ const uint8_t *BinaryIdData,
+ uint64_t BinaryIdPadding);
+
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingMerge.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingMerge.c
index 4da88b7d7bdb..432e824955f8 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingMerge.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingMerge.c
@@ -38,6 +38,11 @@ uint64_t lprofGetLoadModuleSignature(void) {
__llvm_profile_get_magic();
}
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
/* Returns 1 if profile is not structurally compatible. */
COMPILER_RT_VISIBILITY
int __llvm_profile_check_compatibility(const char *ProfileData,
@@ -183,3 +188,7 @@ int __llvm_profile_merge_from_buffer(const char *ProfileData,
return 0;
}
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformAIX.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformAIX.c
new file mode 100644
index 000000000000..63219da18ae3
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformAIX.c
@@ -0,0 +1,210 @@
+/*===- InstrProfilingPlatformAIX.c - Profile data AIX platform ------------===*\
+|*
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+|*
+\*===----------------------------------------------------------------------===*/
+
+#if defined(_AIX)
+
+#ifdef __64BIT__
+#define __XCOFF64__
+#endif
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ldr.h>
+#include <xcoff.h>
+
+#include "InstrProfiling.h"
+#include "InstrProfilingInternal.h"
+
+#define BIN_ID_PREFIX "xcoff_binary_id:"
+
+// If found, write the build-id into the Result buffer.
+static size_t FindBinaryId(char *Result, size_t Size) {
+ unsigned long EntryAddr = (unsigned long)__builtin_return_address(0);
+
+ // Use loadquery to get information about loaded modules; loadquery writes
+ // its result into a buffer of unknown size.
+ char Buf[1024];
+ size_t BufSize = sizeof(Buf);
+ char *BufPtr = Buf;
+ int RC = -1;
+
+ errno = 0;
+ RC = loadquery(L_GETXINFO | L_IGNOREUNLOAD, BufPtr, (unsigned int)BufSize);
+ if (RC == -1 && errno == ENOMEM) {
+ BufSize = 64000; // should be plenty for any program.
+ BufPtr = malloc(BufSize);
+ if (BufPtr != 0)
+ RC = loadquery(L_GETXINFO | L_IGNOREUNLOAD, BufPtr, (unsigned int)BufSize);
+ }
+
+ if (RC == -1)
+ goto done;
+
+ // Locate the ld_xinfo corresponding to this module.
+ struct ld_xinfo *CurInfo = (struct ld_xinfo *)BufPtr;
+ while (1) {
+ unsigned long CurTextStart = (uint64_t)CurInfo->ldinfo_textorg;
+ unsigned long CurTextEnd = CurTextStart + CurInfo->ldinfo_textsize;
+ if (CurTextStart <= EntryAddr && EntryAddr < CurTextEnd) {
+ // Found my slot. Now search for the build-id.
+ char *p = (char *)CurInfo->ldinfo_textorg;
+
+ FILHDR *f = (FILHDR *)p;
+ AOUTHDR *a = (AOUTHDR *)(p + FILHSZ);
+ SCNHDR *s =
+ (SCNHDR *)(p + FILHSZ + f->f_opthdr + SCNHSZ * (a->o_snloader - 1));
+ LDHDR *ldhdr = (LDHDR *)(p + s->s_scnptr);
+ // This is the loader string table
+ char *lstr = (char *)ldhdr + ldhdr->l_stoff;
+
+ // If the build-id exists, it's the first entry.
+      // Each entry consists of a 2-byte size field, followed by the
+      // data.
+ size_t len = *(short *)lstr;
+ char *str = (char *)(lstr + 2);
+ size_t PrefixLen = sizeof(BIN_ID_PREFIX) - 1;
+ if (len > PrefixLen && (len - PrefixLen) <= Size &&
+ strncmp(str, BIN_ID_PREFIX, PrefixLen) == 0) {
+ memcpy(Result, str + PrefixLen, len - PrefixLen);
+ RC = len - PrefixLen;
+ goto done;
+ }
+ break;
+ }
+ if (CurInfo->ldinfo_next == 0u)
+ break;
+ CurInfo = (struct ld_xinfo *)((char *)CurInfo + CurInfo->ldinfo_next);
+ }
+done:
+ if (BufSize != sizeof(Buf) && BufPtr != 0)
+ free(BufPtr);
+ return RC;
+}
+
+static int StrToHexError = 0;
+static uint8_t StrToHex(char c) {
+ if (c >= '0' && c <= '9')
+ return c - '0';
+ if (c >= 'a' && c <= 'f')
+ return c - 'a' + 0xa;
+ if (c >= 'A' && c <= 'F')
+ return c - 'A' + 0xa;
+ StrToHexError = 1;
+ return 0;
+}
+
+COMPILER_RT_VISIBILITY int __llvm_write_binary_ids(ProfDataWriter *Writer) {
+ // 200 bytes should be enough for the build-id hex string.
+ static char Buf[200];
+  // Profile reading tools expect this to be 8 bytes long.
+ static int64_t BinaryIdLen = 0;
+ static uint8_t *BinaryIdData = 0;
+
+ // -1 means we already checked for a BinaryId and didn't find one.
+ if (BinaryIdLen == -1)
+ return 0;
+
+ // Are we being called for the first time?
+ if (BinaryIdLen == 0) {
+ if (getenv("LLVM_PROFILE_NO_BUILD_ID"))
+ goto fail;
+
+ int BuildIdLen = FindBinaryId(Buf, sizeof(Buf));
+ if (BuildIdLen <= 0)
+ goto fail;
+
+ if (Buf[BuildIdLen - 1] == '\0')
+ BuildIdLen--;
+
+ // assume even number of digits/chars, so 0xabc must be 0x0abc
+ if ((BuildIdLen % 2) != 0 || BuildIdLen == 0)
+ goto fail;
+
+    // The numeric ID is represented as an ASCII string in the loader section,
+ // so convert it to raw binary.
+ BinaryIdLen = BuildIdLen / 2;
+ BinaryIdData = (uint8_t *)Buf;
+
+ // Skip "0x" prefix if it exists.
+ if (Buf[0] == '0' && Buf[1] == 'x') {
+ BinaryIdLen -= 1;
+ BinaryIdData += 2;
+ }
+
+ StrToHexError = 0;
+ for (int i = 0; i < BinaryIdLen; i++)
+ BinaryIdData[i] = (StrToHex(BinaryIdData[2 * i]) << 4) +
+ StrToHex(BinaryIdData[2 * i + 1]);
+
+ if (StrToHexError)
+ goto fail;
+
+ if (getenv("LLVM_PROFILE_VERBOSE")) {
+ char *StrBuf = (char *)COMPILER_RT_ALLOCA(2 * BinaryIdLen + 1);
+ for (int i = 0; i < (int)BinaryIdLen; i++)
+ sprintf(&StrBuf[2 * i], "%02x", BinaryIdData[i]);
+ PROF_NOTE("Writing binary id: %s\n", StrBuf);
+ }
+ }
+
+ uint8_t BinaryIdPadding = __llvm_profile_get_num_padding_bytes(BinaryIdLen);
+ if (Writer && lprofWriteOneBinaryId(Writer, BinaryIdLen, BinaryIdData,
+ BinaryIdPadding) == -1)
+    return -1; // Return -1 rather than goto fail, to match the NT_GNU_BUILD_ID path.
+
+ return sizeof(BinaryIdLen) + BinaryIdLen + BinaryIdPadding;
+
+fail:
+ if (getenv("LLVM_PROFILE_VERBOSE"))
+ fprintf(stderr, "no or invalid binary id: %.*s\n", (int)sizeof(Buf), Buf);
+ BinaryIdLen = -1;
+ return 0;
+}
+
+// Empty stubs to allow linking object files using the registration-based scheme
+COMPILER_RT_VISIBILITY
+void __llvm_profile_register_function(void *Data_) {}
+
+COMPILER_RT_VISIBILITY
+void __llvm_profile_register_names_function(void *NamesStart,
+ uint64_t NamesSize) {}
+
+// The __start_SECNAME and __stop_SECNAME symbols (for SECNAME \in
+// {"__llvm_prf_cnts", "__llvm_prf_data", "__llvm_prf_name", "__llvm_prf_vnds"})
+// are always live when linking on AIX, regardless of whether the .o's being linked
+// reference symbols from the profile library (for example when no files were
+// compiled with -fprofile-generate). That's because these symbols are kept
+// alive through references in constructor functions that are always live in the
+// default linking model on AIX (-bcdtors:all). The __start_SECNAME and
+// __stop_SECNAME symbols are only resolved by the linker when the SECNAME
+// section exists. So for the scenario where the user objects have no such
+// section (i.e. when they are compiled with -fno-profile-generate), we always
+// define these zero length variables in each of the above 4 sections.
+static int dummy_cnts[0] COMPILER_RT_SECTION(
+ COMPILER_RT_SEG INSTR_PROF_CNTS_SECT_NAME);
+static int dummy_data[0] COMPILER_RT_SECTION(
+ COMPILER_RT_SEG INSTR_PROF_DATA_SECT_NAME);
+static const int dummy_name[0] COMPILER_RT_SECTION(
+ COMPILER_RT_SEG INSTR_PROF_NAME_SECT_NAME);
+static int dummy_vnds[0] COMPILER_RT_SECTION(
+ COMPILER_RT_SEG INSTR_PROF_VNODES_SECT_NAME);
+
+// To avoid GC'ing of the dummy variables by the linker, reference them in an
+// array and reference the array in the runtime registration code
+// (InstrProfilingRuntime.cpp)
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+COMPILER_RT_VISIBILITY
+void *__llvm_profile_keep[] = {(void *)&dummy_cnts, (void *)&dummy_data,
+ (void *)&dummy_name, (void *)&dummy_vnds};
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
+#endif
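
The conversion loop in __llvm_write_binary_ids above decodes the ASCII hex build-id in place; the same logic as a standalone helper (hypothetical name, same semantics, including the "0x" skip and the even-digit requirement):

#include <stdint.h>

static int hex_err;
static uint8_t hex1(char c) {
  if (c >= '0' && c <= '9') return c - '0';
  if (c >= 'a' && c <= 'f') return c - 'a' + 0xa;
  if (c >= 'A' && c <= 'F') return c - 'A' + 0xa;
  hex_err = 1;
  return 0;
}

/* Decodes in place; returns the byte count, or -1 on malformed input. */
static int decode_build_id(char *buf, int len) {
  if (len >= 2 && buf[0] == '0' && buf[1] == 'x') { buf += 2; len -= 2; }
  if (len == 0 || (len % 2) != 0)
    return -1;  /* "0xabc" must have been written as "0x0abc" */
  hex_err = 0;
  for (int i = 0; i < len / 2; i++)  /* write at i, read at 2i: safe in place */
    ((uint8_t *)buf)[i] =
        (uint8_t)((hex1(buf[2 * i]) << 4) + hex1(buf[2 * i + 1]));
  return hex_err ? -1 : len / 2;
}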
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c
index adf4132c6b4d..2cce0a4b2c48 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c
@@ -94,26 +94,6 @@ static size_t RoundUp(size_t size, size_t align) {
}
/*
- * Write binary id length and then its data, because binary id does not
- * have a fixed length.
- */
-static int WriteOneBinaryId(ProfDataWriter *Writer, uint64_t BinaryIdLen,
- const uint8_t *BinaryIdData,
- uint64_t BinaryIdPadding) {
- ProfDataIOVec BinaryIdIOVec[] = {
- {&BinaryIdLen, sizeof(uint64_t), 1, 0},
- {BinaryIdData, sizeof(uint8_t), BinaryIdLen, 0},
- {NULL, sizeof(uint8_t), BinaryIdPadding, 1},
- };
- if (Writer->Write(Writer, BinaryIdIOVec,
- sizeof(BinaryIdIOVec) / sizeof(*BinaryIdIOVec)))
- return -1;
-
- /* Successfully wrote binary id, report success. */
- return 0;
-}
-
-/*
* Look for the note that has the name "GNU\0" and type NT_GNU_BUILD_ID
* that contains build id. If build id exists, write binary id.
*
@@ -135,8 +115,9 @@ static int WriteBinaryIdForNote(ProfDataWriter *Writer,
const uint8_t *BinaryIdData =
(const uint8_t *)(NoteName + RoundUp(Note->n_namesz, 4));
uint8_t BinaryIdPadding = __llvm_profile_get_num_padding_bytes(BinaryIdLen);
- if (Writer != NULL && WriteOneBinaryId(Writer, BinaryIdLen, BinaryIdData,
- BinaryIdPadding) == -1)
+ if (Writer != NULL &&
+ lprofWriteOneBinaryId(Writer, BinaryIdLen, BinaryIdData,
+ BinaryIdPadding) == -1)
return -1;
BinaryIdSize = sizeof(BinaryIdLen) + BinaryIdLen + BinaryIdPadding;
@@ -220,7 +201,7 @@ COMPILER_RT_VISIBILITY int __llvm_write_binary_ids(ProfDataWriter *Writer) {
return TotalBinaryIdsSize;
}
-#else /* !NT_GNU_BUILD_ID */
+#elif !defined(_AIX) /* !NT_GNU_BUILD_ID */
/*
* Fallback implementation for targets that don't support the GNU
* extensions NT_GNU_BUILD_ID and __ehdr_start.
@@ -230,41 +211,4 @@ COMPILER_RT_VISIBILITY int __llvm_write_binary_ids(ProfDataWriter *Writer) {
}
#endif
-#if defined(_AIX)
-// Empty stubs to allow linking object files using the registration-based scheme
-COMPILER_RT_VISIBILITY
-void __llvm_profile_register_function(void *Data_) {}
-
-COMPILER_RT_VISIBILITY
-void __llvm_profile_register_names_function(void *NamesStart,
- uint64_t NamesSize) {}
-
-// The __start_SECNAME and __stop_SECNAME symbols (for SECNAME \in
-// {"__llvm_prf_cnts", "__llvm_prf_data", "__llvm_prf_name", "__llvm_prf_vnds"})
-// are always live when linking on AIX, regardless if the .o's being linked
-// reference symbols from the profile library (for example when no files were
-// compiled with -fprofile-generate). That's because these symbols are kept
-// alive through references in constructor functions that are always live in the
-// default linking model on AIX (-bcdtors:all). The __start_SECNAME and
-// __stop_SECNAME symbols are only resolved by the linker when the SECNAME
-// section exists. So for the scenario where the user objects have no such
-// section (i.e. when they are compiled with -fno-profile-generate), we always
-// define these zero length variables in each of the above 4 sections.
-static int dummy_cnts[0] COMPILER_RT_SECTION(
- COMPILER_RT_SEG INSTR_PROF_CNTS_SECT_NAME);
-static int dummy_data[0] COMPILER_RT_SECTION(
- COMPILER_RT_SEG INSTR_PROF_DATA_SECT_NAME);
-static const int dummy_name[0] COMPILER_RT_SECTION(
- COMPILER_RT_SEG INSTR_PROF_NAME_SECT_NAME);
-static int dummy_vnds[0] COMPILER_RT_SECTION(
- COMPILER_RT_SEG INSTR_PROF_VNODES_SECT_NAME);
-
-// To avoid GC'ing of the dummy variables by the linker, reference them in an
-// array and reference the array in the runtime registration code
-// (InstrProfilingRuntime.cpp)
-COMPILER_RT_VISIBILITY
-void *__llvm_profile_keep[] = {(void *)&dummy_cnts, (void *)&dummy_data,
- (void *)&dummy_name, (void *)&dummy_vnds};
-#endif
-
#endif
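
For reference, the parsing WriteBinaryIdForNote performs above follows the standard ELF note layout: a (namesz, descsz, type) header, the 4-byte-padded name, then the descriptor, which for an NT_GNU_BUILD_ID note named "GNU\0" is the build id itself. A sketch of that layout under the standard glibc definitions (not the file's exact helper):

#include <elf.h>
#include <link.h>
#include <stddef.h>
#include <string.h>

static size_t round_up4(size_t x) { return (x + 3) & ~(size_t)3; }

/* Returns the build-id bytes and length, or NULL if this note is not one. */
static const unsigned char *get_build_id(const ElfW(Nhdr) *note, size_t *len) {
  const char *name = (const char *)note + sizeof(*note);
  if (note->n_type != NT_GNU_BUILD_ID || note->n_namesz != 4 ||
      memcmp(name, "GNU", 4) != 0)
    return NULL;
  *len = note->n_descsz;
  return (const unsigned char *)(name + round_up4(note->n_namesz));
}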
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingValue.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingValue.c
index c819a38553f3..3d7c245f795f 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingValue.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingValue.c
@@ -59,7 +59,14 @@ COMPILER_RT_VISIBILITY void lprofSetMaxValsPerSite(uint32_t MaxVals) {
COMPILER_RT_VISIBILITY void
__llvm_profile_set_num_value_sites(__llvm_profile_data *Data,
uint32_t ValueKind, uint16_t NumValueSites) {
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
*((uint16_t *)&Data->NumValueSites[ValueKind]) = NumValueSites;
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
}
/* This method is only used in value profiler mock testing. */
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingWriter.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingWriter.c
index 366451a686c1..4a392984fe6b 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingWriter.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingWriter.c
@@ -336,3 +336,24 @@ lprofWriteDataImpl(ProfDataWriter *Writer, const __llvm_profile_data *DataBegin,
return writeValueProfData(Writer, VPDataReader, DataBegin, DataEnd);
}
+
+/*
+ * Write binary id length and then its data, because binary id does not
+ * have a fixed length.
+ */
+COMPILER_RT_VISIBILITY
+int lprofWriteOneBinaryId(ProfDataWriter *Writer, uint64_t BinaryIdLen,
+ const uint8_t *BinaryIdData,
+ uint64_t BinaryIdPadding) {
+ ProfDataIOVec BinaryIdIOVec[] = {
+ {&BinaryIdLen, sizeof(uint64_t), 1, 0},
+ {BinaryIdData, sizeof(uint8_t), BinaryIdLen, 0},
+ {NULL, sizeof(uint8_t), BinaryIdPadding, 1},
+ };
+ if (Writer->Write(Writer, BinaryIdIOVec,
+ sizeof(BinaryIdIOVec) / sizeof(*BinaryIdIOVec)))
+ return -1;
+
+ /* Successfully wrote binary id, report success. */
+ return 0;
+}
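
The now-shared lprofWriteOneBinaryId above leans on the IOVec writer: three entries cover the 8-byte length, the id bytes, and the zero padding (a NULL data pointer plus the padding flag). A minimal file-backed writer with the same IOVec shape, inferred from this usage rather than copied from the runtime:

#include <stdio.h>

typedef struct {
  const void *Data;
  size_t ElmSize, NumElm;
  int UseZeroPadding;  /* emit zeros when Data is NULL */
} IOVec;

static int write_iovecs(FILE *f, const IOVec *iov, size_t n) {
  static const char zeros[8] = {0};
  for (size_t i = 0; i < n; i++) {
    size_t bytes = iov[i].ElmSize * iov[i].NumElm;
    if (iov[i].Data) {
      if (fwrite(iov[i].Data, 1, bytes, f) != bytes) return -1;
    } else if (iov[i].UseZeroPadding) {
      while (bytes) {
        size_t chunk = bytes < sizeof(zeros) ? bytes : sizeof(zeros);
        if (fwrite(zeros, 1, chunk, f) != chunk) return -1;
        bytes -= chunk;
      }
    }
  }
  return 0;
}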
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
index 25a43a59f047..03392b61503b 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
@@ -146,12 +146,10 @@ void *LowLevelAllocator::Allocate(uptr size) {
size = RoundUpTo(size, low_level_alloc_min_alignment);
if (allocated_end_ - allocated_current_ < (sptr)size) {
uptr size_to_allocate = RoundUpTo(size, GetPageSizeCached());
- allocated_current_ =
- (char*)MmapOrDie(size_to_allocate, __func__);
+ allocated_current_ = (char *)MmapOrDie(size_to_allocate, __func__);
allocated_end_ = allocated_current_ + size_to_allocate;
if (low_level_alloc_callback) {
- low_level_alloc_callback((uptr)allocated_current_,
- size_to_allocate);
+ low_level_alloc_callback((uptr)allocated_current_, size_to_allocate);
}
}
CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
index 76b936ff5eaa..0b28f86d1408 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
@@ -62,6 +62,13 @@ inline void RandomShuffle(T *a, u32 n, u32 *rand_state) {
*rand_state = state;
}
+struct NoOpMapUnmapCallback {
+ void OnMap(uptr p, uptr size) const {}
+ void OnMapSecondary(uptr p, uptr size, uptr user_begin,
+ uptr user_size) const {}
+ void OnUnmap(uptr p, uptr size) const {}
+};
+
#include "sanitizer_allocator_size_class_map.h"
#include "sanitizer_allocator_stats.h"
#include "sanitizer_allocator_primary64.h"
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
index b76d36dcf5a4..49940d9b5d50 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -29,9 +29,9 @@ class CombinedAllocator {
LargeMmapAllocatorPtrArray,
typename PrimaryAllocator::AddressSpaceView>;
- void InitLinkerInitialized(s32 release_to_os_interval_ms) {
- stats_.InitLinkerInitialized();
- primary_.Init(release_to_os_interval_ms);
+ void InitLinkerInitialized(s32 release_to_os_interval_ms,
+ uptr heap_start = 0) {
+ primary_.Init(release_to_os_interval_ms, heap_start);
secondary_.InitLinkerInitialized();
}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_interface.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_interface.h
index c1b27563e2fc..de2b271fb0ed 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_interface.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_interface.h
@@ -21,8 +21,12 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size);
SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_get_ownership(const void *p);
+SANITIZER_INTERFACE_ATTRIBUTE const void *__sanitizer_get_allocated_begin(
+ const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr
__sanitizer_get_allocated_size(const void *p);
+SANITIZER_INTERFACE_ATTRIBUTE uptr
+__sanitizer_get_allocated_size_fast(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_current_allocated_bytes();
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_heap_size();
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_free_bytes();
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h
index 38994736877a..62523c7ae187 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h
@@ -51,7 +51,6 @@ void InternalFree(void *p, InternalAllocatorCache *cache = nullptr);
void InternalAllocatorLock();
void InternalAllocatorUnlock();
InternalAllocator *internal_allocator();
-
} // namespace __sanitizer
#endif // SANITIZER_ALLOCATOR_INTERNAL_H
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
index f2471efced61..52fe3fe3d15b 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -353,7 +353,7 @@ class SizeClassAllocator32 {
DCHECK_GT(max_count, 0);
TransferBatch *b = nullptr;
constexpr uptr kShuffleArraySize = 48;
- uptr shuffle_array[kShuffleArraySize];
+ UNINITIALIZED uptr shuffle_array[kShuffleArraySize];
uptr count = 0;
for (uptr i = region; i < region + n_chunks * size; i += size) {
shuffle_array[count++] = i;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
index 66ba71d325da..fa43ac50c61e 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -635,8 +635,8 @@ class SizeClassAllocator64 {
return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
}
uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
- // kRegionSize must be >= 2^32.
- COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
+ // kRegionSize should be able to satisfy the largest size class.
+ static_assert(kRegionSize >= SizeClassMap::kMaxSize);
// kRegionSize must be <= 2^36, see CompactPtrT.
COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
// Call mmap for user memory with at least this size.
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
index 157645555604..0607819e7ef7 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
@@ -82,7 +82,7 @@ class LargeMmapAllocator {
InitLinkerInitialized();
}
- void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
+ void *Allocate(AllocatorStats *stat, const uptr size, uptr alignment) {
CHECK(IsPowerOfTwo(alignment));
uptr map_size = RoundUpMapSize(size);
if (alignment > page_size_)
@@ -99,11 +99,11 @@ class LargeMmapAllocator {
if (!map_beg)
return nullptr;
CHECK(IsAligned(map_beg, page_size_));
- MapUnmapCallback().OnMap(map_beg, map_size);
uptr map_end = map_beg + map_size;
uptr res = map_beg + page_size_;
if (res & (alignment - 1)) // Align.
res += alignment - (res & (alignment - 1));
+ MapUnmapCallback().OnMapSecondary(map_beg, map_size, res, size);
CHECK(IsAligned(res, alignment));
CHECK(IsAligned(res, page_size_));
CHECK_GE(res + size, map_beg);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_stats.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_stats.h
index 6f14e3863c31..ae4dac9c8c96 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_stats.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_stats.h
@@ -25,19 +25,13 @@ typedef uptr AllocatorStatCounters[AllocatorStatCount];
// Per-thread stats, live in per-thread cache.
class AllocatorStats {
public:
- void Init() {
- internal_memset(this, 0, sizeof(*this));
- }
- void InitLinkerInitialized() {}
-
+ void Init() { internal_memset(this, 0, sizeof(*this)); }
void Add(AllocatorStat i, uptr v) {
- v += atomic_load(&stats_[i], memory_order_relaxed);
- atomic_store(&stats_[i], v, memory_order_relaxed);
+ atomic_fetch_add(&stats_[i], v, memory_order_relaxed);
}
void Sub(AllocatorStat i, uptr v) {
- v = atomic_load(&stats_[i], memory_order_relaxed) - v;
- atomic_store(&stats_[i], v, memory_order_relaxed);
+ atomic_fetch_sub(&stats_[i], v, memory_order_relaxed);
}
void Set(AllocatorStat i, uptr v) {
@@ -58,17 +52,13 @@ class AllocatorStats {
// Global stats, used for aggregation and querying.
class AllocatorGlobalStats : public AllocatorStats {
public:
- void InitLinkerInitialized() {
- next_ = this;
- prev_ = this;
- }
void Init() {
internal_memset(this, 0, sizeof(*this));
- InitLinkerInitialized();
}
void Register(AllocatorStats *s) {
SpinMutexLock l(&mu_);
+ LazyInit();
s->next_ = next_;
s->prev_ = this;
next_->prev_ = s;
@@ -87,7 +77,7 @@ class AllocatorGlobalStats : public AllocatorStats {
internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
SpinMutexLock l(&mu_);
const AllocatorStats *stats = this;
- for (;;) {
+ for (; stats;) {
for (int i = 0; i < AllocatorStatCount; i++)
s[i] += stats->Get(AllocatorStat(i));
stats = stats->next_;
@@ -100,6 +90,13 @@ class AllocatorGlobalStats : public AllocatorStats {
}
private:
+ void LazyInit() {
+ if (!next_) {
+ next_ = this;
+ prev_ = this;
+ }
+ }
+
mutable StaticSpinMutex mu_;
};
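
The switch from load/store pairs to atomic_fetch_add/atomic_fetch_sub above closes a lost-update race: two threads that both load the old value and store back their own sum drop one increment. The same point with standard atomics (a standalone illustration; the runtime uses its own atomic layer, not <atomic>):

#include <atomic>
#include <cstdio>
#include <thread>

int main() {
  std::atomic<unsigned long> counter{0};
  auto add = [&] {
    for (int i = 0; i < 1000000; i++)
      counter.fetch_add(1, std::memory_order_relaxed);  // atomic RMW
  };
  std::thread t1(add), t2(add);
  t1.join();
  t2.join();
  std::printf("%lu\n", counter.load());  // always 2000000 with fetch_add
  return 0;
}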
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_array_ref.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_array_ref.h
new file mode 100644
index 000000000000..28d125383da4
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_array_ref.h
@@ -0,0 +1,123 @@
+//===-- sanitizer_array_ref.h -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ARRAY_REF_H
+#define SANITIZER_ARRAY_REF_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+/// ArrayRef - Represent a constant reference to an array (0 or more elements
+/// consecutively in memory), i.e. a start pointer and a length. It allows
+/// various APIs to take consecutive elements easily and conveniently.
+///
+/// This class does not own the underlying data, it is expected to be used in
+/// situations where the data resides in some other buffer, whose lifetime
+/// extends past that of the ArrayRef. For this reason, it is not in general
+/// safe to store an ArrayRef.
+///
+/// This is intended to be trivially copyable, so it should be passed by
+/// value.
+template <typename T>
+class ArrayRef {
+ public:
+ constexpr ArrayRef() {}
+ constexpr ArrayRef(const T *begin, const T *end) : begin_(begin), end_(end) {
+ DCHECK(empty() || begin);
+ }
+ constexpr ArrayRef(const T *data, uptr length)
+ : ArrayRef(data, data + length) {}
+ template <uptr N>
+ constexpr ArrayRef(const T (&src)[N]) : ArrayRef(src, src + N) {}
+ template <typename C>
+ constexpr ArrayRef(const C &src)
+ : ArrayRef(src.data(), src.data() + src.size()) {}
+ ArrayRef(const T &one_elt) : ArrayRef(&one_elt, &one_elt + 1) {}
+
+ const T *data() const { return empty() ? nullptr : begin_; }
+
+ const T *begin() const { return begin_; }
+ const T *end() const { return end_; }
+
+ bool empty() const { return begin_ == end_; }
+
+ uptr size() const { return end_ - begin_; }
+
+ /// equals - Check for element-wise equality.
+ bool equals(ArrayRef rhs) const {
+ if (size() != rhs.size())
+ return false;
+ auto r = rhs.begin();
+ for (auto &l : *this) {
+ if (!(l == *r))
+ return false;
+ ++r;
+ }
+ return true;
+ }
+
+ /// slice(n, m) - Chop off the first N elements of the array, and keep M
+ /// elements in the array.
+ ArrayRef<T> slice(uptr N, uptr M) const {
+ DCHECK_LE(N + M, size());
+ return ArrayRef<T>(data() + N, M);
+ }
+
+ /// slice(n) - Chop off the first N elements of the array.
+ ArrayRef<T> slice(uptr N) const { return slice(N, size() - N); }
+
+ /// Drop the first \p N elements of the array.
+ ArrayRef<T> drop_front(uptr N = 1) const {
+ DCHECK_GE(size(), N);
+ return slice(N, size() - N);
+ }
+
+ /// Drop the last \p N elements of the array.
+ ArrayRef<T> drop_back(uptr N = 1) const {
+ DCHECK_GE(size(), N);
+ return slice(0, size() - N);
+ }
+
+ /// Return a copy of *this with only the first \p N elements.
+ ArrayRef<T> take_front(uptr N = 1) const {
+ if (N >= size())
+ return *this;
+ return drop_back(size() - N);
+ }
+
+ /// Return a copy of *this with only the last \p N elements.
+ ArrayRef<T> take_back(uptr N = 1) const {
+ if (N >= size())
+ return *this;
+ return drop_front(size() - N);
+ }
+
+ const T &operator[](uptr index) const {
+ DCHECK_LT(index, size());
+ return begin_[index];
+ }
+
+ private:
+ const T *begin_ = nullptr;
+ const T *end_ = nullptr;
+};
+
+template <typename T>
+inline bool operator==(ArrayRef<T> lhs, ArrayRef<T> rhs) {
+ return lhs.equals(rhs);
+}
+
+template <typename T>
+inline bool operator!=(ArrayRef<T> lhs, ArrayRef<T> rhs) {
+ return !(lhs == rhs);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ARRAY_REF_H
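
Usage of the new ArrayRef, for orientation (plain int in place of sanitizer types; member names as in the header above):

#include "sanitizer_array_ref.h"

using __sanitizer::ArrayRef;

bool demo() {
  int buf[] = {1, 2, 3, 4, 5};
  ArrayRef<int> all(buf);                  // views buf, does not own it
  ArrayRef<int> mid = all.slice(1, 3);     // {2, 3, 4}
  ArrayRef<int> tail = all.drop_front(2);  // {3, 4, 5}
  return mid == all.slice(1, 3) &&         // element-wise equality
         tail.size() == 3 && all[0] == 1;
}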
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_asm.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_asm.h
index 9ebba91da73f..3c9bbdc9678b 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_asm.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_asm.h
@@ -42,13 +42,57 @@
# define CFI_RESTORE(reg)
#endif
+#if defined(__x86_64__) || defined(__i386__) || defined(__sparc__)
+# define ASM_TAIL_CALL jmp
+#elif defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ defined(__powerpc__) || defined(__loongarch_lp64)
+# define ASM_TAIL_CALL b
+#elif defined(__s390__)
+# define ASM_TAIL_CALL jg
+#elif defined(__riscv)
+# define ASM_TAIL_CALL tail
+#endif
+
+#if defined(__ELF__) && defined(__x86_64__) || defined(__i386__) || \
+ defined(__riscv)
+# define ASM_PREEMPTIBLE_SYM(sym) sym@plt
+#else
+# define ASM_PREEMPTIBLE_SYM(sym) sym
+#endif
+
#if !defined(__APPLE__)
# define ASM_HIDDEN(symbol) .hidden symbol
# define ASM_TYPE_FUNCTION(symbol) .type symbol, %function
# define ASM_SIZE(symbol) .size symbol, .-symbol
# define ASM_SYMBOL(symbol) symbol
# define ASM_SYMBOL_INTERCEPTOR(symbol) symbol
-# define ASM_WRAPPER_NAME(symbol) __interceptor_##symbol
+# if defined(__i386__) || defined(__powerpc__) || defined(__s390__) || \
+ defined(__sparc__)
+// For details, see interception.h
+# define ASM_WRAPPER_NAME(symbol) __interceptor_##symbol
+# define ASM_TRAMPOLINE_ALIAS(symbol, name) \
+ .weak symbol; \
+ .set symbol, ASM_WRAPPER_NAME(name)
+# define ASM_INTERCEPTOR_TRAMPOLINE(name)
+# define ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT 0
+# else // Architecture supports interceptor trampoline
+// Keep trampoline implementation in sync with interception/interception.h
+# define ASM_WRAPPER_NAME(symbol) ___interceptor_##symbol
+# define ASM_TRAMPOLINE_ALIAS(symbol, name) \
+ .weak symbol; \
+ .set symbol, __interceptor_trampoline_##name
+# define ASM_INTERCEPTOR_TRAMPOLINE(name) \
+ .weak __interceptor_##name; \
+ .set __interceptor_##name, ASM_WRAPPER_NAME(name); \
+ .globl __interceptor_trampoline_##name; \
+ ASM_TYPE_FUNCTION(__interceptor_trampoline_##name); \
+ __interceptor_trampoline_##name: \
+ CFI_STARTPROC; \
+ ASM_TAIL_CALL ASM_PREEMPTIBLE_SYM(__interceptor_##name); \
+ CFI_ENDPROC; \
+ ASM_SIZE(__interceptor_trampoline_##name)
+# define ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT 1
+# endif // Architecture supports interceptor trampoline
#else
# define ASM_HIDDEN(symbol)
# define ASM_TYPE_FUNCTION(symbol)
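
For concreteness, on an ELF x86-64 target (where ASM_TAIL_CALL is jmp and the interceptor symbol is preemptible) ASM_INTERCEPTOR_TRAMPOLINE(foo) for a hypothetical symbol foo expands to roughly the following, reconstructed from the macros above rather than taken from compiler output:

.weak __interceptor_foo
.set __interceptor_foo, ___interceptor_foo
.globl __interceptor_trampoline_foo
.type __interceptor_trampoline_foo, %function
__interceptor_trampoline_foo:
  .cfi_startproc
  jmp __interceptor_foo@plt
  .cfi_endproc
.size __interceptor_trampoline_foo, .-__interceptor_trampoline_foo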
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp
index 82236453157f..79b7748b8f6e 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp
@@ -61,6 +61,26 @@ void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
UNREACHABLE("unable to mmap");
}
+void NORETURN ReportMunmapFailureAndDie(void *addr, uptr size, error_t err,
+ bool raw_report) {
+ static int recursion_count;
+ if (raw_report || recursion_count) {
+    // If a raw report is requested or we went into recursion, just die. The
+ // Report() and CHECK calls below may call munmap recursively and fail.
+ RawWrite("ERROR: Failed to munmap\n");
+ Die();
+ }
+ recursion_count++;
+ Report(
+ "ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p (error "
+ "code: %d)\n",
+ SanitizerToolName, size, size, addr, err);
+#if !SANITIZER_GO
+ DumpProcessMap();
+#endif
+  UNREACHABLE("unable to munmap");
+}
+
typedef bool UptrComparisonFunction(const uptr &a, const uptr &b);
typedef bool U32ComparisonFunction(const u32 &a, const u32 &b);
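
ReportMunmapFailureAndDie above guards against a subtle failure mode: the reporting machinery can itself unmap memory, fail again, and re-enter. The guard pattern in isolation, with hypothetical names (the real function also honors a raw_report fast path):

#include <stdio.h>
#include <stdlib.h>

static void report_fatal(const char *msg) {
  static int recursion_count;
  if (recursion_count) {  /* re-entered: reporting itself failed */
    fputs("ERROR: nested failure\n", stderr);
    abort();
  }
  recursion_count++;
  fprintf(stderr, "ERROR: %s\n", msg);  /* may fail and call back in */
  abort();
}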
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.h
index b462e388c232..e7e4b8cb506d 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.h
@@ -117,6 +117,7 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
// unaccessible memory.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);
+bool MprotectReadWrite(uptr addr, uptr size);
void MprotectMallocZones(void *addr, int prot);
@@ -211,6 +212,7 @@ class LowLevelAllocator {
public:
// Requires an external lock.
void *Allocate(uptr size);
+
private:
char *allocated_end_;
char *allocated_current_;
@@ -315,6 +317,8 @@ CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
const char *mmap_type, error_t err,
bool raw_report = false);
+void NORETURN ReportMunmapFailureAndDie(void *ptr, uptr size, error_t err,
+ bool raw_report = false);
// Returns true if the platform-specific error reported is an OOM error.
bool ErrorIsOOM(error_t err);
@@ -516,8 +520,8 @@ class InternalMmapVectorNoCtor {
return data_[i];
}
void push_back(const T &element) {
- CHECK_LE(size_, capacity());
- if (size_ == capacity()) {
+ if (UNLIKELY(size_ >= capacity())) {
+ CHECK_EQ(size_, capacity());
uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
Realloc(new_capacity);
}
@@ -577,7 +581,7 @@ class InternalMmapVectorNoCtor {
}
private:
- void Realloc(uptr new_capacity) {
+ NOINLINE void Realloc(uptr new_capacity) {
CHECK_GT(new_capacity, 0);
CHECK_LE(size_, new_capacity);
uptr new_capacity_bytes =
@@ -793,7 +797,11 @@ inline const char *ModuleArchToString(ModuleArch arch) {
return "";
}
+#if SANITIZER_APPLE
+const uptr kModuleUUIDSize = 16;
+#else
const uptr kModuleUUIDSize = 32;
+#endif
const uptr kMaxSegName = 16;
// Represents a binary loaded into virtual memory (e.g. this can be an
@@ -1076,20 +1084,6 @@ inline u32 GetNumberOfCPUsCached() {
return NumberOfCPUsCached;
}
-template <typename T>
-class ArrayRef {
- public:
- ArrayRef() {}
- ArrayRef(T *begin, T *end) : begin_(begin), end_(end) {}
-
- T *begin() { return begin_; }
- T *end() { return end_; }
-
- private:
- T *begin_ = nullptr;
- T *end_ = nullptr;
-};
-
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
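
The push_back change above keeps the hot path to a single predicted-not-taken branch and moves the (cold, now NOINLINE) reallocation out of line; growth stays power-of-two. The same shape as a standalone container, for trivially copyable T only, with the builtins standing in for the runtime's UNLIKELY/NOINLINE macros:

#include <cstddef>
#include <cstdlib>

template <typename T>
class MiniVector {
 public:
  void push_back(const T &v) {
    if (__builtin_expect(size_ >= capacity_, 0))  // UNLIKELY
      grow();                                     // cold, kept out of line
    data_[size_++] = v;
  }
  size_t size() const { return size_; }

 private:
  __attribute__((noinline)) void grow() {
    size_t new_cap = capacity_ ? capacity_ * 2 : 1;  // power-of-two growth
    // realloc is only valid here because T is trivially copyable.
    T *p = static_cast<T *>(std::realloc(data_, new_cap * sizeof(T)));
    if (!p) std::abort();
    data_ = p;
    capacity_ = new_cap;
  }
  T *data_ = nullptr;
  size_t size_ = 0, capacity_ = 0;
};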
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
index e999239549cc..299561b3ad3a 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
@@ -26,10 +26,8 @@
// COMMON_INTERCEPTOR_SET_PTHREAD_NAME
// COMMON_INTERCEPTOR_HANDLE_RECVMSG
// COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED
-// COMMON_INTERCEPTOR_MEMSET_IMPL
-// COMMON_INTERCEPTOR_MEMMOVE_IMPL
-// COMMON_INTERCEPTOR_MEMCPY_IMPL
// COMMON_INTERCEPTOR_MMAP_IMPL
+// COMMON_INTERCEPTOR_MUNMAP_IMPL
// COMMON_INTERCEPTOR_COPY_STRING
// COMMON_INTERCEPTOR_STRNDUP_IMPL
// COMMON_INTERCEPTOR_STRERROR
@@ -198,15 +196,6 @@ extern const short *_tolower_tab_;
#define wait4 __wait4_time64
#endif
-// Platform-specific options.
-#if SANITIZER_APPLE
-#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
-#elif SANITIZER_WINDOWS64
-#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
-#else
-#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 1
-#endif // SANITIZER_APPLE
-
#ifndef COMMON_INTERCEPTOR_INITIALIZE_RANGE
#define COMMON_INTERCEPTOR_INITIALIZE_RANGE(p, size) {}
#endif
@@ -302,53 +291,17 @@ extern const short *_tolower_tab_;
COMMON_INTERCEPT_FUNCTION(fn)
#endif
-#ifndef COMMON_INTERCEPTOR_MEMSET_IMPL
-#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size) \
- { \
- if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
- return internal_memset(dst, v, size); \
- COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size); \
- if (common_flags()->intercept_intrin) \
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
- return REAL(memset)(dst, v, size); \
- }
-#endif
-
-#ifndef COMMON_INTERCEPTOR_MEMMOVE_IMPL
-#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size) \
- { \
- if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
- return internal_memmove(dst, src, size); \
- COMMON_INTERCEPTOR_ENTER(ctx, memmove, dst, src, size); \
- if (common_flags()->intercept_intrin) { \
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
- } \
- return REAL(memmove)(dst, src, size); \
- }
-#endif
-
-#ifndef COMMON_INTERCEPTOR_MEMCPY_IMPL
-#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size) \
- { \
- if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) { \
- return internal_memmove(dst, src, size); \
- } \
- COMMON_INTERCEPTOR_ENTER(ctx, memcpy, dst, src, size); \
- if (common_flags()->intercept_intrin) { \
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
- } \
- return REAL(memcpy)(dst, src, size); \
- }
-#endif
-
#ifndef COMMON_INTERCEPTOR_MMAP_IMPL
#define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, \
off) \
{ return REAL(mmap)(addr, sz, prot, flags, fd, off); }
#endif
+#ifndef COMMON_INTERCEPTOR_MUNMAP_IMPL
+#define COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, sz) \
+ { return REAL(munmap)(addr, sz); }
+#endif
+
#ifndef COMMON_INTERCEPTOR_COPY_STRING
#define COMMON_INTERCEPTOR_COPY_STRING(ctx, to, from, size) {}
#endif
@@ -841,57 +794,6 @@ INTERCEPTOR(char *, strpbrk, const char *s1, const char *s2) {
#define INIT_STRPBRK
#endif
-#if SANITIZER_INTERCEPT_MEMSET
-INTERCEPTOR(void *, memset, void *dst, int v, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size);
-}
-
-#define INIT_MEMSET COMMON_INTERCEPT_FUNCTION(memset)
-#else
-#define INIT_MEMSET
-#endif
-
-#if SANITIZER_INTERCEPT_MEMMOVE
-INTERCEPTOR(void *, memmove, void *dst, const void *src, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
-}
-
-#define INIT_MEMMOVE COMMON_INTERCEPT_FUNCTION(memmove)
-#else
-#define INIT_MEMMOVE
-#endif
-
-#if SANITIZER_INTERCEPT_MEMCPY
-INTERCEPTOR(void *, memcpy, void *dst, const void *src, uptr size) {
- // On OS X, calling internal_memcpy here will cause memory corruptions,
- // because memcpy and memmove are actually aliases of the same
- // implementation. We need to use internal_memmove here.
- // N.B.: If we switch this to internal_ we'll have to use internal_memmove
- // due to memcpy being an alias of memmove on OS X.
- void *ctx;
-#if PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE
- COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
-#else
- COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
-#endif
-}
-
-#define INIT_MEMCPY \
- do { \
- if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) { \
- COMMON_INTERCEPT_FUNCTION(memcpy); \
- } else { \
- ASSIGN_REAL(memcpy, memmove); \
- } \
- CHECK(REAL(memcpy)); \
- } while (false)
-
-#else
-#define INIT_MEMCPY
-#endif
-
#if SANITIZER_INTERCEPT_MEMCMP
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memcmp, uptr called_pc,
const void *s1, const void *s2, uptr n,
@@ -1350,7 +1252,7 @@ INTERCEPTOR(int, prctl, int option, unsigned long arg2, unsigned long arg3,
char *name = (char *)arg5;
COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
}
- int res = REAL(prctl(option, arg2, arg3, arg4, arg5));
+ int res = REAL(prctl)(option, arg2, arg3, arg4, arg5);
if (option == PR_SET_NAME) {
char buff[16];
internal_strncpy(buff, (char *)arg2, 15);
@@ -3416,7 +3318,8 @@ INTERCEPTOR(__sanitizer_dirent *, readdir, void *dirp) {
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
__sanitizer_dirent *res = REAL(readdir)(dirp);
- if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, res->d_reclen);
+ if (res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, __sanitizer_dirsiz(res));
return res;
}
@@ -3431,7 +3334,7 @@ INTERCEPTOR(int, readdir_r, void *dirp, __sanitizer_dirent *entry,
if (!res) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
if (*result)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *result, (*result)->d_reclen);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *result, __sanitizer_dirsiz(*result));
}
return res;
}
@@ -3452,7 +3355,8 @@ INTERCEPTOR(__sanitizer_dirent64 *, readdir64, void *dirp) {
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
__sanitizer_dirent64 *res = REAL(readdir64)(dirp);
- if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, res->d_reclen);
+ if (res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, __sanitizer_dirsiz(res));
return res;
}
@@ -3467,7 +3371,7 @@ INTERCEPTOR(int, readdir64_r, void *dirp, __sanitizer_dirent64 *entry,
if (!res) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
if (*result)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *result, (*result)->d_reclen);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *result, __sanitizer_dirsiz(*result));
}
return res;
}
@@ -4039,7 +3943,7 @@ static THREADLOCAL scandir_compar_f scandir_compar;
static int wrapped_scandir_filter(const struct __sanitizer_dirent *dir) {
COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, dir->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, __sanitizer_dirsiz(dir));
return scandir_filter(dir);
}
@@ -4047,9 +3951,9 @@ static int wrapped_scandir_compar(const struct __sanitizer_dirent **a,
const struct __sanitizer_dirent **b) {
COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, sizeof(*a));
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, (*a)->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, __sanitizer_dirsiz(*a));
COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, sizeof(*b));
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, (*b)->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, __sanitizer_dirsiz(*b));
return scandir_compar(a, b);
}
@@ -4073,7 +3977,7 @@ INTERCEPTOR(int, scandir, char *dirp, __sanitizer_dirent ***namelist,
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *namelist, sizeof(**namelist) * res);
for (int i = 0; i < res; ++i)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (*namelist)[i],
- (*namelist)[i]->d_reclen);
+ __sanitizer_dirsiz((*namelist)[i]));
}
return res;
}
@@ -4092,7 +3996,7 @@ static THREADLOCAL scandir64_compar_f scandir64_compar;
static int wrapped_scandir64_filter(const struct __sanitizer_dirent64 *dir) {
COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, dir->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, __sanitizer_dirsiz(dir));
return scandir64_filter(dir);
}
@@ -4100,9 +4004,9 @@ static int wrapped_scandir64_compar(const struct __sanitizer_dirent64 **a,
const struct __sanitizer_dirent64 **b) {
COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, sizeof(*a));
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, (*a)->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, __sanitizer_dirsiz(*a));
COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, sizeof(*b));
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, (*b)->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, __sanitizer_dirsiz(*b));
return scandir64_compar(a, b);
}
@@ -4127,7 +4031,7 @@ INTERCEPTOR(int, scandir64, char *dirp, __sanitizer_dirent64 ***namelist,
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *namelist, sizeof(**namelist) * res);
for (int i = 0; i < res; ++i)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (*namelist)[i],
- (*namelist)[i]->d_reclen);
+ __sanitizer_dirsiz((*namelist)[i]));
}
return res;
}
@@ -4404,12 +4308,16 @@ INTERCEPTOR(int, pthread_sigmask, int how, __sanitizer_sigset_t *set,
INTERCEPTOR(int, backtrace, void **buffer, int size) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, backtrace, buffer, size);
- // FIXME: under ASan the call below may write to freed memory and corrupt
- // its metadata. See
- // https://github.com/google/sanitizers/issues/321.
- int res = REAL(backtrace)(buffer, size);
- if (res && buffer)
+ // 'buffer' might be freed memory, hence it is unsafe to directly call
+ // REAL(backtrace)(buffer, size). Instead, we use our own known-good
+ // scratch buffer.
+ void **scratch = (void**)InternalAlloc(sizeof(void*) * size);
+ int res = REAL(backtrace)(scratch, size);
+ if (res && buffer) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buffer, res * sizeof(*buffer));
+ internal_memcpy(buffer, scratch, res * sizeof(*buffer));
+ }
+ InternalFree(scratch);
return res;
}
@@ -4418,9 +4326,8 @@ INTERCEPTOR(char **, backtrace_symbols, void **buffer, int size) {
COMMON_INTERCEPTOR_ENTER(ctx, backtrace_symbols, buffer, size);
if (buffer && size)
COMMON_INTERCEPTOR_READ_RANGE(ctx, buffer, size * sizeof(*buffer));
- // FIXME: under ASan the call below may write to freed memory and corrupt
- // its metadata. See
- // https://github.com/google/sanitizers/issues/321.
+ // The COMMON_INTERCEPTOR_READ_RANGE above ensures that 'buffer' is
+ // valid for reading.
char **res = REAL(backtrace_symbols)(buffer, size);
if (res && size) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, size * sizeof(*res));
@@ -4453,7 +4360,7 @@ INTERCEPTOR(void, _exit, int status) {
#if SANITIZER_INTERCEPT___LIBC_MUTEX
INTERCEPTOR(int, __libc_thr_setcancelstate, int state, int *oldstate)
-ALIAS(WRAPPER_NAME(pthread_setcancelstate));
+ALIAS(WRAP(pthread_setcancelstate));
#define INIT___LIBC_THR_SETCANCELSTATE \
COMMON_INTERCEPT_FUNCTION(__libc_thr_setcancelstate)
@@ -5484,9 +5391,7 @@ INTERCEPTOR(void *, __tls_get_addr, void *arg) {
// On PowerPC, we also need to intercept __tls_get_addr_opt, which has
// mostly the same semantics as __tls_get_addr, but its presence enables
// some optimizations in linker (which are safe to ignore here).
-extern "C" __attribute__((alias("__interceptor___tls_get_addr"),
- visibility("default")))
-void *__tls_get_addr_opt(void *arg);
+INTERCEPTOR(void *, __tls_get_addr_opt, void *arg) ALIAS(WRAP(__tls_get_addr));
#endif
#else // SANITIZER_S390
// On s390, we have to intercept two functions here:
@@ -5520,21 +5425,20 @@ INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
#if SANITIZER_S390 && \
(SANITIZER_INTERCEPT_TLS_GET_ADDR || SANITIZER_INTERCEPT_TLS_GET_OFFSET)
-extern "C" uptr __tls_get_offset(void *arg);
-extern "C" uptr __interceptor___tls_get_offset(void *arg);
// We need a hidden symbol aliasing the above, so that we can jump
// directly to it from the assembly below.
-extern "C" __attribute__((alias("__interceptor___tls_get_addr_internal"),
- visibility("hidden")))
-uptr __tls_get_addr_hidden(void *arg);
+extern "C" __attribute__((visibility("hidden"))) uptr __tls_get_addr_hidden(
+ void *arg) ALIAS(WRAP(__tls_get_addr_internal));
+extern "C" uptr __tls_get_offset(void *arg);
+extern "C" uptr TRAMPOLINE(__tls_get_offset)(void *arg);
+extern "C" uptr WRAP(__tls_get_offset)(void *arg);
// Now carefully intercept __tls_get_offset.
asm(
".text\n"
  // The __interceptor_ version has to exist, so that gen_dynamic_list.py
// exports our symbol.
".weak __tls_get_offset\n"
- ".type __tls_get_offset, @function\n"
- "__tls_get_offset:\n"
+ ".set __tls_get_offset, __interceptor___tls_get_offset\n"
".global __interceptor___tls_get_offset\n"
".type __interceptor___tls_get_offset, @function\n"
"__interceptor___tls_get_offset:\n"
@@ -5763,8 +5667,10 @@ INTERCEPTOR(int, capget, void *hdrp, void *datap) {
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
int res = REAL(capget)(hdrp, datap);
- if (res == 0 && datap)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, datap, __user_cap_data_struct_sz);
+ if (res == 0 && datap) {
+ unsigned datasz = __user_cap_data_struct_sz(hdrp);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, datap, datasz);
+ }
// We can also return -1 and write to hdrp->version if the version passed in
// hdrp->version is unsupported. But that's not a trivial condition to check,
// and anyway COMMON_INTERCEPTOR_READ_RANGE protects us to some extent.
@@ -5775,8 +5681,10 @@ INTERCEPTOR(int, capset, void *hdrp, const void *datap) {
COMMON_INTERCEPTOR_ENTER(ctx, capset, hdrp, datap);
if (hdrp)
COMMON_INTERCEPTOR_READ_RANGE(ctx, hdrp, __user_cap_header_struct_sz);
- if (datap)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, datap, __user_cap_data_struct_sz);
+ if (datap) {
+ unsigned datasz = __user_cap_data_struct_sz(hdrp);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, datap, datasz);
+ }
return REAL(capset)(hdrp, datap);
}
#define INIT_CAPGET \
@@ -5786,105 +5694,6 @@ INTERCEPTOR(int, capset, void *hdrp, const void *datap) {
#define INIT_CAPGET
#endif
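
The capability data size now depends on the version field in the header, matching the kernel, which expects one __user_cap_data_struct per 32-bit capability word; the implementation is added later in this patch (sanitizer_platform_limits_posix.cpp). A hedged illustration (the byte count assumes the usual three-u32 layout of __user_cap_data_struct):

    // _LINUX_CAPABILITY_VERSION_1   -> _LINUX_CAPABILITY_U32S_1 (1 struct)
    // _LINUX_CAPABILITY_VERSION_2/3 -> _LINUX_CAPABILITY_U32S_2/3 (2 structs)
    struct __user_cap_header_struct hdr = {_LINUX_CAPABILITY_VERSION_3, 0};
    struct __user_cap_data_struct data[2];          // V2/V3 need an array of 2
    unsigned sz = __user_cap_data_struct_sz(&hdr);  // == 2 * sizeof(data[0])
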
-#if SANITIZER_INTERCEPT_AEABI_MEM
-INTERCEPTOR(void *, __aeabi_memmove, void *to, const void *from, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memmove4, void *to, const void *from, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memmove8, void *to, const void *from, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memcpy, void *to, const void *from, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memcpy4, void *to, const void *from, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memcpy8, void *to, const void *from, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
-}
-
-// Note the argument order.
-INTERCEPTOR(void *, __aeabi_memset, void *block, uptr size, int c) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memset4, void *block, uptr size, int c) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memset8, void *block, uptr size, int c) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memclr, void *block, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memclr4, void *block, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memclr8, void *block, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
-}
-
-#define INIT_AEABI_MEM \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memmove); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memmove4); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memmove8); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy4); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy8); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memset); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memset4); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memset8); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memclr); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memclr4); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memclr8);
-#else
-#define INIT_AEABI_MEM
-#endif // SANITIZER_INTERCEPT_AEABI_MEM
-
-#if SANITIZER_INTERCEPT___BZERO
-INTERCEPTOR(void *, __bzero, void *block, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
-}
-#define INIT___BZERO COMMON_INTERCEPT_FUNCTION(__bzero);
-#else
-#define INIT___BZERO
-#endif // SANITIZER_INTERCEPT___BZERO
-
-#if SANITIZER_INTERCEPT_BZERO
-INTERCEPTOR(void *, bzero, void *block, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
-}
-#define INIT_BZERO COMMON_INTERCEPT_FUNCTION(bzero);
-#else
-#define INIT_BZERO
-#endif // SANITIZER_INTERCEPT_BZERO
-
#if SANITIZER_INTERCEPT_FTIME
INTERCEPTOR(int, ftime, __sanitizer_timeb *tp) {
void *ctx;
@@ -7169,6 +6978,7 @@ INTERCEPTOR(int, mprobe, void *ptr) {
}
#endif
+#if SANITIZER_INTERCEPT_WCSLEN
INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, wcslen, s);
@@ -7187,6 +6997,9 @@ INTERCEPTOR(SIZE_T, wcsnlen, const wchar_t *s, SIZE_T n) {
#define INIT_WCSLEN \
COMMON_INTERCEPT_FUNCTION(wcslen); \
COMMON_INTERCEPT_FUNCTION(wcsnlen);
+#else
+#define INIT_WCSLEN
+#endif
#if SANITIZER_INTERCEPT_WCSCAT
INTERCEPTOR(wchar_t *, wcscat, wchar_t *dst, const wchar_t *src) {
@@ -7595,6 +7408,14 @@ INTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags, int fd,
COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, off);
}
+INTERCEPTOR(int, munmap, void *addr, SIZE_T sz) {
+ void *ctx;
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return (int)internal_munmap(addr, sz);
+ COMMON_INTERCEPTOR_ENTER(ctx, munmap, addr, sz);
+ COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, sz);
+}
+
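
COMMON_INTERCEPTOR_MUNMAP_IMPL is supplied by each tool; this hunk only wires up the common interceptor. Purely as a sketch of the shape such a definition might take (the unmap hook below is hypothetical, not a name from this patch):

    // Hypothetical tool-side definition: let the real munmap run, then drop
    // any tool metadata (e.g. poisoning) covering the released range.
    #define COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, sz)          \
      {                                                            \
        int res = REAL(munmap)(addr, sz);                          \
        if (res == 0)                                              \
          OnRangeUnmapped((uptr)addr, sz); /* hypothetical hook */ \
        return res;                                                \
      }
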
INTERCEPTOR(int, mprotect, void *addr, SIZE_T sz, int prot) {
void *ctx;
if (common_flags()->detect_write_exec)
@@ -7607,6 +7428,7 @@ INTERCEPTOR(int, mprotect, void *addr, SIZE_T sz, int prot) {
}
#define INIT_MMAP \
COMMON_INTERCEPT_FUNCTION(mmap); \
+ COMMON_INTERCEPT_FUNCTION(munmap); \
COMMON_INTERCEPT_FUNCTION(mprotect);
#else
#define INIT_MMAP
@@ -7744,8 +7566,7 @@ static void write_protoent(void *ctx, struct __sanitizer_protoent *p) {
for (char **pp = p->p_aliases; *pp; ++pp, ++pp_size)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *pp, internal_strlen(*pp) + 1);
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases,
- pp_size * sizeof(char **));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases, pp_size * sizeof(char *));
}
INTERCEPTOR(struct __sanitizer_protoent *, getprotoent) {
@@ -7851,8 +7672,7 @@ INTERCEPTOR(struct __sanitizer_netent *, getnetent) {
for (char **nn = n->n_aliases; *nn; ++nn, ++nn_size)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, internal_strlen(*nn) + 1);
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases,
- nn_size * sizeof(char **));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases, nn_size * sizeof(char *));
}
return n;
}
@@ -7873,8 +7693,7 @@ INTERCEPTOR(struct __sanitizer_netent *, getnetbyname, const char *name) {
for (char **nn = n->n_aliases; *nn; ++nn, ++nn_size)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, internal_strlen(*nn) + 1);
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases,
- nn_size * sizeof(char **));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases, nn_size * sizeof(char *));
}
return n;
}
@@ -7893,8 +7712,7 @@ INTERCEPTOR(struct __sanitizer_netent *, getnetbyaddr, u32 net, int type) {
for (char **nn = n->n_aliases; *nn; ++nn, ++nn_size)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, internal_strlen(*nn) + 1);
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases,
- nn_size * sizeof(char **));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases, nn_size * sizeof(char *));
}
return n;
}
@@ -10086,41 +9904,6 @@ INTERCEPTOR(SSIZE_T, getrandom, void *buf, SIZE_T buflen, unsigned int flags) {
#define INIT_GETRANDOM
#endif
-#if SANITIZER_INTERCEPT_CRYPT
-INTERCEPTOR(char *, crypt, char *key, char *salt) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, crypt, key, salt);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, key, internal_strlen(key) + 1);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, salt, internal_strlen(salt) + 1);
- char *res = REAL(crypt)(key, salt);
- if (res != nullptr)
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
- return res;
-}
-#define INIT_CRYPT COMMON_INTERCEPT_FUNCTION(crypt);
-#else
-#define INIT_CRYPT
-#endif
-
-#if SANITIZER_INTERCEPT_CRYPT_R
-INTERCEPTOR(char *, crypt_r, char *key, char *salt, void *data) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, crypt_r, key, salt, data);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, key, internal_strlen(key) + 1);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, salt, internal_strlen(salt) + 1);
- char *res = REAL(crypt_r)(key, salt, data);
- if (res != nullptr) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data,
- __sanitizer::struct_crypt_data_sz);
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
- }
- return res;
-}
-#define INIT_CRYPT_R COMMON_INTERCEPT_FUNCTION(crypt_r);
-#else
-#define INIT_CRYPT_R
-#endif
-
#if SANITIZER_INTERCEPT_GETENTROPY
INTERCEPTOR(int, getentropy, void *buf, SIZE_T buflen) {
void *ctx;
@@ -10371,14 +10154,52 @@ INTERCEPTOR(void, hexdump, const void *ptr, int length, const char *header, int
#define INIT_HEXDUMP
#endif
+#if SANITIZER_INTERCEPT_ARGP_PARSE
+INTERCEPTOR(int, argp_parse, const struct argp *argp, int argc, char **argv,
+ unsigned flags, int *arg_index, void *input) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, argp_parse, argp, argc, argv, flags, arg_index,
+ input);
+ for (int i = 0; i < argc; i++)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, argv[i], internal_strlen(argv[i]) + 1);
+ int res = REAL(argp_parse)(argp, argc, argv, flags, arg_index, input);
+ if (!res && arg_index)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, arg_index, sizeof(int));
+ return res;
+}
+
+#define INIT_ARGP_PARSE COMMON_INTERCEPT_FUNCTION(argp_parse);
+#else
+#define INIT_ARGP_PARSE
+#endif
+
+#if SANITIZER_INTERCEPT_CPUSET_GETAFFINITY
+INTERCEPTOR(int, cpuset_getaffinity, int level, int which, __int64_t id,
+            SIZE_T cpusetsize, __sanitizer_cpuset_t *mask) {
+  void *ctx;
+  COMMON_INTERCEPTOR_ENTER(ctx, cpuset_getaffinity, level, which, id,
+                           cpusetsize, mask);
+ int res = REAL(cpuset_getaffinity)(level, which, id, cpusetsize, mask);
+ if (mask && !res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mask, cpusetsize);
+ return res;
+}
+#define INIT_CPUSET_GETAFFINITY COMMON_INTERCEPT_FUNCTION(cpuset_getaffinity);
+#else
+#define INIT_CPUSET_GETAFFINITY
+#endif
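
cpuset_getaffinity(2) is FreeBSD's affinity query; on success the interceptor marks exactly the first cpusetsize bytes of the mask as written. A typical call this intercepts (CPU_LEVEL_WHICH/CPU_WHICH_PID are the standard FreeBSD constants; id of -1 means the current process):

    cpuset_t mask;
    if (cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1,
                           sizeof(mask), &mask) == 0) {
      // 'mask' is now fully written; the interceptor records sizeof(mask)
      // bytes as initialized.
    }
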
+
#include "sanitizer_common_interceptors_netbsd_compat.inc"
+namespace __sanitizer {
+void InitializeMemintrinsicInterceptors();
+} // namespace __sanitizer
+
static void InitializeCommonInterceptors() {
#if SI_POSIX
static u64 metadata_mem[sizeof(MetadataHashMap) / sizeof(u64) + 1];
interceptor_metadata_map = new ((void *)&metadata_mem) MetadataHashMap();
#endif
+ __sanitizer::InitializeMemintrinsicInterceptors();
+
INIT_MMAP;
INIT_MMAP64;
INIT_TEXTDOMAIN;
@@ -10400,9 +10221,6 @@ static void InitializeCommonInterceptors() {
INIT_STRPBRK;
INIT_STRXFRM;
INIT___STRXFRM_L;
- INIT_MEMSET;
- INIT_MEMMOVE;
- INIT_MEMCPY;
INIT_MEMCHR;
INIT_MEMCMP;
INIT_BCMP;
@@ -10574,9 +10392,6 @@ static void InitializeCommonInterceptors() {
INIT_GETIFADDRS;
INIT_IF_INDEXTONAME;
INIT_CAPGET;
- INIT_AEABI_MEM;
- INIT___BZERO;
- INIT_BZERO;
INIT_FTIME;
INIT_XDR;
INIT_XDRREC_LINUX;
@@ -10679,8 +10494,6 @@ static void InitializeCommonInterceptors() {
INIT_GETUSERSHELL;
INIT_SL_INIT;
INIT_GETRANDOM;
- INIT_CRYPT;
- INIT_CRYPT_R;
INIT_GETENTROPY;
INIT_QSORT;
INIT_QSORT_R;
@@ -10690,6 +10503,8 @@ static void InitializeCommonInterceptors() {
INIT_UNAME;
INIT___XUNAME;
INIT_HEXDUMP;
+ INIT_ARGP_PARSE;
+ INIT_CPUSET_GETAFFINITY;
INIT___PRINTF_CHK;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc
new file mode 100644
index 000000000000..52e489d02cda
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc
@@ -0,0 +1,244 @@
+//===-- sanitizer_common_interceptors_memintrinsics.inc ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Memintrinsic function interceptors for tools like AddressSanitizer,
+// ThreadSanitizer, MemorySanitizer, etc.
+//
+// These interceptors are part of the common interceptors, but separated out so
+// that implementations may add them, if necessary, to a separate source file
+// that should define SANITIZER_COMMON_NO_REDEFINE_BUILTINS at the top.
+//
+// This file should be included into the tool's memintrinsic interceptor file,
+// which has to define its own macros:
+// COMMON_INTERCEPTOR_ENTER
+// COMMON_INTERCEPTOR_READ_RANGE
+// COMMON_INTERCEPTOR_WRITE_RANGE
+// COMMON_INTERCEPTOR_MEMSET_IMPL
+// COMMON_INTERCEPTOR_MEMMOVE_IMPL
+// COMMON_INTERCEPTOR_MEMCPY_IMPL
+// COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED
+//===----------------------------------------------------------------------===//
+
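
In other words, a tool consumes this file from a dedicated translation unit. A hypothetical tool TU might look like this (the tool header name is an assumption; the NO_REDEFINE define is what the #error check below enforces):

    // mytool_interceptors_memintrinsics.cpp (hypothetical):
    #define SANITIZER_COMMON_NO_REDEFINE_BUILTINS  // must come first
    #include "mytool_interceptors.h"  // defines the COMMON_INTERCEPTOR_* macros
    #include "sanitizer_common_interceptors_memintrinsics.inc"
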
+#ifdef SANITIZER_REDEFINE_BUILTINS_H
+#error "Define SANITIZER_COMMON_NO_REDEFINE_BUILTINS in .cpp file"
+#endif
+
+#include "interception/interception.h"
+#include "sanitizer_platform_interceptors.h"
+
+// Platform-specific options.
+#if SANITIZER_APPLE
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
+#elif SANITIZER_WINDOWS64
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
+#else
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 1
+#endif // SANITIZER_APPLE
+
+#ifndef COMMON_INTERCEPTOR_MEMSET_IMPL
+#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
+ return internal_memset(dst, v, size); \
+ COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size); \
+ if (common_flags()->intercept_intrin) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ return REAL(memset)(dst, v, size); \
+ }
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MEMMOVE_IMPL
+#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
+ return internal_memmove(dst, src, size); \
+ COMMON_INTERCEPTOR_ENTER(ctx, memmove, dst, src, size); \
+ if (common_flags()->intercept_intrin) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
+ } \
+ return REAL(memmove)(dst, src, size); \
+ }
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MEMCPY_IMPL
+#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) { \
+ return internal_memmove(dst, src, size); \
+ } \
+ COMMON_INTERCEPTOR_ENTER(ctx, memcpy, dst, src, size); \
+ if (common_flags()->intercept_intrin) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
+ } \
+ return REAL(memcpy)(dst, src, size); \
+ }
+#endif
+
+#if SANITIZER_INTERCEPT_MEMSET
+INTERCEPTOR(void *, memset, void *dst, int v, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size);
+}
+
+#define INIT_MEMSET COMMON_INTERCEPT_FUNCTION(memset)
+#else
+#define INIT_MEMSET
+#endif
+
+#if SANITIZER_INTERCEPT_MEMMOVE
+INTERCEPTOR(void *, memmove, void *dst, const void *src, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
+}
+
+#define INIT_MEMMOVE COMMON_INTERCEPT_FUNCTION(memmove)
+#else
+#define INIT_MEMMOVE
+#endif
+
+#if SANITIZER_INTERCEPT_MEMCPY
+INTERCEPTOR(void *, memcpy, void *dst, const void *src, uptr size) {
+  // On OS X, memcpy and memmove are aliases of the same implementation, so
+  // memcpy must be routed through the memmove path here. For the same
+  // reason, if this is ever switched to the internal_ variants, it must use
+  // internal_memmove, not internal_memcpy.
+ void *ctx;
+#if PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
+#else
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
+#endif
+}
+
+#define INIT_MEMCPY \
+ do { \
+ if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) { \
+ COMMON_INTERCEPT_FUNCTION(memcpy); \
+ } else { \
+ ASSIGN_REAL(memcpy, memmove); \
+ } \
+ CHECK(REAL(memcpy)); \
+ } while (false)
+
+#else
+#define INIT_MEMCPY
+#endif
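
On Apple and Win64 targets, where memcpy and memmove resolve to one implementation, INIT_MEMCPY points REAL(memcpy) at the real memmove instead of resolving memcpy separately. Sketch of the effect (assuming ASSIGN_REAL(a, b) sets REAL(a) = REAL(b), as in interception.h):

    void *CopyViaReal(void *dst, const void *src, uptr n) {
      // On Apple/Win64, REAL(memcpy) was assigned from memmove by
      // INIT_MEMCPY, so this call is overlap-safe despite its name.
      return REAL(memcpy)(dst, src, n);
    }
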
+
+#if SANITIZER_INTERCEPT_AEABI_MEM
+INTERCEPTOR(void *, __aeabi_memmove, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memmove4, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memmove8, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memcpy, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memcpy4, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memcpy8, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
+}
+
+// Note the argument order.
+INTERCEPTOR(void *, __aeabi_memset, void *block, uptr size, int c) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memset4, void *block, uptr size, int c) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memset8, void *block, uptr size, int c) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memclr, void *block, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memclr4, void *block, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memclr8, void *block, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
+}
+
+#define INIT_AEABI_MEM \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memmove); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memmove4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memmove8); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy8); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memset); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memset4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memset8); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memclr); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memclr4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memclr8);
+#else
+#define INIT_AEABI_MEM
+#endif // SANITIZER_INTERCEPT_AEABI_MEM
+
+#if SANITIZER_INTERCEPT___BZERO
+INTERCEPTOR(void *, __bzero, void *block, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
+}
+#define INIT___BZERO COMMON_INTERCEPT_FUNCTION(__bzero);
+#else
+#define INIT___BZERO
+#endif // SANITIZER_INTERCEPT___BZERO
+
+#if SANITIZER_INTERCEPT_BZERO
+INTERCEPTOR(void *, bzero, void *block, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
+}
+#define INIT_BZERO COMMON_INTERCEPT_FUNCTION(bzero);
+#else
+#define INIT_BZERO
+#endif // SANITIZER_INTERCEPT_BZERO
+
+namespace __sanitizer {
+// This does not need to be called if InitializeCommonInterceptors() is called.
+void InitializeMemintrinsicInterceptors() {
+ INIT_MEMSET;
+ INIT_MEMMOVE;
+ INIT_MEMCPY;
+ INIT_AEABI_MEM;
+ INIT___BZERO;
+ INIT_BZERO;
+}
+} // namespace __sanitizer
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S
index 72e482754b62..cdfa6f1d7f53 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S
@@ -40,8 +40,8 @@ ASM_WRAPPER_NAME(vfork):
ret
ASM_SIZE(vfork)
-.weak vfork
-.set vfork, ASM_WRAPPER_NAME(vfork)
+ASM_INTERCEPTOR_TRAMPOLINE(vfork)
+ASM_TRAMPOLINE_ALIAS(vfork, vfork)
GNU_PROPERTY_BTI_PAC
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_arm.inc.S b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_arm.inc.S
index 780a9d46e26a..87bb48380569 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_arm.inc.S
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_arm.inc.S
@@ -43,7 +43,7 @@ ASM_WRAPPER_NAME(vfork):
ASM_SIZE(vfork)
-.weak vfork
-.set vfork, ASM_WRAPPER_NAME(vfork)
+ASM_INTERCEPTOR_TRAMPOLINE(vfork)
+ASM_TRAMPOLINE_ALIAS(vfork, vfork)
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S
index f60b05d157bb..c633014e2daa 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S
@@ -58,7 +58,7 @@ ASM_WRAPPER_NAME(vfork):
ret
ASM_SIZE(vfork)
-.weak vfork
-.set vfork, ASM_WRAPPER_NAME(vfork)
+ASM_INTERCEPTOR_TRAMPOLINE(vfork)
+ASM_TRAMPOLINE_ALIAS(vfork, vfork)
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_loongarch64.inc.S b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_loongarch64.inc.S
index 68782acb379d..8429d57d669c 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_loongarch64.inc.S
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_loongarch64.inc.S
@@ -51,7 +51,7 @@ ASM_WRAPPER_NAME(vfork):
jr $ra
ASM_SIZE(vfork)
-.weak vfork
-.set vfork, ASM_WRAPPER_NAME(vfork)
+ASM_INTERCEPTOR_TRAMPOLINE(vfork)
+ASM_TRAMPOLINE_ALIAS(vfork, vfork)
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S
index b7ec27859b8a..5b6ea6fe6c7a 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S
@@ -50,7 +50,7 @@ ASM_WRAPPER_NAME(vfork):
ret
ASM_SIZE(vfork)
-.weak vfork
-.set vfork, ASM_WRAPPER_NAME(vfork)
+ASM_INTERCEPTOR_TRAMPOLINE(vfork)
+ASM_TRAMPOLINE_ALIAS(vfork, vfork)
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S
index 8fd18ea67ffd..5500f817aec5 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S
@@ -34,9 +34,9 @@ ASM_WRAPPER_NAME(vfork):
.L_exit:
pop %rax
ret
-ASM_SIZE(vfork)
+ASM_SIZE(ASM_WRAPPER_NAME(vfork))
-.weak vfork
-.set vfork, ASM_WRAPPER_NAME(vfork)
+ASM_INTERCEPTOR_TRAMPOLINE(vfork)
+ASM_TRAMPOLINE_ALIAS(vfork, vfork)
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interface.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interface.inc
index 958f071e7b5f..557207fe62ac 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interface.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interface.inc
@@ -32,7 +32,9 @@ INTERFACE_FUNCTION(__sanitizer_get_module_and_offset_for_pc)
INTERFACE_FUNCTION(__sanitizer_symbolize_global)
INTERFACE_FUNCTION(__sanitizer_symbolize_pc)
// Allocator interface.
+INTERFACE_FUNCTION(__sanitizer_get_allocated_begin)
INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
+INTERFACE_FUNCTION(__sanitizer_get_allocated_size_fast)
INTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes)
INTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size)
INTERFACE_FUNCTION(__sanitizer_get_free_bytes)
@@ -44,3 +46,7 @@ INTERFACE_FUNCTION(__sanitizer_purge_allocator)
INTERFACE_FUNCTION(__sanitizer_print_memory_profile)
INTERFACE_WEAK_FUNCTION(__sanitizer_free_hook)
INTERFACE_WEAK_FUNCTION(__sanitizer_malloc_hook)
+// Memintrinsic functions.
+INTERFACE_FUNCTION(__sanitizer_internal_memcpy)
+INTERFACE_FUNCTION(__sanitizer_internal_memmove)
+INTERFACE_FUNCTION(__sanitizer_internal_memset)
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp
index 8fd398564280..895763ac6b6f 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp
@@ -67,6 +67,8 @@ void *BackgroundThread(void *arg) {
} else if (soft_rss_limit_mb >= current_rss_mb &&
reached_soft_rss_limit) {
reached_soft_rss_limit = false;
+ Report("%s: soft rss limit unexhausted (%zdMb vs %zdMb)\n",
+ SanitizerToolName, soft_rss_limit_mb, current_rss_mb);
SetRssLimitExceeded(false);
}
}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc
index 9d7518ac9476..c10943b3e487 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc
@@ -417,14 +417,14 @@ PRE_SYSCALL(capget)(void *header, void *dataptr) {
POST_SYSCALL(capget)(long res, void *header, void *dataptr) {
if (res >= 0)
if (dataptr)
- POST_WRITE(dataptr, __user_cap_data_struct_sz);
+ POST_WRITE(dataptr, __user_cap_data_struct_sz(header));
}
PRE_SYSCALL(capset)(void *header, const void *data) {
if (header)
PRE_READ(header, __user_cap_header_struct_sz);
if (data)
- PRE_READ(data, __user_cap_data_struct_sz);
+ PRE_READ(data, __user_cap_data_struct_sz(header));
}
POST_SYSCALL(capset)(long res, void *header, const void *data) {}
@@ -1374,9 +1374,8 @@ PRE_SYSCALL(io_setup)(long nr_reqs, void **ctx) {
}
POST_SYSCALL(io_setup)(long res, long nr_reqs, void **ctx) {
- if (res >= 0) {
- if (ctx)
- POST_WRITE(ctx, sizeof(*ctx));
+ if (res >= 0 && ctx) {
+ POST_WRITE(ctx, sizeof(*ctx));
// (*ctx) is actually a pointer to a kernel mapped page, and there are
// people out there who are crazy enough to peek into that page's 32-byte
// header.
@@ -2136,7 +2135,7 @@ PRE_SYSCALL(epoll_pwait2)
const sanitizer_kernel_timespec *timeout, const kernel_sigset_t *sigmask,
long sigsetsize) {
if (timeout)
- PRE_READ(timeout, sizeof(timeout));
+ PRE_READ(timeout, sizeof(*timeout));
if (sigmask)
PRE_READ(sigmask, sigsetsize);
}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cpp
index 956b48e0b434..ce4326967180 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cpp
@@ -282,7 +282,14 @@ SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_pcs_init, const uptr* beg,
// Weak definition for code instrumented with -fsanitize-coverage=stack-depth
// and later linked with code containing a strong definition.
// E.g., -fsanitize=fuzzer-no-link
+// FIXME: Update Apple deployment target so that thread_local is always
+// supported, and remove the #if.
+// FIXME: Figure out how this should work on Windows, exported thread_local
+// symbols are not supported:
+// "data with thread storage duration may not have dll interface"
+#if !SANITIZER_APPLE && !SANITIZER_WINDOWS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-SANITIZER_TLS_INITIAL_EXEC_ATTRIBUTE uptr __sancov_lowest_stack;
+thread_local uptr __sancov_lowest_stack;
+#endif
#endif // !SANITIZER_FUCHSIA
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_file.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_file.h
index 810c1e452f61..9459c6b00acc 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_file.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_file.h
@@ -15,6 +15,7 @@
#ifndef SANITIZER_FILE_H
#define SANITIZER_FILE_H
+#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.cpp
index 9e274268bf2a..c620da7f220a 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.cpp
@@ -13,9 +13,9 @@
#include "sanitizer_flag_parser.h"
#include "sanitizer_common.h"
-#include "sanitizer_libc.h"
-#include "sanitizer_flags.h"
#include "sanitizer_flag_parser.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_libc.h"
namespace __sanitizer {
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.h
index 3ccc6a6fa537..ae49294dde95 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.h
@@ -13,9 +13,9 @@
#ifndef SANITIZER_FLAG_REGISTRY_H
#define SANITIZER_FLAG_REGISTRY_H
+#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
-#include "sanitizer_common.h"
namespace __sanitizer {
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flat_map.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flat_map.h
index 05fb554d20c1..8bb8304910c7 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flat_map.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flat_map.h
@@ -21,12 +21,6 @@
namespace __sanitizer {
-// Call these callbacks on mmap/munmap.
-struct NoOpMapUnmapCallback {
- void OnMap(uptr p, uptr size) const {}
- void OnUnmap(uptr p, uptr size) const {}
-};
-
// Maps integers in range [0, kSize) to values.
template <typename T, u64 kSize,
typename AddressSpaceViewTy = LocalAddressSpaceView>
@@ -62,8 +56,7 @@ class FlatMap {
// Each value is initially zero and can be set to something else only once.
// Setting and getting values from multiple threads is safe w/o extra locking.
template <typename T, u64 kSize1, u64 kSize2,
- typename AddressSpaceViewTy = LocalAddressSpaceView,
- class MapUnmapCallback = NoOpMapUnmapCallback>
+ typename AddressSpaceViewTy = LocalAddressSpaceView>
class TwoLevelMap {
static_assert(IsPowerOfTwo(kSize2), "Use a power of two for performance.");
@@ -79,7 +72,6 @@ class TwoLevelMap {
T *p = Get(i);
if (!p)
continue;
- MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), MmapSize());
UnmapOrDie(p, kSize2);
}
Init();
@@ -149,7 +141,6 @@ class TwoLevelMap {
T *res = Get(idx);
if (!res) {
res = reinterpret_cast<T *>(MmapOrDie(MmapSize(), "TwoLevelMap"));
- MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);
atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
memory_order_release);
}
@@ -164,10 +155,8 @@ template <u64 kSize, typename AddressSpaceViewTy = LocalAddressSpaceView>
using FlatByteMap = FlatMap<u8, kSize, AddressSpaceViewTy>;
template <u64 kSize1, u64 kSize2,
- typename AddressSpaceViewTy = LocalAddressSpaceView,
- class MapUnmapCallback = NoOpMapUnmapCallback>
-using TwoLevelByteMap =
- TwoLevelMap<u8, kSize1, kSize2, AddressSpaceViewTy, MapUnmapCallback>;
+ typename AddressSpaceViewTy = LocalAddressSpaceView>
+using TwoLevelByteMap = TwoLevelMap<u8, kSize1, kSize2, AddressSpaceViewTy>;
} // namespace __sanitizer
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
index a92e84cb8ecf..1e25265c00a2 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
@@ -285,6 +285,12 @@ bool MprotectReadOnly(uptr addr, uptr size) {
ZX_OK;
}
+bool MprotectReadWrite(uptr addr, uptr size) {
+ return _zx_vmar_protect(_zx_vmar_root_self(),
+ ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, addr,
+ size) == ZX_OK;
+}
+
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
const char *mem_type) {
CHECK_GE(size, GetPageSize());
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc
index 9683b97ab91d..16b2a10d8b06 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc
@@ -1267,8 +1267,6 @@ static void ioctl_table_fill() {
_(TIOCGFLAGS, WRITE, sizeof(int));
_(TIOCSFLAGS, READ, sizeof(int));
_(TIOCDCDTIMESTAMP, WRITE, struct_timeval_sz);
- _(TIOCRCVFRAME, READ, sizeof(uptr));
- _(TIOCXMTFRAME, READ, sizeof(uptr));
_(TIOCPTMGET, WRITE, struct_ptmget_sz);
_(TIOCGRANTPT, NONE, 0);
_(TIOCPTSNAME, WRITE, struct_ptmget_sz);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h
index 6b800820ab80..552d65067944 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h
@@ -13,6 +13,7 @@
#define SANITIZER_DEFS_H
#include "sanitizer_platform.h"
+#include "sanitizer_redefine_builtins.h"
#ifndef SANITIZER_DEBUG
# define SANITIZER_DEBUG 0
@@ -37,15 +38,6 @@
# define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak))
#endif
-// TLS is handled differently on different platforms
-#if SANITIZER_LINUX || SANITIZER_NETBSD || \
- SANITIZER_FREEBSD
-# define SANITIZER_TLS_INITIAL_EXEC_ATTRIBUTE \
- __attribute__((tls_model("initial-exec"))) thread_local
-#else
-# define SANITIZER_TLS_INITIAL_EXEC_ATTRIBUTE
-#endif
-
//--------------------------- WEAK FUNCTIONS ---------------------------------//
// When working with weak functions, to simplify the code and make it more
// portable, when possible define a default implementation using this macro:
@@ -226,7 +218,7 @@ typedef u64 tid_t;
# define WARN_UNUSED_RESULT
#else // _MSC_VER
# define ALWAYS_INLINE inline __attribute__((always_inline))
-# define ALIAS(x) __attribute__((alias(x)))
+# define ALIAS(x) __attribute__((alias(SANITIZER_STRINGIFY(x))))
// Please only use the ALIGNED macro before the type.
// Using ALIGNED after the variable declaration is not portable!
# define ALIGNED(x) __attribute__((aligned(x)))
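
With stringification moved inside the macro, call sites pass a bare symbol instead of a string literal, which lets them compose with token-pasting macros such as WRAP. For example (assuming interception.h's WRAP(x) expands to __interceptor_x):

    // Before: ALIAS("__interceptor_pthread_setcancelstate")
    // After, as used earlier in this patch:
    INTERCEPTOR(int, __libc_thr_setcancelstate, int state, int *oldstate)
    ALIAS(WRAP(pthread_setcancelstate));
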
@@ -267,6 +259,12 @@ typedef u64 tid_t;
# define FALLTHROUGH
#endif
+#if __has_attribute(uninitialized)
+# define UNINITIALIZED __attribute__((uninitialized))
+#else
+# define UNINITIALIZED
+#endif
+
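
Clang's uninitialized attribute opts a variable out of compiler-forced automatic initialization (e.g. -ftrivial-auto-var-init=pattern). A minimal usage sketch:

    void Example() {
      // Skipped by -ftrivial-auto-var-init; the code initializes it itself.
      UNINITIALIZED char buf[4096];
      internal_memset(buf, 0, sizeof(buf));
    }
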
// Unaligned versions of basic types.
typedef ALIGNED(1) u16 uu16;
typedef ALIGNED(1) u32 uu32;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libc.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libc.cpp
index d3076f0da489..4a6fa5e8dbac 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libc.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libc.cpp
@@ -10,6 +10,9 @@
// run-time libraries. See sanitizer_libc.h for details.
//===----------------------------------------------------------------------===//
+// Do not redefine builtins; this file is defining the builtin replacements.
+#define SANITIZER_COMMON_NO_REDEFINE_BUILTINS
+
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
@@ -46,7 +49,10 @@ int internal_memcmp(const void* s1, const void* s2, uptr n) {
return 0;
}
-void *internal_memcpy(void *dest, const void *src, uptr n) {
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memcpy(void *dest,
+ const void *src,
+ uptr n) {
char *d = (char*)dest;
const char *s = (const char *)src;
for (uptr i = 0; i < n; ++i)
@@ -54,7 +60,8 @@ void *internal_memcpy(void *dest, const void *src, uptr n) {
return dest;
}
-void *internal_memmove(void *dest, const void *src, uptr n) {
+SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memmove(
+ void *dest, const void *src, uptr n) {
char *d = (char*)dest;
const char *s = (const char *)src;
sptr i, signed_n = (sptr)n;
@@ -72,7 +79,8 @@ void *internal_memmove(void *dest, const void *src, uptr n) {
return dest;
}
-void *internal_memset(void* s, int c, uptr n) {
+SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memset(void *s, int c,
+ uptr n) {
// Optimize for the most performance-critical case:
if ((reinterpret_cast<uptr>(s) % 16) == 0 && (n % 16) == 0) {
u64 *p = reinterpret_cast<u64*>(s);
@@ -95,6 +103,7 @@ void *internal_memset(void* s, int c, uptr n) {
}
return s;
}
+} // extern "C"
uptr internal_strcspn(const char *s, const char *reject) {
uptr i;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libc.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libc.h
index 39a212665d0a..e881db207908 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libc.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libc.h
@@ -24,15 +24,33 @@ namespace __sanitizer {
// internal_X() is a custom implementation of X() for use in RTL.
+extern "C" {
+// These are used as builtin replacements; see sanitizer_redefine_builtins.h.
+// In normal runtime code, use the __sanitizer::internal_X() aliases instead.
+SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memcpy(void *dest,
+ const void *src,
+ uptr n);
+SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memmove(
+ void *dest, const void *src, uptr n);
+SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memset(void *s, int c,
+ uptr n);
+} // extern "C"
+
// String functions
s64 internal_atoll(const char *nptr);
void *internal_memchr(const void *s, int c, uptr n);
void *internal_memrchr(const void *s, int c, uptr n);
int internal_memcmp(const void* s1, const void* s2, uptr n);
-void *internal_memcpy(void *dest, const void *src, uptr n);
-void *internal_memmove(void *dest, const void *src, uptr n);
+ALWAYS_INLINE void *internal_memcpy(void *dest, const void *src, uptr n) {
+ return __sanitizer_internal_memcpy(dest, src, n);
+}
+ALWAYS_INLINE void *internal_memmove(void *dest, const void *src, uptr n) {
+ return __sanitizer_internal_memmove(dest, src, n);
+}
// Should not be used in performance-critical places.
-void *internal_memset(void *s, int c, uptr n);
+ALWAYS_INLINE void *internal_memset(void *s, int c, uptr n) {
+ return __sanitizer_internal_memset(s, c, n);
+}
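
The internal_X() entry points are now ALWAYS_INLINE shims over exported C symbols, so runtime code keeps its usual spelling while redefined builtins can target the same symbols. Illustrative call (both lines reach the same exported function):

    char dst[8], src[8] = "abcdefg";
    internal_memcpy(dst, src, 4);              // inlines to the call below
    __sanitizer_internal_memcpy(dst, src, 4);  // the exported ABI symbol
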
char* internal_strchr(const char *s, int c);
char *internal_strchrnul(const char *s, int c);
int internal_strcmp(const char *s1, const char *s2);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
index ebbd14eca72f..8759d96609e5 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
@@ -156,11 +156,11 @@ const int FUTEX_WAKE_PRIVATE = FUTEX_WAKE | FUTEX_PRIVATE_FLAG;
namespace __sanitizer {
-void SetSigProcMask(__sanitizer_sigset_t *set, __sanitizer_sigset_t *old) {
- CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, set, old));
+void SetSigProcMask(__sanitizer_sigset_t *set, __sanitizer_sigset_t *oldset) {
+ CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, set, oldset));
}
-ScopedBlockSignals::ScopedBlockSignals(__sanitizer_sigset_t *copy) {
+void BlockSignals(__sanitizer_sigset_t *oldset) {
__sanitizer_sigset_t set;
internal_sigfillset(&set);
# if SANITIZER_LINUX && !SANITIZER_ANDROID
@@ -175,7 +175,11 @@ ScopedBlockSignals::ScopedBlockSignals(__sanitizer_sigset_t *copy) {
// hang.
internal_sigdelset(&set, 31);
# endif
- SetSigProcMask(&set, &saved_);
+ SetSigProcMask(&set, oldset);
+}
+
+ScopedBlockSignals::ScopedBlockSignals(__sanitizer_sigset_t *copy) {
+ BlockSignals(&saved_);
if (copy)
internal_memcpy(copy, &saved_, sizeof(saved_));
}
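
BlockSignals factors the mask setup out of the RAII constructor so callers can block signals without arranging a restore. A usage sketch, grounded in the declarations added to sanitizer_linux.h below:

    __sanitizer_sigset_t saved;
    BlockSignals(&saved);             // block everything, remember old mask
    // ... work that must not be interrupted ...
    SetSigProcMask(&saved, nullptr);  // restore manually
    // Or scoped, restoring automatically on exit:
    { ScopedBlockSignals block(nullptr); /* signals blocked in this scope */ }
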
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.h
index 2c769dd59aa0..7454369fa419 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.h
@@ -51,6 +51,7 @@ uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
__sanitizer_sigset_t *oldset);
void SetSigProcMask(__sanitizer_sigset_t *set, __sanitizer_sigset_t *oldset);
+void BlockSignals(__sanitizer_sigset_t *oldset = nullptr);
struct ScopedBlockSignals {
explicit ScopedBlockSignals(__sanitizer_sigset_t *copy);
~ScopedBlockSignals();
@@ -152,6 +153,9 @@ inline void ReleaseMemoryPagesToOSAndZeroFill(uptr beg, uptr end) {
"rdhwr %0,$29\n" \
".set pop\n" : "=r"(__v)); \
__v; })
#elif defined(__riscv)
+# define __get_tls() \
+ ({ void** __v; __asm__("mv %0, tp" : "=r"(__v)); __v; })
#elif defined(__i386__)
# define __get_tls() \
({ void** __v; __asm__("movl %%gs:0, %0" : "=r"(__v)); __v; })
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
index 2e9329183e8d..d0315381cf63 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
@@ -150,7 +150,7 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
pthread_attr_t attr;
pthread_attr_init(&attr);
CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
- my_pthread_attr_getstack(&attr, &stackaddr, &stacksize);
+ internal_pthread_attr_getstack(&attr, &stackaddr, &stacksize);
pthread_attr_destroy(&attr);
#endif // SANITIZER_SOLARIS
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
index 23c4c6619de8..24e3d1112520 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
@@ -12,81 +12,81 @@
#include "sanitizer_platform.h"
#if SANITIZER_APPLE
-#include "sanitizer_mac.h"
-#include "interception/interception.h"
+# include "interception/interception.h"
+# include "sanitizer_mac.h"
// Use 64-bit inodes in file operations. ASan does not support OS X 10.5, so
// the clients will most certainly use 64-bit ones as well.
-#ifndef _DARWIN_USE_64_BIT_INODE
-#define _DARWIN_USE_64_BIT_INODE 1
-#endif
-#include <stdio.h>
-
-#include "sanitizer_common.h"
-#include "sanitizer_file.h"
-#include "sanitizer_flags.h"
-#include "sanitizer_interface_internal.h"
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_libc.h"
-#include "sanitizer_platform_limits_posix.h"
-#include "sanitizer_procmaps.h"
-#include "sanitizer_ptrauth.h"
-
-#if !SANITIZER_IOS
-#include <crt_externs.h> // for _NSGetEnviron
-#else
+# ifndef _DARWIN_USE_64_BIT_INODE
+# define _DARWIN_USE_64_BIT_INODE 1
+# endif
+# include <stdio.h>
+
+# include "sanitizer_common.h"
+# include "sanitizer_file.h"
+# include "sanitizer_flags.h"
+# include "sanitizer_interface_internal.h"
+# include "sanitizer_internal_defs.h"
+# include "sanitizer_libc.h"
+# include "sanitizer_platform_limits_posix.h"
+# include "sanitizer_procmaps.h"
+# include "sanitizer_ptrauth.h"
+
+# if !SANITIZER_IOS
+# include <crt_externs.h> // for _NSGetEnviron
+# else
extern char **environ;
-#endif
+# endif
-#if defined(__has_include) && __has_include(<os/trace.h>)
-#define SANITIZER_OS_TRACE 1
-#include <os/trace.h>
-#else
-#define SANITIZER_OS_TRACE 0
-#endif
+# if defined(__has_include) && __has_include(<os/trace.h>)
+# define SANITIZER_OS_TRACE 1
+# include <os/trace.h>
+# else
+# define SANITIZER_OS_TRACE 0
+# endif
// import new crash reporting api
-#if defined(__has_include) && __has_include(<CrashReporterClient.h>)
-#define HAVE_CRASHREPORTERCLIENT_H 1
-#include <CrashReporterClient.h>
-#else
-#define HAVE_CRASHREPORTERCLIENT_H 0
-#endif
+# if defined(__has_include) && __has_include(<CrashReporterClient.h>)
+# define HAVE_CRASHREPORTERCLIENT_H 1
+# include <CrashReporterClient.h>
+# else
+# define HAVE_CRASHREPORTERCLIENT_H 0
+# endif
-#if !SANITIZER_IOS
-#include <crt_externs.h> // for _NSGetArgv and _NSGetEnviron
-#else
+# if !SANITIZER_IOS
+# include <crt_externs.h> // for _NSGetArgv and _NSGetEnviron
+# else
extern "C" {
- extern char ***_NSGetArgv(void);
+extern char ***_NSGetArgv(void);
}
-#endif
+# endif
-#include <asl.h>
-#include <dlfcn.h> // for dladdr()
-#include <errno.h>
-#include <fcntl.h>
-#include <libkern/OSAtomic.h>
-#include <mach-o/dyld.h>
-#include <mach/mach.h>
-#include <mach/mach_time.h>
-#include <mach/vm_statistics.h>
-#include <malloc/malloc.h>
-#include <os/log.h>
-#include <pthread.h>
-#include <pthread/introspection.h>
-#include <sched.h>
-#include <signal.h>
-#include <spawn.h>
-#include <stdlib.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-#include <sys/resource.h>
-#include <sys/stat.h>
-#include <sys/sysctl.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <unistd.h>
-#include <util.h>
+# include <asl.h>
+# include <dlfcn.h> // for dladdr()
+# include <errno.h>
+# include <fcntl.h>
+# include <libkern/OSAtomic.h>
+# include <mach-o/dyld.h>
+# include <mach/mach.h>
+# include <mach/mach_time.h>
+# include <mach/vm_statistics.h>
+# include <malloc/malloc.h>
+# include <os/log.h>
+# include <pthread.h>
+# include <pthread/introspection.h>
+# include <sched.h>
+# include <signal.h>
+# include <spawn.h>
+# include <stdlib.h>
+# include <sys/ioctl.h>
+# include <sys/mman.h>
+# include <sys/resource.h>
+# include <sys/stat.h>
+# include <sys/sysctl.h>
+# include <sys/types.h>
+# include <sys/wait.h>
+# include <unistd.h>
+# include <util.h>
// From <crt_externs.h>, but we don't have that file on iOS.
extern "C" {
@@ -989,7 +989,7 @@ static void VerifyInterceptorsWorking() {
// "wrap_puts" within our own dylib.
Dl_info info_puts, info_runtime;
RAW_CHECK(dladdr(dlsym(RTLD_DEFAULT, "puts"), &info_puts));
- RAW_CHECK(dladdr((void *)__sanitizer_report_error_summary, &info_runtime));
+ RAW_CHECK(dladdr((void *)&VerifyInterceptorsWorking, &info_runtime));
if (internal_strcmp(info_puts.dli_fname, info_runtime.dli_fname) != 0) {
Report(
"ERROR: Interceptors are not working. This may be because %s is "
@@ -1039,7 +1039,7 @@ static void StripEnv() {
return;
Dl_info info;
- RAW_CHECK(dladdr((void *)__sanitizer_report_error_summary, &info));
+ RAW_CHECK(dladdr((void *)&StripEnv, &info));
const char *dylib_name = StripModuleName(info.dli_fname);
bool lib_is_in_env = internal_strstr(dyld_insert_libraries, dylib_name);
if (!lib_is_in_env)
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mallinfo.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mallinfo.h
new file mode 100644
index 000000000000..4e58c02df835
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mallinfo.h
@@ -0,0 +1,38 @@
+//===-- sanitizer_mallinfo.h ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Definition for mallinfo on different platforms.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_MALLINFO_H
+#define SANITIZER_MALLINFO_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform.h"
+
+namespace __sanitizer {
+
+#if SANITIZER_ANDROID
+
+struct __sanitizer_struct_mallinfo {
+ uptr v[10];
+};
+
+#elif SANITIZER_LINUX || SANITIZER_APPLE || SANITIZER_FUCHSIA
+
+struct __sanitizer_struct_mallinfo {
+ int v[10];
+};
+
+#endif
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_MALLINFO_H
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h
index 814ff462d1cf..c740778b6228 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h
@@ -347,7 +347,8 @@
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_GLIBC
-#define SANITIZER_INTERCEPT_PTHREAD_GETAFFINITY_NP SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_GETAFFINITY_NP \
+ (SI_LINUX_NOT_ANDROID || SI_FREEBSD)
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED \
(SI_POSIX && !SI_NETBSD)
@@ -367,6 +368,8 @@
(SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED \
(SI_LINUX_NOT_ANDROID && !SI_NETBSD)
+#define SANITIZER_INTERCEPT_TRYJOIN SI_GLIBC
+#define SANITIZER_INTERCEPT_TIMEDJOIN SI_GLIBC
#define SANITIZER_INTERCEPT_THR_EXIT SI_FREEBSD
#define SANITIZER_INTERCEPT_TMPNAM SI_POSIX
#define SANITIZER_INTERCEPT_TMPNAM_R (SI_GLIBC || SI_SOLARIS)
@@ -492,6 +495,7 @@
#define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC)
#define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC && !SI_NETBSD)
#define SANITIZER_INTERCEPT_MCHECK_MPROBE SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_WCSLEN 1
#define SANITIZER_INTERCEPT_WCSCAT SI_POSIX
#define SANITIZER_INTERCEPT_WCSDUP SI_POSIX
#define SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION (!SI_WINDOWS && SI_NOT_FUCHSIA)
@@ -569,15 +573,14 @@
#define SANITIZER_INTERCEPT_FDEVNAME SI_FREEBSD
#define SANITIZER_INTERCEPT_GETUSERSHELL (SI_POSIX && !SI_ANDROID)
#define SANITIZER_INTERCEPT_SL_INIT (SI_FREEBSD || SI_NETBSD)
-#define SANITIZER_INTERCEPT_CRYPT (SI_POSIX && !SI_ANDROID)
-#define SANITIZER_INTERCEPT_CRYPT_R (SI_LINUX && !SI_ANDROID)
#define SANITIZER_INTERCEPT_GETRANDOM \
((SI_LINUX && __GLIBC_PREREQ(2, 25)) || SI_FREEBSD)
#define SANITIZER_INTERCEPT___CXA_ATEXIT SI_NETBSD
#define SANITIZER_INTERCEPT_ATEXIT SI_NETBSD
#define SANITIZER_INTERCEPT_PTHREAD_ATFORK SI_NETBSD
-#define SANITIZER_INTERCEPT_GETENTROPY SI_FREEBSD
+#define SANITIZER_INTERCEPT_GETENTROPY \
+ ((SI_LINUX && __GLIBC_PREREQ(2, 25)) || SI_FREEBSD)
#define SANITIZER_INTERCEPT_QSORT \
(SI_POSIX && !SI_IOSSIM && !SI_WATCHOS && !SI_TVOS && !SI_ANDROID)
#define SANITIZER_INTERCEPT_QSORT_R SI_GLIBC
@@ -592,6 +595,8 @@
#define SANITIZER_INTERCEPT_FLOPEN SI_FREEBSD
#define SANITIZER_INTERCEPT_PROCCTL SI_FREEBSD
#define SANITIZER_INTERCEPT_HEXDUMP SI_FREEBSD
+#define SANITIZER_INTERCEPT_ARGP_PARSE SI_GLIBC
+#define SANITIZER_INTERCEPT_CPUSET_GETAFFINITY SI_FREEBSD
// This macro gives a way for downstream users to override the above
// interceptor macros irrespective of the platform they are on. They have
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp
index 37e72cd5d45e..38f968d533b1 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp
@@ -17,6 +17,7 @@
#include <sys/capsicum.h>
#include <sys/consio.h>
+#include <sys/cpuset.h>
#include <sys/filio.h>
#include <sys/ipc.h>
#include <sys/kbio.h>
@@ -103,6 +104,7 @@ void *__sanitizer_get_link_map_by_dlopen_handle(void *handle) {
return internal_dlinfo(handle, RTLD_DI_LINKMAP, &p) == 0 ? p : nullptr;
}
+unsigned struct_cpuset_sz = sizeof(cpuset_t);
unsigned struct_cap_rights_sz = sizeof(cap_rights_t);
unsigned struct_utsname_sz = sizeof(struct utsname);
unsigned struct_stat_sz = sizeof(struct stat);
@@ -173,6 +175,12 @@ uptr __sanitizer_in_addr_sz(int af) {
return 0;
}
+// For FreeBSD the actual size of a directory entry is not always in d_reclen.
+// Use the appropriate macro to get the correct size for all cases (e.g. NFS).
+u16 __sanitizer_dirsiz(const __sanitizer_dirent *dp) {
+ return _GENERIC_DIRSIZ(dp);
+}
+
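
_GENERIC_DIRSIZ computes the entry's in-memory size from its own name length rather than trusting d_reclen, which filesystems such as NFS may pad differently. Roughly (a sketch only; the real macro lives in FreeBSD's <sys/dirent.h>, and the exact rounding granularity is its business):

    // Approximation: the fixed header plus the NUL-terminated name,
    // rounded up to the structure's alignment.
    u16 ApproxDirsiz(const __sanitizer_dirent *dp) {
      uptr header = offsetof(__sanitizer_dirent, d_name);
      uptr need = header + dp->d_namlen + 1;
      return (u16)RoundUpTo(need, sizeof(uptr));  // rounding unit approximate
    }
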
unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
int glob_nomatch = GLOB_NOMATCH;
int glob_altdirfunc = GLOB_ALTDIRFUNC;
@@ -558,4 +566,5 @@ COMPILER_CHECK(__sanitizer_XDR_FREE == XDR_FREE);
CHECK_TYPE_SIZE(sem_t);
COMPILER_CHECK(sizeof(__sanitizer_cap_rights_t) >= sizeof(cap_rights_t));
+COMPILER_CHECK(sizeof(__sanitizer_cpuset_t) >= sizeof(cpuset_t));
#endif // SANITIZER_FREEBSD
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h
index daef1177a2db..b119f059007d 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h
@@ -249,9 +249,15 @@ struct __sanitizer_dirent {
unsigned int d_fileno;
# endif
unsigned short d_reclen;
- // more fields that we don't care about
+ u8 d_type;
+ u8 d_pad0;
+ u16 d_namlen;
+ u16 d_pad1;
+ char d_name[256];
};
+u16 __sanitizer_dirsiz(const __sanitizer_dirent *dp);
+
// 'clock_t' is 32 bits wide on x64 FreeBSD
typedef int __sanitizer_clock_t;
typedef int __sanitizer_clockid_t;
@@ -709,6 +715,17 @@ extern unsigned struct_cap_rights_sz;
extern unsigned struct_fstab_sz;
extern unsigned struct_StringList_sz;
+
+struct __sanitizer_cpuset {
+#if __FreeBSD_version >= 1400090
+ long __bits[(1024 + (sizeof(long) * 8) - 1) / (sizeof(long) * 8)];
+#else
+ long __bits[(256 + (sizeof(long) * 8) - 1) / (sizeof(long) * 8)];
+#endif
+};
+
+typedef struct __sanitizer_cpuset __sanitizer_cpuset_t;
+extern unsigned struct_cpuset_sz;
} // namespace __sanitizer
# define CHECK_TYPE_SIZE(TYPE) \
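
The __bits sizing above rounds the kernel's CPU bitset (256 bits below the __FreeBSD_version cutoff shown, 1024 bits at or above it) up to whole longs. As a worked check of that arithmetic, assuming an LP64 target where sizeof(long) == 8 (an illustrative aside, not part of the patch):

static_assert((1024 + (sizeof(long) * 8) - 1) / (sizeof(long) * 8) == 16,
              "1024-bit cpuset occupies 16 longs (128 bytes) on LP64");
static_assert((256 + (sizeof(long) * 8) - 1) / (sizeof(long) * 8) == 4,
              "256-bit cpuset occupies 4 longs (32 bytes) on LP64");
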
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp
index 648e502b904a..c40877ba48d0 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp
@@ -2342,8 +2342,6 @@ unsigned IOCTL_TIOCDRAIN = TIOCDRAIN;
unsigned IOCTL_TIOCGFLAGS = TIOCGFLAGS;
unsigned IOCTL_TIOCSFLAGS = TIOCSFLAGS;
unsigned IOCTL_TIOCDCDTIMESTAMP = TIOCDCDTIMESTAMP;
-unsigned IOCTL_TIOCRCVFRAME = TIOCRCVFRAME;
-unsigned IOCTL_TIOCXMTFRAME = TIOCXMTFRAME;
unsigned IOCTL_TIOCPTMGET = TIOCPTMGET;
unsigned IOCTL_TIOCGRANTPT = TIOCGRANTPT;
unsigned IOCTL_TIOCPTSNAME = TIOCPTSNAME;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h
index dc6eb59b2800..4c697b4d107d 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h
@@ -2195,8 +2195,6 @@ extern unsigned IOCTL_TIOCDRAIN;
extern unsigned IOCTL_TIOCGFLAGS;
extern unsigned IOCTL_TIOCSFLAGS;
extern unsigned IOCTL_TIOCDCDTIMESTAMP;
-extern unsigned IOCTL_TIOCRCVFRAME;
-extern unsigned IOCTL_TIOCXMTFRAME;
extern unsigned IOCTL_TIOCPTMGET;
extern unsigned IOCTL_TIOCGRANTPT;
extern unsigned IOCTL_TIOCPTSNAME;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
index fc01498aa228..6d61d276d77e 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
@@ -18,6 +18,7 @@
// depends on _FILE_OFFSET_BITS setting.
// To get this "true" dirent definition, we undefine _FILE_OFFSET_BITS below.
#undef _FILE_OFFSET_BITS
+#undef _TIME_BITS
#endif
// Must go after undef _FILE_OFFSET_BITS.
@@ -176,10 +177,6 @@ typedef struct user_fpregs elf_fpregset_t;
# include "sanitizer_platform_interceptors.h"
# include "sanitizer_platform_limits_posix.h"
-#if SANITIZER_INTERCEPT_CRYPT_R
-#include <crypt.h>
-#endif
-
namespace __sanitizer {
unsigned struct_utsname_sz = sizeof(struct utsname);
unsigned struct_stat_sz = sizeof(struct stat);
@@ -247,7 +244,23 @@ namespace __sanitizer {
unsigned struct_sysinfo_sz = sizeof(struct sysinfo);
unsigned __user_cap_header_struct_sz =
sizeof(struct __user_cap_header_struct);
- unsigned __user_cap_data_struct_sz = sizeof(struct __user_cap_data_struct);
+ unsigned __user_cap_data_struct_sz(void *hdrp) {
+ int u32s = 0;
+ if (hdrp) {
+ switch (((struct __user_cap_header_struct *)hdrp)->version) {
+ case _LINUX_CAPABILITY_VERSION_1:
+ u32s = _LINUX_CAPABILITY_U32S_1;
+ break;
+ case _LINUX_CAPABILITY_VERSION_2:
+ u32s = _LINUX_CAPABILITY_U32S_2;
+ break;
+ case _LINUX_CAPABILITY_VERSION_3:
+ u32s = _LINUX_CAPABILITY_U32S_3;
+ break;
+ }
+ }
+ return sizeof(struct __user_cap_data_struct) * u32s;
+ }
unsigned struct_new_utsname_sz = sizeof(struct new_utsname);
unsigned struct_old_utsname_sz = sizeof(struct old_utsname);
unsigned struct_oldold_utsname_sz = sizeof(struct oldold_utsname);
@@ -283,10 +296,6 @@ namespace __sanitizer {
unsigned struct_statvfs64_sz = sizeof(struct statvfs64);
#endif // SANITIZER_GLIBC
-#if SANITIZER_INTERCEPT_CRYPT_R
- unsigned struct_crypt_data_sz = sizeof(struct crypt_data);
-#endif
-
#if SANITIZER_LINUX && !SANITIZER_ANDROID
unsigned struct_timex_sz = sizeof(struct timex);
unsigned struct_msqid_ds_sz = sizeof(struct msqid_ds);
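
With this change, __user_cap_data_struct_sz is a function rather than a constant: capget/capset operate on a version-dependent number of __user_cap_data_struct entries (_LINUX_CAPABILITY_U32S_1/2/3), so the size must be derived from the version the caller stored in the header. A standalone sketch of the same computation, assuming Linux's <linux/capability.h>; the helper name is illustrative:

#include <linux/capability.h>
#include <stddef.h>

// Bytes of cap data the kernel reads/writes for a given header version.
static size_t cap_data_bytes(const struct __user_cap_header_struct *hdr) {
  switch (hdr->version) {
    case _LINUX_CAPABILITY_VERSION_1:
      return sizeof(struct __user_cap_data_struct) * _LINUX_CAPABILITY_U32S_1;
    case _LINUX_CAPABILITY_VERSION_2:  // deprecated, but still recognized
      return sizeof(struct __user_cap_data_struct) * _LINUX_CAPABILITY_U32S_2;
    case _LINUX_CAPABILITY_VERSION_3:
      return sizeof(struct __user_cap_data_struct) * _LINUX_CAPABILITY_U32S_3;
    default:
      return 0;  // unknown version: assume nothing about the buffer
  }
}
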
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h
index 978a7f3c1654..d2561ee433aa 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h
@@ -18,6 +18,7 @@
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform.h"
+#include "sanitizer_mallinfo.h"
#if SANITIZER_APPLE
#include <sys/cdefs.h>
@@ -135,7 +136,7 @@ struct __sanitizer_perf_event_attr {
extern unsigned struct_epoll_event_sz;
extern unsigned struct_sysinfo_sz;
extern unsigned __user_cap_header_struct_sz;
-extern unsigned __user_cap_data_struct_sz;
+extern unsigned __user_cap_data_struct_sz(void *hdrp);
extern unsigned struct_new_utsname_sz;
extern unsigned struct_old_utsname_sz;
extern unsigned struct_oldold_utsname_sz;
@@ -205,17 +206,7 @@ struct __sanitizer_sem_t {
};
#endif // SANITIZER_LINUX
-#if SANITIZER_ANDROID
-struct __sanitizer_struct_mallinfo {
- uptr v[10];
-};
-#endif
-
#if SANITIZER_LINUX && !SANITIZER_ANDROID
-struct __sanitizer_struct_mallinfo {
- int v[10];
-};
-
extern unsigned struct_ustat_sz;
extern unsigned struct_rlimit64_sz;
extern unsigned struct_statvfs64_sz;
@@ -319,7 +310,6 @@ extern unsigned struct_msqid_ds_sz;
extern unsigned struct_mq_attr_sz;
extern unsigned struct_timex_sz;
extern unsigned struct_statvfs_sz;
-extern unsigned struct_crypt_data_sz;
#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
struct __sanitizer_iovec {
@@ -588,8 +578,13 @@ struct __sanitizer_sigset_t {
#endif
struct __sanitizer_siginfo_pad {
+#if SANITIZER_X32
+ // x32 siginfo_t is aligned to 8 bytes.
+ u64 pad[128 / sizeof(u64)];
+#else
// Require uptr, because siginfo_t is always pointer-size aligned on Linux.
uptr pad[128 / sizeof(uptr)];
+#endif
};
#if SANITIZER_LINUX
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp
index 75968ad33ccf..8d2c5b2cefbe 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp
@@ -57,11 +57,9 @@ void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
void UnmapOrDie(void *addr, uptr size) {
if (!addr || !size) return;
uptr res = internal_munmap(addr, size);
- if (UNLIKELY(internal_iserror(res))) {
- Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
- SanitizerToolName, size, size, addr);
- CHECK("unable to unmap" && 0);
- }
+ int reserrno;
+ if (UNLIKELY(internal_iserror(res, &reserrno)))
+ ReportMunmapFailureAndDie(addr, size, reserrno);
DecreaseTotalMmap(size);
}
@@ -156,6 +154,10 @@ bool MprotectReadOnly(uptr addr, uptr size) {
return 0 == internal_mprotect((void *)addr, size, PROT_READ);
}
+bool MprotectReadWrite(uptr addr, uptr size) {
+ return 0 == internal_mprotect((void *)addr, size, PROT_READ | PROT_WRITE);
+}
+
#if !SANITIZER_APPLE
void MprotectMallocZones(void *addr, int prot) {}
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.h
index f91e26e74b87..c5811dffea94 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.h
@@ -90,7 +90,7 @@ int real_pthread_join(void *th, void **ret);
} \
} // namespace __sanitizer
-int my_pthread_attr_getstack(void *attr, void **addr, uptr *size);
+int internal_pthread_attr_getstack(void *attr, void **addr, uptr *size);
// A routine named real_sigaction() must be implemented by each sanitizer in
// order for internal_sigaction() to bypass interceptors.
@@ -120,6 +120,9 @@ int GetNamedMappingFd(const char *name, uptr size, int *flags);
// alive at least as long as the mapping exists.
void DecorateMapping(uptr addr, uptr size, const char *name);
+# if !SANITIZER_FREEBSD
+# define __sanitizer_dirsiz(dp) ((dp)->d_reclen)
+# endif
} // namespace __sanitizer
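
Together with the FreeBSD definition earlier in this diff, this gives interceptors a single __sanitizer_dirsiz() spelling for how many bytes of a returned dirent are meaningful, which matters where d_reclen does not reflect the entry's actual size (e.g. NFS on FreeBSD). A sketch of the intended use, with WriteRange standing in for a tool's shadow-marking hook (an assumption, not an upstream name):

using namespace __sanitizer;

void WriteRange(const void *p, uptr n);  // stand-in for the tool's shadow hook

// After an intercepted readdir() returns `dp`, mark only the bytes the entry
// actually occupies, as computed by __sanitizer_dirsiz().
void OnReaddirResult(__sanitizer_dirent *dp) {
  if (dp)
    WriteRange(dp, __sanitizer_dirsiz(dp));
}
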
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp
index 46e41c669738..e88e654eec5a 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp
@@ -383,7 +383,7 @@ SANITIZER_WEAK_ATTRIBUTE int
real_pthread_attr_getstack(void *attr, void **addr, size_t *size);
} // extern "C"
-int my_pthread_attr_getstack(void *attr, void **addr, uptr *size) {
+int internal_pthread_attr_getstack(void *attr, void **addr, uptr *size) {
#if !SANITIZER_GO && !SANITIZER_APPLE
if (&real_pthread_attr_getstack)
return real_pthread_attr_getstack((pthread_attr_t *)attr, addr,
@@ -397,7 +397,7 @@ void AdjustStackSize(void *attr_) {
pthread_attr_t *attr = (pthread_attr_t *)attr_;
uptr stackaddr = 0;
uptr stacksize = 0;
- my_pthread_attr_getstack(attr, (void**)&stackaddr, &stacksize);
+ internal_pthread_attr_getstack(attr, (void **)&stackaddr, &stacksize);
// GLibC will return (0 - stacksize) as the stack address in the case when
// stacksize is set, but stackaddr is not.
bool stack_set = (stackaddr != 0) && (stackaddr + stacksize != 0);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps.h
index 19bad158387c..bf3c2c28e32e 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps.h
@@ -65,6 +65,8 @@ class MemoryMappedSegment {
MemoryMappedSegmentData *data_;
};
+struct ImageHeader;
+
class MemoryMappingLayoutBase {
public:
virtual bool Next(MemoryMappedSegment *segment) { UNIMPLEMENTED(); }
@@ -75,10 +77,22 @@ class MemoryMappingLayoutBase {
~MemoryMappingLayoutBase() {}
};
-class MemoryMappingLayout final : public MemoryMappingLayoutBase {
+class MemoryMappingLayout : public MemoryMappingLayoutBase {
public:
explicit MemoryMappingLayout(bool cache_enabled);
+
+// This destructor cannot be virtual, as that would cause operator new()
+// linking failures in the hwasan test cases. However, non-virtual destructors
+// emit warnings in the macOS build, hence those warnings are disabled here.
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wnon-virtual-dtor"
+#endif
~MemoryMappingLayout();
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
+
virtual bool Next(MemoryMappedSegment *segment) override;
virtual bool Error() const override;
virtual void Reset() override;
@@ -90,10 +104,14 @@ class MemoryMappingLayout final : public MemoryMappingLayoutBase {
// Adds all mapped objects into a vector.
void DumpListOfModules(InternalMmapVectorNoCtor<LoadedModule> *modules);
+ protected:
+#if SANITIZER_APPLE
+ virtual const ImageHeader *CurrentImageHeader();
+#endif
+ MemoryMappingLayoutData data_;
+
private:
void LoadFromCache();
-
- MemoryMappingLayoutData data_;
};
// Returns code range for the specified module.
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_mac.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_mac.cpp
index 4b0e67819761..b44e016a0e5b 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_mac.cpp
@@ -250,7 +250,9 @@ static bool NextSegmentLoad(MemoryMappedSegment *segment,
MemoryMappedSegmentData *seg_data,
MemoryMappingLayoutData *layout_data) {
const char *lc = layout_data->current_load_cmd_addr;
+
layout_data->current_load_cmd_addr += ((const load_command *)lc)->cmdsize;
+ layout_data->current_load_cmd_count--;
if (((const load_command *)lc)->cmd == kLCSegment) {
const SegmentCommand* sc = (const SegmentCommand *)lc;
uptr base_virt_addr, addr_mask;
@@ -358,11 +360,16 @@ static bool IsModuleInstrumented(const load_command *first_lc) {
return false;
}
+const ImageHeader *MemoryMappingLayout::CurrentImageHeader() {
+ const mach_header *hdr = (data_.current_image == kDyldImageIdx)
+ ? get_dyld_hdr()
+ : _dyld_get_image_header(data_.current_image);
+ return (const ImageHeader *)hdr;
+}
+
bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
for (; data_.current_image >= kDyldImageIdx; data_.current_image--) {
- const mach_header *hdr = (data_.current_image == kDyldImageIdx)
- ? get_dyld_hdr()
- : _dyld_get_image_header(data_.current_image);
+ const mach_header *hdr = (const mach_header *)CurrentImageHeader();
if (!hdr) continue;
if (data_.current_load_cmd_count < 0) {
// Set up for this image;
@@ -392,7 +399,7 @@ bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
(const load_command *)data_.current_load_cmd_addr);
}
- for (; data_.current_load_cmd_count >= 0; data_.current_load_cmd_count--) {
+ while (data_.current_load_cmd_count > 0) {
switch (data_.current_magic) {
// data_.current_magic may be only one of MH_MAGIC, MH_MAGIC_64.
#ifdef MH_MAGIC_64
@@ -413,6 +420,7 @@ bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
}
// If we get here, no more load_cmd's in this image talk about
// segments. Go on to the next image.
+    data_.current_load_cmd_count = -1;  // Triggers loading of the next image
}
return false;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
index 4aa605485166..460d96ea681b 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
@@ -68,10 +68,6 @@ struct QuarantineBatch {
COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13)); // 8Kb.
-// The callback interface is:
-// void Callback::Recycle(Node *ptr);
-// void *cb.Allocate(uptr size);
-// void cb.Deallocate(void *ptr);
template<typename Callback, typename Node>
class Quarantine {
public:
@@ -94,21 +90,20 @@ class Quarantine {
recycle_mutex_.Init();
}
- uptr GetSize() const { return atomic_load_relaxed(&max_size_); }
- uptr GetCacheSize() const {
- return atomic_load_relaxed(&max_cache_size_);
- }
+ uptr GetMaxSize() const { return atomic_load_relaxed(&max_size_); }
+ uptr GetMaxCacheSize() const { return atomic_load_relaxed(&max_cache_size_); }
void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
- uptr cache_size = GetCacheSize();
- if (cache_size) {
+ uptr max_cache_size = GetMaxCacheSize();
+ if (max_cache_size && size <= GetMaxSize()) {
+ cb.PreQuarantine(ptr);
c->Enqueue(cb, ptr, size);
} else {
- // GetCacheSize() == 0 only when GetSize() == 0 (see Init).
- cb.Recycle(ptr);
+ // GetMaxCacheSize() == 0 only when GetMaxSize() == 0 (see Init).
+ cb.RecyclePassThrough(ptr);
}
// Check cache size anyway to accommodate for runtime cache_size change.
- if (c->Size() > cache_size)
+ if (c->Size() > max_cache_size)
Drain(c, cb);
}
@@ -117,7 +112,7 @@ class Quarantine {
SpinMutexLock l(&cache_mutex_);
cache_.Transfer(c);
}
- if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
+ if (cache_.Size() > GetMaxSize() && recycle_mutex_.TryLock())
Recycle(atomic_load_relaxed(&min_size_), cb);
}
@@ -133,7 +128,7 @@ class Quarantine {
void PrintStats() const {
// It assumes that the world is stopped, just as the allocator's PrintStats.
Printf("Quarantine limits: global: %zdMb; thread local: %zdKb\n",
- GetSize() >> 20, GetCacheSize() >> 10);
+ GetMaxSize() >> 20, GetMaxCacheSize() >> 10);
cache_.PrintStats();
}
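
Alongside the renames, Put() now calls two new Callback members: PreQuarantine() before a block is enqueued, and RecyclePassThrough() when the quarantine is disabled or the block exceeds the quarantine size. A minimal shape of a conforming callback, reconstructed from the calls visible in this hunk; members and semantics beyond those calls are assumptions:

typedef unsigned long uptr;  // stand-in for the sanitizer-internal type
struct Node;                 // the allocator's block type, opaque here

struct ExampleQuarantineCallback {
  void PreQuarantine(Node *ptr) const {}       // runs before Enqueue()
  void Recycle(Node *ptr) const {}             // frees a block drained from quarantine
  void RecyclePassThrough(Node *ptr) const {}  // frees immediately (quarantine off
                                               // or block too large)
  void *Allocate(uptr size) { return nullptr; }  // backing storage for QuarantineBatch
  void Deallocate(void *ptr) {}                // releases batch storage
};
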
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_range.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_range.cpp
new file mode 100644
index 000000000000..68d79f18ac8d
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_range.cpp
@@ -0,0 +1,62 @@
+//===-- sanitizer_range.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_range.h"
+
+#include "sanitizer_common/sanitizer_array_ref.h"
+
+namespace __sanitizer {
+
+void Intersect(ArrayRef<Range> a, ArrayRef<Range> b,
+ InternalMmapVectorNoCtor<Range> &output) {
+ output.clear();
+
+ struct Event {
+ uptr val;
+ s8 diff1;
+ s8 diff2;
+ };
+
+ InternalMmapVector<Event> events;
+ for (const Range &r : a) {
+ CHECK_LE(r.begin, r.end);
+ events.push_back({r.begin, 1, 0});
+ events.push_back({r.end, -1, 0});
+ }
+
+ for (const Range &r : b) {
+ CHECK_LE(r.begin, r.end);
+ events.push_back({r.begin, 0, 1});
+ events.push_back({r.end, 0, -1});
+ }
+
+ Sort(events.data(), events.size(),
+ [](const Event &lh, const Event &rh) { return lh.val < rh.val; });
+
+ uptr start = 0;
+ sptr state1 = 0;
+ sptr state2 = 0;
+ for (const auto &e : events) {
+ if (e.val != start) {
+ DCHECK_GE(state1, 0);
+ DCHECK_GE(state2, 0);
+ if (state1 && state2) {
+ if (!output.empty() && start == output.back().end)
+ output.back().end = e.val;
+ else
+ output.push_back({start, e.val});
+ }
+ start = e.val;
+ }
+
+ state1 += e.diff1;
+ state2 += e.diff2;
+ }
+}
+
+} // namespace __sanitizer
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_range.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_range.h
new file mode 100644
index 000000000000..7c593e171ba2
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_range.h
@@ -0,0 +1,40 @@
+//===-- sanitizer_range.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains Range and related utilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_RANGE_H
+#define SANITIZER_RANGE_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_common/sanitizer_array_ref.h"
+
+namespace __sanitizer {
+
+struct Range {
+ uptr begin;
+ uptr end;
+};
+
+inline bool operator==(const Range &lhs, const Range &rhs) {
+ return lhs.begin == rhs.begin && lhs.end == rhs.end;
+}
+
+inline bool operator!=(const Range &lhs, const Range &rhs) {
+ return !(lhs == rhs);
+}
+
+// Calculates intersection of two sets of regions in O(N log N) time.
+void Intersect(ArrayRef<Range> a, ArrayRef<Range> b,
+ InternalMmapVectorNoCtor<Range> &output);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_RANGE_H
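
A usage sketch for the interface above. It assumes the sanitizer's ArrayRef converts from a C array and that InternalMmapVectorNoCtor is set up via Initialize(), as elsewhere in sanitizer_common; treat the container calls as illustrative:

#include "sanitizer_range.h"

void IntersectExample() {
  using namespace __sanitizer;
  Range a[] = {{0, 10}, {20, 30}};
  Range b[] = {{5, 25}};
  InternalMmapVectorNoCtor<Range> out;
  out.Initialize(0);
  Intersect(a, b, out);
  // out now holds {5, 10} and {20, 25}: the sweep in Intersect() emits a
  // range wherever both input sets are simultaneously open, and merges
  // back-to-back results.
}
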
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_redefine_builtins.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_redefine_builtins.h
new file mode 100644
index 000000000000..6649ff5844f5
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_redefine_builtins.h
@@ -0,0 +1,52 @@
+//===-- sanitizer_redefine_builtins.h ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Redefine builtin functions to use internal versions. This is needed where
+// compiler optimizations end up producing unwanted libcalls!
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_COMMON_NO_REDEFINE_BUILTINS
+#ifndef SANITIZER_REDEFINE_BUILTINS_H
+#define SANITIZER_REDEFINE_BUILTINS_H
+
+// The asm hack only works with GCC and Clang.
+#if !defined(_WIN32)
+
+asm("memcpy = __sanitizer_internal_memcpy");
+asm("memmove = __sanitizer_internal_memmove");
+asm("memset = __sanitizer_internal_memset");
+
+// The builtins should not be redefined in source files that make use of C++
+// standard libraries, in particular where C++STL headers with inline functions
+// are used. The redefinition in such cases would lead to ODR violations.
+//
+// Try to break the build in common cases where builtins shouldn't be redefined.
+namespace std {
+class Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file {
+ Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file(
+ const Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file&) = delete;
+ Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file& operator=(
+ const Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file&) = delete;
+};
+using array = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using atomic = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using function = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using map = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using set = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using shared_ptr = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using string = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using unique_ptr = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using unordered_map = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using unordered_set = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using vector = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+} // namespace std
+
+#endif // !_WIN32
+
+#endif // SANITIZER_REDEFINE_BUILTINS_H
+#endif // SANITIZER_COMMON_NO_REDEFINE_BUILTINS
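
The asm statements in this header are assembler-level symbol aliases: every reference to memcpy/memmove/memset the compiler emits for the including translation unit, including libcalls synthesized by the optimizer, binds to the internal implementations instead. A standalone illustration of the same trick; my_memcpy is a made-up replacement:

#include <stddef.h>

extern "C" void *my_memcpy(void *dst, const void *src, size_t n);

// From here on, any `memcpy` reference this TU emits binds to `my_memcpy`,
// including ones the compiler synthesizes rather than ones written by hand.
asm("memcpy = my_memcpy");

struct Big { char bytes[256]; };

void CopyBig(Big *dst, const Big *src) {
  *dst = *src;  // the aggregate copy typically lowers to a memcpy libcall
}
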
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_signal_interceptors.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_signal_interceptors.inc
index 475e577d9982..94e4e2954a3b 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_signal_interceptors.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_signal_interceptors.inc
@@ -43,6 +43,7 @@ using namespace __sanitizer;
#if SANITIZER_INTERCEPT_BSD_SIGNAL
INTERCEPTOR(uptr, bsd_signal, int signum, uptr handler) {
+ SIGNAL_INTERCEPTOR_ENTER();
if (GetHandleSignalMode(signum) == kHandleSignalExclusive) return 0;
SIGNAL_INTERCEPTOR_SIGNAL_IMPL(bsd_signal, signum, handler);
}
@@ -53,6 +54,7 @@ INTERCEPTOR(uptr, bsd_signal, int signum, uptr handler) {
#if SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION
INTERCEPTOR(uptr, signal, int signum, uptr handler) {
+ SIGNAL_INTERCEPTOR_ENTER();
if (GetHandleSignalMode(signum) == kHandleSignalExclusive)
return (uptr) nullptr;
SIGNAL_INTERCEPTOR_SIGNAL_IMPL(signal, signum, handler);
@@ -61,6 +63,7 @@ INTERCEPTOR(uptr, signal, int signum, uptr handler) {
INTERCEPTOR(int, sigaction_symname, int signum,
const __sanitizer_sigaction *act, __sanitizer_sigaction *oldact) {
+ SIGNAL_INTERCEPTOR_ENTER();
if (GetHandleSignalMode(signum) == kHandleSignalExclusive) {
if (!oldact) return 0;
act = nullptr;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h
index ee996c3e07ea..47aed488c71a 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h
@@ -91,10 +91,10 @@ uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
#elif defined(__sparc__) || defined(__mips__)
return pc - 8;
#elif SANITIZER_RISCV64
- // RV-64 has variable instruciton length...
+ // RV-64 has variable instruction length...
  // The C extension gives us 2-byte instructions
// RV-64 has 4-byte instructions
- // + RISCV architecture allows instructions up to 8 bytes
+ // + RISC-V architecture allows instructions up to 8 bytes
// It seems difficult to figure out the exact instruction length -
// pc - 2 seems like a safe option for the purposes of stack tracing
return pc - 2;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.cpp
index 2d0eccc1602a..45c480d225c7 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.cpp
@@ -11,25 +11,47 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_stacktrace_printer.h"
+
#include "sanitizer_file.h"
+#include "sanitizer_flags.h"
#include "sanitizer_fuchsia.h"
namespace __sanitizer {
-// sanitizer_symbolizer_markup.cpp implements these differently.
-#if !SANITIZER_SYMBOLIZER_MARKUP
-
-static const char *StripFunctionName(const char *function, const char *prefix) {
- if (!function) return nullptr;
- if (!prefix) return function;
- uptr prefix_len = internal_strlen(prefix);
- if (0 == internal_strncmp(function, prefix, prefix_len))
- return function + prefix_len;
+const char *StripFunctionName(const char *function) {
+ if (!common_flags()->demangle)
+ return function;
+ if (!function)
+ return nullptr;
+ auto try_strip = [function](const char *prefix) -> const char * {
+ const uptr prefix_len = internal_strlen(prefix);
+ if (!internal_strncmp(function, prefix, prefix_len))
+ return function + prefix_len;
+ return nullptr;
+ };
+ if (SANITIZER_APPLE) {
+ if (const char *s = try_strip("wrap_"))
+ return s;
+ } else if (SANITIZER_WINDOWS) {
+ if (const char *s = try_strip("__asan_wrap_"))
+ return s;
+ } else {
+ if (const char *s = try_strip("___interceptor_"))
+ return s;
+ if (const char *s = try_strip("__interceptor_"))
+ return s;
+ }
return function;
}
+// sanitizer_symbolizer_markup.cpp implements these differently.
+#if !SANITIZER_SYMBOLIZER_MARKUP
+
static const char *DemangleFunctionName(const char *function) {
- if (!function) return nullptr;
+ if (!common_flags()->demangle)
+ return function;
+ if (!function)
+ return nullptr;
// NetBSD uses indirection for old threading functions for historical reasons
// The mangled names are internal implementation detail and should not be
@@ -121,7 +143,7 @@ static const char kDefaultFormat[] = " #%n %p %F %L";
void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
uptr address, const AddressInfo *info, bool vs_style,
- const char *strip_path_prefix, const char *strip_func_prefix) {
+ const char *strip_path_prefix) {
// info will be null in the case where symbolization is not needed for the
// given format. This ensures that the code below will get a hard failure
// rather than print incorrect information in case RenderNeedsSymbolization
@@ -157,8 +179,8 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
MaybeBuildIdToBuffer(*info, /*PrefixSpace=*/false, buffer);
break;
case 'f':
- buffer->append("%s", DemangleFunctionName(StripFunctionName(
- info->function, strip_func_prefix)));
+ buffer->append("%s",
+ DemangleFunctionName(StripFunctionName(info->function)));
break;
case 'q':
buffer->append("0x%zx", info->function_offset != AddressInfo::kUnknown
@@ -178,8 +200,8 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
case 'F':
// Function name and offset, if file is unknown.
if (info->function) {
- buffer->append("in %s", DemangleFunctionName(StripFunctionName(
- info->function, strip_func_prefix)));
+ buffer->append("in %s",
+ DemangleFunctionName(StripFunctionName(info->function)));
if (!info->file && info->function_offset != AddressInfo::kUnknown)
buffer->append("+0x%zx", info->function_offset);
}
@@ -198,7 +220,9 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
RenderModuleLocation(buffer, info->module, info->module_offset,
info->module_arch, strip_path_prefix);
+#if !SANITIZER_APPLE
MaybeBuildIdToBuffer(*info, /*PrefixSpace=*/true, buffer);
+#endif
} else {
buffer->append("(<unknown module>)");
}
@@ -211,7 +235,9 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
// Always strip the module name for %M.
RenderModuleLocation(buffer, StripModuleName(info->module),
info->module_offset, info->module_arch, "");
+#if !SANITIZER_APPLE
MaybeBuildIdToBuffer(*info, /*PrefixSpace=*/true, buffer);
+#endif
} else {
buffer->append("(%p)", (void *)address);
}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.h
index 96119b2ee9e9..bf2755a2e8f4 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.h
@@ -17,6 +17,9 @@
namespace __sanitizer {
+// Strip interceptor prefixes from function name.
+const char *StripFunctionName(const char *function);
+
// Render the contents of "info" structure, which represents the contents of
// stack frame "frame_no" and appends it to the "buffer". "format" is a
// string with placeholders, which is copied to the output with
@@ -26,8 +29,7 @@ namespace __sanitizer {
// will be turned into
// " frame 10: function foo::bar() at my/file.cc:10"
// You may additionally pass "strip_path_prefix" to strip prefixes of paths to
-// source files and modules, and "strip_func_prefix" to strip prefixes of
-// function names.
+// source files and modules.
// Here's the full list of available placeholders:
// %% - represents a '%' character;
// %n - frame number (copy of frame_no);
@@ -48,8 +50,7 @@ namespace __sanitizer {
// %M - prints module basename and offset, if it is known, or PC.
void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
uptr address, const AddressInfo *info, bool vs_style,
- const char *strip_path_prefix = "",
- const char *strip_func_prefix = "");
+ const char *strip_path_prefix = "");
bool RenderNeedsSymbolization(const char *format);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_mac.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_mac.cpp
index 3ebeac52280a..813616467656 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_mac.cpp
@@ -154,12 +154,10 @@ PtraceRegistersStatus SuspendedThreadsListMac::GetRegistersAndSP(
&reg_count);
if (err != KERN_SUCCESS) {
VReport(1, "Error - unable to get registers for a thread\n");
- // KERN_INVALID_ARGUMENT indicates that either the flavor is invalid,
- // or the thread does not exist. The other possible error case,
// MIG_ARRAY_TOO_LARGE, means that the state is too large, but it's
// still safe to proceed.
- return err == KERN_INVALID_ARGUMENT ? REGISTERS_UNAVAILABLE_FATAL
- : REGISTERS_UNAVAILABLE;
+ return err == MIG_ARRAY_TOO_LARGE ? REGISTERS_UNAVAILABLE
+ : REGISTERS_UNAVAILABLE_FATAL;
}
buffer->resize(RoundUpTo(sizeof(regs), sizeof(uptr)) / sizeof(uptr));
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_suppressions.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_suppressions.cpp
index a674034b8e29..f3818526baab 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_suppressions.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_suppressions.cpp
@@ -86,6 +86,7 @@ void SuppressionContext::ParseFromFile(const char *filename) {
}
Parse(file_contents);
+ UnmapOrDie(file_contents, contents_size);
}
bool SuppressionContext::Match(const char *str, const char *type,
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h
index 29a08386d0b9..3ec4d80105a2 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h
@@ -13,8 +13,8 @@
#ifndef SANITIZER_SYMBOLIZER_INTERNAL_H
#define SANITIZER_SYMBOLIZER_INTERNAL_H
-#include "sanitizer_symbolizer.h"
#include "sanitizer_file.h"
+#include "sanitizer_symbolizer.h"
#include "sanitizer_vector.h"
namespace __sanitizer {
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.cpp
index 27ed222745ec..cc02c77bccdc 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.cpp
@@ -11,11 +11,11 @@
// Libbacktrace implementation of symbolizer parts.
//===----------------------------------------------------------------------===//
-#include "sanitizer_platform.h"
+#include "sanitizer_symbolizer_libbacktrace.h"
#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform.h"
#include "sanitizer_symbolizer.h"
-#include "sanitizer_symbolizer_libbacktrace.h"
#if SANITIZER_LIBBACKTRACE
# include "backtrace-supported.h"
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp
index f4f2a036a1e7..a9c958b2d100 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp
@@ -14,16 +14,16 @@
#include "sanitizer_platform.h"
#if SANITIZER_APPLE
-#include "sanitizer_allocator_internal.h"
-#include "sanitizer_mac.h"
-#include "sanitizer_symbolizer_mac.h"
+# include <dlfcn.h>
+# include <errno.h>
+# include <stdlib.h>
+# include <sys/wait.h>
+# include <unistd.h>
+# include <util.h>
-#include <dlfcn.h>
-#include <errno.h>
-#include <stdlib.h>
-#include <sys/wait.h>
-#include <unistd.h>
-#include <util.h>
+# include "sanitizer_allocator_internal.h"
+# include "sanitizer_mac.h"
+# include "sanitizer_symbolizer_mac.h"
namespace __sanitizer {
@@ -163,7 +163,7 @@ bool AtosSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
uptr start_address = AddressInfo::kUnknown;
if (!ParseCommandOutput(buf, addr, &stack->info.function, &stack->info.module,
&stack->info.file, &line, &start_address)) {
- process_ = nullptr;
+ Report("WARNING: atos failed to symbolize address \"0x%zx\"\n", addr);
return false;
}
stack->info.line = (int)line;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp
index 1ec0c5cad7a2..c8c10de10d03 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp
@@ -91,7 +91,7 @@ bool RenderNeedsSymbolization(const char *format) { return false; }
// We don't support the stack_trace_format flag at all.
void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
uptr address, const AddressInfo *info, bool vs_style,
- const char *strip_path_prefix, const char *strip_func_prefix) {
+ const char *strip_path_prefix) {
CHECK(!RenderNeedsSymbolization(format));
buffer->append(kFormatFrame, frame_no, address);
}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp
index b223f6cd01e3..1a5e38faea88 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp
@@ -13,25 +13,25 @@
#include "sanitizer_platform.h"
#if SANITIZER_POSIX
-#include "sanitizer_allocator_internal.h"
-#include "sanitizer_common.h"
-#include "sanitizer_file.h"
-#include "sanitizer_flags.h"
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_linux.h"
-#include "sanitizer_placement_new.h"
-#include "sanitizer_posix.h"
-#include "sanitizer_procmaps.h"
-#include "sanitizer_symbolizer_internal.h"
-#include "sanitizer_symbolizer_libbacktrace.h"
-#include "sanitizer_symbolizer_mac.h"
-
-#include <dlfcn.h> // for dlsym()
-#include <errno.h>
-#include <stdint.h>
-#include <stdlib.h>
-#include <sys/wait.h>
-#include <unistd.h>
+# include <dlfcn.h> // for dlsym()
+# include <errno.h>
+# include <stdint.h>
+# include <stdlib.h>
+# include <sys/wait.h>
+# include <unistd.h>
+
+# include "sanitizer_allocator_internal.h"
+# include "sanitizer_common.h"
+# include "sanitizer_file.h"
+# include "sanitizer_flags.h"
+# include "sanitizer_internal_defs.h"
+# include "sanitizer_linux.h"
+# include "sanitizer_placement_new.h"
+# include "sanitizer_posix.h"
+# include "sanitizer_procmaps.h"
+# include "sanitizer_symbolizer_internal.h"
+# include "sanitizer_symbolizer_libbacktrace.h"
+# include "sanitizer_symbolizer_mac.h"
// C++ demangling function, as required by Itanium C++ ABI. This is weak,
// because we do not require a C++ ABI library to be linked to a program
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp
index d5c028e3640d..73915715c5ba 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp
@@ -107,8 +107,7 @@ void ReportMmapWriteExec(int prot, int flags) {
stack->Reset();
uptr top = 0;
uptr bottom = 0;
- GET_CALLER_PC_BP_SP;
- (void)sp;
+ GET_CALLER_PC_BP;
bool fast = common_flags()->fast_unwind_on_fatal;
if (StackTrace::WillUseFastUnwind(fast)) {
GetThreadStackTopAndBottom(false, &top, &bottom);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp
index ac2afe42e269..ae2d3be19ef3 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp
@@ -14,8 +14,8 @@
#include "sanitizer_platform.h"
#if SANITIZER_WINDOWS
-#include "sanitizer_dbghelp.h"
-#include "sanitizer_symbolizer_internal.h"
+# include "sanitizer_dbghelp.h"
+# include "sanitizer_symbolizer_internal.h"
namespace __sanitizer {
@@ -292,15 +292,15 @@ static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
const char *path =
user_path ? user_path : FindPathToBinary("llvm-symbolizer.exe");
if (path) {
- VReport(2, "Using llvm-symbolizer at %spath: %s\n",
- user_path ? "user-specified " : "", path);
- list->push_back(new(*allocator) LLVMSymbolizer(path, allocator));
- } else {
if (user_path && user_path[0] == '\0') {
VReport(2, "External symbolizer is explicitly disabled.\n");
} else {
- VReport(2, "External symbolizer is not present.\n");
+ VReport(2, "Using llvm-symbolizer at %spath: %s\n",
+ user_path ? "user-specified " : "", path);
+ list->push_back(new (*allocator) LLVMSymbolizer(path, allocator));
}
+ } else {
+ VReport(2, "External symbolizer is not present.\n");
}
// Add the dbghelp based symbolizer.
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_arg_retval.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_arg_retval.cpp
new file mode 100644
index 000000000000..bddb28521408
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_arg_retval.cpp
@@ -0,0 +1,94 @@
+//===-- sanitizer_thread_arg_retval.cpp -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizer tools.
+//
+// Tracks thread arguments and return value for leak checking.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_thread_arg_retval.h"
+
+#include "sanitizer_placement_new.h"
+
+namespace __sanitizer {
+
+void ThreadArgRetval::CreateLocked(uptr thread, bool detached,
+ const Args& args) {
+ CheckLocked();
+ Data& t = data_[thread];
+ t = {};
+ t.gen = gen_++;
+ t.detached = detached;
+ t.args = args;
+}
+
+ThreadArgRetval::Args ThreadArgRetval::GetArgs(uptr thread) const {
+ __sanitizer::Lock lock(&mtx_);
+ auto t = data_.find(thread);
+ CHECK(t);
+ if (t->second.done)
+ return {};
+ return t->second.args;
+}
+
+void ThreadArgRetval::Finish(uptr thread, void* retval) {
+ __sanitizer::Lock lock(&mtx_);
+ auto t = data_.find(thread);
+ if (!t)
+ return;
+ if (t->second.detached) {
+    // The retval of a detached thread cannot be retrieved.
+ data_.erase(t);
+ return;
+ }
+ t->second.done = true;
+ t->second.args.arg_retval = retval;
+}
+
+u32 ThreadArgRetval::BeforeJoin(uptr thread) const {
+ __sanitizer::Lock lock(&mtx_);
+ auto t = data_.find(thread);
+ CHECK(t);
+ CHECK(!t->second.detached);
+ return t->second.gen;
+}
+
+void ThreadArgRetval::AfterJoin(uptr thread, u32 gen) {
+ __sanitizer::Lock lock(&mtx_);
+ auto t = data_.find(thread);
+ if (!t || gen != t->second.gen) {
+ // Thread was reused and erased by any other event.
+ return;
+ }
+ CHECK(!t->second.detached);
+ data_.erase(t);
+}
+
+void ThreadArgRetval::DetachLocked(uptr thread) {
+ CheckLocked();
+ auto t = data_.find(thread);
+ CHECK(t);
+ CHECK(!t->second.detached);
+ if (t->second.done) {
+    // We can't retrieve the retval after a detached thread has finished.
+ data_.erase(t);
+ return;
+ }
+ t->second.detached = true;
+}
+
+void ThreadArgRetval::GetAllPtrsLocked(InternalMmapVector<uptr>* ptrs) {
+ CheckLocked();
+ CHECK(ptrs);
+ data_.forEach([&](DenseMap<uptr, Data>::value_type& kv) -> bool {
+ ptrs->push_back((uptr)kv.second.args.arg_retval);
+ return true;
+ });
+}
+
+} // namespace __sanitizer
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_arg_retval.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_arg_retval.h
new file mode 100644
index 000000000000..c77021beb67d
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_arg_retval.h
@@ -0,0 +1,116 @@
+//===-- sanitizer_thread_arg_retval.h ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizer tools.
+//
+// Tracks thread arguments and return value for leak checking.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_THREAD_ARG_RETVAL_H
+#define SANITIZER_THREAD_ARG_RETVAL_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_dense_map.h"
+#include "sanitizer_list.h"
+#include "sanitizer_mutex.h"
+
+namespace __sanitizer {
+
+// The primary goal of this class is to keep the arg and retval pointers alive
+// for leak checking. However, it can also be used to pass those pointers into
+// the wrappers used by interceptors. The difference from ThreadRegistry/
+// ThreadList is that this class keeps the data until the detach or join, as
+// an exited thread can still be joined to retrieve its retval.
+// ThreadRegistry/ThreadList can discard exited threads immediately.
+class SANITIZER_MUTEX ThreadArgRetval {
+ public:
+ struct Args {
+ void* (*routine)(void*);
+ void* arg_retval; // Either arg or retval.
+ };
+ void Lock() SANITIZER_ACQUIRE() { mtx_.Lock(); }
+ void CheckLocked() const SANITIZER_CHECK_LOCKED() { mtx_.CheckLocked(); }
+ void Unlock() SANITIZER_RELEASE() { mtx_.Unlock(); }
+
+  // Wraps pthread_create or similar. We need to keep the object locked to
+  // prevent the child thread from proceeding without the thread handle.
+ template <typename CreateFn /* returns thread id on success, or 0 */>
+ void Create(bool detached, const Args& args, const CreateFn& fn) {
+    // No need to track detached threads with no args, but we do so anyway as
+    // it's inexpensive and avoids edge cases.
+ __sanitizer::Lock lock(&mtx_);
+ if (uptr thread = fn())
+ CreateLocked(thread, detached, args);
+ }
+
+ // Returns thread arg and routine.
+ Args GetArgs(uptr thread) const;
+
+  // Marks the thread as done and stores the retval, or removes the entry if
+  // the thread is detached. Should be called by the thread itself.
+ void Finish(uptr thread, void* retval);
+
+  // Marks the thread as detached, or removes the entry if it is already done.
+ template <typename DetachFn /* returns true on success */>
+ void Detach(uptr thread, const DetachFn& fn) {
+ // Lock to prevent re-use of the thread between fn() and DetachLocked()
+ // calls.
+ __sanitizer::Lock lock(&mtx_);
+ if (fn())
+ DetachLocked(thread);
+ }
+
+ // Joins the thread.
+ template <typename JoinFn /* returns true on success */>
+ void Join(uptr thread, const JoinFn& fn) {
+    // Remember the internal id of the thread to prevent re-use of the thread
+    // between the fn() and AfterJoin() calls. Holding the lock across JoinFn,
+    // as Detach() does across DetachFn, could cause a deadlock.
+ auto gen = BeforeJoin(thread);
+ if (fn())
+ AfterJoin(thread, gen);
+ }
+
+  // Returns all arg and retval pointers which are considered alive.
+ void GetAllPtrsLocked(InternalMmapVector<uptr>* ptrs);
+
+ uptr size() const {
+ __sanitizer::Lock lock(&mtx_);
+ return data_.size();
+ }
+
+  // FIXME: Add fork support. Expected users of the class are sloppy with forks
+  // anyway. We should likely lock/unlock the object to avoid deadlocks, and
+  // erase all but the current thread, so we can detect a leaked arg or retval
+  // in the child process.
+
+  // FIXME: Add cancellation support. Currently, if a thread is canceled, the
+  // class keeps the pointers alive forever, missing leaks caused by
+  // cancellation.
+
+ private:
+ struct Data {
+ Args args;
+ u32 gen; // Avoid collision if thread id re-used.
+ bool detached;
+ bool done;
+ };
+
+ void CreateLocked(uptr thread, bool detached, const Args& args);
+ u32 BeforeJoin(uptr thread) const;
+ void AfterJoin(uptr thread, u32 gen);
+ void DetachLocked(uptr thread);
+
+ mutable Mutex mtx_;
+
+ DenseMap<uptr, Data> data_;
+ u32 gen_ = 0;
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_THREAD_ARG_RETVAL_H
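
A sketch of how a pthread_create interceptor might drive this class. The registry instance and the real_/wrapper helpers are hypothetical stand-ins; the Create() contract (the object stays locked across creation, and returning 0 records nothing) follows the header above:

using namespace __sanitizer;

static ThreadArgRetval thread_arg_retval;  // hypothetical per-tool instance

extern "C" int real_pthread_create(void **th, const void *attr,
                                   void *(*fn)(void *), void *arg);  // assumed
static void *thread_wrapper(void *arg);  // assumed: runs the routine, then Finish()

int InterceptedPthreadCreate(void **th, const void *attr,
                             void *(*routine)(void *), void *arg) {
  int res = 0;
  thread_arg_retval.Create(/*detached=*/false, {routine, arg}, [&]() -> uptr {
    res = real_pthread_create(th, attr, &thread_wrapper, arg);
    return res == 0 ? (uptr)*th : 0;  // 0 tells Create() to record nothing
  });
  return res;
}
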
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.cpp
index 278f6defca95..741e0731c415 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.cpp
@@ -335,7 +335,7 @@ void ThreadRegistry::QuarantinePush(ThreadContextBase *tctx) {
ThreadContextBase *ThreadRegistry::QuarantinePop() {
if (invalid_threads_.size() == 0)
- return 0;
+ return nullptr;
ThreadContextBase *tctx = invalid_threads_.front();
invalid_threads_.pop_front();
return tctx;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp
index b13e2dc9e332..252979f1c2ba 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp
@@ -12,6 +12,7 @@
#include "sanitizer_tls_get_addr.h"
+#include "sanitizer_allocator_interface.h"
#include "sanitizer_atomic.h"
#include "sanitizer_flags.h"
#include "sanitizer_platform_interceptors.h"
@@ -26,13 +27,6 @@ struct TlsGetAddrParam {
uptr offset;
};
-// Glibc starting from 2.19 allocates tls using __signal_safe_memalign,
-// which has such header.
-struct Glibc_2_19_tls_header {
- uptr size;
- uptr start;
-};
-
// This must be static TLS
__attribute__((tls_model("initial-exec")))
static __thread DTLS dtls;
@@ -108,6 +102,14 @@ static const uptr kDtvOffset = 0x800;
static const uptr kDtvOffset = 0;
#endif
+extern "C" {
+SANITIZER_WEAK_ATTRIBUTE
+uptr __sanitizer_get_allocated_size(const void *p);
+
+SANITIZER_WEAK_ATTRIBUTE
+const void *__sanitizer_get_allocated_begin(const void *p);
+}
+
DTLS::DTV *DTLS_on_tls_get_addr(void *arg_void, void *res,
uptr static_tls_begin, uptr static_tls_end) {
if (!common_flags()->intercept_tls_get_addr) return 0;
@@ -125,19 +127,18 @@ DTLS::DTV *DTLS_on_tls_get_addr(void *arg_void, void *res,
atomic_load(&number_of_live_dtls, memory_order_relaxed));
if (dtls.last_memalign_ptr == tls_beg) {
tls_size = dtls.last_memalign_size;
- VReport(2, "__tls_get_addr: glibc <=2.18 suspected; tls={0x%zx,0x%zx}\n",
+ VReport(2, "__tls_get_addr: glibc <=2.24 suspected; tls={0x%zx,0x%zx}\n",
tls_beg, tls_size);
} else if (tls_beg >= static_tls_begin && tls_beg < static_tls_end) {
// This is the static TLS block which was initialized / unpoisoned at thread
// creation.
VReport(2, "__tls_get_addr: static tls: 0x%zx\n", tls_beg);
tls_size = 0;
- } else if ((tls_beg % 4096) == sizeof(Glibc_2_19_tls_header)) {
- // We may want to check gnu_get_libc_version().
- Glibc_2_19_tls_header *header = (Glibc_2_19_tls_header *)tls_beg - 1;
- tls_size = header->size;
- tls_beg = header->start;
- VReport(2, "__tls_get_addr: glibc >=2.19 suspected; tls={0x%zx 0x%zx}\n",
+ } else if (const void *start =
+ __sanitizer_get_allocated_begin((void *)tls_beg)) {
+ tls_beg = (uptr)start;
+ tls_size = __sanitizer_get_allocated_size(start);
+ VReport(2, "__tls_get_addr: glibc >=2.25 suspected; tls={0x%zx,0x%zx}\n",
tls_beg, tls_size);
} else {
VReport(2, "__tls_get_addr: Can't guess glibc version\n");
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.h
index a599c0bbc75c..0ddab61deb10 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.h
@@ -12,16 +12,24 @@
// the lack of interface that would tell us about the Dynamic TLS (DTLS).
// https://sourceware.org/bugzilla/show_bug.cgi?id=16291
//
-// The matters get worse because the glibc implementation changed between
-// 2.18 and 2.19:
-// https://groups.google.com/forum/#!topic/address-sanitizer/BfwYD8HMxTM
-//
-// Before 2.19, every DTLS chunk is allocated with __libc_memalign,
+// Before 2.25: every DTLS chunk is allocated with __libc_memalign,
// which we intercept and thus know where the DTLS is.
-// Since 2.19, DTLS chunks are allocated with __signal_safe_memalign,
-// which is an internal function that wraps a mmap call, neither of which
-// we can intercept. Luckily, __signal_safe_memalign has a simple parseable
-// header which we can use.
+//
+// Since 2.25: DTLS chunks are allocated with malloc. We could co-opt
+// the malloc interceptor to keep track of the last allocation, similar
+// to how we handle __libc_memalign; however, this adds some overhead
+// (since malloc, unlike __libc_memalign, is commonly called), and
+// requires care to avoid false negatives for LeakSanitizer.
+// Instead, we rely on our internal allocators - which keep track of all
+// their allocations - to determine if an address points to a malloc
+// allocation.
+//
+// There exists a since-deprecated version of Google's internal glibc fork
+// that used __signal_safe_memalign. DTLS_on_tls_get_addr relied on a
+// heuristic check (is the allocation 16 bytes from the start of a page
+// boundary?), which was sometimes erroneous:
+// https://bugs.chromium.org/p/chromium/issues/detail?id=1275223#c15
+// Since that check has no practical use anymore, we have removed it.
//
//===----------------------------------------------------------------------===//
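
The detection strategy this comment describes reduces to asking the tool's allocator which chunk contains the pointer __tls_get_addr returned. A minimal sketch using the two interface functions the .cpp above declares; the helper name and the uptr typedef are illustrative here:

typedef unsigned long uptr;  // stand-in for the sanitizer-internal type

extern "C" {
const void *__sanitizer_get_allocated_begin(const void *p);
uptr __sanitizer_get_allocated_size(const void *p);
}

// Recover the full DTLS chunk backing a pointer returned by __tls_get_addr.
static bool GetMallocBackedTls(uptr tls_beg, uptr *beg, uptr *size) {
  if (const void *start = __sanitizer_get_allocated_begin((void *)tls_beg)) {
    *beg = (uptr)start;                             // chunk start
    *size = __sanitizer_get_allocated_size(start);  // full chunk size
    return true;  // glibc >= 2.25: the chunk came from malloc
  }
  return false;  // static TLS or an unknown allocation scheme
}
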
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
index e0568c9b62d5..06e496523eea 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
@@ -362,6 +362,11 @@ bool MprotectReadOnly(uptr addr, uptr size) {
return VirtualProtect((LPVOID)addr, size, PAGE_READONLY, &old_protection);
}
+bool MprotectReadWrite(uptr addr, uptr size) {
+ DWORD old_protection;
+ return VirtualProtect((LPVOID)addr, size, PAGE_READWRITE, &old_protection);
+}
+
void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
uptr beg_aligned = RoundDownTo(beg, GetPageSizeCached()),
end_aligned = RoundDownTo(end, GetPageSizeCached());
@@ -718,13 +723,24 @@ void ListOfModules::fallbackInit() { clear(); }
// atexit() as soon as it is ready for use (i.e. after .CRT$XIC initializers).
InternalMmapVectorNoCtor<void (*)(void)> atexit_functions;
-int Atexit(void (*function)(void)) {
+static int queueAtexit(void (*function)(void)) {
atexit_functions.push_back(function);
return 0;
}
+// If Atexit() is called after RunAtexit() has already run, it needs to be
+// able to call atexit() directly. Here we use a function pointer to switch
+// out its behaviour.
+// An example of where this is needed is the asan_dynamic runtime on MinGW-w64.
+// In this environment, __asan_init is called during the global constructor
+// phase, well after the .CRT$XID initializer has run.
+static int (*volatile queueOrCallAtExit)(void (*)(void)) = &queueAtexit;
+
+int Atexit(void (*function)(void)) { return queueOrCallAtExit(function); }
+
static int RunAtexit() {
TraceLoggingUnregister(g_asan_provider);
+ queueOrCallAtExit = &atexit;
int ret = 0;
for (uptr i = 0; i < atexit_functions.size(); ++i) {
ret |= atexit(atexit_functions[i]);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win_dll_thunk.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win_dll_thunk.h
index 48c73c4c98ad..639d91a2edae 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win_dll_thunk.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win_dll_thunk.h
@@ -84,7 +84,7 @@ extern "C" int __dll_thunk_init();
// which isn't a big deal.
#define INTERCEPT_LIBRARY_FUNCTION(name) \
extern "C" void name(); \
- INTERCEPT_OR_DIE(WRAPPER_NAME(name), name)
+ INTERCEPT_OR_DIE(STRINGIFY(WRAP(name)), name)
// Use these macros for functions that could be called before __dll_thunk_init()
// is executed and don't lead to errors if defined (free, malloc, etc).
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp
index bca12d42f90a..2bc0444050f8 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp
@@ -41,6 +41,16 @@ static llvm::symbolize::PrinterConfig getDefaultPrinterConfig() {
return Config;
}
+static llvm::symbolize::ErrorHandler symbolize_error_handler(
+ llvm::raw_string_ostream &OS) {
+ return
+ [&](const llvm::ErrorInfoBase &ErrorInfo, llvm::StringRef ErrorBanner) {
+ OS << ErrorBanner;
+ ErrorInfo.log(OS);
+ OS << '\n';
+ };
+}
+
namespace __sanitizer {
int internal_snprintf(char *buffer, uintptr_t length, const char *format,
...);
@@ -57,8 +67,8 @@ bool __sanitizer_symbolize_code(const char *ModuleName, uint64_t ModuleOffset,
llvm::raw_string_ostream OS(Result);
llvm::symbolize::PrinterConfig Config = getDefaultPrinterConfig();
llvm::symbolize::Request Request{ModuleName, ModuleOffset};
- auto Printer =
- std::make_unique<llvm::symbolize::LLVMPrinter>(OS, OS, Config);
+ auto Printer = std::make_unique<llvm::symbolize::LLVMPrinter>(
+ OS, symbolize_error_handler(OS), Config);
  // TODO: it is necessary to set the proper SectionIndex here.
  // object::SectionedAddress::UndefSection works only for absolute addresses.
@@ -86,8 +96,8 @@ bool __sanitizer_symbolize_data(const char *ModuleName, uint64_t ModuleOffset,
llvm::symbolize::PrinterConfig Config = getDefaultPrinterConfig();
llvm::raw_string_ostream OS(Result);
llvm::symbolize::Request Request{ModuleName, ModuleOffset};
- auto Printer =
- std::make_unique<llvm::symbolize::LLVMPrinter>(OS, OS, Config);
+ auto Printer = std::make_unique<llvm::symbolize::LLVMPrinter>(
+ OS, symbolize_error_handler(OS), Config);
// TODO: it is necessary to set a proper SectionIndex here.
// object::SectionedAddress::UndefSection works for only absolute addresses.
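
The helper added above returns a lambda that captures the result stream by reference, so symbolization errors land in the same string as successful output. A generic sketch of the pattern (plain C++, not the LLVM API itself):

#include <functional>
#include <sstream>
#include <string>

using ErrorHandler =
    std::function<void(const std::string &Banner, const std::string &Msg)>;

// The returned lambda only references OS; the caller keeps OS alive for the
// duration of the request, exactly as the symbolizer functions above do.
static ErrorHandler makeErrorHandler(std::ostringstream &OS) {
  return [&OS](const std::string &Banner, const std::string &Msg) {
    OS << Banner << Msg << '\n';
  };
}
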
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt
index d923b1f9d474..509e3f19fe38 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt
@@ -50,6 +50,7 @@ _exit U
abort U
access U
aligned_alloc U
+arc4random U
bcmp U
calloc U
catclose U
@@ -74,6 +75,7 @@ fopen U
fork U
fprintf U
fputc U
+fputwc U
free U
freelocale U
fwrite U
@@ -86,6 +88,7 @@ getpwuid U
getrlimit U
gettimeofday U
getuid U
+getwc U
ioctl U
isalnum U
isalpha U
@@ -111,6 +114,7 @@ memcpy U
memmove U
memset U
mkdir U
+modf U
munmap U
newlocale U
perror U
@@ -154,12 +158,15 @@ strncmp U
strncpy U
strrchr U
strsep U
+strtod U
strtod_l U
strtof_l U
strtok_r U
strtol U
strtold_l U
+strtoll U
strtoll_l U
+strtoull U
strtoull_l U
syscall U
sysconf U
@@ -168,6 +175,7 @@ tolower U
toupper U
uname U
ungetc U
+ungetwc U
unlink U
uselocale U
vasprintf U
@@ -179,6 +187,7 @@ waitpid U
wcrtomb U
wcslen U
wcsnrtombs U
+wmemchr U
wmemcpy U
wmemmove U
wmemset U
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config.h
index 63eb325c9b87..315a04f7635d 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config.h
@@ -19,6 +19,22 @@
#include "tsd_exclusive.h"
#include "tsd_shared.h"
+// To use a custom configuration, define `SCUDO_USE_CUSTOM_CONFIG` and alias
+// the `Config` like:
+//
+// namespace scudo {
+// // The instance of Scudo will be instantiated with `Config`.
+// typedef CustomConfig Config;
+// // Alias it as the default configuration so the tests run with this config.
+// typedef CustomConfig DefaultConfig;
+// } // namespace scudo
+//
+// Put these in the header `custom_scudo_config.h`; the custom configuration
+// will then be used and all the tests can still be run.
+#ifdef SCUDO_USE_CUSTOM_CONFIG
+#include "custom_scudo_config.h"
+#endif
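
A minimal sketch of what such a custom_scudo_config.h could look like under the new nested layout (the build must define SCUDO_USE_CUSTOM_CONFIG; all values below are illustrative placeholders, not recommendations):

namespace scudo {

struct CustomConfig {
  static const bool MaySupportMemoryTagging = false;
  template <class A> using TSDRegistryT = TSDRegistryExT<A>;

  struct Primary {
    using SizeClassMap = DefaultSizeClassMap;
    static const uptr RegionSizeLog = 30U;
    static const uptr GroupSizeLog = 20U;
    typedef uptr CompactPtrT;
    static const uptr CompactPtrScale = 0;
    static const bool EnableRandomOffset = true;
    static const uptr MapSizeIncrement = 1UL << 18;
    static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
  };
  template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;

  struct Secondary {
    template <typename Config> using CacheT = MapAllocatorNoCache<Config>;
  };
  template <typename Config> using SecondaryT = MapAllocator<Config>;
};

typedef CustomConfig Config;
typedef CustomConfig DefaultConfig;

} // namespace scudo
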
+
namespace scudo {
// The combined allocator uses a structure as a template argument that
@@ -26,185 +42,262 @@ namespace scudo {
// allocator.
//
// struct ExampleConfig {
-// // SizeClasMmap to use with the Primary.
-// using SizeClassMap = DefaultSizeClassMap;
// // Indicates possible support for Memory Tagging.
// static const bool MaySupportMemoryTagging = false;
-// // Defines the Primary allocator to use.
-// typedef SizeClassAllocator64<ExampleConfig> Primary;
-// // Log2 of the size of a size class region, as used by the Primary.
-// static const uptr PrimaryRegionSizeLog = 30U;
-// // Log2 of the size of block group, as used by the Primary. Each group
-// // contains a range of memory addresses, blocks in the range will belong to
-// // the same group. In general, single region may have 1 or 2MB group size.
-// // Multiple regions will have the group size equal to the region size
-// // because the region size is usually smaller than 1 MB.
-// // Smaller value gives fine-grained control of memory usage but the trade
-// // off is that it may take longer time of deallocation.
-// static const uptr PrimaryGroupSizeLog = 20U;
-// // Defines the type and scale of a compact pointer. A compact pointer can
-// // be understood as the offset of a pointer within the region it belongs
-// // to, in increments of a power-of-2 scale.
-// // eg: Ptr = Base + (CompactPtr << Scale).
-// typedef u32 PrimaryCompactPtrT;
-// static const uptr PrimaryCompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
-// // Indicates support for offsetting the start of a region by
-// // a random number of pages. Only used with primary64.
-// static const bool PrimaryEnableRandomOffset = true;
-// // Call map for user memory with at least this size. Only used with
-// // primary64.
-// static const uptr PrimaryMapSizeIncrement = 1UL << 18;
-// // Defines the minimal & maximal release interval that can be set.
-// static const s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
-// static const s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
-// // Defines the type of cache used by the Secondary. Some additional
-// // configuration entries can be necessary depending on the Cache.
-// typedef MapAllocatorNoCache SecondaryCache;
+//
// // Thread-Specific Data Registry used, shared or exclusive.
// template <class A> using TSDRegistryT = TSDRegistrySharedT<A, 8U, 4U>;
+//
+// struct Primary {
+// // SizeClassMap to use with the Primary.
+// using SizeClassMap = DefaultSizeClassMap;
+//
+// // Log2 of the size of a size class region, as used by the Primary.
+// static const uptr RegionSizeLog = 30U;
+//
+// // Log2 of the size of a block group, as used by the Primary. Each group
+// // contains a range of memory addresses; blocks in the range belong to
+// // the same group. In general, a single region may have a 1 or 2 MB group
+// // size. Multiple regions will have a group size equal to the region size
+// // because the region size is usually smaller than 1 MB.
+// // A smaller value gives finer-grained control of memory usage, but the
+// // trade-off is that deallocation may take longer.
+// static const uptr GroupSizeLog = 20U;
+//
+// // Defines the type and scale of a compact pointer. A compact pointer can
+// // be understood as the offset of a pointer within the region it belongs
+// // to, in increments of a power-of-2 scale.
+// // e.g., Ptr = Base + (CompactPtr << Scale).
+// typedef u32 CompactPtrT;
+// static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+//
+// // Indicates support for offsetting the start of a region by
+// // a random number of pages. Only used with primary64.
+// static const bool EnableRandomOffset = true;
+//
+// // Call map for user memory with at least this size. Only used with
+// // primary64.
+// static const uptr MapSizeIncrement = 1UL << 18;
+//
+// // Defines the minimal & maximal release interval that can be set.
+// static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
+// static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+// };
+// // Defines the type of Primary allocator to use.
+// template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
+//
+// // Defines the type of cache used by the Secondary. Some additional
+// // configuration entries can be necessary depending on the Cache.
+// struct Secondary {
+// struct Cache {
+// static const u32 EntriesArraySize = 32U;
+// static const u32 QuarantineSize = 0U;
+// static const u32 DefaultMaxEntriesCount = 32U;
+// static const uptr DefaultMaxEntrySize = 1UL << 19;
+// static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
+// static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+// };
+// // Defines the type of Secondary Cache to use.
+// template <typename Config> using CacheT = MapAllocatorCache<Config>;
+// };
+// // Defines the type of Secondary allocator to use.
+// template <typename Config> using SecondaryT = MapAllocator<Config>;
// };
-// Default configurations for various platforms.
+#ifndef SCUDO_USE_CUSTOM_CONFIG
+// Default configurations for various platforms. Note this is only enabled when
+// there's no custom configuration in the build system.
struct DefaultConfig {
- using SizeClassMap = DefaultSizeClassMap;
static const bool MaySupportMemoryTagging = true;
+ template <class A> using TSDRegistryT = TSDRegistryExT<A>; // Exclusive
+ struct Primary {
+ using SizeClassMap = DefaultSizeClassMap;
#if SCUDO_CAN_USE_PRIMARY64
- typedef SizeClassAllocator64<DefaultConfig> Primary;
- static const uptr PrimaryRegionSizeLog = 32U;
- static const uptr PrimaryGroupSizeLog = 21U;
- typedef uptr PrimaryCompactPtrT;
- static const uptr PrimaryCompactPtrScale = 0;
- static const bool PrimaryEnableRandomOffset = true;
- static const uptr PrimaryMapSizeIncrement = 1UL << 18;
+ static const uptr RegionSizeLog = 32U;
+ static const uptr GroupSizeLog = 21U;
+ typedef uptr CompactPtrT;
+ static const uptr CompactPtrScale = 0;
+ static const bool EnableRandomOffset = true;
+ static const uptr MapSizeIncrement = 1UL << 18;
#else
- typedef SizeClassAllocator32<DefaultConfig> Primary;
- static const uptr PrimaryRegionSizeLog = 19U;
- static const uptr PrimaryGroupSizeLog = 19U;
- typedef uptr PrimaryCompactPtrT;
+ static const uptr RegionSizeLog = 19U;
+ static const uptr GroupSizeLog = 19U;
+ typedef uptr CompactPtrT;
+#endif
+ static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
+ static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+ };
+#if SCUDO_CAN_USE_PRIMARY64
+ template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
+#else
+ template <typename Config> using PrimaryT = SizeClassAllocator32<Config>;
#endif
- static const s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
- static const s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
- typedef MapAllocatorCache<DefaultConfig> SecondaryCache;
- static const u32 SecondaryCacheEntriesArraySize = 32U;
- static const u32 SecondaryCacheQuarantineSize = 0U;
- static const u32 SecondaryCacheDefaultMaxEntriesCount = 32U;
- static const uptr SecondaryCacheDefaultMaxEntrySize = 1UL << 19;
- static const s32 SecondaryCacheMinReleaseToOsIntervalMs = INT32_MIN;
- static const s32 SecondaryCacheMaxReleaseToOsIntervalMs = INT32_MAX;
+ struct Secondary {
+ struct Cache {
+ static const u32 EntriesArraySize = 32U;
+ static const u32 QuarantineSize = 0U;
+ static const u32 DefaultMaxEntriesCount = 32U;
+ static const uptr DefaultMaxEntrySize = 1UL << 19;
+ static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
+ static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+ };
+ template <typename Config> using CacheT = MapAllocatorCache<Config>;
+ };
- template <class A> using TSDRegistryT = TSDRegistryExT<A>; // Exclusive
+ template <typename Config> using SecondaryT = MapAllocator<Config>;
};
+
+#endif // SCUDO_USE_CUSTOM_CONFIG
+
struct AndroidConfig {
- using SizeClassMap = AndroidSizeClassMap;
static const bool MaySupportMemoryTagging = true;
+ template <class A>
+ using TSDRegistryT = TSDRegistrySharedT<A, 8U, 2U>; // Shared, max 8 TSDs.
+ struct Primary {
+ using SizeClassMap = AndroidSizeClassMap;
#if SCUDO_CAN_USE_PRIMARY64
- typedef SizeClassAllocator64<AndroidConfig> Primary;
- static const uptr PrimaryRegionSizeLog = 28U;
- typedef u32 PrimaryCompactPtrT;
- static const uptr PrimaryCompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
- static const uptr PrimaryGroupSizeLog = 20U;
- static const bool PrimaryEnableRandomOffset = true;
- static const uptr PrimaryMapSizeIncrement = 1UL << 18;
+ static const uptr RegionSizeLog = 28U;
+ typedef u32 CompactPtrT;
+ static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+ static const uptr GroupSizeLog = 20U;
+ static const bool EnableRandomOffset = true;
+ static const uptr MapSizeIncrement = 1UL << 18;
#else
- typedef SizeClassAllocator32<AndroidConfig> Primary;
- static const uptr PrimaryRegionSizeLog = 18U;
- static const uptr PrimaryGroupSizeLog = 18U;
- typedef uptr PrimaryCompactPtrT;
+ static const uptr RegionSizeLog = 18U;
+ static const uptr GroupSizeLog = 18U;
+ typedef uptr CompactPtrT;
+#endif
+ static const s32 MinReleaseToOsIntervalMs = 1000;
+ static const s32 MaxReleaseToOsIntervalMs = 1000;
+ };
+#if SCUDO_CAN_USE_PRIMARY64
+ template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
+#else
+ template <typename Config> using PrimaryT = SizeClassAllocator32<Config>;
#endif
- static const s32 PrimaryMinReleaseToOsIntervalMs = 1000;
- static const s32 PrimaryMaxReleaseToOsIntervalMs = 1000;
- typedef MapAllocatorCache<AndroidConfig> SecondaryCache;
- static const u32 SecondaryCacheEntriesArraySize = 256U;
- static const u32 SecondaryCacheQuarantineSize = 32U;
- static const u32 SecondaryCacheDefaultMaxEntriesCount = 32U;
- static const uptr SecondaryCacheDefaultMaxEntrySize = 2UL << 20;
- static const s32 SecondaryCacheMinReleaseToOsIntervalMs = 0;
- static const s32 SecondaryCacheMaxReleaseToOsIntervalMs = 1000;
+ struct Secondary {
+ struct Cache {
+ static const u32 EntriesArraySize = 256U;
+ static const u32 QuarantineSize = 32U;
+ static const u32 DefaultMaxEntriesCount = 32U;
+ static const uptr DefaultMaxEntrySize = 2UL << 20;
+ static const s32 MinReleaseToOsIntervalMs = 0;
+ static const s32 MaxReleaseToOsIntervalMs = 1000;
+ };
+ template <typename Config> using CacheT = MapAllocatorCache<Config>;
+ };
- template <class A>
- using TSDRegistryT = TSDRegistrySharedT<A, 8U, 2U>; // Shared, max 8 TSDs.
+ template <typename Config> using SecondaryT = MapAllocator<Config>;
};
struct AndroidSvelteConfig {
- using SizeClassMap = SvelteSizeClassMap;
static const bool MaySupportMemoryTagging = false;
+ template <class A>
+ using TSDRegistryT = TSDRegistrySharedT<A, 2U, 1U>; // Shared, max 2 TSDs.
+ struct Primary {
+ using SizeClassMap = SvelteSizeClassMap;
#if SCUDO_CAN_USE_PRIMARY64
- typedef SizeClassAllocator64<AndroidSvelteConfig> Primary;
- static const uptr PrimaryRegionSizeLog = 27U;
- typedef u32 PrimaryCompactPtrT;
- static const uptr PrimaryCompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
- static const uptr PrimaryGroupSizeLog = 18U;
- static const bool PrimaryEnableRandomOffset = true;
- static const uptr PrimaryMapSizeIncrement = 1UL << 18;
+ static const uptr RegionSizeLog = 27U;
+ typedef u32 CompactPtrT;
+ static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+ static const uptr GroupSizeLog = 18U;
+ static const bool EnableRandomOffset = true;
+ static const uptr MapSizeIncrement = 1UL << 18;
#else
- typedef SizeClassAllocator32<AndroidSvelteConfig> Primary;
- static const uptr PrimaryRegionSizeLog = 16U;
- static const uptr PrimaryGroupSizeLog = 16U;
- typedef uptr PrimaryCompactPtrT;
+ static const uptr RegionSizeLog = 16U;
+ static const uptr GroupSizeLog = 16U;
+ typedef uptr CompactPtrT;
#endif
- static const s32 PrimaryMinReleaseToOsIntervalMs = 1000;
- static const s32 PrimaryMaxReleaseToOsIntervalMs = 1000;
+ static const s32 MinReleaseToOsIntervalMs = 1000;
+ static const s32 MaxReleaseToOsIntervalMs = 1000;
+ };
- typedef MapAllocatorCache<AndroidSvelteConfig> SecondaryCache;
- static const u32 SecondaryCacheEntriesArraySize = 16U;
- static const u32 SecondaryCacheQuarantineSize = 32U;
- static const u32 SecondaryCacheDefaultMaxEntriesCount = 4U;
- static const uptr SecondaryCacheDefaultMaxEntrySize = 1UL << 18;
- static const s32 SecondaryCacheMinReleaseToOsIntervalMs = 0;
- static const s32 SecondaryCacheMaxReleaseToOsIntervalMs = 0;
+#if SCUDO_CAN_USE_PRIMARY64
+ template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
+#else
+ template <typename Config> using PrimaryT = SizeClassAllocator32<Config>;
+#endif
- template <class A>
- using TSDRegistryT = TSDRegistrySharedT<A, 2U, 1U>; // Shared, max 2 TSDs.
+ struct Secondary {
+ struct Cache {
+ static const u32 EntriesArraySize = 16U;
+ static const u32 QuarantineSize = 32U;
+ static const u32 DefaultMaxEntriesCount = 4U;
+ static const uptr DefaultMaxEntrySize = 1UL << 18;
+ static const s32 MinReleaseToOsIntervalMs = 0;
+ static const s32 MaxReleaseToOsIntervalMs = 0;
+ };
+ template <typename Config> using CacheT = MapAllocatorCache<Config>;
+ };
+
+ template <typename Config> using SecondaryT = MapAllocator<Config>;
};
#if SCUDO_CAN_USE_PRIMARY64
struct FuchsiaConfig {
- using SizeClassMap = FuchsiaSizeClassMap;
static const bool MaySupportMemoryTagging = false;
-
- typedef SizeClassAllocator64<FuchsiaConfig> Primary;
- static const uptr PrimaryRegionSizeLog = 30U;
- static const uptr PrimaryGroupSizeLog = 21U;
- typedef u32 PrimaryCompactPtrT;
- static const bool PrimaryEnableRandomOffset = true;
- static const uptr PrimaryMapSizeIncrement = 1UL << 18;
- static const uptr PrimaryCompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
- static const s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
- static const s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
-
- typedef MapAllocatorNoCache SecondaryCache;
template <class A>
using TSDRegistryT = TSDRegistrySharedT<A, 8U, 4U>; // Shared, max 8 TSDs.
+
+ struct Primary {
+ using SizeClassMap = FuchsiaSizeClassMap;
+#if SCUDO_RISCV64
+ // Support 39-bit VMA for riscv-64
+ static const uptr RegionSizeLog = 28U;
+ static const uptr GroupSizeLog = 19U;
+#else
+ static const uptr RegionSizeLog = 30U;
+ static const uptr GroupSizeLog = 21U;
+#endif
+ typedef u32 CompactPtrT;
+ static const bool EnableRandomOffset = true;
+ static const uptr MapSizeIncrement = 1UL << 18;
+ static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+ static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
+ static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+ };
+ template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
+
+ struct Secondary {
+ template <typename Config> using CacheT = MapAllocatorNoCache<Config>;
+ };
+ template <typename Config> using SecondaryT = MapAllocator<Config>;
};
struct TrustyConfig {
- using SizeClassMap = TrustySizeClassMap;
- static const bool MaySupportMemoryTagging = false;
-
- typedef SizeClassAllocator64<TrustyConfig> Primary;
- // Some apps have 1 page of heap total so small regions are necessary.
- static const uptr PrimaryRegionSizeLog = 10U;
- static const uptr PrimaryGroupSizeLog = 10U;
- typedef u32 PrimaryCompactPtrT;
- static const bool PrimaryEnableRandomOffset = false;
- // Trusty is extremely memory-constrained so minimally round up map calls.
- static const uptr PrimaryMapSizeIncrement = 1UL << 4;
- static const uptr PrimaryCompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
- static const s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
- static const s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
-
- typedef MapAllocatorNoCache SecondaryCache;
+ static const bool MaySupportMemoryTagging = true;
template <class A>
using TSDRegistryT = TSDRegistrySharedT<A, 1U, 1U>; // Shared, max 1 TSD.
+
+ struct Primary {
+ using SizeClassMap = TrustySizeClassMap;
+ static const uptr RegionSizeLog = 28U;
+ static const uptr GroupSizeLog = 20U;
+ typedef u32 CompactPtrT;
+ static const bool EnableRandomOffset = false;
+ static const uptr MapSizeIncrement = 1UL << 12;
+ static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+ static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
+ static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+ };
+ template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
+
+ struct Secondary {
+ template <typename Config> using CacheT = MapAllocatorNoCache<Config>;
+ };
+
+ template <typename Config> using SecondaryT = MapAllocator<Config>;
};
#endif
+#ifndef SCUDO_USE_CUSTOM_CONFIG
+
#if SCUDO_ANDROID
typedef AndroidConfig Config;
#elif SCUDO_FUCHSIA
@@ -215,6 +308,8 @@ typedef TrustyConfig Config;
typedef DefaultConfig Config;
#endif
+#endif // SCUDO_USE_CUSTOM_CONFIG
+
} // namespace scudo
#endif // SCUDO_ALLOCATOR_CONFIG_H_
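
For reference, a sketch of how the nested layout is consumed on the other side (the combined.h hunks further below do this for real): the allocator instantiates the Primary through the member alias template, and the Primary reads its knobs from the nested Config::Primary struct.

// Illustrative only; see the combined.h diff below for the real consumer.
template <typename Config> class IllustrativePrimary {
  using SizeClassMap = typename Config::Primary::SizeClassMap;
  static const uptr RegionSizeLog = Config::Primary::RegionSizeLog;
};

template <typename Config> class IllustrativeAllocator {
  // PrimaryT is a member alias template taking the full Config.
  using PrimaryT = typename Config::template PrimaryT<Config>;
};
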
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/chunk.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/chunk.h
index 88bada8c2d19..32874a8df642 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/chunk.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/chunk.h
@@ -85,7 +85,7 @@ constexpr uptr OffsetMask = (1UL << 16) - 1;
constexpr uptr ChecksumMask = (1UL << 16) - 1;
constexpr uptr getHeaderSize() {
- return roundUpTo(sizeof(PackedHeader), 1U << SCUDO_MIN_ALIGNMENT_LOG);
+ return roundUp(sizeof(PackedHeader), 1U << SCUDO_MIN_ALIGNMENT_LOG);
}
inline AtomicPackedHeader *getAtomicHeader(void *Ptr) {
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/combined.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/combined.h
index b6d74ab451b6..b17acc71f892 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/combined.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/combined.h
@@ -43,13 +43,14 @@ extern "C" size_t android_unsafe_frame_pointer_chase(scudo::uptr *buf,
namespace scudo {
-template <class Params, void (*PostInitCallback)(void) = EmptyCallback>
+template <class Config, void (*PostInitCallback)(void) = EmptyCallback>
class Allocator {
public:
- using PrimaryT = typename Params::Primary;
+ using PrimaryT = typename Config::template PrimaryT<Config>;
+ using SecondaryT = typename Config::template SecondaryT<Config>;
using CacheT = typename PrimaryT::CacheT;
- typedef Allocator<Params, PostInitCallback> ThisT;
- typedef typename Params::template TSDRegistryT<ThisT> TSDRegistryT;
+ typedef Allocator<Config, PostInitCallback> ThisT;
+ typedef typename Config::template TSDRegistryT<ThisT> TSDRegistryT;
void callPostInitCallback() {
pthread_once(&PostInitNonce, PostInitCallback);
@@ -71,7 +72,7 @@ public:
NewHeader.State = Chunk::State::Available;
Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
- if (allocatorSupportsMemoryTagging<Params>())
+ if (allocatorSupportsMemoryTagging<Config>())
Ptr = untagPointer(Ptr);
void *BlockBegin = Allocator::getBlockBegin(Ptr, &NewHeader);
Cache.deallocate(NewHeader.ClassId, BlockBegin);
@@ -98,7 +99,7 @@ public:
// Reset tag to 0 as this chunk may have been previously used for a tagged
// user allocation.
- if (UNLIKELY(useMemoryTagging<Params>(Allocator.Primary.Options.load())))
+ if (UNLIKELY(useMemoryTagging<Config>(Allocator.Primary.Options.load())))
storeTags(reinterpret_cast<uptr>(Ptr),
reinterpret_cast<uptr>(Ptr) + sizeof(QuarantineBatch));
@@ -162,10 +163,9 @@ public:
Primary.Options.set(OptionBit::DeallocTypeMismatch);
if (getFlags()->delete_size_mismatch)
Primary.Options.set(OptionBit::DeleteSizeMismatch);
- if (allocatorSupportsMemoryTagging<Params>() &&
+ if (allocatorSupportsMemoryTagging<Config>() &&
systemSupportsMemoryTagging())
Primary.Options.set(OptionBit::UseMemoryTagging);
- Primary.Options.set(OptionBit::UseOddEvenTags);
QuarantineMaxChunkSize =
static_cast<u32>(getFlags()->quarantine_max_chunk_size);
@@ -178,7 +178,7 @@ public:
static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
- initRingBuffer();
+ mapAndInitializeRingBuffer();
}
// Initialize the embedded GWP-ASan instance. Requires the main allocator to
@@ -228,6 +228,7 @@ public:
}
void unmapTestOnly() {
+ unmapRingBuffer();
TSDRegistry.unmapTestOnly(this);
Primary.unmapTestOnly();
Secondary.unmapTestOnly();
@@ -239,6 +240,7 @@ public:
}
TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
+ QuarantineT *getQuarantine() { return &Quarantine; }
// The Cache must be provided zero-initialized.
void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); }
@@ -249,13 +251,20 @@ public:
// - unlinking the local stats from the global ones (destroying the cache does
// the last two items).
void commitBack(TSD<ThisT> *TSD) {
- Quarantine.drain(&TSD->QuarantineCache,
- QuarantineCallback(*this, TSD->Cache));
- TSD->Cache.destroy(&Stats);
+ Quarantine.drain(&TSD->getQuarantineCache(),
+ QuarantineCallback(*this, TSD->getCache()));
+ TSD->getCache().destroy(&Stats);
}
+ void drainCache(TSD<ThisT> *TSD) {
+ Quarantine.drainAndRecycle(&TSD->getQuarantineCache(),
+ QuarantineCallback(*this, TSD->getCache()));
+ TSD->getCache().drain();
+ }
+ void drainCaches() { TSDRegistry.drainCaches(this); }
+
ALWAYS_INLINE void *getHeaderTaggedPointer(void *Ptr) {
- if (!allocatorSupportsMemoryTagging<Params>())
+ if (!allocatorSupportsMemoryTagging<Config>())
return Ptr;
auto UntaggedPtr = untagPointer(Ptr);
if (UntaggedPtr != Ptr)
@@ -267,7 +276,7 @@ public:
}
ALWAYS_INLINE uptr addHeaderTag(uptr Ptr) {
- if (!allocatorSupportsMemoryTagging<Params>())
+ if (!allocatorSupportsMemoryTagging<Config>())
return Ptr;
return addFixedTag(Ptr, 2);
}
@@ -305,7 +314,7 @@ public:
NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
uptr Alignment = MinAlignment,
- bool ZeroContents = false) {
+ bool ZeroContents = false) NO_THREAD_SAFETY_ANALYSIS {
initThreadMaybe();
const Options Options = Primary.Options.load();
@@ -342,7 +351,7 @@ public:
// to be sure that there will be an address in the block that will satisfy
// the alignment.
const uptr NeededSize =
- roundUpTo(Size, MinAlignment) +
+ roundUp(Size, MinAlignment) +
((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());
// Takes care of extravagantly large sizes as well as integer overflows.
@@ -375,23 +384,24 @@ public:
DCHECK_NE(ClassId, 0U);
bool UnlockRequired;
auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
- Block = TSD->Cache.allocate(ClassId);
+ Block = TSD->getCache().allocate(ClassId);
// If the allocation failed, the most likely reason with a 32-bit primary
// is the region being full. In that event, retry in each successively
// larger class until it fits. If it fails to fit in the largest class,
// fallback to the Secondary.
if (UNLIKELY(!Block)) {
while (ClassId < SizeClassMap::LargestClassId && !Block)
- Block = TSD->Cache.allocate(++ClassId);
+ Block = TSD->getCache().allocate(++ClassId);
if (!Block)
ClassId = 0;
}
if (UnlockRequired)
TSD->unlock();
}
- if (UNLIKELY(ClassId == 0))
+ if (UNLIKELY(ClassId == 0)) {
Block = Secondary.allocate(Options, Size, Alignment, &SecondaryBlockEnd,
FillContents);
+ }
if (UNLIKELY(!Block)) {
if (Options.get(OptionBit::MayReturnNull))
@@ -401,7 +411,7 @@ public:
const uptr BlockUptr = reinterpret_cast<uptr>(Block);
const uptr UnalignedUserPtr = BlockUptr + Chunk::getHeaderSize();
- const uptr UserPtr = roundUpTo(UnalignedUserPtr, Alignment);
+ const uptr UserPtr = roundUp(UnalignedUserPtr, Alignment);
void *Ptr = reinterpret_cast<void *>(UserPtr);
void *TaggedPtr = Ptr;
@@ -417,7 +427,7 @@ public:
//
// When memory tagging is enabled, zeroing the contents is done as part of
// setting the tag.
- if (UNLIKELY(useMemoryTagging<Params>(Options))) {
+ if (UNLIKELY(useMemoryTagging<Config>(Options))) {
uptr PrevUserPtr;
Chunk::UnpackedHeader Header;
const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId);
@@ -460,7 +470,7 @@ public:
PrevUserPtr == UserPtr &&
(TaggedUserPtr = loadTag(UserPtr)) != UserPtr) {
uptr PrevEnd = TaggedUserPtr + Header.SizeOrUnusedBytes;
- const uptr NextPage = roundUpTo(TaggedUserPtr, getPageSizeCached());
+ const uptr NextPage = roundUp(TaggedUserPtr, getPageSizeCached());
if (NextPage < PrevEnd && loadTag(NextPage) != NextPage)
PrevEnd = NextPage;
TaggedPtr = reinterpret_cast<void *>(TaggedUserPtr);
@@ -473,8 +483,8 @@ public:
// was freed, it would not have been retagged and thus zeroed, and
// therefore it needs to be zeroed now.
memset(TaggedPtr, 0,
- Min(Size, roundUpTo(PrevEnd - TaggedUserPtr,
- archMemoryTagGranuleSize())));
+ Min(Size, roundUp(PrevEnd - TaggedUserPtr,
+ archMemoryTagGranuleSize())));
} else if (Size) {
// Clear any stack metadata that may have previously been stored in
// the chunk data.
@@ -499,7 +509,7 @@ public:
} else {
Block = addHeaderTag(Block);
Ptr = addHeaderTag(Ptr);
- if (UNLIKELY(useMemoryTagging<Params>(Options))) {
+ if (UNLIKELY(useMemoryTagging<Config>(Options))) {
storeTags(reinterpret_cast<uptr>(Block), reinterpret_cast<uptr>(Ptr));
storeSecondaryAllocationStackMaybe(Options, Ptr, Size);
}
@@ -666,7 +676,7 @@ public:
(reinterpret_cast<uptr>(OldTaggedPtr) + NewSize)) &
Chunk::SizeOrUnusedBytesMask;
Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
- if (UNLIKELY(useMemoryTagging<Params>(Options))) {
+ if (UNLIKELY(useMemoryTagging<Config>(Options))) {
if (ClassId) {
resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
@@ -687,6 +697,8 @@ public:
void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
if (LIKELY(NewPtr)) {
memcpy(NewPtr, OldTaggedPtr, Min(NewSize, OldSize));
+ if (UNLIKELY(&__scudo_deallocate_hook))
+ __scudo_deallocate_hook(OldTaggedPtr);
quarantineOrDeallocateChunk(Options, OldTaggedPtr, &OldHeader, OldSize);
}
return NewPtr;
@@ -695,7 +707,7 @@ public:
// TODO(kostyak): disable() is currently best-effort. There are some small
// windows of time when an allocation could still succeed after
// this function finishes. We will revisit that later.
- void disable() {
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
initThreadMaybe();
#ifdef GWP_ASAN_HOOKS
GuardedAlloc.disable();
@@ -707,7 +719,7 @@ public:
Secondary.disable();
}
- void enable() {
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
initThreadMaybe();
Secondary.enable();
Primary.enable();
@@ -726,9 +738,7 @@ public:
// sizing purposes.
uptr getStats(char *Buffer, uptr Size) {
ScopedString Str;
- disable();
const uptr Length = getStats(&Str) + 1;
- enable();
if (Length < Size)
Size = Length;
if (Buffer && Size) {
@@ -740,15 +750,15 @@ public:
void printStats() {
ScopedString Str;
- disable();
getStats(&Str);
- enable();
Str.output();
}
- void releaseToOS() {
+ void releaseToOS(ReleaseToOS ReleaseType) {
initThreadMaybe();
- Primary.releaseToOS();
+ if (ReleaseType == ReleaseToOS::ForceAll)
+ drainCaches();
+ Primary.releaseToOS(ReleaseType);
Secondary.releaseToOS();
}
@@ -762,7 +772,7 @@ public:
Base = untagPointer(Base);
const uptr From = Base;
const uptr To = Base + Size;
- bool MayHaveTaggedPrimary = allocatorSupportsMemoryTagging<Params>() &&
+ bool MayHaveTaggedPrimary = allocatorSupportsMemoryTagging<Config>() &&
systemSupportsMemoryTagging();
auto Lambda = [this, From, To, MayHaveTaggedPrimary, Callback,
Arg](uptr Block) {
@@ -784,9 +794,9 @@ public:
}
if (Header.State == Chunk::State::Allocated) {
uptr TaggedChunk = Chunk;
- if (allocatorSupportsMemoryTagging<Params>())
+ if (allocatorSupportsMemoryTagging<Config>())
TaggedChunk = untagPointer(TaggedChunk);
- if (useMemoryTagging<Params>(Primary.Options.load()))
+ if (useMemoryTagging<Config>(Primary.Options.load()))
TaggedChunk = loadTag(Chunk);
Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
Arg);
@@ -885,7 +895,7 @@ public:
}
bool useMemoryTaggingTestOnly() const {
- return useMemoryTagging<Params>(Primary.Options.load());
+ return useMemoryTagging<Config>(Primary.Options.load());
}
void disableMemoryTagging() {
// If we haven't been initialized yet, we need to initialize now in order to
@@ -895,7 +905,7 @@ public:
// callback), which may cause mappings to be created with memory tagging
// enabled.
TSDRegistry.initOnceMaybe(this);
- if (allocatorSupportsMemoryTagging<Params>()) {
+ if (allocatorSupportsMemoryTagging<Config>()) {
Secondary.disableMemoryTagging();
Primary.Options.clear(OptionBit::UseMemoryTagging);
}
@@ -979,7 +989,7 @@ public:
const char *Memory, const char *MemoryTags,
uintptr_t MemoryAddr, size_t MemorySize) {
*ErrorInfo = {};
- if (!allocatorSupportsMemoryTagging<Params>() ||
+ if (!allocatorSupportsMemoryTagging<Config>() ||
MemoryAddr + MemorySize < MemoryAddr)
return;
@@ -1007,7 +1017,6 @@ public:
}
private:
- using SecondaryT = MapAllocator<Params>;
typedef typename PrimaryT::SizeClassMap SizeClassMap;
static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
@@ -1019,7 +1028,7 @@ private:
static_assert(MinAlignment >= sizeof(Chunk::PackedHeader),
"Minimal alignment must at least cover a chunk header.");
- static_assert(!allocatorSupportsMemoryTagging<Params>() ||
+ static_assert(!allocatorSupportsMemoryTagging<Config>() ||
MinAlignment >= archMemoryTagGranuleSize(),
"");
@@ -1119,14 +1128,15 @@ private:
const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
if (LIKELY(Header->ClassId))
return SizeOrUnusedBytes;
- if (allocatorSupportsMemoryTagging<Params>())
+ if (allocatorSupportsMemoryTagging<Config>())
Ptr = untagPointer(const_cast<void *>(Ptr));
return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
}
void quarantineOrDeallocateChunk(Options Options, void *TaggedPtr,
- Chunk::UnpackedHeader *Header, uptr Size) {
+ Chunk::UnpackedHeader *Header,
+ uptr Size) NO_THREAD_SAFETY_ANALYSIS {
void *Ptr = getHeaderTaggedPointer(TaggedPtr);
Chunk::UnpackedHeader NewHeader = *Header;
// If the quarantine is disabled, the actual size of a chunk is 0 or larger
@@ -1139,12 +1149,12 @@ private:
NewHeader.State = Chunk::State::Available;
else
NewHeader.State = Chunk::State::Quarantined;
- NewHeader.OriginOrWasZeroed = useMemoryTagging<Params>(Options) &&
+ NewHeader.OriginOrWasZeroed = useMemoryTagging<Config>(Options) &&
NewHeader.ClassId &&
!TSDRegistry.getDisableMemInit();
Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
- if (UNLIKELY(useMemoryTagging<Params>(Options))) {
+ if (UNLIKELY(useMemoryTagging<Config>(Options))) {
u8 PrevTag = extractTag(reinterpret_cast<uptr>(TaggedPtr));
storeDeallocationStackMaybe(Options, Ptr, PrevTag, Size);
if (NewHeader.ClassId) {
@@ -1161,18 +1171,25 @@ private:
}
}
if (BypassQuarantine) {
- if (allocatorSupportsMemoryTagging<Params>())
+ if (allocatorSupportsMemoryTagging<Config>())
Ptr = untagPointer(Ptr);
void *BlockBegin = getBlockBegin(Ptr, &NewHeader);
const uptr ClassId = NewHeader.ClassId;
if (LIKELY(ClassId)) {
bool UnlockRequired;
auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
- TSD->Cache.deallocate(ClassId, BlockBegin);
+ const bool CacheDrained =
+ TSD->getCache().deallocate(ClassId, BlockBegin);
if (UnlockRequired)
TSD->unlock();
+ // When we have drained some blocks back to the Primary from the TSD,
+ // that implies we may have the chance to release some pages as well.
+ // Note that in order not to block other threads' access to the TSD, we
+ // release the TSD first and then try the page release.
+ if (CacheDrained)
+ Primary.tryReleaseToOS(ClassId, ReleaseToOS::Normal);
} else {
- if (UNLIKELY(useMemoryTagging<Params>(Options)))
+ if (UNLIKELY(useMemoryTagging<Config>(Options)))
storeTags(reinterpret_cast<uptr>(BlockBegin),
reinterpret_cast<uptr>(Ptr));
Secondary.deallocate(Options, BlockBegin);
@@ -1180,8 +1197,8 @@ private:
} else {
bool UnlockRequired;
auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
- Quarantine.put(&TSD->QuarantineCache,
- QuarantineCallback(*this, TSD->Cache), Ptr, Size);
+ Quarantine.put(&TSD->getQuarantineCache(),
+ QuarantineCallback(*this, TSD->getCache()), Ptr, Size);
if (UnlockRequired)
TSD->unlock();
}
@@ -1241,15 +1258,15 @@ private:
void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr NewSize,
uptr BlockEnd) {
- uptr RoundOldPtr = roundUpTo(OldPtr, archMemoryTagGranuleSize());
+ uptr RoundOldPtr = roundUp(OldPtr, archMemoryTagGranuleSize());
uptr RoundNewPtr;
if (RoundOldPtr >= NewPtr) {
// If the allocation is shrinking we just need to set the tag past the end
// of the allocation to 0. See explanation in storeEndMarker() above.
- RoundNewPtr = roundUpTo(NewPtr, archMemoryTagGranuleSize());
+ RoundNewPtr = roundUp(NewPtr, archMemoryTagGranuleSize());
} else {
// Set the memory tag of the region
- // [RoundOldPtr, roundUpTo(NewPtr, archMemoryTagGranuleSize()))
+ // [RoundOldPtr, roundUp(NewPtr, archMemoryTagGranuleSize()))
// to the pointer tag stored in OldPtr.
RoundNewPtr = storeTags(RoundOldPtr, NewPtr);
}
@@ -1483,6 +1500,7 @@ private:
Primary.getStats(Str);
Secondary.getStats(Str);
Quarantine.getStats(Str);
+ TSDRegistry.getStats(Str);
return Str->length();
}
@@ -1497,16 +1515,16 @@ private:
&RawRingBuffer[sizeof(AllocationRingBuffer)])[N];
}
- void initRingBuffer() {
+ void mapAndInitializeRingBuffer() {
u32 AllocationRingBufferSize =
static_cast<u32>(getFlags()->allocation_ring_buffer_size);
if (AllocationRingBufferSize < 1)
return;
- MapPlatformData Data = {};
RawRingBuffer = static_cast<char *>(
map(/*Addr=*/nullptr,
- roundUpTo(ringBufferSizeInBytes(AllocationRingBufferSize), getPageSizeCached()),
- "AllocatorRingBuffer", /*Flags=*/0, &Data));
+ roundUp(ringBufferSizeInBytes(AllocationRingBufferSize),
+ getPageSizeCached()),
+ "AllocatorRingBuffer"));
auto *RingBuffer = reinterpret_cast<AllocationRingBuffer *>(RawRingBuffer);
RingBuffer->Size = AllocationRingBufferSize;
static_assert(sizeof(AllocationRingBuffer) %
@@ -1515,6 +1533,11 @@ private:
"invalid alignment");
}
+ void unmapRingBuffer() {
+ unmap(RawRingBuffer, roundUp(getRingBufferSize(), getPageSizeCached()));
+ RawRingBuffer = nullptr;
+ }
+
static constexpr size_t ringBufferSizeInBytes(u32 AllocationRingBufferSize) {
return sizeof(AllocationRingBuffer) +
AllocationRingBufferSize *
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.h
index 2ec9a630359a..82e6cf4aee61 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.h
@@ -27,17 +27,31 @@ template <class Dest, class Source> inline Dest bit_cast(const Source &S) {
return D;
}
-inline constexpr uptr roundUpTo(uptr X, uptr Boundary) {
+inline constexpr bool isPowerOfTwo(uptr X) { return (X & (X - 1)) == 0; }
+
+inline constexpr uptr roundUp(uptr X, uptr Boundary) {
+ DCHECK(isPowerOfTwo(Boundary));
return (X + Boundary - 1) & ~(Boundary - 1);
}
+inline constexpr uptr roundUpSlow(uptr X, uptr Boundary) {
+ return ((X + Boundary - 1) / Boundary) * Boundary;
+}
-inline constexpr uptr roundDownTo(uptr X, uptr Boundary) {
+inline constexpr uptr roundDown(uptr X, uptr Boundary) {
+ DCHECK(isPowerOfTwo(Boundary));
return X & ~(Boundary - 1);
}
+inline constexpr uptr roundDownSlow(uptr X, uptr Boundary) {
+ return (X / Boundary) * Boundary;
+}
inline constexpr bool isAligned(uptr X, uptr Alignment) {
+ DCHECK(isPowerOfTwo(Alignment));
return (X & (Alignment - 1)) == 0;
}
+inline constexpr bool isAlignedSlow(uptr X, uptr Alignment) {
+ return X % Alignment == 0;
+}
template <class T> constexpr T Min(T A, T B) { return A < B ? A : B; }
@@ -49,14 +63,12 @@ template <class T> void Swap(T &A, T &B) {
B = Tmp;
}
-inline bool isPowerOfTwo(uptr X) { return (X & (X - 1)) == 0; }
-
inline uptr getMostSignificantSetBitIndex(uptr X) {
DCHECK_NE(X, 0U);
return SCUDO_WORDSIZE - 1U - static_cast<uptr>(__builtin_clzl(X));
}
-inline uptr roundUpToPowerOfTwo(uptr Size) {
+inline uptr roundUpPowerOfTwo(uptr Size) {
DCHECK(Size);
if (isPowerOfTwo(Size))
return Size;
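
A few illustrative checks of the renamed helpers: the masked fast paths require a power-of-two boundary, while the *Slow variants accept any boundary. In a release build the DCHECKs compile away, so these evaluate as constant expressions.

static_assert(roundUp(17, 16) == 32, "");
static_assert(roundDown(17, 16) == 16, "");
static_assert(roundUpSlow(17, 12) == 24, "");   // 12 is not a power of two
static_assert(roundDownSlow(17, 12) == 12, "");
static_assert(isPowerOfTwo(64) && !isPowerOfTwo(48), "");
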
@@ -135,6 +147,9 @@ const char *getEnv(const char *Name);
uptr GetRSS();
u64 getMonotonicTime();
+// Gets the time faster but with less accuracy. May fall back to
+// getMonotonicTime() if no fast version is available.
+u64 getMonotonicTimeFast();
u32 getThreadID();
@@ -200,6 +215,13 @@ enum class Option : u8 {
MaxTSDsCount, // Number of usable TSDs for the shared registry.
};
+enum class ReleaseToOS : u8 {
+ Normal, // Follow the normal rules for releasing pages to the OS
+ Force, // Force release pages to the OS, but avoid cases that take too long.
+ ForceAll, // Force release every page possible regardless of how long it will
+ // take.
+};
+
constexpr unsigned char PatternFillByte = 0xAB;
enum FillContentsMode {
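
A sketch of how the three modes are meant to be used by a caller, where AllocatorT stands for any scudo::Allocator instantiation (illustrative only):

template <class AllocatorT>
void releaseExample(AllocatorT &A) {
  A.releaseToOS(ReleaseToOS::Normal);   // honor the configured intervals
  A.releaseToOS(ReleaseToOS::Force);    // release now, skip costly cases
  A.releaseToOS(ReleaseToOS::ForceAll); // drain TSD caches, then release
                                        // everything (see combined.h above)
}
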
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.cpp
index 70e4e714f2cb..0788c4198e53 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.cpp
@@ -19,6 +19,7 @@
#include <zircon/compiler.h>
#include <zircon/process.h>
#include <zircon/sanitizer.h>
+#include <zircon/status.h>
#include <zircon/syscalls.h>
namespace scudo {
@@ -31,6 +32,16 @@ void NORETURN die() { __builtin_trap(); }
// with ZX_HANDLE_INVALID.
static_assert(ZX_HANDLE_INVALID == 0, "");
+static void NORETURN dieOnError(zx_status_t Status, const char *FnName,
+ uptr Size) {
+ char Error[128];
+ formatString(Error, sizeof(Error),
+ "SCUDO ERROR: %s failed with size %zuKB (%s)", FnName,
+ Size >> 10, zx_status_get_string(Status));
+ outputRaw(Error);
+ die();
+}
+
static void *allocateVmar(uptr Size, MapPlatformData *Data, bool AllowNoMem) {
// Only scenario so far.
DCHECK(Data);
@@ -42,7 +53,7 @@ static void *allocateVmar(uptr Size, MapPlatformData *Data, bool AllowNoMem) {
Size, &Data->Vmar, &Data->VmarBase);
if (UNLIKELY(Status != ZX_OK)) {
if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
- dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY ? Size : 0);
+ dieOnError(Status, "zx_vmar_allocate", Size);
return nullptr;
}
return reinterpret_cast<void *>(Data->VmarBase);
@@ -73,7 +84,7 @@ void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
Status = _zx_vmo_set_size(Vmo, VmoSize + Size);
if (Status != ZX_OK) {
if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
- dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY ? Size : 0);
+ dieOnError(Status, "zx_vmo_set_size", VmoSize + Size);
return nullptr;
}
} else {
@@ -81,7 +92,7 @@ void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
Status = _zx_vmo_create(Size, ZX_VMO_RESIZABLE, &Vmo);
if (UNLIKELY(Status != ZX_OK)) {
if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
- dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY ? Size : 0);
+ dieOnError(Status, "zx_vmo_create", Size);
return nullptr;
}
_zx_object_set_property(Vmo, ZX_PROP_NAME, Name, strlen(Name));
@@ -99,7 +110,7 @@ void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
Status = _zx_vmar_map(Vmar, MapFlags, Offset, Vmo, VmoSize, Size, &P);
if (UNLIKELY(Status != ZX_OK)) {
if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
- dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY ? Size : 0);
+ dieOnError(Status, "zx_vmar_map", Size);
return nullptr;
}
@@ -120,7 +131,7 @@ void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
}
if (UNLIKELY(Status != ZX_OK)) {
if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
- dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY ? Size : 0);
+ dieOnError(Status, "zx_vmar_op_range", Size);
return nullptr;
}
@@ -145,7 +156,7 @@ void unmap(void *Addr, uptr Size, uptr Flags, MapPlatformData *Data) {
const zx_status_t Status =
_zx_vmar_unmap(Vmar, reinterpret_cast<uintptr_t>(Addr), Size);
if (UNLIKELY(Status != ZX_OK))
- dieOnMapUnmapError();
+ dieOnError(Status, "zx_vmar_unmap", Size);
}
if (Data) {
if (Data->Vmo != ZX_HANDLE_INVALID)
@@ -160,12 +171,15 @@ void setMemoryPermission(UNUSED uptr Addr, UNUSED uptr Size, UNUSED uptr Flags,
(Flags & MAP_NOACCESS) ? 0 : (ZX_VM_PERM_READ | ZX_VM_PERM_WRITE);
DCHECK(Data);
DCHECK_NE(Data->Vmar, ZX_HANDLE_INVALID);
- if (_zx_vmar_protect(Data->Vmar, Prot, Addr, Size) != ZX_OK)
- dieOnMapUnmapError();
+ const zx_status_t Status = _zx_vmar_protect(Data->Vmar, Prot, Addr, Size);
+ if (Status != ZX_OK)
+ dieOnError(Status, "zx_vmar_protect", Size);
}
void releasePagesToOS(UNUSED uptr BaseAddress, uptr Offset, uptr Size,
MapPlatformData *Data) {
+ // TODO: DCHECK the BaseAddress is consistent with the data in
+ // MapPlatformData.
DCHECK(Data);
DCHECK_NE(Data->Vmar, ZX_HANDLE_INVALID);
DCHECK_NE(Data->Vmo, ZX_HANDLE_INVALID);
@@ -195,7 +209,10 @@ void HybridMutex::unlock() __TA_NO_THREAD_SAFETY_ANALYSIS {
sync_mutex_unlock(&M);
}
+void HybridMutex::assertHeldImpl() __TA_NO_THREAD_SAFETY_ANALYSIS {}
+
u64 getMonotonicTime() { return _zx_clock_get_monotonic(); }
+u64 getMonotonicTimeFast() { return _zx_clock_get_monotonic(); }
u32 getNumberOfCPUs() { return _zx_system_get_num_cpus(); }
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/include/scudo/interface.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/include/scudo/interface.h
index 23bcfba3982a..6c0c521f8d82 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/include/scudo/interface.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/include/scudo/interface.h
@@ -118,6 +118,10 @@ size_t __scudo_get_ring_buffer_size(void);
#define M_PURGE -101
#endif
+#ifndef M_PURGE_ALL
+#define M_PURGE_ALL -104
+#endif
+
// Tune the allocator's choice of memory tags to make it more likely that
// a certain class of memory errors will be detected. The value argument should
// be one of the M_MEMTAG_TUNING_* constants below.
@@ -155,6 +159,11 @@ size_t __scudo_get_ring_buffer_size(void);
#define M_MEMTAG_TUNING_UAF 1
#endif
+// Print internal stats to the log.
+#ifndef M_LOG_STATS
+#define M_LOG_STATS -205
+#endif
+
} // extern "C"
#endif // SCUDO_INTERFACE_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.cpp
index 9c5755af5750..e285d8a3d2d2 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.cpp
@@ -11,6 +11,7 @@
#if SCUDO_LINUX
#include "common.h"
+#include "internal_defs.h"
#include "linux.h"
#include "mutex.h"
#include "string_utils.h"
@@ -128,6 +129,10 @@ void HybridMutex::unlock() {
}
}
+void HybridMutex::assertHeldImpl() {
+ CHECK(atomic_load(&M, memory_order_acquire) != Unlocked);
+}
+
u64 getMonotonicTime() {
timespec TS;
clock_gettime(CLOCK_MONOTONIC, &TS);
@@ -135,6 +140,17 @@ u64 getMonotonicTime() {
static_cast<u64>(TS.tv_nsec);
}
+u64 getMonotonicTimeFast() {
+#if defined(CLOCK_MONOTONIC_COARSE)
+ timespec TS;
+ clock_gettime(CLOCK_MONOTONIC_COARSE, &TS);
+ return static_cast<u64>(TS.tv_sec) * (1000ULL * 1000 * 1000) +
+ static_cast<u64>(TS.tv_nsec);
+#else
+ return getMonotonicTime();
+#endif
+}
+
u32 getNumberOfCPUs() {
cpu_set_t CPUs;
// sched_getaffinity can fail for a variety of legitimate reasons (lack of
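
The intended trade-off, sketched: interval checks that can tolerate a few milliseconds of error use the coarse clock, which reads a kernel-cached timestamp instead of taking the full clock_gettime path.

// Illustrative caller; LastNs/IntervalNs are hypothetical parameters.
bool intervalElapsed(u64 LastNs, u64 IntervalNs) {
  return getMonotonicTimeFast() - LastNs >= IntervalNs;
}
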
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/local_cache.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/local_cache.h
index 6e84158659ae..1095eb5f186d 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/local_cache.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/local_cache.h
@@ -14,6 +14,7 @@
#include "platform.h"
#include "report.h"
#include "stats.h"
+#include "string_utils.h"
namespace scudo {
@@ -34,6 +35,15 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
// u16 will be promoted to int by arithmetic type conversion.
Count = static_cast<u16>(Count + N);
}
+ void appendFromTransferBatch(TransferBatch *B, u16 N) {
+ DCHECK_LE(N, MaxNumCached - Count);
+ DCHECK_GE(B->Count, N);
+ // Append from the back of `B`.
+ memcpy(Batch + Count, B->Batch + (B->Count - N), sizeof(Batch[0]) * N);
+ // u16 will be promoted to int by arithmetic type conversion.
+ Count = static_cast<u16>(Count + N);
+ B->Count = static_cast<u16>(B->Count - N);
+ }
void clear() { Count = 0; }
void add(CompactPtrT P) {
DCHECK_LT(Count, MaxNumCached);
@@ -43,6 +53,7 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
memcpy(Array, Batch, sizeof(Batch[0]) * Count);
}
u16 getCount() const { return Count; }
+ bool isEmpty() const { return Count == 0U; }
CompactPtrT get(u16 I) const {
DCHECK_LE(I, Count);
return Batch[I];
@@ -62,17 +73,16 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
struct BatchGroup {
// `Next` is used by IntrusiveList.
BatchGroup *Next;
- // The identifier of each group
- uptr GroupId;
+ // The compact base address of each group
+ uptr CompactPtrGroupBase;
// Cache value of TransferBatch::getMaxCached()
u16 MaxCachedPerBatch;
// Number of blocks pushed into this group. This is an increment-only
// counter.
uptr PushedBlocks;
- // This is used to track how many blocks are pushed since last time we
- // checked `PushedBlocks`. It's useful for page releasing to determine the
- // usage of a BatchGroup.
- uptr PushedBlocksAtLastCheckpoint;
+ // This is used to track how many bytes are not in use since the last time
+ // we tried to release pages.
+ uptr BytesInBGAtLastCheckpoint;
// Blocks are managed by TransferBatch in a list.
SinglyLinkedList<TransferBatch> Batches;
};
@@ -112,13 +122,16 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
return Allocator->decompactPtr(ClassId, CompactP);
}
- void deallocate(uptr ClassId, void *P) {
+ bool deallocate(uptr ClassId, void *P) {
CHECK_LT(ClassId, NumClasses);
PerClass *C = &PerClassArray[ClassId];
// We still have to initialize the cache in the event that the first heap
// operation in a thread is a deallocation.
initCacheMaybe(C);
- if (C->Count == C->MaxCount)
+
+ // If the cache is full, drain half of the blocks back to the main allocator.
+ const bool NeedToDrainCache = C->Count == C->MaxCount;
+ if (NeedToDrainCache)
drain(C, ClassId);
// See comment in allocate() about memory accesses.
const uptr ClassSize = C->ClassSize;
@@ -126,6 +139,8 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
Allocator->compactPtr(ClassId, reinterpret_cast<uptr>(P));
Stats.sub(StatAllocated, ClassSize);
Stats.add(StatFree, ClassSize);
+
+ return NeedToDrainCache;
}
bool isEmpty() const {
@@ -165,6 +180,29 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
LocalStats &getStats() { return Stats; }
+ void getStats(ScopedString *Str) {
+ bool EmptyCache = true;
+ for (uptr I = 0; I < NumClasses; ++I) {
+ if (PerClassArray[I].Count == 0)
+ continue;
+
+ EmptyCache = false;
+ // The size of BatchClass is set to 0 intentionally. See the comment in
+ // initCache() for more details.
+ const uptr ClassSize = I == BatchClassId
+ ? SizeClassAllocator::getSizeByClassId(I)
+ : PerClassArray[I].ClassSize;
+ // Note that the string utils don't support printing u16, so we cast it
+ // to the commonly used type uptr.
+ Str->append(" %02zu (%6zu): cached: %4zu max: %4zu\n", I, ClassSize,
+ static_cast<uptr>(PerClassArray[I].Count),
+ static_cast<uptr>(PerClassArray[I].MaxCount));
+ }
+
+ if (EmptyCache)
+ Str->append(" No block is cached.\n");
+ }
+
private:
static const uptr NumClasses = SizeClassMap::NumClasses;
static const uptr BatchClassId = SizeClassMap::BatchClassId;
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map.cpp
new file mode 100644
index 000000000000..115cc34e7060
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map.cpp
@@ -0,0 +1,84 @@
+//===-- mem_map.cpp ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mem_map.h"
+
+#include "common.h"
+
+namespace scudo {
+
+bool MemMapDefault::mapImpl(uptr Addr, uptr Size, const char *Name,
+ uptr Flags) {
+ void *MappedAddr =
+ ::scudo::map(reinterpret_cast<void *>(Addr), Size, Name, Flags, &Data);
+ if (MappedAddr == nullptr)
+ return false;
+ Base = reinterpret_cast<uptr>(MappedAddr);
+ MappedBase = Base;
+ Capacity = Size;
+ return true;
+}
+
+void MemMapDefault::unmapImpl(uptr Addr, uptr Size) {
+ if (Size == Capacity) {
+ Base = MappedBase = Capacity = 0;
+ } else {
+ if (Base == Addr) {
+ Base = Addr + Size;
+ MappedBase = MappedBase == 0 ? Base : Max(MappedBase, Base);
+ }
+ Capacity -= Size;
+ }
+
+ ::scudo::unmap(reinterpret_cast<void *>(Addr), Size, UNMAP_ALL, &Data);
+}
+
+bool MemMapDefault::remapImpl(uptr Addr, uptr Size, const char *Name,
+ uptr Flags) {
+ void *RemappedPtr =
+ ::scudo::map(reinterpret_cast<void *>(Addr), Size, Name, Flags, &Data);
+ const uptr RemappedAddr = reinterpret_cast<uptr>(RemappedPtr);
+ MappedBase = MappedBase == 0 ? RemappedAddr : Min(MappedBase, RemappedAddr);
+ return RemappedAddr == Addr;
+}
+
+void MemMapDefault::releaseAndZeroPagesToOSImpl(uptr From, uptr Size) {
+ DCHECK_NE(MappedBase, 0U);
+ DCHECK_GE(From, MappedBase);
+ return ::scudo::releasePagesToOS(MappedBase, From - MappedBase, Size, &Data);
+}
+
+void MemMapDefault::setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags) {
+ return ::scudo::setMemoryPermission(Addr, Size, Flags);
+}
+
+void ReservedMemoryDefault::releaseImpl() {
+ ::scudo::unmap(reinterpret_cast<void *>(Base), Capacity, UNMAP_ALL, &Data);
+}
+
+bool ReservedMemoryDefault::createImpl(uptr Addr, uptr Size, const char *Name,
+ uptr Flags) {
+ void *Reserved = ::scudo::map(reinterpret_cast<void *>(Addr), Size, Name,
+ Flags | MAP_NOACCESS, &Data);
+ if (Reserved == nullptr)
+ return false;
+
+ Base = reinterpret_cast<uptr>(Reserved);
+ Capacity = Size;
+
+ return true;
+}
+
+ReservedMemoryDefault::MemMapT ReservedMemoryDefault::dispatchImpl(uptr Addr,
+ uptr Size) {
+ ReservedMemoryDefault::MemMapT NewMap(Addr, Size);
+ NewMap.setMapPlatformData(Data);
+ return NewMap;
+}
+
+} // namespace scudo
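
A sketch of the reserve-then-dispatch flow with the default implementations, assuming the ReservedMemory base exposes create()/dispatch() wrappers over the Impl functions, as MemMapBase does for map()/unmap() (sizes and names are placeholders):

void reserveThenMapExample() {
  ReservedMemoryDefault Reserved;
  if (!Reserved.create(/*Addr=*/0, /*Size=*/1 << 20, "Example"))
    return;
  // Carve a mapping out of the reservation at a known address, then make it
  // accessible; the reservation itself was created MAP_NOACCESS.
  MemMapDefault M = Reserved.dispatch(Reserved.getBase(), 1 << 16);
  M.remap(M.getBase(), 1 << 16, "Example");
  M.releasePagesToOS(M.getBase(), 1 << 16);
}
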
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map.h
new file mode 100644
index 000000000000..409e4dbbe04b
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map.h
@@ -0,0 +1,91 @@
+//===-- mem_map.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_MEM_MAP_H_
+#define SCUDO_MEM_MAP_H_
+
+#include "mem_map_base.h"
+
+#include "common.h"
+#include "internal_defs.h"
+
+// TODO: This is only used for `MapPlatformData`. Remove these includes when we
+// have all three platform-specific `MemMap` and `ReservedMemory`
+// implementations.
+#include "fuchsia.h"
+#include "linux.h"
+#include "trusty.h"
+
+#include "mem_map_fuchsia.h"
+
+namespace scudo {
+
+// This will be deprecated once every allocator is supported by each
+// platform's `MemMap` implementation.
+class MemMapDefault final : public MemMapBase<MemMapDefault> {
+public:
+ constexpr MemMapDefault() = default;
+ MemMapDefault(uptr Base, uptr Capacity) : Base(Base), Capacity(Capacity) {}
+
+ // Impls for base functions.
+ bool mapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
+ void unmapImpl(uptr Addr, uptr Size);
+ bool remapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
+ void setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags);
+ void releasePagesToOSImpl(uptr From, uptr Size) {
+ return releaseAndZeroPagesToOSImpl(From, Size);
+ }
+ void releaseAndZeroPagesToOSImpl(uptr From, uptr Size);
+ uptr getBaseImpl() { return Base; }
+ uptr getCapacityImpl() { return Capacity; }
+
+ void setMapPlatformData(MapPlatformData &NewData) { Data = NewData; }
+
+private:
+ uptr Base = 0;
+ uptr Capacity = 0;
+ uptr MappedBase = 0;
+ MapPlatformData Data = {};
+};
+
+// This will be deprecated once every allocator is supported by each
+// platform's `MemMap` implementation.
+class ReservedMemoryDefault final
+ : public ReservedMemory<ReservedMemoryDefault, MemMapDefault> {
+public:
+ constexpr ReservedMemoryDefault() = default;
+
+ bool createImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
+ void releaseImpl();
+ MemMapT dispatchImpl(uptr Addr, uptr Size);
+ uptr getBaseImpl() { return Base; }
+ uptr getCapacityImpl() { return Capacity; }
+
+private:
+ uptr Base = 0;
+ uptr Capacity = 0;
+ MapPlatformData Data = {};
+};
+
+#if SCUDO_LINUX
+using ReservedMemoryT = ReservedMemoryDefault;
+using MemMapT = ReservedMemoryT::MemMapT;
+#elif SCUDO_FUCHSIA
+using ReservedMemoryT = ReservedMemoryDefault;
+using MemMapT = ReservedMemoryT::MemMapT;
+#elif SCUDO_TRUSTY
+using ReservedMemoryT = ReservedMemoryDefault;
+using MemMapT = ReservedMemoryT::MemMapT;
+#else
+#error \
+ "Unsupported platform, please implement the ReservedMemory for your platform!"
+#endif
+
+} // namespace scudo
+
+#endif // SCUDO_MEM_MAP_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_base.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_base.h
new file mode 100644
index 000000000000..99ab0cba604f
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_base.h
@@ -0,0 +1,129 @@
+//===-- mem_map_base.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_MEM_MAP_BASE_H_
+#define SCUDO_MEM_MAP_BASE_H_
+
+#include "common.h"
+
+namespace scudo {
+
+// In Scudo, every memory operation is fulfilled through a platform-specific
+// `MemMap` instance. The essential APIs are listed in `MemMapBase` below.
+// This is implemented with CRTP, so each implementation has to provide all of
+// the 'Impl'-suffixed functions.
+template <class Derived> class MemMapBase {
+public:
+ constexpr MemMapBase() = default;
+
+ // This is used to map a new set of contiguous pages. Note that the `Addr` is
+ // only a suggestion to the system.
+ bool map(uptr Addr, uptr Size, const char *Name, uptr Flags = 0) {
+ DCHECK(!isAllocated());
+ return invokeImpl(&Derived::mapImpl, Addr, Size, Name, Flags);
+ }
+
+  // This is used to unmap partial/full pages from the beginning or the end.
+  // I.e., the remaining pages are expected to still be contiguous.
+ void unmap(uptr Addr, uptr Size) {
+ DCHECK(isAllocated());
+ DCHECK((Addr == getBase()) || (Addr + Size == getBase() + getCapacity()));
+ invokeImpl(&Derived::unmapImpl, Addr, Size);
+ }
+
+ // This is used to remap a mapped range (either from map() or dispatched from
+ // ReservedMemory). For example, we have reserved several pages and then we
+ // want to remap them with different accessibility.
+ bool remap(uptr Addr, uptr Size, const char *Name, uptr Flags = 0) {
+ DCHECK(isAllocated());
+ DCHECK((Addr >= getBase()) && (Addr + Size <= getBase() + getCapacity()));
+ return invokeImpl(&Derived::remapImpl, Addr, Size, Name, Flags);
+ }
+
+ // This is used to update the pages' access permission. For example, mark
+ // pages as no read/write permission.
+ void setMemoryPermission(uptr Addr, uptr Size, uptr Flags) {
+ DCHECK(isAllocated());
+ DCHECK((Addr >= getBase()) && (Addr + Size <= getBase() + getCapacity()));
+ return invokeImpl(&Derived::setMemoryPermissionImpl, Addr, Size, Flags);
+ }
+
+ // Suggest releasing a set of contiguous physical pages back to the OS. Note
+ // that only physical pages are supposed to be released. Any release of
+ // virtual pages may lead to undefined behavior.
+ void releasePagesToOS(uptr From, uptr Size) {
+ DCHECK(isAllocated());
+ DCHECK((From >= getBase()) && (From + Size <= getBase() + getCapacity()));
+ invokeImpl(&Derived::releasePagesToOSImpl, From, Size);
+ }
+ // This is similar to the above one except that any subsequent access to the
+ // released pages will return with zero-filled pages.
+ void releaseAndZeroPagesToOS(uptr From, uptr Size) {
+ DCHECK(isAllocated());
+ DCHECK((From >= getBase()) && (From + Size <= getBase() + getCapacity()));
+ invokeImpl(&Derived::releaseAndZeroPagesToOSImpl, From, Size);
+ }
+
+ uptr getBase() { return invokeImpl(&Derived::getBaseImpl); }
+ uptr getCapacity() { return invokeImpl(&Derived::getCapacityImpl); }
+
+ bool isAllocated() { return getBase() != 0U; }
+
+protected:
+ template <typename R, typename... Args>
+ R invokeImpl(R (Derived::*MemFn)(Args...), Args... args) {
+ return (static_cast<Derived *>(this)->*MemFn)(args...);
+ }
+};
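To make the CRTP contract above concrete, here is a deliberately tiny, hypothetical 'Impl' provider; it performs no real mapping (the class name and no-op bodies are assumptions for illustration only), but it shows exactly which hooks a platform has to supply:

```cpp
// Illustrative only, not part of this patch. Real implementations back
// these hooks with platform syscalls (mmap, zx_vmar_map, ...).
class NoOpMemMap final : public MemMapBase<NoOpMemMap> {
public:
  bool mapImpl(uptr Addr, uptr Size, const char *, uptr) {
    Base = Addr; // Toy behavior: pretend the address suggestion was honored.
    Capacity = Size;
    return true;
  }
  void unmapImpl(uptr, uptr) { Base = Capacity = 0; }
  bool remapImpl(uptr, uptr, const char *, uptr) { return true; }
  void setMemoryPermissionImpl(uptr, uptr, uptr) {}
  void releasePagesToOSImpl(uptr, uptr) {}
  void releaseAndZeroPagesToOSImpl(uptr, uptr) {}
  uptr getBaseImpl() { return Base; }
  uptr getCapacityImpl() { return Capacity; }

private:
  uptr Base = 0;
  uptr Capacity = 0;
};
```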
+
+// `ReservedMemory` is a special memory handle which can be viewed as a page
+// allocator. `ReservedMemory` reserves contiguous pages so that later page
+// requests can be fulfilled at designated addresses. This is used when we want
+// to ensure the virtual address of the MemMap will be in a known range. This
+// is implemented with CRTP, so each implementation has to provide all of the
+// 'Impl'-suffixed functions.
+template <class Derived, typename MemMapTy> class ReservedMemory {
+public:
+ using MemMapT = MemMapTy;
+ constexpr ReservedMemory() = default;
+
+ // Reserve a chunk of memory at a suggested address.
+ bool create(uptr Addr, uptr Size, const char *Name, uptr Flags = 0) {
+ DCHECK(!isCreated());
+ return invokeImpl(&Derived::createImpl, Addr, Size, Name, Flags);
+ }
+
+ // Release the entire reserved memory.
+ void release() {
+ DCHECK(isCreated());
+ invokeImpl(&Derived::releaseImpl);
+ }
+
+ // Dispatch a sub-range of reserved memory. Note that any fragmentation of
+ // the reserved pages is managed by each implementation.
+ MemMapT dispatch(uptr Addr, uptr Size) {
+ DCHECK(isCreated());
+ DCHECK((Addr >= getBase()) && (Addr + Size <= getBase() + getCapacity()));
+ return invokeImpl(&Derived::dispatchImpl, Addr, Size);
+ }
+
+ uptr getBase() { return invokeImpl(&Derived::getBaseImpl); }
+ uptr getCapacity() { return invokeImpl(&Derived::getCapacityImpl); }
+
+ bool isCreated() { return getBase() != 0U; }
+
+protected:
+ template <typename R, typename... Args>
+ R invokeImpl(R (Derived::*MemFn)(Args...), Args... args) {
+ return (static_cast<Derived *>(this)->*MemFn)(args...);
+ }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_MEM_MAP_BASE_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp
new file mode 100644
index 000000000000..9ace1fef7ad4
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp
@@ -0,0 +1,252 @@
+//===-- mem_map_fuchsia.cpp -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mem_map_fuchsia.h"
+
+#include "atomic_helpers.h"
+#include "common.h"
+#include "string_utils.h"
+
+#if SCUDO_FUCHSIA
+
+#include <zircon/process.h>
+#include <zircon/status.h>
+#include <zircon/syscalls.h>
+
+namespace scudo {
+
+static void NORETURN dieOnError(zx_status_t Status, const char *FnName,
+ uptr Size) {
+ char Error[128];
+ formatString(Error, sizeof(Error),
+ "SCUDO ERROR: %s failed with size %zuKB (%s)", FnName,
+ Size >> 10, _zx_status_get_string(Status));
+ outputRaw(Error);
+ die();
+}
+
+static void setVmoName(zx_handle_t Vmo, const char *Name) {
+ size_t Len = strlen(Name);
+ DCHECK_LT(Len, ZX_MAX_NAME_LEN);
+ zx_status_t Status = _zx_object_set_property(Vmo, ZX_PROP_NAME, Name, Len);
+ CHECK_EQ(Status, ZX_OK);
+}
+
+// Returns the (cached) base address of the root VMAR.
+static uptr getRootVmarBase() {
+ static atomic_uptr CachedResult = {0};
+
+ uptr Result = atomic_load_relaxed(&CachedResult);
+ if (UNLIKELY(!Result)) {
+ zx_info_vmar_t VmarInfo;
+ zx_status_t Status =
+ _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &VmarInfo,
+ sizeof(VmarInfo), nullptr, nullptr);
+ CHECK_EQ(Status, ZX_OK);
+ CHECK_NE(VmarInfo.base, 0);
+
+ atomic_store_relaxed(&CachedResult, VmarInfo.base);
+ Result = VmarInfo.base;
+ }
+
+ return Result;
+}
+
+// Lazily creates and then always returns the same zero-sized VMO.
+static zx_handle_t getPlaceholderVmo() {
+ static atomic_u32 StoredVmo = {ZX_HANDLE_INVALID};
+
+ zx_handle_t Vmo = atomic_load_relaxed(&StoredVmo);
+ if (UNLIKELY(Vmo == ZX_HANDLE_INVALID)) {
+ // Create a zero-sized placeholder VMO.
+ zx_status_t Status = _zx_vmo_create(0, 0, &Vmo);
+ if (UNLIKELY(Status != ZX_OK))
+ dieOnError(Status, "zx_vmo_create", 0);
+
+ setVmoName(Vmo, "scudo:reserved");
+
+ // Atomically store its handle. If some other thread wins the race, use its
+ // handle and discard ours.
+ zx_handle_t OldValue =
+ atomic_compare_exchange(&StoredVmo, ZX_HANDLE_INVALID, Vmo);
+ if (OldValue != ZX_HANDLE_INVALID) {
+ Status = _zx_handle_close(Vmo);
+ CHECK_EQ(Status, ZX_OK);
+
+ Vmo = OldValue;
+ }
+ }
+
+ return Vmo;
+}
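The handle-publication dance above generalizes beyond Zircon: create eagerly, publish with a compare-exchange, and let the loser close its copy. A standalone sketch of the same pattern with the C++ standard library (all names are stand-ins, not Scudo or Zircon APIs):

```cpp
#include <atomic>
#include <cstdint>

using Handle = std::uint32_t;
constexpr Handle InvalidHandle = 0;

// Stand-ins for zx_vmo_create()/zx_handle_close() in this sketch.
Handle makeHandle() {
  static std::atomic<Handle> Next{1};
  return Next.fetch_add(1, std::memory_order_relaxed);
}
void closeHandle(Handle) {}

Handle getSharedHandle() {
  static std::atomic<Handle> Stored{InvalidHandle};
  Handle H = Stored.load(std::memory_order_relaxed);
  if (H != InvalidHandle)
    return H;
  Handle Fresh = makeHandle();
  Handle Expected = InvalidHandle;
  // If another thread won the race, keep its handle and discard ours.
  if (!Stored.compare_exchange_strong(Expected, Fresh,
                                      std::memory_order_relaxed)) {
    closeHandle(Fresh);
    return Expected;
  }
  return Fresh;
}
```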
+
+MemMapFuchsia::MemMapFuchsia(uptr Base, uptr Capacity)
+ : MapAddr(Base), WindowBase(Base), WindowSize(Capacity) {
+ // Create the VMO.
+ zx_status_t Status = _zx_vmo_create(Capacity, 0, &Vmo);
+ if (UNLIKELY(Status != ZX_OK))
+ dieOnError(Status, "zx_vmo_create", Capacity);
+}
+
+bool MemMapFuchsia::mapImpl(UNUSED uptr Addr, uptr Size, const char *Name,
+ uptr Flags) {
+ const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);
+ const bool PreCommit = !!(Flags & MAP_PRECOMMIT);
+ const bool NoAccess = !!(Flags & MAP_NOACCESS);
+
+ // Create the VMO.
+ zx_status_t Status = _zx_vmo_create(Size, 0, &Vmo);
+ if (UNLIKELY(Status != ZX_OK)) {
+ if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+ dieOnError(Status, "zx_vmo_create", Size);
+ return false;
+ }
+
+ if (Name != nullptr)
+ setVmoName(Vmo, Name);
+
+ // Map it.
+ zx_vm_option_t MapFlags = ZX_VM_ALLOW_FAULTS;
+ if (!NoAccess)
+ MapFlags |= ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
+ Status =
+ _zx_vmar_map(_zx_vmar_root_self(), MapFlags, 0, Vmo, 0, Size, &MapAddr);
+ if (UNLIKELY(Status != ZX_OK)) {
+ if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+ dieOnError(Status, "zx_vmar_map", Size);
+
+ Status = _zx_handle_close(Vmo);
+ CHECK_EQ(Status, ZX_OK);
+
+ MapAddr = 0;
+ Vmo = ZX_HANDLE_INVALID;
+ return false;
+ }
+
+ if (PreCommit) {
+ Status = _zx_vmar_op_range(_zx_vmar_root_self(), ZX_VMAR_OP_COMMIT, MapAddr,
+ Size, nullptr, 0);
+ CHECK_EQ(Status, ZX_OK);
+ }
+
+ WindowBase = MapAddr;
+ WindowSize = Size;
+ return true;
+}
+
+void MemMapFuchsia::unmapImpl(uptr Addr, uptr Size) {
+ zx_status_t Status;
+
+ if (Size == WindowSize) {
+ // NOTE: Closing first and then unmapping seems slightly faster than doing
+ // the same operations in the opposite order.
+ Status = _zx_handle_close(Vmo);
+ CHECK_EQ(Status, ZX_OK);
+ Status = _zx_vmar_unmap(_zx_vmar_root_self(), Addr, Size);
+ CHECK_EQ(Status, ZX_OK);
+
+ MapAddr = WindowBase = WindowSize = 0;
+ Vmo = ZX_HANDLE_INVALID;
+ } else {
+ // Unmap the subrange.
+ Status = _zx_vmar_unmap(_zx_vmar_root_self(), Addr, Size);
+ CHECK_EQ(Status, ZX_OK);
+
+ // Decommit the pages that we just unmapped.
+ Status = _zx_vmo_op_range(Vmo, ZX_VMO_OP_DECOMMIT, Addr - MapAddr, Size,
+ nullptr, 0);
+ CHECK_EQ(Status, ZX_OK);
+
+ if (Addr == WindowBase)
+ WindowBase += Size;
+ WindowSize -= Size;
+ }
+}
+
+bool MemMapFuchsia::remapImpl(uptr Addr, uptr Size, const char *Name,
+ uptr Flags) {
+ const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);
+ const bool PreCommit = !!(Flags & MAP_PRECOMMIT);
+ const bool NoAccess = !!(Flags & MAP_NOACCESS);
+
+ // NOTE: This will rename the *whole* VMO, not only the requested portion of
+ // it. But we cannot do better than this given the MemMap API. In practice,
+ // the upper layers of Scudo always pass the same Name for a given MemMap.
+ if (Name != nullptr)
+ setVmoName(Vmo, Name);
+
+ uptr MappedAddr;
+ zx_vm_option_t MapFlags = ZX_VM_ALLOW_FAULTS | ZX_VM_SPECIFIC_OVERWRITE;
+ if (!NoAccess)
+ MapFlags |= ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
+ zx_status_t Status =
+ _zx_vmar_map(_zx_vmar_root_self(), MapFlags, Addr - getRootVmarBase(),
+ Vmo, Addr - MapAddr, Size, &MappedAddr);
+ if (UNLIKELY(Status != ZX_OK)) {
+ if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+ dieOnError(Status, "zx_vmar_map", Size);
+ return false;
+ }
+ DCHECK_EQ(Addr, MappedAddr);
+
+ if (PreCommit) {
+ Status = _zx_vmar_op_range(_zx_vmar_root_self(), ZX_VMAR_OP_COMMIT, MapAddr,
+ Size, nullptr, 0);
+ CHECK_EQ(Status, ZX_OK);
+ }
+
+ return true;
+}
+
+void MemMapFuchsia::releaseAndZeroPagesToOSImpl(uptr From, uptr Size) {
+ zx_status_t Status = _zx_vmo_op_range(Vmo, ZX_VMO_OP_DECOMMIT, From - MapAddr,
+ Size, nullptr, 0);
+ CHECK_EQ(Status, ZX_OK);
+}
+
+void MemMapFuchsia::setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags) {
+ const bool NoAccess = !!(Flags & MAP_NOACCESS);
+
+ zx_vm_option_t MapFlags = 0;
+ if (!NoAccess)
+ MapFlags |= ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
+ zx_status_t Status =
+ _zx_vmar_protect(_zx_vmar_root_self(), MapFlags, Addr, Size);
+ CHECK_EQ(Status, ZX_OK);
+}
+
+bool ReservedMemoryFuchsia::createImpl(UNUSED uptr Addr, uptr Size,
+ UNUSED const char *Name, uptr Flags) {
+ const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);
+
+ // Reserve memory by mapping the placeholder VMO without any permission.
+ zx_status_t Status = _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_ALLOW_FAULTS, 0,
+ getPlaceholderVmo(), 0, Size, &Base);
+ if (UNLIKELY(Status != ZX_OK)) {
+ if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+ dieOnError(Status, "zx_vmar_map", Size);
+ return false;
+ }
+
+ Capacity = Size;
+ return true;
+}
+
+void ReservedMemoryFuchsia::releaseImpl() {
+ zx_status_t Status = _zx_vmar_unmap(_zx_vmar_root_self(), Base, Capacity);
+ CHECK_EQ(Status, ZX_OK);
+}
+
+ReservedMemoryFuchsia::MemMapT ReservedMemoryFuchsia::dispatchImpl(uptr Addr,
+ uptr Size) {
+ return ReservedMemoryFuchsia::MemMapT(Addr, Size);
+}
+
+} // namespace scudo
+
+#endif // SCUDO_FUCHSIA
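One behavioral detail worth calling out in the implementation above: every failure path calls dieOnError() unless the caller passed MAP_ALLOWNOMEM and the error was ZX_ERR_NO_MEMORY. A hypothetical caller-side sketch of that contract (the helper name and VMO name are assumptions):

```cpp
// Hypothetical caller: an optional cache that degrades instead of aborting.
bool tryMapOptionalCache(scudo::MemMapFuchsia &Map, scudo::uptr Size) {
  if (!Map.map(/*Addr=*/0, Size, "scudo:opt-cache", MAP_ALLOWNOMEM)) {
    // ZX_ERR_NO_MEMORY was tolerated; run without the optional cache.
    return false;
  }
  return true;
}
```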
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.h
new file mode 100644
index 000000000000..2e66f89cfca5
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.h
@@ -0,0 +1,75 @@
+//===-- mem_map_fuchsia.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_MEM_MAP_FUCHSIA_H_
+#define SCUDO_MEM_MAP_FUCHSIA_H_
+
+#include "mem_map_base.h"
+
+#if SCUDO_FUCHSIA
+
+#include <stdint.h>
+#include <zircon/types.h>
+
+namespace scudo {
+
+class MemMapFuchsia final : public MemMapBase<MemMapFuchsia> {
+public:
+ constexpr MemMapFuchsia() = default;
+
+ // Impls for base functions.
+ bool mapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
+ void unmapImpl(uptr Addr, uptr Size);
+ bool remapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
+ void setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags);
+ void releasePagesToOSImpl(uptr From, uptr Size) {
+ return releaseAndZeroPagesToOSImpl(From, Size);
+ }
+ void releaseAndZeroPagesToOSImpl(uptr From, uptr Size);
+ uptr getBaseImpl() { return WindowBase; }
+ uptr getCapacityImpl() { return WindowSize; }
+
+private:
+ friend class ReservedMemoryFuchsia;
+
+ // Used by ReservedMemoryFuchsia::dispatch.
+ MemMapFuchsia(uptr Base, uptr Capacity);
+
+ // Virtual memory address corresponding to VMO offset 0.
+ uptr MapAddr = 0;
+
+ // Virtual memory base address and size of the VMO subrange that is still in
+ // use. unmapImpl() can shrink this range, either at the beginning or at the
+ // end.
+ uptr WindowBase = 0;
+ uptr WindowSize = 0;
+
+ zx_handle_t Vmo = ZX_HANDLE_INVALID;
+};
+
+class ReservedMemoryFuchsia final
+ : public ReservedMemory<ReservedMemoryFuchsia, MemMapFuchsia> {
+public:
+ constexpr ReservedMemoryFuchsia() = default;
+
+ bool createImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
+ void releaseImpl();
+ MemMapT dispatchImpl(uptr Addr, uptr Size);
+ uptr getBaseImpl() { return Base; }
+ uptr getCapacityImpl() { return Capacity; }
+
+private:
+ uptr Base = 0;
+ uptr Capacity = 0;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_FUCHSIA
+
+#endif // SCUDO_MEM_MAP_FUCHSIA_H_
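A hypothetical walk-through of the WindowBase/WindowSize bookkeeping documented above, assuming 0x1000-byte pages and a four-page mapping at 0x10000 (addresses chosen purely for illustration):

```cpp
// MemMapFuchsia M = ...;     // WindowBase = 0x10000, WindowSize = 0x4000
// M.unmap(0x10000, 0x1000);  // shrink from the front:
//                            //   WindowBase = 0x11000, WindowSize = 0x3000
// M.unmap(0x13000, 0x1000);  // shrink from the back:
//                            //   WindowBase = 0x11000, WindowSize = 0x2000
// M.unmap(0x11000, 0x2000);  // Size == WindowSize: the VMO is closed and
//                            //   all fields are reset to zero.
```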
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/memtag.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/memtag.h
index 7f14a30fee12..aaed2192ad75 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/memtag.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/memtag.h
@@ -11,7 +11,7 @@
#include "internal_defs.h"
-#if SCUDO_LINUX
+#if SCUDO_CAN_USE_MTE
#include <sys/auxv.h>
#include <sys/prctl.h>
#endif
@@ -25,7 +25,7 @@ namespace scudo {
// tagging. Not all operating systems enable TBI, so we only claim architectural
// support for memory tagging if the operating system enables TBI.
// HWASan uses the top byte for its own purpose and Scudo should not touch it.
-#if SCUDO_LINUX && !defined(SCUDO_DISABLE_TBI) && \
+#if SCUDO_CAN_USE_MTE && !defined(SCUDO_DISABLE_TBI) && \
!__has_feature(hwaddress_sanitizer)
inline constexpr bool archSupportsMemoryTagging() { return true; }
#else
@@ -60,7 +60,7 @@ inline NORETURN uint8_t extractTag(uptr Ptr) {
#if __clang_major__ >= 12 && defined(__aarch64__) && !defined(__ILP32__)
-#if SCUDO_LINUX
+#if SCUDO_CAN_USE_MTE
inline bool systemSupportsMemoryTagging() {
#ifndef HWCAP2_MTE
@@ -106,7 +106,7 @@ inline void enableSystemMemoryTaggingTestOnly() {
0, 0, 0);
}
-#else // !SCUDO_LINUX
+#else // !SCUDO_CAN_USE_MTE
inline bool systemSupportsMemoryTagging() { return false; }
@@ -118,7 +118,7 @@ inline NORETURN void enableSystemMemoryTaggingTestOnly() {
UNREACHABLE("memory tagging not supported");
}
-#endif // SCUDO_LINUX
+#endif // SCUDO_CAN_USE_MTE
class ScopedDisableMemoryTagChecks {
uptr PrevTCO;
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mutex.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mutex.h
index c8504c040914..05340de3e12d 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mutex.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mutex.h
@@ -11,6 +11,7 @@
#include "atomic_helpers.h"
#include "common.h"
+#include "thread_annotations.h"
#include <string.h>
@@ -20,10 +21,10 @@
namespace scudo {
-class HybridMutex {
+class CAPABILITY("mutex") HybridMutex {
public:
- bool tryLock();
- NOINLINE void lock() {
+ bool tryLock() TRY_ACQUIRE(true);
+ NOINLINE void lock() ACQUIRE() {
if (LIKELY(tryLock()))
return;
// The compiler may try to fully unroll the loop, ending up in a
@@ -40,9 +41,20 @@ public:
}
lockSlow();
}
- void unlock();
+ void unlock() RELEASE();
+
+ // TODO(chiahungduan): In general, we may want to assert the owner of lock as
+ // well. Given the current uses of HybridMutex, it's acceptable without
+ // asserting the owner. Re-evaluate this when we have certain scenarios which
+ // requires a more fine-grained lock granularity.
+ ALWAYS_INLINE void assertHeld() ASSERT_CAPABILITY(this) {
+ if (SCUDO_DEBUG)
+ assertHeldImpl();
+ }
private:
+ void assertHeldImpl();
+
static constexpr u8 NumberOfTries = 8U;
static constexpr u8 NumberOfYields = 8U;
@@ -52,13 +64,13 @@ private:
sync_mutex_t M = {};
#endif
- void lockSlow();
+ void lockSlow() ACQUIRE();
};
-class ScopedLock {
+class SCOPED_CAPABILITY ScopedLock {
public:
- explicit ScopedLock(HybridMutex &M) : Mutex(M) { Mutex.lock(); }
- ~ScopedLock() { Mutex.unlock(); }
+ explicit ScopedLock(HybridMutex &M) ACQUIRE(M) : Mutex(M) { Mutex.lock(); }
+ ~ScopedLock() RELEASE() { Mutex.unlock(); }
private:
HybridMutex &Mutex;
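The macros introduced in this hunk come from thread_annotations.h and map onto Clang's -Wthread-safety attributes. A minimal sketch of how they compose with HybridMutex, ScopedLock, and the GUARDED_BY/NO_THREAD_SAFETY_ANALYSIS annotations used in the primary allocator changes below (the `Stats` class is a hypothetical example, assumed to live in namespace scudo):

```cpp
// Hypothetical example, not part of this patch.
class Stats {
public:
  void inc() {
    ScopedLock L(M); // ACQUIRE(M) on construction, RELEASE() on destruction.
    ++Value;         // OK: the analysis knows M is held here.
  }
  // Reading without the lock would warn, unless explicitly opted out:
  uptr readRacy() NO_THREAD_SAFETY_ANALYSIS { return Value; }

private:
  HybridMutex M;
  uptr Value GUARDED_BY(M) = 0;
};
```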
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/platform.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/platform.h
index db4217ddab9f..7c7024ff570e 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/platform.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/platform.h
@@ -37,6 +37,12 @@
#define SCUDO_TRUSTY 0
#endif
+#if defined(__riscv) && (__riscv_xlen == 64)
+#define SCUDO_RISCV64 1
+#else
+#define SCUDO_RISCV64 0
+#endif
+
#if defined(__LP64__)
#define SCUDO_WORDSIZE 64U
#else
@@ -53,6 +59,10 @@
#define SCUDO_CAN_USE_PRIMARY64 (SCUDO_WORDSIZE == 64U)
#endif
+#ifndef SCUDO_CAN_USE_MTE
+#define SCUDO_CAN_USE_MTE (SCUDO_LINUX || SCUDO_TRUSTY)
+#endif
+
#ifndef SCUDO_MIN_ALIGNMENT_LOG
// We force malloc-type functions to be aligned to std::max_align_t, but there
// is no reason why the minimum alignment for all other functions can't be 8
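Because the new SCUDO_CAN_USE_MTE definition is wrapped in #ifndef, a port can override the default from its build flags rather than patching the header; a sketch with assumed values:

```cpp
// Hypothetical overrides; the values below are not part of this change.
//
//   CXXFLAGS += -DSCUDO_CAN_USE_MTE=0   // opt out even on Linux/Trusty
//   CXXFLAGS += -DSCUDO_CAN_USE_MTE=1   // claim support on a custom platform
//
// Or, equivalently, in a source file before any Scudo header is pulled in:
#define SCUDO_CAN_USE_MTE 0
#include "platform.h"
```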
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary32.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary32.h
index a3d908cee9e5..1d8a34ec65d6 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary32.h
@@ -18,6 +18,7 @@
#include "report.h"
#include "stats.h"
#include "string_utils.h"
+#include "thread_annotations.h"
namespace scudo {
@@ -41,13 +42,14 @@ namespace scudo {
template <typename Config> class SizeClassAllocator32 {
public:
- typedef typename Config::PrimaryCompactPtrT CompactPtrT;
- typedef typename Config::SizeClassMap SizeClassMap;
- static const uptr GroupSizeLog = Config::PrimaryGroupSizeLog;
+ typedef typename Config::Primary::CompactPtrT CompactPtrT;
+ typedef typename Config::Primary::SizeClassMap SizeClassMap;
+ static const uptr GroupSizeLog = Config::Primary::GroupSizeLog;
// The bytemap can only track UINT8_MAX - 1 classes.
static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), "");
// Regions should be large enough to hold the largest Block.
- static_assert((1UL << Config::PrimaryRegionSizeLog) >= SizeClassMap::MaxSize,
+ static_assert((1UL << Config::Primary::RegionSizeLog) >=
+ SizeClassMap::MaxSize,
"");
typedef SizeClassAllocator32<Config> ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
@@ -62,7 +64,7 @@ public:
static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
- void init(s32 ReleaseToOsInterval) {
+ void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
if (SCUDO_FUCHSIA)
reportError("SizeClassAllocator32 is not supported on Fuchsia");
@@ -72,7 +74,7 @@ public:
DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
PossibleRegions.init();
u32 Seed;
- const u64 Time = getMonotonicTime();
+ const u64 Time = getMonotonicTimeFast();
if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
Seed = static_cast<u32>(
Time ^ (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6));
@@ -87,24 +89,77 @@ public:
}
void unmapTestOnly() {
- while (NumberOfStashedRegions > 0)
- unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
- RegionSize);
+ {
+ ScopedLock L(RegionsStashMutex);
+ while (NumberOfStashedRegions > 0) {
+ unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
+ RegionSize);
+ }
+ }
+
uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
for (uptr I = 0; I < NumClasses; I++) {
SizeClassInfo *Sci = getSizeClassInfo(I);
+ ScopedLock L(Sci->Mutex);
if (Sci->MinRegionIndex < MinRegionIndex)
MinRegionIndex = Sci->MinRegionIndex;
if (Sci->MaxRegionIndex > MaxRegionIndex)
MaxRegionIndex = Sci->MaxRegionIndex;
*Sci = {};
}
- for (uptr I = MinRegionIndex; I < MaxRegionIndex; I++)
+
+ ScopedLock L(ByteMapMutex);
+ for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
if (PossibleRegions[I])
unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
PossibleRegions.unmapTestOnly();
}
+  // When all blocks are freed, their total size has to match `AllocatedUser`.
+ void verifyAllBlocksAreReleasedTestOnly() {
+ // `BatchGroup` and `TransferBatch` also use the blocks from BatchClass.
+ uptr BatchClassUsedInFreeLists = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ // We have to count BatchClassUsedInFreeLists in other regions first.
+ if (I == SizeClassMap::BatchClassId)
+ continue;
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ ScopedLock L1(Sci->Mutex);
+ uptr TotalBlocks = 0;
+ for (BatchGroup &BG : Sci->FreeListInfo.BlockList) {
+ // `BG::Batches` are `TransferBatches`. +1 for `BatchGroup`.
+ BatchClassUsedInFreeLists += BG.Batches.size() + 1;
+ for (const auto &It : BG.Batches)
+ TotalBlocks += It.getCount();
+ }
+
+ const uptr BlockSize = getSizeByClassId(I);
+ DCHECK_EQ(TotalBlocks, Sci->AllocatedUser / BlockSize);
+ DCHECK_EQ(Sci->FreeListInfo.PushedBlocks, Sci->FreeListInfo.PoppedBlocks);
+ }
+
+ SizeClassInfo *Sci = getSizeClassInfo(SizeClassMap::BatchClassId);
+ ScopedLock L1(Sci->Mutex);
+ uptr TotalBlocks = 0;
+ for (BatchGroup &BG : Sci->FreeListInfo.BlockList) {
+ if (LIKELY(!BG.Batches.empty())) {
+ for (const auto &It : BG.Batches)
+ TotalBlocks += It.getCount();
+ } else {
+        // A `BatchGroup` with an empty freelist doesn't have a `TransferBatch`
+        // recording itself.
+ ++TotalBlocks;
+ }
+ }
+
+ const uptr BlockSize = getSizeByClassId(SizeClassMap::BatchClassId);
+ DCHECK_EQ(TotalBlocks + BatchClassUsedInFreeLists,
+ Sci->AllocatedUser / BlockSize);
+ const uptr BlocksInUse =
+ Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
+ DCHECK_EQ(BlocksInUse, BatchClassUsedInFreeLists);
+ }
+
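To make the accounting above concrete, a worked example with assumed numbers (illustrative, not taken from a real configuration):

```cpp
// Say class I has AllocatedUser / BlockSize == 16 and all 16 blocks are
// free, spread over one BatchGroup holding two TransferBatches:
//
//   TotalBlocks(I) == 16               // first DCHECK_EQ passes
//   BatchClassUsedInFreeLists += 2 + 1 // two TBs plus the BG itself
//
// If the BatchClass region allocated 8 blocks and 5 of them sit in its own
// freelist, the final checks reduce to 5 + 3 == 8 and
// PoppedBlocks - PushedBlocks == 3.
```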
CompactPtrT compactPtr(UNUSED uptr ClassId, uptr Ptr) const {
return static_cast<CompactPtrT>(Ptr);
}
@@ -113,23 +168,37 @@ public:
return reinterpret_cast<void *>(static_cast<uptr>(CompactPtr));
}
- uptr compactPtrGroup(CompactPtrT CompactPtr) {
- return CompactPtr >> GroupSizeLog;
+ uptr compactPtrGroupBase(CompactPtrT CompactPtr) {
+ const uptr Mask = (static_cast<uptr>(1) << GroupSizeLog) - 1;
+ return CompactPtr & ~Mask;
+ }
+
+ uptr decompactGroupBase(uptr CompactPtrGroupBase) {
+ return CompactPtrGroupBase;
+ }
+
+ ALWAYS_INLINE static bool isSmallBlock(uptr BlockSize) {
+ const uptr PageSize = getPageSizeCached();
+ return BlockSize < PageSize / 16U;
+ }
+
+ ALWAYS_INLINE static bool isLargeBlock(uptr BlockSize) {
+ const uptr PageSize = getPageSizeCached();
+ return BlockSize > PageSize;
}
TransferBatch *popBatch(CacheT *C, uptr ClassId) {
DCHECK_LT(ClassId, NumClasses);
SizeClassInfo *Sci = getSizeClassInfo(ClassId);
ScopedLock L(Sci->Mutex);
- TransferBatch *B = popBatchImpl(C, ClassId);
+ TransferBatch *B = popBatchImpl(C, ClassId, Sci);
if (UNLIKELY(!B)) {
if (UNLIKELY(!populateFreeList(C, ClassId, Sci)))
return nullptr;
- B = popBatchImpl(C, ClassId);
+ B = popBatchImpl(C, ClassId, Sci);
// if `populateFreeList` succeeded, we are supposed to get free blocks.
DCHECK_NE(B, nullptr);
}
- Sci->Stats.PoppedBlocks += B->getCount();
return B;
}
@@ -141,16 +210,7 @@ public:
SizeClassInfo *Sci = getSizeClassInfo(ClassId);
if (ClassId == SizeClassMap::BatchClassId) {
ScopedLock L(Sci->Mutex);
- // Constructing a batch group in the free list will use two blocks in
- // BatchClassId. If we are pushing BatchClassId blocks, we will use the
- // blocks in the array directly (can't delegate local cache which will
- // cause a recursive allocation). However, The number of free blocks may
- // be less than two. Therefore, populate the free list before inserting
- // the blocks.
- if (Size == 1 && !populateFreeList(C, ClassId, Sci))
- return;
- pushBlocksImpl(C, ClassId, Array, Size);
- Sci->Stats.PushedBlocks += Size;
+ pushBatchClassBlocks(Sci, Array, Size);
return;
}
@@ -161,11 +221,12 @@ public:
// together.
bool SameGroup = true;
for (u32 I = 1; I < Size; ++I) {
- if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I]))
+ if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I]))
SameGroup = false;
CompactPtrT Cur = Array[I];
u32 J = I;
- while (J > 0 && compactPtrGroup(Cur) < compactPtrGroup(Array[J - 1])) {
+ while (J > 0 &&
+ compactPtrGroupBase(Cur) < compactPtrGroupBase(Array[J - 1])) {
Array[J] = Array[J - 1];
--J;
}
@@ -173,14 +234,10 @@ public:
}
ScopedLock L(Sci->Mutex);
- pushBlocksImpl(C, ClassId, Array, Size, SameGroup);
-
- Sci->Stats.PushedBlocks += Size;
- if (ClassId != SizeClassMap::BatchClassId)
- releaseToOSMaybe(Sci, ClassId);
+ pushBlocksImpl(C, ClassId, Sci, Array, Size, SameGroup);
}
- void disable() {
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
// The BatchClassId must be locked last since other classes can use it.
for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
@@ -189,11 +246,11 @@ public:
}
getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.lock();
RegionsStashMutex.lock();
- PossibleRegions.disable();
+ ByteMapMutex.lock();
}
- void enable() {
- PossibleRegions.enable();
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
+ ByteMapMutex.unlock();
RegionsStashMutex.unlock();
getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
for (uptr I = 0; I < NumClasses; I++) {
@@ -207,12 +264,20 @@ public:
uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
for (uptr I = 0; I < NumClasses; I++) {
SizeClassInfo *Sci = getSizeClassInfo(I);
+      // TODO: Calling `iterateOverBlocks` requires disabling
+      // SizeClassAllocator32. We may consider locking each region on demand
+      // instead.
+ Sci->Mutex.assertHeld();
if (Sci->MinRegionIndex < MinRegionIndex)
MinRegionIndex = Sci->MinRegionIndex;
if (Sci->MaxRegionIndex > MaxRegionIndex)
MaxRegionIndex = Sci->MaxRegionIndex;
}
- for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
+
+ // SizeClassAllocator32 is disabled, i.e., ByteMapMutex is held.
+ ByteMapMutex.assertHeld();
+
+ for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
if (PossibleRegions[I] &&
(PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) {
const uptr BlockSize = getSizeByClassId(PossibleRegions[I] - 1U);
@@ -221,6 +286,7 @@ public:
for (uptr Block = From; Block < To; Block += BlockSize)
Callback(Block);
}
+ }
}
void getStats(ScopedString *Str) {
@@ -230,22 +296,26 @@ public:
uptr PushedBlocks = 0;
for (uptr I = 0; I < NumClasses; I++) {
SizeClassInfo *Sci = getSizeClassInfo(I);
+ ScopedLock L(Sci->Mutex);
TotalMapped += Sci->AllocatedUser;
- PoppedBlocks += Sci->Stats.PoppedBlocks;
- PushedBlocks += Sci->Stats.PushedBlocks;
+ PoppedBlocks += Sci->FreeListInfo.PoppedBlocks;
+ PushedBlocks += Sci->FreeListInfo.PushedBlocks;
}
Str->append("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
"remains %zu\n",
TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks);
- for (uptr I = 0; I < NumClasses; I++)
- getStats(Str, I, 0);
+ for (uptr I = 0; I < NumClasses; I++) {
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ ScopedLock L(Sci->Mutex);
+ getStats(Str, I, Sci);
+ }
}
bool setOption(Option O, sptr Value) {
if (O == Option::ReleaseInterval) {
- const s32 Interval = Max(
- Min(static_cast<s32>(Value), Config::PrimaryMaxReleaseToOsIntervalMs),
- Config::PrimaryMinReleaseToOsIntervalMs);
+ const s32 Interval = Max(Min(static_cast<s32>(Value),
+ Config::Primary::MaxReleaseToOsIntervalMs),
+ Config::Primary::MinReleaseToOsIntervalMs);
atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
return true;
}
@@ -253,14 +323,22 @@ public:
return true;
}
- uptr releaseToOS() {
+ uptr tryReleaseToOS(uptr ClassId, ReleaseToOS ReleaseType) {
+ SizeClassInfo *Sci = getSizeClassInfo(ClassId);
+ // TODO: Once we have separate locks like primary64, we may consider using
+ // tryLock() as well.
+ ScopedLock L(Sci->Mutex);
+ return releaseToOSMaybe(Sci, ClassId, ReleaseType);
+ }
+
+ uptr releaseToOS(ReleaseToOS ReleaseType) {
uptr TotalReleasedBytes = 0;
for (uptr I = 0; I < NumClasses; I++) {
if (I == SizeClassMap::BatchClassId)
continue;
SizeClassInfo *Sci = getSizeClassInfo(I);
ScopedLock L(Sci->Mutex);
- TotalReleasedBytes += releaseToOSMaybe(Sci, I, /*Force=*/true);
+ TotalReleasedBytes += releaseToOSMaybe(Sci, I, ReleaseType);
}
return TotalReleasedBytes;
}
@@ -277,42 +355,42 @@ public:
private:
static const uptr NumClasses = SizeClassMap::NumClasses;
- static const uptr RegionSize = 1UL << Config::PrimaryRegionSizeLog;
+ static const uptr RegionSize = 1UL << Config::Primary::RegionSizeLog;
static const uptr NumRegions =
- SCUDO_MMAP_RANGE_SIZE >> Config::PrimaryRegionSizeLog;
+ SCUDO_MMAP_RANGE_SIZE >> Config::Primary::RegionSizeLog;
static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
typedef FlatByteMap<NumRegions> ByteMap;
- struct SizeClassStats {
- uptr PoppedBlocks;
- uptr PushedBlocks;
- };
-
struct ReleaseToOsInfo {
- uptr PushedBlocksAtLastRelease;
+ uptr BytesInFreeListAtLastCheckpoint;
uptr RangesReleased;
uptr LastReleasedBytes;
u64 LastReleaseAtNs;
};
+ struct BlocksInfo {
+ SinglyLinkedList<BatchGroup> BlockList = {};
+ uptr PoppedBlocks = 0;
+ uptr PushedBlocks = 0;
+ };
+
struct alignas(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
HybridMutex Mutex;
- SinglyLinkedList<BatchGroup> FreeList;
- uptr CurrentRegion;
- uptr CurrentRegionAllocated;
- SizeClassStats Stats;
+ BlocksInfo FreeListInfo GUARDED_BY(Mutex);
+ uptr CurrentRegion GUARDED_BY(Mutex);
+ uptr CurrentRegionAllocated GUARDED_BY(Mutex);
u32 RandState;
- uptr AllocatedUser;
+ uptr AllocatedUser GUARDED_BY(Mutex);
// Lowest & highest region index allocated for this size class, to avoid
// looping through the whole NumRegions.
- uptr MinRegionIndex;
- uptr MaxRegionIndex;
- ReleaseToOsInfo ReleaseInfo;
+ uptr MinRegionIndex GUARDED_BY(Mutex);
+ uptr MaxRegionIndex GUARDED_BY(Mutex);
+ ReleaseToOsInfo ReleaseInfo GUARDED_BY(Mutex);
};
static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
uptr computeRegionId(uptr Mem) {
- const uptr Id = Mem >> Config::PrimaryRegionSizeLog;
+ const uptr Id = Mem >> Config::Primary::RegionSizeLog;
CHECK_LT(Id, NumRegions);
return Id;
}
@@ -332,17 +410,22 @@ private:
else
MapSize = RegionSize;
} else {
- Region = roundUpTo(MapBase, RegionSize);
+ Region = roundUp(MapBase, RegionSize);
unmap(reinterpret_cast<void *>(MapBase), Region - MapBase);
MapSize = RegionSize;
}
const uptr End = Region + MapSize;
if (End != MapEnd)
unmap(reinterpret_cast<void *>(End), MapEnd - End);
+
+ DCHECK_EQ(Region % RegionSize, 0U);
+ static_assert(Config::Primary::RegionSizeLog == GroupSizeLog,
+ "Memory group should be the same size as Region");
+
return Region;
}
- uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) {
+ uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) REQUIRES(Sci->Mutex) {
DCHECK_LT(ClassId, NumClasses);
uptr Region = 0;
{
@@ -359,6 +442,7 @@ private:
Sci->MinRegionIndex = RegionIndex;
if (RegionIndex > Sci->MaxRegionIndex)
Sci->MaxRegionIndex = RegionIndex;
+ ScopedLock L(ByteMapMutex);
PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId + 1U));
}
return Region;
@@ -369,15 +453,125 @@ private:
return &SizeClassInfoArray[ClassId];
}
+ void pushBatchClassBlocks(SizeClassInfo *Sci, CompactPtrT *Array, u32 Size)
+ REQUIRES(Sci->Mutex) {
+ DCHECK_EQ(Sci, getSizeClassInfo(SizeClassMap::BatchClassId));
+
+    // Free blocks are recorded by TransferBatches in the freelist for all
+    // size-classes. In addition, TransferBatches are allocated from
+    // BatchClassId. In order not to use an additional block to record the free
+    // blocks in BatchClassId, they are self-contained, i.e., a TransferBatch
+    // records the block address of itself. See the figure below:
+ //
+ // TransferBatch at 0xABCD
+ // +----------------------------+
+ // | Free blocks' addr |
+ // | +------+------+------+ |
+ // | |0xABCD|... |... | |
+ // | +------+------+------+ |
+ // +----------------------------+
+ //
+ // When we allocate all the free blocks in the TransferBatch, the block used
+ // by TransferBatch is also free for use. We don't need to recycle the
+ // TransferBatch. Note that the correctness is maintained by the invariant,
+ //
+    //   The unit of each popBatch() request is an entire TransferBatch.
+    //   Returning part of the blocks in a TransferBatch is invalid.
+ //
+ // This ensures that TransferBatch won't leak the address itself while it's
+ // still holding other valid data.
+ //
+ // Besides, BatchGroup is also allocated from BatchClassId and has its
+ // address recorded in the TransferBatch too. To maintain the correctness,
+ //
+ // The address of BatchGroup is always recorded in the last TransferBatch
+    //   in the freelist (which also implies that the freelist should only be
+ // updated with push_front). Once the last TransferBatch is popped,
+ // the block used by BatchGroup is also free for use.
+ //
+ // With this approach, the blocks used by BatchGroup and TransferBatch are
+ // reusable and don't need additional space for them.
+
+ Sci->FreeListInfo.PushedBlocks += Size;
+ BatchGroup *BG = Sci->FreeListInfo.BlockList.front();
+
+ if (BG == nullptr) {
+ // Construct `BatchGroup` on the last element.
+ BG = reinterpret_cast<BatchGroup *>(
+ decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
+ --Size;
+ BG->Batches.clear();
+      // BatchClass doesn't use the memory group. Use `0` to indicate there's
+      // no memory group here.
+ BG->CompactPtrGroupBase = 0;
+ // `BG` is also the block of BatchClassId. Note that this is different
+ // from `CreateGroup` in `pushBlocksImpl`
+ BG->PushedBlocks = 1;
+ BG->BytesInBGAtLastCheckpoint = 0;
+ BG->MaxCachedPerBatch = TransferBatch::getMaxCached(
+ getSizeByClassId(SizeClassMap::BatchClassId));
+
+ Sci->FreeListInfo.BlockList.push_front(BG);
+ }
+
+ if (UNLIKELY(Size == 0))
+ return;
+
+    // This happens in two cases.
+    // 1. A new `BatchGroup` was just allocated.
+    // 2. Only one block is pushed when the freelist is empty.
+ if (BG->Batches.empty()) {
+ // Construct the `TransferBatch` on the last element.
+ TransferBatch *TB = reinterpret_cast<TransferBatch *>(
+ decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
+ TB->clear();
+ // As mentioned above, addresses of `TransferBatch` and `BatchGroup` are
+ // recorded in the TransferBatch.
+ TB->add(Array[Size - 1]);
+ TB->add(
+ compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(BG)));
+ --Size;
+ DCHECK_EQ(BG->PushedBlocks, 1U);
+ // `TB` is also the block of BatchClassId.
+ BG->PushedBlocks += 1;
+ BG->Batches.push_front(TB);
+ }
+
+ TransferBatch *CurBatch = BG->Batches.front();
+ DCHECK_NE(CurBatch, nullptr);
+
+ for (u32 I = 0; I < Size;) {
+ u16 UnusedSlots =
+ static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
+ if (UnusedSlots == 0) {
+ CurBatch = reinterpret_cast<TransferBatch *>(
+ decompactPtr(SizeClassMap::BatchClassId, Array[I]));
+ CurBatch->clear();
+ // Self-contained
+ CurBatch->add(Array[I]);
+ ++I;
+ // TODO(chiahungduan): Avoid the use of push_back() in `Batches` of
+ // BatchClassId.
+ BG->Batches.push_front(CurBatch);
+ UnusedSlots = static_cast<u16>(BG->MaxCachedPerBatch - 1);
+ }
+      // `UnusedSlots` is u16 so the result will also fit in u16.
+ const u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
+ CurBatch->appendFromArray(&Array[I], AppendSize);
+ I += AppendSize;
+ }
+
+ BG->PushedBlocks += Size;
+ }
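A toy model (assumed names, not Scudo code) of the self-contained encoding described above, where the batch lives inside one of the blocks it tracks and slot 0 records its own address:

```cpp
#include <new>

struct ToyBatch {
  static constexpr int MaxBlocks = 4;
  void *Blocks[MaxBlocks];
  int Count = 0;
};

ToyBatch *makeSelfContained(void *FreeBlock) {
  // Construct the batch inside the free block itself; because slot 0 holds
  // the block's own address, draining every slot also frees the metadata.
  ToyBatch *B = new (FreeBlock) ToyBatch();
  B->Blocks[B->Count++] = FreeBlock;
  return B;
}
```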
// Push the blocks to their batch group. The layout will be like,
//
- // FreeList - > BG -> BG -> BG
- // | | |
- // v v v
- // TB TB TB
- // |
- // v
- // TB
+ // FreeListInfo.BlockList - > BG -> BG -> BG
+ // | | |
+ // v v v
+ // TB TB TB
+ // |
+ // v
+ // TB
//
// Each BlockGroup(BG) will associate with unique group id and the free blocks
// are managed by a list of TransferBatch(TB). To reduce the time of inserting
@@ -386,44 +580,23 @@ private:
// Use `SameGroup=true` to indicate that all blocks in the array are from the
// same group then we will skip checking the group id of each block.
//
- // Note that this aims to have a better management of dirty pages, i.e., the
- // RSS usage won't grow indefinitely. There's an exception that we may not put
- // a block to its associated group. While populating new blocks, we may have
- // blocks cross different groups. However, most cases will fall into same
- // group and they are supposed to be popped soon. In that case, it's not worth
- // sorting the array with the almost-sorted property. Therefore, we use
- // `SameGroup=true` instead.
- //
// The region mutex needs to be held while calling this method.
- void pushBlocksImpl(CacheT *C, uptr ClassId, CompactPtrT *Array, u32 Size,
- bool SameGroup = false) {
+ void pushBlocksImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci,
+ CompactPtrT *Array, u32 Size, bool SameGroup = false)
+ REQUIRES(Sci->Mutex) {
+ DCHECK_NE(ClassId, SizeClassMap::BatchClassId);
DCHECK_GT(Size, 0U);
- SizeClassInfo *Sci = getSizeClassInfo(ClassId);
- auto CreateGroup = [&](uptr GroupId) {
- BatchGroup *BG = nullptr;
- TransferBatch *TB = nullptr;
- if (ClassId == SizeClassMap::BatchClassId) {
- DCHECK_GE(Size, 2U);
- BG = reinterpret_cast<BatchGroup *>(
- decompactPtr(ClassId, Array[Size - 1]));
- BG->Batches.clear();
-
- TB = reinterpret_cast<TransferBatch *>(
- decompactPtr(ClassId, Array[Size - 2]));
- TB->clear();
- } else {
- BG = C->createGroup();
- BG->Batches.clear();
+ auto CreateGroup = [&](uptr CompactPtrGroupBase) {
+ BatchGroup *BG = C->createGroup();
+ BG->Batches.clear();
+ TransferBatch *TB = C->createBatch(ClassId, nullptr);
+ TB->clear();
- TB = C->createBatch(ClassId, nullptr);
- TB->clear();
- }
-
- BG->GroupId = GroupId;
+ BG->CompactPtrGroupBase = CompactPtrGroupBase;
BG->Batches.push_front(TB);
BG->PushedBlocks = 0;
- BG->PushedBlocksAtLastCheckpoint = 0;
+ BG->BytesInBGAtLastCheckpoint = 0;
BG->MaxCachedPerBatch =
TransferBatch::getMaxCached(getSizeByClassId(ClassId));
@@ -456,38 +629,34 @@ private:
BG->PushedBlocks += Size;
};
- BatchGroup *Cur = Sci->FreeList.front();
-
- if (ClassId == SizeClassMap::BatchClassId) {
- if (Cur == nullptr) {
- // Don't need to classify BatchClassId.
- Cur = CreateGroup(/*GroupId=*/0);
- Sci->FreeList.push_front(Cur);
- }
- InsertBlocks(Cur, Array, Size);
- return;
- }
+ Sci->FreeListInfo.PushedBlocks += Size;
+ BatchGroup *Cur = Sci->FreeListInfo.BlockList.front();
// In the following, `Cur` always points to the BatchGroup for blocks that
// will be pushed next. `Prev` is the element right before `Cur`.
BatchGroup *Prev = nullptr;
- while (Cur != nullptr && compactPtrGroup(Array[0]) > Cur->GroupId) {
+ while (Cur != nullptr &&
+ compactPtrGroupBase(Array[0]) > Cur->CompactPtrGroupBase) {
Prev = Cur;
Cur = Cur->Next;
}
- if (Cur == nullptr || compactPtrGroup(Array[0]) != Cur->GroupId) {
- Cur = CreateGroup(compactPtrGroup(Array[0]));
+ if (Cur == nullptr ||
+ compactPtrGroupBase(Array[0]) != Cur->CompactPtrGroupBase) {
+ Cur = CreateGroup(compactPtrGroupBase(Array[0]));
if (Prev == nullptr)
- Sci->FreeList.push_front(Cur);
+ Sci->FreeListInfo.BlockList.push_front(Cur);
else
- Sci->FreeList.insert(Prev, Cur);
+ Sci->FreeListInfo.BlockList.insert(Prev, Cur);
}
// All the blocks are from the same group, just push without checking group
// id.
if (SameGroup) {
+ for (u32 I = 0; I < Size; ++I)
+ DCHECK_EQ(compactPtrGroupBase(Array[I]), Cur->CompactPtrGroupBase);
+
InsertBlocks(Cur, Array, Size);
return;
}
@@ -496,19 +665,21 @@ private:
// push them to their group together.
u32 Count = 1;
for (u32 I = 1; I < Size; ++I) {
- if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I])) {
- DCHECK_EQ(compactPtrGroup(Array[I - 1]), Cur->GroupId);
+ if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I])) {
+ DCHECK_EQ(compactPtrGroupBase(Array[I - 1]), Cur->CompactPtrGroupBase);
InsertBlocks(Cur, Array + I - Count, Count);
- while (Cur != nullptr && compactPtrGroup(Array[I]) > Cur->GroupId) {
+ while (Cur != nullptr &&
+ compactPtrGroupBase(Array[I]) > Cur->CompactPtrGroupBase) {
Prev = Cur;
Cur = Cur->Next;
}
- if (Cur == nullptr || compactPtrGroup(Array[I]) != Cur->GroupId) {
- Cur = CreateGroup(compactPtrGroup(Array[I]));
+ if (Cur == nullptr ||
+ compactPtrGroupBase(Array[I]) != Cur->CompactPtrGroupBase) {
+ Cur = CreateGroup(compactPtrGroupBase(Array[I]));
DCHECK_NE(Prev, nullptr);
- Sci->FreeList.insert(Prev, Cur);
+ Sci->FreeListInfo.BlockList.insert(Prev, Cur);
}
Count = 1;
@@ -524,13 +695,28 @@ private:
// group id will be considered first.
//
// The region mutex needs to be held while calling this method.
- TransferBatch *popBatchImpl(CacheT *C, uptr ClassId) {
- SizeClassInfo *Sci = getSizeClassInfo(ClassId);
- if (Sci->FreeList.empty())
+ TransferBatch *popBatchImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
+ REQUIRES(Sci->Mutex) {
+ if (Sci->FreeListInfo.BlockList.empty())
return nullptr;
- SinglyLinkedList<TransferBatch> &Batches = Sci->FreeList.front()->Batches;
- DCHECK(!Batches.empty());
+ SinglyLinkedList<TransferBatch> &Batches =
+ Sci->FreeListInfo.BlockList.front()->Batches;
+
+ if (Batches.empty()) {
+ DCHECK_EQ(ClassId, SizeClassMap::BatchClassId);
+ BatchGroup *BG = Sci->FreeListInfo.BlockList.front();
+ Sci->FreeListInfo.BlockList.pop_front();
+
+ // Block used by `BatchGroup` is from BatchClassId. Turn the block into
+ // `TransferBatch` with single block.
+ TransferBatch *TB = reinterpret_cast<TransferBatch *>(BG);
+ TB->clear();
+ TB->add(
+ compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB)));
+ Sci->FreeListInfo.PoppedBlocks += 1;
+ return TB;
+ }
TransferBatch *B = Batches.front();
Batches.pop_front();
@@ -538,8 +724,8 @@ private:
DCHECK_GT(B->getCount(), 0U);
if (Batches.empty()) {
- BatchGroup *BG = Sci->FreeList.front();
- Sci->FreeList.pop_front();
+ BatchGroup *BG = Sci->FreeListInfo.BlockList.front();
+ Sci->FreeListInfo.BlockList.pop_front();
// We don't keep BatchGroup with zero blocks to avoid empty-checking while
// allocating. Note that block used by constructing BatchGroup is recorded
@@ -550,10 +736,12 @@ private:
C->deallocate(SizeClassMap::BatchClassId, BG);
}
+ Sci->FreeListInfo.PoppedBlocks += B->getCount();
return B;
}
- NOINLINE bool populateFreeList(CacheT *C, uptr ClassId, SizeClassInfo *Sci) {
+ NOINLINE bool populateFreeList(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
+ REQUIRES(Sci->Mutex) {
uptr Region;
uptr Offset;
// If the size-class currently has a region associated to it, use it. The
@@ -598,20 +786,35 @@ private:
uptr P = Region + Offset;
for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
ShuffleArray[I] = reinterpret_cast<CompactPtrT>(P);
- // No need to shuffle the batches size class.
- if (ClassId != SizeClassMap::BatchClassId)
- shuffle(ShuffleArray, NumberOfBlocks, &Sci->RandState);
- for (u32 I = 0; I < NumberOfBlocks;) {
- // `MaxCount` is u16 so the result will also fit in u16.
- const u16 N = static_cast<u16>(Min<u32>(MaxCount, NumberOfBlocks - I));
- // Note that the N blocks here may have different group ids. Given that
- // it only happens when it crosses the group size boundary. Instead of
- // sorting them, treat them as same group here to avoid sorting the
- // almost-sorted blocks.
- pushBlocksImpl(C, ClassId, &ShuffleArray[I], N, /*SameGroup=*/true);
- I += N;
+
+ if (ClassId != SizeClassMap::BatchClassId) {
+ u32 N = 1;
+ uptr CurGroup = compactPtrGroupBase(ShuffleArray[0]);
+ for (u32 I = 1; I < NumberOfBlocks; I++) {
+ if (UNLIKELY(compactPtrGroupBase(ShuffleArray[I]) != CurGroup)) {
+ shuffle(ShuffleArray + I - N, N, &Sci->RandState);
+ pushBlocksImpl(C, ClassId, Sci, ShuffleArray + I - N, N,
+ /*SameGroup=*/true);
+ N = 1;
+ CurGroup = compactPtrGroupBase(ShuffleArray[I]);
+ } else {
+ ++N;
+ }
+ }
+
+ shuffle(ShuffleArray + NumberOfBlocks - N, N, &Sci->RandState);
+ pushBlocksImpl(C, ClassId, Sci, &ShuffleArray[NumberOfBlocks - N], N,
+ /*SameGroup=*/true);
+ } else {
+ pushBatchClassBlocks(Sci, ShuffleArray, NumberOfBlocks);
}
+    // Note that `PushedBlocks` and `PoppedBlocks` are supposed to only record
+    // the requests from `PushBlocks` and `PopBatch`, which are the external
+    // interfaces. `populateFreeList` is an internal interface, so we subtract
+    // the blocks pushed here to avoid skewing the stats.
+ Sci->FreeListInfo.PushedBlocks -= NumberOfBlocks;
+
const uptr AllocatedUser = Size * NumberOfBlocks;
C->getStats().add(StatFree, AllocatedUser);
DCHECK_LE(Sci->CurrentRegionAllocated + AllocatedUser, RegionSize);
@@ -629,55 +832,98 @@ private:
return true;
}
- void getStats(ScopedString *Str, uptr ClassId, uptr Rss) {
- SizeClassInfo *Sci = getSizeClassInfo(ClassId);
+ void getStats(ScopedString *Str, uptr ClassId, SizeClassInfo *Sci)
+ REQUIRES(Sci->Mutex) {
if (Sci->AllocatedUser == 0)
return;
- const uptr InUse = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks;
- const uptr AvailableChunks = Sci->AllocatedUser / getSizeByClassId(ClassId);
+ const uptr BlockSize = getSizeByClassId(ClassId);
+ const uptr InUse =
+ Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
+ const uptr BytesInFreeList = Sci->AllocatedUser - InUse * BlockSize;
+ uptr PushedBytesDelta = 0;
+ if (BytesInFreeList >= Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
+ PushedBytesDelta =
+ BytesInFreeList - Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
+ }
+ const uptr AvailableChunks = Sci->AllocatedUser / BlockSize;
Str->append(" %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
- "inuse: %6zu avail: %6zu rss: %6zuK releases: %6zu\n",
+ "inuse: %6zu avail: %6zu releases: %6zu last released: %6zuK "
+ "latest pushed bytes: %6zuK\n",
ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
- Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks, InUse,
- AvailableChunks, Rss >> 10, Sci->ReleaseInfo.RangesReleased);
+ Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks,
+ InUse, AvailableChunks, Sci->ReleaseInfo.RangesReleased,
+ Sci->ReleaseInfo.LastReleasedBytes >> 10,
+ PushedBytesDelta >> 10);
}
NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
- bool Force = false) {
+ ReleaseToOS ReleaseType = ReleaseToOS::Normal)
+ REQUIRES(Sci->Mutex) {
const uptr BlockSize = getSizeByClassId(ClassId);
const uptr PageSize = getPageSizeCached();
- DCHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
+ DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
const uptr BytesInFreeList =
Sci->AllocatedUser -
- (Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks) * BlockSize;
- if (BytesInFreeList < PageSize)
- return 0; // No chance to release anything.
- const uptr BytesPushed =
- (Sci->Stats.PushedBlocks - Sci->ReleaseInfo.PushedBlocksAtLastRelease) *
- BlockSize;
- if (BytesPushed < PageSize)
- return 0; // Nothing new to release.
-
- const bool CheckDensity = BlockSize < PageSize / 16U;
+ (Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks) *
+ BlockSize;
+
+ if (UNLIKELY(BytesInFreeList == 0))
+ return 0;
+
+ if (BytesInFreeList <= Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint)
+ Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
+
+ // Always update `BytesInFreeListAtLastCheckpoint` with the smallest value
+ // so that we won't underestimate the releasable pages. For example, the
+ // following is the region usage,
+ //
+ // BytesInFreeListAtLastCheckpoint AllocatedUser
+ // v v
+ // |--------------------------------------->
+ // ^ ^
+ // BytesInFreeList ReleaseThreshold
+ //
+ // In general, if we have collected enough bytes and the amount of free
+ // bytes meets the ReleaseThreshold, we will try to do page release. If we
+ // don't update `BytesInFreeListAtLastCheckpoint` when the current
+    // `BytesInFreeList` is smaller, we may take a longer time to wait for enough
+ // freed blocks because we miss the bytes between
+ // (BytesInFreeListAtLastCheckpoint - BytesInFreeList).
+ const uptr PushedBytesDelta =
+ BytesInFreeList - Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
+ if (PushedBytesDelta < PageSize && ReleaseType != ReleaseToOS::ForceAll)
+ return 0;
+
+ const bool CheckDensity =
+ isSmallBlock(BlockSize) && ReleaseType != ReleaseToOS::ForceAll;
// Releasing smaller blocks is expensive, so we want to make sure that a
// significant amount of bytes are free, and that there has been a good
// amount of batches pushed to the freelist before attempting to release.
- if (CheckDensity) {
- if (!Force && BytesPushed < Sci->AllocatedUser / 16U)
+ if (CheckDensity && ReleaseType == ReleaseToOS::Normal)
+ if (PushedBytesDelta < Sci->AllocatedUser / 16U)
return 0;
- }
- if (!Force) {
+ if (ReleaseType == ReleaseToOS::Normal) {
const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
if (IntervalMs < 0)
return 0;
- if (Sci->ReleaseInfo.LastReleaseAtNs +
- static_cast<u64>(IntervalMs) * 1000000 >
- getMonotonicTime()) {
- return 0; // Memory was returned recently.
+
+      // The constant 8 here is selected from profiling some apps; the number
+      // of unreleased pages in the large size classes is around 16 pages or
+      // more. We choose half of that as a heuristic, which also avoids a page
+      // release on every pushBlocks() attempt by large blocks.
+ const bool ByPassReleaseInterval =
+ isLargeBlock(BlockSize) && PushedBytesDelta > 8 * PageSize;
+ if (!ByPassReleaseInterval) {
+ if (Sci->ReleaseInfo.LastReleaseAtNs +
+ static_cast<u64>(IntervalMs) * 1000000 >
+ getMonotonicTimeFast()) {
+ // Memory was returned recently.
+ return 0;
+ }
}
- }
+ } // if (ReleaseType == ReleaseToOS::Normal)
const uptr First = Sci->MinRegionIndex;
const uptr Last = Sci->MaxRegionIndex;
@@ -687,24 +933,24 @@ private:
const uptr Base = First * RegionSize;
const uptr NumberOfRegions = Last - First + 1U;
const uptr GroupSize = (1U << GroupSizeLog);
- const uptr CurRegionGroupId =
- compactPtrGroup(compactPtr(ClassId, Sci->CurrentRegion));
+ const uptr CurGroupBase =
+ compactPtrGroupBase(compactPtr(ClassId, Sci->CurrentRegion));
ReleaseRecorder Recorder(Base);
- PageReleaseContext Context(BlockSize, RegionSize, NumberOfRegions);
+ PageReleaseContext Context(BlockSize, NumberOfRegions,
+ /*ReleaseSize=*/RegionSize);
auto DecompactPtr = [](CompactPtrT CompactPtr) {
return reinterpret_cast<uptr>(CompactPtr);
};
- for (BatchGroup &BG : Sci->FreeList) {
- const uptr PushedBytesDelta =
- BG.PushedBlocks - BG.PushedBlocksAtLastCheckpoint;
- if (PushedBytesDelta * BlockSize < PageSize)
- continue;
-
- uptr AllocatedGroupSize = BG.GroupId == CurRegionGroupId
+ for (BatchGroup &BG : Sci->FreeListInfo.BlockList) {
+ const uptr GroupBase = decompactGroupBase(BG.CompactPtrGroupBase);
+      // The `GroupSize` may not be divisible by `BlockSize`, which means there
+      // is unused space at the end of the Region. Exclude that space to avoid
+      // an unused page map entry.
+ uptr AllocatedGroupSize = GroupBase == CurGroupBase
? Sci->CurrentRegionAllocated
- : GroupSize;
+ : roundDownSlow(GroupSize, BlockSize);
if (AllocatedGroupSize == 0)
continue;
@@ -713,6 +959,16 @@ private:
const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
BG.Batches.front()->getCount();
const uptr BytesInBG = NumBlocks * BlockSize;
+
+ if (ReleaseType != ReleaseToOS::ForceAll &&
+ BytesInBG <= BG.BytesInBGAtLastCheckpoint) {
+ BG.BytesInBGAtLastCheckpoint = BytesInBG;
+ continue;
+ }
+ const uptr PushedBytesDelta = BytesInBG - BG.BytesInBGAtLastCheckpoint;
+ if (ReleaseType != ReleaseToOS::ForceAll && PushedBytesDelta < PageSize)
+ continue;
+
// Given the randomness property, we try to release the pages only if the
// bytes used by free blocks exceed certain proportion of allocated
// spaces.
@@ -721,42 +977,70 @@ private:
continue;
}
- BG.PushedBlocksAtLastCheckpoint = BG.PushedBlocks;
- // Note that we don't always visit blocks in each BatchGroup so that we
- // may miss the chance of releasing certain pages that cross BatchGroups.
- Context.markFreeBlocks(BG.Batches, DecompactPtr, Base);
+ // TODO: Consider updating this after page release if `ReleaseRecorder`
+      // can tell the released bytes in each group.
+ BG.BytesInBGAtLastCheckpoint = BytesInBG;
+
+ const uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
+ const uptr RegionIndex = (GroupBase - Base) / RegionSize;
+
+ if (NumBlocks == MaxContainedBlocks) {
+ for (const auto &It : BG.Batches)
+ for (u16 I = 0; I < It.getCount(); ++I)
+ DCHECK_EQ(compactPtrGroupBase(It.get(I)), BG.CompactPtrGroupBase);
+
+ const uptr To = GroupBase + AllocatedGroupSize;
+ Context.markRangeAsAllCounted(GroupBase, To, GroupBase, RegionIndex,
+ AllocatedGroupSize);
+ } else {
+ DCHECK_LT(NumBlocks, MaxContainedBlocks);
+
+ // Note that we don't always visit blocks in each BatchGroup so that we
+ // may miss the chance of releasing certain pages that cross
+ // BatchGroups.
+ Context.markFreeBlocksInRegion(BG.Batches, DecompactPtr, GroupBase,
+ RegionIndex, AllocatedGroupSize,
+ /*MayContainLastBlockInRegion=*/true);
+ }
+
+      // We may not be able to do the page release in a rare case where we
+      // fail on the PageMap allocation.
+ if (UNLIKELY(!Context.hasBlockMarked()))
+ return 0;
}
if (!Context.hasBlockMarked())
return 0;
auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
+ ScopedLock L(ByteMapMutex);
return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
};
releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
if (Recorder.getReleasedRangesCount() > 0) {
- Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
+ Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
}
- Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
+ Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();
return TotalReleasedBytes;
}
SizeClassInfo SizeClassInfoArray[NumClasses] = {};
+ HybridMutex ByteMapMutex;
// Track the regions in use, 0 is unused, otherwise store ClassId + 1.
- ByteMap PossibleRegions = {};
+ ByteMap PossibleRegions GUARDED_BY(ByteMapMutex) = {};
atomic_s32 ReleaseToOsIntervalMs = {};
// Unless several threads request regions simultaneously from different size
// classes, the stash rarely contains more than 1 entry.
static constexpr uptr MaxStashedRegions = 4;
HybridMutex RegionsStashMutex;
- uptr NumberOfStashedRegions = 0;
- uptr RegionsStash[MaxStashedRegions] = {};
+ uptr NumberOfStashedRegions GUARDED_BY(RegionsStashMutex) = 0;
+ uptr RegionsStash[MaxStashedRegions] GUARDED_BY(RegionsStashMutex) = {};
};
} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary64.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary64.h
index b653bc802022..fd7a1f9e80cd 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary64.h
@@ -13,11 +13,13 @@
#include "common.h"
#include "list.h"
#include "local_cache.h"
+#include "mem_map.h"
#include "memtag.h"
#include "options.h"
#include "release.h"
#include "stats.h"
#include "string_utils.h"
+#include "thread_annotations.h"
namespace scudo {
@@ -43,10 +45,11 @@ namespace scudo {
template <typename Config> class SizeClassAllocator64 {
public:
- typedef typename Config::PrimaryCompactPtrT CompactPtrT;
- static const uptr CompactPtrScale = Config::PrimaryCompactPtrScale;
- static const uptr GroupSizeLog = Config::PrimaryGroupSizeLog;
- typedef typename Config::SizeClassMap SizeClassMap;
+ typedef typename Config::Primary::CompactPtrT CompactPtrT;
+ typedef typename Config::Primary::SizeClassMap SizeClassMap;
+ static const uptr CompactPtrScale = Config::Primary::CompactPtrScale;
+ static const uptr GroupSizeLog = Config::Primary::GroupSizeLog;
+ static const uptr GroupScale = GroupSizeLog - CompactPtrScale;
typedef SizeClassAllocator64<Config> ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
typedef typename CacheT::TransferBatch TransferBatch;
@@ -54,62 +57,202 @@ public:
static uptr getSizeByClassId(uptr ClassId) {
return (ClassId == SizeClassMap::BatchClassId)
- ? roundUpTo(sizeof(TransferBatch), 1U << CompactPtrScale)
+ ? roundUp(sizeof(TransferBatch), 1U << CompactPtrScale)
: SizeClassMap::getSizeByClassId(ClassId);
}
static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
- void init(s32 ReleaseToOsInterval) {
+ void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
- DCHECK_EQ(PrimaryBase, 0U);
+
+ const uptr PageSize = getPageSizeCached();
+ const uptr GroupSize = (1U << GroupSizeLog);
+ const uptr PagesInGroup = GroupSize / PageSize;
+ const uptr MinSizeClass = getSizeByClassId(1);
+ // When trying to release pages back to memory, visiting smaller size
+ // classes is expensive. Therefore, we only try to release smaller size
+ // classes when the amount of free blocks goes over a certain threshold (See
+ // the comment in releaseToOSMaybe() for more details). For example, for
+ // size class 32, we only do the release when the size of free blocks is
+ // greater than 97% of pages in a group. However, this may introduce another
+ // issue: the number of free blocks may bounce between 97% and 100%, which
+ // means we may attempt many page releases but only release very few pages
+ // each time (less than 3% of a group). Even though `ReleaseToOsIntervalMs`
+ // slightly reduces the frequency of these calls, it is better to have
+ // another guard to mitigate this issue.
+ //
+ // Here we add another constraint on the minimum size requirement. The
+ // constraint is determined by the size of in-use blocks in the minimal size
+ // class. Take size class 32 as an example,
+ //
+ // +- one memory group -+
+ // +----------------------+------+
+ // | 97% of free blocks | |
+ // +----------------------+------+
+ // \ /
+ // 3% in-use blocks
+ //
+ // * The release size threshold is 97%.
+ //
+ // The 3% size in a group is about 7 pages. For two consecutive
+ // releaseToOSMaybe() calls, we require the difference in `PushedBlocks`
+ // to be greater than 7 pages. This mitigates the page-release thrashing
+ // caused by memory usage bouncing around the threshold. The smallest size
+ // class takes the longest time to do the page release, so we use the size
+ // of its in-use blocks as a heuristic.
+ SmallerBlockReleasePageDelta =
+ PagesInGroup * (1 + MinSizeClass / 16U) / 100;
+
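To see the numbers behind the comment above, here is a minimal standalone sketch; the 4 KiB page size and 1 MiB group size are assumptions for illustration (they reproduce the "about 7 pages" figure), and the formula mirrors the one just computed:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Assumed configuration: 4 KiB pages and 1 MiB memory groups.
      const uint64_t PageSize = 4096;
      const uint64_t GroupSize = 1ULL << 20;
      const uint64_t PagesInGroup = GroupSize / PageSize; // 256 pages
      const uint64_t MinSizeClass = 32;                   // smallest size class

      // Same formula as above: (1 + 32/16)% = 3% of the pages in a group.
      const uint64_t SmallerBlockReleasePageDelta =
          PagesInGroup * (1 + MinSizeClass / 16) / 100;
      std::printf("delta = %llu pages\n", // prints "delta = 7 pages"
                  (unsigned long long)SmallerBlockReleasePageDelta);
    }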
// Reserve the space required for the Primary.
- PrimaryBase = reinterpret_cast<uptr>(
- map(nullptr, PrimarySize, nullptr, MAP_NOACCESS, &Data));
+ CHECK(ReservedMemory.create(/*Addr=*/0U, PrimarySize,
+ "scudo:primary_reserve"));
+ PrimaryBase = ReservedMemory.getBase();
+ DCHECK_NE(PrimaryBase, 0U);
u32 Seed;
- const u64 Time = getMonotonicTime();
+ const u64 Time = getMonotonicTimeFast();
if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
Seed = static_cast<u32>(Time ^ (PrimaryBase >> 12));
- const uptr PageSize = getPageSizeCached();
+
for (uptr I = 0; I < NumClasses; I++) {
RegionInfo *Region = getRegionInfo(I);
// The actual start of a region is offset by a random number of pages
// when PrimaryEnableRandomOffset is set.
- Region->RegionBeg = getRegionBaseByClassId(I) +
- (Config::PrimaryEnableRandomOffset
- ? ((getRandomModN(&Seed, 16) + 1) * PageSize)
- : 0);
+ Region->RegionBeg =
+ (PrimaryBase + (I << Config::Primary::RegionSizeLog)) +
+ (Config::Primary::EnableRandomOffset
+ ? ((getRandomModN(&Seed, 16) + 1) * PageSize)
+ : 0);
Region->RandState = getRandomU32(&Seed);
+ // Releasing small blocks is expensive, so set a higher threshold to avoid
+ // frequent page releases.
+ if (isSmallBlock(getSizeByClassId(I)))
+ Region->TryReleaseThreshold = PageSize * SmallerBlockReleasePageDelta;
+ else
+ Region->TryReleaseThreshold = PageSize;
Region->ReleaseInfo.LastReleaseAtNs = Time;
+
+ Region->MemMapInfo.MemMap = ReservedMemory.dispatch(
+ PrimaryBase + (I << Config::Primary::RegionSizeLog), RegionSize);
+ CHECK(Region->MemMapInfo.MemMap.isAllocated());
}
+ shuffle(RegionInfoArray, NumClasses, &Seed);
+
setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
}
- void unmapTestOnly() {
+ void unmapTestOnly() NO_THREAD_SAFETY_ANALYSIS {
for (uptr I = 0; I < NumClasses; I++) {
RegionInfo *Region = getRegionInfo(I);
*Region = {};
}
if (PrimaryBase)
- unmap(reinterpret_cast<void *>(PrimaryBase), PrimarySize, UNMAP_ALL,
- &Data);
+ ReservedMemory.release();
PrimaryBase = 0U;
}
+ // When all blocks are freed, the total size of the blocks in the freelists
+ // has to match `AllocatedUser`.
+ void verifyAllBlocksAreReleasedTestOnly() {
+ // `BatchGroup` and `TransferBatch` also use the blocks from BatchClass.
+ uptr BatchClassUsedInFreeLists = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ // We have to count BatchClassUsedInFreeLists in other regions first.
+ if (I == SizeClassMap::BatchClassId)
+ continue;
+ RegionInfo *Region = getRegionInfo(I);
+ ScopedLock ML(Region->MMLock);
+ ScopedLock FL(Region->FLLock);
+ const uptr BlockSize = getSizeByClassId(I);
+ uptr TotalBlocks = 0;
+ for (BatchGroup &BG : Region->FreeListInfo.BlockList) {
+ // `BG::Batches` are `TransferBatches`. +1 for `BatchGroup`.
+ BatchClassUsedInFreeLists += BG.Batches.size() + 1;
+ for (const auto &It : BG.Batches)
+ TotalBlocks += It.getCount();
+ }
+
+ DCHECK_EQ(TotalBlocks, Region->MemMapInfo.AllocatedUser / BlockSize);
+ DCHECK_EQ(Region->FreeListInfo.PushedBlocks,
+ Region->FreeListInfo.PoppedBlocks);
+ }
+
+ RegionInfo *Region = getRegionInfo(SizeClassMap::BatchClassId);
+ ScopedLock ML(Region->MMLock);
+ ScopedLock FL(Region->FLLock);
+ const uptr BlockSize = getSizeByClassId(SizeClassMap::BatchClassId);
+ uptr TotalBlocks = 0;
+ for (BatchGroup &BG : Region->FreeListInfo.BlockList) {
+ if (LIKELY(!BG.Batches.empty())) {
+ for (const auto &It : BG.Batches)
+ TotalBlocks += It.getCount();
+ } else {
+ // A `BatchGroup` with an empty freelist doesn't have a `TransferBatch`
+ // recording its own block.
+ ++TotalBlocks;
+ }
+ }
+ DCHECK_EQ(TotalBlocks + BatchClassUsedInFreeLists,
+ Region->MemMapInfo.AllocatedUser / BlockSize);
+ DCHECK_GE(Region->FreeListInfo.PoppedBlocks,
+ Region->FreeListInfo.PushedBlocks);
+ const uptr BlocksInUse =
+ Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks;
+ DCHECK_EQ(BlocksInUse, BatchClassUsedInFreeLists);
+ }
+
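The final DCHECKs above boil down to a conservation argument: every BatchClass block popped but not pushed back must be serving as a `BatchGroup` or `TransferBatch` header in some freelist. A toy ledger with made-up counts:

    #include <cstdio>

    int main() {
      // Made-up counters for the BatchClass region.
      const unsigned long PoppedBlocks = 12; // handed out as BG/TB headers
      const unsigned long PushedBlocks = 7;  // returned after their lists drained
      // Headers currently embedded in the freelists of all regions.
      const unsigned long BatchClassUsedInFreeLists = 5;

      // The invariant checked by the last DCHECK above.
      std::printf("invariant holds: %d\n",
                  (PoppedBlocks - PushedBlocks) == BatchClassUsedInFreeLists);
    }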
TransferBatch *popBatch(CacheT *C, uptr ClassId) {
DCHECK_LT(ClassId, NumClasses);
RegionInfo *Region = getRegionInfo(ClassId);
- ScopedLock L(Region->Mutex);
- TransferBatch *B = popBatchImpl(C, ClassId);
- if (UNLIKELY(!B)) {
- if (UNLIKELY(!populateFreeList(C, ClassId, Region)))
- return nullptr;
- B = popBatchImpl(C, ClassId);
- // if `populateFreeList` succeeded, we are supposed to get free blocks.
- DCHECK_NE(B, nullptr);
+
+ {
+ ScopedLock L(Region->FLLock);
+ TransferBatch *B = popBatchImpl(C, ClassId, Region);
+ if (LIKELY(B))
+ return B;
}
- Region->Stats.PoppedBlocks += B->getCount();
+
+ bool PrintStats = false;
+ TransferBatch *B = nullptr;
+
+ while (true) {
+ // When two threads compete for `Region->MMLock`, we only want one of them
+ // to do the populateFreeListAndPopBatch(). To avoid both of them doing
+ // that, always check the freelist before mapping new pages.
+ //
+ // TODO(chiahungduan): Use a condition variable so that we don't need to
+ // hold `Region->MMLock` here.
+ ScopedLock ML(Region->MMLock);
+ {
+ ScopedLock FL(Region->FLLock);
+ B = popBatchImpl(C, ClassId, Region);
+ if (LIKELY(B))
+ return B;
+ }
+
+ const bool RegionIsExhausted = Region->Exhausted;
+ if (!RegionIsExhausted)
+ B = populateFreeListAndPopBatch(C, ClassId, Region);
+ PrintStats = !RegionIsExhausted && Region->Exhausted;
+ break;
+ }
+
+ // Note that `getStats()` requires locking each region so we can't call it
+ // while holding `Region->MMLock` in the section above.
+ if (UNLIKELY(PrintStats)) {
+ ScopedString Str;
+ getStats(&Str);
+ Str.append(
+ "Scudo OOM: The process has exhausted %zuM for size class %zu.\n",
+ RegionSize >> 20, getSizeByClassId(ClassId));
+ Str.output();
+
+ // Theoretically, BatchClass shouldn't be used up. Abort immediately when
+ // it happens.
+ if (ClassId == SizeClassMap::BatchClassId)
+ reportOutOfBatchClass();
+ }
+
return B;
}
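The control flow above is double-checked work deduplication: check the freelist under FLLock, and let only the MMLock holder refill. A self-contained sketch of just that shape (Region, tryPop, and the counter are stand-ins, not the real types):

    #include <cstdio>
    #include <mutex>

    struct Region {
      std::mutex FLLock, MMLock; // same roles as the two region locks above
      int FreeBatches = 0;       // stand-in for the freelist
    };

    // Requires FLLock to be held by the caller.
    static bool tryPop(Region &R) {
      if (R.FreeBatches == 0)
        return false;
      --R.FreeBatches;
      return true;
    }

    static bool popBatch(Region &R) {
      {
        std::lock_guard<std::mutex> FL(R.FLLock); // fast path
        if (tryPop(R))
          return true;
      }
      // Slow path: MMLock serializes refills so only one thread maps new pages.
      std::lock_guard<std::mutex> MM(R.MMLock);
      {
        std::lock_guard<std::mutex> FL(R.FLLock); // re-check: another thread may
        if (tryPop(R))                            // have refilled while we waited
          return true;
      }
      std::lock_guard<std::mutex> FL(R.FLLock);
      R.FreeBatches += 8; // "populateFreeListAndPopBatch": refill, then pop
      return tryPop(R);
    }

    int main() {
      Region R;
      std::printf("%d\n", popBatch(R)); // prints 1 after a refill
    }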
@@ -120,17 +263,8 @@ public:
RegionInfo *Region = getRegionInfo(ClassId);
if (ClassId == SizeClassMap::BatchClassId) {
- ScopedLock L(Region->Mutex);
- // Constructing a batch group in the free list will use two blocks in
- // BatchClassId. If we are pushing BatchClassId blocks, we will use the
- // blocks in the array directly (can't delegate local cache which will
- // cause a recursive allocation). However, The number of free blocks may
- // be less than two. Therefore, populate the free list before inserting
- // the blocks.
- if (Size == 1 && UNLIKELY(!populateFreeList(C, ClassId, Region)))
- return;
- pushBlocksImpl(C, ClassId, Array, Size);
- Region->Stats.PushedBlocks += Size;
+ ScopedLock L(Region->FLLock);
+ pushBatchClassBlocks(Region, Array, Size);
return;
}
@@ -152,30 +286,32 @@ public:
Array[J] = Cur;
}
- ScopedLock L(Region->Mutex);
- pushBlocksImpl(C, ClassId, Array, Size, SameGroup);
-
- Region->Stats.PushedBlocks += Size;
- if (ClassId != SizeClassMap::BatchClassId)
- releaseToOSMaybe(Region, ClassId);
+ {
+ ScopedLock L(Region->FLLock);
+ pushBlocksImpl(C, ClassId, Region, Array, Size, SameGroup);
+ }
}
- void disable() {
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
// The BatchClassId must be locked last since other classes can use it.
for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
continue;
- getRegionInfo(static_cast<uptr>(I))->Mutex.lock();
+ getRegionInfo(static_cast<uptr>(I))->MMLock.lock();
+ getRegionInfo(static_cast<uptr>(I))->FLLock.lock();
}
- getRegionInfo(SizeClassMap::BatchClassId)->Mutex.lock();
+ getRegionInfo(SizeClassMap::BatchClassId)->MMLock.lock();
+ getRegionInfo(SizeClassMap::BatchClassId)->FLLock.lock();
}
- void enable() {
- getRegionInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
+ getRegionInfo(SizeClassMap::BatchClassId)->FLLock.unlock();
+ getRegionInfo(SizeClassMap::BatchClassId)->MMLock.unlock();
for (uptr I = 0; I < NumClasses; I++) {
if (I == SizeClassMap::BatchClassId)
continue;
- getRegionInfo(I)->Mutex.unlock();
+ getRegionInfo(I)->FLLock.unlock();
+ getRegionInfo(I)->MMLock.unlock();
}
}
@@ -183,10 +319,15 @@ public:
for (uptr I = 0; I < NumClasses; I++) {
if (I == SizeClassMap::BatchClassId)
continue;
- const RegionInfo *Region = getRegionInfo(I);
+ RegionInfo *Region = getRegionInfo(I);
+ // TODO: The call of `iterateOverBlocks` requires disabling
+ // SizeClassAllocator64. We may consider locking each region on demand
+ // only.
+ Region->FLLock.assertHeld();
+ Region->MMLock.assertHeld();
const uptr BlockSize = getSizeByClassId(I);
const uptr From = Region->RegionBeg;
- const uptr To = From + Region->AllocatedUser;
+ const uptr To = From + Region->MemMapInfo.AllocatedUser;
for (uptr Block = From; Block < To; Block += BlockSize)
Callback(Block);
}
@@ -199,25 +340,34 @@ public:
uptr PushedBlocks = 0;
for (uptr I = 0; I < NumClasses; I++) {
RegionInfo *Region = getRegionInfo(I);
- if (Region->MappedUser)
- TotalMapped += Region->MappedUser;
- PoppedBlocks += Region->Stats.PoppedBlocks;
- PushedBlocks += Region->Stats.PushedBlocks;
+ {
+ ScopedLock L(Region->MMLock);
+ TotalMapped += Region->MemMapInfo.MappedUser;
+ }
+ {
+ ScopedLock L(Region->FLLock);
+ PoppedBlocks += Region->FreeListInfo.PoppedBlocks;
+ PushedBlocks += Region->FreeListInfo.PushedBlocks;
+ }
}
Str->append("Stats: SizeClassAllocator64: %zuM mapped (%uM rss) in %zu "
"allocations; remains %zu\n",
TotalMapped >> 20, 0U, PoppedBlocks,
PoppedBlocks - PushedBlocks);
- for (uptr I = 0; I < NumClasses; I++)
- getStats(Str, I, 0);
+ for (uptr I = 0; I < NumClasses; I++) {
+ RegionInfo *Region = getRegionInfo(I);
+ ScopedLock L1(Region->MMLock);
+ ScopedLock L2(Region->FLLock);
+ getStats(Str, I, Region);
+ }
}
bool setOption(Option O, sptr Value) {
if (O == Option::ReleaseInterval) {
- const s32 Interval = Max(
- Min(static_cast<s32>(Value), Config::PrimaryMaxReleaseToOsIntervalMs),
- Config::PrimaryMinReleaseToOsIntervalMs);
+ const s32 Interval = Max(Min(static_cast<s32>(Value),
+ Config::Primary::MaxReleaseToOsIntervalMs),
+ Config::Primary::MinReleaseToOsIntervalMs);
atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
return true;
}
@@ -225,14 +375,27 @@ public:
return true;
}
- uptr releaseToOS() {
+ uptr tryReleaseToOS(uptr ClassId, ReleaseToOS ReleaseType) {
+ RegionInfo *Region = getRegionInfo(ClassId);
+ // Note that tryLock() may fail spuriously. Given that this should be rare
+ // and page releasing is fine to skip, we don't take extra measures to
+ // ensure the page release is done.
+ if (Region->MMLock.tryLock()) {
+ uptr BytesReleased = releaseToOSMaybe(Region, ClassId, ReleaseType);
+ Region->MMLock.unlock();
+ return BytesReleased;
+ }
+ return 0;
+ }
+
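Since page release is opportunistic, a failed (even spurious) tryLock() is simply treated as "someone else will get to it". A minimal sketch of this pattern, with a placeholder releaseMaybe() standing in for the real release path:

    #include <cstdio>
    #include <mutex>

    std::mutex MMLock;

    // Hypothetical releaser: returns bytes released.
    static size_t releaseMaybe() { return 4096; }

    // Skip the release when the lock is contended: failing (even
    // spuriously) is acceptable because releasing is best-effort.
    static size_t tryRelease() {
      if (MMLock.try_lock()) {
        size_t Bytes = releaseMaybe();
        MMLock.unlock();
        return Bytes;
      }
      return 0; // another thread holds the lock; let it do the work
    }

    int main() { std::printf("%zu\n", tryRelease()); }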
+ uptr releaseToOS(ReleaseToOS ReleaseType) {
uptr TotalReleasedBytes = 0;
for (uptr I = 0; I < NumClasses; I++) {
if (I == SizeClassMap::BatchClassId)
continue;
RegionInfo *Region = getRegionInfo(I);
- ScopedLock L(Region->Mutex);
- TotalReleasedBytes += releaseToOSMaybe(Region, I, /*Force=*/true);
+ ScopedLock L(Region->MMLock);
+ TotalReleasedBytes += releaseToOSMaybe(Region, I, ReleaseType);
}
return TotalReleasedBytes;
}
@@ -244,9 +407,6 @@ public:
static uptr getRegionInfoArraySize() { return sizeof(RegionInfoArray); }
uptr getCompactPtrBaseByClassId(uptr ClassId) {
- // If we are not compacting pointers, base everything off of 0.
- if (sizeof(CompactPtrT) == sizeof(uptr) && CompactPtrScale == 0)
- return 0;
return getRegionInfo(ClassId)->RegionBeg;
}
@@ -261,16 +421,24 @@ public:
decompactPtrInternal(getCompactPtrBaseByClassId(ClassId), CompactPtr));
}
- static BlockInfo findNearestBlock(const char *RegionInfoData, uptr Ptr) {
+ static BlockInfo findNearestBlock(const char *RegionInfoData,
+ uptr Ptr) NO_THREAD_SAFETY_ANALYSIS {
const RegionInfo *RegionInfoArray =
reinterpret_cast<const RegionInfo *>(RegionInfoData);
+
uptr ClassId;
uptr MinDistance = -1UL;
for (uptr I = 0; I != NumClasses; ++I) {
if (I == SizeClassMap::BatchClassId)
continue;
uptr Begin = RegionInfoArray[I].RegionBeg;
- uptr End = Begin + RegionInfoArray[I].AllocatedUser;
+ // TODO(chiahungduan): In fact, we need to lock RegionInfo::MMLock. However,
+ // RegionInfoData is passed with a const qualifier, and locking the mutex
+ // requires modifying RegionInfoData, which means we would need to remove
+ // the const qualifier. This may lead to another undefined behavior (the
+ // first one is accessing `AllocatedUser` without locking). It's better to
+ // pass `RegionInfoData` as `void *` so that we can lock the mutex properly.
+ uptr End = Begin + RegionInfoArray[I].MemMapInfo.AllocatedUser;
if (Begin > End || End - Begin < SizeClassMap::getSizeByClassId(I))
continue;
uptr RegionDistance;
@@ -292,7 +460,8 @@ public:
BlockInfo B = {};
if (MinDistance <= 8192) {
B.RegionBegin = RegionInfoArray[ClassId].RegionBeg;
- B.RegionEnd = B.RegionBegin + RegionInfoArray[ClassId].AllocatedUser;
+ B.RegionEnd =
+ B.RegionBegin + RegionInfoArray[ClassId].MemMapInfo.AllocatedUser;
B.BlockSize = SizeClassMap::getSizeByClassId(ClassId);
B.BlockBegin =
B.RegionBegin + uptr(sptr(Ptr - B.RegionBegin) / sptr(B.BlockSize) *
@@ -308,37 +477,49 @@ public:
AtomicOptions Options;
private:
- static const uptr RegionSize = 1UL << Config::PrimaryRegionSizeLog;
+ static const uptr RegionSize = 1UL << Config::Primary::RegionSizeLog;
static const uptr NumClasses = SizeClassMap::NumClasses;
static const uptr PrimarySize = RegionSize * NumClasses;
- static const uptr MapSizeIncrement = Config::PrimaryMapSizeIncrement;
+ static const uptr MapSizeIncrement = Config::Primary::MapSizeIncrement;
// Fill at most this number of batches from the newly map'd memory.
static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
- struct RegionStats {
- uptr PoppedBlocks;
- uptr PushedBlocks;
- };
-
struct ReleaseToOsInfo {
- uptr PushedBlocksAtLastRelease;
+ uptr BytesInFreeListAtLastCheckpoint;
uptr RangesReleased;
uptr LastReleasedBytes;
u64 LastReleaseAtNs;
};
+ struct BlocksInfo {
+ SinglyLinkedList<BatchGroup> BlockList = {};
+ uptr PoppedBlocks = 0;
+ uptr PushedBlocks = 0;
+ };
+
+ struct PagesInfo {
+ MemMapT MemMap = {};
+ // Bytes mapped for user memory.
+ uptr MappedUser = 0;
+ // Bytes allocated for user memory.
+ uptr AllocatedUser = 0;
+ };
+
struct UnpaddedRegionInfo {
- HybridMutex Mutex;
- SinglyLinkedList<BatchGroup> FreeList;
+ // Mutex for operations on freelist
+ HybridMutex FLLock;
+ // Mutex for memmap operations
+ HybridMutex MMLock ACQUIRED_BEFORE(FLLock);
+ // `RegionBeg` is initialized before thread creation and won't be changed.
uptr RegionBeg = 0;
- RegionStats Stats = {};
- u32 RandState = 0;
- uptr MappedUser = 0; // Bytes mapped for user memory.
- uptr AllocatedUser = 0; // Bytes allocated for user memory.
- MapPlatformData Data = {};
- ReleaseToOsInfo ReleaseInfo = {};
- bool Exhausted = false;
+ u32 RandState GUARDED_BY(MMLock) = 0;
+ BlocksInfo FreeListInfo GUARDED_BY(FLLock);
+ PagesInfo MemMapInfo GUARDED_BY(MMLock);
+ // The minimum size of pushed blocks to trigger page release.
+ uptr TryReleaseThreshold GUARDED_BY(MMLock) = 0;
+ ReleaseToOsInfo ReleaseInfo GUARDED_BY(MMLock) = {};
+ bool Exhausted GUARDED_BY(MMLock) = false;
};
struct RegionInfo : UnpaddedRegionInfo {
char Padding[SCUDO_CACHE_LINE_SIZE -
@@ -346,18 +527,15 @@ private:
};
static_assert(sizeof(RegionInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
- uptr PrimaryBase = 0;
- MapPlatformData Data = {};
- atomic_s32 ReleaseToOsIntervalMs = {};
- alignas(SCUDO_CACHE_LINE_SIZE) RegionInfo RegionInfoArray[NumClasses];
-
RegionInfo *getRegionInfo(uptr ClassId) {
DCHECK_LT(ClassId, NumClasses);
return &RegionInfoArray[ClassId];
}
- uptr getRegionBaseByClassId(uptr ClassId) const {
- return PrimaryBase + (ClassId << Config::PrimaryRegionSizeLog);
+ uptr getRegionBaseByClassId(uptr ClassId) {
+ return roundDown(getRegionInfo(ClassId)->RegionBeg - PrimaryBase,
+ RegionSize) +
+ PrimaryBase;
}
static CompactPtrT compactPtrInternal(uptr Base, uptr Ptr) {
@@ -369,21 +547,144 @@ private:
}
static uptr compactPtrGroup(CompactPtrT CompactPtr) {
- return static_cast<uptr>(CompactPtr) >> (GroupSizeLog - CompactPtrScale);
+ const uptr Mask = (static_cast<uptr>(1) << GroupScale) - 1;
+ return static_cast<uptr>(CompactPtr) & ~Mask;
+ }
+ static uptr decompactGroupBase(uptr Base, uptr CompactPtrGroupBase) {
+ DCHECK_EQ(CompactPtrGroupBase % (static_cast<uptr>(1) << (GroupScale)), 0U);
+ return Base + (CompactPtrGroupBase << CompactPtrScale);
+ }
+
+ ALWAYS_INLINE static bool isSmallBlock(uptr BlockSize) {
+ const uptr PageSize = getPageSizeCached();
+ return BlockSize < PageSize / 16U;
}
- static uptr batchGroupBase(uptr Base, uptr GroupId) {
- return (GroupId << GroupSizeLog) + Base;
+
+ ALWAYS_INLINE static bool isLargeBlock(uptr BlockSize) {
+ const uptr PageSize = getPageSizeCached();
+ return BlockSize > PageSize;
+ }
+
+ void pushBatchClassBlocks(RegionInfo *Region, CompactPtrT *Array, u32 Size)
+ REQUIRES(Region->FLLock) {
+ DCHECK_EQ(Region, getRegionInfo(SizeClassMap::BatchClassId));
+
+ // Free blocks are recorded by TransferBatch in the freelist for all
+ // size-classes. In addition, TransferBatch is allocated from BatchClassId.
+ // In order not to use an additional block to record the free blocks in
+ // BatchClassId, they are self-contained, i.e., a TransferBatch records its
+ // own block address. See the figure below:
+ //
+ // TransferBatch at 0xABCD
+ // +----------------------------+
+ // | Free blocks' addr |
+ // | +------+------+------+ |
+ // | |0xABCD|... |... | |
+ // | +------+------+------+ |
+ // +----------------------------+
+ //
+ // When we allocate all the free blocks in the TransferBatch, the block used
+ // by the TransferBatch is also free for use. We don't need to recycle the
+ // TransferBatch. Note that the correctness is maintained by the invariant,
+ //
+ // The unit of each popBatch() request is an entire TransferBatch.
+ // Returning part of the blocks in a TransferBatch is invalid.
+ //
+ // This ensures that a TransferBatch won't leak its own address while it's
+ // still holding other valid data.
+ //
+ // Besides, BatchGroup is also allocated from BatchClassId and has its
+ // address recorded in the TransferBatch too. To maintain the correctness,
+ //
+ // The address of BatchGroup is always recorded in the last TransferBatch
+ // in the freelist (also imply that the freelist should only be
+ // updated with push_front). Once the last TransferBatch is popped,
+ // the block used by BatchGroup is also free for use.
+ //
+ // With this approach, the blocks used by BatchGroup and TransferBatch are
+ // reusable and don't need additional space for them.
+
+ Region->FreeListInfo.PushedBlocks += Size;
+ BatchGroup *BG = Region->FreeListInfo.BlockList.front();
+
+ if (BG == nullptr) {
+ // Construct `BatchGroup` on the last element.
+ BG = reinterpret_cast<BatchGroup *>(
+ decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
+ --Size;
+ BG->Batches.clear();
+ // BatchClass doesn't use the memory-group feature. Use `0` to indicate
+ // there's no memory group here.
+ BG->CompactPtrGroupBase = 0;
+ // `BG` is also the block of BatchClassId. Note that this is different
+ // from `CreateGroup` in `pushBlocksImpl`.
+ BG->PushedBlocks = 1;
+ BG->BytesInBGAtLastCheckpoint = 0;
+ BG->MaxCachedPerBatch = TransferBatch::getMaxCached(
+ getSizeByClassId(SizeClassMap::BatchClassId));
+
+ Region->FreeListInfo.BlockList.push_front(BG);
+ }
+
+ if (UNLIKELY(Size == 0))
+ return;
+
+ // This happens in two cases:
+ // 1. We just allocated a new `BatchGroup`.
+ // 2. Only one block is pushed when the freelist is empty.
+ if (BG->Batches.empty()) {
+ // Construct the `TransferBatch` on the last element.
+ TransferBatch *TB = reinterpret_cast<TransferBatch *>(
+ decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
+ TB->clear();
+ // As mentioned above, addresses of `TransferBatch` and `BatchGroup` are
+ // recorded in the TransferBatch.
+ TB->add(Array[Size - 1]);
+ TB->add(
+ compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(BG)));
+ --Size;
+ DCHECK_EQ(BG->PushedBlocks, 1U);
+ // `TB` is also the block of BatchClassId.
+ BG->PushedBlocks += 1;
+ BG->Batches.push_front(TB);
+ }
+
+ TransferBatch *CurBatch = BG->Batches.front();
+ DCHECK_NE(CurBatch, nullptr);
+
+ for (u32 I = 0; I < Size;) {
+ u16 UnusedSlots =
+ static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
+ if (UnusedSlots == 0) {
+ CurBatch = reinterpret_cast<TransferBatch *>(
+ decompactPtr(SizeClassMap::BatchClassId, Array[I]));
+ CurBatch->clear();
+ // Self-contained
+ CurBatch->add(Array[I]);
+ ++I;
+ // TODO(chiahungduan): Avoid the use of push_back() in `Batches` of
+ // BatchClassId.
+ BG->Batches.push_front(CurBatch);
+ UnusedSlots = static_cast<u16>(BG->MaxCachedPerBatch - 1);
+ }
+ // `UnusedSlots` is u16 so the result will also fit in u16.
+ const u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
+ CurBatch->appendFromArray(&Array[I], AppendSize);
+ I += AppendSize;
+ }
+
+ BG->PushedBlocks += Size;
}
// Push the blocks to their batch group. The layout will be like,
//
- // FreeList - > BG -> BG -> BG
- // | | |
- // v v v
- // TB TB TB
- // |
- // v
- // TB
+ // FreeListInfo.BlockList - > BG -> BG -> BG
+ // | | |
+ // v v v
+ // TB TB TB
+ // |
+ // v
+ // TB
//
// Each BlockGroup(BG) will associate with unique group id and the free blocks
// are managed by a list of TransferBatch(TB). To reduce the time of inserting
@@ -391,45 +692,22 @@ private:
// that we can get better performance of maintaining sorted property.
// Use `SameGroup=true` to indicate that all blocks in the array are from the
// same group then we will skip checking the group id of each block.
- //
- // Note that this aims to have a better management of dirty pages, i.e., the
- // RSS usage won't grow indefinitely. There's an exception that we may not put
- // a block to its associated group. While populating new blocks, we may have
- // blocks cross different groups. However, most cases will fall into same
- // group and they are supposed to be popped soon. In that case, it's not worth
- // sorting the array with the almost-sorted property. Therefore, we use
- // `SameGroup=true` instead.
- //
- // The region mutex needs to be held while calling this method.
- void pushBlocksImpl(CacheT *C, uptr ClassId, CompactPtrT *Array, u32 Size,
- bool SameGroup = false) {
+ void pushBlocksImpl(CacheT *C, uptr ClassId, RegionInfo *Region,
+ CompactPtrT *Array, u32 Size, bool SameGroup = false)
+ REQUIRES(Region->FLLock) {
+ DCHECK_NE(ClassId, SizeClassMap::BatchClassId);
DCHECK_GT(Size, 0U);
- RegionInfo *Region = getRegionInfo(ClassId);
- auto CreateGroup = [&](uptr GroupId) {
- BatchGroup *BG = nullptr;
- TransferBatch *TB = nullptr;
- if (ClassId == SizeClassMap::BatchClassId) {
- DCHECK_GE(Size, 2U);
- BG = reinterpret_cast<BatchGroup *>(
- decompactPtr(ClassId, Array[Size - 1]));
- BG->Batches.clear();
-
- TB = reinterpret_cast<TransferBatch *>(
- decompactPtr(ClassId, Array[Size - 2]));
- TB->clear();
- } else {
- BG = C->createGroup();
- BG->Batches.clear();
-
- TB = C->createBatch(ClassId, nullptr);
- TB->clear();
- }
+ auto CreateGroup = [&](uptr CompactPtrGroupBase) {
+ BatchGroup *BG = C->createGroup();
+ BG->Batches.clear();
+ TransferBatch *TB = C->createBatch(ClassId, nullptr);
+ TB->clear();
- BG->GroupId = GroupId;
+ BG->CompactPtrGroupBase = CompactPtrGroupBase;
BG->Batches.push_front(TB);
BG->PushedBlocks = 0;
- BG->PushedBlocksAtLastCheckpoint = 0;
+ BG->BytesInBGAtLastCheckpoint = 0;
BG->MaxCachedPerBatch =
TransferBatch::getMaxCached(getSizeByClassId(ClassId));
@@ -462,13 +740,14 @@ private:
BG->PushedBlocks += Size;
};
- BatchGroup *Cur = Region->FreeList.front();
+ Region->FreeListInfo.PushedBlocks += Size;
+ BatchGroup *Cur = Region->FreeListInfo.BlockList.front();
if (ClassId == SizeClassMap::BatchClassId) {
if (Cur == nullptr) {
// Don't need to classify BatchClassId.
- Cur = CreateGroup(/*GroupId=*/0);
- Region->FreeList.push_front(Cur);
+ Cur = CreateGroup(/*CompactPtrGroupBase=*/0);
+ Region->FreeListInfo.BlockList.push_front(Cur);
}
InsertBlocks(Cur, Array, Size);
return;
@@ -478,22 +757,27 @@ private:
// will be pushed next. `Prev` is the element right before `Cur`.
BatchGroup *Prev = nullptr;
- while (Cur != nullptr && compactPtrGroup(Array[0]) > Cur->GroupId) {
+ while (Cur != nullptr &&
+ compactPtrGroup(Array[0]) > Cur->CompactPtrGroupBase) {
Prev = Cur;
Cur = Cur->Next;
}
- if (Cur == nullptr || compactPtrGroup(Array[0]) != Cur->GroupId) {
+ if (Cur == nullptr ||
+ compactPtrGroup(Array[0]) != Cur->CompactPtrGroupBase) {
Cur = CreateGroup(compactPtrGroup(Array[0]));
if (Prev == nullptr)
- Region->FreeList.push_front(Cur);
+ Region->FreeListInfo.BlockList.push_front(Cur);
else
- Region->FreeList.insert(Prev, Cur);
+ Region->FreeListInfo.BlockList.insert(Prev, Cur);
}
// All the blocks are from the same group, just push without checking group
// id.
if (SameGroup) {
+ for (u32 I = 0; I < Size; ++I)
+ DCHECK_EQ(compactPtrGroup(Array[I]), Cur->CompactPtrGroupBase);
+
InsertBlocks(Cur, Array, Size);
return;
}
@@ -503,18 +787,20 @@ private:
u32 Count = 1;
for (u32 I = 1; I < Size; ++I) {
if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I])) {
- DCHECK_EQ(compactPtrGroup(Array[I - 1]), Cur->GroupId);
+ DCHECK_EQ(compactPtrGroup(Array[I - 1]), Cur->CompactPtrGroupBase);
InsertBlocks(Cur, Array + I - Count, Count);
- while (Cur != nullptr && compactPtrGroup(Array[I]) > Cur->GroupId) {
+ while (Cur != nullptr &&
+ compactPtrGroup(Array[I]) > Cur->CompactPtrGroupBase) {
Prev = Cur;
Cur = Cur->Next;
}
- if (Cur == nullptr || compactPtrGroup(Array[I]) != Cur->GroupId) {
+ if (Cur == nullptr ||
+ compactPtrGroup(Array[I]) != Cur->CompactPtrGroupBase) {
Cur = CreateGroup(compactPtrGroup(Array[I]));
DCHECK_NE(Prev, nullptr);
- Region->FreeList.insert(Prev, Cur);
+ Region->FreeListInfo.BlockList.insert(Prev, Cur);
}
Count = 1;
@@ -530,14 +816,28 @@ private:
// group id will be considered first.
//
// The region mutex needs to be held while calling this method.
- TransferBatch *popBatchImpl(CacheT *C, uptr ClassId) {
- RegionInfo *Region = getRegionInfo(ClassId);
- if (Region->FreeList.empty())
+ TransferBatch *popBatchImpl(CacheT *C, uptr ClassId, RegionInfo *Region)
+ REQUIRES(Region->FLLock) {
+ if (Region->FreeListInfo.BlockList.empty())
return nullptr;
SinglyLinkedList<TransferBatch> &Batches =
- Region->FreeList.front()->Batches;
- DCHECK(!Batches.empty());
+ Region->FreeListInfo.BlockList.front()->Batches;
+
+ if (Batches.empty()) {
+ DCHECK_EQ(ClassId, SizeClassMap::BatchClassId);
+ BatchGroup *BG = Region->FreeListInfo.BlockList.front();
+ Region->FreeListInfo.BlockList.pop_front();
+
+ // Block used by `BatchGroup` is from BatchClassId. Turn the block into
+ // `TransferBatch` with single block.
+ TransferBatch *TB = reinterpret_cast<TransferBatch *>(BG);
+ TB->clear();
+ TB->add(
+ compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB)));
+ Region->FreeListInfo.PoppedBlocks += 1;
+ return TB;
+ }
TransferBatch *B = Batches.front();
Batches.pop_front();
@@ -545,8 +845,8 @@ private:
DCHECK_GT(B->getCount(), 0U);
if (Batches.empty()) {
- BatchGroup *BG = Region->FreeList.front();
- Region->FreeList.pop_front();
+ BatchGroup *BG = Region->FreeListInfo.BlockList.front();
+ Region->FreeListInfo.BlockList.pop_front();
// We don't keep BatchGroup with zero blocks to avoid empty-checking while
// allocating. Note that block used by constructing BatchGroup is recorded
@@ -557,51 +857,49 @@ private:
C->deallocate(SizeClassMap::BatchClassId, BG);
}
+ Region->FreeListInfo.PoppedBlocks += B->getCount();
+
return B;
}
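The empty-Batches branch above reinterprets the BatchGroup's own block as a one-entry TransferBatch. A hedged sketch of that storage reuse, with toy types standing in for the real headers:

    #include <cstdint>
    #include <cstdio>
    #include <new>

    // Both headers live in equally sized BatchClass blocks, so one block can
    // serve as either over its lifetime (toy layouts; the real ones differ).
    struct ToyGroup { uintptr_t GroupBase; };
    struct ToyBatch { uintptr_t Blocks[2]; int Count; };

    int main() {
      alignas(ToyBatch) static unsigned char Block[sizeof(ToyBatch)];
      ToyGroup *BG = new (Block) ToyGroup{0}; // first life: group header
      // Freelist drained: recycle the very same storage as a one-entry batch
      // that hands out its own block, mirroring the fallback above.
      ToyBatch *TB = new (static_cast<void *>(BG)) ToyBatch{};
      TB->Blocks[TB->Count++] = reinterpret_cast<uintptr_t>(TB);
      std::printf("recycled header now serves %d free block\n", TB->Count);
    }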
- NOINLINE bool populateFreeList(CacheT *C, uptr ClassId, RegionInfo *Region) {
+ // Refill the freelist and return one batch.
+ NOINLINE TransferBatch *populateFreeListAndPopBatch(CacheT *C, uptr ClassId,
+ RegionInfo *Region)
+ REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
const uptr Size = getSizeByClassId(ClassId);
const u16 MaxCount = TransferBatch::getMaxCached(Size);
const uptr RegionBeg = Region->RegionBeg;
- const uptr MappedUser = Region->MappedUser;
- const uptr TotalUserBytes = Region->AllocatedUser + MaxCount * Size;
+ const uptr MappedUser = Region->MemMapInfo.MappedUser;
+ const uptr TotalUserBytes =
+ Region->MemMapInfo.AllocatedUser + MaxCount * Size;
// Map more space for blocks, if necessary.
if (TotalUserBytes > MappedUser) {
// Do the mmap for the user memory.
const uptr MapSize =
- roundUpTo(TotalUserBytes - MappedUser, MapSizeIncrement);
+ roundUp(TotalUserBytes - MappedUser, MapSizeIncrement);
const uptr RegionBase = RegionBeg - getRegionBaseByClassId(ClassId);
if (UNLIKELY(RegionBase + MappedUser + MapSize > RegionSize)) {
- if (!Region->Exhausted) {
- Region->Exhausted = true;
- ScopedString Str;
- getStats(&Str);
- Str.append(
- "Scudo OOM: The process has exhausted %zuM for size class %zu.\n",
- RegionSize >> 20, Size);
- Str.output();
- }
- return false;
+ Region->Exhausted = true;
+ return nullptr;
}
- if (MappedUser == 0)
- Region->Data = Data;
- if (UNLIKELY(!map(
- reinterpret_cast<void *>(RegionBeg + MappedUser), MapSize,
- "scudo:primary",
+
+ if (UNLIKELY(!Region->MemMapInfo.MemMap.remap(
+ RegionBeg + MappedUser, MapSize, "scudo:primary",
MAP_ALLOWNOMEM | MAP_RESIZABLE |
- (useMemoryTagging<Config>(Options.load()) ? MAP_MEMTAG : 0),
- &Region->Data))) {
- return false;
+ (useMemoryTagging<Config>(Options.load()) ? MAP_MEMTAG
+ : 0)))) {
+ return nullptr;
}
- Region->MappedUser += MapSize;
+ Region->MemMapInfo.MappedUser += MapSize;
C->getStats().add(StatMapped, MapSize);
}
- const u32 NumberOfBlocks = Min(
- MaxNumBatches * MaxCount,
- static_cast<u32>((Region->MappedUser - Region->AllocatedUser) / Size));
+ const u32 NumberOfBlocks =
+ Min(MaxNumBatches * MaxCount,
+ static_cast<u32>((Region->MemMapInfo.MappedUser -
+ Region->MemMapInfo.AllocatedUser) /
+ Size));
DCHECK_GT(NumberOfBlocks, 0);
constexpr u32 ShuffleArraySize =
@@ -610,173 +908,634 @@ private:
DCHECK_LE(NumberOfBlocks, ShuffleArraySize);
const uptr CompactPtrBase = getCompactPtrBaseByClassId(ClassId);
- uptr P = RegionBeg + Region->AllocatedUser;
+ uptr P = RegionBeg + Region->MemMapInfo.AllocatedUser;
for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
ShuffleArray[I] = compactPtrInternal(CompactPtrBase, P);
- // No need to shuffle the batches size class.
- if (ClassId != SizeClassMap::BatchClassId)
- shuffle(ShuffleArray, NumberOfBlocks, &Region->RandState);
- for (u32 I = 0; I < NumberOfBlocks;) {
- // `MaxCount` is u16 so the result will also fit in u16.
- const u16 N = static_cast<u16>(Min<u32>(MaxCount, NumberOfBlocks - I));
- // Note that the N blocks here may have different group ids. Given that
- // it only happens when it crosses the group size boundary. Instead of
- // sorting them, treat them as same group here to avoid sorting the
- // almost-sorted blocks.
- pushBlocksImpl(C, ClassId, &ShuffleArray[I], N, /*SameGroup=*/true);
- I += N;
+
+ ScopedLock L(Region->FLLock);
+
+ if (ClassId != SizeClassMap::BatchClassId) {
+ u32 N = 1;
+ uptr CurGroup = compactPtrGroup(ShuffleArray[0]);
+ for (u32 I = 1; I < NumberOfBlocks; I++) {
+ if (UNLIKELY(compactPtrGroup(ShuffleArray[I]) != CurGroup)) {
+ shuffle(ShuffleArray + I - N, N, &Region->RandState);
+ pushBlocksImpl(C, ClassId, Region, ShuffleArray + I - N, N,
+ /*SameGroup=*/true);
+ N = 1;
+ CurGroup = compactPtrGroup(ShuffleArray[I]);
+ } else {
+ ++N;
+ }
+ }
+
+ shuffle(ShuffleArray + NumberOfBlocks - N, N, &Region->RandState);
+ pushBlocksImpl(C, ClassId, Region, &ShuffleArray[NumberOfBlocks - N], N,
+ /*SameGroup=*/true);
+ } else {
+ pushBatchClassBlocks(Region, ShuffleArray, NumberOfBlocks);
}
+ TransferBatch *B = popBatchImpl(C, ClassId, Region);
+ DCHECK_NE(B, nullptr);
+
+ // Note that `PushedBlocks` and `PoppedBlocks` are supposed to only record
+ // the requests from `pushBlocks` and `popBatch`, which are the external
+ // interfaces. `populateFreeListAndPopBatch` is an internal interface, so we
+ // set the value back to avoid incorrectly updating the stats.
+ Region->FreeListInfo.PushedBlocks -= NumberOfBlocks;
+
const uptr AllocatedUser = Size * NumberOfBlocks;
C->getStats().add(StatFree, AllocatedUser);
- Region->AllocatedUser += AllocatedUser;
+ Region->MemMapInfo.AllocatedUser += AllocatedUser;
- return true;
+ return B;
}
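The refill path above shuffles each same-group run separately, relying on the block array being produced in ascending (hence group-sorted) order. A compact sketch of that single pass, where groupOf() stands in for compactPtrGroup() and the group size of 4 is arbitrary:

    #include <algorithm>
    #include <cstdio>
    #include <random>
    #include <vector>

    static unsigned groupOf(unsigned Block) { return Block / 4; }

    int main() {
      std::vector<unsigned> Blocks = {0, 1, 2, 3, 4, 5, 6}; // sorted by group
      std::mt19937 Rng(42);
      size_t RunBegin = 0;
      for (size_t I = 1; I <= Blocks.size(); ++I) {
        // A run ends when the group changes (or at the end of the array).
        if (I == Blocks.size() ||
            groupOf(Blocks[I]) != groupOf(Blocks[RunBegin])) {
          std::shuffle(Blocks.begin() + RunBegin, Blocks.begin() + I, Rng);
          // Here the real code would pushBlocksImpl(..., /*SameGroup=*/true).
          RunBegin = I;
        }
      }
      for (unsigned B : Blocks)
        std::printf("%u ", B); // shuffled within {0..3} and {4..6} only
      std::printf("\n");
    }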
- void getStats(ScopedString *Str, uptr ClassId, uptr Rss) {
- RegionInfo *Region = getRegionInfo(ClassId);
- if (Region->MappedUser == 0)
+ void getStats(ScopedString *Str, uptr ClassId, RegionInfo *Region)
+ REQUIRES(Region->MMLock, Region->FLLock) {
+ if (Region->MemMapInfo.MappedUser == 0)
return;
- const uptr InUse = Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks;
- const uptr TotalChunks = Region->AllocatedUser / getSizeByClassId(ClassId);
- Str->append("%s %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
- "inuse: %6zu total: %6zu rss: %6zuK releases: %6zu last "
- "released: %6zuK region: 0x%zx (0x%zx)\n",
- Region->Exhausted ? "F" : " ", ClassId,
- getSizeByClassId(ClassId), Region->MappedUser >> 10,
- Region->Stats.PoppedBlocks, Region->Stats.PushedBlocks, InUse,
- TotalChunks, Rss >> 10, Region->ReleaseInfo.RangesReleased,
- Region->ReleaseInfo.LastReleasedBytes >> 10, Region->RegionBeg,
- getRegionBaseByClassId(ClassId));
+ const uptr BlockSize = getSizeByClassId(ClassId);
+ const uptr InUse =
+ Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks;
+ const uptr BytesInFreeList =
+ Region->MemMapInfo.AllocatedUser - InUse * BlockSize;
+ uptr RegionPushedBytesDelta = 0;
+ if (BytesInFreeList >=
+ Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
+ RegionPushedBytesDelta =
+ BytesInFreeList - Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
+ }
+ const uptr TotalChunks = Region->MemMapInfo.AllocatedUser / BlockSize;
+ Str->append(
+ "%s %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
+ "inuse: %6zu total: %6zu releases: %6zu last "
+ "released: %6zuK latest pushed bytes: %6zuK region: 0x%zx (0x%zx)\n",
+ Region->Exhausted ? "F" : " ", ClassId, getSizeByClassId(ClassId),
+ Region->MemMapInfo.MappedUser >> 10, Region->FreeListInfo.PoppedBlocks,
+ Region->FreeListInfo.PushedBlocks, InUse, TotalChunks,
+ Region->ReleaseInfo.RangesReleased,
+ Region->ReleaseInfo.LastReleasedBytes >> 10,
+ RegionPushedBytesDelta >> 10, Region->RegionBeg,
+ getRegionBaseByClassId(ClassId));
}
NOINLINE uptr releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
- bool Force = false) {
+ ReleaseToOS ReleaseType = ReleaseToOS::Normal)
+ REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
+ ScopedLock L(Region->FLLock);
+
const uptr BlockSize = getSizeByClassId(ClassId);
+ const uptr BytesInFreeList =
+ Region->MemMapInfo.AllocatedUser - (Region->FreeListInfo.PoppedBlocks -
+ Region->FreeListInfo.PushedBlocks) *
+ BlockSize;
+ if (UNLIKELY(BytesInFreeList == 0))
+ return false;
+
+ const uptr AllocatedUserEnd =
+ Region->MemMapInfo.AllocatedUser + Region->RegionBeg;
+ const uptr CompactPtrBase = getCompactPtrBaseByClassId(ClassId);
+
+ // ====================================================================== //
+ // 1. Check if we have enough free blocks and if it's worth doing a page
+ // release.
+ // ====================================================================== //
+ if (ReleaseType != ReleaseToOS::ForceAll &&
+ !hasChanceToReleasePages(Region, BlockSize, BytesInFreeList,
+ ReleaseType)) {
+ return 0;
+ }
+
+ // ====================================================================== //
+ // 2. Determine which groups can release the pages. Use a heuristic to
+ // gather groups that are candidates for doing a release.
+ // ====================================================================== //
+ SinglyLinkedList<BatchGroup> GroupsToRelease;
+ if (ReleaseType == ReleaseToOS::ForceAll) {
+ GroupsToRelease = Region->FreeListInfo.BlockList;
+ Region->FreeListInfo.BlockList.clear();
+ } else {
+ GroupsToRelease = collectGroupsToRelease(
+ Region, BlockSize, AllocatedUserEnd, CompactPtrBase);
+ }
+ if (GroupsToRelease.empty())
+ return 0;
+
+ // Ideally, we should use a class like `ScopedUnlock`. However, this form of
+ // unlocking is not supported by the thread-safety analysis. See
+ // https://clang.llvm.org/docs/ThreadSafetyAnalysis.html#no-alias-analysis
+ // for more details.
+ // Put it as local class so that we can mark the ctor/dtor with proper
+ // annotations associated to the target lock. Note that accessing the
+ // function variable in local class only works in thread-safety annotations.
+ // TODO: Implement general `ScopedUnlock` when it's supported.
+ class FLLockScopedUnlock {
+ public:
+ FLLockScopedUnlock(RegionInfo *Region) RELEASE(Region->FLLock)
+ : R(Region) {
+ R->FLLock.assertHeld();
+ R->FLLock.unlock();
+ }
+ ~FLLockScopedUnlock() ACQUIRE(Region->FLLock) { R->FLLock.lock(); }
+
+ private:
+ RegionInfo *R;
+ };
+
+ // Note that we have extracted the `GroupsToRelease` from the region
+ // freelist. It's safe to let pushBlocks()/popBatch() access the remaining
+ // region freelist. In steps 3 and 4, we will temporarily release the FLLock
+ // and lock it again before step 5.
+
+ uptr ReleasedBytes = 0;
+ {
+ FLLockScopedUnlock UL(Region);
+ // ==================================================================== //
+ // 3. Mark the free blocks in `GroupsToRelease` in the
+ // `PageReleaseContext`. Then we can tell which pages are in-use by
+ // querying `PageReleaseContext`.
+ // ==================================================================== //
+ PageReleaseContext Context = markFreeBlocks(
+ Region, BlockSize, AllocatedUserEnd, CompactPtrBase, GroupsToRelease);
+ if (UNLIKELY(!Context.hasBlockMarked())) {
+ ScopedLock L(Region->FLLock);
+ mergeGroupsToReleaseBack(Region, GroupsToRelease);
+ return 0;
+ }
+
+ // ==================================================================== //
+ // 4. Release the unused physical pages back to the OS.
+ // ==================================================================== //
+ RegionReleaseRecorder<MemMapT> Recorder(&Region->MemMapInfo.MemMap,
+ Region->RegionBeg,
+ Context.getReleaseOffset());
+ auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
+ releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
+ if (Recorder.getReleasedRangesCount() > 0) {
+ Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
+ Region->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
+ Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
+ }
+ Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();
+ ReleasedBytes = Recorder.getReleasedBytes();
+ }
+
+ // ====================================================================== //
+ // 5. Merge the `GroupsToRelease` back to the freelist.
+ // ====================================================================== //
+ mergeGroupsToReleaseBack(Region, GroupsToRelease);
+
+ return ReleasedBytes;
+ }
+
+ bool hasChanceToReleasePages(RegionInfo *Region, uptr BlockSize,
+ uptr BytesInFreeList, ReleaseToOS ReleaseType)
+ REQUIRES(Region->MMLock, Region->FLLock) {
+ DCHECK_GE(Region->FreeListInfo.PoppedBlocks,
+ Region->FreeListInfo.PushedBlocks);
const uptr PageSize = getPageSizeCached();
- DCHECK_GE(Region->Stats.PoppedBlocks, Region->Stats.PushedBlocks);
- const uptr BytesInFreeList =
- Region->AllocatedUser -
- (Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks) * BlockSize;
- if (BytesInFreeList < PageSize)
- return 0; // No chance to release anything.
- const uptr BytesPushed = (Region->Stats.PushedBlocks -
- Region->ReleaseInfo.PushedBlocksAtLastRelease) *
- BlockSize;
- if (BytesPushed < PageSize)
- return 0; // Nothing new to release.
-
- bool CheckDensity = BlockSize < PageSize / 16U;
+ // Always update `BytesInFreeListAtLastCheckpoint` with the smallest value
+ // so that we won't underestimate the releasable pages. For example, the
+ // following is the region usage,
+ //
+ // BytesInFreeListAtLastCheckpoint AllocatedUser
+ // v v
+ // |--------------------------------------->
+ // ^ ^
+ // BytesInFreeList ReleaseThreshold
+ //
+ // In general, if we have collected enough bytes and the amount of free
+ // bytes meets the ReleaseThreshold, we will try to do the page release. If
+ // we don't update `BytesInFreeListAtLastCheckpoint` when the current
+ // `BytesInFreeList` is smaller, we may wait longer for enough freed blocks
+ // because we miss the (BytesInFreeListAtLastCheckpoint - BytesInFreeList)
+ // bytes in between.
+ if (BytesInFreeList <=
+ Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
+ Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
+ }
+
+ const uptr RegionPushedBytesDelta =
+ BytesInFreeList - Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
+ if (RegionPushedBytesDelta < PageSize)
+ return false;
+
// Releasing smaller blocks is expensive, so we want to make sure that a
// significant amount of bytes are free, and that there has been a good
// amount of batches pushed to the freelist before attempting to release.
- if (CheckDensity) {
- if (!Force && BytesPushed < Region->AllocatedUser / 16U)
- return 0;
- }
+ if (isSmallBlock(BlockSize) && ReleaseType == ReleaseToOS::Normal)
+ if (RegionPushedBytesDelta < Region->TryReleaseThreshold)
+ return false;
- if (!Force) {
+ if (ReleaseType == ReleaseToOS::Normal) {
const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
if (IntervalMs < 0)
- return 0;
- if (Region->ReleaseInfo.LastReleaseAtNs +
- static_cast<u64>(IntervalMs) * 1000000 >
- getMonotonicTime()) {
- return 0; // Memory was returned recently.
+ return false;
+
+ // The constant 8 here is selected from profiling some apps: the number
+ // of unreleased pages in the large size classes is around 16 pages or
+ // more. We choose half of that as a heuristic, which also avoids doing a
+ // page release on every pushBlocks() attempt by large blocks.
+ const bool ByPassReleaseInterval =
+ isLargeBlock(BlockSize) && RegionPushedBytesDelta > 8 * PageSize;
+ if (!ByPassReleaseInterval) {
+ if (Region->ReleaseInfo.LastReleaseAtNs +
+ static_cast<u64>(IntervalMs) * 1000000 >
+ getMonotonicTimeFast()) {
+ // Memory was returned recently.
+ return false;
+ }
}
- }
+ } // if (ReleaseType == ReleaseToOS::Normal)
+ return true;
+ }
+
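A small numeric walk-through of the checkpoint clamp above, with made-up byte counts:

    #include <algorithm>
    #include <cstdio>

    int main() {
      const unsigned long PageSize = 4096;
      // Made-up state: the checkpoint recorded 80 KiB, but pops have since
      // shrunk the freelist to 64 KiB.
      unsigned long BytesInFreeList = 64 * 1024;
      unsigned long Checkpoint = 80 * 1024;

      // Clamp the checkpoint down so later pushes are not under-counted.
      Checkpoint = std::min(Checkpoint, BytesInFreeList);
      std::printf("delta now: %lu\n", BytesInFreeList - Checkpoint); // 0

      // 8 KiB of frees later, the delta reflects exactly the new pushes.
      BytesInFreeList += 8 * 1024;
      const unsigned long PushedBytesDelta = BytesInFreeList - Checkpoint;
      std::printf("worth checking further: %d\n", PushedBytesDelta >= PageSize);
    }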
+ SinglyLinkedList<BatchGroup>
+ collectGroupsToRelease(RegionInfo *Region, const uptr BlockSize,
+ const uptr AllocatedUserEnd, const uptr CompactPtrBase)
+ REQUIRES(Region->MMLock, Region->FLLock) {
const uptr GroupSize = (1U << GroupSizeLog);
- const uptr AllocatedUserEnd = Region->AllocatedUser + Region->RegionBeg;
- ReleaseRecorder Recorder(Region->RegionBeg, &Region->Data);
- PageReleaseContext Context(BlockSize, Region->AllocatedUser,
- /*NumberOfRegions=*/1U);
+ const uptr PageSize = getPageSizeCached();
+ SinglyLinkedList<BatchGroup> GroupsToRelease;
+
+ // We are examining each group and will take the minimum distance to the
+ // release threshold as the next Region::TryReleaseThreshold. Note that if
+ // the size of free blocks has reached the release threshold, the distance
+ // to the next release will be PageSize * SmallerBlockReleasePageDelta. See
+ // the comment on `SmallerBlockReleasePageDelta` for more details.
+ uptr MinDistToThreshold = GroupSize;
+
+ for (BatchGroup *BG = Region->FreeListInfo.BlockList.front(),
+ *Prev = nullptr;
+ BG != nullptr;) {
+ // Group boundary is always GroupSize-aligned from CompactPtr base. The
+ // layout of memory groups is like,
+ //
+ // (CompactPtrBase)
+ // #1 CompactPtrGroupBase #2 CompactPtrGroupBase ...
+ // | | |
+ // v v v
+ // +-----------------------+-----------------------+
+ // \ / \ /
+ // --- GroupSize --- --- GroupSize ---
+ //
+ // After decompacting the CompactPtrGroupBase, we expect the alignment
+ // property is held as well.
+ const uptr BatchGroupBase =
+ decompactGroupBase(CompactPtrBase, BG->CompactPtrGroupBase);
+ DCHECK_LE(Region->RegionBeg, BatchGroupBase);
+ DCHECK_GE(AllocatedUserEnd, BatchGroupBase);
+ DCHECK_EQ((Region->RegionBeg - BatchGroupBase) % GroupSize, 0U);
+ // TransferBatches are pushed in front of BG.Batches. The first one may
+ // not have all caches used.
+ const uptr NumBlocks = (BG->Batches.size() - 1) * BG->MaxCachedPerBatch +
+ BG->Batches.front()->getCount();
+ const uptr BytesInBG = NumBlocks * BlockSize;
- const uptr CompactPtrBase = getCompactPtrBaseByClassId(ClassId);
- auto DecompactPtr = [CompactPtrBase](CompactPtrT CompactPtr) {
- return decompactPtrInternal(CompactPtrBase, CompactPtr);
- };
- for (BatchGroup &BG : Region->FreeList) {
- const uptr PushedBytesDelta =
- BG.PushedBlocks - BG.PushedBlocksAtLastCheckpoint;
- if (PushedBytesDelta * BlockSize < PageSize)
+ if (BytesInBG <= BG->BytesInBGAtLastCheckpoint) {
+ BG->BytesInBGAtLastCheckpoint = BytesInBG;
+ Prev = BG;
+ BG = BG->Next;
continue;
+ }
+
+ const uptr PushedBytesDelta = BG->BytesInBGAtLastCheckpoint - BytesInBG;
+
+ // Given the randomness property, we try to release the pages only if the
+ // bytes used by free blocks exceed a certain proportion of the group size.
+ // Note that this heuristic only applies when all the spaces in a BatchGroup
+ // are allocated.
+ if (isSmallBlock(BlockSize)) {
+ const uptr BatchGroupEnd = BatchGroupBase + GroupSize;
+ const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
+ ? GroupSize
+ : AllocatedUserEnd - BatchGroupBase;
+ const uptr ReleaseThreshold =
+ (AllocatedGroupSize * (100 - 1U - BlockSize / 16U)) / 100U;
+ const bool HighDensity = BytesInBG >= ReleaseThreshold;
+ const bool MayHaveReleasedAll = NumBlocks >= (GroupSize / BlockSize);
+ // If all blocks in the group are released, we will do range marking
+ // which is fast. Otherwise, we will wait until we have accumulated
+ // a certain amount of free memory.
+ const bool ReachReleaseDelta =
+ MayHaveReleasedAll
+ ? true
+ : PushedBytesDelta >= PageSize * SmallerBlockReleasePageDelta;
+
+ if (!HighDensity) {
+ DCHECK_LE(BytesInBG, ReleaseThreshold);
+ // The following is the usage of a memory group,
+ //
+ // BytesInBG ReleaseThreshold
+ // / \ v
+ // +---+---------------------------+-----+
+ // | | | | |
+ // +---+---------------------------+-----+
+ // \ / ^
+ // PushedBytesDelta GroupEnd
+ MinDistToThreshold =
+ Min(MinDistToThreshold,
+ ReleaseThreshold - BytesInBG + PushedBytesDelta);
+ } else {
+ // If it reaches high density in this round, the next release attempt
+ // will be based on SmallerBlockReleasePageDelta.
+ MinDistToThreshold =
+ Min(MinDistToThreshold, PageSize * SmallerBlockReleasePageDelta);
+ }
- // Group boundary does not necessarily have the same alignment as Region.
- // It may sit across a Region boundary. Which means that we may have the
- // following two cases,
+ if (!HighDensity || !ReachReleaseDelta) {
+ Prev = BG;
+ BG = BG->Next;
+ continue;
+ }
+ }
+
+ // If `BG` is the first BatchGroup in the list, we only need to advance
+ // `BG` and call FreeListInfo.BlockList::pop_front(). No update is needed
+ // for `Prev`.
+ //
+ // (BG) (BG->Next)
+ // Prev Cur BG
+ // | | |
+ // v v v
+ // nil +--+ +--+
+ // |X | -> | | -> ...
+ // +--+ +--+
//
- // 1. Group boundary sits before RegionBeg.
+ // Otherwise, `Prev` will be used to extract the `Cur` from the
+ // `FreeListInfo.BlockList`.
//
- // (BatchGroupBeg)
- // batchGroupBase RegionBeg BatchGroupEnd
- // | | |
- // v v v
- // +------------+----------------+
- // \ /
- // ------ GroupSize ------
+ // (BG) (BG->Next)
+ // Prev Cur BG
+ // | | |
+ // v v v
+ // +--+ +--+ +--+
+ // | | -> |X | -> | | -> ...
+ // +--+ +--+ +--+
//
- // 2. Group boundary sits after RegionBeg.
+ // After FreeListInfo.BlockList::extract(),
//
- // (BatchGroupBeg)
- // RegionBeg batchGroupBase BatchGroupEnd
- // | | |
- // v v v
- // +-----------+-----------------------------+
- // \ /
- // ------ GroupSize ------
+ // Prev Cur BG
+ // | | |
+ // v v v
+ // +--+ +--+ +--+
+ // | |-+ |X | +->| | -> ...
+ // +--+ | +--+ | +--+
+ // +--------+
//
- // Note that in the first case, the group range before RegionBeg is never
- // used. Therefore, while calculating the used group size, we should
- // exclude that part to get the correct size.
- const uptr BatchGroupBeg =
- Max(batchGroupBase(CompactPtrBase, BG.GroupId), Region->RegionBeg);
- DCHECK_GE(AllocatedUserEnd, BatchGroupBeg);
- const uptr BatchGroupEnd =
- batchGroupBase(CompactPtrBase, BG.GroupId) + GroupSize;
+ // Note that we need to advance before pushing this BatchGroup to
+ // GroupsToRelease because it's a destructive operation.
+
+ BatchGroup *Cur = BG;
+ BG = BG->Next;
+
+ // Ideally, we may want to update this only after successful release.
+ // However, for smaller blocks, each block marking is a costly operation.
+ // Therefore, we update it earlier.
+ // TODO: Consider updating this after releasing pages if `ReleaseRecorder`
+ // can tell the released bytes in each group.
+ Cur->BytesInBGAtLastCheckpoint = BytesInBG;
+
+ if (Prev != nullptr)
+ Region->FreeListInfo.BlockList.extract(Prev, Cur);
+ else
+ Region->FreeListInfo.BlockList.pop_front();
+ GroupsToRelease.push_back(Cur);
+ }
+
+ // Only small blocks have the adaptive `TryReleaseThreshold`.
+ if (isSmallBlock(BlockSize)) {
+ // If MinDistToThreshold was not updated, that means each memory group
+ // may have pushed less than a page size. In that case, just set it
+ // back to normal.
+ if (MinDistToThreshold == GroupSize)
+ MinDistToThreshold = PageSize * SmallerBlockReleasePageDelta;
+ Region->TryReleaseThreshold = MinDistToThreshold;
+ }
+
+ return GroupsToRelease;
+ }
+
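The Prev/Cur bookkeeping in the loop above is ordinary singly-linked-list surgery: advance past the node first (the detach is destructive), then unlink it. A minimal sketch with a toy node type:

    #include <cstdio>

    struct Node { int Id; Node *Next; };

    // Detach Cur (== Prev->Next, or the head when Prev is null) from the list.
    static Node *extract(Node *&Head, Node *Prev, Node *Cur) {
      if (Prev == nullptr)
        Head = Cur->Next; // pop_front()
      else
        Prev->Next = Cur->Next; // extract(Prev, Cur)
      Cur->Next = nullptr;
      return Cur;
    }

    int main() {
      Node C{3, nullptr}, B{2, &C}, A{1, &B};
      Node *Head = &A;
      // Advance before detaching, exactly as the loop above advances BG
      // before pushing Cur onto GroupsToRelease.
      Node *Cur = Head->Next; // node 2
      extract(Head, Head, Cur);
      std::printf("%d -> %d\n", Head->Id, Head->Next->Id); // 1 -> 3
    }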
+ PageReleaseContext
+ markFreeBlocks(RegionInfo *Region, const uptr BlockSize,
+ const uptr AllocatedUserEnd, const uptr CompactPtrBase,
+ SinglyLinkedList<BatchGroup> &GroupsToRelease)
+ REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
+ const uptr GroupSize = (1U << GroupSizeLog);
+ auto DecompactPtr = [CompactPtrBase](CompactPtrT CompactPtr) {
+ return decompactPtrInternal(CompactPtrBase, CompactPtr);
+ };
+
+ const uptr ReleaseBase = decompactGroupBase(
+ CompactPtrBase, GroupsToRelease.front()->CompactPtrGroupBase);
+ const uptr LastGroupEnd =
+ Min(decompactGroupBase(CompactPtrBase,
+ GroupsToRelease.back()->CompactPtrGroupBase) +
+ GroupSize,
+ AllocatedUserEnd);
+ // The last block may straddle the group boundary. Round up to BlockSize
+ // to get the exact range.
+ const uptr ReleaseEnd =
+ roundUpSlow(LastGroupEnd - Region->RegionBeg, BlockSize) +
+ Region->RegionBeg;
+ const uptr ReleaseRangeSize = ReleaseEnd - ReleaseBase;
+ const uptr ReleaseOffset = ReleaseBase - Region->RegionBeg;
+
+ PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
+ ReleaseRangeSize, ReleaseOffset);
+ // We may not be able to do the page release in the rare case that the
+ // PageMap allocation fails.
+ if (UNLIKELY(!Context.ensurePageMapAllocated()))
+ return Context;
+
+ for (BatchGroup &BG : GroupsToRelease) {
+ const uptr BatchGroupBase =
+ decompactGroupBase(CompactPtrBase, BG.CompactPtrGroupBase);
+ const uptr BatchGroupEnd = BatchGroupBase + GroupSize;
const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
- ? BatchGroupEnd - BatchGroupBeg
- : AllocatedUserEnd - BatchGroupBeg;
- if (AllocatedGroupSize == 0)
- continue;
+ ? GroupSize
+ : AllocatedUserEnd - BatchGroupBase;
+ const uptr BatchGroupUsedEnd = BatchGroupBase + AllocatedGroupSize;
+ const bool MayContainLastBlockInRegion =
+ BatchGroupUsedEnd == AllocatedUserEnd;
+ const bool BlockAlignedWithUsedEnd =
+ (BatchGroupUsedEnd - Region->RegionBeg) % BlockSize == 0;
+
+ uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
+ if (!BlockAlignedWithUsedEnd)
+ ++MaxContainedBlocks;
- // TransferBatches are pushed in front of BG.Batches. The first one may
- // not have all caches used.
const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
BG.Batches.front()->getCount();
- const uptr BytesInBG = NumBlocks * BlockSize;
- // Given the randomness property, we try to release the pages only if the
- // bytes used by free blocks exceed certain proportion of group size. Note
- // that this heuristic only applies when all the spaces in a BatchGroup
- // are allocated.
- if (CheckDensity && (BytesInBG * 100U) / AllocatedGroupSize <
- (100U - 1U - BlockSize / 16U)) {
+
+ if (NumBlocks == MaxContainedBlocks) {
+ for (const auto &It : BG.Batches) {
+ if (&It != BG.Batches.front())
+ DCHECK_EQ(It.getCount(), BG.MaxCachedPerBatch);
+ for (u16 I = 0; I < It.getCount(); ++I)
+ DCHECK_EQ(compactPtrGroup(It.get(I)), BG.CompactPtrGroupBase);
+ }
+
+ Context.markRangeAsAllCounted(BatchGroupBase, BatchGroupUsedEnd,
+ Region->RegionBeg, /*RegionIndex=*/0,
+ Region->MemMapInfo.AllocatedUser);
+ } else {
+ DCHECK_LT(NumBlocks, MaxContainedBlocks);
+ // Note that we don't always visit blocks in each BatchGroup so that we
+ // may miss the chance of releasing certain pages that cross
+ // BatchGroups.
+ Context.markFreeBlocksInRegion(
+ BG.Batches, DecompactPtr, Region->RegionBeg, /*RegionIndex=*/0,
+ Region->MemMapInfo.AllocatedUser, MayContainLastBlockInRegion);
+ }
+ }
+
+ DCHECK(Context.hasBlockMarked());
+
+ return Context;
+ }
+
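The MaxContainedBlocks adjustment above accounts for a final block that starts inside the used span but ends past it. In made-up numbers:

    #include <cstdio>

    int main() {
      // Assumed: 48-byte blocks in a group whose allocated span is 1000 bytes.
      const unsigned long BlockSize = 48, AllocatedGroupSize = 1000;
      const bool Aligned = (AllocatedGroupSize % BlockSize) == 0; // false here

      unsigned long MaxContainedBlocks = AllocatedGroupSize / BlockSize; // 20
      if (!Aligned)
        ++MaxContainedBlocks; // the 21st block starts inside, ends outside

      std::printf("max contained: %lu\n", MaxContainedBlocks); // 21
    }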
+ void mergeGroupsToReleaseBack(RegionInfo *Region,
+ SinglyLinkedList<BatchGroup> &GroupsToRelease)
+ REQUIRES(Region->MMLock, Region->FLLock) {
+ // After merging two freelists, we may have redundant `BatchGroup`s that
+ // need to be recycled. The number of unused `BatchGroup`s is expected to be
+ // small; the constant below is inferred from real programs.
+ constexpr uptr MaxUnusedSize = 8;
+ CompactPtrT Blocks[MaxUnusedSize];
+ u32 Idx = 0;
+ RegionInfo *BatchClassRegion = getRegionInfo(SizeClassMap::BatchClassId);
+ // We can't call pushBatchClassBlocks() to recycle the unused `BatchGroup`s
+ // when we are manipulating the freelist of `BatchClassRegion`. Instead, we
+ // should just push them back to the freelist when we merge two
+ // `BatchGroup`s. This logic hasn't been implemented because we don't
+ // support releasing pages in `BatchClassRegion` yet.
+ DCHECK_NE(BatchClassRegion, Region);
+
+ // Merge GroupsToRelease back to the Region::FreeListInfo.BlockList. Note
+ // that both `Region->FreeListInfo.BlockList` and `GroupsToRelease` are
+ // sorted.
+ for (BatchGroup *BG = Region->FreeListInfo.BlockList.front(),
+ *Prev = nullptr;
+ ;) {
+ if (BG == nullptr || GroupsToRelease.empty()) {
+ if (!GroupsToRelease.empty())
+ Region->FreeListInfo.BlockList.append_back(&GroupsToRelease);
+ break;
+ }
+
+ DCHECK(!BG->Batches.empty());
+
+ if (BG->CompactPtrGroupBase <
+ GroupsToRelease.front()->CompactPtrGroupBase) {
+ Prev = BG;
+ BG = BG->Next;
continue;
}
- BG.PushedBlocksAtLastCheckpoint = BG.PushedBlocks;
- // Note that we don't always visit blocks in each BatchGroup so that we
- // may miss the chance of releasing certain pages that cross BatchGroups.
- Context.markFreeBlocks(BG.Batches, DecompactPtr, Region->RegionBeg);
- }
+ BatchGroup *Cur = GroupsToRelease.front();
+ TransferBatch *UnusedTransferBatch = nullptr;
+ GroupsToRelease.pop_front();
+
+ if (BG->CompactPtrGroupBase == Cur->CompactPtrGroupBase) {
+ BG->PushedBlocks += Cur->PushedBlocks;
+ // We have updated `BatchGroup::BytesInBGAtLastCheckpoint` while
+ // collecting the `GroupsToRelease`.
+ BG->BytesInBGAtLastCheckpoint = Cur->BytesInBGAtLastCheckpoint;
+ const uptr MaxCachedPerBatch = BG->MaxCachedPerBatch;
+
+ // Note that the front TransferBatch in each `Batches` list may not be
+ // full, and only the front TransferBatch is allowed to be partially
+ // filled. Thus we have to merge the two fronts before appending one list
+ // to the other.
+ if (Cur->Batches.front()->getCount() == MaxCachedPerBatch) {
+ BG->Batches.append_back(&Cur->Batches);
+ } else {
+ TransferBatch *NonFullBatch = Cur->Batches.front();
+ Cur->Batches.pop_front();
+ const u16 NonFullBatchCount = NonFullBatch->getCount();
+ // The remaining Batches in `Cur` are full.
+ BG->Batches.append_back(&Cur->Batches);
+
+ if (BG->Batches.front()->getCount() == MaxCachedPerBatch) {
+ // Only 1 non-full TransferBatch, push it to the front.
+ BG->Batches.push_front(NonFullBatch);
+ } else {
+ const u16 NumBlocksToMove = static_cast<u16>(
+ Min(static_cast<u16>(MaxCachedPerBatch -
+ BG->Batches.front()->getCount()),
+ NonFullBatchCount));
+ BG->Batches.front()->appendFromTransferBatch(NonFullBatch,
+ NumBlocksToMove);
+ if (NonFullBatch->isEmpty())
+ UnusedTransferBatch = NonFullBatch;
+ else
+ BG->Batches.push_front(NonFullBatch);
+ }
+ }
- if (!Context.hasBlockMarked())
- return 0;
+ const u32 NeededSlots = UnusedTransferBatch == nullptr ? 1U : 2U;
+ if (UNLIKELY(Idx + NeededSlots > MaxUnusedSize)) {
+ ScopedLock L(BatchClassRegion->FLLock);
+ pushBatchClassBlocks(BatchClassRegion, Blocks, Idx);
+ Idx = 0;
+ }
+ Blocks[Idx++] =
+ compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(Cur));
+ if (UnusedTransferBatch) {
+ Blocks[Idx++] =
+ compactPtr(SizeClassMap::BatchClassId,
+ reinterpret_cast<uptr>(UnusedTransferBatch));
+ }
+ Prev = BG;
+ BG = BG->Next;
+ continue;
+ }
+
+ // At this point, `BG` is the first BatchGroup whose CompactPtrGroupBase is
+ // larger than that of the first element in `GroupsToRelease`. We need to
+ // insert `GroupsToRelease::front()` (which is `Cur`) before `BG`.
+ //
+ // 1. If `Prev` is nullptr, we simply push `Cur` to the front of
+ // FreeListInfo.BlockList.
+ // 2. Otherwise, use `insert()` which inserts an element next to `Prev`.
+ //
+ // Afterwards, we don't need to advance `BG` because the order between
+ // `BG` and the new `GroupsToRelease::front()` hasn't been checked.
+ if (Prev == nullptr)
+ Region->FreeListInfo.BlockList.push_front(Cur);
+ else
+ Region->FreeListInfo.BlockList.insert(Prev, Cur);
+ DCHECK_EQ(Cur->Next, BG);
+ Prev = Cur;
+ }
- auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
- releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
+ if (Idx != 0) {
+ ScopedLock L(BatchClassRegion->FLLock);
+ pushBatchClassBlocks(BatchClassRegion, Blocks, Idx);
+ }
- if (Recorder.getReleasedRangesCount() > 0) {
- Region->ReleaseInfo.PushedBlocksAtLastRelease =
- Region->Stats.PushedBlocks;
- Region->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
- Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
+ if (SCUDO_DEBUG) {
+ BatchGroup *Prev = Region->FreeListInfo.BlockList.front();
+ for (BatchGroup *Cur = Prev->Next; Cur != nullptr;
+ Prev = Cur, Cur = Cur->Next) {
+ CHECK_LT(Prev->CompactPtrGroupBase, Cur->CompactPtrGroupBase);
+ }
}
- Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
- return Recorder.getReleasedBytes();
}
+
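The loop above is an in-place merge of two sorted singly linked lists, plus an
equal-key case in which groups sharing a CompactPtrGroupBase have their
batches combined. Stripped of the batch handling, a minimal sketch of the
skeleton, with illustrative stand-ins for BatchGroup and SinglyLinkedList:

    using uptr = unsigned long;

    struct Node {
      uptr Key;
      Node *Next;
    };

    // Splice the sorted `Incoming` chain into the sorted chain at `Head`.
    void mergeSorted(Node *&Head, Node *Incoming) {
      Node **Link = &Head;
      while (Incoming != nullptr) {
        while (*Link != nullptr && (*Link)->Key < Incoming->Key)
          Link = &(*Link)->Next;
        Node *const Rest = Incoming->Next;
        Incoming->Next = *Link; // insert before the first not-smaller node
        *Link = Incoming;
        Link = &Incoming->Next;
        Incoming = Rest;
      }
    }

The real code additionally recycles the merged-away group (and any emptied
TransferBatch) into the BatchClass freelist instead of keeping duplicates.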
+ // TODO: `PrimaryBase` can be obtained from ReservedMemory; this field
+ // should be deprecated.
+ uptr PrimaryBase = 0;
+ ReservedMemoryT ReservedMemory = {};
+ // The minimum total size of pushed blocks at which we will try to release
+ // the pages in a size class.
+ uptr SmallerBlockReleasePageDelta = 0;
+ atomic_s32 ReleaseToOsIntervalMs = {};
+ alignas(SCUDO_CACHE_LINE_SIZE) RegionInfo RegionInfoArray[NumClasses];
};
} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/quarantine.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/quarantine.h
index 2d231c3a28db..b5f8db0e87c2 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/quarantine.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/quarantine.h
@@ -12,6 +12,7 @@
#include "list.h"
#include "mutex.h"
#include "string_utils.h"
+#include "thread_annotations.h"
namespace scudo {
@@ -172,7 +173,7 @@ public:
typedef QuarantineCache<Callback> CacheT;
using ThisT = GlobalQuarantine<Callback, Node>;
- void init(uptr Size, uptr CacheSize) {
+ void init(uptr Size, uptr CacheSize) NO_THREAD_SAFETY_ANALYSIS {
DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
DCHECK_EQ(atomic_load_relaxed(&MaxSize), 0U);
DCHECK_EQ(atomic_load_relaxed(&MinSize), 0U);
@@ -191,22 +192,31 @@ public:
uptr getMaxSize() const { return atomic_load_relaxed(&MaxSize); }
uptr getCacheSize() const { return atomic_load_relaxed(&MaxCacheSize); }
+ // This is supposed to be used in tests only.
+ bool isEmpty() {
+ ScopedLock L(CacheMutex);
+ return Cache.getSize() == 0U;
+ }
+
void put(CacheT *C, Callback Cb, Node *Ptr, uptr Size) {
C->enqueue(Cb, Ptr, Size);
if (C->getSize() > getCacheSize())
drain(C, Cb);
}
- void NOINLINE drain(CacheT *C, Callback Cb) {
+ void NOINLINE drain(CacheT *C, Callback Cb) EXCLUDES(CacheMutex) {
+ bool needRecycle = false;
{
ScopedLock L(CacheMutex);
Cache.transfer(C);
+ needRecycle = Cache.getSize() > getMaxSize();
}
- if (Cache.getSize() > getMaxSize() && RecycleMutex.tryLock())
+
+ if (needRecycle && RecycleMutex.tryLock())
recycle(atomic_load_relaxed(&MinSize), Cb);
}
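The rewrite above fixes a subtle race: `Cache.getSize()` used to be read after
`CacheMutex` was dropped, so it could observe another thread's concurrent
transfer. The fix snapshots the decision while still holding the lock. The
same pattern in isolation, using std::mutex as an illustrative stand-in for
HybridMutex:

    #include <cstddef>
    #include <mutex>

    struct Pool {
      std::mutex M;
      std::size_t Size = 0; // guarded by M

      bool transferAndCheck(std::size_t Incoming, std::size_t Limit) {
        bool OverLimit;
        {
          std::lock_guard<std::mutex> L(M);
          Size += Incoming;
          OverLimit = Size > Limit; // decided under the lock
        }
        return OverLimit; // safe to act on without re-reading Size
      }
    };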
- void NOINLINE drainAndRecycle(CacheT *C, Callback Cb) {
+ void NOINLINE drainAndRecycle(CacheT *C, Callback Cb) EXCLUDES(CacheMutex) {
{
ScopedLock L(CacheMutex);
Cache.transfer(C);
@@ -215,20 +225,21 @@ public:
recycle(0, Cb);
}
- void getStats(ScopedString *Str) const {
+ void getStats(ScopedString *Str) EXCLUDES(CacheMutex) {
+ ScopedLock L(CacheMutex);
// It assumes that the world is stopped, just as the allocator's printStats.
Cache.getStats(Str);
Str->append("Quarantine limits: global: %zuK; thread local: %zuK\n",
getMaxSize() >> 10, getCacheSize() >> 10);
}
- void disable() {
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
// RecycleMutex must be locked 1st since we grab CacheMutex within recycle.
RecycleMutex.lock();
CacheMutex.lock();
}
- void enable() {
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
CacheMutex.unlock();
RecycleMutex.unlock();
}
@@ -236,13 +247,14 @@ public:
private:
// Read-only data.
alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex CacheMutex;
- CacheT Cache;
+ CacheT Cache GUARDED_BY(CacheMutex);
alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecycleMutex;
atomic_uptr MinSize = {};
atomic_uptr MaxSize = {};
alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize = {};
- void NOINLINE recycle(uptr MinSize, Callback Cb) {
+ void NOINLINE recycle(uptr MinSize, Callback Cb) RELEASE(RecycleMutex)
+ EXCLUDES(CacheMutex) {
CacheT Tmp;
Tmp.init();
{
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.cpp
index 3f40dbec6d7a..938bb41faf69 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.cpp
@@ -10,7 +10,7 @@
namespace scudo {
-HybridMutex RegionPageMap::Mutex = {};
-uptr RegionPageMap::StaticBuffer[RegionPageMap::StaticBufferCount];
+BufferPool<RegionPageMap::StaticBufferCount, RegionPageMap::StaticBufferSize>
+ RegionPageMap::Buffers;
} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.h
index 6de3b15534d0..5bf963d0f26f 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.h
@@ -11,14 +11,46 @@
#include "common.h"
#include "list.h"
+#include "mem_map.h"
#include "mutex.h"
+#include "thread_annotations.h"
namespace scudo {
+template <typename MemMapT> class RegionReleaseRecorder {
+public:
+ RegionReleaseRecorder(MemMapT *RegionMemMap, uptr Base, uptr Offset = 0)
+ : RegionMemMap(RegionMemMap), Base(Base), Offset(Offset) {}
+
+ uptr getReleasedRangesCount() const { return ReleasedRangesCount; }
+
+ uptr getReleasedBytes() const { return ReleasedBytes; }
+
+ uptr getBase() const { return Base; }
+
+ // Releases the [From, To) range of pages back to the OS. Note that `From`
+ // and `To` are offsets relative to `Base` + `Offset`.
+ void releasePageRangeToOS(uptr From, uptr To) {
+ const uptr Size = To - From;
+ RegionMemMap->releasePagesToOS(getBase() + Offset + From, Size);
+ ReleasedRangesCount++;
+ ReleasedBytes += Size;
+ }
+
+private:
+ uptr ReleasedRangesCount = 0;
+ uptr ReleasedBytes = 0;
+ MemMapT *RegionMemMap = nullptr;
+ uptr Base = 0;
+ // The release offset from Base. This is used when we know a given range after
+ // Base will not be released.
+ uptr Offset = 0;
+};
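A hypothetical caller, to show how the counters accumulate (PageSize and the
offsets are made up; in this commit the recorder is actually driven by
releaseFreeMemoryToOS()):

    template <typename MemMapT>
    void releaseTwoRuns(MemMapT &MemMap, uptr RegionBase, uptr PageSize) {
      RegionReleaseRecorder<MemMapT> Recorder(&MemMap, RegionBase,
                                              /*Offset=*/16 * PageSize);
      // `From`/`To` are relative to RegionBase + Offset.
      Recorder.releasePageRangeToOS(0, 4 * PageSize);
      Recorder.releasePageRangeToOS(8 * PageSize, 10 * PageSize);
      // Now getReleasedRangesCount() == 2 and
      // getReleasedBytes() == 6 * PageSize.
    }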
+
class ReleaseRecorder {
public:
- ReleaseRecorder(uptr Base, MapPlatformData *Data = nullptr)
- : Base(Base), Data(Data) {}
+ ReleaseRecorder(uptr Base, uptr Offset = 0, MapPlatformData *Data = nullptr)
+ : Base(Base), Offset(Offset), Data(Data) {}
uptr getReleasedRangesCount() const { return ReleasedRangesCount; }
@@ -29,7 +61,7 @@ public:
// Releases [From, To) range of pages back to OS.
void releasePageRangeToOS(uptr From, uptr To) {
const uptr Size = To - From;
- releasePagesToOS(Base, From, Size, Data);
+ releasePagesToOS(Base, From + Offset, Size, Data);
ReleasedRangesCount++;
ReleasedBytes += Size;
}
@@ -37,10 +69,110 @@ public:
private:
uptr ReleasedRangesCount = 0;
uptr ReleasedBytes = 0;
+ // The starting address to release. Note that we may want to fold (Base +
+ // Offset) into a new Base. However, the Base is retrieved from
+ // `MapPlatformData` on Fuchsia, which would not be aware of the offset.
+ // Therefore, store them separately so this works on all platforms.
uptr Base = 0;
+ // The release offset from Base. This is used when we know a given range after
+ // Base will not be released.
+ uptr Offset = 0;
MapPlatformData *Data = nullptr;
};
+// A buffer pool which holds a fixed number of static buffers for fast buffer
+// allocation. If the request size is greater than `StaticBufferSize`, it'll
+// delegate the allocation to map().
+template <uptr StaticBufferCount, uptr StaticBufferSize> class BufferPool {
+public:
+ // Preserve one bit in the `Mask` so that we don't need a zero check while
+ // extracting the least significant set bit from the `Mask`.
+ static_assert(StaticBufferCount < SCUDO_WORDSIZE, "");
+ static_assert(isAligned(StaticBufferSize, SCUDO_CACHE_LINE_SIZE), "");
+
+ // Return a buffer which is at least `BufferSize`.
+ uptr *getBuffer(const uptr BufferSize) {
+ if (UNLIKELY(BufferSize > StaticBufferSize))
+ return getDynamicBuffer(BufferSize);
+
+ uptr index;
+ {
+ // TODO: In general, we expect this operation to be fast, so the waiting
+ // thread won't be put to sleep. The HybridMutex does implement
+ // busy-waiting, but we may want to review the performance and see if we
+ // need an explicit spin lock here.
+ ScopedLock L(Mutex);
+ index = getLeastSignificantSetBitIndex(Mask);
+ if (index < StaticBufferCount)
+ Mask ^= static_cast<uptr>(1) << index;
+ }
+
+ if (index >= StaticBufferCount)
+ return getDynamicBuffer(BufferSize);
+
+ const uptr Offset = index * StaticBufferSize;
+ memset(&RawBuffer[Offset], 0, StaticBufferSize);
+ return &RawBuffer[Offset];
+ }
+
+ void releaseBuffer(uptr *Buffer, const uptr BufferSize) {
+ const uptr index = getStaticBufferIndex(Buffer, BufferSize);
+ if (index < StaticBufferCount) {
+ ScopedLock L(Mutex);
+ DCHECK_EQ((Mask & (static_cast<uptr>(1) << index)), 0U);
+ Mask |= static_cast<uptr>(1) << index;
+ } else {
+ unmap(reinterpret_cast<void *>(Buffer),
+ roundUp(BufferSize, getPageSizeCached()));
+ }
+ }
+
+ bool isStaticBufferTestOnly(uptr *Buffer, uptr BufferSize) {
+ return getStaticBufferIndex(Buffer, BufferSize) < StaticBufferCount;
+ }
+
+private:
+ uptr getStaticBufferIndex(uptr *Buffer, uptr BufferSize) {
+ if (UNLIKELY(BufferSize > StaticBufferSize))
+ return StaticBufferCount;
+
+ const uptr BufferBase = reinterpret_cast<uptr>(Buffer);
+ const uptr RawBufferBase = reinterpret_cast<uptr>(RawBuffer);
+
+ if (BufferBase < RawBufferBase ||
+ BufferBase >= RawBufferBase + sizeof(RawBuffer)) {
+ return StaticBufferCount;
+ }
+
+ DCHECK_LE(BufferSize, StaticBufferSize);
+ DCHECK_LE(BufferBase + BufferSize, RawBufferBase + sizeof(RawBuffer));
+ DCHECK_EQ((BufferBase - RawBufferBase) % StaticBufferSize, 0U);
+
+ const uptr index =
+ (BufferBase - RawBufferBase) / (StaticBufferSize * sizeof(uptr));
+ DCHECK_LT(index, StaticBufferCount);
+ return index;
+ }
+
+ uptr *getDynamicBuffer(const uptr BufferSize) {
+ // When using a heap-based buffer, precommit the pages backing the Vmar by
+ // passing the |MAP_PRECOMMIT| flag. This allows an optimization where page
+ // fault exceptions are skipped as the allocated memory is accessed. So
+ // far, this is only enabled on Fuchsia; it hasn't shown a performance
+ // benefit on other platforms.
+ const uptr MmapFlags = MAP_ALLOWNOMEM | (SCUDO_FUCHSIA ? MAP_PRECOMMIT : 0);
+ return reinterpret_cast<uptr *>(
+ map(nullptr, roundUp(BufferSize, getPageSizeCached()), "scudo:counters",
+ MmapFlags, &MapData));
+ }
+
+ HybridMutex Mutex;
+ // A '1' bit means the buffer at that index is free; '0' means it is in use.
+ uptr Mask GUARDED_BY(Mutex) = ~static_cast<uptr>(0);
+ uptr RawBuffer[StaticBufferCount * StaticBufferSize] GUARDED_BY(Mutex);
+ [[no_unique_address]] MapPlatformData MapData = {};
+};
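The free-slot bookkeeping above is a one-word bitmap: bit I set means static
buffer I is free, and allocation claims the lowest set bit. The same idea in
isolation, assuming a GCC/Clang builtin and that NumSlots is smaller than the
word size (so the mask is never all zeroes):

    #include <cstdint>
    using uptr = uintptr_t;

    struct SlotMask {
      // '1' = slot free, '0' = slot in use; slot 0 is the lowest bit.
      uptr Mask = ~static_cast<uptr>(0);

      // Returns a free slot index, or >= NumSlots when all slots are taken.
      uptr acquire(uptr NumSlots) {
        // At least one high bit stays set because NumSlots < word size, so
        // counting trailing zeroes never sees a zero mask.
        const uptr Index =
            static_cast<uptr>(__builtin_ctzll(static_cast<uint64_t>(Mask)));
        if (Index < NumSlots)
          Mask ^= static_cast<uptr>(1) << Index; // claim the slot
        return Index;
      }

      void release(uptr Index) { Mask |= static_cast<uptr>(1) << Index; }
    };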
+
// A Region page map is used to record the usage of pages in the regions. It
// implements a packed array of Counters. Each counter occupies 2^N bits, enough
// to store counter's MaxValue. Ctor will try to use a static buffer first, and
@@ -68,14 +200,14 @@ public:
~RegionPageMap() {
if (!isAllocated())
return;
- if (Buffer == &StaticBuffer[0])
- Mutex.unlock();
- else
- unmap(reinterpret_cast<void *>(Buffer),
- roundUpTo(BufferSize, getPageSizeCached()));
+ Buffers.releaseBuffer(Buffer, BufferSize);
Buffer = nullptr;
}
+ // The lock of `StaticBuffer` is acquired conditionally and there's no easy
+ // way to specify the thread-safety attribute properly in the current code
+ // structure. Besides, it's the only place where we may want to check thread
+ // safety. Therefore, it's fine to bypass the thread-safety analysis for now.
void reset(uptr NumberOfRegion, uptr CountersPerRegion, uptr MaxValue) {
DCHECK_GT(NumberOfRegion, 0);
DCHECK_GT(CountersPerRegion, 0);
@@ -88,7 +220,7 @@ public:
// Rounding counter storage size up to the power of two allows for using
// bit shifts calculating particular counter's Index and offset.
const uptr CounterSizeBits =
- roundUpToPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
+ roundUpPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
DCHECK_LE(CounterSizeBits, MaxCounterBits);
CounterSizeBitsLog = getLog2(CounterSizeBits);
CounterMask = ~(static_cast<uptr>(0)) >> (MaxCounterBits - CounterSizeBits);
@@ -99,24 +231,10 @@ public:
BitOffsetMask = PackingRatio - 1;
SizePerRegion =
- roundUpTo(NumCounters, static_cast<uptr>(1U) << PackingRatioLog) >>
+ roundUp(NumCounters, static_cast<uptr>(1U) << PackingRatioLog) >>
PackingRatioLog;
BufferSize = SizePerRegion * sizeof(*Buffer) * Regions;
- if (BufferSize <= (StaticBufferCount * sizeof(Buffer[0])) &&
- Mutex.tryLock()) {
- Buffer = &StaticBuffer[0];
- memset(Buffer, 0, BufferSize);
- } else {
- // When using a heap-based buffer, precommit the pages backing the
- // Vmar by passing |MAP_PRECOMMIT| flag. This allows an optimization
- // where page fault exceptions are skipped as the allocated memory
- // is accessed.
- const uptr MmapFlags =
- MAP_ALLOWNOMEM | (SCUDO_FUCHSIA ? MAP_PRECOMMIT : 0);
- Buffer = reinterpret_cast<uptr *>(
- map(nullptr, roundUpTo(BufferSize, getPageSizeCached()),
- "scudo:counters", MmapFlags, &MapData));
- }
+ Buffer = Buffers.getBuffer(BufferSize);
}
bool isAllocated() const { return !!Buffer; }
@@ -141,6 +259,17 @@ public:
<< BitOffset;
}
+ void incN(uptr Region, uptr I, uptr N) const {
+ DCHECK_GT(N, 0U);
+ DCHECK_LE(N, CounterMask);
+ DCHECK_LE(get(Region, I), CounterMask - N);
+ const uptr Index = I >> PackingRatioLog;
+ const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
+ DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
+ DCHECK_EQ(isAllCounted(Region, I), false);
+ Buffer[Region * SizePerRegion + Index] += N << BitOffset;
+ }
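For reference, the packed-counter arithmetic used by incN() works out as
follows (the parameters are assumed for illustration, not taken from a real
region):

    // CounterSizeBits = 4  -> CounterSizeBitsLog = 2, CounterMask = 0xF
    // PackingRatio = 64 / 4 = 16 -> PackingRatioLog = 4, BitOffsetMask = 0xF
    // For counter I = 21:
    //   Index     = 21 >> 4         = 1   (second word of the region)
    //   BitOffset = (21 & 0xF) << 2 = 20
    // incN(Region, 21, 3) therefore adds (3 << 20) to that word, bumping
    // counter 21 by 3 without disturbing its 15 neighbors.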
+
void incRange(uptr Region, uptr From, uptr To) const {
DCHECK_LE(From, To);
const uptr Top = Min(To + 1, NumCounters);
@@ -159,14 +288,29 @@ public:
DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
Buffer[Region * SizePerRegion + Index] |= CounterMask << BitOffset;
}
+ void setAsAllCountedRange(uptr Region, uptr From, uptr To) const {
+ DCHECK_LE(From, To);
+ const uptr Top = Min(To + 1, NumCounters);
+ for (uptr I = From; I < Top; I++)
+ setAsAllCounted(Region, I);
+ }
+
+ bool updateAsAllCountedIf(uptr Region, uptr I, uptr MaxCount) {
+ const uptr Count = get(Region, I);
+ if (Count == CounterMask)
+ return true;
+ if (Count == MaxCount) {
+ setAsAllCounted(Region, I);
+ return true;
+ }
+ return false;
+ }
bool isAllCounted(uptr Region, uptr I) const {
return get(Region, I) == CounterMask;
}
uptr getBufferSize() const { return BufferSize; }
- static const uptr StaticBufferCount = 2048U;
-
private:
uptr Regions;
uptr NumCounters;
@@ -178,10 +322,12 @@ private:
uptr SizePerRegion;
uptr BufferSize;
uptr *Buffer;
- [[no_unique_address]] MapPlatformData MapData = {};
- static HybridMutex Mutex;
- static uptr StaticBuffer[StaticBufferCount];
+ // We may consider making this configurable if there are cases which may
+ // benefit from this.
+ static const uptr StaticBufferCount = 2U;
+ static const uptr StaticBufferSize = 512U;
+ static BufferPool<StaticBufferCount, StaticBufferSize> Buffers;
};
template <class ReleaseRecorderT> class FreePagesRangeTracker {
@@ -225,10 +371,9 @@ private:
};
struct PageReleaseContext {
- PageReleaseContext(uptr BlockSize, uptr RegionSize, uptr NumberOfRegions) :
- BlockSize(BlockSize),
- RegionSize(RegionSize),
- NumberOfRegions(NumberOfRegions) {
+ PageReleaseContext(uptr BlockSize, uptr NumberOfRegions, uptr ReleaseSize,
+ uptr ReleaseOffset = 0)
+ : BlockSize(BlockSize), NumberOfRegions(NumberOfRegions) {
PageSize = getPageSizeCached();
if (BlockSize <= PageSize) {
if (PageSize % BlockSize == 0) {
@@ -260,10 +405,16 @@ struct PageReleaseContext {
}
}
- PagesCount = roundUpTo(RegionSize, PageSize) / PageSize;
+ // TODO: For multiple regions, it's more complicated to support partial
+ // region marking (which includes the complexity of how to handle the last
+ // block in a region). We may consider this after markFreeBlocks() accepts
+ // only free blocks from the same region.
+ if (NumberOfRegions != 1)
+ DCHECK_EQ(ReleaseOffset, 0U);
+
+ PagesCount = roundUp(ReleaseSize, PageSize) / PageSize;
PageSizeLog = getLog2(PageSize);
- RoundedRegionSize = PagesCount << PageSizeLog;
- RoundedSize = NumberOfRegions * RoundedRegionSize;
+ ReleasePageOffset = ReleaseOffset >> PageSizeLog;
}
// PageMap is lazily allocated when markFreeBlocks() is invoked.
@@ -271,17 +422,147 @@ struct PageReleaseContext {
return PageMap.isAllocated();
}
- void ensurePageMapAllocated() {
+ bool ensurePageMapAllocated() {
if (PageMap.isAllocated())
- return;
+ return true;
PageMap.reset(NumberOfRegions, PagesCount, FullPagesBlockCountMax);
- DCHECK(PageMap.isAllocated());
+ // TODO: Log some message when we fail on PageMap allocation.
+ return PageMap.isAllocated();
}
- template<class TransferBatchT, typename DecompactPtrT>
- void markFreeBlocks(const IntrusiveList<TransferBatchT> &FreeList,
- DecompactPtrT DecompactPtr, uptr Base) {
- ensurePageMapAllocated();
+ // Mark all the blocks in the given range [From, To). Instead of visiting
+ // all the blocks, we just mark the pages as all counted. Note that `From`
+ // and `To` have to be page aligned, with one exception: if `To` is equal to
+ // RegionSize, it doesn't need to be page aligned.
+ bool markRangeAsAllCounted(uptr From, uptr To, uptr Base,
+ const uptr RegionIndex, const uptr RegionSize) {
+ DCHECK_LT(From, To);
+ DCHECK_LE(To, Base + RegionSize);
+ DCHECK_EQ(From % PageSize, 0U);
+ DCHECK_LE(To - From, RegionSize);
+
+ if (!ensurePageMapAllocated())
+ return false;
+
+ uptr FromInRegion = From - Base;
+ uptr ToInRegion = To - Base;
+ uptr FirstBlockInRange = roundUpSlow(FromInRegion, BlockSize);
+
+ // The straddling block sits across the entire range.
+ if (FirstBlockInRange >= ToInRegion)
+ return true;
+
+ // The first block may not start on the first page in the range; move
+ // `FromInRegion` to the page containing the first block.
+ FromInRegion = roundDown(FirstBlockInRange, PageSize);
+
+ // When the first block is not aligned to the range boundary, a block sits
+ // across `From`, which looks like,
+ //
+ // From To
+ // V V
+ // +-----------------------------------------------+
+ // +-----+-----+-----+-----+
+ // | | | | | ...
+ // +-----+-----+-----+-----+
+ // |- first page -||- second page -||- ...
+ //
+ // Therefore, we can't just mark the first page as all counted. Instead, we
+ // increment the number of blocks in the first page in the page map and
+ // then round `From` up to the next page.
+ if (FirstBlockInRange != FromInRegion) {
+ DCHECK_GT(FromInRegion + PageSize, FirstBlockInRange);
+ uptr NumBlocksInFirstPage =
+ (FromInRegion + PageSize - FirstBlockInRange + BlockSize - 1) /
+ BlockSize;
+ PageMap.incN(RegionIndex, getPageIndex(FromInRegion),
+ NumBlocksInFirstPage);
+ FromInRegion = roundUp(FromInRegion + 1, PageSize);
+ }
+
+ uptr LastBlockInRange = roundDownSlow(ToInRegion - 1, BlockSize);
+
+ // Note that LastBlockInRange may be smaller than `FromInRegion` at this
+ // point because the range may contain only one block.
+
+ // When the last block sits across `To`, we can't just mark the pages
+ // occupied by the last block as all counted. Instead, we increment the
+ // counters of those pages by 1. The exception is that if it's the last
+ // block in the region, it's fine to mark those pages as all counted.
+ if (LastBlockInRange + BlockSize != RegionSize) {
+ DCHECK_EQ(ToInRegion % PageSize, 0U);
+ // The case below is like,
+ //
+ // From To
+ // V V
+ // +----------------------------------------+
+ // +-----+-----+-----+-----+
+ // | | | | | ...
+ // +-----+-----+-----+-----+
+ // ... -||- last page -||- next page -|
+ //
+ // The last block is not aligned to `To`; we need to increment the
+ // counter of the `next page` by 1.
+ if (LastBlockInRange + BlockSize != ToInRegion) {
+ PageMap.incRange(RegionIndex, getPageIndex(ToInRegion),
+ getPageIndex(LastBlockInRange + BlockSize - 1));
+ }
+ } else {
+ ToInRegion = RegionSize;
+ }
+
+ // After handling the first page and the last block, it's safe to mark any
+ // remaining page within [From, To) as all counted.
+ if (FromInRegion < ToInRegion) {
+ PageMap.setAsAllCountedRange(RegionIndex, getPageIndex(FromInRegion),
+ getPageIndex(ToInRegion - 1));
+ }
+
+ return true;
+ }
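A worked instance of the first-page adjustment above, with assumed numbers:

    // PageSize = 4096, BlockSize = 4608, FromInRegion = 4096.
    // FirstBlockInRange = roundUpSlow(4096, 4608) = 4608, so a block
    // straddles `From` and the first page [4096, 8192) can't be marked as
    // all counted. Blocks overlapping that page:
    //   NumBlocksInFirstPage = (4096 + 4096 - 4608 + 4607) / 4608 = 1
    // The page gets incN(..., 1) and FromInRegion advances to 8192, from
    // where whole pages can be marked via setAsAllCountedRange().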
+
+ template <class TransferBatchT, typename DecompactPtrT>
+ bool markFreeBlocksInRegion(const IntrusiveList<TransferBatchT> &FreeList,
+ DecompactPtrT DecompactPtr, const uptr Base,
+ const uptr RegionIndex, const uptr RegionSize,
+ bool MayContainLastBlockInRegion) {
+ if (!ensurePageMapAllocated())
+ return false;
+
+ if (MayContainLastBlockInRegion) {
+ const uptr LastBlockInRegion =
+ ((RegionSize / BlockSize) - 1U) * BlockSize;
+ // The last block in a region may not use the entire page, so we mark the
+ // following "pretend" memory block(s) as free in advance.
+ //
+ // Region Boundary
+ // v
+ // -----+-----------------------+
+ // | Last Page | <- Rounded Region Boundary
+ // -----+-----------------------+
+ // |-----||- trailing blocks -|
+ // ^
+ // last block
+ const uptr RoundedRegionSize = roundUp(RegionSize, PageSize);
+ const uptr TrailingBlockBase = LastBlockInRegion + BlockSize;
+ // If the difference between `RoundedRegionSize` and
+ // `TrailingBlockBase` is larger than a page, that implies the reported
+ // `RegionSize` may not be accurate.
+ DCHECK_LT(RoundedRegionSize - TrailingBlockBase, PageSize);
+
+ // Only the last page touched by the last block needs the trailing blocks
+ // marked. Note that if the last "pretend" block straddles the boundary, we
+ // still have to count it in so that the logic of counting the number of
+ // blocks on a page stays consistent.
+ uptr NumTrailingBlocks =
+ (roundUpSlow(RoundedRegionSize - TrailingBlockBase, BlockSize) +
+ BlockSize - 1) /
+ BlockSize;
+ if (NumTrailingBlocks > 0) {
+ PageMap.incN(RegionIndex, getPageIndex(TrailingBlockBase),
+ NumTrailingBlocks);
+ }
+ }
// Iterate over free chunks and count how many free chunks affect each
// allocated page.
@@ -289,51 +570,37 @@ struct PageReleaseContext {
// Each chunk affects one page only.
for (const auto &It : FreeList) {
for (u16 I = 0; I < It.getCount(); I++) {
- const uptr P = DecompactPtr(It.get(I)) - Base;
- if (P >= RoundedSize)
- continue;
- const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
- const uptr PInRegion = P - RegionIndex * RegionSize;
- PageMap.inc(RegionIndex, PInRegion >> PageSizeLog);
+ const uptr PInRegion = DecompactPtr(It.get(I)) - Base;
+ DCHECK_LT(PInRegion, RegionSize);
+ PageMap.inc(RegionIndex, getPageIndex(PInRegion));
}
}
} else {
// In all other cases chunks might affect more than one page.
DCHECK_GE(RegionSize, BlockSize);
- const uptr LastBlockInRegion =
- ((RegionSize / BlockSize) - 1U) * BlockSize;
for (const auto &It : FreeList) {
for (u16 I = 0; I < It.getCount(); I++) {
- const uptr P = DecompactPtr(It.get(I)) - Base;
- if (P >= RoundedSize)
- continue;
- const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
- uptr PInRegion = P - RegionIndex * RegionSize;
- PageMap.incRange(RegionIndex, PInRegion >> PageSizeLog,
- (PInRegion + BlockSize - 1) >> PageSizeLog);
- // The last block in a region might straddle a page, so if it's
- // free, we mark the following "pretend" memory block(s) as free.
- if (PInRegion == LastBlockInRegion) {
- PInRegion += BlockSize;
- while (PInRegion < RoundedRegionSize) {
- PageMap.incRange(RegionIndex, PInRegion >> PageSizeLog,
- (PInRegion + BlockSize - 1) >> PageSizeLog);
- PInRegion += BlockSize;
- }
- }
+ const uptr PInRegion = DecompactPtr(It.get(I)) - Base;
+ PageMap.incRange(RegionIndex, getPageIndex(PInRegion),
+ getPageIndex(PInRegion + BlockSize - 1));
}
}
}
+
+ return true;
}
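And a worked instance of the trailing-block count above, again with assumed
numbers:

    // PageSize = 4096, BlockSize = 4608, RegionSize = 3 * 4608 = 13824.
    // LastBlockInRegion = 9216, TrailingBlockBase = 13824, and
    // RoundedRegionSize = 16384, leaving a 2560-byte tail (< PageSize).
    //   NumTrailingBlocks = (roundUpSlow(2560, 4608) + 4607) / 4608 = 1
    // So the last page [12288, 16384) is pre-incremented by one pretend
    // block and can still reach its all-counted threshold once the real
    // free blocks are marked.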
+ uptr getPageIndex(uptr P) { return (P >> PageSizeLog) - ReleasePageOffset; }
+ uptr getReleaseOffset() { return ReleasePageOffset << PageSizeLog; }
+
uptr BlockSize;
- uptr RegionSize;
uptr NumberOfRegions;
+ // For partial region marking, the pages in front do not need to be
+ // counted.
+ uptr ReleasePageOffset;
uptr PageSize;
uptr PagesCount;
uptr PageSizeLog;
- uptr RoundedRegionSize;
- uptr RoundedSize;
uptr FullPagesBlockCountMax;
bool SameBlockCountPerPage;
RegionPageMap PageMap;
@@ -350,6 +617,7 @@ releaseFreeMemoryToOS(PageReleaseContext &Context,
const uptr BlockSize = Context.BlockSize;
const uptr PagesCount = Context.PagesCount;
const uptr NumberOfRegions = Context.NumberOfRegions;
+ const uptr ReleasePageOffset = Context.ReleasePageOffset;
const uptr FullPagesBlockCountMax = Context.FullPagesBlockCountMax;
const bool SameBlockCountPerPage = Context.SameBlockCountPerPage;
RegionPageMap &PageMap = Context.PageMap;
@@ -365,9 +633,8 @@ releaseFreeMemoryToOS(PageReleaseContext &Context,
continue;
}
for (uptr J = 0; J < PagesCount; J++) {
- const bool CanRelease = PageMap.get(I, J) == FullPagesBlockCountMax;
- if (CanRelease)
- PageMap.setAsAllCounted(I, J);
+ const bool CanRelease =
+ PageMap.updateAsAllCountedIf(I, J, FullPagesBlockCountMax);
RangeTracker.processNextPage(CanRelease);
}
}
@@ -388,6 +655,10 @@ releaseFreeMemoryToOS(PageReleaseContext &Context,
}
uptr PrevPageBoundary = 0;
uptr CurrentBoundary = 0;
+ if (ReleasePageOffset > 0) {
+ PrevPageBoundary = ReleasePageOffset * PageSize;
+ CurrentBoundary = roundUpSlow(PrevPageBoundary, BlockSize);
+ }
for (uptr J = 0; J < PagesCount; J++) {
const uptr PageBoundary = PrevPageBoundary + PageSize;
uptr BlocksPerPage = Pn;
@@ -401,9 +672,8 @@ releaseFreeMemoryToOS(PageReleaseContext &Context,
}
}
PrevPageBoundary = PageBoundary;
- const bool CanRelease = PageMap.get(I, J) == BlocksPerPage;
- if (CanRelease)
- PageMap.setAsAllCounted(I, J);
+ const bool CanRelease =
+ PageMap.updateAsAllCountedIf(I, J, BlocksPerPage);
RangeTracker.processNextPage(CanRelease);
}
}
@@ -411,20 +681,6 @@ releaseFreeMemoryToOS(PageReleaseContext &Context,
RangeTracker.finish();
}
-// An overload releaseFreeMemoryToOS which doesn't require the page usage
-// information after releasing.
-template <class TransferBatchT, class ReleaseRecorderT, typename DecompactPtrT,
- typename SkipRegionT>
-NOINLINE void
-releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList,
- uptr RegionSize, uptr NumberOfRegions, uptr BlockSize,
- ReleaseRecorderT &Recorder, DecompactPtrT DecompactPtr,
- SkipRegionT SkipRegion) {
- PageReleaseContext Context(BlockSize, RegionSize, NumberOfRegions);
- Context.markFreeBlocks(FreeList, DecompactPtr, Recorder.getBase());
- releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
-}
-
} // namespace scudo
#endif // SCUDO_RELEASE_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.cpp
index a37faacbb932..81b3dce4e02c 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.cpp
@@ -21,7 +21,7 @@ public:
void append(const char *Format, ...) {
va_list Args;
va_start(Args, Format);
- Message.append(Format, Args);
+ Message.vappend(Format, Args);
va_end(Args);
}
NORETURN ~ScopedErrorReport() {
@@ -112,6 +112,11 @@ void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
UserSize, TotalSize, MaxSize);
}
+void NORETURN reportOutOfBatchClass() {
+ ScopedErrorReport Report;
+ Report.append("BatchClass region is used up, can't hold any free block\n");
+}
+
void NORETURN reportOutOfMemory(uptr RequestedSize) {
ScopedErrorReport Report;
Report.append("out of memory trying to allocate %zu bytes\n", RequestedSize);
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.h
index d38451da0988..3a78ab64b13f 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.h
@@ -32,6 +32,7 @@ void NORETURN reportSanityCheckError(const char *Field);
void NORETURN reportAlignmentTooBig(uptr Alignment, uptr MaxAlignment);
void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
uptr MaxSize);
+void NORETURN reportOutOfBatchClass();
void NORETURN reportOutOfMemory(uptr RequestedSize);
void NORETURN reportSoftRSSLimit(uptr RssLimitMb);
void NORETURN reportHardRSSLimit(uptr RssLimitMb);
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/secondary.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/secondary.h
index 2d1775762588..105b154b5de2 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/secondary.h
@@ -12,11 +12,13 @@
#include "chunk.h"
#include "common.h"
#include "list.h"
+#include "mem_map.h"
#include "memtag.h"
#include "mutex.h"
#include "options.h"
#include "stats.h"
#include "string_utils.h"
+#include "thread_annotations.h"
namespace scudo {
@@ -36,9 +38,7 @@ struct alignas(Max<uptr>(archSupportsMemoryTagging()
LargeBlock::Header *Next;
uptr CommitBase;
uptr CommitSize;
- uptr MapBase;
- uptr MapSize;
- [[no_unique_address]] MapPlatformData Data;
+ MemMapT MemMap;
};
static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
@@ -64,12 +64,15 @@ template <typename Config> static Header *getHeader(const void *Ptr) {
} // namespace LargeBlock
-static void unmap(LargeBlock::Header *H) {
- MapPlatformData Data = H->Data;
- unmap(reinterpret_cast<void *>(H->MapBase), H->MapSize, UNMAP_ALL, &Data);
+static inline void unmap(LargeBlock::Header *H) {
+ // Note that `H->MemMap` is stored on the pages managed by itself. Take over
+ // the ownership before unmap() so that nothing touches those pages once
+ // they become inaccessible.
+ MemMapT MemMap = H->MemMap;
+ MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
}
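The local copy is the whole point: the header, and the MemMapT inside it, live
in the very pages being unmapped. The same self-hosted-metadata pattern in a
reduced form (MemMapT here is any type with the unmap/getBase/getCapacity
interface shown in this diff):

    template <typename MemMapT>
    void destroySelfHosted(MemMapT *StoredInMapping) {
      // StoredInMapping points into the mapping it describes; copy it out
      // before the backing pages can become inaccessible.
      MemMapT Local = *StoredInMapping;
      Local.unmap(Local.getBase(), Local.getCapacity());
    }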
-class MapAllocatorNoCache {
+template <typename Config> class MapAllocatorNoCache {
public:
void init(UNUSED s32 ReleaseToOsInterval) {}
bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
@@ -90,26 +93,29 @@ public:
// Not supported by the Secondary Cache, but not an error either.
return true;
}
+
+ void getStats(UNUSED ScopedString *Str) {
+ Str->append("Secondary Cache Disabled\n");
+ }
};
static const uptr MaxUnusedCachePages = 4U;
template <typename Config>
void mapSecondary(Options Options, uptr CommitBase, uptr CommitSize,
- uptr AllocPos, uptr Flags, MapPlatformData *Data) {
+ uptr AllocPos, uptr Flags, MemMapT &MemMap) {
const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * getPageSizeCached();
if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
- map(reinterpret_cast<void *>(CommitBase), UntaggedPos - CommitBase,
- "scudo:secondary", MAP_RESIZABLE | MAP_MEMTAG | Flags, Data);
- map(reinterpret_cast<void *>(UntaggedPos),
- CommitBase + CommitSize - UntaggedPos, "scudo:secondary",
- MAP_RESIZABLE | Flags, Data);
+ MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary",
+ MAP_RESIZABLE | MAP_MEMTAG | Flags);
+ MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
+ "scudo:secondary", MAP_RESIZABLE | Flags);
} else {
- map(reinterpret_cast<void *>(CommitBase), CommitSize, "scudo:secondary",
+ const uptr RemapFlags =
MAP_RESIZABLE | (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) |
- Flags,
- Data);
+ Flags;
+ MemMap.remap(CommitBase, CommitSize, "scudo:secondary", RemapFlags);
}
}
@@ -128,36 +134,52 @@ public:
template <typename Config> class MapAllocatorCache {
public:
+ using CacheConfig = typename Config::Secondary::Cache;
+
+ void getStats(ScopedString *Str) {
+ ScopedLock L(Mutex);
+ Str->append("Stats: MapAllocatorCache: EntriesCount: %d, "
+ "MaxEntriesCount: %u, MaxEntrySize: %zu\n",
+ EntriesCount, atomic_load_relaxed(&MaxEntriesCount),
+ atomic_load_relaxed(&MaxEntrySize));
+ for (CachedBlock Entry : Entries) {
+ if (!Entry.CommitBase)
+ continue;
+ Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
+ "BlockSize: %zu\n",
+ Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
+ Entry.CommitSize);
+ }
+ }
+
// Ensure the default maximum specified fits the array.
- static_assert(Config::SecondaryCacheDefaultMaxEntriesCount <=
- Config::SecondaryCacheEntriesArraySize,
+ static_assert(CacheConfig::DefaultMaxEntriesCount <=
+ CacheConfig::EntriesArraySize,
"");
- void init(s32 ReleaseToOsInterval) {
+ void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
DCHECK_EQ(EntriesCount, 0U);
setOption(Option::MaxCacheEntriesCount,
- static_cast<sptr>(Config::SecondaryCacheDefaultMaxEntriesCount));
+ static_cast<sptr>(CacheConfig::DefaultMaxEntriesCount));
setOption(Option::MaxCacheEntrySize,
- static_cast<sptr>(Config::SecondaryCacheDefaultMaxEntrySize));
+ static_cast<sptr>(CacheConfig::DefaultMaxEntrySize));
setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
}
- void store(Options Options, LargeBlock::Header *H) {
+ void store(Options Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
if (!canCache(H->CommitSize))
return unmap(H);
bool EntryCached = false;
bool EmptyCache = false;
const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
- const u64 Time = getMonotonicTime();
+ const u64 Time = getMonotonicTimeFast();
const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
CachedBlock Entry;
Entry.CommitBase = H->CommitBase;
Entry.CommitSize = H->CommitSize;
- Entry.MapBase = H->MapBase;
- Entry.MapSize = H->MapSize;
Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
- Entry.Data = H->Data;
+ Entry.MemMap = H->MemMap;
Entry.Time = Time;
if (useMemoryTagging<Config>(Options)) {
if (Interval == 0 && !SCUDO_FUCHSIA) {
@@ -167,13 +189,13 @@ public:
// on top so we just do the two syscalls there.
Entry.Time = 0;
mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize,
- Entry.CommitBase, MAP_NOACCESS, &Entry.Data);
+ Entry.CommitBase, MAP_NOACCESS, Entry.MemMap);
} else {
- setMemoryPermission(Entry.CommitBase, Entry.CommitSize, MAP_NOACCESS,
- &Entry.Data);
+ Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
+ MAP_NOACCESS);
}
} else if (Interval == 0) {
- releasePagesToOS(Entry.CommitBase, 0, Entry.CommitSize, &Entry.Data);
+ Entry.MemMap.releasePagesToOS(Entry.CommitBase, Entry.CommitSize);
Entry.Time = 0;
}
do {
@@ -185,10 +207,9 @@ public:
// just unmap it.
break;
}
- if (Config::SecondaryCacheQuarantineSize &&
- useMemoryTagging<Config>(Options)) {
+ if (CacheConfig::QuarantineSize && useMemoryTagging<Config>(Options)) {
QuarantinePos =
- (QuarantinePos + 1) % Max(Config::SecondaryCacheQuarantineSize, 1u);
+ (QuarantinePos + 1) % Max(CacheConfig::QuarantineSize, 1u);
if (!Quarantine[QuarantinePos].CommitBase) {
Quarantine[QuarantinePos] = Entry;
return;
@@ -222,12 +243,11 @@ public:
else if (Interval >= 0)
releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
if (!EntryCached)
- unmap(reinterpret_cast<void *>(Entry.MapBase), Entry.MapSize, UNMAP_ALL,
- &Entry.Data);
+ Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
}
bool retrieve(Options Options, uptr Size, uptr Alignment,
- LargeBlock::Header **H, bool *Zeroed) {
+ LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
const uptr PageSize = getPageSizeCached();
const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
bool Found = false;
@@ -243,45 +263,46 @@ public:
continue;
const uptr CommitSize = Entries[I].CommitSize;
const uptr AllocPos =
- roundDownTo(CommitBase + CommitSize - Size, Alignment);
+ roundDown(CommitBase + CommitSize - Size, Alignment);
HeaderPos =
AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
if (HeaderPos > CommitBase + CommitSize)
continue;
if (HeaderPos < CommitBase ||
- AllocPos > CommitBase + PageSize * MaxUnusedCachePages)
+ AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
continue;
+ }
Found = true;
Entry = Entries[I];
Entries[I].CommitBase = 0;
+ EntriesCount--;
break;
}
}
- if (Found) {
- *H = reinterpret_cast<LargeBlock::Header *>(
- LargeBlock::addHeaderTag<Config>(HeaderPos));
- *Zeroed = Entry.Time == 0;
- if (useMemoryTagging<Config>(Options))
- setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0, &Entry.Data);
- uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
- if (useMemoryTagging<Config>(Options)) {
- if (*Zeroed)
- storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
- NewBlockBegin);
- else if (Entry.BlockBegin < NewBlockBegin)
- storeTags(Entry.BlockBegin, NewBlockBegin);
- else
- storeTags(untagPointer(NewBlockBegin),
- untagPointer(Entry.BlockBegin));
+ if (!Found)
+ return false;
+
+ *H = reinterpret_cast<LargeBlock::Header *>(
+ LargeBlock::addHeaderTag<Config>(HeaderPos));
+ *Zeroed = Entry.Time == 0;
+ if (useMemoryTagging<Config>(Options))
+ Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
+ uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
+ if (useMemoryTagging<Config>(Options)) {
+ if (*Zeroed) {
+ storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
+ NewBlockBegin);
+ } else if (Entry.BlockBegin < NewBlockBegin) {
+ storeTags(Entry.BlockBegin, NewBlockBegin);
+ } else {
+ storeTags(untagPointer(NewBlockBegin),
+ untagPointer(Entry.BlockBegin));
}
- (*H)->CommitBase = Entry.CommitBase;
- (*H)->CommitSize = Entry.CommitSize;
- (*H)->MapBase = Entry.MapBase;
- (*H)->MapSize = Entry.MapSize;
- (*H)->Data = Entry.Data;
- EntriesCount--;
}
- return Found;
+ (*H)->CommitBase = Entry.CommitBase;
+ (*H)->CommitSize = Entry.CommitSize;
+ (*H)->MemMap = Entry.MemMap;
+ return true;
}
bool canCache(uptr Size) {
@@ -291,16 +312,15 @@ public:
bool setOption(Option O, sptr Value) {
if (O == Option::ReleaseInterval) {
- const s32 Interval =
- Max(Min(static_cast<s32>(Value),
- Config::SecondaryCacheMaxReleaseToOsIntervalMs),
- Config::SecondaryCacheMinReleaseToOsIntervalMs);
+ const s32 Interval = Max(
+ Min(static_cast<s32>(Value), CacheConfig::MaxReleaseToOsIntervalMs),
+ CacheConfig::MinReleaseToOsIntervalMs);
atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
return true;
}
if (O == Option::MaxCacheEntriesCount) {
const u32 MaxCount = static_cast<u32>(Value);
- if (MaxCount > Config::SecondaryCacheEntriesArraySize)
+ if (MaxCount > CacheConfig::EntriesArraySize)
return false;
atomic_store_relaxed(&MaxEntriesCount, MaxCount);
return true;
@@ -315,67 +335,62 @@ public:
void releaseToOS() { releaseOlderThan(UINT64_MAX); }
- void disableMemoryTagging() {
+ void disableMemoryTagging() EXCLUDES(Mutex) {
ScopedLock L(Mutex);
- for (u32 I = 0; I != Config::SecondaryCacheQuarantineSize; ++I) {
+ for (u32 I = 0; I != CacheConfig::QuarantineSize; ++I) {
if (Quarantine[I].CommitBase) {
- unmap(reinterpret_cast<void *>(Quarantine[I].MapBase),
- Quarantine[I].MapSize, UNMAP_ALL, &Quarantine[I].Data);
+ MemMapT &MemMap = Quarantine[I].MemMap;
+ MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
Quarantine[I].CommitBase = 0;
}
}
const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
- for (u32 I = 0; I < MaxCount; I++)
- if (Entries[I].CommitBase)
- setMemoryPermission(Entries[I].CommitBase, Entries[I].CommitSize, 0,
- &Entries[I].Data);
+ for (u32 I = 0; I < MaxCount; I++) {
+ if (Entries[I].CommitBase) {
+ Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+ Entries[I].CommitSize, 0);
+ }
+ }
QuarantinePos = -1U;
}
- void disable() { Mutex.lock(); }
+ void disable() NO_THREAD_SAFETY_ANALYSIS { Mutex.lock(); }
- void enable() { Mutex.unlock(); }
+ void enable() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }
void unmapTestOnly() { empty(); }
private:
void empty() {
- struct {
- void *MapBase;
- uptr MapSize;
- MapPlatformData Data;
- } MapInfo[Config::SecondaryCacheEntriesArraySize];
+ MemMapT MapInfo[CacheConfig::EntriesArraySize];
uptr N = 0;
{
ScopedLock L(Mutex);
- for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++) {
+ for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++) {
if (!Entries[I].CommitBase)
continue;
- MapInfo[N].MapBase = reinterpret_cast<void *>(Entries[I].MapBase);
- MapInfo[N].MapSize = Entries[I].MapSize;
- MapInfo[N].Data = Entries[I].Data;
+ MapInfo[N] = Entries[I].MemMap;
Entries[I].CommitBase = 0;
N++;
}
EntriesCount = 0;
IsFullEvents = 0;
}
- for (uptr I = 0; I < N; I++)
- unmap(MapInfo[I].MapBase, MapInfo[I].MapSize, UNMAP_ALL,
- &MapInfo[I].Data);
+ for (uptr I = 0; I < N; I++) {
+ MemMapT &MemMap = MapInfo[I];
+ MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+ }
}
struct CachedBlock {
- uptr CommitBase;
- uptr CommitSize;
- uptr MapBase;
- uptr MapSize;
- uptr BlockBegin;
- [[no_unique_address]] MapPlatformData Data;
- u64 Time;
+ uptr CommitBase = 0;
+ uptr CommitSize = 0;
+ uptr BlockBegin = 0;
+ MemMapT MemMap = {};
+ u64 Time = 0;
};
- void releaseIfOlderThan(CachedBlock &Entry, u64 Time) {
+ void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
if (!Entry.CommitBase || !Entry.Time)
return;
if (Entry.Time > Time) {
@@ -383,38 +398,39 @@ private:
OldestTime = Entry.Time;
return;
}
- releasePagesToOS(Entry.CommitBase, 0, Entry.CommitSize, &Entry.Data);
+ Entry.MemMap.releasePagesToOS(Entry.CommitBase, Entry.CommitSize);
Entry.Time = 0;
}
- void releaseOlderThan(u64 Time) {
+ void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
ScopedLock L(Mutex);
if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
return;
OldestTime = 0;
- for (uptr I = 0; I < Config::SecondaryCacheQuarantineSize; I++)
+ for (uptr I = 0; I < CacheConfig::QuarantineSize; I++)
releaseIfOlderThan(Quarantine[I], Time);
- for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++)
+ for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++)
releaseIfOlderThan(Entries[I], Time);
}
HybridMutex Mutex;
- u32 EntriesCount = 0;
- u32 QuarantinePos = 0;
+ u32 EntriesCount GUARDED_BY(Mutex) = 0;
+ u32 QuarantinePos GUARDED_BY(Mutex) = 0;
atomic_u32 MaxEntriesCount = {};
atomic_uptr MaxEntrySize = {};
- u64 OldestTime = 0;
- u32 IsFullEvents = 0;
+ u64 OldestTime GUARDED_BY(Mutex) = 0;
+ u32 IsFullEvents GUARDED_BY(Mutex) = 0;
atomic_s32 ReleaseToOsIntervalMs = {};
- CachedBlock Entries[Config::SecondaryCacheEntriesArraySize] = {};
- NonZeroLengthArray<CachedBlock, Config::SecondaryCacheQuarantineSize>
- Quarantine = {};
+ CachedBlock Entries[CacheConfig::EntriesArraySize] GUARDED_BY(Mutex) = {};
+ NonZeroLengthArray<CachedBlock, CacheConfig::QuarantineSize>
+ Quarantine GUARDED_BY(Mutex) = {};
};
template <typename Config> class MapAllocator {
public:
- void init(GlobalStats *S, s32 ReleaseToOsInterval = -1) {
+ void init(GlobalStats *S,
+ s32 ReleaseToOsInterval = -1) NO_THREAD_SAFETY_ANALYSIS {
DCHECK_EQ(AllocatedBytes, 0U);
DCHECK_EQ(FreedBytes, 0U);
Cache.init(ReleaseToOsInterval);
@@ -438,19 +454,19 @@ public:
return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
}
- void getStats(ScopedString *Str) const;
-
- void disable() {
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
Mutex.lock();
Cache.disable();
}
- void enable() {
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
Cache.enable();
Mutex.unlock();
}
template <typename F> void iterateOverBlocks(F Callback) const {
+ Mutex.assertHeld();
+
for (const auto &H : InUseBlocks) {
uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize();
if (allocatorSupportsMemoryTagging<Config>())
@@ -469,17 +485,19 @@ public:
void unmapTestOnly() { Cache.unmapTestOnly(); }
-private:
- typename Config::SecondaryCache Cache;
+ void getStats(ScopedString *Str);
- HybridMutex Mutex;
- DoublyLinkedList<LargeBlock::Header> InUseBlocks;
- uptr AllocatedBytes = 0;
- uptr FreedBytes = 0;
- uptr LargestSize = 0;
- u32 NumberOfAllocs = 0;
- u32 NumberOfFrees = 0;
- LocalStats Stats;
+private:
+ typename Config::Secondary::template CacheT<Config> Cache;
+
+ mutable HybridMutex Mutex;
+ DoublyLinkedList<LargeBlock::Header> InUseBlocks GUARDED_BY(Mutex);
+ uptr AllocatedBytes GUARDED_BY(Mutex) = 0;
+ uptr FreedBytes GUARDED_BY(Mutex) = 0;
+ uptr LargestSize GUARDED_BY(Mutex) = 0;
+ u32 NumberOfAllocs GUARDED_BY(Mutex) = 0;
+ u32 NumberOfFrees GUARDED_BY(Mutex) = 0;
+ LocalStats Stats GUARDED_BY(Mutex);
};
// As with the Primary, the size passed to this function includes any desired
@@ -502,9 +520,9 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
const uptr PageSize = getPageSizeCached();
uptr RoundedSize =
- roundUpTo(roundUpTo(Size, Alignment) + LargeBlock::getHeaderSize() +
- Chunk::getHeaderSize(),
- PageSize);
+ roundUp(roundUp(Size, Alignment) + LargeBlock::getHeaderSize() +
+ Chunk::getHeaderSize(),
+ PageSize);
if (Alignment > PageSize)
RoundedSize += Alignment - PageSize;
@@ -523,23 +541,26 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
if (FillContents && !Zeroed)
memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
BlockEnd - PtrInt);
- const uptr BlockSize = BlockEnd - HInt;
{
ScopedLock L(Mutex);
InUseBlocks.push_back(H);
- AllocatedBytes += BlockSize;
+ AllocatedBytes += H->CommitSize;
NumberOfAllocs++;
- Stats.add(StatAllocated, BlockSize);
- Stats.add(StatMapped, H->MapSize);
+ Stats.add(StatAllocated, H->CommitSize);
+ Stats.add(StatMapped, H->MemMap.getCapacity());
}
return Ptr;
}
}
- MapPlatformData Data = {};
+ ReservedMemoryT ReservedMemory;
const uptr MapSize = RoundedSize + 2 * PageSize;
- uptr MapBase = reinterpret_cast<uptr>(
- map(nullptr, MapSize, nullptr, MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
+ ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr, MAP_ALLOWNOMEM);
+
+ // Take entire ownership of the reserved region.
+ MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
+ ReservedMemory.getCapacity());
+ uptr MapBase = MemMap.getBase();
if (UNLIKELY(!MapBase))
return nullptr;
uptr CommitBase = MapBase + PageSize;
@@ -551,27 +572,27 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
// For alignments greater than or equal to a page, the user pointer (e.g. the
// pointer that is returned by the C or C++ allocation APIs) ends up on a
// page boundary, and our headers will live in the preceding page.
- CommitBase = roundUpTo(MapBase + PageSize + 1, Alignment) - PageSize;
+ CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
const uptr NewMapBase = CommitBase - PageSize;
DCHECK_GE(NewMapBase, MapBase);
// We only trim the extra memory on 32-bit platforms: 64-bit platforms
// are less constrained memory wise, and that saves us two syscalls.
if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
- unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
+ MemMap.unmap(MapBase, NewMapBase - MapBase);
MapBase = NewMapBase;
}
const uptr NewMapEnd =
- CommitBase + PageSize + roundUpTo(Size, PageSize) + PageSize;
+ CommitBase + PageSize + roundUp(Size, PageSize) + PageSize;
DCHECK_LE(NewMapEnd, MapEnd);
if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
- unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
+ MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
MapEnd = NewMapEnd;
}
}
const uptr CommitSize = MapEnd - PageSize - CommitBase;
- const uptr AllocPos = roundDownTo(CommitBase + CommitSize - Size, Alignment);
- mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0, &Data);
+ const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
+ mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0, MemMap);
const uptr HeaderPos =
AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
@@ -579,11 +600,9 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
if (useMemoryTagging<Config>(Options))
storeTags(LargeBlock::addHeaderTag<Config>(CommitBase),
reinterpret_cast<uptr>(H + 1));
- H->MapBase = MapBase;
- H->MapSize = MapEnd - MapBase;
H->CommitBase = CommitBase;
H->CommitSize = CommitSize;
- H->Data = Data;
+ H->MemMap = MemMap;
if (BlockEndPtr)
*BlockEndPtr = CommitBase + CommitSize;
{
@@ -594,13 +613,14 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
LargestSize = CommitSize;
NumberOfAllocs++;
Stats.add(StatAllocated, CommitSize);
- Stats.add(StatMapped, H->MapSize);
+ Stats.add(StatMapped, H->MemMap.getCapacity());
}
return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize());
}
template <typename Config>
-void MapAllocator<Config>::deallocate(Options Options, void *Ptr) {
+void MapAllocator<Config>::deallocate(Options Options, void *Ptr)
+ EXCLUDES(Mutex) {
LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
const uptr CommitSize = H->CommitSize;
{
@@ -609,18 +629,20 @@ void MapAllocator<Config>::deallocate(Options Options, void *Ptr) {
FreedBytes += CommitSize;
NumberOfFrees++;
Stats.sub(StatAllocated, CommitSize);
- Stats.sub(StatMapped, H->MapSize);
+ Stats.sub(StatMapped, H->MemMap.getCapacity());
}
Cache.store(Options, H);
}
template <typename Config>
-void MapAllocator<Config>::getStats(ScopedString *Str) const {
+void MapAllocator<Config>::getStats(ScopedString *Str) EXCLUDES(Mutex) {
+ ScopedLock L(Mutex);
Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times "
"(%zuK), remains %u (%zuK) max %zuM\n",
NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
(AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20);
+ Cache.getStats(Str);
}
} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/size_class_map.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/size_class_map.h
index 766562495ec7..2a6e298f9366 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/size_class_map.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/size_class_map.h
@@ -311,13 +311,11 @@ struct SvelteSizeClassConfig {
typedef FixedSizeClassMap<SvelteSizeClassConfig> SvelteSizeClassMap;
-// Trusty is configured to only have one region containing blocks of size
-// 2^7 bytes.
struct TrustySizeClassConfig {
static const uptr NumBits = 1;
- static const uptr MinSizeLog = 7;
- static const uptr MidSizeLog = 7;
- static const uptr MaxSizeLog = 7;
+ static const uptr MinSizeLog = 5;
+ static const uptr MidSizeLog = 5;
+ static const uptr MaxSizeLog = 15;
static const u16 MaxNumCachedHint = 12;
static const uptr MaxBytesCachedLog = 10;
static const uptr SizeDelta = 0;
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/stats.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/stats.h
index be5bf2d3720a..658b75863ade 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/stats.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/stats.h
@@ -12,6 +12,7 @@
#include "atomic_helpers.h"
#include "list.h"
#include "mutex.h"
+#include "thread_annotations.h"
#include <string.h>
@@ -60,19 +61,19 @@ class GlobalStats : public LocalStats {
public:
void init() { LocalStats::init(); }
- void link(LocalStats *S) {
+ void link(LocalStats *S) EXCLUDES(Mutex) {
ScopedLock L(Mutex);
StatsList.push_back(S);
}
- void unlink(LocalStats *S) {
+ void unlink(LocalStats *S) EXCLUDES(Mutex) {
ScopedLock L(Mutex);
StatsList.remove(S);
for (uptr I = 0; I < StatCount; I++)
add(static_cast<StatType>(I), S->get(static_cast<StatType>(I)));
}
- void get(uptr *S) const {
+ void get(uptr *S) const EXCLUDES(Mutex) {
ScopedLock L(Mutex);
for (uptr I = 0; I < StatCount; I++)
S[I] = LocalStats::get(static_cast<StatType>(I));
@@ -85,15 +86,15 @@ public:
S[I] = static_cast<sptr>(S[I]) >= 0 ? S[I] : 0;
}
- void lock() { Mutex.lock(); }
- void unlock() { Mutex.unlock(); }
+ void lock() ACQUIRE(Mutex) { Mutex.lock(); }
+ void unlock() RELEASE(Mutex) { Mutex.unlock(); }
- void disable() { lock(); }
- void enable() { unlock(); }
+ void disable() ACQUIRE(Mutex) { lock(); }
+ void enable() RELEASE(Mutex) { unlock(); }
private:
mutable HybridMutex Mutex;
- DoublyLinkedList<LocalStats> StatsList;
+ DoublyLinkedList<LocalStats> StatsList GUARDED_BY(Mutex);
};
} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.cpp
index 13fdb9c6ca6c..d4e4e3becd0e 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.cpp
@@ -195,6 +195,28 @@ static int formatString(char *Buffer, uptr BufferLength, const char *Format,
appendChar(&Buffer, BufferEnd, static_cast<char>(va_arg(Args, int)));
break;
}
+ // In Scudo, `s64`/`u64` are supposed to be printed with `lld`/`llu`,
+ // respectively. However, `-Wformat` doesn't know we have a different parser
+ // for those placeholders, so it keeps complaining about a type mismatch on
+ // 64-bit platforms, which use `ld`/`lu` for `s64`/`u64`. Therefore, to
+ // silence the warning, we use `PRId64`/`PRIu64` when printing `s64`/`u64`
+ // and handle the resulting `ld`/`lu` here.
+ case 'l': {
+ ++Cur;
+ RAW_CHECK(*Cur == 'd' || *Cur == 'u');
+
+ if (*Cur == 'd') {
+ DVal = va_arg(Args, s64);
+ Res +=
+ appendSignedDecimal(&Buffer, BufferEnd, DVal, Width, PadWithZero);
+ } else {
+ UVal = va_arg(Args, u64);
+ Res += appendUnsigned(&Buffer, BufferEnd, UVal, 10, Width, PadWithZero,
+ false);
+ }
+
+ break;
+ }
case '%': {
RAW_CHECK_MSG(!HaveFlags, PrintfFormatsHelp);
Res += appendChar(&Buffer, BufferEnd, '%');
@@ -218,7 +240,7 @@ int formatString(char *Buffer, uptr BufferLength, const char *Format, ...) {
return Res;
}
-void ScopedString::append(const char *Format, va_list Args) {
+void ScopedString::vappend(const char *Format, va_list Args) {
va_list ArgsCopy;
va_copy(ArgsCopy, Args);
// formatString doesn't currently support a null buffer or zero buffer length,
@@ -239,7 +261,7 @@ void ScopedString::append(const char *Format, va_list Args) {
void ScopedString::append(const char *Format, ...) {
va_list Args;
va_start(Args, Format);
- append(Format, Args);
+ vappend(Format, Args);
va_end(Args);
}
@@ -247,7 +269,7 @@ void Printf(const char *Format, ...) {
va_list Args;
va_start(Args, Format);
ScopedString Msg;
- Msg.append(Format, Args);
+ Msg.vappend(Format, Args);
outputRaw(Msg.data());
va_end(Args);
}
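
With the new `l` case, callers can format `s64`/`u64` through `PRId64`/`PRIu64`, and the parser consumes the `ld`/`lu` those macros expand to on LP64 targets. A hypothetical caller, for illustration only:

    #include <inttypes.h>
    #include "string_utils.h"

    void logDelta(scudo::s64 Delta) {
      scudo::ScopedString Str;
      // PRId64 expands to "ld" on LP64 targets, which the new 'l' case parses.
      Str.append("delta: %" PRId64 " bytes\n", Delta);
      Str.output();
    }
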
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.h
index 41901194dfdc..a4cab5268ede 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.h
@@ -25,7 +25,7 @@ public:
String.clear();
String.push_back('\0');
}
- void append(const char *Format, va_list Args);
+ void vappend(const char *Format, va_list Args);
void append(const char *Format, ...) FORMAT(2, 3);
void output() const { outputRaw(String.data()); }
void reserve(size_t Size) { String.reserve(Size + 1); }
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/thread_annotations.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/thread_annotations.h
new file mode 100644
index 000000000000..68a1087c2034
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/thread_annotations.h
@@ -0,0 +1,70 @@
+//===-- thread_annotations.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_THREAD_ANNOTATIONS_
+#define SCUDO_THREAD_ANNOTATIONS_
+
+// Enable thread safety attributes only with clang.
+// The attributes can be safely ignored when compiling with other compilers.
+#if defined(__clang__)
+#define THREAD_ANNOTATION_ATTRIBUTE_(x) __attribute__((x))
+#else
+#define THREAD_ANNOTATION_ATTRIBUTE_(x) // no-op
+#endif
+
+#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(capability(x))
+
+#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE_(scoped_lockable)
+
+#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE_(guarded_by(x))
+
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE_(pt_guarded_by(x))
+
+#define ACQUIRED_BEFORE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(acquired_before(__VA_ARGS__))
+
+#define ACQUIRED_AFTER(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(acquired_after(__VA_ARGS__))
+
+#define REQUIRES(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(requires_capability(__VA_ARGS__))
+
+#define REQUIRES_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(requires_shared_capability(__VA_ARGS__))
+
+#define ACQUIRE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(acquire_capability(__VA_ARGS__))
+
+#define ACQUIRE_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(acquire_shared_capability(__VA_ARGS__))
+
+#define RELEASE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(release_capability(__VA_ARGS__))
+
+#define RELEASE_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(release_shared_capability(__VA_ARGS__))
+
+#define TRY_ACQUIRE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(try_acquire_capability(__VA_ARGS__))
+
+#define TRY_ACQUIRE_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(try_acquire_shared_capability(__VA_ARGS__))
+
+#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE_(locks_excluded(__VA_ARGS__))
+
+#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(assert_capability(x))
+
+#define ASSERT_SHARED_CAPABILITY(x) \
+ THREAD_ANNOTATION_ATTRIBUTE_(assert_shared_capability(x))
+
+#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(lock_returned(x))
+
+#define NO_THREAD_SAFETY_ANALYSIS \
+ THREAD_ANNOTATION_ATTRIBUTE_(no_thread_safety_analysis)
+
+#endif // SCUDO_THREAD_ANNOTATIONS_
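
These macros map one-to-one onto Clang's -Wthread-safety attributes. A minimal usage sketch, assuming scudo's HybridMutex and ScopedLock (illustrative only):

    class Counter {
    public:
      void inc() EXCLUDES(Mutex) {
        ScopedLock L(Mutex);
        ++Value;
      }
      // Callers must already hold the lock; the analysis enforces this.
      u64 getLocked() REQUIRES(Mutex) { return Value; }

    private:
      HybridMutex Mutex;
      u64 Value GUARDED_BY(Mutex) = 0;
    };
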
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/timing.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/timing.cpp
new file mode 100644
index 000000000000..59ae21d10f0f
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/timing.cpp
@@ -0,0 +1,29 @@
+//===-- timing.cpp ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "timing.h"
+
+namespace scudo {
+
+Timer::~Timer() {
+ if (Manager)
+ Manager->report(*this);
+}
+
+ScopedTimer::ScopedTimer(TimingManager &Manager, const char *Name)
+ : Timer(Manager.getOrCreateTimer(Name)) {
+ start();
+}
+
+ScopedTimer::ScopedTimer(TimingManager &Manager, const Timer &Nest,
+ const char *Name)
+ : Timer(Manager.nest(Nest, Name)) {
+ start();
+}
+
+} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/timing.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/timing.h
new file mode 100644
index 000000000000..84caa79e5c3a
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/timing.h
@@ -0,0 +1,221 @@
+//===-- timing.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TIMING_H_
+#define SCUDO_TIMING_H_
+
+#include "common.h"
+#include "mutex.h"
+#include "string_utils.h"
+#include "thread_annotations.h"
+
+#include <inttypes.h>
+#include <string.h>
+
+namespace scudo {
+
+class TimingManager;
+
+// A simple timer for measuring the execution time of code snippets. It can be
+// used along with a TimingManager or standalone.
+class Timer {
+public:
+ // A Timer that is not bound to a TimingManager is expected to do its timer
+ // logging manually; otherwise, the TimingManager does the logging for you.
+ Timer() = default;
+ Timer(Timer &&Other)
+ : StartTime(0), AccTime(Other.AccTime), Manager(Other.Manager),
+ HandleId(Other.HandleId) {
+ Other.Manager = nullptr;
+ }
+
+ Timer(const Timer &) = delete;
+
+ ~Timer();
+
+ void start() {
+ CHECK_EQ(StartTime, 0U);
+ StartTime = getMonotonicTime();
+ }
+ void stop() {
+ AccTime += getMonotonicTime() - StartTime;
+ StartTime = 0;
+ }
+ u64 getAccumulatedTime() const { return AccTime; }
+
+ // Unset the bound TimingManager so that we don't report the data back. This
+ // is useful if we only want to track a subset of certain scope events.
+ void ignore() {
+ StartTime = 0;
+ AccTime = 0;
+ Manager = nullptr;
+ }
+
+protected:
+ friend class TimingManager;
+ Timer(TimingManager &Manager, u32 HandleId)
+ : Manager(&Manager), HandleId(HandleId) {}
+
+ u64 StartTime = 0;
+ u64 AccTime = 0;
+ TimingManager *Manager = nullptr;
+ u32 HandleId;
+};
+
+// A RAII-style wrapper for easily measuring the execution time of a scope. To
+// avoid storing extra data such as `Name`, it only works together with a
+// TimingManager.
+class ScopedTimer : public Timer {
+public:
+ ScopedTimer(TimingManager &Manager, const char *Name);
+ ScopedTimer(TimingManager &Manager, const Timer &Nest, const char *Name);
+ ~ScopedTimer() { stop(); }
+};
+
+// In Scudo, the execution time of a single run of a code snippet may not be
+// useful; we are more interested in the average time across several runs.
+// TimingManager lets the registered timers report their data and periodically
+// reports the average execution time for each timer.
+class TimingManager {
+public:
+ TimingManager(u32 PrintingInterval = DefaultPrintingInterval)
+ : PrintingInterval(PrintingInterval) {}
+ ~TimingManager() {
+ if (NumAllocatedTimers != 0)
+ printAll();
+ }
+
+ Timer getOrCreateTimer(const char *Name) EXCLUDES(Mutex) {
+ ScopedLock L(Mutex);
+
+ CHECK_LT(strlen(Name), MaxLenOfTimerName);
+ for (u32 I = 0; I < NumAllocatedTimers; ++I) {
+ if (strncmp(Name, Timers[I].Name, MaxLenOfTimerName) == 0)
+ return Timer(*this, I);
+ }
+
+ CHECK_LT(NumAllocatedTimers, MaxNumberOfTimers);
+ strncpy(Timers[NumAllocatedTimers].Name, Name, MaxLenOfTimerName);
+ TimerRecords[NumAllocatedTimers].AccumulatedTime = 0;
+ TimerRecords[NumAllocatedTimers].Occurrence = 0;
+ return Timer(*this, NumAllocatedTimers++);
+ }
+
+ // Add a sub-Timer associated with another Timer. This is used when we want
+ // to break down the execution time within the scope of a Timer.
+ // For example,
+ //   void Foo() {
+ //     // T1 records the time spent in both the first and second tasks.
+ //     ScopedTimer T1(getTimingManager(), "Task1");
+ //     {
+ //       // T2 records the time spent in the first task.
+ //       ScopedTimer T2(getTimingManager(), T1, "Task2");
+ //       // Do the first task.
+ //     }
+ //     // Do the second task.
+ //   }
+ //
+ // The report will use indentation to indicate the nesting, e.g.,
+ // -- Average Operation Time -- -- Name (# of Calls) --
+ // 10.0(ns) Task1 (1)
+ // 5.0(ns) Task2 (1)
+ Timer nest(const Timer &T, const char *Name) EXCLUDES(Mutex) {
+ CHECK_EQ(T.Manager, this);
+ Timer Nesting = getOrCreateTimer(Name);
+
+ ScopedLock L(Mutex);
+ CHECK_NE(Nesting.HandleId, T.HandleId);
+ Timers[Nesting.HandleId].Nesting = T.HandleId;
+ return Nesting;
+ }
+
+ void report(const Timer &T) EXCLUDES(Mutex) {
+ ScopedLock L(Mutex);
+
+ const u32 HandleId = T.HandleId;
+ CHECK_LT(HandleId, MaxNumberOfTimers);
+ TimerRecords[HandleId].AccumulatedTime += T.getAccumulatedTime();
+ ++TimerRecords[HandleId].Occurrence;
+ ++NumEventsReported;
+ if (NumEventsReported % PrintingInterval == 0)
+ printAllImpl();
+ }
+
+ void printAll() EXCLUDES(Mutex) {
+ ScopedLock L(Mutex);
+ printAllImpl();
+ }
+
+private:
+ void printAllImpl() REQUIRES(Mutex) {
+ static char NameHeader[] = "-- Name (# of Calls) --";
+ static char AvgHeader[] = "-- Average Operation Time --";
+ ScopedString Str;
+ Str.append("%-15s %-15s\n", AvgHeader, NameHeader);
+
+ for (u32 I = 0; I < NumAllocatedTimers; ++I) {
+ if (Timers[I].Nesting != MaxNumberOfTimers)
+ continue;
+ printImpl(Str, I);
+ }
+
+ Str.output();
+ }
+
+ void printImpl(ScopedString &Str, const u32 HandleId,
+ const u32 ExtraIndent = 0) REQUIRES(Mutex) {
+ const u64 AccumulatedTime = TimerRecords[HandleId].AccumulatedTime;
+ const u64 Occurrence = TimerRecords[HandleId].Occurrence;
+ const u64 Integral = Occurrence == 0 ? 0 : AccumulatedTime / Occurrence;
+ // Keeping a single fraction digit is enough, and it makes the layout easier
+ // to maintain.
+ const u64 Fraction =
+ Occurrence == 0 ? 0
+ : ((AccumulatedTime % Occurrence) * 10) / Occurrence;
+
+ Str.append("%14" PRId64 ".%" PRId64 "(ns) %-11s", Integral, Fraction, " ");
+
+ for (u32 I = 0; I < ExtraIndent; ++I)
+ Str.append("%s", " ");
+ Str.append("%s (%" PRId64 ")\n", Timers[HandleId].Name, Occurrence);
+
+ for (u32 I = 0; I < NumAllocatedTimers; ++I)
+ if (Timers[I].Nesting == HandleId)
+ printImpl(Str, I, ExtraIndent + 1);
+ }
+
+ // Instead of maintaining pages for timer registration, a static buffer is
+ // sufficient for most use cases in Scudo.
+ static constexpr u32 MaxNumberOfTimers = 50;
+ static constexpr u32 MaxLenOfTimerName = 50;
+ static constexpr u32 DefaultPrintingInterval = 100;
+
+ struct Record {
+ u64 AccumulatedTime = 0;
+ u64 Occurrence = 0;
+ };
+
+ struct TimerInfo {
+ char Name[MaxLenOfTimerName + 1];
+ u32 Nesting = MaxNumberOfTimers;
+ };
+
+ HybridMutex Mutex;
+ // The frequency of proactively dumping the timer statistics. For example, the
+ // default setting is to dump the statistics every 100 reported events.
+ u32 PrintingInterval GUARDED_BY(Mutex);
+ u64 NumEventsReported GUARDED_BY(Mutex) = 0;
+ u32 NumAllocatedTimers GUARDED_BY(Mutex) = 0;
+ TimerInfo Timers[MaxNumberOfTimers] GUARDED_BY(Mutex);
+ Record TimerRecords[MaxNumberOfTimers] GUARDED_BY(Mutex);
+};
+
+} // namespace scudo
+
+#endif // SCUDO_TIMING_H_
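
Putting the pieces together, timers are typically used through ScopedTimer; a short sketch assuming a file-scope TimingManager (illustrative only):

    scudo::TimingManager Manager;

    void doWork() {
      // Reports its accumulated time to Manager when destroyed.
      scudo::ScopedTimer T(Manager, "doWork");
      // ... code being measured ...
    }
    // Every 100 reported events (DefaultPrintingInterval), the manager prints
    // the average operation time for each named timer.
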
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/trusty.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/trusty.cpp
index 81d6bc585f09..3191091e1b96 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/trusty.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/trusty.cpp
@@ -12,17 +12,17 @@
#include "common.h"
#include "mutex.h"
-#include "string_utils.h"
#include "trusty.h"
#include <errno.h> // for errno
+#include <lk/err_ptr.h> // for PTR_ERR and IS_ERR
#include <stdio.h> // for printf()
#include <stdlib.h> // for getenv()
#include <sys/auxv.h> // for getauxval()
#include <time.h> // for clock_gettime()
+#include <trusty_err.h> // for lk_err_to_errno()
#include <trusty_syscalls.h> // for _trusty_brk()
-
-#define SBRK_ALIGN 32
+#include <uapi/mm.h> // for MMAP flags
namespace scudo {
@@ -30,35 +30,38 @@ uptr getPageSize() { return getauxval(AT_PAGESZ); }
void NORETURN die() { abort(); }
-void *map(UNUSED void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
+void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
UNUSED MapPlatformData *Data) {
- // Calling _trusty_brk(0) returns the current program break.
- uptr ProgramBreak = reinterpret_cast<uptr>(_trusty_brk(0));
- uptr Start;
- uptr End;
-
- Start = roundUpTo(ProgramBreak, SBRK_ALIGN);
- // Don't actually extend the heap if MAP_NOACCESS flag is set since this is
- // the case where Scudo tries to reserve a memory region without mapping
- // physical pages.
+ uint32_t MmapFlags =
+ MMAP_FLAG_ANONYMOUS | MMAP_FLAG_PROT_READ | MMAP_FLAG_PROT_WRITE;
+
+ // If the MAP_NOACCESS flag is set, Scudo tries to reserve
+ // a memory region without mapping physical pages. This corresponds
+ // to MMAP_FLAG_NO_PHYSICAL in Trusty.
if (Flags & MAP_NOACCESS)
- return reinterpret_cast<void *>(Start);
-
- // Attempt to extend the heap by Size bytes using _trusty_brk.
- End = roundUpTo(Start + Size, SBRK_ALIGN);
- ProgramBreak =
- reinterpret_cast<uptr>(_trusty_brk(reinterpret_cast<void *>(End)));
- if (ProgramBreak < End) {
- errno = ENOMEM;
+ MmapFlags |= MMAP_FLAG_NO_PHYSICAL;
+ if (Addr)
+ MmapFlags |= MMAP_FLAG_FIXED_NOREPLACE;
+
+ if (Flags & MAP_MEMTAG)
+ MmapFlags |= MMAP_FLAG_PROT_MTE;
+
+ void *P = (void *)_trusty_mmap(Addr, Size, MmapFlags, 0);
+
+ if (IS_ERR(P)) {
+ errno = lk_err_to_errno(PTR_ERR(P));
dieOnMapUnmapError(Size);
return nullptr;
}
- return reinterpret_cast<void *>(Start); // Base of new reserved region.
+
+ return P;
}
-// Unmap is a no-op since Trusty uses sbrk instead of memory mapping.
void unmap(UNUSED void *Addr, UNUSED uptr Size, UNUSED uptr Flags,
- UNUSED MapPlatformData *Data) {}
+ UNUSED MapPlatformData *Data) {
+ if (_trusty_munmap(Addr, Size) != 0)
+ dieOnMapUnmapError();
+}
void setMemoryPermission(UNUSED uptr Addr, UNUSED uptr Size, UNUSED uptr Flags,
UNUSED MapPlatformData *Data) {}
@@ -76,6 +79,8 @@ void HybridMutex::lockSlow() {}
void HybridMutex::unlock() {}
+void HybridMutex::assertHeldImpl() {}
+
u64 getMonotonicTime() {
timespec TS;
clock_gettime(CLOCK_MONOTONIC, &TS);
@@ -83,6 +88,17 @@ u64 getMonotonicTime() {
static_cast<u64>(TS.tv_nsec);
}
+u64 getMonotonicTimeFast() {
+#if defined(CLOCK_MONOTONIC_COARSE)
+ timespec TS;
+ clock_gettime(CLOCK_MONOTONIC_COARSE, &TS);
+ return static_cast<u64>(TS.tv_sec) * (1000ULL * 1000 * 1000) +
+ static_cast<u64>(TS.tv_nsec);
+#else
+ return getMonotonicTime();
+#endif
+}
+
u32 getNumberOfCPUs() { return 0; }
u32 getThreadID() { return 0; }
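
With map() now forwarding to _trusty_mmap, the usual Scudo reserve-then-commit sequence translates into two mmap calls; a rough sketch, assuming Scudo's common two-step mapping pattern (illustrative only):

    void reserveThenCommit(scudo::uptr Size, scudo::uptr CommitSize) {
      scudo::MapPlatformData Data = {};
      // MAP_NOACCESS reserves address space only: it becomes
      // MMAP_FLAG_NO_PHYSICAL, so no physical pages are committed yet.
      void *Base =
          scudo::map(nullptr, Size, "scudo:reserved", scudo::MAP_NOACCESS, &Data);
      // Committing with a non-null Addr adds MMAP_FLAG_FIXED_NOREPLACE.
      scudo::map(Base, CommitSize, "scudo:committed", 0, &Data);
    }
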
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd.h
index b400a3b56da9..f4fa545de5e0 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd.h
@@ -12,6 +12,7 @@
#include "atomic_helpers.h"
#include "common.h"
#include "mutex.h"
+#include "thread_annotations.h"
#include <limits.h> // for PTHREAD_DESTRUCTOR_ITERATIONS
#include <pthread.h>
@@ -24,41 +25,61 @@
namespace scudo {
template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
- typename Allocator::CacheT Cache;
- typename Allocator::QuarantineCacheT QuarantineCache;
using ThisT = TSD<Allocator>;
u8 DestructorIterations = 0;
- void init(Allocator *Instance) {
+ void init(Allocator *Instance) NO_THREAD_SAFETY_ANALYSIS {
DCHECK_EQ(DestructorIterations, 0U);
DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
Instance->initCache(&Cache);
DestructorIterations = PTHREAD_DESTRUCTOR_ITERATIONS;
}
- void commitBack(Allocator *Instance) { Instance->commitBack(this); }
-
- inline bool tryLock() {
+ inline bool tryLock() NO_THREAD_SAFETY_ANALYSIS {
if (Mutex.tryLock()) {
atomic_store_relaxed(&Precedence, 0);
return true;
}
if (atomic_load_relaxed(&Precedence) == 0)
- atomic_store_relaxed(
- &Precedence,
- static_cast<uptr>(getMonotonicTime() >> FIRST_32_SECOND_64(16, 0)));
+ atomic_store_relaxed(&Precedence,
+ static_cast<uptr>(getMonotonicTimeFast() >>
+ FIRST_32_SECOND_64(16, 0)));
return false;
}
- inline void lock() {
+ inline void lock() NO_THREAD_SAFETY_ANALYSIS {
atomic_store_relaxed(&Precedence, 0);
Mutex.lock();
}
- inline void unlock() { Mutex.unlock(); }
+ inline void unlock() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }
inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
+ void commitBack(Allocator *Instance) ASSERT_CAPABILITY(Mutex) {
+ Instance->commitBack(this);
+ }
+
+ // Ideally, we would assert that all operations on Cache/QuarantineCache are
+ // done with the `Mutex` acquired. However, the current architecture for
+ // accessing a TSD does not cooperate well with the thread-safety analysis
+ // because of pointer aliasing, so for now we only add the assertion on the
+ // getters of Cache/QuarantineCache.
+ //
+ // TODO(chiahungduan): Ideally, we want to do `Mutex.assertHeld`, but
+ // acquiring a TSD doesn't always require holding the lock. Add this
+ // assertion once the lock is always acquired.
+ typename Allocator::CacheT &getCache() ASSERT_CAPABILITY(Mutex) {
+ return Cache;
+ }
+ typename Allocator::QuarantineCacheT &getQuarantineCache()
+ ASSERT_CAPABILITY(Mutex) {
+ return QuarantineCache;
+ }
+
private:
HybridMutex Mutex;
atomic_uptr Precedence = {};
+
+ typename Allocator::CacheT Cache GUARDED_BY(Mutex);
+ typename Allocator::QuarantineCacheT QuarantineCache GUARDED_BY(Mutex);
};
} // namespace scudo
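
The getter annotations anticipate the locked access pattern used by callers; a sketch of the intended usage (illustrative only):

    bool UnlockRequired;
    auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
    // The getter asserts the Mutex capability, so accesses are checked.
    TSD->getCache().deallocate(ClassId, Ptr);
    if (UnlockRequired)
      TSD->unlock();
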
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_exclusive.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
index d49427b2005b..238367420238 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
@@ -11,6 +11,8 @@
#include "tsd.h"
+#include "string_utils.h"
+
namespace scudo {
struct ThreadState {
@@ -25,7 +27,7 @@ struct ThreadState {
template <class Allocator> void teardownThread(void *Ptr);
template <class Allocator> struct TSDRegistryExT {
- void init(Allocator *Instance) {
+ void init(Allocator *Instance) REQUIRES(Mutex) {
DCHECK(!Initialized);
Instance->init();
CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
@@ -33,14 +35,14 @@ template <class Allocator> struct TSDRegistryExT {
Initialized = true;
}
- void initOnceMaybe(Allocator *Instance) {
+ void initOnceMaybe(Allocator *Instance) EXCLUDES(Mutex) {
ScopedLock L(Mutex);
if (LIKELY(Initialized))
return;
init(Instance); // Sets Initialized.
}
- void unmapTestOnly(Allocator *Instance) {
+ void unmapTestOnly(Allocator *Instance) EXCLUDES(Mutex) {
DCHECK(Instance);
if (reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey))) {
DCHECK_EQ(reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey)),
@@ -53,16 +55,32 @@ template <class Allocator> struct TSDRegistryExT {
FallbackTSD.commitBack(Instance);
FallbackTSD = {};
State = {};
+ ScopedLock L(Mutex);
Initialized = false;
}
+ void drainCaches(Allocator *Instance) {
+ // We don't have a way to iterate over all thread-local `ThreadTSD`s. Simply
+ // drain the `ThreadTSD` of the current thread and the `FallbackTSD`.
+ Instance->drainCache(&ThreadTSD);
+ FallbackTSD.lock();
+ Instance->drainCache(&FallbackTSD);
+ FallbackTSD.unlock();
+ }
+
ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
if (LIKELY(State.InitState != ThreadState::NotInitialized))
return;
initThread(Instance, MinimalInit);
}
- ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
+ // TODO(chiahungduan): Consider removing the argument `UnlockRequired` by
+ // embedding the logic into TSD or by always locking the TSD. That would let
+ // us add proper thread annotations here and proper runtime assertions in the
+ // member functions of TSD, e.g., asserting that the lock is acquired before
+ // calling TSD::commitBack().
+ ALWAYS_INLINE TSD<Allocator> *
+ getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
if (LIKELY(State.InitState == ThreadState::Initialized &&
!atomic_load(&Disabled, memory_order_acquire))) {
*UnlockRequired = false;
@@ -75,13 +93,13 @@ template <class Allocator> struct TSDRegistryExT {
// To disable the exclusive TSD registry, we effectively lock the fallback TSD
// and force all threads to attempt to use it instead of their local one.
- void disable() {
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
Mutex.lock();
FallbackTSD.lock();
atomic_store(&Disabled, 1U, memory_order_release);
}
- void enable() {
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
atomic_store(&Disabled, 0U, memory_order_release);
FallbackTSD.unlock();
Mutex.unlock();
@@ -97,6 +115,13 @@ template <class Allocator> struct TSDRegistryExT {
bool getDisableMemInit() { return State.DisableMemInit; }
+ void getStats(ScopedString *Str) {
+ // We don't have a way to iterate over all thread-local `ThreadTSD`s.
+ // Printing only the current thread's `ThreadTSD` could be misleading, so we
+ // just skip it.
+ Str->append("Exclusive TSD doesn't support iterating each TSD\n");
+ }
+
private:
// Using minimal initialization allows for global initialization while keeping
// the thread specific structure untouched. The fallback structure will be
@@ -113,7 +138,7 @@ private:
}
pthread_key_t PThreadKey = {};
- bool Initialized = false;
+ bool Initialized GUARDED_BY(Mutex) = false;
atomic_u8 Disabled = {};
TSD<Allocator> FallbackTSD;
HybridMutex Mutex;
@@ -128,7 +153,8 @@ thread_local TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
template <class Allocator>
thread_local ThreadState TSDRegistryExT<Allocator>::State;
-template <class Allocator> void teardownThread(void *Ptr) {
+template <class Allocator>
+void teardownThread(void *Ptr) NO_THREAD_SAFETY_ANALYSIS {
typedef TSDRegistryExT<Allocator> TSDRegistryT;
Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
// The glibc POSIX thread-local-storage deallocation routine calls user
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_shared.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_shared.h
index 1c2a880416b9..dcb0948ad78f 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_shared.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_shared.h
@@ -11,6 +11,8 @@
#include "tsd.h"
+#include "string_utils.h"
+
#if SCUDO_HAS_PLATFORM_TLS_SLOT
// This is a platform-provided header that needs to be on the include path when
// Scudo is compiled. It must declare a function with the prototype:
@@ -24,7 +26,7 @@ namespace scudo {
template <class Allocator, u32 TSDsArraySize, u32 DefaultTSDCount>
struct TSDRegistrySharedT {
- void init(Allocator *Instance) {
+ void init(Allocator *Instance) REQUIRES(Mutex) {
DCHECK(!Initialized);
Instance->init();
for (u32 I = 0; I < TSDsArraySize; I++)
@@ -35,22 +37,32 @@ struct TSDRegistrySharedT {
Initialized = true;
}
- void initOnceMaybe(Allocator *Instance) {
+ void initOnceMaybe(Allocator *Instance) EXCLUDES(Mutex) {
ScopedLock L(Mutex);
if (LIKELY(Initialized))
return;
init(Instance); // Sets Initialized.
}
- void unmapTestOnly(Allocator *Instance) {
+ void unmapTestOnly(Allocator *Instance) EXCLUDES(Mutex) {
for (u32 I = 0; I < TSDsArraySize; I++) {
TSDs[I].commitBack(Instance);
TSDs[I] = {};
}
setCurrentTSD(nullptr);
+ ScopedLock L(Mutex);
Initialized = false;
}
+ void drainCaches(Allocator *Instance) {
+ ScopedLock L(MutexTSDs);
+ for (uptr I = 0; I < NumberOfTSDs; ++I) {
+ TSDs[I].lock();
+ Instance->drainCache(&TSDs[I]);
+ TSDs[I].unlock();
+ }
+ }
+
ALWAYS_INLINE void initThreadMaybe(Allocator *Instance,
UNUSED bool MinimalInit) {
if (LIKELY(getCurrentTSD()))
@@ -58,7 +70,10 @@ struct TSDRegistrySharedT {
initThread(Instance);
}
- ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
+ // TSDs is an array of locks, which the thread-safety analysis does not
+ // support marking as a capability.
+ ALWAYS_INLINE TSD<Allocator> *
+ getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
TSD<Allocator> *TSD = getCurrentTSD();
DCHECK(TSD);
*UnlockRequired = true;
@@ -75,13 +90,13 @@ struct TSDRegistrySharedT {
return getTSDAndLockSlow(TSD);
}
- void disable() {
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
Mutex.lock();
for (u32 I = 0; I < TSDsArraySize; I++)
TSDs[I].lock();
}
- void enable() {
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
for (s32 I = static_cast<s32>(TSDsArraySize - 1); I >= 0; I--)
TSDs[I].unlock();
Mutex.unlock();
@@ -98,6 +113,19 @@ struct TSDRegistrySharedT {
bool getDisableMemInit() const { return *getTlsPtr() & 1; }
+ void getStats(ScopedString *Str) EXCLUDES(MutexTSDs) {
+ ScopedLock L(MutexTSDs);
+
+ Str->append("Stats: SharedTSDs: %u available; total %u\n", NumberOfTSDs,
+ TSDsArraySize);
+ for (uptr I = 0; I < NumberOfTSDs; ++I) {
+ TSDs[I].lock();
+ Str->append(" Shared TSD[%zu]:\n", I);
+ TSDs[I].getCache().getStats(Str);
+ TSDs[I].unlock();
+ }
+ }
+
private:
ALWAYS_INLINE uptr *getTlsPtr() const {
#if SCUDO_HAS_PLATFORM_TLS_SLOT
@@ -119,7 +147,7 @@ private:
return reinterpret_cast<TSD<Allocator> *>(*getTlsPtr() & ~1ULL);
}
- bool setNumberOfTSDs(u32 N) {
+ bool setNumberOfTSDs(u32 N) EXCLUDES(MutexTSDs) {
ScopedLock L(MutexTSDs);
if (N < NumberOfTSDs)
return false;
@@ -150,7 +178,7 @@ private:
*getTlsPtr() |= B;
}
- NOINLINE void initThread(Allocator *Instance) {
+ NOINLINE void initThread(Allocator *Instance) NO_THREAD_SAFETY_ANALYSIS {
initOnceMaybe(Instance);
// Initial context assignment is done in a plain round-robin fashion.
const u32 Index = atomic_fetch_add(&CurrentIndex, 1U, memory_order_relaxed);
@@ -158,7 +186,10 @@ private:
Instance->callPostInitCallback();
}
- NOINLINE TSD<Allocator> *getTSDAndLockSlow(TSD<Allocator> *CurrentTSD) {
+ // TSDs is an array of locks, which the thread-safety analysis does not
+ // support marking as a capability.
+ NOINLINE TSD<Allocator> *getTSDAndLockSlow(TSD<Allocator> *CurrentTSD)
+ EXCLUDES(MutexTSDs) {
// Use the Precedence of the current TSD as our random seed. Since we are
// in the slow path, it means that tryLock failed, and as a result it's
// very likely that said Precedence is non-zero.
@@ -202,10 +233,10 @@ private:
}
atomic_u32 CurrentIndex = {};
- u32 NumberOfTSDs = 0;
- u32 NumberOfCoPrimes = 0;
- u32 CoPrimes[TSDsArraySize] = {};
- bool Initialized = false;
+ u32 NumberOfTSDs GUARDED_BY(MutexTSDs) = 0;
+ u32 NumberOfCoPrimes GUARDED_BY(MutexTSDs) = 0;
+ u32 CoPrimes[TSDsArraySize] GUARDED_BY(MutexTSDs) = {};
+ bool Initialized GUARDED_BY(Mutex) = false;
HybridMutex Mutex;
HybridMutex MutexTSDs;
TSD<Allocator> TSDs[TSDsArraySize];
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/vector.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/vector.h
index d43205a7111d..9f2c200958fe 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/vector.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/vector.h
@@ -40,7 +40,7 @@ public:
void push_back(const T &Element) {
DCHECK_LE(Size, capacity());
if (Size == capacity()) {
- const uptr NewCapacity = roundUpToPowerOfTwo(Size + 1);
+ const uptr NewCapacity = roundUpPowerOfTwo(Size + 1);
reallocate(NewCapacity);
}
memcpy(&Data[Size++], &Element, sizeof(T));
@@ -82,7 +82,7 @@ private:
void reallocate(uptr NewCapacity) {
DCHECK_GT(NewCapacity, 0);
DCHECK_LE(Size, NewCapacity);
- NewCapacity = roundUpTo(NewCapacity * sizeof(T), getPageSizeCached());
+ NewCapacity = roundUp(NewCapacity * sizeof(T), getPageSizeCached());
T *NewData = reinterpret_cast<T *>(
map(nullptr, NewCapacity, "scudo:vector", 0, &MapData));
memcpy(NewData, Data, Size * sizeof(T));
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.inc b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.inc
index bbe3617dd0d6..2c8e382dba0b 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.inc
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.inc
@@ -54,6 +54,8 @@ INTERFACE WEAK struct SCUDO_MALLINFO SCUDO_PREFIX(mallinfo)(void) {
return Info;
}
+// On Android, mallinfo2 is an alias of mallinfo, so don't define both.
+#if !SCUDO_ANDROID
INTERFACE WEAK struct __scudo_mallinfo2 SCUDO_PREFIX(mallinfo2)(void) {
struct __scudo_mallinfo2 Info = {};
scudo::StatCounters Stats;
@@ -70,6 +72,7 @@ INTERFACE WEAK struct __scudo_mallinfo2 SCUDO_PREFIX(mallinfo2)(void) {
Info.fordblks = Info.fsmblks;
return Info;
}
+#endif
INTERFACE WEAK void *SCUDO_PREFIX(malloc)(size_t size) {
return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
@@ -91,7 +94,7 @@ INTERFACE WEAK void *SCUDO_PREFIX(memalign)(size_t alignment, size_t size) {
alignment = 1U;
} else {
if (UNLIKELY(!scudo::isPowerOfTwo(alignment)))
- alignment = scudo::roundUpToPowerOfTwo(alignment);
+ alignment = scudo::roundUpPowerOfTwo(alignment);
}
} else {
if (UNLIKELY(!scudo::isPowerOfTwo(alignment))) {
@@ -131,9 +134,9 @@ INTERFACE WEAK void *SCUDO_PREFIX(pvalloc)(size_t size) {
scudo::reportPvallocOverflow(size);
}
// pvalloc(0) should allocate one page.
- return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
- size ? scudo::roundUpTo(size, PageSize) : PageSize,
- scudo::Chunk::Origin::Memalign, PageSize));
+ return scudo::setErrnoOnNull(
+ SCUDO_ALLOCATOR.allocate(size ? scudo::roundUp(size, PageSize) : PageSize,
+ scudo::Chunk::Origin::Memalign, PageSize));
}
INTERFACE WEAK void *SCUDO_PREFIX(realloc)(void *ptr, size_t size) {
@@ -188,7 +191,13 @@ INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, int value) {
static_cast<scudo::sptr>(value));
return 1;
} else if (param == M_PURGE) {
- SCUDO_ALLOCATOR.releaseToOS();
+ SCUDO_ALLOCATOR.releaseToOS(scudo::ReleaseToOS::Force);
+ return 1;
+ } else if (param == M_PURGE_ALL) {
+ SCUDO_ALLOCATOR.releaseToOS(scudo::ReleaseToOS::ForceAll);
+ return 1;
+ } else if (param == M_LOG_STATS) {
+ SCUDO_ALLOCATOR.printStats();
return 1;
} else {
scudo::Option option;
@@ -238,7 +247,10 @@ INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(UNUSED int options, FILE *stream) {
if (size < max_size)
sizes[size]++;
};
+
+ SCUDO_ALLOCATOR.disable();
SCUDO_ALLOCATOR.iterateOverChunks(0, -1ul, callback, sizes);
+ SCUDO_ALLOCATOR.enable();
fputs("<malloc version=\"scudo-1\">\n", stream);
for (scudo::uptr i = 0; i != max_size; ++i)
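
With the new mallopt parameters, purging and stats logging can be driven from application code; a hypothetical caller (these M_* parameters are Bionic extensions):

    #include <malloc.h>

    mallopt(M_PURGE, 0);     // release free memory back to the OS (Force)
    mallopt(M_PURGE_ALL, 0); // an even more aggressive release (ForceAll)
    mallopt(M_LOG_STATS, 0); // print allocator statistics
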
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
index 18c3bf2c0edf..1b9fe67d920c 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
@@ -32,21 +32,6 @@ static scudo::Allocator<scudo::AndroidConfig, SCUDO_PREFIX(malloc_postinit)>
#undef SCUDO_ALLOCATOR
#undef SCUDO_PREFIX
-// Svelte MallocDispatch definitions.
-#define SCUDO_PREFIX(name) CONCATENATE(scudo_svelte_, name)
-#define SCUDO_ALLOCATOR SvelteAllocator
-
-extern "C" void SCUDO_PREFIX(malloc_postinit)();
-SCUDO_REQUIRE_CONSTANT_INITIALIZATION
-static scudo::Allocator<scudo::AndroidSvelteConfig,
- SCUDO_PREFIX(malloc_postinit)>
- SCUDO_ALLOCATOR;
-
-#include "wrappers_c.inc"
-
-#undef SCUDO_ALLOCATOR
-#undef SCUDO_PREFIX
-
// TODO(kostyak): support both allocators.
INTERFACE void __scudo_print_stats(void) { Allocator.printStats(); }
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h
index 815d40023b6a..9cd48e82792e 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h
@@ -64,7 +64,7 @@ inline bool checkForCallocOverflow(uptr Size, uptr N, uptr *Product) {
// Returns true if the size passed to pvalloc overflows when rounded to the next
// multiple of PageSize.
inline bool checkForPvallocOverflow(uptr Size, uptr PageSize) {
- return roundUpTo(Size, PageSize) < Size;
+ return roundUp(Size, PageSize) < Size;
}
} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_external.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_external.cpp
index 19ae174f20a5..98abff54e2b2 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_external.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_external.cpp
@@ -46,10 +46,6 @@ const char *GetReportHeaderFromTag(uptr tag) {
return tag_data ? tag_data->header : nullptr;
}
-void InsertShadowStackFrameForTag(ThreadState *thr, uptr tag) {
- FuncEntry(thr, (uptr)&registered_tags[tag]);
-}
-
uptr TagFromShadowStackFrame(uptr pc) {
uptr tag_count = atomic_load(&used_tags, memory_order_relaxed);
void *pc_ptr = (void *)pc;
@@ -60,15 +56,26 @@ uptr TagFromShadowStackFrame(uptr pc) {
#if !SANITIZER_GO
-void ExternalAccess(void *addr, uptr caller_pc, void *tag, AccessType typ) {
+// We need to track tags for individual memory accesses, but there is no space
+// in the shadow cells for them. Instead we push/pop them onto the thread
+// traces and ignore the extra tag frames when printing reports.
+static void PushTag(ThreadState *thr, uptr tag) {
+ FuncEntry(thr, (uptr)&registered_tags[tag]);
+}
+static void PopTag(ThreadState *thr) { FuncExit(thr); }
+
+static void ExternalAccess(void *addr, uptr caller_pc, uptr tsan_caller_pc,
+ void *tag, AccessType typ) {
CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
+ bool in_ignored_lib;
+ if (caller_pc && libignore()->IsIgnored(caller_pc, &in_ignored_lib))
+ return;
+
ThreadState *thr = cur_thread();
if (caller_pc) FuncEntry(thr, caller_pc);
- InsertShadowStackFrameForTag(thr, (uptr)tag);
- bool in_ignored_lib;
- if (!caller_pc || !libignore()->IsIgnored(caller_pc, &in_ignored_lib))
- MemoryAccess(thr, CALLERPC, (uptr)addr, 1, typ);
- FuncExit(thr);
+ PushTag(thr, (uptr)tag);
+ MemoryAccess(thr, tsan_caller_pc, (uptr)addr, 1, typ);
+ PopTag(thr);
if (caller_pc) FuncExit(thr);
}
@@ -112,12 +119,12 @@ void __tsan_external_assign_tag(void *addr, void *tag) {
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_external_read(void *addr, void *caller_pc, void *tag) {
- ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, kAccessRead);
+ ExternalAccess(addr, STRIP_PAC_PC(caller_pc), CALLERPC, tag, kAccessRead);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_external_write(void *addr, void *caller_pc, void *tag) {
- ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, kAccessWrite);
+ ExternalAccess(addr, STRIP_PAC_PC(caller_pc), CALLERPC, tag, kAccessWrite);
}
} // extern "C"
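
A sketch of how external instrumentation is expected to drive this API (illustrative; caller_pc may be null if no user frame should be attributed):

    void *Tag = __tsan_external_register_tag("MyContainer");
    // On each logical read/write of the object:
    __tsan_external_read(Addr, __builtin_return_address(0), Tag);
    __tsan_external_write(Addr, __builtin_return_address(0), Tag);
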
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_flags.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_flags.cpp
index ee78f25cc65c..3fd58f46983f 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_flags.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_flags.cpp
@@ -10,19 +10,21 @@
//
//===----------------------------------------------------------------------===//
-#include "sanitizer_common/sanitizer_flags.h"
+#include "tsan_flags.h"
+
#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_libc.h"
-#include "tsan_flags.h"
-#include "tsan_rtl.h"
+#include "tsan_interface.h"
#include "tsan_mman.h"
+#include "tsan_rtl.h"
#include "ubsan/ubsan_flags.h"
namespace __tsan {
// Can be overriden in frontend.
#ifdef TSAN_EXTERNAL_HOOKS
-extern "C" const char* __tsan_default_options();
+extern "C" const char *__tsan_default_options();
#else
SANITIZER_WEAK_DEFAULT_IMPL
const char *__tsan_default_options() {
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors.h
index 60fbc58f988a..a357a870fdf8 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors.h
@@ -29,6 +29,11 @@ class ScopedInterceptor {
void EnableIgnoresImpl();
};
+struct TsanInterceptorContext {
+ ThreadState *thr;
+ const uptr pc;
+};
+
LibIgnore *libignore();
#if !SANITIZER_GO
@@ -82,7 +87,7 @@ inline bool MustIgnoreInterceptor(ThreadState *thr) {
#if SANITIZER_FREEBSD
# define TSAN_INTERCEPTOR_FREEBSD_ALIAS(ret, func, ...) \
TSAN_INTERCEPTOR(ret, _pthread_##func, __VA_ARGS__) \
- ALIAS(WRAPPER_NAME(pthread_##func));
+ ALIAS(WRAP(pthread_##func));
#else
# define TSAN_INTERCEPTOR_FREEBSD_ALIAS(ret, func, ...)
#endif
@@ -90,17 +95,38 @@ inline bool MustIgnoreInterceptor(ThreadState *thr) {
#if SANITIZER_NETBSD
# define TSAN_INTERCEPTOR_NETBSD_ALIAS(ret, func, ...) \
TSAN_INTERCEPTOR(ret, __libc_##func, __VA_ARGS__) \
- ALIAS(WRAPPER_NAME(pthread_##func));
+ ALIAS(WRAP(pthread_##func));
# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(ret, func, ...) \
TSAN_INTERCEPTOR(ret, __libc_thr_##func, __VA_ARGS__) \
- ALIAS(WRAPPER_NAME(pthread_##func));
+ ALIAS(WRAP(pthread_##func));
# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(ret, func, func2, ...) \
TSAN_INTERCEPTOR(ret, __libc_thr_##func, __VA_ARGS__) \
- ALIAS(WRAPPER_NAME(pthread_##func2));
+ ALIAS(WRAP(pthread_##func2));
#else
# define TSAN_INTERCEPTOR_NETBSD_ALIAS(ret, func, ...)
# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(ret, func, ...)
# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(ret, func, func2, ...)
#endif
+#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
+
+#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \
+ (!cur_thread_init()->is_inited)
+
+#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
+ MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)ptr, size, \
+ true)
+
+#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
+ MemoryAccessRange(((TsanInterceptorContext *) ctx)->thr, \
+ ((TsanInterceptorContext *) ctx)->pc, (uptr) ptr, size, \
+ false)
+
+#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
+ SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__); \
+ TsanInterceptorContext _ctx = {thr, pc}; \
+ ctx = (void *)&_ctx; \
+ (void)ctx;
+
#endif // TSAN_INTERCEPTORS_H
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_libdispatch.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_libdispatch.cpp
index 88d5f0a48119..2104fe7fd059 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_libdispatch.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_libdispatch.cpp
@@ -558,7 +558,7 @@ TSAN_INTERCEPTOR(void, dispatch_apply_f, size_t iterations,
}
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
-DECLARE_REAL_AND_INTERCEPTOR(int, munmap, void *addr, long_t sz)
+DECLARE_REAL_AND_INTERCEPTOR(int, munmap, void *addr, SIZE_T sz)
TSAN_INTERCEPTOR(dispatch_data_t, dispatch_data_create, const void *buffer,
size_t size, dispatch_queue_t q, dispatch_block_t destructor) {
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
index 1ee47bcd1237..e4f9e2915ced 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
@@ -18,6 +18,7 @@
#include "tsan_interceptors.h"
#include "tsan_interface.h"
#include "tsan_interface_ann.h"
+#include "tsan_spinlock_defs_mac.h"
#include "sanitizer_common/sanitizer_addrhashmap.h"
#include <errno.h>
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_memintrinsics.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_memintrinsics.cpp
new file mode 100644
index 000000000000..c8b6b2ef1948
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_memintrinsics.cpp
@@ -0,0 +1,43 @@
+//===-- tsan_interceptors_memintrinsics.cpp -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#define SANITIZER_COMMON_NO_REDEFINE_BUILTINS
+
+#include "tsan_interceptors.h"
+#include "tsan_interface.h"
+
+using namespace __tsan;
+
+#include "sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc"
+
+extern "C" {
+
+void *__tsan_memcpy(void *dst, const void *src, uptr size) {
+ void *ctx;
+#if PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
+#else
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
+#endif
+}
+
+void *__tsan_memset(void *dst, int c, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, c, size);
+}
+
+void *__tsan_memmove(void *dst, const void *src, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
+}
+
+} // extern "C"
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
index 97aa4b77311f..177e338bf282 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
@@ -35,6 +35,9 @@
using namespace __tsan;
+DECLARE_REAL(void *, memcpy, void *to, const void *from, SIZE_T size)
+DECLARE_REAL(void *, memset, void *block, int c, SIZE_T size)
+
#if SANITIZER_FREEBSD || SANITIZER_APPLE
#define stdout __stdoutp
#define stderr __stderrp
@@ -128,7 +131,9 @@ const int SIGSYS = 12;
const int SIGBUS = 7;
const int SIGSYS = 31;
#endif
+#if SANITIZER_HAS_SIGINFO
const int SI_TIMER = -2;
+#endif
void *const MAP_FAILED = (void*)-1;
#if SANITIZER_NETBSD
const int PTHREAD_BARRIER_SERIAL_THREAD = 1234567;
@@ -156,9 +161,6 @@ const int SA_SIGINFO = 4;
const int SIG_SETMASK = 2;
#endif
-#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \
- (!cur_thread_init()->is_inited)
-
namespace __tsan {
struct SignalDesc {
bool armed;
@@ -592,58 +594,27 @@ TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
#define sigsetjmp_symname sigsetjmp
#endif
-#define TSAN_INTERCEPTOR_SETJMP_(x) __interceptor_ ## x
-#define TSAN_INTERCEPTOR_SETJMP__(x) TSAN_INTERCEPTOR_SETJMP_(x)
-#define TSAN_INTERCEPTOR_SETJMP TSAN_INTERCEPTOR_SETJMP__(setjmp_symname)
-#define TSAN_INTERCEPTOR_SIGSETJMP TSAN_INTERCEPTOR_SETJMP__(sigsetjmp_symname)
-
-#define TSAN_STRING_SETJMP SANITIZER_STRINGIFY(setjmp_symname)
-#define TSAN_STRING_SIGSETJMP SANITIZER_STRINGIFY(sigsetjmp_symname)
-
-// Not called. Merely to satisfy TSAN_INTERCEPT().
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-int TSAN_INTERCEPTOR_SETJMP(void *env);
-extern "C" int TSAN_INTERCEPTOR_SETJMP(void *env) {
- CHECK(0);
- return 0;
-}
-
-// FIXME: any reason to have a separate declaration?
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-int __interceptor__setjmp(void *env);
-extern "C" int __interceptor__setjmp(void *env) {
- CHECK(0);
- return 0;
-}
-
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-int TSAN_INTERCEPTOR_SIGSETJMP(void *env);
-extern "C" int TSAN_INTERCEPTOR_SIGSETJMP(void *env) {
- CHECK(0);
- return 0;
-}
-
-#if !SANITIZER_NETBSD
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-int __interceptor___sigsetjmp(void *env);
-extern "C" int __interceptor___sigsetjmp(void *env) {
- CHECK(0);
- return 0;
-}
-#endif
-
-extern "C" int setjmp_symname(void *env);
-extern "C" int _setjmp(void *env);
-extern "C" int sigsetjmp_symname(void *env);
-#if !SANITIZER_NETBSD
-extern "C" int __sigsetjmp(void *env);
-#endif
DEFINE_REAL(int, setjmp_symname, void *env)
DEFINE_REAL(int, _setjmp, void *env)
DEFINE_REAL(int, sigsetjmp_symname, void *env)
#if !SANITIZER_NETBSD
DEFINE_REAL(int, __sigsetjmp, void *env)
#endif
+
+// The real interceptor for setjmp is special and is implemented in pure asm.
+// We just need to initialize the REAL functions so that they can be used in
+// the asm.
+static void InitializeSetjmpInterceptors() {
+ // We cannot use TSAN_INTERCEPT to get the setjmp address, because it takes
+ // &setjmp, and setjmp is not present in some versions of libc.
+ using __interception::InterceptFunction;
+ InterceptFunction(SANITIZER_STRINGIFY(setjmp_symname),
+                   (uptr *)&REAL(setjmp_symname), 0, 0);
+ InterceptFunction("_setjmp", (uptr *)&REAL(_setjmp), 0, 0);
+ InterceptFunction(SANITIZER_STRINGIFY(sigsetjmp_symname),
+                   (uptr *)&REAL(sigsetjmp_symname), 0, 0);
+#if !SANITIZER_NETBSD
+ InterceptFunction("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
+#endif
+}
#endif // SANITIZER_APPLE
#if SANITIZER_NETBSD
@@ -824,10 +795,11 @@ static void *mmap_interceptor(ThreadState *thr, uptr pc, Mmap real_mmap,
return res;
}
-TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
- SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
+template <class Munmap>
+static int munmap_interceptor(ThreadState *thr, uptr pc, Munmap real_munmap,
+ void *addr, SIZE_T sz) {
UnmapShadow(thr, (uptr)addr, sz);
- int res = REAL(munmap)(addr, sz);
+ int res = real_munmap(addr, sz);
return res;
}
@@ -2420,11 +2392,6 @@ static int OnExit(ThreadState *thr) {
return status;
}
-struct TsanInterceptorContext {
- ThreadState *thr;
- const uptr pc;
-};
-
#if !SANITIZER_APPLE
static void HandleRecvmsg(ThreadState *thr, uptr pc,
__sanitizer_msghdr *msg) {
@@ -2446,28 +2413,11 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
#define SANITIZER_INTERCEPT_TLS_GET_OFFSET 1
#undef SANITIZER_INTERCEPT_PTHREAD_SIGMASK
-#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
INTERCEPT_FUNCTION_VER(name, ver)
#define COMMON_INTERCEPT_FUNCTION_VER_UNVERSIONED_FALLBACK(name, ver) \
(INTERCEPT_FUNCTION_VER(name, ver) || INTERCEPT_FUNCTION(name))
-#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
- MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr, \
- ((TsanInterceptorContext *)ctx)->pc, (uptr)ptr, size, \
- true)
-
-#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
- MemoryAccessRange(((TsanInterceptorContext *) ctx)->thr, \
- ((TsanInterceptorContext *) ctx)->pc, (uptr) ptr, size, \
- false)
-
-#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
- SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__); \
- TsanInterceptorContext _ctx = {thr, pc}; \
- ctx = (void *)&_ctx; \
- (void)ctx;
-
#define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
TsanInterceptorContext _ctx = {thr, pc}; \
@@ -2497,11 +2447,21 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
res; \
})
+// Ignore interceptors in OnLibraryLoaded()/Unloaded(). These hooks use code
+// (ListOfModules::init, MemoryMappingLayout::DumpListOfModules) that makes
+// intercepted calls, which can cause deadlocks with ReportRace(), which also
+// uses this code.
#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
- libignore()->OnLibraryLoaded(filename)
+ ({ \
+ ScopedIgnoreInterceptors ignore_interceptors; \
+ libignore()->OnLibraryLoaded(filename); \
+ })
-#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
- libignore()->OnLibraryUnloaded()
+#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
+ ({ \
+ ScopedIgnoreInterceptors ignore_interceptors; \
+ libignore()->OnLibraryUnloaded(); \
+ })
#define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \
Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u)
@@ -2545,6 +2505,11 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
off); \
} while (false)
+#define COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, sz) \
+ do { \
+ return munmap_interceptor(thr, pc, REAL(munmap), addr, sz); \
+ } while (false)
+
#if !SANITIZER_APPLE
#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
@@ -2578,6 +2543,8 @@ static __sanitizer_sighandler_ptr signal_impl(int sig,
#define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signo, handler) \
{ return (uptr)signal_impl(signo, (__sanitizer_sighandler_ptr)handler); }
+#define SIGNAL_INTERCEPTOR_ENTER() LazyInitialize(cur_thread_init())
+
#include "sanitizer_common/sanitizer_signal_interceptors.inc"
int sigaction_impl(int sig, const __sanitizer_sigaction *act,
@@ -2883,16 +2850,7 @@ void InitializeInterceptors() {
InitializeLibdispatchInterceptors();
#if !SANITIZER_APPLE
- // We can not use TSAN_INTERCEPT to get setjmp addr,
- // because it does &setjmp and setjmp is not present in some versions of libc.
- using __interception::InterceptFunction;
- InterceptFunction(TSAN_STRING_SETJMP, (uptr*)&REAL(setjmp_symname), 0, 0);
- InterceptFunction("_setjmp", (uptr*)&REAL(_setjmp), 0, 0);
- InterceptFunction(TSAN_STRING_SIGSETJMP, (uptr*)&REAL(sigsetjmp_symname), 0,
- 0);
-#if !SANITIZER_NETBSD
- InterceptFunction("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
-#endif
+ InitializeSetjmpInterceptors();
#endif
TSAN_INTERCEPT(longjmp_symname);
@@ -3159,22 +3117,4 @@ SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_wait(
}
}
-void *__tsan_memcpy(void *dst, const void *src, uptr size) {
- void *ctx;
-#if PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE
- COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
-#else
- COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
-#endif
-}
-
-void *__tsan_memset(void *dst, int c, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, c, size);
-}
-
-void *__tsan_memmove(void *dst, const void *src, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
-}
-}
+} // extern "C"
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface.h
index 5b9d664e503f..d53c1e3935df 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface.h
@@ -32,6 +32,9 @@ extern "C" {
// before any instrumented code is executed and before any call to malloc.
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_init();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char *
+__tsan_default_options();
+
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_flush_memory();
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read1(void *addr);
@@ -85,6 +88,8 @@ SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_exit();
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_ignore_thread_begin();
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_ignore_thread_end();
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_on_thread_idle();
+
SANITIZER_INTERFACE_ATTRIBUTE
void *__tsan_external_register_tag(const char *object_type);
SANITIZER_INTERFACE_ATTRIBUTE
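
Since __tsan_default_options() is now declared as a weak interface symbol, embedders can override it to bake in runtime flags; a typical definition (sketch; the flag values are illustrative):

    extern "C" const char *__tsan_default_options() {
      return "halt_on_error=1:report_signal_unsafe=0";
    }
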
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_malloc_mac.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_malloc_mac.cpp
index ac844ae8a44a..e973be963e57 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_malloc_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_malloc_mac.cpp
@@ -17,6 +17,7 @@
#include "sanitizer_common/sanitizer_errno.h"
#include "tsan_interceptors.h"
#include "tsan_stack_trace.h"
+#include "tsan_mman.h"
using namespace __tsan;
#define COMMON_MALLOC_ZONE_NAME "tsan"
@@ -29,16 +30,30 @@ using namespace __tsan;
user_memalign(cur_thread(), StackTrace::GetCurrentPc(), alignment, size)
#define COMMON_MALLOC_MALLOC(size) \
if (in_symbolizer()) return InternalAlloc(size); \
- SCOPED_INTERCEPTOR_RAW(malloc, size); \
- void *p = user_alloc(thr, pc, size)
+ void *p = 0; \
+ { \
+ SCOPED_INTERCEPTOR_RAW(malloc, size); \
+ p = user_alloc(thr, pc, size); \
+ } \
+ invoke_malloc_hook(p, size)
#define COMMON_MALLOC_REALLOC(ptr, size) \
if (in_symbolizer()) return InternalRealloc(ptr, size); \
- SCOPED_INTERCEPTOR_RAW(realloc, ptr, size); \
- void *p = user_realloc(thr, pc, ptr, size)
+ if (ptr) \
+ invoke_free_hook(ptr); \
+ void *p = 0; \
+ { \
+ SCOPED_INTERCEPTOR_RAW(realloc, ptr, size); \
+ p = user_realloc(thr, pc, ptr, size); \
+ } \
+ invoke_malloc_hook(p, size)
#define COMMON_MALLOC_CALLOC(count, size) \
if (in_symbolizer()) return InternalCalloc(count, size); \
- SCOPED_INTERCEPTOR_RAW(calloc, size, count); \
- void *p = user_calloc(thr, pc, size, count)
+ void *p = 0; \
+ { \
+ SCOPED_INTERCEPTOR_RAW(calloc, size, count); \
+ p = user_calloc(thr, pc, size, count); \
+ } \
+ invoke_malloc_hook(p, size * count)
#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \
if (in_symbolizer()) { \
void *p = InternalAlloc(size, nullptr, alignment); \
@@ -55,6 +70,7 @@ using namespace __tsan;
void *p = user_valloc(thr, pc, size)
#define COMMON_MALLOC_FREE(ptr) \
if (in_symbolizer()) return InternalFree(ptr); \
+ invoke_free_hook(ptr); \
SCOPED_INTERCEPTOR_RAW(free, ptr); \
user_free(thr, pc, ptr)
#define COMMON_MALLOC_SIZE(ptr) uptr size = user_alloc_usable_size(ptr);
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mman.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
index 0937e521193f..6f118e0979e2 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
@@ -15,6 +15,7 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_interface.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
@@ -24,6 +25,8 @@ namespace __tsan {
struct MapUnmapCallback {
void OnMap(uptr p, uptr size) const { }
+ void OnMapSecondary(uptr p, uptr size, uptr user_begin,
+ uptr user_size) const {}
void OnUnmap(uptr p, uptr size) const {
// We are about to unmap a chunk of user memory.
// Mark the corresponding shadow memory as not needed.
@@ -351,6 +354,20 @@ void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}
+static const void *user_alloc_begin(const void *p) {
+ if (p == nullptr || !IsAppMem((uptr)p))
+ return nullptr;
+ void *beg = allocator()->GetBlockBegin(p);
+ if (!beg)
+ return nullptr;
+
+ MBlock *b = ctx->metamap.GetBlock((uptr)beg);
+ if (!b)
+ return nullptr; // Not a valid pointer.
+
+ return (const void *)beg;
+}
+
uptr user_alloc_usable_size(const void *p) {
if (p == 0 || !IsAppMem((uptr)p))
return 0;
@@ -362,6 +379,17 @@ uptr user_alloc_usable_size(const void *p) {
return b->siz;
}
+uptr user_alloc_usable_size_fast(const void *p) {
+ MBlock *b = ctx->metamap.GetBlock((uptr)p);
+  // Static objects may have been malloc'd before tsan completed
+  // initialization, and may believe the returned pointers to be valid.
+ if (!b)
+ return 0; // Not a valid pointer.
+ if (b->siz == 0)
+ return 1; // Zero-sized allocations are actually 1 byte.
+ return b->siz;
+}
+
void invoke_malloc_hook(void *ptr, uptr size) {
ThreadState *thr = cur_thread();
if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
@@ -429,10 +457,25 @@ int __sanitizer_get_ownership(const void *p) {
return allocator()->GetBlockBegin(p) != 0;
}
+const void *__sanitizer_get_allocated_begin(const void *p) {
+ return user_alloc_begin(p);
+}
+
uptr __sanitizer_get_allocated_size(const void *p) {
return user_alloc_usable_size(p);
}
+uptr __sanitizer_get_allocated_size_fast(const void *p) {
+ DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
+ uptr ret = user_alloc_usable_size_fast(p);
+ DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
+ return ret;
+}
+
+void __sanitizer_purge_allocator() {
+ allocator()->ForceReleaseToOS();
+}
+
void __tsan_on_thread_idle() {
ThreadState *thr = cur_thread();
allocator()->SwallowCache(&thr->proc()->alloc_cache);
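A hedged usage sketch of the two new entry points, following the contract the DCHECKs above encode (the _fast variant is only defined when its argument is the exact start of a live allocation):

#include <sanitizer/allocator_interface.h>
#include <cassert>
#include <cstdlib>

int main() {
  char *p = static_cast<char *>(std::malloc(64));
  // Interior pointers are fine here; the allocator maps them back to the
  // start of the containing block.
  const void *beg = __sanitizer_get_allocated_begin(p + 10);
  assert(beg == p);
  // Only valid because beg is the allocation start; TSan reports the
  // requested size, so this is 64, not a rounded-up size class.
  assert(__sanitizer_get_allocated_size_fast(beg) == 64);
  std::free(p);
  return 0;
}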
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform.h
index 5a92187f1431..f0cdaf48eaa3 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform.h
@@ -50,8 +50,8 @@ C/C++ on linux/x86_64 and freebsd/x86_64
0040 0000 0000 - 0100 0000 0000: -
0100 0000 0000 - 1000 0000 0000: shadow
1000 0000 0000 - 3000 0000 0000: -
-3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
-4000 0000 0000 - 5500 0000 0000: -
+3000 0000 0000 - 3400 0000 0000: metainfo (memory blocks and sync objects)
+3400 0000 0000 - 5500 0000 0000: -
5500 0000 0000 - 5680 0000 0000: pie binaries without ASLR or on 4.1+ kernels
5680 0000 0000 - 7d00 0000 0000: -
7b00 0000 0000 - 7c00 0000 0000: heap
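The sizes fall straight out of the addresses above; a compile-time sketch of the shrink, which reduces the reserved metainfo range from 16 TiB to 4 TiB:

#include <cstdint>

constexpr uint64_t OldMeta = 0x400000000000ull - 0x300000000000ull;
constexpr uint64_t NewMeta = 0x340000000000ull - 0x300000000000ull;
static_assert(OldMeta == (16ull << 40), "16 TiB before this change");
static_assert(NewMeta == (4ull << 40), "4 TiB after this change");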
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp
index 9094469ba4eb..384a443c16b0 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp
@@ -66,7 +66,8 @@ extern "C" void *__libc_stack_end;
void *__libc_stack_end = 0;
#endif
-#if SANITIZER_LINUX && defined(__aarch64__) && !SANITIZER_GO
+#if SANITIZER_LINUX && (defined(__aarch64__) || defined(__loongarch_lp64)) && \
+ !SANITIZER_GO
# define INIT_LONGJMP_XOR_KEY 1
#else
# define INIT_LONGJMP_XOR_KEY 0
@@ -314,7 +315,7 @@ void InitializePlatform() {
}
#endif
-#if SANITIZER_LINUX && defined(__aarch64__)
+#if SANITIZER_LINUX && (defined(__aarch64__) || defined(__loongarch_lp64))
// Initialize the xor key used in {sig}{set,long}jump.
InitializeLongjmpXorKey();
#endif
@@ -387,8 +388,8 @@ static uptr UnmangleLongJmpSp(uptr mangled_sp) {
# else
return mangled_sp;
# endif
-#elif defined(__loongarch__)
- return mangled_sp;
+#elif defined(__loongarch_lp64)
+ return mangled_sp ^ longjmp_xor_key;
#elif defined(__powerpc64__)
// Reverse of:
// ld r4, -28696(r13)
@@ -452,7 +453,11 @@ static void InitializeLongjmpXorKey() {
// 2. Retrieve vanilla/mangled SP.
uptr sp;
+#ifdef __loongarch__
+ asm("move %0, $sp" : "=r" (sp));
+#else
asm("mov %0, sp" : "=r" (sp));
+#endif
uptr mangled_sp = ((uptr *)&env)[LONG_JMP_SP_ENV_SLOT];
// 3. xor SPs to obtain key.
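In miniature, the scheme these hunks extend to loongarch64 (a sketch with illustrative names): glibc mangles the stack pointer saved in a jmp_buf by XORing it with a per-process key, and the runtime recovers that key by comparing a known SP against its mangled copy.

#include <cstdint>

// Step 3 above: XOR a known SP with its mangled jmp_buf copy.
uint64_t RecoverKey(uint64_t real_sp, uint64_t mangled_sp) {
  return real_sp ^ mangled_sp;
}

// What UnmangleLongJmpSp now does for __loongarch_lp64 as well.
uint64_t Unmangle(uint64_t mangled_sp, uint64_t key) {
  return mangled_sp ^ key;
}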
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_report.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_report.cpp
index 9b03adc16b99..3ae666e1212f 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_report.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_report.cpp
@@ -98,12 +98,6 @@ static const char *ReportTypeString(ReportType typ, uptr tag) {
UNREACHABLE("missing case");
}
-#if SANITIZER_APPLE
-static const char *const kInterposedFunctionPrefix = "wrap_";
-#else
-static const char *const kInterposedFunctionPrefix = "__interceptor_";
-#endif
-
void PrintStack(const ReportStack *ent) {
if (ent == 0 || ent->frames == 0) {
Printf(" [failed to restore the stack]\n\n");
@@ -115,7 +109,7 @@ void PrintStack(const ReportStack *ent) {
RenderFrame(&res, common_flags()->stack_trace_format, i,
frame->info.address, &frame->info,
common_flags()->symbolize_vs_style,
- common_flags()->strip_path_prefix, kInterposedFunctionPrefix);
+ common_flags()->strip_path_prefix);
Printf("%s\n", res.data());
}
Printf("\n");
@@ -284,6 +278,7 @@ static bool FrameIsInternal(const SymbolizedStack *frame) {
const char *module = frame->info.module;
if (file != 0 &&
(internal_strstr(file, "tsan_interceptors_posix.cpp") ||
+ internal_strstr(file, "tsan_interceptors_memintrinsics.cpp") ||
internal_strstr(file, "sanitizer_common_interceptors.inc") ||
internal_strstr(file, "tsan_interface_")))
return true;
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_aarch64.S b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_aarch64.S
index e0b4c71dfed9..c6162659b876 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_aarch64.S
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_aarch64.S
@@ -3,28 +3,6 @@
#include "sanitizer_common/sanitizer_asm.h"
-#if defined(__APPLE__)
-.align 2
-
-.section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
-.long _setjmp$non_lazy_ptr
-_setjmp$non_lazy_ptr:
-.indirect_symbol _setjmp
-.long 0
-
-.section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
-.long __setjmp$non_lazy_ptr
-__setjmp$non_lazy_ptr:
-.indirect_symbol __setjmp
-.long 0
-
-.section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
-.long _sigsetjmp$non_lazy_ptr
-_sigsetjmp$non_lazy_ptr:
-.indirect_symbol _sigsetjmp
-.long 0
-#endif
-
#if !defined(__APPLE__)
.section .text
#else
@@ -75,9 +53,8 @@ ASM_SYMBOL_INTERCEPTOR(setjmp):
ldr x1, [x1, #:got_lo12:_ZN14__interception11real_setjmpE]
ldr x1, [x1]
#else
- adrp x1, _setjmp$non_lazy_ptr@page
- add x1, x1, _setjmp$non_lazy_ptr@pageoff
- ldr x1, [x1]
+ adrp x1, _setjmp@GOTPAGE
+ ldr x1, [x1, _setjmp@GOTPAGEOFF]
#endif
br x1
@@ -126,9 +103,8 @@ ASM_SYMBOL_INTERCEPTOR(_setjmp):
ldr x1, [x1, #:got_lo12:_ZN14__interception12real__setjmpE]
ldr x1, [x1]
#else
- adrp x1, __setjmp$non_lazy_ptr@page
- add x1, x1, __setjmp$non_lazy_ptr@pageoff
- ldr x1, [x1]
+ adrp x1, __setjmp@GOTPAGE
+ ldr x1, [x1, __setjmp@GOTPAGEOFF]
#endif
br x1
@@ -179,9 +155,8 @@ ASM_SYMBOL_INTERCEPTOR(sigsetjmp):
ldr x2, [x2, #:got_lo12:_ZN14__interception14real_sigsetjmpE]
ldr x2, [x2]
#else
- adrp x2, _sigsetjmp$non_lazy_ptr@page
- add x2, x2, _sigsetjmp$non_lazy_ptr@pageoff
- ldr x2, [x2]
+ adrp x2, _sigsetjmp@GOTPAGE
+ ldr x2, [x2, _sigsetjmp@GOTPAGEOFF]
#endif
br x2
CFI_ENDPROC
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
index c2cff60e2da2..0311df553fdd 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
@@ -10,20 +10,20 @@
//
//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
-#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "tsan_fd.h"
+#include "tsan_flags.h"
+#include "tsan_mman.h"
#include "tsan_platform.h"
+#include "tsan_report.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
-#include "tsan_report.h"
#include "tsan_sync.h"
-#include "tsan_mman.h"
-#include "tsan_flags.h"
-#include "tsan_fd.h"
namespace __tsan {
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_spinlock_defs_mac.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_spinlock_defs_mac.h
new file mode 100644
index 000000000000..1a99a81c0302
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_spinlock_defs_mac.h
@@ -0,0 +1,45 @@
+//===-- tsan_spinlock_defs_mac.h -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Mac-specific forward-declared function definitions that may be
+// deprecated in later versions of the OS.
+// These are needed for interceptors.
+//
+//===----------------------------------------------------------------------===//
+
+#if SANITIZER_APPLE
+
+#ifndef TSAN_SPINLOCK_DEFS_MAC_H
+#define TSAN_SPINLOCK_DEFS_MAC_H
+
+#include <stdint.h>
+
+extern "C" {
+
+/*
+Provides forward declarations related to OSSpinLocks on Darwin. These
+functions are deprecated on macOS 10.12 and later and are no longer
+included in the system headers.
+
+However, the symbols are still available on the system, so we provide these
+forward declarations to prevent compilation errors in
+tsan_interceptors_mac.cpp, which references these functions when defining
+TSAN interceptor functions.
+*/
+
+typedef int32_t OSSpinLock;
+
+void OSSpinLockLock(volatile OSSpinLock *__lock);
+void OSSpinLockUnlock(volatile OSSpinLock *__lock);
+bool OSSpinLockTry(volatile OSSpinLock *__lock);
+
+}
+
+#endif // TSAN_SPINLOCK_DEFS_MAC_H
+#endif // SANITIZER_APPLE
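A minimal consumer showing the point of the header (a sketch for macOS, where the deprecated symbols still live in libSystem even though newer SDKs omit their declarations):

#include <cstdint>

typedef int32_t OSSpinLock;
extern "C" void OSSpinLockLock(volatile OSSpinLock *lock);
extern "C" void OSSpinLockUnlock(volatile OSSpinLock *lock);

int main() {
  // Links and runs despite the missing system-header declarations, which is
  // exactly the gap the forward declarations above close for the TSan
  // interceptors.
  OSSpinLock Lock = 0;
  OSSpinLockLock(&Lock);
  OSSpinLockUnlock(&Lock);
  return 0;
}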
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_suppressions.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_suppressions.cpp
index a1c1bf81bf67..9cdfa32a9343 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_suppressions.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_suppressions.cpp
@@ -10,15 +10,16 @@
//
//===----------------------------------------------------------------------===//
+#include "tsan_suppressions.h"
+
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_suppressions.h"
-#include "tsan_suppressions.h"
-#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
+#include "tsan_rtl.h"
#if !SANITIZER_GO
// Suppressions for true/false positives in standard libraries.
diff --git a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_diag.cpp b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_diag.cpp
index 3673e66539d0..dd99613abbe3 100644
--- a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_diag.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_diag.cpp
@@ -214,7 +214,12 @@ static void RenderText(InternalScopedString *Buffer, const char *Message,
// printf, and stop using snprintf here.
char FloatBuffer[32];
#if SANITIZER_WINDOWS
- sprintf_s(FloatBuffer, sizeof(FloatBuffer), "%Lg", (long double)A.Float);
+ // On MSVC platforms, long doubles are equal to regular doubles.
+ // In MinGW environments on x86, long doubles are 80 bit, but here,
+ // we're calling an MS CRT provided printf function which considers
+ // long doubles to be 64 bit. Just cast the float value to a regular
+ // double to avoid the potential ambiguity in MinGW mode.
+ sprintf_s(FloatBuffer, sizeof(FloatBuffer), "%g", (double)A.Float);
#else
snprintf(FloatBuffer, sizeof(FloatBuffer), "%Lg", (long double)A.Float);
#endif
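A standalone illustration of the pitfall described in the comment (a sketch; the mismatch shows up with a MinGW/x86 toolchain): the MS CRT reads a %Lg argument as a 64-bit double while the caller passed an 80-bit x87 long double, so casting to plain double keeps both sides in agreement.

#include <cstdio>

int main() {
  long double v = 0.1L;
  char buf[32];
  // Portable on both MSVC and MinGW CRTs: never hand the CRT a long double.
  std::snprintf(buf, sizeof(buf), "%g", (double)v);
  std::puts(buf);
  return 0;
}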
diff --git a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers.cpp b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers.cpp
index 410292a0d538..0f16507d5d88 100644
--- a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers.cpp
@@ -915,4 +915,39 @@ void __ubsan::__ubsan_handle_cfi_check_fail_abort(CFICheckFailData *Data,
Die();
}
+static bool handleFunctionTypeMismatch(FunctionTypeMismatchData *Data,
+ ValueHandle Function,
+ ReportOptions Opts) {
+ SourceLocation CallLoc = Data->Loc.acquire();
+ ErrorType ET = ErrorType::FunctionTypeMismatch;
+ if (ignoreReport(CallLoc, Opts, ET))
+ return true;
+
+ ScopedReport R(Opts, CallLoc, ET);
+
+ SymbolizedStackHolder FLoc(getSymbolizedLocation(Function));
+ const char *FName = FLoc.get()->info.function;
+ if (!FName)
+ FName = "(unknown)";
+
+ Diag(CallLoc, DL_Error, ET,
+ "call to function %0 through pointer to incorrect function type %1")
+ << FName << Data->Type;
+ Diag(FLoc, DL_Note, ET, "%0 defined here") << FName;
+ return true;
+}
+
+void __ubsan::__ubsan_handle_function_type_mismatch(
+ FunctionTypeMismatchData *Data, ValueHandle Function) {
+ GET_REPORT_OPTIONS(false);
+ handleFunctionTypeMismatch(Data, Function, Opts);
+}
+
+void __ubsan::__ubsan_handle_function_type_mismatch_abort(
+ FunctionTypeMismatchData *Data, ValueHandle Function) {
+ GET_REPORT_OPTIONS(true);
+ if (handleFunctionTypeMismatch(Data, Function, Opts))
+ Die();
+}
+
#endif // CAN_SANITIZE_UB
diff --git a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers.h b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers.h
index 219fb15de55f..3bd5046de3d7 100644
--- a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers.h
+++ b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers.h
@@ -231,6 +231,17 @@ extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __ubsan_handle_cfi_bad_type(
CFICheckFailData *Data, ValueHandle Vtable, bool ValidVtable,
ReportOptions Opts);
+struct FunctionTypeMismatchData {
+ SourceLocation Loc;
+ const TypeDescriptor &Type;
+};
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
+__ubsan_handle_function_type_mismatch(FunctionTypeMismatchData *Data,
+ ValueHandle Val);
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
+__ubsan_handle_function_type_mismatch_abort(FunctionTypeMismatchData *Data,
+ ValueHandle Val);
}
#endif // UBSAN_HANDLERS_H
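A minimal program that reaches the new handler (a sketch; build with -fsanitize=function):

void TakesInt(int) {}

int main() {
  // The pointer type disagrees with the callee's actual type.
  auto Fp = reinterpret_cast<void (*)(float)>(&TakesInt);
  Fp(1.0f);  // UBSan: call to function TakesInt through pointer to
             // incorrect function type 'void (*)(float)'
  return 0;
}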
diff --git a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers_cxx.cpp b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers_cxx.cpp
index 0317a3d1428c..206a0bb485a9 100644
--- a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers_cxx.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers_cxx.cpp
@@ -156,50 +156,6 @@ void __ubsan_handle_cfi_bad_type(CFICheckFailData *Data, ValueHandle Vtable,
Diag(Loc, DL_Note, ET, "check failed in %0, vtable located in %1")
<< SrcModule << DstModule;
}
-
-static bool handleFunctionTypeMismatch(FunctionTypeMismatchData *Data,
- ValueHandle Function,
- ValueHandle calleeRTTI,
- ValueHandle fnRTTI, ReportOptions Opts) {
- if (checkTypeInfoEquality(reinterpret_cast<void *>(calleeRTTI),
- reinterpret_cast<void *>(fnRTTI)))
- return false;
-
- SourceLocation CallLoc = Data->Loc.acquire();
- ErrorType ET = ErrorType::FunctionTypeMismatch;
-
- if (ignoreReport(CallLoc, Opts, ET))
- return true;
-
- ScopedReport R(Opts, CallLoc, ET);
-
- SymbolizedStackHolder FLoc(getSymbolizedLocation(Function));
- const char *FName = FLoc.get()->info.function;
- if (!FName)
- FName = "(unknown)";
-
- Diag(CallLoc, DL_Error, ET,
- "call to function %0 through pointer to incorrect function type %1")
- << FName << Data->Type;
- Diag(FLoc, DL_Note, ET, "%0 defined here") << FName;
- return true;
-}
-
-void __ubsan_handle_function_type_mismatch_v1(FunctionTypeMismatchData *Data,
- ValueHandle Function,
- ValueHandle calleeRTTI,
- ValueHandle fnRTTI) {
- GET_REPORT_OPTIONS(false);
- handleFunctionTypeMismatch(Data, Function, calleeRTTI, fnRTTI, Opts);
-}
-
-void __ubsan_handle_function_type_mismatch_v1_abort(
- FunctionTypeMismatchData *Data, ValueHandle Function,
- ValueHandle calleeRTTI, ValueHandle fnRTTI) {
- GET_REPORT_OPTIONS(true);
- if (handleFunctionTypeMismatch(Data, Function, calleeRTTI, fnRTTI, Opts))
- Die();
-}
} // namespace __ubsan
#endif // CAN_SANITIZE_UB
diff --git a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers_cxx.h b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers_cxx.h
index fd534c2573f6..71695cbdc090 100644
--- a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers_cxx.h
+++ b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers_cxx.h
@@ -33,22 +33,6 @@ void __ubsan_handle_dynamic_type_cache_miss(
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __ubsan_handle_dynamic_type_cache_miss_abort(
DynamicTypeCacheMissData *Data, ValueHandle Pointer, ValueHandle Hash);
-
-struct FunctionTypeMismatchData {
- SourceLocation Loc;
- const TypeDescriptor &Type;
-};
-
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
-__ubsan_handle_function_type_mismatch_v1(FunctionTypeMismatchData *Data,
- ValueHandle Val,
- ValueHandle calleeRTTI,
- ValueHandle fnRTTI);
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
-__ubsan_handle_function_type_mismatch_v1_abort(FunctionTypeMismatchData *Data,
- ValueHandle Val,
- ValueHandle calleeRTTI,
- ValueHandle fnRTTI);
}
#endif // UBSAN_HANDLERS_CXX_H
diff --git a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_interface.inc b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_interface.inc
index 94337d85017b..cb27feb5d7e9 100644
--- a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_interface.inc
+++ b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_interface.inc
@@ -21,8 +21,8 @@ INTERFACE_FUNCTION(__ubsan_handle_dynamic_type_cache_miss)
INTERFACE_FUNCTION(__ubsan_handle_dynamic_type_cache_miss_abort)
INTERFACE_FUNCTION(__ubsan_handle_float_cast_overflow)
INTERFACE_FUNCTION(__ubsan_handle_float_cast_overflow_abort)
-INTERFACE_FUNCTION(__ubsan_handle_function_type_mismatch_v1)
-INTERFACE_FUNCTION(__ubsan_handle_function_type_mismatch_v1_abort)
+INTERFACE_FUNCTION(__ubsan_handle_function_type_mismatch)
+INTERFACE_FUNCTION(__ubsan_handle_function_type_mismatch_abort)
INTERFACE_FUNCTION(__ubsan_handle_implicit_conversion)
INTERFACE_FUNCTION(__ubsan_handle_implicit_conversion_abort)
INTERFACE_FUNCTION(__ubsan_handle_invalid_builtin)
diff --git a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_signals_standalone.cpp b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_signals_standalone.cpp
index 2c91db8ca397..354f847fab71 100644
--- a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_signals_standalone.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_signals_standalone.cpp
@@ -34,7 +34,12 @@ void InitializeDeadlySignals() {}
#else
+namespace __ubsan {
+void InitializeDeadlySignals();
+} // namespace __ubsan
+
#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
+#define SIGNAL_INTERCEPTOR_ENTER() __ubsan::InitializeDeadlySignals()
#include "sanitizer_common/sanitizer_signal_interceptors.inc"
// TODO(yln): Temporary workaround. Will be removed.
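The shape of the mechanism, as an illustrative standalone sketch (not the verbatim contents of sanitizer_signal_interceptors.inc): every intercepted signal API first runs a hook, so handler installation can be deferred until the program actually touches the signal machinery.

#include <csignal>

static void InitializeDeadlySignalsOnce() {
  // Stand-in for __ubsan::InitializeDeadlySignals(); installs handlers once.
}

extern "C" void (*my_signal(int signum, void (*handler)(int)))(int) {
  InitializeDeadlySignalsOnce();        // the SIGNAL_INTERCEPTOR_ENTER() role
  return std::signal(signum, handler);  // forward to the real API
}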
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_AArch64.cpp b/contrib/llvm-project/compiler-rt/lib/xray/xray_AArch64.cpp
index 00105d30b4db..c1d77758946e 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_AArch64.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_AArch64.cpp
@@ -24,7 +24,6 @@ namespace __xray {
// The machine codes for some instructions used in runtime patching.
enum class PatchOpcodes : uint32_t {
PO_StpX0X30SP_m16e = 0xA9BF7BE0, // STP X0, X30, [SP, #-16]!
- PO_LdrW0_12 = 0x18000060, // LDR W0, #12
PO_LdrX16_12 = 0x58000070, // LDR X16, #12
PO_BlrX16 = 0xD63F0200, // BLR X16
PO_LdpX0X30SP_16 = 0xA8C17BE0, // LDP X0, X30, [SP], #16
@@ -45,7 +44,7 @@ inline static bool patchSled(const bool Enable, const uint32_t FuncId,
//
// xray_sled_n:
// STP X0, X30, [SP, #-16]! ; PUSH {r0, lr}
- // LDR W0, #12 ; W0 := function ID
+ // LDR W17, #12 ; W17 := function ID
// LDR X16,#12 ; X16 := address of the trampoline
// BLR X16
// ;DATA: 32 bits of function ID
@@ -64,8 +63,7 @@ inline static bool patchSled(const bool Enable, const uint32_t FuncId,
uint32_t *FirstAddress = reinterpret_cast<uint32_t *>(Sled.address());
uint32_t *CurAddress = FirstAddress + 1;
if (Enable) {
- *CurAddress = uint32_t(PatchOpcodes::PO_LdrW0_12);
- CurAddress++;
+ *CurAddress++ = 0x18000071; // ldr w17, #12
*CurAddress = uint32_t(PatchOpcodes::PO_LdrX16_12);
CurAddress++;
*CurAddress = uint32_t(PatchOpcodes::PO_BlrX16);
@@ -105,15 +103,37 @@ bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId,
return patchSled(Enable, FuncId, Sled, __xray_FunctionTailExit);
}
+// AArch64AsmPrinter::LowerPATCHABLE_EVENT_CALL generates this code sequence:
+//
+// .Lxray_event_sled_N:
+// b 1f
+// save x0 and x1 (and also x2 for TYPED_EVENT_CALL)
+// set up x0 and x1 (and also x2 for TYPED_EVENT_CALL)
+// bl __xray_CustomEvent or __xray_TypedEvent
+// restore x0 and x1 (and also x2 for TYPED_EVENT_CALL)
+// 1:
+//
+// There are 6 instructions for EVENT_CALL and 9 for TYPED_EVENT_CALL.
+//
+// Enable: b .+24 => nop
+// Disable: nop => b .+24
bool patchCustomEvent(const bool Enable, const uint32_t FuncId,
- const XRaySledEntry &Sled)
- XRAY_NEVER_INSTRUMENT { // FIXME: Implement in aarch64?
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ uint32_t Inst = Enable ? 0xd503201f : 0x14000006;
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint32_t> *>(Sled.address()), Inst,
+ std::memory_order_release);
return false;
}
+// Enable: b .+36 => nop
+// Disable: nop => b .+36
bool patchTypedEvent(const bool Enable, const uint32_t FuncId,
const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
- // FIXME: Implement in aarch64?
+ uint32_t Inst = Enable ? 0xd503201f : 0x14000009;
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint32_t> *>(Sled.address()), Inst,
+ std::memory_order_release);
return false;
}
@@ -121,7 +141,3 @@ bool patchTypedEvent(const bool Enable, const uint32_t FuncId,
bool probeRequiredCPUFeatures() XRAY_NEVER_INSTRUMENT { return true; }
} // namespace __xray
-
-extern "C" void __xray_ArgLoggerEntry() XRAY_NEVER_INSTRUMENT {
- // FIXME: this will have to be implemented in the trampoline assembly file
-}
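The immediates fall straight out of the AArch64 B encoding (top six bits 0b000101, imm26 = byte offset / 4); a compile-time sketch checking both constants used above, 0xd503201f being the canonical AArch64 NOP:

#include <cstdint>

constexpr uint32_t AArch64Branch(int32_t ByteOffset) {
  return 0x14000000u | (static_cast<uint32_t>(ByteOffset / 4) & 0x03ffffffu);
}
static_assert(AArch64Branch(24) == 0x14000006, "skip the 6-insn event sled");
static_assert(AArch64Branch(36) == 0x14000009, "skip the 9-insn typed sled");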
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_fdr_logging.cpp b/contrib/llvm-project/compiler-rt/lib/xray/xray_fdr_logging.cpp
index 799814f437f9..378a8c0f4a70 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_fdr_logging.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_fdr_logging.cpp
@@ -15,6 +15,7 @@
//===----------------------------------------------------------------------===//
#include "xray_fdr_logging.h"
#include <cassert>
+#include <cstddef>
#include <errno.h>
#include <limits>
#include <memory>
@@ -140,7 +141,7 @@ static ThreadLocalData &getThreadLocalData() {
}
static XRayFileHeader &fdrCommonHeaderInfo() {
- static std::aligned_storage<sizeof(XRayFileHeader)>::type HStorage;
+ alignas(XRayFileHeader) static std::byte HStorage[sizeof(XRayFileHeader)];
static pthread_once_t OnceInit = PTHREAD_ONCE_INIT;
static bool TSCSupported = true;
static uint64_t CycleFrequency = NanosecondsPerSecond;
@@ -204,7 +205,8 @@ XRayBuffer fdrIterator(const XRayBuffer B) {
// initialized the first time this function is called. We'll update one part
// of this information with some relevant data (in particular the number of
// buffers to expect).
- static std::aligned_storage<sizeof(XRayFileHeader)>::type HeaderStorage;
+ alignas(
+ XRayFileHeader) static std::byte HeaderStorage[sizeof(XRayFileHeader)];
static pthread_once_t HeaderOnce = PTHREAD_ONCE_INIT;
pthread_once(
&HeaderOnce, +[] {
@@ -580,9 +582,9 @@ void fdrLoggingHandleCustomEvent(void *Event,
TLD.Controller->customEvent(TSC, CPU, Event, ReducedEventSize);
}
-void fdrLoggingHandleTypedEvent(
- uint16_t EventType, const void *Event,
- std::size_t EventSize) noexcept XRAY_NEVER_INSTRUMENT {
+void fdrLoggingHandleTypedEvent(size_t EventType, const void *Event,
+ size_t EventSize) noexcept
+ XRAY_NEVER_INSTRUMENT {
auto TC = getTimestamp();
auto &TSC = TC.TSC;
auto &CPU = TC.CPU;
@@ -607,7 +609,8 @@ void fdrLoggingHandleTypedEvent(
return;
int32_t ReducedEventSize = static_cast<int32_t>(EventSize);
- TLD.Controller->typedEvent(TSC, CPU, EventType, Event, ReducedEventSize);
+ TLD.Controller->typedEvent(TSC, CPU, static_cast<uint16_t>(EventType), Event,
+ ReducedEventSize);
}
XRayLogInitStatus fdrLoggingInit(size_t, size_t, void *Options,
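The alignas-plus-std::byte dance above is the standard replacement for std::aligned_storage, which is deprecated as of C++23; a self-contained sketch of the same pattern, with a stand-in Header type:

#include <cstddef>
#include <new>

struct Header {  // stand-in for XRayFileHeader
  unsigned Version;
};

Header &CommonHeader() {
  // Aligned raw bytes plus placement-new, exactly as in the hunks above.
  alignas(Header) static std::byte Storage[sizeof(Header)];
  static Header *H = new (&Storage) Header{3};
  return *H;
}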
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_interface.cpp b/contrib/llvm-project/compiler-rt/lib/xray/xray_interface.cpp
index 73e67618c9d5..5839043fcb93 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_interface.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_interface.cpp
@@ -46,6 +46,8 @@ static const int16_t cSledLength = 12;
static const int16_t cSledLength = 32;
#elif defined(__arm__)
static const int16_t cSledLength = 28;
+#elif SANITIZER_LOONGARCH64
+static const int16_t cSledLength = 48;
#elif SANITIZER_MIPS32
static const int16_t cSledLength = 48;
#elif SANITIZER_MIPS64
@@ -183,7 +185,7 @@ findFunctionSleds(int32_t FuncId,
const XRaySledMap &InstrMap) XRAY_NEVER_INSTRUMENT {
int32_t CurFn = 0;
uint64_t LastFnAddr = 0;
- XRayFunctionSledIndex Index = {nullptr, nullptr};
+ XRayFunctionSledIndex Index = {nullptr, 0};
for (std::size_t I = 0; I < InstrMap.Entries && CurFn <= FuncId; I++) {
const auto &Sled = InstrMap.Sleds[I];
@@ -196,12 +198,10 @@ findFunctionSleds(int32_t FuncId,
if (CurFn == FuncId) {
if (Index.Begin == nullptr)
Index.Begin = &Sled;
- Index.End = &Sled;
+ Index.Size = &Sled - Index.Begin + 1;
}
}
- Index.End += 1;
-
return Index;
}
@@ -235,13 +235,17 @@ XRayPatchingStatus patchFunction(int32_t FuncId,
}
// Now we patch the sleds for this specific function.
- auto SledRange = InstrMap.SledsIndex ? InstrMap.SledsIndex[FuncId - 1]
- : findFunctionSleds(FuncId, InstrMap);
+ XRayFunctionSledIndex SledRange;
+ if (InstrMap.SledsIndex) {
+ SledRange = {InstrMap.SledsIndex[FuncId - 1].fromPCRelative(),
+ InstrMap.SledsIndex[FuncId - 1].Size};
+ } else {
+ SledRange = findFunctionSleds(FuncId, InstrMap);
+ }
auto *f = SledRange.Begin;
- auto *e = SledRange.End;
bool SucceedOnce = false;
- while (f != e)
- SucceedOnce |= patchSled(*f++, Enable, FuncId);
+ for (size_t i = 0; i != SledRange.Size; ++i)
+ SucceedOnce |= patchSled(f[i], Enable, FuncId);
atomic_store(&XRayPatching, false,
memory_order_release);
@@ -365,12 +369,17 @@ XRayPatchingStatus mprotectAndPatchFunction(int32_t FuncId,
// Here we compute the minimum sled and maximum sled associated with a
// particular function ID.
- auto SledRange = InstrMap.SledsIndex ? InstrMap.SledsIndex[FuncId - 1]
- : findFunctionSleds(FuncId, InstrMap);
+ XRayFunctionSledIndex SledRange;
+ if (InstrMap.SledsIndex) {
+ SledRange = {InstrMap.SledsIndex[FuncId - 1].fromPCRelative(),
+ InstrMap.SledsIndex[FuncId - 1].Size};
+ } else {
+ SledRange = findFunctionSleds(FuncId, InstrMap);
+ }
auto *f = SledRange.Begin;
- auto *e = SledRange.End;
+ auto *e = SledRange.Begin + SledRange.Size;
auto *MinSled = f;
- auto *MaxSled = (SledRange.End - 1);
+ auto *MaxSled = e - 1;
while (f != e) {
if (f->address() < MinSled->address())
MinSled = f;
@@ -426,8 +435,8 @@ int __xray_set_customevent_handler(void (*entry)(void *, size_t))
return 0;
}
-int __xray_set_typedevent_handler(void (*entry)(
- uint16_t, const void *, size_t)) XRAY_NEVER_INSTRUMENT {
+int __xray_set_typedevent_handler(void (*entry)(size_t, const void *,
+ size_t)) XRAY_NEVER_INSTRUMENT {
if (atomic_load(&XRayInitialized,
memory_order_acquire)) {
atomic_store(&__xray::XRayPatchedTypedEvent,
@@ -502,9 +511,9 @@ uintptr_t __xray_function_address(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
if (FuncId <= 0 || static_cast<size_t>(FuncId) > InstrMap.Functions)
return 0;
- const XRaySledEntry *Sled = InstrMap.SledsIndex
- ? InstrMap.SledsIndex[FuncId - 1].Begin
- : findFunctionSleds(FuncId, InstrMap).Begin;
+ const XRaySledEntry *Sled =
+ InstrMap.SledsIndex ? InstrMap.SledsIndex[FuncId - 1].fromPCRelative()
+ : findFunctionSleds(FuncId, InstrMap).Begin;
return Sled->function()
// On PPC, function entries are always aligned to 16 bytes. The beginning of a
// sled might be a local entry, which is always +8 based on the global entry.
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_interface_internal.h b/contrib/llvm-project/compiler-rt/lib/xray/xray_interface_internal.h
index 8c5973c58351..80c07c167f64 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_interface_internal.h
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_interface_internal.h
@@ -59,7 +59,13 @@ struct XRaySledEntry {
struct XRayFunctionSledIndex {
const XRaySledEntry *Begin;
- const XRaySledEntry *End;
+ size_t Size;
+ // For an entry in the xray_fn_idx section, the address is relative to the
+ // location of the Begin variable.
+ const XRaySledEntry *fromPCRelative() const {
+ return reinterpret_cast<const XRaySledEntry *>(uintptr_t(&Begin) +
+ uintptr_t(Begin));
+ }
};
}
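The pointer arithmetic in fromPCRelative(), restated standalone (a sketch with an illustrative IndexEntry stand-in): an xray_fn_idx entry now stores Begin as a displacement from the field's own address, so adding the raw field value to &Begin yields the absolute sled pointer.

#include <cstddef>
#include <cstdint>

struct IndexEntry {  // illustrative stand-in for XRayFunctionSledIndex
  const void *Begin;
  size_t Size;
};

const void *ResolvePCRelative(const IndexEntry &E) {
  return reinterpret_cast<const void *>(
      reinterpret_cast<uintptr_t>(&E.Begin) +  // address of the field itself
      reinterpret_cast<uintptr_t>(E.Begin));   // stored displacement
}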
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_loongarch64.cpp b/contrib/llvm-project/compiler-rt/lib/xray/xray_loongarch64.cpp
new file mode 100644
index 000000000000..b839adba00d2
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_loongarch64.cpp
@@ -0,0 +1,160 @@
+//===-------- xray_loongarch64.cpp ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Implementation of loongarch-specific routines.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_common.h"
+#include "xray_defs.h"
+#include "xray_interface_internal.h"
+#include <atomic>
+
+namespace __xray {
+
+enum RegNum : uint32_t {
+ RN_RA = 1,
+ RN_SP = 3,
+ RN_T0 = 12,
+ RN_T1 = 13,
+};
+
+// Encode instructions in the 2RIx format, where the primary formats here
+// are 2RI12-type and 2RI16-type.
+static inline uint32_t
+encodeInstruction2RIx(uint32_t Opcode, uint32_t Rd, uint32_t Rj,
+ uint32_t Imm) XRAY_NEVER_INSTRUMENT {
+ return Opcode | (Imm << 10) | (Rj << 5) | Rd;
+}
+
+// Encode instructions in 1RI20 format, e.g. lu12i.w/lu32i.d.
+static inline uint32_t
+encodeInstruction1RI20(uint32_t Opcode, uint32_t Rd,
+ uint32_t Imm) XRAY_NEVER_INSTRUMENT {
+ return Opcode | (Imm << 5) | Rd;
+}
+
+static inline bool patchSled(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled,
+ void (*TracingHook)()) XRAY_NEVER_INSTRUMENT {
+ // When |Enable| == true,
+ // We replace the following compile-time stub (sled):
+ //
+ // .Lxray_sled_beginN:
+ // B .Lxray_sled_endN
+ // 11 NOPs (44 bytes)
+ // .Lxray_sled_endN:
+ //
+ // With the following runtime patch:
+ //
+ // xray_sled_n:
+ // addi.d sp, sp, -16 ; create the stack frame
+ // st.d ra, sp, 8 ; save the return address
+ // lu12i.w t0, %abs_hi20(__xray_FunctionEntry/Exit)
+ // ori t0, t0, %abs_lo12(__xray_FunctionEntry/Exit)
+ // lu32i.d t0, %abs64_lo20(__xray_FunctionEntry/Exit)
+ // lu52i.d t0, t0, %abs64_hi12(__xray_FunctionEntry/Exit)
+ // lu12i.w t1, %abs_hi20(function_id)
+ // ori t1, t1, %abs_lo12(function_id) ; pass the function id
+ // jirl ra, t0, 0 ; call the tracing hook
+ // ld.d ra, sp, 8 ; restore the return address
+ // addi.d sp, sp, 16 ; de-allocate the stack frame
+ //
+  // Replacing the first 4-byte instruction must be the last, atomic
+  // operation, so that user code reaching the sled concurrently either
+  // jumps over the whole sled or executes the whole sled once it is ready.
+ //
+  // When |Enable| == false, we set the first instruction in the sled back
+  // to "B #48".
+
+ uint32_t *Address = reinterpret_cast<uint32_t *>(Sled.address());
+ if (Enable) {
+ uint32_t LoTracingHookAddr = reinterpret_cast<int64_t>(TracingHook) & 0xfff;
+ uint32_t HiTracingHookAddr =
+ (reinterpret_cast<int64_t>(TracingHook) >> 12) & 0xfffff;
+ uint32_t HigherTracingHookAddr =
+ (reinterpret_cast<int64_t>(TracingHook) >> 32) & 0xfffff;
+ uint32_t HighestTracingHookAddr =
+ (reinterpret_cast<int64_t>(TracingHook) >> 52) & 0xfff;
+ uint32_t LoFunctionID = FuncId & 0xfff;
+ uint32_t HiFunctionID = (FuncId >> 12) & 0xfffff;
+ Address[1] = encodeInstruction2RIx(0x29c00000, RegNum::RN_RA, RegNum::RN_SP,
+ 0x8); // st.d ra, sp, 8
+ Address[2] = encodeInstruction1RI20(
+ 0x14000000, RegNum::RN_T0,
+ HiTracingHookAddr); // lu12i.w t0, HiTracingHookAddr
+ Address[3] = encodeInstruction2RIx(
+ 0x03800000, RegNum::RN_T0, RegNum::RN_T0,
+ LoTracingHookAddr); // ori t0, t0, LoTracingHookAddr
+ Address[4] = encodeInstruction1RI20(
+ 0x16000000, RegNum::RN_T0,
+ HigherTracingHookAddr); // lu32i.d t0, HigherTracingHookAddr
+ Address[5] = encodeInstruction2RIx(
+ 0x03000000, RegNum::RN_T0, RegNum::RN_T0,
+ HighestTracingHookAddr); // lu52i.d t0, t0, HighestTracingHookAddr
+ Address[6] =
+ encodeInstruction1RI20(0x14000000, RegNum::RN_T1,
+ HiFunctionID); // lu12i.w t1, HiFunctionID
+ Address[7] =
+ encodeInstruction2RIx(0x03800000, RegNum::RN_T1, RegNum::RN_T1,
+ LoFunctionID); // ori t1, t1, LoFunctionID
+ Address[8] = encodeInstruction2RIx(0x4c000000, RegNum::RN_RA, RegNum::RN_T0,
+ 0); // jirl ra, t0, 0
+ Address[9] = encodeInstruction2RIx(0x28c00000, RegNum::RN_RA, RegNum::RN_SP,
+ 0x8); // ld.d ra, sp, 8
+ Address[10] = encodeInstruction2RIx(
+ 0x02c00000, RegNum::RN_SP, RegNum::RN_SP, 0x10); // addi.d sp, sp, 16
+ uint32_t CreateStackSpace = encodeInstruction2RIx(
+ 0x02c00000, RegNum::RN_SP, RegNum::RN_SP, 0xff0); // addi.d sp, sp, -16
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint32_t> *>(Address), CreateStackSpace,
+ std::memory_order_release);
+ } else {
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint32_t> *>(Address),
+ uint32_t(0x50003000), std::memory_order_release); // b #48
+ }
+ return true;
+}
+
+bool patchFunctionEntry(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled,
+ void (*Trampoline)()) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, Trampoline);
+}
+
+bool patchFunctionExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionExit);
+}
+
+bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // TODO: In the future we'd need to distinguish between non-tail exits and
+ // tail exits for better information preservation.
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionExit);
+}
+
+bool patchCustomEvent(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // FIXME: Implement in loongarch?
+ return false;
+}
+
+bool patchTypedEvent(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // FIXME: Implement in loongarch?
+ return false;
+}
+} // namespace __xray
+
+extern "C" void __xray_ArgLoggerEntry() XRAY_NEVER_INSTRUMENT {
+ // TODO: This will have to be implemented in the trampoline assembly file.
+}
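A compile-time check of one encoding above, reusing the 2RI12 layout from encodeInstruction2RIx (opcode | imm << 10 | rj << 5 | rd): addi.d $sp, $sp, -16 has rd = rj = 3 ($sp) and imm12 = -16 masked to 0xff0.

#include <cstdint>

constexpr uint32_t Encode2RIx(uint32_t Op, uint32_t Rd, uint32_t Rj,
                              uint32_t Imm) {
  return Op | (Imm << 10) | (Rj << 5) | Rd;
}
// Matches the CreateStackSpace instruction built in patchSled above.
static_assert(Encode2RIx(0x02c00000, 3, 3, 0xff0) == 0x02ffc063,
              "addi.d sp, sp, -16");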
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_profiling.cpp b/contrib/llvm-project/compiler-rt/lib/xray/xray_profiling.cpp
index 81c33fae88c1..259ec65a76a1 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_profiling.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_profiling.cpp
@@ -253,8 +253,8 @@ XRayLogFlushStatus profilingFlush() XRAY_NEVER_INSTRUMENT {
reinterpret_cast<const char *>(B.Data) + B.Size);
B = profileCollectorService::nextBuffer(B);
}
+ LogWriter::Close(LW);
}
- LogWriter::Close(LW);
}
}
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_AArch64.S b/contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_AArch64.S
index 3bf52cef60fe..6f10dda3602b 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_AArch64.S
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_AArch64.S
@@ -1,163 +1,169 @@
#include "../builtins/assembly.h"
+#include "../sanitizer_common/sanitizer_asm.h"
- .text
- /* The variable containing the handler function pointer */
- .global _ZN6__xray19XRayPatchedFunctionE
- /* Word-aligned function entry point */
- .p2align 2
- /* Let C/C++ see the symbol */
- .global __xray_FunctionEntry
- .hidden __xray_FunctionEntry
- .type __xray_FunctionEntry, %function
- /* In C++ it is void extern "C" __xray_FunctionEntry(uint32_t FuncId) with
- FuncId passed in W0 register. */
-__xray_FunctionEntry:
+.macro SAVE_REGISTERS
+ stp x1, x2, [sp, #-16]!
+ stp x3, x4, [sp, #-16]!
+ stp x5, x6, [sp, #-16]!
+ stp x7, x30, [sp, #-16]!
+ stp q0, q1, [sp, #-32]!
+ stp q2, q3, [sp, #-32]!
+ stp q4, q5, [sp, #-32]!
+ stp q6, q7, [sp, #-32]!
+  // x8 is the indirect result register and needs to be preserved
+  // for the body of the function to use.
+ stp x8, x0, [sp, #-16]!
+.endm
+
+.macro RESTORE_REGISTERS
+ ldp x8, x0, [sp], #16
+ ldp q6, q7, [sp], #32
+ ldp q4, q5, [sp], #32
+ ldp q2, q3, [sp], #32
+ ldp q0, q1, [sp], #32
+ ldp x7, x30, [sp], #16
+ ldp x5, x6, [sp], #16
+ ldp x3, x4, [sp], #16
+ ldp x1, x2, [sp], #16
+.endm
+
+.text
+.p2align 2
+.global ASM_SYMBOL(__xray_FunctionEntry)
+ASM_HIDDEN(__xray_FunctionEntry)
+ASM_TYPE_FUNCTION(__xray_FunctionEntry)
+ASM_SYMBOL(__xray_FunctionEntry):
/* Move the return address beyond the end of sled data. The 12 bytes of
data are inserted in the code of the runtime patch, between the call
instruction and the instruction returned into. The data contains 32
bits of instrumented function ID and 64 bits of the address of
the current trampoline. */
- ADD X30, X30, #12
- /* Push the registers which may be modified by the handler function */
- STP X1, X2, [SP, #-16]!
- STP X3, X4, [SP, #-16]!
- STP X5, X6, [SP, #-16]!
- STP X7, X30, [SP, #-16]!
- STP Q0, Q1, [SP, #-32]!
- STP Q2, Q3, [SP, #-32]!
- STP Q4, Q5, [SP, #-32]!
- STP Q6, Q7, [SP, #-32]!
- /* X8 is the indirect result register and needs to be preserved for the body
- of the function to use */
- STP X8, X0, [SP, #-16]!
+ add x30, x30, #12
+ // Push the registers which may be modified by the handler function.
+ SAVE_REGISTERS
- /* Load the page address of _ZN6__xray19XRayPatchedFunctionE into X1 */
- ADRP X1, _ZN6__xray19XRayPatchedFunctionE
- /* Load the handler function pointer into X2 */
- LDR X2, [X1, #:lo12:_ZN6__xray19XRayPatchedFunctionE]
- /* Handler address is nullptr if handler is not set */
- CMP X2, #0
- BEQ FunctionEntry_restore
- /* Function ID is already in W0 (the first parameter).
- X1=0 means that we are tracing an entry event */
- MOV X1, #0
- /* Call the handler with 2 parameters in W0 and X1 */
- BLR X2
-FunctionEntry_restore:
- /* Pop the saved registers */
- LDP X8, X0, [SP], #16
- LDP Q6, Q7, [SP], #32
- LDP Q4, Q5, [SP], #32
- LDP Q2, Q3, [SP], #32
- LDP Q0, Q1, [SP], #32
- LDP X7, X30, [SP], #16
- LDP X5, X6, [SP], #16
- LDP X3, X4, [SP], #16
- LDP X1, X2, [SP], #16
- RET
+ // Load the handler function pointer.
+ adrp x2, ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)
+ ldr x2, [x2, #:lo12:ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)]
+ cbz x2, 1f
+ // Set w0 to the function ID (w17). Set x1 to XRayEntryType::ENTRY = 0.
+ mov w0, w17
+ mov x1, #0
+ // Call the handler with 2 parameters.
+ blr x2
+1:
+ RESTORE_REGISTERS
+ ret
+ASM_SIZE(__xray_FunctionEntry)
- /* Word-aligned function entry point */
- .p2align 2
- /* Let C/C++ see the symbol */
- .global __xray_FunctionExit
- .hidden __xray_FunctionExit
- .type __xray_FunctionExit, %function
- /* In C++ it is void extern "C" __xray_FunctionExit(uint32_t FuncId) with
- FuncId passed in W0 register. */
-__xray_FunctionExit:
+.p2align 2
+.global ASM_SYMBOL(__xray_FunctionExit)
+ASM_HIDDEN(__xray_FunctionExit)
+ASM_TYPE_FUNCTION(__xray_FunctionExit)
+ASM_SYMBOL(__xray_FunctionExit):
/* Move the return address beyond the end of sled data. The 12 bytes of
data are inserted in the code of the runtime patch, between the call
instruction and the instruction returned into. The data contains 32
bits of instrumented function ID and 64 bits of the address of
the current trampoline. */
- ADD X30, X30, #12
- /* Push the registers which may be modified by the handler function */
- STP X1, X2, [SP, #-16]!
- STP X3, X4, [SP, #-16]!
- STP X5, X6, [SP, #-16]!
- STP X7, X30, [SP, #-16]!
- STP Q0, Q1, [SP, #-32]!
- STP Q2, Q3, [SP, #-32]!
- STP Q4, Q5, [SP, #-32]!
- STP Q6, Q7, [SP, #-32]!
- /* X8 is the indirect result register and needs to be preserved for the body
- of the function to use */
- STP X8, X0, [SP, #-16]!
+ add x30, x30, #12
+ SAVE_REGISTERS
- /* Load the page address of _ZN6__xray19XRayPatchedFunctionE into X1 */
- ADRP X1, _ZN6__xray19XRayPatchedFunctionE
- /* Load the handler function pointer into X2 */
- LDR X2, [X1, #:lo12:_ZN6__xray19XRayPatchedFunctionE]
- /* Handler address is nullptr if handler is not set */
- CMP X2, #0
- BEQ FunctionExit_restore
- /* Function ID is already in W0 (the first parameter).
- X1=1 means that we are tracing an exit event */
- MOV X1, #1
- /* Call the handler with 2 parameters in W0 and X1 */
- BLR X2
-FunctionExit_restore:
- LDP X8, X0, [SP], #16
- LDP Q6, Q7, [SP], #32
- LDP Q4, Q5, [SP], #32
- LDP Q2, Q3, [SP], #32
- LDP Q0, Q1, [SP], #32
- LDP X7, X30, [SP], #16
- LDP X5, X6, [SP], #16
- LDP X3, X4, [SP], #16
- LDP X1, X2, [SP], #16
- RET
+ // Load the handler function pointer into x2.
+ adrp x2, ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)
+ ldr x2, [x2, #:lo12:ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)]
+ cbz x2, 1f
+ // Set w0 to the function ID (w17). Set x1 to XRayEntryType::EXIT = 1.
+ mov w0, w17
+ mov x1, #1
+ // Call the handler with 2 parameters.
+ blr x2
+1:
+ RESTORE_REGISTERS
+ ret
+ASM_SIZE(__xray_FunctionExit)
- /* Word-aligned function entry point */
- .p2align 2
- /* Let C/C++ see the symbol */
- .global __xray_FunctionTailExit
- .hidden __xray_FunctionTailExit
- .type __xray_FunctionTailExit, %function
- /* In C++ it is void extern "C" __xray_FunctionTailExit(uint32_t FuncId)
- with FuncId passed in W0 register. */
-__xray_FunctionTailExit:
+.p2align 2
+.global ASM_SYMBOL(__xray_FunctionTailExit)
+ASM_HIDDEN(__xray_FunctionTailExit)
+ASM_TYPE_FUNCTION(__xray_FunctionTailExit)
+ASM_SYMBOL(__xray_FunctionTailExit):
/* Move the return address beyond the end of sled data. The 12 bytes of
data are inserted in the code of the runtime patch, between the call
instruction and the instruction returned into. The data contains 32
bits of instrumented function ID and 64 bits of the address of
the current trampoline. */
- ADD X30, X30, #12
- /* Push the registers which may be modified by the handler function */
- STP X1, X2, [SP, #-16]!
- STP X3, X4, [SP, #-16]!
- STP X5, X6, [SP, #-16]!
- STP X7, X30, [SP, #-16]!
- /* Push the parameters of the tail called function */
- STP Q0, Q1, [SP, #-32]!
- STP Q2, Q3, [SP, #-32]!
- STP Q4, Q5, [SP, #-32]!
- STP Q6, Q7, [SP, #-32]!
- /* Load the page address of _ZN6__xray19XRayPatchedFunctionE into X1 */
- ADRP X1, _ZN6__xray19XRayPatchedFunctionE
- /* Load the handler function pointer into X2 */
- LDR X2, [X1, #:lo12:_ZN6__xray19XRayPatchedFunctionE]
- /* Handler address is nullptr if handler is not set */
- CMP X2, #0
- BEQ FunctionTailExit_restore
- /* Function ID is already in W0 (the first parameter).
- X1=2 means that we are tracing a tail exit event, but before the
- logging part of XRay is ready, we pretend that here a normal function
- exit happens, so we give the handler code 1 */
- MOV X1, #1
- /* Call the handler with 2 parameters in W0 and X1 */
- BLR X2
-FunctionTailExit_restore:
- /* Pop the parameters of the tail called function */
- LDP Q6, Q7, [SP], #32
- LDP Q4, Q5, [SP], #32
- LDP Q2, Q3, [SP], #32
- LDP Q0, Q1, [SP], #32
- /* Pop the registers which may be modified by the handler function */
- LDP X7, X30, [SP], #16
- LDP X5, X6, [SP], #16
- LDP X3, X4, [SP], #16
- LDP X1, X2, [SP], #16
- RET
+ add x30, x30, #12
+ // Save the registers which may be modified by the handler function.
+ SAVE_REGISTERS
+ // Load the handler function pointer into x2.
+ adrp x2, ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)
+ ldr x2, [x2, #:lo12:ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)]
+ cbz x2, 1f
+ // Set w0 to the function ID (w17). Set x1 to XRayEntryType::TAIL = 2.
+ mov w0, w17
+ mov x1, #2
+ // Call the handler with 2 parameters.
+ blr x2
+1:
+ RESTORE_REGISTERS
+ ret
+ASM_SIZE(__xray_FunctionTailExit)
+
+.p2align 2
+.global ASM_SYMBOL(__xray_ArgLoggerEntry)
+ASM_HIDDEN(__xray_ArgLoggerEntry)
+ASM_TYPE_FUNCTION(__xray_ArgLoggerEntry)
+ASM_SYMBOL(__xray_ArgLoggerEntry):
+ add x30, x30, #12
+ // Push the registers which may be modified by the handler function.
+ SAVE_REGISTERS
+
+ adrp x8, ASM_SYMBOL(_ZN6__xray13XRayArgLoggerE)
+ ldr x8, [x8, #:lo12:ASM_SYMBOL(_ZN6__xray13XRayArgLoggerE)]
+ cbnz x8, 2f
+
+ // Load the handler function pointer.
+ adrp x8, ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)
+ ldr x8, [x8, #:lo12:ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)]
+ cbz x8, 1f
+
+2:
+ mov x2, x0
+ mov x1, #3 // XRayEntryType::LOG_ARGS_ENTRY
+ mov w0, w17
+ blr x8
+
+1:
+ RESTORE_REGISTERS
+ ret
+ASM_SIZE(__xray_ArgLoggerEntry)
+
+.global ASM_SYMBOL(__xray_CustomEvent)
+ASM_HIDDEN(__xray_CustomEvent)
+ASM_TYPE_FUNCTION(__xray_CustomEvent)
+ASM_SYMBOL(__xray_CustomEvent):
+ SAVE_REGISTERS
+ adrp x8, ASM_SYMBOL(_ZN6__xray22XRayPatchedCustomEventE)
+ ldr x8, [x8, #:lo12:ASM_SYMBOL(_ZN6__xray22XRayPatchedCustomEventE)]
+ cbz x8, 1f
+ blr x8
+1:
+ RESTORE_REGISTERS
+ ret
+ASM_SIZE(__xray_CustomEvent)
+
+.global ASM_SYMBOL(__xray_TypedEvent)
+ASM_HIDDEN(__xray_TypedEvent)
+ASM_TYPE_FUNCTION(__xray_TypedEvent)
+ASM_SYMBOL(__xray_TypedEvent):
+ SAVE_REGISTERS
+ adrp x8, ASM_SYMBOL(_ZN6__xray21XRayPatchedTypedEventE)
+ ldr x8, [x8, #:lo12:ASM_SYMBOL(_ZN6__xray21XRayPatchedTypedEventE)]
+ cbz x8, 1f
+ blr x8
+1:
+ RESTORE_REGISTERS
+ ret
+ASM_SIZE(__xray_TypedEvent)
NO_EXEC_STACK_DIRECTIVE
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_loongarch64.S b/contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_loongarch64.S
new file mode 100644
index 000000000000..fcbefcc5f7a2
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_loongarch64.S
@@ -0,0 +1,124 @@
+//===-- xray_trampoline_loongarch64.S ---------------------------*- ASM -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// This implements the loongarch-specific assembler for the trampolines.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../sanitizer_common/sanitizer_asm.h"
+
+#define FROM_0_TO_7 0,1,2,3,4,5,6,7
+#define FROM_7_TO_0 7,6,5,4,3,2,1,0
+
+.macro SAVE_ARG_REGISTERS
+ .irp i,FROM_7_TO_0
+ st.d $a\i, $sp, (8 * 8 + 8 * \i)
+ .endr
+ .irp i,FROM_7_TO_0
+ fst.d $f\i, $sp, (8 * \i)
+ .endr
+.endm
+
+.macro RESTORE_ARG_REGISTERS
+ .irp i,FROM_0_TO_7
+ fld.d $f\i, $sp, (8 * \i)
+ .endr
+ .irp i,FROM_0_TO_7
+ ld.d $a\i, $sp, (8 * 8 + 8 * \i)
+ .endr
+.endm
+
+.macro SAVE_RET_REGISTERS
+ st.d $a1, $sp, 24
+ st.d $a0, $sp, 16
+ fst.d $f1, $sp, 8
+ fst.d $f0, $sp, 0
+.endm
+
+.macro RESTORE_RET_REGISTERS
+ fld.d $f0, $sp, 0
+ fld.d $f1, $sp, 8
+ ld.d $a0, $sp, 16
+ ld.d $a1, $sp, 24
+.endm
+
+ .text
+ .file "xray_trampoline_loongarch64.S"
+ .globl ASM_SYMBOL(__xray_FunctionEntry)
+ ASM_HIDDEN(__xray_FunctionEntry)
+ .p2align 2
+ ASM_TYPE_FUNCTION(__xray_FunctionEntry)
+ASM_SYMBOL(__xray_FunctionEntry):
+ .cfi_startproc
+ // Save argument registers before doing any actual work.
+ .cfi_def_cfa_offset 136
+ addi.d $sp, $sp, -136
+ st.d $ra, $sp, 128
+ .cfi_offset 1, -8
+ SAVE_ARG_REGISTERS
+
+ la.got $t2, ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)
+ ld.d $t2, $t2, 0
+
+ beqz $t2, FunctionEntry_restore
+
+ // a1=0 means that we are tracing an entry event.
+ move $a1, $zero
+ // Function ID is in t1 (the first parameter).
+ move $a0, $t1
+ jirl $ra, $t2, 0
+
+FunctionEntry_restore:
+ // Restore argument registers.
+ RESTORE_ARG_REGISTERS
+ ld.d $ra, $sp, 128
+ addi.d $sp, $sp, 136
+ ret
+FunctionEntry_end:
+ ASM_SIZE(__xray_FunctionEntry)
+ .cfi_endproc
+
+ .text
+ .globl ASM_SYMBOL(__xray_FunctionExit)
+ ASM_HIDDEN(__xray_FunctionExit)
+ .p2align 2
+ ASM_TYPE_FUNCTION(__xray_FunctionExit)
+ASM_SYMBOL(__xray_FunctionExit):
+ .cfi_startproc
+ // Save return registers before doing any actual work.
+ .cfi_def_cfa_offset 48
+ addi.d $sp, $sp, -48
+ st.d $ra, $sp, 40
+ .cfi_offset 1, -8
+ st.d $fp, $sp, 32
+ SAVE_RET_REGISTERS
+
+ la.got $t2, ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)
+ ld.d $t2, $t2, 0
+
+ beqz $t2, FunctionExit_restore
+
+ // a1=1 means that we are tracing an exit event.
+ li.w $a1, 1
+ // Function ID is in t1 (the first parameter).
+ move $a0, $t1
+ jirl $ra, $t2, 0
+
+FunctionExit_restore:
+ // Restore return registers.
+ RESTORE_RET_REGISTERS
+ ld.d $fp, $sp, 32
+ ld.d $ra, $sp, 40
+ addi.d $sp, $sp, 48
+ ret
+
+FunctionExit_end:
+ ASM_SIZE(__xray_FunctionExit)
+ .cfi_endproc
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_x86_64.S b/contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_x86_64.S
index 02cf69f766c4..0f00bcc41508 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_x86_64.S
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_x86_64.S
@@ -124,14 +124,14 @@ ASM_SYMBOL(__xray_FunctionEntry):
// On x86/amd64, a simple (type-aligned) MOV instruction is enough.
movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
testq %rax, %rax
- je .Ltmp0
+ je LOCAL_LABEL(tmp0)
// The patched function prologue puts its xray_instr_map index into %r10d.
movl %r10d, %edi
xor %esi,%esi
callq *%rax
-.Ltmp0:
+LOCAL_LABEL(tmp0):
RESTORE_REGISTERS
RESTORE_STACK_ALIGNMENT
retq
@@ -162,13 +162,13 @@ ASM_SYMBOL(__xray_FunctionExit):
movq %rdx, 0(%rsp)
movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
testq %rax,%rax
- je .Ltmp2
+ je LOCAL_LABEL(tmp2)
movl %r10d, %edi
movl $1, %esi
callq *%rax
-.Ltmp2:
+LOCAL_LABEL(tmp2):
// Restore the important registers.
movq 48(%rsp), %rbp
movupd 32(%rsp), %xmm0
@@ -198,13 +198,13 @@ ASM_SYMBOL(__xray_FunctionTailExit):
movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
testq %rax,%rax
- je .Ltmp4
+ je LOCAL_LABEL(tmp4)
movl %r10d, %edi
movl $2, %esi
callq *%rax
-.Ltmp4:
+LOCAL_LABEL(tmp4):
RESTORE_REGISTERS
RESTORE_STACK_ALIGNMENT
retq
@@ -227,14 +227,14 @@ ASM_SYMBOL(__xray_ArgLoggerEntry):
// Again, these function pointer loads must be atomic; MOV is fine.
movq ASM_SYMBOL(_ZN6__xray13XRayArgLoggerE)(%rip), %rax
testq %rax, %rax
- jne .Larg1entryLog
+ jne LOCAL_LABEL(arg1entryLog)
// If [arg1 logging handler] not set, defer to no-arg logging.
movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
testq %rax, %rax
- je .Larg1entryFail
+ je LOCAL_LABEL(arg1entryFail)
-.Larg1entryLog:
+LOCAL_LABEL(arg1entryLog):
// First argument will become the third
movq %rdi, %rdx
@@ -247,7 +247,7 @@ ASM_SYMBOL(__xray_ArgLoggerEntry):
callq *%rax
-.Larg1entryFail:
+LOCAL_LABEL(arg1entryFail):
RESTORE_REGISTERS
RESTORE_STACK_ALIGNMENT
retq
@@ -270,11 +270,11 @@ ASM_SYMBOL(__xray_CustomEvent):
// already.
movq ASM_SYMBOL(_ZN6__xray22XRayPatchedCustomEventE)(%rip), %rax
testq %rax,%rax
- je .LcustomEventCleanup
+ je LOCAL_LABEL(customEventCleanup)
callq *%rax
-.LcustomEventCleanup:
+LOCAL_LABEL(customEventCleanup):
RESTORE_REGISTERS
retq
# LLVM-MCA-END
@@ -296,11 +296,11 @@ ASM_SYMBOL(__xray_TypedEvent):
// and rdx without our intervention.
movq ASM_SYMBOL(_ZN6__xray21XRayPatchedTypedEventE)(%rip), %rax
testq %rax,%rax
- je .LtypedEventCleanup
+ je LOCAL_LABEL(typedEventCleanup)
callq *%rax
-.LtypedEventCleanup:
+LOCAL_LABEL(typedEventCleanup):
RESTORE_REGISTERS
retq
# LLVM-MCA-END
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_tsc.h b/contrib/llvm-project/compiler-rt/lib/xray/xray_tsc.h
index 58347dca5f7a..e1cafe1bf11d 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_tsc.h
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_tsc.h
@@ -43,7 +43,7 @@ inline uint64_t getTSCFrequency() XRAY_NEVER_INSTRUMENT {
#elif defined(__powerpc64__)
#include "xray_powerpc64.inc"
#elif defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
- defined(__hexagon__)
+ defined(__hexagon__) || defined(__loongarch_lp64)
// Emulated TSC.
// There is no instruction like RDTSCP in user mode on ARM. ARM's CP15 does
// not have a constant frequency like TSC on x86(_64), it may go faster
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_x86_64.cpp b/contrib/llvm-project/compiler-rt/lib/xray/xray_x86_64.cpp
index 1bf241c1223f..b9666a40861d 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_x86_64.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_x86_64.cpp
@@ -250,10 +250,8 @@ bool patchCustomEvent(const bool Enable, const uint32_t FuncId,
const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
// Here we do the dance of replacing the following sled:
//
- // In Version 0:
- //
// xray_sled_n:
- // jmp +20 // 2 bytes
+ // jmp +15 // 2 bytes
// ...
//
// With the following:
@@ -262,36 +260,17 @@ bool patchCustomEvent(const bool Enable, const uint32_t FuncId,
// ...
//
//
- // The "unpatch" should just turn the 'nopw' back to a 'jmp +20'.
- //
- // ---
- //
- // In Version 1 or 2:
- //
- // The jump offset is now 15 bytes (0x0f), so when restoring the nopw back
- // to a jmp, use 15 bytes instead.
- //
+ // The "unpatch" should just turn the 'nopw' back to a 'jmp +15'.
const uint64_t Address = Sled.address();
if (Enable) {
std::atomic_store_explicit(
reinterpret_cast<std::atomic<uint16_t> *>(Address), NopwSeq,
std::memory_order_release);
} else {
- switch (Sled.Version) {
- case 1:
- case 2:
- std::atomic_store_explicit(
- reinterpret_cast<std::atomic<uint16_t> *>(Address), Jmp15Seq,
- std::memory_order_release);
- break;
- case 0:
- default:
- std::atomic_store_explicit(
- reinterpret_cast<std::atomic<uint16_t> *>(Address), Jmp20Seq,
- std::memory_order_release);
- break;
- }
- }
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint16_t> *>(Address), Jmp15Seq,
+ std::memory_order_release);
+ }
return false;
}
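For the record, the byte-level arithmetic behind "jmp +15" (a sketch assuming the little-endian uint16_t patterns stored by the atomics above): jmp rel8 is 0xEB followed by a displacement counted from the end of the two-byte instruction, so skipping 15 bytes from the sled start needs a displacement of 13; the enabled state stores the two-byte nop "66 90" instead.

#include <cstdint>

constexpr uint16_t JmpRel8LE(uint8_t Disp) {
  return static_cast<uint16_t>(0xeb | (Disp << 8));  // little-endian pair
}
static_assert(JmpRel8LE(15 - 2) == 0x0deb, "the Jmp15Seq pattern");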