From 3a1720af1d7f43edc5b214cde0be11bfb94d077e Mon Sep 17 00:00:00 2001
From: Dimitry Andric
Date: Wed, 23 Oct 2019 17:52:22 +0000
Subject: Vendor import of stripped compiler-rt trunk r375505, the last commit
 before the upstream Subversion repository was made read-only, and the LLVM
 project migrated to GitHub:

  https://llvm.org/svn/llvm-project/compiler-rt/trunk@375505

---
 include/fuzzer/FuzzedDataProvider.h | 299 ++
 include/sanitizer/dfsan_interface.h | 2 +-
 include/sanitizer/tsan_interface_atomic.h | 8 +-
 include/sanitizer/ubsan_interface.h | 32 +
 lib/asan/asan_activation.cc | 143 -
 lib/asan/asan_activation.cpp | 143 +
 lib/asan/asan_allocator.cc | 1119 --------
 lib/asan/asan_allocator.cpp | 1119 ++++++++
 lib/asan/asan_allocator.h | 2 +-
 lib/asan/asan_debugging.cc | 146 -
 lib/asan/asan_debugging.cpp | 146 +
 lib/asan/asan_descriptions.cc | 501 ----
 lib/asan/asan_descriptions.cpp | 501 ++++
 lib/asan/asan_descriptions.h | 4 +-
 lib/asan/asan_errors.cc | 597 ----
 lib/asan/asan_errors.cpp | 598 ++++
 lib/asan/asan_errors.h | 3 +-
 lib/asan/asan_fake_stack.cc | 282 --
 lib/asan/asan_fake_stack.cpp | 282 ++
 lib/asan/asan_fake_stack.h | 2 +-
 lib/asan/asan_flags.cc | 214 --
 lib/asan/asan_flags.cpp | 214 ++
 lib/asan/asan_flags.inc | 13 +-
 lib/asan/asan_fuchsia.cc | 224 --
 lib/asan/asan_fuchsia.cpp | 224 ++
 lib/asan/asan_globals.cc | 465 ----
 lib/asan/asan_globals.cpp | 463 ++++
 lib/asan/asan_globals_win.cc | 61 -
 lib/asan/asan_globals_win.cpp | 61 +
 lib/asan/asan_interceptors.cc | 675 -----
 lib/asan/asan_interceptors.cpp | 722 +++++
 lib/asan/asan_interceptors.h | 14 +-
 lib/asan/asan_interceptors_memintrinsics.cc | 43 -
 lib/asan/asan_interceptors_memintrinsics.cpp | 43 +
 lib/asan/asan_interceptors_memintrinsics.h | 2 +-
 lib/asan/asan_internal.h | 12 +-
 lib/asan/asan_linux.cc | 260 --
 lib/asan/asan_linux.cpp | 260 ++
 lib/asan/asan_mac.cc | 331 ---
 lib/asan/asan_mac.cpp | 331 +++
 lib/asan/asan_malloc_linux.cc | 307 ---
 lib/asan/asan_malloc_linux.cpp | 307 +++
 lib/asan/asan_malloc_mac.cc | 102 -
 lib/asan/asan_malloc_mac.cpp | 102 +
 lib/asan/asan_malloc_win.cc | 553 ----
 lib/asan/asan_malloc_win.cpp | 554 ++++
 lib/asan/asan_memory_profile.cc | 129 -
 lib/asan/asan_memory_profile.cpp | 129 +
 lib/asan/asan_new_delete.cc | 204 --
 lib/asan/asan_new_delete.cpp | 204 ++
 lib/asan/asan_poisoning.cc | 460 ----
 lib/asan/asan_poisoning.cpp | 460 ++++
 lib/asan/asan_posix.cc | 117 -
 lib/asan/asan_posix.cpp | 117 +
 lib/asan/asan_preinit.cc | 24 -
 lib/asan/asan_preinit.cpp | 24 +
 lib/asan/asan_premap_shadow.cc | 78 -
 lib/asan/asan_premap_shadow.cpp | 78 +
 lib/asan/asan_report.cc | 558 ----
 lib/asan/asan_report.cpp | 562 ++++
 lib/asan/asan_rtems.cc | 258 --
 lib/asan/asan_rtems.cpp | 258 ++
 lib/asan/asan_rtl.cc | 626 -----
 lib/asan/asan_rtl.cpp | 626 +++++
 lib/asan/asan_scariness_score.h | 2 +-
 lib/asan/asan_shadow_setup.cc | 164 --
 lib/asan/asan_shadow_setup.cpp | 163 ++
 lib/asan/asan_stack.cc | 88 -
 lib/asan/asan_stack.cpp | 88 +
 lib/asan/asan_stack.h | 2 +-
 lib/asan/asan_stats.cc | 173 --
 lib/asan/asan_stats.cpp | 173 ++
 lib/asan/asan_suppressions.cc | 104 -
 lib/asan/asan_suppressions.cpp | 104 +
 lib/asan/asan_suppressions.h | 2 +-
 lib/asan/asan_thread.cc | 535 ----
 lib/asan/asan_thread.cpp | 536 ++++
 lib/asan/asan_thread.h | 2 +-
 lib/asan/asan_win.cc | 401 ---
 lib/asan/asan_win.cpp | 401 +++
 lib/asan/asan_win_dll_thunk.cc | 152 --
 lib/asan/asan_win_dll_thunk.cpp | 152 ++
 lib/asan/asan_win_dynamic_runtime_thunk.cc | 130 -
 lib/asan/asan_win_dynamic_runtime_thunk.cpp | 130 +
 lib/asan/asan_win_weak_interception.cc | 22 -
lib/asan/asan_win_weak_interception.cpp | 22 + lib/builtins/aarch64/fp_mode.c | 59 + lib/builtins/adddf3.c | 3 +- lib/builtins/addsf3.c | 3 +- lib/builtins/addtf3.c | 5 +- lib/builtins/arm/fp_mode.c | 59 + lib/builtins/atomic.c | 4 +- lib/builtins/clear_cache.c | 68 +- lib/builtins/cpu_model.c | 12 +- lib/builtins/divtf3.c | 2 +- lib/builtins/emutls.c | 11 + lib/builtins/extenddftf2.c | 2 +- lib/builtins/extendsftf2.c | 2 +- lib/builtins/fixunsxfdi.c | 11 + lib/builtins/fixunsxfsi.c | 11 + lib/builtins/fixxfdi.c | 11 + lib/builtins/fp_add_impl.inc | 27 +- lib/builtins/fp_lib.h | 2 +- lib/builtins/fp_mode.c | 24 + lib/builtins/fp_mode.h | 29 + lib/builtins/fp_trunc_impl.inc | 2 +- lib/builtins/subdf3.c | 3 +- lib/builtins/subsf3.c | 3 +- lib/builtins/subtf3.c | 3 +- lib/builtins/udivmoddi4.c | 11 + lib/dfsan/dfsan.cc | 460 ---- lib/dfsan/dfsan.cpp | 460 ++++ lib/dfsan/dfsan_custom.cc | 1156 -------- lib/dfsan/dfsan_custom.cpp | 1156 ++++++++ lib/dfsan/dfsan_interceptors.cc | 45 - lib/dfsan/dfsan_interceptors.cpp | 45 + lib/fuzzer/FuzzerBuiltinsMsvc.h | 22 +- lib/fuzzer/FuzzerDefs.h | 5 +- lib/fuzzer/FuzzerDriver.cpp | 1 - lib/fuzzer/FuzzerExtFunctions.def | 11 +- lib/fuzzer/FuzzerFlags.def | 3 - lib/fuzzer/FuzzerInternal.h | 3 +- lib/fuzzer/FuzzerLoop.cpp | 19 +- lib/fuzzer/FuzzerMerge.cpp | 82 +- lib/fuzzer/FuzzerOptions.h | 1 - lib/fuzzer/FuzzerTracePC.cpp | 41 +- lib/fuzzer/FuzzerTracePC.h | 3 - lib/fuzzer/FuzzerUtil.h | 2 - lib/fuzzer/FuzzerUtilFuchsia.cpp | 26 +- lib/fuzzer/FuzzerUtilPosix.cpp | 6 - lib/fuzzer/FuzzerUtilWindows.cpp | 4 - lib/fuzzer/utils/FuzzedDataProvider.h | 245 -- lib/gwp_asan/guarded_pool_allocator.cpp | 61 +- lib/gwp_asan/guarded_pool_allocator.h | 21 +- lib/gwp_asan/optional/backtrace.h | 7 +- lib/gwp_asan/optional/backtrace_linux_libc.cpp | 22 +- .../optional/backtrace_sanitizer_common.cpp | 29 +- lib/gwp_asan/options.h | 69 +- lib/gwp_asan/options.inc | 6 +- lib/gwp_asan/scripts/symbolize.sh | 55 + lib/gwp_asan/stack_trace_compressor.cpp | 111 + lib/gwp_asan/stack_trace_compressor.h | 38 + lib/hwasan/hwasan.cpp | 155 +- lib/hwasan/hwasan.h | 2 +- lib/hwasan/hwasan_allocator.cpp | 29 - lib/hwasan/hwasan_allocator.h | 6 - lib/hwasan/hwasan_exceptions.cpp | 67 + lib/hwasan/hwasan_flags.inc | 2 +- lib/hwasan/hwasan_interceptors.cpp | 2 - lib/hwasan/hwasan_interface_internal.h | 9 + lib/hwasan/hwasan_linux.cpp | 42 +- lib/hwasan/hwasan_new_delete.cpp | 2 +- lib/hwasan/hwasan_report.cpp | 25 + lib/hwasan/hwasan_tag_mismatch_aarch64.S | 50 +- lib/interception/interception.h | 4 +- lib/interception/interception_linux.cc | 83 - lib/interception/interception_linux.cpp | 83 + lib/interception/interception_mac.cc | 18 - lib/interception/interception_mac.cpp | 18 + lib/interception/interception_type_test.cc | 39 - lib/interception/interception_type_test.cpp | 39 + lib/interception/interception_win.cc | 1022 ------- lib/interception/interception_win.cpp | 1022 +++++++ lib/lsan/lsan.cc | 135 - lib/lsan/lsan.cpp | 135 + lib/lsan/lsan_allocator.cc | 353 --- lib/lsan/lsan_allocator.cpp | 353 +++ lib/lsan/lsan_common.cc | 904 ------- lib/lsan/lsan_common.cpp | 900 ++++++ lib/lsan/lsan_common.h | 5 +- lib/lsan/lsan_common_linux.cc | 140 - lib/lsan/lsan_common_linux.cpp | 144 + lib/lsan/lsan_common_mac.cc | 202 -- lib/lsan/lsan_common_mac.cpp | 206 ++ lib/lsan/lsan_interceptors.cc | 465 ---- lib/lsan/lsan_interceptors.cpp | 520 ++++ lib/lsan/lsan_linux.cc | 32 - lib/lsan/lsan_linux.cpp | 32 + lib/lsan/lsan_mac.cc | 191 -- lib/lsan/lsan_mac.cpp | 191 ++ lib/lsan/lsan_malloc_mac.cc | 
59 - lib/lsan/lsan_malloc_mac.cpp | 59 + lib/lsan/lsan_preinit.cc | 21 - lib/lsan/lsan_preinit.cpp | 21 + lib/lsan/lsan_thread.cc | 162 -- lib/lsan/lsan_thread.cpp | 162 ++ lib/msan/msan.cc | 675 ----- lib/msan/msan.cpp | 675 +++++ lib/msan/msan.h | 11 +- lib/msan/msan_allocator.cc | 349 --- lib/msan/msan_allocator.cpp | 349 +++ lib/msan/msan_chained_origin_depot.cc | 131 - lib/msan/msan_chained_origin_depot.cpp | 131 + lib/msan/msan_interceptors.cc | 1715 ------------ lib/msan/msan_interceptors.cpp | 1724 ++++++++++++ lib/msan/msan_linux.cc | 262 -- lib/msan/msan_linux.cpp | 262 ++ lib/msan/msan_new_delete.cc | 108 - lib/msan/msan_new_delete.cpp | 108 + lib/msan/msan_poisoning.cc | 174 -- lib/msan/msan_poisoning.cpp | 174 ++ lib/msan/msan_report.cc | 272 -- lib/msan/msan_report.cpp | 272 ++ lib/msan/msan_thread.cc | 82 - lib/msan/msan_thread.cpp | 82 + lib/profile/InstrProfiling.h | 17 +- lib/profile/InstrProfilingFile.c | 11 +- lib/profile/InstrProfilingPlatformFuchsia.c | 93 +- lib/profile/InstrProfilingRuntime.cc | 29 - lib/profile/InstrProfilingRuntime.cpp | 29 + lib/profile/InstrProfilingUtil.c | 20 +- lib/safestack/safestack.cc | 310 --- lib/safestack/safestack.cpp | 310 +++ lib/sanitizer_common/sancov_flags.cc | 58 - lib/sanitizer_common/sancov_flags.cpp | 58 + lib/sanitizer_common/sanitizer_allocator.cc | 267 -- lib/sanitizer_common/sanitizer_allocator.cpp | 267 ++ lib/sanitizer_common/sanitizer_allocator_checks.cc | 22 - .../sanitizer_allocator_checks.cpp | 22 + lib/sanitizer_common/sanitizer_allocator_checks.h | 2 +- lib/sanitizer_common/sanitizer_allocator_report.cc | 136 - .../sanitizer_allocator_report.cpp | 137 + lib/sanitizer_common/sanitizer_asm.h | 4 +- lib/sanitizer_common/sanitizer_atomic_msvc.h | 63 +- lib/sanitizer_common/sanitizer_common.cc | 346 --- lib/sanitizer_common/sanitizer_common.cpp | 346 +++ lib/sanitizer_common/sanitizer_common.h | 29 +- .../sanitizer_common_interceptors.inc | 144 +- .../sanitizer_common_interface.inc | 1 + lib/sanitizer_common/sanitizer_common_libcdep.cc | 139 - lib/sanitizer_common/sanitizer_common_libcdep.cpp | 139 + lib/sanitizer_common/sanitizer_common_nolibc.cc | 34 - lib/sanitizer_common/sanitizer_common_nolibc.cpp | 34 + lib/sanitizer_common/sanitizer_common_syscalls.inc | 12 + lib/sanitizer_common/sanitizer_coverage_fuchsia.cc | 240 -- .../sanitizer_coverage_fuchsia.cpp | 240 ++ .../sanitizer_coverage_libcdep_new.cc | 218 -- .../sanitizer_coverage_libcdep_new.cpp | 218 ++ .../sanitizer_coverage_win_dll_thunk.cc | 20 - .../sanitizer_coverage_win_dll_thunk.cpp | 20 + ...sanitizer_coverage_win_dynamic_runtime_thunk.cc | 26 - ...anitizer_coverage_win_dynamic_runtime_thunk.cpp | 26 + .../sanitizer_coverage_win_sections.cc | 67 - .../sanitizer_coverage_win_sections.cpp | 67 + .../sanitizer_coverage_win_weak_interception.cc | 23 - .../sanitizer_coverage_win_weak_interception.cpp | 23 + .../sanitizer_deadlock_detector1.cc | 194 -- .../sanitizer_deadlock_detector1.cpp | 194 ++ .../sanitizer_deadlock_detector2.cc | 423 --- .../sanitizer_deadlock_detector2.cpp | 423 +++ lib/sanitizer_common/sanitizer_errno.cc | 34 - lib/sanitizer_common/sanitizer_errno.cpp | 34 + lib/sanitizer_common/sanitizer_file.cc | 215 -- lib/sanitizer_common/sanitizer_file.cpp | 215 ++ lib/sanitizer_common/sanitizer_flag_parser.cc | 183 -- lib/sanitizer_common/sanitizer_flag_parser.cpp | 184 ++ lib/sanitizer_common/sanitizer_flag_parser.h | 4 +- lib/sanitizer_common/sanitizer_flags.cc | 121 - lib/sanitizer_common/sanitizer_flags.cpp | 121 + 
lib/sanitizer_common/sanitizer_fuchsia.cc | 527 ---- lib/sanitizer_common/sanitizer_fuchsia.cpp | 527 ++++ lib/sanitizer_common/sanitizer_getauxval.h | 30 +- lib/sanitizer_common/sanitizer_glibc_version.h | 26 + .../sanitizer_interceptors_ioctl_netbsd.inc | 31 +- lib/sanitizer_common/sanitizer_internal_defs.h | 75 +- lib/sanitizer_common/sanitizer_libc.cc | 279 -- lib/sanitizer_common/sanitizer_libc.cpp | 280 ++ lib/sanitizer_common/sanitizer_libignore.cc | 129 - lib/sanitizer_common/sanitizer_libignore.cpp | 129 + lib/sanitizer_common/sanitizer_linux.cc | 2102 -------------- lib/sanitizer_common/sanitizer_linux.cpp | 2141 +++++++++++++++ lib/sanitizer_common/sanitizer_linux_libcdep.cc | 850 ------ lib/sanitizer_common/sanitizer_linux_libcdep.cpp | 847 ++++++ lib/sanitizer_common/sanitizer_linux_s390.cc | 221 -- lib/sanitizer_common/sanitizer_linux_s390.cpp | 221 ++ lib/sanitizer_common/sanitizer_mac.cc | 1135 -------- lib/sanitizer_common/sanitizer_mac.cpp | 1219 +++++++++ lib/sanitizer_common/sanitizer_mac_libcdep.cc | 29 - lib/sanitizer_common/sanitizer_mac_libcdep.cpp | 29 + lib/sanitizer_common/sanitizer_malloc_mac.inc | 11 +- lib/sanitizer_common/sanitizer_netbsd.cc | 338 --- lib/sanitizer_common/sanitizer_netbsd.cpp | 338 +++ lib/sanitizer_common/sanitizer_openbsd.cc | 115 - lib/sanitizer_common/sanitizer_openbsd.cpp | 115 + .../sanitizer_persistent_allocator.cc | 18 - .../sanitizer_persistent_allocator.cpp | 18 + .../sanitizer_platform_interceptors.h | 24 +- .../sanitizer_platform_limits_freebsd.cc | 525 ---- .../sanitizer_platform_limits_freebsd.cpp | 525 ++++ .../sanitizer_platform_limits_freebsd.h | 1090 ++++---- .../sanitizer_platform_limits_linux.cc | 108 - .../sanitizer_platform_limits_linux.cpp | 108 + .../sanitizer_platform_limits_netbsd.cc | 2345 ---------------- .../sanitizer_platform_limits_netbsd.cpp | 2395 ++++++++++++++++ .../sanitizer_platform_limits_netbsd.h | 47 +- .../sanitizer_platform_limits_openbsd.cc | 278 -- .../sanitizer_platform_limits_openbsd.cpp | 278 ++ .../sanitizer_platform_limits_posix.cc | 1271 --------- .../sanitizer_platform_limits_posix.cpp | 1276 +++++++++ .../sanitizer_platform_limits_posix.h | 2168 +++++++-------- .../sanitizer_platform_limits_solaris.cc | 365 --- .../sanitizer_platform_limits_solaris.cpp | 365 +++ .../sanitizer_platform_limits_solaris.h | 7 +- lib/sanitizer_common/sanitizer_posix.cc | 388 --- lib/sanitizer_common/sanitizer_posix.cpp | 390 +++ lib/sanitizer_common/sanitizer_posix.h | 2 +- lib/sanitizer_common/sanitizer_posix_libcdep.cc | 486 ---- lib/sanitizer_common/sanitizer_posix_libcdep.cpp | 507 ++++ lib/sanitizer_common/sanitizer_printf.cc | 358 --- lib/sanitizer_common/sanitizer_printf.cpp | 358 +++ lib/sanitizer_common/sanitizer_procmaps.h | 2 +- lib/sanitizer_common/sanitizer_procmaps_bsd.cc | 139 - lib/sanitizer_common/sanitizer_procmaps_bsd.cpp | 139 + lib/sanitizer_common/sanitizer_procmaps_common.cc | 174 -- lib/sanitizer_common/sanitizer_procmaps_common.cpp | 174 ++ lib/sanitizer_common/sanitizer_procmaps_linux.cc | 81 - lib/sanitizer_common/sanitizer_procmaps_linux.cpp | 81 + lib/sanitizer_common/sanitizer_procmaps_mac.cc | 378 --- lib/sanitizer_common/sanitizer_procmaps_mac.cpp | 379 +++ lib/sanitizer_common/sanitizer_procmaps_solaris.cc | 67 - .../sanitizer_procmaps_solaris.cpp | 67 + lib/sanitizer_common/sanitizer_rtems.cc | 279 -- lib/sanitizer_common/sanitizer_rtems.cpp | 279 ++ lib/sanitizer_common/sanitizer_solaris.cc | 230 -- lib/sanitizer_common/sanitizer_solaris.cpp | 230 ++ 
lib/sanitizer_common/sanitizer_stackdepot.cc | 149 - lib/sanitizer_common/sanitizer_stackdepot.cpp | 149 + lib/sanitizer_common/sanitizer_stacktrace.cc | 133 - lib/sanitizer_common/sanitizer_stacktrace.cpp | 133 + .../sanitizer_stacktrace_libcdep.cc | 158 -- .../sanitizer_stacktrace_libcdep.cpp | 159 ++ .../sanitizer_stacktrace_printer.cc | 263 -- .../sanitizer_stacktrace_printer.cpp | 263 ++ lib/sanitizer_common/sanitizer_stacktrace_sparc.cc | 85 - .../sanitizer_stacktrace_sparc.cpp | 85 + .../sanitizer_stoptheworld_linux_libcdep.cc | 572 ---- .../sanitizer_stoptheworld_linux_libcdep.cpp | 573 ++++ lib/sanitizer_common/sanitizer_stoptheworld_mac.cc | 177 -- .../sanitizer_stoptheworld_mac.cpp | 177 ++ .../sanitizer_stoptheworld_netbsd_libcdep.cc | 356 --- .../sanitizer_stoptheworld_netbsd_libcdep.cpp | 356 +++ lib/sanitizer_common/sanitizer_suppressions.cc | 181 -- lib/sanitizer_common/sanitizer_suppressions.cpp | 181 ++ lib/sanitizer_common/sanitizer_suppressions.h | 2 +- lib/sanitizer_common/sanitizer_symbolizer.cc | 129 - lib/sanitizer_common/sanitizer_symbolizer.cpp | 129 + .../sanitizer_symbolizer_internal.h | 19 +- .../sanitizer_symbolizer_libbacktrace.cc | 209 -- .../sanitizer_symbolizer_libbacktrace.cpp | 209 ++ .../sanitizer_symbolizer_libcdep.cc | 556 ---- .../sanitizer_symbolizer_libcdep.cpp | 557 ++++ lib/sanitizer_common/sanitizer_symbolizer_mac.cc | 168 -- lib/sanitizer_common/sanitizer_symbolizer_mac.cpp | 173 ++ .../sanitizer_symbolizer_markup.cc | 144 - .../sanitizer_symbolizer_markup.cpp | 144 + .../sanitizer_symbolizer_posix_libcdep.cc | 539 ---- .../sanitizer_symbolizer_posix_libcdep.cpp | 487 ++++ .../sanitizer_symbolizer_report.cc | 283 -- .../sanitizer_symbolizer_report.cpp | 293 ++ lib/sanitizer_common/sanitizer_symbolizer_win.cc | 318 --- lib/sanitizer_common/sanitizer_symbolizer_win.cpp | 318 +++ lib/sanitizer_common/sanitizer_termination.cc | 94 - lib/sanitizer_common/sanitizer_termination.cpp | 94 + lib/sanitizer_common/sanitizer_thread_registry.cc | 351 --- lib/sanitizer_common/sanitizer_thread_registry.cpp | 351 +++ lib/sanitizer_common/sanitizer_tls_get_addr.cc | 154 -- lib/sanitizer_common/sanitizer_tls_get_addr.cpp | 154 ++ lib/sanitizer_common/sanitizer_tls_get_addr.h | 2 +- lib/sanitizer_common/sanitizer_type_traits.cc | 20 - lib/sanitizer_common/sanitizer_type_traits.cpp | 20 + .../sanitizer_unwind_linux_libcdep.cc | 176 -- .../sanitizer_unwind_linux_libcdep.cpp | 180 ++ lib/sanitizer_common/sanitizer_unwind_win.cc | 75 - lib/sanitizer_common/sanitizer_unwind_win.cpp | 75 + lib/sanitizer_common/sanitizer_vector.h | 6 +- lib/sanitizer_common/sanitizer_win.cc | 1115 -------- lib/sanitizer_common/sanitizer_win.cpp | 1124 ++++++++ lib/sanitizer_common/sanitizer_win_defs.h | 12 + lib/sanitizer_common/sanitizer_win_dll_thunk.cc | 101 - lib/sanitizer_common/sanitizer_win_dll_thunk.cpp | 101 + .../sanitizer_win_dynamic_runtime_thunk.cc | 26 - .../sanitizer_win_dynamic_runtime_thunk.cpp | 26 + .../sanitizer_win_weak_interception.cc | 93 - .../sanitizer_win_weak_interception.cpp | 94 + .../symbolizer/sanitizer_symbolize.cc | 80 - .../symbolizer/sanitizer_symbolize.cpp | 80 + .../symbolizer/sanitizer_wrappers.cc | 198 -- .../symbolizer/sanitizer_wrappers.cpp | 198 ++ .../symbolizer/scripts/build_symbolizer.sh | 34 +- .../symbolizer/scripts/global_symbols.txt | 7 + lib/scudo/scudo_allocator_secondary.h | 28 +- lib/scudo/scudo_errors.cpp | 5 +- lib/scudo/standalone/allocator_config.h | 4 +- lib/scudo/standalone/checksum.cc | 70 - lib/scudo/standalone/checksum.cpp | 
70 + lib/scudo/standalone/chunk.h | 26 +- lib/scudo/standalone/combined.h | 131 +- lib/scudo/standalone/common.cc | 32 - lib/scudo/standalone/common.cpp | 32 + lib/scudo/standalone/crc32_hw.cc | 19 - lib/scudo/standalone/crc32_hw.cpp | 19 + lib/scudo/standalone/flags.cc | 57 - lib/scudo/standalone/flags.cpp | 57 + lib/scudo/standalone/flags_parser.cc | 164 -- lib/scudo/standalone/flags_parser.cpp | 164 ++ lib/scudo/standalone/fuchsia.cc | 189 -- lib/scudo/standalone/fuchsia.cpp | 189 ++ lib/scudo/standalone/internal_defs.h | 4 +- lib/scudo/standalone/linux.cc | 171 -- lib/scudo/standalone/linux.cpp | 171 ++ lib/scudo/standalone/linux.h | 2 +- lib/scudo/standalone/list.h | 12 +- lib/scudo/standalone/local_cache.h | 16 +- lib/scudo/standalone/mutex.h | 6 +- lib/scudo/standalone/primary32.h | 60 +- lib/scudo/standalone/primary64.h | 89 +- lib/scudo/standalone/quarantine.h | 20 +- lib/scudo/standalone/report.cc | 192 -- lib/scudo/standalone/report.cpp | 192 ++ lib/scudo/standalone/secondary.cc | 136 - lib/scudo/standalone/secondary.cpp | 135 + lib/scudo/standalone/secondary.h | 5 +- lib/scudo/standalone/size_class_map.h | 16 +- lib/scudo/standalone/stats.h | 2 +- lib/scudo/standalone/string_utils.cc | 236 -- lib/scudo/standalone/string_utils.cpp | 244 ++ lib/scudo/standalone/string_utils.h | 1 + lib/scudo/standalone/tsd_exclusive.h | 4 +- lib/scudo/standalone/tsd_shared.h | 5 +- lib/scudo/standalone/wrappers_c.cc | 39 - lib/scudo/standalone/wrappers_c.cpp | 39 + lib/scudo/standalone/wrappers_c.inc | 16 +- lib/scudo/standalone/wrappers_c_bionic.cc | 49 - lib/scudo/standalone/wrappers_c_bionic.cpp | 49 + lib/scudo/standalone/wrappers_cpp.cc | 107 - lib/scudo/standalone/wrappers_cpp.cpp | 107 + lib/stats/stats.cc | 136 - lib/stats/stats.cpp | 136 + lib/stats/stats_client.cc | 83 - lib/stats/stats_client.cpp | 83 + lib/tsan/benchmarks/func_entry_exit.cc | 20 - lib/tsan/benchmarks/func_entry_exit.cpp | 20 + lib/tsan/benchmarks/mini_bench_local.cc | 49 - lib/tsan/benchmarks/mini_bench_local.cpp | 49 + lib/tsan/benchmarks/mini_bench_shared.cc | 51 - lib/tsan/benchmarks/mini_bench_shared.cpp | 51 + lib/tsan/benchmarks/mop.cc | 80 - lib/tsan/benchmarks/mop.cpp | 80 + lib/tsan/benchmarks/start_many_threads.cc | 52 - lib/tsan/benchmarks/start_many_threads.cpp | 52 + lib/tsan/benchmarks/vts_many_threads_bench.cc | 120 - lib/tsan/benchmarks/vts_many_threads_bench.cpp | 120 + lib/tsan/dd/dd_interceptors.cc | 328 --- lib/tsan/dd/dd_interceptors.cpp | 328 +++ lib/tsan/dd/dd_rtl.cc | 158 -- lib/tsan/dd/dd_rtl.cpp | 158 ++ lib/tsan/go/tsan_go.cc | 283 -- lib/tsan/go/tsan_go.cpp | 294 ++ lib/tsan/rtl/tsan_clock.cc | 597 ---- lib/tsan/rtl/tsan_clock.cpp | 597 ++++ lib/tsan/rtl/tsan_debugging.cc | 262 -- lib/tsan/rtl/tsan_debugging.cpp | 262 ++ lib/tsan/rtl/tsan_dispatch_defs.h | 14 +- lib/tsan/rtl/tsan_external.cc | 124 - lib/tsan/rtl/tsan_external.cpp | 124 + lib/tsan/rtl/tsan_fd.cc | 315 --- lib/tsan/rtl/tsan_fd.cpp | 316 +++ lib/tsan/rtl/tsan_flags.cc | 125 - lib/tsan/rtl/tsan_flags.cpp | 125 + lib/tsan/rtl/tsan_ignoreset.cc | 46 - lib/tsan/rtl/tsan_ignoreset.cpp | 46 + lib/tsan/rtl/tsan_interceptors.cc | 2855 -------------------- lib/tsan/rtl/tsan_interceptors_libdispatch.cpp | 782 ++++++ lib/tsan/rtl/tsan_interceptors_mac.cc | 479 ---- lib/tsan/rtl/tsan_interceptors_mac.cpp | 519 ++++ lib/tsan/rtl/tsan_interceptors_mach_vm.cpp | 52 + lib/tsan/rtl/tsan_interceptors_posix.cpp | 2850 +++++++++++++++++++ lib/tsan/rtl/tsan_interface.cc | 160 -- lib/tsan/rtl/tsan_interface.cpp | 160 ++ 
lib/tsan/rtl/tsan_interface.h | 13 +- lib/tsan/rtl/tsan_interface_ann.cc | 552 ---- lib/tsan/rtl/tsan_interface_ann.cpp | 552 ++++ lib/tsan/rtl/tsan_interface_atomic.cc | 955 ------- lib/tsan/rtl/tsan_interface_atomic.cpp | 955 +++++++ lib/tsan/rtl/tsan_interface_inl.h | 10 +- lib/tsan/rtl/tsan_interface_java.cc | 267 -- lib/tsan/rtl/tsan_interface_java.cpp | 267 ++ lib/tsan/rtl/tsan_interface_java.h | 4 +- lib/tsan/rtl/tsan_libdispatch.cc | 782 ------ lib/tsan/rtl/tsan_malloc_mac.cc | 71 - lib/tsan/rtl/tsan_malloc_mac.cpp | 71 + lib/tsan/rtl/tsan_md5.cc | 250 -- lib/tsan/rtl/tsan_md5.cpp | 250 ++ lib/tsan/rtl/tsan_mman.cc | 396 --- lib/tsan/rtl/tsan_mman.cpp | 396 +++ lib/tsan/rtl/tsan_mman.h | 5 +- lib/tsan/rtl/tsan_mutex.cc | 289 -- lib/tsan/rtl/tsan_mutex.cpp | 289 ++ lib/tsan/rtl/tsan_mutexset.cc | 88 - lib/tsan/rtl/tsan_mutexset.cpp | 88 + lib/tsan/rtl/tsan_new_delete.cc | 199 -- lib/tsan/rtl/tsan_new_delete.cpp | 199 ++ lib/tsan/rtl/tsan_platform.h | 2 + lib/tsan/rtl/tsan_platform_linux.cc | 515 ---- lib/tsan/rtl/tsan_platform_linux.cpp | 515 ++++ lib/tsan/rtl/tsan_platform_mac.cc | 316 --- lib/tsan/rtl/tsan_platform_mac.cpp | 316 +++ lib/tsan/rtl/tsan_platform_posix.cc | 174 -- lib/tsan/rtl/tsan_platform_posix.cpp | 167 ++ lib/tsan/rtl/tsan_platform_windows.cc | 37 - lib/tsan/rtl/tsan_platform_windows.cpp | 37 + lib/tsan/rtl/tsan_preinit.cc | 26 - lib/tsan/rtl/tsan_preinit.cpp | 26 + lib/tsan/rtl/tsan_report.cc | 486 ---- lib/tsan/rtl/tsan_report.cpp | 486 ++++ lib/tsan/rtl/tsan_rtl.cc | 1099 -------- lib/tsan/rtl/tsan_rtl.cpp | 1117 ++++++++ lib/tsan/rtl/tsan_rtl.h | 5 +- lib/tsan/rtl/tsan_rtl_mutex.cc | 539 ---- lib/tsan/rtl/tsan_rtl_mutex.cpp | 539 ++++ lib/tsan/rtl/tsan_rtl_proc.cc | 60 - lib/tsan/rtl/tsan_rtl_proc.cpp | 60 + lib/tsan/rtl/tsan_rtl_report.cc | 756 ------ lib/tsan/rtl/tsan_rtl_report.cpp | 757 ++++++ lib/tsan/rtl/tsan_rtl_thread.cc | 444 --- lib/tsan/rtl/tsan_rtl_thread.cpp | 444 +++ lib/tsan/rtl/tsan_stack_trace.cc | 63 - lib/tsan/rtl/tsan_stack_trace.cpp | 63 + lib/tsan/rtl/tsan_stat.cc | 186 -- lib/tsan/rtl/tsan_stat.cpp | 186 ++ lib/tsan/rtl/tsan_suppressions.cc | 161 -- lib/tsan/rtl/tsan_suppressions.cpp | 161 ++ lib/tsan/rtl/tsan_symbolize.cc | 122 - lib/tsan/rtl/tsan_symbolize.cpp | 122 + lib/tsan/rtl/tsan_sync.cc | 296 -- lib/tsan/rtl/tsan_sync.cpp | 296 ++ lib/ubsan/ubsan_checks.inc | 5 + lib/ubsan/ubsan_diag.cc | 443 --- lib/ubsan/ubsan_diag.cpp | 443 +++ lib/ubsan/ubsan_diag_standalone.cc | 40 - lib/ubsan/ubsan_diag_standalone.cpp | 40 + lib/ubsan/ubsan_flags.cc | 85 - lib/ubsan/ubsan_flags.cpp | 84 + lib/ubsan/ubsan_handlers.cc | 824 ------ lib/ubsan/ubsan_handlers.cpp | 843 ++++++ lib/ubsan/ubsan_handlers_cxx.cc | 205 -- lib/ubsan/ubsan_handlers_cxx.cpp | 205 ++ lib/ubsan/ubsan_init.cc | 64 - lib/ubsan/ubsan_init.cpp | 64 + lib/ubsan/ubsan_init_standalone.cc | 33 - lib/ubsan/ubsan_init_standalone.cpp | 33 + lib/ubsan/ubsan_init_standalone_preinit.cc | 35 - lib/ubsan/ubsan_init_standalone_preinit.cpp | 35 + lib/ubsan/ubsan_monitor.cc | 75 - lib/ubsan/ubsan_monitor.cpp | 75 + lib/ubsan/ubsan_signals_standalone.cc | 71 - lib/ubsan/ubsan_signals_standalone.cpp | 71 + lib/ubsan/ubsan_type_hash.cc | 33 - lib/ubsan/ubsan_type_hash.cpp | 33 + lib/ubsan/ubsan_type_hash_itanium.cc | 268 -- lib/ubsan/ubsan_type_hash_itanium.cpp | 268 ++ lib/ubsan/ubsan_type_hash_win.cc | 84 - lib/ubsan/ubsan_type_hash_win.cpp | 84 + lib/ubsan/ubsan_value.cc | 112 - lib/ubsan/ubsan_value.cpp | 112 + lib/ubsan/ubsan_win_dll_thunk.cc | 20 - lib/ubsan/ubsan_win_dll_thunk.cpp | 20 + 
lib/ubsan/ubsan_win_dynamic_runtime_thunk.cc | 20 - lib/ubsan/ubsan_win_dynamic_runtime_thunk.cpp | 20 + lib/ubsan/ubsan_win_weak_interception.cc | 23 - lib/ubsan/ubsan_win_weak_interception.cpp | 23 + lib/ubsan_minimal/ubsan_minimal_handlers.cc | 119 - lib/ubsan_minimal/ubsan_minimal_handlers.cpp | 119 + lib/xray/xray_AArch64.cc | 127 - lib/xray/xray_AArch64.cpp | 127 + lib/xray/xray_arm.cc | 164 -- lib/xray/xray_arm.cpp | 164 ++ lib/xray/xray_basic_flags.cc | 49 - lib/xray/xray_basic_flags.cpp | 49 + lib/xray/xray_basic_logging.cc | 515 ---- lib/xray/xray_basic_logging.cpp | 515 ++++ lib/xray/xray_buffer_queue.cc | 237 -- lib/xray/xray_buffer_queue.cpp | 237 ++ lib/xray/xray_fdr_flags.cc | 47 - lib/xray/xray_fdr_flags.cpp | 47 + lib/xray/xray_fdr_logging.cc | 757 ------ lib/xray/xray_fdr_logging.cpp | 757 ++++++ lib/xray/xray_flags.cc | 84 - lib/xray/xray_flags.cpp | 84 + lib/xray/xray_init.cc | 115 - lib/xray/xray_init.cpp | 115 + lib/xray/xray_interface.cc | 480 ---- lib/xray/xray_interface.cpp | 480 ++++ lib/xray/xray_log_interface.cc | 209 -- lib/xray/xray_log_interface.cpp | 209 ++ lib/xray/xray_mips.cc | 170 -- lib/xray/xray_mips.cpp | 170 ++ lib/xray/xray_mips64.cc | 178 -- lib/xray/xray_mips64.cpp | 178 ++ lib/xray/xray_powerpc64.cc | 111 - lib/xray/xray_powerpc64.cpp | 111 + lib/xray/xray_profile_collector.cc | 414 --- lib/xray/xray_profile_collector.cpp | 414 +++ lib/xray/xray_profiling.cc | 519 ---- lib/xray/xray_profiling.cpp | 519 ++++ lib/xray/xray_profiling_flags.cc | 39 - lib/xray/xray_profiling_flags.cpp | 39 + lib/xray/xray_trampoline_powerpc64.cc | 15 - lib/xray/xray_trampoline_powerpc64.cpp | 15 + lib/xray/xray_utils.cc | 195 -- lib/xray/xray_utils.cpp | 195 ++ lib/xray/xray_x86_64.cc | 353 --- lib/xray/xray_x86_64.cpp | 353 +++ tools/CMakeLists.txt | 1 + tools/gwp_asan/CMakeLists.txt | 20 + tools/gwp_asan/stack_trace_compressor_fuzzer.cpp | 49 + 616 files changed, 71399 insertions(+), 69623 deletions(-) create mode 100644 include/fuzzer/FuzzedDataProvider.h create mode 100644 include/sanitizer/ubsan_interface.h delete mode 100644 lib/asan/asan_activation.cc create mode 100644 lib/asan/asan_activation.cpp delete mode 100644 lib/asan/asan_allocator.cc create mode 100644 lib/asan/asan_allocator.cpp delete mode 100644 lib/asan/asan_debugging.cc create mode 100644 lib/asan/asan_debugging.cpp delete mode 100644 lib/asan/asan_descriptions.cc create mode 100644 lib/asan/asan_descriptions.cpp delete mode 100644 lib/asan/asan_errors.cc create mode 100644 lib/asan/asan_errors.cpp delete mode 100644 lib/asan/asan_fake_stack.cc create mode 100644 lib/asan/asan_fake_stack.cpp delete mode 100644 lib/asan/asan_flags.cc create mode 100644 lib/asan/asan_flags.cpp delete mode 100644 lib/asan/asan_fuchsia.cc create mode 100644 lib/asan/asan_fuchsia.cpp delete mode 100644 lib/asan/asan_globals.cc create mode 100644 lib/asan/asan_globals.cpp delete mode 100644 lib/asan/asan_globals_win.cc create mode 100644 lib/asan/asan_globals_win.cpp delete mode 100644 lib/asan/asan_interceptors.cc create mode 100644 lib/asan/asan_interceptors.cpp delete mode 100644 lib/asan/asan_interceptors_memintrinsics.cc create mode 100644 lib/asan/asan_interceptors_memintrinsics.cpp delete mode 100644 lib/asan/asan_linux.cc create mode 100644 lib/asan/asan_linux.cpp delete mode 100644 lib/asan/asan_mac.cc create mode 100644 lib/asan/asan_mac.cpp delete mode 100644 lib/asan/asan_malloc_linux.cc create mode 100644 lib/asan/asan_malloc_linux.cpp delete mode 100644 lib/asan/asan_malloc_mac.cc create mode 100644 
lib/asan/asan_malloc_mac.cpp delete mode 100644 lib/asan/asan_malloc_win.cc create mode 100644 lib/asan/asan_malloc_win.cpp delete mode 100644 lib/asan/asan_memory_profile.cc create mode 100644 lib/asan/asan_memory_profile.cpp delete mode 100644 lib/asan/asan_new_delete.cc create mode 100644 lib/asan/asan_new_delete.cpp delete mode 100644 lib/asan/asan_poisoning.cc create mode 100644 lib/asan/asan_poisoning.cpp delete mode 100644 lib/asan/asan_posix.cc create mode 100644 lib/asan/asan_posix.cpp delete mode 100644 lib/asan/asan_preinit.cc create mode 100644 lib/asan/asan_preinit.cpp delete mode 100644 lib/asan/asan_premap_shadow.cc create mode 100644 lib/asan/asan_premap_shadow.cpp delete mode 100644 lib/asan/asan_report.cc create mode 100644 lib/asan/asan_report.cpp delete mode 100644 lib/asan/asan_rtems.cc create mode 100644 lib/asan/asan_rtems.cpp delete mode 100644 lib/asan/asan_rtl.cc create mode 100644 lib/asan/asan_rtl.cpp delete mode 100644 lib/asan/asan_shadow_setup.cc create mode 100644 lib/asan/asan_shadow_setup.cpp delete mode 100644 lib/asan/asan_stack.cc create mode 100644 lib/asan/asan_stack.cpp delete mode 100644 lib/asan/asan_stats.cc create mode 100644 lib/asan/asan_stats.cpp delete mode 100644 lib/asan/asan_suppressions.cc create mode 100644 lib/asan/asan_suppressions.cpp delete mode 100644 lib/asan/asan_thread.cc create mode 100644 lib/asan/asan_thread.cpp delete mode 100644 lib/asan/asan_win.cc create mode 100644 lib/asan/asan_win.cpp delete mode 100644 lib/asan/asan_win_dll_thunk.cc create mode 100644 lib/asan/asan_win_dll_thunk.cpp delete mode 100644 lib/asan/asan_win_dynamic_runtime_thunk.cc create mode 100644 lib/asan/asan_win_dynamic_runtime_thunk.cpp delete mode 100644 lib/asan/asan_win_weak_interception.cc create mode 100644 lib/asan/asan_win_weak_interception.cpp create mode 100644 lib/builtins/aarch64/fp_mode.c create mode 100644 lib/builtins/arm/fp_mode.c create mode 100644 lib/builtins/fp_mode.c create mode 100644 lib/builtins/fp_mode.h delete mode 100644 lib/dfsan/dfsan.cc create mode 100644 lib/dfsan/dfsan.cpp delete mode 100644 lib/dfsan/dfsan_custom.cc create mode 100644 lib/dfsan/dfsan_custom.cpp delete mode 100644 lib/dfsan/dfsan_interceptors.cc create mode 100644 lib/dfsan/dfsan_interceptors.cpp delete mode 100644 lib/fuzzer/utils/FuzzedDataProvider.h create mode 100755 lib/gwp_asan/scripts/symbolize.sh create mode 100644 lib/gwp_asan/stack_trace_compressor.cpp create mode 100644 lib/gwp_asan/stack_trace_compressor.h create mode 100644 lib/hwasan/hwasan_exceptions.cpp delete mode 100644 lib/interception/interception_linux.cc create mode 100644 lib/interception/interception_linux.cpp delete mode 100644 lib/interception/interception_mac.cc create mode 100644 lib/interception/interception_mac.cpp delete mode 100644 lib/interception/interception_type_test.cc create mode 100644 lib/interception/interception_type_test.cpp delete mode 100644 lib/interception/interception_win.cc create mode 100644 lib/interception/interception_win.cpp delete mode 100644 lib/lsan/lsan.cc create mode 100644 lib/lsan/lsan.cpp delete mode 100644 lib/lsan/lsan_allocator.cc create mode 100644 lib/lsan/lsan_allocator.cpp delete mode 100644 lib/lsan/lsan_common.cc create mode 100644 lib/lsan/lsan_common.cpp delete mode 100644 lib/lsan/lsan_common_linux.cc create mode 100644 lib/lsan/lsan_common_linux.cpp delete mode 100644 lib/lsan/lsan_common_mac.cc create mode 100644 lib/lsan/lsan_common_mac.cpp delete mode 100644 lib/lsan/lsan_interceptors.cc create mode 100644 
lib/lsan/lsan_interceptors.cpp delete mode 100644 lib/lsan/lsan_linux.cc create mode 100644 lib/lsan/lsan_linux.cpp delete mode 100644 lib/lsan/lsan_mac.cc create mode 100644 lib/lsan/lsan_mac.cpp delete mode 100644 lib/lsan/lsan_malloc_mac.cc create mode 100644 lib/lsan/lsan_malloc_mac.cpp delete mode 100644 lib/lsan/lsan_preinit.cc create mode 100644 lib/lsan/lsan_preinit.cpp delete mode 100644 lib/lsan/lsan_thread.cc create mode 100644 lib/lsan/lsan_thread.cpp delete mode 100644 lib/msan/msan.cc create mode 100644 lib/msan/msan.cpp delete mode 100644 lib/msan/msan_allocator.cc create mode 100644 lib/msan/msan_allocator.cpp delete mode 100644 lib/msan/msan_chained_origin_depot.cc create mode 100644 lib/msan/msan_chained_origin_depot.cpp delete mode 100644 lib/msan/msan_interceptors.cc create mode 100644 lib/msan/msan_interceptors.cpp delete mode 100644 lib/msan/msan_linux.cc create mode 100644 lib/msan/msan_linux.cpp delete mode 100644 lib/msan/msan_new_delete.cc create mode 100644 lib/msan/msan_new_delete.cpp delete mode 100644 lib/msan/msan_poisoning.cc create mode 100644 lib/msan/msan_poisoning.cpp delete mode 100644 lib/msan/msan_report.cc create mode 100644 lib/msan/msan_report.cpp delete mode 100644 lib/msan/msan_thread.cc create mode 100644 lib/msan/msan_thread.cpp delete mode 100644 lib/profile/InstrProfilingRuntime.cc create mode 100644 lib/profile/InstrProfilingRuntime.cpp delete mode 100644 lib/safestack/safestack.cc create mode 100644 lib/safestack/safestack.cpp delete mode 100644 lib/sanitizer_common/sancov_flags.cc create mode 100644 lib/sanitizer_common/sancov_flags.cpp delete mode 100644 lib/sanitizer_common/sanitizer_allocator.cc create mode 100644 lib/sanitizer_common/sanitizer_allocator.cpp delete mode 100644 lib/sanitizer_common/sanitizer_allocator_checks.cc create mode 100644 lib/sanitizer_common/sanitizer_allocator_checks.cpp delete mode 100644 lib/sanitizer_common/sanitizer_allocator_report.cc create mode 100644 lib/sanitizer_common/sanitizer_allocator_report.cpp delete mode 100644 lib/sanitizer_common/sanitizer_common.cc create mode 100644 lib/sanitizer_common/sanitizer_common.cpp delete mode 100644 lib/sanitizer_common/sanitizer_common_libcdep.cc create mode 100644 lib/sanitizer_common/sanitizer_common_libcdep.cpp delete mode 100644 lib/sanitizer_common/sanitizer_common_nolibc.cc create mode 100644 lib/sanitizer_common/sanitizer_common_nolibc.cpp delete mode 100644 lib/sanitizer_common/sanitizer_coverage_fuchsia.cc create mode 100644 lib/sanitizer_common/sanitizer_coverage_fuchsia.cpp delete mode 100644 lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc create mode 100644 lib/sanitizer_common/sanitizer_coverage_libcdep_new.cpp delete mode 100644 lib/sanitizer_common/sanitizer_coverage_win_dll_thunk.cc create mode 100644 lib/sanitizer_common/sanitizer_coverage_win_dll_thunk.cpp delete mode 100644 lib/sanitizer_common/sanitizer_coverage_win_dynamic_runtime_thunk.cc create mode 100644 lib/sanitizer_common/sanitizer_coverage_win_dynamic_runtime_thunk.cpp delete mode 100644 lib/sanitizer_common/sanitizer_coverage_win_sections.cc create mode 100644 lib/sanitizer_common/sanitizer_coverage_win_sections.cpp delete mode 100644 lib/sanitizer_common/sanitizer_coverage_win_weak_interception.cc create mode 100644 lib/sanitizer_common/sanitizer_coverage_win_weak_interception.cpp delete mode 100644 lib/sanitizer_common/sanitizer_deadlock_detector1.cc create mode 100644 lib/sanitizer_common/sanitizer_deadlock_detector1.cpp delete mode 100644 
lib/sanitizer_common/sanitizer_deadlock_detector2.cc create mode 100644 lib/sanitizer_common/sanitizer_deadlock_detector2.cpp delete mode 100644 lib/sanitizer_common/sanitizer_errno.cc create mode 100644 lib/sanitizer_common/sanitizer_errno.cpp delete mode 100644 lib/sanitizer_common/sanitizer_file.cc create mode 100644 lib/sanitizer_common/sanitizer_file.cpp delete mode 100644 lib/sanitizer_common/sanitizer_flag_parser.cc create mode 100644 lib/sanitizer_common/sanitizer_flag_parser.cpp delete mode 100644 lib/sanitizer_common/sanitizer_flags.cc create mode 100644 lib/sanitizer_common/sanitizer_flags.cpp delete mode 100644 lib/sanitizer_common/sanitizer_fuchsia.cc create mode 100644 lib/sanitizer_common/sanitizer_fuchsia.cpp create mode 100644 lib/sanitizer_common/sanitizer_glibc_version.h delete mode 100644 lib/sanitizer_common/sanitizer_libc.cc create mode 100644 lib/sanitizer_common/sanitizer_libc.cpp delete mode 100644 lib/sanitizer_common/sanitizer_libignore.cc create mode 100644 lib/sanitizer_common/sanitizer_libignore.cpp delete mode 100644 lib/sanitizer_common/sanitizer_linux.cc create mode 100644 lib/sanitizer_common/sanitizer_linux.cpp delete mode 100644 lib/sanitizer_common/sanitizer_linux_libcdep.cc create mode 100644 lib/sanitizer_common/sanitizer_linux_libcdep.cpp delete mode 100644 lib/sanitizer_common/sanitizer_linux_s390.cc create mode 100644 lib/sanitizer_common/sanitizer_linux_s390.cpp delete mode 100644 lib/sanitizer_common/sanitizer_mac.cc create mode 100644 lib/sanitizer_common/sanitizer_mac.cpp delete mode 100644 lib/sanitizer_common/sanitizer_mac_libcdep.cc create mode 100644 lib/sanitizer_common/sanitizer_mac_libcdep.cpp delete mode 100644 lib/sanitizer_common/sanitizer_netbsd.cc create mode 100644 lib/sanitizer_common/sanitizer_netbsd.cpp delete mode 100644 lib/sanitizer_common/sanitizer_openbsd.cc create mode 100644 lib/sanitizer_common/sanitizer_openbsd.cpp delete mode 100644 lib/sanitizer_common/sanitizer_persistent_allocator.cc create mode 100644 lib/sanitizer_common/sanitizer_persistent_allocator.cpp delete mode 100644 lib/sanitizer_common/sanitizer_platform_limits_freebsd.cc create mode 100644 lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp delete mode 100644 lib/sanitizer_common/sanitizer_platform_limits_linux.cc create mode 100644 lib/sanitizer_common/sanitizer_platform_limits_linux.cpp delete mode 100644 lib/sanitizer_common/sanitizer_platform_limits_netbsd.cc create mode 100644 lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp delete mode 100644 lib/sanitizer_common/sanitizer_platform_limits_openbsd.cc create mode 100644 lib/sanitizer_common/sanitizer_platform_limits_openbsd.cpp delete mode 100644 lib/sanitizer_common/sanitizer_platform_limits_posix.cc create mode 100644 lib/sanitizer_common/sanitizer_platform_limits_posix.cpp delete mode 100644 lib/sanitizer_common/sanitizer_platform_limits_solaris.cc create mode 100644 lib/sanitizer_common/sanitizer_platform_limits_solaris.cpp delete mode 100644 lib/sanitizer_common/sanitizer_posix.cc create mode 100644 lib/sanitizer_common/sanitizer_posix.cpp delete mode 100644 lib/sanitizer_common/sanitizer_posix_libcdep.cc create mode 100644 lib/sanitizer_common/sanitizer_posix_libcdep.cpp delete mode 100644 lib/sanitizer_common/sanitizer_printf.cc create mode 100644 lib/sanitizer_common/sanitizer_printf.cpp delete mode 100644 lib/sanitizer_common/sanitizer_procmaps_bsd.cc create mode 100644 lib/sanitizer_common/sanitizer_procmaps_bsd.cpp delete mode 100644 
lib/sanitizer_common/sanitizer_procmaps_common.cc create mode 100644 lib/sanitizer_common/sanitizer_procmaps_common.cpp delete mode 100644 lib/sanitizer_common/sanitizer_procmaps_linux.cc create mode 100644 lib/sanitizer_common/sanitizer_procmaps_linux.cpp delete mode 100644 lib/sanitizer_common/sanitizer_procmaps_mac.cc create mode 100644 lib/sanitizer_common/sanitizer_procmaps_mac.cpp delete mode 100644 lib/sanitizer_common/sanitizer_procmaps_solaris.cc create mode 100644 lib/sanitizer_common/sanitizer_procmaps_solaris.cpp delete mode 100644 lib/sanitizer_common/sanitizer_rtems.cc create mode 100644 lib/sanitizer_common/sanitizer_rtems.cpp delete mode 100644 lib/sanitizer_common/sanitizer_solaris.cc create mode 100644 lib/sanitizer_common/sanitizer_solaris.cpp delete mode 100644 lib/sanitizer_common/sanitizer_stackdepot.cc create mode 100644 lib/sanitizer_common/sanitizer_stackdepot.cpp delete mode 100644 lib/sanitizer_common/sanitizer_stacktrace.cc create mode 100644 lib/sanitizer_common/sanitizer_stacktrace.cpp delete mode 100644 lib/sanitizer_common/sanitizer_stacktrace_libcdep.cc create mode 100644 lib/sanitizer_common/sanitizer_stacktrace_libcdep.cpp delete mode 100644 lib/sanitizer_common/sanitizer_stacktrace_printer.cc create mode 100644 lib/sanitizer_common/sanitizer_stacktrace_printer.cpp delete mode 100644 lib/sanitizer_common/sanitizer_stacktrace_sparc.cc create mode 100644 lib/sanitizer_common/sanitizer_stacktrace_sparc.cpp delete mode 100644 lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc create mode 100644 lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp delete mode 100644 lib/sanitizer_common/sanitizer_stoptheworld_mac.cc create mode 100644 lib/sanitizer_common/sanitizer_stoptheworld_mac.cpp delete mode 100644 lib/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cc create mode 100644 lib/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cpp delete mode 100644 lib/sanitizer_common/sanitizer_suppressions.cc create mode 100644 lib/sanitizer_common/sanitizer_suppressions.cpp delete mode 100644 lib/sanitizer_common/sanitizer_symbolizer.cc create mode 100644 lib/sanitizer_common/sanitizer_symbolizer.cpp delete mode 100644 lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc create mode 100644 lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.cpp delete mode 100644 lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc create mode 100644 lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp delete mode 100644 lib/sanitizer_common/sanitizer_symbolizer_mac.cc create mode 100644 lib/sanitizer_common/sanitizer_symbolizer_mac.cpp delete mode 100644 lib/sanitizer_common/sanitizer_symbolizer_markup.cc create mode 100644 lib/sanitizer_common/sanitizer_symbolizer_markup.cpp delete mode 100644 lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc create mode 100644 lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp delete mode 100644 lib/sanitizer_common/sanitizer_symbolizer_report.cc create mode 100644 lib/sanitizer_common/sanitizer_symbolizer_report.cpp delete mode 100644 lib/sanitizer_common/sanitizer_symbolizer_win.cc create mode 100644 lib/sanitizer_common/sanitizer_symbolizer_win.cpp delete mode 100644 lib/sanitizer_common/sanitizer_termination.cc create mode 100644 lib/sanitizer_common/sanitizer_termination.cpp delete mode 100644 lib/sanitizer_common/sanitizer_thread_registry.cc create mode 100644 lib/sanitizer_common/sanitizer_thread_registry.cpp delete mode 100644 lib/sanitizer_common/sanitizer_tls_get_addr.cc create mode 
100644 lib/sanitizer_common/sanitizer_tls_get_addr.cpp delete mode 100644 lib/sanitizer_common/sanitizer_type_traits.cc create mode 100644 lib/sanitizer_common/sanitizer_type_traits.cpp delete mode 100644 lib/sanitizer_common/sanitizer_unwind_linux_libcdep.cc create mode 100644 lib/sanitizer_common/sanitizer_unwind_linux_libcdep.cpp delete mode 100644 lib/sanitizer_common/sanitizer_unwind_win.cc create mode 100644 lib/sanitizer_common/sanitizer_unwind_win.cpp delete mode 100644 lib/sanitizer_common/sanitizer_win.cc create mode 100644 lib/sanitizer_common/sanitizer_win.cpp delete mode 100644 lib/sanitizer_common/sanitizer_win_dll_thunk.cc create mode 100644 lib/sanitizer_common/sanitizer_win_dll_thunk.cpp delete mode 100644 lib/sanitizer_common/sanitizer_win_dynamic_runtime_thunk.cc create mode 100644 lib/sanitizer_common/sanitizer_win_dynamic_runtime_thunk.cpp delete mode 100644 lib/sanitizer_common/sanitizer_win_weak_interception.cc create mode 100644 lib/sanitizer_common/sanitizer_win_weak_interception.cpp delete mode 100644 lib/sanitizer_common/symbolizer/sanitizer_symbolize.cc create mode 100644 lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp delete mode 100644 lib/sanitizer_common/symbolizer/sanitizer_wrappers.cc create mode 100644 lib/sanitizer_common/symbolizer/sanitizer_wrappers.cpp delete mode 100644 lib/scudo/standalone/checksum.cc create mode 100644 lib/scudo/standalone/checksum.cpp delete mode 100644 lib/scudo/standalone/common.cc create mode 100644 lib/scudo/standalone/common.cpp delete mode 100644 lib/scudo/standalone/crc32_hw.cc create mode 100644 lib/scudo/standalone/crc32_hw.cpp delete mode 100644 lib/scudo/standalone/flags.cc create mode 100644 lib/scudo/standalone/flags.cpp delete mode 100644 lib/scudo/standalone/flags_parser.cc create mode 100644 lib/scudo/standalone/flags_parser.cpp delete mode 100644 lib/scudo/standalone/fuchsia.cc create mode 100644 lib/scudo/standalone/fuchsia.cpp delete mode 100644 lib/scudo/standalone/linux.cc create mode 100644 lib/scudo/standalone/linux.cpp delete mode 100644 lib/scudo/standalone/report.cc create mode 100644 lib/scudo/standalone/report.cpp delete mode 100644 lib/scudo/standalone/secondary.cc create mode 100644 lib/scudo/standalone/secondary.cpp delete mode 100644 lib/scudo/standalone/string_utils.cc create mode 100644 lib/scudo/standalone/string_utils.cpp delete mode 100644 lib/scudo/standalone/wrappers_c.cc create mode 100644 lib/scudo/standalone/wrappers_c.cpp delete mode 100644 lib/scudo/standalone/wrappers_c_bionic.cc create mode 100644 lib/scudo/standalone/wrappers_c_bionic.cpp delete mode 100644 lib/scudo/standalone/wrappers_cpp.cc create mode 100644 lib/scudo/standalone/wrappers_cpp.cpp delete mode 100644 lib/stats/stats.cc create mode 100644 lib/stats/stats.cpp delete mode 100644 lib/stats/stats_client.cc create mode 100644 lib/stats/stats_client.cpp delete mode 100644 lib/tsan/benchmarks/func_entry_exit.cc create mode 100644 lib/tsan/benchmarks/func_entry_exit.cpp delete mode 100644 lib/tsan/benchmarks/mini_bench_local.cc create mode 100644 lib/tsan/benchmarks/mini_bench_local.cpp delete mode 100644 lib/tsan/benchmarks/mini_bench_shared.cc create mode 100644 lib/tsan/benchmarks/mini_bench_shared.cpp delete mode 100644 lib/tsan/benchmarks/mop.cc create mode 100644 lib/tsan/benchmarks/mop.cpp delete mode 100644 lib/tsan/benchmarks/start_many_threads.cc create mode 100644 lib/tsan/benchmarks/start_many_threads.cpp delete mode 100644 lib/tsan/benchmarks/vts_many_threads_bench.cc create mode 100644 
lib/tsan/benchmarks/vts_many_threads_bench.cpp delete mode 100644 lib/tsan/dd/dd_interceptors.cc create mode 100644 lib/tsan/dd/dd_interceptors.cpp delete mode 100644 lib/tsan/dd/dd_rtl.cc create mode 100644 lib/tsan/dd/dd_rtl.cpp delete mode 100644 lib/tsan/go/tsan_go.cc create mode 100644 lib/tsan/go/tsan_go.cpp delete mode 100644 lib/tsan/rtl/tsan_clock.cc create mode 100644 lib/tsan/rtl/tsan_clock.cpp delete mode 100644 lib/tsan/rtl/tsan_debugging.cc create mode 100644 lib/tsan/rtl/tsan_debugging.cpp delete mode 100644 lib/tsan/rtl/tsan_external.cc create mode 100644 lib/tsan/rtl/tsan_external.cpp delete mode 100644 lib/tsan/rtl/tsan_fd.cc create mode 100644 lib/tsan/rtl/tsan_fd.cpp delete mode 100644 lib/tsan/rtl/tsan_flags.cc create mode 100644 lib/tsan/rtl/tsan_flags.cpp delete mode 100644 lib/tsan/rtl/tsan_ignoreset.cc create mode 100644 lib/tsan/rtl/tsan_ignoreset.cpp delete mode 100644 lib/tsan/rtl/tsan_interceptors.cc create mode 100644 lib/tsan/rtl/tsan_interceptors_libdispatch.cpp delete mode 100644 lib/tsan/rtl/tsan_interceptors_mac.cc create mode 100644 lib/tsan/rtl/tsan_interceptors_mac.cpp create mode 100644 lib/tsan/rtl/tsan_interceptors_mach_vm.cpp create mode 100644 lib/tsan/rtl/tsan_interceptors_posix.cpp delete mode 100644 lib/tsan/rtl/tsan_interface.cc create mode 100644 lib/tsan/rtl/tsan_interface.cpp delete mode 100644 lib/tsan/rtl/tsan_interface_ann.cc create mode 100644 lib/tsan/rtl/tsan_interface_ann.cpp delete mode 100644 lib/tsan/rtl/tsan_interface_atomic.cc create mode 100644 lib/tsan/rtl/tsan_interface_atomic.cpp delete mode 100644 lib/tsan/rtl/tsan_interface_java.cc create mode 100644 lib/tsan/rtl/tsan_interface_java.cpp delete mode 100644 lib/tsan/rtl/tsan_libdispatch.cc delete mode 100644 lib/tsan/rtl/tsan_malloc_mac.cc create mode 100644 lib/tsan/rtl/tsan_malloc_mac.cpp delete mode 100644 lib/tsan/rtl/tsan_md5.cc create mode 100644 lib/tsan/rtl/tsan_md5.cpp delete mode 100644 lib/tsan/rtl/tsan_mman.cc create mode 100644 lib/tsan/rtl/tsan_mman.cpp delete mode 100644 lib/tsan/rtl/tsan_mutex.cc create mode 100644 lib/tsan/rtl/tsan_mutex.cpp delete mode 100644 lib/tsan/rtl/tsan_mutexset.cc create mode 100644 lib/tsan/rtl/tsan_mutexset.cpp delete mode 100644 lib/tsan/rtl/tsan_new_delete.cc create mode 100644 lib/tsan/rtl/tsan_new_delete.cpp delete mode 100644 lib/tsan/rtl/tsan_platform_linux.cc create mode 100644 lib/tsan/rtl/tsan_platform_linux.cpp delete mode 100644 lib/tsan/rtl/tsan_platform_mac.cc create mode 100644 lib/tsan/rtl/tsan_platform_mac.cpp delete mode 100644 lib/tsan/rtl/tsan_platform_posix.cc create mode 100644 lib/tsan/rtl/tsan_platform_posix.cpp delete mode 100644 lib/tsan/rtl/tsan_platform_windows.cc create mode 100644 lib/tsan/rtl/tsan_platform_windows.cpp delete mode 100644 lib/tsan/rtl/tsan_preinit.cc create mode 100644 lib/tsan/rtl/tsan_preinit.cpp delete mode 100644 lib/tsan/rtl/tsan_report.cc create mode 100644 lib/tsan/rtl/tsan_report.cpp delete mode 100644 lib/tsan/rtl/tsan_rtl.cc create mode 100644 lib/tsan/rtl/tsan_rtl.cpp delete mode 100644 lib/tsan/rtl/tsan_rtl_mutex.cc create mode 100644 lib/tsan/rtl/tsan_rtl_mutex.cpp delete mode 100644 lib/tsan/rtl/tsan_rtl_proc.cc create mode 100644 lib/tsan/rtl/tsan_rtl_proc.cpp delete mode 100644 lib/tsan/rtl/tsan_rtl_report.cc create mode 100644 lib/tsan/rtl/tsan_rtl_report.cpp delete mode 100644 lib/tsan/rtl/tsan_rtl_thread.cc create mode 100644 lib/tsan/rtl/tsan_rtl_thread.cpp delete mode 100644 lib/tsan/rtl/tsan_stack_trace.cc create mode 100644 lib/tsan/rtl/tsan_stack_trace.cpp delete 
mode 100644 lib/tsan/rtl/tsan_stat.cc create mode 100644 lib/tsan/rtl/tsan_stat.cpp delete mode 100644 lib/tsan/rtl/tsan_suppressions.cc create mode 100644 lib/tsan/rtl/tsan_suppressions.cpp delete mode 100644 lib/tsan/rtl/tsan_symbolize.cc create mode 100644 lib/tsan/rtl/tsan_symbolize.cpp delete mode 100644 lib/tsan/rtl/tsan_sync.cc create mode 100644 lib/tsan/rtl/tsan_sync.cpp delete mode 100644 lib/ubsan/ubsan_diag.cc create mode 100644 lib/ubsan/ubsan_diag.cpp delete mode 100644 lib/ubsan/ubsan_diag_standalone.cc create mode 100644 lib/ubsan/ubsan_diag_standalone.cpp delete mode 100644 lib/ubsan/ubsan_flags.cc create mode 100644 lib/ubsan/ubsan_flags.cpp delete mode 100644 lib/ubsan/ubsan_handlers.cc create mode 100644 lib/ubsan/ubsan_handlers.cpp delete mode 100644 lib/ubsan/ubsan_handlers_cxx.cc create mode 100644 lib/ubsan/ubsan_handlers_cxx.cpp delete mode 100644 lib/ubsan/ubsan_init.cc create mode 100644 lib/ubsan/ubsan_init.cpp delete mode 100644 lib/ubsan/ubsan_init_standalone.cc create mode 100644 lib/ubsan/ubsan_init_standalone.cpp delete mode 100644 lib/ubsan/ubsan_init_standalone_preinit.cc create mode 100644 lib/ubsan/ubsan_init_standalone_preinit.cpp delete mode 100644 lib/ubsan/ubsan_monitor.cc create mode 100644 lib/ubsan/ubsan_monitor.cpp delete mode 100644 lib/ubsan/ubsan_signals_standalone.cc create mode 100644 lib/ubsan/ubsan_signals_standalone.cpp delete mode 100644 lib/ubsan/ubsan_type_hash.cc create mode 100644 lib/ubsan/ubsan_type_hash.cpp delete mode 100644 lib/ubsan/ubsan_type_hash_itanium.cc create mode 100644 lib/ubsan/ubsan_type_hash_itanium.cpp delete mode 100644 lib/ubsan/ubsan_type_hash_win.cc create mode 100644 lib/ubsan/ubsan_type_hash_win.cpp delete mode 100644 lib/ubsan/ubsan_value.cc create mode 100644 lib/ubsan/ubsan_value.cpp delete mode 100644 lib/ubsan/ubsan_win_dll_thunk.cc create mode 100644 lib/ubsan/ubsan_win_dll_thunk.cpp delete mode 100644 lib/ubsan/ubsan_win_dynamic_runtime_thunk.cc create mode 100644 lib/ubsan/ubsan_win_dynamic_runtime_thunk.cpp delete mode 100644 lib/ubsan/ubsan_win_weak_interception.cc create mode 100644 lib/ubsan/ubsan_win_weak_interception.cpp delete mode 100644 lib/ubsan_minimal/ubsan_minimal_handlers.cc create mode 100644 lib/ubsan_minimal/ubsan_minimal_handlers.cpp delete mode 100644 lib/xray/xray_AArch64.cc create mode 100644 lib/xray/xray_AArch64.cpp delete mode 100644 lib/xray/xray_arm.cc create mode 100644 lib/xray/xray_arm.cpp delete mode 100644 lib/xray/xray_basic_flags.cc create mode 100644 lib/xray/xray_basic_flags.cpp delete mode 100644 lib/xray/xray_basic_logging.cc create mode 100644 lib/xray/xray_basic_logging.cpp delete mode 100644 lib/xray/xray_buffer_queue.cc create mode 100644 lib/xray/xray_buffer_queue.cpp delete mode 100644 lib/xray/xray_fdr_flags.cc create mode 100644 lib/xray/xray_fdr_flags.cpp delete mode 100644 lib/xray/xray_fdr_logging.cc create mode 100644 lib/xray/xray_fdr_logging.cpp delete mode 100644 lib/xray/xray_flags.cc create mode 100644 lib/xray/xray_flags.cpp delete mode 100644 lib/xray/xray_init.cc create mode 100644 lib/xray/xray_init.cpp delete mode 100644 lib/xray/xray_interface.cc create mode 100644 lib/xray/xray_interface.cpp delete mode 100644 lib/xray/xray_log_interface.cc create mode 100644 lib/xray/xray_log_interface.cpp delete mode 100644 lib/xray/xray_mips.cc create mode 100644 lib/xray/xray_mips.cpp delete mode 100644 lib/xray/xray_mips64.cc create mode 100644 lib/xray/xray_mips64.cpp delete mode 100644 lib/xray/xray_powerpc64.cc create mode 100644 
lib/xray/xray_powerpc64.cpp delete mode 100644 lib/xray/xray_profile_collector.cc create mode 100644 lib/xray/xray_profile_collector.cpp delete mode 100644 lib/xray/xray_profiling.cc create mode 100644 lib/xray/xray_profiling.cpp delete mode 100644 lib/xray/xray_profiling_flags.cc create mode 100644 lib/xray/xray_profiling_flags.cpp delete mode 100644 lib/xray/xray_trampoline_powerpc64.cc create mode 100644 lib/xray/xray_trampoline_powerpc64.cpp delete mode 100644 lib/xray/xray_utils.cc create mode 100644 lib/xray/xray_utils.cpp delete mode 100644 lib/xray/xray_x86_64.cc create mode 100644 lib/xray/xray_x86_64.cpp create mode 100644 tools/CMakeLists.txt create mode 100644 tools/gwp_asan/CMakeLists.txt create mode 100644 tools/gwp_asan/stack_trace_compressor_fuzzer.cpp diff --git a/include/fuzzer/FuzzedDataProvider.h b/include/fuzzer/FuzzedDataProvider.h new file mode 100644 index 000000000000..fd895b767d9e --- /dev/null +++ b/include/fuzzer/FuzzedDataProvider.h @@ -0,0 +1,299 @@ +//===- FuzzedDataProvider.h - Utility header for fuzz targets ---*- C++ -* ===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// A single header library providing an utility class to break up an array of +// bytes. Whenever run on the same input, provides the same output, as long as +// its methods are called in the same order, with the same arguments. +//===----------------------------------------------------------------------===// + +#ifndef LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_ +#define LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// In addition to the comments below, the API is also briefly documented at +// https://github.com/google/fuzzing/blob/master/docs/split-inputs.md#fuzzed-data-provider +class FuzzedDataProvider { + public: + // |data| is an array of length |size| that the FuzzedDataProvider wraps to + // provide more granular access. |data| must outlive the FuzzedDataProvider. + FuzzedDataProvider(const uint8_t *data, size_t size) + : data_ptr_(data), remaining_bytes_(size) {} + ~FuzzedDataProvider() = default; + + // Returns a std::vector containing |num_bytes| of input data. If fewer than + // |num_bytes| of data remain, returns a shorter std::vector containing all + // of the data that's left. Can be used with any byte sized type, such as + // char, unsigned char, uint8_t, etc. + template std::vector ConsumeBytes(size_t num_bytes) { + num_bytes = std::min(num_bytes, remaining_bytes_); + return ConsumeBytes(num_bytes, num_bytes); + } + + // Similar to |ConsumeBytes|, but also appends the terminator value at the end + // of the resulting vector. Useful, when a mutable null-terminated C-string is + // needed, for example. But that is a rare case. Better avoid it, if possible, + // and prefer using |ConsumeBytes| or |ConsumeBytesAsString| methods. + template + std::vector ConsumeBytesWithTerminator(size_t num_bytes, + T terminator = 0) { + num_bytes = std::min(num_bytes, remaining_bytes_); + std::vector result = ConsumeBytes(num_bytes + 1, num_bytes); + result.back() = terminator; + return result; + } + + // Returns a std::string containing |num_bytes| of input data. 
Using this and + // |.c_str()| on the resulting string is the best way to get an immutable + // null-terminated C string. If fewer than |num_bytes| of data remain, returns + // a shorter std::string containing all of the data that's left. + std::string ConsumeBytesAsString(size_t num_bytes) { + static_assert(sizeof(std::string::value_type) == sizeof(uint8_t), + "ConsumeBytesAsString cannot convert the data to a string."); + + num_bytes = std::min(num_bytes, remaining_bytes_); + std::string result( + reinterpret_cast(data_ptr_), + num_bytes); + Advance(num_bytes); + return result; + } + + // Returns a number in the range [min, max] by consuming bytes from the + // input data. The value might not be uniformly distributed in the given + // range. If there's no input data left, always returns |min|. |min| must + // be less than or equal to |max|. + template T ConsumeIntegralInRange(T min, T max) { + static_assert(std::is_integral::value, "An integral type is required."); + static_assert(sizeof(T) <= sizeof(uint64_t), "Unsupported integral type."); + + if (min > max) + abort(); + + // Use the biggest type possible to hold the range and the result. + uint64_t range = static_cast(max) - min; + uint64_t result = 0; + size_t offset = 0; + + while (offset < sizeof(T) * CHAR_BIT && (range >> offset) > 0 && + remaining_bytes_ != 0) { + // Pull bytes off the end of the seed data. Experimentally, this seems to + // allow the fuzzer to more easily explore the input space. This makes + // sense, since it works by modifying inputs that caused new code to run, + // and this data is often used to encode length of data read by + // |ConsumeBytes|. Separating out read lengths makes it easier modify the + // contents of the data that is actually read. + --remaining_bytes_; + result = (result << CHAR_BIT) | data_ptr_[remaining_bytes_]; + offset += CHAR_BIT; + } + + // Avoid division by 0, in case |range + 1| results in overflow. + if (range != std::numeric_limits::max()) + result = result % (range + 1); + + return static_cast(min + result); + } + + // Returns a std::string of length from 0 to |max_length|. When it runs out of + // input data, returns what remains of the input. Designed to be more stable + // with respect to a fuzzer inserting characters than just picking a random + // length and then consuming that many bytes with |ConsumeBytes|. + std::string ConsumeRandomLengthString(size_t max_length) { + // Reads bytes from the start of |data_ptr_|. Maps "\\" to "\", and maps "\" + // followed by anything else to the end of the string. As a result of this + // logic, a fuzzer can insert characters into the string, and the string + // will be lengthened to include those new characters, resulting in a more + // stable fuzzer than picking the length of a string independently from + // picking its contents. + std::string result; + + // Reserve the anticipated capaticity to prevent several reallocations. + result.reserve(std::min(max_length, remaining_bytes_)); + for (size_t i = 0; i < max_length && remaining_bytes_ != 0; ++i) { + char next = ConvertUnsignedToSigned(data_ptr_[0]); + Advance(1); + if (next == '\\' && remaining_bytes_ != 0) { + next = ConvertUnsignedToSigned(data_ptr_[0]); + Advance(1); + if (next != '\\') + break; + } + result += next; + } + + result.shrink_to_fit(); + return result; + } + + // Returns a std::vector containing all remaining bytes of the input data. 
+ template std::vector ConsumeRemainingBytes() { + return ConsumeBytes(remaining_bytes_); + } + + // Returns a std::string containing all remaining bytes of the input data. + // Prefer using |ConsumeRemainingBytes| unless you actually need a std::string + // object. + std::string ConsumeRemainingBytesAsString() { + return ConsumeBytesAsString(remaining_bytes_); + } + + // Returns a number in the range [Type's min, Type's max]. The value might + // not be uniformly distributed in the given range. If there's no input data + // left, always returns |min|. + template T ConsumeIntegral() { + return ConsumeIntegralInRange(std::numeric_limits::min(), + std::numeric_limits::max()); + } + + // Reads one byte and returns a bool, or false when no data remains. + bool ConsumeBool() { return 1 & ConsumeIntegral(); } + + // Returns a copy of the value selected from the given fixed-size |array|. + template + T PickValueInArray(const T (&array)[size]) { + static_assert(size > 0, "The array must be non empty."); + return array[ConsumeIntegralInRange(0, size - 1)]; + } + + template + T PickValueInArray(std::initializer_list list) { + // TODO(Dor1s): switch to static_assert once C++14 is allowed. + if (!list.size()) + abort(); + + return *(list.begin() + ConsumeIntegralInRange(0, list.size() - 1)); + } + + // Returns an enum value. The enum must start at 0 and be contiguous. It must + // also contain |kMaxValue| aliased to its largest (inclusive) value. Such as: + // enum class Foo { SomeValue, OtherValue, kMaxValue = OtherValue }; + template T ConsumeEnum() { + static_assert(std::is_enum::value, "|T| must be an enum type."); + return static_cast(ConsumeIntegralInRange( + 0, static_cast(T::kMaxValue))); + } + + // Returns a floating point number in the range [0.0, 1.0]. If there's no + // input data left, always returns 0. + template T ConsumeProbability() { + static_assert(std::is_floating_point::value, + "A floating point type is required."); + + // Use different integral types for different floating point types in order + // to provide better density of the resulting values. + using IntegralType = + typename std::conditional<(sizeof(T) <= sizeof(uint32_t)), uint32_t, + uint64_t>::type; + + T result = static_cast(ConsumeIntegral()); + result /= static_cast(std::numeric_limits::max()); + return result; + } + + // Returns a floating point value in the range [Type's lowest, Type's max] by + // consuming bytes from the input data. If there's no input data left, always + // returns approximately 0. + template T ConsumeFloatingPoint() { + return ConsumeFloatingPointInRange(std::numeric_limits::lowest(), + std::numeric_limits::max()); + } + + // Returns a floating point value in the given range by consuming bytes from + // the input data. If there's no input data left, returns |min|. Note that + // |min| must be less than or equal to |max|. + template T ConsumeFloatingPointInRange(T min, T max) { + if (min > max) + abort(); + + T range = .0; + T result = min; + constexpr T zero(.0); + if (max > zero && min < zero && max > min + std::numeric_limits::max()) { + // The diff |max - min| would overflow the given floating point type. Use + // the half of the diff as the range and consume a bool to decide whether + // the result is in the first of the second part of the diff. + range = (max / 2.0) - (min / 2.0); + if (ConsumeBool()) { + result += range; + } + } else { + range = max - min; + } + + return result + range * ConsumeProbability(); + } + + // Reports the remaining bytes available for fuzzed input. 
+ size_t remaining_bytes() { return remaining_bytes_; } + + private: + FuzzedDataProvider(const FuzzedDataProvider &) = delete; + FuzzedDataProvider &operator=(const FuzzedDataProvider &) = delete; + + void Advance(size_t num_bytes) { + if (num_bytes > remaining_bytes_) + abort(); + + data_ptr_ += num_bytes; + remaining_bytes_ -= num_bytes; + } + + template + std::vector ConsumeBytes(size_t size, size_t num_bytes_to_consume) { + static_assert(sizeof(T) == sizeof(uint8_t), "Incompatible data type."); + + // The point of using the size-based constructor below is to increase the + // odds of having a vector object with capacity being equal to the length. + // That part is always implementation specific, but at least both libc++ and + // libstdc++ allocate the requested number of bytes in that constructor, + // which seems to be a natural choice for other implementations as well. + // To increase the odds even more, we also call |shrink_to_fit| below. + std::vector result(size); + std::memcpy(result.data(), data_ptr_, num_bytes_to_consume); + Advance(num_bytes_to_consume); + + // Even though |shrink_to_fit| is also implementation specific, we expect it + // to provide an additional assurance in case vector's constructor allocated + // a buffer which is larger than the actual amount of data we put inside it. + result.shrink_to_fit(); + return result; + } + + template TS ConvertUnsignedToSigned(TU value) { + static_assert(sizeof(TS) == sizeof(TU), "Incompatible data types."); + static_assert(!std::numeric_limits::is_signed, + "Source type must be unsigned."); + + // TODO(Dor1s): change to `if constexpr` once C++17 becomes mainstream. + if (std::numeric_limits::is_modulo) + return static_cast(value); + + // Avoid using implementation-defined unsigned to signer conversions. + // To learn more, see https://stackoverflow.com/questions/13150449. 
+ if (value <= std::numeric_limits::max()) { + return static_cast(value); + } else { + constexpr auto TS_min = std::numeric_limits::min(); + return TS_min + static_cast(value - TS_min); + } + } + + const uint8_t *data_ptr_; + size_t remaining_bytes_; +}; + +#endif // LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_ diff --git a/include/sanitizer/dfsan_interface.h b/include/sanitizer/dfsan_interface.h index c189ee55790a..81546e5df71a 100644 --- a/include/sanitizer/dfsan_interface.h +++ b/include/sanitizer/dfsan_interface.h @@ -112,7 +112,7 @@ void dfsan_weak_hook_strncmp(void *caller_pc, const char *s1, const char *s2, } // extern "C" template -void dfsan_set_label(dfsan_label label, T &data) { // NOLINT +void dfsan_set_label(dfsan_label label, T &data) { // NOLINT dfsan_set_label(label, (void *)&data, sizeof(T)); } diff --git a/include/sanitizer/tsan_interface_atomic.h b/include/sanitizer/tsan_interface_atomic.h index 9ce0411917df..8052bc1d56b3 100644 --- a/include/sanitizer/tsan_interface_atomic.h +++ b/include/sanitizer/tsan_interface_atomic.h @@ -17,10 +17,10 @@ extern "C" { #endif -typedef char __tsan_atomic8; -typedef short __tsan_atomic16; // NOLINT -typedef int __tsan_atomic32; -typedef long __tsan_atomic64; // NOLINT +typedef char __tsan_atomic8; +typedef short __tsan_atomic16; +typedef int __tsan_atomic32; +typedef long __tsan_atomic64; #if defined(__SIZEOF_INT128__) \ || (__clang_major__ * 100 + __clang_minor__ >= 302) __extension__ typedef __int128 __tsan_atomic128; diff --git a/include/sanitizer/ubsan_interface.h b/include/sanitizer/ubsan_interface.h new file mode 100644 index 000000000000..59fc6c3c184c --- /dev/null +++ b/include/sanitizer/ubsan_interface.h @@ -0,0 +1,32 @@ +//===-- sanitizer/ubsan_interface.h -----------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of UBSanitizer (UBSan). +// +// Public interface header. +//===----------------------------------------------------------------------===// +#ifndef SANITIZER_UBSAN_INTERFACE_H +#define SANITIZER_UBSAN_INTERFACE_H + +#ifdef __cplusplus +extern "C" { +#endif +/// User-provided default option settings. +/// +/// You can provide your own implementation of this function to return a string +/// containing UBSan runtime options (for example, +/// verbosity=1:halt_on_error=0). +/// +/// \returns Default options string. +const char* __ubsan_default_options(void); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // SANITIZER_UBSAN_INTERFACE_H diff --git a/lib/asan/asan_activation.cc b/lib/asan/asan_activation.cc deleted file mode 100644 index fc97cbb554d0..000000000000 --- a/lib/asan/asan_activation.cc +++ /dev/null @@ -1,143 +0,0 @@ -//===-- asan_activation.cc --------------------------------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// ASan activation/deactivation logic. 
-//===----------------------------------------------------------------------===// - -#include "asan_activation.h" -#include "asan_allocator.h" -#include "asan_flags.h" -#include "asan_internal.h" -#include "asan_mapping.h" -#include "asan_poisoning.h" -#include "asan_stack.h" -#include "sanitizer_common/sanitizer_common.h" -#include "sanitizer_common/sanitizer_flags.h" - -namespace __asan { - -static struct AsanDeactivatedFlags { - AllocatorOptions allocator_options; - int malloc_context_size; - bool poison_heap; - bool coverage; - const char *coverage_dir; - - void RegisterActivationFlags(FlagParser *parser, Flags *f, CommonFlags *cf) { -#define ASAN_ACTIVATION_FLAG(Type, Name) \ - RegisterFlag(parser, #Name, "", &f->Name); -#define COMMON_ACTIVATION_FLAG(Type, Name) \ - RegisterFlag(parser, #Name, "", &cf->Name); -#include "asan_activation_flags.inc" -#undef ASAN_ACTIVATION_FLAG -#undef COMMON_ACTIVATION_FLAG - - RegisterIncludeFlags(parser, cf); - } - - void OverrideFromActivationFlags() { - Flags f; - CommonFlags cf; - FlagParser parser; - RegisterActivationFlags(&parser, &f, &cf); - - cf.SetDefaults(); - // Copy the current activation flags. - allocator_options.CopyTo(&f, &cf); - cf.malloc_context_size = malloc_context_size; - f.poison_heap = poison_heap; - cf.coverage = coverage; - cf.coverage_dir = coverage_dir; - cf.verbosity = Verbosity(); - cf.help = false; // this is activation-specific help - - // Check if activation flags need to be overriden. - if (const char *env = GetEnv("ASAN_ACTIVATION_OPTIONS")) { - parser.ParseString(env); - } - - InitializeCommonFlags(&cf); - - if (Verbosity()) ReportUnrecognizedFlags(); - - if (cf.help) parser.PrintFlagDescriptions(); - - allocator_options.SetFrom(&f, &cf); - malloc_context_size = cf.malloc_context_size; - poison_heap = f.poison_heap; - coverage = cf.coverage; - coverage_dir = cf.coverage_dir; - } - - void Print() { - Report( - "quarantine_size_mb %d, thread_local_quarantine_size_kb %d, " - "max_redzone %d, poison_heap %d, malloc_context_size %d, " - "alloc_dealloc_mismatch %d, allocator_may_return_null %d, coverage %d, " - "coverage_dir %s, allocator_release_to_os_interval_ms %d\n", - allocator_options.quarantine_size_mb, - allocator_options.thread_local_quarantine_size_kb, - allocator_options.max_redzone, poison_heap, malloc_context_size, - allocator_options.alloc_dealloc_mismatch, - allocator_options.may_return_null, coverage, coverage_dir, - allocator_options.release_to_os_interval_ms); - } -} asan_deactivated_flags; - -static bool asan_is_deactivated; - -void AsanDeactivate() { - CHECK(!asan_is_deactivated); - VReport(1, "Deactivating ASan\n"); - - // Stash runtime state. - GetAllocatorOptions(&asan_deactivated_flags.allocator_options); - asan_deactivated_flags.malloc_context_size = GetMallocContextSize(); - asan_deactivated_flags.poison_heap = CanPoisonMemory(); - asan_deactivated_flags.coverage = common_flags()->coverage; - asan_deactivated_flags.coverage_dir = common_flags()->coverage_dir; - - // Deactivate the runtime. - SetCanPoisonMemory(false); - SetMallocContextSize(1); - - AllocatorOptions disabled = asan_deactivated_flags.allocator_options; - disabled.quarantine_size_mb = 0; - disabled.thread_local_quarantine_size_kb = 0; - // Redzone must be at least Max(16, granularity) bytes long. 
- disabled.min_redzone = Max(16, (int)SHADOW_GRANULARITY); - disabled.max_redzone = disabled.min_redzone; - disabled.alloc_dealloc_mismatch = false; - disabled.may_return_null = true; - ReInitializeAllocator(disabled); - - asan_is_deactivated = true; -} - -void AsanActivate() { - if (!asan_is_deactivated) return; - VReport(1, "Activating ASan\n"); - - UpdateProcessName(); - - asan_deactivated_flags.OverrideFromActivationFlags(); - - SetCanPoisonMemory(asan_deactivated_flags.poison_heap); - SetMallocContextSize(asan_deactivated_flags.malloc_context_size); - ReInitializeAllocator(asan_deactivated_flags.allocator_options); - - asan_is_deactivated = false; - if (Verbosity()) { - Report("Activated with flags:\n"); - asan_deactivated_flags.Print(); - } -} - -} // namespace __asan diff --git a/lib/asan/asan_activation.cpp b/lib/asan/asan_activation.cpp new file mode 100644 index 000000000000..795df95a5414 --- /dev/null +++ b/lib/asan/asan_activation.cpp @@ -0,0 +1,143 @@ +//===-- asan_activation.cpp -------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan activation/deactivation logic. +//===----------------------------------------------------------------------===// + +#include "asan_activation.h" +#include "asan_allocator.h" +#include "asan_flags.h" +#include "asan_internal.h" +#include "asan_mapping.h" +#include "asan_poisoning.h" +#include "asan_stack.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_flags.h" + +namespace __asan { + +static struct AsanDeactivatedFlags { + AllocatorOptions allocator_options; + int malloc_context_size; + bool poison_heap; + bool coverage; + const char *coverage_dir; + + void RegisterActivationFlags(FlagParser *parser, Flags *f, CommonFlags *cf) { +#define ASAN_ACTIVATION_FLAG(Type, Name) \ + RegisterFlag(parser, #Name, "", &f->Name); +#define COMMON_ACTIVATION_FLAG(Type, Name) \ + RegisterFlag(parser, #Name, "", &cf->Name); +#include "asan_activation_flags.inc" +#undef ASAN_ACTIVATION_FLAG +#undef COMMON_ACTIVATION_FLAG + + RegisterIncludeFlags(parser, cf); + } + + void OverrideFromActivationFlags() { + Flags f; + CommonFlags cf; + FlagParser parser; + RegisterActivationFlags(&parser, &f, &cf); + + cf.SetDefaults(); + // Copy the current activation flags. + allocator_options.CopyTo(&f, &cf); + cf.malloc_context_size = malloc_context_size; + f.poison_heap = poison_heap; + cf.coverage = coverage; + cf.coverage_dir = coverage_dir; + cf.verbosity = Verbosity(); + cf.help = false; // this is activation-specific help + + // Check if activation flags need to be overriden. 
+ if (const char *env = GetEnv("ASAN_ACTIVATION_OPTIONS")) { + parser.ParseString(env); + } + + InitializeCommonFlags(&cf); + + if (Verbosity()) ReportUnrecognizedFlags(); + + if (cf.help) parser.PrintFlagDescriptions(); + + allocator_options.SetFrom(&f, &cf); + malloc_context_size = cf.malloc_context_size; + poison_heap = f.poison_heap; + coverage = cf.coverage; + coverage_dir = cf.coverage_dir; + } + + void Print() { + Report( + "quarantine_size_mb %d, thread_local_quarantine_size_kb %d, " + "max_redzone %d, poison_heap %d, malloc_context_size %d, " + "alloc_dealloc_mismatch %d, allocator_may_return_null %d, coverage %d, " + "coverage_dir %s, allocator_release_to_os_interval_ms %d\n", + allocator_options.quarantine_size_mb, + allocator_options.thread_local_quarantine_size_kb, + allocator_options.max_redzone, poison_heap, malloc_context_size, + allocator_options.alloc_dealloc_mismatch, + allocator_options.may_return_null, coverage, coverage_dir, + allocator_options.release_to_os_interval_ms); + } +} asan_deactivated_flags; + +static bool asan_is_deactivated; + +void AsanDeactivate() { + CHECK(!asan_is_deactivated); + VReport(1, "Deactivating ASan\n"); + + // Stash runtime state. + GetAllocatorOptions(&asan_deactivated_flags.allocator_options); + asan_deactivated_flags.malloc_context_size = GetMallocContextSize(); + asan_deactivated_flags.poison_heap = CanPoisonMemory(); + asan_deactivated_flags.coverage = common_flags()->coverage; + asan_deactivated_flags.coverage_dir = common_flags()->coverage_dir; + + // Deactivate the runtime. + SetCanPoisonMemory(false); + SetMallocContextSize(1); + + AllocatorOptions disabled = asan_deactivated_flags.allocator_options; + disabled.quarantine_size_mb = 0; + disabled.thread_local_quarantine_size_kb = 0; + // Redzone must be at least Max(16, granularity) bytes long. + disabled.min_redzone = Max(16, (int)SHADOW_GRANULARITY); + disabled.max_redzone = disabled.min_redzone; + disabled.alloc_dealloc_mismatch = false; + disabled.may_return_null = true; + ReInitializeAllocator(disabled); + + asan_is_deactivated = true; +} + +void AsanActivate() { + if (!asan_is_deactivated) return; + VReport(1, "Activating ASan\n"); + + UpdateProcessName(); + + asan_deactivated_flags.OverrideFromActivationFlags(); + + SetCanPoisonMemory(asan_deactivated_flags.poison_heap); + SetMallocContextSize(asan_deactivated_flags.malloc_context_size); + ReInitializeAllocator(asan_deactivated_flags.allocator_options); + + asan_is_deactivated = false; + if (Verbosity()) { + Report("Activated with flags:\n"); + asan_deactivated_flags.Print(); + } +} + +} // namespace __asan diff --git a/lib/asan/asan_allocator.cc b/lib/asan/asan_allocator.cc deleted file mode 100644 index 2ca6220d8fd1..000000000000 --- a/lib/asan/asan_allocator.cc +++ /dev/null @@ -1,1119 +0,0 @@ -//===-- asan_allocator.cc -------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Implementation of ASan's memory allocator, 2-nd version. -// This variant uses the allocator from sanitizer_common, i.e. the one shared -// with ThreadSanitizer and MemorySanitizer. 
-// -//===----------------------------------------------------------------------===// - -#include "asan_allocator.h" -#include "asan_mapping.h" -#include "asan_poisoning.h" -#include "asan_report.h" -#include "asan_stack.h" -#include "asan_thread.h" -#include "sanitizer_common/sanitizer_allocator_checks.h" -#include "sanitizer_common/sanitizer_allocator_interface.h" -#include "sanitizer_common/sanitizer_errno.h" -#include "sanitizer_common/sanitizer_flags.h" -#include "sanitizer_common/sanitizer_internal_defs.h" -#include "sanitizer_common/sanitizer_list.h" -#include "sanitizer_common/sanitizer_stackdepot.h" -#include "sanitizer_common/sanitizer_quarantine.h" -#include "lsan/lsan_common.h" - -namespace __asan { - -// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits. -// We use adaptive redzones: for larger allocation larger redzones are used. -static u32 RZLog2Size(u32 rz_log) { - CHECK_LT(rz_log, 8); - return 16 << rz_log; -} - -static u32 RZSize2Log(u32 rz_size) { - CHECK_GE(rz_size, 16); - CHECK_LE(rz_size, 2048); - CHECK(IsPowerOfTwo(rz_size)); - u32 res = Log2(rz_size) - 4; - CHECK_EQ(rz_size, RZLog2Size(res)); - return res; -} - -static AsanAllocator &get_allocator(); - -// The memory chunk allocated from the underlying allocator looks like this: -// L L L L L L H H U U U U U U R R -// L -- left redzone words (0 or more bytes) -// H -- ChunkHeader (16 bytes), which is also a part of the left redzone. -// U -- user memory. -// R -- right redzone (0 or more bytes) -// ChunkBase consists of ChunkHeader and other bytes that overlap with user -// memory. - -// If the left redzone is greater than the ChunkHeader size we store a magic -// value in the first uptr word of the memory block and store the address of -// ChunkBase in the next uptr. -// M B L L L L L L L L L H H U U U U U U -// | ^ -// ---------------------| -// M -- magic value kAllocBegMagic -// B -- address of ChunkHeader pointing to the first 'H' -static const uptr kAllocBegMagic = 0xCC6E96B9; - -struct ChunkHeader { - // 1-st 8 bytes. - u32 chunk_state : 8; // Must be first. - u32 alloc_tid : 24; - - u32 free_tid : 24; - u32 from_memalign : 1; - u32 alloc_type : 2; - u32 rz_log : 3; - u32 lsan_tag : 2; - // 2-nd 8 bytes - // This field is used for small sizes. For large sizes it is equal to - // SizeClassMap::kMaxSize and the actual size is stored in the - // SecondaryAllocator's metadata. - u32 user_requested_size : 29; - // align < 8 -> 0 - // else -> log2(min(align, 512)) - 2 - u32 user_requested_alignment_log : 3; - u32 alloc_context_id; -}; - -struct ChunkBase : ChunkHeader { - // Header2, intersects with user memory. - u32 free_context_id; -}; - -static const uptr kChunkHeaderSize = sizeof(ChunkHeader); -static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize; -COMPILER_CHECK(kChunkHeaderSize == 16); -COMPILER_CHECK(kChunkHeader2Size <= 16); - -// Every chunk of memory allocated by this allocator can be in one of 3 states: -// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated. -// CHUNK_ALLOCATED: the chunk is allocated and not yet freed. -// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone. -enum { - CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it. 
- CHUNK_ALLOCATED = 2, - CHUNK_QUARANTINE = 3 -}; - -struct AsanChunk: ChunkBase { - uptr Beg() { return reinterpret_cast(this) + kChunkHeaderSize; } - uptr UsedSize(bool locked_version = false) { - if (user_requested_size != SizeClassMap::kMaxSize) - return user_requested_size; - return *reinterpret_cast( - get_allocator().GetMetaData(AllocBeg(locked_version))); - } - void *AllocBeg(bool locked_version = false) { - if (from_memalign) { - if (locked_version) - return get_allocator().GetBlockBeginFastLocked( - reinterpret_cast(this)); - return get_allocator().GetBlockBegin(reinterpret_cast(this)); - } - return reinterpret_cast(Beg() - RZLog2Size(rz_log)); - } - bool AddrIsInside(uptr addr, bool locked_version = false) { - return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version)); - } -}; - -struct QuarantineCallback { - QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack) - : cache_(cache), - stack_(stack) { - } - - void Recycle(AsanChunk *m) { - CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE); - atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed); - CHECK_NE(m->alloc_tid, kInvalidTid); - CHECK_NE(m->free_tid, kInvalidTid); - PoisonShadow(m->Beg(), - RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY), - kAsanHeapLeftRedzoneMagic); - void *p = reinterpret_cast(m->AllocBeg()); - if (p != m) { - uptr *alloc_magic = reinterpret_cast(p); - CHECK_EQ(alloc_magic[0], kAllocBegMagic); - // Clear the magic value, as allocator internals may overwrite the - // contents of deallocated chunk, confusing GetAsanChunk lookup. - alloc_magic[0] = 0; - CHECK_EQ(alloc_magic[1], reinterpret_cast(m)); - } - - // Statistics. - AsanStats &thread_stats = GetCurrentThreadStats(); - thread_stats.real_frees++; - thread_stats.really_freed += m->UsedSize(); - - get_allocator().Deallocate(cache_, p); - } - - void *Allocate(uptr size) { - void *res = get_allocator().Allocate(cache_, size, 1); - // TODO(alekseys): Consider making quarantine OOM-friendly. - if (UNLIKELY(!res)) - ReportOutOfMemory(size, stack_); - return res; - } - - void Deallocate(void *p) { - get_allocator().Deallocate(cache_, p); - } - - private: - AllocatorCache* const cache_; - BufferedStackTrace* const stack_; -}; - -typedef Quarantine AsanQuarantine; -typedef AsanQuarantine::Cache QuarantineCache; - -void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const { - PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic); - // Statistics. - AsanStats &thread_stats = GetCurrentThreadStats(); - thread_stats.mmaps++; - thread_stats.mmaped += size; -} -void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const { - PoisonShadow(p, size, 0); - // We are about to unmap a chunk of user memory. - // Mark the corresponding shadow memory as not needed. - FlushUnneededASanShadowMemory(p, size); - // Statistics. - AsanStats &thread_stats = GetCurrentThreadStats(); - thread_stats.munmaps++; - thread_stats.munmaped += size; -} - -// We can not use THREADLOCAL because it is not supported on some of the -// platforms we care about (OSX 10.6, Android). 
-// static THREADLOCAL AllocatorCache cache; -AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) { - CHECK(ms); - return &ms->allocator_cache; -} - -QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) { - CHECK(ms); - CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache)); - return reinterpret_cast(ms->quarantine_cache); -} - -void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) { - quarantine_size_mb = f->quarantine_size_mb; - thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb; - min_redzone = f->redzone; - max_redzone = f->max_redzone; - may_return_null = cf->allocator_may_return_null; - alloc_dealloc_mismatch = f->alloc_dealloc_mismatch; - release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms; -} - -void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) { - f->quarantine_size_mb = quarantine_size_mb; - f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb; - f->redzone = min_redzone; - f->max_redzone = max_redzone; - cf->allocator_may_return_null = may_return_null; - f->alloc_dealloc_mismatch = alloc_dealloc_mismatch; - cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms; -} - -struct Allocator { - static const uptr kMaxAllowedMallocSize = - FIRST_32_SECOND_64(3UL << 30, 1ULL << 40); - - AsanAllocator allocator; - AsanQuarantine quarantine; - StaticSpinMutex fallback_mutex; - AllocatorCache fallback_allocator_cache; - QuarantineCache fallback_quarantine_cache; - - atomic_uint8_t rss_limit_exceeded; - - // ------------------- Options -------------------------- - atomic_uint16_t min_redzone; - atomic_uint16_t max_redzone; - atomic_uint8_t alloc_dealloc_mismatch; - - // ------------------- Initialization ------------------------ - explicit Allocator(LinkerInitialized) - : quarantine(LINKER_INITIALIZED), - fallback_quarantine_cache(LINKER_INITIALIZED) {} - - void CheckOptions(const AllocatorOptions &options) const { - CHECK_GE(options.min_redzone, 16); - CHECK_GE(options.max_redzone, options.min_redzone); - CHECK_LE(options.max_redzone, 2048); - CHECK(IsPowerOfTwo(options.min_redzone)); - CHECK(IsPowerOfTwo(options.max_redzone)); - } - - void SharedInitCode(const AllocatorOptions &options) { - CheckOptions(options); - quarantine.Init((uptr)options.quarantine_size_mb << 20, - (uptr)options.thread_local_quarantine_size_kb << 10); - atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch, - memory_order_release); - atomic_store(&min_redzone, options.min_redzone, memory_order_release); - atomic_store(&max_redzone, options.max_redzone, memory_order_release); - } - - void InitLinkerInitialized(const AllocatorOptions &options) { - SetAllocatorMayReturnNull(options.may_return_null); - allocator.InitLinkerInitialized(options.release_to_os_interval_ms); - SharedInitCode(options); - } - - bool RssLimitExceeded() { - return atomic_load(&rss_limit_exceeded, memory_order_relaxed); - } - - void SetRssLimitExceeded(bool limit_exceeded) { - atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed); - } - - void RePoisonChunk(uptr chunk) { - // This could be a user-facing chunk (with redzones), or some internal - // housekeeping chunk, like TransferBatch. Start by assuming the former. 
- AsanChunk *ac = GetAsanChunk((void *)chunk); - uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac); - uptr beg = ac->Beg(); - uptr end = ac->Beg() + ac->UsedSize(true); - uptr chunk_end = chunk + allocated_size; - if (chunk < beg && beg < end && end <= chunk_end && - ac->chunk_state == CHUNK_ALLOCATED) { - // Looks like a valid AsanChunk in use, poison redzones only. - PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic); - uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY); - FastPoisonShadowPartialRightRedzone( - end_aligned_down, end - end_aligned_down, - chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic); - } else { - // This is either not an AsanChunk or freed or quarantined AsanChunk. - // In either case, poison everything. - PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic); - } - } - - void ReInitialize(const AllocatorOptions &options) { - SetAllocatorMayReturnNull(options.may_return_null); - allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms); - SharedInitCode(options); - - // Poison all existing allocation's redzones. - if (CanPoisonMemory()) { - allocator.ForceLock(); - allocator.ForEachChunk( - [](uptr chunk, void *alloc) { - ((Allocator *)alloc)->RePoisonChunk(chunk); - }, - this); - allocator.ForceUnlock(); - } - } - - void GetOptions(AllocatorOptions *options) const { - options->quarantine_size_mb = quarantine.GetSize() >> 20; - options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10; - options->min_redzone = atomic_load(&min_redzone, memory_order_acquire); - options->max_redzone = atomic_load(&max_redzone, memory_order_acquire); - options->may_return_null = AllocatorMayReturnNull(); - options->alloc_dealloc_mismatch = - atomic_load(&alloc_dealloc_mismatch, memory_order_acquire); - options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs(); - } - - // -------------------- Helper methods. ------------------------- - uptr ComputeRZLog(uptr user_requested_size) { - u32 rz_log = - user_requested_size <= 64 - 16 ? 0 : - user_requested_size <= 128 - 32 ? 1 : - user_requested_size <= 512 - 64 ? 2 : - user_requested_size <= 4096 - 128 ? 3 : - user_requested_size <= (1 << 14) - 256 ? 4 : - user_requested_size <= (1 << 15) - 512 ? 5 : - user_requested_size <= (1 << 16) - 1024 ? 6 : 7; - u32 min_rz = atomic_load(&min_redzone, memory_order_acquire); - u32 max_rz = atomic_load(&max_redzone, memory_order_acquire); - return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz)); - } - - static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) { - if (user_requested_alignment < 8) - return 0; - if (user_requested_alignment > 512) - user_requested_alignment = 512; - return Log2(user_requested_alignment) - 2; - } - - static uptr ComputeUserAlignment(uptr user_requested_alignment_log) { - if (user_requested_alignment_log == 0) - return 0; - return 1LL << (user_requested_alignment_log + 2); - } - - // We have an address between two chunks, and we want to report just one. - AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk, - AsanChunk *right_chunk) { - // Prefer an allocated chunk over freed chunk and freed chunk - // over available chunk. 
- if (left_chunk->chunk_state != right_chunk->chunk_state) { - if (left_chunk->chunk_state == CHUNK_ALLOCATED) - return left_chunk; - if (right_chunk->chunk_state == CHUNK_ALLOCATED) - return right_chunk; - if (left_chunk->chunk_state == CHUNK_QUARANTINE) - return left_chunk; - if (right_chunk->chunk_state == CHUNK_QUARANTINE) - return right_chunk; - } - // Same chunk_state: choose based on offset. - sptr l_offset = 0, r_offset = 0; - CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset)); - CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset)); - if (l_offset < r_offset) - return left_chunk; - return right_chunk; - } - - // -------------------- Allocation/Deallocation routines --------------- - void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack, - AllocType alloc_type, bool can_fill) { - if (UNLIKELY(!asan_inited)) - AsanInitFromRtl(); - if (RssLimitExceeded()) { - if (AllocatorMayReturnNull()) - return nullptr; - ReportRssLimitExceeded(stack); - } - Flags &fl = *flags(); - CHECK(stack); - const uptr min_alignment = SHADOW_GRANULARITY; - const uptr user_requested_alignment_log = - ComputeUserRequestedAlignmentLog(alignment); - if (alignment < min_alignment) - alignment = min_alignment; - if (size == 0) { - // We'd be happy to avoid allocating memory for zero-size requests, but - // some programs/tests depend on this behavior and assume that malloc - // would not return NULL even for zero-size allocations. Moreover, it - // looks like operator new should never return NULL, and results of - // consecutive "new" calls must be different even if the allocated size - // is zero. - size = 1; - } - CHECK(IsPowerOfTwo(alignment)); - uptr rz_log = ComputeRZLog(size); - uptr rz_size = RZLog2Size(rz_log); - uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment); - uptr needed_size = rounded_size + rz_size; - if (alignment > min_alignment) - needed_size += alignment; - bool using_primary_allocator = true; - // If we are allocating from the secondary allocator, there will be no - // automatic right redzone, so add the right redzone manually. - if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) { - needed_size += rz_size; - using_primary_allocator = false; - } - CHECK(IsAligned(needed_size, min_alignment)); - if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) { - if (AllocatorMayReturnNull()) { - Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n", - (void*)size); - return nullptr; - } - ReportAllocationSizeTooBig(size, needed_size, kMaxAllowedMallocSize, - stack); - } - - AsanThread *t = GetCurrentThread(); - void *allocated; - if (t) { - AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage()); - allocated = allocator.Allocate(cache, needed_size, 8); - } else { - SpinMutexLock l(&fallback_mutex); - AllocatorCache *cache = &fallback_allocator_cache; - allocated = allocator.Allocate(cache, needed_size, 8); - } - if (UNLIKELY(!allocated)) { - SetAllocatorOutOfMemory(); - if (AllocatorMayReturnNull()) - return nullptr; - ReportOutOfMemory(size, stack); - } - - if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) { - // Heap poisoning is enabled, but the allocator provides an unpoisoned - // chunk. This is possible if CanPoisonMemory() was false for some - // time, for example, due to flags()->start_disabled. - // Anyway, poison the block before using it for anything else. 
- uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated); - PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic); - } - - uptr alloc_beg = reinterpret_cast(allocated); - uptr alloc_end = alloc_beg + needed_size; - uptr beg_plus_redzone = alloc_beg + rz_size; - uptr user_beg = beg_plus_redzone; - if (!IsAligned(user_beg, alignment)) - user_beg = RoundUpTo(user_beg, alignment); - uptr user_end = user_beg + size; - CHECK_LE(user_end, alloc_end); - uptr chunk_beg = user_beg - kChunkHeaderSize; - AsanChunk *m = reinterpret_cast(chunk_beg); - m->alloc_type = alloc_type; - m->rz_log = rz_log; - u32 alloc_tid = t ? t->tid() : 0; - m->alloc_tid = alloc_tid; - CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield? - m->free_tid = kInvalidTid; - m->from_memalign = user_beg != beg_plus_redzone; - if (alloc_beg != chunk_beg) { - CHECK_LE(alloc_beg+ 2 * sizeof(uptr), chunk_beg); - reinterpret_cast(alloc_beg)[0] = kAllocBegMagic; - reinterpret_cast(alloc_beg)[1] = chunk_beg; - } - if (using_primary_allocator) { - CHECK(size); - m->user_requested_size = size; - CHECK(allocator.FromPrimary(allocated)); - } else { - CHECK(!allocator.FromPrimary(allocated)); - m->user_requested_size = SizeClassMap::kMaxSize; - uptr *meta = reinterpret_cast(allocator.GetMetaData(allocated)); - meta[0] = size; - meta[1] = chunk_beg; - } - m->user_requested_alignment_log = user_requested_alignment_log; - - m->alloc_context_id = StackDepotPut(*stack); - - uptr size_rounded_down_to_granularity = - RoundDownTo(size, SHADOW_GRANULARITY); - // Unpoison the bulk of the memory region. - if (size_rounded_down_to_granularity) - PoisonShadow(user_beg, size_rounded_down_to_granularity, 0); - // Deal with the end of the region if size is not aligned to granularity. - if (size != size_rounded_down_to_granularity && CanPoisonMemory()) { - u8 *shadow = - (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity); - *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0; - } - - AsanStats &thread_stats = GetCurrentThreadStats(); - thread_stats.mallocs++; - thread_stats.malloced += size; - thread_stats.malloced_redzones += needed_size - size; - if (needed_size > SizeClassMap::kMaxSize) - thread_stats.malloc_large++; - else - thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++; - - void *res = reinterpret_cast(user_beg); - if (can_fill && fl.max_malloc_fill_size) { - uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size); - REAL(memset)(res, fl.malloc_fill_byte, fill_size); - } -#if CAN_SANITIZE_LEAKS - m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored - : __lsan::kDirectlyLeaked; -#endif - // Must be the last mutation of metadata in this function. - atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release); - ASAN_MALLOC_HOOK(res, size); - return res; - } - - // Set quarantine flag if chunk is allocated, issue ASan error report on - // available and quarantined chunks. Return true on success, false otherwise. - bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr, - BufferedStackTrace *stack) { - u8 old_chunk_state = CHUNK_ALLOCATED; - // Flip the chunk_state atomically to avoid race on double-free. - if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state, - CHUNK_QUARANTINE, - memory_order_acquire)) { - ReportInvalidFree(ptr, old_chunk_state, stack); - // It's not safe to push a chunk in quarantine on invalid free. 
- return false; - } - CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state); - return true; - } - - // Expects the chunk to already be marked as quarantined by using - // AtomicallySetQuarantineFlagIfAllocated. - void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) { - CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE); - CHECK_GE(m->alloc_tid, 0); - if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area. - CHECK_EQ(m->free_tid, kInvalidTid); - AsanThread *t = GetCurrentThread(); - m->free_tid = t ? t->tid() : 0; - m->free_context_id = StackDepotPut(*stack); - - Flags &fl = *flags(); - if (fl.max_free_fill_size > 0) { - // We have to skip the chunk header, it contains free_context_id. - uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size; - if (m->UsedSize() >= kChunkHeader2Size) { // Skip Header2 in user area. - uptr size_to_fill = m->UsedSize() - kChunkHeader2Size; - size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size); - REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill); - } - } - - // Poison the region. - PoisonShadow(m->Beg(), - RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY), - kAsanHeapFreeMagic); - - AsanStats &thread_stats = GetCurrentThreadStats(); - thread_stats.frees++; - thread_stats.freed += m->UsedSize(); - - // Push into quarantine. - if (t) { - AsanThreadLocalMallocStorage *ms = &t->malloc_storage(); - AllocatorCache *ac = GetAllocatorCache(ms); - quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m, - m->UsedSize()); - } else { - SpinMutexLock l(&fallback_mutex); - AllocatorCache *ac = &fallback_allocator_cache; - quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack), - m, m->UsedSize()); - } - } - - void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment, - BufferedStackTrace *stack, AllocType alloc_type) { - uptr p = reinterpret_cast(ptr); - if (p == 0) return; - - uptr chunk_beg = p - kChunkHeaderSize; - AsanChunk *m = reinterpret_cast(chunk_beg); - - // On Windows, uninstrumented DLLs may allocate memory before ASan hooks - // malloc. Don't report an invalid free in this case. - if (SANITIZER_WINDOWS && - !get_allocator().PointerIsMine(ptr)) { - if (!IsSystemHeapAddress(p)) - ReportFreeNotMalloced(p, stack); - return; - } - - ASAN_FREE_HOOK(ptr); - - // Must mark the chunk as quarantined before any changes to its metadata. - // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag. 
- if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return; - - if (m->alloc_type != alloc_type) { - if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) { - ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type, - (AllocType)alloc_type); - } - } else { - if (flags()->new_delete_type_mismatch && - (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) && - ((delete_size && delete_size != m->UsedSize()) || - ComputeUserRequestedAlignmentLog(delete_alignment) != - m->user_requested_alignment_log)) { - ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack); - } - } - - QuarantineChunk(m, ptr, stack); - } - - void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) { - CHECK(old_ptr && new_size); - uptr p = reinterpret_cast(old_ptr); - uptr chunk_beg = p - kChunkHeaderSize; - AsanChunk *m = reinterpret_cast(chunk_beg); - - AsanStats &thread_stats = GetCurrentThreadStats(); - thread_stats.reallocs++; - thread_stats.realloced += new_size; - - void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true); - if (new_ptr) { - u8 chunk_state = m->chunk_state; - if (chunk_state != CHUNK_ALLOCATED) - ReportInvalidFree(old_ptr, chunk_state, stack); - CHECK_NE(REAL(memcpy), nullptr); - uptr memcpy_size = Min(new_size, m->UsedSize()); - // If realloc() races with free(), we may start copying freed memory. - // However, we will report racy double-free later anyway. - REAL(memcpy)(new_ptr, old_ptr, memcpy_size); - Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC); - } - return new_ptr; - } - - void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) { - if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) { - if (AllocatorMayReturnNull()) - return nullptr; - ReportCallocOverflow(nmemb, size, stack); - } - void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false); - // If the memory comes from the secondary allocator no need to clear it - // as it comes directly from mmap. - if (ptr && allocator.FromPrimary(ptr)) - REAL(memset)(ptr, 0, nmemb * size); - return ptr; - } - - void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) { - if (chunk_state == CHUNK_QUARANTINE) - ReportDoubleFree((uptr)ptr, stack); - else - ReportFreeNotMalloced((uptr)ptr, stack); - } - - void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) { - AllocatorCache *ac = GetAllocatorCache(ms); - quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack)); - allocator.SwallowCache(ac); - } - - // -------------------------- Chunk lookup ---------------------- - - // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg). - AsanChunk *GetAsanChunk(void *alloc_beg) { - if (!alloc_beg) return nullptr; - if (!allocator.FromPrimary(alloc_beg)) { - uptr *meta = reinterpret_cast(allocator.GetMetaData(alloc_beg)); - AsanChunk *m = reinterpret_cast(meta[1]); - return m; - } - uptr *alloc_magic = reinterpret_cast(alloc_beg); - if (alloc_magic[0] == kAllocBegMagic) - return reinterpret_cast(alloc_magic[1]); - return reinterpret_cast(alloc_beg); - } - - AsanChunk *GetAsanChunkByAddr(uptr p) { - void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast(p)); - return GetAsanChunk(alloc_beg); - } - - // Allocator must be locked when this function is called. 
- AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) { - void *alloc_beg = - allocator.GetBlockBeginFastLocked(reinterpret_cast(p)); - return GetAsanChunk(alloc_beg); - } - - uptr AllocationSize(uptr p) { - AsanChunk *m = GetAsanChunkByAddr(p); - if (!m) return 0; - if (m->chunk_state != CHUNK_ALLOCATED) return 0; - if (m->Beg() != p) return 0; - return m->UsedSize(); - } - - AsanChunkView FindHeapChunkByAddress(uptr addr) { - AsanChunk *m1 = GetAsanChunkByAddr(addr); - if (!m1) return AsanChunkView(m1); - sptr offset = 0; - if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) { - // The address is in the chunk's left redzone, so maybe it is actually - // a right buffer overflow from the other chunk to the left. - // Search a bit to the left to see if there is another chunk. - AsanChunk *m2 = nullptr; - for (uptr l = 1; l < GetPageSizeCached(); l++) { - m2 = GetAsanChunkByAddr(addr - l); - if (m2 == m1) continue; // Still the same chunk. - break; - } - if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset)) - m1 = ChooseChunk(addr, m2, m1); - } - return AsanChunkView(m1); - } - - void Purge(BufferedStackTrace *stack) { - AsanThread *t = GetCurrentThread(); - if (t) { - AsanThreadLocalMallocStorage *ms = &t->malloc_storage(); - quarantine.DrainAndRecycle(GetQuarantineCache(ms), - QuarantineCallback(GetAllocatorCache(ms), - stack)); - } - { - SpinMutexLock l(&fallback_mutex); - quarantine.DrainAndRecycle(&fallback_quarantine_cache, - QuarantineCallback(&fallback_allocator_cache, - stack)); - } - - allocator.ForceReleaseToOS(); - } - - void PrintStats() { - allocator.PrintStats(); - quarantine.PrintStats(); - } - - void ForceLock() { - allocator.ForceLock(); - fallback_mutex.Lock(); - } - - void ForceUnlock() { - fallback_mutex.Unlock(); - allocator.ForceUnlock(); - } -}; - -static Allocator instance(LINKER_INITIALIZED); - -static AsanAllocator &get_allocator() { - return instance.allocator; -} - -bool AsanChunkView::IsValid() const { - return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE; -} -bool AsanChunkView::IsAllocated() const { - return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED; -} -bool AsanChunkView::IsQuarantined() const { - return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE; -} -uptr AsanChunkView::Beg() const { return chunk_->Beg(); } -uptr AsanChunkView::End() const { return Beg() + UsedSize(); } -uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); } -u32 AsanChunkView::UserRequestedAlignment() const { - return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log); -} -uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; } -uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; } -AllocType AsanChunkView::GetAllocType() const { - return (AllocType)chunk_->alloc_type; -} - -static StackTrace GetStackTraceFromId(u32 id) { - CHECK(id); - StackTrace res = StackDepotGet(id); - CHECK(res.trace); - return res; -} - -u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; } -u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; } - -StackTrace AsanChunkView::GetAllocStack() const { - return GetStackTraceFromId(GetAllocStackId()); -} - -StackTrace AsanChunkView::GetFreeStack() const { - return GetStackTraceFromId(GetFreeStackId()); -} - -void InitializeAllocator(const AllocatorOptions &options) { - instance.InitLinkerInitialized(options); -} - -void ReInitializeAllocator(const AllocatorOptions &options) { - instance.ReInitialize(options); -} - -void 
GetAllocatorOptions(AllocatorOptions *options) { - instance.GetOptions(options); -} - -AsanChunkView FindHeapChunkByAddress(uptr addr) { - return instance.FindHeapChunkByAddress(addr); -} -AsanChunkView FindHeapChunkByAllocBeg(uptr addr) { - return AsanChunkView(instance.GetAsanChunk(reinterpret_cast(addr))); -} - -void AsanThreadLocalMallocStorage::CommitBack() { - GET_STACK_TRACE_MALLOC; - instance.CommitBack(this, &stack); -} - -void PrintInternalAllocatorStats() { - instance.PrintStats(); -} - -void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) { - instance.Deallocate(ptr, 0, 0, stack, alloc_type); -} - -void asan_delete(void *ptr, uptr size, uptr alignment, - BufferedStackTrace *stack, AllocType alloc_type) { - instance.Deallocate(ptr, size, alignment, stack, alloc_type); -} - -void *asan_malloc(uptr size, BufferedStackTrace *stack) { - return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true)); -} - -void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) { - return SetErrnoOnNull(instance.Calloc(nmemb, size, stack)); -} - -void *asan_reallocarray(void *p, uptr nmemb, uptr size, - BufferedStackTrace *stack) { - if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) { - errno = errno_ENOMEM; - if (AllocatorMayReturnNull()) - return nullptr; - ReportReallocArrayOverflow(nmemb, size, stack); - } - return asan_realloc(p, nmemb * size, stack); -} - -void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) { - if (!p) - return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true)); - if (size == 0) { - if (flags()->allocator_frees_and_returns_null_on_realloc_zero) { - instance.Deallocate(p, 0, 0, stack, FROM_MALLOC); - return nullptr; - } - // Allocate a size of 1 if we shouldn't free() on Realloc to 0 - size = 1; - } - return SetErrnoOnNull(instance.Reallocate(p, size, stack)); -} - -void *asan_valloc(uptr size, BufferedStackTrace *stack) { - return SetErrnoOnNull( - instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true)); -} - -void *asan_pvalloc(uptr size, BufferedStackTrace *stack) { - uptr PageSize = GetPageSizeCached(); - if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) { - errno = errno_ENOMEM; - if (AllocatorMayReturnNull()) - return nullptr; - ReportPvallocOverflow(size, stack); - } - // pvalloc(0) should allocate one page. - size = size ? 
RoundUpTo(size, PageSize) : PageSize; - return SetErrnoOnNull( - instance.Allocate(size, PageSize, stack, FROM_MALLOC, true)); -} - -void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack, - AllocType alloc_type) { - if (UNLIKELY(!IsPowerOfTwo(alignment))) { - errno = errno_EINVAL; - if (AllocatorMayReturnNull()) - return nullptr; - ReportInvalidAllocationAlignment(alignment, stack); - } - return SetErrnoOnNull( - instance.Allocate(size, alignment, stack, alloc_type, true)); -} - -void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) { - if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) { - errno = errno_EINVAL; - if (AllocatorMayReturnNull()) - return nullptr; - ReportInvalidAlignedAllocAlignment(size, alignment, stack); - } - return SetErrnoOnNull( - instance.Allocate(size, alignment, stack, FROM_MALLOC, true)); -} - -int asan_posix_memalign(void **memptr, uptr alignment, uptr size, - BufferedStackTrace *stack) { - if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) { - if (AllocatorMayReturnNull()) - return errno_EINVAL; - ReportInvalidPosixMemalignAlignment(alignment, stack); - } - void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true); - if (UNLIKELY(!ptr)) - // OOM error is already taken care of by Allocate. - return errno_ENOMEM; - CHECK(IsAligned((uptr)ptr, alignment)); - *memptr = ptr; - return 0; -} - -uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) { - if (!ptr) return 0; - uptr usable_size = instance.AllocationSize(reinterpret_cast(ptr)); - if (flags()->check_malloc_usable_size && (usable_size == 0)) { - GET_STACK_TRACE_FATAL(pc, bp); - ReportMallocUsableSizeNotOwned((uptr)ptr, &stack); - } - return usable_size; -} - -uptr asan_mz_size(const void *ptr) { - return instance.AllocationSize(reinterpret_cast(ptr)); -} - -void asan_mz_force_lock() { - instance.ForceLock(); -} - -void asan_mz_force_unlock() { - instance.ForceUnlock(); -} - -void AsanSoftRssLimitExceededCallback(bool limit_exceeded) { - instance.SetRssLimitExceeded(limit_exceeded); -} - -} // namespace __asan - -// --- Implementation of LSan-specific functions --- {{{1 -namespace __lsan { -void LockAllocator() { - __asan::get_allocator().ForceLock(); -} - -void UnlockAllocator() { - __asan::get_allocator().ForceUnlock(); -} - -void GetAllocatorGlobalRange(uptr *begin, uptr *end) { - *begin = (uptr)&__asan::get_allocator(); - *end = *begin + sizeof(__asan::get_allocator()); -} - -uptr PointsIntoChunk(void* p) { - uptr addr = reinterpret_cast(p); - __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr); - if (!m) return 0; - uptr chunk = m->Beg(); - if (m->chunk_state != __asan::CHUNK_ALLOCATED) - return 0; - if (m->AddrIsInside(addr, /*locked_version=*/true)) - return chunk; - if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true), - addr)) - return chunk; - return 0; -} - -uptr GetUserBegin(uptr chunk) { - __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk); - CHECK(m); - return m->Beg(); -} - -LsanMetadata::LsanMetadata(uptr chunk) { - metadata_ = reinterpret_cast(chunk - __asan::kChunkHeaderSize); -} - -bool LsanMetadata::allocated() const { - __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); - return m->chunk_state == __asan::CHUNK_ALLOCATED; -} - -ChunkTag LsanMetadata::tag() const { - __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); - return static_cast(m->lsan_tag); -} - -void LsanMetadata::set_tag(ChunkTag 
value) { - __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); - m->lsan_tag = value; -} - -uptr LsanMetadata::requested_size() const { - __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); - return m->UsedSize(/*locked_version=*/true); -} - -u32 LsanMetadata::stack_trace_id() const { - __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); - return m->alloc_context_id; -} - -void ForEachChunk(ForEachChunkCallback callback, void *arg) { - __asan::get_allocator().ForEachChunk(callback, arg); -} - -IgnoreObjectResult IgnoreObjectLocked(const void *p) { - uptr addr = reinterpret_cast(p); - __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr); - if (!m) return kIgnoreObjectInvalid; - if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) { - if (m->lsan_tag == kIgnored) - return kIgnoreObjectAlreadyIgnored; - m->lsan_tag = __lsan::kIgnored; - return kIgnoreObjectSuccess; - } else { - return kIgnoreObjectInvalid; - } -} -} // namespace __lsan - -// ---------------------- Interface ---------------- {{{1 -using namespace __asan; // NOLINT - -// ASan allocator doesn't reserve extra bytes, so normally we would -// just return "size". We don't want to expose our redzone sizes, etc here. -uptr __sanitizer_get_estimated_allocated_size(uptr size) { - return size; -} - -int __sanitizer_get_ownership(const void *p) { - uptr ptr = reinterpret_cast(p); - return instance.AllocationSize(ptr) > 0; -} - -uptr __sanitizer_get_allocated_size(const void *p) { - if (!p) return 0; - uptr ptr = reinterpret_cast(p); - uptr allocated_size = instance.AllocationSize(ptr); - // Die if p is not malloced or if it is already freed. - if (allocated_size == 0) { - GET_STACK_TRACE_FATAL_HERE; - ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack); - } - return allocated_size; -} - -void __sanitizer_purge_allocator() { - GET_STACK_TRACE_MALLOC; - instance.Purge(&stack); -} - -#if !SANITIZER_SUPPORTS_WEAK_HOOKS -// Provide default (no-op) implementation of malloc hooks. -SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook, - void *ptr, uptr size) { - (void)ptr; - (void)size; -} - -SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) { - (void)ptr; -} -#endif diff --git a/lib/asan/asan_allocator.cpp b/lib/asan/asan_allocator.cpp new file mode 100644 index 000000000000..c9e9f5a93d0d --- /dev/null +++ b/lib/asan/asan_allocator.cpp @@ -0,0 +1,1119 @@ +//===-- asan_allocator.cpp ------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Implementation of ASan's memory allocator, 2-nd version. +// This variant uses the allocator from sanitizer_common, i.e. the one shared +// with ThreadSanitizer and MemorySanitizer. 
+// +//===----------------------------------------------------------------------===// + +#include "asan_allocator.h" +#include "asan_mapping.h" +#include "asan_poisoning.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_allocator_checks.h" +#include "sanitizer_common/sanitizer_allocator_interface.h" +#include "sanitizer_common/sanitizer_errno.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_internal_defs.h" +#include "sanitizer_common/sanitizer_list.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +#include "sanitizer_common/sanitizer_quarantine.h" +#include "lsan/lsan_common.h" + +namespace __asan { + +// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits. +// We use adaptive redzones: for larger allocation larger redzones are used. +static u32 RZLog2Size(u32 rz_log) { + CHECK_LT(rz_log, 8); + return 16 << rz_log; +} + +static u32 RZSize2Log(u32 rz_size) { + CHECK_GE(rz_size, 16); + CHECK_LE(rz_size, 2048); + CHECK(IsPowerOfTwo(rz_size)); + u32 res = Log2(rz_size) - 4; + CHECK_EQ(rz_size, RZLog2Size(res)); + return res; +} + +static AsanAllocator &get_allocator(); + +// The memory chunk allocated from the underlying allocator looks like this: +// L L L L L L H H U U U U U U R R +// L -- left redzone words (0 or more bytes) +// H -- ChunkHeader (16 bytes), which is also a part of the left redzone. +// U -- user memory. +// R -- right redzone (0 or more bytes) +// ChunkBase consists of ChunkHeader and other bytes that overlap with user +// memory. + +// If the left redzone is greater than the ChunkHeader size we store a magic +// value in the first uptr word of the memory block and store the address of +// ChunkBase in the next uptr. +// M B L L L L L L L L L H H U U U U U U +// | ^ +// ---------------------| +// M -- magic value kAllocBegMagic +// B -- address of ChunkHeader pointing to the first 'H' +static const uptr kAllocBegMagic = 0xCC6E96B9; + +struct ChunkHeader { + // 1-st 8 bytes. + u32 chunk_state : 8; // Must be first. + u32 alloc_tid : 24; + + u32 free_tid : 24; + u32 from_memalign : 1; + u32 alloc_type : 2; + u32 rz_log : 3; + u32 lsan_tag : 2; + // 2-nd 8 bytes + // This field is used for small sizes. For large sizes it is equal to + // SizeClassMap::kMaxSize and the actual size is stored in the + // SecondaryAllocator's metadata. + u32 user_requested_size : 29; + // align < 8 -> 0 + // else -> log2(min(align, 512)) - 2 + u32 user_requested_alignment_log : 3; + u32 alloc_context_id; +}; + +struct ChunkBase : ChunkHeader { + // Header2, intersects with user memory. + u32 free_context_id; +}; + +static const uptr kChunkHeaderSize = sizeof(ChunkHeader); +static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize; +COMPILER_CHECK(kChunkHeaderSize == 16); +COMPILER_CHECK(kChunkHeader2Size <= 16); + +// Every chunk of memory allocated by this allocator can be in one of 3 states: +// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated. +// CHUNK_ALLOCATED: the chunk is allocated and not yet freed. +// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone. +enum { + CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it. 
+ CHUNK_ALLOCATED = 2, + CHUNK_QUARANTINE = 3 +}; + +struct AsanChunk: ChunkBase { + uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; } + uptr UsedSize(bool locked_version = false) { + if (user_requested_size != SizeClassMap::kMaxSize) + return user_requested_size; + return *reinterpret_cast<uptr *>( + get_allocator().GetMetaData(AllocBeg(locked_version))); + } + void *AllocBeg(bool locked_version = false) { + if (from_memalign) { + if (locked_version) + return get_allocator().GetBlockBeginFastLocked( + reinterpret_cast<void *>(this)); + return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this)); + } + return reinterpret_cast<void *>(Beg() - RZLog2Size(rz_log)); + } + bool AddrIsInside(uptr addr, bool locked_version = false) { + return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version)); + } +}; + +struct QuarantineCallback { + QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack) + : cache_(cache), + stack_(stack) { + } + + void Recycle(AsanChunk *m) { + CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE); + atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed); + CHECK_NE(m->alloc_tid, kInvalidTid); + CHECK_NE(m->free_tid, kInvalidTid); + PoisonShadow(m->Beg(), + RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY), + kAsanHeapLeftRedzoneMagic); + void *p = reinterpret_cast<void *>(m->AllocBeg()); + if (p != m) { + uptr *alloc_magic = reinterpret_cast<uptr *>(p); + CHECK_EQ(alloc_magic[0], kAllocBegMagic); + // Clear the magic value, as allocator internals may overwrite the + // contents of deallocated chunk, confusing GetAsanChunk lookup. + alloc_magic[0] = 0; + CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m)); + } + + // Statistics. + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.real_frees++; + thread_stats.really_freed += m->UsedSize(); + + get_allocator().Deallocate(cache_, p); + } + + void *Allocate(uptr size) { + void *res = get_allocator().Allocate(cache_, size, 1); + // TODO(alekseys): Consider making quarantine OOM-friendly. + if (UNLIKELY(!res)) + ReportOutOfMemory(size, stack_); + return res; + } + + void Deallocate(void *p) { + get_allocator().Deallocate(cache_, p); + } + + private: + AllocatorCache* const cache_; + BufferedStackTrace* const stack_; +}; + +typedef Quarantine<AsanChunk, QuarantineCallback> AsanQuarantine; +typedef AsanQuarantine::Cache QuarantineCache; + +void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const { + PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic); + // Statistics. + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.mmaps++; + thread_stats.mmaped += size; +} +void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const { + PoisonShadow(p, size, 0); + // We are about to unmap a chunk of user memory. + // Mark the corresponding shadow memory as not needed. + FlushUnneededASanShadowMemory(p, size); + // Statistics. + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.munmaps++; + thread_stats.munmaped += size; +} + +// We can not use THREADLOCAL because it is not supported on some of the +// platforms we care about (OSX 10.6, Android).
+// static THREADLOCAL AllocatorCache cache; +AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) { + CHECK(ms); + return &ms->allocator_cache; +} + +QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) { + CHECK(ms); + CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache)); + return reinterpret_cast(ms->quarantine_cache); +} + +void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) { + quarantine_size_mb = f->quarantine_size_mb; + thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb; + min_redzone = f->redzone; + max_redzone = f->max_redzone; + may_return_null = cf->allocator_may_return_null; + alloc_dealloc_mismatch = f->alloc_dealloc_mismatch; + release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms; +} + +void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) { + f->quarantine_size_mb = quarantine_size_mb; + f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb; + f->redzone = min_redzone; + f->max_redzone = max_redzone; + cf->allocator_may_return_null = may_return_null; + f->alloc_dealloc_mismatch = alloc_dealloc_mismatch; + cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms; +} + +struct Allocator { + static const uptr kMaxAllowedMallocSize = + FIRST_32_SECOND_64(3UL << 30, 1ULL << 40); + + AsanAllocator allocator; + AsanQuarantine quarantine; + StaticSpinMutex fallback_mutex; + AllocatorCache fallback_allocator_cache; + QuarantineCache fallback_quarantine_cache; + + atomic_uint8_t rss_limit_exceeded; + + // ------------------- Options -------------------------- + atomic_uint16_t min_redzone; + atomic_uint16_t max_redzone; + atomic_uint8_t alloc_dealloc_mismatch; + + // ------------------- Initialization ------------------------ + explicit Allocator(LinkerInitialized) + : quarantine(LINKER_INITIALIZED), + fallback_quarantine_cache(LINKER_INITIALIZED) {} + + void CheckOptions(const AllocatorOptions &options) const { + CHECK_GE(options.min_redzone, 16); + CHECK_GE(options.max_redzone, options.min_redzone); + CHECK_LE(options.max_redzone, 2048); + CHECK(IsPowerOfTwo(options.min_redzone)); + CHECK(IsPowerOfTwo(options.max_redzone)); + } + + void SharedInitCode(const AllocatorOptions &options) { + CheckOptions(options); + quarantine.Init((uptr)options.quarantine_size_mb << 20, + (uptr)options.thread_local_quarantine_size_kb << 10); + atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch, + memory_order_release); + atomic_store(&min_redzone, options.min_redzone, memory_order_release); + atomic_store(&max_redzone, options.max_redzone, memory_order_release); + } + + void InitLinkerInitialized(const AllocatorOptions &options) { + SetAllocatorMayReturnNull(options.may_return_null); + allocator.InitLinkerInitialized(options.release_to_os_interval_ms); + SharedInitCode(options); + } + + bool RssLimitExceeded() { + return atomic_load(&rss_limit_exceeded, memory_order_relaxed); + } + + void SetRssLimitExceeded(bool limit_exceeded) { + atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed); + } + + void RePoisonChunk(uptr chunk) { + // This could be a user-facing chunk (with redzones), or some internal + // housekeeping chunk, like TransferBatch. Start by assuming the former. 
+ AsanChunk *ac = GetAsanChunk((void *)chunk); + uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac); + uptr beg = ac->Beg(); + uptr end = ac->Beg() + ac->UsedSize(true); + uptr chunk_end = chunk + allocated_size; + if (chunk < beg && beg < end && end <= chunk_end && + ac->chunk_state == CHUNK_ALLOCATED) { + // Looks like a valid AsanChunk in use, poison redzones only. + PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic); + uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY); + FastPoisonShadowPartialRightRedzone( + end_aligned_down, end - end_aligned_down, + chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic); + } else { + // This is either not an AsanChunk or freed or quarantined AsanChunk. + // In either case, poison everything. + PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic); + } + } + + void ReInitialize(const AllocatorOptions &options) { + SetAllocatorMayReturnNull(options.may_return_null); + allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms); + SharedInitCode(options); + + // Poison all existing allocation's redzones. + if (CanPoisonMemory()) { + allocator.ForceLock(); + allocator.ForEachChunk( + [](uptr chunk, void *alloc) { + ((Allocator *)alloc)->RePoisonChunk(chunk); + }, + this); + allocator.ForceUnlock(); + } + } + + void GetOptions(AllocatorOptions *options) const { + options->quarantine_size_mb = quarantine.GetSize() >> 20; + options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10; + options->min_redzone = atomic_load(&min_redzone, memory_order_acquire); + options->max_redzone = atomic_load(&max_redzone, memory_order_acquire); + options->may_return_null = AllocatorMayReturnNull(); + options->alloc_dealloc_mismatch = + atomic_load(&alloc_dealloc_mismatch, memory_order_acquire); + options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs(); + } + + // -------------------- Helper methods. ------------------------- + uptr ComputeRZLog(uptr user_requested_size) { + u32 rz_log = + user_requested_size <= 64 - 16 ? 0 : + user_requested_size <= 128 - 32 ? 1 : + user_requested_size <= 512 - 64 ? 2 : + user_requested_size <= 4096 - 128 ? 3 : + user_requested_size <= (1 << 14) - 256 ? 4 : + user_requested_size <= (1 << 15) - 512 ? 5 : + user_requested_size <= (1 << 16) - 1024 ? 6 : 7; + u32 min_rz = atomic_load(&min_redzone, memory_order_acquire); + u32 max_rz = atomic_load(&max_redzone, memory_order_acquire); + return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz)); + } + + static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) { + if (user_requested_alignment < 8) + return 0; + if (user_requested_alignment > 512) + user_requested_alignment = 512; + return Log2(user_requested_alignment) - 2; + } + + static uptr ComputeUserAlignment(uptr user_requested_alignment_log) { + if (user_requested_alignment_log == 0) + return 0; + return 1LL << (user_requested_alignment_log + 2); + } + + // We have an address between two chunks, and we want to report just one. + AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk, + AsanChunk *right_chunk) { + // Prefer an allocated chunk over freed chunk and freed chunk + // over available chunk. 
+ if (left_chunk->chunk_state != right_chunk->chunk_state) { + if (left_chunk->chunk_state == CHUNK_ALLOCATED) + return left_chunk; + if (right_chunk->chunk_state == CHUNK_ALLOCATED) + return right_chunk; + if (left_chunk->chunk_state == CHUNK_QUARANTINE) + return left_chunk; + if (right_chunk->chunk_state == CHUNK_QUARANTINE) + return right_chunk; + } + // Same chunk_state: choose based on offset. + sptr l_offset = 0, r_offset = 0; + CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset)); + CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset)); + if (l_offset < r_offset) + return left_chunk; + return right_chunk; + } + + // -------------------- Allocation/Deallocation routines --------------- + void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack, + AllocType alloc_type, bool can_fill) { + if (UNLIKELY(!asan_inited)) + AsanInitFromRtl(); + if (RssLimitExceeded()) { + if (AllocatorMayReturnNull()) + return nullptr; + ReportRssLimitExceeded(stack); + } + Flags &fl = *flags(); + CHECK(stack); + const uptr min_alignment = SHADOW_GRANULARITY; + const uptr user_requested_alignment_log = + ComputeUserRequestedAlignmentLog(alignment); + if (alignment < min_alignment) + alignment = min_alignment; + if (size == 0) { + // We'd be happy to avoid allocating memory for zero-size requests, but + // some programs/tests depend on this behavior and assume that malloc + // would not return NULL even for zero-size allocations. Moreover, it + // looks like operator new should never return NULL, and results of + // consecutive "new" calls must be different even if the allocated size + // is zero. + size = 1; + } + CHECK(IsPowerOfTwo(alignment)); + uptr rz_log = ComputeRZLog(size); + uptr rz_size = RZLog2Size(rz_log); + uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment); + uptr needed_size = rounded_size + rz_size; + if (alignment > min_alignment) + needed_size += alignment; + bool using_primary_allocator = true; + // If we are allocating from the secondary allocator, there will be no + // automatic right redzone, so add the right redzone manually. + if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) { + needed_size += rz_size; + using_primary_allocator = false; + } + CHECK(IsAligned(needed_size, min_alignment)); + if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) { + if (AllocatorMayReturnNull()) { + Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n", + (void*)size); + return nullptr; + } + ReportAllocationSizeTooBig(size, needed_size, kMaxAllowedMallocSize, + stack); + } + + AsanThread *t = GetCurrentThread(); + void *allocated; + if (t) { + AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage()); + allocated = allocator.Allocate(cache, needed_size, 8); + } else { + SpinMutexLock l(&fallback_mutex); + AllocatorCache *cache = &fallback_allocator_cache; + allocated = allocator.Allocate(cache, needed_size, 8); + } + if (UNLIKELY(!allocated)) { + SetAllocatorOutOfMemory(); + if (AllocatorMayReturnNull()) + return nullptr; + ReportOutOfMemory(size, stack); + } + + if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) { + // Heap poisoning is enabled, but the allocator provides an unpoisoned + // chunk. This is possible if CanPoisonMemory() was false for some + // time, for example, due to flags()->start_disabled. + // Anyway, poison the block before using it for anything else. 
uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated); + PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic); + } + + uptr alloc_beg = reinterpret_cast<uptr>(allocated); + uptr alloc_end = alloc_beg + needed_size; + uptr beg_plus_redzone = alloc_beg + rz_size; + uptr user_beg = beg_plus_redzone; + if (!IsAligned(user_beg, alignment)) + user_beg = RoundUpTo(user_beg, alignment); + uptr user_end = user_beg + size; + CHECK_LE(user_end, alloc_end); + uptr chunk_beg = user_beg - kChunkHeaderSize; + AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); + m->alloc_type = alloc_type; + m->rz_log = rz_log; + u32 alloc_tid = t ? t->tid() : 0; + m->alloc_tid = alloc_tid; + CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield? + m->free_tid = kInvalidTid; + m->from_memalign = user_beg != beg_plus_redzone; + if (alloc_beg != chunk_beg) { + CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg); + reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic; + reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg; + } + if (using_primary_allocator) { + CHECK(size); + m->user_requested_size = size; + CHECK(allocator.FromPrimary(allocated)); + } else { + CHECK(!allocator.FromPrimary(allocated)); + m->user_requested_size = SizeClassMap::kMaxSize; + uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated)); + meta[0] = size; + meta[1] = chunk_beg; + } + m->user_requested_alignment_log = user_requested_alignment_log; + + m->alloc_context_id = StackDepotPut(*stack); + + uptr size_rounded_down_to_granularity = + RoundDownTo(size, SHADOW_GRANULARITY); + // Unpoison the bulk of the memory region. + if (size_rounded_down_to_granularity) + PoisonShadow(user_beg, size_rounded_down_to_granularity, 0); + // Deal with the end of the region if size is not aligned to granularity. + if (size != size_rounded_down_to_granularity && CanPoisonMemory()) { + u8 *shadow = + (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity); + *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0; + } + + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.mallocs++; + thread_stats.malloced += size; + thread_stats.malloced_redzones += needed_size - size; + if (needed_size > SizeClassMap::kMaxSize) + thread_stats.malloc_large++; + else + thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++; + + void *res = reinterpret_cast<void *>(user_beg); + if (can_fill && fl.max_malloc_fill_size) { + uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size); + REAL(memset)(res, fl.malloc_fill_byte, fill_size); + } +#if CAN_SANITIZE_LEAKS + m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored + : __lsan::kDirectlyLeaked; +#endif + // Must be the last mutation of metadata in this function. + atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release); + ASAN_MALLOC_HOOK(res, size); + return res; + } + + // Set quarantine flag if chunk is allocated, issue ASan error report on + // available and quarantined chunks. Return true on success, false otherwise. + bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr, + BufferedStackTrace *stack) { + u8 old_chunk_state = CHUNK_ALLOCATED; + // Flip the chunk_state atomically to avoid race on double-free. + if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state, + CHUNK_QUARANTINE, + memory_order_acquire)) { + ReportInvalidFree(ptr, old_chunk_state, stack); + // It's not safe to push a chunk in quarantine on invalid free.
return false; + } + CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state); + return true; + } + + // Expects the chunk to already be marked as quarantined by using + // AtomicallySetQuarantineFlagIfAllocated. + void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) { + CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE); + CHECK_GE(m->alloc_tid, 0); + if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area. + CHECK_EQ(m->free_tid, kInvalidTid); + AsanThread *t = GetCurrentThread(); + m->free_tid = t ? t->tid() : 0; + m->free_context_id = StackDepotPut(*stack); + + Flags &fl = *flags(); + if (fl.max_free_fill_size > 0) { + // We have to skip the chunk header, it contains free_context_id. + uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size; + if (m->UsedSize() >= kChunkHeader2Size) { // Skip Header2 in user area. + uptr size_to_fill = m->UsedSize() - kChunkHeader2Size; + size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size); + REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill); + } + } + + // Poison the region. + PoisonShadow(m->Beg(), + RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY), + kAsanHeapFreeMagic); + + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.frees++; + thread_stats.freed += m->UsedSize(); + + // Push into quarantine. + if (t) { + AsanThreadLocalMallocStorage *ms = &t->malloc_storage(); + AllocatorCache *ac = GetAllocatorCache(ms); + quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m, + m->UsedSize()); + } else { + SpinMutexLock l(&fallback_mutex); + AllocatorCache *ac = &fallback_allocator_cache; + quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack), + m, m->UsedSize()); + } + } + + void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment, + BufferedStackTrace *stack, AllocType alloc_type) { + uptr p = reinterpret_cast<uptr>(ptr); + if (p == 0) return; + + uptr chunk_beg = p - kChunkHeaderSize; + AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); + + // On Windows, uninstrumented DLLs may allocate memory before ASan hooks + // malloc. Don't report an invalid free in this case. + if (SANITIZER_WINDOWS && + !get_allocator().PointerIsMine(ptr)) { + if (!IsSystemHeapAddress(p)) + ReportFreeNotMalloced(p, stack); + return; + } + + ASAN_FREE_HOOK(ptr); + + // Must mark the chunk as quarantined before any changes to its metadata. + // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
+ if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return; + + if (m->alloc_type != alloc_type) { + if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) { + ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type, + (AllocType)alloc_type); + } + } else { + if (flags()->new_delete_type_mismatch && + (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) && + ((delete_size && delete_size != m->UsedSize()) || + ComputeUserRequestedAlignmentLog(delete_alignment) != + m->user_requested_alignment_log)) { + ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack); + } + } + + QuarantineChunk(m, ptr, stack); + } + + void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) { + CHECK(old_ptr && new_size); + uptr p = reinterpret_cast<uptr>(old_ptr); + uptr chunk_beg = p - kChunkHeaderSize; + AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); + + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.reallocs++; + thread_stats.realloced += new_size; + + void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true); + if (new_ptr) { + u8 chunk_state = m->chunk_state; + if (chunk_state != CHUNK_ALLOCATED) + ReportInvalidFree(old_ptr, chunk_state, stack); + CHECK_NE(REAL(memcpy), nullptr); + uptr memcpy_size = Min(new_size, m->UsedSize()); + // If realloc() races with free(), we may start copying freed memory. + // However, we will report racy double-free later anyway. + REAL(memcpy)(new_ptr, old_ptr, memcpy_size); + Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC); + } + return new_ptr; + } + + void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) { + if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) { + if (AllocatorMayReturnNull()) + return nullptr; + ReportCallocOverflow(nmemb, size, stack); + } + void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false); + // If the memory comes from the secondary allocator no need to clear it + // as it comes directly from mmap. + if (ptr && allocator.FromPrimary(ptr)) + REAL(memset)(ptr, 0, nmemb * size); + return ptr; + } + + void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) { + if (chunk_state == CHUNK_QUARANTINE) + ReportDoubleFree((uptr)ptr, stack); + else + ReportFreeNotMalloced((uptr)ptr, stack); + } + + void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) { + AllocatorCache *ac = GetAllocatorCache(ms); + quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack)); + allocator.SwallowCache(ac); + } + + // -------------------------- Chunk lookup ---------------------- + + // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg). + AsanChunk *GetAsanChunk(void *alloc_beg) { + if (!alloc_beg) return nullptr; + if (!allocator.FromPrimary(alloc_beg)) { + uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg)); + AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]); + return m; + } + uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg); + if (alloc_magic[0] == kAllocBegMagic) + return reinterpret_cast<AsanChunk *>(alloc_magic[1]); + return reinterpret_cast<AsanChunk *>(alloc_beg); + } + + AsanChunk *GetAsanChunkByAddr(uptr p) { + void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p)); + return GetAsanChunk(alloc_beg); + } + + // Allocator must be locked when this function is called.
+ AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) { + void *alloc_beg = + allocator.GetBlockBeginFastLocked(reinterpret_cast(p)); + return GetAsanChunk(alloc_beg); + } + + uptr AllocationSize(uptr p) { + AsanChunk *m = GetAsanChunkByAddr(p); + if (!m) return 0; + if (m->chunk_state != CHUNK_ALLOCATED) return 0; + if (m->Beg() != p) return 0; + return m->UsedSize(); + } + + AsanChunkView FindHeapChunkByAddress(uptr addr) { + AsanChunk *m1 = GetAsanChunkByAddr(addr); + if (!m1) return AsanChunkView(m1); + sptr offset = 0; + if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) { + // The address is in the chunk's left redzone, so maybe it is actually + // a right buffer overflow from the other chunk to the left. + // Search a bit to the left to see if there is another chunk. + AsanChunk *m2 = nullptr; + for (uptr l = 1; l < GetPageSizeCached(); l++) { + m2 = GetAsanChunkByAddr(addr - l); + if (m2 == m1) continue; // Still the same chunk. + break; + } + if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset)) + m1 = ChooseChunk(addr, m2, m1); + } + return AsanChunkView(m1); + } + + void Purge(BufferedStackTrace *stack) { + AsanThread *t = GetCurrentThread(); + if (t) { + AsanThreadLocalMallocStorage *ms = &t->malloc_storage(); + quarantine.DrainAndRecycle(GetQuarantineCache(ms), + QuarantineCallback(GetAllocatorCache(ms), + stack)); + } + { + SpinMutexLock l(&fallback_mutex); + quarantine.DrainAndRecycle(&fallback_quarantine_cache, + QuarantineCallback(&fallback_allocator_cache, + stack)); + } + + allocator.ForceReleaseToOS(); + } + + void PrintStats() { + allocator.PrintStats(); + quarantine.PrintStats(); + } + + void ForceLock() { + allocator.ForceLock(); + fallback_mutex.Lock(); + } + + void ForceUnlock() { + fallback_mutex.Unlock(); + allocator.ForceUnlock(); + } +}; + +static Allocator instance(LINKER_INITIALIZED); + +static AsanAllocator &get_allocator() { + return instance.allocator; +} + +bool AsanChunkView::IsValid() const { + return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE; +} +bool AsanChunkView::IsAllocated() const { + return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED; +} +bool AsanChunkView::IsQuarantined() const { + return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE; +} +uptr AsanChunkView::Beg() const { return chunk_->Beg(); } +uptr AsanChunkView::End() const { return Beg() + UsedSize(); } +uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); } +u32 AsanChunkView::UserRequestedAlignment() const { + return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log); +} +uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; } +uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; } +AllocType AsanChunkView::GetAllocType() const { + return (AllocType)chunk_->alloc_type; +} + +static StackTrace GetStackTraceFromId(u32 id) { + CHECK(id); + StackTrace res = StackDepotGet(id); + CHECK(res.trace); + return res; +} + +u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; } +u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; } + +StackTrace AsanChunkView::GetAllocStack() const { + return GetStackTraceFromId(GetAllocStackId()); +} + +StackTrace AsanChunkView::GetFreeStack() const { + return GetStackTraceFromId(GetFreeStackId()); +} + +void InitializeAllocator(const AllocatorOptions &options) { + instance.InitLinkerInitialized(options); +} + +void ReInitializeAllocator(const AllocatorOptions &options) { + instance.ReInitialize(options); +} + +void 
GetAllocatorOptions(AllocatorOptions *options) { + instance.GetOptions(options); +} + +AsanChunkView FindHeapChunkByAddress(uptr addr) { + return instance.FindHeapChunkByAddress(addr); +} +AsanChunkView FindHeapChunkByAllocBeg(uptr addr) { + return AsanChunkView(instance.GetAsanChunk(reinterpret_cast(addr))); +} + +void AsanThreadLocalMallocStorage::CommitBack() { + GET_STACK_TRACE_MALLOC; + instance.CommitBack(this, &stack); +} + +void PrintInternalAllocatorStats() { + instance.PrintStats(); +} + +void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) { + instance.Deallocate(ptr, 0, 0, stack, alloc_type); +} + +void asan_delete(void *ptr, uptr size, uptr alignment, + BufferedStackTrace *stack, AllocType alloc_type) { + instance.Deallocate(ptr, size, alignment, stack, alloc_type); +} + +void *asan_malloc(uptr size, BufferedStackTrace *stack) { + return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true)); +} + +void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) { + return SetErrnoOnNull(instance.Calloc(nmemb, size, stack)); +} + +void *asan_reallocarray(void *p, uptr nmemb, uptr size, + BufferedStackTrace *stack) { + if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) { + errno = errno_ENOMEM; + if (AllocatorMayReturnNull()) + return nullptr; + ReportReallocArrayOverflow(nmemb, size, stack); + } + return asan_realloc(p, nmemb * size, stack); +} + +void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) { + if (!p) + return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true)); + if (size == 0) { + if (flags()->allocator_frees_and_returns_null_on_realloc_zero) { + instance.Deallocate(p, 0, 0, stack, FROM_MALLOC); + return nullptr; + } + // Allocate a size of 1 if we shouldn't free() on Realloc to 0 + size = 1; + } + return SetErrnoOnNull(instance.Reallocate(p, size, stack)); +} + +void *asan_valloc(uptr size, BufferedStackTrace *stack) { + return SetErrnoOnNull( + instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true)); +} + +void *asan_pvalloc(uptr size, BufferedStackTrace *stack) { + uptr PageSize = GetPageSizeCached(); + if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) { + errno = errno_ENOMEM; + if (AllocatorMayReturnNull()) + return nullptr; + ReportPvallocOverflow(size, stack); + } + // pvalloc(0) should allocate one page. + size = size ? 
RoundUpTo(size, PageSize) : PageSize; + return SetErrnoOnNull( + instance.Allocate(size, PageSize, stack, FROM_MALLOC, true)); +} + +void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack, + AllocType alloc_type) { + if (UNLIKELY(!IsPowerOfTwo(alignment))) { + errno = errno_EINVAL; + if (AllocatorMayReturnNull()) + return nullptr; + ReportInvalidAllocationAlignment(alignment, stack); + } + return SetErrnoOnNull( + instance.Allocate(size, alignment, stack, alloc_type, true)); +} + +void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) { + if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) { + errno = errno_EINVAL; + if (AllocatorMayReturnNull()) + return nullptr; + ReportInvalidAlignedAllocAlignment(size, alignment, stack); + } + return SetErrnoOnNull( + instance.Allocate(size, alignment, stack, FROM_MALLOC, true)); +} + +int asan_posix_memalign(void **memptr, uptr alignment, uptr size, + BufferedStackTrace *stack) { + if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) { + if (AllocatorMayReturnNull()) + return errno_EINVAL; + ReportInvalidPosixMemalignAlignment(alignment, stack); + } + void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true); + if (UNLIKELY(!ptr)) + // OOM error is already taken care of by Allocate. + return errno_ENOMEM; + CHECK(IsAligned((uptr)ptr, alignment)); + *memptr = ptr; + return 0; +} + +uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) { + if (!ptr) return 0; + uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr)); + if (flags()->check_malloc_usable_size && (usable_size == 0)) { + GET_STACK_TRACE_FATAL(pc, bp); + ReportMallocUsableSizeNotOwned((uptr)ptr, &stack); + } + return usable_size; +} + +uptr asan_mz_size(const void *ptr) { + return instance.AllocationSize(reinterpret_cast<uptr>(ptr)); +} + +void asan_mz_force_lock() { + instance.ForceLock(); +} + +void asan_mz_force_unlock() { + instance.ForceUnlock(); +} + +void AsanSoftRssLimitExceededCallback(bool limit_exceeded) { + instance.SetRssLimitExceeded(limit_exceeded); +} + +} // namespace __asan + +// --- Implementation of LSan-specific functions --- {{{1 +namespace __lsan { +void LockAllocator() { + __asan::get_allocator().ForceLock(); +} + +void UnlockAllocator() { + __asan::get_allocator().ForceUnlock(); +} + +void GetAllocatorGlobalRange(uptr *begin, uptr *end) { + *begin = (uptr)&__asan::get_allocator(); + *end = *begin + sizeof(__asan::get_allocator()); +} + +uptr PointsIntoChunk(void* p) { + uptr addr = reinterpret_cast<uptr>(p); + __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr); + if (!m) return 0; + uptr chunk = m->Beg(); + if (m->chunk_state != __asan::CHUNK_ALLOCATED) + return 0; + if (m->AddrIsInside(addr, /*locked_version=*/true)) + return chunk; + if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true), + addr)) + return chunk; + return 0; +} + +uptr GetUserBegin(uptr chunk) { + __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk); + CHECK(m); + return m->Beg(); +} + +LsanMetadata::LsanMetadata(uptr chunk) { + metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize); +} + +bool LsanMetadata::allocated() const { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + return m->chunk_state == __asan::CHUNK_ALLOCATED; +} + +ChunkTag LsanMetadata::tag() const { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + return static_cast<ChunkTag>(m->lsan_tag); +} + +void LsanMetadata::set_tag(ChunkTag
value) { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + m->lsan_tag = value; +} + +uptr LsanMetadata::requested_size() const { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + return m->UsedSize(/*locked_version=*/true); +} + +u32 LsanMetadata::stack_trace_id() const { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + return m->alloc_context_id; +} + +void ForEachChunk(ForEachChunkCallback callback, void *arg) { + __asan::get_allocator().ForEachChunk(callback, arg); +} + +IgnoreObjectResult IgnoreObjectLocked(const void *p) { + uptr addr = reinterpret_cast<uptr>(p); + __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr); + if (!m) return kIgnoreObjectInvalid; + if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) { + if (m->lsan_tag == kIgnored) + return kIgnoreObjectAlreadyIgnored; + m->lsan_tag = __lsan::kIgnored; + return kIgnoreObjectSuccess; + } else { + return kIgnoreObjectInvalid; + } +} +} // namespace __lsan + +// ---------------------- Interface ---------------- {{{1 +using namespace __asan; + +// ASan allocator doesn't reserve extra bytes, so normally we would +// just return "size". We don't want to expose our redzone sizes, etc here. +uptr __sanitizer_get_estimated_allocated_size(uptr size) { + return size; +} + +int __sanitizer_get_ownership(const void *p) { + uptr ptr = reinterpret_cast<uptr>(p); + return instance.AllocationSize(ptr) > 0; +} + +uptr __sanitizer_get_allocated_size(const void *p) { + if (!p) return 0; + uptr ptr = reinterpret_cast<uptr>(p); + uptr allocated_size = instance.AllocationSize(ptr); + // Die if p is not malloced or if it is already freed. + if (allocated_size == 0) { + GET_STACK_TRACE_FATAL_HERE; + ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack); + } + return allocated_size; +} + +void __sanitizer_purge_allocator() { + GET_STACK_TRACE_MALLOC; + instance.Purge(&stack); +} + +#if !SANITIZER_SUPPORTS_WEAK_HOOKS +// Provide default (no-op) implementation of malloc hooks. +SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook, - void *ptr, uptr size) { + (void)ptr; + (void)size; +} + +SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) { + (void)ptr; +} +#endif diff --git a/lib/asan/asan_allocator.h b/lib/asan/asan_allocator.h index 6add47be2c9c..b37d8ef4e8d2 100644 --- a/lib/asan/asan_allocator.h +++ b/lib/asan/asan_allocator.h @@ -8,7 +8,7 @@ // // This file is a part of AddressSanitizer, an address sanity checker. // -// ASan-private header for asan_allocator.cc. +// ASan-private header for asan_allocator.cpp. //===----------------------------------------------------------------------===// #ifndef ASAN_ALLOCATOR_H diff --git a/lib/asan/asan_debugging.cc b/lib/asan/asan_debugging.cc deleted file mode 100644 index 7052a371e676..000000000000 --- a/lib/asan/asan_debugging.cc +++ /dev/null @@ -1,146 +0,0 @@ -//===-- asan_debugging.cc -------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// This file contains various functions that are generally useful to call when -// using a debugger (LLDB, GDB).
-//===----------------------------------------------------------------------===// - -#include "asan_allocator.h" -#include "asan_descriptions.h" -#include "asan_flags.h" -#include "asan_internal.h" -#include "asan_mapping.h" -#include "asan_report.h" -#include "asan_thread.h" - -namespace { -using namespace __asan; - -static void FindInfoForStackVar(uptr addr, const char *frame_descr, uptr offset, - char *name, uptr name_size, - uptr &region_address, uptr &region_size) { - InternalMmapVector<StackVarDescr> vars; - vars.reserve(16); - if (!ParseFrameDescription(frame_descr, &vars)) { - return; - } - - for (uptr i = 0; i < vars.size(); i++) { - if (offset <= vars[i].beg + vars[i].size) { - // We use name_len + 1 because strlcpy will guarantee a \0 at the end, so - // if we're limiting the copy due to name_len, we add 1 to ensure we copy - // the whole name and then terminate with '\0'. - internal_strlcpy(name, vars[i].name_pos, - Min(name_size, vars[i].name_len + 1)); - region_address = addr - (offset - vars[i].beg); - region_size = vars[i].size; - return; - } - } -} - -uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id, - bool alloc_stack) { - AsanChunkView chunk = FindHeapChunkByAddress(addr); - if (!chunk.IsValid()) return 0; - - StackTrace stack(nullptr, 0); - if (alloc_stack) { - if (chunk.AllocTid() == kInvalidTid) return 0; - stack = chunk.GetAllocStack(); - if (thread_id) *thread_id = chunk.AllocTid(); - } else { - if (chunk.FreeTid() == kInvalidTid) return 0; - stack = chunk.GetFreeStack(); - if (thread_id) *thread_id = chunk.FreeTid(); - } - - if (trace && size) { - size = Min(size, Min(stack.size, kStackTraceMax)); - for (uptr i = 0; i < size; i++) - trace[i] = StackTrace::GetPreviousInstructionPc(stack.trace[i]); - - return size; - } - - return 0; -} - -} // namespace - -SANITIZER_INTERFACE_ATTRIBUTE -const char *__asan_locate_address(uptr addr, char *name, uptr name_size, - uptr *region_address_ptr, - uptr *region_size_ptr) { - AddressDescription descr(addr); - uptr region_address = 0; - uptr region_size = 0; - const char *region_kind = nullptr; - if (name && name_size > 0) name[0] = 0; - - if (auto shadow = descr.AsShadow()) { - // region_{address,size} are already 0 - switch (shadow->kind) { - case kShadowKindLow: - region_kind = "low shadow"; - break; - case kShadowKindGap: - region_kind = "shadow gap"; - break; - case kShadowKindHigh: - region_kind = "high shadow"; - break; - } - } else if (auto heap = descr.AsHeap()) { - region_kind = "heap"; - region_address = heap->chunk_access.chunk_begin; - region_size = heap->chunk_access.chunk_size; - } else if (auto stack = descr.AsStack()) { - region_kind = "stack"; - if (!stack->frame_descr) { - // region_{address,size} are already 0 - } else { - FindInfoForStackVar(addr, stack->frame_descr, stack->offset, name, - name_size, region_address, region_size); - } - } else if (auto global = descr.AsGlobal()) { - region_kind = "global"; - auto &g = global->globals[0]; - internal_strlcpy(name, g.name, name_size); - region_address = g.beg; - region_size = g.size; - } else { - // region_{address,size} are already 0 - region_kind = "heap-invalid"; - } - - CHECK(region_kind); - if (region_address_ptr) *region_address_ptr = region_address; - if (region_size_ptr) *region_size_ptr = region_size; - return region_kind; -} - -SANITIZER_INTERFACE_ATTRIBUTE -uptr __asan_get_alloc_stack(uptr addr, uptr *trace, uptr size, u32 *thread_id) { - return AsanGetStack(addr, trace, size, thread_id, /* alloc_stack */ true); -} - -SANITIZER_INTERFACE_ATTRIBUTE -uptr
__asan_get_free_stack(uptr addr, uptr *trace, uptr size, u32 *thread_id) { - return AsanGetStack(addr, trace, size, thread_id, /* alloc_stack */ false); -} - -SANITIZER_INTERFACE_ATTRIBUTE -void __asan_get_shadow_mapping(uptr *shadow_scale, uptr *shadow_offset) { - if (shadow_scale) - *shadow_scale = SHADOW_SCALE; - if (shadow_offset) - *shadow_offset = SHADOW_OFFSET; -} diff --git a/lib/asan/asan_debugging.cpp b/lib/asan/asan_debugging.cpp new file mode 100644 index 000000000000..c01360b52fc9 --- /dev/null +++ b/lib/asan/asan_debugging.cpp @@ -0,0 +1,146 @@ +//===-- asan_debugging.cpp ------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// This file contains various functions that are generally useful to call when +// using a debugger (LLDB, GDB). +//===----------------------------------------------------------------------===// + +#include "asan_allocator.h" +#include "asan_descriptions.h" +#include "asan_flags.h" +#include "asan_internal.h" +#include "asan_mapping.h" +#include "asan_report.h" +#include "asan_thread.h" + +namespace { +using namespace __asan; + +static void FindInfoForStackVar(uptr addr, const char *frame_descr, uptr offset, + char *name, uptr name_size, + uptr *region_address, uptr *region_size) { + InternalMmapVector vars; + vars.reserve(16); + if (!ParseFrameDescription(frame_descr, &vars)) { + return; + } + + for (uptr i = 0; i < vars.size(); i++) { + if (offset <= vars[i].beg + vars[i].size) { + // We use name_len + 1 because strlcpy will guarantee a \0 at the end, so + // if we're limiting the copy due to name_len, we add 1 to ensure we copy + // the whole name and then terminate with '\0'. 
+ internal_strlcpy(name, vars[i].name_pos, + Min(name_size, vars[i].name_len + 1)); + *region_address = addr - (offset - vars[i].beg); + *region_size = vars[i].size; + return; + } + } +} + +uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id, + bool alloc_stack) { + AsanChunkView chunk = FindHeapChunkByAddress(addr); + if (!chunk.IsValid()) return 0; + + StackTrace stack(nullptr, 0); + if (alloc_stack) { + if (chunk.AllocTid() == kInvalidTid) return 0; + stack = chunk.GetAllocStack(); + if (thread_id) *thread_id = chunk.AllocTid(); + } else { + if (chunk.FreeTid() == kInvalidTid) return 0; + stack = chunk.GetFreeStack(); + if (thread_id) *thread_id = chunk.FreeTid(); + } + + if (trace && size) { + size = Min(size, Min(stack.size, kStackTraceMax)); + for (uptr i = 0; i < size; i++) + trace[i] = StackTrace::GetPreviousInstructionPc(stack.trace[i]); + + return size; + } + + return 0; +} + +} // namespace + +SANITIZER_INTERFACE_ATTRIBUTE +const char *__asan_locate_address(uptr addr, char *name, uptr name_size, + uptr *region_address_ptr, + uptr *region_size_ptr) { + AddressDescription descr(addr); + uptr region_address = 0; + uptr region_size = 0; + const char *region_kind = nullptr; + if (name && name_size > 0) name[0] = 0; + + if (auto shadow = descr.AsShadow()) { + // region_{address,size} are already 0 + switch (shadow->kind) { + case kShadowKindLow: + region_kind = "low shadow"; + break; + case kShadowKindGap: + region_kind = "shadow gap"; + break; + case kShadowKindHigh: + region_kind = "high shadow"; + break; + } + } else if (auto heap = descr.AsHeap()) { + region_kind = "heap"; + region_address = heap->chunk_access.chunk_begin; + region_size = heap->chunk_access.chunk_size; + } else if (auto stack = descr.AsStack()) { + region_kind = "stack"; + if (!stack->frame_descr) { + // region_{address,size} are already 0 + } else { + FindInfoForStackVar(addr, stack->frame_descr, stack->offset, name, + name_size, ®ion_address, ®ion_size); + } + } else if (auto global = descr.AsGlobal()) { + region_kind = "global"; + auto &g = global->globals[0]; + internal_strlcpy(name, g.name, name_size); + region_address = g.beg; + region_size = g.size; + } else { + // region_{address,size} are already 0 + region_kind = "heap-invalid"; + } + + CHECK(region_kind); + if (region_address_ptr) *region_address_ptr = region_address; + if (region_size_ptr) *region_size_ptr = region_size; + return region_kind; +} + +SANITIZER_INTERFACE_ATTRIBUTE +uptr __asan_get_alloc_stack(uptr addr, uptr *trace, uptr size, u32 *thread_id) { + return AsanGetStack(addr, trace, size, thread_id, /* alloc_stack */ true); +} + +SANITIZER_INTERFACE_ATTRIBUTE +uptr __asan_get_free_stack(uptr addr, uptr *trace, uptr size, u32 *thread_id) { + return AsanGetStack(addr, trace, size, thread_id, /* alloc_stack */ false); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __asan_get_shadow_mapping(uptr *shadow_scale, uptr *shadow_offset) { + if (shadow_scale) + *shadow_scale = SHADOW_SCALE; + if (shadow_offset) + *shadow_offset = SHADOW_OFFSET; +} diff --git a/lib/asan/asan_descriptions.cc b/lib/asan/asan_descriptions.cc deleted file mode 100644 index 9b1217a86652..000000000000 --- a/lib/asan/asan_descriptions.cc +++ /dev/null @@ -1,501 +0,0 @@ -//===-- asan_descriptions.cc ------------------------------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// ASan functions for getting information about an address and/or printing it. -//===----------------------------------------------------------------------===// - -#include "asan_descriptions.h" -#include "asan_mapping.h" -#include "asan_report.h" -#include "asan_stack.h" -#include "sanitizer_common/sanitizer_stackdepot.h" - -namespace __asan { - -AsanThreadIdAndName::AsanThreadIdAndName(AsanThreadContext *t) { - Init(t->tid, t->name); -} - -AsanThreadIdAndName::AsanThreadIdAndName(u32 tid) { - if (tid == kInvalidTid) { - Init(tid, ""); - } else { - asanThreadRegistry().CheckLocked(); - AsanThreadContext *t = GetThreadContextByTidLocked(tid); - Init(tid, t->name); - } -} - -void AsanThreadIdAndName::Init(u32 tid, const char *tname) { - int len = internal_snprintf(name, sizeof(name), "T%d", tid); - CHECK(((unsigned int)len) < sizeof(name)); - if (tname[0] != '\0') - internal_snprintf(&name[len], sizeof(name) - len, " (%s)", tname); -} - -void DescribeThread(AsanThreadContext *context) { - CHECK(context); - asanThreadRegistry().CheckLocked(); - // No need to announce the main thread. - if (context->tid == 0 || context->announced) { - return; - } - context->announced = true; - InternalScopedString str(1024); - str.append("Thread %s", AsanThreadIdAndName(context).c_str()); - if (context->parent_tid == kInvalidTid) { - str.append(" created by unknown thread\n"); - Printf("%s", str.data()); - return; - } - str.append(" created by %s here:\n", - AsanThreadIdAndName(context->parent_tid).c_str()); - Printf("%s", str.data()); - StackDepotGet(context->stack_id).Print(); - // Recursively described parent thread if needed. 
- if (flags()->print_full_thread_history) { - AsanThreadContext *parent_context = - GetThreadContextByTidLocked(context->parent_tid); - DescribeThread(parent_context); - } -} - -// Shadow descriptions -static bool GetShadowKind(uptr addr, ShadowKind *shadow_kind) { - CHECK(!AddrIsInMem(addr)); - if (AddrIsInShadowGap(addr)) { - *shadow_kind = kShadowKindGap; - } else if (AddrIsInHighShadow(addr)) { - *shadow_kind = kShadowKindHigh; - } else if (AddrIsInLowShadow(addr)) { - *shadow_kind = kShadowKindLow; - } else { - CHECK(0 && "Address is not in memory and not in shadow?"); - return false; - } - return true; -} - -bool DescribeAddressIfShadow(uptr addr) { - ShadowAddressDescription descr; - if (!GetShadowAddressInformation(addr, &descr)) return false; - descr.Print(); - return true; -} - -bool GetShadowAddressInformation(uptr addr, ShadowAddressDescription *descr) { - if (AddrIsInMem(addr)) return false; - ShadowKind shadow_kind; - if (!GetShadowKind(addr, &shadow_kind)) return false; - if (shadow_kind != kShadowKindGap) descr->shadow_byte = *(u8 *)addr; - descr->addr = addr; - descr->kind = shadow_kind; - return true; -} - -// Heap descriptions -static void GetAccessToHeapChunkInformation(ChunkAccess *descr, - AsanChunkView chunk, uptr addr, - uptr access_size) { - descr->bad_addr = addr; - if (chunk.AddrIsAtLeft(addr, access_size, &descr->offset)) { - descr->access_type = kAccessTypeLeft; - } else if (chunk.AddrIsAtRight(addr, access_size, &descr->offset)) { - descr->access_type = kAccessTypeRight; - if (descr->offset < 0) { - descr->bad_addr -= descr->offset; - descr->offset = 0; - } - } else if (chunk.AddrIsInside(addr, access_size, &descr->offset)) { - descr->access_type = kAccessTypeInside; - } else { - descr->access_type = kAccessTypeUnknown; - } - descr->chunk_begin = chunk.Beg(); - descr->chunk_size = chunk.UsedSize(); - descr->user_requested_alignment = chunk.UserRequestedAlignment(); - descr->alloc_type = chunk.GetAllocType(); -} - -static void PrintHeapChunkAccess(uptr addr, const ChunkAccess &descr) { - Decorator d; - InternalScopedString str(4096); - str.append("%s", d.Location()); - switch (descr.access_type) { - case kAccessTypeLeft: - str.append("%p is located %zd bytes to the left of", - (void *)descr.bad_addr, descr.offset); - break; - case kAccessTypeRight: - str.append("%p is located %zd bytes to the right of", - (void *)descr.bad_addr, descr.offset); - break; - case kAccessTypeInside: - str.append("%p is located %zd bytes inside of", (void *)descr.bad_addr, - descr.offset); - break; - case kAccessTypeUnknown: - str.append( - "%p is located somewhere around (this is AddressSanitizer bug!)", - (void *)descr.bad_addr); - } - str.append(" %zu-byte region [%p,%p)\n", descr.chunk_size, - (void *)descr.chunk_begin, - (void *)(descr.chunk_begin + descr.chunk_size)); - str.append("%s", d.Default()); - Printf("%s", str.data()); -} - -bool GetHeapAddressInformation(uptr addr, uptr access_size, - HeapAddressDescription *descr) { - AsanChunkView chunk = FindHeapChunkByAddress(addr); - if (!chunk.IsValid()) { - return false; - } - descr->addr = addr; - GetAccessToHeapChunkInformation(&descr->chunk_access, chunk, addr, - access_size); - CHECK_NE(chunk.AllocTid(), kInvalidTid); - descr->alloc_tid = chunk.AllocTid(); - descr->alloc_stack_id = chunk.GetAllocStackId(); - descr->free_tid = chunk.FreeTid(); - if (descr->free_tid != kInvalidTid) - descr->free_stack_id = chunk.GetFreeStackId(); - return true; -} - -static StackTrace GetStackTraceFromId(u32 id) { - CHECK(id); - StackTrace 
res = StackDepotGet(id); - CHECK(res.trace); - return res; -} - -bool DescribeAddressIfHeap(uptr addr, uptr access_size) { - HeapAddressDescription descr; - if (!GetHeapAddressInformation(addr, access_size, &descr)) { - Printf( - "AddressSanitizer can not describe address in more detail " - "(wild memory access suspected).\n"); - return false; - } - descr.Print(); - return true; -} - -// Stack descriptions -bool GetStackAddressInformation(uptr addr, uptr access_size, - StackAddressDescription *descr) { - AsanThread *t = FindThreadByStackAddress(addr); - if (!t) return false; - - descr->addr = addr; - descr->tid = t->tid(); - // Try to fetch precise stack frame for this access. - AsanThread::StackFrameAccess access; - if (!t->GetStackFrameAccessByAddr(addr, &access)) { - descr->frame_descr = nullptr; - return true; - } - - descr->offset = access.offset; - descr->access_size = access_size; - descr->frame_pc = access.frame_pc; - descr->frame_descr = access.frame_descr; - -#if SANITIZER_PPC64V1 - // On PowerPC64 ELFv1, the address of a function actually points to a - // three-doubleword data structure with the first field containing - // the address of the function's code. - descr->frame_pc = *reinterpret_cast(descr->frame_pc); -#endif - descr->frame_pc += 16; - - return true; -} - -static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr, - uptr access_size, uptr prev_var_end, - uptr next_var_beg) { - uptr var_end = var.beg + var.size; - uptr addr_end = addr + access_size; - const char *pos_descr = nullptr; - // If the variable [var.beg, var_end) is the nearest variable to the - // current memory access, indicate it in the log. - if (addr >= var.beg) { - if (addr_end <= var_end) - pos_descr = "is inside"; // May happen if this is a use-after-return. - else if (addr < var_end) - pos_descr = "partially overflows"; - else if (addr_end <= next_var_beg && - next_var_beg - addr_end >= addr - var_end) - pos_descr = "overflows"; - } else { - if (addr_end > var.beg) - pos_descr = "partially underflows"; - else if (addr >= prev_var_end && addr - prev_var_end >= var.beg - addr_end) - pos_descr = "underflows"; - } - InternalScopedString str(1024); - str.append(" [%zd, %zd)", var.beg, var_end); - // Render variable name. - str.append(" '"); - for (uptr i = 0; i < var.name_len; ++i) { - str.append("%c", var.name_pos[i]); - } - str.append("'"); - if (var.line > 0) { - str.append(" (line %d)", var.line); - } - if (pos_descr) { - Decorator d; - // FIXME: we may want to also print the size of the access here, - // but in case of accesses generated by memset it may be confusing. 
- str.append("%s <== Memory access at offset %zd %s this variable%s\n", - d.Location(), addr, pos_descr, d.Default()); - } else { - str.append("\n"); - } - Printf("%s", str.data()); -} - -bool DescribeAddressIfStack(uptr addr, uptr access_size) { - StackAddressDescription descr; - if (!GetStackAddressInformation(addr, access_size, &descr)) return false; - descr.Print(); - return true; -} - -// Global descriptions -static void DescribeAddressRelativeToGlobal(uptr addr, uptr access_size, - const __asan_global &g) { - InternalScopedString str(4096); - Decorator d; - str.append("%s", d.Location()); - if (addr < g.beg) { - str.append("%p is located %zd bytes to the left", (void *)addr, - g.beg - addr); - } else if (addr + access_size > g.beg + g.size) { - if (addr < g.beg + g.size) addr = g.beg + g.size; - str.append("%p is located %zd bytes to the right", (void *)addr, - addr - (g.beg + g.size)); - } else { - // Can it happen? - str.append("%p is located %zd bytes inside", (void *)addr, addr - g.beg); - } - str.append(" of global variable '%s' defined in '", - MaybeDemangleGlobalName(g.name)); - PrintGlobalLocation(&str, g); - str.append("' (0x%zx) of size %zu\n", g.beg, g.size); - str.append("%s", d.Default()); - PrintGlobalNameIfASCII(&str, g); - Printf("%s", str.data()); -} - -bool GetGlobalAddressInformation(uptr addr, uptr access_size, - GlobalAddressDescription *descr) { - descr->addr = addr; - int globals_num = GetGlobalsForAddress(addr, descr->globals, descr->reg_sites, - ARRAY_SIZE(descr->globals)); - descr->size = globals_num; - descr->access_size = access_size; - return globals_num != 0; -} - -bool DescribeAddressIfGlobal(uptr addr, uptr access_size, - const char *bug_type) { - GlobalAddressDescription descr; - if (!GetGlobalAddressInformation(addr, access_size, &descr)) return false; - - descr.Print(bug_type); - return true; -} - -void ShadowAddressDescription::Print() const { - Printf("Address %p is located in the %s area.\n", addr, ShadowNames[kind]); -} - -void GlobalAddressDescription::Print(const char *bug_type) const { - for (int i = 0; i < size; i++) { - DescribeAddressRelativeToGlobal(addr, access_size, globals[i]); - if (bug_type && - 0 == internal_strcmp(bug_type, "initialization-order-fiasco") && - reg_sites[i]) { - Printf(" registered at:\n"); - StackDepotGet(reg_sites[i]).Print(); - } - } -} - -bool GlobalAddressDescription::PointsInsideTheSameVariable( - const GlobalAddressDescription &other) const { - if (size == 0 || other.size == 0) return false; - - for (uptr i = 0; i < size; i++) { - const __asan_global &a = globals[i]; - for (uptr j = 0; j < other.size; j++) { - const __asan_global &b = other.globals[j]; - if (a.beg == b.beg && - a.beg <= addr && - b.beg <= other.addr && - (addr + access_size) < (a.beg + a.size) && - (other.addr + other.access_size) < (b.beg + b.size)) - return true; - } - } - - return false; -} - -void StackAddressDescription::Print() const { - Decorator d; - Printf("%s", d.Location()); - Printf("Address %p is located in stack of thread %s", addr, - AsanThreadIdAndName(tid).c_str()); - - if (!frame_descr) { - Printf("%s\n", d.Default()); - return; - } - Printf(" at offset %zu in frame%s\n", offset, d.Default()); - - // Now we print the frame where the alloca has happened. - // We print this frame as a stack trace with one element. - // The symbolizer may print more than one frame if inlining was involved. - // The frame numbers may be different than those in the stack trace printed - // previously. 
That's unfortunate, but I have no better solution, - // especially given that the alloca may be from entirely different place - // (e.g. use-after-scope, or different thread's stack). - Printf("%s", d.Default()); - StackTrace alloca_stack(&frame_pc, 1); - alloca_stack.Print(); - - InternalMmapVector vars; - vars.reserve(16); - if (!ParseFrameDescription(frame_descr, &vars)) { - Printf( - "AddressSanitizer can't parse the stack frame " - "descriptor: |%s|\n", - frame_descr); - // 'addr' is a stack address, so return true even if we can't parse frame - return; - } - uptr n_objects = vars.size(); - // Report the number of stack objects. - Printf(" This frame has %zu object(s):\n", n_objects); - - // Report all objects in this frame. - for (uptr i = 0; i < n_objects; i++) { - uptr prev_var_end = i ? vars[i - 1].beg + vars[i - 1].size : 0; - uptr next_var_beg = i + 1 < n_objects ? vars[i + 1].beg : ~(0UL); - PrintAccessAndVarIntersection(vars[i], offset, access_size, prev_var_end, - next_var_beg); - } - Printf( - "HINT: this may be a false positive if your program uses " - "some custom stack unwind mechanism, swapcontext or vfork\n"); - if (SANITIZER_WINDOWS) - Printf(" (longjmp, SEH and C++ exceptions *are* supported)\n"); - else - Printf(" (longjmp and C++ exceptions *are* supported)\n"); - - DescribeThread(GetThreadContextByTidLocked(tid)); -} - -void HeapAddressDescription::Print() const { - PrintHeapChunkAccess(addr, chunk_access); - - asanThreadRegistry().CheckLocked(); - AsanThreadContext *alloc_thread = GetThreadContextByTidLocked(alloc_tid); - StackTrace alloc_stack = GetStackTraceFromId(alloc_stack_id); - - Decorator d; - AsanThreadContext *free_thread = nullptr; - if (free_tid != kInvalidTid) { - free_thread = GetThreadContextByTidLocked(free_tid); - Printf("%sfreed by thread %s here:%s\n", d.Allocation(), - AsanThreadIdAndName(free_thread).c_str(), d.Default()); - StackTrace free_stack = GetStackTraceFromId(free_stack_id); - free_stack.Print(); - Printf("%spreviously allocated by thread %s here:%s\n", d.Allocation(), - AsanThreadIdAndName(alloc_thread).c_str(), d.Default()); - } else { - Printf("%sallocated by thread %s here:%s\n", d.Allocation(), - AsanThreadIdAndName(alloc_thread).c_str(), d.Default()); - } - alloc_stack.Print(); - DescribeThread(GetCurrentThread()); - if (free_thread) DescribeThread(free_thread); - DescribeThread(alloc_thread); -} - -AddressDescription::AddressDescription(uptr addr, uptr access_size, - bool shouldLockThreadRegistry) { - if (GetShadowAddressInformation(addr, &data.shadow)) { - data.kind = kAddressKindShadow; - return; - } - if (GetHeapAddressInformation(addr, access_size, &data.heap)) { - data.kind = kAddressKindHeap; - return; - } - - bool isStackMemory = false; - if (shouldLockThreadRegistry) { - ThreadRegistryLock l(&asanThreadRegistry()); - isStackMemory = GetStackAddressInformation(addr, access_size, &data.stack); - } else { - isStackMemory = GetStackAddressInformation(addr, access_size, &data.stack); - } - if (isStackMemory) { - data.kind = kAddressKindStack; - return; - } - - if (GetGlobalAddressInformation(addr, access_size, &data.global)) { - data.kind = kAddressKindGlobal; - return; - } - data.kind = kAddressKindWild; - addr = 0; -} - -void PrintAddressDescription(uptr addr, uptr access_size, - const char *bug_type) { - ShadowAddressDescription shadow_descr; - if (GetShadowAddressInformation(addr, &shadow_descr)) { - shadow_descr.Print(); - return; - } - - GlobalAddressDescription global_descr; - if (GetGlobalAddressInformation(addr, 
access_size, &global_descr)) { - global_descr.Print(bug_type); - return; - } - - StackAddressDescription stack_descr; - if (GetStackAddressInformation(addr, access_size, &stack_descr)) { - stack_descr.Print(); - return; - } - - HeapAddressDescription heap_descr; - if (GetHeapAddressInformation(addr, access_size, &heap_descr)) { - heap_descr.Print(); - return; - } - - // We exhausted our possibilities. Bail out. - Printf( - "AddressSanitizer can not describe address in more detail " - "(wild memory access suspected).\n"); -} -} // namespace __asan diff --git a/lib/asan/asan_descriptions.cpp b/lib/asan/asan_descriptions.cpp new file mode 100644 index 000000000000..153c874a4e77 --- /dev/null +++ b/lib/asan/asan_descriptions.cpp @@ -0,0 +1,501 @@ +//===-- asan_descriptions.cpp -----------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan functions for getting information about an address and/or printing it. +//===----------------------------------------------------------------------===// + +#include "asan_descriptions.h" +#include "asan_mapping.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "sanitizer_common/sanitizer_stackdepot.h" + +namespace __asan { + +AsanThreadIdAndName::AsanThreadIdAndName(AsanThreadContext *t) { + Init(t->tid, t->name); +} + +AsanThreadIdAndName::AsanThreadIdAndName(u32 tid) { + if (tid == kInvalidTid) { + Init(tid, ""); + } else { + asanThreadRegistry().CheckLocked(); + AsanThreadContext *t = GetThreadContextByTidLocked(tid); + Init(tid, t->name); + } +} + +void AsanThreadIdAndName::Init(u32 tid, const char *tname) { + int len = internal_snprintf(name, sizeof(name), "T%d", tid); + CHECK(((unsigned int)len) < sizeof(name)); + if (tname[0] != '\0') + internal_snprintf(&name[len], sizeof(name) - len, " (%s)", tname); +} + +void DescribeThread(AsanThreadContext *context) { + CHECK(context); + asanThreadRegistry().CheckLocked(); + // No need to announce the main thread. + if (context->tid == 0 || context->announced) { + return; + } + context->announced = true; + InternalScopedString str(1024); + str.append("Thread %s", AsanThreadIdAndName(context).c_str()); + if (context->parent_tid == kInvalidTid) { + str.append(" created by unknown thread\n"); + Printf("%s", str.data()); + return; + } + str.append(" created by %s here:\n", + AsanThreadIdAndName(context->parent_tid).c_str()); + Printf("%s", str.data()); + StackDepotGet(context->stack_id).Print(); + // Recursively described parent thread if needed. 
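The flag-gated recursion this comment refers to follows immediately below. For reference, the "T<tid> (<name>)" label built by AsanThreadIdAndName::Init above can be sketched standalone like this; plain std::snprintf stands in for internal_snprintf, and the helper name and buffer handling are illustrative assumptions, not the ASan implementation:

    #include <cstdio>

    // Illustrative sketch: produce labels such as "T5 (worker)" or just "T5".
    static void FormatThreadLabel(char *buf, int buf_size, int tid,
                                  const char *tname) {
      int len = std::snprintf(buf, buf_size, "T%d", tid);
      if (len > 0 && len < buf_size && tname && tname[0] != '\0')
        std::snprintf(buf + len, buf_size - len, " (%s)", tname);
    }

Calling FormatThreadLabel(buf, sizeof(buf), 5, "worker") would yield "T5 (worker)", matching the thread labels used throughout these reports.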
+ if (flags()->print_full_thread_history) { + AsanThreadContext *parent_context = + GetThreadContextByTidLocked(context->parent_tid); + DescribeThread(parent_context); + } +} + +// Shadow descriptions +static bool GetShadowKind(uptr addr, ShadowKind *shadow_kind) { + CHECK(!AddrIsInMem(addr)); + if (AddrIsInShadowGap(addr)) { + *shadow_kind = kShadowKindGap; + } else if (AddrIsInHighShadow(addr)) { + *shadow_kind = kShadowKindHigh; + } else if (AddrIsInLowShadow(addr)) { + *shadow_kind = kShadowKindLow; + } else { + CHECK(0 && "Address is not in memory and not in shadow?"); + return false; + } + return true; +} + +bool DescribeAddressIfShadow(uptr addr) { + ShadowAddressDescription descr; + if (!GetShadowAddressInformation(addr, &descr)) return false; + descr.Print(); + return true; +} + +bool GetShadowAddressInformation(uptr addr, ShadowAddressDescription *descr) { + if (AddrIsInMem(addr)) return false; + ShadowKind shadow_kind; + if (!GetShadowKind(addr, &shadow_kind)) return false; + if (shadow_kind != kShadowKindGap) descr->shadow_byte = *(u8 *)addr; + descr->addr = addr; + descr->kind = shadow_kind; + return true; +} + +// Heap descriptions +static void GetAccessToHeapChunkInformation(ChunkAccess *descr, + AsanChunkView chunk, uptr addr, + uptr access_size) { + descr->bad_addr = addr; + if (chunk.AddrIsAtLeft(addr, access_size, &descr->offset)) { + descr->access_type = kAccessTypeLeft; + } else if (chunk.AddrIsAtRight(addr, access_size, &descr->offset)) { + descr->access_type = kAccessTypeRight; + if (descr->offset < 0) { + descr->bad_addr -= descr->offset; + descr->offset = 0; + } + } else if (chunk.AddrIsInside(addr, access_size, &descr->offset)) { + descr->access_type = kAccessTypeInside; + } else { + descr->access_type = kAccessTypeUnknown; + } + descr->chunk_begin = chunk.Beg(); + descr->chunk_size = chunk.UsedSize(); + descr->user_requested_alignment = chunk.UserRequestedAlignment(); + descr->alloc_type = chunk.GetAllocType(); +} + +static void PrintHeapChunkAccess(uptr addr, const ChunkAccess &descr) { + Decorator d; + InternalScopedString str(4096); + str.append("%s", d.Location()); + switch (descr.access_type) { + case kAccessTypeLeft: + str.append("%p is located %zd bytes to the left of", + (void *)descr.bad_addr, descr.offset); + break; + case kAccessTypeRight: + str.append("%p is located %zd bytes to the right of", + (void *)descr.bad_addr, descr.offset); + break; + case kAccessTypeInside: + str.append("%p is located %zd bytes inside of", (void *)descr.bad_addr, + descr.offset); + break; + case kAccessTypeUnknown: + str.append( + "%p is located somewhere around (this is AddressSanitizer bug!)", + (void *)descr.bad_addr); + } + str.append(" %zu-byte region [%p,%p)\n", descr.chunk_size, + (void *)descr.chunk_begin, + (void *)(descr.chunk_begin + descr.chunk_size)); + str.append("%s", d.Default()); + Printf("%s", str.data()); +} + +bool GetHeapAddressInformation(uptr addr, uptr access_size, + HeapAddressDescription *descr) { + AsanChunkView chunk = FindHeapChunkByAddress(addr); + if (!chunk.IsValid()) { + return false; + } + descr->addr = addr; + GetAccessToHeapChunkInformation(&descr->chunk_access, chunk, addr, + access_size); + CHECK_NE(chunk.AllocTid(), kInvalidTid); + descr->alloc_tid = chunk.AllocTid(); + descr->alloc_stack_id = chunk.GetAllocStackId(); + descr->free_tid = chunk.FreeTid(); + if (descr->free_tid != kInvalidTid) + descr->free_stack_id = chunk.GetFreeStackId(); + return true; +} + +static StackTrace GetStackTraceFromId(u32 id) { + CHECK(id); + StackTrace 
res = StackDepotGet(id); + CHECK(res.trace); + return res; +} + +bool DescribeAddressIfHeap(uptr addr, uptr access_size) { + HeapAddressDescription descr; + if (!GetHeapAddressInformation(addr, access_size, &descr)) { + Printf( + "AddressSanitizer can not describe address in more detail " + "(wild memory access suspected).\n"); + return false; + } + descr.Print(); + return true; +} + +// Stack descriptions +bool GetStackAddressInformation(uptr addr, uptr access_size, + StackAddressDescription *descr) { + AsanThread *t = FindThreadByStackAddress(addr); + if (!t) return false; + + descr->addr = addr; + descr->tid = t->tid(); + // Try to fetch precise stack frame for this access. + AsanThread::StackFrameAccess access; + if (!t->GetStackFrameAccessByAddr(addr, &access)) { + descr->frame_descr = nullptr; + return true; + } + + descr->offset = access.offset; + descr->access_size = access_size; + descr->frame_pc = access.frame_pc; + descr->frame_descr = access.frame_descr; + +#if SANITIZER_PPC64V1 + // On PowerPC64 ELFv1, the address of a function actually points to a + // three-doubleword data structure with the first field containing + // the address of the function's code. + descr->frame_pc = *reinterpret_cast(descr->frame_pc); +#endif + descr->frame_pc += 16; + + return true; +} + +static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr, + uptr access_size, uptr prev_var_end, + uptr next_var_beg) { + uptr var_end = var.beg + var.size; + uptr addr_end = addr + access_size; + const char *pos_descr = nullptr; + // If the variable [var.beg, var_end) is the nearest variable to the + // current memory access, indicate it in the log. + if (addr >= var.beg) { + if (addr_end <= var_end) + pos_descr = "is inside"; // May happen if this is a use-after-return. + else if (addr < var_end) + pos_descr = "partially overflows"; + else if (addr_end <= next_var_beg && + next_var_beg - addr_end >= addr - var_end) + pos_descr = "overflows"; + } else { + if (addr_end > var.beg) + pos_descr = "partially underflows"; + else if (addr >= prev_var_end && addr - prev_var_end >= var.beg - addr_end) + pos_descr = "underflows"; + } + InternalScopedString str(1024); + str.append(" [%zd, %zd)", var.beg, var_end); + // Render variable name. + str.append(" '"); + for (uptr i = 0; i < var.name_len; ++i) { + str.append("%c", var.name_pos[i]); + } + str.append("'"); + if (var.line > 0) { + str.append(" (line %d)", var.line); + } + if (pos_descr) { + Decorator d; + // FIXME: we may want to also print the size of the access here, + // but in case of accesses generated by memset it may be confusing. 
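As a rough, simplified sketch of the classification implemented above (it deliberately omits the proximity checks against the previous and next variables that the real code performs), the position of an access [addr, addr+size) relative to one variable [beg, end) maps to the report wording like this:

    #include <cstdint>

    // Simplified sketch only: choose the wording used in the frame report for
    // one access/variable pair, without the neighbour-proximity heuristics.
    static const char *ClassifyAccess(std::uintptr_t addr, std::uintptr_t size,
                                      std::uintptr_t beg, std::uintptr_t end) {
      std::uintptr_t addr_end = addr + size;
      if (addr >= beg) {
        if (addr_end <= end) return "is inside";        // e.g. use-after-return
        if (addr < end)      return "partially overflows";
        return "overflows";                             // starts past the end
      }
      if (addr_end > beg) return "partially underflows";
      return "underflows";                              // ends before the start
    }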
+ str.append("%s <== Memory access at offset %zd %s this variable%s\n", + d.Location(), addr, pos_descr, d.Default()); + } else { + str.append("\n"); + } + Printf("%s", str.data()); +} + +bool DescribeAddressIfStack(uptr addr, uptr access_size) { + StackAddressDescription descr; + if (!GetStackAddressInformation(addr, access_size, &descr)) return false; + descr.Print(); + return true; +} + +// Global descriptions +static void DescribeAddressRelativeToGlobal(uptr addr, uptr access_size, + const __asan_global &g) { + InternalScopedString str(4096); + Decorator d; + str.append("%s", d.Location()); + if (addr < g.beg) { + str.append("%p is located %zd bytes to the left", (void *)addr, + g.beg - addr); + } else if (addr + access_size > g.beg + g.size) { + if (addr < g.beg + g.size) addr = g.beg + g.size; + str.append("%p is located %zd bytes to the right", (void *)addr, + addr - (g.beg + g.size)); + } else { + // Can it happen? + str.append("%p is located %zd bytes inside", (void *)addr, addr - g.beg); + } + str.append(" of global variable '%s' defined in '", + MaybeDemangleGlobalName(g.name)); + PrintGlobalLocation(&str, g); + str.append("' (0x%zx) of size %zu\n", g.beg, g.size); + str.append("%s", d.Default()); + PrintGlobalNameIfASCII(&str, g); + Printf("%s", str.data()); +} + +bool GetGlobalAddressInformation(uptr addr, uptr access_size, + GlobalAddressDescription *descr) { + descr->addr = addr; + int globals_num = GetGlobalsForAddress(addr, descr->globals, descr->reg_sites, + ARRAY_SIZE(descr->globals)); + descr->size = globals_num; + descr->access_size = access_size; + return globals_num != 0; +} + +bool DescribeAddressIfGlobal(uptr addr, uptr access_size, + const char *bug_type) { + GlobalAddressDescription descr; + if (!GetGlobalAddressInformation(addr, access_size, &descr)) return false; + + descr.Print(bug_type); + return true; +} + +void ShadowAddressDescription::Print() const { + Printf("Address %p is located in the %s area.\n", addr, ShadowNames[kind]); +} + +void GlobalAddressDescription::Print(const char *bug_type) const { + for (int i = 0; i < size; i++) { + DescribeAddressRelativeToGlobal(addr, access_size, globals[i]); + if (bug_type && + 0 == internal_strcmp(bug_type, "initialization-order-fiasco") && + reg_sites[i]) { + Printf(" registered at:\n"); + StackDepotGet(reg_sites[i]).Print(); + } + } +} + +bool GlobalAddressDescription::PointsInsideTheSameVariable( + const GlobalAddressDescription &other) const { + if (size == 0 || other.size == 0) return false; + + for (uptr i = 0; i < size; i++) { + const __asan_global &a = globals[i]; + for (uptr j = 0; j < other.size; j++) { + const __asan_global &b = other.globals[j]; + if (a.beg == b.beg && + a.beg <= addr && + b.beg <= other.addr && + (addr + access_size) < (a.beg + a.size) && + (other.addr + other.access_size) < (b.beg + b.size)) + return true; + } + } + + return false; +} + +void StackAddressDescription::Print() const { + Decorator d; + Printf("%s", d.Location()); + Printf("Address %p is located in stack of thread %s", addr, + AsanThreadIdAndName(tid).c_str()); + + if (!frame_descr) { + Printf("%s\n", d.Default()); + return; + } + Printf(" at offset %zu in frame%s\n", offset, d.Default()); + + // Now we print the frame where the alloca has happened. + // We print this frame as a stack trace with one element. + // The symbolizer may print more than one frame if inlining was involved. + // The frame numbers may be different than those in the stack trace printed + // previously. 
That's unfortunate, but I have no better solution, + // especially given that the alloca may be from entirely different place + // (e.g. use-after-scope, or different thread's stack). + Printf("%s", d.Default()); + StackTrace alloca_stack(&frame_pc, 1); + alloca_stack.Print(); + + InternalMmapVector vars; + vars.reserve(16); + if (!ParseFrameDescription(frame_descr, &vars)) { + Printf( + "AddressSanitizer can't parse the stack frame " + "descriptor: |%s|\n", + frame_descr); + // 'addr' is a stack address, so return true even if we can't parse frame + return; + } + uptr n_objects = vars.size(); + // Report the number of stack objects. + Printf(" This frame has %zu object(s):\n", n_objects); + + // Report all objects in this frame. + for (uptr i = 0; i < n_objects; i++) { + uptr prev_var_end = i ? vars[i - 1].beg + vars[i - 1].size : 0; + uptr next_var_beg = i + 1 < n_objects ? vars[i + 1].beg : ~(0UL); + PrintAccessAndVarIntersection(vars[i], offset, access_size, prev_var_end, + next_var_beg); + } + Printf( + "HINT: this may be a false positive if your program uses " + "some custom stack unwind mechanism, swapcontext or vfork\n"); + if (SANITIZER_WINDOWS) + Printf(" (longjmp, SEH and C++ exceptions *are* supported)\n"); + else + Printf(" (longjmp and C++ exceptions *are* supported)\n"); + + DescribeThread(GetThreadContextByTidLocked(tid)); +} + +void HeapAddressDescription::Print() const { + PrintHeapChunkAccess(addr, chunk_access); + + asanThreadRegistry().CheckLocked(); + AsanThreadContext *alloc_thread = GetThreadContextByTidLocked(alloc_tid); + StackTrace alloc_stack = GetStackTraceFromId(alloc_stack_id); + + Decorator d; + AsanThreadContext *free_thread = nullptr; + if (free_tid != kInvalidTid) { + free_thread = GetThreadContextByTidLocked(free_tid); + Printf("%sfreed by thread %s here:%s\n", d.Allocation(), + AsanThreadIdAndName(free_thread).c_str(), d.Default()); + StackTrace free_stack = GetStackTraceFromId(free_stack_id); + free_stack.Print(); + Printf("%spreviously allocated by thread %s here:%s\n", d.Allocation(), + AsanThreadIdAndName(alloc_thread).c_str(), d.Default()); + } else { + Printf("%sallocated by thread %s here:%s\n", d.Allocation(), + AsanThreadIdAndName(alloc_thread).c_str(), d.Default()); + } + alloc_stack.Print(); + DescribeThread(GetCurrentThread()); + if (free_thread) DescribeThread(free_thread); + DescribeThread(alloc_thread); +} + +AddressDescription::AddressDescription(uptr addr, uptr access_size, + bool shouldLockThreadRegistry) { + if (GetShadowAddressInformation(addr, &data.shadow)) { + data.kind = kAddressKindShadow; + return; + } + if (GetHeapAddressInformation(addr, access_size, &data.heap)) { + data.kind = kAddressKindHeap; + return; + } + + bool isStackMemory = false; + if (shouldLockThreadRegistry) { + ThreadRegistryLock l(&asanThreadRegistry()); + isStackMemory = GetStackAddressInformation(addr, access_size, &data.stack); + } else { + isStackMemory = GetStackAddressInformation(addr, access_size, &data.stack); + } + if (isStackMemory) { + data.kind = kAddressKindStack; + return; + } + + if (GetGlobalAddressInformation(addr, access_size, &data.global)) { + data.kind = kAddressKindGlobal; + return; + } + data.kind = kAddressKindWild; + addr = 0; +} + +void PrintAddressDescription(uptr addr, uptr access_size, + const char *bug_type) { + ShadowAddressDescription shadow_descr; + if (GetShadowAddressInformation(addr, &shadow_descr)) { + shadow_descr.Print(); + return; + } + + GlobalAddressDescription global_descr; + if (GetGlobalAddressInformation(addr, 
access_size, &global_descr)) { + global_descr.Print(bug_type); + return; + } + + StackAddressDescription stack_descr; + if (GetStackAddressInformation(addr, access_size, &stack_descr)) { + stack_descr.Print(); + return; + } + + HeapAddressDescription heap_descr; + if (GetHeapAddressInformation(addr, access_size, &heap_descr)) { + heap_descr.Print(); + return; + } + + // We exhausted our possibilities. Bail out. + Printf( + "AddressSanitizer can not describe address in more detail " + "(wild memory access suspected).\n"); +} +} // namespace __asan diff --git a/lib/asan/asan_descriptions.h b/lib/asan/asan_descriptions.h index 0226d844afc9..ee0e2061559e 100644 --- a/lib/asan/asan_descriptions.h +++ b/lib/asan/asan_descriptions.h @@ -8,7 +8,7 @@ // // This file is a part of AddressSanitizer, an address sanity checker. // -// ASan-private header for asan_descriptions.cc. +// ASan-private header for asan_descriptions.cpp. // TODO(filcab): Most struct definitions should move to the interface headers. //===----------------------------------------------------------------------===// #ifndef ASAN_DESCRIPTIONS_H @@ -203,7 +203,7 @@ class AddressDescription { AddressDescription() = default; // shouldLockThreadRegistry allows us to skip locking if we're sure we already // have done it. - AddressDescription(uptr addr, bool shouldLockThreadRegistry = true) + explicit AddressDescription(uptr addr, bool shouldLockThreadRegistry = true) : AddressDescription(addr, 1, shouldLockThreadRegistry) {} AddressDescription(uptr addr, uptr access_size, bool shouldLockThreadRegistry = true); diff --git a/lib/asan/asan_errors.cc b/lib/asan/asan_errors.cc deleted file mode 100644 index d598e37b940e..000000000000 --- a/lib/asan/asan_errors.cc +++ /dev/null @@ -1,597 +0,0 @@ -//===-- asan_errors.cc ------------------------------------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// ASan implementation for error structures. -//===----------------------------------------------------------------------===// - -#include "asan_errors.h" -#include "asan_descriptions.h" -#include "asan_mapping.h" -#include "asan_report.h" -#include "asan_stack.h" -#include "sanitizer_common/sanitizer_stackdepot.h" - -namespace __asan { - -static void OnStackUnwind(const SignalContext &sig, - const void *callback_context, - BufferedStackTrace *stack) { - bool fast = common_flags()->fast_unwind_on_fatal; -#if SANITIZER_FREEBSD || SANITIZER_NETBSD - // On FreeBSD the slow unwinding that leverages _Unwind_Backtrace() - // yields the call stack of the signal's handler and not of the code - // that raised the signal (as it does on Linux). - fast = true; -#endif - // Tests and maybe some users expect that scariness is going to be printed - // just before the stack. As only asan has scariness score we have no - // corresponding code in the sanitizer_common and we use this callback to - // print it. 
- static_cast(callback_context)->Print(); - stack->Unwind(sig.pc, sig.bp, sig.context, fast); -} - -void ErrorDeadlySignal::Print() { - ReportDeadlySignal(signal, tid, &OnStackUnwind, &scariness); -} - -void ErrorDoubleFree::Print() { - Decorator d; - Printf("%s", d.Error()); - Report( - "ERROR: AddressSanitizer: attempting %s on %p in thread %s:\n", - scariness.GetDescription(), addr_description.addr, - AsanThreadIdAndName(tid).c_str()); - Printf("%s", d.Default()); - scariness.Print(); - GET_STACK_TRACE_FATAL(second_free_stack->trace[0], - second_free_stack->top_frame_bp); - stack.Print(); - addr_description.Print(); - ReportErrorSummary(scariness.GetDescription(), &stack); -} - -void ErrorNewDeleteTypeMismatch::Print() { - Decorator d; - Printf("%s", d.Error()); - Report( - "ERROR: AddressSanitizer: %s on %p in thread %s:\n", - scariness.GetDescription(), addr_description.addr, - AsanThreadIdAndName(tid).c_str()); - Printf("%s object passed to delete has wrong type:\n", d.Default()); - if (delete_size != 0) { - Printf( - " size of the allocated type: %zd bytes;\n" - " size of the deallocated type: %zd bytes.\n", - addr_description.chunk_access.chunk_size, delete_size); - } - const uptr user_alignment = - addr_description.chunk_access.user_requested_alignment; - if (delete_alignment != user_alignment) { - char user_alignment_str[32]; - char delete_alignment_str[32]; - internal_snprintf(user_alignment_str, sizeof(user_alignment_str), - "%zd bytes", user_alignment); - internal_snprintf(delete_alignment_str, sizeof(delete_alignment_str), - "%zd bytes", delete_alignment); - static const char *kDefaultAlignment = "default-aligned"; - Printf( - " alignment of the allocated type: %s;\n" - " alignment of the deallocated type: %s.\n", - user_alignment > 0 ? user_alignment_str : kDefaultAlignment, - delete_alignment > 0 ? 
delete_alignment_str : kDefaultAlignment); - } - CHECK_GT(free_stack->size, 0); - scariness.Print(); - GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp); - stack.Print(); - addr_description.Print(); - ReportErrorSummary(scariness.GetDescription(), &stack); - Report( - "HINT: if you don't care about these errors you may set " - "ASAN_OPTIONS=new_delete_type_mismatch=0\n"); -} - -void ErrorFreeNotMalloced::Print() { - Decorator d; - Printf("%s", d.Error()); - Report( - "ERROR: AddressSanitizer: attempting free on address " - "which was not malloc()-ed: %p in thread %s\n", - addr_description.Address(), AsanThreadIdAndName(tid).c_str()); - Printf("%s", d.Default()); - CHECK_GT(free_stack->size, 0); - scariness.Print(); - GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp); - stack.Print(); - addr_description.Print(); - ReportErrorSummary(scariness.GetDescription(), &stack); -} - -void ErrorAllocTypeMismatch::Print() { - static const char *alloc_names[] = {"INVALID", "malloc", "operator new", - "operator new []"}; - static const char *dealloc_names[] = {"INVALID", "free", "operator delete", - "operator delete []"}; - CHECK_NE(alloc_type, dealloc_type); - Decorator d; - Printf("%s", d.Error()); - Report("ERROR: AddressSanitizer: %s (%s vs %s) on %p\n", - scariness.GetDescription(), alloc_names[alloc_type], - dealloc_names[dealloc_type], addr_description.Address()); - Printf("%s", d.Default()); - CHECK_GT(dealloc_stack->size, 0); - scariness.Print(); - GET_STACK_TRACE_FATAL(dealloc_stack->trace[0], dealloc_stack->top_frame_bp); - stack.Print(); - addr_description.Print(); - ReportErrorSummary(scariness.GetDescription(), &stack); - Report( - "HINT: if you don't care about these errors you may set " - "ASAN_OPTIONS=alloc_dealloc_mismatch=0\n"); -} - -void ErrorMallocUsableSizeNotOwned::Print() { - Decorator d; - Printf("%s", d.Error()); - Report( - "ERROR: AddressSanitizer: attempting to call malloc_usable_size() for " - "pointer which is not owned: %p\n", - addr_description.Address()); - Printf("%s", d.Default()); - stack->Print(); - addr_description.Print(); - ReportErrorSummary(scariness.GetDescription(), stack); -} - -void ErrorSanitizerGetAllocatedSizeNotOwned::Print() { - Decorator d; - Printf("%s", d.Error()); - Report( - "ERROR: AddressSanitizer: attempting to call " - "__sanitizer_get_allocated_size() for pointer which is not owned: %p\n", - addr_description.Address()); - Printf("%s", d.Default()); - stack->Print(); - addr_description.Print(); - ReportErrorSummary(scariness.GetDescription(), stack); -} - -void ErrorCallocOverflow::Print() { - Decorator d; - Printf("%s", d.Error()); - Report( - "ERROR: AddressSanitizer: calloc parameters overflow: count * size " - "(%zd * %zd) cannot be represented in type size_t (thread %s)\n", - count, size, AsanThreadIdAndName(tid).c_str()); - Printf("%s", d.Default()); - stack->Print(); - PrintHintAllocatorCannotReturnNull(); - ReportErrorSummary(scariness.GetDescription(), stack); -} - -void ErrorReallocArrayOverflow::Print() { - Decorator d; - Printf("%s", d.Error()); - Report( - "ERROR: AddressSanitizer: reallocarray parameters overflow: count * size " - "(%zd * %zd) cannot be represented in type size_t (thread %s)\n", - count, size, AsanThreadIdAndName(tid).c_str()); - Printf("%s", d.Default()); - stack->Print(); - PrintHintAllocatorCannotReturnNull(); - ReportErrorSummary(scariness.GetDescription(), stack); -} - -void ErrorPvallocOverflow::Print() { - Decorator d; - Printf("%s", d.Error()); - Report( - "ERROR: 
AddressSanitizer: pvalloc parameters overflow: size 0x%zx " - "rounded up to system page size 0x%zx cannot be represented in type " - "size_t (thread %s)\n", - size, GetPageSizeCached(), AsanThreadIdAndName(tid).c_str()); - Printf("%s", d.Default()); - stack->Print(); - PrintHintAllocatorCannotReturnNull(); - ReportErrorSummary(scariness.GetDescription(), stack); -} - -void ErrorInvalidAllocationAlignment::Print() { - Decorator d; - Printf("%s", d.Error()); - Report( - "ERROR: AddressSanitizer: invalid allocation alignment: %zd, " - "alignment must be a power of two (thread %s)\n", - alignment, AsanThreadIdAndName(tid).c_str()); - Printf("%s", d.Default()); - stack->Print(); - PrintHintAllocatorCannotReturnNull(); - ReportErrorSummary(scariness.GetDescription(), stack); -} - -void ErrorInvalidAlignedAllocAlignment::Print() { - Decorator d; - Printf("%s", d.Error()); -#if SANITIZER_POSIX - Report("ERROR: AddressSanitizer: invalid alignment requested in " - "aligned_alloc: %zd, alignment must be a power of two and the " - "requested size 0x%zx must be a multiple of alignment " - "(thread %s)\n", alignment, size, AsanThreadIdAndName(tid).c_str()); -#else - Report("ERROR: AddressSanitizer: invalid alignment requested in " - "aligned_alloc: %zd, the requested size 0x%zx must be a multiple of " - "alignment (thread %s)\n", alignment, size, - AsanThreadIdAndName(tid).c_str()); -#endif - Printf("%s", d.Default()); - stack->Print(); - PrintHintAllocatorCannotReturnNull(); - ReportErrorSummary(scariness.GetDescription(), stack); -} - -void ErrorInvalidPosixMemalignAlignment::Print() { - Decorator d; - Printf("%s", d.Error()); - Report( - "ERROR: AddressSanitizer: invalid alignment requested in posix_memalign: " - "%zd, alignment must be a power of two and a multiple of sizeof(void*) " - "== %zd (thread %s)\n", - alignment, sizeof(void*), AsanThreadIdAndName(tid).c_str()); // NOLINT - Printf("%s", d.Default()); - stack->Print(); - PrintHintAllocatorCannotReturnNull(); - ReportErrorSummary(scariness.GetDescription(), stack); -} - -void ErrorAllocationSizeTooBig::Print() { - Decorator d; - Printf("%s", d.Error()); - Report( - "ERROR: AddressSanitizer: requested allocation size 0x%zx (0x%zx after " - "adjustments for alignment, red zones etc.) 
exceeds maximum supported " - "size of 0x%zx (thread %s)\n", - user_size, total_size, max_size, AsanThreadIdAndName(tid).c_str()); - Printf("%s", d.Default()); - stack->Print(); - PrintHintAllocatorCannotReturnNull(); - ReportErrorSummary(scariness.GetDescription(), stack); -} - -void ErrorRssLimitExceeded::Print() { - Decorator d; - Printf("%s", d.Error()); - Report( - "ERROR: AddressSanitizer: specified RSS limit exceeded, currently set to " - "soft_rss_limit_mb=%zd\n", common_flags()->soft_rss_limit_mb); - Printf("%s", d.Default()); - stack->Print(); - PrintHintAllocatorCannotReturnNull(); - ReportErrorSummary(scariness.GetDescription(), stack); -} - -void ErrorOutOfMemory::Print() { - Decorator d; - Printf("%s", d.Error()); - Report( - "ERROR: AddressSanitizer: allocator is out of memory trying to allocate " - "0x%zx bytes\n", requested_size); - Printf("%s", d.Default()); - stack->Print(); - PrintHintAllocatorCannotReturnNull(); - ReportErrorSummary(scariness.GetDescription(), stack); -} - -void ErrorStringFunctionMemoryRangesOverlap::Print() { - Decorator d; - char bug_type[100]; - internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function); - Printf("%s", d.Error()); - Report( - "ERROR: AddressSanitizer: %s: memory ranges [%p,%p) and [%p, %p) " - "overlap\n", - bug_type, addr1_description.Address(), - addr1_description.Address() + length1, addr2_description.Address(), - addr2_description.Address() + length2); - Printf("%s", d.Default()); - scariness.Print(); - stack->Print(); - addr1_description.Print(); - addr2_description.Print(); - ReportErrorSummary(bug_type, stack); -} - -void ErrorStringFunctionSizeOverflow::Print() { - Decorator d; - Printf("%s", d.Error()); - Report("ERROR: AddressSanitizer: %s: (size=%zd)\n", - scariness.GetDescription(), size); - Printf("%s", d.Default()); - scariness.Print(); - stack->Print(); - addr_description.Print(); - ReportErrorSummary(scariness.GetDescription(), stack); -} - -void ErrorBadParamsToAnnotateContiguousContainer::Print() { - Report( - "ERROR: AddressSanitizer: bad parameters to " - "__sanitizer_annotate_contiguous_container:\n" - " beg : %p\n" - " end : %p\n" - " old_mid : %p\n" - " new_mid : %p\n", - beg, end, old_mid, new_mid); - uptr granularity = SHADOW_GRANULARITY; - if (!IsAligned(beg, granularity)) - Report("ERROR: beg is not aligned by %d\n", granularity); - stack->Print(); - ReportErrorSummary(scariness.GetDescription(), stack); -} - -void ErrorODRViolation::Print() { - Decorator d; - Printf("%s", d.Error()); - Report("ERROR: AddressSanitizer: %s (%p):\n", scariness.GetDescription(), - global1.beg); - Printf("%s", d.Default()); - InternalScopedString g1_loc(256), g2_loc(256); - PrintGlobalLocation(&g1_loc, global1); - PrintGlobalLocation(&g2_loc, global2); - Printf(" [1] size=%zd '%s' %s\n", global1.size, - MaybeDemangleGlobalName(global1.name), g1_loc.data()); - Printf(" [2] size=%zd '%s' %s\n", global2.size, - MaybeDemangleGlobalName(global2.name), g2_loc.data()); - if (stack_id1 && stack_id2) { - Printf("These globals were registered at these points:\n"); - Printf(" [1]:\n"); - StackDepotGet(stack_id1).Print(); - Printf(" [2]:\n"); - StackDepotGet(stack_id2).Print(); - } - Report( - "HINT: if you don't care about these errors you may set " - "ASAN_OPTIONS=detect_odr_violation=0\n"); - InternalScopedString error_msg(256); - error_msg.append("%s: global '%s' at %s", scariness.GetDescription(), - MaybeDemangleGlobalName(global1.name), g1_loc.data()); - ReportErrorSummary(error_msg.data()); -} - -void 
ErrorInvalidPointerPair::Print() { - Decorator d; - Printf("%s", d.Error()); - Report("ERROR: AddressSanitizer: %s: %p %p\n", scariness.GetDescription(), - addr1_description.Address(), addr2_description.Address()); - Printf("%s", d.Default()); - GET_STACK_TRACE_FATAL(pc, bp); - stack.Print(); - addr1_description.Print(); - addr2_description.Print(); - ReportErrorSummary(scariness.GetDescription(), &stack); -} - -static bool AdjacentShadowValuesAreFullyPoisoned(u8 *s) { - return s[-1] > 127 && s[1] > 127; -} - -ErrorGeneric::ErrorGeneric(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr addr, - bool is_write_, uptr access_size_) - : ErrorBase(tid), - addr_description(addr, access_size_, /*shouldLockThreadRegistry=*/false), - pc(pc_), - bp(bp_), - sp(sp_), - access_size(access_size_), - is_write(is_write_), - shadow_val(0) { - scariness.Clear(); - if (access_size) { - if (access_size <= 9) { - char desr[] = "?-byte"; - desr[0] = '0' + access_size; - scariness.Scare(access_size + access_size / 2, desr); - } else if (access_size >= 10) { - scariness.Scare(15, "multi-byte"); - } - is_write ? scariness.Scare(20, "write") : scariness.Scare(1, "read"); - - // Determine the error type. - bug_descr = "unknown-crash"; - if (AddrIsInMem(addr)) { - u8 *shadow_addr = (u8 *)MemToShadow(addr); - // If we are accessing 16 bytes, look at the second shadow byte. - if (*shadow_addr == 0 && access_size > SHADOW_GRANULARITY) shadow_addr++; - // If we are in the partial right redzone, look at the next shadow byte. - if (*shadow_addr > 0 && *shadow_addr < 128) shadow_addr++; - bool far_from_bounds = false; - shadow_val = *shadow_addr; - int bug_type_score = 0; - // For use-after-frees reads are almost as bad as writes. - int read_after_free_bonus = 0; - switch (shadow_val) { - case kAsanHeapLeftRedzoneMagic: - case kAsanArrayCookieMagic: - bug_descr = "heap-buffer-overflow"; - bug_type_score = 10; - far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr); - break; - case kAsanHeapFreeMagic: - bug_descr = "heap-use-after-free"; - bug_type_score = 20; - if (!is_write) read_after_free_bonus = 18; - break; - case kAsanStackLeftRedzoneMagic: - bug_descr = "stack-buffer-underflow"; - bug_type_score = 25; - far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr); - break; - case kAsanInitializationOrderMagic: - bug_descr = "initialization-order-fiasco"; - bug_type_score = 1; - break; - case kAsanStackMidRedzoneMagic: - case kAsanStackRightRedzoneMagic: - bug_descr = "stack-buffer-overflow"; - bug_type_score = 25; - far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr); - break; - case kAsanStackAfterReturnMagic: - bug_descr = "stack-use-after-return"; - bug_type_score = 30; - if (!is_write) read_after_free_bonus = 18; - break; - case kAsanUserPoisonedMemoryMagic: - bug_descr = "use-after-poison"; - bug_type_score = 20; - break; - case kAsanContiguousContainerOOBMagic: - bug_descr = "container-overflow"; - bug_type_score = 10; - break; - case kAsanStackUseAfterScopeMagic: - bug_descr = "stack-use-after-scope"; - bug_type_score = 10; - break; - case kAsanGlobalRedzoneMagic: - bug_descr = "global-buffer-overflow"; - bug_type_score = 10; - far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr); - break; - case kAsanIntraObjectRedzone: - bug_descr = "intra-object-overflow"; - bug_type_score = 10; - break; - case kAsanAllocaLeftMagic: - case kAsanAllocaRightMagic: - bug_descr = "dynamic-stack-buffer-overflow"; - bug_type_score = 25; - far_from_bounds = 
AdjacentShadowValuesAreFullyPoisoned(shadow_addr); - break; - } - scariness.Scare(bug_type_score + read_after_free_bonus, bug_descr); - if (far_from_bounds) scariness.Scare(10, "far-from-bounds"); - } - } -} - -static void PrintContainerOverflowHint() { - Printf("HINT: if you don't care about these errors you may set " - "ASAN_OPTIONS=detect_container_overflow=0.\n" - "If you suspect a false positive see also: " - "https://github.com/google/sanitizers/wiki/" - "AddressSanitizerContainerOverflow.\n"); -} - -static void PrintShadowByte(InternalScopedString *str, const char *before, - u8 byte, const char *after = "\n") { - PrintMemoryByte(str, before, byte, /*in_shadow*/true, after); -} - -static void PrintLegend(InternalScopedString *str) { - str->append( - "Shadow byte legend (one shadow byte represents %d " - "application bytes):\n", - (int)SHADOW_GRANULARITY); - PrintShadowByte(str, " Addressable: ", 0); - str->append(" Partially addressable: "); - for (u8 i = 1; i < SHADOW_GRANULARITY; i++) PrintShadowByte(str, "", i, " "); - str->append("\n"); - PrintShadowByte(str, " Heap left redzone: ", - kAsanHeapLeftRedzoneMagic); - PrintShadowByte(str, " Freed heap region: ", kAsanHeapFreeMagic); - PrintShadowByte(str, " Stack left redzone: ", - kAsanStackLeftRedzoneMagic); - PrintShadowByte(str, " Stack mid redzone: ", - kAsanStackMidRedzoneMagic); - PrintShadowByte(str, " Stack right redzone: ", - kAsanStackRightRedzoneMagic); - PrintShadowByte(str, " Stack after return: ", - kAsanStackAfterReturnMagic); - PrintShadowByte(str, " Stack use after scope: ", - kAsanStackUseAfterScopeMagic); - PrintShadowByte(str, " Global redzone: ", kAsanGlobalRedzoneMagic); - PrintShadowByte(str, " Global init order: ", - kAsanInitializationOrderMagic); - PrintShadowByte(str, " Poisoned by user: ", - kAsanUserPoisonedMemoryMagic); - PrintShadowByte(str, " Container overflow: ", - kAsanContiguousContainerOOBMagic); - PrintShadowByte(str, " Array cookie: ", - kAsanArrayCookieMagic); - PrintShadowByte(str, " Intra object redzone: ", - kAsanIntraObjectRedzone); - PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic); - PrintShadowByte(str, " Left alloca redzone: ", kAsanAllocaLeftMagic); - PrintShadowByte(str, " Right alloca redzone: ", kAsanAllocaRightMagic); - PrintShadowByte(str, " Shadow gap: ", kAsanShadowGap); -} - -static void PrintShadowBytes(InternalScopedString *str, const char *before, - u8 *bytes, u8 *guilty, uptr n) { - Decorator d; - if (before) str->append("%s%p:", before, bytes); - for (uptr i = 0; i < n; i++) { - u8 *p = bytes + i; - const char *before = - p == guilty ? "[" : (p - 1 == guilty && i != 0) ? "" : " "; - const char *after = p == guilty ? "]" : ""; - PrintShadowByte(str, before, *p, after); - } - str->append("\n"); -} - -static void PrintShadowMemoryForAddress(uptr addr) { - if (!AddrIsInMem(addr)) return; - uptr shadow_addr = MemToShadow(addr); - const uptr n_bytes_per_row = 16; - uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1); - InternalScopedString str(4096 * 8); - str.append("Shadow bytes around the buggy address:\n"); - for (int i = -5; i <= 5; i++) { - uptr row_shadow_addr = aligned_shadow + i * n_bytes_per_row; - // Skip rows that would be outside the shadow range. This can happen when - // the user address is near the bottom, top, or shadow gap of the address - // space. - if (!AddrIsInShadow(row_shadow_addr)) continue; - const char *prefix = (i == 0) ? 
"=>" : " "; - PrintShadowBytes(&str, prefix, (u8 *)row_shadow_addr, (u8 *)shadow_addr, - n_bytes_per_row); - } - if (flags()->print_legend) PrintLegend(&str); - Printf("%s", str.data()); -} - -void ErrorGeneric::Print() { - Decorator d; - Printf("%s", d.Error()); - uptr addr = addr_description.Address(); - Report("ERROR: AddressSanitizer: %s on address %p at pc %p bp %p sp %p\n", - bug_descr, (void *)addr, pc, bp, sp); - Printf("%s", d.Default()); - - Printf("%s%s of size %zu at %p thread %s%s\n", d.Access(), - access_size ? (is_write ? "WRITE" : "READ") : "ACCESS", access_size, - (void *)addr, AsanThreadIdAndName(tid).c_str(), d.Default()); - - scariness.Print(); - GET_STACK_TRACE_FATAL(pc, bp); - stack.Print(); - - // Pass bug_descr because we have a special case for - // initialization-order-fiasco - addr_description.Print(bug_descr); - if (shadow_val == kAsanContiguousContainerOOBMagic) - PrintContainerOverflowHint(); - ReportErrorSummary(bug_descr, &stack); - PrintShadowMemoryForAddress(addr); -} - -} // namespace __asan diff --git a/lib/asan/asan_errors.cpp b/lib/asan/asan_errors.cpp new file mode 100644 index 000000000000..541c6e0353b5 --- /dev/null +++ b/lib/asan/asan_errors.cpp @@ -0,0 +1,598 @@ +//===-- asan_errors.cpp -----------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan implementation for error structures. +//===----------------------------------------------------------------------===// + +#include "asan_errors.h" +#include "asan_descriptions.h" +#include "asan_mapping.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "sanitizer_common/sanitizer_stackdepot.h" + +namespace __asan { + +static void OnStackUnwind(const SignalContext &sig, + const void *callback_context, + BufferedStackTrace *stack) { + bool fast = common_flags()->fast_unwind_on_fatal; +#if SANITIZER_FREEBSD || SANITIZER_NETBSD + // On FreeBSD the slow unwinding that leverages _Unwind_Backtrace() + // yields the call stack of the signal's handler and not of the code + // that raised the signal (as it does on Linux). + fast = true; +#endif + // Tests and maybe some users expect that scariness is going to be printed + // just before the stack. As only asan has scariness score we have no + // corresponding code in the sanitizer_common and we use this callback to + // print it. 
+ static_cast(callback_context)->Print(); + stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context, + fast); +} + +void ErrorDeadlySignal::Print() { + ReportDeadlySignal(signal, tid, &OnStackUnwind, &scariness); +} + +void ErrorDoubleFree::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: attempting %s on %p in thread %s:\n", + scariness.GetDescription(), addr_description.addr, + AsanThreadIdAndName(tid).c_str()); + Printf("%s", d.Default()); + scariness.Print(); + GET_STACK_TRACE_FATAL(second_free_stack->trace[0], + second_free_stack->top_frame_bp); + stack.Print(); + addr_description.Print(); + ReportErrorSummary(scariness.GetDescription(), &stack); +} + +void ErrorNewDeleteTypeMismatch::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: %s on %p in thread %s:\n", + scariness.GetDescription(), addr_description.addr, + AsanThreadIdAndName(tid).c_str()); + Printf("%s object passed to delete has wrong type:\n", d.Default()); + if (delete_size != 0) { + Printf( + " size of the allocated type: %zd bytes;\n" + " size of the deallocated type: %zd bytes.\n", + addr_description.chunk_access.chunk_size, delete_size); + } + const uptr user_alignment = + addr_description.chunk_access.user_requested_alignment; + if (delete_alignment != user_alignment) { + char user_alignment_str[32]; + char delete_alignment_str[32]; + internal_snprintf(user_alignment_str, sizeof(user_alignment_str), + "%zd bytes", user_alignment); + internal_snprintf(delete_alignment_str, sizeof(delete_alignment_str), + "%zd bytes", delete_alignment); + static const char *kDefaultAlignment = "default-aligned"; + Printf( + " alignment of the allocated type: %s;\n" + " alignment of the deallocated type: %s.\n", + user_alignment > 0 ? user_alignment_str : kDefaultAlignment, + delete_alignment > 0 ? 
delete_alignment_str : kDefaultAlignment); + } + CHECK_GT(free_stack->size, 0); + scariness.Print(); + GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp); + stack.Print(); + addr_description.Print(); + ReportErrorSummary(scariness.GetDescription(), &stack); + Report( + "HINT: if you don't care about these errors you may set " + "ASAN_OPTIONS=new_delete_type_mismatch=0\n"); +} + +void ErrorFreeNotMalloced::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: attempting free on address " + "which was not malloc()-ed: %p in thread %s\n", + addr_description.Address(), AsanThreadIdAndName(tid).c_str()); + Printf("%s", d.Default()); + CHECK_GT(free_stack->size, 0); + scariness.Print(); + GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp); + stack.Print(); + addr_description.Print(); + ReportErrorSummary(scariness.GetDescription(), &stack); +} + +void ErrorAllocTypeMismatch::Print() { + static const char *alloc_names[] = {"INVALID", "malloc", "operator new", + "operator new []"}; + static const char *dealloc_names[] = {"INVALID", "free", "operator delete", + "operator delete []"}; + CHECK_NE(alloc_type, dealloc_type); + Decorator d; + Printf("%s", d.Error()); + Report("ERROR: AddressSanitizer: %s (%s vs %s) on %p\n", + scariness.GetDescription(), alloc_names[alloc_type], + dealloc_names[dealloc_type], addr_description.Address()); + Printf("%s", d.Default()); + CHECK_GT(dealloc_stack->size, 0); + scariness.Print(); + GET_STACK_TRACE_FATAL(dealloc_stack->trace[0], dealloc_stack->top_frame_bp); + stack.Print(); + addr_description.Print(); + ReportErrorSummary(scariness.GetDescription(), &stack); + Report( + "HINT: if you don't care about these errors you may set " + "ASAN_OPTIONS=alloc_dealloc_mismatch=0\n"); +} + +void ErrorMallocUsableSizeNotOwned::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: attempting to call malloc_usable_size() for " + "pointer which is not owned: %p\n", + addr_description.Address()); + Printf("%s", d.Default()); + stack->Print(); + addr_description.Print(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorSanitizerGetAllocatedSizeNotOwned::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: attempting to call " + "__sanitizer_get_allocated_size() for pointer which is not owned: %p\n", + addr_description.Address()); + Printf("%s", d.Default()); + stack->Print(); + addr_description.Print(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorCallocOverflow::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: calloc parameters overflow: count * size " + "(%zd * %zd) cannot be represented in type size_t (thread %s)\n", + count, size, AsanThreadIdAndName(tid).c_str()); + Printf("%s", d.Default()); + stack->Print(); + PrintHintAllocatorCannotReturnNull(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorReallocArrayOverflow::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: reallocarray parameters overflow: count * size " + "(%zd * %zd) cannot be represented in type size_t (thread %s)\n", + count, size, AsanThreadIdAndName(tid).c_str()); + Printf("%s", d.Default()); + stack->Print(); + PrintHintAllocatorCannotReturnNull(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorPvallocOverflow::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: 
AddressSanitizer: pvalloc parameters overflow: size 0x%zx " + "rounded up to system page size 0x%zx cannot be represented in type " + "size_t (thread %s)\n", + size, GetPageSizeCached(), AsanThreadIdAndName(tid).c_str()); + Printf("%s", d.Default()); + stack->Print(); + PrintHintAllocatorCannotReturnNull(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorInvalidAllocationAlignment::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: invalid allocation alignment: %zd, " + "alignment must be a power of two (thread %s)\n", + alignment, AsanThreadIdAndName(tid).c_str()); + Printf("%s", d.Default()); + stack->Print(); + PrintHintAllocatorCannotReturnNull(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorInvalidAlignedAllocAlignment::Print() { + Decorator d; + Printf("%s", d.Error()); +#if SANITIZER_POSIX + Report("ERROR: AddressSanitizer: invalid alignment requested in " + "aligned_alloc: %zd, alignment must be a power of two and the " + "requested size 0x%zx must be a multiple of alignment " + "(thread %s)\n", alignment, size, AsanThreadIdAndName(tid).c_str()); +#else + Report("ERROR: AddressSanitizer: invalid alignment requested in " + "aligned_alloc: %zd, the requested size 0x%zx must be a multiple of " + "alignment (thread %s)\n", alignment, size, + AsanThreadIdAndName(tid).c_str()); +#endif + Printf("%s", d.Default()); + stack->Print(); + PrintHintAllocatorCannotReturnNull(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorInvalidPosixMemalignAlignment::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: invalid alignment requested in posix_memalign: " + "%zd, alignment must be a power of two and a multiple of sizeof(void*) " + "== %zd (thread %s)\n", + alignment, sizeof(void *), AsanThreadIdAndName(tid).c_str()); + Printf("%s", d.Default()); + stack->Print(); + PrintHintAllocatorCannotReturnNull(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorAllocationSizeTooBig::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: requested allocation size 0x%zx (0x%zx after " + "adjustments for alignment, red zones etc.) 
exceeds maximum supported " + "size of 0x%zx (thread %s)\n", + user_size, total_size, max_size, AsanThreadIdAndName(tid).c_str()); + Printf("%s", d.Default()); + stack->Print(); + PrintHintAllocatorCannotReturnNull(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorRssLimitExceeded::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: specified RSS limit exceeded, currently set to " + "soft_rss_limit_mb=%zd\n", common_flags()->soft_rss_limit_mb); + Printf("%s", d.Default()); + stack->Print(); + PrintHintAllocatorCannotReturnNull(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorOutOfMemory::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: allocator is out of memory trying to allocate " + "0x%zx bytes\n", requested_size); + Printf("%s", d.Default()); + stack->Print(); + PrintHintAllocatorCannotReturnNull(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorStringFunctionMemoryRangesOverlap::Print() { + Decorator d; + char bug_type[100]; + internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function); + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: %s: memory ranges [%p,%p) and [%p, %p) " + "overlap\n", + bug_type, addr1_description.Address(), + addr1_description.Address() + length1, addr2_description.Address(), + addr2_description.Address() + length2); + Printf("%s", d.Default()); + scariness.Print(); + stack->Print(); + addr1_description.Print(); + addr2_description.Print(); + ReportErrorSummary(bug_type, stack); +} + +void ErrorStringFunctionSizeOverflow::Print() { + Decorator d; + Printf("%s", d.Error()); + Report("ERROR: AddressSanitizer: %s: (size=%zd)\n", + scariness.GetDescription(), size); + Printf("%s", d.Default()); + scariness.Print(); + stack->Print(); + addr_description.Print(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorBadParamsToAnnotateContiguousContainer::Print() { + Report( + "ERROR: AddressSanitizer: bad parameters to " + "__sanitizer_annotate_contiguous_container:\n" + " beg : %p\n" + " end : %p\n" + " old_mid : %p\n" + " new_mid : %p\n", + beg, end, old_mid, new_mid); + uptr granularity = SHADOW_GRANULARITY; + if (!IsAligned(beg, granularity)) + Report("ERROR: beg is not aligned by %d\n", granularity); + stack->Print(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorODRViolation::Print() { + Decorator d; + Printf("%s", d.Error()); + Report("ERROR: AddressSanitizer: %s (%p):\n", scariness.GetDescription(), + global1.beg); + Printf("%s", d.Default()); + InternalScopedString g1_loc(256), g2_loc(256); + PrintGlobalLocation(&g1_loc, global1); + PrintGlobalLocation(&g2_loc, global2); + Printf(" [1] size=%zd '%s' %s\n", global1.size, + MaybeDemangleGlobalName(global1.name), g1_loc.data()); + Printf(" [2] size=%zd '%s' %s\n", global2.size, + MaybeDemangleGlobalName(global2.name), g2_loc.data()); + if (stack_id1 && stack_id2) { + Printf("These globals were registered at these points:\n"); + Printf(" [1]:\n"); + StackDepotGet(stack_id1).Print(); + Printf(" [2]:\n"); + StackDepotGet(stack_id2).Print(); + } + Report( + "HINT: if you don't care about these errors you may set " + "ASAN_OPTIONS=detect_odr_violation=0\n"); + InternalScopedString error_msg(256); + error_msg.append("%s: global '%s' at %s", scariness.GetDescription(), + MaybeDemangleGlobalName(global1.name), g1_loc.data()); + ReportErrorSummary(error_msg.data()); +} + +void 
ErrorInvalidPointerPair::Print() { + Decorator d; + Printf("%s", d.Error()); + Report("ERROR: AddressSanitizer: %s: %p %p\n", scariness.GetDescription(), + addr1_description.Address(), addr2_description.Address()); + Printf("%s", d.Default()); + GET_STACK_TRACE_FATAL(pc, bp); + stack.Print(); + addr1_description.Print(); + addr2_description.Print(); + ReportErrorSummary(scariness.GetDescription(), &stack); +} + +static bool AdjacentShadowValuesAreFullyPoisoned(u8 *s) { + return s[-1] > 127 && s[1] > 127; +} + +ErrorGeneric::ErrorGeneric(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr addr, + bool is_write_, uptr access_size_) + : ErrorBase(tid), + addr_description(addr, access_size_, /*shouldLockThreadRegistry=*/false), + pc(pc_), + bp(bp_), + sp(sp_), + access_size(access_size_), + is_write(is_write_), + shadow_val(0) { + scariness.Clear(); + if (access_size) { + if (access_size <= 9) { + char desr[] = "?-byte"; + desr[0] = '0' + access_size; + scariness.Scare(access_size + access_size / 2, desr); + } else if (access_size >= 10) { + scariness.Scare(15, "multi-byte"); + } + is_write ? scariness.Scare(20, "write") : scariness.Scare(1, "read"); + + // Determine the error type. + bug_descr = "unknown-crash"; + if (AddrIsInMem(addr)) { + u8 *shadow_addr = (u8 *)MemToShadow(addr); + // If we are accessing 16 bytes, look at the second shadow byte. + if (*shadow_addr == 0 && access_size > SHADOW_GRANULARITY) shadow_addr++; + // If we are in the partial right redzone, look at the next shadow byte. + if (*shadow_addr > 0 && *shadow_addr < 128) shadow_addr++; + bool far_from_bounds = false; + shadow_val = *shadow_addr; + int bug_type_score = 0; + // For use-after-frees reads are almost as bad as writes. + int read_after_free_bonus = 0; + switch (shadow_val) { + case kAsanHeapLeftRedzoneMagic: + case kAsanArrayCookieMagic: + bug_descr = "heap-buffer-overflow"; + bug_type_score = 10; + far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr); + break; + case kAsanHeapFreeMagic: + bug_descr = "heap-use-after-free"; + bug_type_score = 20; + if (!is_write) read_after_free_bonus = 18; + break; + case kAsanStackLeftRedzoneMagic: + bug_descr = "stack-buffer-underflow"; + bug_type_score = 25; + far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr); + break; + case kAsanInitializationOrderMagic: + bug_descr = "initialization-order-fiasco"; + bug_type_score = 1; + break; + case kAsanStackMidRedzoneMagic: + case kAsanStackRightRedzoneMagic: + bug_descr = "stack-buffer-overflow"; + bug_type_score = 25; + far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr); + break; + case kAsanStackAfterReturnMagic: + bug_descr = "stack-use-after-return"; + bug_type_score = 30; + if (!is_write) read_after_free_bonus = 18; + break; + case kAsanUserPoisonedMemoryMagic: + bug_descr = "use-after-poison"; + bug_type_score = 20; + break; + case kAsanContiguousContainerOOBMagic: + bug_descr = "container-overflow"; + bug_type_score = 10; + break; + case kAsanStackUseAfterScopeMagic: + bug_descr = "stack-use-after-scope"; + bug_type_score = 10; + break; + case kAsanGlobalRedzoneMagic: + bug_descr = "global-buffer-overflow"; + bug_type_score = 10; + far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr); + break; + case kAsanIntraObjectRedzone: + bug_descr = "intra-object-overflow"; + bug_type_score = 10; + break; + case kAsanAllocaLeftMagic: + case kAsanAllocaRightMagic: + bug_descr = "dynamic-stack-buffer-overflow"; + bug_type_score = 25; + far_from_bounds = 
AdjacentShadowValuesAreFullyPoisoned(shadow_addr); + break; + } + scariness.Scare(bug_type_score + read_after_free_bonus, bug_descr); + if (far_from_bounds) scariness.Scare(10, "far-from-bounds"); + } + } +} + +static void PrintContainerOverflowHint() { + Printf("HINT: if you don't care about these errors you may set " + "ASAN_OPTIONS=detect_container_overflow=0.\n" + "If you suspect a false positive see also: " + "https://github.com/google/sanitizers/wiki/" + "AddressSanitizerContainerOverflow.\n"); +} + +static void PrintShadowByte(InternalScopedString *str, const char *before, + u8 byte, const char *after = "\n") { + PrintMemoryByte(str, before, byte, /*in_shadow*/true, after); +} + +static void PrintLegend(InternalScopedString *str) { + str->append( + "Shadow byte legend (one shadow byte represents %d " + "application bytes):\n", + (int)SHADOW_GRANULARITY); + PrintShadowByte(str, " Addressable: ", 0); + str->append(" Partially addressable: "); + for (u8 i = 1; i < SHADOW_GRANULARITY; i++) PrintShadowByte(str, "", i, " "); + str->append("\n"); + PrintShadowByte(str, " Heap left redzone: ", + kAsanHeapLeftRedzoneMagic); + PrintShadowByte(str, " Freed heap region: ", kAsanHeapFreeMagic); + PrintShadowByte(str, " Stack left redzone: ", + kAsanStackLeftRedzoneMagic); + PrintShadowByte(str, " Stack mid redzone: ", + kAsanStackMidRedzoneMagic); + PrintShadowByte(str, " Stack right redzone: ", + kAsanStackRightRedzoneMagic); + PrintShadowByte(str, " Stack after return: ", + kAsanStackAfterReturnMagic); + PrintShadowByte(str, " Stack use after scope: ", + kAsanStackUseAfterScopeMagic); + PrintShadowByte(str, " Global redzone: ", kAsanGlobalRedzoneMagic); + PrintShadowByte(str, " Global init order: ", + kAsanInitializationOrderMagic); + PrintShadowByte(str, " Poisoned by user: ", + kAsanUserPoisonedMemoryMagic); + PrintShadowByte(str, " Container overflow: ", + kAsanContiguousContainerOOBMagic); + PrintShadowByte(str, " Array cookie: ", + kAsanArrayCookieMagic); + PrintShadowByte(str, " Intra object redzone: ", + kAsanIntraObjectRedzone); + PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic); + PrintShadowByte(str, " Left alloca redzone: ", kAsanAllocaLeftMagic); + PrintShadowByte(str, " Right alloca redzone: ", kAsanAllocaRightMagic); + PrintShadowByte(str, " Shadow gap: ", kAsanShadowGap); +} + +static void PrintShadowBytes(InternalScopedString *str, const char *before, + u8 *bytes, u8 *guilty, uptr n) { + Decorator d; + if (before) str->append("%s%p:", before, bytes); + for (uptr i = 0; i < n; i++) { + u8 *p = bytes + i; + const char *before = + p == guilty ? "[" : (p - 1 == guilty && i != 0) ? "" : " "; + const char *after = p == guilty ? "]" : ""; + PrintShadowByte(str, before, *p, after); + } + str->append("\n"); +} + +static void PrintShadowMemoryForAddress(uptr addr) { + if (!AddrIsInMem(addr)) return; + uptr shadow_addr = MemToShadow(addr); + const uptr n_bytes_per_row = 16; + uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1); + InternalScopedString str(4096 * 8); + str.append("Shadow bytes around the buggy address:\n"); + for (int i = -5; i <= 5; i++) { + uptr row_shadow_addr = aligned_shadow + i * n_bytes_per_row; + // Skip rows that would be outside the shadow range. This can happen when + // the user address is near the bottom, top, or shadow gap of the address + // space. + if (!AddrIsInShadow(row_shadow_addr)) continue; + const char *prefix = (i == 0) ? 
"=>" : " "; + PrintShadowBytes(&str, prefix, (u8 *)row_shadow_addr, (u8 *)shadow_addr, + n_bytes_per_row); + } + if (flags()->print_legend) PrintLegend(&str); + Printf("%s", str.data()); +} + +void ErrorGeneric::Print() { + Decorator d; + Printf("%s", d.Error()); + uptr addr = addr_description.Address(); + Report("ERROR: AddressSanitizer: %s on address %p at pc %p bp %p sp %p\n", + bug_descr, (void *)addr, pc, bp, sp); + Printf("%s", d.Default()); + + Printf("%s%s of size %zu at %p thread %s%s\n", d.Access(), + access_size ? (is_write ? "WRITE" : "READ") : "ACCESS", access_size, + (void *)addr, AsanThreadIdAndName(tid).c_str(), d.Default()); + + scariness.Print(); + GET_STACK_TRACE_FATAL(pc, bp); + stack.Print(); + + // Pass bug_descr because we have a special case for + // initialization-order-fiasco + addr_description.Print(bug_descr); + if (shadow_val == kAsanContiguousContainerOOBMagic) + PrintContainerOverflowHint(); + ReportErrorSummary(bug_descr, &stack); + PrintShadowMemoryForAddress(addr); +} + +} // namespace __asan diff --git a/lib/asan/asan_errors.h b/lib/asan/asan_errors.h index b84f56c18535..a7fda2fd9f5d 100644 --- a/lib/asan/asan_errors.h +++ b/lib/asan/asan_errors.h @@ -48,7 +48,8 @@ struct ErrorDeadlySignal : ErrorBase { scariness.Scare(10, "stack-overflow"); } else if (!signal.is_memory_access) { scariness.Scare(10, "signal"); - } else if (signal.addr < GetPageSizeCached()) { + } else if (signal.is_true_faulting_addr && + signal.addr < GetPageSizeCached()) { scariness.Scare(10, "null-deref"); } else if (signal.addr == signal.pc) { scariness.Scare(60, "wild-jump"); diff --git a/lib/asan/asan_fake_stack.cc b/lib/asan/asan_fake_stack.cc deleted file mode 100644 index f8e1ac4b7bfe..000000000000 --- a/lib/asan/asan_fake_stack.cc +++ /dev/null @@ -1,282 +0,0 @@ -//===-- asan_fake_stack.cc ------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// FakeStack is used to detect use-after-return bugs. -//===----------------------------------------------------------------------===// - -#include "asan_allocator.h" -#include "asan_poisoning.h" -#include "asan_thread.h" - -namespace __asan { - -static const u64 kMagic1 = kAsanStackAfterReturnMagic; -static const u64 kMagic2 = (kMagic1 << 8) | kMagic1; -static const u64 kMagic4 = (kMagic2 << 16) | kMagic2; -static const u64 kMagic8 = (kMagic4 << 32) | kMagic4; - -static const u64 kAllocaRedzoneSize = 32UL; -static const u64 kAllocaRedzoneMask = 31UL; - -// For small size classes inline PoisonShadow for better performance. -ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) { - u64 *shadow = reinterpret_cast(MemToShadow(ptr)); - if (SHADOW_SCALE == 3 && class_id <= 6) { - // This code expects SHADOW_SCALE=3. - for (uptr i = 0; i < (((uptr)1) << class_id); i++) { - shadow[i] = magic; - // Make sure this does not become memset. - SanitizerBreakOptimization(nullptr); - } - } else { - // The size class is too big, it's cheaper to poison only size bytes. 
- PoisonShadow(ptr, size, static_cast(magic)); - } -} - -FakeStack *FakeStack::Create(uptr stack_size_log) { - static uptr kMinStackSizeLog = 16; - static uptr kMaxStackSizeLog = FIRST_32_SECOND_64(24, 28); - if (stack_size_log < kMinStackSizeLog) - stack_size_log = kMinStackSizeLog; - if (stack_size_log > kMaxStackSizeLog) - stack_size_log = kMaxStackSizeLog; - uptr size = RequiredSize(stack_size_log); - FakeStack *res = reinterpret_cast( - flags()->uar_noreserve ? MmapNoReserveOrDie(size, "FakeStack") - : MmapOrDie(size, "FakeStack")); - res->stack_size_log_ = stack_size_log; - u8 *p = reinterpret_cast(res); - VReport(1, "T%d: FakeStack created: %p -- %p stack_size_log: %zd; " - "mmapped %zdK, noreserve=%d \n", - GetCurrentTidOrInvalid(), p, - p + FakeStack::RequiredSize(stack_size_log), stack_size_log, - size >> 10, flags()->uar_noreserve); - return res; -} - -void FakeStack::Destroy(int tid) { - PoisonAll(0); - if (Verbosity() >= 2) { - InternalScopedString str(kNumberOfSizeClasses * 50); - for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) - str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id], - NumberOfFrames(stack_size_log(), class_id)); - Report("T%d: FakeStack destroyed: %s\n", tid, str.data()); - } - uptr size = RequiredSize(stack_size_log_); - FlushUnneededASanShadowMemory(reinterpret_cast(this), size); - UnmapOrDie(this, size); -} - -void FakeStack::PoisonAll(u8 magic) { - PoisonShadow(reinterpret_cast(this), RequiredSize(stack_size_log()), - magic); -} - -#if !defined(_MSC_VER) || defined(__clang__) -ALWAYS_INLINE USED -#endif -FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id, - uptr real_stack) { - CHECK_LT(class_id, kNumberOfSizeClasses); - if (needs_gc_) - GC(real_stack); - uptr &hint_position = hint_position_[class_id]; - const int num_iter = NumberOfFrames(stack_size_log, class_id); - u8 *flags = GetFlags(stack_size_log, class_id); - for (int i = 0; i < num_iter; i++) { - uptr pos = ModuloNumberOfFrames(stack_size_log, class_id, hint_position++); - // This part is tricky. On one hand, checking and setting flags[pos] - // should be atomic to ensure async-signal safety. But on the other hand, - // if the signal arrives between checking and setting flags[pos], the - // signal handler's fake stack will start from a different hint_position - // and so will not touch this particular byte. So, it is safe to do this - // with regular non-atomic load and store (at least I was not able to make - // this code crash). - if (flags[pos]) continue; - flags[pos] = 1; - FakeFrame *res = reinterpret_cast( - GetFrame(stack_size_log, class_id, pos)); - res->real_stack = real_stack; - *SavedFlagPtr(reinterpret_cast(res), class_id) = &flags[pos]; - return res; - } - return nullptr; // We are out of fake stack. 
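
FakeStack exists so that the frames of functions that have already returned stay poisoned and reads through escaped pointers are caught. A minimal reproducer, assuming clang's -fsanitize=address with fake stacks enabled (ASAN_OPTIONS=detect_stack_use_after_return=1; the default varies by release):

#include <cstdio>

const char *g_dangling = nullptr;

void LeakLocal() {
  char buf[64] = "stack data";
  g_dangling = buf;  // the address of a local escapes its frame
}

int main() {
  LeakLocal();
  // With fake stacks, buf lives in a heap-mapped FakeFrame that is poisoned
  // with kAsanStackAfterReturnMagic on return, so this read is reported as
  // stack-use-after-return instead of silently reading stale memory.
  std::printf("%c\n", *g_dangling);
}
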
-} - -uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) { - uptr stack_size_log = this->stack_size_log(); - uptr beg = reinterpret_cast(GetFrame(stack_size_log, 0, 0)); - uptr end = reinterpret_cast(this) + RequiredSize(stack_size_log); - if (ptr < beg || ptr >= end) return 0; - uptr class_id = (ptr - beg) >> stack_size_log; - uptr base = beg + (class_id << stack_size_log); - CHECK_LE(base, ptr); - CHECK_LT(ptr, base + (((uptr)1) << stack_size_log)); - uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id); - uptr res = base + pos * BytesInSizeClass(class_id); - *frame_end = res + BytesInSizeClass(class_id); - *frame_beg = res + sizeof(FakeFrame); - return res; -} - -void FakeStack::HandleNoReturn() { - needs_gc_ = true; -} - -// When throw, longjmp or some such happens we don't call OnFree() and -// as the result may leak one or more fake frames, but the good news is that -// we are notified about all such events by HandleNoReturn(). -// If we recently had such no-return event we need to collect garbage frames. -// We do it based on their 'real_stack' values -- everything that is lower -// than the current real_stack is garbage. -NOINLINE void FakeStack::GC(uptr real_stack) { - uptr collected = 0; - for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) { - u8 *flags = GetFlags(stack_size_log(), class_id); - for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n; - i++) { - if (flags[i] == 0) continue; // not allocated. - FakeFrame *ff = reinterpret_cast( - GetFrame(stack_size_log(), class_id, i)); - if (ff->real_stack < real_stack) { - flags[i] = 0; - collected++; - } - } - } - needs_gc_ = false; -} - -void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void *arg) { - for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) { - u8 *flags = GetFlags(stack_size_log(), class_id); - for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n; - i++) { - if (flags[i] == 0) continue; // not allocated. - FakeFrame *ff = reinterpret_cast( - GetFrame(stack_size_log(), class_id, i)); - uptr begin = reinterpret_cast(ff); - callback(begin, begin + FakeStack::BytesInSizeClass(class_id), arg); - } - } -} - -#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA -static THREADLOCAL FakeStack *fake_stack_tls; - -FakeStack *GetTLSFakeStack() { - return fake_stack_tls; -} -void SetTLSFakeStack(FakeStack *fs) { - fake_stack_tls = fs; -} -#else -FakeStack *GetTLSFakeStack() { return 0; } -void SetTLSFakeStack(FakeStack *fs) { } -#endif // (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA - -static FakeStack *GetFakeStack() { - AsanThread *t = GetCurrentThread(); - if (!t) return nullptr; - return t->fake_stack(); -} - -static FakeStack *GetFakeStackFast() { - if (FakeStack *fs = GetTLSFakeStack()) - return fs; - if (!__asan_option_detect_stack_use_after_return) - return nullptr; - return GetFakeStack(); -} - -ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) { - FakeStack *fs = GetFakeStackFast(); - if (!fs) return 0; - uptr local_stack; - uptr real_stack = reinterpret_cast(&local_stack); - FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack); - if (!ff) return 0; // Out of fake stack. 
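
The __asan_get_current_fake_stack / __asan_addr_is_in_fake_stack entry points defined in this file are aimed mainly at debuggers and unwinders. A hedged sketch of calling them from inside an instrumented process, assuming the declarations come from <sanitizer/asan_interface.h>:

#include <sanitizer/asan_interface.h>
#include <cstdio>

void InspectAddress(void *addr) {
  void *fake_stack = __asan_get_current_fake_stack();  // may be null
  void *beg = nullptr, *end = nullptr;
  void *real = fake_stack
                   ? __asan_addr_is_in_fake_stack(fake_stack, addr, &beg, &end)
                   : nullptr;
  if (real)
    std::printf("%p is a fake frame [%p, %p), real stack at %p\n",
                addr, beg, end, real);
  else
    std::printf("%p is not on the current fake stack\n", addr);
}

int main() {
  int local = 0;
  InspectAddress(&local);  // with UAR instrumentation, locals whose address
                           // is taken are typically placed in fake frames
}
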
- uptr ptr = reinterpret_cast(ff); - SetShadow(ptr, size, class_id, 0); - return ptr; -} - -ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) { - FakeStack::Deallocate(ptr, class_id); - SetShadow(ptr, size, class_id, kMagic8); -} - -} // namespace __asan - -// ---------------------- Interface ---------------- {{{1 -using namespace __asan; -#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \ - extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \ - __asan_stack_malloc_##class_id(uptr size) { \ - return OnMalloc(class_id, size); \ - } \ - extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \ - uptr ptr, uptr size) { \ - OnFree(ptr, class_id, size); \ - } - -DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0) -DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(1) -DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(2) -DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(3) -DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(4) -DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(5) -DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(6) -DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(7) -DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(8) -DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(9) -DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(10) -extern "C" { -SANITIZER_INTERFACE_ATTRIBUTE -void *__asan_get_current_fake_stack() { return GetFakeStackFast(); } - -SANITIZER_INTERFACE_ATTRIBUTE -void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg, - void **end) { - FakeStack *fs = reinterpret_cast(fake_stack); - if (!fs) return nullptr; - uptr frame_beg, frame_end; - FakeFrame *frame = reinterpret_cast(fs->AddrIsInFakeStack( - reinterpret_cast(addr), &frame_beg, &frame_end)); - if (!frame) return nullptr; - if (frame->magic != kCurrentStackFrameMagic) - return nullptr; - if (beg) *beg = reinterpret_cast(frame_beg); - if (end) *end = reinterpret_cast(frame_end); - return reinterpret_cast(frame->real_stack); -} - -SANITIZER_INTERFACE_ATTRIBUTE -void __asan_alloca_poison(uptr addr, uptr size) { - uptr LeftRedzoneAddr = addr - kAllocaRedzoneSize; - uptr PartialRzAddr = addr + size; - uptr RightRzAddr = (PartialRzAddr + kAllocaRedzoneMask) & ~kAllocaRedzoneMask; - uptr PartialRzAligned = PartialRzAddr & ~(SHADOW_GRANULARITY - 1); - FastPoisonShadow(LeftRedzoneAddr, kAllocaRedzoneSize, kAsanAllocaLeftMagic); - FastPoisonShadowPartialRightRedzone( - PartialRzAligned, PartialRzAddr % SHADOW_GRANULARITY, - RightRzAddr - PartialRzAligned, kAsanAllocaRightMagic); - FastPoisonShadow(RightRzAddr, kAllocaRedzoneSize, kAsanAllocaRightMagic); -} - -SANITIZER_INTERFACE_ATTRIBUTE -void __asan_allocas_unpoison(uptr top, uptr bottom) { - if ((!top) || (top > bottom)) return; - REAL(memset)(reinterpret_cast(MemToShadow(top)), 0, - (bottom - top) / SHADOW_GRANULARITY); -} -} // extern "C" diff --git a/lib/asan/asan_fake_stack.cpp b/lib/asan/asan_fake_stack.cpp new file mode 100644 index 000000000000..295e6debc96c --- /dev/null +++ b/lib/asan/asan_fake_stack.cpp @@ -0,0 +1,282 @@ +//===-- asan_fake_stack.cpp -----------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// FakeStack is used to detect use-after-return bugs. 
+//===----------------------------------------------------------------------===// + +#include "asan_allocator.h" +#include "asan_poisoning.h" +#include "asan_thread.h" + +namespace __asan { + +static const u64 kMagic1 = kAsanStackAfterReturnMagic; +static const u64 kMagic2 = (kMagic1 << 8) | kMagic1; +static const u64 kMagic4 = (kMagic2 << 16) | kMagic2; +static const u64 kMagic8 = (kMagic4 << 32) | kMagic4; + +static const u64 kAllocaRedzoneSize = 32UL; +static const u64 kAllocaRedzoneMask = 31UL; + +// For small size classes inline PoisonShadow for better performance. +ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) { + u64 *shadow = reinterpret_cast(MemToShadow(ptr)); + if (SHADOW_SCALE == 3 && class_id <= 6) { + // This code expects SHADOW_SCALE=3. + for (uptr i = 0; i < (((uptr)1) << class_id); i++) { + shadow[i] = magic; + // Make sure this does not become memset. + SanitizerBreakOptimization(nullptr); + } + } else { + // The size class is too big, it's cheaper to poison only size bytes. + PoisonShadow(ptr, size, static_cast(magic)); + } +} + +FakeStack *FakeStack::Create(uptr stack_size_log) { + static uptr kMinStackSizeLog = 16; + static uptr kMaxStackSizeLog = FIRST_32_SECOND_64(24, 28); + if (stack_size_log < kMinStackSizeLog) + stack_size_log = kMinStackSizeLog; + if (stack_size_log > kMaxStackSizeLog) + stack_size_log = kMaxStackSizeLog; + uptr size = RequiredSize(stack_size_log); + FakeStack *res = reinterpret_cast( + flags()->uar_noreserve ? MmapNoReserveOrDie(size, "FakeStack") + : MmapOrDie(size, "FakeStack")); + res->stack_size_log_ = stack_size_log; + u8 *p = reinterpret_cast(res); + VReport(1, "T%d: FakeStack created: %p -- %p stack_size_log: %zd; " + "mmapped %zdK, noreserve=%d \n", + GetCurrentTidOrInvalid(), p, + p + FakeStack::RequiredSize(stack_size_log), stack_size_log, + size >> 10, flags()->uar_noreserve); + return res; +} + +void FakeStack::Destroy(int tid) { + PoisonAll(0); + if (Verbosity() >= 2) { + InternalScopedString str(kNumberOfSizeClasses * 50); + for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) + str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id], + NumberOfFrames(stack_size_log(), class_id)); + Report("T%d: FakeStack destroyed: %s\n", tid, str.data()); + } + uptr size = RequiredSize(stack_size_log_); + FlushUnneededASanShadowMemory(reinterpret_cast(this), size); + UnmapOrDie(this, size); +} + +void FakeStack::PoisonAll(u8 magic) { + PoisonShadow(reinterpret_cast(this), RequiredSize(stack_size_log()), + magic); +} + +#if !defined(_MSC_VER) || defined(__clang__) +ALWAYS_INLINE USED +#endif +FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id, + uptr real_stack) { + CHECK_LT(class_id, kNumberOfSizeClasses); + if (needs_gc_) + GC(real_stack); + uptr &hint_position = hint_position_[class_id]; + const int num_iter = NumberOfFrames(stack_size_log, class_id); + u8 *flags = GetFlags(stack_size_log, class_id); + for (int i = 0; i < num_iter; i++) { + uptr pos = ModuloNumberOfFrames(stack_size_log, class_id, hint_position++); + // This part is tricky. On one hand, checking and setting flags[pos] + // should be atomic to ensure async-signal safety. But on the other hand, + // if the signal arrives between checking and setting flags[pos], the + // signal handler's fake stack will start from a different hint_position + // and so will not touch this particular byte. So, it is safe to do this + // with regular non-atomic load and store (at least I was not able to make + // this code crash). 
+ if (flags[pos]) continue; + flags[pos] = 1; + FakeFrame *res = reinterpret_cast( + GetFrame(stack_size_log, class_id, pos)); + res->real_stack = real_stack; + *SavedFlagPtr(reinterpret_cast(res), class_id) = &flags[pos]; + return res; + } + return nullptr; // We are out of fake stack. +} + +uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) { + uptr stack_size_log = this->stack_size_log(); + uptr beg = reinterpret_cast(GetFrame(stack_size_log, 0, 0)); + uptr end = reinterpret_cast(this) + RequiredSize(stack_size_log); + if (ptr < beg || ptr >= end) return 0; + uptr class_id = (ptr - beg) >> stack_size_log; + uptr base = beg + (class_id << stack_size_log); + CHECK_LE(base, ptr); + CHECK_LT(ptr, base + (((uptr)1) << stack_size_log)); + uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id); + uptr res = base + pos * BytesInSizeClass(class_id); + *frame_end = res + BytesInSizeClass(class_id); + *frame_beg = res + sizeof(FakeFrame); + return res; +} + +void FakeStack::HandleNoReturn() { + needs_gc_ = true; +} + +// When throw, longjmp or some such happens we don't call OnFree() and +// as the result may leak one or more fake frames, but the good news is that +// we are notified about all such events by HandleNoReturn(). +// If we recently had such no-return event we need to collect garbage frames. +// We do it based on their 'real_stack' values -- everything that is lower +// than the current real_stack is garbage. +NOINLINE void FakeStack::GC(uptr real_stack) { + uptr collected = 0; + for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) { + u8 *flags = GetFlags(stack_size_log(), class_id); + for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n; + i++) { + if (flags[i] == 0) continue; // not allocated. + FakeFrame *ff = reinterpret_cast( + GetFrame(stack_size_log(), class_id, i)); + if (ff->real_stack < real_stack) { + flags[i] = 0; + collected++; + } + } + } + needs_gc_ = false; +} + +void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void *arg) { + for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) { + u8 *flags = GetFlags(stack_size_log(), class_id); + for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n; + i++) { + if (flags[i] == 0) continue; // not allocated. 
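
HandleNoReturn()/GC() cover transfers such as longjmp that skip the epilogues which would otherwise call OnFree(). Code that abandons frames on its own (hand-rolled fibers, manual stack switching) can tell the runtime the same thing through __asan_handle_no_return(); the sketch below assumes that declaration is available from <sanitizer/asan_interface.h>. For plain longjmp the interceptor already does this, so the explicit call is only illustrative:

#include <sanitizer/asan_interface.h>
#include <csetjmp>
#include <cstdio>

static std::jmp_buf g_env;

void DeepFrame() {
  char scratch[128] = {0};     // gets a fake frame under UAR instrumentation
  (void)scratch;
  __asan_handle_no_return();   // mark the frames we are about to abandon
  std::longjmp(g_env, 1);      // without the GC pass, their FakeFrames would leak
}

int main() {
  if (setjmp(g_env) == 0)
    DeepFrame();
  std::printf("back in main after a no-return transfer\n");
}
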
+ FakeFrame *ff = reinterpret_cast( + GetFrame(stack_size_log(), class_id, i)); + uptr begin = reinterpret_cast(ff); + callback(begin, begin + FakeStack::BytesInSizeClass(class_id), arg); + } + } +} + +#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA +static THREADLOCAL FakeStack *fake_stack_tls; + +FakeStack *GetTLSFakeStack() { + return fake_stack_tls; +} +void SetTLSFakeStack(FakeStack *fs) { + fake_stack_tls = fs; +} +#else +FakeStack *GetTLSFakeStack() { return 0; } +void SetTLSFakeStack(FakeStack *fs) { } +#endif // (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA + +static FakeStack *GetFakeStack() { + AsanThread *t = GetCurrentThread(); + if (!t) return nullptr; + return t->fake_stack(); +} + +static FakeStack *GetFakeStackFast() { + if (FakeStack *fs = GetTLSFakeStack()) + return fs; + if (!__asan_option_detect_stack_use_after_return) + return nullptr; + return GetFakeStack(); +} + +ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) { + FakeStack *fs = GetFakeStackFast(); + if (!fs) return 0; + uptr local_stack; + uptr real_stack = reinterpret_cast(&local_stack); + FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack); + if (!ff) return 0; // Out of fake stack. + uptr ptr = reinterpret_cast(ff); + SetShadow(ptr, size, class_id, 0); + return ptr; +} + +ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) { + FakeStack::Deallocate(ptr, class_id); + SetShadow(ptr, size, class_id, kMagic8); +} + +} // namespace __asan + +// ---------------------- Interface ---------------- {{{1 +using namespace __asan; +#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \ + extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \ + __asan_stack_malloc_##class_id(uptr size) { \ + return OnMalloc(class_id, size); \ + } \ + extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \ + uptr ptr, uptr size) { \ + OnFree(ptr, class_id, size); \ + } + +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(1) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(2) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(3) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(4) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(5) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(6) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(7) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(8) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(9) +DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(10) +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +void *__asan_get_current_fake_stack() { return GetFakeStackFast(); } + +SANITIZER_INTERFACE_ATTRIBUTE +void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg, + void **end) { + FakeStack *fs = reinterpret_cast(fake_stack); + if (!fs) return nullptr; + uptr frame_beg, frame_end; + FakeFrame *frame = reinterpret_cast(fs->AddrIsInFakeStack( + reinterpret_cast(addr), &frame_beg, &frame_end)); + if (!frame) return nullptr; + if (frame->magic != kCurrentStackFrameMagic) + return nullptr; + if (beg) *beg = reinterpret_cast(frame_beg); + if (end) *end = reinterpret_cast(frame_end); + return reinterpret_cast(frame->real_stack); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __asan_alloca_poison(uptr addr, uptr size) { + uptr LeftRedzoneAddr = addr - kAllocaRedzoneSize; + uptr PartialRzAddr = addr + size; + uptr RightRzAddr = (PartialRzAddr + kAllocaRedzoneMask) & ~kAllocaRedzoneMask; + uptr PartialRzAligned = PartialRzAddr & ~(SHADOW_GRANULARITY - 1); + FastPoisonShadow(LeftRedzoneAddr, kAllocaRedzoneSize, kAsanAllocaLeftMagic); + FastPoisonShadowPartialRightRedzone( + 
PartialRzAligned, PartialRzAddr % SHADOW_GRANULARITY, + RightRzAddr - PartialRzAligned, kAsanAllocaRightMagic); + FastPoisonShadow(RightRzAddr, kAllocaRedzoneSize, kAsanAllocaRightMagic); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __asan_allocas_unpoison(uptr top, uptr bottom) { + if ((!top) || (top > bottom)) return; + REAL(memset)(reinterpret_cast(MemToShadow(top)), 0, + (bottom - top) / SHADOW_GRANULARITY); +} +} // extern "C" diff --git a/lib/asan/asan_fake_stack.h b/lib/asan/asan_fake_stack.h index 59ba85218f88..270a19816d6e 100644 --- a/lib/asan/asan_fake_stack.h +++ b/lib/asan/asan_fake_stack.h @@ -8,7 +8,7 @@ // // This file is a part of AddressSanitizer, an address sanity checker. // -// ASan-private header for asan_fake_stack.cc, implements FakeStack. +// ASan-private header for asan_fake_stack.cpp, implements FakeStack. //===----------------------------------------------------------------------===// #ifndef ASAN_FAKE_STACK_H diff --git a/lib/asan/asan_flags.cc b/lib/asan/asan_flags.cc deleted file mode 100644 index 89e98936129d..000000000000 --- a/lib/asan/asan_flags.cc +++ /dev/null @@ -1,214 +0,0 @@ -//===-- asan_flags.cc -------------------------------------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// ASan flag parsing logic. -//===----------------------------------------------------------------------===// - -#include "asan_activation.h" -#include "asan_flags.h" -#include "asan_interface_internal.h" -#include "asan_stack.h" -#include "lsan/lsan_common.h" -#include "sanitizer_common/sanitizer_common.h" -#include "sanitizer_common/sanitizer_flags.h" -#include "sanitizer_common/sanitizer_flag_parser.h" -#include "ubsan/ubsan_flags.h" -#include "ubsan/ubsan_platform.h" - -namespace __asan { - -Flags asan_flags_dont_use_directly; // use via flags(). - -static const char *MaybeCallAsanDefaultOptions() { - return (&__asan_default_options) ? __asan_default_options() : ""; -} - -static const char *MaybeUseAsanDefaultOptionsCompileDefinition() { -#ifdef ASAN_DEFAULT_OPTIONS - return SANITIZER_STRINGIFY(ASAN_DEFAULT_OPTIONS); -#else - return ""; -#endif -} - -void Flags::SetDefaults() { -#define ASAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue; -#include "asan_flags.inc" -#undef ASAN_FLAG -} - -static void RegisterAsanFlags(FlagParser *parser, Flags *f) { -#define ASAN_FLAG(Type, Name, DefaultValue, Description) \ - RegisterFlag(parser, #Name, Description, &f->Name); -#include "asan_flags.inc" -#undef ASAN_FLAG -} - -void InitializeFlags() { - // Set the default values and prepare for parsing ASan and common flags. - SetCommonFlagsDefaults(); - { - CommonFlags cf; - cf.CopyFrom(*common_flags()); - cf.detect_leaks = cf.detect_leaks && CAN_SANITIZE_LEAKS; - cf.external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH"); - cf.malloc_context_size = kDefaultMallocContextSize; - cf.intercept_tls_get_addr = true; - cf.exitcode = 1; - OverrideCommonFlags(cf); - } - Flags *f = flags(); - f->SetDefaults(); - - FlagParser asan_parser; - RegisterAsanFlags(&asan_parser, f); - RegisterCommonFlags(&asan_parser); - - // Set the default values and prepare for parsing LSan and UBSan flags - // (which can also overwrite common flags). 
-#if CAN_SANITIZE_LEAKS - __lsan::Flags *lf = __lsan::flags(); - lf->SetDefaults(); - - FlagParser lsan_parser; - __lsan::RegisterLsanFlags(&lsan_parser, lf); - RegisterCommonFlags(&lsan_parser); -#endif - -#if CAN_SANITIZE_UB - __ubsan::Flags *uf = __ubsan::flags(); - uf->SetDefaults(); - - FlagParser ubsan_parser; - __ubsan::RegisterUbsanFlags(&ubsan_parser, uf); - RegisterCommonFlags(&ubsan_parser); -#endif - - if (SANITIZER_MAC) { - // Support macOS MallocScribble and MallocPreScribble: - // - if (GetEnv("MallocScribble")) { - f->max_free_fill_size = 0x1000; - } - if (GetEnv("MallocPreScribble")) { - f->malloc_fill_byte = 0xaa; - } - } - - // Override from ASan compile definition. - const char *asan_compile_def = MaybeUseAsanDefaultOptionsCompileDefinition(); - asan_parser.ParseString(asan_compile_def); - - // Override from user-specified string. - const char *asan_default_options = MaybeCallAsanDefaultOptions(); - asan_parser.ParseString(asan_default_options); -#if CAN_SANITIZE_UB - const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions(); - ubsan_parser.ParseString(ubsan_default_options); -#endif -#if CAN_SANITIZE_LEAKS - const char *lsan_default_options = __lsan::MaybeCallLsanDefaultOptions(); - lsan_parser.ParseString(lsan_default_options); -#endif - - // Override from command line. - asan_parser.ParseStringFromEnv("ASAN_OPTIONS"); -#if CAN_SANITIZE_LEAKS - lsan_parser.ParseStringFromEnv("LSAN_OPTIONS"); -#endif -#if CAN_SANITIZE_UB - ubsan_parser.ParseStringFromEnv("UBSAN_OPTIONS"); -#endif - - InitializeCommonFlags(); - - // TODO(eugenis): dump all flags at verbosity>=2? - if (Verbosity()) ReportUnrecognizedFlags(); - - if (common_flags()->help) { - // TODO(samsonov): print all of the flags (ASan, LSan, common). - asan_parser.PrintFlagDescriptions(); - } - - // Flag validation: - if (!CAN_SANITIZE_LEAKS && common_flags()->detect_leaks) { - Report("%s: detect_leaks is not supported on this platform.\n", - SanitizerToolName); - Die(); - } - // Ensure that redzone is at least SHADOW_GRANULARITY. - if (f->redzone < (int)SHADOW_GRANULARITY) - f->redzone = SHADOW_GRANULARITY; - // Make "strict_init_order" imply "check_initialization_order". - // TODO(samsonov): Use a single runtime flag for an init-order checker. - if (f->strict_init_order) { - f->check_initialization_order = true; - } - CHECK_LE((uptr)common_flags()->malloc_context_size, kStackTraceMax); - CHECK_LE(f->min_uar_stack_size_log, f->max_uar_stack_size_log); - CHECK_GE(f->redzone, 16); - CHECK_GE(f->max_redzone, f->redzone); - CHECK_LE(f->max_redzone, 2048); - CHECK(IsPowerOfTwo(f->redzone)); - CHECK(IsPowerOfTwo(f->max_redzone)); - if (SANITIZER_RTEMS) { - CHECK(!f->unmap_shadow_on_exit); - CHECK(!f->protect_shadow_gap); - } - - // quarantine_size is deprecated but we still honor it. - // quarantine_size can not be used together with quarantine_size_mb. - if (f->quarantine_size >= 0 && f->quarantine_size_mb >= 0) { - Report("%s: please use either 'quarantine_size' (deprecated) or " - "quarantine_size_mb, but not both\n", SanitizerToolName); - Die(); - } - if (f->quarantine_size >= 0) - f->quarantine_size_mb = f->quarantine_size >> 20; - if (f->quarantine_size_mb < 0) { - const int kDefaultQuarantineSizeMb = - (ASAN_LOW_MEMORY) ? 
1UL << 4 : 1UL << 8; - f->quarantine_size_mb = kDefaultQuarantineSizeMb; - } - if (f->thread_local_quarantine_size_kb < 0) { - const u32 kDefaultThreadLocalQuarantineSizeKb = - // It is not advised to go lower than 64Kb, otherwise quarantine batches - // pushed from thread local quarantine to global one will create too - // much overhead. One quarantine batch size is 8Kb and it holds up to - // 1021 chunk, which amounts to 1/8 memory overhead per batch when - // thread local quarantine is set to 64Kb. - (ASAN_LOW_MEMORY) ? 1 << 6 : FIRST_32_SECOND_64(1 << 8, 1 << 10); - f->thread_local_quarantine_size_kb = kDefaultThreadLocalQuarantineSizeKb; - } - if (f->thread_local_quarantine_size_kb == 0 && f->quarantine_size_mb > 0) { - Report("%s: thread_local_quarantine_size_kb can be set to 0 only when " - "quarantine_size_mb is set to 0\n", SanitizerToolName); - Die(); - } - if (!f->replace_str && common_flags()->intercept_strlen) { - Report("WARNING: strlen interceptor is enabled even though replace_str=0. " - "Use intercept_strlen=0 to disable it."); - } - if (!f->replace_str && common_flags()->intercept_strchr) { - Report("WARNING: strchr* interceptors are enabled even though " - "replace_str=0. Use intercept_strchr=0 to disable them."); - } - if (!f->replace_str && common_flags()->intercept_strndup) { - Report("WARNING: strndup* interceptors are enabled even though " - "replace_str=0. Use intercept_strndup=0 to disable them."); - } -} - -} // namespace __asan - -SANITIZER_INTERFACE_WEAK_DEF(const char*, __asan_default_options, void) { - return ""; -} diff --git a/lib/asan/asan_flags.cpp b/lib/asan/asan_flags.cpp new file mode 100644 index 000000000000..c5c70eaed737 --- /dev/null +++ b/lib/asan/asan_flags.cpp @@ -0,0 +1,214 @@ +//===-- asan_flags.cpp ------------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan flag parsing logic. +//===----------------------------------------------------------------------===// + +#include "asan_activation.h" +#include "asan_flags.h" +#include "asan_interface_internal.h" +#include "asan_stack.h" +#include "lsan/lsan_common.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_flag_parser.h" +#include "ubsan/ubsan_flags.h" +#include "ubsan/ubsan_platform.h" + +namespace __asan { + +Flags asan_flags_dont_use_directly; // use via flags(). + +static const char *MaybeCallAsanDefaultOptions() { + return (&__asan_default_options) ? 
__asan_default_options() : ""; +} + +static const char *MaybeUseAsanDefaultOptionsCompileDefinition() { +#ifdef ASAN_DEFAULT_OPTIONS + return SANITIZER_STRINGIFY(ASAN_DEFAULT_OPTIONS); +#else + return ""; +#endif +} + +void Flags::SetDefaults() { +#define ASAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue; +#include "asan_flags.inc" +#undef ASAN_FLAG +} + +static void RegisterAsanFlags(FlagParser *parser, Flags *f) { +#define ASAN_FLAG(Type, Name, DefaultValue, Description) \ + RegisterFlag(parser, #Name, Description, &f->Name); +#include "asan_flags.inc" +#undef ASAN_FLAG +} + +void InitializeFlags() { + // Set the default values and prepare for parsing ASan and common flags. + SetCommonFlagsDefaults(); + { + CommonFlags cf; + cf.CopyFrom(*common_flags()); + cf.detect_leaks = cf.detect_leaks && CAN_SANITIZE_LEAKS; + cf.external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH"); + cf.malloc_context_size = kDefaultMallocContextSize; + cf.intercept_tls_get_addr = true; + cf.exitcode = 1; + OverrideCommonFlags(cf); + } + Flags *f = flags(); + f->SetDefaults(); + + FlagParser asan_parser; + RegisterAsanFlags(&asan_parser, f); + RegisterCommonFlags(&asan_parser); + + // Set the default values and prepare for parsing LSan and UBSan flags + // (which can also overwrite common flags). +#if CAN_SANITIZE_LEAKS + __lsan::Flags *lf = __lsan::flags(); + lf->SetDefaults(); + + FlagParser lsan_parser; + __lsan::RegisterLsanFlags(&lsan_parser, lf); + RegisterCommonFlags(&lsan_parser); +#endif + +#if CAN_SANITIZE_UB + __ubsan::Flags *uf = __ubsan::flags(); + uf->SetDefaults(); + + FlagParser ubsan_parser; + __ubsan::RegisterUbsanFlags(&ubsan_parser, uf); + RegisterCommonFlags(&ubsan_parser); +#endif + + if (SANITIZER_MAC) { + // Support macOS MallocScribble and MallocPreScribble: + // + if (GetEnv("MallocScribble")) { + f->max_free_fill_size = 0x1000; + } + if (GetEnv("MallocPreScribble")) { + f->malloc_fill_byte = 0xaa; + } + } + + // Override from ASan compile definition. + const char *asan_compile_def = MaybeUseAsanDefaultOptionsCompileDefinition(); + asan_parser.ParseString(asan_compile_def); + + // Override from user-specified string. + const char *asan_default_options = MaybeCallAsanDefaultOptions(); + asan_parser.ParseString(asan_default_options); +#if CAN_SANITIZE_UB + const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions(); + ubsan_parser.ParseString(ubsan_default_options); +#endif +#if CAN_SANITIZE_LEAKS + const char *lsan_default_options = __lsan::MaybeCallLsanDefaultOptions(); + lsan_parser.ParseString(lsan_default_options); +#endif + + // Override from command line. + asan_parser.ParseStringFromEnv("ASAN_OPTIONS"); +#if CAN_SANITIZE_LEAKS + lsan_parser.ParseStringFromEnv("LSAN_OPTIONS"); +#endif +#if CAN_SANITIZE_UB + ubsan_parser.ParseStringFromEnv("UBSAN_OPTIONS"); +#endif + + InitializeCommonFlags(); + + // TODO(eugenis): dump all flags at verbosity>=2? + if (Verbosity()) ReportUnrecognizedFlags(); + + if (common_flags()->help) { + // TODO(samsonov): print all of the flags (ASan, LSan, common). + asan_parser.PrintFlagDescriptions(); + } + + // Flag validation: + if (!CAN_SANITIZE_LEAKS && common_flags()->detect_leaks) { + Report("%s: detect_leaks is not supported on this platform.\n", + SanitizerToolName); + Die(); + } + // Ensure that redzone is at least SHADOW_GRANULARITY. + if (f->redzone < (int)SHADOW_GRANULARITY) + f->redzone = SHADOW_GRANULARITY; + // Make "strict_init_order" imply "check_initialization_order". 
+ // TODO(samsonov): Use a single runtime flag for an init-order checker. + if (f->strict_init_order) { + f->check_initialization_order = true; + } + CHECK_LE((uptr)common_flags()->malloc_context_size, kStackTraceMax); + CHECK_LE(f->min_uar_stack_size_log, f->max_uar_stack_size_log); + CHECK_GE(f->redzone, 16); + CHECK_GE(f->max_redzone, f->redzone); + CHECK_LE(f->max_redzone, 2048); + CHECK(IsPowerOfTwo(f->redzone)); + CHECK(IsPowerOfTwo(f->max_redzone)); + if (SANITIZER_RTEMS) { + CHECK(!f->unmap_shadow_on_exit); + CHECK(!f->protect_shadow_gap); + } + + // quarantine_size is deprecated but we still honor it. + // quarantine_size can not be used together with quarantine_size_mb. + if (f->quarantine_size >= 0 && f->quarantine_size_mb >= 0) { + Report("%s: please use either 'quarantine_size' (deprecated) or " + "quarantine_size_mb, but not both\n", SanitizerToolName); + Die(); + } + if (f->quarantine_size >= 0) + f->quarantine_size_mb = f->quarantine_size >> 20; + if (f->quarantine_size_mb < 0) { + const int kDefaultQuarantineSizeMb = + (ASAN_LOW_MEMORY) ? 1UL << 4 : 1UL << 8; + f->quarantine_size_mb = kDefaultQuarantineSizeMb; + } + if (f->thread_local_quarantine_size_kb < 0) { + const u32 kDefaultThreadLocalQuarantineSizeKb = + // It is not advised to go lower than 64Kb, otherwise quarantine batches + // pushed from thread local quarantine to global one will create too + // much overhead. One quarantine batch size is 8Kb and it holds up to + // 1021 chunk, which amounts to 1/8 memory overhead per batch when + // thread local quarantine is set to 64Kb. + (ASAN_LOW_MEMORY) ? 1 << 6 : FIRST_32_SECOND_64(1 << 8, 1 << 10); + f->thread_local_quarantine_size_kb = kDefaultThreadLocalQuarantineSizeKb; + } + if (f->thread_local_quarantine_size_kb == 0 && f->quarantine_size_mb > 0) { + Report("%s: thread_local_quarantine_size_kb can be set to 0 only when " + "quarantine_size_mb is set to 0\n", SanitizerToolName); + Die(); + } + if (!f->replace_str && common_flags()->intercept_strlen) { + Report("WARNING: strlen interceptor is enabled even though replace_str=0. " + "Use intercept_strlen=0 to disable it."); + } + if (!f->replace_str && common_flags()->intercept_strchr) { + Report("WARNING: strchr* interceptors are enabled even though " + "replace_str=0. Use intercept_strchr=0 to disable them."); + } + if (!f->replace_str && common_flags()->intercept_strndup) { + Report("WARNING: strndup* interceptors are enabled even though " + "replace_str=0. Use intercept_strndup=0 to disable them."); + } +} + +} // namespace __asan + +SANITIZER_INTERFACE_WEAK_DEF(const char*, __asan_default_options, void) { + return ""; +} diff --git a/lib/asan/asan_flags.inc b/lib/asan/asan_flags.inc index d360e03ca55e..43c70dbca56b 100644 --- a/lib/asan/asan_flags.inc +++ b/lib/asan/asan_flags.inc @@ -139,10 +139,10 @@ ASAN_FLAG( "If >= 2, detect operations like <, <=, >, >= and - on invalid pointer " "pairs (e.g. when pointers belong to different objects); " "If == 1, detect invalid operations only when both pointers are non-null.") -ASAN_FLAG( - bool, detect_container_overflow, true, - "If true, honor the container overflow annotations. See " - "https://github.com/google/sanitizers/wiki/AddressSanitizerContainerOverflow") +ASAN_FLAG(bool, detect_container_overflow, true, + "If true, honor the container overflow annotations. 
See " + "https://github.com/google/sanitizers/wiki/" + "AddressSanitizerContainerOverflow") ASAN_FLAG(int, detect_odr_violation, 2, "If >=2, detect violation of One-Definition-Rule (ODR); " "If ==1, detect ODR-violation only if the two variables " @@ -158,5 +158,6 @@ ASAN_FLAG(bool, allocator_frees_and_returns_null_on_realloc_zero, true, ASAN_FLAG(bool, verify_asan_link_order, true, "Check position of ASan runtime in library list (needs to be disabled" " when other library has to be preloaded system-wide)") -ASAN_FLAG(bool, windows_hook_rtl_allocators, false, - "(Windows only) enable hooking of Rtl(Allocate|Free|Size|ReAllocate)Heap.") +ASAN_FLAG( + bool, windows_hook_rtl_allocators, false, + "(Windows only) enable hooking of Rtl(Allocate|Free|Size|ReAllocate)Heap.") diff --git a/lib/asan/asan_fuchsia.cc b/lib/asan/asan_fuchsia.cc deleted file mode 100644 index aebc17f38b4b..000000000000 --- a/lib/asan/asan_fuchsia.cc +++ /dev/null @@ -1,224 +0,0 @@ -//===-- asan_fuchsia.cc --------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===---------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Fuchsia-specific details. -//===---------------------------------------------------------------------===// - -#include "sanitizer_common/sanitizer_fuchsia.h" -#if SANITIZER_FUCHSIA - -#include "asan_interceptors.h" -#include "asan_internal.h" -#include "asan_stack.h" -#include "asan_thread.h" - -#include -#include -#include -#include - -namespace __asan { - -// The system already set up the shadow memory for us. -// __sanitizer::GetMaxUserVirtualAddress has already been called by -// AsanInitInternal->InitializeHighMemEnd (asan_rtl.cc). -// Just do some additional sanity checks here. -void InitializeShadowMemory() { - if (Verbosity()) PrintAddressSpaceLayout(); - - // Make sure SHADOW_OFFSET doesn't use __asan_shadow_memory_dynamic_address. - __asan_shadow_memory_dynamic_address = kDefaultShadowSentinel; - DCHECK(kLowShadowBeg != kDefaultShadowSentinel); - __asan_shadow_memory_dynamic_address = kLowShadowBeg; - - CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1); - CHECK_EQ(kHighMemEnd, __sanitizer::ShadowBounds.memory_limit - 1); - CHECK_EQ(kHighMemBeg, __sanitizer::ShadowBounds.shadow_limit); - CHECK_EQ(kHighShadowBeg, __sanitizer::ShadowBounds.shadow_base); - CHECK_EQ(kShadowGapEnd, __sanitizer::ShadowBounds.shadow_base - 1); - CHECK_EQ(kLowShadowEnd, 0); - CHECK_EQ(kLowShadowBeg, 0); -} - -void AsanApplyToGlobals(globals_op_fptr op, const void *needle) { - UNIMPLEMENTED(); -} - -void AsanCheckDynamicRTPrereqs() {} -void AsanCheckIncompatibleRT() {} -void InitializeAsanInterceptors() {} - -void *AsanDoesNotSupportStaticLinkage() { return nullptr; } - -void InitializePlatformExceptionHandlers() {} -void AsanOnDeadlySignal(int signo, void *siginfo, void *context) { - UNIMPLEMENTED(); -} - -// We can use a plain thread_local variable for TSD. -static thread_local void *per_thread; - -void *AsanTSDGet() { return per_thread; } - -void AsanTSDSet(void *tsd) { per_thread = tsd; } - -// There's no initialization needed, and the passed-in destructor -// will never be called. Instead, our own thread destruction hook -// (below) will call AsanThread::TSDDtor directly. 
-void AsanTSDInit(void (*destructor)(void *tsd)) { - DCHECK(destructor == &PlatformTSDDtor); -} - -void PlatformTSDDtor(void *tsd) { UNREACHABLE(__func__); } - -static inline size_t AsanThreadMmapSize() { - return RoundUpTo(sizeof(AsanThread), PAGE_SIZE); -} - -struct AsanThread::InitOptions { - uptr stack_bottom, stack_size; -}; - -// Shared setup between thread creation and startup for the initial thread. -static AsanThread *CreateAsanThread(StackTrace *stack, u32 parent_tid, - uptr user_id, bool detached, - const char *name, uptr stack_bottom, - uptr stack_size) { - // In lieu of AsanThread::Create. - AsanThread *thread = (AsanThread *)MmapOrDie(AsanThreadMmapSize(), __func__); - - AsanThreadContext::CreateThreadContextArgs args = {thread, stack}; - u32 tid = - asanThreadRegistry().CreateThread(user_id, detached, parent_tid, &args); - asanThreadRegistry().SetThreadName(tid, name); - - // On other systems, AsanThread::Init() is called from the new - // thread itself. But on Fuchsia we already know the stack address - // range beforehand, so we can do most of the setup right now. - const AsanThread::InitOptions options = {stack_bottom, stack_size}; - thread->Init(&options); - - return thread; -} - -// This gets the same arguments passed to Init by CreateAsanThread, above. -// We're in the creator thread before the new thread is actually started, -// but its stack address range is already known. We don't bother tracking -// the static TLS address range because the system itself already uses an -// ASan-aware allocator for that. -void AsanThread::SetThreadStackAndTls(const AsanThread::InitOptions *options) { - DCHECK_NE(GetCurrentThread(), this); - DCHECK_NE(GetCurrentThread(), nullptr); - CHECK_NE(options->stack_bottom, 0); - CHECK_NE(options->stack_size, 0); - stack_bottom_ = options->stack_bottom; - stack_top_ = options->stack_bottom + options->stack_size; -} - -// Called by __asan::AsanInitInternal (asan_rtl.c). -AsanThread *CreateMainThread() { - thrd_t self = thrd_current(); - char name[ZX_MAX_NAME_LEN]; - CHECK_NE(__sanitizer::MainThreadStackBase, 0); - CHECK_GT(__sanitizer::MainThreadStackSize, 0); - AsanThread *t = CreateAsanThread( - nullptr, 0, reinterpret_cast(self), true, - _zx_object_get_property(thrd_get_zx_handle(self), ZX_PROP_NAME, name, - sizeof(name)) == ZX_OK - ? name - : nullptr, - __sanitizer::MainThreadStackBase, __sanitizer::MainThreadStackSize); - SetCurrentThread(t); - return t; -} - -// This is called before each thread creation is attempted. So, in -// its first call, the calling thread is the initial and sole thread. -static void *BeforeThreadCreateHook(uptr user_id, bool detached, - const char *name, uptr stack_bottom, - uptr stack_size) { - EnsureMainThreadIDIsCorrect(); - // Strict init-order checking is thread-hostile. - if (flags()->strict_init_order) StopInitOrderChecking(); - - GET_STACK_TRACE_THREAD; - u32 parent_tid = GetCurrentTidOrInvalid(); - - return CreateAsanThread(&stack, parent_tid, user_id, detached, name, - stack_bottom, stack_size); -} - -// This is called after creating a new thread (in the creating thread), -// with the pointer returned by BeforeThreadCreateHook (above). -static void ThreadCreateHook(void *hook, bool aborted) { - AsanThread *thread = static_cast(hook); - if (!aborted) { - // The thread was created successfully. - // ThreadStartHook is already running in the new thread. - } else { - // The thread wasn't created after all. - // Clean up everything we set up in BeforeThreadCreateHook. 
- asanThreadRegistry().FinishThread(thread->tid()); - UnmapOrDie(thread, AsanThreadMmapSize()); - } -} - -// This is called in the newly-created thread before it runs anything else, -// with the pointer returned by BeforeThreadCreateHook (above). -// cf. asan_interceptors.cc:asan_thread_start -static void ThreadStartHook(void *hook, uptr os_id) { - AsanThread *thread = static_cast(hook); - SetCurrentThread(thread); - - // In lieu of AsanThread::ThreadStart. - asanThreadRegistry().StartThread(thread->tid(), os_id, ThreadType::Regular, - nullptr); -} - -// Each thread runs this just before it exits, -// with the pointer returned by BeforeThreadCreateHook (above). -// All per-thread destructors have already been called. -static void ThreadExitHook(void *hook, uptr os_id) { - AsanThread::TSDDtor(per_thread); -} - -bool HandleDlopenInit() { - // Not supported on this platform. - static_assert(!SANITIZER_SUPPORTS_INIT_FOR_DLOPEN, - "Expected SANITIZER_SUPPORTS_INIT_FOR_DLOPEN to be false"); - return false; -} - -} // namespace __asan - -// These are declared (in extern "C") by . -// The system runtime will call our definitions directly. - -void *__sanitizer_before_thread_create_hook(thrd_t thread, bool detached, - const char *name, void *stack_base, - size_t stack_size) { - return __asan::BeforeThreadCreateHook( - reinterpret_cast(thread), detached, name, - reinterpret_cast(stack_base), stack_size); -} - -void __sanitizer_thread_create_hook(void *hook, thrd_t thread, int error) { - __asan::ThreadCreateHook(hook, error != thrd_success); -} - -void __sanitizer_thread_start_hook(void *hook, thrd_t self) { - __asan::ThreadStartHook(hook, reinterpret_cast(self)); -} - -void __sanitizer_thread_exit_hook(void *hook, thrd_t self) { - __asan::ThreadExitHook(hook, reinterpret_cast(self)); -} - -#endif // SANITIZER_FUCHSIA diff --git a/lib/asan/asan_fuchsia.cpp b/lib/asan/asan_fuchsia.cpp new file mode 100644 index 000000000000..f8b2d5f26979 --- /dev/null +++ b/lib/asan/asan_fuchsia.cpp @@ -0,0 +1,224 @@ +//===-- asan_fuchsia.cpp -------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===---------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Fuchsia-specific details. +//===---------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_fuchsia.h" +#if SANITIZER_FUCHSIA + +#include "asan_interceptors.h" +#include "asan_internal.h" +#include "asan_stack.h" +#include "asan_thread.h" + +#include +#include +#include +#include + +namespace __asan { + +// The system already set up the shadow memory for us. +// __sanitizer::GetMaxUserVirtualAddress has already been called by +// AsanInitInternal->InitializeHighMemEnd (asan_rtl.cpp). +// Just do some additional sanity checks here. +void InitializeShadowMemory() { + if (Verbosity()) PrintAddressSpaceLayout(); + + // Make sure SHADOW_OFFSET doesn't use __asan_shadow_memory_dynamic_address. 
+ __asan_shadow_memory_dynamic_address = kDefaultShadowSentinel; + DCHECK(kLowShadowBeg != kDefaultShadowSentinel); + __asan_shadow_memory_dynamic_address = kLowShadowBeg; + + CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1); + CHECK_EQ(kHighMemEnd, __sanitizer::ShadowBounds.memory_limit - 1); + CHECK_EQ(kHighMemBeg, __sanitizer::ShadowBounds.shadow_limit); + CHECK_EQ(kHighShadowBeg, __sanitizer::ShadowBounds.shadow_base); + CHECK_EQ(kShadowGapEnd, __sanitizer::ShadowBounds.shadow_base - 1); + CHECK_EQ(kLowShadowEnd, 0); + CHECK_EQ(kLowShadowBeg, 0); +} + +void AsanApplyToGlobals(globals_op_fptr op, const void *needle) { + UNIMPLEMENTED(); +} + +void AsanCheckDynamicRTPrereqs() {} +void AsanCheckIncompatibleRT() {} +void InitializeAsanInterceptors() {} + +void *AsanDoesNotSupportStaticLinkage() { return nullptr; } + +void InitializePlatformExceptionHandlers() {} +void AsanOnDeadlySignal(int signo, void *siginfo, void *context) { + UNIMPLEMENTED(); +} + +// We can use a plain thread_local variable for TSD. +static thread_local void *per_thread; + +void *AsanTSDGet() { return per_thread; } + +void AsanTSDSet(void *tsd) { per_thread = tsd; } + +// There's no initialization needed, and the passed-in destructor +// will never be called. Instead, our own thread destruction hook +// (below) will call AsanThread::TSDDtor directly. +void AsanTSDInit(void (*destructor)(void *tsd)) { + DCHECK(destructor == &PlatformTSDDtor); +} + +void PlatformTSDDtor(void *tsd) { UNREACHABLE(__func__); } + +static inline size_t AsanThreadMmapSize() { + return RoundUpTo(sizeof(AsanThread), PAGE_SIZE); +} + +struct AsanThread::InitOptions { + uptr stack_bottom, stack_size; +}; + +// Shared setup between thread creation and startup for the initial thread. +static AsanThread *CreateAsanThread(StackTrace *stack, u32 parent_tid, + uptr user_id, bool detached, + const char *name, uptr stack_bottom, + uptr stack_size) { + // In lieu of AsanThread::Create. + AsanThread *thread = (AsanThread *)MmapOrDie(AsanThreadMmapSize(), __func__); + + AsanThreadContext::CreateThreadContextArgs args = {thread, stack}; + u32 tid = + asanThreadRegistry().CreateThread(user_id, detached, parent_tid, &args); + asanThreadRegistry().SetThreadName(tid, name); + + // On other systems, AsanThread::Init() is called from the new + // thread itself. But on Fuchsia we already know the stack address + // range beforehand, so we can do most of the setup right now. + const AsanThread::InitOptions options = {stack_bottom, stack_size}; + thread->Init(&options); + + return thread; +} + +// This gets the same arguments passed to Init by CreateAsanThread, above. +// We're in the creator thread before the new thread is actually started, +// but its stack address range is already known. We don't bother tracking +// the static TLS address range because the system itself already uses an +// ASan-aware allocator for that. +void AsanThread::SetThreadStackAndTls(const AsanThread::InitOptions *options) { + DCHECK_NE(GetCurrentThread(), this); + DCHECK_NE(GetCurrentThread(), nullptr); + CHECK_NE(options->stack_bottom, 0); + CHECK_NE(options->stack_size, 0); + stack_bottom_ = options->stack_bottom; + stack_top_ = options->stack_bottom + options->stack_size; +} + +// Called by __asan::AsanInitInternal (asan_rtl.c). 
+AsanThread *CreateMainThread() { + thrd_t self = thrd_current(); + char name[ZX_MAX_NAME_LEN]; + CHECK_NE(__sanitizer::MainThreadStackBase, 0); + CHECK_GT(__sanitizer::MainThreadStackSize, 0); + AsanThread *t = CreateAsanThread( + nullptr, 0, reinterpret_cast(self), true, + _zx_object_get_property(thrd_get_zx_handle(self), ZX_PROP_NAME, name, + sizeof(name)) == ZX_OK + ? name + : nullptr, + __sanitizer::MainThreadStackBase, __sanitizer::MainThreadStackSize); + SetCurrentThread(t); + return t; +} + +// This is called before each thread creation is attempted. So, in +// its first call, the calling thread is the initial and sole thread. +static void *BeforeThreadCreateHook(uptr user_id, bool detached, + const char *name, uptr stack_bottom, + uptr stack_size) { + EnsureMainThreadIDIsCorrect(); + // Strict init-order checking is thread-hostile. + if (flags()->strict_init_order) StopInitOrderChecking(); + + GET_STACK_TRACE_THREAD; + u32 parent_tid = GetCurrentTidOrInvalid(); + + return CreateAsanThread(&stack, parent_tid, user_id, detached, name, + stack_bottom, stack_size); +} + +// This is called after creating a new thread (in the creating thread), +// with the pointer returned by BeforeThreadCreateHook (above). +static void ThreadCreateHook(void *hook, bool aborted) { + AsanThread *thread = static_cast(hook); + if (!aborted) { + // The thread was created successfully. + // ThreadStartHook is already running in the new thread. + } else { + // The thread wasn't created after all. + // Clean up everything we set up in BeforeThreadCreateHook. + asanThreadRegistry().FinishThread(thread->tid()); + UnmapOrDie(thread, AsanThreadMmapSize()); + } +} + +// This is called in the newly-created thread before it runs anything else, +// with the pointer returned by BeforeThreadCreateHook (above). +// cf. asan_interceptors.cpp:asan_thread_start +static void ThreadStartHook(void *hook, uptr os_id) { + AsanThread *thread = static_cast(hook); + SetCurrentThread(thread); + + // In lieu of AsanThread::ThreadStart. + asanThreadRegistry().StartThread(thread->tid(), os_id, ThreadType::Regular, + nullptr); +} + +// Each thread runs this just before it exits, +// with the pointer returned by BeforeThreadCreateHook (above). +// All per-thread destructors have already been called. +static void ThreadExitHook(void *hook, uptr os_id) { + AsanThread::TSDDtor(per_thread); +} + +bool HandleDlopenInit() { + // Not supported on this platform. + static_assert(!SANITIZER_SUPPORTS_INIT_FOR_DLOPEN, + "Expected SANITIZER_SUPPORTS_INIT_FOR_DLOPEN to be false"); + return false; +} + +} // namespace __asan + +// These are declared (in extern "C") by . +// The system runtime will call our definitions directly. 
+ +void *__sanitizer_before_thread_create_hook(thrd_t thread, bool detached, + const char *name, void *stack_base, + size_t stack_size) { + return __asan::BeforeThreadCreateHook( + reinterpret_cast(thread), detached, name, + reinterpret_cast(stack_base), stack_size); +} + +void __sanitizer_thread_create_hook(void *hook, thrd_t thread, int error) { + __asan::ThreadCreateHook(hook, error != thrd_success); +} + +void __sanitizer_thread_start_hook(void *hook, thrd_t self) { + __asan::ThreadStartHook(hook, reinterpret_cast(self)); +} + +void __sanitizer_thread_exit_hook(void *hook, thrd_t self) { + __asan::ThreadExitHook(hook, reinterpret_cast(self)); +} + +#endif // SANITIZER_FUCHSIA diff --git a/lib/asan/asan_globals.cc b/lib/asan/asan_globals.cc deleted file mode 100644 index a831fdf9cd93..000000000000 --- a/lib/asan/asan_globals.cc +++ /dev/null @@ -1,465 +0,0 @@ -//===-- asan_globals.cc ---------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Handle globals. -//===----------------------------------------------------------------------===// - -#include "asan_interceptors.h" -#include "asan_internal.h" -#include "asan_mapping.h" -#include "asan_poisoning.h" -#include "asan_report.h" -#include "asan_stack.h" -#include "asan_stats.h" -#include "asan_suppressions.h" -#include "asan_thread.h" -#include "sanitizer_common/sanitizer_common.h" -#include "sanitizer_common/sanitizer_mutex.h" -#include "sanitizer_common/sanitizer_placement_new.h" -#include "sanitizer_common/sanitizer_stackdepot.h" -#include "sanitizer_common/sanitizer_symbolizer.h" - -namespace __asan { - -typedef __asan_global Global; - -struct ListOfGlobals { - const Global *g; - ListOfGlobals *next; -}; - -static BlockingMutex mu_for_globals(LINKER_INITIALIZED); -static LowLevelAllocator allocator_for_globals; -static ListOfGlobals *list_of_all_globals; - -static const int kDynamicInitGlobalsInitialCapacity = 512; -struct DynInitGlobal { - Global g; - bool initialized; -}; -typedef InternalMmapVector VectorOfGlobals; -// Lazy-initialized and never deleted. -static VectorOfGlobals *dynamic_init_globals; - -// We want to remember where a certain range of globals was registered. 
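
Each global registered by this file is surrounded by a redzone poisoned with kAsanGlobalRedzoneMagic (see the shadow legend earlier), which is what turns an out-of-bounds store into a global-buffer-overflow report. A minimal reproducer, assuming -fsanitize=address:

#include <cstdio>

int g_array[10];  // instrumented global: redzones are placed around it on load

int main(int argc, char **) {
  // argc >= 1, so this writes at or past index 10; the store lands in the
  // right redzone and ASan reports global-buffer-overflow. Using argc keeps
  // the compiler from folding the access away or warning it out of existence.
  g_array[argc + 9] = 42;
  std::printf("%d\n", g_array[0]);
}
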
-struct GlobalRegistrationSite { - u32 stack_id; - Global *g_first, *g_last; -}; -typedef InternalMmapVector GlobalRegistrationSiteVector; -static GlobalRegistrationSiteVector *global_registration_site_vector; - -ALWAYS_INLINE void PoisonShadowForGlobal(const Global *g, u8 value) { - FastPoisonShadow(g->beg, g->size_with_redzone, value); -} - -ALWAYS_INLINE void PoisonRedZones(const Global &g) { - uptr aligned_size = RoundUpTo(g.size, SHADOW_GRANULARITY); - FastPoisonShadow(g.beg + aligned_size, g.size_with_redzone - aligned_size, - kAsanGlobalRedzoneMagic); - if (g.size != aligned_size) { - FastPoisonShadowPartialRightRedzone( - g.beg + RoundDownTo(g.size, SHADOW_GRANULARITY), - g.size % SHADOW_GRANULARITY, - SHADOW_GRANULARITY, - kAsanGlobalRedzoneMagic); - } -} - -const uptr kMinimalDistanceFromAnotherGlobal = 64; - -static bool IsAddressNearGlobal(uptr addr, const __asan_global &g) { - if (addr <= g.beg - kMinimalDistanceFromAnotherGlobal) return false; - if (addr >= g.beg + g.size_with_redzone) return false; - return true; -} - -static void ReportGlobal(const Global &g, const char *prefix) { - Report( - "%s Global[%p]: beg=%p size=%zu/%zu name=%s module=%s dyn_init=%zu " - "odr_indicator=%p\n", - prefix, &g, (void *)g.beg, g.size, g.size_with_redzone, g.name, - g.module_name, g.has_dynamic_init, (void *)g.odr_indicator); - if (g.location) { - Report(" location (%p): name=%s[%p], %d %d\n", g.location, - g.location->filename, g.location->filename, g.location->line_no, - g.location->column_no); - } -} - -static u32 FindRegistrationSite(const Global *g) { - mu_for_globals.CheckLocked(); - CHECK(global_registration_site_vector); - for (uptr i = 0, n = global_registration_site_vector->size(); i < n; i++) { - GlobalRegistrationSite &grs = (*global_registration_site_vector)[i]; - if (g >= grs.g_first && g <= grs.g_last) - return grs.stack_id; - } - return 0; -} - -int GetGlobalsForAddress(uptr addr, Global *globals, u32 *reg_sites, - int max_globals) { - if (!flags()->report_globals) return 0; - BlockingMutexLock lock(&mu_for_globals); - int res = 0; - for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) { - const Global &g = *l->g; - if (flags()->report_globals >= 2) - ReportGlobal(g, "Search"); - if (IsAddressNearGlobal(addr, g)) { - internal_memcpy(&globals[res], &g, sizeof(g)); - if (reg_sites) - reg_sites[res] = FindRegistrationSite(&g); - res++; - if (res == max_globals) - break; - } - } - return res; -} - -enum GlobalSymbolState { - UNREGISTERED = 0, - REGISTERED = 1 -}; - -// Check ODR violation for given global G via special ODR indicator. We use -// this method in case compiler instruments global variables through their -// local aliases. -static void CheckODRViolationViaIndicator(const Global *g) { - // Instrumentation requests to skip ODR check. - if (g->odr_indicator == UINTPTR_MAX) - return; - u8 *odr_indicator = reinterpret_cast(g->odr_indicator); - if (*odr_indicator == UNREGISTERED) { - *odr_indicator = REGISTERED; - return; - } - // If *odr_indicator is DEFINED, some module have already registered - // externally visible symbol with the same name. This is an ODR violation. - for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) { - if (g->odr_indicator == l->g->odr_indicator && - (flags()->detect_odr_violation >= 2 || g->size != l->g->size) && - !IsODRViolationSuppressed(g->name)) - ReportODRViolation(g, FindRegistrationSite(g), - l->g, FindRegistrationSite(l->g)); - } -} - -// Check ODR violation for given global G by checking if it's already poisoned. 
-// We use this method in case compiler doesn't use private aliases for global -// variables. -static void CheckODRViolationViaPoisoning(const Global *g) { - if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) { - // This check may not be enough: if the first global is much larger - // the entire redzone of the second global may be within the first global. - for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) { - if (g->beg == l->g->beg && - (flags()->detect_odr_violation >= 2 || g->size != l->g->size) && - !IsODRViolationSuppressed(g->name)) - ReportODRViolation(g, FindRegistrationSite(g), - l->g, FindRegistrationSite(l->g)); - } - } -} - -// Clang provides two different ways for global variables protection: -// it can poison the global itself or its private alias. In former -// case we may poison same symbol multiple times, that can help us to -// cheaply detect ODR violation: if we try to poison an already poisoned -// global, we have ODR violation error. -// In latter case, we poison each symbol exactly once, so we use special -// indicator symbol to perform similar check. -// In either case, compiler provides a special odr_indicator field to Global -// structure, that can contain two kinds of values: -// 1) Non-zero value. In this case, odr_indicator is an address of -// corresponding indicator variable for given global. -// 2) Zero. This means that we don't use private aliases for global variables -// and can freely check ODR violation with the first method. -// -// This routine chooses between two different methods of ODR violation -// detection. -static inline bool UseODRIndicator(const Global *g) { - return g->odr_indicator > 0; -} - -// Register a global variable. -// This function may be called more than once for every global -// so we store the globals in a map. -static void RegisterGlobal(const Global *g) { - CHECK(asan_inited); - if (flags()->report_globals >= 2) - ReportGlobal(*g, "Added"); - CHECK(flags()->report_globals); - CHECK(AddrIsInMem(g->beg)); - if (!AddrIsAlignedByGranularity(g->beg)) { - Report("The following global variable is not properly aligned.\n"); - Report("This may happen if another global with the same name\n"); - Report("resides in another non-instrumented module.\n"); - Report("Or the global comes from a C file built w/o -fno-common.\n"); - Report("In either case this is likely an ODR violation bug,\n"); - Report("but AddressSanitizer can not provide more details.\n"); - ReportODRViolation(g, FindRegistrationSite(g), g, FindRegistrationSite(g)); - CHECK(AddrIsAlignedByGranularity(g->beg)); - } - CHECK(AddrIsAlignedByGranularity(g->size_with_redzone)); - if (flags()->detect_odr_violation) { - // Try detecting ODR (One Definition Rule) violation, i.e. the situation - // where two globals with the same name are defined in different modules. 
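To make the situation described above concrete, here is a hypothetical pair of modules that triggers the check; the file names and symbols are invented for the illustration.

// libfoo.cpp, built into libfoo.so with -fsanitize=address (hypothetical)
int buffer[16];   // 64 bytes, registered when libfoo.so is loaded

// main.cpp, built into the executable with -fsanitize=address (hypothetical)
char buffer[4];   // same name, different type and size
int main() { return buffer[0]; }

// Both images register their own `buffer`. Because the dynamic linker resolves
// the symbol (and its ODR indicator) to a single definition, the second
// registration finds it already registered/poisoned and reports an ODR
// violation: here the sizes differ, and with detect_odr_violation>=2 the
// report is issued even for same-sized duplicates.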
- if (UseODRIndicator(g)) - CheckODRViolationViaIndicator(g); - else - CheckODRViolationViaPoisoning(g); - } - if (CanPoisonMemory()) - PoisonRedZones(*g); - ListOfGlobals *l = new(allocator_for_globals) ListOfGlobals; - l->g = g; - l->next = list_of_all_globals; - list_of_all_globals = l; - if (g->has_dynamic_init) { - if (!dynamic_init_globals) { - dynamic_init_globals = - new (allocator_for_globals) VectorOfGlobals; // NOLINT - dynamic_init_globals->reserve(kDynamicInitGlobalsInitialCapacity); - } - DynInitGlobal dyn_global = { *g, false }; - dynamic_init_globals->push_back(dyn_global); - } -} - -static void UnregisterGlobal(const Global *g) { - CHECK(asan_inited); - if (flags()->report_globals >= 2) - ReportGlobal(*g, "Removed"); - CHECK(flags()->report_globals); - CHECK(AddrIsInMem(g->beg)); - CHECK(AddrIsAlignedByGranularity(g->beg)); - CHECK(AddrIsAlignedByGranularity(g->size_with_redzone)); - if (CanPoisonMemory()) - PoisonShadowForGlobal(g, 0); - // We unpoison the shadow memory for the global but we do not remove it from - // the list because that would require O(n^2) time with the current list - // implementation. It might not be worth doing anyway. - - // Release ODR indicator. - if (UseODRIndicator(g) && g->odr_indicator != UINTPTR_MAX) { - u8 *odr_indicator = reinterpret_cast(g->odr_indicator); - *odr_indicator = UNREGISTERED; - } -} - -void StopInitOrderChecking() { - BlockingMutexLock lock(&mu_for_globals); - if (!flags()->check_initialization_order || !dynamic_init_globals) - return; - flags()->check_initialization_order = false; - for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) { - DynInitGlobal &dyn_g = (*dynamic_init_globals)[i]; - const Global *g = &dyn_g.g; - // Unpoison the whole global. - PoisonShadowForGlobal(g, 0); - // Poison redzones back. - PoisonRedZones(*g); - } -} - -static bool IsASCII(unsigned char c) { return /*0x00 <= c &&*/ c <= 0x7F; } - -const char *MaybeDemangleGlobalName(const char *name) { - // We can spoil names of globals with C linkage, so use an heuristic - // approach to check if the name should be demangled. - bool should_demangle = false; - if (name[0] == '_' && name[1] == 'Z') - should_demangle = true; - else if (SANITIZER_WINDOWS && name[0] == '\01' && name[1] == '?') - should_demangle = true; - - return should_demangle ? Symbolizer::GetOrInit()->Demangle(name) : name; -} - -// Check if the global is a zero-terminated ASCII string. If so, print it. -void PrintGlobalNameIfASCII(InternalScopedString *str, const __asan_global &g) { - for (uptr p = g.beg; p < g.beg + g.size - 1; p++) { - unsigned char c = *(unsigned char *)p; - if (c == '\0' || !IsASCII(c)) return; - } - if (*(char *)(g.beg + g.size - 1) != '\0') return; - str->append(" '%s' is ascii string '%s'\n", MaybeDemangleGlobalName(g.name), - (char *)g.beg); -} - -static const char *GlobalFilename(const __asan_global &g) { - const char *res = g.module_name; - // Prefer the filename from source location, if is available. 
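The demangling heuristic in MaybeDemangleGlobalName above only forwards names that already look mangled; a tiny standalone model of that decision (the actual demangling is delegated to the symbolizer and is not reproduced here).

#include <cstdio>

// Model of the should-demangle check: Itanium-mangled names start with "_Z",
// and on Windows the compiler records "\01?..." for MSVC-mangled globals.
static bool ShouldDemangle(const char *name) {
  if (name[0] == '_' && name[1] == 'Z') return true;
#if defined(_WIN32)
  if (name[0] == '\01' && name[1] == '?') return true;
#endif
  return false;
}

int main() {
  const char *names[] = {"_ZL7counter", "counter"};
  for (const char *n : names)
    std::printf("%-12s -> %s\n", n, ShouldDemangle(n) ? "demangle" : "keep as-is");
  return 0;
}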
- if (g.location) res = g.location->filename; - CHECK(res); - return res; -} - -void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g) { - str->append("%s", GlobalFilename(g)); - if (!g.location) return; - if (g.location->line_no) str->append(":%d", g.location->line_no); - if (g.location->column_no) str->append(":%d", g.location->column_no); -} - -} // namespace __asan - -// ---------------------- Interface ---------------- {{{1 -using namespace __asan; // NOLINT - - -// Apply __asan_register_globals to all globals found in the same loaded -// executable or shared library as `flag'. The flag tracks whether globals have -// already been registered or not for this image. -void __asan_register_image_globals(uptr *flag) { - if (*flag) - return; - AsanApplyToGlobals(__asan_register_globals, flag); - *flag = 1; -} - -// This mirrors __asan_register_image_globals. -void __asan_unregister_image_globals(uptr *flag) { - if (!*flag) - return; - AsanApplyToGlobals(__asan_unregister_globals, flag); - *flag = 0; -} - -void __asan_register_elf_globals(uptr *flag, void *start, void *stop) { - if (*flag) return; - if (!start) return; - CHECK_EQ(0, ((uptr)stop - (uptr)start) % sizeof(__asan_global)); - __asan_global *globals_start = (__asan_global*)start; - __asan_global *globals_stop = (__asan_global*)stop; - __asan_register_globals(globals_start, globals_stop - globals_start); - *flag = 1; -} - -void __asan_unregister_elf_globals(uptr *flag, void *start, void *stop) { - if (!*flag) return; - if (!start) return; - CHECK_EQ(0, ((uptr)stop - (uptr)start) % sizeof(__asan_global)); - __asan_global *globals_start = (__asan_global*)start; - __asan_global *globals_stop = (__asan_global*)stop; - __asan_unregister_globals(globals_start, globals_stop - globals_start); - *flag = 0; -} - -// Register an array of globals. -void __asan_register_globals(__asan_global *globals, uptr n) { - if (!flags()->report_globals) return; - GET_STACK_TRACE_MALLOC; - u32 stack_id = StackDepotPut(stack); - BlockingMutexLock lock(&mu_for_globals); - if (!global_registration_site_vector) { - global_registration_site_vector = - new (allocator_for_globals) GlobalRegistrationSiteVector; // NOLINT - global_registration_site_vector->reserve(128); - } - GlobalRegistrationSite site = {stack_id, &globals[0], &globals[n - 1]}; - global_registration_site_vector->push_back(site); - if (flags()->report_globals >= 2) { - PRINT_CURRENT_STACK(); - Printf("=== ID %d; %p %p\n", stack_id, &globals[0], &globals[n - 1]); - } - for (uptr i = 0; i < n; i++) { - if (SANITIZER_WINDOWS && globals[i].beg == 0) { - // The MSVC incremental linker may pad globals out to 256 bytes. As long - // as __asan_global is less than 256 bytes large and its size is a power - // of two, we can skip over the padding. - static_assert( - sizeof(__asan_global) < 256 && - (sizeof(__asan_global) & (sizeof(__asan_global) - 1)) == 0, - "sizeof(__asan_global) incompatible with incremental linker padding"); - // If these are padding bytes, the rest of the global should be zero. - CHECK(globals[i].size == 0 && globals[i].size_with_redzone == 0 && - globals[i].name == nullptr && globals[i].module_name == nullptr && - globals[i].odr_indicator == 0); - continue; - } - RegisterGlobal(&globals[i]); - } - - // Poison the metadata. It should not be accessible to user code. - PoisonShadow(reinterpret_cast(globals), n * sizeof(__asan_global), - kAsanGlobalRedzoneMagic); -} - -// Unregister an array of globals. -// We must do this when a shared objects gets dlclosed. 
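The register/unregister pair above is driven once per loaded image: on ELF targets the instrumentation emits a module constructor and destructor that hand the image's __asan_global array to these entry points at load and dlclose time. A hedged sketch of what that generated code is morally equivalent to; the section-bound symbols and the flag name are assumptions made for this illustration.

typedef unsigned long uptr;  // stand-in for the sanitizer's uptr

extern "C" {
void __asan_register_elf_globals(uptr *flag, void *start, void *stop);
void __asan_unregister_elf_globals(uptr *flag, void *start, void *stop);
// Assumed linker-provided bounds of this image's asan_globals section.
extern char __start_asan_globals[], __stop_asan_globals[];
}

static uptr this_image_registered;  // one flag per executable/DSO

__attribute__((constructor)) static void module_ctor_sketch() {
  // Load time (including dlopen): register every global of this image once.
  __asan_register_elf_globals(&this_image_registered, __start_asan_globals,
                              __stop_asan_globals);
}

__attribute__((destructor)) static void module_dtor_sketch() {
  // Unload time (dlclose): unpoison this image's globals again.
  __asan_unregister_elf_globals(&this_image_registered, __start_asan_globals,
                                __stop_asan_globals);
}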
-void __asan_unregister_globals(__asan_global *globals, uptr n) { - if (!flags()->report_globals) return; - BlockingMutexLock lock(&mu_for_globals); - for (uptr i = 0; i < n; i++) { - if (SANITIZER_WINDOWS && globals[i].beg == 0) { - // Skip globals that look like padding from the MSVC incremental linker. - // See comment in __asan_register_globals. - continue; - } - UnregisterGlobal(&globals[i]); - } - - // Unpoison the metadata. - PoisonShadow(reinterpret_cast(globals), n * sizeof(__asan_global), 0); -} - -// This method runs immediately prior to dynamic initialization in each TU, -// when all dynamically initialized globals are unpoisoned. This method -// poisons all global variables not defined in this TU, so that a dynamic -// initializer can only touch global variables in the same TU. -void __asan_before_dynamic_init(const char *module_name) { - if (!flags()->check_initialization_order || - !CanPoisonMemory() || - !dynamic_init_globals) - return; - bool strict_init_order = flags()->strict_init_order; - CHECK(module_name); - CHECK(asan_inited); - BlockingMutexLock lock(&mu_for_globals); - if (flags()->report_globals >= 3) - Printf("DynInitPoison module: %s\n", module_name); - for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) { - DynInitGlobal &dyn_g = (*dynamic_init_globals)[i]; - const Global *g = &dyn_g.g; - if (dyn_g.initialized) - continue; - if (g->module_name != module_name) - PoisonShadowForGlobal(g, kAsanInitializationOrderMagic); - else if (!strict_init_order) - dyn_g.initialized = true; - } -} - -// This method runs immediately after dynamic initialization in each TU, when -// all dynamically initialized globals except for those defined in the current -// TU are poisoned. It simply unpoisons all dynamically initialized globals. -void __asan_after_dynamic_init() { - if (!flags()->check_initialization_order || - !CanPoisonMemory() || - !dynamic_init_globals) - return; - CHECK(asan_inited); - BlockingMutexLock lock(&mu_for_globals); - // FIXME: Optionally report that we're unpoisoning globals from a module. - for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) { - DynInitGlobal &dyn_g = (*dynamic_init_globals)[i]; - const Global *g = &dyn_g.g; - if (!dyn_g.initialized) { - // Unpoison the whole global. - PoisonShadowForGlobal(g, 0); - // Poison redzones back. - PoisonRedZones(*g); - } - } -} diff --git a/lib/asan/asan_globals.cpp b/lib/asan/asan_globals.cpp new file mode 100644 index 000000000000..9d7dbc6f264c --- /dev/null +++ b/lib/asan/asan_globals.cpp @@ -0,0 +1,463 @@ +//===-- asan_globals.cpp --------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Handle globals. 
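The before/after dynamic-init hooks above exist to catch cross-TU initialization-order bugs. A hypothetical two-file example of the kind of bug they expose; with check_initialization_order=1 the read below is reported whenever b.cpp happens to be initialized after a.cpp.

// a.cpp (hypothetical)
extern int answer;                // defined and dynamically initialized in b.cpp
static int derived = answer + 1;  // dynamic init in a.cpp reads b.cpp's global

// b.cpp (hypothetical)
static int compute() { return 41; }
int answer = compute();           // dynamic initializer; order vs. a.cpp is unspecified

// While a.cpp's dynamic initializers run, __asan_before_dynamic_init has
// poisoned every dynamically initialized global from other TUs that is not yet
// known to be initialized, so the read of `answer` is flagged as an
// initialization-order problem if b.cpp has not been initialized yet.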
+//===----------------------------------------------------------------------===// + +#include "asan_interceptors.h" +#include "asan_internal.h" +#include "asan_mapping.h" +#include "asan_poisoning.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "asan_stats.h" +#include "asan_suppressions.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_mutex.h" +#include "sanitizer_common/sanitizer_placement_new.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +#include "sanitizer_common/sanitizer_symbolizer.h" + +namespace __asan { + +typedef __asan_global Global; + +struct ListOfGlobals { + const Global *g; + ListOfGlobals *next; +}; + +static BlockingMutex mu_for_globals(LINKER_INITIALIZED); +static LowLevelAllocator allocator_for_globals; +static ListOfGlobals *list_of_all_globals; + +static const int kDynamicInitGlobalsInitialCapacity = 512; +struct DynInitGlobal { + Global g; + bool initialized; +}; +typedef InternalMmapVector VectorOfGlobals; +// Lazy-initialized and never deleted. +static VectorOfGlobals *dynamic_init_globals; + +// We want to remember where a certain range of globals was registered. +struct GlobalRegistrationSite { + u32 stack_id; + Global *g_first, *g_last; +}; +typedef InternalMmapVector GlobalRegistrationSiteVector; +static GlobalRegistrationSiteVector *global_registration_site_vector; + +ALWAYS_INLINE void PoisonShadowForGlobal(const Global *g, u8 value) { + FastPoisonShadow(g->beg, g->size_with_redzone, value); +} + +ALWAYS_INLINE void PoisonRedZones(const Global &g) { + uptr aligned_size = RoundUpTo(g.size, SHADOW_GRANULARITY); + FastPoisonShadow(g.beg + aligned_size, g.size_with_redzone - aligned_size, + kAsanGlobalRedzoneMagic); + if (g.size != aligned_size) { + FastPoisonShadowPartialRightRedzone( + g.beg + RoundDownTo(g.size, SHADOW_GRANULARITY), + g.size % SHADOW_GRANULARITY, + SHADOW_GRANULARITY, + kAsanGlobalRedzoneMagic); + } +} + +const uptr kMinimalDistanceFromAnotherGlobal = 64; + +static bool IsAddressNearGlobal(uptr addr, const __asan_global &g) { + if (addr <= g.beg - kMinimalDistanceFromAnotherGlobal) return false; + if (addr >= g.beg + g.size_with_redzone) return false; + return true; +} + +static void ReportGlobal(const Global &g, const char *prefix) { + Report( + "%s Global[%p]: beg=%p size=%zu/%zu name=%s module=%s dyn_init=%zu " + "odr_indicator=%p\n", + prefix, &g, (void *)g.beg, g.size, g.size_with_redzone, g.name, + g.module_name, g.has_dynamic_init, (void *)g.odr_indicator); + if (g.location) { + Report(" location (%p): name=%s[%p], %d %d\n", g.location, + g.location->filename, g.location->filename, g.location->line_no, + g.location->column_no); + } +} + +static u32 FindRegistrationSite(const Global *g) { + mu_for_globals.CheckLocked(); + CHECK(global_registration_site_vector); + for (uptr i = 0, n = global_registration_site_vector->size(); i < n; i++) { + GlobalRegistrationSite &grs = (*global_registration_site_vector)[i]; + if (g >= grs.g_first && g <= grs.g_last) + return grs.stack_id; + } + return 0; +} + +int GetGlobalsForAddress(uptr addr, Global *globals, u32 *reg_sites, + int max_globals) { + if (!flags()->report_globals) return 0; + BlockingMutexLock lock(&mu_for_globals); + int res = 0; + for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) { + const Global &g = *l->g; + if (flags()->report_globals >= 2) + ReportGlobal(g, "Search"); + if (IsAddressNearGlobal(addr, g)) { + internal_memcpy(&globals[res], &g, sizeof(g)); + if (reg_sites) + 
reg_sites[res] = FindRegistrationSite(&g); + res++; + if (res == max_globals) + break; + } + } + return res; +} + +enum GlobalSymbolState { + UNREGISTERED = 0, + REGISTERED = 1 +}; + +// Check ODR violation for given global G via special ODR indicator. We use +// this method in case compiler instruments global variables through their +// local aliases. +static void CheckODRViolationViaIndicator(const Global *g) { + // Instrumentation requests to skip ODR check. + if (g->odr_indicator == UINTPTR_MAX) + return; + u8 *odr_indicator = reinterpret_cast(g->odr_indicator); + if (*odr_indicator == UNREGISTERED) { + *odr_indicator = REGISTERED; + return; + } + // If *odr_indicator is DEFINED, some module have already registered + // externally visible symbol with the same name. This is an ODR violation. + for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) { + if (g->odr_indicator == l->g->odr_indicator && + (flags()->detect_odr_violation >= 2 || g->size != l->g->size) && + !IsODRViolationSuppressed(g->name)) + ReportODRViolation(g, FindRegistrationSite(g), + l->g, FindRegistrationSite(l->g)); + } +} + +// Check ODR violation for given global G by checking if it's already poisoned. +// We use this method in case compiler doesn't use private aliases for global +// variables. +static void CheckODRViolationViaPoisoning(const Global *g) { + if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) { + // This check may not be enough: if the first global is much larger + // the entire redzone of the second global may be within the first global. + for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) { + if (g->beg == l->g->beg && + (flags()->detect_odr_violation >= 2 || g->size != l->g->size) && + !IsODRViolationSuppressed(g->name)) + ReportODRViolation(g, FindRegistrationSite(g), + l->g, FindRegistrationSite(l->g)); + } + } +} + +// Clang provides two different ways for global variables protection: +// it can poison the global itself or its private alias. In former +// case we may poison same symbol multiple times, that can help us to +// cheaply detect ODR violation: if we try to poison an already poisoned +// global, we have ODR violation error. +// In latter case, we poison each symbol exactly once, so we use special +// indicator symbol to perform similar check. +// In either case, compiler provides a special odr_indicator field to Global +// structure, that can contain two kinds of values: +// 1) Non-zero value. In this case, odr_indicator is an address of +// corresponding indicator variable for given global. +// 2) Zero. This means that we don't use private aliases for global variables +// and can freely check ODR violation with the first method. +// +// This routine chooses between two different methods of ODR violation +// detection. +static inline bool UseODRIndicator(const Global *g) { + return g->odr_indicator > 0; +} + +// Register a global variable. +// This function may be called more than once for every global +// so we store the globals in a map. 
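A minimal standalone model of the indicator-based check described above: every instrumented global is paired with a one-byte indicator that all images defining that symbol resolve to, the first registration flips it, and any later registration that finds it already set means a second definition exists. The real code additionally consults the registration list, size differences, and suppressions.

#include <cstdint>
#include <cstdio>

enum : uint8_t { kUnregistered = 0, kRegistered = 1 };

// Model of CheckODRViolationViaIndicator's core transition.
static bool RegisterViaIndicator(uint8_t *odr_indicator) {
  if (*odr_indicator == kUnregistered) {
    *odr_indicator = kRegistered;  // first definition of this symbol
    return true;
  }
  return false;  // indicator already set by another image: ODR violation
}

int main() {
  uint8_t indicator = kUnregistered;  // shared by all images defining the symbol
  std::printf("image 1: %s\n",
              RegisterViaIndicator(&indicator) ? "registered" : "ODR violation");
  std::printf("image 2: %s\n",
              RegisterViaIndicator(&indicator) ? "registered" : "ODR violation");
  return 0;
}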
+static void RegisterGlobal(const Global *g) { + CHECK(asan_inited); + if (flags()->report_globals >= 2) + ReportGlobal(*g, "Added"); + CHECK(flags()->report_globals); + CHECK(AddrIsInMem(g->beg)); + if (!AddrIsAlignedByGranularity(g->beg)) { + Report("The following global variable is not properly aligned.\n"); + Report("This may happen if another global with the same name\n"); + Report("resides in another non-instrumented module.\n"); + Report("Or the global comes from a C file built w/o -fno-common.\n"); + Report("In either case this is likely an ODR violation bug,\n"); + Report("but AddressSanitizer can not provide more details.\n"); + ReportODRViolation(g, FindRegistrationSite(g), g, FindRegistrationSite(g)); + CHECK(AddrIsAlignedByGranularity(g->beg)); + } + CHECK(AddrIsAlignedByGranularity(g->size_with_redzone)); + if (flags()->detect_odr_violation) { + // Try detecting ODR (One Definition Rule) violation, i.e. the situation + // where two globals with the same name are defined in different modules. + if (UseODRIndicator(g)) + CheckODRViolationViaIndicator(g); + else + CheckODRViolationViaPoisoning(g); + } + if (CanPoisonMemory()) + PoisonRedZones(*g); + ListOfGlobals *l = new(allocator_for_globals) ListOfGlobals; + l->g = g; + l->next = list_of_all_globals; + list_of_all_globals = l; + if (g->has_dynamic_init) { + if (!dynamic_init_globals) { + dynamic_init_globals = new (allocator_for_globals) VectorOfGlobals; + dynamic_init_globals->reserve(kDynamicInitGlobalsInitialCapacity); + } + DynInitGlobal dyn_global = { *g, false }; + dynamic_init_globals->push_back(dyn_global); + } +} + +static void UnregisterGlobal(const Global *g) { + CHECK(asan_inited); + if (flags()->report_globals >= 2) + ReportGlobal(*g, "Removed"); + CHECK(flags()->report_globals); + CHECK(AddrIsInMem(g->beg)); + CHECK(AddrIsAlignedByGranularity(g->beg)); + CHECK(AddrIsAlignedByGranularity(g->size_with_redzone)); + if (CanPoisonMemory()) + PoisonShadowForGlobal(g, 0); + // We unpoison the shadow memory for the global but we do not remove it from + // the list because that would require O(n^2) time with the current list + // implementation. It might not be worth doing anyway. + + // Release ODR indicator. + if (UseODRIndicator(g) && g->odr_indicator != UINTPTR_MAX) { + u8 *odr_indicator = reinterpret_cast(g->odr_indicator); + *odr_indicator = UNREGISTERED; + } +} + +void StopInitOrderChecking() { + BlockingMutexLock lock(&mu_for_globals); + if (!flags()->check_initialization_order || !dynamic_init_globals) + return; + flags()->check_initialization_order = false; + for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) { + DynInitGlobal &dyn_g = (*dynamic_init_globals)[i]; + const Global *g = &dyn_g.g; + // Unpoison the whole global. + PoisonShadowForGlobal(g, 0); + // Poison redzones back. + PoisonRedZones(*g); + } +} + +static bool IsASCII(unsigned char c) { return /*0x00 <= c &&*/ c <= 0x7F; } + +const char *MaybeDemangleGlobalName(const char *name) { + // We can spoil names of globals with C linkage, so use an heuristic + // approach to check if the name should be demangled. + bool should_demangle = false; + if (name[0] == '_' && name[1] == 'Z') + should_demangle = true; + else if (SANITIZER_WINDOWS && name[0] == '\01' && name[1] == '?') + should_demangle = true; + + return should_demangle ? Symbolizer::GetOrInit()->Demangle(name) : name; +} + +// Check if the global is a zero-terminated ASCII string. If so, print it. 
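RegisterGlobal above poisons a global's redzone via PoisonRedZones, which splits the work into a fully poisoned tail starting at the shadow-granularity-aligned end of the object and, when the size is not a multiple of the granularity, a partially addressable last granule. A worked example with assumed numbers (8-byte granularity, a 10-byte global, 64 bytes including redzone):

#include <cstdio>

int main() {
  // Assumed values for the illustration; the real constants come from the
  // shadow mapping and from the compiler-emitted __asan_global record.
  const unsigned long kGranularity = 8;        // SHADOW_GRANULARITY
  const unsigned long size = 10;               // g.size
  const unsigned long size_with_redzone = 64;  // g.size_with_redzone

  unsigned long aligned_size = (size + kGranularity - 1) & ~(kGranularity - 1);
  std::printf("fully poisoned redzone: [beg+%lu, beg+%lu)\n",
              aligned_size, size_with_redzone);               // [beg+16, beg+64)
  if (size != aligned_size) {
    unsigned long last_granule = size & ~(kGranularity - 1);  // RoundDownTo
    std::printf("partial granule at beg+%lu: %lu addressable byte(s)\n",
                last_granule, size % kGranularity);           // beg+8, 2 bytes
  }
  return 0;
}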
+void PrintGlobalNameIfASCII(InternalScopedString *str, const __asan_global &g) { + for (uptr p = g.beg; p < g.beg + g.size - 1; p++) { + unsigned char c = *(unsigned char *)p; + if (c == '\0' || !IsASCII(c)) return; + } + if (*(char *)(g.beg + g.size - 1) != '\0') return; + str->append(" '%s' is ascii string '%s'\n", MaybeDemangleGlobalName(g.name), + (char *)g.beg); +} + +static const char *GlobalFilename(const __asan_global &g) { + const char *res = g.module_name; + // Prefer the filename from source location, if is available. + if (g.location) res = g.location->filename; + CHECK(res); + return res; +} + +void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g) { + str->append("%s", GlobalFilename(g)); + if (!g.location) return; + if (g.location->line_no) str->append(":%d", g.location->line_no); + if (g.location->column_no) str->append(":%d", g.location->column_no); +} + +} // namespace __asan + +// ---------------------- Interface ---------------- {{{1 +using namespace __asan; + +// Apply __asan_register_globals to all globals found in the same loaded +// executable or shared library as `flag'. The flag tracks whether globals have +// already been registered or not for this image. +void __asan_register_image_globals(uptr *flag) { + if (*flag) + return; + AsanApplyToGlobals(__asan_register_globals, flag); + *flag = 1; +} + +// This mirrors __asan_register_image_globals. +void __asan_unregister_image_globals(uptr *flag) { + if (!*flag) + return; + AsanApplyToGlobals(__asan_unregister_globals, flag); + *flag = 0; +} + +void __asan_register_elf_globals(uptr *flag, void *start, void *stop) { + if (*flag) return; + if (!start) return; + CHECK_EQ(0, ((uptr)stop - (uptr)start) % sizeof(__asan_global)); + __asan_global *globals_start = (__asan_global*)start; + __asan_global *globals_stop = (__asan_global*)stop; + __asan_register_globals(globals_start, globals_stop - globals_start); + *flag = 1; +} + +void __asan_unregister_elf_globals(uptr *flag, void *start, void *stop) { + if (!*flag) return; + if (!start) return; + CHECK_EQ(0, ((uptr)stop - (uptr)start) % sizeof(__asan_global)); + __asan_global *globals_start = (__asan_global*)start; + __asan_global *globals_stop = (__asan_global*)stop; + __asan_unregister_globals(globals_start, globals_stop - globals_start); + *flag = 0; +} + +// Register an array of globals. +void __asan_register_globals(__asan_global *globals, uptr n) { + if (!flags()->report_globals) return; + GET_STACK_TRACE_MALLOC; + u32 stack_id = StackDepotPut(stack); + BlockingMutexLock lock(&mu_for_globals); + if (!global_registration_site_vector) { + global_registration_site_vector = + new (allocator_for_globals) GlobalRegistrationSiteVector; + global_registration_site_vector->reserve(128); + } + GlobalRegistrationSite site = {stack_id, &globals[0], &globals[n - 1]}; + global_registration_site_vector->push_back(site); + if (flags()->report_globals >= 2) { + PRINT_CURRENT_STACK(); + Printf("=== ID %d; %p %p\n", stack_id, &globals[0], &globals[n - 1]); + } + for (uptr i = 0; i < n; i++) { + if (SANITIZER_WINDOWS && globals[i].beg == 0) { + // The MSVC incremental linker may pad globals out to 256 bytes. As long + // as __asan_global is less than 256 bytes large and its size is a power + // of two, we can skip over the padding. 
+ static_assert( + sizeof(__asan_global) < 256 && + (sizeof(__asan_global) & (sizeof(__asan_global) - 1)) == 0, + "sizeof(__asan_global) incompatible with incremental linker padding"); + // If these are padding bytes, the rest of the global should be zero. + CHECK(globals[i].size == 0 && globals[i].size_with_redzone == 0 && + globals[i].name == nullptr && globals[i].module_name == nullptr && + globals[i].odr_indicator == 0); + continue; + } + RegisterGlobal(&globals[i]); + } + + // Poison the metadata. It should not be accessible to user code. + PoisonShadow(reinterpret_cast(globals), n * sizeof(__asan_global), + kAsanGlobalRedzoneMagic); +} + +// Unregister an array of globals. +// We must do this when a shared objects gets dlclosed. +void __asan_unregister_globals(__asan_global *globals, uptr n) { + if (!flags()->report_globals) return; + BlockingMutexLock lock(&mu_for_globals); + for (uptr i = 0; i < n; i++) { + if (SANITIZER_WINDOWS && globals[i].beg == 0) { + // Skip globals that look like padding from the MSVC incremental linker. + // See comment in __asan_register_globals. + continue; + } + UnregisterGlobal(&globals[i]); + } + + // Unpoison the metadata. + PoisonShadow(reinterpret_cast(globals), n * sizeof(__asan_global), 0); +} + +// This method runs immediately prior to dynamic initialization in each TU, +// when all dynamically initialized globals are unpoisoned. This method +// poisons all global variables not defined in this TU, so that a dynamic +// initializer can only touch global variables in the same TU. +void __asan_before_dynamic_init(const char *module_name) { + if (!flags()->check_initialization_order || + !CanPoisonMemory() || + !dynamic_init_globals) + return; + bool strict_init_order = flags()->strict_init_order; + CHECK(module_name); + CHECK(asan_inited); + BlockingMutexLock lock(&mu_for_globals); + if (flags()->report_globals >= 3) + Printf("DynInitPoison module: %s\n", module_name); + for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) { + DynInitGlobal &dyn_g = (*dynamic_init_globals)[i]; + const Global *g = &dyn_g.g; + if (dyn_g.initialized) + continue; + if (g->module_name != module_name) + PoisonShadowForGlobal(g, kAsanInitializationOrderMagic); + else if (!strict_init_order) + dyn_g.initialized = true; + } +} + +// This method runs immediately after dynamic initialization in each TU, when +// all dynamically initialized globals except for those defined in the current +// TU are poisoned. It simply unpoisons all dynamically initialized globals. +void __asan_after_dynamic_init() { + if (!flags()->check_initialization_order || + !CanPoisonMemory() || + !dynamic_init_globals) + return; + CHECK(asan_inited); + BlockingMutexLock lock(&mu_for_globals); + // FIXME: Optionally report that we're unpoisoning globals from a module. + for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) { + DynInitGlobal &dyn_g = (*dynamic_init_globals)[i]; + const Global *g = &dyn_g.g; + if (!dyn_g.initialized) { + // Unpoison the whole global. + PoisonShadowForGlobal(g, 0); + // Poison redzones back. + PoisonRedZones(*g); + } + } +} diff --git a/lib/asan/asan_globals_win.cc b/lib/asan/asan_globals_win.cc deleted file mode 100644 index bdce37f701e1..000000000000 --- a/lib/asan/asan_globals_win.cc +++ /dev/null @@ -1,61 +0,0 @@ -//===-- asan_globals_win.cc -----------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// Global registration code that is linked into every Windows DLL and EXE. -// -//===----------------------------------------------------------------------===// - -#include "asan_interface_internal.h" -#if SANITIZER_WINDOWS - -namespace __asan { - -#pragma section(".ASAN$GA", read, write) // NOLINT -#pragma section(".ASAN$GZ", read, write) // NOLINT -extern "C" __declspec(allocate(".ASAN$GA")) - ALIGNED(sizeof(__asan_global)) __asan_global __asan_globals_start = {}; -extern "C" __declspec(allocate(".ASAN$GZ")) - ALIGNED(sizeof(__asan_global)) __asan_global __asan_globals_end = {}; -#pragma comment(linker, "/merge:.ASAN=.data") - -static void call_on_globals(void (*hook)(__asan_global *, uptr)) { - __asan_global *start = &__asan_globals_start + 1; - __asan_global *end = &__asan_globals_end; - uptr bytediff = (uptr)end - (uptr)start; - if (bytediff % sizeof(__asan_global) != 0) { -#if defined(SANITIZER_DLL_THUNK) || defined(SANITIZER_DYNAMIC_RUNTIME_THUNK) - __debugbreak(); -#else - CHECK("corrupt asan global array"); -#endif - } - // We know end >= start because the linker sorts the portion after the dollar - // sign alphabetically. - uptr n = end - start; - hook(start, n); -} - -static void register_dso_globals() { - call_on_globals(&__asan_register_globals); -} - -static void unregister_dso_globals() { - call_on_globals(&__asan_unregister_globals); -} - -// Register globals -#pragma section(".CRT$XCU", long, read) // NOLINT -#pragma section(".CRT$XTX", long, read) // NOLINT -extern "C" __declspec(allocate(".CRT$XCU")) -void (*const __asan_dso_reg_hook)() = ®ister_dso_globals; -extern "C" __declspec(allocate(".CRT$XTX")) -void (*const __asan_dso_unreg_hook)() = &unregister_dso_globals; - -} // namespace __asan - -#endif // SANITIZER_WINDOWS diff --git a/lib/asan/asan_globals_win.cpp b/lib/asan/asan_globals_win.cpp new file mode 100644 index 000000000000..19af88ab12b4 --- /dev/null +++ b/lib/asan/asan_globals_win.cpp @@ -0,0 +1,61 @@ +//===-- asan_globals_win.cpp ----------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Global registration code that is linked into every Windows DLL and EXE. 
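call_on_globals above relies on a general MSVC technique: `$`-suffixed fragments of a grouped section are sorted alphabetically by the linker, so bookend variables allocated into the `$A` and `$Z` fragments bracket everything the compiler drops in between. A small standalone sketch of the same trick with made-up section names; zero entries are skipped, mirroring how padding is tolerated above.

// MSVC-specific sketch; build with cl.exe. Names are invented for the example.
#include <cstdio>

#pragma section(".DEMO$A", read, write)
#pragma section(".DEMO$M", read, write)
#pragma section(".DEMO$Z", read, write)

extern "C" __declspec(allocate(".DEMO$A")) int demo_start = 0;  // bookend
extern "C" __declspec(allocate(".DEMO$M")) int demo_entry1 = 11;
extern "C" __declspec(allocate(".DEMO$M")) int demo_entry2 = 22;
extern "C" __declspec(allocate(".DEMO$Z")) int demo_end = 0;    // bookend

int main() {
  // The linker lays the fragments out as .DEMO$A < .DEMO$M < .DEMO$Z, so
  // walking from just past the start bookend up to the end bookend visits
  // every entry; zero slots (possible padding) are skipped.
  for (int *p = &demo_start + 1; p < &demo_end; ++p)
    if (*p) std::printf("entry: %d\n", *p);
  return 0;
}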
+// +//===----------------------------------------------------------------------===// + +#include "asan_interface_internal.h" +#if SANITIZER_WINDOWS + +namespace __asan { + +#pragma section(".ASAN$GA", read, write) +#pragma section(".ASAN$GZ", read, write) +extern "C" __declspec(allocate(".ASAN$GA")) + ALIGNED(sizeof(__asan_global)) __asan_global __asan_globals_start = {}; +extern "C" __declspec(allocate(".ASAN$GZ")) + ALIGNED(sizeof(__asan_global)) __asan_global __asan_globals_end = {}; +#pragma comment(linker, "/merge:.ASAN=.data") + +static void call_on_globals(void (*hook)(__asan_global *, uptr)) { + __asan_global *start = &__asan_globals_start + 1; + __asan_global *end = &__asan_globals_end; + uptr bytediff = (uptr)end - (uptr)start; + if (bytediff % sizeof(__asan_global) != 0) { +#if defined(SANITIZER_DLL_THUNK) || defined(SANITIZER_DYNAMIC_RUNTIME_THUNK) + __debugbreak(); +#else + CHECK("corrupt asan global array"); +#endif + } + // We know end >= start because the linker sorts the portion after the dollar + // sign alphabetically. + uptr n = end - start; + hook(start, n); +} + +static void register_dso_globals() { + call_on_globals(&__asan_register_globals); +} + +static void unregister_dso_globals() { + call_on_globals(&__asan_unregister_globals); +} + +// Register globals +#pragma section(".CRT$XCU", long, read) +#pragma section(".CRT$XTX", long, read) +extern "C" __declspec(allocate(".CRT$XCU")) +void (*const __asan_dso_reg_hook)() = ®ister_dso_globals; +extern "C" __declspec(allocate(".CRT$XTX")) +void (*const __asan_dso_unreg_hook)() = &unregister_dso_globals; + +} // namespace __asan + +#endif // SANITIZER_WINDOWS diff --git a/lib/asan/asan_interceptors.cc b/lib/asan/asan_interceptors.cc deleted file mode 100644 index 7eea7d2f942b..000000000000 --- a/lib/asan/asan_interceptors.cc +++ /dev/null @@ -1,675 +0,0 @@ -//===-- asan_interceptors.cc ----------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Intercept various libc functions. -//===----------------------------------------------------------------------===// - -#include "asan_interceptors.h" -#include "asan_allocator.h" -#include "asan_internal.h" -#include "asan_mapping.h" -#include "asan_poisoning.h" -#include "asan_report.h" -#include "asan_stack.h" -#include "asan_stats.h" -#include "asan_suppressions.h" -#include "lsan/lsan_common.h" -#include "sanitizer_common/sanitizer_libc.h" - -// There is no general interception at all on Fuchsia and RTEMS. -// Only the functions in asan_interceptors_memintrinsics.cc are -// really defined to replace libc functions. -#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS - -#if SANITIZER_POSIX -#include "sanitizer_common/sanitizer_posix.h" -#endif - -#if ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION || \ - ASAN_INTERCEPT__SJLJ_UNWIND_RAISEEXCEPTION -#include -#endif - -#if defined(__i386) && SANITIZER_LINUX -#define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.1" -#elif defined(__mips__) && SANITIZER_LINUX -#define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.2" -#endif - -namespace __asan { - -#define ASAN_READ_STRING_OF_LEN(ctx, s, len, n) \ - ASAN_READ_RANGE((ctx), (s), \ - common_flags()->strict_string_checks ? 
(len) + 1 : (n)) - -#define ASAN_READ_STRING(ctx, s, n) \ - ASAN_READ_STRING_OF_LEN((ctx), (s), REAL(strlen)(s), (n)) - -static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) { -#if SANITIZER_INTERCEPT_STRNLEN - if (REAL(strnlen)) { - return REAL(strnlen)(s, maxlen); - } -#endif - return internal_strnlen(s, maxlen); -} - -void SetThreadName(const char *name) { - AsanThread *t = GetCurrentThread(); - if (t) - asanThreadRegistry().SetThreadName(t->tid(), name); -} - -int OnExit() { - if (CAN_SANITIZE_LEAKS && common_flags()->detect_leaks && - __lsan::HasReportedLeaks()) { - return common_flags()->exitcode; - } - // FIXME: ask frontend whether we need to return failure. - return 0; -} - -} // namespace __asan - -// ---------------------- Wrappers ---------------- {{{1 -using namespace __asan; // NOLINT - -DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr) -DECLARE_REAL_AND_INTERCEPTOR(void, free, void *) - -#define ASAN_INTERCEPTOR_ENTER(ctx, func) \ - AsanInterceptorContext _ctx = {#func}; \ - ctx = (void *)&_ctx; \ - (void) ctx; \ - -#define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name) -#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \ - ASAN_INTERCEPT_FUNC_VER(name, ver) -#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \ - ASAN_WRITE_RANGE(ctx, ptr, size) -#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \ - ASAN_READ_RANGE(ctx, ptr, size) -#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \ - ASAN_INTERCEPTOR_ENTER(ctx, func); \ - do { \ - if (asan_init_is_running) \ - return REAL(func)(__VA_ARGS__); \ - if (SANITIZER_MAC && UNLIKELY(!asan_inited)) \ - return REAL(func)(__VA_ARGS__); \ - ENSURE_ASAN_INITED(); \ - } while (false) -#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \ - do { \ - } while (false) -#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \ - do { \ - } while (false) -#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \ - do { \ - } while (false) -#define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \ - do { \ - } while (false) -#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) SetThreadName(name) -// Should be asanThreadRegistry().SetThreadNameByUserId(thread, name) -// But asan does not remember UserId's for threads (pthread_t); -// and remembers all ever existed threads, so the linear search by UserId -// can be slow. 
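Stripped of the macro layer, an interceptor assembled from these pieces always has the same shape: enter (bailing out to the real function while the runtime is still initializing), do the real work, then check the memory ranges the call touched. A simplified standalone model for a read-only string function; the helper names below are stand-ins, not actual ASan APIs, and the init guards handled by COMMON_INTERCEPTOR_ENTER are omitted.

#include <cstddef>
#include <cstdio>
#include <cstring>

// Stand-in for ASAN_READ_RANGE: the real runtime checks [p, p+n) against
// shadow memory and reports an error if any of those bytes are poisoned.
static void CheckReadRange(const void *p, size_t n) {
  (void)p;
  (void)n;
}

// Morally what an INTERCEPTOR for strlen reduces to in this model.
extern "C" size_t intercepted_strlen(const char *s) {
  size_t len = std::strlen(s);  // stands in for REAL(strlen)(s)
  CheckReadRange(s, len + 1);   // validate the bytes the call just read
  return len;
}

int main() {
  std::printf("%zu\n", intercepted_strlen("hello"));
  return 0;
}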
-#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \ - do { \ - } while (false) -#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name) -// Strict init-order checking is dlopen-hostile: -// https://github.com/google/sanitizers/issues/178 -#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \ - do { \ - if (flags()->strict_init_order) \ - StopInitOrderChecking(); \ - CheckNoDeepBind(filename, flag); \ - } while (false) -#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit() -#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) -#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() -#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited) -#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \ - if (AsanThread *t = GetCurrentThread()) { \ - *begin = t->tls_begin(); \ - *end = t->tls_end(); \ - } else { \ - *begin = *end = 0; \ - } - -#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \ - do { \ - ASAN_INTERCEPTOR_ENTER(ctx, memmove); \ - ASAN_MEMMOVE_IMPL(ctx, to, from, size); \ - } while (false) - -#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \ - do { \ - ASAN_INTERCEPTOR_ENTER(ctx, memcpy); \ - ASAN_MEMCPY_IMPL(ctx, to, from, size); \ - } while (false) - -#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \ - do { \ - ASAN_INTERCEPTOR_ENTER(ctx, memset); \ - ASAN_MEMSET_IMPL(ctx, block, c, size); \ - } while (false) - -#include "sanitizer_common/sanitizer_common_interceptors.inc" -#include "sanitizer_common/sanitizer_signal_interceptors.inc" - -// Syscall interceptors don't have contexts, we don't support suppressions -// for them. -#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) ASAN_READ_RANGE(nullptr, p, s) -#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) ASAN_WRITE_RANGE(nullptr, p, s) -#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \ - do { \ - (void)(p); \ - (void)(s); \ - } while (false) -#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \ - do { \ - (void)(p); \ - (void)(s); \ - } while (false) -#include "sanitizer_common/sanitizer_common_syscalls.inc" -#include "sanitizer_common/sanitizer_syscalls_netbsd.inc" - -struct ThreadStartParam { - atomic_uintptr_t t; - atomic_uintptr_t is_registered; -}; - -#if ASAN_INTERCEPT_PTHREAD_CREATE -static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) { - ThreadStartParam *param = reinterpret_cast(arg); - AsanThread *t = nullptr; - while ((t = reinterpret_cast( - atomic_load(¶m->t, memory_order_acquire))) == nullptr) - internal_sched_yield(); - SetCurrentThread(t); - return t->ThreadStart(GetTid(), ¶m->is_registered); -} - -INTERCEPTOR(int, pthread_create, void *thread, - void *attr, void *(*start_routine)(void*), void *arg) { - EnsureMainThreadIDIsCorrect(); - // Strict init-order checking is thread-hostile. - if (flags()->strict_init_order) - StopInitOrderChecking(); - GET_STACK_TRACE_THREAD; - int detached = 0; - if (attr) - REAL(pthread_attr_getdetachstate)(attr, &detached); - ThreadStartParam param; - atomic_store(¶m.t, 0, memory_order_relaxed); - atomic_store(¶m.is_registered, 0, memory_order_relaxed); - int result; - { - // Ignore all allocations made by pthread_create: thread stack/TLS may be - // stored by pthread for future reuse even after thread destruction, and - // the linked list it's stored in doesn't even hold valid pointers to the - // objects, the latter are calculated by obscure pointer arithmetic. 
-#if CAN_SANITIZE_LEAKS - __lsan::ScopedInterceptorDisabler disabler; -#endif - result = REAL(pthread_create)(thread, attr, asan_thread_start, ¶m); - } - if (result == 0) { - u32 current_tid = GetCurrentTidOrInvalid(); - AsanThread *t = - AsanThread::Create(start_routine, arg, current_tid, &stack, detached); - atomic_store(¶m.t, reinterpret_cast(t), memory_order_release); - // Wait until the AsanThread object is initialized and the ThreadRegistry - // entry is in "started" state. One reason for this is that after this - // interceptor exits, the child thread's stack may be the only thing holding - // the |arg| pointer. This may cause LSan to report a leak if leak checking - // happens at a point when the interceptor has already exited, but the stack - // range for the child thread is not yet known. - while (atomic_load(¶m.is_registered, memory_order_acquire) == 0) - internal_sched_yield(); - } - return result; -} - -INTERCEPTOR(int, pthread_join, void *t, void **arg) { - return real_pthread_join(t, arg); -} - -DEFINE_REAL_PTHREAD_FUNCTIONS -#endif // ASAN_INTERCEPT_PTHREAD_CREATE - -#if ASAN_INTERCEPT_SWAPCONTEXT -static void ClearShadowMemoryForContextStack(uptr stack, uptr ssize) { - // Align to page size. - uptr PageSize = GetPageSizeCached(); - uptr bottom = stack & ~(PageSize - 1); - ssize += stack - bottom; - ssize = RoundUpTo(ssize, PageSize); - static const uptr kMaxSaneContextStackSize = 1 << 22; // 4 Mb - if (AddrIsInMem(bottom) && ssize && ssize <= kMaxSaneContextStackSize) { - PoisonShadow(bottom, ssize, 0); - } -} - -INTERCEPTOR(int, swapcontext, struct ucontext_t *oucp, - struct ucontext_t *ucp) { - static bool reported_warning = false; - if (!reported_warning) { - Report("WARNING: ASan doesn't fully support makecontext/swapcontext " - "functions and may produce false positives in some cases!\n"); - reported_warning = true; - } - // Clear shadow memory for new context (it may share stack - // with current context). - uptr stack, ssize; - ReadContextStack(ucp, &stack, &ssize); - ClearShadowMemoryForContextStack(stack, ssize); -#if __has_attribute(__indirect_return__) && \ - (defined(__x86_64__) || defined(__i386__)) - int (*real_swapcontext)(struct ucontext_t *, struct ucontext_t *) - __attribute__((__indirect_return__)) - = REAL(swapcontext); - int res = real_swapcontext(oucp, ucp); -#else - int res = REAL(swapcontext)(oucp, ucp); -#endif - // swapcontext technically does not return, but program may swap context to - // "oucp" later, that would look as if swapcontext() returned 0. - // We need to clear shadow for ucp once again, as it may be in arbitrary - // state. 
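The pthread_create interceptor above hands the AsanThread object to the child through ThreadStartParam with a two-way handshake: the parent publishes the pointer only after the real pthread_create succeeded, then waits until the child has registered itself so its stack range is known before the interceptor returns. A standalone model of that handshake, using std::thread and std::atomic as stand-ins for the real primitives:

#include <atomic>
#include <cstdio>
#include <thread>

struct ThreadStartParamModel {
  std::atomic<void *> t{nullptr};     // AsanThread*, published by the parent
  std::atomic<int> is_registered{0};  // set by the child once registered
};

int main() {
  ThreadStartParamModel param;
  int fake_asan_thread = 0;  // stands in for the AsanThread object

  std::thread child([&param] {
    // asan_thread_start: spin until the parent hands us our AsanThread.
    while (param.t.load(std::memory_order_acquire) == nullptr)
      std::this_thread::yield();
    std::puts("child: AsanThread received, thread registered");
    param.is_registered.store(1, std::memory_order_release);
    // ... the user's start_routine would run here ...
  });

  // Parent: the spawn succeeded, so publish the AsanThread ...
  param.t.store(&fake_asan_thread, std::memory_order_release);
  // ... and wait for registration so the child's stack range is known before
  // this "interceptor" returns.
  while (param.is_registered.load(std::memory_order_acquire) == 0)
    std::this_thread::yield();
  std::puts("parent: child registered, returning from pthread_create");
  child.join();
  return 0;
}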
- ClearShadowMemoryForContextStack(stack, ssize); - return res; -} -#endif // ASAN_INTERCEPT_SWAPCONTEXT - -#if SANITIZER_NETBSD -#define longjmp __longjmp14 -#define siglongjmp __siglongjmp14 -#endif - -INTERCEPTOR(void, longjmp, void *env, int val) { - __asan_handle_no_return(); - REAL(longjmp)(env, val); -} - -#if ASAN_INTERCEPT__LONGJMP -INTERCEPTOR(void, _longjmp, void *env, int val) { - __asan_handle_no_return(); - REAL(_longjmp)(env, val); -} -#endif - -#if ASAN_INTERCEPT___LONGJMP_CHK -INTERCEPTOR(void, __longjmp_chk, void *env, int val) { - __asan_handle_no_return(); - REAL(__longjmp_chk)(env, val); -} -#endif - -#if ASAN_INTERCEPT_SIGLONGJMP -INTERCEPTOR(void, siglongjmp, void *env, int val) { - __asan_handle_no_return(); - REAL(siglongjmp)(env, val); -} -#endif - -#if ASAN_INTERCEPT___CXA_THROW -INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) { - CHECK(REAL(__cxa_throw)); - __asan_handle_no_return(); - REAL(__cxa_throw)(a, b, c); -} -#endif - -#if ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION -INTERCEPTOR(void, __cxa_rethrow_primary_exception, void *a) { - CHECK(REAL(__cxa_rethrow_primary_exception)); - __asan_handle_no_return(); - REAL(__cxa_rethrow_primary_exception)(a); -} -#endif - -#if ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION -INTERCEPTOR(_Unwind_Reason_Code, _Unwind_RaiseException, - _Unwind_Exception *object) { - CHECK(REAL(_Unwind_RaiseException)); - __asan_handle_no_return(); - return REAL(_Unwind_RaiseException)(object); -} -#endif - -#if ASAN_INTERCEPT__SJLJ_UNWIND_RAISEEXCEPTION -INTERCEPTOR(_Unwind_Reason_Code, _Unwind_SjLj_RaiseException, - _Unwind_Exception *object) { - CHECK(REAL(_Unwind_SjLj_RaiseException)); - __asan_handle_no_return(); - return REAL(_Unwind_SjLj_RaiseException)(object); -} -#endif - -#if ASAN_INTERCEPT_INDEX -# if ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX -INTERCEPTOR(char*, index, const char *string, int c) - ALIAS(WRAPPER_NAME(strchr)); -# else -# if SANITIZER_MAC -DECLARE_REAL(char*, index, const char *string, int c) -OVERRIDE_FUNCTION(index, strchr); -# else -DEFINE_REAL(char*, index, const char *string, int c) -# endif -# endif -#endif // ASAN_INTERCEPT_INDEX - -// For both strcat() and strncat() we need to check the validity of |to| -// argument irrespective of the |from| length. -INTERCEPTOR(char*, strcat, char *to, const char *from) { // NOLINT - void *ctx; - ASAN_INTERCEPTOR_ENTER(ctx, strcat); // NOLINT - ENSURE_ASAN_INITED(); - if (flags()->replace_str) { - uptr from_length = REAL(strlen)(from); - ASAN_READ_RANGE(ctx, from, from_length + 1); - uptr to_length = REAL(strlen)(to); - ASAN_READ_STRING_OF_LEN(ctx, to, to_length, to_length); - ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1); - // If the copying actually happens, the |from| string should not overlap - // with the resulting string starting at |to|, which has a length of - // to_length + from_length + 1. 
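The overlap rule stated above is plain interval arithmetic: after the copy, the destination occupies [to, to + to_length + from_length + 1), and the source range [from, from + from_length + 1) must not intersect it. A small worked example with a deliberately overlapping layout; the buffer and offsets are invented for the illustration.

#include <cstdio>

// Half-open interval overlap test, conceptually what CHECK_RANGES_OVERLAP does.
static bool RangesOverlap(const char *a, unsigned long a_size,
                          const char *b, unsigned long b_size) {
  return a < b + b_size && b < a + a_size;
}

int main() {
  char buf[32] = "abcdef";
  const char *to = buf;        // to_length = 6 ("abcdef")
  const char *from = buf + 3;  // from_length = 3 ("def"), inside buf
  unsigned long to_length = 6, from_length = 3;

  bool bad = RangesOverlap(to, to_length + from_length + 1,  // result of strcat
                           from, from_length + 1);           // source incl. NUL
  std::printf("%s\n", bad ? "would be reported as strcat-param-overlap" : "ok");
  return 0;
}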
- if (from_length > 0) { - CHECK_RANGES_OVERLAP("strcat", to, from_length + to_length + 1, - from, from_length + 1); - } - } - return REAL(strcat)(to, from); // NOLINT -} - -INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) { - void *ctx; - ASAN_INTERCEPTOR_ENTER(ctx, strncat); - ENSURE_ASAN_INITED(); - if (flags()->replace_str) { - uptr from_length = MaybeRealStrnlen(from, size); - uptr copy_length = Min(size, from_length + 1); - ASAN_READ_RANGE(ctx, from, copy_length); - uptr to_length = REAL(strlen)(to); - ASAN_READ_STRING_OF_LEN(ctx, to, to_length, to_length); - ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1); - if (from_length > 0) { - CHECK_RANGES_OVERLAP("strncat", to, to_length + copy_length + 1, - from, copy_length); - } - } - return REAL(strncat)(to, from, size); -} - -INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT - void *ctx; - ASAN_INTERCEPTOR_ENTER(ctx, strcpy); // NOLINT -#if SANITIZER_MAC - if (UNLIKELY(!asan_inited)) return REAL(strcpy)(to, from); // NOLINT -#endif - // strcpy is called from malloc_default_purgeable_zone() - // in __asan::ReplaceSystemAlloc() on Mac. - if (asan_init_is_running) { - return REAL(strcpy)(to, from); // NOLINT - } - ENSURE_ASAN_INITED(); - if (flags()->replace_str) { - uptr from_size = REAL(strlen)(from) + 1; - CHECK_RANGES_OVERLAP("strcpy", to, from_size, from, from_size); - ASAN_READ_RANGE(ctx, from, from_size); - ASAN_WRITE_RANGE(ctx, to, from_size); - } - return REAL(strcpy)(to, from); // NOLINT -} - -INTERCEPTOR(char*, strdup, const char *s) { - void *ctx; - ASAN_INTERCEPTOR_ENTER(ctx, strdup); - if (UNLIKELY(!asan_inited)) return internal_strdup(s); - ENSURE_ASAN_INITED(); - uptr length = REAL(strlen)(s); - if (flags()->replace_str) { - ASAN_READ_RANGE(ctx, s, length + 1); - } - GET_STACK_TRACE_MALLOC; - void *new_mem = asan_malloc(length + 1, &stack); - REAL(memcpy)(new_mem, s, length + 1); - return reinterpret_cast(new_mem); -} - -#if ASAN_INTERCEPT___STRDUP -INTERCEPTOR(char*, __strdup, const char *s) { - void *ctx; - ASAN_INTERCEPTOR_ENTER(ctx, strdup); - if (UNLIKELY(!asan_inited)) return internal_strdup(s); - ENSURE_ASAN_INITED(); - uptr length = REAL(strlen)(s); - if (flags()->replace_str) { - ASAN_READ_RANGE(ctx, s, length + 1); - } - GET_STACK_TRACE_MALLOC; - void *new_mem = asan_malloc(length + 1, &stack); - REAL(memcpy)(new_mem, s, length + 1); - return reinterpret_cast(new_mem); -} -#endif // ASAN_INTERCEPT___STRDUP - -INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) { - void *ctx; - ASAN_INTERCEPTOR_ENTER(ctx, strncpy); - ENSURE_ASAN_INITED(); - if (flags()->replace_str) { - uptr from_size = Min(size, MaybeRealStrnlen(from, size) + 1); - CHECK_RANGES_OVERLAP("strncpy", to, from_size, from, from_size); - ASAN_READ_RANGE(ctx, from, from_size); - ASAN_WRITE_RANGE(ctx, to, size); - } - return REAL(strncpy)(to, from, size); -} - -INTERCEPTOR(long, strtol, const char *nptr, // NOLINT - char **endptr, int base) { - void *ctx; - ASAN_INTERCEPTOR_ENTER(ctx, strtol); - ENSURE_ASAN_INITED(); - if (!flags()->replace_str) { - return REAL(strtol)(nptr, endptr, base); - } - char *real_endptr; - long result = REAL(strtol)(nptr, &real_endptr, base); // NOLINT - StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base); - return result; -} - -INTERCEPTOR(int, atoi, const char *nptr) { - void *ctx; - ASAN_INTERCEPTOR_ENTER(ctx, atoi); -#if SANITIZER_MAC - if (UNLIKELY(!asan_inited)) return REAL(atoi)(nptr); -#endif - ENSURE_ASAN_INITED(); - if (!flags()->replace_str) { - return 
REAL(atoi)(nptr); - } - char *real_endptr; - // "man atoi" tells that behavior of atoi(nptr) is the same as - // strtol(nptr, 0, 10), i.e. it sets errno to ERANGE if the - // parsed integer can't be stored in *long* type (even if it's - // different from int). So, we just imitate this behavior. - int result = REAL(strtol)(nptr, &real_endptr, 10); - FixRealStrtolEndptr(nptr, &real_endptr); - ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1); - return result; -} - -INTERCEPTOR(long, atol, const char *nptr) { // NOLINT - void *ctx; - ASAN_INTERCEPTOR_ENTER(ctx, atol); -#if SANITIZER_MAC - if (UNLIKELY(!asan_inited)) return REAL(atol)(nptr); -#endif - ENSURE_ASAN_INITED(); - if (!flags()->replace_str) { - return REAL(atol)(nptr); - } - char *real_endptr; - long result = REAL(strtol)(nptr, &real_endptr, 10); // NOLINT - FixRealStrtolEndptr(nptr, &real_endptr); - ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1); - return result; -} - -#if ASAN_INTERCEPT_ATOLL_AND_STRTOLL -INTERCEPTOR(long long, strtoll, const char *nptr, // NOLINT - char **endptr, int base) { - void *ctx; - ASAN_INTERCEPTOR_ENTER(ctx, strtoll); - ENSURE_ASAN_INITED(); - if (!flags()->replace_str) { - return REAL(strtoll)(nptr, endptr, base); - } - char *real_endptr; - long long result = REAL(strtoll)(nptr, &real_endptr, base); // NOLINT - StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base); - return result; -} - -INTERCEPTOR(long long, atoll, const char *nptr) { // NOLINT - void *ctx; - ASAN_INTERCEPTOR_ENTER(ctx, atoll); - ENSURE_ASAN_INITED(); - if (!flags()->replace_str) { - return REAL(atoll)(nptr); - } - char *real_endptr; - long long result = REAL(strtoll)(nptr, &real_endptr, 10); // NOLINT - FixRealStrtolEndptr(nptr, &real_endptr); - ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1); - return result; -} -#endif // ASAN_INTERCEPT_ATOLL_AND_STRTOLL - -#if ASAN_INTERCEPT___CXA_ATEXIT -static void AtCxaAtexit(void *unused) { - (void)unused; - StopInitOrderChecking(); -} - -INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg, - void *dso_handle) { -#if SANITIZER_MAC - if (UNLIKELY(!asan_inited)) return REAL(__cxa_atexit)(func, arg, dso_handle); -#endif - ENSURE_ASAN_INITED(); - int res = REAL(__cxa_atexit)(func, arg, dso_handle); - REAL(__cxa_atexit)(AtCxaAtexit, nullptr, nullptr); - return res; -} -#endif // ASAN_INTERCEPT___CXA_ATEXIT - -#if ASAN_INTERCEPT_VFORK -DEFINE_REAL(int, vfork) -DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(int, vfork) -#endif - -// ---------------------- InitializeAsanInterceptors ---------------- {{{1 -namespace __asan { -void InitializeAsanInterceptors() { - static bool was_called_once; - CHECK(!was_called_once); - was_called_once = true; - InitializeCommonInterceptors(); - InitializeSignalInterceptors(); - - // Intercept str* functions. - ASAN_INTERCEPT_FUNC(strcat); // NOLINT - ASAN_INTERCEPT_FUNC(strcpy); // NOLINT - ASAN_INTERCEPT_FUNC(strncat); - ASAN_INTERCEPT_FUNC(strncpy); - ASAN_INTERCEPT_FUNC(strdup); -#if ASAN_INTERCEPT___STRDUP - ASAN_INTERCEPT_FUNC(__strdup); -#endif -#if ASAN_INTERCEPT_INDEX && ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX - ASAN_INTERCEPT_FUNC(index); -#endif - - ASAN_INTERCEPT_FUNC(atoi); - ASAN_INTERCEPT_FUNC(atol); - ASAN_INTERCEPT_FUNC(strtol); -#if ASAN_INTERCEPT_ATOLL_AND_STRTOLL - ASAN_INTERCEPT_FUNC(atoll); - ASAN_INTERCEPT_FUNC(strtoll); -#endif - - // Intecept jump-related functions. 
- ASAN_INTERCEPT_FUNC(longjmp); - -#if ASAN_INTERCEPT_SWAPCONTEXT - ASAN_INTERCEPT_FUNC(swapcontext); -#endif -#if ASAN_INTERCEPT__LONGJMP - ASAN_INTERCEPT_FUNC(_longjmp); -#endif -#if ASAN_INTERCEPT___LONGJMP_CHK - ASAN_INTERCEPT_FUNC(__longjmp_chk); -#endif -#if ASAN_INTERCEPT_SIGLONGJMP - ASAN_INTERCEPT_FUNC(siglongjmp); -#endif - - // Intercept exception handling functions. -#if ASAN_INTERCEPT___CXA_THROW - ASAN_INTERCEPT_FUNC(__cxa_throw); -#endif -#if ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION - ASAN_INTERCEPT_FUNC(__cxa_rethrow_primary_exception); -#endif - // Indirectly intercept std::rethrow_exception. -#if ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION - INTERCEPT_FUNCTION(_Unwind_RaiseException); -#endif - // Indirectly intercept std::rethrow_exception. -#if ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION - INTERCEPT_FUNCTION(_Unwind_SjLj_RaiseException); -#endif - - // Intercept threading-related functions -#if ASAN_INTERCEPT_PTHREAD_CREATE -#if defined(ASAN_PTHREAD_CREATE_VERSION) - ASAN_INTERCEPT_FUNC_VER(pthread_create, ASAN_PTHREAD_CREATE_VERSION); -#else - ASAN_INTERCEPT_FUNC(pthread_create); -#endif - ASAN_INTERCEPT_FUNC(pthread_join); -#endif - - // Intercept atexit function. -#if ASAN_INTERCEPT___CXA_ATEXIT - ASAN_INTERCEPT_FUNC(__cxa_atexit); -#endif - -#if ASAN_INTERCEPT_VFORK - ASAN_INTERCEPT_FUNC(vfork); -#endif - - InitializePlatformInterceptors(); - - VReport(1, "AddressSanitizer: libc interceptors initialized\n"); -} - -} // namespace __asan - -#endif // !SANITIZER_FUCHSIA diff --git a/lib/asan/asan_interceptors.cpp b/lib/asan/asan_interceptors.cpp new file mode 100644 index 000000000000..b19cf25c7cd0 --- /dev/null +++ b/lib/asan/asan_interceptors.cpp @@ -0,0 +1,722 @@ +//===-- asan_interceptors.cpp ---------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Intercept various libc functions. +//===----------------------------------------------------------------------===// + +#include "asan_interceptors.h" +#include "asan_allocator.h" +#include "asan_internal.h" +#include "asan_mapping.h" +#include "asan_poisoning.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "asan_stats.h" +#include "asan_suppressions.h" +#include "lsan/lsan_common.h" +#include "sanitizer_common/sanitizer_libc.h" + +// There is no general interception at all on Fuchsia and RTEMS. +// Only the functions in asan_interceptors_memintrinsics.cpp are +// really defined to replace libc functions. +#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS + +#if SANITIZER_POSIX +#include "sanitizer_common/sanitizer_posix.h" +#endif + +#if ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION || \ + ASAN_INTERCEPT__SJLJ_UNWIND_RAISEEXCEPTION +#include +#endif + +#if defined(__i386) && SANITIZER_LINUX +#define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.1" +#elif defined(__mips__) && SANITIZER_LINUX +#define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.2" +#endif + +namespace __asan { + +#define ASAN_READ_STRING_OF_LEN(ctx, s, len, n) \ + ASAN_READ_RANGE((ctx), (s), \ + common_flags()->strict_string_checks ? 
(len) + 1 : (n)) + +#define ASAN_READ_STRING(ctx, s, n) \ + ASAN_READ_STRING_OF_LEN((ctx), (s), REAL(strlen)(s), (n)) + +static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) { +#if SANITIZER_INTERCEPT_STRNLEN + if (REAL(strnlen)) { + return REAL(strnlen)(s, maxlen); + } +#endif + return internal_strnlen(s, maxlen); +} + +void SetThreadName(const char *name) { + AsanThread *t = GetCurrentThread(); + if (t) + asanThreadRegistry().SetThreadName(t->tid(), name); +} + +int OnExit() { + if (CAN_SANITIZE_LEAKS && common_flags()->detect_leaks && + __lsan::HasReportedLeaks()) { + return common_flags()->exitcode; + } + // FIXME: ask frontend whether we need to return failure. + return 0; +} + +} // namespace __asan + +// ---------------------- Wrappers ---------------- {{{1 +using namespace __asan; + +DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr) +DECLARE_REAL_AND_INTERCEPTOR(void, free, void *) + +#define ASAN_INTERCEPTOR_ENTER(ctx, func) \ + AsanInterceptorContext _ctx = {#func}; \ + ctx = (void *)&_ctx; \ + (void) ctx; \ + +#define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name) +#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \ + ASAN_INTERCEPT_FUNC_VER(name, ver) +#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \ + ASAN_WRITE_RANGE(ctx, ptr, size) +#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \ + ASAN_READ_RANGE(ctx, ptr, size) +#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \ + ASAN_INTERCEPTOR_ENTER(ctx, func); \ + do { \ + if (asan_init_is_running) \ + return REAL(func)(__VA_ARGS__); \ + if (SANITIZER_MAC && UNLIKELY(!asan_inited)) \ + return REAL(func)(__VA_ARGS__); \ + ENSURE_ASAN_INITED(); \ + } while (false) +#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \ + do { \ + } while (false) +#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \ + do { \ + } while (false) +#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \ + do { \ + } while (false) +#define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \ + do { \ + } while (false) +#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) SetThreadName(name) +// Should be asanThreadRegistry().SetThreadNameByUserId(thread, name) +// But asan does not remember UserId's for threads (pthread_t); +// and remembers all ever existed threads, so the linear search by UserId +// can be slow. 
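Before the remaining COMMON_INTERCEPTOR_* definitions continue below, a note on the COMMON_INTERCEPTOR_ENTER macro defined above: it encodes the standard ASan interceptor prologue of forwarding straight to the real function while the runtime is still initializing, and only instrumenting once initialization has finished. The following standalone sketch (not part of this patch; real_foo and runtime_inited are invented stand-ins for REAL(func) and the asan_inited / asan_init_is_running flags) shows the same pattern:

    // Sketch only: "real_foo" and "runtime_inited" stand in for REAL(func)
    // and the initialization flags checked by COMMON_INTERCEPTOR_ENTER above.
    #include <cstdio>

    static bool runtime_inited = false;

    static int real_foo(int x) { return x + 1; }  // the uninstrumented implementation

    static int foo(int x) {                       // the interceptor
      // While the runtime is not ready, forward without any checks.
      if (!runtime_inited)
        return real_foo(x);
      // Normally shadow-memory checks on the arguments would happen here.
      std::printf("checked call: foo(%d)\n", x);
      return real_foo(x);
    }

    int main() {
      foo(1);                // early call: forwarded unchecked
      runtime_inited = true;
      foo(2);                // later call: goes through the checked path
    }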
+#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \ + do { \ + } while (false) +#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name) +// Strict init-order checking is dlopen-hostile: +// https://github.com/google/sanitizers/issues/178 +#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \ + do { \ + if (flags()->strict_init_order) \ + StopInitOrderChecking(); \ + CheckNoDeepBind(filename, flag); \ + } while (false) +#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit() +#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) +#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() +#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited) +#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \ + if (AsanThread *t = GetCurrentThread()) { \ + *begin = t->tls_begin(); \ + *end = t->tls_end(); \ + } else { \ + *begin = *end = 0; \ + } + +#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \ + do { \ + ASAN_INTERCEPTOR_ENTER(ctx, memmove); \ + ASAN_MEMMOVE_IMPL(ctx, to, from, size); \ + } while (false) + +#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \ + do { \ + ASAN_INTERCEPTOR_ENTER(ctx, memcpy); \ + ASAN_MEMCPY_IMPL(ctx, to, from, size); \ + } while (false) + +#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \ + do { \ + ASAN_INTERCEPTOR_ENTER(ctx, memset); \ + ASAN_MEMSET_IMPL(ctx, block, c, size); \ + } while (false) + +#if CAN_SANITIZE_LEAKS +#define COMMON_INTERCEPTOR_STRERROR() \ + __lsan::ScopedInterceptorDisabler disabler +#endif + +#include "sanitizer_common/sanitizer_common_interceptors.inc" +#include "sanitizer_common/sanitizer_signal_interceptors.inc" + +// Syscall interceptors don't have contexts, we don't support suppressions +// for them. +#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) ASAN_READ_RANGE(nullptr, p, s) +#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) ASAN_WRITE_RANGE(nullptr, p, s) +#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \ + do { \ + (void)(p); \ + (void)(s); \ + } while (false) +#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \ + do { \ + (void)(p); \ + (void)(s); \ + } while (false) +#include "sanitizer_common/sanitizer_common_syscalls.inc" +#include "sanitizer_common/sanitizer_syscalls_netbsd.inc" + +struct ThreadStartParam { + atomic_uintptr_t t; + atomic_uintptr_t is_registered; +}; + +#if ASAN_INTERCEPT_PTHREAD_CREATE +static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) { + ThreadStartParam *param = reinterpret_cast(arg); + AsanThread *t = nullptr; + while ((t = reinterpret_cast( + atomic_load(¶m->t, memory_order_acquire))) == nullptr) + internal_sched_yield(); + SetCurrentThread(t); + return t->ThreadStart(GetTid(), ¶m->is_registered); +} + +INTERCEPTOR(int, pthread_create, void *thread, + void *attr, void *(*start_routine)(void*), void *arg) { + EnsureMainThreadIDIsCorrect(); + // Strict init-order checking is thread-hostile. + if (flags()->strict_init_order) + StopInitOrderChecking(); + GET_STACK_TRACE_THREAD; + int detached = 0; + if (attr) + REAL(pthread_attr_getdetachstate)(attr, &detached); + ThreadStartParam param; + atomic_store(¶m.t, 0, memory_order_relaxed); + atomic_store(¶m.is_registered, 0, memory_order_relaxed); + int result; + { + // Ignore all allocations made by pthread_create: thread stack/TLS may be + // stored by pthread for future reuse even after thread destruction, and + // the linked list it's stored in doesn't even hold valid pointers to the + // objects, the latter are calculated by obscure pointer arithmetic. 
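(The LSan disabler described by the comment above, and the REAL(pthread_create) call it guards, continue just below.) The asan_thread_start wrapper shown earlier hands the AsanThread object from parent to child through ThreadStartParam: the child spins until the parent publishes the pointer with a release store, and the parent then waits for is_registered before returning. A reduced sketch of that publish/acknowledge handshake, using std::atomic in place of the runtime's atomics (all names invented, not part of this patch):

    #include <atomic>
    #include <cstdio>
    #include <thread>

    struct StartParam {
      std::atomic<void*> obj{nullptr};      // published by the parent
      std::atomic<bool> registered{false};  // acknowledged by the child
    };

    static void child(StartParam *p) {
      void *obj;
      while ((obj = p->obj.load(std::memory_order_acquire)) == nullptr)
        std::this_thread::yield();          // wait for the parent to publish
      std::printf("child received %p\n", obj);
      p->registered.store(true, std::memory_order_release);
    }

    int main() {
      StartParam param;
      std::thread t(child, &param);
      int payload = 42;
      param.obj.store(&payload, std::memory_order_release);  // publish to the child
      while (!param.registered.load(std::memory_order_acquire))
        std::this_thread::yield();          // wait for the child to acknowledge
      t.join();
    }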
+#if CAN_SANITIZE_LEAKS + __lsan::ScopedInterceptorDisabler disabler; +#endif + result = REAL(pthread_create)(thread, attr, asan_thread_start, ¶m); + } + if (result == 0) { + u32 current_tid = GetCurrentTidOrInvalid(); + AsanThread *t = + AsanThread::Create(start_routine, arg, current_tid, &stack, detached); + atomic_store(¶m.t, reinterpret_cast(t), memory_order_release); + // Wait until the AsanThread object is initialized and the ThreadRegistry + // entry is in "started" state. One reason for this is that after this + // interceptor exits, the child thread's stack may be the only thing holding + // the |arg| pointer. This may cause LSan to report a leak if leak checking + // happens at a point when the interceptor has already exited, but the stack + // range for the child thread is not yet known. + while (atomic_load(¶m.is_registered, memory_order_acquire) == 0) + internal_sched_yield(); + } + return result; +} + +INTERCEPTOR(int, pthread_join, void *t, void **arg) { + return real_pthread_join(t, arg); +} + +DEFINE_REAL_PTHREAD_FUNCTIONS +#endif // ASAN_INTERCEPT_PTHREAD_CREATE + +#if ASAN_INTERCEPT_SWAPCONTEXT +static void ClearShadowMemoryForContextStack(uptr stack, uptr ssize) { + // Align to page size. + uptr PageSize = GetPageSizeCached(); + uptr bottom = stack & ~(PageSize - 1); + ssize += stack - bottom; + ssize = RoundUpTo(ssize, PageSize); + static const uptr kMaxSaneContextStackSize = 1 << 22; // 4 Mb + if (AddrIsInMem(bottom) && ssize && ssize <= kMaxSaneContextStackSize) { + PoisonShadow(bottom, ssize, 0); + } +} + +INTERCEPTOR(int, swapcontext, struct ucontext_t *oucp, + struct ucontext_t *ucp) { + static bool reported_warning = false; + if (!reported_warning) { + Report("WARNING: ASan doesn't fully support makecontext/swapcontext " + "functions and may produce false positives in some cases!\n"); + reported_warning = true; + } + // Clear shadow memory for new context (it may share stack + // with current context). + uptr stack, ssize; + ReadContextStack(ucp, &stack, &ssize); + ClearShadowMemoryForContextStack(stack, ssize); +#if __has_attribute(__indirect_return__) && \ + (defined(__x86_64__) || defined(__i386__)) + int (*real_swapcontext)(struct ucontext_t *, struct ucontext_t *) + __attribute__((__indirect_return__)) + = REAL(swapcontext); + int res = real_swapcontext(oucp, ucp); +#else + int res = REAL(swapcontext)(oucp, ucp); +#endif + // swapcontext technically does not return, but program may swap context to + // "oucp" later, that would look as if swapcontext() returned 0. + // We need to clear shadow for ucp once again, as it may be in arbitrary + // state. 
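(The second ClearShadowMemoryForContextStack call that the comment above refers to follows below.) For reference, the page rounding inside ClearShadowMemoryForContextStack widens [stack, stack + ssize) to whole pages before unpoisoning; a worked example of that arithmetic, assuming 4096-byte pages and hypothetical values:

    #include <cstdio>

    int main() {
      const unsigned long long kPageSize = 4096;
      unsigned long long stack = 0x7f0000001230ULL;  // hypothetical stack pointer
      unsigned long long ssize = 10000;              // hypothetical stack size

      unsigned long long bottom = stack & ~(kPageSize - 1);  // round down to a page boundary
      ssize += stack - bottom;                               // widen the range downward
      ssize = (ssize + kPageSize - 1) & ~(kPageSize - 1);    // round the size up to whole pages

      std::printf("bottom = %#llx, size = %llu\n", bottom, ssize);  // 0x7f0000001000, 12288
    }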
+ ClearShadowMemoryForContextStack(stack, ssize); + return res; +} +#endif // ASAN_INTERCEPT_SWAPCONTEXT + +#if SANITIZER_NETBSD +#define longjmp __longjmp14 +#define siglongjmp __siglongjmp14 +#endif + +INTERCEPTOR(void, longjmp, void *env, int val) { + __asan_handle_no_return(); + REAL(longjmp)(env, val); +} + +#if ASAN_INTERCEPT__LONGJMP +INTERCEPTOR(void, _longjmp, void *env, int val) { + __asan_handle_no_return(); + REAL(_longjmp)(env, val); +} +#endif + +#if ASAN_INTERCEPT___LONGJMP_CHK +INTERCEPTOR(void, __longjmp_chk, void *env, int val) { + __asan_handle_no_return(); + REAL(__longjmp_chk)(env, val); +} +#endif + +#if ASAN_INTERCEPT_SIGLONGJMP +INTERCEPTOR(void, siglongjmp, void *env, int val) { + __asan_handle_no_return(); + REAL(siglongjmp)(env, val); +} +#endif + +#if ASAN_INTERCEPT___CXA_THROW +INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) { + CHECK(REAL(__cxa_throw)); + __asan_handle_no_return(); + REAL(__cxa_throw)(a, b, c); +} +#endif + +#if ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION +INTERCEPTOR(void, __cxa_rethrow_primary_exception, void *a) { + CHECK(REAL(__cxa_rethrow_primary_exception)); + __asan_handle_no_return(); + REAL(__cxa_rethrow_primary_exception)(a); +} +#endif + +#if ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION +INTERCEPTOR(_Unwind_Reason_Code, _Unwind_RaiseException, + _Unwind_Exception *object) { + CHECK(REAL(_Unwind_RaiseException)); + __asan_handle_no_return(); + return REAL(_Unwind_RaiseException)(object); +} +#endif + +#if ASAN_INTERCEPT__SJLJ_UNWIND_RAISEEXCEPTION +INTERCEPTOR(_Unwind_Reason_Code, _Unwind_SjLj_RaiseException, + _Unwind_Exception *object) { + CHECK(REAL(_Unwind_SjLj_RaiseException)); + __asan_handle_no_return(); + return REAL(_Unwind_SjLj_RaiseException)(object); +} +#endif + +#if ASAN_INTERCEPT_INDEX +# if ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX +INTERCEPTOR(char*, index, const char *string, int c) + ALIAS(WRAPPER_NAME(strchr)); +# else +# if SANITIZER_MAC +DECLARE_REAL(char*, index, const char *string, int c) +OVERRIDE_FUNCTION(index, strchr); +# else +DEFINE_REAL(char*, index, const char *string, int c) +# endif +# endif +#endif // ASAN_INTERCEPT_INDEX + +// For both strcat() and strncat() we need to check the validity of |to| +// argument irrespective of the |from| length. + INTERCEPTOR(char *, strcat, char *to, const char *from) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strcat); + ENSURE_ASAN_INITED(); + if (flags()->replace_str) { + uptr from_length = REAL(strlen)(from); + ASAN_READ_RANGE(ctx, from, from_length + 1); + uptr to_length = REAL(strlen)(to); + ASAN_READ_STRING_OF_LEN(ctx, to, to_length, to_length); + ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1); + // If the copying actually happens, the |from| string should not overlap + // with the resulting string starting at |to|, which has a length of + // to_length + from_length + 1. 
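(The CHECK_RANGES_OVERLAP call itself follows below.) As an illustration of the two ranges it compares, here is a small self-contained version of the interval test with hypothetical values; the real macro reports an ASan error instead of printing:

    #include <cstdio>

    static bool RangesOverlap(const char *a, unsigned long a_len,
                              const char *b, unsigned long b_len) {
      return !(a + a_len <= b || b + b_len <= a);
    }

    int main() {
      char buf[32] = "abc";           // |to| holds "abc", so to_length == 3
      const char *from = buf + 1;     // |from| deliberately overlaps |to|
      unsigned long to_length = 3, from_length = 2;  // "bc"
      // The resulting string occupies [to, to + to_length + from_length + 1).
      bool bad = RangesOverlap(buf, to_length + from_length + 1,
                               from, from_length + 1);
      std::printf("overlap: %s\n", bad ? "yes" : "no");  // prints "yes"
    }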
+ if (from_length > 0) { + CHECK_RANGES_OVERLAP("strcat", to, from_length + to_length + 1, from, + from_length + 1); + } + } + return REAL(strcat)(to, from); + } + +INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strncat); + ENSURE_ASAN_INITED(); + if (flags()->replace_str) { + uptr from_length = MaybeRealStrnlen(from, size); + uptr copy_length = Min(size, from_length + 1); + ASAN_READ_RANGE(ctx, from, copy_length); + uptr to_length = REAL(strlen)(to); + ASAN_READ_STRING_OF_LEN(ctx, to, to_length, to_length); + ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1); + if (from_length > 0) { + CHECK_RANGES_OVERLAP("strncat", to, to_length + copy_length + 1, + from, copy_length); + } + } + return REAL(strncat)(to, from, size); +} + +INTERCEPTOR(char *, strcpy, char *to, const char *from) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strcpy); +#if SANITIZER_MAC + if (UNLIKELY(!asan_inited)) + return REAL(strcpy)(to, from); +#endif + // strcpy is called from malloc_default_purgeable_zone() + // in __asan::ReplaceSystemAlloc() on Mac. + if (asan_init_is_running) { + return REAL(strcpy)(to, from); + } + ENSURE_ASAN_INITED(); + if (flags()->replace_str) { + uptr from_size = REAL(strlen)(from) + 1; + CHECK_RANGES_OVERLAP("strcpy", to, from_size, from, from_size); + ASAN_READ_RANGE(ctx, from, from_size); + ASAN_WRITE_RANGE(ctx, to, from_size); + } + return REAL(strcpy)(to, from); +} + +INTERCEPTOR(char*, strdup, const char *s) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strdup); + if (UNLIKELY(!asan_inited)) return internal_strdup(s); + ENSURE_ASAN_INITED(); + uptr length = REAL(strlen)(s); + if (flags()->replace_str) { + ASAN_READ_RANGE(ctx, s, length + 1); + } + GET_STACK_TRACE_MALLOC; + void *new_mem = asan_malloc(length + 1, &stack); + REAL(memcpy)(new_mem, s, length + 1); + return reinterpret_cast(new_mem); +} + +#if ASAN_INTERCEPT___STRDUP +INTERCEPTOR(char*, __strdup, const char *s) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strdup); + if (UNLIKELY(!asan_inited)) return internal_strdup(s); + ENSURE_ASAN_INITED(); + uptr length = REAL(strlen)(s); + if (flags()->replace_str) { + ASAN_READ_RANGE(ctx, s, length + 1); + } + GET_STACK_TRACE_MALLOC; + void *new_mem = asan_malloc(length + 1, &stack); + REAL(memcpy)(new_mem, s, length + 1); + return reinterpret_cast(new_mem); +} +#endif // ASAN_INTERCEPT___STRDUP + +INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strncpy); + ENSURE_ASAN_INITED(); + if (flags()->replace_str) { + uptr from_size = Min(size, MaybeRealStrnlen(from, size) + 1); + CHECK_RANGES_OVERLAP("strncpy", to, from_size, from, from_size); + ASAN_READ_RANGE(ctx, from, from_size); + ASAN_WRITE_RANGE(ctx, to, size); + } + return REAL(strncpy)(to, from, size); +} + +INTERCEPTOR(long, strtol, const char *nptr, char **endptr, int base) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strtol); + ENSURE_ASAN_INITED(); + if (!flags()->replace_str) { + return REAL(strtol)(nptr, endptr, base); + } + char *real_endptr; + long result = REAL(strtol)(nptr, &real_endptr, base); + StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base); + return result; +} + +INTERCEPTOR(int, atoi, const char *nptr) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, atoi); +#if SANITIZER_MAC + if (UNLIKELY(!asan_inited)) return REAL(atoi)(nptr); +#endif + ENSURE_ASAN_INITED(); + if (!flags()->replace_str) { + return REAL(atoi)(nptr); + } + char *real_endptr; + // "man atoi" tells that behavior of 
atoi(nptr) is the same as + // strtol(nptr, 0, 10), i.e. it sets errno to ERANGE if the + // parsed integer can't be stored in *long* type (even if it's + // different from int). So, we just imitate this behavior. + int result = REAL(strtol)(nptr, &real_endptr, 10); + FixRealStrtolEndptr(nptr, &real_endptr); + ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1); + return result; +} + +INTERCEPTOR(long, atol, const char *nptr) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, atol); +#if SANITIZER_MAC + if (UNLIKELY(!asan_inited)) return REAL(atol)(nptr); +#endif + ENSURE_ASAN_INITED(); + if (!flags()->replace_str) { + return REAL(atol)(nptr); + } + char *real_endptr; + long result = REAL(strtol)(nptr, &real_endptr, 10); + FixRealStrtolEndptr(nptr, &real_endptr); + ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1); + return result; +} + +#if ASAN_INTERCEPT_ATOLL_AND_STRTOLL +INTERCEPTOR(long long, strtoll, const char *nptr, char **endptr, int base) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strtoll); + ENSURE_ASAN_INITED(); + if (!flags()->replace_str) { + return REAL(strtoll)(nptr, endptr, base); + } + char *real_endptr; + long long result = REAL(strtoll)(nptr, &real_endptr, base); + StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base); + return result; +} + +INTERCEPTOR(long long, atoll, const char *nptr) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, atoll); + ENSURE_ASAN_INITED(); + if (!flags()->replace_str) { + return REAL(atoll)(nptr); + } + char *real_endptr; + long long result = REAL(strtoll)(nptr, &real_endptr, 10); + FixRealStrtolEndptr(nptr, &real_endptr); + ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1); + return result; +} +#endif // ASAN_INTERCEPT_ATOLL_AND_STRTOLL + +#if ASAN_INTERCEPT___CXA_ATEXIT || ASAN_INTERCEPT_ATEXIT +static void AtCxaAtexit(void *unused) { + (void)unused; + StopInitOrderChecking(); +} +#endif + +#if ASAN_INTERCEPT___CXA_ATEXIT +INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg, + void *dso_handle) { +#if SANITIZER_MAC + if (UNLIKELY(!asan_inited)) return REAL(__cxa_atexit)(func, arg, dso_handle); +#endif + ENSURE_ASAN_INITED(); +#if CAN_SANITIZE_LEAKS + __lsan::ScopedInterceptorDisabler disabler; +#endif + int res = REAL(__cxa_atexit)(func, arg, dso_handle); + REAL(__cxa_atexit)(AtCxaAtexit, nullptr, nullptr); + return res; +} +#endif // ASAN_INTERCEPT___CXA_ATEXIT + +#if ASAN_INTERCEPT_ATEXIT +INTERCEPTOR(int, atexit, void (*func)()) { + ENSURE_ASAN_INITED(); +#if CAN_SANITIZE_LEAKS + __lsan::ScopedInterceptorDisabler disabler; +#endif + // Avoid calling real atexit as it is unrechable on at least on Linux. 
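(The __cxa_atexit-based registration follows below.) For reference, a standalone sketch of registering an exit handler directly through the same C++ ABI entry point; the extern "C" declaration matches the Itanium ABI and is not taken from this patch:

    #include <cstdio>

    extern "C" int __cxa_atexit(void (*func)(void *), void *arg, void *dso_handle);

    static void on_exit_handler(void *unused) {
      (void)unused;
      std::puts("exit handler ran");
    }

    int main() {
      // The interceptor below does the same for plain atexit() handlers by
      // casting them into this void(void *) shape and passing a null argument.
      __cxa_atexit(on_exit_handler, nullptr, nullptr);
      std::puts("main returning");
    }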
+ int res = REAL(__cxa_atexit)((void (*)(void *a))func, nullptr, nullptr); + REAL(__cxa_atexit)(AtCxaAtexit, nullptr, nullptr); + return res; +} +#endif + +#if ASAN_INTERCEPT_PTHREAD_ATFORK +extern "C" { +extern int _pthread_atfork(void (*prepare)(), void (*parent)(), + void (*child)()); +}; + +INTERCEPTOR(int, pthread_atfork, void (*prepare)(), void (*parent)(), + void (*child)()) { +#if CAN_SANITIZE_LEAKS + __lsan::ScopedInterceptorDisabler disabler; +#endif + // REAL(pthread_atfork) cannot be called due to symbol indirections at least + // on NetBSD + return _pthread_atfork(prepare, parent, child); +} +#endif + +#if ASAN_INTERCEPT_VFORK +DEFINE_REAL(int, vfork) +DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(int, vfork) +#endif + +// ---------------------- InitializeAsanInterceptors ---------------- {{{1 +namespace __asan { +void InitializeAsanInterceptors() { + static bool was_called_once; + CHECK(!was_called_once); + was_called_once = true; + InitializeCommonInterceptors(); + InitializeSignalInterceptors(); + + // Intercept str* functions. + ASAN_INTERCEPT_FUNC(strcat); + ASAN_INTERCEPT_FUNC(strcpy); + ASAN_INTERCEPT_FUNC(strncat); + ASAN_INTERCEPT_FUNC(strncpy); + ASAN_INTERCEPT_FUNC(strdup); +#if ASAN_INTERCEPT___STRDUP + ASAN_INTERCEPT_FUNC(__strdup); +#endif +#if ASAN_INTERCEPT_INDEX && ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX + ASAN_INTERCEPT_FUNC(index); +#endif + + ASAN_INTERCEPT_FUNC(atoi); + ASAN_INTERCEPT_FUNC(atol); + ASAN_INTERCEPT_FUNC(strtol); +#if ASAN_INTERCEPT_ATOLL_AND_STRTOLL + ASAN_INTERCEPT_FUNC(atoll); + ASAN_INTERCEPT_FUNC(strtoll); +#endif + + // Intecept jump-related functions. + ASAN_INTERCEPT_FUNC(longjmp); + +#if ASAN_INTERCEPT_SWAPCONTEXT + ASAN_INTERCEPT_FUNC(swapcontext); +#endif +#if ASAN_INTERCEPT__LONGJMP + ASAN_INTERCEPT_FUNC(_longjmp); +#endif +#if ASAN_INTERCEPT___LONGJMP_CHK + ASAN_INTERCEPT_FUNC(__longjmp_chk); +#endif +#if ASAN_INTERCEPT_SIGLONGJMP + ASAN_INTERCEPT_FUNC(siglongjmp); +#endif + + // Intercept exception handling functions. +#if ASAN_INTERCEPT___CXA_THROW + ASAN_INTERCEPT_FUNC(__cxa_throw); +#endif +#if ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION + ASAN_INTERCEPT_FUNC(__cxa_rethrow_primary_exception); +#endif + // Indirectly intercept std::rethrow_exception. +#if ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION + INTERCEPT_FUNCTION(_Unwind_RaiseException); +#endif + // Indirectly intercept std::rethrow_exception. +#if ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION + INTERCEPT_FUNCTION(_Unwind_SjLj_RaiseException); +#endif + + // Intercept threading-related functions +#if ASAN_INTERCEPT_PTHREAD_CREATE +#if defined(ASAN_PTHREAD_CREATE_VERSION) + ASAN_INTERCEPT_FUNC_VER(pthread_create, ASAN_PTHREAD_CREATE_VERSION); +#else + ASAN_INTERCEPT_FUNC(pthread_create); +#endif + ASAN_INTERCEPT_FUNC(pthread_join); +#endif + + // Intercept atexit function. 
+#if ASAN_INTERCEPT___CXA_ATEXIT + ASAN_INTERCEPT_FUNC(__cxa_atexit); +#endif + +#if ASAN_INTERCEPT_ATEXIT + ASAN_INTERCEPT_FUNC(atexit); +#endif + +#if ASAN_INTERCEPT_PTHREAD_ATFORK + ASAN_INTERCEPT_FUNC(pthread_atfork); +#endif + +#if ASAN_INTERCEPT_VFORK + ASAN_INTERCEPT_FUNC(vfork); +#endif + + InitializePlatformInterceptors(); + + VReport(1, "AddressSanitizer: libc interceptors initialized\n"); +} + +} // namespace __asan + +#endif // !SANITIZER_FUCHSIA diff --git a/lib/asan/asan_interceptors.h b/lib/asan/asan_interceptors.h index 381b03984191..344a64bd83d3 100644 --- a/lib/asan/asan_interceptors.h +++ b/lib/asan/asan_interceptors.h @@ -8,7 +8,7 @@ // // This file is a part of AddressSanitizer, an address sanity checker. // -// ASan-private header for asan_interceptors.cc +// ASan-private header for asan_interceptors.cpp //===----------------------------------------------------------------------===// #ifndef ASAN_INTERCEPTORS_H #define ASAN_INTERCEPTORS_H @@ -99,6 +99,12 @@ void InitializePlatformInterceptors(); # define ASAN_INTERCEPT___CXA_ATEXIT 0 #endif +#if SANITIZER_NETBSD +# define ASAN_INTERCEPT_ATEXIT 1 +#else +# define ASAN_INTERCEPT_ATEXIT 0 +#endif + #if SANITIZER_LINUX && !SANITIZER_ANDROID # define ASAN_INTERCEPT___STRDUP 1 #else @@ -112,6 +118,12 @@ void InitializePlatformInterceptors(); # define ASAN_INTERCEPT_VFORK 0 #endif +#if SANITIZER_NETBSD +# define ASAN_INTERCEPT_PTHREAD_ATFORK 1 +#else +# define ASAN_INTERCEPT_PTHREAD_ATFORK 0 +#endif + DECLARE_REAL(int, memcmp, const void *a1, const void *a2, uptr size) DECLARE_REAL(char*, strchr, const char *str, int c) DECLARE_REAL(SIZE_T, strlen, const char *s) diff --git a/lib/asan/asan_interceptors_memintrinsics.cc b/lib/asan/asan_interceptors_memintrinsics.cc deleted file mode 100644 index e17f9ba4aab5..000000000000 --- a/lib/asan/asan_interceptors_memintrinsics.cc +++ /dev/null @@ -1,43 +0,0 @@ -//===-- asan_interceptors_memintrinsics.cc --------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===---------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// ASan versions of memcpy, memmove, and memset. -//===---------------------------------------------------------------------===// - -#include "asan_interceptors_memintrinsics.h" -#include "asan_report.h" -#include "asan_stack.h" -#include "asan_suppressions.h" - -using namespace __asan; // NOLINT - -void *__asan_memcpy(void *to, const void *from, uptr size) { - ASAN_MEMCPY_IMPL(nullptr, to, from, size); -} - -void *__asan_memset(void *block, int c, uptr size) { - ASAN_MEMSET_IMPL(nullptr, block, c, size); -} - -void *__asan_memmove(void *to, const void *from, uptr size) { - ASAN_MEMMOVE_IMPL(nullptr, to, from, size); -} - -#if SANITIZER_FUCHSIA || SANITIZER_RTEMS - -// Fuchsia and RTEMS don't use sanitizer_common_interceptors.inc, but -// the only things there it wants are these three. Just define them -// as aliases here rather than repeating the contents. 
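(The three alias declarations that the comment above announces follow below.) A self-contained sketch of the same [[gnu::alias]] mechanism with invented names; it assumes GCC or Clang on an ELF target, and the alias target must be defined in the same translation unit:

    #include <cstdio>

    extern "C" void impl_hello() { std::puts("hello from impl"); }

    // "hello" becomes a second name for the very same function body.
    extern "C" decltype(impl_hello) hello[[gnu::alias("impl_hello")]];

    int main() {
      hello();  // resolves to impl_hello through the alias
    }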
- -extern "C" decltype(__asan_memcpy) memcpy[[gnu::alias("__asan_memcpy")]]; -extern "C" decltype(__asan_memmove) memmove[[gnu::alias("__asan_memmove")]]; -extern "C" decltype(__asan_memset) memset[[gnu::alias("__asan_memset")]]; - -#endif // SANITIZER_FUCHSIA || SANITIZER_RTEMS diff --git a/lib/asan/asan_interceptors_memintrinsics.cpp b/lib/asan/asan_interceptors_memintrinsics.cpp new file mode 100644 index 000000000000..ccdd5159042c --- /dev/null +++ b/lib/asan/asan_interceptors_memintrinsics.cpp @@ -0,0 +1,43 @@ +//===-- asan_interceptors_memintrinsics.cpp -------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===---------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan versions of memcpy, memmove, and memset. +//===---------------------------------------------------------------------===// + +#include "asan_interceptors_memintrinsics.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "asan_suppressions.h" + +using namespace __asan; + +void *__asan_memcpy(void *to, const void *from, uptr size) { + ASAN_MEMCPY_IMPL(nullptr, to, from, size); +} + +void *__asan_memset(void *block, int c, uptr size) { + ASAN_MEMSET_IMPL(nullptr, block, c, size); +} + +void *__asan_memmove(void *to, const void *from, uptr size) { + ASAN_MEMMOVE_IMPL(nullptr, to, from, size); +} + +#if SANITIZER_FUCHSIA || SANITIZER_RTEMS + +// Fuchsia and RTEMS don't use sanitizer_common_interceptors.inc, but +// the only things there it wants are these three. Just define them +// as aliases here rather than repeating the contents. + +extern "C" decltype(__asan_memcpy) memcpy[[gnu::alias("__asan_memcpy")]]; +extern "C" decltype(__asan_memmove) memmove[[gnu::alias("__asan_memmove")]]; +extern "C" decltype(__asan_memset) memset[[gnu::alias("__asan_memset")]]; + +#endif // SANITIZER_FUCHSIA || SANITIZER_RTEMS diff --git a/lib/asan/asan_interceptors_memintrinsics.h b/lib/asan/asan_interceptors_memintrinsics.h index 1fd65fe24953..632f0515a9eb 100644 --- a/lib/asan/asan_interceptors_memintrinsics.h +++ b/lib/asan/asan_interceptors_memintrinsics.h @@ -8,7 +8,7 @@ // // This file is a part of AddressSanitizer, an address sanity checker. // -// ASan-private header for asan_memintrin.cc +// ASan-private header for asan_interceptors_memintrinsics.cpp //===---------------------------------------------------------------------===// #ifndef ASAN_MEMINTRIN_H #define ASAN_MEMINTRIN_H diff --git a/lib/asan/asan_internal.h b/lib/asan/asan_internal.h index e4f771079293..72a4c3f22ff1 100644 --- a/lib/asan/asan_internal.h +++ b/lib/asan/asan_internal.h @@ -61,29 +61,29 @@ using __sanitizer::StackTrace; void AsanInitFromRtl(); -// asan_win.cc +// asan_win.cpp void InitializePlatformExceptionHandlers(); // Returns whether an address is a valid allocated system heap block. // 'addr' must point to the beginning of the block. 
bool IsSystemHeapAddress(uptr addr); -// asan_rtl.cc +// asan_rtl.cpp void PrintAddressSpaceLayout(); void NORETURN ShowStatsAndAbort(); -// asan_shadow_setup.cc +// asan_shadow_setup.cpp void InitializeShadowMemory(); -// asan_malloc_linux.cc / asan_malloc_mac.cc +// asan_malloc_linux.cpp / asan_malloc_mac.cpp void ReplaceSystemMalloc(); -// asan_linux.cc / asan_mac.cc / asan_rtems.cc / asan_win.cc +// asan_linux.cpp / asan_mac.cpp / asan_rtems.cpp / asan_win.cpp uptr FindDynamicShadowStart(); void *AsanDoesNotSupportStaticLinkage(); void AsanCheckDynamicRTPrereqs(); void AsanCheckIncompatibleRT(); -// asan_thread.cc +// asan_thread.cpp AsanThread *CreateMainThread(); // Support function for __asan_(un)register_image_globals. Searches for the diff --git a/lib/asan/asan_linux.cc b/lib/asan/asan_linux.cc deleted file mode 100644 index f9182328916f..000000000000 --- a/lib/asan/asan_linux.cc +++ /dev/null @@ -1,260 +0,0 @@ -//===-- asan_linux.cc -----------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Linux-specific details. -//===----------------------------------------------------------------------===// - -#include "sanitizer_common/sanitizer_platform.h" -#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \ - SANITIZER_SOLARIS - -#include "asan_interceptors.h" -#include "asan_internal.h" -#include "asan_premap_shadow.h" -#include "asan_thread.h" -#include "sanitizer_common/sanitizer_flags.h" -#include "sanitizer_common/sanitizer_freebsd.h" -#include "sanitizer_common/sanitizer_libc.h" -#include "sanitizer_common/sanitizer_procmaps.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#if SANITIZER_FREEBSD -#include -#endif - -#if SANITIZER_SOLARIS -#include -#endif - -#if SANITIZER_ANDROID || SANITIZER_FREEBSD || SANITIZER_SOLARIS -#include -extern "C" void* _DYNAMIC; -#elif SANITIZER_NETBSD -#include -#include -extern Elf_Dyn _DYNAMIC; -#else -#include -#include -#endif - -// x86-64 FreeBSD 9.2 and older define 'ucontext_t' incorrectly in -// 32-bit mode. -#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32) && \ - __FreeBSD_version <= 902001 // v9.2 -#define ucontext_t xucontext_t -#endif - -typedef enum { - ASAN_RT_VERSION_UNDEFINED = 0, - ASAN_RT_VERSION_DYNAMIC, - ASAN_RT_VERSION_STATIC, -} asan_rt_version_t; - -// FIXME: perhaps also store abi version here? -extern "C" { -SANITIZER_INTERFACE_ATTRIBUTE -asan_rt_version_t __asan_rt_version; -} - -namespace __asan { - -void InitializePlatformInterceptors() {} -void InitializePlatformExceptionHandlers() {} -bool IsSystemHeapAddress (uptr addr) { return false; } - -void *AsanDoesNotSupportStaticLinkage() { - // This will fail to link with -static. 
- return &_DYNAMIC; // defined in link.h -} - -static void UnmapFromTo(uptr from, uptr to) { - CHECK(to >= from); - if (to == from) return; - uptr res = internal_munmap(reinterpret_cast(from), to - from); - if (UNLIKELY(internal_iserror(res))) { - Report( - "ERROR: AddresSanitizer failed to unmap 0x%zx (%zd) bytes at address " - "%p\n", - to - from, to - from, from); - CHECK("unable to unmap" && 0); - } -} - -#if ASAN_PREMAP_SHADOW -uptr FindPremappedShadowStart() { - uptr granularity = GetMmapGranularity(); - uptr shadow_start = reinterpret_cast(&__asan_shadow); - uptr premap_shadow_size = PremapShadowSize(); - uptr shadow_size = RoundUpTo(kHighShadowEnd, granularity); - // We may have mapped too much. Release extra memory. - UnmapFromTo(shadow_start + shadow_size, shadow_start + premap_shadow_size); - return shadow_start; -} -#endif - -uptr FindDynamicShadowStart() { -#if ASAN_PREMAP_SHADOW - if (!PremapShadowFailed()) - return FindPremappedShadowStart(); -#endif - - uptr granularity = GetMmapGranularity(); - uptr alignment = granularity * 8; - uptr left_padding = granularity; - uptr shadow_size = RoundUpTo(kHighShadowEnd, granularity); - uptr map_size = shadow_size + left_padding + alignment; - - uptr map_start = (uptr)MmapNoAccess(map_size); - CHECK_NE(map_start, ~(uptr)0); - - uptr shadow_start = RoundUpTo(map_start + left_padding, alignment); - UnmapFromTo(map_start, shadow_start - left_padding); - UnmapFromTo(shadow_start + shadow_size, map_start + map_size); - - return shadow_start; -} - -void AsanApplyToGlobals(globals_op_fptr op, const void *needle) { - UNIMPLEMENTED(); -} - -#if SANITIZER_ANDROID -// FIXME: should we do anything for Android? -void AsanCheckDynamicRTPrereqs() {} -void AsanCheckIncompatibleRT() {} -#else -static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size, - void *data) { - VReport(2, "info->dlpi_name = %s\tinfo->dlpi_addr = %p\n", - info->dlpi_name, info->dlpi_addr); - - // Continue until the first dynamic library is found - if (!info->dlpi_name || info->dlpi_name[0] == 0) - return 0; - - // Ignore vDSO - if (internal_strncmp(info->dlpi_name, "linux-", sizeof("linux-") - 1) == 0) - return 0; - -#if SANITIZER_FREEBSD || SANITIZER_NETBSD - // Ignore first entry (the main program) - char **p = (char **)data; - if (!(*p)) { - *p = (char *)-1; - return 0; - } -#endif - -#if SANITIZER_SOLARIS - // Ignore executable on Solaris - if (info->dlpi_addr == 0) - return 0; -#endif - - *(const char **)data = info->dlpi_name; - return 1; -} - -static bool IsDynamicRTName(const char *libname) { - return internal_strstr(libname, "libclang_rt.asan") || - internal_strstr(libname, "libasan.so"); -} - -static void ReportIncompatibleRT() { - Report("Your application is linked against incompatible ASan runtimes.\n"); - Die(); -} - -void AsanCheckDynamicRTPrereqs() { - if (!ASAN_DYNAMIC || !flags()->verify_asan_link_order) - return; - - // Ensure that dynamic RT is the first DSO in the list - const char *first_dso_name = nullptr; - dl_iterate_phdr(FindFirstDSOCallback, &first_dso_name); - if (first_dso_name && !IsDynamicRTName(first_dso_name)) { - Report("ASan runtime does not come first in initial library list; " - "you should either link runtime to your application or " - "manually preload it with LD_PRELOAD.\n"); - Die(); - } -} - -void AsanCheckIncompatibleRT() { - if (ASAN_DYNAMIC) { - if (__asan_rt_version == ASAN_RT_VERSION_UNDEFINED) { - __asan_rt_version = ASAN_RT_VERSION_DYNAMIC; - } else if (__asan_rt_version != ASAN_RT_VERSION_DYNAMIC) { - 
ReportIncompatibleRT(); - } - } else { - if (__asan_rt_version == ASAN_RT_VERSION_UNDEFINED) { - // Ensure that dynamic runtime is not present. We should detect it - // as early as possible, otherwise ASan interceptors could bind to - // the functions in dynamic ASan runtime instead of the functions in - // system libraries, causing crashes later in ASan initialization. - MemoryMappingLayout proc_maps(/*cache_enabled*/true); - char filename[PATH_MAX]; - MemoryMappedSegment segment(filename, sizeof(filename)); - while (proc_maps.Next(&segment)) { - if (IsDynamicRTName(segment.filename)) { - Report("Your application is linked against " - "incompatible ASan runtimes.\n"); - Die(); - } - } - __asan_rt_version = ASAN_RT_VERSION_STATIC; - } else if (__asan_rt_version != ASAN_RT_VERSION_STATIC) { - ReportIncompatibleRT(); - } - } -} -#endif // SANITIZER_ANDROID - -#if !SANITIZER_ANDROID -void ReadContextStack(void *context, uptr *stack, uptr *ssize) { - ucontext_t *ucp = (ucontext_t*)context; - *stack = (uptr)ucp->uc_stack.ss_sp; - *ssize = ucp->uc_stack.ss_size; -} -#else -void ReadContextStack(void *context, uptr *stack, uptr *ssize) { - UNIMPLEMENTED(); -} -#endif - -void *AsanDlSymNext(const char *sym) { - return dlsym(RTLD_NEXT, sym); -} - -bool HandleDlopenInit() { - // Not supported on this platform. - static_assert(!SANITIZER_SUPPORTS_INIT_FOR_DLOPEN, - "Expected SANITIZER_SUPPORTS_INIT_FOR_DLOPEN to be false"); - return false; -} - -} // namespace __asan - -#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || - // SANITIZER_SOLARIS diff --git a/lib/asan/asan_linux.cpp b/lib/asan/asan_linux.cpp new file mode 100644 index 000000000000..ce5e873dc518 --- /dev/null +++ b/lib/asan/asan_linux.cpp @@ -0,0 +1,260 @@ +//===-- asan_linux.cpp ----------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Linux-specific details. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \ + SANITIZER_SOLARIS + +#include "asan_interceptors.h" +#include "asan_internal.h" +#include "asan_premap_shadow.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_freebsd.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_procmaps.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if SANITIZER_FREEBSD +#include +#endif + +#if SANITIZER_SOLARIS +#include +#endif + +#if SANITIZER_ANDROID || SANITIZER_FREEBSD || SANITIZER_SOLARIS +#include +extern "C" void* _DYNAMIC; +#elif SANITIZER_NETBSD +#include +#include +extern Elf_Dyn _DYNAMIC; +#else +#include +#include +#endif + +// x86-64 FreeBSD 9.2 and older define 'ucontext_t' incorrectly in +// 32-bit mode. 
+#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32) && \ + __FreeBSD_version <= 902001 // v9.2 +#define ucontext_t xucontext_t +#endif + +typedef enum { + ASAN_RT_VERSION_UNDEFINED = 0, + ASAN_RT_VERSION_DYNAMIC, + ASAN_RT_VERSION_STATIC, +} asan_rt_version_t; + +// FIXME: perhaps also store abi version here? +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +asan_rt_version_t __asan_rt_version; +} + +namespace __asan { + +void InitializePlatformInterceptors() {} +void InitializePlatformExceptionHandlers() {} +bool IsSystemHeapAddress (uptr addr) { return false; } + +void *AsanDoesNotSupportStaticLinkage() { + // This will fail to link with -static. + return &_DYNAMIC; // defined in link.h +} + +static void UnmapFromTo(uptr from, uptr to) { + CHECK(to >= from); + if (to == from) return; + uptr res = internal_munmap(reinterpret_cast(from), to - from); + if (UNLIKELY(internal_iserror(res))) { + Report( + "ERROR: AddresSanitizer failed to unmap 0x%zx (%zd) bytes at address " + "%p\n", + to - from, to - from, from); + CHECK("unable to unmap" && 0); + } +} + +#if ASAN_PREMAP_SHADOW +uptr FindPremappedShadowStart() { + uptr granularity = GetMmapGranularity(); + uptr shadow_start = reinterpret_cast(&__asan_shadow); + uptr premap_shadow_size = PremapShadowSize(); + uptr shadow_size = RoundUpTo(kHighShadowEnd, granularity); + // We may have mapped too much. Release extra memory. + UnmapFromTo(shadow_start + shadow_size, shadow_start + premap_shadow_size); + return shadow_start; +} +#endif + +uptr FindDynamicShadowStart() { +#if ASAN_PREMAP_SHADOW + if (!PremapShadowFailed()) + return FindPremappedShadowStart(); +#endif + + uptr granularity = GetMmapGranularity(); + uptr alignment = granularity * 8; + uptr left_padding = granularity; + uptr shadow_size = RoundUpTo(kHighShadowEnd, granularity); + uptr map_size = shadow_size + left_padding + alignment; + + uptr map_start = (uptr)MmapNoAccess(map_size); + CHECK_NE(map_start, ~(uptr)0); + + uptr shadow_start = RoundUpTo(map_start + left_padding, alignment); + UnmapFromTo(map_start, shadow_start - left_padding); + UnmapFromTo(shadow_start + shadow_size, map_start + map_size); + + return shadow_start; +} + +void AsanApplyToGlobals(globals_op_fptr op, const void *needle) { + UNIMPLEMENTED(); +} + +#if SANITIZER_ANDROID +// FIXME: should we do anything for Android? 
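(The Android stubs that the FIXME above refers to continue below.) FindDynamicShadowStart above obtains an aligned shadow region by over-reserving and trimming: it reserves shadow_size + left_padding + alignment, rounds the start up to the alignment, then releases the slack on both sides with UnmapFromTo. A worked example of that address arithmetic with hypothetical values:

    #include <cstdio>

    static unsigned long long RoundUp(unsigned long long x, unsigned long long a) {
      return (x + a - 1) & ~(a - 1);
    }

    int main() {
      const unsigned long long granularity = 0x1000;         // hypothetical 4 KiB
      const unsigned long long alignment = granularity * 8;
      const unsigned long long left_padding = granularity;
      const unsigned long long shadow_size = 0x40000;        // hypothetical
      const unsigned long long map_size = shadow_size + left_padding + alignment;

      unsigned long long map_start = 0x7f1234561000ULL;      // pretend MmapNoAccess result
      unsigned long long shadow_start = RoundUp(map_start + left_padding, alignment);

      // Slack released back on either side of the aligned shadow region.
      std::printf("unmap head: [%#llx, %#llx)\n", map_start, shadow_start - left_padding);
      std::printf("unmap tail: [%#llx, %#llx)\n", shadow_start + shadow_size, map_start + map_size);
    }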
+void AsanCheckDynamicRTPrereqs() {} +void AsanCheckIncompatibleRT() {} +#else +static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size, + void *data) { + VReport(2, "info->dlpi_name = %s\tinfo->dlpi_addr = %p\n", + info->dlpi_name, info->dlpi_addr); + + // Continue until the first dynamic library is found + if (!info->dlpi_name || info->dlpi_name[0] == 0) + return 0; + + // Ignore vDSO + if (internal_strncmp(info->dlpi_name, "linux-", sizeof("linux-") - 1) == 0) + return 0; + +#if SANITIZER_FREEBSD || SANITIZER_NETBSD + // Ignore first entry (the main program) + char **p = (char **)data; + if (!(*p)) { + *p = (char *)-1; + return 0; + } +#endif + +#if SANITIZER_SOLARIS + // Ignore executable on Solaris + if (info->dlpi_addr == 0) + return 0; +#endif + + *(const char **)data = info->dlpi_name; + return 1; +} + +static bool IsDynamicRTName(const char *libname) { + return internal_strstr(libname, "libclang_rt.asan") || + internal_strstr(libname, "libasan.so"); +} + +static void ReportIncompatibleRT() { + Report("Your application is linked against incompatible ASan runtimes.\n"); + Die(); +} + +void AsanCheckDynamicRTPrereqs() { + if (!ASAN_DYNAMIC || !flags()->verify_asan_link_order) + return; + + // Ensure that dynamic RT is the first DSO in the list + const char *first_dso_name = nullptr; + dl_iterate_phdr(FindFirstDSOCallback, &first_dso_name); + if (first_dso_name && !IsDynamicRTName(first_dso_name)) { + Report("ASan runtime does not come first in initial library list; " + "you should either link runtime to your application or " + "manually preload it with LD_PRELOAD.\n"); + Die(); + } +} + +void AsanCheckIncompatibleRT() { + if (ASAN_DYNAMIC) { + if (__asan_rt_version == ASAN_RT_VERSION_UNDEFINED) { + __asan_rt_version = ASAN_RT_VERSION_DYNAMIC; + } else if (__asan_rt_version != ASAN_RT_VERSION_DYNAMIC) { + ReportIncompatibleRT(); + } + } else { + if (__asan_rt_version == ASAN_RT_VERSION_UNDEFINED) { + // Ensure that dynamic runtime is not present. We should detect it + // as early as possible, otherwise ASan interceptors could bind to + // the functions in dynamic ASan runtime instead of the functions in + // system libraries, causing crashes later in ASan initialization. + MemoryMappingLayout proc_maps(/*cache_enabled*/true); + char filename[PATH_MAX]; + MemoryMappedSegment segment(filename, sizeof(filename)); + while (proc_maps.Next(&segment)) { + if (IsDynamicRTName(segment.filename)) { + Report("Your application is linked against " + "incompatible ASan runtimes.\n"); + Die(); + } + } + __asan_rt_version = ASAN_RT_VERSION_STATIC; + } else if (__asan_rt_version != ASAN_RT_VERSION_STATIC) { + ReportIncompatibleRT(); + } + } +} +#endif // SANITIZER_ANDROID + +#if !SANITIZER_ANDROID +void ReadContextStack(void *context, uptr *stack, uptr *ssize) { + ucontext_t *ucp = (ucontext_t*)context; + *stack = (uptr)ucp->uc_stack.ss_sp; + *ssize = ucp->uc_stack.ss_size; +} +#else +void ReadContextStack(void *context, uptr *stack, uptr *ssize) { + UNIMPLEMENTED(); +} +#endif + +void *AsanDlSymNext(const char *sym) { + return dlsym(RTLD_NEXT, sym); +} + +bool HandleDlopenInit() { + // Not supported on this platform. 
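(The static_assert that the comment above introduces follows below.) The dynamic/static runtime check earlier in this file boils down to substring-matching mapped object names; a minimal sketch of that matching applied to a hypothetical list of mappings (the helper name here is invented):

    #include <cstdio>
    #include <cstring>

    // Same substring test as the IsDynamicRTName helper above.
    static bool LooksLikeDynamicAsanRT(const char *libname) {
      return std::strstr(libname, "libclang_rt.asan") != nullptr ||
             std::strstr(libname, "libasan.so") != nullptr;
    }

    int main() {
      const char *maps[] = {"/usr/lib/libc.so.6",
                            "/usr/lib/libclang_rt.asan-x86_64.so",
                            "/home/user/a.out"};
      for (const char *name : maps)
        std::printf("%-40s -> %s\n", name,
                    LooksLikeDynamicAsanRT(name) ? "dynamic ASan runtime" : "other");
    }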
+ static_assert(!SANITIZER_SUPPORTS_INIT_FOR_DLOPEN, + "Expected SANITIZER_SUPPORTS_INIT_FOR_DLOPEN to be false"); + return false; +} + +} // namespace __asan + +#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || + // SANITIZER_SOLARIS diff --git a/lib/asan/asan_mac.cc b/lib/asan/asan_mac.cc deleted file mode 100644 index e776acd2f539..000000000000 --- a/lib/asan/asan_mac.cc +++ /dev/null @@ -1,331 +0,0 @@ -//===-- asan_mac.cc -------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Mac-specific details. -//===----------------------------------------------------------------------===// - -#include "sanitizer_common/sanitizer_platform.h" -#if SANITIZER_MAC - -#include "asan_interceptors.h" -#include "asan_internal.h" -#include "asan_mapping.h" -#include "asan_stack.h" -#include "asan_thread.h" -#include "sanitizer_common/sanitizer_atomic.h" -#include "sanitizer_common/sanitizer_libc.h" -#include "sanitizer_common/sanitizer_mac.h" - -#include -#include -#include -#include -#include -#include -#include -#include // for free() -#include -#include -#include -#include -#include - -// from , but we don't have that file on iOS -extern "C" { - extern char ***_NSGetArgv(void); - extern char ***_NSGetEnviron(void); -} - -namespace __asan { - -void InitializePlatformInterceptors() {} -void InitializePlatformExceptionHandlers() {} -bool IsSystemHeapAddress (uptr addr) { return false; } - -// No-op. Mac does not support static linkage anyway. -void *AsanDoesNotSupportStaticLinkage() { - return 0; -} - -uptr FindDynamicShadowStart() { - uptr granularity = GetMmapGranularity(); - uptr alignment = 8 * granularity; - uptr left_padding = granularity; - uptr space_size = kHighShadowEnd + left_padding; - - uptr largest_gap_found = 0; - uptr max_occupied_addr = 0; - VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size); - uptr shadow_start = - FindAvailableMemoryRange(space_size, alignment, granularity, - &largest_gap_found, &max_occupied_addr); - // If the shadow doesn't fit, restrict the address space to make it fit. 
- if (shadow_start == 0) { - VReport( - 2, - "Shadow doesn't fit, largest_gap_found = %p, max_occupied_addr = %p\n", - largest_gap_found, max_occupied_addr); - uptr new_max_vm = RoundDownTo(largest_gap_found << SHADOW_SCALE, alignment); - if (new_max_vm < max_occupied_addr) { - Report("Unable to find a memory range for dynamic shadow.\n"); - Report( - "space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, " - "new_max_vm = %p\n", - space_size, largest_gap_found, max_occupied_addr, new_max_vm); - CHECK(0 && "cannot place shadow"); - } - RestrictMemoryToMaxAddress(new_max_vm); - kHighMemEnd = new_max_vm - 1; - space_size = kHighShadowEnd + left_padding; - VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size); - shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity, - nullptr, nullptr); - if (shadow_start == 0) { - Report("Unable to find a memory range after restricting VM.\n"); - CHECK(0 && "cannot place shadow after restricting vm"); - } - } - CHECK_NE((uptr)0, shadow_start); - CHECK(IsAligned(shadow_start, alignment)); - return shadow_start; -} - -// No-op. Mac does not support static linkage anyway. -void AsanCheckDynamicRTPrereqs() {} - -// No-op. Mac does not support static linkage anyway. -void AsanCheckIncompatibleRT() {} - -void AsanApplyToGlobals(globals_op_fptr op, const void *needle) { - // Find the Mach-O header for the image containing the needle - Dl_info info; - int err = dladdr(needle, &info); - if (err == 0) return; - -#if __LP64__ - const struct mach_header_64 *mh = (struct mach_header_64 *)info.dli_fbase; -#else - const struct mach_header *mh = (struct mach_header *)info.dli_fbase; -#endif - - // Look up the __asan_globals section in that image and register its globals - unsigned long size = 0; - __asan_global *globals = (__asan_global *)getsectiondata( - mh, - "__DATA", "__asan_globals", - &size); - - if (!globals) return; - if (size % sizeof(__asan_global) != 0) return; - op(globals, size / sizeof(__asan_global)); -} - -void ReadContextStack(void *context, uptr *stack, uptr *ssize) { - UNIMPLEMENTED(); -} - -// Support for the following functions from libdispatch on Mac OS: -// dispatch_async_f() -// dispatch_async() -// dispatch_sync_f() -// dispatch_sync() -// dispatch_after_f() -// dispatch_after() -// dispatch_group_async_f() -// dispatch_group_async() -// TODO(glider): libdispatch API contains other functions that we don't support -// yet. -// -// dispatch_sync() and dispatch_sync_f() are synchronous, although chances are -// they can cause jobs to run on a thread different from the current one. -// TODO(glider): if so, we need a test for this (otherwise we should remove -// them). 
-// -// The following functions use dispatch_barrier_async_f() (which isn't a library -// function but is exported) and are thus supported: -// dispatch_source_set_cancel_handler_f() -// dispatch_source_set_cancel_handler() -// dispatch_source_set_event_handler_f() -// dispatch_source_set_event_handler() -// -// The reference manual for Grand Central Dispatch is available at -// http://developer.apple.com/library/mac/#documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html -// The implementation details are at -// http://libdispatch.macosforge.org/trac/browser/trunk/src/queue.c - -typedef void* dispatch_group_t; -typedef void* dispatch_queue_t; -typedef void* dispatch_source_t; -typedef u64 dispatch_time_t; -typedef void (*dispatch_function_t)(void *block); -typedef void* (*worker_t)(void *block); - -// A wrapper for the ObjC blocks used to support libdispatch. -typedef struct { - void *block; - dispatch_function_t func; - u32 parent_tid; -} asan_block_context_t; - -ALWAYS_INLINE -void asan_register_worker_thread(int parent_tid, StackTrace *stack) { - AsanThread *t = GetCurrentThread(); - if (!t) { - t = AsanThread::Create(/* start_routine */ nullptr, /* arg */ nullptr, - parent_tid, stack, /* detached */ true); - t->Init(); - asanThreadRegistry().StartThread(t->tid(), GetTid(), ThreadType::Worker, - nullptr); - SetCurrentThread(t); - } -} - -// For use by only those functions that allocated the context via -// alloc_asan_context(). -extern "C" -void asan_dispatch_call_block_and_release(void *block) { - GET_STACK_TRACE_THREAD; - asan_block_context_t *context = (asan_block_context_t*)block; - VReport(2, - "asan_dispatch_call_block_and_release(): " - "context: %p, pthread_self: %p\n", - block, pthread_self()); - asan_register_worker_thread(context->parent_tid, &stack); - // Call the original dispatcher for the block. - context->func(context->block); - asan_free(context, &stack, FROM_MALLOC); -} - -} // namespace __asan - -using namespace __asan; // NOLINT - -// Wrap |ctxt| and |func| into an asan_block_context_t. -// The caller retains control of the allocated context. -extern "C" -asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func, - BufferedStackTrace *stack) { - asan_block_context_t *asan_ctxt = - (asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), stack); - asan_ctxt->block = ctxt; - asan_ctxt->func = func; - asan_ctxt->parent_tid = GetCurrentTidOrInvalid(); - return asan_ctxt; -} - -// Define interceptor for dispatch_*_f function with the three most common -// parameters: dispatch_queue_t, context, dispatch_function_t. 
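(The INTERCEPT_DISPATCH_X_F_3 macro that the comment above announces follows below.) The libdispatch support in this file relies on a wrap-and-trampoline pattern: the original (func, ctxt) pair plus some bookkeeping is boxed by alloc_asan_context, only the trampoline asan_dispatch_call_block_and_release is handed to libdispatch, and the trampoline restores thread state before calling through. A platform-neutral sketch of the same idea, with no libdispatch involvement and all names invented:

    #include <cstdio>

    typedef void (*work_fn)(void *);

    struct WrappedCall {
      void *ctxt;      // original context
      work_fn func;    // original work function
      int parent_tid;  // extra bookkeeping carried across the dispatch
    };

    static void trampoline(void *raw) {
      WrappedCall *w = static_cast<WrappedCall *>(raw);
      std::printf("restoring bookkeeping from tid %d\n", w->parent_tid);
      w->func(w->ctxt);  // call the original work item
      delete w;          // release the wrapper once the work has run
    }

    // Stands in for dispatch_async_f: the dispatcher only ever sees the trampoline.
    static void fake_dispatch(work_fn fn, void *ctxt) { fn(ctxt); }

    static void user_work(void *ctxt) { std::printf("user work, ctxt = %p\n", ctxt); }

    int main() {
      int payload = 7;
      fake_dispatch(trampoline, new WrappedCall{&payload, user_work, /*parent_tid=*/1});
    }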
-#define INTERCEPT_DISPATCH_X_F_3(dispatch_x_f) \ - INTERCEPTOR(void, dispatch_x_f, dispatch_queue_t dq, void *ctxt, \ - dispatch_function_t func) { \ - GET_STACK_TRACE_THREAD; \ - asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); \ - if (Verbosity() >= 2) { \ - Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \ - asan_ctxt, pthread_self()); \ - PRINT_CURRENT_STACK(); \ - } \ - return REAL(dispatch_x_f)(dq, (void*)asan_ctxt, \ - asan_dispatch_call_block_and_release); \ - } - -INTERCEPT_DISPATCH_X_F_3(dispatch_async_f) -INTERCEPT_DISPATCH_X_F_3(dispatch_sync_f) -INTERCEPT_DISPATCH_X_F_3(dispatch_barrier_async_f) - -INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when, - dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) { - GET_STACK_TRACE_THREAD; - asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); - if (Verbosity() >= 2) { - Report("dispatch_after_f: %p\n", asan_ctxt); - PRINT_CURRENT_STACK(); - } - return REAL(dispatch_after_f)(when, dq, (void*)asan_ctxt, - asan_dispatch_call_block_and_release); -} - -INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group, - dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) { - GET_STACK_TRACE_THREAD; - asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); - if (Verbosity() >= 2) { - Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n", - asan_ctxt, pthread_self()); - PRINT_CURRENT_STACK(); - } - REAL(dispatch_group_async_f)(group, dq, (void*)asan_ctxt, - asan_dispatch_call_block_and_release); -} - -#if !defined(MISSING_BLOCKS_SUPPORT) -extern "C" { -void dispatch_async(dispatch_queue_t dq, void(^work)(void)); -void dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq, - void(^work)(void)); -void dispatch_after(dispatch_time_t when, dispatch_queue_t queue, - void(^work)(void)); -void dispatch_source_set_cancel_handler(dispatch_source_t ds, - void(^work)(void)); -void dispatch_source_set_event_handler(dispatch_source_t ds, void(^work)(void)); -} - -#define GET_ASAN_BLOCK(work) \ - void (^asan_block)(void); \ - int parent_tid = GetCurrentTidOrInvalid(); \ - asan_block = ^(void) { \ - GET_STACK_TRACE_THREAD; \ - asan_register_worker_thread(parent_tid, &stack); \ - work(); \ - } - -INTERCEPTOR(void, dispatch_async, - dispatch_queue_t dq, void(^work)(void)) { - ENABLE_FRAME_POINTER; - GET_ASAN_BLOCK(work); - REAL(dispatch_async)(dq, asan_block); -} - -INTERCEPTOR(void, dispatch_group_async, - dispatch_group_t dg, dispatch_queue_t dq, void(^work)(void)) { - ENABLE_FRAME_POINTER; - GET_ASAN_BLOCK(work); - REAL(dispatch_group_async)(dg, dq, asan_block); -} - -INTERCEPTOR(void, dispatch_after, - dispatch_time_t when, dispatch_queue_t queue, void(^work)(void)) { - ENABLE_FRAME_POINTER; - GET_ASAN_BLOCK(work); - REAL(dispatch_after)(when, queue, asan_block); -} - -INTERCEPTOR(void, dispatch_source_set_cancel_handler, - dispatch_source_t ds, void(^work)(void)) { - if (!work) { - REAL(dispatch_source_set_cancel_handler)(ds, work); - return; - } - ENABLE_FRAME_POINTER; - GET_ASAN_BLOCK(work); - REAL(dispatch_source_set_cancel_handler)(ds, asan_block); -} - -INTERCEPTOR(void, dispatch_source_set_event_handler, - dispatch_source_t ds, void(^work)(void)) { - ENABLE_FRAME_POINTER; - GET_ASAN_BLOCK(work); - REAL(dispatch_source_set_event_handler)(ds, asan_block); -} -#endif - -#endif // SANITIZER_MAC diff --git a/lib/asan/asan_mac.cpp b/lib/asan/asan_mac.cpp new file mode 100644 index 000000000000..a8d3f5d3473c --- /dev/null +++ 
b/lib/asan/asan_mac.cpp @@ -0,0 +1,331 @@ +//===-- asan_mac.cpp ------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Mac-specific details. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_MAC + +#include "asan_interceptors.h" +#include "asan_internal.h" +#include "asan_mapping.h" +#include "asan_stack.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_atomic.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_mac.h" + +#include +#include +#include +#include +#include +#include +#include +#include // for free() +#include +#include +#include +#include +#include + +// from , but we don't have that file on iOS +extern "C" { + extern char ***_NSGetArgv(void); + extern char ***_NSGetEnviron(void); +} + +namespace __asan { + +void InitializePlatformInterceptors() {} +void InitializePlatformExceptionHandlers() {} +bool IsSystemHeapAddress (uptr addr) { return false; } + +// No-op. Mac does not support static linkage anyway. +void *AsanDoesNotSupportStaticLinkage() { + return 0; +} + +uptr FindDynamicShadowStart() { + uptr granularity = GetMmapGranularity(); + uptr alignment = 8 * granularity; + uptr left_padding = granularity; + uptr space_size = kHighShadowEnd + left_padding; + + uptr largest_gap_found = 0; + uptr max_occupied_addr = 0; + VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size); + uptr shadow_start = + FindAvailableMemoryRange(space_size, alignment, granularity, + &largest_gap_found, &max_occupied_addr); + // If the shadow doesn't fit, restrict the address space to make it fit. + if (shadow_start == 0) { + VReport( + 2, + "Shadow doesn't fit, largest_gap_found = %p, max_occupied_addr = %p\n", + largest_gap_found, max_occupied_addr); + uptr new_max_vm = RoundDownTo(largest_gap_found << SHADOW_SCALE, alignment); + if (new_max_vm < max_occupied_addr) { + Report("Unable to find a memory range for dynamic shadow.\n"); + Report( + "space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, " + "new_max_vm = %p\n", + space_size, largest_gap_found, max_occupied_addr, new_max_vm); + CHECK(0 && "cannot place shadow"); + } + RestrictMemoryToMaxAddress(new_max_vm); + kHighMemEnd = new_max_vm - 1; + space_size = kHighShadowEnd + left_padding; + VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size); + shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity, + nullptr, nullptr); + if (shadow_start == 0) { + Report("Unable to find a memory range after restricting VM.\n"); + CHECK(0 && "cannot place shadow after restricting vm"); + } + } + CHECK_NE((uptr)0, shadow_start); + CHECK(IsAligned(shadow_start, alignment)); + return shadow_start; +} + +// No-op. Mac does not support static linkage anyway. +void AsanCheckDynamicRTPrereqs() {} + +// No-op. Mac does not support static linkage anyway. 
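(The AsanCheckIncompatibleRT no-op that the comment above introduces follows below.) When the shadow does not fit, FindDynamicShadowStart above derives a new VM ceiling from the largest free gap: the gap is scaled up by SHADOW_SCALE and rounded down to the mapping alignment. A worked example, assuming the default SHADOW_SCALE of 3 and hypothetical sizes:

    #include <cstdio>

    int main() {
      const unsigned long long kShadowScale = 3;          // assumed default scale
      const unsigned long long alignment = 8 * 0x1000;    // 8 * page granularity, hypothetical
      unsigned long long largest_gap_found = 0x20000000;  // hypothetical 512 MiB gap

      // Largest address space this gap can shadow, rounded down to the alignment.
      unsigned long long new_max_vm =
          (largest_gap_found << kShadowScale) & ~(alignment - 1);
      std::printf("new_max_vm = %#llx\n", new_max_vm);    // 0x100000000
    }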
+void AsanCheckIncompatibleRT() {} + +void AsanApplyToGlobals(globals_op_fptr op, const void *needle) { + // Find the Mach-O header for the image containing the needle + Dl_info info; + int err = dladdr(needle, &info); + if (err == 0) return; + +#if __LP64__ + const struct mach_header_64 *mh = (struct mach_header_64 *)info.dli_fbase; +#else + const struct mach_header *mh = (struct mach_header *)info.dli_fbase; +#endif + + // Look up the __asan_globals section in that image and register its globals + unsigned long size = 0; + __asan_global *globals = (__asan_global *)getsectiondata( + mh, + "__DATA", "__asan_globals", + &size); + + if (!globals) return; + if (size % sizeof(__asan_global) != 0) return; + op(globals, size / sizeof(__asan_global)); +} + +void ReadContextStack(void *context, uptr *stack, uptr *ssize) { + UNIMPLEMENTED(); +} + +// Support for the following functions from libdispatch on Mac OS: +// dispatch_async_f() +// dispatch_async() +// dispatch_sync_f() +// dispatch_sync() +// dispatch_after_f() +// dispatch_after() +// dispatch_group_async_f() +// dispatch_group_async() +// TODO(glider): libdispatch API contains other functions that we don't support +// yet. +// +// dispatch_sync() and dispatch_sync_f() are synchronous, although chances are +// they can cause jobs to run on a thread different from the current one. +// TODO(glider): if so, we need a test for this (otherwise we should remove +// them). +// +// The following functions use dispatch_barrier_async_f() (which isn't a library +// function but is exported) and are thus supported: +// dispatch_source_set_cancel_handler_f() +// dispatch_source_set_cancel_handler() +// dispatch_source_set_event_handler_f() +// dispatch_source_set_event_handler() +// +// The reference manual for Grand Central Dispatch is available at +// http://developer.apple.com/library/mac/#documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html +// The implementation details are at +// http://libdispatch.macosforge.org/trac/browser/trunk/src/queue.c + +typedef void* dispatch_group_t; +typedef void* dispatch_queue_t; +typedef void* dispatch_source_t; +typedef u64 dispatch_time_t; +typedef void (*dispatch_function_t)(void *block); +typedef void* (*worker_t)(void *block); + +// A wrapper for the ObjC blocks used to support libdispatch. +typedef struct { + void *block; + dispatch_function_t func; + u32 parent_tid; +} asan_block_context_t; + +ALWAYS_INLINE +void asan_register_worker_thread(int parent_tid, StackTrace *stack) { + AsanThread *t = GetCurrentThread(); + if (!t) { + t = AsanThread::Create(/* start_routine */ nullptr, /* arg */ nullptr, + parent_tid, stack, /* detached */ true); + t->Init(); + asanThreadRegistry().StartThread(t->tid(), GetTid(), ThreadType::Worker, + nullptr); + SetCurrentThread(t); + } +} + +// For use by only those functions that allocated the context via +// alloc_asan_context(). +extern "C" +void asan_dispatch_call_block_and_release(void *block) { + GET_STACK_TRACE_THREAD; + asan_block_context_t *context = (asan_block_context_t*)block; + VReport(2, + "asan_dispatch_call_block_and_release(): " + "context: %p, pthread_self: %p\n", + block, pthread_self()); + asan_register_worker_thread(context->parent_tid, &stack); + // Call the original dispatcher for the block. + context->func(context->block); + asan_free(context, &stack, FROM_MALLOC); +} + +} // namespace __asan + +using namespace __asan; + +// Wrap |ctxt| and |func| into an asan_block_context_t. 
+// The caller retains control of the allocated context. +extern "C" +asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func, + BufferedStackTrace *stack) { + asan_block_context_t *asan_ctxt = + (asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), stack); + asan_ctxt->block = ctxt; + asan_ctxt->func = func; + asan_ctxt->parent_tid = GetCurrentTidOrInvalid(); + return asan_ctxt; +} + +// Define interceptor for dispatch_*_f function with the three most common +// parameters: dispatch_queue_t, context, dispatch_function_t. +#define INTERCEPT_DISPATCH_X_F_3(dispatch_x_f) \ + INTERCEPTOR(void, dispatch_x_f, dispatch_queue_t dq, void *ctxt, \ + dispatch_function_t func) { \ + GET_STACK_TRACE_THREAD; \ + asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); \ + if (Verbosity() >= 2) { \ + Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \ + asan_ctxt, pthread_self()); \ + PRINT_CURRENT_STACK(); \ + } \ + return REAL(dispatch_x_f)(dq, (void*)asan_ctxt, \ + asan_dispatch_call_block_and_release); \ + } + +INTERCEPT_DISPATCH_X_F_3(dispatch_async_f) +INTERCEPT_DISPATCH_X_F_3(dispatch_sync_f) +INTERCEPT_DISPATCH_X_F_3(dispatch_barrier_async_f) + +INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when, + dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) { + GET_STACK_TRACE_THREAD; + asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); + if (Verbosity() >= 2) { + Report("dispatch_after_f: %p\n", asan_ctxt); + PRINT_CURRENT_STACK(); + } + return REAL(dispatch_after_f)(when, dq, (void*)asan_ctxt, + asan_dispatch_call_block_and_release); +} + +INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group, + dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) { + GET_STACK_TRACE_THREAD; + asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); + if (Verbosity() >= 2) { + Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n", + asan_ctxt, pthread_self()); + PRINT_CURRENT_STACK(); + } + REAL(dispatch_group_async_f)(group, dq, (void*)asan_ctxt, + asan_dispatch_call_block_and_release); +} + +#if !defined(MISSING_BLOCKS_SUPPORT) +extern "C" { +void dispatch_async(dispatch_queue_t dq, void(^work)(void)); +void dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq, + void(^work)(void)); +void dispatch_after(dispatch_time_t when, dispatch_queue_t queue, + void(^work)(void)); +void dispatch_source_set_cancel_handler(dispatch_source_t ds, + void(^work)(void)); +void dispatch_source_set_event_handler(dispatch_source_t ds, void(^work)(void)); +} + +#define GET_ASAN_BLOCK(work) \ + void (^asan_block)(void); \ + int parent_tid = GetCurrentTidOrInvalid(); \ + asan_block = ^(void) { \ + GET_STACK_TRACE_THREAD; \ + asan_register_worker_thread(parent_tid, &stack); \ + work(); \ + } + +INTERCEPTOR(void, dispatch_async, + dispatch_queue_t dq, void(^work)(void)) { + ENABLE_FRAME_POINTER; + GET_ASAN_BLOCK(work); + REAL(dispatch_async)(dq, asan_block); +} + +INTERCEPTOR(void, dispatch_group_async, + dispatch_group_t dg, dispatch_queue_t dq, void(^work)(void)) { + ENABLE_FRAME_POINTER; + GET_ASAN_BLOCK(work); + REAL(dispatch_group_async)(dg, dq, asan_block); +} + +INTERCEPTOR(void, dispatch_after, + dispatch_time_t when, dispatch_queue_t queue, void(^work)(void)) { + ENABLE_FRAME_POINTER; + GET_ASAN_BLOCK(work); + REAL(dispatch_after)(when, queue, asan_block); +} + +INTERCEPTOR(void, dispatch_source_set_cancel_handler, + dispatch_source_t ds, void(^work)(void)) { + if (!work) { + 
REAL(dispatch_source_set_cancel_handler)(ds, work); + return; + } + ENABLE_FRAME_POINTER; + GET_ASAN_BLOCK(work); + REAL(dispatch_source_set_cancel_handler)(ds, asan_block); +} + +INTERCEPTOR(void, dispatch_source_set_event_handler, + dispatch_source_t ds, void(^work)(void)) { + ENABLE_FRAME_POINTER; + GET_ASAN_BLOCK(work); + REAL(dispatch_source_set_event_handler)(ds, asan_block); +} +#endif + +#endif // SANITIZER_MAC diff --git a/lib/asan/asan_malloc_linux.cc b/lib/asan/asan_malloc_linux.cc deleted file mode 100644 index 86fcd3b12d58..000000000000 --- a/lib/asan/asan_malloc_linux.cc +++ /dev/null @@ -1,307 +0,0 @@ -//===-- asan_malloc_linux.cc ----------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Linux-specific malloc interception. -// We simply define functions like malloc, free, realloc, etc. -// They will replace the corresponding libc functions automagically. -//===----------------------------------------------------------------------===// - -#include "sanitizer_common/sanitizer_platform.h" -#if SANITIZER_FREEBSD || SANITIZER_FUCHSIA || SANITIZER_LINUX || \ - SANITIZER_NETBSD || SANITIZER_RTEMS || SANITIZER_SOLARIS - -#include "sanitizer_common/sanitizer_allocator_checks.h" -#include "sanitizer_common/sanitizer_errno.h" -#include "sanitizer_common/sanitizer_tls_get_addr.h" -#include "asan_allocator.h" -#include "asan_interceptors.h" -#include "asan_internal.h" -#include "asan_malloc_local.h" -#include "asan_stack.h" - -// ---------------------- Replacement functions ---------------- {{{1 -using namespace __asan; // NOLINT - -static uptr allocated_for_dlsym; -static uptr last_dlsym_alloc_size_in_words; -static const uptr kDlsymAllocPoolSize = SANITIZER_RTEMS ? 4096 : 1024; -static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize]; - -static INLINE bool IsInDlsymAllocPool(const void *ptr) { - uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym; - return off < allocated_for_dlsym * sizeof(alloc_memory_for_dlsym[0]); -} - -static void *AllocateFromLocalPool(uptr size_in_bytes) { - uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize; - void *mem = (void*)&alloc_memory_for_dlsym[allocated_for_dlsym]; - last_dlsym_alloc_size_in_words = size_in_words; - allocated_for_dlsym += size_in_words; - CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize); - return mem; -} - -static void DeallocateFromLocalPool(const void *ptr) { - // Hack: since glibc 2.27 dlsym no longer uses stack-allocated memory to store - // error messages and instead uses malloc followed by free. To avoid pool - // exhaustion due to long object filenames, handle that special case here. 
- uptr prev_offset = allocated_for_dlsym - last_dlsym_alloc_size_in_words; - void *prev_mem = (void*)&alloc_memory_for_dlsym[prev_offset]; - if (prev_mem == ptr) { - REAL(memset)(prev_mem, 0, last_dlsym_alloc_size_in_words * kWordSize); - allocated_for_dlsym = prev_offset; - last_dlsym_alloc_size_in_words = 0; - } -} - -static int PosixMemalignFromLocalPool(void **memptr, uptr alignment, - uptr size_in_bytes) { - if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) - return errno_EINVAL; - - CHECK(alignment >= kWordSize); - - uptr addr = (uptr)&alloc_memory_for_dlsym[allocated_for_dlsym]; - uptr aligned_addr = RoundUpTo(addr, alignment); - uptr aligned_size = RoundUpTo(size_in_bytes, kWordSize); - - uptr *end_mem = (uptr*)(aligned_addr + aligned_size); - uptr allocated = end_mem - alloc_memory_for_dlsym; - if (allocated >= kDlsymAllocPoolSize) - return errno_ENOMEM; - - allocated_for_dlsym = allocated; - *memptr = (void*)aligned_addr; - return 0; -} - -#if SANITIZER_RTEMS -void* MemalignFromLocalPool(uptr alignment, uptr size) { - void *ptr = nullptr; - alignment = Max(alignment, kWordSize); - PosixMemalignFromLocalPool(&ptr, alignment, size); - return ptr; -} - -bool IsFromLocalPool(const void *ptr) { - return IsInDlsymAllocPool(ptr); -} -#endif - -static INLINE bool MaybeInDlsym() { - // Fuchsia doesn't use dlsym-based interceptors. - return !SANITIZER_FUCHSIA && asan_init_is_running; -} - -static INLINE bool UseLocalPool() { - return EarlyMalloc() || MaybeInDlsym(); -} - -static void *ReallocFromLocalPool(void *ptr, uptr size) { - const uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym; - const uptr copy_size = Min(size, kDlsymAllocPoolSize - offset); - void *new_ptr; - if (UNLIKELY(UseLocalPool())) { - new_ptr = AllocateFromLocalPool(size); - } else { - ENSURE_ASAN_INITED(); - GET_STACK_TRACE_MALLOC; - new_ptr = asan_malloc(size, &stack); - } - internal_memcpy(new_ptr, ptr, copy_size); - return new_ptr; -} - -INTERCEPTOR(void, free, void *ptr) { - GET_STACK_TRACE_FREE; - if (UNLIKELY(IsInDlsymAllocPool(ptr))) { - DeallocateFromLocalPool(ptr); - return; - } - asan_free(ptr, &stack, FROM_MALLOC); -} - -#if SANITIZER_INTERCEPT_CFREE -INTERCEPTOR(void, cfree, void *ptr) { - GET_STACK_TRACE_FREE; - if (UNLIKELY(IsInDlsymAllocPool(ptr))) - return; - asan_free(ptr, &stack, FROM_MALLOC); -} -#endif // SANITIZER_INTERCEPT_CFREE - -INTERCEPTOR(void*, malloc, uptr size) { - if (UNLIKELY(UseLocalPool())) - // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym. - return AllocateFromLocalPool(size); - ENSURE_ASAN_INITED(); - GET_STACK_TRACE_MALLOC; - return asan_malloc(size, &stack); -} - -INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) { - if (UNLIKELY(UseLocalPool())) - // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym. 
- return AllocateFromLocalPool(nmemb * size); - ENSURE_ASAN_INITED(); - GET_STACK_TRACE_MALLOC; - return asan_calloc(nmemb, size, &stack); -} - -INTERCEPTOR(void*, realloc, void *ptr, uptr size) { - if (UNLIKELY(IsInDlsymAllocPool(ptr))) - return ReallocFromLocalPool(ptr, size); - if (UNLIKELY(UseLocalPool())) - return AllocateFromLocalPool(size); - ENSURE_ASAN_INITED(); - GET_STACK_TRACE_MALLOC; - return asan_realloc(ptr, size, &stack); -} - -#if SANITIZER_INTERCEPT_REALLOCARRAY -INTERCEPTOR(void*, reallocarray, void *ptr, uptr nmemb, uptr size) { - ENSURE_ASAN_INITED(); - GET_STACK_TRACE_MALLOC; - return asan_reallocarray(ptr, nmemb, size, &stack); -} -#endif // SANITIZER_INTERCEPT_REALLOCARRAY - -#if SANITIZER_INTERCEPT_MEMALIGN -INTERCEPTOR(void*, memalign, uptr boundary, uptr size) { - GET_STACK_TRACE_MALLOC; - return asan_memalign(boundary, size, &stack, FROM_MALLOC); -} - -INTERCEPTOR(void*, __libc_memalign, uptr boundary, uptr size) { - GET_STACK_TRACE_MALLOC; - void *res = asan_memalign(boundary, size, &stack, FROM_MALLOC); - DTLS_on_libc_memalign(res, size); - return res; -} -#endif // SANITIZER_INTERCEPT_MEMALIGN - -#if SANITIZER_INTERCEPT_ALIGNED_ALLOC -INTERCEPTOR(void*, aligned_alloc, uptr boundary, uptr size) { - GET_STACK_TRACE_MALLOC; - return asan_aligned_alloc(boundary, size, &stack); -} -#endif // SANITIZER_INTERCEPT_ALIGNED_ALLOC - -INTERCEPTOR(uptr, malloc_usable_size, void *ptr) { - GET_CURRENT_PC_BP_SP; - (void)sp; - return asan_malloc_usable_size(ptr, pc, bp); -} - -#if SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO -// We avoid including malloc.h for portability reasons. -// man mallinfo says the fields are "long", but the implementation uses int. -// It doesn't matter much -- we just need to make sure that the libc's mallinfo -// is not called. -struct fake_mallinfo { - int x[10]; -}; - -INTERCEPTOR(struct fake_mallinfo, mallinfo, void) { - struct fake_mallinfo res; - REAL(memset)(&res, 0, sizeof(res)); - return res; -} - -INTERCEPTOR(int, mallopt, int cmd, int value) { - return 0; -} -#endif // SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO - -INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) { - if (UNLIKELY(UseLocalPool())) - return PosixMemalignFromLocalPool(memptr, alignment, size); - GET_STACK_TRACE_MALLOC; - return asan_posix_memalign(memptr, alignment, size, &stack); -} - -INTERCEPTOR(void*, valloc, uptr size) { - GET_STACK_TRACE_MALLOC; - return asan_valloc(size, &stack); -} - -#if SANITIZER_INTERCEPT_PVALLOC -INTERCEPTOR(void*, pvalloc, uptr size) { - GET_STACK_TRACE_MALLOC; - return asan_pvalloc(size, &stack); -} -#endif // SANITIZER_INTERCEPT_PVALLOC - -INTERCEPTOR(void, malloc_stats, void) { - __asan_print_accumulated_stats(); -} - -#if SANITIZER_ANDROID -// Format of __libc_malloc_dispatch has changed in Android L. -// While we are moving towards a solution that does not depend on bionic -// internals, here is something to support both K* and L releases. 
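// Editor's illustrative sketch -- not taken from the imported compiler-rt
// sources. The Android-specific code that follows swaps a table of function
// pointers (__libc_malloc_dispatch) so bionic routes allocation calls through
// the wrappers defined above. This standalone example shows the general
// dispatch-table-swap pattern with hypothetical names; it assumes only the
// standard library, not bionic internals.
#include <cstddef>
#include <cstdio>
#include <cstdlib>

struct MallocDispatch {
  void *(*malloc_fn)(std::size_t bytes);
  void (*free_fn)(void *mem);
};

// Default table: plain libc behaviour.
static const MallocDispatch kDefaultDispatch = {std::malloc, std::free};

// The pointer a (hypothetical) C library consults on every call; replacing it
// reroutes all allocations without patching individual call sites.
static const MallocDispatch *g_malloc_dispatch = &kDefaultDispatch;

// Instrumented wrappers that log and then defer to libc.
static void *WrappedMalloc(std::size_t bytes) {
  std::printf("malloc(%zu)\n", bytes);
  return std::malloc(bytes);
}
static void WrappedFree(void *mem) {
  std::printf("free(%p)\n", mem);
  std::free(mem);
}

static const MallocDispatch kWrappedDispatch = {WrappedMalloc, WrappedFree};

// Analogue of ReplaceSystemMalloc(): swap the table in a single assignment.
static void ReplaceDispatchTable() { g_malloc_dispatch = &kWrappedDispatch; }

int main() {
  ReplaceDispatchTable();
  void *p = g_malloc_dispatch->malloc_fn(32);  // goes through WrappedMalloc
  g_malloc_dispatch->free_fn(p);               // goes through WrappedFree
}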
-struct MallocDebugK { - void *(*malloc)(uptr bytes); - void (*free)(void *mem); - void *(*calloc)(uptr n_elements, uptr elem_size); - void *(*realloc)(void *oldMem, uptr bytes); - void *(*memalign)(uptr alignment, uptr bytes); - uptr (*malloc_usable_size)(void *mem); -}; - -struct MallocDebugL { - void *(*calloc)(uptr n_elements, uptr elem_size); - void (*free)(void *mem); - fake_mallinfo (*mallinfo)(void); - void *(*malloc)(uptr bytes); - uptr (*malloc_usable_size)(void *mem); - void *(*memalign)(uptr alignment, uptr bytes); - int (*posix_memalign)(void **memptr, uptr alignment, uptr size); - void* (*pvalloc)(uptr size); - void *(*realloc)(void *oldMem, uptr bytes); - void* (*valloc)(uptr size); -}; - -ALIGNED(32) const MallocDebugK asan_malloc_dispatch_k = { - WRAP(malloc), WRAP(free), WRAP(calloc), - WRAP(realloc), WRAP(memalign), WRAP(malloc_usable_size)}; - -ALIGNED(32) const MallocDebugL asan_malloc_dispatch_l = { - WRAP(calloc), WRAP(free), WRAP(mallinfo), - WRAP(malloc), WRAP(malloc_usable_size), WRAP(memalign), - WRAP(posix_memalign), WRAP(pvalloc), WRAP(realloc), - WRAP(valloc)}; - -namespace __asan { -void ReplaceSystemMalloc() { - void **__libc_malloc_dispatch_p = - (void **)AsanDlSymNext("__libc_malloc_dispatch"); - if (__libc_malloc_dispatch_p) { - // Decide on K vs L dispatch format by the presence of - // __libc_malloc_default_dispatch export in libc. - void *default_dispatch_p = AsanDlSymNext("__libc_malloc_default_dispatch"); - if (default_dispatch_p) - *__libc_malloc_dispatch_p = (void *)&asan_malloc_dispatch_k; - else - *__libc_malloc_dispatch_p = (void *)&asan_malloc_dispatch_l; - } -} -} // namespace __asan - -#else // SANITIZER_ANDROID - -namespace __asan { -void ReplaceSystemMalloc() { -} -} // namespace __asan -#endif // SANITIZER_ANDROID - -#endif // SANITIZER_FREEBSD || SANITIZER_FUCHSIA || SANITIZER_LINUX || - // SANITIZER_NETBSD || SANITIZER_SOLARIS diff --git a/lib/asan/asan_malloc_linux.cpp b/lib/asan/asan_malloc_linux.cpp new file mode 100644 index 000000000000..faa8968a5d00 --- /dev/null +++ b/lib/asan/asan_malloc_linux.cpp @@ -0,0 +1,307 @@ +//===-- asan_malloc_linux.cpp ---------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Linux-specific malloc interception. +// We simply define functions like malloc, free, realloc, etc. +// They will replace the corresponding libc functions automagically. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_FREEBSD || SANITIZER_FUCHSIA || SANITIZER_LINUX || \ + SANITIZER_NETBSD || SANITIZER_RTEMS || SANITIZER_SOLARIS + +#include "sanitizer_common/sanitizer_allocator_checks.h" +#include "sanitizer_common/sanitizer_errno.h" +#include "sanitizer_common/sanitizer_tls_get_addr.h" +#include "asan_allocator.h" +#include "asan_interceptors.h" +#include "asan_internal.h" +#include "asan_malloc_local.h" +#include "asan_stack.h" + +// ---------------------- Replacement functions ---------------- {{{1 +using namespace __asan; + +static uptr allocated_for_dlsym; +static uptr last_dlsym_alloc_size_in_words; +static const uptr kDlsymAllocPoolSize = SANITIZER_RTEMS ? 
4096 : 1024; +static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize]; + +static INLINE bool IsInDlsymAllocPool(const void *ptr) { + uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym; + return off < allocated_for_dlsym * sizeof(alloc_memory_for_dlsym[0]); +} + +static void *AllocateFromLocalPool(uptr size_in_bytes) { + uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize; + void *mem = (void*)&alloc_memory_for_dlsym[allocated_for_dlsym]; + last_dlsym_alloc_size_in_words = size_in_words; + allocated_for_dlsym += size_in_words; + CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize); + return mem; +} + +static void DeallocateFromLocalPool(const void *ptr) { + // Hack: since glibc 2.27 dlsym no longer uses stack-allocated memory to store + // error messages and instead uses malloc followed by free. To avoid pool + // exhaustion due to long object filenames, handle that special case here. + uptr prev_offset = allocated_for_dlsym - last_dlsym_alloc_size_in_words; + void *prev_mem = (void*)&alloc_memory_for_dlsym[prev_offset]; + if (prev_mem == ptr) { + REAL(memset)(prev_mem, 0, last_dlsym_alloc_size_in_words * kWordSize); + allocated_for_dlsym = prev_offset; + last_dlsym_alloc_size_in_words = 0; + } +} + +static int PosixMemalignFromLocalPool(void **memptr, uptr alignment, + uptr size_in_bytes) { + if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) + return errno_EINVAL; + + CHECK(alignment >= kWordSize); + + uptr addr = (uptr)&alloc_memory_for_dlsym[allocated_for_dlsym]; + uptr aligned_addr = RoundUpTo(addr, alignment); + uptr aligned_size = RoundUpTo(size_in_bytes, kWordSize); + + uptr *end_mem = (uptr*)(aligned_addr + aligned_size); + uptr allocated = end_mem - alloc_memory_for_dlsym; + if (allocated >= kDlsymAllocPoolSize) + return errno_ENOMEM; + + allocated_for_dlsym = allocated; + *memptr = (void*)aligned_addr; + return 0; +} + +#if SANITIZER_RTEMS +void* MemalignFromLocalPool(uptr alignment, uptr size) { + void *ptr = nullptr; + alignment = Max(alignment, kWordSize); + PosixMemalignFromLocalPool(&ptr, alignment, size); + return ptr; +} + +bool IsFromLocalPool(const void *ptr) { + return IsInDlsymAllocPool(ptr); +} +#endif + +static INLINE bool MaybeInDlsym() { + // Fuchsia doesn't use dlsym-based interceptors. + return !SANITIZER_FUCHSIA && asan_init_is_running; +} + +static INLINE bool UseLocalPool() { + return EarlyMalloc() || MaybeInDlsym(); +} + +static void *ReallocFromLocalPool(void *ptr, uptr size) { + const uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym; + const uptr copy_size = Min(size, kDlsymAllocPoolSize - offset); + void *new_ptr; + if (UNLIKELY(UseLocalPool())) { + new_ptr = AllocateFromLocalPool(size); + } else { + ENSURE_ASAN_INITED(); + GET_STACK_TRACE_MALLOC; + new_ptr = asan_malloc(size, &stack); + } + internal_memcpy(new_ptr, ptr, copy_size); + return new_ptr; +} + +INTERCEPTOR(void, free, void *ptr) { + GET_STACK_TRACE_FREE; + if (UNLIKELY(IsInDlsymAllocPool(ptr))) { + DeallocateFromLocalPool(ptr); + return; + } + asan_free(ptr, &stack, FROM_MALLOC); +} + +#if SANITIZER_INTERCEPT_CFREE +INTERCEPTOR(void, cfree, void *ptr) { + GET_STACK_TRACE_FREE; + if (UNLIKELY(IsInDlsymAllocPool(ptr))) + return; + asan_free(ptr, &stack, FROM_MALLOC); +} +#endif // SANITIZER_INTERCEPT_CFREE + +INTERCEPTOR(void*, malloc, uptr size) { + if (UNLIKELY(UseLocalPool())) + // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym. 
+ return AllocateFromLocalPool(size); + ENSURE_ASAN_INITED(); + GET_STACK_TRACE_MALLOC; + return asan_malloc(size, &stack); +} + +INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) { + if (UNLIKELY(UseLocalPool())) + // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym. + return AllocateFromLocalPool(nmemb * size); + ENSURE_ASAN_INITED(); + GET_STACK_TRACE_MALLOC; + return asan_calloc(nmemb, size, &stack); +} + +INTERCEPTOR(void*, realloc, void *ptr, uptr size) { + if (UNLIKELY(IsInDlsymAllocPool(ptr))) + return ReallocFromLocalPool(ptr, size); + if (UNLIKELY(UseLocalPool())) + return AllocateFromLocalPool(size); + ENSURE_ASAN_INITED(); + GET_STACK_TRACE_MALLOC; + return asan_realloc(ptr, size, &stack); +} + +#if SANITIZER_INTERCEPT_REALLOCARRAY +INTERCEPTOR(void*, reallocarray, void *ptr, uptr nmemb, uptr size) { + ENSURE_ASAN_INITED(); + GET_STACK_TRACE_MALLOC; + return asan_reallocarray(ptr, nmemb, size, &stack); +} +#endif // SANITIZER_INTERCEPT_REALLOCARRAY + +#if SANITIZER_INTERCEPT_MEMALIGN +INTERCEPTOR(void*, memalign, uptr boundary, uptr size) { + GET_STACK_TRACE_MALLOC; + return asan_memalign(boundary, size, &stack, FROM_MALLOC); +} + +INTERCEPTOR(void*, __libc_memalign, uptr boundary, uptr size) { + GET_STACK_TRACE_MALLOC; + void *res = asan_memalign(boundary, size, &stack, FROM_MALLOC); + DTLS_on_libc_memalign(res, size); + return res; +} +#endif // SANITIZER_INTERCEPT_MEMALIGN + +#if SANITIZER_INTERCEPT_ALIGNED_ALLOC +INTERCEPTOR(void*, aligned_alloc, uptr boundary, uptr size) { + GET_STACK_TRACE_MALLOC; + return asan_aligned_alloc(boundary, size, &stack); +} +#endif // SANITIZER_INTERCEPT_ALIGNED_ALLOC + +INTERCEPTOR(uptr, malloc_usable_size, void *ptr) { + GET_CURRENT_PC_BP_SP; + (void)sp; + return asan_malloc_usable_size(ptr, pc, bp); +} + +#if SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO +// We avoid including malloc.h for portability reasons. +// man mallinfo says the fields are "long", but the implementation uses int. +// It doesn't matter much -- we just need to make sure that the libc's mallinfo +// is not called. +struct fake_mallinfo { + int x[10]; +}; + +INTERCEPTOR(struct fake_mallinfo, mallinfo, void) { + struct fake_mallinfo res; + REAL(memset)(&res, 0, sizeof(res)); + return res; +} + +INTERCEPTOR(int, mallopt, int cmd, int value) { + return 0; +} +#endif // SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO + +INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) { + if (UNLIKELY(UseLocalPool())) + return PosixMemalignFromLocalPool(memptr, alignment, size); + GET_STACK_TRACE_MALLOC; + return asan_posix_memalign(memptr, alignment, size, &stack); +} + +INTERCEPTOR(void*, valloc, uptr size) { + GET_STACK_TRACE_MALLOC; + return asan_valloc(size, &stack); +} + +#if SANITIZER_INTERCEPT_PVALLOC +INTERCEPTOR(void*, pvalloc, uptr size) { + GET_STACK_TRACE_MALLOC; + return asan_pvalloc(size, &stack); +} +#endif // SANITIZER_INTERCEPT_PVALLOC + +INTERCEPTOR(void, malloc_stats, void) { + __asan_print_accumulated_stats(); +} + +#if SANITIZER_ANDROID +// Format of __libc_malloc_dispatch has changed in Android L. +// While we are moving towards a solution that does not depend on bionic +// internals, here is something to support both K* and L releases. 
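// Editor's illustrative sketch -- not taken from the imported compiler-rt
// sources. The malloc/calloc/realloc interceptors earlier in this file serve
// allocations made by dlsym itself (before REAL(malloc) has been resolved)
// from a static word array. This standalone example shows that bump-pointer
// bootstrap pool in miniature; names are hypothetical and the real code adds
// alignment and reallocation handling.
#include <cassert>
#include <cstddef>
#include <cstdint>

namespace bootstrap {

constexpr std::size_t kPoolWords = 1024;
static std::uintptr_t pool[kPoolWords];  // static backing store
static std::size_t used_words = 0;       // bump pointer, counted in words

// Round a byte count up to whole words and hand out the next free slot.
void *Allocate(std::size_t bytes) {
  std::size_t words = (bytes + sizeof(pool[0]) - 1) / sizeof(pool[0]);
  assert(used_words + words <= kPoolWords && "bootstrap pool exhausted");
  void *mem = &pool[used_words];
  used_words += words;
  return mem;
}

// Pointers from this pool must never reach the real allocator's free().
bool Owns(const void *ptr) {
  std::uintptr_t off = reinterpret_cast<std::uintptr_t>(ptr) -
                       reinterpret_cast<std::uintptr_t>(pool);
  return off < used_words * sizeof(pool[0]);
}

}  // namespace bootstrap

// Usage: a malloc replacement would call bootstrap::Allocate() while the real
// allocator is still initializing, and a free replacement would first test
// bootstrap::Owns(ptr) and skip (or recycle) pool-owned blocks.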
+struct MallocDebugK { + void *(*malloc)(uptr bytes); + void (*free)(void *mem); + void *(*calloc)(uptr n_elements, uptr elem_size); + void *(*realloc)(void *oldMem, uptr bytes); + void *(*memalign)(uptr alignment, uptr bytes); + uptr (*malloc_usable_size)(void *mem); +}; + +struct MallocDebugL { + void *(*calloc)(uptr n_elements, uptr elem_size); + void (*free)(void *mem); + fake_mallinfo (*mallinfo)(void); + void *(*malloc)(uptr bytes); + uptr (*malloc_usable_size)(void *mem); + void *(*memalign)(uptr alignment, uptr bytes); + int (*posix_memalign)(void **memptr, uptr alignment, uptr size); + void* (*pvalloc)(uptr size); + void *(*realloc)(void *oldMem, uptr bytes); + void* (*valloc)(uptr size); +}; + +ALIGNED(32) const MallocDebugK asan_malloc_dispatch_k = { + WRAP(malloc), WRAP(free), WRAP(calloc), + WRAP(realloc), WRAP(memalign), WRAP(malloc_usable_size)}; + +ALIGNED(32) const MallocDebugL asan_malloc_dispatch_l = { + WRAP(calloc), WRAP(free), WRAP(mallinfo), + WRAP(malloc), WRAP(malloc_usable_size), WRAP(memalign), + WRAP(posix_memalign), WRAP(pvalloc), WRAP(realloc), + WRAP(valloc)}; + +namespace __asan { +void ReplaceSystemMalloc() { + void **__libc_malloc_dispatch_p = + (void **)AsanDlSymNext("__libc_malloc_dispatch"); + if (__libc_malloc_dispatch_p) { + // Decide on K vs L dispatch format by the presence of + // __libc_malloc_default_dispatch export in libc. + void *default_dispatch_p = AsanDlSymNext("__libc_malloc_default_dispatch"); + if (default_dispatch_p) + *__libc_malloc_dispatch_p = (void *)&asan_malloc_dispatch_k; + else + *__libc_malloc_dispatch_p = (void *)&asan_malloc_dispatch_l; + } +} +} // namespace __asan + +#else // SANITIZER_ANDROID + +namespace __asan { +void ReplaceSystemMalloc() { +} +} // namespace __asan +#endif // SANITIZER_ANDROID + +#endif // SANITIZER_FREEBSD || SANITIZER_FUCHSIA || SANITIZER_LINUX || + // SANITIZER_NETBSD || SANITIZER_SOLARIS diff --git a/lib/asan/asan_malloc_mac.cc b/lib/asan/asan_malloc_mac.cc deleted file mode 100644 index 06dc1c289267..000000000000 --- a/lib/asan/asan_malloc_mac.cc +++ /dev/null @@ -1,102 +0,0 @@ -//===-- asan_malloc_mac.cc ------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Mac-specific malloc interception. 
-//===----------------------------------------------------------------------===// - -#include "sanitizer_common/sanitizer_platform.h" -#if SANITIZER_MAC - -#include "asan_interceptors.h" -#include "asan_report.h" -#include "asan_stack.h" -#include "asan_stats.h" -#include "lsan/lsan_common.h" - -using namespace __asan; -#define COMMON_MALLOC_ZONE_NAME "asan" -#define COMMON_MALLOC_ENTER() ENSURE_ASAN_INITED() -#define COMMON_MALLOC_SANITIZER_INITIALIZED asan_inited -#define COMMON_MALLOC_FORCE_LOCK() asan_mz_force_lock() -#define COMMON_MALLOC_FORCE_UNLOCK() asan_mz_force_unlock() -#define COMMON_MALLOC_MEMALIGN(alignment, size) \ - GET_STACK_TRACE_MALLOC; \ - void *p = asan_memalign(alignment, size, &stack, FROM_MALLOC) -#define COMMON_MALLOC_MALLOC(size) \ - GET_STACK_TRACE_MALLOC; \ - void *p = asan_malloc(size, &stack) -#define COMMON_MALLOC_REALLOC(ptr, size) \ - GET_STACK_TRACE_MALLOC; \ - void *p = asan_realloc(ptr, size, &stack); -#define COMMON_MALLOC_CALLOC(count, size) \ - GET_STACK_TRACE_MALLOC; \ - void *p = asan_calloc(count, size, &stack); -#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \ - GET_STACK_TRACE_MALLOC; \ - int res = asan_posix_memalign(memptr, alignment, size, &stack); -#define COMMON_MALLOC_VALLOC(size) \ - GET_STACK_TRACE_MALLOC; \ - void *p = asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC); -#define COMMON_MALLOC_FREE(ptr) \ - GET_STACK_TRACE_FREE; \ - asan_free(ptr, &stack, FROM_MALLOC); -#define COMMON_MALLOC_SIZE(ptr) \ - uptr size = asan_mz_size(ptr); -#define COMMON_MALLOC_FILL_STATS(zone, stats) \ - AsanMallocStats malloc_stats; \ - FillMallocStatistics(&malloc_stats); \ - CHECK(sizeof(malloc_statistics_t) == sizeof(AsanMallocStats)); \ - internal_memcpy(stats, &malloc_stats, sizeof(malloc_statistics_t)); -#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \ - GET_STACK_TRACE_FREE; \ - ReportMacMzReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack); -#define COMMON_MALLOC_NAMESPACE __asan -#define COMMON_MALLOC_HAS_ZONE_ENUMERATOR 0 -#define COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT 1 - -#include "sanitizer_common/sanitizer_malloc_mac.inc" - -namespace COMMON_MALLOC_NAMESPACE { - -bool HandleDlopenInit() { - static_assert(SANITIZER_SUPPORTS_INIT_FOR_DLOPEN, - "Expected SANITIZER_SUPPORTS_INIT_FOR_DLOPEN to be true"); - // We have no reliable way of knowing how we are being loaded - // so make it a requirement on Apple platforms to set this environment - // variable to indicate that we want to perform initialization via - // dlopen(). - auto init_str = GetEnv("APPLE_ASAN_INIT_FOR_DLOPEN"); - if (!init_str) - return false; - if (internal_strncmp(init_str, "1", 1) != 0) - return false; - // When we are loaded via `dlopen()` path we still initialize the malloc zone - // so Symbolication clients (e.g. `leaks`) that load the ASan allocator can - // find an initialized malloc zone. 
- InitMallocZoneFields(); - return true; -} -} // namespace COMMON_MALLOC_NAMESPACE - -namespace { - -void mi_extra_init(sanitizer_malloc_introspection_t *mi) { - uptr last_byte_plus_one = 0; - mi->allocator_ptr = 0; - // Range is [begin_ptr, end_ptr) - __lsan::GetAllocatorGlobalRange(&(mi->allocator_ptr), &last_byte_plus_one); - CHECK_NE(mi->allocator_ptr, 0); - CHECK_GT(last_byte_plus_one, mi->allocator_ptr); - mi->allocator_size = last_byte_plus_one - (mi->allocator_ptr); - CHECK_GT(mi->allocator_size, 0); -} -} // namespace - -#endif diff --git a/lib/asan/asan_malloc_mac.cpp b/lib/asan/asan_malloc_mac.cpp new file mode 100644 index 000000000000..e8484685daed --- /dev/null +++ b/lib/asan/asan_malloc_mac.cpp @@ -0,0 +1,102 @@ +//===-- asan_malloc_mac.cpp -----------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Mac-specific malloc interception. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_MAC + +#include "asan_interceptors.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "asan_stats.h" +#include "lsan/lsan_common.h" + +using namespace __asan; +#define COMMON_MALLOC_ZONE_NAME "asan" +#define COMMON_MALLOC_ENTER() ENSURE_ASAN_INITED() +#define COMMON_MALLOC_SANITIZER_INITIALIZED asan_inited +#define COMMON_MALLOC_FORCE_LOCK() asan_mz_force_lock() +#define COMMON_MALLOC_FORCE_UNLOCK() asan_mz_force_unlock() +#define COMMON_MALLOC_MEMALIGN(alignment, size) \ + GET_STACK_TRACE_MALLOC; \ + void *p = asan_memalign(alignment, size, &stack, FROM_MALLOC) +#define COMMON_MALLOC_MALLOC(size) \ + GET_STACK_TRACE_MALLOC; \ + void *p = asan_malloc(size, &stack) +#define COMMON_MALLOC_REALLOC(ptr, size) \ + GET_STACK_TRACE_MALLOC; \ + void *p = asan_realloc(ptr, size, &stack); +#define COMMON_MALLOC_CALLOC(count, size) \ + GET_STACK_TRACE_MALLOC; \ + void *p = asan_calloc(count, size, &stack); +#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \ + GET_STACK_TRACE_MALLOC; \ + int res = asan_posix_memalign(memptr, alignment, size, &stack); +#define COMMON_MALLOC_VALLOC(size) \ + GET_STACK_TRACE_MALLOC; \ + void *p = asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC); +#define COMMON_MALLOC_FREE(ptr) \ + GET_STACK_TRACE_FREE; \ + asan_free(ptr, &stack, FROM_MALLOC); +#define COMMON_MALLOC_SIZE(ptr) \ + uptr size = asan_mz_size(ptr); +#define COMMON_MALLOC_FILL_STATS(zone, stats) \ + AsanMallocStats malloc_stats; \ + FillMallocStatistics(&malloc_stats); \ + CHECK(sizeof(malloc_statistics_t) == sizeof(AsanMallocStats)); \ + internal_memcpy(stats, &malloc_stats, sizeof(malloc_statistics_t)); +#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \ + GET_STACK_TRACE_FREE; \ + ReportMacMzReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack); +#define COMMON_MALLOC_NAMESPACE __asan +#define COMMON_MALLOC_HAS_ZONE_ENUMERATOR 0 +#define COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT 1 + +#include "sanitizer_common/sanitizer_malloc_mac.inc" + +namespace COMMON_MALLOC_NAMESPACE { + +bool HandleDlopenInit() { + static_assert(SANITIZER_SUPPORTS_INIT_FOR_DLOPEN, + "Expected 
SANITIZER_SUPPORTS_INIT_FOR_DLOPEN to be true"); + // We have no reliable way of knowing how we are being loaded + // so make it a requirement on Apple platforms to set this environment + // variable to indicate that we want to perform initialization via + // dlopen(). + auto init_str = GetEnv("APPLE_ASAN_INIT_FOR_DLOPEN"); + if (!init_str) + return false; + if (internal_strncmp(init_str, "1", 1) != 0) + return false; + // When we are loaded via `dlopen()` path we still initialize the malloc zone + // so Symbolication clients (e.g. `leaks`) that load the ASan allocator can + // find an initialized malloc zone. + InitMallocZoneFields(); + return true; +} +} // namespace COMMON_MALLOC_NAMESPACE + +namespace { + +void mi_extra_init(sanitizer_malloc_introspection_t *mi) { + uptr last_byte_plus_one = 0; + mi->allocator_ptr = 0; + // Range is [begin_ptr, end_ptr) + __lsan::GetAllocatorGlobalRange(&(mi->allocator_ptr), &last_byte_plus_one); + CHECK_NE(mi->allocator_ptr, 0); + CHECK_GT(last_byte_plus_one, mi->allocator_ptr); + mi->allocator_size = last_byte_plus_one - (mi->allocator_ptr); + CHECK_GT(mi->allocator_size, 0); +} +} // namespace + +#endif diff --git a/lib/asan/asan_malloc_win.cc b/lib/asan/asan_malloc_win.cc deleted file mode 100644 index 5fad55d6e284..000000000000 --- a/lib/asan/asan_malloc_win.cc +++ /dev/null @@ -1,553 +0,0 @@ -//===-- asan_malloc_win.cc ------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Windows-specific malloc interception. -//===----------------------------------------------------------------------===// - -#include "sanitizer_common/sanitizer_allocator_interface.h" -#include "sanitizer_common/sanitizer_platform.h" -#if SANITIZER_WINDOWS -#include "asan_allocator.h" -#include "asan_interceptors.h" -#include "asan_internal.h" -#include "asan_stack.h" -#include "interception/interception.h" -#include - -// Intentionally not including windows.h here, to avoid the risk of -// pulling in conflicting declarations of these functions. (With mingw-w64, -// there's a risk of windows.h pulling in stdint.h.) 
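// Editor's illustrative sketch -- not taken from the imported compiler-rt
// sources. The HEAP_*_SUPPORTED/UNSUPPORTED masks defined just below are
// checked by the Heap* interceptors in this file: any flag bit the
// replacement allocator does not understand causes a fall-back to the
// original function. This standalone example shows that convention with
// hypothetical names.
#include <cstddef>
#include <cstdlib>
#include <cstring>

constexpr unsigned long kZeroMemory = 0x00000008;  // analogue of HEAP_ZERO_MEMORY
constexpr unsigned long kSupportedFlags = kZeroMemory;
constexpr unsigned long kUnsupportedFlags = ~kSupportedFlags;

// Stand-in for the original (un-instrumented) allocator.
static void *RealAlloc(unsigned long /*flags*/, std::size_t size) {
  return std::malloc(size);
}

// Instrumented allocation: honor the flags we know, defer otherwise.
static void *InstrumentedAlloc(unsigned long flags, std::size_t size) {
  if ((flags & kUnsupportedFlags) != 0)
    return RealAlloc(flags, size);  // unknown semantics: don't guess
  void *p = std::malloc(size);      // instrumented path would go here
  if (p && (flags & kZeroMemory))
    std::memset(p, 0, size);        // emulate the zero-on-allocation flag
  return p;
}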
-typedef int BOOL; -typedef void *HANDLE; -typedef const void *LPCVOID; -typedef void *LPVOID; - -typedef unsigned long DWORD; -constexpr unsigned long HEAP_ZERO_MEMORY = 0x00000008; -constexpr unsigned long HEAP_REALLOC_IN_PLACE_ONLY = 0x00000010; -constexpr unsigned long HEAP_ALLOCATE_SUPPORTED_FLAGS = (HEAP_ZERO_MEMORY); -constexpr unsigned long HEAP_ALLOCATE_UNSUPPORTED_FLAGS = - (~HEAP_ALLOCATE_SUPPORTED_FLAGS); -constexpr unsigned long HEAP_FREE_SUPPORTED_FLAGS = (0); -constexpr unsigned long HEAP_FREE_UNSUPPORTED_FLAGS = - (~HEAP_ALLOCATE_SUPPORTED_FLAGS); -constexpr unsigned long HEAP_REALLOC_SUPPORTED_FLAGS = - (HEAP_REALLOC_IN_PLACE_ONLY | HEAP_ZERO_MEMORY); -constexpr unsigned long HEAP_REALLOC_UNSUPPORTED_FLAGS = - (~HEAP_ALLOCATE_SUPPORTED_FLAGS); - - -extern "C" { -LPVOID WINAPI HeapAlloc(HANDLE hHeap, DWORD dwFlags, size_t dwBytes); -LPVOID WINAPI HeapReAlloc(HANDLE hHeap, DWORD dwFlags, LPVOID lpMem, - size_t dwBytes); -BOOL WINAPI HeapFree(HANDLE hHeap, DWORD dwFlags, LPVOID lpMem); -size_t WINAPI HeapSize(HANDLE hHeap, DWORD dwFlags, LPCVOID lpMem); - -BOOL WINAPI HeapValidate(HANDLE hHeap, DWORD dwFlags, LPCVOID lpMem); -} - -using namespace __asan; // NOLINT - -// MT: Simply defining functions with the same signature in *.obj -// files overrides the standard functions in the CRT. -// MD: Memory allocation functions are defined in the CRT .dll, -// so we have to intercept them before they are called for the first time. - -#if ASAN_DYNAMIC -# define ALLOCATION_FUNCTION_ATTRIBUTE -#else -# define ALLOCATION_FUNCTION_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE -#endif - -extern "C" { -ALLOCATION_FUNCTION_ATTRIBUTE -size_t _msize(void *ptr) { - GET_CURRENT_PC_BP_SP; - (void)sp; - return asan_malloc_usable_size(ptr, pc, bp); -} - -ALLOCATION_FUNCTION_ATTRIBUTE -size_t _msize_base(void *ptr) { - return _msize(ptr); -} - -ALLOCATION_FUNCTION_ATTRIBUTE -void free(void *ptr) { - GET_STACK_TRACE_FREE; - return asan_free(ptr, &stack, FROM_MALLOC); -} - -ALLOCATION_FUNCTION_ATTRIBUTE -void _free_dbg(void *ptr, int) { - free(ptr); -} - -ALLOCATION_FUNCTION_ATTRIBUTE -void _free_base(void *ptr) { - free(ptr); -} - -ALLOCATION_FUNCTION_ATTRIBUTE -void *malloc(size_t size) { - GET_STACK_TRACE_MALLOC; - return asan_malloc(size, &stack); -} - -ALLOCATION_FUNCTION_ATTRIBUTE -void *_malloc_base(size_t size) { - return malloc(size); -} - -ALLOCATION_FUNCTION_ATTRIBUTE -void *_malloc_dbg(size_t size, int, const char *, int) { - return malloc(size); -} - -ALLOCATION_FUNCTION_ATTRIBUTE -void *calloc(size_t nmemb, size_t size) { - GET_STACK_TRACE_MALLOC; - return asan_calloc(nmemb, size, &stack); -} - -ALLOCATION_FUNCTION_ATTRIBUTE -void *_calloc_base(size_t nmemb, size_t size) { - return calloc(nmemb, size); -} - -ALLOCATION_FUNCTION_ATTRIBUTE -void *_calloc_dbg(size_t nmemb, size_t size, int, const char *, int) { - return calloc(nmemb, size); -} - -ALLOCATION_FUNCTION_ATTRIBUTE -void *_calloc_impl(size_t nmemb, size_t size, int *errno_tmp) { - return calloc(nmemb, size); -} - -ALLOCATION_FUNCTION_ATTRIBUTE -void *realloc(void *ptr, size_t size) { - GET_STACK_TRACE_MALLOC; - return asan_realloc(ptr, size, &stack); -} - -ALLOCATION_FUNCTION_ATTRIBUTE -void *_realloc_dbg(void *ptr, size_t size, int) { - UNREACHABLE("_realloc_dbg should not exist!"); - return 0; -} - -ALLOCATION_FUNCTION_ATTRIBUTE -void *_realloc_base(void *ptr, size_t size) { - return realloc(ptr, size); -} - -ALLOCATION_FUNCTION_ATTRIBUTE -void *_recalloc(void *p, size_t n, size_t elem_size) { - if (!p) - return calloc(n, 
elem_size); - const size_t size = n * elem_size; - if (elem_size != 0 && size / elem_size != n) - return 0; - - size_t old_size = _msize(p); - void *new_alloc = malloc(size); - if (new_alloc) { - REAL(memcpy)(new_alloc, p, Min(size, old_size)); - if (old_size < size) - REAL(memset)(((u8 *)new_alloc) + old_size, 0, size - old_size); - free(p); - } - return new_alloc; -} - -ALLOCATION_FUNCTION_ATTRIBUTE -void *_recalloc_base(void *p, size_t n, size_t elem_size) { - return _recalloc(p, n, elem_size); -} - -ALLOCATION_FUNCTION_ATTRIBUTE -void *_expand(void *memblock, size_t size) { - // _expand is used in realloc-like functions to resize the buffer if possible. - // We don't want memory to stand still while resizing buffers, so return 0. - return 0; -} - -ALLOCATION_FUNCTION_ATTRIBUTE -void *_expand_dbg(void *memblock, size_t size) { - return _expand(memblock, size); -} - -// TODO(timurrrr): Might want to add support for _aligned_* allocation -// functions to detect a bit more bugs. Those functions seem to wrap malloc(). - -int _CrtDbgReport(int, const char*, int, - const char*, const char*, ...) { - ShowStatsAndAbort(); -} - -int _CrtDbgReportW(int reportType, const wchar_t*, int, - const wchar_t*, const wchar_t*, ...) { - ShowStatsAndAbort(); -} - -int _CrtSetReportMode(int, int) { - return 0; -} -} // extern "C" - -#define OWNED_BY_RTL(heap, memory) \ - (!__sanitizer_get_ownership(memory) && HeapValidate(heap, 0, memory)) - -INTERCEPTOR_WINAPI(size_t, HeapSize, HANDLE hHeap, DWORD dwFlags, - LPCVOID lpMem) { - // If the RTL allocators are hooked we need to check whether the ASAN - // allocator owns the pointer we're about to use. Allocations occur before - // interception takes place, so if it is not owned by the RTL heap we can - // pass it to the ASAN heap for inspection. - if (flags()->windows_hook_rtl_allocators) { - if (!asan_inited || OWNED_BY_RTL(hHeap, lpMem)) - return REAL(HeapSize)(hHeap, dwFlags, lpMem); - } else { - CHECK(dwFlags == 0 && "unsupported heap flags"); - } - GET_CURRENT_PC_BP_SP; - (void)sp; - return asan_malloc_usable_size(lpMem, pc, bp); -} - -INTERCEPTOR_WINAPI(LPVOID, HeapAlloc, HANDLE hHeap, DWORD dwFlags, - size_t dwBytes) { - // If the ASAN runtime is not initialized, or we encounter an unsupported - // flag, fall back to the original allocator. - if (flags()->windows_hook_rtl_allocators) { - if (UNLIKELY(!asan_inited || - (dwFlags & HEAP_ALLOCATE_UNSUPPORTED_FLAGS) != 0)) { - return REAL(HeapAlloc)(hHeap, dwFlags, dwBytes); - } - } else { - // In the case that we don't hook the rtl allocators, - // this becomes an assert since there is no failover to the original - // allocator. - CHECK((HEAP_ALLOCATE_UNSUPPORTED_FLAGS & dwFlags) != 0 && - "unsupported flags"); - } - GET_STACK_TRACE_MALLOC; - void *p = asan_malloc(dwBytes, &stack); - // Reading MSDN suggests that the *entire* usable allocation is zeroed out. - // Otherwise it is difficult to HeapReAlloc with HEAP_ZERO_MEMORY. - // https://blogs.msdn.microsoft.com/oldnewthing/20120316-00/?p=8083 - if (p && (dwFlags & HEAP_ZERO_MEMORY)) { - GET_CURRENT_PC_BP_SP; - (void)sp; - auto usable_size = asan_malloc_usable_size(p, pc, bp); - internal_memset(p, 0, usable_size); - } - return p; -} - -INTERCEPTOR_WINAPI(BOOL, HeapFree, HANDLE hHeap, DWORD dwFlags, LPVOID lpMem) { - // Heap allocations happen before this function is hooked, so we must fall - // back to the original function if the pointer is not from the ASAN heap, - // or unsupported flags are provided. 
- if (flags()->windows_hook_rtl_allocators) { - if (OWNED_BY_RTL(hHeap, lpMem)) - return REAL(HeapFree)(hHeap, dwFlags, lpMem); - } else { - CHECK((HEAP_FREE_UNSUPPORTED_FLAGS & dwFlags) != 0 && "unsupported flags"); - } - GET_STACK_TRACE_FREE; - asan_free(lpMem, &stack, FROM_MALLOC); - return true; -} - -namespace __asan { -using AllocFunction = LPVOID(WINAPI *)(HANDLE, DWORD, size_t); -using ReAllocFunction = LPVOID(WINAPI *)(HANDLE, DWORD, LPVOID, size_t); -using SizeFunction = size_t(WINAPI *)(HANDLE, DWORD, LPVOID); -using FreeFunction = BOOL(WINAPI *)(HANDLE, DWORD, LPVOID); - -void *SharedReAlloc(ReAllocFunction reallocFunc, SizeFunction heapSizeFunc, - FreeFunction freeFunc, AllocFunction allocFunc, - HANDLE hHeap, DWORD dwFlags, LPVOID lpMem, size_t dwBytes) { - CHECK(reallocFunc && heapSizeFunc && freeFunc && allocFunc); - GET_STACK_TRACE_MALLOC; - GET_CURRENT_PC_BP_SP; - (void)sp; - if (flags()->windows_hook_rtl_allocators) { - enum AllocationOwnership { NEITHER = 0, ASAN = 1, RTL = 2 }; - AllocationOwnership ownershipState; - bool owned_rtlalloc = false; - bool owned_asan = __sanitizer_get_ownership(lpMem); - - if (!owned_asan) - owned_rtlalloc = HeapValidate(hHeap, 0, lpMem); - - if (owned_asan && !owned_rtlalloc) - ownershipState = ASAN; - else if (!owned_asan && owned_rtlalloc) - ownershipState = RTL; - else if (!owned_asan && !owned_rtlalloc) - ownershipState = NEITHER; - - // If this heap block which was allocated before the ASAN - // runtime came up, use the real HeapFree function. - if (UNLIKELY(!asan_inited)) { - return reallocFunc(hHeap, dwFlags, lpMem, dwBytes); - } - bool only_asan_supported_flags = - (HEAP_REALLOC_UNSUPPORTED_FLAGS & dwFlags) == 0; - - if (ownershipState == RTL || - (ownershipState == NEITHER && !only_asan_supported_flags)) { - if (only_asan_supported_flags) { - // if this is a conversion to ASAN upported flags, transfer this - // allocation to the ASAN allocator - void *replacement_alloc; - if (dwFlags & HEAP_ZERO_MEMORY) - replacement_alloc = asan_calloc(1, dwBytes, &stack); - else - replacement_alloc = asan_malloc(dwBytes, &stack); - if (replacement_alloc) { - size_t old_size = heapSizeFunc(hHeap, dwFlags, lpMem); - if (old_size == ((size_t)0) - 1) { - asan_free(replacement_alloc, &stack, FROM_MALLOC); - return nullptr; - } - REAL(memcpy)(replacement_alloc, lpMem, old_size); - freeFunc(hHeap, dwFlags, lpMem); - } - return replacement_alloc; - } else { - // owned by rtl or neither with unsupported ASAN flags, - // just pass back to original allocator - CHECK(ownershipState == RTL || ownershipState == NEITHER); - CHECK(!only_asan_supported_flags); - return reallocFunc(hHeap, dwFlags, lpMem, dwBytes); - } - } - - if (ownershipState == ASAN && !only_asan_supported_flags) { - // Conversion to unsupported flags allocation, - // transfer this allocation back to the original allocator. - void *replacement_alloc = allocFunc(hHeap, dwFlags, dwBytes); - size_t old_usable_size = 0; - if (replacement_alloc) { - old_usable_size = asan_malloc_usable_size(lpMem, pc, bp); - REAL(memcpy)(replacement_alloc, lpMem, - Min(dwBytes, old_usable_size)); - asan_free(lpMem, &stack, FROM_MALLOC); - } - return replacement_alloc; - } - - CHECK((ownershipState == ASAN || ownershipState == NEITHER) && - only_asan_supported_flags); - // At this point we should either be ASAN owned with ASAN supported flags - // or we owned by neither and have supported flags. - // Pass through even when it's neither since this could be a null realloc or - // UAF that ASAN needs to catch. 
- } else { - CHECK((HEAP_REALLOC_UNSUPPORTED_FLAGS & dwFlags) != 0 && - "unsupported flags"); - } - // asan_realloc will never reallocate in place, so for now this flag is - // unsupported until we figure out a way to fake this. - if (dwFlags & HEAP_REALLOC_IN_PLACE_ONLY) - return nullptr; - - // HeapReAlloc and HeapAlloc both happily accept 0 sized allocations. - // passing a 0 size into asan_realloc will free the allocation. - // To avoid this and keep behavior consistent, fudge the size if 0. - // (asan_malloc already does this) - if (dwBytes == 0) - dwBytes = 1; - - size_t old_size; - if (dwFlags & HEAP_ZERO_MEMORY) - old_size = asan_malloc_usable_size(lpMem, pc, bp); - - void *ptr = asan_realloc(lpMem, dwBytes, &stack); - if (ptr == nullptr) - return nullptr; - - if (dwFlags & HEAP_ZERO_MEMORY) { - size_t new_size = asan_malloc_usable_size(ptr, pc, bp); - if (old_size < new_size) - REAL(memset)(((u8 *)ptr) + old_size, 0, new_size - old_size); - } - - return ptr; -} -} // namespace __asan - -INTERCEPTOR_WINAPI(LPVOID, HeapReAlloc, HANDLE hHeap, DWORD dwFlags, - LPVOID lpMem, size_t dwBytes) { - return SharedReAlloc(REAL(HeapReAlloc), (SizeFunction)REAL(HeapSize), - REAL(HeapFree), REAL(HeapAlloc), hHeap, dwFlags, lpMem, - dwBytes); -} - -// The following functions are undocumented and subject to change. -// However, hooking them is necessary to hook Windows heap -// allocations with detours and their definitions are unlikely to change. -// Comments in /minkernel/ntos/rtl/heappublic.c indicate that these functions -// are part of the heap's public interface. -typedef unsigned long LOGICAL; - -// This function is documented as part of the Driver Development Kit but *not* -// the Windows Development Kit. -LOGICAL RtlFreeHeap(void* HeapHandle, DWORD Flags, - void* BaseAddress); - -// This function is documented as part of the Driver Development Kit but *not* -// the Windows Development Kit. -void* RtlAllocateHeap(void* HeapHandle, DWORD Flags, size_t Size); - -// This function is completely undocumented. -void* -RtlReAllocateHeap(void* HeapHandle, DWORD Flags, void* BaseAddress, - size_t Size); - -// This function is completely undocumented. -size_t RtlSizeHeap(void* HeapHandle, DWORD Flags, void* BaseAddress); - -INTERCEPTOR_WINAPI(size_t, RtlSizeHeap, HANDLE HeapHandle, DWORD Flags, - void* BaseAddress) { - if (!flags()->windows_hook_rtl_allocators || - UNLIKELY(!asan_inited || OWNED_BY_RTL(HeapHandle, BaseAddress))) { - return REAL(RtlSizeHeap)(HeapHandle, Flags, BaseAddress); - } - GET_CURRENT_PC_BP_SP; - (void)sp; - return asan_malloc_usable_size(BaseAddress, pc, bp); -} - -INTERCEPTOR_WINAPI(BOOL, RtlFreeHeap, HANDLE HeapHandle, DWORD Flags, - void* BaseAddress) { - // Heap allocations happen before this function is hooked, so we must fall - // back to the original function if the pointer is not from the ASAN heap, or - // unsupported flags are provided. - if (!flags()->windows_hook_rtl_allocators || - UNLIKELY((HEAP_FREE_UNSUPPORTED_FLAGS & Flags) != 0 || - OWNED_BY_RTL(HeapHandle, BaseAddress))) { - return REAL(RtlFreeHeap)(HeapHandle, Flags, BaseAddress); - } - GET_STACK_TRACE_FREE; - asan_free(BaseAddress, &stack, FROM_MALLOC); - return true; -} - -INTERCEPTOR_WINAPI(void*, RtlAllocateHeap, HANDLE HeapHandle, DWORD Flags, - size_t Size) { - // If the ASAN runtime is not initialized, or we encounter an unsupported - // flag, fall back to the original allocator. 
- if (!flags()->windows_hook_rtl_allocators || - UNLIKELY(!asan_inited || - (Flags & HEAP_ALLOCATE_UNSUPPORTED_FLAGS) != 0)) { - return REAL(RtlAllocateHeap)(HeapHandle, Flags, Size); - } - GET_STACK_TRACE_MALLOC; - void *p; - // Reading MSDN suggests that the *entire* usable allocation is zeroed out. - // Otherwise it is difficult to HeapReAlloc with HEAP_ZERO_MEMORY. - // https://blogs.msdn.microsoft.com/oldnewthing/20120316-00/?p=8083 - if (Flags & HEAP_ZERO_MEMORY) { - p = asan_calloc(Size, 1, &stack); - } else { - p = asan_malloc(Size, &stack); - } - return p; -} - -INTERCEPTOR_WINAPI(void*, RtlReAllocateHeap, HANDLE HeapHandle, DWORD Flags, - void* BaseAddress, size_t Size) { - // If it's actually a heap block which was allocated before the ASAN runtime - // came up, use the real RtlFreeHeap function. - if (!flags()->windows_hook_rtl_allocators) - return REAL(RtlReAllocateHeap)(HeapHandle, Flags, BaseAddress, Size); - - return SharedReAlloc(REAL(RtlReAllocateHeap), REAL(RtlSizeHeap), - REAL(RtlFreeHeap), REAL(RtlAllocateHeap), HeapHandle, - Flags, BaseAddress, Size); -} - -namespace __asan { - -static void TryToOverrideFunction(const char *fname, uptr new_func) { - // Failure here is not fatal. The CRT may not be present, and different CRT - // versions use different symbols. - if (!__interception::OverrideFunction(fname, new_func)) - VPrintf(2, "Failed to override function %s\n", fname); -} - -void ReplaceSystemMalloc() { -#if defined(ASAN_DYNAMIC) - TryToOverrideFunction("free", (uptr)free); - TryToOverrideFunction("_free_base", (uptr)free); - TryToOverrideFunction("malloc", (uptr)malloc); - TryToOverrideFunction("_malloc_base", (uptr)malloc); - TryToOverrideFunction("_malloc_crt", (uptr)malloc); - TryToOverrideFunction("calloc", (uptr)calloc); - TryToOverrideFunction("_calloc_base", (uptr)calloc); - TryToOverrideFunction("_calloc_crt", (uptr)calloc); - TryToOverrideFunction("realloc", (uptr)realloc); - TryToOverrideFunction("_realloc_base", (uptr)realloc); - TryToOverrideFunction("_realloc_crt", (uptr)realloc); - TryToOverrideFunction("_recalloc", (uptr)_recalloc); - TryToOverrideFunction("_recalloc_base", (uptr)_recalloc); - TryToOverrideFunction("_recalloc_crt", (uptr)_recalloc); - TryToOverrideFunction("_msize", (uptr)_msize); - TryToOverrideFunction("_msize_base", (uptr)_msize); - TryToOverrideFunction("_expand", (uptr)_expand); - TryToOverrideFunction("_expand_base", (uptr)_expand); - - if (flags()->windows_hook_rtl_allocators) { - INTERCEPT_FUNCTION(HeapSize); - INTERCEPT_FUNCTION(HeapFree); - INTERCEPT_FUNCTION(HeapReAlloc); - INTERCEPT_FUNCTION(HeapAlloc); - - // Undocumented functions must be intercepted by name, not by symbol. 
- __interception::OverrideFunction("RtlSizeHeap", (uptr)WRAP(RtlSizeHeap), - (uptr *)&REAL(RtlSizeHeap)); - __interception::OverrideFunction("RtlFreeHeap", (uptr)WRAP(RtlFreeHeap), - (uptr *)&REAL(RtlFreeHeap)); - __interception::OverrideFunction("RtlReAllocateHeap", - (uptr)WRAP(RtlReAllocateHeap), - (uptr *)&REAL(RtlReAllocateHeap)); - __interception::OverrideFunction("RtlAllocateHeap", - (uptr)WRAP(RtlAllocateHeap), - (uptr *)&REAL(RtlAllocateHeap)); - } else { -#define INTERCEPT_UCRT_FUNCTION(func) \ - if (!INTERCEPT_FUNCTION_DLLIMPORT("ucrtbase.dll", \ - "api-ms-win-core-heap-l1-1-0.dll", func)) \ - VPrintf(2, "Failed to intercept ucrtbase.dll import %s\n", #func); - INTERCEPT_UCRT_FUNCTION(HeapAlloc); - INTERCEPT_UCRT_FUNCTION(HeapFree); - INTERCEPT_UCRT_FUNCTION(HeapReAlloc); - INTERCEPT_UCRT_FUNCTION(HeapSize); -#undef INTERCEPT_UCRT_FUNCTION - } - // Recent versions of ucrtbase.dll appear to be built with PGO and LTCG, which - // enable cross-module inlining. This means our _malloc_base hook won't catch - // all CRT allocations. This code here patches the import table of - // ucrtbase.dll so that all attempts to use the lower-level win32 heap - // allocation API will be directed to ASan's heap. We don't currently - // intercept all calls to HeapAlloc. If we did, we would have to check on - // HeapFree whether the pointer came from ASan of from the system. - -#endif // defined(ASAN_DYNAMIC) -} -} // namespace __asan - -#endif // _WIN32 diff --git a/lib/asan/asan_malloc_win.cpp b/lib/asan/asan_malloc_win.cpp new file mode 100644 index 000000000000..13c6f652119b --- /dev/null +++ b/lib/asan/asan_malloc_win.cpp @@ -0,0 +1,554 @@ +//===-- asan_malloc_win.cpp -----------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Windows-specific malloc interception. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_allocator_interface.h" +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_WINDOWS +#include "asan_allocator.h" +#include "asan_interceptors.h" +#include "asan_internal.h" +#include "asan_stack.h" +#include "interception/interception.h" +#include + +// Intentionally not including windows.h here, to avoid the risk of +// pulling in conflicting declarations of these functions. (With mingw-w64, +// there's a risk of windows.h pulling in stdint.h.) 
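// Editor's illustrative sketch -- not taken from the imported compiler-rt
// sources. The HeapFree/RtlFreeHeap/SharedReAlloc interceptors in this file
// first decide which allocator owns a pointer (OWNED_BY_RTL, i.e.
// __sanitizer_get_ownership plus HeapValidate) and only then pick the
// instrumented or the original path. This standalone example shows that
// ownership-routing idea using a simple registry; the registry and function
// names are hypothetical.
#include <cstddef>
#include <cstdlib>
#include <set>

// Pointers handed out by the instrumented allocator.
static std::set<void *> g_owned;

static void *InstrumentedMalloc(std::size_t size) {
  void *p = std::malloc(size);
  if (p) g_owned.insert(p);
  return p;
}

// Route a free to whichever allocator actually owns the block.
static void RoutedFree(void *p) {
  auto it = g_owned.find(p);
  if (it != g_owned.end()) {
    g_owned.erase(it);
    std::free(p);  // instrumented path (asan_free in the real code)
  } else {
    std::free(p);  // fallback path (REAL(HeapFree) / REAL(RtlFreeHeap))
  }
}

int main() {
  void *mine = InstrumentedMalloc(16);   // owned by the instrumented allocator
  void *theirs = std::malloc(16);        // owned by the "system" allocator
  RoutedFree(mine);                      // takes the instrumented branch
  RoutedFree(theirs);                    // takes the fallback branch
}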
+typedef int BOOL; +typedef void *HANDLE; +typedef const void *LPCVOID; +typedef void *LPVOID; + +typedef unsigned long DWORD; +constexpr unsigned long HEAP_ZERO_MEMORY = 0x00000008; +constexpr unsigned long HEAP_REALLOC_IN_PLACE_ONLY = 0x00000010; +constexpr unsigned long HEAP_ALLOCATE_SUPPORTED_FLAGS = (HEAP_ZERO_MEMORY); +constexpr unsigned long HEAP_ALLOCATE_UNSUPPORTED_FLAGS = + (~HEAP_ALLOCATE_SUPPORTED_FLAGS); +constexpr unsigned long HEAP_FREE_SUPPORTED_FLAGS = (0); +constexpr unsigned long HEAP_FREE_UNSUPPORTED_FLAGS = + (~HEAP_ALLOCATE_SUPPORTED_FLAGS); +constexpr unsigned long HEAP_REALLOC_SUPPORTED_FLAGS = + (HEAP_REALLOC_IN_PLACE_ONLY | HEAP_ZERO_MEMORY); +constexpr unsigned long HEAP_REALLOC_UNSUPPORTED_FLAGS = + (~HEAP_ALLOCATE_SUPPORTED_FLAGS); + + +extern "C" { +LPVOID WINAPI HeapAlloc(HANDLE hHeap, DWORD dwFlags, size_t dwBytes); +LPVOID WINAPI HeapReAlloc(HANDLE hHeap, DWORD dwFlags, LPVOID lpMem, + size_t dwBytes); +BOOL WINAPI HeapFree(HANDLE hHeap, DWORD dwFlags, LPVOID lpMem); +size_t WINAPI HeapSize(HANDLE hHeap, DWORD dwFlags, LPCVOID lpMem); + +BOOL WINAPI HeapValidate(HANDLE hHeap, DWORD dwFlags, LPCVOID lpMem); +} + +using namespace __asan; + +// MT: Simply defining functions with the same signature in *.obj +// files overrides the standard functions in the CRT. +// MD: Memory allocation functions are defined in the CRT .dll, +// so we have to intercept them before they are called for the first time. + +#if ASAN_DYNAMIC +# define ALLOCATION_FUNCTION_ATTRIBUTE +#else +# define ALLOCATION_FUNCTION_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE +#endif + +extern "C" { +ALLOCATION_FUNCTION_ATTRIBUTE +size_t _msize(void *ptr) { + GET_CURRENT_PC_BP_SP; + (void)sp; + return asan_malloc_usable_size(ptr, pc, bp); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +size_t _msize_base(void *ptr) { + return _msize(ptr); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void free(void *ptr) { + GET_STACK_TRACE_FREE; + return asan_free(ptr, &stack, FROM_MALLOC); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void _free_dbg(void *ptr, int) { + free(ptr); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void _free_base(void *ptr) { + free(ptr); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *malloc(size_t size) { + GET_STACK_TRACE_MALLOC; + return asan_malloc(size, &stack); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_malloc_base(size_t size) { + return malloc(size); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_malloc_dbg(size_t size, int, const char *, int) { + return malloc(size); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *calloc(size_t nmemb, size_t size) { + GET_STACK_TRACE_MALLOC; + return asan_calloc(nmemb, size, &stack); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_calloc_base(size_t nmemb, size_t size) { + return calloc(nmemb, size); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_calloc_dbg(size_t nmemb, size_t size, int, const char *, int) { + return calloc(nmemb, size); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_calloc_impl(size_t nmemb, size_t size, int *errno_tmp) { + return calloc(nmemb, size); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *realloc(void *ptr, size_t size) { + GET_STACK_TRACE_MALLOC; + return asan_realloc(ptr, size, &stack); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_realloc_dbg(void *ptr, size_t size, int) { + UNREACHABLE("_realloc_dbg should not exist!"); + return 0; +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_realloc_base(void *ptr, size_t size) { + return realloc(ptr, size); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_recalloc(void *p, size_t n, size_t elem_size) { + if (!p) + return calloc(n, elem_size); + const 
size_t size = n * elem_size; + if (elem_size != 0 && size / elem_size != n) + return 0; + + size_t old_size = _msize(p); + void *new_alloc = malloc(size); + if (new_alloc) { + REAL(memcpy)(new_alloc, p, Min(size, old_size)); + if (old_size < size) + REAL(memset)(((u8 *)new_alloc) + old_size, 0, size - old_size); + free(p); + } + return new_alloc; +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_recalloc_base(void *p, size_t n, size_t elem_size) { + return _recalloc(p, n, elem_size); +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_expand(void *memblock, size_t size) { + // _expand is used in realloc-like functions to resize the buffer if possible. + // We don't want memory to stand still while resizing buffers, so return 0. + return 0; +} + +ALLOCATION_FUNCTION_ATTRIBUTE +void *_expand_dbg(void *memblock, size_t size) { + return _expand(memblock, size); +} + +// TODO(timurrrr): Might want to add support for _aligned_* allocation +// functions to detect a bit more bugs. Those functions seem to wrap malloc(). + +int _CrtDbgReport(int, const char*, int, + const char*, const char*, ...) { + ShowStatsAndAbort(); +} + +int _CrtDbgReportW(int reportType, const wchar_t*, int, + const wchar_t*, const wchar_t*, ...) { + ShowStatsAndAbort(); +} + +int _CrtSetReportMode(int, int) { + return 0; +} +} // extern "C" + +#define OWNED_BY_RTL(heap, memory) \ + (!__sanitizer_get_ownership(memory) && HeapValidate(heap, 0, memory)) + +INTERCEPTOR_WINAPI(size_t, HeapSize, HANDLE hHeap, DWORD dwFlags, + LPCVOID lpMem) { + // If the RTL allocators are hooked we need to check whether the ASAN + // allocator owns the pointer we're about to use. Allocations occur before + // interception takes place, so if it is not owned by the RTL heap we can + // pass it to the ASAN heap for inspection. + if (flags()->windows_hook_rtl_allocators) { + if (!asan_inited || OWNED_BY_RTL(hHeap, lpMem)) + return REAL(HeapSize)(hHeap, dwFlags, lpMem); + } else { + CHECK(dwFlags == 0 && "unsupported heap flags"); + } + GET_CURRENT_PC_BP_SP; + (void)sp; + return asan_malloc_usable_size(lpMem, pc, bp); +} + +INTERCEPTOR_WINAPI(LPVOID, HeapAlloc, HANDLE hHeap, DWORD dwFlags, + size_t dwBytes) { + // If the ASAN runtime is not initialized, or we encounter an unsupported + // flag, fall back to the original allocator. + if (flags()->windows_hook_rtl_allocators) { + if (UNLIKELY(!asan_inited || + (dwFlags & HEAP_ALLOCATE_UNSUPPORTED_FLAGS) != 0)) { + return REAL(HeapAlloc)(hHeap, dwFlags, dwBytes); + } + } else { + // In the case that we don't hook the rtl allocators, + // this becomes an assert since there is no failover to the original + // allocator. + CHECK((HEAP_ALLOCATE_UNSUPPORTED_FLAGS & dwFlags) != 0 && + "unsupported flags"); + } + GET_STACK_TRACE_MALLOC; + void *p = asan_malloc(dwBytes, &stack); + // Reading MSDN suggests that the *entire* usable allocation is zeroed out. + // Otherwise it is difficult to HeapReAlloc with HEAP_ZERO_MEMORY. + // https://blogs.msdn.microsoft.com/oldnewthing/20120316-00/?p=8083 + if (p && (dwFlags & HEAP_ZERO_MEMORY)) { + GET_CURRENT_PC_BP_SP; + (void)sp; + auto usable_size = asan_malloc_usable_size(p, pc, bp); + internal_memset(p, 0, usable_size); + } + return p; +} + +INTERCEPTOR_WINAPI(BOOL, HeapFree, HANDLE hHeap, DWORD dwFlags, LPVOID lpMem) { + // Heap allocations happen before this function is hooked, so we must fall + // back to the original function if the pointer is not from the ASAN heap, + // or unsupported flags are provided. 
+ if (flags()->windows_hook_rtl_allocators) { + if (OWNED_BY_RTL(hHeap, lpMem)) + return REAL(HeapFree)(hHeap, dwFlags, lpMem); + } else { + CHECK((HEAP_FREE_UNSUPPORTED_FLAGS & dwFlags) != 0 && "unsupported flags"); + } + GET_STACK_TRACE_FREE; + asan_free(lpMem, &stack, FROM_MALLOC); + return true; +} + +namespace __asan { +using AllocFunction = LPVOID(WINAPI *)(HANDLE, DWORD, size_t); +using ReAllocFunction = LPVOID(WINAPI *)(HANDLE, DWORD, LPVOID, size_t); +using SizeFunction = size_t(WINAPI *)(HANDLE, DWORD, LPVOID); +using FreeFunction = BOOL(WINAPI *)(HANDLE, DWORD, LPVOID); + +void *SharedReAlloc(ReAllocFunction reallocFunc, SizeFunction heapSizeFunc, + FreeFunction freeFunc, AllocFunction allocFunc, + HANDLE hHeap, DWORD dwFlags, LPVOID lpMem, size_t dwBytes) { + CHECK(reallocFunc && heapSizeFunc && freeFunc && allocFunc); + GET_STACK_TRACE_MALLOC; + GET_CURRENT_PC_BP_SP; + (void)sp; + if (flags()->windows_hook_rtl_allocators) { + enum AllocationOwnership { NEITHER = 0, ASAN = 1, RTL = 2 }; + AllocationOwnership ownershipState; + bool owned_rtlalloc = false; + bool owned_asan = __sanitizer_get_ownership(lpMem); + + if (!owned_asan) + owned_rtlalloc = HeapValidate(hHeap, 0, lpMem); + + if (owned_asan && !owned_rtlalloc) + ownershipState = ASAN; + else if (!owned_asan && owned_rtlalloc) + ownershipState = RTL; + else if (!owned_asan && !owned_rtlalloc) + ownershipState = NEITHER; + + // If this heap block which was allocated before the ASAN + // runtime came up, use the real HeapFree function. + if (UNLIKELY(!asan_inited)) { + return reallocFunc(hHeap, dwFlags, lpMem, dwBytes); + } + bool only_asan_supported_flags = + (HEAP_REALLOC_UNSUPPORTED_FLAGS & dwFlags) == 0; + + if (ownershipState == RTL || + (ownershipState == NEITHER && !only_asan_supported_flags)) { + if (only_asan_supported_flags) { + // if this is a conversion to ASAN upported flags, transfer this + // allocation to the ASAN allocator + void *replacement_alloc; + if (dwFlags & HEAP_ZERO_MEMORY) + replacement_alloc = asan_calloc(1, dwBytes, &stack); + else + replacement_alloc = asan_malloc(dwBytes, &stack); + if (replacement_alloc) { + size_t old_size = heapSizeFunc(hHeap, dwFlags, lpMem); + if (old_size == ((size_t)0) - 1) { + asan_free(replacement_alloc, &stack, FROM_MALLOC); + return nullptr; + } + REAL(memcpy)(replacement_alloc, lpMem, old_size); + freeFunc(hHeap, dwFlags, lpMem); + } + return replacement_alloc; + } else { + // owned by rtl or neither with unsupported ASAN flags, + // just pass back to original allocator + CHECK(ownershipState == RTL || ownershipState == NEITHER); + CHECK(!only_asan_supported_flags); + return reallocFunc(hHeap, dwFlags, lpMem, dwBytes); + } + } + + if (ownershipState == ASAN && !only_asan_supported_flags) { + // Conversion to unsupported flags allocation, + // transfer this allocation back to the original allocator. + void *replacement_alloc = allocFunc(hHeap, dwFlags, dwBytes); + size_t old_usable_size = 0; + if (replacement_alloc) { + old_usable_size = asan_malloc_usable_size(lpMem, pc, bp); + REAL(memcpy)(replacement_alloc, lpMem, + Min(dwBytes, old_usable_size)); + asan_free(lpMem, &stack, FROM_MALLOC); + } + return replacement_alloc; + } + + CHECK((ownershipState == ASAN || ownershipState == NEITHER) && + only_asan_supported_flags); + // At this point we should either be ASAN owned with ASAN supported flags + // or we owned by neither and have supported flags. + // Pass through even when it's neither since this could be a null realloc or + // UAF that ASAN needs to catch. 
+ } else { + CHECK((HEAP_REALLOC_UNSUPPORTED_FLAGS & dwFlags) != 0 && + "unsupported flags"); + } + // asan_realloc will never reallocate in place, so for now this flag is + // unsupported until we figure out a way to fake this. + if (dwFlags & HEAP_REALLOC_IN_PLACE_ONLY) + return nullptr; + + // HeapReAlloc and HeapAlloc both happily accept 0 sized allocations. + // passing a 0 size into asan_realloc will free the allocation. + // To avoid this and keep behavior consistent, fudge the size if 0. + // (asan_malloc already does this) + if (dwBytes == 0) + dwBytes = 1; + + size_t old_size; + if (dwFlags & HEAP_ZERO_MEMORY) + old_size = asan_malloc_usable_size(lpMem, pc, bp); + + void *ptr = asan_realloc(lpMem, dwBytes, &stack); + if (ptr == nullptr) + return nullptr; + + if (dwFlags & HEAP_ZERO_MEMORY) { + size_t new_size = asan_malloc_usable_size(ptr, pc, bp); + if (old_size < new_size) + REAL(memset)(((u8 *)ptr) + old_size, 0, new_size - old_size); + } + + return ptr; +} +} // namespace __asan + +INTERCEPTOR_WINAPI(LPVOID, HeapReAlloc, HANDLE hHeap, DWORD dwFlags, + LPVOID lpMem, size_t dwBytes) { + return SharedReAlloc(REAL(HeapReAlloc), (SizeFunction)REAL(HeapSize), + REAL(HeapFree), REAL(HeapAlloc), hHeap, dwFlags, lpMem, + dwBytes); +} + +// The following functions are undocumented and subject to change. +// However, hooking them is necessary to hook Windows heap +// allocations with detours and their definitions are unlikely to change. +// Comments in /minkernel/ntos/rtl/heappublic.c indicate that these functions +// are part of the heap's public interface. +typedef unsigned long LOGICAL; + +// This function is documented as part of the Driver Development Kit but *not* +// the Windows Development Kit. +LOGICAL RtlFreeHeap(void* HeapHandle, DWORD Flags, + void* BaseAddress); + +// This function is documented as part of the Driver Development Kit but *not* +// the Windows Development Kit. +void* RtlAllocateHeap(void* HeapHandle, DWORD Flags, size_t Size); + +// This function is completely undocumented. +void* +RtlReAllocateHeap(void* HeapHandle, DWORD Flags, void* BaseAddress, + size_t Size); + +// This function is completely undocumented. +size_t RtlSizeHeap(void* HeapHandle, DWORD Flags, void* BaseAddress); + +INTERCEPTOR_WINAPI(size_t, RtlSizeHeap, HANDLE HeapHandle, DWORD Flags, + void* BaseAddress) { + if (!flags()->windows_hook_rtl_allocators || + UNLIKELY(!asan_inited || OWNED_BY_RTL(HeapHandle, BaseAddress))) { + return REAL(RtlSizeHeap)(HeapHandle, Flags, BaseAddress); + } + GET_CURRENT_PC_BP_SP; + (void)sp; + return asan_malloc_usable_size(BaseAddress, pc, bp); +} + +INTERCEPTOR_WINAPI(BOOL, RtlFreeHeap, HANDLE HeapHandle, DWORD Flags, + void* BaseAddress) { + // Heap allocations happen before this function is hooked, so we must fall + // back to the original function if the pointer is not from the ASAN heap, or + // unsupported flags are provided. + if (!flags()->windows_hook_rtl_allocators || + UNLIKELY((HEAP_FREE_UNSUPPORTED_FLAGS & Flags) != 0 || + OWNED_BY_RTL(HeapHandle, BaseAddress))) { + return REAL(RtlFreeHeap)(HeapHandle, Flags, BaseAddress); + } + GET_STACK_TRACE_FREE; + asan_free(BaseAddress, &stack, FROM_MALLOC); + return true; +} + +INTERCEPTOR_WINAPI(void*, RtlAllocateHeap, HANDLE HeapHandle, DWORD Flags, + size_t Size) { + // If the ASAN runtime is not initialized, or we encounter an unsupported + // flag, fall back to the original allocator. 
+ if (!flags()->windows_hook_rtl_allocators || + UNLIKELY(!asan_inited || + (Flags & HEAP_ALLOCATE_UNSUPPORTED_FLAGS) != 0)) { + return REAL(RtlAllocateHeap)(HeapHandle, Flags, Size); + } + GET_STACK_TRACE_MALLOC; + void *p; + // Reading MSDN suggests that the *entire* usable allocation is zeroed out. + // Otherwise it is difficult to HeapReAlloc with HEAP_ZERO_MEMORY. + // https://blogs.msdn.microsoft.com/oldnewthing/20120316-00/?p=8083 + if (Flags & HEAP_ZERO_MEMORY) { + p = asan_calloc(Size, 1, &stack); + } else { + p = asan_malloc(Size, &stack); + } + return p; +} + +INTERCEPTOR_WINAPI(void*, RtlReAllocateHeap, HANDLE HeapHandle, DWORD Flags, + void* BaseAddress, size_t Size) { + // If it's actually a heap block which was allocated before the ASAN runtime + // came up, use the real RtlFreeHeap function. + if (!flags()->windows_hook_rtl_allocators) + return REAL(RtlReAllocateHeap)(HeapHandle, Flags, BaseAddress, Size); + + return SharedReAlloc(REAL(RtlReAllocateHeap), REAL(RtlSizeHeap), + REAL(RtlFreeHeap), REAL(RtlAllocateHeap), HeapHandle, + Flags, BaseAddress, Size); +} + +namespace __asan { + +static void TryToOverrideFunction(const char *fname, uptr new_func) { + // Failure here is not fatal. The CRT may not be present, and different CRT + // versions use different symbols. + if (!__interception::OverrideFunction(fname, new_func)) + VPrintf(2, "Failed to override function %s\n", fname); +} + +void ReplaceSystemMalloc() { +#if defined(ASAN_DYNAMIC) + TryToOverrideFunction("free", (uptr)free); + TryToOverrideFunction("_free_base", (uptr)free); + TryToOverrideFunction("malloc", (uptr)malloc); + TryToOverrideFunction("_malloc_base", (uptr)malloc); + TryToOverrideFunction("_malloc_crt", (uptr)malloc); + TryToOverrideFunction("calloc", (uptr)calloc); + TryToOverrideFunction("_calloc_base", (uptr)calloc); + TryToOverrideFunction("_calloc_crt", (uptr)calloc); + TryToOverrideFunction("realloc", (uptr)realloc); + TryToOverrideFunction("_realloc_base", (uptr)realloc); + TryToOverrideFunction("_realloc_crt", (uptr)realloc); + TryToOverrideFunction("_recalloc", (uptr)_recalloc); + TryToOverrideFunction("_recalloc_base", (uptr)_recalloc); + TryToOverrideFunction("_recalloc_crt", (uptr)_recalloc); + TryToOverrideFunction("_msize", (uptr)_msize); + TryToOverrideFunction("_msize_base", (uptr)_msize); + TryToOverrideFunction("_expand", (uptr)_expand); + TryToOverrideFunction("_expand_base", (uptr)_expand); + + if (flags()->windows_hook_rtl_allocators) { + INTERCEPT_FUNCTION(HeapSize); + INTERCEPT_FUNCTION(HeapFree); + INTERCEPT_FUNCTION(HeapReAlloc); + INTERCEPT_FUNCTION(HeapAlloc); + + // Undocumented functions must be intercepted by name, not by symbol. 
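// Editorial note (not part of the upstream patch): the four overrides below
// resolve the ntdll Rtl* heap routines by their exported names at run time,
// since they are not declared in the usual SDK headers. WRAP(f) is the
// interception macro naming the replacement defined with INTERCEPTOR_WINAPI
// above, and the third argument receives a pointer to the original routine so
// that REAL(f) can still forward to the system heap when needed.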
+ __interception::OverrideFunction("RtlSizeHeap", (uptr)WRAP(RtlSizeHeap), + (uptr *)&REAL(RtlSizeHeap)); + __interception::OverrideFunction("RtlFreeHeap", (uptr)WRAP(RtlFreeHeap), + (uptr *)&REAL(RtlFreeHeap)); + __interception::OverrideFunction("RtlReAllocateHeap", + (uptr)WRAP(RtlReAllocateHeap), + (uptr *)&REAL(RtlReAllocateHeap)); + __interception::OverrideFunction("RtlAllocateHeap", + (uptr)WRAP(RtlAllocateHeap), + (uptr *)&REAL(RtlAllocateHeap)); + } else { +#define INTERCEPT_UCRT_FUNCTION(func) \ + if (!INTERCEPT_FUNCTION_DLLIMPORT( \ + "ucrtbase.dll", "api-ms-win-core-heap-l1-1-0.dll", func)) { \ + VPrintf(2, "Failed to intercept ucrtbase.dll import %s\n", #func); \ + } + INTERCEPT_UCRT_FUNCTION(HeapAlloc); + INTERCEPT_UCRT_FUNCTION(HeapFree); + INTERCEPT_UCRT_FUNCTION(HeapReAlloc); + INTERCEPT_UCRT_FUNCTION(HeapSize); +#undef INTERCEPT_UCRT_FUNCTION + } + // Recent versions of ucrtbase.dll appear to be built with PGO and LTCG, which + // enable cross-module inlining. This means our _malloc_base hook won't catch + // all CRT allocations. This code here patches the import table of + // ucrtbase.dll so that all attempts to use the lower-level win32 heap + // allocation API will be directed to ASan's heap. We don't currently + // intercept all calls to HeapAlloc. If we did, we would have to check on + // HeapFree whether the pointer came from ASan of from the system. + +#endif // defined(ASAN_DYNAMIC) +} +} // namespace __asan + +#endif // _WIN32 diff --git a/lib/asan/asan_memory_profile.cc b/lib/asan/asan_memory_profile.cc deleted file mode 100644 index 87d874d2f274..000000000000 --- a/lib/asan/asan_memory_profile.cc +++ /dev/null @@ -1,129 +0,0 @@ -//===-- asan_memory_profile.cc.cc -----------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// This file implements __sanitizer_print_memory_profile. 
-//===----------------------------------------------------------------------===// - -#include "sanitizer_common/sanitizer_common.h" -#include "sanitizer_common/sanitizer_stackdepot.h" -#include "sanitizer_common/sanitizer_stacktrace.h" -#include "sanitizer_common/sanitizer_stoptheworld.h" -#include "lsan/lsan_common.h" -#include "asan/asan_allocator.h" - -#if CAN_SANITIZE_LEAKS - -namespace __asan { - -struct AllocationSite { - u32 id; - uptr total_size; - uptr count; -}; - -class HeapProfile { - public: - HeapProfile() { allocations_.reserve(1024); } - - void ProcessChunk(const AsanChunkView &cv) { - if (cv.IsAllocated()) { - total_allocated_user_size_ += cv.UsedSize(); - total_allocated_count_++; - u32 id = cv.GetAllocStackId(); - if (id) - Insert(id, cv.UsedSize()); - } else if (cv.IsQuarantined()) { - total_quarantined_user_size_ += cv.UsedSize(); - total_quarantined_count_++; - } else { - total_other_count_++; - } - } - - void Print(uptr top_percent, uptr max_number_of_contexts) { - Sort(allocations_.data(), allocations_.size(), - [](const AllocationSite &a, const AllocationSite &b) { - return a.total_size > b.total_size; - }); - CHECK(total_allocated_user_size_); - uptr total_shown = 0; - Printf("Live Heap Allocations: %zd bytes in %zd chunks; quarantined: " - "%zd bytes in %zd chunks; %zd other chunks; total chunks: %zd; " - "showing top %zd%% (at most %zd unique contexts)\n", - total_allocated_user_size_, total_allocated_count_, - total_quarantined_user_size_, total_quarantined_count_, - total_other_count_, total_allocated_count_ + - total_quarantined_count_ + total_other_count_, top_percent, - max_number_of_contexts); - for (uptr i = 0; i < Min(allocations_.size(), max_number_of_contexts); - i++) { - auto &a = allocations_[i]; - Printf("%zd byte(s) (%zd%%) in %zd allocation(s)\n", a.total_size, - a.total_size * 100 / total_allocated_user_size_, a.count); - StackDepotGet(a.id).Print(); - total_shown += a.total_size; - if (total_shown * 100 / total_allocated_user_size_ > top_percent) - break; - } - } - - private: - uptr total_allocated_user_size_ = 0; - uptr total_allocated_count_ = 0; - uptr total_quarantined_user_size_ = 0; - uptr total_quarantined_count_ = 0; - uptr total_other_count_ = 0; - InternalMmapVector allocations_; - - void Insert(u32 id, uptr size) { - // Linear lookup will be good enough for most cases (although not all). 
- for (uptr i = 0; i < allocations_.size(); i++) { - if (allocations_[i].id == id) { - allocations_[i].total_size += size; - allocations_[i].count++; - return; - } - } - allocations_.push_back({id, size, 1}); - } -}; - -static void ChunkCallback(uptr chunk, void *arg) { - reinterpret_cast(arg)->ProcessChunk( - FindHeapChunkByAllocBeg(chunk)); -} - -static void MemoryProfileCB(const SuspendedThreadsList &suspended_threads_list, - void *argument) { - HeapProfile hp; - __lsan::ForEachChunk(ChunkCallback, &hp); - uptr *Arg = reinterpret_cast(argument); - hp.Print(Arg[0], Arg[1]); - - if (Verbosity()) - __asan_print_accumulated_stats(); -} - -} // namespace __asan - -#endif // CAN_SANITIZE_LEAKS - -extern "C" { -SANITIZER_INTERFACE_ATTRIBUTE -void __sanitizer_print_memory_profile(uptr top_percent, - uptr max_number_of_contexts) { -#if CAN_SANITIZE_LEAKS - uptr Arg[2]; - Arg[0] = top_percent; - Arg[1] = max_number_of_contexts; - __sanitizer::StopTheWorld(__asan::MemoryProfileCB, Arg); -#endif // CAN_SANITIZE_LEAKS -} -} // extern "C" diff --git a/lib/asan/asan_memory_profile.cpp b/lib/asan/asan_memory_profile.cpp new file mode 100644 index 000000000000..4fcd5600ed1a --- /dev/null +++ b/lib/asan/asan_memory_profile.cpp @@ -0,0 +1,129 @@ +//===-- asan_memory_profile.cpp ----------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// This file implements __sanitizer_print_memory_profile. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +#include "sanitizer_common/sanitizer_stacktrace.h" +#include "sanitizer_common/sanitizer_stoptheworld.h" +#include "lsan/lsan_common.h" +#include "asan/asan_allocator.h" + +#if CAN_SANITIZE_LEAKS + +namespace __asan { + +struct AllocationSite { + u32 id; + uptr total_size; + uptr count; +}; + +class HeapProfile { + public: + HeapProfile() { allocations_.reserve(1024); } + + void ProcessChunk(const AsanChunkView &cv) { + if (cv.IsAllocated()) { + total_allocated_user_size_ += cv.UsedSize(); + total_allocated_count_++; + u32 id = cv.GetAllocStackId(); + if (id) + Insert(id, cv.UsedSize()); + } else if (cv.IsQuarantined()) { + total_quarantined_user_size_ += cv.UsedSize(); + total_quarantined_count_++; + } else { + total_other_count_++; + } + } + + void Print(uptr top_percent, uptr max_number_of_contexts) { + Sort(allocations_.data(), allocations_.size(), + [](const AllocationSite &a, const AllocationSite &b) { + return a.total_size > b.total_size; + }); + CHECK(total_allocated_user_size_); + uptr total_shown = 0; + Printf("Live Heap Allocations: %zd bytes in %zd chunks; quarantined: " + "%zd bytes in %zd chunks; %zd other chunks; total chunks: %zd; " + "showing top %zd%% (at most %zd unique contexts)\n", + total_allocated_user_size_, total_allocated_count_, + total_quarantined_user_size_, total_quarantined_count_, + total_other_count_, total_allocated_count_ + + total_quarantined_count_ + total_other_count_, top_percent, + max_number_of_contexts); + for (uptr i = 0; i < Min(allocations_.size(), max_number_of_contexts); + i++) { + auto &a = allocations_[i]; + Printf("%zd byte(s) 
(%zd%%) in %zd allocation(s)\n", a.total_size, + a.total_size * 100 / total_allocated_user_size_, a.count); + StackDepotGet(a.id).Print(); + total_shown += a.total_size; + if (total_shown * 100 / total_allocated_user_size_ > top_percent) + break; + } + } + + private: + uptr total_allocated_user_size_ = 0; + uptr total_allocated_count_ = 0; + uptr total_quarantined_user_size_ = 0; + uptr total_quarantined_count_ = 0; + uptr total_other_count_ = 0; + InternalMmapVector allocations_; + + void Insert(u32 id, uptr size) { + // Linear lookup will be good enough for most cases (although not all). + for (uptr i = 0; i < allocations_.size(); i++) { + if (allocations_[i].id == id) { + allocations_[i].total_size += size; + allocations_[i].count++; + return; + } + } + allocations_.push_back({id, size, 1}); + } +}; + +static void ChunkCallback(uptr chunk, void *arg) { + reinterpret_cast(arg)->ProcessChunk( + FindHeapChunkByAllocBeg(chunk)); +} + +static void MemoryProfileCB(const SuspendedThreadsList &suspended_threads_list, + void *argument) { + HeapProfile hp; + __lsan::ForEachChunk(ChunkCallback, &hp); + uptr *Arg = reinterpret_cast(argument); + hp.Print(Arg[0], Arg[1]); + + if (Verbosity()) + __asan_print_accumulated_stats(); +} + +} // namespace __asan + +#endif // CAN_SANITIZE_LEAKS + +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_print_memory_profile(uptr top_percent, + uptr max_number_of_contexts) { +#if CAN_SANITIZE_LEAKS + uptr Arg[2]; + Arg[0] = top_percent; + Arg[1] = max_number_of_contexts; + __sanitizer::StopTheWorld(__asan::MemoryProfileCB, Arg); +#endif // CAN_SANITIZE_LEAKS +} +} // extern "C" diff --git a/lib/asan/asan_new_delete.cc b/lib/asan/asan_new_delete.cc deleted file mode 100644 index 5f51d12b1b5a..000000000000 --- a/lib/asan/asan_new_delete.cc +++ /dev/null @@ -1,204 +0,0 @@ -//===-- asan_interceptors.cc ----------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Interceptors for operators new and delete. -//===----------------------------------------------------------------------===// - -#include "asan_allocator.h" -#include "asan_internal.h" -#include "asan_malloc_local.h" -#include "asan_report.h" -#include "asan_stack.h" - -#include "interception/interception.h" - -#include - -// C++ operators can't have dllexport attributes on Windows. We export them -// anyway by passing extra -export flags to the linker, which is exactly that -// dllexport would normally do. We need to export them in order to make the -// VS2015 dynamic CRT (MD) work. 
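The linker-directive trick described in the comment above can be shown in isolation. The following is a minimal editorial sketch, not part of the patch: it assumes the MSVC toolchain and an x64 build (so the extern "C" name carries no decoration), and the function name my_export_demo is invented for illustration. The COMMENT_EXPORT macro that follows applies the same mechanism to the mangled operator new/delete names.

    // Sketch only: #pragma comment(linker, ...) embeds a directive that the
    // MSVC linker honors when this object file is linked into a DLL.
    extern "C" int my_export_demo(int x) { return x + 1; }

    // Equivalent to passing /EXPORT:my_export_demo on the link line; C++
    // operators cannot carry __declspec(dllexport), so the code below emits
    // the same directive for their mangled names instead.
    #pragma comment(linker, "/export:my_export_demo")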
-#if SANITIZER_WINDOWS && defined(_MSC_VER) -#define CXX_OPERATOR_ATTRIBUTE -#define COMMENT_EXPORT(sym) __pragma(comment(linker, "/export:" sym)) -#ifdef _WIN64 -COMMENT_EXPORT("??2@YAPEAX_K@Z") // operator new -COMMENT_EXPORT("??2@YAPEAX_KAEBUnothrow_t@std@@@Z") // operator new nothrow -COMMENT_EXPORT("??3@YAXPEAX@Z") // operator delete -COMMENT_EXPORT("??3@YAXPEAX_K@Z") // sized operator delete -COMMENT_EXPORT("??_U@YAPEAX_K@Z") // operator new[] -COMMENT_EXPORT("??_V@YAXPEAX@Z") // operator delete[] -#else -COMMENT_EXPORT("??2@YAPAXI@Z") // operator new -COMMENT_EXPORT("??2@YAPAXIABUnothrow_t@std@@@Z") // operator new nothrow -COMMENT_EXPORT("??3@YAXPAX@Z") // operator delete -COMMENT_EXPORT("??3@YAXPAXI@Z") // sized operator delete -COMMENT_EXPORT("??_U@YAPAXI@Z") // operator new[] -COMMENT_EXPORT("??_V@YAXPAX@Z") // operator delete[] -#endif -#undef COMMENT_EXPORT -#else -#define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE -#endif - -using namespace __asan; // NOLINT - -// FreeBSD prior v9.2 have wrong definition of 'size_t'. -// http://svnweb.freebsd.org/base?view=revision&revision=232261 -#if SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32 -#include -#if __FreeBSD_version <= 902001 // v9.2 -#define size_t unsigned -#endif // __FreeBSD_version -#endif // SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32 - -// This code has issues on OSX. -// See https://github.com/google/sanitizers/issues/131. - -// Fake std::nothrow_t and std::align_val_t to avoid including . -namespace std { -struct nothrow_t {}; -enum class align_val_t: size_t {}; -} // namespace std - -// TODO(alekseyshl): throw std::bad_alloc instead of dying on OOM. -// For local pool allocation, align to SHADOW_GRANULARITY to match asan -// allocator behavior. -#define OPERATOR_NEW_BODY(type, nothrow) \ - MAYBE_ALLOCATE_FROM_LOCAL_POOL(nothrow); \ - GET_STACK_TRACE_MALLOC; \ - void *res = asan_memalign(0, size, &stack, type); \ - if (!nothrow && UNLIKELY(!res)) \ - ReportOutOfMemory(size, &stack); \ - return res; -#define OPERATOR_NEW_BODY_ALIGN(type, nothrow) \ - MAYBE_ALLOCATE_FROM_LOCAL_POOL(nothrow); \ - GET_STACK_TRACE_MALLOC; \ - void *res = asan_memalign((uptr)align, size, &stack, type); \ - if (!nothrow && UNLIKELY(!res)) \ - ReportOutOfMemory(size, &stack); \ - return res; - -// On OS X it's not enough to just provide our own 'operator new' and -// 'operator delete' implementations, because they're going to be in the -// runtime dylib, and the main executable will depend on both the runtime -// dylib and libstdc++, each of those'll have its implementation of new and -// delete. -// To make sure that C++ allocation/deallocation operators are overridden on -// OS X we need to intercept them using their mangled names. 
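For readers unfamiliar with the mangled names used by the SANITIZER_MAC branch below, here is a small standalone program (editorial, not part of the runtime) that decodes them with the Itanium ABI demangler. It assumes a libstdc++ or libc++ toolchain that ships <cxxabi.h>.

    #include <cxxabi.h>
    #include <cstdio>
    #include <cstdlib>

    // Prints the demangled form of the operator new/delete symbols that the
    // Mac build intercepts by name (e.g. _Znwm -> "operator new(unsigned long)").
    int main() {
      const char *names[] = {"_Znwm", "_Znam", "_ZnwmRKSt9nothrow_t",
                             "_ZdlPv", "_ZdaPv"};
      for (const char *n : names) {
        int status = 0;
        char *demangled = abi::__cxa_demangle(n, nullptr, nullptr, &status);
        std::printf("%-22s -> %s\n", n, status == 0 ? demangled : "<error>");
        std::free(demangled);
      }
      return 0;
    }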
-#if !SANITIZER_MAC -CXX_OPERATOR_ATTRIBUTE -void *operator new(size_t size) -{ OPERATOR_NEW_BODY(FROM_NEW, false /*nothrow*/); } -CXX_OPERATOR_ATTRIBUTE -void *operator new[](size_t size) -{ OPERATOR_NEW_BODY(FROM_NEW_BR, false /*nothrow*/); } -CXX_OPERATOR_ATTRIBUTE -void *operator new(size_t size, std::nothrow_t const&) -{ OPERATOR_NEW_BODY(FROM_NEW, true /*nothrow*/); } -CXX_OPERATOR_ATTRIBUTE -void *operator new[](size_t size, std::nothrow_t const&) -{ OPERATOR_NEW_BODY(FROM_NEW_BR, true /*nothrow*/); } -CXX_OPERATOR_ATTRIBUTE -void *operator new(size_t size, std::align_val_t align) -{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW, false /*nothrow*/); } -CXX_OPERATOR_ATTRIBUTE -void *operator new[](size_t size, std::align_val_t align) -{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR, false /*nothrow*/); } -CXX_OPERATOR_ATTRIBUTE -void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&) -{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW, true /*nothrow*/); } -CXX_OPERATOR_ATTRIBUTE -void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&) -{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR, true /*nothrow*/); } - -#else // SANITIZER_MAC -INTERCEPTOR(void *, _Znwm, size_t size) { - OPERATOR_NEW_BODY(FROM_NEW, false /*nothrow*/); -} -INTERCEPTOR(void *, _Znam, size_t size) { - OPERATOR_NEW_BODY(FROM_NEW_BR, false /*nothrow*/); -} -INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&) { - OPERATOR_NEW_BODY(FROM_NEW, true /*nothrow*/); -} -INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) { - OPERATOR_NEW_BODY(FROM_NEW_BR, true /*nothrow*/); -} -#endif // !SANITIZER_MAC - -#define OPERATOR_DELETE_BODY(type) \ - if (IS_FROM_LOCAL_POOL(ptr)) return;\ - GET_STACK_TRACE_FREE;\ - asan_delete(ptr, 0, 0, &stack, type); - -#define OPERATOR_DELETE_BODY_SIZE(type) \ - if (IS_FROM_LOCAL_POOL(ptr)) return;\ - GET_STACK_TRACE_FREE;\ - asan_delete(ptr, size, 0, &stack, type); - -#define OPERATOR_DELETE_BODY_ALIGN(type) \ - if (IS_FROM_LOCAL_POOL(ptr)) return;\ - GET_STACK_TRACE_FREE;\ - asan_delete(ptr, 0, static_cast(align), &stack, type); - -#define OPERATOR_DELETE_BODY_SIZE_ALIGN(type) \ - if (IS_FROM_LOCAL_POOL(ptr)) return;\ - GET_STACK_TRACE_FREE;\ - asan_delete(ptr, size, static_cast(align), &stack, type); - -#if !SANITIZER_MAC -CXX_OPERATOR_ATTRIBUTE -void operator delete(void *ptr) NOEXCEPT -{ OPERATOR_DELETE_BODY(FROM_NEW); } -CXX_OPERATOR_ATTRIBUTE -void operator delete[](void *ptr) NOEXCEPT -{ OPERATOR_DELETE_BODY(FROM_NEW_BR); } -CXX_OPERATOR_ATTRIBUTE -void operator delete(void *ptr, std::nothrow_t const&) -{ OPERATOR_DELETE_BODY(FROM_NEW); } -CXX_OPERATOR_ATTRIBUTE -void operator delete[](void *ptr, std::nothrow_t const&) -{ OPERATOR_DELETE_BODY(FROM_NEW_BR); } -CXX_OPERATOR_ATTRIBUTE -void operator delete(void *ptr, size_t size) NOEXCEPT -{ OPERATOR_DELETE_BODY_SIZE(FROM_NEW); } -CXX_OPERATOR_ATTRIBUTE -void operator delete[](void *ptr, size_t size) NOEXCEPT -{ OPERATOR_DELETE_BODY_SIZE(FROM_NEW_BR); } -CXX_OPERATOR_ATTRIBUTE -void operator delete(void *ptr, std::align_val_t align) NOEXCEPT -{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW); } -CXX_OPERATOR_ATTRIBUTE -void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT -{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW_BR); } -CXX_OPERATOR_ATTRIBUTE -void operator delete(void *ptr, std::align_val_t align, std::nothrow_t const&) -{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW); } -CXX_OPERATOR_ATTRIBUTE -void operator delete[](void *ptr, std::align_val_t align, std::nothrow_t const&) -{ 
OPERATOR_DELETE_BODY_ALIGN(FROM_NEW_BR); } -CXX_OPERATOR_ATTRIBUTE -void operator delete(void *ptr, size_t size, std::align_val_t align) NOEXCEPT -{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FROM_NEW); } -CXX_OPERATOR_ATTRIBUTE -void operator delete[](void *ptr, size_t size, std::align_val_t align) NOEXCEPT -{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FROM_NEW_BR); } - -#else // SANITIZER_MAC -INTERCEPTOR(void, _ZdlPv, void *ptr) -{ OPERATOR_DELETE_BODY(FROM_NEW); } -INTERCEPTOR(void, _ZdaPv, void *ptr) -{ OPERATOR_DELETE_BODY(FROM_NEW_BR); } -INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) -{ OPERATOR_DELETE_BODY(FROM_NEW); } -INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) -{ OPERATOR_DELETE_BODY(FROM_NEW_BR); } -#endif // !SANITIZER_MAC diff --git a/lib/asan/asan_new_delete.cpp b/lib/asan/asan_new_delete.cpp new file mode 100644 index 000000000000..5dfcc00fd5d1 --- /dev/null +++ b/lib/asan/asan_new_delete.cpp @@ -0,0 +1,204 @@ +//===-- asan_interceptors.cpp ---------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Interceptors for operators new and delete. +//===----------------------------------------------------------------------===// + +#include "asan_allocator.h" +#include "asan_internal.h" +#include "asan_malloc_local.h" +#include "asan_report.h" +#include "asan_stack.h" + +#include "interception/interception.h" + +#include + +// C++ operators can't have dllexport attributes on Windows. We export them +// anyway by passing extra -export flags to the linker, which is exactly that +// dllexport would normally do. We need to export them in order to make the +// VS2015 dynamic CRT (MD) work. +#if SANITIZER_WINDOWS && defined(_MSC_VER) +#define CXX_OPERATOR_ATTRIBUTE +#define COMMENT_EXPORT(sym) __pragma(comment(linker, "/export:" sym)) +#ifdef _WIN64 +COMMENT_EXPORT("??2@YAPEAX_K@Z") // operator new +COMMENT_EXPORT("??2@YAPEAX_KAEBUnothrow_t@std@@@Z") // operator new nothrow +COMMENT_EXPORT("??3@YAXPEAX@Z") // operator delete +COMMENT_EXPORT("??3@YAXPEAX_K@Z") // sized operator delete +COMMENT_EXPORT("??_U@YAPEAX_K@Z") // operator new[] +COMMENT_EXPORT("??_V@YAXPEAX@Z") // operator delete[] +#else +COMMENT_EXPORT("??2@YAPAXI@Z") // operator new +COMMENT_EXPORT("??2@YAPAXIABUnothrow_t@std@@@Z") // operator new nothrow +COMMENT_EXPORT("??3@YAXPAX@Z") // operator delete +COMMENT_EXPORT("??3@YAXPAXI@Z") // sized operator delete +COMMENT_EXPORT("??_U@YAPAXI@Z") // operator new[] +COMMENT_EXPORT("??_V@YAXPAX@Z") // operator delete[] +#endif +#undef COMMENT_EXPORT +#else +#define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE +#endif + +using namespace __asan; + +// FreeBSD prior v9.2 have wrong definition of 'size_t'. +// http://svnweb.freebsd.org/base?view=revision&revision=232261 +#if SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32 +#include +#if __FreeBSD_version <= 902001 // v9.2 +#define size_t unsigned +#endif // __FreeBSD_version +#endif // SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32 + +// This code has issues on OSX. +// See https://github.com/google/sanitizers/issues/131. + +// Fake std::nothrow_t and std::align_val_t to avoid including . 
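The std::align_val_t type being faked here exists because of C++17 aligned new: over-aligned types route their allocation through the align_val_t overloads that this file intercepts. For context, a small user-side illustration (editorial sketch, not part of the patch; assumes a C++17 compiler, and the CacheLine type is invented):

    #include <cstdint>
    #include <cstdio>

    // Over-aligned type: under C++17 the compiler calls
    // operator new(size_t, std::align_val_t) for it, so ASan must provide
    // (and place redzones around) that overload as well.
    struct alignas(64) CacheLine {
      unsigned char bytes[64];
    };

    int main() {
      CacheLine *p = new CacheLine;  // uses the align_val_t overload
      std::printf("alignment ok: %d\n",
                  (reinterpret_cast<std::uintptr_t>(p) % 64) == 0);
      delete p;  // routes to an align_val_t overload of operator delete
      return 0;
    }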
+namespace std { +struct nothrow_t {}; +enum class align_val_t: size_t {}; +} // namespace std + +// TODO(alekseyshl): throw std::bad_alloc instead of dying on OOM. +// For local pool allocation, align to SHADOW_GRANULARITY to match asan +// allocator behavior. +#define OPERATOR_NEW_BODY(type, nothrow) \ + MAYBE_ALLOCATE_FROM_LOCAL_POOL(nothrow); \ + GET_STACK_TRACE_MALLOC; \ + void *res = asan_memalign(0, size, &stack, type); \ + if (!nothrow && UNLIKELY(!res)) \ + ReportOutOfMemory(size, &stack); \ + return res; +#define OPERATOR_NEW_BODY_ALIGN(type, nothrow) \ + MAYBE_ALLOCATE_FROM_LOCAL_POOL(nothrow); \ + GET_STACK_TRACE_MALLOC; \ + void *res = asan_memalign((uptr)align, size, &stack, type); \ + if (!nothrow && UNLIKELY(!res)) \ + ReportOutOfMemory(size, &stack); \ + return res; + +// On OS X it's not enough to just provide our own 'operator new' and +// 'operator delete' implementations, because they're going to be in the +// runtime dylib, and the main executable will depend on both the runtime +// dylib and libstdc++, each of those'll have its implementation of new and +// delete. +// To make sure that C++ allocation/deallocation operators are overridden on +// OS X we need to intercept them using their mangled names. +#if !SANITIZER_MAC +CXX_OPERATOR_ATTRIBUTE +void *operator new(size_t size) +{ OPERATOR_NEW_BODY(FROM_NEW, false /*nothrow*/); } +CXX_OPERATOR_ATTRIBUTE +void *operator new[](size_t size) +{ OPERATOR_NEW_BODY(FROM_NEW_BR, false /*nothrow*/); } +CXX_OPERATOR_ATTRIBUTE +void *operator new(size_t size, std::nothrow_t const&) +{ OPERATOR_NEW_BODY(FROM_NEW, true /*nothrow*/); } +CXX_OPERATOR_ATTRIBUTE +void *operator new[](size_t size, std::nothrow_t const&) +{ OPERATOR_NEW_BODY(FROM_NEW_BR, true /*nothrow*/); } +CXX_OPERATOR_ATTRIBUTE +void *operator new(size_t size, std::align_val_t align) +{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW, false /*nothrow*/); } +CXX_OPERATOR_ATTRIBUTE +void *operator new[](size_t size, std::align_val_t align) +{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR, false /*nothrow*/); } +CXX_OPERATOR_ATTRIBUTE +void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&) +{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW, true /*nothrow*/); } +CXX_OPERATOR_ATTRIBUTE +void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&) +{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR, true /*nothrow*/); } + +#else // SANITIZER_MAC +INTERCEPTOR(void *, _Znwm, size_t size) { + OPERATOR_NEW_BODY(FROM_NEW, false /*nothrow*/); +} +INTERCEPTOR(void *, _Znam, size_t size) { + OPERATOR_NEW_BODY(FROM_NEW_BR, false /*nothrow*/); +} +INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&) { + OPERATOR_NEW_BODY(FROM_NEW, true /*nothrow*/); +} +INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) { + OPERATOR_NEW_BODY(FROM_NEW_BR, true /*nothrow*/); +} +#endif // !SANITIZER_MAC + +#define OPERATOR_DELETE_BODY(type) \ + if (IS_FROM_LOCAL_POOL(ptr)) return;\ + GET_STACK_TRACE_FREE;\ + asan_delete(ptr, 0, 0, &stack, type); + +#define OPERATOR_DELETE_BODY_SIZE(type) \ + if (IS_FROM_LOCAL_POOL(ptr)) return;\ + GET_STACK_TRACE_FREE;\ + asan_delete(ptr, size, 0, &stack, type); + +#define OPERATOR_DELETE_BODY_ALIGN(type) \ + if (IS_FROM_LOCAL_POOL(ptr)) return;\ + GET_STACK_TRACE_FREE;\ + asan_delete(ptr, 0, static_cast(align), &stack, type); + +#define OPERATOR_DELETE_BODY_SIZE_ALIGN(type) \ + if (IS_FROM_LOCAL_POOL(ptr)) return;\ + GET_STACK_TRACE_FREE;\ + asan_delete(ptr, size, static_cast(align), &stack, type); + +#if !SANITIZER_MAC 
+CXX_OPERATOR_ATTRIBUTE +void operator delete(void *ptr) NOEXCEPT +{ OPERATOR_DELETE_BODY(FROM_NEW); } +CXX_OPERATOR_ATTRIBUTE +void operator delete[](void *ptr) NOEXCEPT +{ OPERATOR_DELETE_BODY(FROM_NEW_BR); } +CXX_OPERATOR_ATTRIBUTE +void operator delete(void *ptr, std::nothrow_t const&) +{ OPERATOR_DELETE_BODY(FROM_NEW); } +CXX_OPERATOR_ATTRIBUTE +void operator delete[](void *ptr, std::nothrow_t const&) +{ OPERATOR_DELETE_BODY(FROM_NEW_BR); } +CXX_OPERATOR_ATTRIBUTE +void operator delete(void *ptr, size_t size) NOEXCEPT +{ OPERATOR_DELETE_BODY_SIZE(FROM_NEW); } +CXX_OPERATOR_ATTRIBUTE +void operator delete[](void *ptr, size_t size) NOEXCEPT +{ OPERATOR_DELETE_BODY_SIZE(FROM_NEW_BR); } +CXX_OPERATOR_ATTRIBUTE +void operator delete(void *ptr, std::align_val_t align) NOEXCEPT +{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW); } +CXX_OPERATOR_ATTRIBUTE +void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT +{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW_BR); } +CXX_OPERATOR_ATTRIBUTE +void operator delete(void *ptr, std::align_val_t align, std::nothrow_t const&) +{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW); } +CXX_OPERATOR_ATTRIBUTE +void operator delete[](void *ptr, std::align_val_t align, std::nothrow_t const&) +{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW_BR); } +CXX_OPERATOR_ATTRIBUTE +void operator delete(void *ptr, size_t size, std::align_val_t align) NOEXCEPT +{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FROM_NEW); } +CXX_OPERATOR_ATTRIBUTE +void operator delete[](void *ptr, size_t size, std::align_val_t align) NOEXCEPT +{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FROM_NEW_BR); } + +#else // SANITIZER_MAC +INTERCEPTOR(void, _ZdlPv, void *ptr) +{ OPERATOR_DELETE_BODY(FROM_NEW); } +INTERCEPTOR(void, _ZdaPv, void *ptr) +{ OPERATOR_DELETE_BODY(FROM_NEW_BR); } +INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) +{ OPERATOR_DELETE_BODY(FROM_NEW); } +INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) +{ OPERATOR_DELETE_BODY(FROM_NEW_BR); } +#endif // !SANITIZER_MAC diff --git a/lib/asan/asan_poisoning.cc b/lib/asan/asan_poisoning.cc deleted file mode 100644 index 44b87c76e9cc..000000000000 --- a/lib/asan/asan_poisoning.cc +++ /dev/null @@ -1,460 +0,0 @@ -//===-- asan_poisoning.cc -------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Shadow memory poisoning by ASan RTL and by user application. 
-//===----------------------------------------------------------------------===// - -#include "asan_poisoning.h" -#include "asan_report.h" -#include "asan_stack.h" -#include "sanitizer_common/sanitizer_atomic.h" -#include "sanitizer_common/sanitizer_libc.h" -#include "sanitizer_common/sanitizer_flags.h" - -namespace __asan { - -static atomic_uint8_t can_poison_memory; - -void SetCanPoisonMemory(bool value) { - atomic_store(&can_poison_memory, value, memory_order_release); -} - -bool CanPoisonMemory() { - return atomic_load(&can_poison_memory, memory_order_acquire); -} - -void PoisonShadow(uptr addr, uptr size, u8 value) { - if (value && !CanPoisonMemory()) return; - CHECK(AddrIsAlignedByGranularity(addr)); - CHECK(AddrIsInMem(addr)); - CHECK(AddrIsAlignedByGranularity(addr + size)); - CHECK(AddrIsInMem(addr + size - SHADOW_GRANULARITY)); - CHECK(REAL(memset)); - FastPoisonShadow(addr, size, value); -} - -void PoisonShadowPartialRightRedzone(uptr addr, - uptr size, - uptr redzone_size, - u8 value) { - if (!CanPoisonMemory()) return; - CHECK(AddrIsAlignedByGranularity(addr)); - CHECK(AddrIsInMem(addr)); - FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value); -} - -struct ShadowSegmentEndpoint { - u8 *chunk; - s8 offset; // in [0, SHADOW_GRANULARITY) - s8 value; // = *chunk; - - explicit ShadowSegmentEndpoint(uptr address) { - chunk = (u8*)MemToShadow(address); - offset = address & (SHADOW_GRANULARITY - 1); - value = *chunk; - } -}; - -void FlushUnneededASanShadowMemory(uptr p, uptr size) { - // Since asan's mapping is compacting, the shadow chunk may be - // not page-aligned, so we only flush the page-aligned portion. - ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size)); -} - -void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) { - uptr end = ptr + size; - if (Verbosity()) { - Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n", - poison ? "" : "un", ptr, end, size); - if (Verbosity() >= 2) - PRINT_CURRENT_STACK(); - } - CHECK(size); - CHECK_LE(size, 4096); - CHECK(IsAligned(end, SHADOW_GRANULARITY)); - if (!IsAligned(ptr, SHADOW_GRANULARITY)) { - *(u8 *)MemToShadow(ptr) = - poison ? static_cast(ptr % SHADOW_GRANULARITY) : 0; - ptr |= SHADOW_GRANULARITY - 1; - ptr++; - } - for (; ptr < end; ptr += SHADOW_GRANULARITY) - *(u8*)MemToShadow(ptr) = poison ? kAsanIntraObjectRedzone : 0; -} - -} // namespace __asan - -// ---------------------- Interface ---------------- {{{1 -using namespace __asan; // NOLINT - -// Current implementation of __asan_(un)poison_memory_region doesn't check -// that user program (un)poisons the memory it owns. It poisons memory -// conservatively, and unpoisons progressively to make sure asan shadow -// mapping invariant is preserved (see detailed mapping description here: -// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm). -// -// * if user asks to poison region [left, right), the program poisons -// at least [left, AlignDown(right)). -// * if user asks to unpoison region [left, right), the program unpoisons -// at most [AlignDown(left), right). 
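Before the definitions themselves, a user-side sketch of how these entry points are typically driven (editorial, not part of the patch). It assumes the program is built with -fsanitize=address and that <sanitizer/asan_interface.h> is available; that header also wraps the calls in the ASAN_POISON_MEMORY_REGION / ASAN_UNPOISON_MEMORY_REGION macros.

    #include <sanitizer/asan_interface.h>
    #include <cstdlib>

    // Build with: clang++ -fsanitize=address poison_demo.cpp
    // Poisons the tail of a heap buffer so any touch of it should be reported
    // as use-after-poison, then unpoisons it before legitimate reuse.
    int main() {
      char *buf = static_cast<char *>(std::malloc(64));
      __asan_poison_memory_region(buf + 32, 32);   // [buf+32, buf+64) poisoned
      // buf[40] = 1;  // uncommenting this should trigger a use-after-poison report
      __asan_unpoison_memory_region(buf + 32, 32); // make the tail usable again
      std::free(buf);
      return 0;
    }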
-void __asan_poison_memory_region(void const volatile *addr, uptr size) { - if (!flags()->allow_user_poisoning || size == 0) return; - uptr beg_addr = (uptr)addr; - uptr end_addr = beg_addr + size; - VPrintf(3, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr, - (void *)end_addr); - ShadowSegmentEndpoint beg(beg_addr); - ShadowSegmentEndpoint end(end_addr); - if (beg.chunk == end.chunk) { - CHECK_LT(beg.offset, end.offset); - s8 value = beg.value; - CHECK_EQ(value, end.value); - // We can only poison memory if the byte in end.offset is unaddressable. - // No need to re-poison memory if it is poisoned already. - if (value > 0 && value <= end.offset) { - if (beg.offset > 0) { - *beg.chunk = Min(value, beg.offset); - } else { - *beg.chunk = kAsanUserPoisonedMemoryMagic; - } - } - return; - } - CHECK_LT(beg.chunk, end.chunk); - if (beg.offset > 0) { - // Mark bytes from beg.offset as unaddressable. - if (beg.value == 0) { - *beg.chunk = beg.offset; - } else { - *beg.chunk = Min(beg.value, beg.offset); - } - beg.chunk++; - } - REAL(memset)(beg.chunk, kAsanUserPoisonedMemoryMagic, end.chunk - beg.chunk); - // Poison if byte in end.offset is unaddressable. - if (end.value > 0 && end.value <= end.offset) { - *end.chunk = kAsanUserPoisonedMemoryMagic; - } -} - -void __asan_unpoison_memory_region(void const volatile *addr, uptr size) { - if (!flags()->allow_user_poisoning || size == 0) return; - uptr beg_addr = (uptr)addr; - uptr end_addr = beg_addr + size; - VPrintf(3, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr, - (void *)end_addr); - ShadowSegmentEndpoint beg(beg_addr); - ShadowSegmentEndpoint end(end_addr); - if (beg.chunk == end.chunk) { - CHECK_LT(beg.offset, end.offset); - s8 value = beg.value; - CHECK_EQ(value, end.value); - // We unpoison memory bytes up to enbytes up to end.offset if it is not - // unpoisoned already. - if (value != 0) { - *beg.chunk = Max(value, end.offset); - } - return; - } - CHECK_LT(beg.chunk, end.chunk); - if (beg.offset > 0) { - *beg.chunk = 0; - beg.chunk++; - } - REAL(memset)(beg.chunk, 0, end.chunk - beg.chunk); - if (end.offset > 0 && end.value != 0) { - *end.chunk = Max(end.value, end.offset); - } -} - -int __asan_address_is_poisoned(void const volatile *addr) { - return __asan::AddressIsPoisoned((uptr)addr); -} - -uptr __asan_region_is_poisoned(uptr beg, uptr size) { - if (!size) return 0; - uptr end = beg + size; - if (SANITIZER_MYRIAD2) { - // On Myriad, address not in DRAM range need to be treated as - // unpoisoned. - if (!AddrIsInMem(beg) && !AddrIsInShadow(beg)) return 0; - if (!AddrIsInMem(end) && !AddrIsInShadow(end)) return 0; - } else { - if (!AddrIsInMem(beg)) return beg; - if (!AddrIsInMem(end)) return end; - } - CHECK_LT(beg, end); - uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY); - uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY); - uptr shadow_beg = MemToShadow(aligned_b); - uptr shadow_end = MemToShadow(aligned_e); - // First check the first and the last application bytes, - // then check the SHADOW_GRANULARITY-aligned region by calling - // mem_is_zero on the corresponding shadow. - if (!__asan::AddressIsPoisoned(beg) && - !__asan::AddressIsPoisoned(end - 1) && - (shadow_end <= shadow_beg || - __sanitizer::mem_is_zero((const char *)shadow_beg, - shadow_end - shadow_beg))) - return 0; - // The fast check failed, so we have a poisoned byte somewhere. - // Find it slowly. 
- for (; beg < end; beg++) - if (__asan::AddressIsPoisoned(beg)) - return beg; - UNREACHABLE("mem_is_zero returned false, but poisoned byte was not found"); - return 0; -} - -#define CHECK_SMALL_REGION(p, size, isWrite) \ - do { \ - uptr __p = reinterpret_cast(p); \ - uptr __size = size; \ - if (UNLIKELY(__asan::AddressIsPoisoned(__p) || \ - __asan::AddressIsPoisoned(__p + __size - 1))) { \ - GET_CURRENT_PC_BP_SP; \ - uptr __bad = __asan_region_is_poisoned(__p, __size); \ - __asan_report_error(pc, bp, sp, __bad, isWrite, __size, 0);\ - } \ - } while (false) - - -extern "C" SANITIZER_INTERFACE_ATTRIBUTE -u16 __sanitizer_unaligned_load16(const uu16 *p) { - CHECK_SMALL_REGION(p, sizeof(*p), false); - return *p; -} - -extern "C" SANITIZER_INTERFACE_ATTRIBUTE -u32 __sanitizer_unaligned_load32(const uu32 *p) { - CHECK_SMALL_REGION(p, sizeof(*p), false); - return *p; -} - -extern "C" SANITIZER_INTERFACE_ATTRIBUTE -u64 __sanitizer_unaligned_load64(const uu64 *p) { - CHECK_SMALL_REGION(p, sizeof(*p), false); - return *p; -} - -extern "C" SANITIZER_INTERFACE_ATTRIBUTE -void __sanitizer_unaligned_store16(uu16 *p, u16 x) { - CHECK_SMALL_REGION(p, sizeof(*p), true); - *p = x; -} - -extern "C" SANITIZER_INTERFACE_ATTRIBUTE -void __sanitizer_unaligned_store32(uu32 *p, u32 x) { - CHECK_SMALL_REGION(p, sizeof(*p), true); - *p = x; -} - -extern "C" SANITIZER_INTERFACE_ATTRIBUTE -void __sanitizer_unaligned_store64(uu64 *p, u64 x) { - CHECK_SMALL_REGION(p, sizeof(*p), true); - *p = x; -} - -extern "C" SANITIZER_INTERFACE_ATTRIBUTE -void __asan_poison_cxx_array_cookie(uptr p) { - if (SANITIZER_WORDSIZE != 64) return; - if (!flags()->poison_array_cookie) return; - uptr s = MEM_TO_SHADOW(p); - *reinterpret_cast(s) = kAsanArrayCookieMagic; -} - -extern "C" SANITIZER_INTERFACE_ATTRIBUTE -uptr __asan_load_cxx_array_cookie(uptr *p) { - if (SANITIZER_WORDSIZE != 64) return *p; - if (!flags()->poison_array_cookie) return *p; - uptr s = MEM_TO_SHADOW(reinterpret_cast(p)); - u8 sval = *reinterpret_cast(s); - if (sval == kAsanArrayCookieMagic) return *p; - // If sval is not kAsanArrayCookieMagic it can only be freed memory, - // which means that we are going to get double-free. So, return 0 to avoid - // infinite loop of destructors. We don't want to report a double-free here - // though, so print a warning just in case. - // CHECK_EQ(sval, kAsanHeapFreeMagic); - if (sval == kAsanHeapFreeMagic) { - Report("AddressSanitizer: loaded array cookie from free-d memory; " - "expect a double-free report\n"); - return 0; - } - // The cookie may remain unpoisoned if e.g. it comes from a custom - // operator new defined inside a class. - return *p; -} - -// This is a simplified version of __asan_(un)poison_memory_region, which -// assumes that left border of region to be poisoned is properly aligned. -static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) { - if (size == 0) return; - uptr aligned_size = size & ~(SHADOW_GRANULARITY - 1); - PoisonShadow(addr, aligned_size, - do_poison ? kAsanStackUseAfterScopeMagic : 0); - if (size == aligned_size) - return; - s8 end_offset = (s8)(size - aligned_size); - s8* shadow_end = (s8*)MemToShadow(addr + aligned_size); - s8 end_value = *shadow_end; - if (do_poison) { - // If possible, mark all the bytes mapping to last shadow byte as - // unaddressable. 
- if (end_value > 0 && end_value <= end_offset) - *shadow_end = (s8)kAsanStackUseAfterScopeMagic; - } else { - // If necessary, mark few first bytes mapping to last shadow byte - // as addressable - if (end_value != 0) - *shadow_end = Max(end_value, end_offset); - } -} - -void __asan_set_shadow_00(uptr addr, uptr size) { - REAL(memset)((void *)addr, 0, size); -} - -void __asan_set_shadow_f1(uptr addr, uptr size) { - REAL(memset)((void *)addr, 0xf1, size); -} - -void __asan_set_shadow_f2(uptr addr, uptr size) { - REAL(memset)((void *)addr, 0xf2, size); -} - -void __asan_set_shadow_f3(uptr addr, uptr size) { - REAL(memset)((void *)addr, 0xf3, size); -} - -void __asan_set_shadow_f5(uptr addr, uptr size) { - REAL(memset)((void *)addr, 0xf5, size); -} - -void __asan_set_shadow_f8(uptr addr, uptr size) { - REAL(memset)((void *)addr, 0xf8, size); -} - -void __asan_poison_stack_memory(uptr addr, uptr size) { - VReport(1, "poisoning: %p %zx\n", (void *)addr, size); - PoisonAlignedStackMemory(addr, size, true); -} - -void __asan_unpoison_stack_memory(uptr addr, uptr size) { - VReport(1, "unpoisoning: %p %zx\n", (void *)addr, size); - PoisonAlignedStackMemory(addr, size, false); -} - -void __sanitizer_annotate_contiguous_container(const void *beg_p, - const void *end_p, - const void *old_mid_p, - const void *new_mid_p) { - if (!flags()->detect_container_overflow) return; - VPrintf(2, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p, - new_mid_p); - uptr beg = reinterpret_cast(beg_p); - uptr end = reinterpret_cast(end_p); - uptr old_mid = reinterpret_cast(old_mid_p); - uptr new_mid = reinterpret_cast(new_mid_p); - uptr granularity = SHADOW_GRANULARITY; - if (!(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end && - IsAligned(beg, granularity))) { - GET_STACK_TRACE_FATAL_HERE; - ReportBadParamsToAnnotateContiguousContainer(beg, end, old_mid, new_mid, - &stack); - } - CHECK_LE(end - beg, - FIRST_32_SECOND_64(1UL << 30, 1ULL << 34)); // Sanity check. - - uptr a = RoundDownTo(Min(old_mid, new_mid), granularity); - uptr c = RoundUpTo(Max(old_mid, new_mid), granularity); - uptr d1 = RoundDownTo(old_mid, granularity); - // uptr d2 = RoundUpTo(old_mid, granularity); - // Currently we should be in this state: - // [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good. - // Make a quick sanity check that we are indeed in this state. - // - // FIXME: Two of these three checks are disabled until we fix - // https://github.com/google/sanitizers/issues/258. - // if (d1 != d2) - // CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1); - if (a + granularity <= d1) - CHECK_EQ(*(u8*)MemToShadow(a), 0); - // if (d2 + granularity <= c && c <= end) - // CHECK_EQ(*(u8 *)MemToShadow(c - granularity), - // kAsanContiguousContainerOOBMagic); - - uptr b1 = RoundDownTo(new_mid, granularity); - uptr b2 = RoundUpTo(new_mid, granularity); - // New state: - // [a, b1) is good, [b2, c) is bad, [b1, b2) is partially good. 
- PoisonShadow(a, b1 - a, 0); - PoisonShadow(b2, c - b2, kAsanContiguousContainerOOBMagic); - if (b1 != b2) { - CHECK_EQ(b2 - b1, granularity); - *(u8*)MemToShadow(b1) = static_cast(new_mid - b1); - } -} - -const void *__sanitizer_contiguous_container_find_bad_address( - const void *beg_p, const void *mid_p, const void *end_p) { - if (!flags()->detect_container_overflow) - return nullptr; - uptr beg = reinterpret_cast(beg_p); - uptr end = reinterpret_cast(end_p); - uptr mid = reinterpret_cast(mid_p); - CHECK_LE(beg, mid); - CHECK_LE(mid, end); - // Check some bytes starting from beg, some bytes around mid, and some bytes - // ending with end. - uptr kMaxRangeToCheck = 32; - uptr r1_beg = beg; - uptr r1_end = Min(beg + kMaxRangeToCheck, mid); - uptr r2_beg = Max(beg, mid - kMaxRangeToCheck); - uptr r2_end = Min(end, mid + kMaxRangeToCheck); - uptr r3_beg = Max(end - kMaxRangeToCheck, mid); - uptr r3_end = end; - for (uptr i = r1_beg; i < r1_end; i++) - if (AddressIsPoisoned(i)) - return reinterpret_cast(i); - for (uptr i = r2_beg; i < mid; i++) - if (AddressIsPoisoned(i)) - return reinterpret_cast(i); - for (uptr i = mid; i < r2_end; i++) - if (!AddressIsPoisoned(i)) - return reinterpret_cast(i); - for (uptr i = r3_beg; i < r3_end; i++) - if (!AddressIsPoisoned(i)) - return reinterpret_cast(i); - return nullptr; -} - -int __sanitizer_verify_contiguous_container(const void *beg_p, - const void *mid_p, - const void *end_p) { - return __sanitizer_contiguous_container_find_bad_address(beg_p, mid_p, - end_p) == nullptr; -} - -extern "C" SANITIZER_INTERFACE_ATTRIBUTE -void __asan_poison_intra_object_redzone(uptr ptr, uptr size) { - AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, true); -} - -extern "C" SANITIZER_INTERFACE_ATTRIBUTE -void __asan_unpoison_intra_object_redzone(uptr ptr, uptr size) { - AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, false); -} - -// --- Implementation of LSan-specific functions --- {{{1 -namespace __lsan { -bool WordIsPoisoned(uptr addr) { - return (__asan_region_is_poisoned(addr, sizeof(uptr)) != 0); -} -} diff --git a/lib/asan/asan_poisoning.cpp b/lib/asan/asan_poisoning.cpp new file mode 100644 index 000000000000..f3fbe684e2cb --- /dev/null +++ b/lib/asan/asan_poisoning.cpp @@ -0,0 +1,460 @@ +//===-- asan_poisoning.cpp ------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Shadow memory poisoning by ASan RTL and by user application. 
+//===----------------------------------------------------------------------===// + +#include "asan_poisoning.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "sanitizer_common/sanitizer_atomic.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_flags.h" + +namespace __asan { + +static atomic_uint8_t can_poison_memory; + +void SetCanPoisonMemory(bool value) { + atomic_store(&can_poison_memory, value, memory_order_release); +} + +bool CanPoisonMemory() { + return atomic_load(&can_poison_memory, memory_order_acquire); +} + +void PoisonShadow(uptr addr, uptr size, u8 value) { + if (value && !CanPoisonMemory()) return; + CHECK(AddrIsAlignedByGranularity(addr)); + CHECK(AddrIsInMem(addr)); + CHECK(AddrIsAlignedByGranularity(addr + size)); + CHECK(AddrIsInMem(addr + size - SHADOW_GRANULARITY)); + CHECK(REAL(memset)); + FastPoisonShadow(addr, size, value); +} + +void PoisonShadowPartialRightRedzone(uptr addr, + uptr size, + uptr redzone_size, + u8 value) { + if (!CanPoisonMemory()) return; + CHECK(AddrIsAlignedByGranularity(addr)); + CHECK(AddrIsInMem(addr)); + FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value); +} + +struct ShadowSegmentEndpoint { + u8 *chunk; + s8 offset; // in [0, SHADOW_GRANULARITY) + s8 value; // = *chunk; + + explicit ShadowSegmentEndpoint(uptr address) { + chunk = (u8*)MemToShadow(address); + offset = address & (SHADOW_GRANULARITY - 1); + value = *chunk; + } +}; + +void FlushUnneededASanShadowMemory(uptr p, uptr size) { + // Since asan's mapping is compacting, the shadow chunk may be + // not page-aligned, so we only flush the page-aligned portion. + ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size)); +} + +void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) { + uptr end = ptr + size; + if (Verbosity()) { + Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n", + poison ? "" : "un", ptr, end, size); + if (Verbosity() >= 2) + PRINT_CURRENT_STACK(); + } + CHECK(size); + CHECK_LE(size, 4096); + CHECK(IsAligned(end, SHADOW_GRANULARITY)); + if (!IsAligned(ptr, SHADOW_GRANULARITY)) { + *(u8 *)MemToShadow(ptr) = + poison ? static_cast(ptr % SHADOW_GRANULARITY) : 0; + ptr |= SHADOW_GRANULARITY - 1; + ptr++; + } + for (; ptr < end; ptr += SHADOW_GRANULARITY) + *(u8*)MemToShadow(ptr) = poison ? kAsanIntraObjectRedzone : 0; +} + +} // namespace __asan + +// ---------------------- Interface ---------------- {{{1 +using namespace __asan; + +// Current implementation of __asan_(un)poison_memory_region doesn't check +// that user program (un)poisons the memory it owns. It poisons memory +// conservatively, and unpoisons progressively to make sure asan shadow +// mapping invariant is preserved (see detailed mapping description here: +// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm). +// +// * if user asks to poison region [left, right), the program poisons +// at least [left, AlignDown(right)). +// * if user asks to unpoison region [left, right), the program unpoisons +// at most [AlignDown(left), right). 
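A minimal caller-side sketch of how an allocator might drive these two entry points, assuming only the public ASAN_POISON_MEMORY_REGION / ASAN_UNPOISON_MEMORY_REGION macros from <sanitizer/asan_interface.h> (they forward to the functions defined below when ASan is enabled); the arena layout and sizes are invented for illustration:

#include <sanitizer/asan_interface.h>  // ASAN_(UN)POISON_MEMORY_REGION
#include <cstddef>
#include <cstdlib>

// Toy bump allocator: the whole reservation starts out poisoned and chunks
// are unpoisoned only as they are handed out, so stray reads or writes into
// the unused tail are reported by ASan.
struct Arena {
  char *base;
  std::size_t used, capacity;

  explicit Arena(std::size_t cap) : used(0), capacity(cap) {
    base = static_cast<char *>(std::malloc(cap));
    ASAN_POISON_MEMORY_REGION(base, capacity);
  }
  void *Allocate(std::size_t n) {
    if (used + n > capacity) return nullptr;
    void *p = base + used;
    used += n;
    ASAN_UNPOISON_MEMORY_REGION(p, n);
    return p;
  }
  ~Arena() {
    // Poisoning is conservative around granularity boundaries (see the
    // comment above), so unpoison everything before returning the block.
    ASAN_UNPOISON_MEMORY_REGION(base, capacity);
    std::free(base);
  }
};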
+void __asan_poison_memory_region(void const volatile *addr, uptr size) { + if (!flags()->allow_user_poisoning || size == 0) return; + uptr beg_addr = (uptr)addr; + uptr end_addr = beg_addr + size; + VPrintf(3, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr, + (void *)end_addr); + ShadowSegmentEndpoint beg(beg_addr); + ShadowSegmentEndpoint end(end_addr); + if (beg.chunk == end.chunk) { + CHECK_LT(beg.offset, end.offset); + s8 value = beg.value; + CHECK_EQ(value, end.value); + // We can only poison memory if the byte in end.offset is unaddressable. + // No need to re-poison memory if it is poisoned already. + if (value > 0 && value <= end.offset) { + if (beg.offset > 0) { + *beg.chunk = Min(value, beg.offset); + } else { + *beg.chunk = kAsanUserPoisonedMemoryMagic; + } + } + return; + } + CHECK_LT(beg.chunk, end.chunk); + if (beg.offset > 0) { + // Mark bytes from beg.offset as unaddressable. + if (beg.value == 0) { + *beg.chunk = beg.offset; + } else { + *beg.chunk = Min(beg.value, beg.offset); + } + beg.chunk++; + } + REAL(memset)(beg.chunk, kAsanUserPoisonedMemoryMagic, end.chunk - beg.chunk); + // Poison if byte in end.offset is unaddressable. + if (end.value > 0 && end.value <= end.offset) { + *end.chunk = kAsanUserPoisonedMemoryMagic; + } +} + +void __asan_unpoison_memory_region(void const volatile *addr, uptr size) { + if (!flags()->allow_user_poisoning || size == 0) return; + uptr beg_addr = (uptr)addr; + uptr end_addr = beg_addr + size; + VPrintf(3, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr, + (void *)end_addr); + ShadowSegmentEndpoint beg(beg_addr); + ShadowSegmentEndpoint end(end_addr); + if (beg.chunk == end.chunk) { + CHECK_LT(beg.offset, end.offset); + s8 value = beg.value; + CHECK_EQ(value, end.value); + // We unpoison memory bytes up to enbytes up to end.offset if it is not + // unpoisoned already. + if (value != 0) { + *beg.chunk = Max(value, end.offset); + } + return; + } + CHECK_LT(beg.chunk, end.chunk); + if (beg.offset > 0) { + *beg.chunk = 0; + beg.chunk++; + } + REAL(memset)(beg.chunk, 0, end.chunk - beg.chunk); + if (end.offset > 0 && end.value != 0) { + *end.chunk = Max(end.value, end.offset); + } +} + +int __asan_address_is_poisoned(void const volatile *addr) { + return __asan::AddressIsPoisoned((uptr)addr); +} + +uptr __asan_region_is_poisoned(uptr beg, uptr size) { + if (!size) return 0; + uptr end = beg + size; + if (SANITIZER_MYRIAD2) { + // On Myriad, address not in DRAM range need to be treated as + // unpoisoned. + if (!AddrIsInMem(beg) && !AddrIsInShadow(beg)) return 0; + if (!AddrIsInMem(end) && !AddrIsInShadow(end)) return 0; + } else { + if (!AddrIsInMem(beg)) return beg; + if (!AddrIsInMem(end)) return end; + } + CHECK_LT(beg, end); + uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY); + uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY); + uptr shadow_beg = MemToShadow(aligned_b); + uptr shadow_end = MemToShadow(aligned_e); + // First check the first and the last application bytes, + // then check the SHADOW_GRANULARITY-aligned region by calling + // mem_is_zero on the corresponding shadow. + if (!__asan::AddressIsPoisoned(beg) && + !__asan::AddressIsPoisoned(end - 1) && + (shadow_end <= shadow_beg || + __sanitizer::mem_is_zero((const char *)shadow_beg, + shadow_end - shadow_beg))) + return 0; + // The fast check failed, so we have a poisoned byte somewhere. + // Find it slowly. 
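For context, a standalone sketch of the shadow lookup that the fast path above builds on; the 1:8 scale and the x86_64 shadow offset are the usual defaults and are shown only for illustration (the real constants come from asan_mapping.h), and the lookup is meaningful only inside an ASan-instrumented process where the shadow is actually mapped:

#include <cstdint>

// One shadow byte covers SHADOW_GRANULARITY (8) application bytes.
//   0        -> the whole granule is addressable
//   1..7     -> only the first k bytes of the granule are addressable
//   negative -> the whole granule is poisoned (redzone, freed memory, ...)
constexpr uintptr_t kShadowScale  = 3;             // log2 of the granularity
constexpr uintptr_t kShadowOffset = 0x7fff8000ULL; // typical Linux x86_64 value

inline int8_t *MemToShadowSketch(uintptr_t addr) {
  return reinterpret_cast<int8_t *>((addr >> kShadowScale) + kShadowOffset);
}

inline bool AddressIsPoisonedSketch(uintptr_t addr) {
  int8_t shadow = *MemToShadowSketch(addr);
  if (shadow == 0) return false;  // fully addressable granule
  // Partially addressable granule: the access is bad if it lands at or past
  // the number of addressable bytes recorded in the shadow byte.
  return static_cast<int8_t>(addr & ((1u << kShadowScale) - 1)) >= shadow;
}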
+ for (; beg < end; beg++) + if (__asan::AddressIsPoisoned(beg)) + return beg; + UNREACHABLE("mem_is_zero returned false, but poisoned byte was not found"); + return 0; +} + +#define CHECK_SMALL_REGION(p, size, isWrite) \ + do { \ + uptr __p = reinterpret_cast(p); \ + uptr __size = size; \ + if (UNLIKELY(__asan::AddressIsPoisoned(__p) || \ + __asan::AddressIsPoisoned(__p + __size - 1))) { \ + GET_CURRENT_PC_BP_SP; \ + uptr __bad = __asan_region_is_poisoned(__p, __size); \ + __asan_report_error(pc, bp, sp, __bad, isWrite, __size, 0);\ + } \ + } while (false) + + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +u16 __sanitizer_unaligned_load16(const uu16 *p) { + CHECK_SMALL_REGION(p, sizeof(*p), false); + return *p; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +u32 __sanitizer_unaligned_load32(const uu32 *p) { + CHECK_SMALL_REGION(p, sizeof(*p), false); + return *p; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +u64 __sanitizer_unaligned_load64(const uu64 *p) { + CHECK_SMALL_REGION(p, sizeof(*p), false); + return *p; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_unaligned_store16(uu16 *p, u16 x) { + CHECK_SMALL_REGION(p, sizeof(*p), true); + *p = x; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_unaligned_store32(uu32 *p, u32 x) { + CHECK_SMALL_REGION(p, sizeof(*p), true); + *p = x; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_unaligned_store64(uu64 *p, u64 x) { + CHECK_SMALL_REGION(p, sizeof(*p), true); + *p = x; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +void __asan_poison_cxx_array_cookie(uptr p) { + if (SANITIZER_WORDSIZE != 64) return; + if (!flags()->poison_array_cookie) return; + uptr s = MEM_TO_SHADOW(p); + *reinterpret_cast(s) = kAsanArrayCookieMagic; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +uptr __asan_load_cxx_array_cookie(uptr *p) { + if (SANITIZER_WORDSIZE != 64) return *p; + if (!flags()->poison_array_cookie) return *p; + uptr s = MEM_TO_SHADOW(reinterpret_cast(p)); + u8 sval = *reinterpret_cast(s); + if (sval == kAsanArrayCookieMagic) return *p; + // If sval is not kAsanArrayCookieMagic it can only be freed memory, + // which means that we are going to get double-free. So, return 0 to avoid + // infinite loop of destructors. We don't want to report a double-free here + // though, so print a warning just in case. + // CHECK_EQ(sval, kAsanHeapFreeMagic); + if (sval == kAsanHeapFreeMagic) { + Report("AddressSanitizer: loaded array cookie from free-d memory; " + "expect a double-free report\n"); + return 0; + } + // The cookie may remain unpoisoned if e.g. it comes from a custom + // operator new defined inside a class. + return *p; +} + +// This is a simplified version of __asan_(un)poison_memory_region, which +// assumes that left border of region to be poisoned is properly aligned. +static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) { + if (size == 0) return; + uptr aligned_size = size & ~(SHADOW_GRANULARITY - 1); + PoisonShadow(addr, aligned_size, + do_poison ? kAsanStackUseAfterScopeMagic : 0); + if (size == aligned_size) + return; + s8 end_offset = (s8)(size - aligned_size); + s8* shadow_end = (s8*)MemToShadow(addr + aligned_size); + s8 end_value = *shadow_end; + if (do_poison) { + // If possible, mark all the bytes mapping to last shadow byte as + // unaddressable. 
+ if (end_value > 0 && end_value <= end_offset) + *shadow_end = (s8)kAsanStackUseAfterScopeMagic; + } else { + // If necessary, mark few first bytes mapping to last shadow byte + // as addressable + if (end_value != 0) + *shadow_end = Max(end_value, end_offset); + } +} + +void __asan_set_shadow_00(uptr addr, uptr size) { + REAL(memset)((void *)addr, 0, size); +} + +void __asan_set_shadow_f1(uptr addr, uptr size) { + REAL(memset)((void *)addr, 0xf1, size); +} + +void __asan_set_shadow_f2(uptr addr, uptr size) { + REAL(memset)((void *)addr, 0xf2, size); +} + +void __asan_set_shadow_f3(uptr addr, uptr size) { + REAL(memset)((void *)addr, 0xf3, size); +} + +void __asan_set_shadow_f5(uptr addr, uptr size) { + REAL(memset)((void *)addr, 0xf5, size); +} + +void __asan_set_shadow_f8(uptr addr, uptr size) { + REAL(memset)((void *)addr, 0xf8, size); +} + +void __asan_poison_stack_memory(uptr addr, uptr size) { + VReport(1, "poisoning: %p %zx\n", (void *)addr, size); + PoisonAlignedStackMemory(addr, size, true); +} + +void __asan_unpoison_stack_memory(uptr addr, uptr size) { + VReport(1, "unpoisoning: %p %zx\n", (void *)addr, size); + PoisonAlignedStackMemory(addr, size, false); +} + +void __sanitizer_annotate_contiguous_container(const void *beg_p, + const void *end_p, + const void *old_mid_p, + const void *new_mid_p) { + if (!flags()->detect_container_overflow) return; + VPrintf(2, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p, + new_mid_p); + uptr beg = reinterpret_cast(beg_p); + uptr end = reinterpret_cast(end_p); + uptr old_mid = reinterpret_cast(old_mid_p); + uptr new_mid = reinterpret_cast(new_mid_p); + uptr granularity = SHADOW_GRANULARITY; + if (!(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end && + IsAligned(beg, granularity))) { + GET_STACK_TRACE_FATAL_HERE; + ReportBadParamsToAnnotateContiguousContainer(beg, end, old_mid, new_mid, + &stack); + } + CHECK_LE(end - beg, + FIRST_32_SECOND_64(1UL << 30, 1ULL << 34)); // Sanity check. + + uptr a = RoundDownTo(Min(old_mid, new_mid), granularity); + uptr c = RoundUpTo(Max(old_mid, new_mid), granularity); + uptr d1 = RoundDownTo(old_mid, granularity); + // uptr d2 = RoundUpTo(old_mid, granularity); + // Currently we should be in this state: + // [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good. + // Make a quick sanity check that we are indeed in this state. + // + // FIXME: Two of these three checks are disabled until we fix + // https://github.com/google/sanitizers/issues/258. + // if (d1 != d2) + // CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1); + if (a + granularity <= d1) + CHECK_EQ(*(u8*)MemToShadow(a), 0); + // if (d2 + granularity <= c && c <= end) + // CHECK_EQ(*(u8 *)MemToShadow(c - granularity), + // kAsanContiguousContainerOOBMagic); + + uptr b1 = RoundDownTo(new_mid, granularity); + uptr b2 = RoundUpTo(new_mid, granularity); + // New state: + // [a, b1) is good, [b2, c) is bad, [b1, b2) is partially good. 
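A caller-side sketch of this annotation entry point, modelled on what vector-like containers do; the buffer type, its capacity, and the 8-byte alignment are invented for illustration:

#include <sanitizer/common_interface_defs.h>  // __sanitizer_annotate_contiguous_container

// Fixed-capacity buffer that keeps its unused tail poisoned. `storage` is
// aligned to the shadow granularity so the `beg` argument passes the
// IsAligned check above.
struct AnnotatedBuffer {
  static const int kCapacity = 64;
  alignas(8) char storage[kCapacity];
  int size = 0;

  AnnotatedBuffer() {
    // Fresh memory is fully addressable, so old_mid is the end of storage;
    // afterwards nothing is in use and the whole tail is poisoned.
    __sanitizer_annotate_contiguous_container(storage, storage + kCapacity,
                                              storage + kCapacity, storage);
  }
  void push_back(char c) {
    if (size == kCapacity) return;
    __sanitizer_annotate_contiguous_container(storage, storage + kCapacity,
                                              storage + size,       // old_mid
                                              storage + size + 1);  // new_mid
    storage[size++] = c;
  }
  void pop_back() {
    if (size == 0) return;
    --size;
    __sanitizer_annotate_contiguous_container(storage, storage + kCapacity,
                                              storage + size + 1,   // old_mid
                                              storage + size);      // new_mid
  }
  ~AnnotatedBuffer() {
    // Unpoison everything before the storage is reused by other code.
    __sanitizer_annotate_contiguous_container(storage, storage + kCapacity,
                                              storage + size,
                                              storage + kCapacity);
  }
};

With detect_container_overflow enabled, an access such as buf.storage[buf.size] then lands on a poisoned byte and is reported as a container-overflow error.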
+ PoisonShadow(a, b1 - a, 0); + PoisonShadow(b2, c - b2, kAsanContiguousContainerOOBMagic); + if (b1 != b2) { + CHECK_EQ(b2 - b1, granularity); + *(u8*)MemToShadow(b1) = static_cast(new_mid - b1); + } +} + +const void *__sanitizer_contiguous_container_find_bad_address( + const void *beg_p, const void *mid_p, const void *end_p) { + if (!flags()->detect_container_overflow) + return nullptr; + uptr beg = reinterpret_cast(beg_p); + uptr end = reinterpret_cast(end_p); + uptr mid = reinterpret_cast(mid_p); + CHECK_LE(beg, mid); + CHECK_LE(mid, end); + // Check some bytes starting from beg, some bytes around mid, and some bytes + // ending with end. + uptr kMaxRangeToCheck = 32; + uptr r1_beg = beg; + uptr r1_end = Min(beg + kMaxRangeToCheck, mid); + uptr r2_beg = Max(beg, mid - kMaxRangeToCheck); + uptr r2_end = Min(end, mid + kMaxRangeToCheck); + uptr r3_beg = Max(end - kMaxRangeToCheck, mid); + uptr r3_end = end; + for (uptr i = r1_beg; i < r1_end; i++) + if (AddressIsPoisoned(i)) + return reinterpret_cast(i); + for (uptr i = r2_beg; i < mid; i++) + if (AddressIsPoisoned(i)) + return reinterpret_cast(i); + for (uptr i = mid; i < r2_end; i++) + if (!AddressIsPoisoned(i)) + return reinterpret_cast(i); + for (uptr i = r3_beg; i < r3_end; i++) + if (!AddressIsPoisoned(i)) + return reinterpret_cast(i); + return nullptr; +} + +int __sanitizer_verify_contiguous_container(const void *beg_p, + const void *mid_p, + const void *end_p) { + return __sanitizer_contiguous_container_find_bad_address(beg_p, mid_p, + end_p) == nullptr; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +void __asan_poison_intra_object_redzone(uptr ptr, uptr size) { + AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, true); +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +void __asan_unpoison_intra_object_redzone(uptr ptr, uptr size) { + AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, false); +} + +// --- Implementation of LSan-specific functions --- {{{1 +namespace __lsan { +bool WordIsPoisoned(uptr addr) { + return (__asan_region_is_poisoned(addr, sizeof(uptr)) != 0); +} +} diff --git a/lib/asan/asan_posix.cc b/lib/asan/asan_posix.cc deleted file mode 100644 index 5c5e0359ad6c..000000000000 --- a/lib/asan/asan_posix.cc +++ /dev/null @@ -1,117 +0,0 @@ -//===-- asan_posix.cc -----------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Posix-specific details. 
-//===----------------------------------------------------------------------===// - -#include "sanitizer_common/sanitizer_platform.h" -#if SANITIZER_POSIX - -#include "asan_internal.h" -#include "asan_interceptors.h" -#include "asan_mapping.h" -#include "asan_report.h" -#include "asan_stack.h" -#include "sanitizer_common/sanitizer_libc.h" -#include "sanitizer_common/sanitizer_posix.h" -#include "sanitizer_common/sanitizer_procmaps.h" - -#include -#include -#include -#include -#include - -namespace __asan { - -void AsanOnDeadlySignal(int signo, void *siginfo, void *context) { - StartReportDeadlySignal(); - SignalContext sig(siginfo, context); - ReportDeadlySignal(sig); -} - -// ---------------------- TSD ---------------- {{{1 - -#if SANITIZER_NETBSD || SANITIZER_FREEBSD -// Thread Static Data cannot be used in early init on NetBSD and FreeBSD. -// Reuse the Asan TSD API for compatibility with existing code -// with an alternative implementation. - -static void (*tsd_destructor)(void *tsd) = nullptr; - -struct tsd_key { - tsd_key() : key(nullptr) {} - ~tsd_key() { - CHECK(tsd_destructor); - if (key) - (*tsd_destructor)(key); - } - void *key; -}; - -static thread_local struct tsd_key key; - -void AsanTSDInit(void (*destructor)(void *tsd)) { - CHECK(!tsd_destructor); - tsd_destructor = destructor; -} - -void *AsanTSDGet() { - CHECK(tsd_destructor); - return key.key; -} - -void AsanTSDSet(void *tsd) { - CHECK(tsd_destructor); - CHECK(tsd); - CHECK(!key.key); - key.key = tsd; -} - -void PlatformTSDDtor(void *tsd) { - CHECK(tsd_destructor); - CHECK_EQ(key.key, tsd); - key.key = nullptr; - // Make sure that signal handler can not see a stale current thread pointer. - atomic_signal_fence(memory_order_seq_cst); - AsanThread::TSDDtor(tsd); -} -#else -static pthread_key_t tsd_key; -static bool tsd_key_inited = false; -void AsanTSDInit(void (*destructor)(void *tsd)) { - CHECK(!tsd_key_inited); - tsd_key_inited = true; - CHECK_EQ(0, pthread_key_create(&tsd_key, destructor)); -} - -void *AsanTSDGet() { - CHECK(tsd_key_inited); - return pthread_getspecific(tsd_key); -} - -void AsanTSDSet(void *tsd) { - CHECK(tsd_key_inited); - pthread_setspecific(tsd_key, tsd); -} - -void PlatformTSDDtor(void *tsd) { - AsanThreadContext *context = (AsanThreadContext*)tsd; - if (context->destructor_iterations > 1) { - context->destructor_iterations--; - CHECK_EQ(0, pthread_setspecific(tsd_key, tsd)); - return; - } - AsanThread::TSDDtor(tsd); -} -#endif -} // namespace __asan - -#endif // SANITIZER_POSIX diff --git a/lib/asan/asan_posix.cpp b/lib/asan/asan_posix.cpp new file mode 100644 index 000000000000..920d216624a3 --- /dev/null +++ b/lib/asan/asan_posix.cpp @@ -0,0 +1,117 @@ +//===-- asan_posix.cpp ----------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Posix-specific details. 
+//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_POSIX + +#include "asan_internal.h" +#include "asan_interceptors.h" +#include "asan_mapping.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_posix.h" +#include "sanitizer_common/sanitizer_procmaps.h" + +#include +#include +#include +#include +#include + +namespace __asan { + +void AsanOnDeadlySignal(int signo, void *siginfo, void *context) { + StartReportDeadlySignal(); + SignalContext sig(siginfo, context); + ReportDeadlySignal(sig); +} + +// ---------------------- TSD ---------------- {{{1 + +#if SANITIZER_NETBSD && !ASAN_DYNAMIC +// Thread Static Data cannot be used in early static ASan init on NetBSD. +// Reuse the Asan TSD API for compatibility with existing code +// with an alternative implementation. + +static void (*tsd_destructor)(void *tsd) = nullptr; + +struct tsd_key { + tsd_key() : key(nullptr) {} + ~tsd_key() { + CHECK(tsd_destructor); + if (key) + (*tsd_destructor)(key); + } + void *key; +}; + +static thread_local struct tsd_key key; + +void AsanTSDInit(void (*destructor)(void *tsd)) { + CHECK(!tsd_destructor); + tsd_destructor = destructor; +} + +void *AsanTSDGet() { + CHECK(tsd_destructor); + return key.key; +} + +void AsanTSDSet(void *tsd) { + CHECK(tsd_destructor); + CHECK(tsd); + CHECK(!key.key); + key.key = tsd; +} + +void PlatformTSDDtor(void *tsd) { + CHECK(tsd_destructor); + CHECK_EQ(key.key, tsd); + key.key = nullptr; + // Make sure that signal handler can not see a stale current thread pointer. + atomic_signal_fence(memory_order_seq_cst); + AsanThread::TSDDtor(tsd); +} +#else +static pthread_key_t tsd_key; +static bool tsd_key_inited = false; +void AsanTSDInit(void (*destructor)(void *tsd)) { + CHECK(!tsd_key_inited); + tsd_key_inited = true; + CHECK_EQ(0, pthread_key_create(&tsd_key, destructor)); +} + +void *AsanTSDGet() { + CHECK(tsd_key_inited); + return pthread_getspecific(tsd_key); +} + +void AsanTSDSet(void *tsd) { + CHECK(tsd_key_inited); + pthread_setspecific(tsd_key, tsd); +} + +void PlatformTSDDtor(void *tsd) { + AsanThreadContext *context = (AsanThreadContext*)tsd; + if (context->destructor_iterations > 1) { + context->destructor_iterations--; + CHECK_EQ(0, pthread_setspecific(tsd_key, tsd)); + return; + } + AsanThread::TSDDtor(tsd); +} +#endif +} // namespace __asan + +#endif // SANITIZER_POSIX diff --git a/lib/asan/asan_preinit.cc b/lib/asan/asan_preinit.cc deleted file mode 100644 index 444998c44176..000000000000 --- a/lib/asan/asan_preinit.cc +++ /dev/null @@ -1,24 +0,0 @@ -//===-- asan_preinit.cc ---------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Call __asan_init at the very early stage of process startup. -//===----------------------------------------------------------------------===// -#include "asan_internal.h" - -using namespace __asan; - -#if SANITIZER_CAN_USE_PREINIT_ARRAY - // The symbol is called __local_asan_preinit, because it's not intended to be - // exported. 
- // This code linked into the main executable when -fsanitize=address is in - // the link flags. It can only use exported interface functions. - __attribute__((section(".preinit_array"), used)) - void (*__local_asan_preinit)(void) = __asan_init; -#endif diff --git a/lib/asan/asan_preinit.cpp b/lib/asan/asan_preinit.cpp new file mode 100644 index 000000000000..b07556ec96f8 --- /dev/null +++ b/lib/asan/asan_preinit.cpp @@ -0,0 +1,24 @@ +//===-- asan_preinit.cpp --------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Call __asan_init at the very early stage of process startup. +//===----------------------------------------------------------------------===// +#include "asan_internal.h" + +using namespace __asan; + +#if SANITIZER_CAN_USE_PREINIT_ARRAY + // The symbol is called __local_asan_preinit, because it's not intended to be + // exported. + // This code linked into the main executable when -fsanitize=address is in + // the link flags. It can only use exported interface functions. + __attribute__((section(".preinit_array"), used)) + void (*__local_asan_preinit)(void) = __asan_init; +#endif diff --git a/lib/asan/asan_premap_shadow.cc b/lib/asan/asan_premap_shadow.cc deleted file mode 100644 index 6e547718c68e..000000000000 --- a/lib/asan/asan_premap_shadow.cc +++ /dev/null @@ -1,78 +0,0 @@ -//===-- asan_premap_shadow.cc ---------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Reserve shadow memory with an ifunc resolver. -//===----------------------------------------------------------------------===// - -#include "asan_mapping.h" - -#if ASAN_PREMAP_SHADOW - -#include "asan_premap_shadow.h" -#include "sanitizer_common/sanitizer_posix.h" - -namespace __asan { - -// The code in this file needs to run in an unrelocated binary. It may not -// access any external symbol, including its own non-hidden globals. - -// Conservative upper limit. -uptr PremapShadowSize() { - uptr granularity = GetMmapGranularity(); - return RoundUpTo(GetMaxVirtualAddress() >> SHADOW_SCALE, granularity); -} - -// Returns an address aligned to 8 pages, such that one page on the left and -// PremapShadowSize() bytes on the right of it are mapped r/o. 
-uptr PremapShadow() { - uptr granularity = GetMmapGranularity(); - uptr alignment = granularity * 8; - uptr left_padding = granularity; - uptr shadow_size = PremapShadowSize(); - uptr map_size = shadow_size + left_padding + alignment; - - uptr map_start = (uptr)MmapNoAccess(map_size); - CHECK_NE(map_start, ~(uptr)0); - - uptr shadow_start = RoundUpTo(map_start + left_padding, alignment); - uptr shadow_end = shadow_start + shadow_size; - internal_munmap(reinterpret_cast(map_start), - shadow_start - left_padding - map_start); - internal_munmap(reinterpret_cast(shadow_end), - map_start + map_size - shadow_end); - return shadow_start; -} - -bool PremapShadowFailed() { - uptr shadow = reinterpret_cast(&__asan_shadow); - uptr resolver = reinterpret_cast(&__asan_premap_shadow); - // shadow == resolver is how Android KitKat and older handles ifunc. - // shadow == 0 just in case. - if (shadow == 0 || shadow == resolver) - return true; - return false; -} -} // namespace __asan - -extern "C" { -decltype(__asan_shadow)* __asan_premap_shadow() { - // The resolver may be called multiple times. Map the shadow just once. - static uptr premapped_shadow = 0; - if (!premapped_shadow) premapped_shadow = __asan::PremapShadow(); - return reinterpret_cast(premapped_shadow); -} - -// __asan_shadow is a "function" that has the same address as the first byte of -// the shadow mapping. -INTERFACE_ATTRIBUTE __attribute__((ifunc("__asan_premap_shadow"))) void -__asan_shadow(); -} - -#endif // ASAN_PREMAP_SHADOW diff --git a/lib/asan/asan_premap_shadow.cpp b/lib/asan/asan_premap_shadow.cpp new file mode 100644 index 000000000000..7835e99748ff --- /dev/null +++ b/lib/asan/asan_premap_shadow.cpp @@ -0,0 +1,78 @@ +//===-- asan_premap_shadow.cpp --------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Reserve shadow memory with an ifunc resolver. +//===----------------------------------------------------------------------===// + +#include "asan_mapping.h" + +#if ASAN_PREMAP_SHADOW + +#include "asan_premap_shadow.h" +#include "sanitizer_common/sanitizer_posix.h" + +namespace __asan { + +// The code in this file needs to run in an unrelocated binary. It may not +// access any external symbol, including its own non-hidden globals. + +// Conservative upper limit. +uptr PremapShadowSize() { + uptr granularity = GetMmapGranularity(); + return RoundUpTo(GetMaxVirtualAddress() >> SHADOW_SCALE, granularity); +} + +// Returns an address aligned to 8 pages, such that one page on the left and +// PremapShadowSize() bytes on the right of it are mapped r/o. 
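The same reserve-then-trim trick as a self-contained POSIX sketch; it omits the extra left guard page kept by PremapShadow below, and the mmap flags and error handling are illustrative:

#include <sys/mman.h>
#include <cstddef>
#include <cstdint>

// Reserve `size` bytes of inaccessible address space aligned to `alignment`
// (a multiple of the page size): over-map with PROT_NONE, round the start up
// to the alignment, then unmap the slack on both sides.
static void *ReserveAligned(std::size_t size, std::size_t alignment) {
  std::size_t map_size = size + alignment;
  void *map = mmap(nullptr, map_size, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (map == MAP_FAILED) return nullptr;

  uintptr_t start = reinterpret_cast<uintptr_t>(map);
  uintptr_t aligned = (start + alignment - 1) & ~(alignment - 1);
  uintptr_t end = start + map_size;

  if (aligned > start)  // left slack
    munmap(map, aligned - start);
  if (end > aligned + size)  // right slack
    munmap(reinterpret_cast<void *>(aligned + size), end - (aligned + size));
  return reinterpret_cast<void *>(aligned);
}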
+uptr PremapShadow() { + uptr granularity = GetMmapGranularity(); + uptr alignment = granularity * 8; + uptr left_padding = granularity; + uptr shadow_size = PremapShadowSize(); + uptr map_size = shadow_size + left_padding + alignment; + + uptr map_start = (uptr)MmapNoAccess(map_size); + CHECK_NE(map_start, ~(uptr)0); + + uptr shadow_start = RoundUpTo(map_start + left_padding, alignment); + uptr shadow_end = shadow_start + shadow_size; + internal_munmap(reinterpret_cast(map_start), + shadow_start - left_padding - map_start); + internal_munmap(reinterpret_cast(shadow_end), + map_start + map_size - shadow_end); + return shadow_start; +} + +bool PremapShadowFailed() { + uptr shadow = reinterpret_cast(&__asan_shadow); + uptr resolver = reinterpret_cast(&__asan_premap_shadow); + // shadow == resolver is how Android KitKat and older handles ifunc. + // shadow == 0 just in case. + if (shadow == 0 || shadow == resolver) + return true; + return false; +} +} // namespace __asan + +extern "C" { +decltype(__asan_shadow)* __asan_premap_shadow() { + // The resolver may be called multiple times. Map the shadow just once. + static uptr premapped_shadow = 0; + if (!premapped_shadow) premapped_shadow = __asan::PremapShadow(); + return reinterpret_cast(premapped_shadow); +} + +// __asan_shadow is a "function" that has the same address as the first byte of +// the shadow mapping. +INTERFACE_ATTRIBUTE __attribute__((ifunc("__asan_premap_shadow"))) void +__asan_shadow(); +} + +#endif // ASAN_PREMAP_SHADOW diff --git a/lib/asan/asan_report.cc b/lib/asan/asan_report.cc deleted file mode 100644 index 49067437d9d4..000000000000 --- a/lib/asan/asan_report.cc +++ /dev/null @@ -1,558 +0,0 @@ -//===-- asan_report.cc ----------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// This file contains error reporting code. 
-//===----------------------------------------------------------------------===// - -#include "asan_errors.h" -#include "asan_flags.h" -#include "asan_descriptions.h" -#include "asan_internal.h" -#include "asan_mapping.h" -#include "asan_report.h" -#include "asan_scariness_score.h" -#include "asan_stack.h" -#include "asan_thread.h" -#include "sanitizer_common/sanitizer_common.h" -#include "sanitizer_common/sanitizer_flags.h" -#include "sanitizer_common/sanitizer_report_decorator.h" -#include "sanitizer_common/sanitizer_stackdepot.h" -#include "sanitizer_common/sanitizer_symbolizer.h" - -namespace __asan { - -// -------------------- User-specified callbacks ----------------- {{{1 -static void (*error_report_callback)(const char*); -static char *error_message_buffer = nullptr; -static uptr error_message_buffer_pos = 0; -static BlockingMutex error_message_buf_mutex(LINKER_INITIALIZED); -static const unsigned kAsanBuggyPcPoolSize = 25; -static __sanitizer::atomic_uintptr_t AsanBuggyPcPool[kAsanBuggyPcPoolSize]; - -void AppendToErrorMessageBuffer(const char *buffer) { - BlockingMutexLock l(&error_message_buf_mutex); - if (!error_message_buffer) { - error_message_buffer = - (char*)MmapOrDieQuietly(kErrorMessageBufferSize, __func__); - error_message_buffer_pos = 0; - } - uptr length = internal_strlen(buffer); - RAW_CHECK(kErrorMessageBufferSize >= error_message_buffer_pos); - uptr remaining = kErrorMessageBufferSize - error_message_buffer_pos; - internal_strncpy(error_message_buffer + error_message_buffer_pos, - buffer, remaining); - error_message_buffer[kErrorMessageBufferSize - 1] = '\0'; - // FIXME: reallocate the buffer instead of truncating the message. - error_message_buffer_pos += Min(remaining, length); -} - -// ---------------------- Helper functions ----------------------- {{{1 - -void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte, - bool in_shadow, const char *after) { - Decorator d; - str->append("%s%s%x%x%s%s", before, - in_shadow ? d.ShadowByte(byte) : d.MemoryByte(), byte >> 4, - byte & 15, d.Default(), after); -} - -static void PrintZoneForPointer(uptr ptr, uptr zone_ptr, - const char *zone_name) { - if (zone_ptr) { - if (zone_name) { - Printf("malloc_zone_from_ptr(%p) = %p, which is %s\n", - ptr, zone_ptr, zone_name); - } else { - Printf("malloc_zone_from_ptr(%p) = %p, which doesn't have a name\n", - ptr, zone_ptr); - } - } else { - Printf("malloc_zone_from_ptr(%p) = 0\n", ptr); - } -} - -// ---------------------- Address Descriptions ------------------- {{{1 - -bool ParseFrameDescription(const char *frame_descr, - InternalMmapVector *vars) { - CHECK(frame_descr); - const char *p; - // This string is created by the compiler and has the following form: - // "n alloc_1 alloc_2 ... alloc_n" - // where alloc_i looks like "offset size len ObjectName" - // or "offset size len ObjectName:line". 
- uptr n_objects = (uptr)internal_simple_strtoll(frame_descr, &p, 10); - if (n_objects == 0) - return false; - - for (uptr i = 0; i < n_objects; i++) { - uptr beg = (uptr)internal_simple_strtoll(p, &p, 10); - uptr size = (uptr)internal_simple_strtoll(p, &p, 10); - uptr len = (uptr)internal_simple_strtoll(p, &p, 10); - if (beg == 0 || size == 0 || *p != ' ') { - return false; - } - p++; - char *colon_pos = internal_strchr(p, ':'); - uptr line = 0; - uptr name_len = len; - if (colon_pos != nullptr && colon_pos < p + len) { - name_len = colon_pos - p; - line = (uptr)internal_simple_strtoll(colon_pos + 1, nullptr, 10); - } - StackVarDescr var = {beg, size, p, name_len, line}; - vars->push_back(var); - p += len; - } - - return true; -} - -// -------------------- Different kinds of reports ----------------- {{{1 - -// Use ScopedInErrorReport to run common actions just before and -// immediately after printing error report. -class ScopedInErrorReport { - public: - explicit ScopedInErrorReport(bool fatal = false) - : halt_on_error_(fatal || flags()->halt_on_error) { - // Make sure the registry and sanitizer report mutexes are locked while - // we're printing an error report. - // We can lock them only here to avoid self-deadlock in case of - // recursive reports. - asanThreadRegistry().Lock(); - Printf( - "=================================================================\n"); - } - - ~ScopedInErrorReport() { - if (halt_on_error_ && !__sanitizer_acquire_crash_state()) { - asanThreadRegistry().Unlock(); - return; - } - ASAN_ON_ERROR(); - if (current_error_.IsValid()) current_error_.Print(); - - // Make sure the current thread is announced. - DescribeThread(GetCurrentThread()); - // We may want to grab this lock again when printing stats. - asanThreadRegistry().Unlock(); - // Print memory stats. - if (flags()->print_stats) - __asan_print_accumulated_stats(); - - if (common_flags()->print_cmdline) - PrintCmdline(); - - if (common_flags()->print_module_map == 2) PrintModuleMap(); - - // Copy the message buffer so that we could start logging without holding a - // lock that gets aquired during printing. - InternalMmapVector buffer_copy(kErrorMessageBufferSize); - { - BlockingMutexLock l(&error_message_buf_mutex); - internal_memcpy(buffer_copy.data(), - error_message_buffer, kErrorMessageBufferSize); - } - - LogFullErrorReport(buffer_copy.data()); - - if (error_report_callback) { - error_report_callback(buffer_copy.data()); - } - - if (halt_on_error_ && common_flags()->abort_on_error) { - // On Android the message is truncated to 512 characters. - // FIXME: implement "compact" error format, possibly without, or with - // highly compressed stack traces? - // FIXME: or just use the summary line as abort message? - SetAbortMessage(buffer_copy.data()); - } - - // In halt_on_error = false mode, reset the current error object (before - // unlocking). - if (!halt_on_error_) - internal_memset(¤t_error_, 0, sizeof(current_error_)); - - if (halt_on_error_) { - Report("ABORTING\n"); - Die(); - } - } - - void ReportError(const ErrorDescription &description) { - // Can only report one error per ScopedInErrorReport. - CHECK_EQ(current_error_.kind, kErrorKindInvalid); - internal_memcpy(¤t_error_, &description, sizeof(current_error_)); - } - - static ErrorDescription &CurrentError() { - return current_error_; - } - - private: - ScopedErrorReportLock error_report_lock_; - // Error currently being reported. This enables the destructor to interact - // with the debugger and point it to an error description. 
- static ErrorDescription current_error_; - bool halt_on_error_; -}; - -ErrorDescription ScopedInErrorReport::current_error_(LINKER_INITIALIZED); - -void ReportDeadlySignal(const SignalContext &sig) { - ScopedInErrorReport in_report(/*fatal*/ true); - ErrorDeadlySignal error(GetCurrentTidOrInvalid(), sig); - in_report.ReportError(error); -} - -void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) { - ScopedInErrorReport in_report; - ErrorDoubleFree error(GetCurrentTidOrInvalid(), free_stack, addr); - in_report.ReportError(error); -} - -void ReportNewDeleteTypeMismatch(uptr addr, uptr delete_size, - uptr delete_alignment, - BufferedStackTrace *free_stack) { - ScopedInErrorReport in_report; - ErrorNewDeleteTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr, - delete_size, delete_alignment); - in_report.ReportError(error); -} - -void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack) { - ScopedInErrorReport in_report; - ErrorFreeNotMalloced error(GetCurrentTidOrInvalid(), free_stack, addr); - in_report.ReportError(error); -} - -void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack, - AllocType alloc_type, - AllocType dealloc_type) { - ScopedInErrorReport in_report; - ErrorAllocTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr, - alloc_type, dealloc_type); - in_report.ReportError(error); -} - -void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack) { - ScopedInErrorReport in_report; - ErrorMallocUsableSizeNotOwned error(GetCurrentTidOrInvalid(), stack, addr); - in_report.ReportError(error); -} - -void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr, - BufferedStackTrace *stack) { - ScopedInErrorReport in_report; - ErrorSanitizerGetAllocatedSizeNotOwned error(GetCurrentTidOrInvalid(), stack, - addr); - in_report.ReportError(error); -} - -void ReportCallocOverflow(uptr count, uptr size, BufferedStackTrace *stack) { - ScopedInErrorReport in_report(/*fatal*/ true); - ErrorCallocOverflow error(GetCurrentTidOrInvalid(), stack, count, size); - in_report.ReportError(error); -} - -void ReportReallocArrayOverflow(uptr count, uptr size, - BufferedStackTrace *stack) { - ScopedInErrorReport in_report(/*fatal*/ true); - ErrorReallocArrayOverflow error(GetCurrentTidOrInvalid(), stack, count, size); - in_report.ReportError(error); -} - -void ReportPvallocOverflow(uptr size, BufferedStackTrace *stack) { - ScopedInErrorReport in_report(/*fatal*/ true); - ErrorPvallocOverflow error(GetCurrentTidOrInvalid(), stack, size); - in_report.ReportError(error); -} - -void ReportInvalidAllocationAlignment(uptr alignment, - BufferedStackTrace *stack) { - ScopedInErrorReport in_report(/*fatal*/ true); - ErrorInvalidAllocationAlignment error(GetCurrentTidOrInvalid(), stack, - alignment); - in_report.ReportError(error); -} - -void ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment, - BufferedStackTrace *stack) { - ScopedInErrorReport in_report(/*fatal*/ true); - ErrorInvalidAlignedAllocAlignment error(GetCurrentTidOrInvalid(), stack, - size, alignment); - in_report.ReportError(error); -} - -void ReportInvalidPosixMemalignAlignment(uptr alignment, - BufferedStackTrace *stack) { - ScopedInErrorReport in_report(/*fatal*/ true); - ErrorInvalidPosixMemalignAlignment error(GetCurrentTidOrInvalid(), stack, - alignment); - in_report.ReportError(error); -} - -void ReportAllocationSizeTooBig(uptr user_size, uptr total_size, uptr max_size, - BufferedStackTrace *stack) { - ScopedInErrorReport in_report(/*fatal*/ true); - 
ErrorAllocationSizeTooBig error(GetCurrentTidOrInvalid(), stack, user_size, - total_size, max_size); - in_report.ReportError(error); -} - -void ReportRssLimitExceeded(BufferedStackTrace *stack) { - ScopedInErrorReport in_report(/*fatal*/ true); - ErrorRssLimitExceeded error(GetCurrentTidOrInvalid(), stack); - in_report.ReportError(error); -} - -void ReportOutOfMemory(uptr requested_size, BufferedStackTrace *stack) { - ScopedInErrorReport in_report(/*fatal*/ true); - ErrorOutOfMemory error(GetCurrentTidOrInvalid(), stack, requested_size); - in_report.ReportError(error); -} - -void ReportStringFunctionMemoryRangesOverlap(const char *function, - const char *offset1, uptr length1, - const char *offset2, uptr length2, - BufferedStackTrace *stack) { - ScopedInErrorReport in_report; - ErrorStringFunctionMemoryRangesOverlap error( - GetCurrentTidOrInvalid(), stack, (uptr)offset1, length1, (uptr)offset2, - length2, function); - in_report.ReportError(error); -} - -void ReportStringFunctionSizeOverflow(uptr offset, uptr size, - BufferedStackTrace *stack) { - ScopedInErrorReport in_report; - ErrorStringFunctionSizeOverflow error(GetCurrentTidOrInvalid(), stack, offset, - size); - in_report.ReportError(error); -} - -void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end, - uptr old_mid, uptr new_mid, - BufferedStackTrace *stack) { - ScopedInErrorReport in_report; - ErrorBadParamsToAnnotateContiguousContainer error( - GetCurrentTidOrInvalid(), stack, beg, end, old_mid, new_mid); - in_report.ReportError(error); -} - -void ReportODRViolation(const __asan_global *g1, u32 stack_id1, - const __asan_global *g2, u32 stack_id2) { - ScopedInErrorReport in_report; - ErrorODRViolation error(GetCurrentTidOrInvalid(), g1, stack_id1, g2, - stack_id2); - in_report.ReportError(error); -} - -// ----------------------- CheckForInvalidPointerPair ----------- {{{1 -static NOINLINE void ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp, - uptr a1, uptr a2) { - ScopedInErrorReport in_report; - ErrorInvalidPointerPair error(GetCurrentTidOrInvalid(), pc, bp, sp, a1, a2); - in_report.ReportError(error); -} - -static bool IsInvalidPointerPair(uptr a1, uptr a2) { - if (a1 == a2) - return false; - - // 256B in shadow memory can be iterated quite fast - static const uptr kMaxOffset = 2048; - - uptr left = a1 < a2 ? a1 : a2; - uptr right = a1 < a2 ? 
a2 : a1; - uptr offset = right - left; - if (offset <= kMaxOffset) - return __asan_region_is_poisoned(left, offset); - - AsanThread *t = GetCurrentThread(); - - // check whether left is a stack memory pointer - if (uptr shadow_offset1 = t->GetStackVariableShadowStart(left)) { - uptr shadow_offset2 = t->GetStackVariableShadowStart(right); - return shadow_offset2 == 0 || shadow_offset1 != shadow_offset2; - } - - // check whether left is a heap memory address - HeapAddressDescription hdesc1, hdesc2; - if (GetHeapAddressInformation(left, 0, &hdesc1) && - hdesc1.chunk_access.access_type == kAccessTypeInside) - return !GetHeapAddressInformation(right, 0, &hdesc2) || - hdesc2.chunk_access.access_type != kAccessTypeInside || - hdesc1.chunk_access.chunk_begin != hdesc2.chunk_access.chunk_begin; - - // check whether left is an address of a global variable - GlobalAddressDescription gdesc1, gdesc2; - if (GetGlobalAddressInformation(left, 0, &gdesc1)) - return !GetGlobalAddressInformation(right - 1, 0, &gdesc2) || - !gdesc1.PointsInsideTheSameVariable(gdesc2); - - if (t->GetStackVariableShadowStart(right) || - GetHeapAddressInformation(right, 0, &hdesc2) || - GetGlobalAddressInformation(right - 1, 0, &gdesc2)) - return true; - - // At this point we know nothing about both a1 and a2 addresses. - return false; -} - -static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) { - switch (flags()->detect_invalid_pointer_pairs) { - case 0 : return; - case 1 : if (p1 == nullptr || p2 == nullptr) return; break; - } - - uptr a1 = reinterpret_cast(p1); - uptr a2 = reinterpret_cast(p2); - - if (IsInvalidPointerPair(a1, a2)) { - GET_CALLER_PC_BP_SP; - ReportInvalidPointerPair(pc, bp, sp, a1, a2); - } -} -// ----------------------- Mac-specific reports ----------------- {{{1 - -void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name, - BufferedStackTrace *stack) { - ScopedInErrorReport in_report; - Printf("mz_realloc(%p) -- attempting to realloc unallocated memory.\n" - "This is an unrecoverable problem, exiting now.\n", - addr); - PrintZoneForPointer(addr, zone_ptr, zone_name); - stack->Print(); - DescribeAddressIfHeap(addr); -} - -// -------------- SuppressErrorReport -------------- {{{1 -// Avoid error reports duplicating for ASan recover mode. -static bool SuppressErrorReport(uptr pc) { - if (!common_flags()->suppress_equal_pcs) return false; - for (unsigned i = 0; i < kAsanBuggyPcPoolSize; i++) { - uptr cmp = atomic_load_relaxed(&AsanBuggyPcPool[i]); - if (cmp == 0 && atomic_compare_exchange_strong(&AsanBuggyPcPool[i], &cmp, - pc, memory_order_relaxed)) - return false; - if (cmp == pc) return true; - } - Die(); -} - -void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write, - uptr access_size, u32 exp, bool fatal) { - if (!fatal && SuppressErrorReport(pc)) return; - ENABLE_FRAME_POINTER; - - // Optimization experiments. - // The experiments can be used to evaluate potential optimizations that remove - // instrumentation (assess false negatives). Instead of completely removing - // some instrumentation, compiler can emit special calls into runtime - // (e.g. __asan_report_exp_load1 instead of __asan_report_load1) and pass - // mask of experiments (exp). - // The reaction to a non-zero value of exp is to be defined. 
- (void)exp; - - ScopedInErrorReport in_report(fatal); - ErrorGeneric error(GetCurrentTidOrInvalid(), pc, bp, sp, addr, is_write, - access_size); - in_report.ReportError(error); -} - -} // namespace __asan - -// --------------------------- Interface --------------------- {{{1 -using namespace __asan; // NOLINT - -void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write, - uptr access_size, u32 exp) { - ENABLE_FRAME_POINTER; - bool fatal = flags()->halt_on_error; - ReportGenericError(pc, bp, sp, addr, is_write, access_size, exp, fatal); -} - -void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) { - BlockingMutexLock l(&error_message_buf_mutex); - error_report_callback = callback; -} - -void __asan_describe_address(uptr addr) { - // Thread registry must be locked while we're describing an address. - asanThreadRegistry().Lock(); - PrintAddressDescription(addr, 1, ""); - asanThreadRegistry().Unlock(); -} - -int __asan_report_present() { - return ScopedInErrorReport::CurrentError().kind != kErrorKindInvalid; -} - -uptr __asan_get_report_pc() { - if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric) - return ScopedInErrorReport::CurrentError().Generic.pc; - return 0; -} - -uptr __asan_get_report_bp() { - if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric) - return ScopedInErrorReport::CurrentError().Generic.bp; - return 0; -} - -uptr __asan_get_report_sp() { - if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric) - return ScopedInErrorReport::CurrentError().Generic.sp; - return 0; -} - -uptr __asan_get_report_address() { - ErrorDescription &err = ScopedInErrorReport::CurrentError(); - if (err.kind == kErrorKindGeneric) - return err.Generic.addr_description.Address(); - else if (err.kind == kErrorKindDoubleFree) - return err.DoubleFree.addr_description.addr; - return 0; -} - -int __asan_get_report_access_type() { - if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric) - return ScopedInErrorReport::CurrentError().Generic.is_write; - return 0; -} - -uptr __asan_get_report_access_size() { - if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric) - return ScopedInErrorReport::CurrentError().Generic.access_size; - return 0; -} - -const char *__asan_get_report_description() { - if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric) - return ScopedInErrorReport::CurrentError().Generic.bug_descr; - return ScopedInErrorReport::CurrentError().Base.scariness.GetDescription(); -} - -extern "C" { -SANITIZER_INTERFACE_ATTRIBUTE -void __sanitizer_ptr_sub(void *a, void *b) { - CheckForInvalidPointerPair(a, b); -} -SANITIZER_INTERFACE_ATTRIBUTE -void __sanitizer_ptr_cmp(void *a, void *b) { - CheckForInvalidPointerPair(a, b); -} -} // extern "C" - -// Provide default implementation of __asan_on_error that does nothing -// and may be overriden by user. -SANITIZER_INTERFACE_WEAK_DEF(void, __asan_on_error, void) {} diff --git a/lib/asan/asan_report.cpp b/lib/asan/asan_report.cpp new file mode 100644 index 000000000000..2e6ce436d030 --- /dev/null +++ b/lib/asan/asan_report.cpp @@ -0,0 +1,562 @@ +//===-- asan_report.cpp ---------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// This file contains error reporting code. +//===----------------------------------------------------------------------===// + +#include "asan_errors.h" +#include "asan_flags.h" +#include "asan_descriptions.h" +#include "asan_internal.h" +#include "asan_mapping.h" +#include "asan_report.h" +#include "asan_scariness_score.h" +#include "asan_stack.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_report_decorator.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +#include "sanitizer_common/sanitizer_symbolizer.h" + +namespace __asan { + +// -------------------- User-specified callbacks ----------------- {{{1 +static void (*error_report_callback)(const char*); +static char *error_message_buffer = nullptr; +static uptr error_message_buffer_pos = 0; +static BlockingMutex error_message_buf_mutex(LINKER_INITIALIZED); +static const unsigned kAsanBuggyPcPoolSize = 25; +static __sanitizer::atomic_uintptr_t AsanBuggyPcPool[kAsanBuggyPcPoolSize]; + +void AppendToErrorMessageBuffer(const char *buffer) { + BlockingMutexLock l(&error_message_buf_mutex); + if (!error_message_buffer) { + error_message_buffer = + (char*)MmapOrDieQuietly(kErrorMessageBufferSize, __func__); + error_message_buffer_pos = 0; + } + uptr length = internal_strlen(buffer); + RAW_CHECK(kErrorMessageBufferSize >= error_message_buffer_pos); + uptr remaining = kErrorMessageBufferSize - error_message_buffer_pos; + internal_strncpy(error_message_buffer + error_message_buffer_pos, + buffer, remaining); + error_message_buffer[kErrorMessageBufferSize - 1] = '\0'; + // FIXME: reallocate the buffer instead of truncating the message. + error_message_buffer_pos += Min(remaining, length); +} + +// ---------------------- Helper functions ----------------------- {{{1 + +void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte, + bool in_shadow, const char *after) { + Decorator d; + str->append("%s%s%x%x%s%s", before, + in_shadow ? d.ShadowByte(byte) : d.MemoryByte(), byte >> 4, + byte & 15, d.Default(), after); +} + +static void PrintZoneForPointer(uptr ptr, uptr zone_ptr, + const char *zone_name) { + if (zone_ptr) { + if (zone_name) { + Printf("malloc_zone_from_ptr(%p) = %p, which is %s\n", + ptr, zone_ptr, zone_name); + } else { + Printf("malloc_zone_from_ptr(%p) = %p, which doesn't have a name\n", + ptr, zone_ptr); + } + } else { + Printf("malloc_zone_from_ptr(%p) = 0\n", ptr); + } +} + +// ---------------------- Address Descriptions ------------------- {{{1 + +bool ParseFrameDescription(const char *frame_descr, + InternalMmapVector *vars) { + CHECK(frame_descr); + const char *p; + // This string is created by the compiler and has the following form: + // "n alloc_1 alloc_2 ... alloc_n" + // where alloc_i looks like "offset size len ObjectName" + // or "offset size len ObjectName:line". 
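As an illustration of the grammar described above (all values invented), the descriptor for a frame with two locals could read "2 32 4 1 x 48 8 3 buf": two objects, x of size 4 at frame offset 32 and buf of size 8 at offset 48. A minimal standalone parser of the same grammar, independent of the runtime's internal_simple_strtoll helpers:

#include <cstdio>
#include <cstdlib>

// Parse "n off size len name [off size len name ...]"; `name` is exactly
// `len` characters long and may end in a ":line" suffix.
static void ParseFrameDescrSketch(const char *descr) {
  char *p = nullptr;
  long n = std::strtol(descr, &p, 10);
  for (long i = 0; i < n; i++) {
    long off = std::strtol(p, &p, 10);
    long size = std::strtol(p, &p, 10);
    long len = std::strtol(p, &p, 10);
    p++;  // skip the single space separating the length from the name
    std::printf("var #%ld: offset=%ld size=%ld name=%.*s\n",
                i, off, size, static_cast<int>(len), p);
    p += len;
  }
}

// Usage: ParseFrameDescrSketch("2 32 4 1 x 48 8 3 buf");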
+ uptr n_objects = (uptr)internal_simple_strtoll(frame_descr, &p, 10); + if (n_objects == 0) + return false; + + for (uptr i = 0; i < n_objects; i++) { + uptr beg = (uptr)internal_simple_strtoll(p, &p, 10); + uptr size = (uptr)internal_simple_strtoll(p, &p, 10); + uptr len = (uptr)internal_simple_strtoll(p, &p, 10); + if (beg == 0 || size == 0 || *p != ' ') { + return false; + } + p++; + char *colon_pos = internal_strchr(p, ':'); + uptr line = 0; + uptr name_len = len; + if (colon_pos != nullptr && colon_pos < p + len) { + name_len = colon_pos - p; + line = (uptr)internal_simple_strtoll(colon_pos + 1, nullptr, 10); + } + StackVarDescr var = {beg, size, p, name_len, line}; + vars->push_back(var); + p += len; + } + + return true; +} + +// -------------------- Different kinds of reports ----------------- {{{1 + +// Use ScopedInErrorReport to run common actions just before and +// immediately after printing error report. +class ScopedInErrorReport { + public: + explicit ScopedInErrorReport(bool fatal = false) + : halt_on_error_(fatal || flags()->halt_on_error) { + // Make sure the registry and sanitizer report mutexes are locked while + // we're printing an error report. + // We can lock them only here to avoid self-deadlock in case of + // recursive reports. + asanThreadRegistry().Lock(); + Printf( + "=================================================================\n"); + } + + ~ScopedInErrorReport() { + if (halt_on_error_ && !__sanitizer_acquire_crash_state()) { + asanThreadRegistry().Unlock(); + return; + } + ASAN_ON_ERROR(); + if (current_error_.IsValid()) current_error_.Print(); + + // Make sure the current thread is announced. + DescribeThread(GetCurrentThread()); + // We may want to grab this lock again when printing stats. + asanThreadRegistry().Unlock(); + // Print memory stats. + if (flags()->print_stats) + __asan_print_accumulated_stats(); + + if (common_flags()->print_cmdline) + PrintCmdline(); + + if (common_flags()->print_module_map == 2) PrintModuleMap(); + + // Copy the message buffer so that we could start logging without holding a + // lock that gets aquired during printing. + InternalMmapVector buffer_copy(kErrorMessageBufferSize); + { + BlockingMutexLock l(&error_message_buf_mutex); + internal_memcpy(buffer_copy.data(), + error_message_buffer, kErrorMessageBufferSize); + } + + LogFullErrorReport(buffer_copy.data()); + + if (error_report_callback) { + error_report_callback(buffer_copy.data()); + } + + if (halt_on_error_ && common_flags()->abort_on_error) { + // On Android the message is truncated to 512 characters. + // FIXME: implement "compact" error format, possibly without, or with + // highly compressed stack traces? + // FIXME: or just use the summary line as abort message? + SetAbortMessage(buffer_copy.data()); + } + + // In halt_on_error = false mode, reset the current error object (before + // unlocking). + if (!halt_on_error_) + internal_memset(¤t_error_, 0, sizeof(current_error_)); + + if (halt_on_error_) { + Report("ABORTING\n"); + Die(); + } + } + + void ReportError(const ErrorDescription &description) { + // Can only report one error per ScopedInErrorReport. + CHECK_EQ(current_error_.kind, kErrorKindInvalid); + internal_memcpy(¤t_error_, &description, sizeof(current_error_)); + } + + static ErrorDescription &CurrentError() { + return current_error_; + } + + private: + ScopedErrorReportLock error_report_lock_; + // Error currently being reported. This enables the destructor to interact + // with the debugger and point it to an error description. 
+ static ErrorDescription current_error_; + bool halt_on_error_; +}; + +ErrorDescription ScopedInErrorReport::current_error_(LINKER_INITIALIZED); + +void ReportDeadlySignal(const SignalContext &sig) { + ScopedInErrorReport in_report(/*fatal*/ true); + ErrorDeadlySignal error(GetCurrentTidOrInvalid(), sig); + in_report.ReportError(error); +} + +void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) { + ScopedInErrorReport in_report; + ErrorDoubleFree error(GetCurrentTidOrInvalid(), free_stack, addr); + in_report.ReportError(error); +} + +void ReportNewDeleteTypeMismatch(uptr addr, uptr delete_size, + uptr delete_alignment, + BufferedStackTrace *free_stack) { + ScopedInErrorReport in_report; + ErrorNewDeleteTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr, + delete_size, delete_alignment); + in_report.ReportError(error); +} + +void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack) { + ScopedInErrorReport in_report; + ErrorFreeNotMalloced error(GetCurrentTidOrInvalid(), free_stack, addr); + in_report.ReportError(error); +} + +void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack, + AllocType alloc_type, + AllocType dealloc_type) { + ScopedInErrorReport in_report; + ErrorAllocTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr, + alloc_type, dealloc_type); + in_report.ReportError(error); +} + +void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack) { + ScopedInErrorReport in_report; + ErrorMallocUsableSizeNotOwned error(GetCurrentTidOrInvalid(), stack, addr); + in_report.ReportError(error); +} + +void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr, + BufferedStackTrace *stack) { + ScopedInErrorReport in_report; + ErrorSanitizerGetAllocatedSizeNotOwned error(GetCurrentTidOrInvalid(), stack, + addr); + in_report.ReportError(error); +} + +void ReportCallocOverflow(uptr count, uptr size, BufferedStackTrace *stack) { + ScopedInErrorReport in_report(/*fatal*/ true); + ErrorCallocOverflow error(GetCurrentTidOrInvalid(), stack, count, size); + in_report.ReportError(error); +} + +void ReportReallocArrayOverflow(uptr count, uptr size, + BufferedStackTrace *stack) { + ScopedInErrorReport in_report(/*fatal*/ true); + ErrorReallocArrayOverflow error(GetCurrentTidOrInvalid(), stack, count, size); + in_report.ReportError(error); +} + +void ReportPvallocOverflow(uptr size, BufferedStackTrace *stack) { + ScopedInErrorReport in_report(/*fatal*/ true); + ErrorPvallocOverflow error(GetCurrentTidOrInvalid(), stack, size); + in_report.ReportError(error); +} + +void ReportInvalidAllocationAlignment(uptr alignment, + BufferedStackTrace *stack) { + ScopedInErrorReport in_report(/*fatal*/ true); + ErrorInvalidAllocationAlignment error(GetCurrentTidOrInvalid(), stack, + alignment); + in_report.ReportError(error); +} + +void ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment, + BufferedStackTrace *stack) { + ScopedInErrorReport in_report(/*fatal*/ true); + ErrorInvalidAlignedAllocAlignment error(GetCurrentTidOrInvalid(), stack, + size, alignment); + in_report.ReportError(error); +} + +void ReportInvalidPosixMemalignAlignment(uptr alignment, + BufferedStackTrace *stack) { + ScopedInErrorReport in_report(/*fatal*/ true); + ErrorInvalidPosixMemalignAlignment error(GetCurrentTidOrInvalid(), stack, + alignment); + in_report.ReportError(error); +} + +void ReportAllocationSizeTooBig(uptr user_size, uptr total_size, uptr max_size, + BufferedStackTrace *stack) { + ScopedInErrorReport in_report(/*fatal*/ true); + 
ErrorAllocationSizeTooBig error(GetCurrentTidOrInvalid(), stack, user_size, + total_size, max_size); + in_report.ReportError(error); +} + +void ReportRssLimitExceeded(BufferedStackTrace *stack) { + ScopedInErrorReport in_report(/*fatal*/ true); + ErrorRssLimitExceeded error(GetCurrentTidOrInvalid(), stack); + in_report.ReportError(error); +} + +void ReportOutOfMemory(uptr requested_size, BufferedStackTrace *stack) { + ScopedInErrorReport in_report(/*fatal*/ true); + ErrorOutOfMemory error(GetCurrentTidOrInvalid(), stack, requested_size); + in_report.ReportError(error); +} + +void ReportStringFunctionMemoryRangesOverlap(const char *function, + const char *offset1, uptr length1, + const char *offset2, uptr length2, + BufferedStackTrace *stack) { + ScopedInErrorReport in_report; + ErrorStringFunctionMemoryRangesOverlap error( + GetCurrentTidOrInvalid(), stack, (uptr)offset1, length1, (uptr)offset2, + length2, function); + in_report.ReportError(error); +} + +void ReportStringFunctionSizeOverflow(uptr offset, uptr size, + BufferedStackTrace *stack) { + ScopedInErrorReport in_report; + ErrorStringFunctionSizeOverflow error(GetCurrentTidOrInvalid(), stack, offset, + size); + in_report.ReportError(error); +} + +void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end, + uptr old_mid, uptr new_mid, + BufferedStackTrace *stack) { + ScopedInErrorReport in_report; + ErrorBadParamsToAnnotateContiguousContainer error( + GetCurrentTidOrInvalid(), stack, beg, end, old_mid, new_mid); + in_report.ReportError(error); +} + +void ReportODRViolation(const __asan_global *g1, u32 stack_id1, + const __asan_global *g2, u32 stack_id2) { + ScopedInErrorReport in_report; + ErrorODRViolation error(GetCurrentTidOrInvalid(), g1, stack_id1, g2, + stack_id2); + in_report.ReportError(error); +} + +// ----------------------- CheckForInvalidPointerPair ----------- {{{1 +static NOINLINE void ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp, + uptr a1, uptr a2) { + ScopedInErrorReport in_report; + ErrorInvalidPointerPair error(GetCurrentTidOrInvalid(), pc, bp, sp, a1, a2); + in_report.ReportError(error); +} + +static bool IsInvalidPointerPair(uptr a1, uptr a2) { + if (a1 == a2) + return false; + + // 256B in shadow memory can be iterated quite fast + static const uptr kMaxOffset = 2048; + + uptr left = a1 < a2 ? a1 : a2; + uptr right = a1 < a2 ? 
a2 : a1;
+  uptr offset = right - left;
+  if (offset <= kMaxOffset)
+    return __asan_region_is_poisoned(left, offset);
+
+  AsanThread *t = GetCurrentThread();
+
+  // check whether left is a stack memory pointer
+  if (uptr shadow_offset1 = t->GetStackVariableShadowStart(left)) {
+    uptr shadow_offset2 = t->GetStackVariableShadowStart(right);
+    return shadow_offset2 == 0 || shadow_offset1 != shadow_offset2;
+  }
+
+  // check whether left is a heap memory address
+  HeapAddressDescription hdesc1, hdesc2;
+  if (GetHeapAddressInformation(left, 0, &hdesc1) &&
+      hdesc1.chunk_access.access_type == kAccessTypeInside)
+    return !GetHeapAddressInformation(right, 0, &hdesc2) ||
+        hdesc2.chunk_access.access_type != kAccessTypeInside ||
+        hdesc1.chunk_access.chunk_begin != hdesc2.chunk_access.chunk_begin;
+
+  // check whether left is an address of a global variable
+  GlobalAddressDescription gdesc1, gdesc2;
+  if (GetGlobalAddressInformation(left, 0, &gdesc1))
+    return !GetGlobalAddressInformation(right - 1, 0, &gdesc2) ||
+        !gdesc1.PointsInsideTheSameVariable(gdesc2);
+
+  if (t->GetStackVariableShadowStart(right) ||
+      GetHeapAddressInformation(right, 0, &hdesc2) ||
+      GetGlobalAddressInformation(right - 1, 0, &gdesc2))
+    return true;
+
+  // At this point we know nothing about both a1 and a2 addresses.
+  return false;
+}
+
+static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) {
+  switch (flags()->detect_invalid_pointer_pairs) {
+    case 0:
+      return;
+    case 1:
+      if (p1 == nullptr || p2 == nullptr)
+        return;
+      break;
+  }
+
+  uptr a1 = reinterpret_cast<uptr>(p1);
+  uptr a2 = reinterpret_cast<uptr>(p2);
+
+  if (IsInvalidPointerPair(a1, a2)) {
+    GET_CALLER_PC_BP_SP;
+    ReportInvalidPointerPair(pc, bp, sp, a1, a2);
+  }
+}
+// ----------------------- Mac-specific reports ----------------- {{{1
+
+void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name,
+                               BufferedStackTrace *stack) {
+  ScopedInErrorReport in_report;
+  Printf("mz_realloc(%p) -- attempting to realloc unallocated memory.\n"
+         "This is an unrecoverable problem, exiting now.\n",
+         addr);
+  PrintZoneForPointer(addr, zone_ptr, zone_name);
+  stack->Print();
+  DescribeAddressIfHeap(addr);
+}
+
+// -------------- SuppressErrorReport -------------- {{{1
+// Avoid duplicating error reports in ASan recover mode.
+static bool SuppressErrorReport(uptr pc) {
+  if (!common_flags()->suppress_equal_pcs) return false;
+  for (unsigned i = 0; i < kAsanBuggyPcPoolSize; i++) {
+    uptr cmp = atomic_load_relaxed(&AsanBuggyPcPool[i]);
+    if (cmp == 0 && atomic_compare_exchange_strong(&AsanBuggyPcPool[i], &cmp,
+                                                   pc, memory_order_relaxed))
+      return false;
+    if (cmp == pc) return true;
+  }
+  Die();
+}
+
+void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
+                        uptr access_size, u32 exp, bool fatal) {
+  if (!fatal && SuppressErrorReport(pc)) return;
+  ENABLE_FRAME_POINTER;
+
+  // Optimization experiments.
+  // The experiments can be used to evaluate potential optimizations that
+  // remove instrumentation (assess false negatives). Instead of completely
+  // removing some instrumentation, the compiler can emit special calls into
+  // the runtime (e.g. __asan_report_exp_load1 instead of __asan_report_load1)
+  // and pass a mask of experiments (exp).
+  // The reaction to a non-zero value of exp is to be defined.
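
For orientation on the invalid-pointer-pair path above (IsInvalidPointerPair and CheckForInvalidPointerPair, reached through the __sanitizer_ptr_cmp / __sanitizer_ptr_sub entry points defined later in this file), here is a minimal user-side sketch. It assumes clang's -fsanitize=pointer-compare instrumentation on top of -fsanitize=address plus the detect_invalid_pointer_pairs runtime flag used above; the file name and flag values are illustrative, not taken from this patch.

// invalid_pair.cpp (illustrative sketch, not part of the patch)
// Build: clang++ -fsanitize=address,pointer-compare invalid_pair.cpp
// Run:   ASAN_OPTIONS=detect_invalid_pointer_pairs=1 ./a.out
#include <cstdlib>

int main() {
  char *a = static_cast<char *>(std::malloc(16));
  char *b = static_cast<char *>(std::malloc(16));
  // The relational comparison is instrumented as __sanitizer_ptr_cmp(a, b);
  // a and b point into unrelated heap chunks, so IsInvalidPointerPair()
  // returns true and an invalid-pointer-pair report is printed.
  bool bad = a < b;
  std::free(a);
  std::free(b);
  return bad ? 1 : 0;
}
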
+ (void)exp; + + ScopedInErrorReport in_report(fatal); + ErrorGeneric error(GetCurrentTidOrInvalid(), pc, bp, sp, addr, is_write, + access_size); + in_report.ReportError(error); +} + +} // namespace __asan + +// --------------------------- Interface --------------------- {{{1 +using namespace __asan; + +void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write, + uptr access_size, u32 exp) { + ENABLE_FRAME_POINTER; + bool fatal = flags()->halt_on_error; + ReportGenericError(pc, bp, sp, addr, is_write, access_size, exp, fatal); +} + +void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) { + BlockingMutexLock l(&error_message_buf_mutex); + error_report_callback = callback; +} + +void __asan_describe_address(uptr addr) { + // Thread registry must be locked while we're describing an address. + asanThreadRegistry().Lock(); + PrintAddressDescription(addr, 1, ""); + asanThreadRegistry().Unlock(); +} + +int __asan_report_present() { + return ScopedInErrorReport::CurrentError().kind != kErrorKindInvalid; +} + +uptr __asan_get_report_pc() { + if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric) + return ScopedInErrorReport::CurrentError().Generic.pc; + return 0; +} + +uptr __asan_get_report_bp() { + if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric) + return ScopedInErrorReport::CurrentError().Generic.bp; + return 0; +} + +uptr __asan_get_report_sp() { + if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric) + return ScopedInErrorReport::CurrentError().Generic.sp; + return 0; +} + +uptr __asan_get_report_address() { + ErrorDescription &err = ScopedInErrorReport::CurrentError(); + if (err.kind == kErrorKindGeneric) + return err.Generic.addr_description.Address(); + else if (err.kind == kErrorKindDoubleFree) + return err.DoubleFree.addr_description.addr; + return 0; +} + +int __asan_get_report_access_type() { + if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric) + return ScopedInErrorReport::CurrentError().Generic.is_write; + return 0; +} + +uptr __asan_get_report_access_size() { + if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric) + return ScopedInErrorReport::CurrentError().Generic.access_size; + return 0; +} + +const char *__asan_get_report_description() { + if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric) + return ScopedInErrorReport::CurrentError().Generic.bug_descr; + return ScopedInErrorReport::CurrentError().Base.scariness.GetDescription(); +} + +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_ptr_sub(void *a, void *b) { + CheckForInvalidPointerPair(a, b); +} +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_ptr_cmp(void *a, void *b) { + CheckForInvalidPointerPair(a, b); +} +} // extern "C" + +// Provide default implementation of __asan_on_error that does nothing +// and may be overriden by user. +SANITIZER_INTERFACE_WEAK_DEF(void, __asan_on_error, void) {} diff --git a/lib/asan/asan_rtems.cc b/lib/asan/asan_rtems.cc deleted file mode 100644 index 4878f4d67c86..000000000000 --- a/lib/asan/asan_rtems.cc +++ /dev/null @@ -1,258 +0,0 @@ -//===-- asan_rtems.cc -----------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// RTEMS-specific details. -//===----------------------------------------------------------------------===// - -#include "sanitizer_common/sanitizer_rtems.h" -#if SANITIZER_RTEMS - -#include "asan_internal.h" -#include "asan_interceptors.h" -#include "asan_mapping.h" -#include "asan_poisoning.h" -#include "asan_report.h" -#include "asan_stack.h" -#include "sanitizer_common/sanitizer_common.h" -#include "sanitizer_common/sanitizer_libc.h" - -#include -#include - -namespace __asan { - -static void ResetShadowMemory() { - uptr shadow_start = SHADOW_OFFSET; - uptr shadow_end = MEM_TO_SHADOW(kMyriadMemoryEnd32); - uptr gap_start = MEM_TO_SHADOW(shadow_start); - uptr gap_end = MEM_TO_SHADOW(shadow_end); - - REAL(memset)((void *)shadow_start, 0, shadow_end - shadow_start); - REAL(memset)((void *)gap_start, kAsanShadowGap, gap_end - gap_start); -} - -void InitializeShadowMemory() { - kHighMemEnd = 0; - kMidMemBeg = 0; - kMidMemEnd = 0; - - ResetShadowMemory(); -} - -void AsanApplyToGlobals(globals_op_fptr op, const void *needle) { - UNIMPLEMENTED(); -} - -void AsanCheckDynamicRTPrereqs() {} -void AsanCheckIncompatibleRT() {} -void InitializeAsanInterceptors() {} -void InitializePlatformInterceptors() {} -void InitializePlatformExceptionHandlers() {} - -// RTEMS only support static linking; it sufficies to return with no -// error. -void *AsanDoesNotSupportStaticLinkage() { return nullptr; } - -void AsanOnDeadlySignal(int signo, void *siginfo, void *context) { - UNIMPLEMENTED(); -} - -void EarlyInit() { - // Provide early initialization of shadow memory so that - // instrumented code running before full initialzation will not - // report spurious errors. - ResetShadowMemory(); -} - -// We can use a plain thread_local variable for TSD. -static thread_local void *per_thread; - -void *AsanTSDGet() { return per_thread; } - -void AsanTSDSet(void *tsd) { per_thread = tsd; } - -// There's no initialization needed, and the passed-in destructor -// will never be called. Instead, our own thread destruction hook -// (below) will call AsanThread::TSDDtor directly. -void AsanTSDInit(void (*destructor)(void *tsd)) { - DCHECK(destructor == &PlatformTSDDtor); -} - -void PlatformTSDDtor(void *tsd) { UNREACHABLE(__func__); } - -// -// Thread registration. We provide an API similar to the Fushia port. -// - -struct AsanThread::InitOptions { - uptr stack_bottom, stack_size, tls_bottom, tls_size; -}; - -// Shared setup between thread creation and startup for the initial thread. -static AsanThread *CreateAsanThread(StackTrace *stack, u32 parent_tid, - uptr user_id, bool detached, - uptr stack_bottom, uptr stack_size, - uptr tls_bottom, uptr tls_size) { - // In lieu of AsanThread::Create. - AsanThread *thread = (AsanThread *)MmapOrDie(sizeof(AsanThread), __func__); - AsanThreadContext::CreateThreadContextArgs args = {thread, stack}; - asanThreadRegistry().CreateThread(user_id, detached, parent_tid, &args); - - // On other systems, AsanThread::Init() is called from the new - // thread itself. But on RTEMS we already know the stack address - // range beforehand, so we can do most of the setup right now. 
- const AsanThread::InitOptions options = {stack_bottom, stack_size, - tls_bottom, tls_size}; - thread->Init(&options); - return thread; -} - -// This gets the same arguments passed to Init by CreateAsanThread, above. -// We're in the creator thread before the new thread is actually started, but -// its stack and tls address range are already known. -void AsanThread::SetThreadStackAndTls(const AsanThread::InitOptions *options) { - DCHECK_NE(GetCurrentThread(), this); - DCHECK_NE(GetCurrentThread(), nullptr); - CHECK_NE(options->stack_bottom, 0); - CHECK_NE(options->stack_size, 0); - stack_bottom_ = options->stack_bottom; - stack_top_ = options->stack_bottom + options->stack_size; - tls_begin_ = options->tls_bottom; - tls_end_ = options->tls_bottom + options->tls_size; -} - -// Called by __asan::AsanInitInternal (asan_rtl.c). Unlike other ports, the -// main thread on RTEMS does not require special treatment; its AsanThread is -// already created by the provided hooks. This function simply looks up and -// returns the created thread. -AsanThread *CreateMainThread() { - return GetThreadContextByTidLocked(0)->thread; -} - -// This is called before each thread creation is attempted. So, in -// its first call, the calling thread is the initial and sole thread. -static void *BeforeThreadCreateHook(uptr user_id, bool detached, - uptr stack_bottom, uptr stack_size, - uptr tls_bottom, uptr tls_size) { - EnsureMainThreadIDIsCorrect(); - // Strict init-order checking is thread-hostile. - if (flags()->strict_init_order) StopInitOrderChecking(); - - GET_STACK_TRACE_THREAD; - u32 parent_tid = GetCurrentTidOrInvalid(); - - return CreateAsanThread(&stack, parent_tid, user_id, detached, - stack_bottom, stack_size, tls_bottom, tls_size); -} - -// This is called after creating a new thread (in the creating thread), -// with the pointer returned by BeforeThreadCreateHook (above). -static void ThreadCreateHook(void *hook, bool aborted) { - AsanThread *thread = static_cast(hook); - if (!aborted) { - // The thread was created successfully. - // ThreadStartHook is already running in the new thread. - } else { - // The thread wasn't created after all. - // Clean up everything we set up in BeforeThreadCreateHook. - asanThreadRegistry().FinishThread(thread->tid()); - UnmapOrDie(thread, sizeof(AsanThread)); - } -} - -// This is called (1) in the newly-created thread before it runs anything else, -// with the pointer returned by BeforeThreadCreateHook (above). (2) before a -// thread restart. -static void ThreadStartHook(void *hook, uptr os_id) { - if (!hook) - return; - - AsanThread *thread = static_cast(hook); - SetCurrentThread(thread); - - ThreadStatus status = - asanThreadRegistry().GetThreadLocked(thread->tid())->status; - DCHECK(status == ThreadStatusCreated || status == ThreadStatusRunning); - // Determine whether we are starting or restarting the thread. - if (status == ThreadStatusCreated) - // In lieu of AsanThread::ThreadStart. - asanThreadRegistry().StartThread(thread->tid(), os_id, ThreadType::Regular, - nullptr); - else { - // In a thread restart, a thread may resume execution at an - // arbitrary function entry point, with its stack and TLS state - // reset. We unpoison the stack in that case. - PoisonShadow(thread->stack_bottom(), thread->stack_size(), 0); - } -} - -// Each thread runs this just before it exits, -// with the pointer returned by BeforeThreadCreateHook (above). -// All per-thread destructors have already been called. 
-static void ThreadExitHook(void *hook, uptr os_id) { - AsanThread *thread = static_cast(hook); - if (thread) - AsanThread::TSDDtor(thread->context()); -} - -static void HandleExit() { - // Disable ASan by setting it to uninitialized. Also reset the - // shadow memory to avoid reporting errors after the run-time has - // been desroyed. - if (asan_inited) { - asan_inited = false; - ResetShadowMemory(); - } -} - -bool HandleDlopenInit() { - // Not supported on this platform. - static_assert(!SANITIZER_SUPPORTS_INIT_FOR_DLOPEN, - "Expected SANITIZER_SUPPORTS_INIT_FOR_DLOPEN to be false"); - return false; -} -} // namespace __asan - -// These are declared (in extern "C") by . -// The system runtime will call our definitions directly. - -extern "C" { -void __sanitizer_early_init() { - __asan::EarlyInit(); -} - -void *__sanitizer_before_thread_create_hook(uptr thread, bool detached, - const char *name, - void *stack_base, size_t stack_size, - void *tls_base, size_t tls_size) { - return __asan::BeforeThreadCreateHook( - thread, detached, - reinterpret_cast(stack_base), stack_size, - reinterpret_cast(tls_base), tls_size); -} - -void __sanitizer_thread_create_hook(void *handle, uptr thread, int status) { - __asan::ThreadCreateHook(handle, status != 0); -} - -void __sanitizer_thread_start_hook(void *handle, uptr self) { - __asan::ThreadStartHook(handle, self); -} - -void __sanitizer_thread_exit_hook(void *handle, uptr self) { - __asan::ThreadExitHook(handle, self); -} - -void __sanitizer_exit() { - __asan::HandleExit(); -} -} // "C" - -#endif // SANITIZER_RTEMS diff --git a/lib/asan/asan_rtems.cpp b/lib/asan/asan_rtems.cpp new file mode 100644 index 000000000000..ecd568c5981b --- /dev/null +++ b/lib/asan/asan_rtems.cpp @@ -0,0 +1,258 @@ +//===-- asan_rtems.cpp ----------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// RTEMS-specific details. 
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_rtems.h"
+#if SANITIZER_RTEMS
+
+#include "asan_internal.h"
+#include "asan_interceptors.h"
+#include "asan_mapping.h"
+#include "asan_poisoning.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+
+#include
+#include
+
+namespace __asan {
+
+static void ResetShadowMemory() {
+  uptr shadow_start = SHADOW_OFFSET;
+  uptr shadow_end = MEM_TO_SHADOW(kMyriadMemoryEnd32);
+  uptr gap_start = MEM_TO_SHADOW(shadow_start);
+  uptr gap_end = MEM_TO_SHADOW(shadow_end);
+
+  REAL(memset)((void *)shadow_start, 0, shadow_end - shadow_start);
+  REAL(memset)((void *)gap_start, kAsanShadowGap, gap_end - gap_start);
+}
+
+void InitializeShadowMemory() {
+  kHighMemEnd = 0;
+  kMidMemBeg = 0;
+  kMidMemEnd = 0;
+
+  ResetShadowMemory();
+}
+
+void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
+  UNIMPLEMENTED();
+}
+
+void AsanCheckDynamicRTPrereqs() {}
+void AsanCheckIncompatibleRT() {}
+void InitializeAsanInterceptors() {}
+void InitializePlatformInterceptors() {}
+void InitializePlatformExceptionHandlers() {}
+
+// RTEMS only supports static linking; it suffices to return with no
+// error.
+void *AsanDoesNotSupportStaticLinkage() { return nullptr; }
+
+void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
+  UNIMPLEMENTED();
+}
+
+void EarlyInit() {
+  // Provide early initialization of shadow memory so that
+  // instrumented code running before full initialization will not
+  // report spurious errors.
+  ResetShadowMemory();
+}
+
+// We can use a plain thread_local variable for TSD.
+static thread_local void *per_thread;
+
+void *AsanTSDGet() { return per_thread; }
+
+void AsanTSDSet(void *tsd) { per_thread = tsd; }
+
+// There's no initialization needed, and the passed-in destructor
+// will never be called. Instead, our own thread destruction hook
+// (below) will call AsanThread::TSDDtor directly.
+void AsanTSDInit(void (*destructor)(void *tsd)) {
+  DCHECK(destructor == &PlatformTSDDtor);
+}
+
+void PlatformTSDDtor(void *tsd) { UNREACHABLE(__func__); }
+
+//
+// Thread registration. We provide an API similar to the Fuchsia port.
+//
+
+struct AsanThread::InitOptions {
+  uptr stack_bottom, stack_size, tls_bottom, tls_size;
+};
+
+// Shared setup between thread creation and startup for the initial thread.
+static AsanThread *CreateAsanThread(StackTrace *stack, u32 parent_tid,
+                                    uptr user_id, bool detached,
+                                    uptr stack_bottom, uptr stack_size,
+                                    uptr tls_bottom, uptr tls_size) {
+  // In lieu of AsanThread::Create.
+  AsanThread *thread = (AsanThread *)MmapOrDie(sizeof(AsanThread), __func__);
+  AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
+  asanThreadRegistry().CreateThread(user_id, detached, parent_tid, &args);
+
+  // On other systems, AsanThread::Init() is called from the new
+  // thread itself. But on RTEMS we already know the stack address
+  // range beforehand, so we can do most of the setup right now.
+  const AsanThread::InitOptions options = {stack_bottom, stack_size,
+                                           tls_bottom, tls_size};
+  thread->Init(&options);
+  return thread;
+}
+
+// This gets the same arguments passed to Init by CreateAsanThread, above.
+// We're in the creator thread before the new thread is actually started, but
+// its stack and TLS address ranges are already known.
+void AsanThread::SetThreadStackAndTls(const AsanThread::InitOptions *options) {
+  DCHECK_NE(GetCurrentThread(), this);
+  DCHECK_NE(GetCurrentThread(), nullptr);
+  CHECK_NE(options->stack_bottom, 0);
+  CHECK_NE(options->stack_size, 0);
+  stack_bottom_ = options->stack_bottom;
+  stack_top_ = options->stack_bottom + options->stack_size;
+  tls_begin_ = options->tls_bottom;
+  tls_end_ = options->tls_bottom + options->tls_size;
+}
+
+// Called by __asan::AsanInitInternal (asan_rtl.cpp). Unlike other ports, the
+// main thread on RTEMS does not require special treatment; its AsanThread is
+// already created by the provided hooks. This function simply looks up and
+// returns the created thread.
+AsanThread *CreateMainThread() {
+  return GetThreadContextByTidLocked(0)->thread;
+}
+
+// This is called before each thread creation is attempted. So, in
+// its first call, the calling thread is the initial and sole thread.
+static void *BeforeThreadCreateHook(uptr user_id, bool detached,
+                                    uptr stack_bottom, uptr stack_size,
+                                    uptr tls_bottom, uptr tls_size) {
+  EnsureMainThreadIDIsCorrect();
+  // Strict init-order checking is thread-hostile.
+  if (flags()->strict_init_order) StopInitOrderChecking();
+
+  GET_STACK_TRACE_THREAD;
+  u32 parent_tid = GetCurrentTidOrInvalid();
+
+  return CreateAsanThread(&stack, parent_tid, user_id, detached,
+                          stack_bottom, stack_size, tls_bottom, tls_size);
+}
+
+// This is called after creating a new thread (in the creating thread),
+// with the pointer returned by BeforeThreadCreateHook (above).
+static void ThreadCreateHook(void *hook, bool aborted) {
+  AsanThread *thread = static_cast<AsanThread *>(hook);
+  if (!aborted) {
+    // The thread was created successfully.
+    // ThreadStartHook is already running in the new thread.
+  } else {
+    // The thread wasn't created after all.
+    // Clean up everything we set up in BeforeThreadCreateHook.
+    asanThreadRegistry().FinishThread(thread->tid());
+    UnmapOrDie(thread, sizeof(AsanThread));
+  }
+}
+
+// This is called (1) in the newly-created thread before it runs anything else,
+// with the pointer returned by BeforeThreadCreateHook (above), and (2) before
+// a thread restart.
+static void ThreadStartHook(void *hook, uptr os_id) {
+  if (!hook)
+    return;
+
+  AsanThread *thread = static_cast<AsanThread *>(hook);
+  SetCurrentThread(thread);
+
+  ThreadStatus status =
+      asanThreadRegistry().GetThreadLocked(thread->tid())->status;
+  DCHECK(status == ThreadStatusCreated || status == ThreadStatusRunning);
+  // Determine whether we are starting or restarting the thread.
+  if (status == ThreadStatusCreated) {
+    // In lieu of AsanThread::ThreadStart.
+    asanThreadRegistry().StartThread(thread->tid(), os_id, ThreadType::Regular,
+                                     nullptr);
+  } else {
+    // In a thread restart, a thread may resume execution at an
+    // arbitrary function entry point, with its stack and TLS state
+    // reset. We unpoison the stack in that case.
+    PoisonShadow(thread->stack_bottom(), thread->stack_size(), 0);
+  }
+}
+
+// Each thread runs this just before it exits,
+// with the pointer returned by BeforeThreadCreateHook (above).
+// All per-thread destructors have already been called.
+static void ThreadExitHook(void *hook, uptr os_id) {
+  AsanThread *thread = static_cast<AsanThread *>(hook);
+  if (thread)
+    AsanThread::TSDDtor(thread->context());
+}
+
+static void HandleExit() {
+  // Disable ASan by setting it to uninitialized. Also reset the
+  // shadow memory to avoid reporting errors after the run-time has
+  // been destroyed.
+ if (asan_inited) { + asan_inited = false; + ResetShadowMemory(); + } +} + +bool HandleDlopenInit() { + // Not supported on this platform. + static_assert(!SANITIZER_SUPPORTS_INIT_FOR_DLOPEN, + "Expected SANITIZER_SUPPORTS_INIT_FOR_DLOPEN to be false"); + return false; +} +} // namespace __asan + +// These are declared (in extern "C") by . +// The system runtime will call our definitions directly. + +extern "C" { +void __sanitizer_early_init() { + __asan::EarlyInit(); +} + +void *__sanitizer_before_thread_create_hook(uptr thread, bool detached, + const char *name, + void *stack_base, size_t stack_size, + void *tls_base, size_t tls_size) { + return __asan::BeforeThreadCreateHook( + thread, detached, + reinterpret_cast(stack_base), stack_size, + reinterpret_cast(tls_base), tls_size); +} + +void __sanitizer_thread_create_hook(void *handle, uptr thread, int status) { + __asan::ThreadCreateHook(handle, status != 0); +} + +void __sanitizer_thread_start_hook(void *handle, uptr self) { + __asan::ThreadStartHook(handle, self); +} + +void __sanitizer_thread_exit_hook(void *handle, uptr self) { + __asan::ThreadExitHook(handle, self); +} + +void __sanitizer_exit() { + __asan::HandleExit(); +} +} // "C" + +#endif // SANITIZER_RTEMS diff --git a/lib/asan/asan_rtl.cc b/lib/asan/asan_rtl.cc deleted file mode 100644 index db8dcd0689a5..000000000000 --- a/lib/asan/asan_rtl.cc +++ /dev/null @@ -1,626 +0,0 @@ -//===-- asan_rtl.cc -------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Main file of the ASan run-time library. -//===----------------------------------------------------------------------===// - -#include "asan_activation.h" -#include "asan_allocator.h" -#include "asan_interceptors.h" -#include "asan_interface_internal.h" -#include "asan_internal.h" -#include "asan_mapping.h" -#include "asan_poisoning.h" -#include "asan_report.h" -#include "asan_stack.h" -#include "asan_stats.h" -#include "asan_suppressions.h" -#include "asan_thread.h" -#include "sanitizer_common/sanitizer_atomic.h" -#include "sanitizer_common/sanitizer_flags.h" -#include "sanitizer_common/sanitizer_libc.h" -#include "sanitizer_common/sanitizer_symbolizer.h" -#include "lsan/lsan_common.h" -#include "ubsan/ubsan_init.h" -#include "ubsan/ubsan_platform.h" - -uptr __asan_shadow_memory_dynamic_address; // Global interface symbol. -int __asan_option_detect_stack_use_after_return; // Global interface symbol. -uptr *__asan_test_only_reported_buggy_pointer; // Used only for testing asan. - -namespace __asan { - -uptr AsanMappingProfile[kAsanMappingProfileSize]; - -static void AsanDie() { - static atomic_uint32_t num_calls; - if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) != 0) { - // Don't die twice - run a busy loop. 
- while (1) { } - } - if (common_flags()->print_module_map >= 1) PrintModuleMap(); - if (flags()->sleep_before_dying) { - Report("Sleeping for %d second(s)\n", flags()->sleep_before_dying); - SleepForSeconds(flags()->sleep_before_dying); - } - if (flags()->unmap_shadow_on_exit) { - if (kMidMemBeg) { - UnmapOrDie((void*)kLowShadowBeg, kMidMemBeg - kLowShadowBeg); - UnmapOrDie((void*)kMidMemEnd, kHighShadowEnd - kMidMemEnd); - } else { - if (kHighShadowEnd) - UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg); - } - } -} - -static void AsanCheckFailed(const char *file, int line, const char *cond, - u64 v1, u64 v2) { - Report("AddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file, - line, cond, (uptr)v1, (uptr)v2); - - // Print a stack trace the first time we come here. Otherwise, we probably - // failed a CHECK during symbolization. - static atomic_uint32_t num_calls; - if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) == 0) { - PRINT_CURRENT_STACK_CHECK(); - } - - Die(); -} - -// -------------------------- Globals --------------------- {{{1 -int asan_inited; -bool asan_init_is_running; - -#if !ASAN_FIXED_MAPPING -uptr kHighMemEnd, kMidMemBeg, kMidMemEnd; -#endif - -// -------------------------- Misc ---------------- {{{1 -void ShowStatsAndAbort() { - __asan_print_accumulated_stats(); - Die(); -} - -// --------------- LowLevelAllocateCallbac ---------- {{{1 -static void OnLowLevelAllocate(uptr ptr, uptr size) { - PoisonShadow(ptr, size, kAsanInternalHeapMagic); -} - -// -------------------------- Run-time entry ------------------- {{{1 -// exported functions -#define ASAN_REPORT_ERROR(type, is_write, size) \ -extern "C" NOINLINE INTERFACE_ATTRIBUTE \ -void __asan_report_ ## type ## size(uptr addr) { \ - GET_CALLER_PC_BP_SP; \ - ReportGenericError(pc, bp, sp, addr, is_write, size, 0, true); \ -} \ -extern "C" NOINLINE INTERFACE_ATTRIBUTE \ -void __asan_report_exp_ ## type ## size(uptr addr, u32 exp) { \ - GET_CALLER_PC_BP_SP; \ - ReportGenericError(pc, bp, sp, addr, is_write, size, exp, true); \ -} \ -extern "C" NOINLINE INTERFACE_ATTRIBUTE \ -void __asan_report_ ## type ## size ## _noabort(uptr addr) { \ - GET_CALLER_PC_BP_SP; \ - ReportGenericError(pc, bp, sp, addr, is_write, size, 0, false); \ -} \ - -ASAN_REPORT_ERROR(load, false, 1) -ASAN_REPORT_ERROR(load, false, 2) -ASAN_REPORT_ERROR(load, false, 4) -ASAN_REPORT_ERROR(load, false, 8) -ASAN_REPORT_ERROR(load, false, 16) -ASAN_REPORT_ERROR(store, true, 1) -ASAN_REPORT_ERROR(store, true, 2) -ASAN_REPORT_ERROR(store, true, 4) -ASAN_REPORT_ERROR(store, true, 8) -ASAN_REPORT_ERROR(store, true, 16) - -#define ASAN_REPORT_ERROR_N(type, is_write) \ -extern "C" NOINLINE INTERFACE_ATTRIBUTE \ -void __asan_report_ ## type ## _n(uptr addr, uptr size) { \ - GET_CALLER_PC_BP_SP; \ - ReportGenericError(pc, bp, sp, addr, is_write, size, 0, true); \ -} \ -extern "C" NOINLINE INTERFACE_ATTRIBUTE \ -void __asan_report_exp_ ## type ## _n(uptr addr, uptr size, u32 exp) { \ - GET_CALLER_PC_BP_SP; \ - ReportGenericError(pc, bp, sp, addr, is_write, size, exp, true); \ -} \ -extern "C" NOINLINE INTERFACE_ATTRIBUTE \ -void __asan_report_ ## type ## _n_noabort(uptr addr, uptr size) { \ - GET_CALLER_PC_BP_SP; \ - ReportGenericError(pc, bp, sp, addr, is_write, size, 0, false); \ -} \ - -ASAN_REPORT_ERROR_N(load, false) -ASAN_REPORT_ERROR_N(store, true) - -#define ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp_arg, fatal) \ - if (SANITIZER_MYRIAD2 && !AddrIsInMem(addr) && !AddrIsInShadow(addr)) \ - return; \ - uptr sp 
= MEM_TO_SHADOW(addr); \ - uptr s = size <= SHADOW_GRANULARITY ? *reinterpret_cast(sp) \ - : *reinterpret_cast(sp); \ - if (UNLIKELY(s)) { \ - if (UNLIKELY(size >= SHADOW_GRANULARITY || \ - ((s8)((addr & (SHADOW_GRANULARITY - 1)) + size - 1)) >= \ - (s8)s)) { \ - if (__asan_test_only_reported_buggy_pointer) { \ - *__asan_test_only_reported_buggy_pointer = addr; \ - } else { \ - GET_CALLER_PC_BP_SP; \ - ReportGenericError(pc, bp, sp, addr, is_write, size, exp_arg, \ - fatal); \ - } \ - } \ - } - -#define ASAN_MEMORY_ACCESS_CALLBACK(type, is_write, size) \ - extern "C" NOINLINE INTERFACE_ATTRIBUTE \ - void __asan_##type##size(uptr addr) { \ - ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, 0, true) \ - } \ - extern "C" NOINLINE INTERFACE_ATTRIBUTE \ - void __asan_exp_##type##size(uptr addr, u32 exp) { \ - ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp, true) \ - } \ - extern "C" NOINLINE INTERFACE_ATTRIBUTE \ - void __asan_##type##size ## _noabort(uptr addr) { \ - ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, 0, false) \ - } \ - -ASAN_MEMORY_ACCESS_CALLBACK(load, false, 1) -ASAN_MEMORY_ACCESS_CALLBACK(load, false, 2) -ASAN_MEMORY_ACCESS_CALLBACK(load, false, 4) -ASAN_MEMORY_ACCESS_CALLBACK(load, false, 8) -ASAN_MEMORY_ACCESS_CALLBACK(load, false, 16) -ASAN_MEMORY_ACCESS_CALLBACK(store, true, 1) -ASAN_MEMORY_ACCESS_CALLBACK(store, true, 2) -ASAN_MEMORY_ACCESS_CALLBACK(store, true, 4) -ASAN_MEMORY_ACCESS_CALLBACK(store, true, 8) -ASAN_MEMORY_ACCESS_CALLBACK(store, true, 16) - -extern "C" -NOINLINE INTERFACE_ATTRIBUTE -void __asan_loadN(uptr addr, uptr size) { - if (__asan_region_is_poisoned(addr, size)) { - GET_CALLER_PC_BP_SP; - ReportGenericError(pc, bp, sp, addr, false, size, 0, true); - } -} - -extern "C" -NOINLINE INTERFACE_ATTRIBUTE -void __asan_exp_loadN(uptr addr, uptr size, u32 exp) { - if (__asan_region_is_poisoned(addr, size)) { - GET_CALLER_PC_BP_SP; - ReportGenericError(pc, bp, sp, addr, false, size, exp, true); - } -} - -extern "C" -NOINLINE INTERFACE_ATTRIBUTE -void __asan_loadN_noabort(uptr addr, uptr size) { - if (__asan_region_is_poisoned(addr, size)) { - GET_CALLER_PC_BP_SP; - ReportGenericError(pc, bp, sp, addr, false, size, 0, false); - } -} - -extern "C" -NOINLINE INTERFACE_ATTRIBUTE -void __asan_storeN(uptr addr, uptr size) { - if (__asan_region_is_poisoned(addr, size)) { - GET_CALLER_PC_BP_SP; - ReportGenericError(pc, bp, sp, addr, true, size, 0, true); - } -} - -extern "C" -NOINLINE INTERFACE_ATTRIBUTE -void __asan_exp_storeN(uptr addr, uptr size, u32 exp) { - if (__asan_region_is_poisoned(addr, size)) { - GET_CALLER_PC_BP_SP; - ReportGenericError(pc, bp, sp, addr, true, size, exp, true); - } -} - -extern "C" -NOINLINE INTERFACE_ATTRIBUTE -void __asan_storeN_noabort(uptr addr, uptr size) { - if (__asan_region_is_poisoned(addr, size)) { - GET_CALLER_PC_BP_SP; - ReportGenericError(pc, bp, sp, addr, true, size, 0, false); - } -} - -// Force the linker to keep the symbols for various ASan interface functions. -// We want to keep those in the executable in order to let the instrumented -// dynamic libraries access the symbol even if it is not used by the executable -// itself. This should help if the build system is removing dead code at link -// time. -static NOINLINE void force_interface_symbols() { - volatile int fake_condition = 0; // prevent dead condition elimination. - // __asan_report_* functions are noreturn, so we need a switch to prevent - // the compiler from removing any of them. 
- // clang-format off - switch (fake_condition) { - case 1: __asan_report_load1(0); break; - case 2: __asan_report_load2(0); break; - case 3: __asan_report_load4(0); break; - case 4: __asan_report_load8(0); break; - case 5: __asan_report_load16(0); break; - case 6: __asan_report_load_n(0, 0); break; - case 7: __asan_report_store1(0); break; - case 8: __asan_report_store2(0); break; - case 9: __asan_report_store4(0); break; - case 10: __asan_report_store8(0); break; - case 11: __asan_report_store16(0); break; - case 12: __asan_report_store_n(0, 0); break; - case 13: __asan_report_exp_load1(0, 0); break; - case 14: __asan_report_exp_load2(0, 0); break; - case 15: __asan_report_exp_load4(0, 0); break; - case 16: __asan_report_exp_load8(0, 0); break; - case 17: __asan_report_exp_load16(0, 0); break; - case 18: __asan_report_exp_load_n(0, 0, 0); break; - case 19: __asan_report_exp_store1(0, 0); break; - case 20: __asan_report_exp_store2(0, 0); break; - case 21: __asan_report_exp_store4(0, 0); break; - case 22: __asan_report_exp_store8(0, 0); break; - case 23: __asan_report_exp_store16(0, 0); break; - case 24: __asan_report_exp_store_n(0, 0, 0); break; - case 25: __asan_register_globals(nullptr, 0); break; - case 26: __asan_unregister_globals(nullptr, 0); break; - case 27: __asan_set_death_callback(nullptr); break; - case 28: __asan_set_error_report_callback(nullptr); break; - case 29: __asan_handle_no_return(); break; - case 30: __asan_address_is_poisoned(nullptr); break; - case 31: __asan_poison_memory_region(nullptr, 0); break; - case 32: __asan_unpoison_memory_region(nullptr, 0); break; - case 34: __asan_before_dynamic_init(nullptr); break; - case 35: __asan_after_dynamic_init(); break; - case 36: __asan_poison_stack_memory(0, 0); break; - case 37: __asan_unpoison_stack_memory(0, 0); break; - case 38: __asan_region_is_poisoned(0, 0); break; - case 39: __asan_describe_address(0); break; - case 40: __asan_set_shadow_00(0, 0); break; - case 41: __asan_set_shadow_f1(0, 0); break; - case 42: __asan_set_shadow_f2(0, 0); break; - case 43: __asan_set_shadow_f3(0, 0); break; - case 44: __asan_set_shadow_f5(0, 0); break; - case 45: __asan_set_shadow_f8(0, 0); break; - } - // clang-format on -} - -static void asan_atexit() { - Printf("AddressSanitizer exit stats:\n"); - __asan_print_accumulated_stats(); - // Print AsanMappingProfile. 
- for (uptr i = 0; i < kAsanMappingProfileSize; i++) { - if (AsanMappingProfile[i] == 0) continue; - Printf("asan_mapping.h:%zd -- %zd\n", i, AsanMappingProfile[i]); - } -} - -static void InitializeHighMemEnd() { -#if !SANITIZER_MYRIAD2 -#if !ASAN_FIXED_MAPPING - kHighMemEnd = GetMaxUserVirtualAddress(); - // Increase kHighMemEnd to make sure it's properly - // aligned together with kHighMemBeg: - kHighMemEnd |= SHADOW_GRANULARITY * GetMmapGranularity() - 1; -#endif // !ASAN_FIXED_MAPPING - CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0); -#endif // !SANITIZER_MYRIAD2 -} - -void PrintAddressSpaceLayout() { - if (kHighMemBeg) { - Printf("|| `[%p, %p]` || HighMem ||\n", - (void*)kHighMemBeg, (void*)kHighMemEnd); - Printf("|| `[%p, %p]` || HighShadow ||\n", - (void*)kHighShadowBeg, (void*)kHighShadowEnd); - } - if (kMidMemBeg) { - Printf("|| `[%p, %p]` || ShadowGap3 ||\n", - (void*)kShadowGap3Beg, (void*)kShadowGap3End); - Printf("|| `[%p, %p]` || MidMem ||\n", - (void*)kMidMemBeg, (void*)kMidMemEnd); - Printf("|| `[%p, %p]` || ShadowGap2 ||\n", - (void*)kShadowGap2Beg, (void*)kShadowGap2End); - Printf("|| `[%p, %p]` || MidShadow ||\n", - (void*)kMidShadowBeg, (void*)kMidShadowEnd); - } - Printf("|| `[%p, %p]` || ShadowGap ||\n", - (void*)kShadowGapBeg, (void*)kShadowGapEnd); - if (kLowShadowBeg) { - Printf("|| `[%p, %p]` || LowShadow ||\n", - (void*)kLowShadowBeg, (void*)kLowShadowEnd); - Printf("|| `[%p, %p]` || LowMem ||\n", - (void*)kLowMemBeg, (void*)kLowMemEnd); - } - Printf("MemToShadow(shadow): %p %p", - (void*)MEM_TO_SHADOW(kLowShadowBeg), - (void*)MEM_TO_SHADOW(kLowShadowEnd)); - if (kHighMemBeg) { - Printf(" %p %p", - (void*)MEM_TO_SHADOW(kHighShadowBeg), - (void*)MEM_TO_SHADOW(kHighShadowEnd)); - } - if (kMidMemBeg) { - Printf(" %p %p", - (void*)MEM_TO_SHADOW(kMidShadowBeg), - (void*)MEM_TO_SHADOW(kMidShadowEnd)); - } - Printf("\n"); - Printf("redzone=%zu\n", (uptr)flags()->redzone); - Printf("max_redzone=%zu\n", (uptr)flags()->max_redzone); - Printf("quarantine_size_mb=%zuM\n", (uptr)flags()->quarantine_size_mb); - Printf("thread_local_quarantine_size_kb=%zuK\n", - (uptr)flags()->thread_local_quarantine_size_kb); - Printf("malloc_context_size=%zu\n", - (uptr)common_flags()->malloc_context_size); - - Printf("SHADOW_SCALE: %d\n", (int)SHADOW_SCALE); - Printf("SHADOW_GRANULARITY: %d\n", (int)SHADOW_GRANULARITY); - Printf("SHADOW_OFFSET: 0x%zx\n", (uptr)SHADOW_OFFSET); - CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7); - if (kMidMemBeg) - CHECK(kMidShadowBeg > kLowShadowEnd && - kMidMemBeg > kMidShadowEnd && - kHighShadowBeg > kMidMemEnd); -} - -#if defined(__thumb__) && defined(__linux__) -#define START_BACKGROUND_THREAD_IN_ASAN_INTERNAL -#endif - -#ifndef START_BACKGROUND_THREAD_IN_ASAN_INTERNAL -static bool UNUSED __local_asan_dyninit = [] { - MaybeStartBackgroudThread(); - SetSoftRssLimitExceededCallback(AsanSoftRssLimitExceededCallback); - - return false; -}(); -#endif - -static void AsanInitInternal() { - if (LIKELY(asan_inited)) return; - SanitizerToolName = "AddressSanitizer"; - CHECK(!asan_init_is_running && "ASan init calls itself!"); - asan_init_is_running = true; - - CacheBinaryName(); - CheckASLR(); - - // Initialize flags. This must be done early, because most of the - // initialization steps look at flags(). - InitializeFlags(); - - // Stop performing init at this point if we are being loaded via - // dlopen() and the platform supports it. 
- if (SANITIZER_SUPPORTS_INIT_FOR_DLOPEN && UNLIKELY(HandleDlopenInit())) { - asan_init_is_running = false; - VReport(1, "AddressSanitizer init is being performed for dlopen().\n"); - return; - } - - AsanCheckIncompatibleRT(); - AsanCheckDynamicRTPrereqs(); - AvoidCVE_2016_2143(); - - SetCanPoisonMemory(flags()->poison_heap); - SetMallocContextSize(common_flags()->malloc_context_size); - - InitializePlatformExceptionHandlers(); - - InitializeHighMemEnd(); - - // Make sure we are not statically linked. - AsanDoesNotSupportStaticLinkage(); - - // Install tool-specific callbacks in sanitizer_common. - AddDieCallback(AsanDie); - SetCheckFailedCallback(AsanCheckFailed); - SetPrintfAndReportCallback(AppendToErrorMessageBuffer); - - __sanitizer_set_report_path(common_flags()->log_path); - - __asan_option_detect_stack_use_after_return = - flags()->detect_stack_use_after_return; - - __sanitizer::InitializePlatformEarly(); - - // Re-exec ourselves if we need to set additional env or command line args. - MaybeReexec(); - - // Setup internal allocator callback. - SetLowLevelAllocateMinAlignment(SHADOW_GRANULARITY); - SetLowLevelAllocateCallback(OnLowLevelAllocate); - - InitializeAsanInterceptors(); - - // Enable system log ("adb logcat") on Android. - // Doing this before interceptors are initialized crashes in: - // AsanInitInternal -> android_log_write -> __interceptor_strcmp - AndroidLogInit(); - - ReplaceSystemMalloc(); - - DisableCoreDumperIfNecessary(); - - InitializeShadowMemory(); - - AsanTSDInit(PlatformTSDDtor); - InstallDeadlySignalHandlers(AsanOnDeadlySignal); - - AllocatorOptions allocator_options; - allocator_options.SetFrom(flags(), common_flags()); - InitializeAllocator(allocator_options); - -#ifdef START_BACKGROUND_THREAD_IN_ASAN_INTERNAL - MaybeStartBackgroudThread(); - SetSoftRssLimitExceededCallback(AsanSoftRssLimitExceededCallback); -#endif - - // On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited - // should be set to 1 prior to initializing the threads. - asan_inited = 1; - asan_init_is_running = false; - - if (flags()->atexit) - Atexit(asan_atexit); - - InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir); - - // Now that ASan runtime is (mostly) initialized, deactivate it if - // necessary, so that it can be re-activated when requested. - if (flags()->start_deactivated) - AsanDeactivate(); - - // interceptors - InitTlsSize(); - - // Create main thread. - AsanThread *main_thread = CreateMainThread(); - CHECK_EQ(0, main_thread->tid()); - force_interface_symbols(); // no-op. - SanitizerInitializeUnwinder(); - - if (CAN_SANITIZE_LEAKS) { - __lsan::InitCommonLsan(); - if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) { - if (flags()->halt_on_error) - Atexit(__lsan::DoLeakCheck); - else - Atexit(__lsan::DoRecoverableLeakCheckVoid); - } - } - -#if CAN_SANITIZE_UB - __ubsan::InitAsPlugin(); -#endif - - InitializeSuppressions(); - - if (CAN_SANITIZE_LEAKS) { - // LateInitialize() calls dlsym, which can allocate an error string buffer - // in the TLS. Let's ignore the allocation to avoid reporting a leak. 
- __lsan::ScopedInterceptorDisabler disabler; - Symbolizer::LateInitialize(); - } else { - Symbolizer::LateInitialize(); - } - - VReport(1, "AddressSanitizer Init done\n"); - - if (flags()->sleep_after_init) { - Report("Sleeping for %d second(s)\n", flags()->sleep_after_init); - SleepForSeconds(flags()->sleep_after_init); - } -} - -// Initialize as requested from some part of ASan runtime library (interceptors, -// allocator, etc). -void AsanInitFromRtl() { - AsanInitInternal(); -} - -#if ASAN_DYNAMIC -// Initialize runtime in case it's LD_PRELOAD-ed into unsanitized executable -// (and thus normal initializers from .preinit_array or modules haven't run). - -class AsanInitializer { -public: // NOLINT - AsanInitializer() { - AsanInitFromRtl(); - } -}; - -static AsanInitializer asan_initializer; -#endif // ASAN_DYNAMIC - -} // namespace __asan - -// ---------------------- Interface ---------------- {{{1 -using namespace __asan; // NOLINT - -void NOINLINE __asan_handle_no_return() { - if (asan_init_is_running) - return; - - int local_stack; - AsanThread *curr_thread = GetCurrentThread(); - uptr PageSize = GetPageSizeCached(); - uptr top, bottom; - if (curr_thread) { - top = curr_thread->stack_top(); - bottom = ((uptr)&local_stack - PageSize) & ~(PageSize - 1); - } else if (SANITIZER_RTEMS) { - // Give up On RTEMS. - return; - } else { - CHECK(!SANITIZER_FUCHSIA); - // If we haven't seen this thread, try asking the OS for stack bounds. - uptr tls_addr, tls_size, stack_size; - GetThreadStackAndTls(/*main=*/false, &bottom, &stack_size, &tls_addr, - &tls_size); - top = bottom + stack_size; - } - static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M - if (top - bottom > kMaxExpectedCleanupSize) { - static bool reported_warning = false; - if (reported_warning) - return; - reported_warning = true; - Report("WARNING: ASan is ignoring requested __asan_handle_no_return: " - "stack top: %p; bottom %p; size: %p (%zd)\n" - "False positive error reports may follow\n" - "For details see " - "https://github.com/google/sanitizers/issues/189\n", - top, bottom, top - bottom, top - bottom); - return; - } - PoisonShadow(bottom, top - bottom, 0); - if (curr_thread && curr_thread->has_fake_stack()) - curr_thread->fake_stack()->HandleNoReturn(); -} - -extern "C" void *__asan_extra_spill_area() { - AsanThread *t = GetCurrentThread(); - CHECK(t); - return t->extra_spill_area(); -} - -void __asan_handle_vfork(void *sp) { - AsanThread *t = GetCurrentThread(); - CHECK(t); - uptr bottom = t->stack_bottom(); - PoisonShadow(bottom, (uptr)sp - bottom, 0); -} - -void NOINLINE __asan_set_death_callback(void (*callback)(void)) { - SetUserDieCallback(callback); -} - -// Initialize as requested from instrumented application code. -// We use this call as a trigger to wake up ASan from deactivated state. -void __asan_init() { - AsanActivate(); - AsanInitInternal(); -} - -void __asan_version_mismatch_check() { - // Do nothing. -} diff --git a/lib/asan/asan_rtl.cpp b/lib/asan/asan_rtl.cpp new file mode 100644 index 000000000000..594d7752eea6 --- /dev/null +++ b/lib/asan/asan_rtl.cpp @@ -0,0 +1,626 @@ +//===-- asan_rtl.cpp ------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Main file of the ASan run-time library. +//===----------------------------------------------------------------------===// + +#include "asan_activation.h" +#include "asan_allocator.h" +#include "asan_interceptors.h" +#include "asan_interface_internal.h" +#include "asan_internal.h" +#include "asan_mapping.h" +#include "asan_poisoning.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "asan_stats.h" +#include "asan_suppressions.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_atomic.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_symbolizer.h" +#include "lsan/lsan_common.h" +#include "ubsan/ubsan_init.h" +#include "ubsan/ubsan_platform.h" + +uptr __asan_shadow_memory_dynamic_address; // Global interface symbol. +int __asan_option_detect_stack_use_after_return; // Global interface symbol. +uptr *__asan_test_only_reported_buggy_pointer; // Used only for testing asan. + +namespace __asan { + +uptr AsanMappingProfile[kAsanMappingProfileSize]; + +static void AsanDie() { + static atomic_uint32_t num_calls; + if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) != 0) { + // Don't die twice - run a busy loop. + while (1) { } + } + if (common_flags()->print_module_map >= 1) PrintModuleMap(); + if (flags()->sleep_before_dying) { + Report("Sleeping for %d second(s)\n", flags()->sleep_before_dying); + SleepForSeconds(flags()->sleep_before_dying); + } + if (flags()->unmap_shadow_on_exit) { + if (kMidMemBeg) { + UnmapOrDie((void*)kLowShadowBeg, kMidMemBeg - kLowShadowBeg); + UnmapOrDie((void*)kMidMemEnd, kHighShadowEnd - kMidMemEnd); + } else { + if (kHighShadowEnd) + UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg); + } + } +} + +static void AsanCheckFailed(const char *file, int line, const char *cond, + u64 v1, u64 v2) { + Report("AddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file, + line, cond, (uptr)v1, (uptr)v2); + + // Print a stack trace the first time we come here. Otherwise, we probably + // failed a CHECK during symbolization. 
+ static atomic_uint32_t num_calls; + if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) == 0) { + PRINT_CURRENT_STACK_CHECK(); + } + + Die(); +} + +// -------------------------- Globals --------------------- {{{1 +int asan_inited; +bool asan_init_is_running; + +#if !ASAN_FIXED_MAPPING +uptr kHighMemEnd, kMidMemBeg, kMidMemEnd; +#endif + +// -------------------------- Misc ---------------- {{{1 +void ShowStatsAndAbort() { + __asan_print_accumulated_stats(); + Die(); +} + +// --------------- LowLevelAllocateCallbac ---------- {{{1 +static void OnLowLevelAllocate(uptr ptr, uptr size) { + PoisonShadow(ptr, size, kAsanInternalHeapMagic); +} + +// -------------------------- Run-time entry ------------------- {{{1 +// exported functions +#define ASAN_REPORT_ERROR(type, is_write, size) \ +extern "C" NOINLINE INTERFACE_ATTRIBUTE \ +void __asan_report_ ## type ## size(uptr addr) { \ + GET_CALLER_PC_BP_SP; \ + ReportGenericError(pc, bp, sp, addr, is_write, size, 0, true); \ +} \ +extern "C" NOINLINE INTERFACE_ATTRIBUTE \ +void __asan_report_exp_ ## type ## size(uptr addr, u32 exp) { \ + GET_CALLER_PC_BP_SP; \ + ReportGenericError(pc, bp, sp, addr, is_write, size, exp, true); \ +} \ +extern "C" NOINLINE INTERFACE_ATTRIBUTE \ +void __asan_report_ ## type ## size ## _noabort(uptr addr) { \ + GET_CALLER_PC_BP_SP; \ + ReportGenericError(pc, bp, sp, addr, is_write, size, 0, false); \ +} \ + +ASAN_REPORT_ERROR(load, false, 1) +ASAN_REPORT_ERROR(load, false, 2) +ASAN_REPORT_ERROR(load, false, 4) +ASAN_REPORT_ERROR(load, false, 8) +ASAN_REPORT_ERROR(load, false, 16) +ASAN_REPORT_ERROR(store, true, 1) +ASAN_REPORT_ERROR(store, true, 2) +ASAN_REPORT_ERROR(store, true, 4) +ASAN_REPORT_ERROR(store, true, 8) +ASAN_REPORT_ERROR(store, true, 16) + +#define ASAN_REPORT_ERROR_N(type, is_write) \ +extern "C" NOINLINE INTERFACE_ATTRIBUTE \ +void __asan_report_ ## type ## _n(uptr addr, uptr size) { \ + GET_CALLER_PC_BP_SP; \ + ReportGenericError(pc, bp, sp, addr, is_write, size, 0, true); \ +} \ +extern "C" NOINLINE INTERFACE_ATTRIBUTE \ +void __asan_report_exp_ ## type ## _n(uptr addr, uptr size, u32 exp) { \ + GET_CALLER_PC_BP_SP; \ + ReportGenericError(pc, bp, sp, addr, is_write, size, exp, true); \ +} \ +extern "C" NOINLINE INTERFACE_ATTRIBUTE \ +void __asan_report_ ## type ## _n_noabort(uptr addr, uptr size) { \ + GET_CALLER_PC_BP_SP; \ + ReportGenericError(pc, bp, sp, addr, is_write, size, 0, false); \ +} \ + +ASAN_REPORT_ERROR_N(load, false) +ASAN_REPORT_ERROR_N(store, true) + +#define ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp_arg, fatal) \ + if (SANITIZER_MYRIAD2 && !AddrIsInMem(addr) && !AddrIsInShadow(addr)) \ + return; \ + uptr sp = MEM_TO_SHADOW(addr); \ + uptr s = size <= SHADOW_GRANULARITY ? 
*reinterpret_cast(sp) \ + : *reinterpret_cast(sp); \ + if (UNLIKELY(s)) { \ + if (UNLIKELY(size >= SHADOW_GRANULARITY || \ + ((s8)((addr & (SHADOW_GRANULARITY - 1)) + size - 1)) >= \ + (s8)s)) { \ + if (__asan_test_only_reported_buggy_pointer) { \ + *__asan_test_only_reported_buggy_pointer = addr; \ + } else { \ + GET_CALLER_PC_BP_SP; \ + ReportGenericError(pc, bp, sp, addr, is_write, size, exp_arg, \ + fatal); \ + } \ + } \ + } + +#define ASAN_MEMORY_ACCESS_CALLBACK(type, is_write, size) \ + extern "C" NOINLINE INTERFACE_ATTRIBUTE \ + void __asan_##type##size(uptr addr) { \ + ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, 0, true) \ + } \ + extern "C" NOINLINE INTERFACE_ATTRIBUTE \ + void __asan_exp_##type##size(uptr addr, u32 exp) { \ + ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp, true) \ + } \ + extern "C" NOINLINE INTERFACE_ATTRIBUTE \ + void __asan_##type##size ## _noabort(uptr addr) { \ + ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, 0, false) \ + } \ + +ASAN_MEMORY_ACCESS_CALLBACK(load, false, 1) +ASAN_MEMORY_ACCESS_CALLBACK(load, false, 2) +ASAN_MEMORY_ACCESS_CALLBACK(load, false, 4) +ASAN_MEMORY_ACCESS_CALLBACK(load, false, 8) +ASAN_MEMORY_ACCESS_CALLBACK(load, false, 16) +ASAN_MEMORY_ACCESS_CALLBACK(store, true, 1) +ASAN_MEMORY_ACCESS_CALLBACK(store, true, 2) +ASAN_MEMORY_ACCESS_CALLBACK(store, true, 4) +ASAN_MEMORY_ACCESS_CALLBACK(store, true, 8) +ASAN_MEMORY_ACCESS_CALLBACK(store, true, 16) + +extern "C" +NOINLINE INTERFACE_ATTRIBUTE +void __asan_loadN(uptr addr, uptr size) { + if (__asan_region_is_poisoned(addr, size)) { + GET_CALLER_PC_BP_SP; + ReportGenericError(pc, bp, sp, addr, false, size, 0, true); + } +} + +extern "C" +NOINLINE INTERFACE_ATTRIBUTE +void __asan_exp_loadN(uptr addr, uptr size, u32 exp) { + if (__asan_region_is_poisoned(addr, size)) { + GET_CALLER_PC_BP_SP; + ReportGenericError(pc, bp, sp, addr, false, size, exp, true); + } +} + +extern "C" +NOINLINE INTERFACE_ATTRIBUTE +void __asan_loadN_noabort(uptr addr, uptr size) { + if (__asan_region_is_poisoned(addr, size)) { + GET_CALLER_PC_BP_SP; + ReportGenericError(pc, bp, sp, addr, false, size, 0, false); + } +} + +extern "C" +NOINLINE INTERFACE_ATTRIBUTE +void __asan_storeN(uptr addr, uptr size) { + if (__asan_region_is_poisoned(addr, size)) { + GET_CALLER_PC_BP_SP; + ReportGenericError(pc, bp, sp, addr, true, size, 0, true); + } +} + +extern "C" +NOINLINE INTERFACE_ATTRIBUTE +void __asan_exp_storeN(uptr addr, uptr size, u32 exp) { + if (__asan_region_is_poisoned(addr, size)) { + GET_CALLER_PC_BP_SP; + ReportGenericError(pc, bp, sp, addr, true, size, exp, true); + } +} + +extern "C" +NOINLINE INTERFACE_ATTRIBUTE +void __asan_storeN_noabort(uptr addr, uptr size) { + if (__asan_region_is_poisoned(addr, size)) { + GET_CALLER_PC_BP_SP; + ReportGenericError(pc, bp, sp, addr, true, size, 0, false); + } +} + +// Force the linker to keep the symbols for various ASan interface functions. +// We want to keep those in the executable in order to let the instrumented +// dynamic libraries access the symbol even if it is not used by the executable +// itself. This should help if the build system is removing dead code at link +// time. +static NOINLINE void force_interface_symbols() { + volatile int fake_condition = 0; // prevent dead condition elimination. + // __asan_report_* functions are noreturn, so we need a switch to prevent + // the compiler from removing any of them. 
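
To make the shadow test in ASAN_MEMORY_ACCESS_CALLBACK_BODY above concrete: for an 8-byte granule, a shadow byte of 0 means the whole granule is addressable, a value of 1..7 means only the first s bytes are, and negative values are poison magic. The following is a small standalone sketch of that partial-granule check for accesses of at most one granule; the names and values are invented for illustration and are not part of the patch.

// granule_check.cpp (illustrative sketch, not part of the patch)
#include <cstdint>
#include <cstdio>

// Report when the last byte touched lies at or beyond the addressable
// prefix described by the shadow byte s, mirroring the check above.
static bool AccessIsPoisoned(uintptr_t addr, unsigned size, int8_t s) {
  if (s == 0) return false;              // whole granule addressable
  int8_t last = (addr & 7) + size - 1;   // offset of the last byte accessed
  return last >= s;
}

int main() {
  // Shadow byte 5: only the first 5 bytes of the granule are addressable.
  std::printf("%d\n", AccessIsPoisoned(0x1000, 4, 5));  // 0: bytes 0..3 are ok
  std::printf("%d\n", AccessIsPoisoned(0x1003, 4, 5));  // 1: reaches byte 6
  return 0;
}
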
+ // clang-format off + switch (fake_condition) { + case 1: __asan_report_load1(0); break; + case 2: __asan_report_load2(0); break; + case 3: __asan_report_load4(0); break; + case 4: __asan_report_load8(0); break; + case 5: __asan_report_load16(0); break; + case 6: __asan_report_load_n(0, 0); break; + case 7: __asan_report_store1(0); break; + case 8: __asan_report_store2(0); break; + case 9: __asan_report_store4(0); break; + case 10: __asan_report_store8(0); break; + case 11: __asan_report_store16(0); break; + case 12: __asan_report_store_n(0, 0); break; + case 13: __asan_report_exp_load1(0, 0); break; + case 14: __asan_report_exp_load2(0, 0); break; + case 15: __asan_report_exp_load4(0, 0); break; + case 16: __asan_report_exp_load8(0, 0); break; + case 17: __asan_report_exp_load16(0, 0); break; + case 18: __asan_report_exp_load_n(0, 0, 0); break; + case 19: __asan_report_exp_store1(0, 0); break; + case 20: __asan_report_exp_store2(0, 0); break; + case 21: __asan_report_exp_store4(0, 0); break; + case 22: __asan_report_exp_store8(0, 0); break; + case 23: __asan_report_exp_store16(0, 0); break; + case 24: __asan_report_exp_store_n(0, 0, 0); break; + case 25: __asan_register_globals(nullptr, 0); break; + case 26: __asan_unregister_globals(nullptr, 0); break; + case 27: __asan_set_death_callback(nullptr); break; + case 28: __asan_set_error_report_callback(nullptr); break; + case 29: __asan_handle_no_return(); break; + case 30: __asan_address_is_poisoned(nullptr); break; + case 31: __asan_poison_memory_region(nullptr, 0); break; + case 32: __asan_unpoison_memory_region(nullptr, 0); break; + case 34: __asan_before_dynamic_init(nullptr); break; + case 35: __asan_after_dynamic_init(); break; + case 36: __asan_poison_stack_memory(0, 0); break; + case 37: __asan_unpoison_stack_memory(0, 0); break; + case 38: __asan_region_is_poisoned(0, 0); break; + case 39: __asan_describe_address(0); break; + case 40: __asan_set_shadow_00(0, 0); break; + case 41: __asan_set_shadow_f1(0, 0); break; + case 42: __asan_set_shadow_f2(0, 0); break; + case 43: __asan_set_shadow_f3(0, 0); break; + case 44: __asan_set_shadow_f5(0, 0); break; + case 45: __asan_set_shadow_f8(0, 0); break; + } + // clang-format on +} + +static void asan_atexit() { + Printf("AddressSanitizer exit stats:\n"); + __asan_print_accumulated_stats(); + // Print AsanMappingProfile. 
+ for (uptr i = 0; i < kAsanMappingProfileSize; i++) { + if (AsanMappingProfile[i] == 0) continue; + Printf("asan_mapping.h:%zd -- %zd\n", i, AsanMappingProfile[i]); + } +} + +static void InitializeHighMemEnd() { +#if !SANITIZER_MYRIAD2 +#if !ASAN_FIXED_MAPPING + kHighMemEnd = GetMaxUserVirtualAddress(); + // Increase kHighMemEnd to make sure it's properly + // aligned together with kHighMemBeg: + kHighMemEnd |= SHADOW_GRANULARITY * GetMmapGranularity() - 1; +#endif // !ASAN_FIXED_MAPPING + CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0); +#endif // !SANITIZER_MYRIAD2 +} + +void PrintAddressSpaceLayout() { + if (kHighMemBeg) { + Printf("|| `[%p, %p]` || HighMem ||\n", + (void*)kHighMemBeg, (void*)kHighMemEnd); + Printf("|| `[%p, %p]` || HighShadow ||\n", + (void*)kHighShadowBeg, (void*)kHighShadowEnd); + } + if (kMidMemBeg) { + Printf("|| `[%p, %p]` || ShadowGap3 ||\n", + (void*)kShadowGap3Beg, (void*)kShadowGap3End); + Printf("|| `[%p, %p]` || MidMem ||\n", + (void*)kMidMemBeg, (void*)kMidMemEnd); + Printf("|| `[%p, %p]` || ShadowGap2 ||\n", + (void*)kShadowGap2Beg, (void*)kShadowGap2End); + Printf("|| `[%p, %p]` || MidShadow ||\n", + (void*)kMidShadowBeg, (void*)kMidShadowEnd); + } + Printf("|| `[%p, %p]` || ShadowGap ||\n", + (void*)kShadowGapBeg, (void*)kShadowGapEnd); + if (kLowShadowBeg) { + Printf("|| `[%p, %p]` || LowShadow ||\n", + (void*)kLowShadowBeg, (void*)kLowShadowEnd); + Printf("|| `[%p, %p]` || LowMem ||\n", + (void*)kLowMemBeg, (void*)kLowMemEnd); + } + Printf("MemToShadow(shadow): %p %p", + (void*)MEM_TO_SHADOW(kLowShadowBeg), + (void*)MEM_TO_SHADOW(kLowShadowEnd)); + if (kHighMemBeg) { + Printf(" %p %p", + (void*)MEM_TO_SHADOW(kHighShadowBeg), + (void*)MEM_TO_SHADOW(kHighShadowEnd)); + } + if (kMidMemBeg) { + Printf(" %p %p", + (void*)MEM_TO_SHADOW(kMidShadowBeg), + (void*)MEM_TO_SHADOW(kMidShadowEnd)); + } + Printf("\n"); + Printf("redzone=%zu\n", (uptr)flags()->redzone); + Printf("max_redzone=%zu\n", (uptr)flags()->max_redzone); + Printf("quarantine_size_mb=%zuM\n", (uptr)flags()->quarantine_size_mb); + Printf("thread_local_quarantine_size_kb=%zuK\n", + (uptr)flags()->thread_local_quarantine_size_kb); + Printf("malloc_context_size=%zu\n", + (uptr)common_flags()->malloc_context_size); + + Printf("SHADOW_SCALE: %d\n", (int)SHADOW_SCALE); + Printf("SHADOW_GRANULARITY: %d\n", (int)SHADOW_GRANULARITY); + Printf("SHADOW_OFFSET: 0x%zx\n", (uptr)SHADOW_OFFSET); + CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7); + if (kMidMemBeg) + CHECK(kMidShadowBeg > kLowShadowEnd && + kMidMemBeg > kMidShadowEnd && + kHighShadowBeg > kMidMemEnd); +} + +#if defined(__thumb__) && defined(__linux__) +#define START_BACKGROUND_THREAD_IN_ASAN_INTERNAL +#endif + +#ifndef START_BACKGROUND_THREAD_IN_ASAN_INTERNAL +static bool UNUSED __local_asan_dyninit = [] { + MaybeStartBackgroudThread(); + SetSoftRssLimitExceededCallback(AsanSoftRssLimitExceededCallback); + + return false; +}(); +#endif + +static void AsanInitInternal() { + if (LIKELY(asan_inited)) return; + SanitizerToolName = "AddressSanitizer"; + CHECK(!asan_init_is_running && "ASan init calls itself!"); + asan_init_is_running = true; + + CacheBinaryName(); + + // Initialize flags. This must be done early, because most of the + // initialization steps look at flags(). + InitializeFlags(); + + // Stop performing init at this point if we are being loaded via + // dlopen() and the platform supports it. 
+ if (SANITIZER_SUPPORTS_INIT_FOR_DLOPEN && UNLIKELY(HandleDlopenInit())) { + asan_init_is_running = false; + VReport(1, "AddressSanitizer init is being performed for dlopen().\n"); + return; + } + + AsanCheckIncompatibleRT(); + AsanCheckDynamicRTPrereqs(); + AvoidCVE_2016_2143(); + + SetCanPoisonMemory(flags()->poison_heap); + SetMallocContextSize(common_flags()->malloc_context_size); + + InitializePlatformExceptionHandlers(); + + InitializeHighMemEnd(); + + // Make sure we are not statically linked. + AsanDoesNotSupportStaticLinkage(); + + // Install tool-specific callbacks in sanitizer_common. + AddDieCallback(AsanDie); + SetCheckFailedCallback(AsanCheckFailed); + SetPrintfAndReportCallback(AppendToErrorMessageBuffer); + + __sanitizer_set_report_path(common_flags()->log_path); + + __asan_option_detect_stack_use_after_return = + flags()->detect_stack_use_after_return; + + __sanitizer::InitializePlatformEarly(); + + // Re-exec ourselves if we need to set additional env or command line args. + MaybeReexec(); + + // Setup internal allocator callback. + SetLowLevelAllocateMinAlignment(SHADOW_GRANULARITY); + SetLowLevelAllocateCallback(OnLowLevelAllocate); + + InitializeAsanInterceptors(); + CheckASLR(); + + // Enable system log ("adb logcat") on Android. + // Doing this before interceptors are initialized crashes in: + // AsanInitInternal -> android_log_write -> __interceptor_strcmp + AndroidLogInit(); + + ReplaceSystemMalloc(); + + DisableCoreDumperIfNecessary(); + + InitializeShadowMemory(); + + AsanTSDInit(PlatformTSDDtor); + InstallDeadlySignalHandlers(AsanOnDeadlySignal); + + AllocatorOptions allocator_options; + allocator_options.SetFrom(flags(), common_flags()); + InitializeAllocator(allocator_options); + +#ifdef START_BACKGROUND_THREAD_IN_ASAN_INTERNAL + MaybeStartBackgroudThread(); + SetSoftRssLimitExceededCallback(AsanSoftRssLimitExceededCallback); +#endif + + // On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited + // should be set to 1 prior to initializing the threads. + asan_inited = 1; + asan_init_is_running = false; + + if (flags()->atexit) + Atexit(asan_atexit); + + InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir); + + // Now that ASan runtime is (mostly) initialized, deactivate it if + // necessary, so that it can be re-activated when requested. + if (flags()->start_deactivated) + AsanDeactivate(); + + // interceptors + InitTlsSize(); + + // Create main thread. + AsanThread *main_thread = CreateMainThread(); + CHECK_EQ(0, main_thread->tid()); + force_interface_symbols(); // no-op. + SanitizerInitializeUnwinder(); + + if (CAN_SANITIZE_LEAKS) { + __lsan::InitCommonLsan(); + if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) { + if (flags()->halt_on_error) + Atexit(__lsan::DoLeakCheck); + else + Atexit(__lsan::DoRecoverableLeakCheckVoid); + } + } + +#if CAN_SANITIZE_UB + __ubsan::InitAsPlugin(); +#endif + + InitializeSuppressions(); + + if (CAN_SANITIZE_LEAKS) { + // LateInitialize() calls dlsym, which can allocate an error string buffer + // in the TLS. Let's ignore the allocation to avoid reporting a leak. 
+ __lsan::ScopedInterceptorDisabler disabler; + Symbolizer::LateInitialize(); + } else { + Symbolizer::LateInitialize(); + } + + VReport(1, "AddressSanitizer Init done\n"); + + if (flags()->sleep_after_init) { + Report("Sleeping for %d second(s)\n", flags()->sleep_after_init); + SleepForSeconds(flags()->sleep_after_init); + } +} + +// Initialize as requested from some part of ASan runtime library (interceptors, +// allocator, etc). +void AsanInitFromRtl() { + AsanInitInternal(); +} + +#if ASAN_DYNAMIC +// Initialize runtime in case it's LD_PRELOAD-ed into unsanitized executable +// (and thus normal initializers from .preinit_array or modules haven't run). + +class AsanInitializer { + public: + AsanInitializer() { + AsanInitFromRtl(); + } +}; + +static AsanInitializer asan_initializer; +#endif // ASAN_DYNAMIC + +} // namespace __asan + +// ---------------------- Interface ---------------- {{{1 +using namespace __asan; + +void NOINLINE __asan_handle_no_return() { + if (asan_init_is_running) + return; + + int local_stack; + AsanThread *curr_thread = GetCurrentThread(); + uptr PageSize = GetPageSizeCached(); + uptr top, bottom; + if (curr_thread) { + top = curr_thread->stack_top(); + bottom = ((uptr)&local_stack - PageSize) & ~(PageSize - 1); + } else if (SANITIZER_RTEMS) { + // Give up On RTEMS. + return; + } else { + CHECK(!SANITIZER_FUCHSIA); + // If we haven't seen this thread, try asking the OS for stack bounds. + uptr tls_addr, tls_size, stack_size; + GetThreadStackAndTls(/*main=*/false, &bottom, &stack_size, &tls_addr, + &tls_size); + top = bottom + stack_size; + } + static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M + if (top - bottom > kMaxExpectedCleanupSize) { + static bool reported_warning = false; + if (reported_warning) + return; + reported_warning = true; + Report("WARNING: ASan is ignoring requested __asan_handle_no_return: " + "stack top: %p; bottom %p; size: %p (%zd)\n" + "False positive error reports may follow\n" + "For details see " + "https://github.com/google/sanitizers/issues/189\n", + top, bottom, top - bottom, top - bottom); + return; + } + PoisonShadow(bottom, top - bottom, 0); + if (curr_thread && curr_thread->has_fake_stack()) + curr_thread->fake_stack()->HandleNoReturn(); +} + +extern "C" void *__asan_extra_spill_area() { + AsanThread *t = GetCurrentThread(); + CHECK(t); + return t->extra_spill_area(); +} + +void __asan_handle_vfork(void *sp) { + AsanThread *t = GetCurrentThread(); + CHECK(t); + uptr bottom = t->stack_bottom(); + PoisonShadow(bottom, (uptr)sp - bottom, 0); +} + +void NOINLINE __asan_set_death_callback(void (*callback)(void)) { + SetUserDieCallback(callback); +} + +// Initialize as requested from instrumented application code. +// We use this call as a trigger to wake up ASan from deactivated state. +void __asan_init() { + AsanActivate(); + AsanInitInternal(); +} + +void __asan_version_mismatch_check() { + // Do nothing. 
+} diff --git a/lib/asan/asan_scariness_score.h b/lib/asan/asan_scariness_score.h index 9e7ba47d82dc..3932973c225e 100644 --- a/lib/asan/asan_scariness_score.h +++ b/lib/asan/asan_scariness_score.h @@ -43,7 +43,7 @@ struct ScarinessScoreBase { internal_strlcat(descr, "-", sizeof(descr)); internal_strlcat(descr, reason, sizeof(descr)); score += add_to_score; - }; + } int GetScore() const { return score; } const char *GetDescription() const { return descr; } void Print() const { diff --git a/lib/asan/asan_shadow_setup.cc b/lib/asan/asan_shadow_setup.cc deleted file mode 100644 index 9cfa4e2bd65d..000000000000 --- a/lib/asan/asan_shadow_setup.cc +++ /dev/null @@ -1,164 +0,0 @@ -//===-- asan_shadow_setup.cc ----------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Set up the shadow memory. -//===----------------------------------------------------------------------===// - -#include "sanitizer_common/sanitizer_platform.h" - -// asan_fuchsia.cc and asan_rtems.cc have their own -// InitializeShadowMemory implementation. -#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS - -#include "asan_internal.h" -#include "asan_mapping.h" - -namespace __asan { - -// ---------------------- mmap -------------------- {{{1 -// Reserve memory range [beg, end]. -// We need to use inclusive range because end+1 may not be representable. -void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) { - CHECK_EQ((beg % GetMmapGranularity()), 0); - CHECK_EQ(((end + 1) % GetMmapGranularity()), 0); - uptr size = end - beg + 1; - DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb. - if (!MmapFixedNoReserve(beg, size, name)) { - Report( - "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. " - "Perhaps you're using ulimit -v\n", - size); - Abort(); - } - if (common_flags()->no_huge_pages_for_shadow) NoHugePagesInRegion(beg, size); - if (common_flags()->use_madv_dontdump) DontDumpShadowMemory(beg, size); -} - -static void ProtectGap(uptr addr, uptr size) { - if (!flags()->protect_shadow_gap) { - // The shadow gap is unprotected, so there is a chance that someone - // is actually using this memory. Which means it needs a shadow... - uptr GapShadowBeg = RoundDownTo(MEM_TO_SHADOW(addr), GetPageSizeCached()); - uptr GapShadowEnd = - RoundUpTo(MEM_TO_SHADOW(addr + size), GetPageSizeCached()) - 1; - if (Verbosity()) - Printf( - "protect_shadow_gap=0:" - " not protecting shadow gap, allocating gap's shadow\n" - "|| `[%p, %p]` || ShadowGap's shadow ||\n", - GapShadowBeg, GapShadowEnd); - ReserveShadowMemoryRange(GapShadowBeg, GapShadowEnd, - "unprotected gap shadow"); - return; - } - void *res = MmapFixedNoAccess(addr, size, "shadow gap"); - if (addr == (uptr)res) return; - // A few pages at the start of the address space can not be protected. - // But we really want to protect as much as possible, to prevent this memory - // being returned as a result of a non-FIXED mmap(). 
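ProtectGap() works by mapping the shadow gap with no access rights, so any stray dereference faults immediately. A rough, runnable illustration of the underlying idea with plain POSIX mmap on Linux, without MAP_FIXED and without the retry loop the runtime needs near address zero; the size and names here are arbitrary.

#include <sys/mman.h>
#include <cassert>
#include <cstdio>

int main() {
  const size_t kSize = 1 << 20;  // 1 MiB, purely illustrative
  // Reserve an inaccessible region: any load or store into it traps, and
  // MAP_NORESERVE keeps it from counting against committed memory. The real
  // runtime does this at a fixed address via MmapFixedNoAccess().
  void *gap = mmap(nullptr, kSize, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  assert(gap != MAP_FAILED);
  printf("reserved inaccessible region at %p\n", gap);
  munmap(gap, kSize);
  return 0;
}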
- if (addr == kZeroBaseShadowStart) { - uptr step = GetMmapGranularity(); - while (size > step && addr < kZeroBaseMaxShadowStart) { - addr += step; - size -= step; - void *res = MmapFixedNoAccess(addr, size, "shadow gap"); - if (addr == (uptr)res) return; - } - } - - Report( - "ERROR: Failed to protect the shadow gap. " - "ASan cannot proceed correctly. ABORTING.\n"); - DumpProcessMap(); - Die(); -} - -static void MaybeReportLinuxPIEBug() { -#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__aarch64__)) - Report("This might be related to ELF_ET_DYN_BASE change in Linux 4.12.\n"); - Report( - "See https://github.com/google/sanitizers/issues/856 for possible " - "workarounds.\n"); -#endif -} - -void InitializeShadowMemory() { - // Set the shadow memory address to uninitialized. - __asan_shadow_memory_dynamic_address = kDefaultShadowSentinel; - - uptr shadow_start = kLowShadowBeg; - // Detect if a dynamic shadow address must used and find a available location - // when necessary. When dynamic address is used, the macro |kLowShadowBeg| - // expands to |__asan_shadow_memory_dynamic_address| which is - // |kDefaultShadowSentinel|. - bool full_shadow_is_available = false; - if (shadow_start == kDefaultShadowSentinel) { - __asan_shadow_memory_dynamic_address = 0; - CHECK_EQ(0, kLowShadowBeg); - shadow_start = FindDynamicShadowStart(); - if (SANITIZER_LINUX) full_shadow_is_available = true; - } - // Update the shadow memory address (potentially) used by instrumentation. - __asan_shadow_memory_dynamic_address = shadow_start; - - if (kLowShadowBeg) shadow_start -= GetMmapGranularity(); - - if (!full_shadow_is_available) - full_shadow_is_available = - MemoryRangeIsAvailable(shadow_start, kHighShadowEnd); - -#if SANITIZER_LINUX && defined(__x86_64__) && defined(_LP64) && \ - !ASAN_FIXED_MAPPING - if (!full_shadow_is_available) { - kMidMemBeg = kLowMemEnd < 0x3000000000ULL ? 0x3000000000ULL : 0; - kMidMemEnd = kLowMemEnd < 0x3000000000ULL ? 0x4fffffffffULL : 0; - } -#endif - - if (Verbosity()) PrintAddressSpaceLayout(); - - if (full_shadow_is_available) { - // mmap the low shadow plus at least one page at the left. - if (kLowShadowBeg) - ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow"); - // mmap the high shadow. - ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow"); - // protect the gap. - ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1); - CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1); - } else if (kMidMemBeg && - MemoryRangeIsAvailable(shadow_start, kMidMemBeg - 1) && - MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) { - CHECK(kLowShadowBeg != kLowShadowEnd); - // mmap the low shadow plus at least one page at the left. - ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow"); - // mmap the mid shadow. - ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd, "mid shadow"); - // mmap the high shadow. - ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow"); - // protect the gaps. - ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1); - ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1); - ProtectGap(kShadowGap3Beg, kShadowGap3End - kShadowGap3Beg + 1); - } else { - Report( - "Shadow memory range interleaves with an existing memory mapping. " - "ASan cannot proceed correctly. 
ABORTING.\n"); - Report("ASan shadow was supposed to be located in the [%p-%p] range.\n", - shadow_start, kHighShadowEnd); - MaybeReportLinuxPIEBug(); - DumpProcessMap(); - Die(); - } -} - -} // namespace __asan - -#endif // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS diff --git a/lib/asan/asan_shadow_setup.cpp b/lib/asan/asan_shadow_setup.cpp new file mode 100644 index 000000000000..17324932a86f --- /dev/null +++ b/lib/asan/asan_shadow_setup.cpp @@ -0,0 +1,163 @@ +//===-- asan_shadow_setup.cpp ---------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Set up the shadow memory. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" + +// asan_fuchsia.cpp and asan_rtems.cpp have their own +// InitializeShadowMemory implementation. +#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS + +#include "asan_internal.h" +#include "asan_mapping.h" + +namespace __asan { + +// ---------------------- mmap -------------------- {{{1 +// Reserve memory range [beg, end]. +// We need to use inclusive range because end+1 may not be representable. +void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) { + CHECK_EQ((beg % GetMmapGranularity()), 0); + CHECK_EQ(((end + 1) % GetMmapGranularity()), 0); + uptr size = end - beg + 1; + DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb. + if (!MmapFixedSuperNoReserve(beg, size, name)) { + Report( + "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. " + "Perhaps you're using ulimit -v\n", + size); + Abort(); + } + if (common_flags()->use_madv_dontdump) DontDumpShadowMemory(beg, size); +} + +static void ProtectGap(uptr addr, uptr size) { + if (!flags()->protect_shadow_gap) { + // The shadow gap is unprotected, so there is a chance that someone + // is actually using this memory. Which means it needs a shadow... + uptr GapShadowBeg = RoundDownTo(MEM_TO_SHADOW(addr), GetPageSizeCached()); + uptr GapShadowEnd = + RoundUpTo(MEM_TO_SHADOW(addr + size), GetPageSizeCached()) - 1; + if (Verbosity()) + Printf( + "protect_shadow_gap=0:" + " not protecting shadow gap, allocating gap's shadow\n" + "|| `[%p, %p]` || ShadowGap's shadow ||\n", + GapShadowBeg, GapShadowEnd); + ReserveShadowMemoryRange(GapShadowBeg, GapShadowEnd, + "unprotected gap shadow"); + return; + } + void *res = MmapFixedNoAccess(addr, size, "shadow gap"); + if (addr == (uptr)res) return; + // A few pages at the start of the address space can not be protected. + // But we really want to protect as much as possible, to prevent this memory + // being returned as a result of a non-FIXED mmap(). + if (addr == kZeroBaseShadowStart) { + uptr step = GetMmapGranularity(); + while (size > step && addr < kZeroBaseMaxShadowStart) { + addr += step; + size -= step; + void *res = MmapFixedNoAccess(addr, size, "shadow gap"); + if (addr == (uptr)res) return; + } + } + + Report( + "ERROR: Failed to protect the shadow gap. " + "ASan cannot proceed correctly. 
ABORTING.\n"); + DumpProcessMap(); + Die(); +} + +static void MaybeReportLinuxPIEBug() { +#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__aarch64__)) + Report("This might be related to ELF_ET_DYN_BASE change in Linux 4.12.\n"); + Report( + "See https://github.com/google/sanitizers/issues/856 for possible " + "workarounds.\n"); +#endif +} + +void InitializeShadowMemory() { + // Set the shadow memory address to uninitialized. + __asan_shadow_memory_dynamic_address = kDefaultShadowSentinel; + + uptr shadow_start = kLowShadowBeg; + // Detect if a dynamic shadow address must used and find a available location + // when necessary. When dynamic address is used, the macro |kLowShadowBeg| + // expands to |__asan_shadow_memory_dynamic_address| which is + // |kDefaultShadowSentinel|. + bool full_shadow_is_available = false; + if (shadow_start == kDefaultShadowSentinel) { + __asan_shadow_memory_dynamic_address = 0; + CHECK_EQ(0, kLowShadowBeg); + shadow_start = FindDynamicShadowStart(); + if (SANITIZER_LINUX) full_shadow_is_available = true; + } + // Update the shadow memory address (potentially) used by instrumentation. + __asan_shadow_memory_dynamic_address = shadow_start; + + if (kLowShadowBeg) shadow_start -= GetMmapGranularity(); + + if (!full_shadow_is_available) + full_shadow_is_available = + MemoryRangeIsAvailable(shadow_start, kHighShadowEnd); + +#if SANITIZER_LINUX && defined(__x86_64__) && defined(_LP64) && \ + !ASAN_FIXED_MAPPING + if (!full_shadow_is_available) { + kMidMemBeg = kLowMemEnd < 0x3000000000ULL ? 0x3000000000ULL : 0; + kMidMemEnd = kLowMemEnd < 0x3000000000ULL ? 0x4fffffffffULL : 0; + } +#endif + + if (Verbosity()) PrintAddressSpaceLayout(); + + if (full_shadow_is_available) { + // mmap the low shadow plus at least one page at the left. + if (kLowShadowBeg) + ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow"); + // mmap the high shadow. + ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow"); + // protect the gap. + ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1); + CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1); + } else if (kMidMemBeg && + MemoryRangeIsAvailable(shadow_start, kMidMemBeg - 1) && + MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) { + CHECK(kLowShadowBeg != kLowShadowEnd); + // mmap the low shadow plus at least one page at the left. + ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow"); + // mmap the mid shadow. + ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd, "mid shadow"); + // mmap the high shadow. + ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow"); + // protect the gaps. + ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1); + ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1); + ProtectGap(kShadowGap3Beg, kShadowGap3End - kShadowGap3Beg + 1); + } else { + Report( + "Shadow memory range interleaves with an existing memory mapping. " + "ASan cannot proceed correctly. 
ABORTING.\n"); + Report("ASan shadow was supposed to be located in the [%p-%p] range.\n", + shadow_start, kHighShadowEnd); + MaybeReportLinuxPIEBug(); + DumpProcessMap(); + Die(); + } +} + +} // namespace __asan + +#endif // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS diff --git a/lib/asan/asan_stack.cc b/lib/asan/asan_stack.cc deleted file mode 100644 index b244da4fa0ad..000000000000 --- a/lib/asan/asan_stack.cc +++ /dev/null @@ -1,88 +0,0 @@ -//===-- asan_stack.cc -----------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Code for ASan stack trace. -//===----------------------------------------------------------------------===// -#include "asan_internal.h" -#include "asan_stack.h" -#include "sanitizer_common/sanitizer_atomic.h" - -namespace __asan { - -static atomic_uint32_t malloc_context_size; - -void SetMallocContextSize(u32 size) { - atomic_store(&malloc_context_size, size, memory_order_release); -} - -u32 GetMallocContextSize() { - return atomic_load(&malloc_context_size, memory_order_acquire); -} - -namespace { - -// ScopedUnwinding is a scope for stacktracing member of a context -class ScopedUnwinding { - public: - explicit ScopedUnwinding(AsanThread *t) : thread(t) { - if (thread) { - can_unwind = !thread->isUnwinding(); - thread->setUnwinding(true); - } - } - ~ScopedUnwinding() { - if (thread) - thread->setUnwinding(false); - } - - bool CanUnwind() const { return can_unwind; } - - private: - AsanThread *thread = nullptr; - bool can_unwind = true; -}; - -} // namespace - -} // namespace __asan - -void __sanitizer::BufferedStackTrace::UnwindImpl( - uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) { - using namespace __asan; - size = 0; - if (UNLIKELY(!asan_inited)) - return; - request_fast = StackTrace::WillUseFastUnwind(request_fast); - AsanThread *t = GetCurrentThread(); - ScopedUnwinding unwind_scope(t); - if (!unwind_scope.CanUnwind()) - return; - if (request_fast) { - if (t) { - Unwind(max_depth, pc, bp, nullptr, t->stack_top(), t->stack_bottom(), - true); - } - return; - } - if (SANITIZER_MIPS && t && - !IsValidFrame(bp, t->stack_top(), t->stack_bottom())) - return; - Unwind(max_depth, pc, bp, context, 0, 0, false); -} - -// ------------------ Interface -------------- {{{1 - -extern "C" { -SANITIZER_INTERFACE_ATTRIBUTE -void __sanitizer_print_stack_trace() { - using namespace __asan; - PRINT_CURRENT_STACK(); -} -} // extern "C" diff --git a/lib/asan/asan_stack.cpp b/lib/asan/asan_stack.cpp new file mode 100644 index 000000000000..b7f4e6aeeab0 --- /dev/null +++ b/lib/asan/asan_stack.cpp @@ -0,0 +1,88 @@ +//===-- asan_stack.cpp ----------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Code for ASan stack trace. 
+//===----------------------------------------------------------------------===// +#include "asan_internal.h" +#include "asan_stack.h" +#include "sanitizer_common/sanitizer_atomic.h" + +namespace __asan { + +static atomic_uint32_t malloc_context_size; + +void SetMallocContextSize(u32 size) { + atomic_store(&malloc_context_size, size, memory_order_release); +} + +u32 GetMallocContextSize() { + return atomic_load(&malloc_context_size, memory_order_acquire); +} + +namespace { + +// ScopedUnwinding is a scope for stacktracing member of a context +class ScopedUnwinding { + public: + explicit ScopedUnwinding(AsanThread *t) : thread(t) { + if (thread) { + can_unwind = !thread->isUnwinding(); + thread->setUnwinding(true); + } + } + ~ScopedUnwinding() { + if (thread) + thread->setUnwinding(false); + } + + bool CanUnwind() const { return can_unwind; } + + private: + AsanThread *thread = nullptr; + bool can_unwind = true; +}; + +} // namespace + +} // namespace __asan + +void __sanitizer::BufferedStackTrace::UnwindImpl( + uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) { + using namespace __asan; + size = 0; + if (UNLIKELY(!asan_inited)) + return; + request_fast = StackTrace::WillUseFastUnwind(request_fast); + AsanThread *t = GetCurrentThread(); + ScopedUnwinding unwind_scope(t); + if (!unwind_scope.CanUnwind()) + return; + if (request_fast) { + if (t) { + Unwind(max_depth, pc, bp, nullptr, t->stack_top(), t->stack_bottom(), + true); + } + return; + } + if (SANITIZER_MIPS && t && + !IsValidFrame(bp, t->stack_top(), t->stack_bottom())) + return; + Unwind(max_depth, pc, bp, context, 0, 0, false); +} + +// ------------------ Interface -------------- {{{1 + +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_print_stack_trace() { + using namespace __asan; + PRINT_CURRENT_STACK(); +} +} // extern "C" diff --git a/lib/asan/asan_stack.h b/lib/asan/asan_stack.h index 3a4b3cefc5de..4089d3d7340e 100644 --- a/lib/asan/asan_stack.h +++ b/lib/asan/asan_stack.h @@ -8,7 +8,7 @@ // // This file is a part of AddressSanitizer, an address sanity checker. // -// ASan-private header for asan_stack.cc. +// ASan-private header for asan_stack.cpp. //===----------------------------------------------------------------------===// #ifndef ASAN_STACK_H diff --git a/lib/asan/asan_stats.cc b/lib/asan/asan_stats.cc deleted file mode 100644 index 2f996ce63ccc..000000000000 --- a/lib/asan/asan_stats.cc +++ /dev/null @@ -1,173 +0,0 @@ -//===-- asan_stats.cc -----------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Code related to statistics collected by AddressSanitizer. 
-//===----------------------------------------------------------------------===// -#include "asan_interceptors.h" -#include "asan_internal.h" -#include "asan_stats.h" -#include "asan_thread.h" -#include "sanitizer_common/sanitizer_allocator_interface.h" -#include "sanitizer_common/sanitizer_mutex.h" -#include "sanitizer_common/sanitizer_stackdepot.h" - -namespace __asan { - -AsanStats::AsanStats() { - Clear(); -} - -void AsanStats::Clear() { - CHECK(REAL(memset)); - REAL(memset)(this, 0, sizeof(AsanStats)); -} - -static void PrintMallocStatsArray(const char *prefix, - uptr (&array)[kNumberOfSizeClasses]) { - Printf("%s", prefix); - for (uptr i = 0; i < kNumberOfSizeClasses; i++) { - if (!array[i]) continue; - Printf("%zu:%zu; ", i, array[i]); - } - Printf("\n"); -} - -void AsanStats::Print() { - Printf("Stats: %zuM malloced (%zuM for red zones) by %zu calls\n", - malloced>>20, malloced_redzones>>20, mallocs); - Printf("Stats: %zuM realloced by %zu calls\n", realloced>>20, reallocs); - Printf("Stats: %zuM freed by %zu calls\n", freed>>20, frees); - Printf("Stats: %zuM really freed by %zu calls\n", - really_freed>>20, real_frees); - Printf("Stats: %zuM (%zuM-%zuM) mmaped; %zu maps, %zu unmaps\n", - (mmaped-munmaped)>>20, mmaped>>20, munmaped>>20, - mmaps, munmaps); - - PrintMallocStatsArray(" mallocs by size class: ", malloced_by_size); - Printf("Stats: malloc large: %zu\n", malloc_large); -} - -void AsanStats::MergeFrom(const AsanStats *stats) { - uptr *dst_ptr = reinterpret_cast(this); - const uptr *src_ptr = reinterpret_cast(stats); - uptr num_fields = sizeof(*this) / sizeof(uptr); - for (uptr i = 0; i < num_fields; i++) - dst_ptr[i] += src_ptr[i]; -} - -static BlockingMutex print_lock(LINKER_INITIALIZED); - -static AsanStats unknown_thread_stats(LINKER_INITIALIZED); -static AsanStats dead_threads_stats(LINKER_INITIALIZED); -static BlockingMutex dead_threads_stats_lock(LINKER_INITIALIZED); -// Required for malloc_zone_statistics() on OS X. This can't be stored in -// per-thread AsanStats. -static uptr max_malloced_memory; - -static void MergeThreadStats(ThreadContextBase *tctx_base, void *arg) { - AsanStats *accumulated_stats = reinterpret_cast(arg); - AsanThreadContext *tctx = static_cast(tctx_base); - if (AsanThread *t = tctx->thread) - accumulated_stats->MergeFrom(&t->stats()); -} - -static void GetAccumulatedStats(AsanStats *stats) { - stats->Clear(); - { - ThreadRegistryLock l(&asanThreadRegistry()); - asanThreadRegistry() - .RunCallbackForEachThreadLocked(MergeThreadStats, stats); - } - stats->MergeFrom(&unknown_thread_stats); - { - BlockingMutexLock lock(&dead_threads_stats_lock); - stats->MergeFrom(&dead_threads_stats); - } - // This is not very accurate: we may miss allocation peaks that happen - // between two updates of accumulated_stats_. For more accurate bookkeeping - // the maximum should be updated on every malloc(), which is unacceptable. 
- if (max_malloced_memory < stats->malloced) { - max_malloced_memory = stats->malloced; - } -} - -void FlushToDeadThreadStats(AsanStats *stats) { - BlockingMutexLock lock(&dead_threads_stats_lock); - dead_threads_stats.MergeFrom(stats); - stats->Clear(); -} - -void FillMallocStatistics(AsanMallocStats *malloc_stats) { - AsanStats stats; - GetAccumulatedStats(&stats); - malloc_stats->blocks_in_use = stats.mallocs; - malloc_stats->size_in_use = stats.malloced; - malloc_stats->max_size_in_use = max_malloced_memory; - malloc_stats->size_allocated = stats.mmaped; -} - -AsanStats &GetCurrentThreadStats() { - AsanThread *t = GetCurrentThread(); - return (t) ? t->stats() : unknown_thread_stats; -} - -static void PrintAccumulatedStats() { - AsanStats stats; - GetAccumulatedStats(&stats); - // Use lock to keep reports from mixing up. - BlockingMutexLock lock(&print_lock); - stats.Print(); - StackDepotStats *stack_depot_stats = StackDepotGetStats(); - Printf("Stats: StackDepot: %zd ids; %zdM allocated\n", - stack_depot_stats->n_uniq_ids, stack_depot_stats->allocated >> 20); - PrintInternalAllocatorStats(); -} - -} // namespace __asan - -// ---------------------- Interface ---------------- {{{1 -using namespace __asan; // NOLINT - -uptr __sanitizer_get_current_allocated_bytes() { - AsanStats stats; - GetAccumulatedStats(&stats); - uptr malloced = stats.malloced; - uptr freed = stats.freed; - // Return sane value if malloced < freed due to racy - // way we update accumulated stats. - return (malloced > freed) ? malloced - freed : 1; -} - -uptr __sanitizer_get_heap_size() { - AsanStats stats; - GetAccumulatedStats(&stats); - return stats.mmaped - stats.munmaped; -} - -uptr __sanitizer_get_free_bytes() { - AsanStats stats; - GetAccumulatedStats(&stats); - uptr total_free = stats.mmaped - - stats.munmaped - + stats.really_freed; - uptr total_used = stats.malloced - + stats.malloced_redzones; - // Return sane value if total_free < total_used due to racy - // way we update accumulated stats. - return (total_free > total_used) ? total_free - total_used : 1; -} - -uptr __sanitizer_get_unmapped_bytes() { - return 0; -} - -void __asan_print_accumulated_stats() { - PrintAccumulatedStats(); -} diff --git a/lib/asan/asan_stats.cpp b/lib/asan/asan_stats.cpp new file mode 100644 index 000000000000..00ded8f5ef50 --- /dev/null +++ b/lib/asan/asan_stats.cpp @@ -0,0 +1,173 @@ +//===-- asan_stats.cpp ----------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Code related to statistics collected by AddressSanitizer. 
+//===----------------------------------------------------------------------===// +#include "asan_interceptors.h" +#include "asan_internal.h" +#include "asan_stats.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_allocator_interface.h" +#include "sanitizer_common/sanitizer_mutex.h" +#include "sanitizer_common/sanitizer_stackdepot.h" + +namespace __asan { + +AsanStats::AsanStats() { + Clear(); +} + +void AsanStats::Clear() { + CHECK(REAL(memset)); + REAL(memset)(this, 0, sizeof(AsanStats)); +} + +static void PrintMallocStatsArray(const char *prefix, + uptr (&array)[kNumberOfSizeClasses]) { + Printf("%s", prefix); + for (uptr i = 0; i < kNumberOfSizeClasses; i++) { + if (!array[i]) continue; + Printf("%zu:%zu; ", i, array[i]); + } + Printf("\n"); +} + +void AsanStats::Print() { + Printf("Stats: %zuM malloced (%zuM for red zones) by %zu calls\n", + malloced>>20, malloced_redzones>>20, mallocs); + Printf("Stats: %zuM realloced by %zu calls\n", realloced>>20, reallocs); + Printf("Stats: %zuM freed by %zu calls\n", freed>>20, frees); + Printf("Stats: %zuM really freed by %zu calls\n", + really_freed>>20, real_frees); + Printf("Stats: %zuM (%zuM-%zuM) mmaped; %zu maps, %zu unmaps\n", + (mmaped-munmaped)>>20, mmaped>>20, munmaped>>20, + mmaps, munmaps); + + PrintMallocStatsArray(" mallocs by size class: ", malloced_by_size); + Printf("Stats: malloc large: %zu\n", malloc_large); +} + +void AsanStats::MergeFrom(const AsanStats *stats) { + uptr *dst_ptr = reinterpret_cast(this); + const uptr *src_ptr = reinterpret_cast(stats); + uptr num_fields = sizeof(*this) / sizeof(uptr); + for (uptr i = 0; i < num_fields; i++) + dst_ptr[i] += src_ptr[i]; +} + +static BlockingMutex print_lock(LINKER_INITIALIZED); + +static AsanStats unknown_thread_stats(LINKER_INITIALIZED); +static AsanStats dead_threads_stats(LINKER_INITIALIZED); +static BlockingMutex dead_threads_stats_lock(LINKER_INITIALIZED); +// Required for malloc_zone_statistics() on OS X. This can't be stored in +// per-thread AsanStats. +static uptr max_malloced_memory; + +static void MergeThreadStats(ThreadContextBase *tctx_base, void *arg) { + AsanStats *accumulated_stats = reinterpret_cast(arg); + AsanThreadContext *tctx = static_cast(tctx_base); + if (AsanThread *t = tctx->thread) + accumulated_stats->MergeFrom(&t->stats()); +} + +static void GetAccumulatedStats(AsanStats *stats) { + stats->Clear(); + { + ThreadRegistryLock l(&asanThreadRegistry()); + asanThreadRegistry() + .RunCallbackForEachThreadLocked(MergeThreadStats, stats); + } + stats->MergeFrom(&unknown_thread_stats); + { + BlockingMutexLock lock(&dead_threads_stats_lock); + stats->MergeFrom(&dead_threads_stats); + } + // This is not very accurate: we may miss allocation peaks that happen + // between two updates of accumulated_stats_. For more accurate bookkeeping + // the maximum should be updated on every malloc(), which is unacceptable. 
+ if (max_malloced_memory < stats->malloced) { + max_malloced_memory = stats->malloced; + } +} + +void FlushToDeadThreadStats(AsanStats *stats) { + BlockingMutexLock lock(&dead_threads_stats_lock); + dead_threads_stats.MergeFrom(stats); + stats->Clear(); +} + +void FillMallocStatistics(AsanMallocStats *malloc_stats) { + AsanStats stats; + GetAccumulatedStats(&stats); + malloc_stats->blocks_in_use = stats.mallocs; + malloc_stats->size_in_use = stats.malloced; + malloc_stats->max_size_in_use = max_malloced_memory; + malloc_stats->size_allocated = stats.mmaped; +} + +AsanStats &GetCurrentThreadStats() { + AsanThread *t = GetCurrentThread(); + return (t) ? t->stats() : unknown_thread_stats; +} + +static void PrintAccumulatedStats() { + AsanStats stats; + GetAccumulatedStats(&stats); + // Use lock to keep reports from mixing up. + BlockingMutexLock lock(&print_lock); + stats.Print(); + StackDepotStats *stack_depot_stats = StackDepotGetStats(); + Printf("Stats: StackDepot: %zd ids; %zdM allocated\n", + stack_depot_stats->n_uniq_ids, stack_depot_stats->allocated >> 20); + PrintInternalAllocatorStats(); +} + +} // namespace __asan + +// ---------------------- Interface ---------------- {{{1 +using namespace __asan; + +uptr __sanitizer_get_current_allocated_bytes() { + AsanStats stats; + GetAccumulatedStats(&stats); + uptr malloced = stats.malloced; + uptr freed = stats.freed; + // Return sane value if malloced < freed due to racy + // way we update accumulated stats. + return (malloced > freed) ? malloced - freed : 1; +} + +uptr __sanitizer_get_heap_size() { + AsanStats stats; + GetAccumulatedStats(&stats); + return stats.mmaped - stats.munmaped; +} + +uptr __sanitizer_get_free_bytes() { + AsanStats stats; + GetAccumulatedStats(&stats); + uptr total_free = stats.mmaped + - stats.munmaped + + stats.really_freed; + uptr total_used = stats.malloced + + stats.malloced_redzones; + // Return sane value if total_free < total_used due to racy + // way we update accumulated stats. + return (total_free > total_used) ? total_free - total_used : 1; +} + +uptr __sanitizer_get_unmapped_bytes() { + return 0; +} + +void __asan_print_accumulated_stats() { + PrintAccumulatedStats(); +} diff --git a/lib/asan/asan_suppressions.cc b/lib/asan/asan_suppressions.cc deleted file mode 100644 index 118853e61b79..000000000000 --- a/lib/asan/asan_suppressions.cc +++ /dev/null @@ -1,104 +0,0 @@ -//===-- asan_suppressions.cc ----------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Issue suppression and suppression-related functions. 
-//===----------------------------------------------------------------------===// - -#include "asan_suppressions.h" - -#include "asan_stack.h" -#include "sanitizer_common/sanitizer_placement_new.h" -#include "sanitizer_common/sanitizer_suppressions.h" -#include "sanitizer_common/sanitizer_symbolizer.h" - -namespace __asan { - -ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)]; -static SuppressionContext *suppression_ctx = nullptr; -static const char kInterceptorName[] = "interceptor_name"; -static const char kInterceptorViaFunction[] = "interceptor_via_fun"; -static const char kInterceptorViaLibrary[] = "interceptor_via_lib"; -static const char kODRViolation[] = "odr_violation"; -static const char *kSuppressionTypes[] = { - kInterceptorName, kInterceptorViaFunction, kInterceptorViaLibrary, - kODRViolation}; - -SANITIZER_INTERFACE_WEAK_DEF(const char *, __asan_default_suppressions, void) { - return ""; -} - -void InitializeSuppressions() { - CHECK_EQ(nullptr, suppression_ctx); - suppression_ctx = new (suppression_placeholder) // NOLINT - SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes)); - suppression_ctx->ParseFromFile(flags()->suppressions); - if (&__asan_default_suppressions) - suppression_ctx->Parse(__asan_default_suppressions()); -} - -bool IsInterceptorSuppressed(const char *interceptor_name) { - CHECK(suppression_ctx); - Suppression *s; - // Match "interceptor_name" suppressions. - return suppression_ctx->Match(interceptor_name, kInterceptorName, &s); -} - -bool HaveStackTraceBasedSuppressions() { - CHECK(suppression_ctx); - return suppression_ctx->HasSuppressionType(kInterceptorViaFunction) || - suppression_ctx->HasSuppressionType(kInterceptorViaLibrary); -} - -bool IsODRViolationSuppressed(const char *global_var_name) { - CHECK(suppression_ctx); - Suppression *s; - // Match "odr_violation" suppressions. - return suppression_ctx->Match(global_var_name, kODRViolation, &s); -} - -bool IsStackTraceSuppressed(const StackTrace *stack) { - if (!HaveStackTraceBasedSuppressions()) - return false; - - CHECK(suppression_ctx); - Symbolizer *symbolizer = Symbolizer::GetOrInit(); - Suppression *s; - for (uptr i = 0; i < stack->size && stack->trace[i]; i++) { - uptr addr = stack->trace[i]; - - if (suppression_ctx->HasSuppressionType(kInterceptorViaLibrary)) { - // Match "interceptor_via_lib" suppressions. - if (const char *module_name = symbolizer->GetModuleNameForPc(addr)) - if (suppression_ctx->Match(module_name, kInterceptorViaLibrary, &s)) - return true; - } - - if (suppression_ctx->HasSuppressionType(kInterceptorViaFunction)) { - SymbolizedStack *frames = symbolizer->SymbolizePC(addr); - CHECK(frames); - for (SymbolizedStack *cur = frames; cur; cur = cur->next) { - const char *function_name = cur->info.function; - if (!function_name) { - continue; - } - // Match "interceptor_via_fun" suppressions. - if (suppression_ctx->Match(function_name, kInterceptorViaFunction, - &s)) { - frames->ClearAll(); - return true; - } - } - frames->ClearAll(); - } - } - return false; -} - -} // namespace __asan diff --git a/lib/asan/asan_suppressions.cpp b/lib/asan/asan_suppressions.cpp new file mode 100644 index 000000000000..8cb2c3e3b9b6 --- /dev/null +++ b/lib/asan/asan_suppressions.cpp @@ -0,0 +1,104 @@ +//===-- asan_suppressions.cpp ---------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Issue suppression and suppression-related functions. +//===----------------------------------------------------------------------===// + +#include "asan_suppressions.h" + +#include "asan_stack.h" +#include "sanitizer_common/sanitizer_placement_new.h" +#include "sanitizer_common/sanitizer_suppressions.h" +#include "sanitizer_common/sanitizer_symbolizer.h" + +namespace __asan { + +ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)]; +static SuppressionContext *suppression_ctx = nullptr; +static const char kInterceptorName[] = "interceptor_name"; +static const char kInterceptorViaFunction[] = "interceptor_via_fun"; +static const char kInterceptorViaLibrary[] = "interceptor_via_lib"; +static const char kODRViolation[] = "odr_violation"; +static const char *kSuppressionTypes[] = { + kInterceptorName, kInterceptorViaFunction, kInterceptorViaLibrary, + kODRViolation}; + +SANITIZER_INTERFACE_WEAK_DEF(const char *, __asan_default_suppressions, void) { + return ""; +} + +void InitializeSuppressions() { + CHECK_EQ(nullptr, suppression_ctx); + suppression_ctx = new (suppression_placeholder) + SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes)); + suppression_ctx->ParseFromFile(flags()->suppressions); + if (&__asan_default_suppressions) + suppression_ctx->Parse(__asan_default_suppressions()); +} + +bool IsInterceptorSuppressed(const char *interceptor_name) { + CHECK(suppression_ctx); + Suppression *s; + // Match "interceptor_name" suppressions. + return suppression_ctx->Match(interceptor_name, kInterceptorName, &s); +} + +bool HaveStackTraceBasedSuppressions() { + CHECK(suppression_ctx); + return suppression_ctx->HasSuppressionType(kInterceptorViaFunction) || + suppression_ctx->HasSuppressionType(kInterceptorViaLibrary); +} + +bool IsODRViolationSuppressed(const char *global_var_name) { + CHECK(suppression_ctx); + Suppression *s; + // Match "odr_violation" suppressions. + return suppression_ctx->Match(global_var_name, kODRViolation, &s); +} + +bool IsStackTraceSuppressed(const StackTrace *stack) { + if (!HaveStackTraceBasedSuppressions()) + return false; + + CHECK(suppression_ctx); + Symbolizer *symbolizer = Symbolizer::GetOrInit(); + Suppression *s; + for (uptr i = 0; i < stack->size && stack->trace[i]; i++) { + uptr addr = stack->trace[i]; + + if (suppression_ctx->HasSuppressionType(kInterceptorViaLibrary)) { + // Match "interceptor_via_lib" suppressions. + if (const char *module_name = symbolizer->GetModuleNameForPc(addr)) + if (suppression_ctx->Match(module_name, kInterceptorViaLibrary, &s)) + return true; + } + + if (suppression_ctx->HasSuppressionType(kInterceptorViaFunction)) { + SymbolizedStack *frames = symbolizer->SymbolizePC(addr); + CHECK(frames); + for (SymbolizedStack *cur = frames; cur; cur = cur->next) { + const char *function_name = cur->info.function; + if (!function_name) { + continue; + } + // Match "interceptor_via_fun" suppressions. 
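Entries for these suppression types are normally supplied one per line as type:wildcard in a file named by ASAN_OPTIONS=suppressions=<path>. As the weak definition above suggests, a binary can also bake in defaults by providing its own definition of the hook; a small sketch, where the two entries are made-up examples rather than anything from this patch.

// Overrides the weak __asan_default_suppressions() hook declared in this file.
// The entries below are purely illustrative.
extern "C" const char *__asan_default_suppressions() {
  return "interceptor_via_fun:SomeNoisyWrapper\n"
         "odr_violation:*duplicated_global*\n";
}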
+ if (suppression_ctx->Match(function_name, kInterceptorViaFunction, + &s)) { + frames->ClearAll(); + return true; + } + } + frames->ClearAll(); + } + } + return false; +} + +} // namespace __asan diff --git a/lib/asan/asan_suppressions.h b/lib/asan/asan_suppressions.h index 9bf297602cfa..121d4ddf1875 100644 --- a/lib/asan/asan_suppressions.h +++ b/lib/asan/asan_suppressions.h @@ -8,7 +8,7 @@ // // This file is a part of AddressSanitizer, an address sanity checker. // -// ASan-private header for asan_suppressions.cc. +// ASan-private header for asan_suppressions.cpp. //===----------------------------------------------------------------------===// #ifndef ASAN_SUPPRESSIONS_H #define ASAN_SUPPRESSIONS_H diff --git a/lib/asan/asan_thread.cc b/lib/asan/asan_thread.cc deleted file mode 100644 index e63561c2243f..000000000000 --- a/lib/asan/asan_thread.cc +++ /dev/null @@ -1,535 +0,0 @@ -//===-- asan_thread.cc ----------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Thread-related code. -//===----------------------------------------------------------------------===// -#include "asan_allocator.h" -#include "asan_interceptors.h" -#include "asan_poisoning.h" -#include "asan_stack.h" -#include "asan_thread.h" -#include "asan_mapping.h" -#include "sanitizer_common/sanitizer_common.h" -#include "sanitizer_common/sanitizer_placement_new.h" -#include "sanitizer_common/sanitizer_stackdepot.h" -#include "sanitizer_common/sanitizer_tls_get_addr.h" -#include "lsan/lsan_common.h" - -namespace __asan { - -// AsanThreadContext implementation. - -void AsanThreadContext::OnCreated(void *arg) { - CreateThreadContextArgs *args = static_cast(arg); - if (args->stack) - stack_id = StackDepotPut(*args->stack); - thread = args->thread; - thread->set_context(this); -} - -void AsanThreadContext::OnFinished() { - // Drop the link to the AsanThread object. - thread = nullptr; -} - -// MIPS requires aligned address -static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)]; -static ThreadRegistry *asan_thread_registry; - -static BlockingMutex mu_for_thread_context(LINKER_INITIALIZED); -static LowLevelAllocator allocator_for_thread_context; - -static ThreadContextBase *GetAsanThreadContext(u32 tid) { - BlockingMutexLock lock(&mu_for_thread_context); - return new(allocator_for_thread_context) AsanThreadContext(tid); -} - -ThreadRegistry &asanThreadRegistry() { - static bool initialized; - // Don't worry about thread_safety - this should be called when there is - // a single thread. - if (!initialized) { - // Never reuse ASan threads: we store pointer to AsanThreadContext - // in TSD and can't reliably tell when no more TSD destructors will - // be called. It would be wrong to reuse AsanThreadContext for another - // thread before all TSD destructors will be called for it. - asan_thread_registry = new(thread_registry_placeholder) ThreadRegistry( - GetAsanThreadContext, kMaxNumberOfThreads, kMaxNumberOfThreads); - initialized = true; - } - return *asan_thread_registry; -} - -AsanThreadContext *GetThreadContextByTidLocked(u32 tid) { - return static_cast( - asanThreadRegistry().GetThreadLocked(tid)); -} - -// AsanThread implementation. 
- -AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg, - u32 parent_tid, StackTrace *stack, - bool detached) { - uptr PageSize = GetPageSizeCached(); - uptr size = RoundUpTo(sizeof(AsanThread), PageSize); - AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__); - thread->start_routine_ = start_routine; - thread->arg_ = arg; - AsanThreadContext::CreateThreadContextArgs args = {thread, stack}; - asanThreadRegistry().CreateThread(*reinterpret_cast(thread), detached, - parent_tid, &args); - - return thread; -} - -void AsanThread::TSDDtor(void *tsd) { - AsanThreadContext *context = (AsanThreadContext*)tsd; - VReport(1, "T%d TSDDtor\n", context->tid); - if (context->thread) - context->thread->Destroy(); -} - -void AsanThread::Destroy() { - int tid = this->tid(); - VReport(1, "T%d exited\n", tid); - - malloc_storage().CommitBack(); - if (common_flags()->use_sigaltstack) UnsetAlternateSignalStack(); - asanThreadRegistry().FinishThread(tid); - FlushToDeadThreadStats(&stats_); - // We also clear the shadow on thread destruction because - // some code may still be executing in later TSD destructors - // and we don't want it to have any poisoned stack. - ClearShadowForThreadStackAndTLS(); - DeleteFakeStack(tid); - uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached()); - UnmapOrDie(this, size); - DTLS_Destroy(); -} - -void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom, - uptr size) { - if (atomic_load(&stack_switching_, memory_order_relaxed)) { - Report("ERROR: starting fiber switch while in fiber switch\n"); - Die(); - } - - next_stack_bottom_ = bottom; - next_stack_top_ = bottom + size; - atomic_store(&stack_switching_, 1, memory_order_release); - - FakeStack *current_fake_stack = fake_stack_; - if (fake_stack_save) - *fake_stack_save = fake_stack_; - fake_stack_ = nullptr; - SetTLSFakeStack(nullptr); - // if fake_stack_save is null, the fiber will die, delete the fakestack - if (!fake_stack_save && current_fake_stack) - current_fake_stack->Destroy(this->tid()); -} - -void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save, - uptr *bottom_old, - uptr *size_old) { - if (!atomic_load(&stack_switching_, memory_order_relaxed)) { - Report("ERROR: finishing a fiber switch that has not started\n"); - Die(); - } - - if (fake_stack_save) { - SetTLSFakeStack(fake_stack_save); - fake_stack_ = fake_stack_save; - } - - if (bottom_old) - *bottom_old = stack_bottom_; - if (size_old) - *size_old = stack_top_ - stack_bottom_; - stack_bottom_ = next_stack_bottom_; - stack_top_ = next_stack_top_; - atomic_store(&stack_switching_, 0, memory_order_release); - next_stack_top_ = 0; - next_stack_bottom_ = 0; -} - -inline AsanThread::StackBounds AsanThread::GetStackBounds() const { - if (!atomic_load(&stack_switching_, memory_order_acquire)) { - // Make sure the stack bounds are fully initialized. - if (stack_bottom_ >= stack_top_) return {0, 0}; - return {stack_bottom_, stack_top_}; - } - char local; - const uptr cur_stack = (uptr)&local; - // Note: need to check next stack first, because FinishSwitchFiber - // may be in process of overwriting stack_top_/bottom_. But in such case - // we are already on the next stack. 
- if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_) - return {next_stack_bottom_, next_stack_top_}; - return {stack_bottom_, stack_top_}; -} - -uptr AsanThread::stack_top() { - return GetStackBounds().top; -} - -uptr AsanThread::stack_bottom() { - return GetStackBounds().bottom; -} - -uptr AsanThread::stack_size() { - const auto bounds = GetStackBounds(); - return bounds.top - bounds.bottom; -} - -// We want to create the FakeStack lazyly on the first use, but not eralier -// than the stack size is known and the procedure has to be async-signal safe. -FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() { - uptr stack_size = this->stack_size(); - if (stack_size == 0) // stack_size is not yet available, don't use FakeStack. - return nullptr; - uptr old_val = 0; - // fake_stack_ has 3 states: - // 0 -- not initialized - // 1 -- being initialized - // ptr -- initialized - // This CAS checks if the state was 0 and if so changes it to state 1, - // if that was successful, it initializes the pointer. - if (atomic_compare_exchange_strong( - reinterpret_cast(&fake_stack_), &old_val, 1UL, - memory_order_relaxed)) { - uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size)); - CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log); - stack_size_log = - Min(stack_size_log, static_cast(flags()->max_uar_stack_size_log)); - stack_size_log = - Max(stack_size_log, static_cast(flags()->min_uar_stack_size_log)); - fake_stack_ = FakeStack::Create(stack_size_log); - SetTLSFakeStack(fake_stack_); - return fake_stack_; - } - return nullptr; -} - -void AsanThread::Init(const InitOptions *options) { - next_stack_top_ = next_stack_bottom_ = 0; - atomic_store(&stack_switching_, false, memory_order_release); - CHECK_EQ(this->stack_size(), 0U); - SetThreadStackAndTls(options); - if (stack_top_ != stack_bottom_) { - CHECK_GT(this->stack_size(), 0U); - CHECK(AddrIsInMem(stack_bottom_)); - CHECK(AddrIsInMem(stack_top_ - 1)); - } - ClearShadowForThreadStackAndTLS(); - fake_stack_ = nullptr; - if (__asan_option_detect_stack_use_after_return) - AsyncSignalSafeLazyInitFakeStack(); - int local = 0; - VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(), - (void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_, - &local); -} - -// Fuchsia and RTEMS don't use ThreadStart. -// asan_fuchsia.c/asan_rtems.c define CreateMainThread and -// SetThreadStackAndTls. -#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS - -thread_return_t AsanThread::ThreadStart( - tid_t os_id, atomic_uintptr_t *signal_thread_is_registered) { - Init(); - asanThreadRegistry().StartThread(tid(), os_id, ThreadType::Regular, nullptr); - if (signal_thread_is_registered) - atomic_store(signal_thread_is_registered, 1, memory_order_release); - - if (common_flags()->use_sigaltstack) SetAlternateSignalStack(); - - if (!start_routine_) { - // start_routine_ == 0 if we're on the main thread or on one of the - // OS X libdispatch worker threads. But nobody is supposed to call - // ThreadStart() for the worker threads. - CHECK_EQ(tid(), 0); - return 0; - } - - thread_return_t res = start_routine_(arg_); - - // On POSIX systems we defer this to the TSD destructor. LSan will consider - // the thread's memory as non-live from the moment we call Destroy(), even - // though that memory might contain pointers to heap objects which will be - // cleaned up by a user-defined TSD destructor. Thus, calling Destroy() before - // the TSD destructors have run might cause false positives in LSan. 
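AsyncSignalSafeLazyInitFakeStack() above keeps its whole lazy-initialization protocol in one word: 0 means uninitialized, 1 means initialization is in progress, and any other value is the pointer itself. A compile-and-run sketch of that pattern with std::atomic; FakeStackStub and LazyInit are invented names, and the real code maps the stack with MmapOrDie rather than operator new.

#include <atomic>
#include <cassert>
#include <cstdint>

struct FakeStackStub { int dummy; };  // stand-in for the real FakeStack

// 0 = not initialized, 1 = being initialized, otherwise a FakeStackStub*.
std::atomic<uintptr_t> g_fake_stack{0};

FakeStackStub *LazyInit() {
  uintptr_t expected = 0;
  // Only the thread that wins the 0 -> 1 transition allocates; losers back
  // off immediately instead of blocking, so the routine never takes a lock.
  if (g_fake_stack.compare_exchange_strong(expected, 1,
                                           std::memory_order_relaxed)) {
    auto *fs = new FakeStackStub();
    g_fake_stack.store(reinterpret_cast<uintptr_t>(fs),
                       std::memory_order_release);
    return fs;
  }
  return nullptr;  // not ready yet, or initialized by another caller
}

int main() {
  assert(LazyInit() != nullptr);  // first call wins the CAS and allocates
  assert(LazyInit() == nullptr);  // later calls see the pointer state
  return 0;
}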
- if (!SANITIZER_POSIX) - this->Destroy(); - - return res; -} - -AsanThread *CreateMainThread() { - AsanThread *main_thread = AsanThread::Create( - /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0, - /* stack */ nullptr, /* detached */ true); - SetCurrentThread(main_thread); - main_thread->ThreadStart(internal_getpid(), - /* signal_thread_is_registered */ nullptr); - return main_thread; -} - -// This implementation doesn't use the argument, which is just passed down -// from the caller of Init (which see, above). It's only there to support -// OS-specific implementations that need more information passed through. -void AsanThread::SetThreadStackAndTls(const InitOptions *options) { - DCHECK_EQ(options, nullptr); - uptr tls_size = 0; - uptr stack_size = 0; - GetThreadStackAndTls(tid() == 0, &stack_bottom_, &stack_size, &tls_begin_, - &tls_size); - stack_top_ = stack_bottom_ + stack_size; - tls_end_ = tls_begin_ + tls_size; - dtls_ = DTLS_Get(); - - if (stack_top_ != stack_bottom_) { - int local; - CHECK(AddrIsInStack((uptr)&local)); - } -} - -#endif // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS - -void AsanThread::ClearShadowForThreadStackAndTLS() { - if (stack_top_ != stack_bottom_) - PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0); - if (tls_begin_ != tls_end_) { - uptr tls_begin_aligned = RoundDownTo(tls_begin_, SHADOW_GRANULARITY); - uptr tls_end_aligned = RoundUpTo(tls_end_, SHADOW_GRANULARITY); - FastPoisonShadowPartialRightRedzone(tls_begin_aligned, - tls_end_ - tls_begin_aligned, - tls_end_aligned - tls_end_, 0); - } -} - -bool AsanThread::GetStackFrameAccessByAddr(uptr addr, - StackFrameAccess *access) { - if (stack_top_ == stack_bottom_) - return false; - - uptr bottom = 0; - if (AddrIsInStack(addr)) { - bottom = stack_bottom(); - } else if (has_fake_stack()) { - bottom = fake_stack()->AddrIsInFakeStack(addr); - CHECK(bottom); - access->offset = addr - bottom; - access->frame_pc = ((uptr*)bottom)[2]; - access->frame_descr = (const char *)((uptr*)bottom)[1]; - return true; - } - uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8); // align addr. - uptr mem_ptr = RoundDownTo(aligned_addr, SHADOW_GRANULARITY); - u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr); - u8 *shadow_bottom = (u8*)MemToShadow(bottom); - - while (shadow_ptr >= shadow_bottom && - *shadow_ptr != kAsanStackLeftRedzoneMagic) { - shadow_ptr--; - mem_ptr -= SHADOW_GRANULARITY; - } - - while (shadow_ptr >= shadow_bottom && - *shadow_ptr == kAsanStackLeftRedzoneMagic) { - shadow_ptr--; - mem_ptr -= SHADOW_GRANULARITY; - } - - if (shadow_ptr < shadow_bottom) { - return false; - } - - uptr* ptr = (uptr*)(mem_ptr + SHADOW_GRANULARITY); - CHECK(ptr[0] == kCurrentStackFrameMagic); - access->offset = addr - (uptr)ptr; - access->frame_pc = ptr[2]; - access->frame_descr = (const char*)ptr[1]; - return true; -} - -uptr AsanThread::GetStackVariableShadowStart(uptr addr) { - uptr bottom = 0; - if (AddrIsInStack(addr)) { - bottom = stack_bottom(); - } else if (has_fake_stack()) { - bottom = fake_stack()->AddrIsInFakeStack(addr); - CHECK(bottom); - } else - return 0; - - uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8); // align addr. 
- u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr); - u8 *shadow_bottom = (u8*)MemToShadow(bottom); - - while (shadow_ptr >= shadow_bottom && - (*shadow_ptr != kAsanStackLeftRedzoneMagic && - *shadow_ptr != kAsanStackMidRedzoneMagic && - *shadow_ptr != kAsanStackRightRedzoneMagic)) - shadow_ptr--; - - return (uptr)shadow_ptr + 1; -} - -bool AsanThread::AddrIsInStack(uptr addr) { - const auto bounds = GetStackBounds(); - return addr >= bounds.bottom && addr < bounds.top; -} - -static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base, - void *addr) { - AsanThreadContext *tctx = static_cast(tctx_base); - AsanThread *t = tctx->thread; - if (!t) return false; - if (t->AddrIsInStack((uptr)addr)) return true; - if (t->has_fake_stack() && t->fake_stack()->AddrIsInFakeStack((uptr)addr)) - return true; - return false; -} - -AsanThread *GetCurrentThread() { - if (SANITIZER_RTEMS && !asan_inited) - return nullptr; - - AsanThreadContext *context = - reinterpret_cast(AsanTSDGet()); - if (!context) { - if (SANITIZER_ANDROID) { - // On Android, libc constructor is called _after_ asan_init, and cleans up - // TSD. Try to figure out if this is still the main thread by the stack - // address. We are not entirely sure that we have correct main thread - // limits, so only do this magic on Android, and only if the found thread - // is the main thread. - AsanThreadContext *tctx = GetThreadContextByTidLocked(0); - if (tctx && ThreadStackContainsAddress(tctx, &context)) { - SetCurrentThread(tctx->thread); - return tctx->thread; - } - } - return nullptr; - } - return context->thread; -} - -void SetCurrentThread(AsanThread *t) { - CHECK(t->context()); - VReport(2, "SetCurrentThread: %p for thread %p\n", t->context(), - (void *)GetThreadSelf()); - // Make sure we do not reset the current AsanThread. - CHECK_EQ(0, AsanTSDGet()); - AsanTSDSet(t->context()); - CHECK_EQ(t->context(), AsanTSDGet()); -} - -u32 GetCurrentTidOrInvalid() { - AsanThread *t = GetCurrentThread(); - return t ? t->tid() : kInvalidTid; -} - -AsanThread *FindThreadByStackAddress(uptr addr) { - asanThreadRegistry().CheckLocked(); - AsanThreadContext *tctx = static_cast( - asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress, - (void *)addr)); - return tctx ? tctx->thread : nullptr; -} - -void EnsureMainThreadIDIsCorrect() { - AsanThreadContext *context = - reinterpret_cast(AsanTSDGet()); - if (context && (context->tid == 0)) - context->os_id = GetTid(); -} - -__asan::AsanThread *GetAsanThreadByOsIDLocked(tid_t os_id) { - __asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>( - __asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id)); - if (!context) return nullptr; - return context->thread; -} -} // namespace __asan - -// --- Implementation of LSan-specific functions --- {{{1 -namespace __lsan { -bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end, - uptr *tls_begin, uptr *tls_end, uptr *cache_begin, - uptr *cache_end, DTLS **dtls) { - __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id); - if (!t) return false; - *stack_begin = t->stack_bottom(); - *stack_end = t->stack_top(); - *tls_begin = t->tls_begin(); - *tls_end = t->tls_end(); - // ASan doesn't keep allocator caches in TLS, so these are unused. 
- *cache_begin = 0; - *cache_end = 0; - *dtls = t->dtls(); - return true; -} - -void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback, - void *arg) { - __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id); - if (t && t->has_fake_stack()) - t->fake_stack()->ForEachFakeFrame(callback, arg); -} - -void LockThreadRegistry() { - __asan::asanThreadRegistry().Lock(); -} - -void UnlockThreadRegistry() { - __asan::asanThreadRegistry().Unlock(); -} - -ThreadRegistry *GetThreadRegistryLocked() { - __asan::asanThreadRegistry().CheckLocked(); - return &__asan::asanThreadRegistry(); -} - -void EnsureMainThreadIDIsCorrect() { - __asan::EnsureMainThreadIDIsCorrect(); -} -} // namespace __lsan - -// ---------------------- Interface ---------------- {{{1 -using namespace __asan; // NOLINT - -extern "C" { -SANITIZER_INTERFACE_ATTRIBUTE -void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom, - uptr size) { - AsanThread *t = GetCurrentThread(); - if (!t) { - VReport(1, "__asan_start_switch_fiber called from unknown thread\n"); - return; - } - t->StartSwitchFiber((FakeStack**)fakestacksave, (uptr)bottom, size); -} - -SANITIZER_INTERFACE_ATTRIBUTE -void __sanitizer_finish_switch_fiber(void* fakestack, - const void **bottom_old, - uptr *size_old) { - AsanThread *t = GetCurrentThread(); - if (!t) { - VReport(1, "__asan_finish_switch_fiber called from unknown thread\n"); - return; - } - t->FinishSwitchFiber((FakeStack*)fakestack, - (uptr*)bottom_old, - (uptr*)size_old); -} -} diff --git a/lib/asan/asan_thread.cpp b/lib/asan/asan_thread.cpp new file mode 100644 index 000000000000..6734d9a1668c --- /dev/null +++ b/lib/asan/asan_thread.cpp @@ -0,0 +1,536 @@ +//===-- asan_thread.cpp ---------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Thread-related code. +//===----------------------------------------------------------------------===// +#include "asan_allocator.h" +#include "asan_interceptors.h" +#include "asan_poisoning.h" +#include "asan_stack.h" +#include "asan_thread.h" +#include "asan_mapping.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_placement_new.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +#include "sanitizer_common/sanitizer_tls_get_addr.h" +#include "lsan/lsan_common.h" + +namespace __asan { + +// AsanThreadContext implementation. + +void AsanThreadContext::OnCreated(void *arg) { + CreateThreadContextArgs *args = static_cast(arg); + if (args->stack) + stack_id = StackDepotPut(*args->stack); + thread = args->thread; + thread->set_context(this); +} + +void AsanThreadContext::OnFinished() { + // Drop the link to the AsanThread object. 
+ thread = nullptr; +} + +// MIPS requires aligned address +static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)]; +static ThreadRegistry *asan_thread_registry; + +static BlockingMutex mu_for_thread_context(LINKER_INITIALIZED); +static LowLevelAllocator allocator_for_thread_context; + +static ThreadContextBase *GetAsanThreadContext(u32 tid) { + BlockingMutexLock lock(&mu_for_thread_context); + return new(allocator_for_thread_context) AsanThreadContext(tid); +} + +ThreadRegistry &asanThreadRegistry() { + static bool initialized; + // Don't worry about thread_safety - this should be called when there is + // a single thread. + if (!initialized) { + // Never reuse ASan threads: we store pointer to AsanThreadContext + // in TSD and can't reliably tell when no more TSD destructors will + // be called. It would be wrong to reuse AsanThreadContext for another + // thread before all TSD destructors will be called for it. + asan_thread_registry = new(thread_registry_placeholder) ThreadRegistry( + GetAsanThreadContext, kMaxNumberOfThreads, kMaxNumberOfThreads); + initialized = true; + } + return *asan_thread_registry; +} + +AsanThreadContext *GetThreadContextByTidLocked(u32 tid) { + return static_cast( + asanThreadRegistry().GetThreadLocked(tid)); +} + +// AsanThread implementation. + +AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg, + u32 parent_tid, StackTrace *stack, + bool detached) { + uptr PageSize = GetPageSizeCached(); + uptr size = RoundUpTo(sizeof(AsanThread), PageSize); + AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__); + thread->start_routine_ = start_routine; + thread->arg_ = arg; + AsanThreadContext::CreateThreadContextArgs args = {thread, stack}; + asanThreadRegistry().CreateThread(*reinterpret_cast(thread), detached, + parent_tid, &args); + + return thread; +} + +void AsanThread::TSDDtor(void *tsd) { + AsanThreadContext *context = (AsanThreadContext*)tsd; + VReport(1, "T%d TSDDtor\n", context->tid); + if (context->thread) + context->thread->Destroy(); +} + +void AsanThread::Destroy() { + int tid = this->tid(); + VReport(1, "T%d exited\n", tid); + + malloc_storage().CommitBack(); + if (common_flags()->use_sigaltstack) UnsetAlternateSignalStack(); + asanThreadRegistry().FinishThread(tid); + FlushToDeadThreadStats(&stats_); + // We also clear the shadow on thread destruction because + // some code may still be executing in later TSD destructors + // and we don't want it to have any poisoned stack. 
+  ClearShadowForThreadStackAndTLS();
+  DeleteFakeStack(tid);
+  uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
+  UnmapOrDie(this, size);
+  DTLS_Destroy();
+}
+
+void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
+                                  uptr size) {
+  if (atomic_load(&stack_switching_, memory_order_relaxed)) {
+    Report("ERROR: starting fiber switch while in fiber switch\n");
+    Die();
+  }
+
+  next_stack_bottom_ = bottom;
+  next_stack_top_ = bottom + size;
+  atomic_store(&stack_switching_, 1, memory_order_release);
+
+  FakeStack *current_fake_stack = fake_stack_;
+  if (fake_stack_save)
+    *fake_stack_save = fake_stack_;
+  fake_stack_ = nullptr;
+  SetTLSFakeStack(nullptr);
+  // if fake_stack_save is null, the fiber will die, delete the fakestack
+  if (!fake_stack_save && current_fake_stack)
+    current_fake_stack->Destroy(this->tid());
+}
+
+void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
+                                   uptr *bottom_old,
+                                   uptr *size_old) {
+  if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
+    Report("ERROR: finishing a fiber switch that has not started\n");
+    Die();
+  }
+
+  if (fake_stack_save) {
+    SetTLSFakeStack(fake_stack_save);
+    fake_stack_ = fake_stack_save;
+  }
+
+  if (bottom_old)
+    *bottom_old = stack_bottom_;
+  if (size_old)
+    *size_old = stack_top_ - stack_bottom_;
+  stack_bottom_ = next_stack_bottom_;
+  stack_top_ = next_stack_top_;
+  atomic_store(&stack_switching_, 0, memory_order_release);
+  next_stack_top_ = 0;
+  next_stack_bottom_ = 0;
+}
+
+inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
+  if (!atomic_load(&stack_switching_, memory_order_acquire)) {
+    // Make sure the stack bounds are fully initialized.
+    if (stack_bottom_ >= stack_top_) return {0, 0};
+    return {stack_bottom_, stack_top_};
+  }
+  char local;
+  const uptr cur_stack = (uptr)&local;
+  // Note: need to check next stack first, because FinishSwitchFiber
+  // may be in process of overwriting stack_top_/bottom_. But in such case
+  // we are already on the next stack.
+  if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_)
+    return {next_stack_bottom_, next_stack_top_};
+  return {stack_bottom_, stack_top_};
+}
+
+uptr AsanThread::stack_top() {
+  return GetStackBounds().top;
+}
+
+uptr AsanThread::stack_bottom() {
+  return GetStackBounds().bottom;
+}
+
+uptr AsanThread::stack_size() {
+  const auto bounds = GetStackBounds();
+  return bounds.top - bounds.bottom;
+}
+
+// We want to create the FakeStack lazily on the first use, but not earlier
+// than the stack size is known; the procedure has to be async-signal safe.
+FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
+  uptr stack_size = this->stack_size();
+  if (stack_size == 0)  // stack_size is not yet available, don't use FakeStack.
+    return nullptr;
+  uptr old_val = 0;
+  // fake_stack_ has 3 states:
+  // 0   -- not initialized
+  // 1   -- being initialized
+  // ptr -- initialized
+  // This CAS checks if the state was 0 and if so changes it to state 1,
+  // if that was successful, it initializes the pointer.
+  if (atomic_compare_exchange_strong(
+          reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
+          memory_order_relaxed)) {
+    uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size));
+    CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
+    stack_size_log =
+        Min(stack_size_log, static_cast<uptr>(flags()->max_uar_stack_size_log));
+    stack_size_log =
+        Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log));
+    fake_stack_ = FakeStack::Create(stack_size_log);
+    SetTLSFakeStack(fake_stack_);
+    return fake_stack_;
+  }
+  return nullptr;
+}
+
+void AsanThread::Init(const InitOptions *options) {
+  next_stack_top_ = next_stack_bottom_ = 0;
+  atomic_store(&stack_switching_, false, memory_order_release);
+  CHECK_EQ(this->stack_size(), 0U);
+  SetThreadStackAndTls(options);
+  if (stack_top_ != stack_bottom_) {
+    CHECK_GT(this->stack_size(), 0U);
+    CHECK(AddrIsInMem(stack_bottom_));
+    CHECK(AddrIsInMem(stack_top_ - 1));
+  }
+  ClearShadowForThreadStackAndTLS();
+  fake_stack_ = nullptr;
+  if (__asan_option_detect_stack_use_after_return)
+    AsyncSignalSafeLazyInitFakeStack();
+  int local = 0;
+  VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
+          (void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
+          &local);
+}
+
+// Fuchsia and RTEMS don't use ThreadStart.
+// asan_fuchsia.c/asan_rtems.c define CreateMainThread and
+// SetThreadStackAndTls.
+#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+
+thread_return_t AsanThread::ThreadStart(
+    tid_t os_id, atomic_uintptr_t *signal_thread_is_registered) {
+  Init();
+  asanThreadRegistry().StartThread(tid(), os_id, ThreadType::Regular, nullptr);
+  if (signal_thread_is_registered)
+    atomic_store(signal_thread_is_registered, 1, memory_order_release);
+
+  if (common_flags()->use_sigaltstack) SetAlternateSignalStack();
+
+  if (!start_routine_) {
+    // start_routine_ == 0 if we're on the main thread or on one of the
+    // OS X libdispatch worker threads. But nobody is supposed to call
+    // ThreadStart() for the worker threads.
+    CHECK_EQ(tid(), 0);
+    return 0;
+  }
+
+  thread_return_t res = start_routine_(arg_);
+
+  // On POSIX systems we defer this to the TSD destructor. LSan will consider
+  // the thread's memory as non-live from the moment we call Destroy(), even
+  // though that memory might contain pointers to heap objects which will be
+  // cleaned up by a user-defined TSD destructor. Thus, calling Destroy() before
+  // the TSD destructors have run might cause false positives in LSan.
+  if (!SANITIZER_POSIX)
+    this->Destroy();
+
+  return res;
+}
+
+AsanThread *CreateMainThread() {
+  AsanThread *main_thread = AsanThread::Create(
+      /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0,
+      /* stack */ nullptr, /* detached */ true);
+  SetCurrentThread(main_thread);
+  main_thread->ThreadStart(internal_getpid(),
+                           /* signal_thread_is_registered */ nullptr);
+  return main_thread;
+}
+
+// This implementation doesn't use the argument, which is just passed down
+// from the caller of Init (which see, above). It's only there to support
+// OS-specific implementations that need more information passed through.
+void AsanThread::SetThreadStackAndTls(const InitOptions *options) { + DCHECK_EQ(options, nullptr); + uptr tls_size = 0; + uptr stack_size = 0; + GetThreadStackAndTls(tid() == 0, &stack_bottom_, &stack_size, &tls_begin_, + &tls_size); + stack_top_ = stack_bottom_ + stack_size; + tls_end_ = tls_begin_ + tls_size; + dtls_ = DTLS_Get(); + + if (stack_top_ != stack_bottom_) { + int local; + CHECK(AddrIsInStack((uptr)&local)); + } +} + +#endif // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS + +void AsanThread::ClearShadowForThreadStackAndTLS() { + if (stack_top_ != stack_bottom_) + PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0); + if (tls_begin_ != tls_end_) { + uptr tls_begin_aligned = RoundDownTo(tls_begin_, SHADOW_GRANULARITY); + uptr tls_end_aligned = RoundUpTo(tls_end_, SHADOW_GRANULARITY); + FastPoisonShadowPartialRightRedzone(tls_begin_aligned, + tls_end_ - tls_begin_aligned, + tls_end_aligned - tls_end_, 0); + } +} + +bool AsanThread::GetStackFrameAccessByAddr(uptr addr, + StackFrameAccess *access) { + if (stack_top_ == stack_bottom_) + return false; + + uptr bottom = 0; + if (AddrIsInStack(addr)) { + bottom = stack_bottom(); + } else if (has_fake_stack()) { + bottom = fake_stack()->AddrIsInFakeStack(addr); + CHECK(bottom); + access->offset = addr - bottom; + access->frame_pc = ((uptr*)bottom)[2]; + access->frame_descr = (const char *)((uptr*)bottom)[1]; + return true; + } + uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8); // align addr. + uptr mem_ptr = RoundDownTo(aligned_addr, SHADOW_GRANULARITY); + u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr); + u8 *shadow_bottom = (u8*)MemToShadow(bottom); + + while (shadow_ptr >= shadow_bottom && + *shadow_ptr != kAsanStackLeftRedzoneMagic) { + shadow_ptr--; + mem_ptr -= SHADOW_GRANULARITY; + } + + while (shadow_ptr >= shadow_bottom && + *shadow_ptr == kAsanStackLeftRedzoneMagic) { + shadow_ptr--; + mem_ptr -= SHADOW_GRANULARITY; + } + + if (shadow_ptr < shadow_bottom) { + return false; + } + + uptr* ptr = (uptr*)(mem_ptr + SHADOW_GRANULARITY); + CHECK(ptr[0] == kCurrentStackFrameMagic); + access->offset = addr - (uptr)ptr; + access->frame_pc = ptr[2]; + access->frame_descr = (const char*)ptr[1]; + return true; +} + +uptr AsanThread::GetStackVariableShadowStart(uptr addr) { + uptr bottom = 0; + if (AddrIsInStack(addr)) { + bottom = stack_bottom(); + } else if (has_fake_stack()) { + bottom = fake_stack()->AddrIsInFakeStack(addr); + CHECK(bottom); + } else { + return 0; + } + + uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8); // align addr. 
+ u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr); + u8 *shadow_bottom = (u8*)MemToShadow(bottom); + + while (shadow_ptr >= shadow_bottom && + (*shadow_ptr != kAsanStackLeftRedzoneMagic && + *shadow_ptr != kAsanStackMidRedzoneMagic && + *shadow_ptr != kAsanStackRightRedzoneMagic)) + shadow_ptr--; + + return (uptr)shadow_ptr + 1; +} + +bool AsanThread::AddrIsInStack(uptr addr) { + const auto bounds = GetStackBounds(); + return addr >= bounds.bottom && addr < bounds.top; +} + +static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base, + void *addr) { + AsanThreadContext *tctx = static_cast(tctx_base); + AsanThread *t = tctx->thread; + if (!t) return false; + if (t->AddrIsInStack((uptr)addr)) return true; + if (t->has_fake_stack() && t->fake_stack()->AddrIsInFakeStack((uptr)addr)) + return true; + return false; +} + +AsanThread *GetCurrentThread() { + if (SANITIZER_RTEMS && !asan_inited) + return nullptr; + + AsanThreadContext *context = + reinterpret_cast(AsanTSDGet()); + if (!context) { + if (SANITIZER_ANDROID) { + // On Android, libc constructor is called _after_ asan_init, and cleans up + // TSD. Try to figure out if this is still the main thread by the stack + // address. We are not entirely sure that we have correct main thread + // limits, so only do this magic on Android, and only if the found thread + // is the main thread. + AsanThreadContext *tctx = GetThreadContextByTidLocked(0); + if (tctx && ThreadStackContainsAddress(tctx, &context)) { + SetCurrentThread(tctx->thread); + return tctx->thread; + } + } + return nullptr; + } + return context->thread; +} + +void SetCurrentThread(AsanThread *t) { + CHECK(t->context()); + VReport(2, "SetCurrentThread: %p for thread %p\n", t->context(), + (void *)GetThreadSelf()); + // Make sure we do not reset the current AsanThread. + CHECK_EQ(0, AsanTSDGet()); + AsanTSDSet(t->context()); + CHECK_EQ(t->context(), AsanTSDGet()); +} + +u32 GetCurrentTidOrInvalid() { + AsanThread *t = GetCurrentThread(); + return t ? t->tid() : kInvalidTid; +} + +AsanThread *FindThreadByStackAddress(uptr addr) { + asanThreadRegistry().CheckLocked(); + AsanThreadContext *tctx = static_cast( + asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress, + (void *)addr)); + return tctx ? tctx->thread : nullptr; +} + +void EnsureMainThreadIDIsCorrect() { + AsanThreadContext *context = + reinterpret_cast(AsanTSDGet()); + if (context && (context->tid == 0)) + context->os_id = GetTid(); +} + +__asan::AsanThread *GetAsanThreadByOsIDLocked(tid_t os_id) { + __asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>( + __asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id)); + if (!context) return nullptr; + return context->thread; +} +} // namespace __asan + +// --- Implementation of LSan-specific functions --- {{{1 +namespace __lsan { +bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end, + uptr *tls_begin, uptr *tls_end, uptr *cache_begin, + uptr *cache_end, DTLS **dtls) { + __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id); + if (!t) return false; + *stack_begin = t->stack_bottom(); + *stack_end = t->stack_top(); + *tls_begin = t->tls_begin(); + *tls_end = t->tls_end(); + // ASan doesn't keep allocator caches in TLS, so these are unused. 
+ *cache_begin = 0; + *cache_end = 0; + *dtls = t->dtls(); + return true; +} + +void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback, + void *arg) { + __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id); + if (t && t->has_fake_stack()) + t->fake_stack()->ForEachFakeFrame(callback, arg); +} + +void LockThreadRegistry() { + __asan::asanThreadRegistry().Lock(); +} + +void UnlockThreadRegistry() { + __asan::asanThreadRegistry().Unlock(); +} + +ThreadRegistry *GetThreadRegistryLocked() { + __asan::asanThreadRegistry().CheckLocked(); + return &__asan::asanThreadRegistry(); +} + +void EnsureMainThreadIDIsCorrect() { + __asan::EnsureMainThreadIDIsCorrect(); +} +} // namespace __lsan + +// ---------------------- Interface ---------------- {{{1 +using namespace __asan; + +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom, + uptr size) { + AsanThread *t = GetCurrentThread(); + if (!t) { + VReport(1, "__asan_start_switch_fiber called from unknown thread\n"); + return; + } + t->StartSwitchFiber((FakeStack**)fakestacksave, (uptr)bottom, size); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_finish_switch_fiber(void* fakestack, + const void **bottom_old, + uptr *size_old) { + AsanThread *t = GetCurrentThread(); + if (!t) { + VReport(1, "__asan_finish_switch_fiber called from unknown thread\n"); + return; + } + t->FinishSwitchFiber((FakeStack*)fakestack, + (uptr*)bottom_old, + (uptr*)size_old); +} +} diff --git a/lib/asan/asan_thread.h b/lib/asan/asan_thread.h index d725e88864eb..c503f507059d 100644 --- a/lib/asan/asan_thread.h +++ b/lib/asan/asan_thread.h @@ -8,7 +8,7 @@ // // This file is a part of AddressSanitizer, an address sanity checker. // -// ASan-private header for asan_thread.cc. +// ASan-private header for asan_thread.cpp. //===----------------------------------------------------------------------===// #ifndef ASAN_THREAD_H diff --git a/lib/asan/asan_win.cc b/lib/asan/asan_win.cc deleted file mode 100644 index f7601f3301ea..000000000000 --- a/lib/asan/asan_win.cc +++ /dev/null @@ -1,401 +0,0 @@ -//===-- asan_win.cc -------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Windows-specific details. 
-//===----------------------------------------------------------------------===// - -#include "sanitizer_common/sanitizer_platform.h" -#if SANITIZER_WINDOWS -#define WIN32_LEAN_AND_MEAN -#include - -#include - -#include "asan_interceptors.h" -#include "asan_internal.h" -#include "asan_mapping.h" -#include "asan_report.h" -#include "asan_stack.h" -#include "asan_thread.h" -#include "sanitizer_common/sanitizer_libc.h" -#include "sanitizer_common/sanitizer_mutex.h" -#include "sanitizer_common/sanitizer_win.h" -#include "sanitizer_common/sanitizer_win_defs.h" - -using namespace __asan; // NOLINT - -extern "C" { -SANITIZER_INTERFACE_ATTRIBUTE -int __asan_should_detect_stack_use_after_return() { - __asan_init(); - return __asan_option_detect_stack_use_after_return; -} - -SANITIZER_INTERFACE_ATTRIBUTE -uptr __asan_get_shadow_memory_dynamic_address() { - __asan_init(); - return __asan_shadow_memory_dynamic_address; -} -} // extern "C" - -// ---------------------- Windows-specific interceptors ---------------- {{{ -static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler; -static LPTOP_LEVEL_EXCEPTION_FILTER user_seh_handler; - -extern "C" SANITIZER_INTERFACE_ATTRIBUTE -long __asan_unhandled_exception_filter(EXCEPTION_POINTERS *info) { - EXCEPTION_RECORD *exception_record = info->ExceptionRecord; - CONTEXT *context = info->ContextRecord; - - // FIXME: Handle EXCEPTION_STACK_OVERFLOW here. - - SignalContext sig(exception_record, context); - ReportDeadlySignal(sig); - UNREACHABLE("returned from reporting deadly signal"); -} - -// Wrapper SEH Handler. If the exception should be handled by asan, we call -// __asan_unhandled_exception_filter, otherwise, we execute the user provided -// exception handler or the default. -static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) { - DWORD exception_code = info->ExceptionRecord->ExceptionCode; - if (__sanitizer::IsHandledDeadlyException(exception_code)) - return __asan_unhandled_exception_filter(info); - if (user_seh_handler) - return user_seh_handler(info); - // Bubble out to the default exception filter. - if (default_seh_handler) - return default_seh_handler(info); - return EXCEPTION_CONTINUE_SEARCH; -} - -INTERCEPTOR_WINAPI(LPTOP_LEVEL_EXCEPTION_FILTER, SetUnhandledExceptionFilter, - LPTOP_LEVEL_EXCEPTION_FILTER ExceptionFilter) { - CHECK(REAL(SetUnhandledExceptionFilter)); - if (ExceptionFilter == &SEHHandler) - return REAL(SetUnhandledExceptionFilter)(ExceptionFilter); - // We record the user provided exception handler to be called for all the - // exceptions unhandled by asan. - Swap(ExceptionFilter, user_seh_handler); - return ExceptionFilter; -} - -INTERCEPTOR_WINAPI(void, RtlRaiseException, EXCEPTION_RECORD *ExceptionRecord) { - CHECK(REAL(RtlRaiseException)); - // This is a noreturn function, unless it's one of the exceptions raised to - // communicate with the debugger, such as the one from OutputDebugString. 
- if (ExceptionRecord->ExceptionCode != DBG_PRINTEXCEPTION_C) - __asan_handle_no_return(); - REAL(RtlRaiseException)(ExceptionRecord); -} - -INTERCEPTOR_WINAPI(void, RaiseException, void *a, void *b, void *c, void *d) { - CHECK(REAL(RaiseException)); - __asan_handle_no_return(); - REAL(RaiseException)(a, b, c, d); -} - -#ifdef _WIN64 - -INTERCEPTOR_WINAPI(EXCEPTION_DISPOSITION, __C_specific_handler, - _EXCEPTION_RECORD *a, void *b, _CONTEXT *c, - _DISPATCHER_CONTEXT *d) { // NOLINT - CHECK(REAL(__C_specific_handler)); - __asan_handle_no_return(); - return REAL(__C_specific_handler)(a, b, c, d); -} - -#else - -INTERCEPTOR(int, _except_handler3, void *a, void *b, void *c, void *d) { - CHECK(REAL(_except_handler3)); - __asan_handle_no_return(); - return REAL(_except_handler3)(a, b, c, d); -} - -#if ASAN_DYNAMIC -// This handler is named differently in -MT and -MD CRTs. -#define _except_handler4 _except_handler4_common -#endif -INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) { - CHECK(REAL(_except_handler4)); - __asan_handle_no_return(); - return REAL(_except_handler4)(a, b, c, d); -} -#endif - -static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) { - AsanThread *t = (AsanThread *)arg; - SetCurrentThread(t); - return t->ThreadStart(GetTid(), /* signal_thread_is_registered */ nullptr); -} - -INTERCEPTOR_WINAPI(HANDLE, CreateThread, LPSECURITY_ATTRIBUTES security, - SIZE_T stack_size, LPTHREAD_START_ROUTINE start_routine, - void *arg, DWORD thr_flags, DWORD *tid) { - // Strict init-order checking is thread-hostile. - if (flags()->strict_init_order) - StopInitOrderChecking(); - GET_STACK_TRACE_THREAD; - // FIXME: The CreateThread interceptor is not the same as a pthread_create - // one. This is a bandaid fix for PR22025. - bool detached = false; // FIXME: how can we determine it on Windows? - u32 current_tid = GetCurrentTidOrInvalid(); - AsanThread *t = - AsanThread::Create(start_routine, arg, current_tid, &stack, detached); - return REAL(CreateThread)(security, stack_size, asan_thread_start, t, - thr_flags, tid); -} - -// }}} - -namespace __asan { - -void InitializePlatformInterceptors() { - // The interceptors were not designed to be removable, so we have to keep this - // module alive for the life of the process. - HMODULE pinned; - CHECK(GetModuleHandleExW( - GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_PIN, - (LPCWSTR)&InitializePlatformInterceptors, &pinned)); - - ASAN_INTERCEPT_FUNC(CreateThread); - ASAN_INTERCEPT_FUNC(SetUnhandledExceptionFilter); - -#ifdef _WIN64 - ASAN_INTERCEPT_FUNC(__C_specific_handler); -#else - ASAN_INTERCEPT_FUNC(_except_handler3); - ASAN_INTERCEPT_FUNC(_except_handler4); -#endif - - // Try to intercept kernel32!RaiseException, and if that fails, intercept - // ntdll!RtlRaiseException instead. - if (!::__interception::OverrideFunction("RaiseException", - (uptr)WRAP(RaiseException), - (uptr *)&REAL(RaiseException))) { - CHECK(::__interception::OverrideFunction("RtlRaiseException", - (uptr)WRAP(RtlRaiseException), - (uptr *)&REAL(RtlRaiseException))); - } -} - -void AsanApplyToGlobals(globals_op_fptr op, const void *needle) { - UNIMPLEMENTED(); -} - -// ---------------------- TSD ---------------- {{{ -static bool tsd_key_inited = false; - -static __declspec(thread) void *fake_tsd = 0; - -// https://docs.microsoft.com/en-us/windows/desktop/api/winternl/ns-winternl-_teb -// "[This structure may be altered in future versions of Windows. 
Applications -// should use the alternate functions listed in this topic.]" -typedef struct _TEB { - PVOID Reserved1[12]; - // PVOID ThreadLocalStoragePointer; is here, at the last field in Reserved1. - PVOID ProcessEnvironmentBlock; - PVOID Reserved2[399]; - BYTE Reserved3[1952]; - PVOID TlsSlots[64]; - BYTE Reserved4[8]; - PVOID Reserved5[26]; - PVOID ReservedForOle; - PVOID Reserved6[4]; - PVOID TlsExpansionSlots; -} TEB, *PTEB; - -constexpr size_t TEB_RESERVED_FIELDS_THREAD_LOCAL_STORAGE_OFFSET = 11; -BOOL IsTlsInitialized() { - PTEB teb = (PTEB)NtCurrentTeb(); - return teb->Reserved1[TEB_RESERVED_FIELDS_THREAD_LOCAL_STORAGE_OFFSET] != - nullptr; -} - -void AsanTSDInit(void (*destructor)(void *tsd)) { - // FIXME: we're ignoring the destructor for now. - tsd_key_inited = true; -} - -void *AsanTSDGet() { - CHECK(tsd_key_inited); - return IsTlsInitialized() ? fake_tsd : nullptr; -} - -void AsanTSDSet(void *tsd) { - CHECK(tsd_key_inited); - fake_tsd = tsd; -} - -void PlatformTSDDtor(void *tsd) { AsanThread::TSDDtor(tsd); } -// }}} - -// ---------------------- Various stuff ---------------- {{{ -void *AsanDoesNotSupportStaticLinkage() { -#if defined(_DEBUG) -#error Please build the runtime with a non-debug CRT: /MD or /MT -#endif - return 0; -} - -uptr FindDynamicShadowStart() { - uptr granularity = GetMmapGranularity(); - uptr alignment = 8 * granularity; - uptr left_padding = granularity; - uptr space_size = kHighShadowEnd + left_padding; - uptr shadow_start = FindAvailableMemoryRange(space_size, alignment, - granularity, nullptr, nullptr); - CHECK_NE((uptr)0, shadow_start); - CHECK(IsAligned(shadow_start, alignment)); - return shadow_start; -} - -void AsanCheckDynamicRTPrereqs() {} - -void AsanCheckIncompatibleRT() {} - -void ReadContextStack(void *context, uptr *stack, uptr *ssize) { - UNIMPLEMENTED(); -} - -void AsanOnDeadlySignal(int, void *siginfo, void *context) { UNIMPLEMENTED(); } - -#if SANITIZER_WINDOWS64 -// Exception handler for dealing with shadow memory. -static LONG CALLBACK -ShadowExceptionHandler(PEXCEPTION_POINTERS exception_pointers) { - uptr page_size = GetPageSizeCached(); - // Only handle access violations. - if (exception_pointers->ExceptionRecord->ExceptionCode != - EXCEPTION_ACCESS_VIOLATION || - exception_pointers->ExceptionRecord->NumberParameters < 2) { - __asan_handle_no_return(); - return EXCEPTION_CONTINUE_SEARCH; - } - - // Only handle access violations that land within the shadow memory. - uptr addr = - (uptr)(exception_pointers->ExceptionRecord->ExceptionInformation[1]); - - // Check valid shadow range. - if (!AddrIsInShadow(addr)) { - __asan_handle_no_return(); - return EXCEPTION_CONTINUE_SEARCH; - } - - // This is an access violation while trying to read from the shadow. Commit - // the relevant page and let execution continue. - - // Determine the address of the page that is being accessed. - uptr page = RoundDownTo(addr, page_size); - - // Commit the page. - uptr result = - (uptr)::VirtualAlloc((LPVOID)page, page_size, MEM_COMMIT, PAGE_READWRITE); - if (result != page) - return EXCEPTION_CONTINUE_SEARCH; - - // The page mapping succeeded, so continue execution as usual. - return EXCEPTION_CONTINUE_EXECUTION; -} - -#endif - -void InitializePlatformExceptionHandlers() { -#if SANITIZER_WINDOWS64 - // On Win64, we map memory on demand with access violation handler. - // Install our exception handler. 
- CHECK(AddVectoredExceptionHandler(TRUE, &ShadowExceptionHandler)); -#endif -} - -bool IsSystemHeapAddress(uptr addr) { - return ::HeapValidate(GetProcessHeap(), 0, (void *)addr) != FALSE; -} - -// We want to install our own exception handler (EH) to print helpful reports -// on access violations and whatnot. Unfortunately, the CRT initializers assume -// they are run before any user code and drop any previously-installed EHs on -// the floor, so we can't install our handler inside __asan_init. -// (See crt0dat.c in the CRT sources for the details) -// -// Things get even more complicated with the dynamic runtime, as it finishes its -// initialization before the .exe module CRT begins to initialize. -// -// For the static runtime (-MT), it's enough to put a callback to -// __asan_set_seh_filter in the last section for C initializers. -// -// For the dynamic runtime (-MD), we want link the same -// asan_dynamic_runtime_thunk.lib to all the modules, thus __asan_set_seh_filter -// will be called for each instrumented module. This ensures that at least one -// __asan_set_seh_filter call happens after the .exe module CRT is initialized. -extern "C" SANITIZER_INTERFACE_ATTRIBUTE int __asan_set_seh_filter() { - // We should only store the previous handler if it's not our own handler in - // order to avoid loops in the EH chain. - auto prev_seh_handler = SetUnhandledExceptionFilter(SEHHandler); - if (prev_seh_handler != &SEHHandler) - default_seh_handler = prev_seh_handler; - return 0; -} - -bool HandleDlopenInit() { - // Not supported on this platform. - static_assert(!SANITIZER_SUPPORTS_INIT_FOR_DLOPEN, - "Expected SANITIZER_SUPPORTS_INIT_FOR_DLOPEN to be false"); - return false; -} - -#if !ASAN_DYNAMIC -// The CRT runs initializers in this order: -// - C initializers, from XIA to XIZ -// - C++ initializers, from XCA to XCZ -// Prior to 2015, the CRT set the unhandled exception filter at priority XIY, -// near the end of C initialization. Starting in 2015, it was moved to the -// beginning of C++ initialization. We set our priority to XCAB to run -// immediately after the CRT runs. This way, our exception filter is called -// first and we can delegate to their filter if appropriate. -#pragma section(".CRT$XCAB", long, read) // NOLINT -__declspec(allocate(".CRT$XCAB")) int (*__intercept_seh)() = - __asan_set_seh_filter; - -// Piggyback on the TLS initialization callback directory to initialize asan as -// early as possible. Initializers in .CRT$XL* are called directly by ntdll, -// which run before the CRT. Users also add code to .CRT$XLC, so it's important -// to run our initializers first. -static void NTAPI asan_thread_init(void *module, DWORD reason, void *reserved) { - if (reason == DLL_PROCESS_ATTACH) - __asan_init(); -} - -#pragma section(".CRT$XLAB", long, read) // NOLINT -__declspec(allocate(".CRT$XLAB")) void(NTAPI *__asan_tls_init)( - void *, unsigned long, void *) = asan_thread_init; -#endif - -static void NTAPI asan_thread_exit(void *module, DWORD reason, void *reserved) { - if (reason == DLL_THREAD_DETACH) { - // Unpoison the thread's stack because the memory may be re-used. 
- NT_TIB *tib = (NT_TIB *)NtCurrentTeb(); - uptr stackSize = (uptr)tib->StackBase - (uptr)tib->StackLimit; - __asan_unpoison_memory_region(tib->StackLimit, stackSize); - } -} - -#pragma section(".CRT$XLY", long, read) // NOLINT -__declspec(allocate(".CRT$XLY")) void(NTAPI *__asan_tls_exit)( - void *, unsigned long, void *) = asan_thread_exit; - -WIN_FORCE_LINK(__asan_dso_reg_hook) - -// }}} -} // namespace __asan - -#endif // SANITIZER_WINDOWS diff --git a/lib/asan/asan_win.cpp b/lib/asan/asan_win.cpp new file mode 100644 index 000000000000..417892aaedd8 --- /dev/null +++ b/lib/asan/asan_win.cpp @@ -0,0 +1,401 @@ +//===-- asan_win.cpp ------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Windows-specific details. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_WINDOWS +#define WIN32_LEAN_AND_MEAN +#include + +#include + +#include "asan_interceptors.h" +#include "asan_internal.h" +#include "asan_mapping.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_mutex.h" +#include "sanitizer_common/sanitizer_win.h" +#include "sanitizer_common/sanitizer_win_defs.h" + +using namespace __asan; + +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +int __asan_should_detect_stack_use_after_return() { + __asan_init(); + return __asan_option_detect_stack_use_after_return; +} + +SANITIZER_INTERFACE_ATTRIBUTE +uptr __asan_get_shadow_memory_dynamic_address() { + __asan_init(); + return __asan_shadow_memory_dynamic_address; +} +} // extern "C" + +// ---------------------- Windows-specific interceptors ---------------- {{{ +static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler; +static LPTOP_LEVEL_EXCEPTION_FILTER user_seh_handler; + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +long __asan_unhandled_exception_filter(EXCEPTION_POINTERS *info) { + EXCEPTION_RECORD *exception_record = info->ExceptionRecord; + CONTEXT *context = info->ContextRecord; + + // FIXME: Handle EXCEPTION_STACK_OVERFLOW here. + + SignalContext sig(exception_record, context); + ReportDeadlySignal(sig); + UNREACHABLE("returned from reporting deadly signal"); +} + +// Wrapper SEH Handler. If the exception should be handled by asan, we call +// __asan_unhandled_exception_filter, otherwise, we execute the user provided +// exception handler or the default. +static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) { + DWORD exception_code = info->ExceptionRecord->ExceptionCode; + if (__sanitizer::IsHandledDeadlyException(exception_code)) + return __asan_unhandled_exception_filter(info); + if (user_seh_handler) + return user_seh_handler(info); + // Bubble out to the default exception filter. 
+ if (default_seh_handler) + return default_seh_handler(info); + return EXCEPTION_CONTINUE_SEARCH; +} + +INTERCEPTOR_WINAPI(LPTOP_LEVEL_EXCEPTION_FILTER, SetUnhandledExceptionFilter, + LPTOP_LEVEL_EXCEPTION_FILTER ExceptionFilter) { + CHECK(REAL(SetUnhandledExceptionFilter)); + if (ExceptionFilter == &SEHHandler) + return REAL(SetUnhandledExceptionFilter)(ExceptionFilter); + // We record the user provided exception handler to be called for all the + // exceptions unhandled by asan. + Swap(ExceptionFilter, user_seh_handler); + return ExceptionFilter; +} + +INTERCEPTOR_WINAPI(void, RtlRaiseException, EXCEPTION_RECORD *ExceptionRecord) { + CHECK(REAL(RtlRaiseException)); + // This is a noreturn function, unless it's one of the exceptions raised to + // communicate with the debugger, such as the one from OutputDebugString. + if (ExceptionRecord->ExceptionCode != DBG_PRINTEXCEPTION_C) + __asan_handle_no_return(); + REAL(RtlRaiseException)(ExceptionRecord); +} + +INTERCEPTOR_WINAPI(void, RaiseException, void *a, void *b, void *c, void *d) { + CHECK(REAL(RaiseException)); + __asan_handle_no_return(); + REAL(RaiseException)(a, b, c, d); +} + +#ifdef _WIN64 + +INTERCEPTOR_WINAPI(EXCEPTION_DISPOSITION, __C_specific_handler, + _EXCEPTION_RECORD *a, void *b, _CONTEXT *c, + _DISPATCHER_CONTEXT *d) { + CHECK(REAL(__C_specific_handler)); + __asan_handle_no_return(); + return REAL(__C_specific_handler)(a, b, c, d); +} + +#else + +INTERCEPTOR(int, _except_handler3, void *a, void *b, void *c, void *d) { + CHECK(REAL(_except_handler3)); + __asan_handle_no_return(); + return REAL(_except_handler3)(a, b, c, d); +} + +#if ASAN_DYNAMIC +// This handler is named differently in -MT and -MD CRTs. +#define _except_handler4 _except_handler4_common +#endif +INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) { + CHECK(REAL(_except_handler4)); + __asan_handle_no_return(); + return REAL(_except_handler4)(a, b, c, d); +} +#endif + +static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) { + AsanThread *t = (AsanThread *)arg; + SetCurrentThread(t); + return t->ThreadStart(GetTid(), /* signal_thread_is_registered */ nullptr); +} + +INTERCEPTOR_WINAPI(HANDLE, CreateThread, LPSECURITY_ATTRIBUTES security, + SIZE_T stack_size, LPTHREAD_START_ROUTINE start_routine, + void *arg, DWORD thr_flags, DWORD *tid) { + // Strict init-order checking is thread-hostile. + if (flags()->strict_init_order) + StopInitOrderChecking(); + GET_STACK_TRACE_THREAD; + // FIXME: The CreateThread interceptor is not the same as a pthread_create + // one. This is a bandaid fix for PR22025. + bool detached = false; // FIXME: how can we determine it on Windows? + u32 current_tid = GetCurrentTidOrInvalid(); + AsanThread *t = + AsanThread::Create(start_routine, arg, current_tid, &stack, detached); + return REAL(CreateThread)(security, stack_size, asan_thread_start, t, + thr_flags, tid); +} + +// }}} + +namespace __asan { + +void InitializePlatformInterceptors() { + // The interceptors were not designed to be removable, so we have to keep this + // module alive for the life of the process. 
+ HMODULE pinned; + CHECK(GetModuleHandleExW( + GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_PIN, + (LPCWSTR)&InitializePlatformInterceptors, &pinned)); + + ASAN_INTERCEPT_FUNC(CreateThread); + ASAN_INTERCEPT_FUNC(SetUnhandledExceptionFilter); + +#ifdef _WIN64 + ASAN_INTERCEPT_FUNC(__C_specific_handler); +#else + ASAN_INTERCEPT_FUNC(_except_handler3); + ASAN_INTERCEPT_FUNC(_except_handler4); +#endif + + // Try to intercept kernel32!RaiseException, and if that fails, intercept + // ntdll!RtlRaiseException instead. + if (!::__interception::OverrideFunction("RaiseException", + (uptr)WRAP(RaiseException), + (uptr *)&REAL(RaiseException))) { + CHECK(::__interception::OverrideFunction("RtlRaiseException", + (uptr)WRAP(RtlRaiseException), + (uptr *)&REAL(RtlRaiseException))); + } +} + +void AsanApplyToGlobals(globals_op_fptr op, const void *needle) { + UNIMPLEMENTED(); +} + +// ---------------------- TSD ---------------- {{{ +static bool tsd_key_inited = false; + +static __declspec(thread) void *fake_tsd = 0; + +// https://docs.microsoft.com/en-us/windows/desktop/api/winternl/ns-winternl-_teb +// "[This structure may be altered in future versions of Windows. Applications +// should use the alternate functions listed in this topic.]" +typedef struct _TEB { + PVOID Reserved1[12]; + // PVOID ThreadLocalStoragePointer; is here, at the last field in Reserved1. + PVOID ProcessEnvironmentBlock; + PVOID Reserved2[399]; + BYTE Reserved3[1952]; + PVOID TlsSlots[64]; + BYTE Reserved4[8]; + PVOID Reserved5[26]; + PVOID ReservedForOle; + PVOID Reserved6[4]; + PVOID TlsExpansionSlots; +} TEB, *PTEB; + +constexpr size_t TEB_RESERVED_FIELDS_THREAD_LOCAL_STORAGE_OFFSET = 11; +BOOL IsTlsInitialized() { + PTEB teb = (PTEB)NtCurrentTeb(); + return teb->Reserved1[TEB_RESERVED_FIELDS_THREAD_LOCAL_STORAGE_OFFSET] != + nullptr; +} + +void AsanTSDInit(void (*destructor)(void *tsd)) { + // FIXME: we're ignoring the destructor for now. + tsd_key_inited = true; +} + +void *AsanTSDGet() { + CHECK(tsd_key_inited); + return IsTlsInitialized() ? fake_tsd : nullptr; +} + +void AsanTSDSet(void *tsd) { + CHECK(tsd_key_inited); + fake_tsd = tsd; +} + +void PlatformTSDDtor(void *tsd) { AsanThread::TSDDtor(tsd); } +// }}} + +// ---------------------- Various stuff ---------------- {{{ +void *AsanDoesNotSupportStaticLinkage() { +#if defined(_DEBUG) +#error Please build the runtime with a non-debug CRT: /MD or /MT +#endif + return 0; +} + +uptr FindDynamicShadowStart() { + uptr granularity = GetMmapGranularity(); + uptr alignment = 8 * granularity; + uptr left_padding = granularity; + uptr space_size = kHighShadowEnd + left_padding; + uptr shadow_start = FindAvailableMemoryRange(space_size, alignment, + granularity, nullptr, nullptr); + CHECK_NE((uptr)0, shadow_start); + CHECK(IsAligned(shadow_start, alignment)); + return shadow_start; +} + +void AsanCheckDynamicRTPrereqs() {} + +void AsanCheckIncompatibleRT() {} + +void ReadContextStack(void *context, uptr *stack, uptr *ssize) { + UNIMPLEMENTED(); +} + +void AsanOnDeadlySignal(int, void *siginfo, void *context) { UNIMPLEMENTED(); } + +#if SANITIZER_WINDOWS64 +// Exception handler for dealing with shadow memory. +static LONG CALLBACK +ShadowExceptionHandler(PEXCEPTION_POINTERS exception_pointers) { + uptr page_size = GetPageSizeCached(); + // Only handle access violations. 
+ if (exception_pointers->ExceptionRecord->ExceptionCode != + EXCEPTION_ACCESS_VIOLATION || + exception_pointers->ExceptionRecord->NumberParameters < 2) { + __asan_handle_no_return(); + return EXCEPTION_CONTINUE_SEARCH; + } + + // Only handle access violations that land within the shadow memory. + uptr addr = + (uptr)(exception_pointers->ExceptionRecord->ExceptionInformation[1]); + + // Check valid shadow range. + if (!AddrIsInShadow(addr)) { + __asan_handle_no_return(); + return EXCEPTION_CONTINUE_SEARCH; + } + + // This is an access violation while trying to read from the shadow. Commit + // the relevant page and let execution continue. + + // Determine the address of the page that is being accessed. + uptr page = RoundDownTo(addr, page_size); + + // Commit the page. + uptr result = + (uptr)::VirtualAlloc((LPVOID)page, page_size, MEM_COMMIT, PAGE_READWRITE); + if (result != page) + return EXCEPTION_CONTINUE_SEARCH; + + // The page mapping succeeded, so continue execution as usual. + return EXCEPTION_CONTINUE_EXECUTION; +} + +#endif + +void InitializePlatformExceptionHandlers() { +#if SANITIZER_WINDOWS64 + // On Win64, we map memory on demand with access violation handler. + // Install our exception handler. + CHECK(AddVectoredExceptionHandler(TRUE, &ShadowExceptionHandler)); +#endif +} + +bool IsSystemHeapAddress(uptr addr) { + return ::HeapValidate(GetProcessHeap(), 0, (void *)addr) != FALSE; +} + +// We want to install our own exception handler (EH) to print helpful reports +// on access violations and whatnot. Unfortunately, the CRT initializers assume +// they are run before any user code and drop any previously-installed EHs on +// the floor, so we can't install our handler inside __asan_init. +// (See crt0dat.c in the CRT sources for the details) +// +// Things get even more complicated with the dynamic runtime, as it finishes its +// initialization before the .exe module CRT begins to initialize. +// +// For the static runtime (-MT), it's enough to put a callback to +// __asan_set_seh_filter in the last section for C initializers. +// +// For the dynamic runtime (-MD), we want link the same +// asan_dynamic_runtime_thunk.lib to all the modules, thus __asan_set_seh_filter +// will be called for each instrumented module. This ensures that at least one +// __asan_set_seh_filter call happens after the .exe module CRT is initialized. +extern "C" SANITIZER_INTERFACE_ATTRIBUTE int __asan_set_seh_filter() { + // We should only store the previous handler if it's not our own handler in + // order to avoid loops in the EH chain. + auto prev_seh_handler = SetUnhandledExceptionFilter(SEHHandler); + if (prev_seh_handler != &SEHHandler) + default_seh_handler = prev_seh_handler; + return 0; +} + +bool HandleDlopenInit() { + // Not supported on this platform. + static_assert(!SANITIZER_SUPPORTS_INIT_FOR_DLOPEN, + "Expected SANITIZER_SUPPORTS_INIT_FOR_DLOPEN to be false"); + return false; +} + +#if !ASAN_DYNAMIC +// The CRT runs initializers in this order: +// - C initializers, from XIA to XIZ +// - C++ initializers, from XCA to XCZ +// Prior to 2015, the CRT set the unhandled exception filter at priority XIY, +// near the end of C initialization. Starting in 2015, it was moved to the +// beginning of C++ initialization. We set our priority to XCAB to run +// immediately after the CRT runs. This way, our exception filter is called +// first and we can delegate to their filter if appropriate. 
+#pragma section(".CRT$XCAB", long, read) +__declspec(allocate(".CRT$XCAB")) int (*__intercept_seh)() = + __asan_set_seh_filter; + +// Piggyback on the TLS initialization callback directory to initialize asan as +// early as possible. Initializers in .CRT$XL* are called directly by ntdll, +// which run before the CRT. Users also add code to .CRT$XLC, so it's important +// to run our initializers first. +static void NTAPI asan_thread_init(void *module, DWORD reason, void *reserved) { + if (reason == DLL_PROCESS_ATTACH) + __asan_init(); +} + +#pragma section(".CRT$XLAB", long, read) +__declspec(allocate(".CRT$XLAB")) void(NTAPI *__asan_tls_init)( + void *, unsigned long, void *) = asan_thread_init; +#endif + +static void NTAPI asan_thread_exit(void *module, DWORD reason, void *reserved) { + if (reason == DLL_THREAD_DETACH) { + // Unpoison the thread's stack because the memory may be re-used. + NT_TIB *tib = (NT_TIB *)NtCurrentTeb(); + uptr stackSize = (uptr)tib->StackBase - (uptr)tib->StackLimit; + __asan_unpoison_memory_region(tib->StackLimit, stackSize); + } +} + +#pragma section(".CRT$XLY", long, read) +__declspec(allocate(".CRT$XLY")) void(NTAPI *__asan_tls_exit)( + void *, unsigned long, void *) = asan_thread_exit; + +WIN_FORCE_LINK(__asan_dso_reg_hook) + +// }}} +} // namespace __asan + +#endif // SANITIZER_WINDOWS diff --git a/lib/asan/asan_win_dll_thunk.cc b/lib/asan/asan_win_dll_thunk.cc deleted file mode 100644 index 47b3948c5aea..000000000000 --- a/lib/asan/asan_win_dll_thunk.cc +++ /dev/null @@ -1,152 +0,0 @@ -//===-- asan_win_dll_thunk.cc ---------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// This file defines a family of thunks that should be statically linked into -// the DLLs that have ASan instrumentation in order to delegate the calls to the -// shared runtime that lives in the main binary. -// See https://github.com/google/sanitizers/issues/209 for the details. -//===----------------------------------------------------------------------===// - -#ifdef SANITIZER_DLL_THUNK -#include "asan_init_version.h" -#include "interception/interception.h" -#include "sanitizer_common/sanitizer_win_defs.h" -#include "sanitizer_common/sanitizer_win_dll_thunk.h" -#include "sanitizer_common/sanitizer_platform_interceptors.h" - -// ASan own interface functions. -#define INTERFACE_FUNCTION(Name) INTERCEPT_SANITIZER_FUNCTION(Name) -#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name) -#include "asan_interface.inc" - -// Memory allocation functions. 
-INTERCEPT_WRAP_V_W(free) -INTERCEPT_WRAP_V_W(_free_base) -INTERCEPT_WRAP_V_WW(_free_dbg) - -INTERCEPT_WRAP_W_W(malloc) -INTERCEPT_WRAP_W_W(_malloc_base) -INTERCEPT_WRAP_W_WWWW(_malloc_dbg) - -INTERCEPT_WRAP_W_WW(calloc) -INTERCEPT_WRAP_W_WW(_calloc_base) -INTERCEPT_WRAP_W_WWWWW(_calloc_dbg) -INTERCEPT_WRAP_W_WWW(_calloc_impl) - -INTERCEPT_WRAP_W_WW(realloc) -INTERCEPT_WRAP_W_WW(_realloc_base) -INTERCEPT_WRAP_W_WWW(_realloc_dbg) -INTERCEPT_WRAP_W_WWW(_recalloc) -INTERCEPT_WRAP_W_WWW(_recalloc_base) - -INTERCEPT_WRAP_W_W(_msize) -INTERCEPT_WRAP_W_W(_msize_base) -INTERCEPT_WRAP_W_W(_expand) -INTERCEPT_WRAP_W_W(_expand_dbg) - -// TODO(timurrrr): Might want to add support for _aligned_* allocation -// functions to detect a bit more bugs. Those functions seem to wrap malloc(). - -// TODO(timurrrr): Do we need to add _Crt* stuff here? (see asan_malloc_win.cc). - -INTERCEPT_LIBRARY_FUNCTION(atoi); -INTERCEPT_LIBRARY_FUNCTION(atol); -INTERCEPT_LIBRARY_FUNCTION(frexp); -INTERCEPT_LIBRARY_FUNCTION(longjmp); -#if SANITIZER_INTERCEPT_MEMCHR -INTERCEPT_LIBRARY_FUNCTION(memchr); -#endif -INTERCEPT_LIBRARY_FUNCTION(memcmp); -INTERCEPT_LIBRARY_FUNCTION(memcpy); -INTERCEPT_LIBRARY_FUNCTION(memmove); -INTERCEPT_LIBRARY_FUNCTION(memset); -INTERCEPT_LIBRARY_FUNCTION(strcat); // NOLINT -INTERCEPT_LIBRARY_FUNCTION(strchr); -INTERCEPT_LIBRARY_FUNCTION(strcmp); -INTERCEPT_LIBRARY_FUNCTION(strcpy); // NOLINT -INTERCEPT_LIBRARY_FUNCTION(strcspn); -INTERCEPT_LIBRARY_FUNCTION(strdup); -INTERCEPT_LIBRARY_FUNCTION(strlen); -INTERCEPT_LIBRARY_FUNCTION(strncat); -INTERCEPT_LIBRARY_FUNCTION(strncmp); -INTERCEPT_LIBRARY_FUNCTION(strncpy); -INTERCEPT_LIBRARY_FUNCTION(strnlen); -INTERCEPT_LIBRARY_FUNCTION(strpbrk); -INTERCEPT_LIBRARY_FUNCTION(strrchr); -INTERCEPT_LIBRARY_FUNCTION(strspn); -INTERCEPT_LIBRARY_FUNCTION(strstr); -INTERCEPT_LIBRARY_FUNCTION(strtok); -INTERCEPT_LIBRARY_FUNCTION(strtol); -INTERCEPT_LIBRARY_FUNCTION(wcslen); -INTERCEPT_LIBRARY_FUNCTION(wcsnlen); - -#ifdef _WIN64 -INTERCEPT_LIBRARY_FUNCTION(__C_specific_handler); -#else -INTERCEPT_LIBRARY_FUNCTION(_except_handler3); -// _except_handler4 checks -GS cookie which is different for each module, so we -// can't use INTERCEPT_LIBRARY_FUNCTION(_except_handler4). -INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) { - __asan_handle_no_return(); - return REAL(_except_handler4)(a, b, c, d); -} -#endif - -// Windows specific functions not included in asan_interface.inc. -INTERCEPT_WRAP_W_V(__asan_should_detect_stack_use_after_return) -INTERCEPT_WRAP_W_V(__asan_get_shadow_memory_dynamic_address) -INTERCEPT_WRAP_W_W(__asan_unhandled_exception_filter) - -using namespace __sanitizer; - -extern "C" { -int __asan_option_detect_stack_use_after_return; -uptr __asan_shadow_memory_dynamic_address; -} // extern "C" - -static int asan_dll_thunk_init() { - typedef void (*fntype)(); - static fntype fn = 0; - // asan_dll_thunk_init is expected to be called by only one thread. - if (fn) return 0; - - // Ensure all interception was executed. - __dll_thunk_init(); - - fn = (fntype) dllThunkGetRealAddrOrDie("__asan_init"); - fn(); - __asan_option_detect_stack_use_after_return = - (__asan_should_detect_stack_use_after_return() != 0); - __asan_shadow_memory_dynamic_address = - (uptr)__asan_get_shadow_memory_dynamic_address(); - -#ifndef _WIN64 - INTERCEPT_FUNCTION(_except_handler4); -#endif - // In DLLs, the callbacks are expected to return 0, - // otherwise CRT initialization fails. 
- return 0; -} - -#pragma section(".CRT$XIB", long, read) // NOLINT -__declspec(allocate(".CRT$XIB")) int (*__asan_preinit)() = asan_dll_thunk_init; - -static void WINAPI asan_thread_init(void *mod, unsigned long reason, - void *reserved) { - if (reason == /*DLL_PROCESS_ATTACH=*/1) asan_dll_thunk_init(); -} - -#pragma section(".CRT$XLAB", long, read) // NOLINT -__declspec(allocate(".CRT$XLAB")) void (WINAPI *__asan_tls_init)(void *, - unsigned long, void *) = asan_thread_init; - -WIN_FORCE_LINK(__asan_dso_reg_hook) - -#endif // SANITIZER_DLL_THUNK diff --git a/lib/asan/asan_win_dll_thunk.cpp b/lib/asan/asan_win_dll_thunk.cpp new file mode 100644 index 000000000000..a5671cc9dffd --- /dev/null +++ b/lib/asan/asan_win_dll_thunk.cpp @@ -0,0 +1,152 @@ +//===-- asan_win_dll_thunk.cpp --------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// This file defines a family of thunks that should be statically linked into +// the DLLs that have ASan instrumentation in order to delegate the calls to the +// shared runtime that lives in the main binary. +// See https://github.com/google/sanitizers/issues/209 for the details. +//===----------------------------------------------------------------------===// + +#ifdef SANITIZER_DLL_THUNK +#include "asan_init_version.h" +#include "interception/interception.h" +#include "sanitizer_common/sanitizer_win_defs.h" +#include "sanitizer_common/sanitizer_win_dll_thunk.h" +#include "sanitizer_common/sanitizer_platform_interceptors.h" + +// ASan own interface functions. +#define INTERFACE_FUNCTION(Name) INTERCEPT_SANITIZER_FUNCTION(Name) +#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name) +#include "asan_interface.inc" + +// Memory allocation functions. +INTERCEPT_WRAP_V_W(free) +INTERCEPT_WRAP_V_W(_free_base) +INTERCEPT_WRAP_V_WW(_free_dbg) + +INTERCEPT_WRAP_W_W(malloc) +INTERCEPT_WRAP_W_W(_malloc_base) +INTERCEPT_WRAP_W_WWWW(_malloc_dbg) + +INTERCEPT_WRAP_W_WW(calloc) +INTERCEPT_WRAP_W_WW(_calloc_base) +INTERCEPT_WRAP_W_WWWWW(_calloc_dbg) +INTERCEPT_WRAP_W_WWW(_calloc_impl) + +INTERCEPT_WRAP_W_WW(realloc) +INTERCEPT_WRAP_W_WW(_realloc_base) +INTERCEPT_WRAP_W_WWW(_realloc_dbg) +INTERCEPT_WRAP_W_WWW(_recalloc) +INTERCEPT_WRAP_W_WWW(_recalloc_base) + +INTERCEPT_WRAP_W_W(_msize) +INTERCEPT_WRAP_W_W(_msize_base) +INTERCEPT_WRAP_W_W(_expand) +INTERCEPT_WRAP_W_W(_expand_dbg) + +// TODO(timurrrr): Might want to add support for _aligned_* allocation +// functions to detect a bit more bugs. Those functions seem to wrap malloc(). + +// TODO(timurrrr): Do we need to add _Crt* stuff here? 
(see asan_malloc_win.cpp) + +INTERCEPT_LIBRARY_FUNCTION(atoi); +INTERCEPT_LIBRARY_FUNCTION(atol); +INTERCEPT_LIBRARY_FUNCTION(frexp); +INTERCEPT_LIBRARY_FUNCTION(longjmp); +#if SANITIZER_INTERCEPT_MEMCHR +INTERCEPT_LIBRARY_FUNCTION(memchr); +#endif +INTERCEPT_LIBRARY_FUNCTION(memcmp); +INTERCEPT_LIBRARY_FUNCTION(memcpy); +INTERCEPT_LIBRARY_FUNCTION(memmove); +INTERCEPT_LIBRARY_FUNCTION(memset); +INTERCEPT_LIBRARY_FUNCTION(strcat); +INTERCEPT_LIBRARY_FUNCTION(strchr); +INTERCEPT_LIBRARY_FUNCTION(strcmp); +INTERCEPT_LIBRARY_FUNCTION(strcpy); +INTERCEPT_LIBRARY_FUNCTION(strcspn); +INTERCEPT_LIBRARY_FUNCTION(strdup); +INTERCEPT_LIBRARY_FUNCTION(strlen); +INTERCEPT_LIBRARY_FUNCTION(strncat); +INTERCEPT_LIBRARY_FUNCTION(strncmp); +INTERCEPT_LIBRARY_FUNCTION(strncpy); +INTERCEPT_LIBRARY_FUNCTION(strnlen); +INTERCEPT_LIBRARY_FUNCTION(strpbrk); +INTERCEPT_LIBRARY_FUNCTION(strrchr); +INTERCEPT_LIBRARY_FUNCTION(strspn); +INTERCEPT_LIBRARY_FUNCTION(strstr); +INTERCEPT_LIBRARY_FUNCTION(strtok); +INTERCEPT_LIBRARY_FUNCTION(strtol); +INTERCEPT_LIBRARY_FUNCTION(wcslen); +INTERCEPT_LIBRARY_FUNCTION(wcsnlen); + +#ifdef _WIN64 +INTERCEPT_LIBRARY_FUNCTION(__C_specific_handler); +#else +INTERCEPT_LIBRARY_FUNCTION(_except_handler3); +// _except_handler4 checks -GS cookie which is different for each module, so we +// can't use INTERCEPT_LIBRARY_FUNCTION(_except_handler4). +INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) { + __asan_handle_no_return(); + return REAL(_except_handler4)(a, b, c, d); +} +#endif + +// Windows specific functions not included in asan_interface.inc. +INTERCEPT_WRAP_W_V(__asan_should_detect_stack_use_after_return) +INTERCEPT_WRAP_W_V(__asan_get_shadow_memory_dynamic_address) +INTERCEPT_WRAP_W_W(__asan_unhandled_exception_filter) + +using namespace __sanitizer; + +extern "C" { +int __asan_option_detect_stack_use_after_return; +uptr __asan_shadow_memory_dynamic_address; +} // extern "C" + +static int asan_dll_thunk_init() { + typedef void (*fntype)(); + static fntype fn = 0; + // asan_dll_thunk_init is expected to be called by only one thread. + if (fn) return 0; + + // Ensure all interception was executed. + __dll_thunk_init(); + + fn = (fntype) dllThunkGetRealAddrOrDie("__asan_init"); + fn(); + __asan_option_detect_stack_use_after_return = + (__asan_should_detect_stack_use_after_return() != 0); + __asan_shadow_memory_dynamic_address = + (uptr)__asan_get_shadow_memory_dynamic_address(); + +#ifndef _WIN64 + INTERCEPT_FUNCTION(_except_handler4); +#endif + // In DLLs, the callbacks are expected to return 0, + // otherwise CRT initialization fails. + return 0; +} + +#pragma section(".CRT$XIB", long, read) +__declspec(allocate(".CRT$XIB")) int (*__asan_preinit)() = asan_dll_thunk_init; + +static void WINAPI asan_thread_init(void *mod, unsigned long reason, + void *reserved) { + if (reason == /*DLL_PROCESS_ATTACH=*/1) asan_dll_thunk_init(); +} + +#pragma section(".CRT$XLAB", long, read) +__declspec(allocate(".CRT$XLAB")) void (WINAPI *__asan_tls_init)(void *, + unsigned long, void *) = asan_thread_init; + +WIN_FORCE_LINK(__asan_dso_reg_hook) + +#endif // SANITIZER_DLL_THUNK diff --git a/lib/asan/asan_win_dynamic_runtime_thunk.cc b/lib/asan/asan_win_dynamic_runtime_thunk.cc deleted file mode 100644 index cf4a59842c4f..000000000000 --- a/lib/asan/asan_win_dynamic_runtime_thunk.cc +++ /dev/null @@ -1,130 +0,0 @@ -//===-- asan_win_dynamic_runtime_thunk.cc ---------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
-// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// This file defines things that need to be present in the application modules -// to interact with the ASan DLL runtime correctly and can't be implemented -// using the default "import library" generated when linking the DLL RTL. -// -// This includes: -// - creating weak aliases to default implementation imported from asan dll. -// - forwarding the detect_stack_use_after_return runtime option -// - working around deficiencies of the MD runtime -// - installing a custom SEH handler -// -//===----------------------------------------------------------------------===// - -#ifdef SANITIZER_DYNAMIC_RUNTIME_THUNK -#define SANITIZER_IMPORT_INTERFACE 1 -#include "sanitizer_common/sanitizer_win_defs.h" -#define WIN32_LEAN_AND_MEAN -#include - -// Define weak alias for all weak functions imported from asan dll. -#define INTERFACE_FUNCTION(Name) -#define INTERFACE_WEAK_FUNCTION(Name) WIN_WEAK_IMPORT_DEF(Name) -#include "asan_interface.inc" - -// First, declare CRT sections we'll be using in this file -#pragma section(".CRT$XIB", long, read) // NOLINT -#pragma section(".CRT$XID", long, read) // NOLINT -#pragma section(".CRT$XCAB", long, read) // NOLINT -#pragma section(".CRT$XTW", long, read) // NOLINT -#pragma section(".CRT$XTY", long, read) // NOLINT -#pragma section(".CRT$XLAB", long, read) // NOLINT - -//////////////////////////////////////////////////////////////////////////////// -// Define a copy of __asan_option_detect_stack_use_after_return that should be -// used when linking an MD runtime with a set of object files on Windows. -// -// The ASan MD runtime dllexports '__asan_option_detect_stack_use_after_return', -// so normally we would just dllimport it. Unfortunately, the dllimport -// attribute adds __imp_ prefix to the symbol name of a variable. -// Since in general we don't know if a given TU is going to be used -// with a MT or MD runtime and we don't want to use ugly __imp_ names on Windows -// just to work around this issue, let's clone the variable that is constant -// after initialization anyways. -extern "C" { -__declspec(dllimport) int __asan_should_detect_stack_use_after_return(); -int __asan_option_detect_stack_use_after_return; - -__declspec(dllimport) void* __asan_get_shadow_memory_dynamic_address(); -void* __asan_shadow_memory_dynamic_address; -} - -static int InitializeClonedVariables() { - __asan_option_detect_stack_use_after_return = - __asan_should_detect_stack_use_after_return(); - __asan_shadow_memory_dynamic_address = - __asan_get_shadow_memory_dynamic_address(); - return 0; -} - -static void NTAPI asan_thread_init(void *mod, unsigned long reason, - void *reserved) { - if (reason == DLL_PROCESS_ATTACH) InitializeClonedVariables(); -} - -// Our cloned variables must be initialized before C/C++ constructors. If TLS -// is used, our .CRT$XLAB initializer will run first. If not, our .CRT$XIB -// initializer is needed as a backup. 
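// Illustrative sketch (names hypothetical) of the two techniques the comment
// above describes: importing a *function* from the DLL and cloning its result
// into an ordinary global avoids the __imp_-prefixed indirection that
// dllimport data would require, and placing a function pointer in a .CRT$XI*
// section makes the CRT run the copy during C initialization, before C++
// constructors.
#if defined(_MSC_VER)
__declspec(dllimport) int dll_exported_value();  // a call, cheap to import
int cloned_value;                                // plain global, no __imp_ ref

static int clone_once() {
  cloned_value = dll_exported_value();           // copy once at startup
  return 0;                                      // CRT initializers return 0
}

#pragma section(".CRT$XIB", long, read)
__declspec(allocate(".CRT$XIB")) int (*run_clone_once)() = clone_once;
#endif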
-__declspec(allocate(".CRT$XIB")) int (*__asan_initialize_cloned_variables)() = - InitializeClonedVariables; -__declspec(allocate(".CRT$XLAB")) void (NTAPI *__asan_tls_init)(void *, - unsigned long, void *) = asan_thread_init; - -//////////////////////////////////////////////////////////////////////////////// -// For some reason, the MD CRT doesn't call the C/C++ terminators during on DLL -// unload or on exit. ASan relies on LLVM global_dtors to call -// __asan_unregister_globals on these events, which unfortunately doesn't work -// with the MD runtime, see PR22545 for the details. -// To work around this, for each DLL we schedule a call to UnregisterGlobals -// using atexit() that calls a small subset of C terminators -// where LLVM global_dtors is placed. Fingers crossed, no other C terminators -// are there. -extern "C" int __cdecl atexit(void (__cdecl *f)(void)); -extern "C" void __cdecl _initterm(void *a, void *b); - -namespace { -__declspec(allocate(".CRT$XTW")) void* before_global_dtors = 0; -__declspec(allocate(".CRT$XTY")) void* after_global_dtors = 0; - -void UnregisterGlobals() { - _initterm(&before_global_dtors, &after_global_dtors); -} - -int ScheduleUnregisterGlobals() { - return atexit(UnregisterGlobals); -} -} // namespace - -// We need to call 'atexit(UnregisterGlobals);' as early as possible, but after -// atexit() is initialized (.CRT$XIC). As this is executed before C++ -// initializers (think ctors for globals), UnregisterGlobals gets executed after -// dtors for C++ globals. -__declspec(allocate(".CRT$XID")) -int (*__asan_schedule_unregister_globals)() = ScheduleUnregisterGlobals; - -//////////////////////////////////////////////////////////////////////////////// -// ASan SEH handling. -// We need to set the ASan-specific SEH handler at the end of CRT initialization -// of each module (see also asan_win.cc). -extern "C" { -__declspec(dllimport) int __asan_set_seh_filter(); -static int SetSEHFilter() { return __asan_set_seh_filter(); } - -// Unfortunately, putting a pointer to __asan_set_seh_filter into -// __asan_intercept_seh gets optimized out, so we have to use an extra function. -__declspec(allocate(".CRT$XCAB")) int (*__asan_seh_interceptor)() = - SetSEHFilter; -} - -WIN_FORCE_LINK(__asan_dso_reg_hook) - -#endif // SANITIZER_DYNAMIC_RUNTIME_THUNK diff --git a/lib/asan/asan_win_dynamic_runtime_thunk.cpp b/lib/asan/asan_win_dynamic_runtime_thunk.cpp new file mode 100644 index 000000000000..f0b5ec9eef7f --- /dev/null +++ b/lib/asan/asan_win_dynamic_runtime_thunk.cpp @@ -0,0 +1,130 @@ +//===-- asan_win_dynamic_runtime_thunk.cpp --------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// This file defines things that need to be present in the application modules +// to interact with the ASan DLL runtime correctly and can't be implemented +// using the default "import library" generated when linking the DLL RTL. +// +// This includes: +// - creating weak aliases to default implementation imported from asan dll. 
+// - forwarding the detect_stack_use_after_return runtime option +// - working around deficiencies of the MD runtime +// - installing a custom SEH handler +// +//===----------------------------------------------------------------------===// + +#ifdef SANITIZER_DYNAMIC_RUNTIME_THUNK +#define SANITIZER_IMPORT_INTERFACE 1 +#include "sanitizer_common/sanitizer_win_defs.h" +#define WIN32_LEAN_AND_MEAN +#include + +// Define weak alias for all weak functions imported from asan dll. +#define INTERFACE_FUNCTION(Name) +#define INTERFACE_WEAK_FUNCTION(Name) WIN_WEAK_IMPORT_DEF(Name) +#include "asan_interface.inc" + +// First, declare CRT sections we'll be using in this file +#pragma section(".CRT$XIB", long, read) +#pragma section(".CRT$XID", long, read) +#pragma section(".CRT$XCAB", long, read) +#pragma section(".CRT$XTW", long, read) +#pragma section(".CRT$XTY", long, read) +#pragma section(".CRT$XLAB", long, read) + +//////////////////////////////////////////////////////////////////////////////// +// Define a copy of __asan_option_detect_stack_use_after_return that should be +// used when linking an MD runtime with a set of object files on Windows. +// +// The ASan MD runtime dllexports '__asan_option_detect_stack_use_after_return', +// so normally we would just dllimport it. Unfortunately, the dllimport +// attribute adds __imp_ prefix to the symbol name of a variable. +// Since in general we don't know if a given TU is going to be used +// with a MT or MD runtime and we don't want to use ugly __imp_ names on Windows +// just to work around this issue, let's clone the variable that is constant +// after initialization anyways. +extern "C" { +__declspec(dllimport) int __asan_should_detect_stack_use_after_return(); +int __asan_option_detect_stack_use_after_return; + +__declspec(dllimport) void* __asan_get_shadow_memory_dynamic_address(); +void* __asan_shadow_memory_dynamic_address; +} + +static int InitializeClonedVariables() { + __asan_option_detect_stack_use_after_return = + __asan_should_detect_stack_use_after_return(); + __asan_shadow_memory_dynamic_address = + __asan_get_shadow_memory_dynamic_address(); + return 0; +} + +static void NTAPI asan_thread_init(void *mod, unsigned long reason, + void *reserved) { + if (reason == DLL_PROCESS_ATTACH) InitializeClonedVariables(); +} + +// Our cloned variables must be initialized before C/C++ constructors. If TLS +// is used, our .CRT$XLAB initializer will run first. If not, our .CRT$XIB +// initializer is needed as a backup. +__declspec(allocate(".CRT$XIB")) int (*__asan_initialize_cloned_variables)() = + InitializeClonedVariables; +__declspec(allocate(".CRT$XLAB")) void (NTAPI *__asan_tls_init)(void *, + unsigned long, void *) = asan_thread_init; + +//////////////////////////////////////////////////////////////////////////////// +// For some reason, the MD CRT doesn't call the C/C++ terminators during on DLL +// unload or on exit. ASan relies on LLVM global_dtors to call +// __asan_unregister_globals on these events, which unfortunately doesn't work +// with the MD runtime, see PR22545 for the details. +// To work around this, for each DLL we schedule a call to UnregisterGlobals +// using atexit() that calls a small subset of C terminators +// where LLVM global_dtors is placed. Fingers crossed, no other C terminators +// are there. 
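// Small standalone illustration (not from the patch) of the ordering relied
// on above: atexit handlers run in reverse registration order, so a handler
// registered before any C++ global constructor finishes runs only after those
// globals' destructors, which is why scheduling UnregisterGlobals early makes
// it run after the DLL's C++ dtors.
#include <cstdio>
#include <cstdlib>

struct Global {
  Global() { std::printf("ctor\n"); }
  ~Global() { std::printf("dtor\n"); }
};

static void late_cleanup() { std::printf("late_cleanup\n"); }

// Registered during dynamic initialization, before 'g' is constructed below.
static int scheduled = (std::atexit(late_cleanup), 0);
Global g;

int main() { return 0; }  // prints: ctor, dtor, late_cleanup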
+extern "C" int __cdecl atexit(void (__cdecl *f)(void)); +extern "C" void __cdecl _initterm(void *a, void *b); + +namespace { +__declspec(allocate(".CRT$XTW")) void* before_global_dtors = 0; +__declspec(allocate(".CRT$XTY")) void* after_global_dtors = 0; + +void UnregisterGlobals() { + _initterm(&before_global_dtors, &after_global_dtors); +} + +int ScheduleUnregisterGlobals() { + return atexit(UnregisterGlobals); +} +} // namespace + +// We need to call 'atexit(UnregisterGlobals);' as early as possible, but after +// atexit() is initialized (.CRT$XIC). As this is executed before C++ +// initializers (think ctors for globals), UnregisterGlobals gets executed after +// dtors for C++ globals. +__declspec(allocate(".CRT$XID")) +int (*__asan_schedule_unregister_globals)() = ScheduleUnregisterGlobals; + +//////////////////////////////////////////////////////////////////////////////// +// ASan SEH handling. +// We need to set the ASan-specific SEH handler at the end of CRT initialization +// of each module (see also asan_win.cpp). +extern "C" { +__declspec(dllimport) int __asan_set_seh_filter(); +static int SetSEHFilter() { return __asan_set_seh_filter(); } + +// Unfortunately, putting a pointer to __asan_set_seh_filter into +// __asan_intercept_seh gets optimized out, so we have to use an extra function. +__declspec(allocate(".CRT$XCAB")) int (*__asan_seh_interceptor)() = + SetSEHFilter; +} + +WIN_FORCE_LINK(__asan_dso_reg_hook) + +#endif // SANITIZER_DYNAMIC_RUNTIME_THUNK diff --git a/lib/asan/asan_win_weak_interception.cc b/lib/asan/asan_win_weak_interception.cc deleted file mode 100644 index 19965ca473b4..000000000000 --- a/lib/asan/asan_win_weak_interception.cc +++ /dev/null @@ -1,22 +0,0 @@ -//===-- asan_win_weak_interception.cc -------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// This module should be included in Address Sanitizer when it is implemented as -// a shared library on Windows (dll), in order to delegate the calls of weak -// functions to the implementation in the main executable when a strong -// definition is provided. -//===----------------------------------------------------------------------===// -#ifdef SANITIZER_DYNAMIC -#include "sanitizer_common/sanitizer_win_weak_interception.h" -#include "asan_interface_internal.h" -// Check if strong definitions for weak functions are present in the main -// executable. If that is the case, override dll functions to point to strong -// implementations. -#define INTERFACE_FUNCTION(Name) -#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name) -#include "asan_interface.inc" -#endif // SANITIZER_DYNAMIC diff --git a/lib/asan/asan_win_weak_interception.cpp b/lib/asan/asan_win_weak_interception.cpp new file mode 100644 index 000000000000..62534e12e2a6 --- /dev/null +++ b/lib/asan/asan_win_weak_interception.cpp @@ -0,0 +1,22 @@ +//===-- asan_win_weak_interception.cpp ------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// This module should be included in Address Sanitizer when it is implemented as +// a shared library on Windows (dll), in order to delegate the calls of weak +// functions to the implementation in the main executable when a strong +// definition is provided. +//===----------------------------------------------------------------------===// +#ifdef SANITIZER_DYNAMIC +#include "sanitizer_common/sanitizer_win_weak_interception.h" +#include "asan_interface_internal.h" +// Check if strong definitions for weak functions are present in the main +// executable. If that is the case, override dll functions to point to strong +// implementations. +#define INTERFACE_FUNCTION(Name) +#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name) +#include "asan_interface.inc" +#endif // SANITIZER_DYNAMIC diff --git a/lib/builtins/aarch64/fp_mode.c b/lib/builtins/aarch64/fp_mode.c new file mode 100644 index 000000000000..5a413689d2c8 --- /dev/null +++ b/lib/builtins/aarch64/fp_mode.c @@ -0,0 +1,59 @@ +//===----- lib/aarch64/fp_mode.c - Floaing-point mode utilities ---*- C -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include + +#include "../fp_mode.h" + +#define AARCH64_TONEAREST 0x0 +#define AARCH64_UPWARD 0x1 +#define AARCH64_DOWNWARD 0x2 +#define AARCH64_TOWARDZERO 0x3 +#define AARCH64_RMODE_MASK (AARCH64_TONEAREST | AARCH64_UPWARD | \ + AARCH64_DOWNWARD | AARCH64_TOWARDZERO) +#define AARCH64_RMODE_SHIFT 22 + +#define AARCH64_INEXACT 0x10 + +#ifndef __ARM_FP +// For soft float targets, allow changing rounding mode by overriding the weak +// __aarch64_fe_default_rmode symbol. +FE_ROUND_MODE __attribute__((weak)) __aarch64_fe_default_rmode = FE_TONEAREST; +#endif + +FE_ROUND_MODE __fe_getround() { +#ifdef __ARM_FP + uint64_t fpcr; + __asm__ __volatile__("mrs %0, fpcr" : "=r" (fpcr)); + fpcr = fpcr >> AARCH64_RMODE_SHIFT & AARCH64_RMODE_MASK; + switch (fpcr) { + case AARCH64_UPWARD: + return FE_UPWARD; + case AARCH64_DOWNWARD: + return FE_DOWNWARD; + case AARCH64_TOWARDZERO: + return FE_TOWARDZERO; + case AARCH64_TONEAREST: + default: + return FE_TONEAREST; + } +#else + return __aarch64_fe_default_rmode; +#endif +} + +int __fe_raise_inexact() { +#ifdef __ARM_FP + uint64_t fpsr; + __asm__ __volatile__("mrs %0, fpsr" : "=r" (fpsr)); + __asm__ __volatile__("msr fpsr, %0" : : "ri" (fpsr | AARCH64_INEXACT)); + return 0; +#else + return 0; +#endif +} diff --git a/lib/builtins/adddf3.c b/lib/builtins/adddf3.c index f2727fafcabe..26f11bfa2216 100644 --- a/lib/builtins/adddf3.c +++ b/lib/builtins/adddf3.c @@ -6,8 +6,7 @@ // //===----------------------------------------------------------------------===// // -// This file implements double-precision soft-float addition with the IEEE-754 -// default rounding (to nearest, ties to even). +// This file implements double-precision soft-float addition. 
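// Host-side illustration (standard <cfenv>, not the builtins' internal
// __fe_getround) of the behavior the new fp_mode plumbing enables: the same
// inexact addition rounds differently depending on the current rounding mode.
#include <cfenv>
#include <cstdio>

int main() {
  volatile double big = 9007199254740992.0;      // 2^53, where 1 ulp == 2.0
  std::fesetround(FE_UPWARD);
  std::printf("upward  : %.1f\n", big + 1.0);    // 9007199254740994.0
  std::fesetround(FE_DOWNWARD);
  std::printf("downward: %.1f\n", big + 1.0);    // 9007199254740992.0
  std::fesetround(FE_TONEAREST);
  std::printf("nearest : %.1f\n", big + 1.0);    // 9007199254740992.0 (tie to even)
  return 0;
}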
// //===----------------------------------------------------------------------===// diff --git a/lib/builtins/addsf3.c b/lib/builtins/addsf3.c index 8fe8622aadd9..9f1d517c1fa1 100644 --- a/lib/builtins/addsf3.c +++ b/lib/builtins/addsf3.c @@ -6,8 +6,7 @@ // //===----------------------------------------------------------------------===// // -// This file implements single-precision soft-float addition with the IEEE-754 -// default rounding (to nearest, ties to even). +// This file implements single-precision soft-float addition. // //===----------------------------------------------------------------------===// diff --git a/lib/builtins/addtf3.c b/lib/builtins/addtf3.c index 570472a14554..86e4f4cfc3fc 100644 --- a/lib/builtins/addtf3.c +++ b/lib/builtins/addtf3.c @@ -6,8 +6,7 @@ // //===----------------------------------------------------------------------===// // -// This file implements quad-precision soft-float addition with the IEEE-754 -// default rounding (to nearest, ties to even). +// This file implements quad-precision soft-float addition. // //===----------------------------------------------------------------------===// @@ -17,7 +16,7 @@ #if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT) #include "fp_add_impl.inc" -COMPILER_RT_ABI long double __addtf3(long double a, long double b) { +COMPILER_RT_ABI fp_t __addtf3(fp_t a, fp_t b) { return __addXf3__(a, b); } diff --git a/lib/builtins/arm/fp_mode.c b/lib/builtins/arm/fp_mode.c new file mode 100644 index 000000000000..300b71935ad4 --- /dev/null +++ b/lib/builtins/arm/fp_mode.c @@ -0,0 +1,59 @@ +//===----- lib/arm/fp_mode.c - Floaing-point mode utilities -------*- C -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include + +#include "../fp_mode.h" + +#define ARM_TONEAREST 0x0 +#define ARM_UPWARD 0x1 +#define ARM_DOWNWARD 0x2 +#define ARM_TOWARDZERO 0x3 +#define ARM_RMODE_MASK (ARM_TONEAREST | ARM_UPWARD | \ + ARM_DOWNWARD | ARM_TOWARDZERO) +#define ARM_RMODE_SHIFT 22 + +#define ARM_INEXACT 0x1000 + +#ifndef __ARM_FP +// For soft float targets, allow changing rounding mode by overriding the weak +// __arm_fe_default_rmode symbol. 
+FE_ROUND_MODE __attribute__((weak)) __arm_fe_default_rmode = FE_TONEAREST; +#endif + +FE_ROUND_MODE __fe_getround() { +#ifdef __ARM_FP + uint32_t fpscr; + __asm__ __volatile__("vmrs %0, fpscr" : "=r" (fpscr)); + fpscr = fpscr >> ARM_RMODE_SHIFT & ARM_RMODE_MASK; + switch (fpscr) { + case ARM_UPWARD: + return FE_UPWARD; + case ARM_DOWNWARD: + return FE_DOWNWARD; + case ARM_TOWARDZERO: + return FE_TOWARDZERO; + case ARM_TONEAREST: + default: + return FE_TONEAREST; + } +#else + return __arm_fe_default_rmode; +#endif +} + +int __fe_raise_inexact() { +#ifdef __ARM_FP + uint32_t fpscr; + __asm__ __volatile__("vmrs %0, fpscr" : "=r" (fpscr)); + __asm__ __volatile__("vmsr fpscr, %0" : : "ri" (fpscr | ARM_INEXACT)); + return 0; +#else + return 0; +#endif +} diff --git a/lib/builtins/atomic.c b/lib/builtins/atomic.c index 0f82803a6416..32b3a0f9ad23 100644 --- a/lib/builtins/atomic.c +++ b/lib/builtins/atomic.c @@ -51,9 +51,11 @@ static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1; //////////////////////////////////////////////////////////////////////////////// #ifdef __FreeBSD__ #include -#include +// clang-format off #include +#include #include +// clang-format on typedef struct _usem Lock; __inline static void unlock(Lock *l) { __c11_atomic_store((_Atomic(uint32_t) *)&l->_count, 1, __ATOMIC_RELEASE); diff --git a/lib/builtins/clear_cache.c b/lib/builtins/clear_cache.c index 76dc1968cc7e..80d3b2f9f17d 100644 --- a/lib/builtins/clear_cache.c +++ b/lib/builtins/clear_cache.c @@ -23,8 +23,10 @@ uintptr_t GetCurrentProcess(void); #endif #if defined(__FreeBSD__) && defined(__arm__) -#include +// clang-format off #include +#include +// clang-format on #endif #if defined(__NetBSD__) && defined(__arm__) @@ -32,54 +34,16 @@ uintptr_t GetCurrentProcess(void); #endif #if defined(__OpenBSD__) && defined(__mips__) -#include +// clang-format off #include +#include +// clang-format on #endif #if defined(__linux__) && defined(__mips__) #include #include #include -#if defined(__ANDROID__) && defined(__LP64__) -// clear_mips_cache - Invalidates instruction cache for Mips. -static void clear_mips_cache(const void *Addr, size_t Size) { - __asm__ volatile( - ".set push\n" - ".set noreorder\n" - ".set noat\n" - "beq %[Size], $zero, 20f\n" // If size == 0, branch around. - "nop\n" - "daddu %[Size], %[Addr], %[Size]\n" // Calculate end address + 1 - "rdhwr $v0, $1\n" // Get step size for SYNCI. - // $1 is $HW_SYNCI_Step - "beq $v0, $zero, 20f\n" // If no caches require - // synchronization, branch - // around. - "nop\n" - "10:\n" - "synci 0(%[Addr])\n" // Synchronize all caches around - // address. - "daddu %[Addr], %[Addr], $v0\n" // Add step size. - "sltu $at, %[Addr], %[Size]\n" // Compare current with end - // address. - "bne $at, $zero, 10b\n" // Branch if more to do. - "nop\n" - "sync\n" // Clear memory hazards. - "20:\n" - "bal 30f\n" - "nop\n" - "30:\n" - "daddiu $ra, $ra, 12\n" // $ra has a value of $pc here. - // Add offset of 12 to point to the - // instruction after the last nop. - // - "jr.hb $ra\n" // Return, clearing instruction - // hazards. 
- "nop\n" - ".set pop\n" - : [ Addr ] "+r"(Addr), [ Size ] "+r"(Size)::"at", "ra", "v0", "memory"); -} -#endif #endif // The compiler generates calls to __clear_cache() when creating @@ -123,17 +87,7 @@ void __clear_cache(void *start, void *end) { #elif defined(__linux__) && defined(__mips__) const uintptr_t start_int = (uintptr_t)start; const uintptr_t end_int = (uintptr_t)end; -#if defined(__ANDROID__) && defined(__LP64__) - // Call synci implementation for short address range. - const uintptr_t address_range_limit = 256; - if ((end_int - start_int) <= address_range_limit) { - clear_mips_cache(start, (end_int - start_int)); - } else { - syscall(__NR_cacheflush, start, (end_int - start_int), BCACHE); - } -#else syscall(__NR_cacheflush, start, (end_int - start_int), BCACHE); -#endif #elif defined(__mips__) && defined(__OpenBSD__) cacheflush(start, (uintptr_t)end - (uintptr_t)start, BCACHE); #elif defined(__aarch64__) && !defined(__APPLE__) @@ -173,6 +127,16 @@ void __clear_cache(void *start, void *end) { for (uintptr_t line = start_line; line < end_line; line += line_size) __asm__ volatile("icbi 0, %0" : : "r"(line)); __asm__ volatile("isync"); +#elif defined(__sparc__) + const size_t dword_size = 8; + const size_t len = (uintptr_t)end - (uintptr_t)start; + + const uintptr_t mask = ~(dword_size - 1); + const uintptr_t start_dword = ((uintptr_t)start) & mask; + const uintptr_t end_dword = ((uintptr_t)start + len + dword_size - 1) & mask; + + for (uintptr_t dword = start_dword; dword < end_dword; dword += dword_size) + __asm__ volatile("flush %0" : : "r"(dword)); #else #if __APPLE__ // On Darwin, sys_icache_invalidate() provides this functionality diff --git a/lib/builtins/cpu_model.c b/lib/builtins/cpu_model.c index f953aed959e5..cdeb03794ecc 100644 --- a/lib/builtins/cpu_model.c +++ b/lib/builtins/cpu_model.c @@ -121,7 +121,8 @@ enum ProcessorFeatures { FEATURE_GFNI, FEATURE_VPCLMULQDQ, FEATURE_AVX512VNNI, - FEATURE_AVX512BITALG + FEATURE_AVX512BITALG, + FEATURE_AVX512BF16 }; // The check below for i386 was copied from clang's cpuid.h (__get_cpuid_max). @@ -415,8 +416,8 @@ static void getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model, default: // Unknown family 6 CPU. break; - break; } + break; default: break; // Unknown. } @@ -543,7 +544,7 @@ static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf, setFeature(FEATURE_BMI); if (HasLeaf7 && ((EBX >> 5) & 1) && HasAVX) setFeature(FEATURE_AVX2); - if (HasLeaf7 && ((EBX >> 9) & 1)) + if (HasLeaf7 && ((EBX >> 8) & 1)) setFeature(FEATURE_BMI2); if (HasLeaf7 && ((EBX >> 16) & 1) && HasAVX512Save) setFeature(FEATURE_AVX512F); @@ -582,6 +583,11 @@ static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf, if (HasLeaf7 && ((EDX >> 3) & 1) && HasAVX512Save) setFeature(FEATURE_AVX5124FMAPS); + bool HasLeaf7Subleaf1 = + MaxLeaf >= 0x7 && !getX86CpuIDAndInfoEx(0x7, 0x1, &EAX, &EBX, &ECX, &EDX); + if (HasLeaf7Subleaf1 && ((EAX >> 5) & 1) && HasAVX512Save) + setFeature(FEATURE_AVX512BF16); + unsigned MaxExtLevel; getX86CpuIDAndInfo(0x80000000, &MaxExtLevel, &EBX, &ECX, &EDX); diff --git a/lib/builtins/divtf3.c b/lib/builtins/divtf3.c index 6e61d2e31b75..ce462d4d46c1 100644 --- a/lib/builtins/divtf3.c +++ b/lib/builtins/divtf3.c @@ -213,7 +213,7 @@ COMPILER_RT_ABI fp_t __divtf3(fp_t a, fp_t b) { // Round. absResult += round; // Insert the sign and return. 
- const long double result = fromRep(absResult | quotientSign); + const fp_t result = fromRep(absResult | quotientSign); return result; } } diff --git a/lib/builtins/emutls.c b/lib/builtins/emutls.c index da58feb7b906..e0aa19155f7d 100644 --- a/lib/builtins/emutls.c +++ b/lib/builtins/emutls.c @@ -26,12 +26,23 @@ #define EMUTLS_SKIP_DESTRUCTOR_ROUNDS 0 #endif +#if defined(_MSC_VER) && !defined(__clang__) +// MSVC raises a warning about a nonstandard extension being used for the 0 +// sized element in this array. Disable this for warn-as-error builds. +#pragma warning(push) +#pragma warning(disable : 4206) +#endif + typedef struct emutls_address_array { uintptr_t skip_destructor_rounds; uintptr_t size; // number of elements in the 'data' array void *data[]; } emutls_address_array; +#if defined(_MSC_VER) && !defined(__clang__) +#pragma warning(pop) +#endif + static void emutls_shutdown(emutls_address_array *array); #ifndef _WIN32 diff --git a/lib/builtins/extenddftf2.c b/lib/builtins/extenddftf2.c index 849a39da1915..ddf470ecd629 100644 --- a/lib/builtins/extenddftf2.c +++ b/lib/builtins/extenddftf2.c @@ -14,7 +14,7 @@ #define DST_QUAD #include "fp_extend_impl.inc" -COMPILER_RT_ABI long double __extenddftf2(double a) { +COMPILER_RT_ABI fp_t __extenddftf2(double a) { return __extendXfYf2__(a); } diff --git a/lib/builtins/extendsftf2.c b/lib/builtins/extendsftf2.c index c6368406dde1..cf1fd2face20 100644 --- a/lib/builtins/extendsftf2.c +++ b/lib/builtins/extendsftf2.c @@ -14,7 +14,7 @@ #define DST_QUAD #include "fp_extend_impl.inc" -COMPILER_RT_ABI long double __extendsftf2(float a) { +COMPILER_RT_ABI fp_t __extendsftf2(float a) { return __extendXfYf2__(a); } diff --git a/lib/builtins/fixunsxfdi.c b/lib/builtins/fixunsxfdi.c index 75c4f093794f..097a4e55e931 100644 --- a/lib/builtins/fixunsxfdi.c +++ b/lib/builtins/fixunsxfdi.c @@ -25,6 +25,13 @@ // eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm // mmmm mmmm mmmm +#if defined(_MSC_VER) && !defined(__clang__) +// MSVC throws a warning about 'unitialized variable use' here, +// disable it for builds that warn-as-error +#pragma warning(push) +#pragma warning(disable : 4700) +#endif + COMPILER_RT_ABI du_int __fixunsxfdi(long double a) { long_double_bits fb; fb.f = a; @@ -36,4 +43,8 @@ COMPILER_RT_ABI du_int __fixunsxfdi(long double a) { return fb.u.low.all >> (63 - e); } +#if defined(_MSC_VER) && !defined(__clang__) +#pragma warning(pop) #endif + +#endif //!_ARCH_PPC diff --git a/lib/builtins/fixunsxfsi.c b/lib/builtins/fixunsxfsi.c index 1432d8ba92d2..3bc1288d38a1 100644 --- a/lib/builtins/fixunsxfsi.c +++ b/lib/builtins/fixunsxfsi.c @@ -25,6 +25,13 @@ // eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm // mmmm mmmm mmmm +#if defined(_MSC_VER) && !defined(__clang__) +// MSVC throws a warning about 'unitialized variable use' here, +// disable it for builds that warn-as-error +#pragma warning(push) +#pragma warning(disable : 4700) +#endif + COMPILER_RT_ABI su_int __fixunsxfsi(long double a) { long_double_bits fb; fb.f = a; @@ -36,4 +43,8 @@ COMPILER_RT_ABI su_int __fixunsxfsi(long double a) { return fb.u.low.s.high >> (31 - e); } +#if defined(_MSC_VER) && !defined(__clang__) +#pragma warning(pop) +#endif + #endif // !_ARCH_PPC diff --git a/lib/builtins/fixxfdi.c b/lib/builtins/fixxfdi.c index 4783c0101740..a7a0464feb9d 100644 --- a/lib/builtins/fixxfdi.c +++ b/lib/builtins/fixxfdi.c @@ -24,6 +24,13 @@ // eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm // mmmm mmmm mmmm 
+#if defined(_MSC_VER) && !defined(__clang__) +// MSVC throws a warning about 'unitialized variable use' here, +// disable it for builds that warn-as-error +#pragma warning(push) +#pragma warning(disable : 4700) +#endif + COMPILER_RT_ABI di_int __fixxfdi(long double a) { const di_int di_max = (di_int)((~(du_int)0) / 2); const di_int di_min = -di_max - 1; @@ -40,4 +47,8 @@ COMPILER_RT_ABI di_int __fixxfdi(long double a) { return (r ^ s) - s; } +#if defined(_MSC_VER) && !defined(__clang__) +#pragma warning(pop) +#endif + #endif // !_ARCH_PPC diff --git a/lib/builtins/fp_add_impl.inc b/lib/builtins/fp_add_impl.inc index da8639341703..ab6321349032 100644 --- a/lib/builtins/fp_add_impl.inc +++ b/lib/builtins/fp_add_impl.inc @@ -12,6 +12,7 @@ //===----------------------------------------------------------------------===// #include "fp_lib.h" +#include "fp_mode.h" static __inline fp_t __addXf3__(fp_t a, fp_t b) { rep_t aRep = toRep(a); @@ -93,7 +94,7 @@ static __inline fp_t __addXf3__(fp_t a, fp_t b) { const unsigned int align = aExponent - bExponent; if (align) { if (align < typeWidth) { - const bool sticky = bSignificand << (typeWidth - align); + const bool sticky = (bSignificand << (typeWidth - align)) != 0; bSignificand = bSignificand >> align | sticky; } else { bSignificand = 1; // Set the sticky bit. b is known to be non-zero. @@ -132,7 +133,7 @@ static __inline fp_t __addXf3__(fp_t a, fp_t b) { // The result is denormal before rounding. The exponent is zero and we // need to shift the significand. const int shift = 1 - aExponent; - const bool sticky = aSignificand << (typeWidth - shift); + const bool sticky = (aSignificand << (typeWidth - shift)) != 0; aSignificand = aSignificand >> shift | sticky; aExponent = 0; } @@ -149,9 +150,23 @@ static __inline fp_t __addXf3__(fp_t a, fp_t b) { // Perform the final rounding. The result may overflow to infinity, but // that is the correct result in that case. - if (roundGuardSticky > 0x4) - result++; - if (roundGuardSticky == 0x4) - result += result & 1; + switch (__fe_getround()) { + case FE_TONEAREST: + if (roundGuardSticky > 0x4) + result++; + if (roundGuardSticky == 0x4) + result += result & 1; + break; + case FE_DOWNWARD: + if (resultSign && roundGuardSticky) result++; + break; + case FE_UPWARD: + if (!resultSign && roundGuardSticky) result++; + break; + case FE_TOWARDZERO: + break; + } + if (roundGuardSticky) + __fe_raise_inexact(); return fromRep(result); } diff --git a/lib/builtins/fp_lib.h b/lib/builtins/fp_lib.h index d1a988ea4713..e2a906681c46 100644 --- a/lib/builtins/fp_lib.h +++ b/lib/builtins/fp_lib.h @@ -245,7 +245,7 @@ static __inline void wideLeftShift(rep_t *hi, rep_t *lo, int count) { static __inline void wideRightShiftWithSticky(rep_t *hi, rep_t *lo, unsigned int count) { if (count < typeWidth) { - const bool sticky = *lo << (typeWidth - count); + const bool sticky = (*lo << (typeWidth - count)) != 0; *lo = *hi << (typeWidth - count) | *lo >> count | sticky; *hi = *hi >> count; } else if (count < 2 * typeWidth) { diff --git a/lib/builtins/fp_mode.c b/lib/builtins/fp_mode.c new file mode 100644 index 000000000000..c1b6c1f6b8a3 --- /dev/null +++ b/lib/builtins/fp_mode.c @@ -0,0 +1,24 @@ +//===----- lib/fp_mode.c - Floaing-point environment mode utilities --C -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file provides a default implementation of fp_mode.h for architectures +// that does not support or does not have an implementation of floating point +// environment mode. +// +//===----------------------------------------------------------------------===// + +#include "fp_mode.h" + +// IEEE-754 default rounding (to nearest, ties to even). +FE_ROUND_MODE __fe_getround() { + return FE_TONEAREST; +} + +int __fe_raise_inexact() { + return 0; +} diff --git a/lib/builtins/fp_mode.h b/lib/builtins/fp_mode.h new file mode 100644 index 000000000000..51bec0431a40 --- /dev/null +++ b/lib/builtins/fp_mode.h @@ -0,0 +1,29 @@ +//===----- lib/fp_mode.h - Floaing-point environment mode utilities --C -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is not part of the interface of this library. +// +// This file defines an interface for accessing hardware floating point +// environment mode. +// +//===----------------------------------------------------------------------===// + +#ifndef FP_MODE +#define FP_MODE + +typedef enum { + FE_TONEAREST, + FE_DOWNWARD, + FE_UPWARD, + FE_TOWARDZERO +} FE_ROUND_MODE; + +FE_ROUND_MODE __fe_getround(); +int __fe_raise_inexact(); + +#endif // FP_MODE_H diff --git a/lib/builtins/fp_trunc_impl.inc b/lib/builtins/fp_trunc_impl.inc index 133c8bbe5c2f..6662be7607e7 100644 --- a/lib/builtins/fp_trunc_impl.inc +++ b/lib/builtins/fp_trunc_impl.inc @@ -113,7 +113,7 @@ static __inline dst_t __truncXfYf2__(src_t a) { if (shift > srcSigBits) { absResult = 0; } else { - const bool sticky = significand << (srcBits - shift); + const bool sticky = (significand << (srcBits - shift)) != 0; src_rep_t denormalizedSignificand = significand >> shift | sticky; absResult = denormalizedSignificand >> (srcSigBits - dstSigBits); const src_rep_t roundBits = denormalizedSignificand & roundMask; diff --git a/lib/builtins/subdf3.c b/lib/builtins/subdf3.c index 5346dbc970fa..2100fd39c4ef 100644 --- a/lib/builtins/subdf3.c +++ b/lib/builtins/subdf3.c @@ -6,8 +6,7 @@ // //===----------------------------------------------------------------------===// // -// This file implements double-precision soft-float subtraction with the -// IEEE-754 default rounding (to nearest, ties to even). +// This file implements double-precision soft-float subtraction. // //===----------------------------------------------------------------------===// diff --git a/lib/builtins/subsf3.c b/lib/builtins/subsf3.c index 85bde029b5bc..ecfc24f7dd30 100644 --- a/lib/builtins/subsf3.c +++ b/lib/builtins/subsf3.c @@ -6,8 +6,7 @@ // //===----------------------------------------------------------------------===// // -// This file implements single-precision soft-float subtraction with the -// IEEE-754 default rounding (to nearest, ties to even). +// This file implements single-precision soft-float subtraction. 
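// Worked integer example (not the builtins' types) of the guard/round/sticky
// rounding that fp_add_impl.inc applies above when the mode is FE_TONEAREST:
// with three discarded bits, more than halfway rounds up, exactly halfway
// rounds to even, and any nonzero remainder would raise the inexact flag.
#include <cstdio>

static unsigned round_nearest_even(unsigned value) {
  unsigned result = value >> 3;          // keep the high bits
  unsigned grs = value & 0x7;            // guard/round/sticky bits
  if (grs > 0x4) result++;               // above halfway: round up
  if (grs == 0x4) result += result & 1;  // exactly halfway: round to even
  return result;
}

int main() {
  std::printf("%u\n", round_nearest_even(21));  // 2.625 -> 3
  std::printf("%u\n", round_nearest_even(20));  // 2.5   -> 2 (tie, to even)
  std::printf("%u\n", round_nearest_even(28));  // 3.5   -> 4 (tie, to even)
  return 0;
}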
// //===----------------------------------------------------------------------===// diff --git a/lib/builtins/subtf3.c b/lib/builtins/subtf3.c index c96814692d2c..3364c28f8179 100644 --- a/lib/builtins/subtf3.c +++ b/lib/builtins/subtf3.c @@ -6,8 +6,7 @@ // //===----------------------------------------------------------------------===// // -// This file implements quad-precision soft-float subtraction with the -// IEEE-754 default rounding (to nearest, ties to even). +// This file implements quad-precision soft-float subtraction. // //===----------------------------------------------------------------------===// diff --git a/lib/builtins/udivmoddi4.c b/lib/builtins/udivmoddi4.c index 2914cc0fb46d..5b297c32d790 100644 --- a/lib/builtins/udivmoddi4.c +++ b/lib/builtins/udivmoddi4.c @@ -17,6 +17,13 @@ // Translated from Figure 3-40 of The PowerPC Compiler Writer's Guide +#if defined(_MSC_VER) && !defined(__clang__) +// MSVC throws a warning about mod 0 here, disable it for builds that +// warn-as-error +#pragma warning(push) +#pragma warning(disable : 4724) +#endif + COMPILER_RT_ABI du_int __udivmoddi4(du_int a, du_int b, du_int *rem) { const unsigned n_uword_bits = sizeof(su_int) * CHAR_BIT; const unsigned n_udword_bits = sizeof(du_int) * CHAR_BIT; @@ -187,3 +194,7 @@ COMPILER_RT_ABI du_int __udivmoddi4(du_int a, du_int b, du_int *rem) { *rem = r.all; return q.all; } + +#if defined(_MSC_VER) && !defined(__clang__) +#pragma warning(pop) +#endif diff --git a/lib/dfsan/dfsan.cc b/lib/dfsan/dfsan.cc deleted file mode 100644 index f4ba1148f782..000000000000 --- a/lib/dfsan/dfsan.cc +++ /dev/null @@ -1,460 +0,0 @@ -//===-- dfsan.cc ----------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of DataFlowSanitizer. -// -// DataFlowSanitizer runtime. This file defines the public interface to -// DataFlowSanitizer as well as the definition of certain runtime functions -// called automatically by the compiler (specifically the instrumentation pass -// in llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp). -// -// The public interface is defined in include/sanitizer/dfsan_interface.h whose -// functions are prefixed dfsan_ while the compiler interface functions are -// prefixed __dfsan_. 
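// Minimal usage sketch of the public dfsan_ interface that this runtime
// implements (the user program is compiled with clang -fsanitize=dataflow;
// illustrative only, not part of the patch).
#include <sanitizer/dfsan_interface.h>
#include <assert.h>

int main() {
  int x = 1, y = 2;
  dfsan_label x_label = dfsan_create_label("x", nullptr);
  dfsan_set_label(x_label, &x, sizeof(x));

  int z = x + y;                                   // labels propagate automatically
  dfsan_label z_label = dfsan_read_label(&z, sizeof(z));
  assert(dfsan_has_label(z_label, x_label));       // z is tainted by x
  return 0;
}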
-//===----------------------------------------------------------------------===// - -#include "sanitizer_common/sanitizer_atomic.h" -#include "sanitizer_common/sanitizer_common.h" -#include "sanitizer_common/sanitizer_file.h" -#include "sanitizer_common/sanitizer_flags.h" -#include "sanitizer_common/sanitizer_flag_parser.h" -#include "sanitizer_common/sanitizer_libc.h" - -#include "dfsan/dfsan.h" - -using namespace __dfsan; - -typedef atomic_uint16_t atomic_dfsan_label; -static const dfsan_label kInitializingLabel = -1; - -static const uptr kNumLabels = 1 << (sizeof(dfsan_label) * 8); - -static atomic_dfsan_label __dfsan_last_label; -static dfsan_label_info __dfsan_label_info[kNumLabels]; - -Flags __dfsan::flags_data; - -SANITIZER_INTERFACE_ATTRIBUTE THREADLOCAL dfsan_label __dfsan_retval_tls; -SANITIZER_INTERFACE_ATTRIBUTE THREADLOCAL dfsan_label __dfsan_arg_tls[64]; - -SANITIZER_INTERFACE_ATTRIBUTE uptr __dfsan_shadow_ptr_mask; - -// On Linux/x86_64, memory is laid out as follows: -// -// +--------------------+ 0x800000000000 (top of memory) -// | application memory | -// +--------------------+ 0x700000008000 (kAppAddr) -// | | -// | unused | -// | | -// +--------------------+ 0x200200000000 (kUnusedAddr) -// | union table | -// +--------------------+ 0x200000000000 (kUnionTableAddr) -// | shadow memory | -// +--------------------+ 0x000000010000 (kShadowAddr) -// | reserved by kernel | -// +--------------------+ 0x000000000000 -// -// To derive a shadow memory address from an application memory address, -// bits 44-46 are cleared to bring the address into the range -// [0x000000008000,0x100000000000). Then the address is shifted left by 1 to -// account for the double byte representation of shadow labels and move the -// address into the shadow memory range. See the function shadow_for below. 
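// Sketch of the x86-64 address-to-shadow mapping described in the layout
// comment above: clear bits 44-46, then shift left by one because every
// application byte has a two-byte shadow label. The constant mirrors the
// comment; shadow_for() in the runtime is the authoritative version.
#include <cstdint>
#include <cstdio>

static uint64_t shadow_for_sketch(uint64_t app_addr) {
  const uint64_t kMask = ~0x700000000000ull;   // clears bits 44-46
  return (app_addr & kMask) << 1;              // 2 shadow bytes per app byte
}

int main() {
  uint64_t app = 0x7fff12345678ull;
  std::printf("app %#llx -> shadow %#llx\n",
              (unsigned long long)app,
              (unsigned long long)shadow_for_sketch(app));
  return 0;
}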
- -// On Linux/MIPS64, memory is laid out as follows: -// -// +--------------------+ 0x10000000000 (top of memory) -// | application memory | -// +--------------------+ 0xF000008000 (kAppAddr) -// | | -// | unused | -// | | -// +--------------------+ 0x2200000000 (kUnusedAddr) -// | union table | -// +--------------------+ 0x2000000000 (kUnionTableAddr) -// | shadow memory | -// +--------------------+ 0x0000010000 (kShadowAddr) -// | reserved by kernel | -// +--------------------+ 0x0000000000 - -// On Linux/AArch64 (39-bit VMA), memory is laid out as follow: -// -// +--------------------+ 0x8000000000 (top of memory) -// | application memory | -// +--------------------+ 0x7000008000 (kAppAddr) -// | | -// | unused | -// | | -// +--------------------+ 0x1200000000 (kUnusedAddr) -// | union table | -// +--------------------+ 0x1000000000 (kUnionTableAddr) -// | shadow memory | -// +--------------------+ 0x0000010000 (kShadowAddr) -// | reserved by kernel | -// +--------------------+ 0x0000000000 - -// On Linux/AArch64 (42-bit VMA), memory is laid out as follow: -// -// +--------------------+ 0x40000000000 (top of memory) -// | application memory | -// +--------------------+ 0x3ff00008000 (kAppAddr) -// | | -// | unused | -// | | -// +--------------------+ 0x1200000000 (kUnusedAddr) -// | union table | -// +--------------------+ 0x8000000000 (kUnionTableAddr) -// | shadow memory | -// +--------------------+ 0x0000010000 (kShadowAddr) -// | reserved by kernel | -// +--------------------+ 0x0000000000 - -// On Linux/AArch64 (48-bit VMA), memory is laid out as follow: -// -// +--------------------+ 0x1000000000000 (top of memory) -// | application memory | -// +--------------------+ 0xffff00008000 (kAppAddr) -// | unused | -// +--------------------+ 0xaaaab0000000 (top of PIE address) -// | application PIE | -// +--------------------+ 0xaaaaa0000000 (top of PIE address) -// | | -// | unused | -// | | -// +--------------------+ 0x1200000000 (kUnusedAddr) -// | union table | -// +--------------------+ 0x8000000000 (kUnionTableAddr) -// | shadow memory | -// +--------------------+ 0x0000010000 (kShadowAddr) -// | reserved by kernel | -// +--------------------+ 0x0000000000 - -typedef atomic_dfsan_label dfsan_union_table_t[kNumLabels][kNumLabels]; - -#ifdef DFSAN_RUNTIME_VMA -// Runtime detected VMA size. -int __dfsan::vmaSize; -#endif - -static uptr UnusedAddr() { - return MappingArchImpl() - + sizeof(dfsan_union_table_t); -} - -static atomic_dfsan_label *union_table(dfsan_label l1, dfsan_label l2) { - return &(*(dfsan_union_table_t *) UnionTableAddr())[l1][l2]; -} - -// Checks we do not run out of labels. -static void dfsan_check_label(dfsan_label label) { - if (label == kInitializingLabel) { - Report("FATAL: DataFlowSanitizer: out of labels\n"); - Die(); - } -} - -// Resolves the union of two unequal labels. Nonequality is a precondition for -// this function (the instrumentation pass inlines the equality test). -extern "C" SANITIZER_INTERFACE_ATTRIBUTE -dfsan_label __dfsan_union(dfsan_label l1, dfsan_label l2) { - if (flags().fast16labels) - return l1 | l2; - DCHECK_NE(l1, l2); - - if (l1 == 0) - return l2; - if (l2 == 0) - return l1; - - if (l1 > l2) - Swap(l1, l2); - - atomic_dfsan_label *table_ent = union_table(l1, l2); - // We need to deal with the case where two threads concurrently request - // a union of the same pair of labels. If the table entry is uninitialized, - // (i.e. 0) use a compare-exchange to set the entry to kInitializingLabel - // (i.e. 
-1) to mark that we are initializing it. - dfsan_label label = 0; - if (atomic_compare_exchange_strong(table_ent, &label, kInitializingLabel, - memory_order_acquire)) { - // Check whether l2 subsumes l1. We don't need to check whether l1 - // subsumes l2 because we are guaranteed here that l1 < l2, and (at least - // in the cases we are interested in) a label may only subsume labels - // created earlier (i.e. with a lower numerical value). - if (__dfsan_label_info[l2].l1 == l1 || - __dfsan_label_info[l2].l2 == l1) { - label = l2; - } else { - label = - atomic_fetch_add(&__dfsan_last_label, 1, memory_order_relaxed) + 1; - dfsan_check_label(label); - __dfsan_label_info[label].l1 = l1; - __dfsan_label_info[label].l2 = l2; - } - atomic_store(table_ent, label, memory_order_release); - } else if (label == kInitializingLabel) { - // Another thread is initializing the entry. Wait until it is finished. - do { - internal_sched_yield(); - label = atomic_load(table_ent, memory_order_acquire); - } while (label == kInitializingLabel); - } - return label; -} - -extern "C" SANITIZER_INTERFACE_ATTRIBUTE -dfsan_label __dfsan_union_load(const dfsan_label *ls, uptr n) { - dfsan_label label = ls[0]; - for (uptr i = 1; i != n; ++i) { - dfsan_label next_label = ls[i]; - if (label != next_label) - label = __dfsan_union(label, next_label); - } - return label; -} - -extern "C" SANITIZER_INTERFACE_ATTRIBUTE -void __dfsan_unimplemented(char *fname) { - if (flags().warn_unimplemented) - Report("WARNING: DataFlowSanitizer: call to uninstrumented function %s\n", - fname); -} - -// Use '-mllvm -dfsan-debug-nonzero-labels' and break on this function -// to try to figure out where labels are being introduced in a nominally -// label-free program. -extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_nonzero_label() { - if (flags().warn_nonzero_labels) - Report("WARNING: DataFlowSanitizer: saw nonzero label\n"); -} - -// Indirect call to an uninstrumented vararg function. We don't have a way of -// handling these at the moment. -extern "C" SANITIZER_INTERFACE_ATTRIBUTE void -__dfsan_vararg_wrapper(const char *fname) { - Report("FATAL: DataFlowSanitizer: unsupported indirect call to vararg " - "function %s\n", fname); - Die(); -} - -// Like __dfsan_union, but for use from the client or custom functions. Hence -// the equality comparison is done here before calling __dfsan_union. -SANITIZER_INTERFACE_ATTRIBUTE dfsan_label -dfsan_union(dfsan_label l1, dfsan_label l2) { - if (l1 == l2) - return l1; - return __dfsan_union(l1, l2); -} - -extern "C" SANITIZER_INTERFACE_ATTRIBUTE -dfsan_label dfsan_create_label(const char *desc, void *userdata) { - dfsan_label label = - atomic_fetch_add(&__dfsan_last_label, 1, memory_order_relaxed) + 1; - dfsan_check_label(label); - __dfsan_label_info[label].l1 = __dfsan_label_info[label].l2 = 0; - __dfsan_label_info[label].desc = desc; - __dfsan_label_info[label].userdata = userdata; - return label; -} - -extern "C" SANITIZER_INTERFACE_ATTRIBUTE -void __dfsan_set_label(dfsan_label label, void *addr, uptr size) { - for (dfsan_label *labelp = shadow_for(addr); size != 0; --size, ++labelp) { - // Don't write the label if it is already the value we need it to be. - // In a program where most addresses are not labeled, it is common that - // a page of shadow memory is entirely zeroed. The Linux copy-on-write - // implementation will share all of the zeroed pages, making a copy of a - // page when any value is written. 
The un-sharing will happen even if - // the value written does not change the value in memory. Avoiding the - // write when both |label| and |*labelp| are zero dramatically reduces - // the amount of real memory used by large programs. - if (label == *labelp) - continue; - - *labelp = label; - } -} - -SANITIZER_INTERFACE_ATTRIBUTE -void dfsan_set_label(dfsan_label label, void *addr, uptr size) { - __dfsan_set_label(label, addr, size); -} - -SANITIZER_INTERFACE_ATTRIBUTE -void dfsan_add_label(dfsan_label label, void *addr, uptr size) { - for (dfsan_label *labelp = shadow_for(addr); size != 0; --size, ++labelp) - if (*labelp != label) - *labelp = __dfsan_union(*labelp, label); -} - -// Unlike the other dfsan interface functions the behavior of this function -// depends on the label of one of its arguments. Hence it is implemented as a -// custom function. -extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_label -__dfsw_dfsan_get_label(long data, dfsan_label data_label, - dfsan_label *ret_label) { - *ret_label = 0; - return data_label; -} - -SANITIZER_INTERFACE_ATTRIBUTE dfsan_label -dfsan_read_label(const void *addr, uptr size) { - if (size == 0) - return 0; - return __dfsan_union_load(shadow_for(addr), size); -} - -extern "C" SANITIZER_INTERFACE_ATTRIBUTE -const struct dfsan_label_info *dfsan_get_label_info(dfsan_label label) { - return &__dfsan_label_info[label]; -} - -extern "C" SANITIZER_INTERFACE_ATTRIBUTE int -dfsan_has_label(dfsan_label label, dfsan_label elem) { - if (label == elem) - return true; - const dfsan_label_info *info = dfsan_get_label_info(label); - if (info->l1 != 0) { - return dfsan_has_label(info->l1, elem) || dfsan_has_label(info->l2, elem); - } else { - return false; - } -} - -extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_label -dfsan_has_label_with_desc(dfsan_label label, const char *desc) { - const dfsan_label_info *info = dfsan_get_label_info(label); - if (info->l1 != 0) { - return dfsan_has_label_with_desc(info->l1, desc) || - dfsan_has_label_with_desc(info->l2, desc); - } else { - return internal_strcmp(desc, info->desc) == 0; - } -} - -extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr -dfsan_get_label_count(void) { - dfsan_label max_label_allocated = - atomic_load(&__dfsan_last_label, memory_order_relaxed); - - return static_cast(max_label_allocated); -} - -extern "C" SANITIZER_INTERFACE_ATTRIBUTE void -dfsan_dump_labels(int fd) { - dfsan_label last_label = - atomic_load(&__dfsan_last_label, memory_order_relaxed); - - for (uptr l = 1; l <= last_label; ++l) { - char buf[64]; - internal_snprintf(buf, sizeof(buf), "%u %u %u ", l, - __dfsan_label_info[l].l1, __dfsan_label_info[l].l2); - WriteToFile(fd, buf, internal_strlen(buf)); - if (__dfsan_label_info[l].l1 == 0 && __dfsan_label_info[l].desc) { - WriteToFile(fd, __dfsan_label_info[l].desc, - internal_strlen(__dfsan_label_info[l].desc)); - } - WriteToFile(fd, "\n", 1); - } -} - -void Flags::SetDefaults() { -#define DFSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue; -#include "dfsan_flags.inc" -#undef DFSAN_FLAG -} - -static void RegisterDfsanFlags(FlagParser *parser, Flags *f) { -#define DFSAN_FLAG(Type, Name, DefaultValue, Description) \ - RegisterFlag(parser, #Name, Description, &f->Name); -#include "dfsan_flags.inc" -#undef DFSAN_FLAG -} - -static void InitializeFlags() { - SetCommonFlagsDefaults(); - flags().SetDefaults(); - - FlagParser parser; - RegisterCommonFlags(&parser); - RegisterDfsanFlags(&parser, &flags()); - parser.ParseStringFromEnv("DFSAN_OPTIONS"); - InitializeCommonFlags(); - if 
(Verbosity()) ReportUnrecognizedFlags(); - if (common_flags()->help) parser.PrintFlagDescriptions(); -} - -static void InitializePlatformEarly() { - AvoidCVE_2016_2143(); -#ifdef DFSAN_RUNTIME_VMA - __dfsan::vmaSize = - (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1); - if (__dfsan::vmaSize == 39 || __dfsan::vmaSize == 42 || - __dfsan::vmaSize == 48) { - __dfsan_shadow_ptr_mask = ShadowMask(); - } else { - Printf("FATAL: DataFlowSanitizer: unsupported VMA range\n"); - Printf("FATAL: Found %d - Supported 39, 42, and 48\n", __dfsan::vmaSize); - Die(); - } -#endif -} - -static void dfsan_fini() { - if (internal_strcmp(flags().dump_labels_at_exit, "") != 0) { - fd_t fd = OpenFile(flags().dump_labels_at_exit, WrOnly); - if (fd == kInvalidFd) { - Report("WARNING: DataFlowSanitizer: unable to open output file %s\n", - flags().dump_labels_at_exit); - return; - } - - Report("INFO: DataFlowSanitizer: dumping labels to %s\n", - flags().dump_labels_at_exit); - dfsan_dump_labels(fd); - CloseFile(fd); - } -} - -extern "C" void dfsan_flush() { - UnmapOrDie((void*)ShadowAddr(), UnusedAddr() - ShadowAddr()); - if (!MmapFixedNoReserve(ShadowAddr(), UnusedAddr() - ShadowAddr())) - Die(); -} - -static void dfsan_init(int argc, char **argv, char **envp) { - InitializeFlags(); - - ::InitializePlatformEarly(); - - if (!MmapFixedNoReserve(ShadowAddr(), UnusedAddr() - ShadowAddr())) - Die(); - - // Protect the region of memory we don't use, to preserve the one-to-one - // mapping from application to shadow memory. But if ASLR is disabled, Linux - // will load our executable in the middle of our unused region. This mostly - // works so long as the program doesn't use too much memory. We support this - // case by disabling memory protection when ASLR is disabled. - uptr init_addr = (uptr)&dfsan_init; - if (!(init_addr >= UnusedAddr() && init_addr < AppAddr())) - MmapFixedNoAccess(UnusedAddr(), AppAddr() - UnusedAddr()); - - InitializeInterceptors(); - - // Register the fini callback to run when the program terminates successfully - // or it is killed by the runtime. - Atexit(dfsan_fini); - AddDieCallback(dfsan_fini); - - __dfsan_label_info[kInitializingLabel].desc = ""; -} - -#if SANITIZER_CAN_USE_PREINIT_ARRAY -__attribute__((section(".preinit_array"), used)) -static void (*dfsan_init_ptr)(int, char **, char **) = dfsan_init; -#endif diff --git a/lib/dfsan/dfsan.cpp b/lib/dfsan/dfsan.cpp new file mode 100644 index 000000000000..0e2fb9f5f334 --- /dev/null +++ b/lib/dfsan/dfsan.cpp @@ -0,0 +1,460 @@ +//===-- dfsan.cpp ---------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of DataFlowSanitizer. +// +// DataFlowSanitizer runtime. This file defines the public interface to +// DataFlowSanitizer as well as the definition of certain runtime functions +// called automatically by the compiler (specifically the instrumentation pass +// in llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp). +// +// The public interface is defined in include/sanitizer/dfsan_interface.h whose +// functions are prefixed dfsan_ while the compiler interface functions are +// prefixed __dfsan_. 
+//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_atomic.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_file.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_flag_parser.h" +#include "sanitizer_common/sanitizer_libc.h" + +#include "dfsan/dfsan.h" + +using namespace __dfsan; + +typedef atomic_uint16_t atomic_dfsan_label; +static const dfsan_label kInitializingLabel = -1; + +static const uptr kNumLabels = 1 << (sizeof(dfsan_label) * 8); + +static atomic_dfsan_label __dfsan_last_label; +static dfsan_label_info __dfsan_label_info[kNumLabels]; + +Flags __dfsan::flags_data; + +SANITIZER_INTERFACE_ATTRIBUTE THREADLOCAL dfsan_label __dfsan_retval_tls; +SANITIZER_INTERFACE_ATTRIBUTE THREADLOCAL dfsan_label __dfsan_arg_tls[64]; + +SANITIZER_INTERFACE_ATTRIBUTE uptr __dfsan_shadow_ptr_mask; + +// On Linux/x86_64, memory is laid out as follows: +// +// +--------------------+ 0x800000000000 (top of memory) +// | application memory | +// +--------------------+ 0x700000008000 (kAppAddr) +// | | +// | unused | +// | | +// +--------------------+ 0x200200000000 (kUnusedAddr) +// | union table | +// +--------------------+ 0x200000000000 (kUnionTableAddr) +// | shadow memory | +// +--------------------+ 0x000000010000 (kShadowAddr) +// | reserved by kernel | +// +--------------------+ 0x000000000000 +// +// To derive a shadow memory address from an application memory address, +// bits 44-46 are cleared to bring the address into the range +// [0x000000008000,0x100000000000). Then the address is shifted left by 1 to +// account for the double byte representation of shadow labels and move the +// address into the shadow memory range. See the function shadow_for below. 
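[Editor's illustration, not part of the patch] To make the x86_64 layout above concrete, here is a minimal sketch of the app-to-shadow address arithmetic the comment describes. The mask literal and the helper name shadow_for_sketch are assumptions made for this example; the runtime itself derives the mask from ShadowMask() / __dfsan_shadow_ptr_mask and defines the real shadow_for elsewhere.

  #include <cstdint>

  using uptr = uint64_t;
  using dfsan_label = uint16_t;  // labels are 16-bit, hence the x2 scaling below

  // Clear bits 44-46 to fold an application address into
  // [0x000000008000, 0x100000000000), then shift left by 1 because every
  // application byte is shadowed by a two-byte label.
  inline dfsan_label *shadow_for_sketch(const void *addr) {
    uptr a = reinterpret_cast<uptr>(addr);
    a &= ~(uptr(7) << 44);  // drop bits 44-46
    a <<= 1;                // scale for 16-bit labels
    return reinterpret_cast<dfsan_label *>(a);
  }

As a cross-check, the 8 GiB gap between kUnionTableAddr (0x200000000000) and kUnusedAddr (0x200200000000) in the diagram matches the size of the union table declared below: 2^16 x 2^16 entries of a two-byte label each.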
+ +// On Linux/MIPS64, memory is laid out as follows: +// +// +--------------------+ 0x10000000000 (top of memory) +// | application memory | +// +--------------------+ 0xF000008000 (kAppAddr) +// | | +// | unused | +// | | +// +--------------------+ 0x2200000000 (kUnusedAddr) +// | union table | +// +--------------------+ 0x2000000000 (kUnionTableAddr) +// | shadow memory | +// +--------------------+ 0x0000010000 (kShadowAddr) +// | reserved by kernel | +// +--------------------+ 0x0000000000 + +// On Linux/AArch64 (39-bit VMA), memory is laid out as follow: +// +// +--------------------+ 0x8000000000 (top of memory) +// | application memory | +// +--------------------+ 0x7000008000 (kAppAddr) +// | | +// | unused | +// | | +// +--------------------+ 0x1200000000 (kUnusedAddr) +// | union table | +// +--------------------+ 0x1000000000 (kUnionTableAddr) +// | shadow memory | +// +--------------------+ 0x0000010000 (kShadowAddr) +// | reserved by kernel | +// +--------------------+ 0x0000000000 + +// On Linux/AArch64 (42-bit VMA), memory is laid out as follow: +// +// +--------------------+ 0x40000000000 (top of memory) +// | application memory | +// +--------------------+ 0x3ff00008000 (kAppAddr) +// | | +// | unused | +// | | +// +--------------------+ 0x1200000000 (kUnusedAddr) +// | union table | +// +--------------------+ 0x8000000000 (kUnionTableAddr) +// | shadow memory | +// +--------------------+ 0x0000010000 (kShadowAddr) +// | reserved by kernel | +// +--------------------+ 0x0000000000 + +// On Linux/AArch64 (48-bit VMA), memory is laid out as follow: +// +// +--------------------+ 0x1000000000000 (top of memory) +// | application memory | +// +--------------------+ 0xffff00008000 (kAppAddr) +// | unused | +// +--------------------+ 0xaaaab0000000 (top of PIE address) +// | application PIE | +// +--------------------+ 0xaaaaa0000000 (top of PIE address) +// | | +// | unused | +// | | +// +--------------------+ 0x1200000000 (kUnusedAddr) +// | union table | +// +--------------------+ 0x8000000000 (kUnionTableAddr) +// | shadow memory | +// +--------------------+ 0x0000010000 (kShadowAddr) +// | reserved by kernel | +// +--------------------+ 0x0000000000 + +typedef atomic_dfsan_label dfsan_union_table_t[kNumLabels][kNumLabels]; + +#ifdef DFSAN_RUNTIME_VMA +// Runtime detected VMA size. +int __dfsan::vmaSize; +#endif + +static uptr UnusedAddr() { + return MappingArchImpl() + + sizeof(dfsan_union_table_t); +} + +static atomic_dfsan_label *union_table(dfsan_label l1, dfsan_label l2) { + return &(*(dfsan_union_table_t *) UnionTableAddr())[l1][l2]; +} + +// Checks we do not run out of labels. +static void dfsan_check_label(dfsan_label label) { + if (label == kInitializingLabel) { + Report("FATAL: DataFlowSanitizer: out of labels\n"); + Die(); + } +} + +// Resolves the union of two unequal labels. Nonequality is a precondition for +// this function (the instrumentation pass inlines the equality test). +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +dfsan_label __dfsan_union(dfsan_label l1, dfsan_label l2) { + if (flags().fast16labels) + return l1 | l2; + DCHECK_NE(l1, l2); + + if (l1 == 0) + return l2; + if (l2 == 0) + return l1; + + if (l1 > l2) + Swap(l1, l2); + + atomic_dfsan_label *table_ent = union_table(l1, l2); + // We need to deal with the case where two threads concurrently request + // a union of the same pair of labels. If the table entry is uninitialized, + // (i.e. 0) use a compare-exchange to set the entry to kInitializingLabel + // (i.e. 
-1) to mark that we are initializing it. + dfsan_label label = 0; + if (atomic_compare_exchange_strong(table_ent, &label, kInitializingLabel, + memory_order_acquire)) { + // Check whether l2 subsumes l1. We don't need to check whether l1 + // subsumes l2 because we are guaranteed here that l1 < l2, and (at least + // in the cases we are interested in) a label may only subsume labels + // created earlier (i.e. with a lower numerical value). + if (__dfsan_label_info[l2].l1 == l1 || + __dfsan_label_info[l2].l2 == l1) { + label = l2; + } else { + label = + atomic_fetch_add(&__dfsan_last_label, 1, memory_order_relaxed) + 1; + dfsan_check_label(label); + __dfsan_label_info[label].l1 = l1; + __dfsan_label_info[label].l2 = l2; + } + atomic_store(table_ent, label, memory_order_release); + } else if (label == kInitializingLabel) { + // Another thread is initializing the entry. Wait until it is finished. + do { + internal_sched_yield(); + label = atomic_load(table_ent, memory_order_acquire); + } while (label == kInitializingLabel); + } + return label; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +dfsan_label __dfsan_union_load(const dfsan_label *ls, uptr n) { + dfsan_label label = ls[0]; + for (uptr i = 1; i != n; ++i) { + dfsan_label next_label = ls[i]; + if (label != next_label) + label = __dfsan_union(label, next_label); + } + return label; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +void __dfsan_unimplemented(char *fname) { + if (flags().warn_unimplemented) + Report("WARNING: DataFlowSanitizer: call to uninstrumented function %s\n", + fname); +} + +// Use '-mllvm -dfsan-debug-nonzero-labels' and break on this function +// to try to figure out where labels are being introduced in a nominally +// label-free program. +extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_nonzero_label() { + if (flags().warn_nonzero_labels) + Report("WARNING: DataFlowSanitizer: saw nonzero label\n"); +} + +// Indirect call to an uninstrumented vararg function. We don't have a way of +// handling these at the moment. +extern "C" SANITIZER_INTERFACE_ATTRIBUTE void +__dfsan_vararg_wrapper(const char *fname) { + Report("FATAL: DataFlowSanitizer: unsupported indirect call to vararg " + "function %s\n", fname); + Die(); +} + +// Like __dfsan_union, but for use from the client or custom functions. Hence +// the equality comparison is done here before calling __dfsan_union. +SANITIZER_INTERFACE_ATTRIBUTE dfsan_label +dfsan_union(dfsan_label l1, dfsan_label l2) { + if (l1 == l2) + return l1; + return __dfsan_union(l1, l2); +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +dfsan_label dfsan_create_label(const char *desc, void *userdata) { + dfsan_label label = + atomic_fetch_add(&__dfsan_last_label, 1, memory_order_relaxed) + 1; + dfsan_check_label(label); + __dfsan_label_info[label].l1 = __dfsan_label_info[label].l2 = 0; + __dfsan_label_info[label].desc = desc; + __dfsan_label_info[label].userdata = userdata; + return label; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +void __dfsan_set_label(dfsan_label label, void *addr, uptr size) { + for (dfsan_label *labelp = shadow_for(addr); size != 0; --size, ++labelp) { + // Don't write the label if it is already the value we need it to be. + // In a program where most addresses are not labeled, it is common that + // a page of shadow memory is entirely zeroed. The Linux copy-on-write + // implementation will share all of the zeroed pages, making a copy of a + // page when any value is written. 
The un-sharing will happen even if + // the value written does not change the value in memory. Avoiding the + // write when both |label| and |*labelp| are zero dramatically reduces + // the amount of real memory used by large programs. + if (label == *labelp) + continue; + + *labelp = label; + } +} + +SANITIZER_INTERFACE_ATTRIBUTE +void dfsan_set_label(dfsan_label label, void *addr, uptr size) { + __dfsan_set_label(label, addr, size); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void dfsan_add_label(dfsan_label label, void *addr, uptr size) { + for (dfsan_label *labelp = shadow_for(addr); size != 0; --size, ++labelp) + if (*labelp != label) + *labelp = __dfsan_union(*labelp, label); +} + +// Unlike the other dfsan interface functions the behavior of this function +// depends on the label of one of its arguments. Hence it is implemented as a +// custom function. +extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_label +__dfsw_dfsan_get_label(long data, dfsan_label data_label, + dfsan_label *ret_label) { + *ret_label = 0; + return data_label; +} + +SANITIZER_INTERFACE_ATTRIBUTE dfsan_label +dfsan_read_label(const void *addr, uptr size) { + if (size == 0) + return 0; + return __dfsan_union_load(shadow_for(addr), size); +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +const struct dfsan_label_info *dfsan_get_label_info(dfsan_label label) { + return &__dfsan_label_info[label]; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE int +dfsan_has_label(dfsan_label label, dfsan_label elem) { + if (label == elem) + return true; + const dfsan_label_info *info = dfsan_get_label_info(label); + if (info->l1 != 0) { + return dfsan_has_label(info->l1, elem) || dfsan_has_label(info->l2, elem); + } else { + return false; + } +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_label +dfsan_has_label_with_desc(dfsan_label label, const char *desc) { + const dfsan_label_info *info = dfsan_get_label_info(label); + if (info->l1 != 0) { + return dfsan_has_label_with_desc(info->l1, desc) || + dfsan_has_label_with_desc(info->l2, desc); + } else { + return internal_strcmp(desc, info->desc) == 0; + } +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr +dfsan_get_label_count(void) { + dfsan_label max_label_allocated = + atomic_load(&__dfsan_last_label, memory_order_relaxed); + + return static_cast(max_label_allocated); +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE void +dfsan_dump_labels(int fd) { + dfsan_label last_label = + atomic_load(&__dfsan_last_label, memory_order_relaxed); + + for (uptr l = 1; l <= last_label; ++l) { + char buf[64]; + internal_snprintf(buf, sizeof(buf), "%u %u %u ", l, + __dfsan_label_info[l].l1, __dfsan_label_info[l].l2); + WriteToFile(fd, buf, internal_strlen(buf)); + if (__dfsan_label_info[l].l1 == 0 && __dfsan_label_info[l].desc) { + WriteToFile(fd, __dfsan_label_info[l].desc, + internal_strlen(__dfsan_label_info[l].desc)); + } + WriteToFile(fd, "\n", 1); + } +} + +void Flags::SetDefaults() { +#define DFSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue; +#include "dfsan_flags.inc" +#undef DFSAN_FLAG +} + +static void RegisterDfsanFlags(FlagParser *parser, Flags *f) { +#define DFSAN_FLAG(Type, Name, DefaultValue, Description) \ + RegisterFlag(parser, #Name, Description, &f->Name); +#include "dfsan_flags.inc" +#undef DFSAN_FLAG +} + +static void InitializeFlags() { + SetCommonFlagsDefaults(); + flags().SetDefaults(); + + FlagParser parser; + RegisterCommonFlags(&parser); + RegisterDfsanFlags(&parser, &flags()); + parser.ParseStringFromEnv("DFSAN_OPTIONS"); + InitializeCommonFlags(); + if 
(Verbosity()) ReportUnrecognizedFlags(); + if (common_flags()->help) parser.PrintFlagDescriptions(); +} + +static void InitializePlatformEarly() { + AvoidCVE_2016_2143(); +#ifdef DFSAN_RUNTIME_VMA + __dfsan::vmaSize = + (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1); + if (__dfsan::vmaSize == 39 || __dfsan::vmaSize == 42 || + __dfsan::vmaSize == 48) { + __dfsan_shadow_ptr_mask = ShadowMask(); + } else { + Printf("FATAL: DataFlowSanitizer: unsupported VMA range\n"); + Printf("FATAL: Found %d - Supported 39, 42, and 48\n", __dfsan::vmaSize); + Die(); + } +#endif +} + +static void dfsan_fini() { + if (internal_strcmp(flags().dump_labels_at_exit, "") != 0) { + fd_t fd = OpenFile(flags().dump_labels_at_exit, WrOnly); + if (fd == kInvalidFd) { + Report("WARNING: DataFlowSanitizer: unable to open output file %s\n", + flags().dump_labels_at_exit); + return; + } + + Report("INFO: DataFlowSanitizer: dumping labels to %s\n", + flags().dump_labels_at_exit); + dfsan_dump_labels(fd); + CloseFile(fd); + } +} + +extern "C" void dfsan_flush() { + UnmapOrDie((void*)ShadowAddr(), UnusedAddr() - ShadowAddr()); + if (!MmapFixedNoReserve(ShadowAddr(), UnusedAddr() - ShadowAddr())) + Die(); +} + +static void dfsan_init(int argc, char **argv, char **envp) { + InitializeFlags(); + + ::InitializePlatformEarly(); + + if (!MmapFixedNoReserve(ShadowAddr(), UnusedAddr() - ShadowAddr())) + Die(); + + // Protect the region of memory we don't use, to preserve the one-to-one + // mapping from application to shadow memory. But if ASLR is disabled, Linux + // will load our executable in the middle of our unused region. This mostly + // works so long as the program doesn't use too much memory. We support this + // case by disabling memory protection when ASLR is disabled. + uptr init_addr = (uptr)&dfsan_init; + if (!(init_addr >= UnusedAddr() && init_addr < AppAddr())) + MmapFixedNoAccess(UnusedAddr(), AppAddr() - UnusedAddr()); + + InitializeInterceptors(); + + // Register the fini callback to run when the program terminates successfully + // or it is killed by the runtime. + Atexit(dfsan_fini); + AddDieCallback(dfsan_fini); + + __dfsan_label_info[kInitializingLabel].desc = ""; +} + +#if SANITIZER_CAN_USE_PREINIT_ARRAY +__attribute__((section(".preinit_array"), used)) +static void (*dfsan_init_ptr)(int, char **, char **) = dfsan_init; +#endif diff --git a/lib/dfsan/dfsan_custom.cc b/lib/dfsan/dfsan_custom.cc deleted file mode 100644 index dc7b81da4566..000000000000 --- a/lib/dfsan/dfsan_custom.cc +++ /dev/null @@ -1,1156 +0,0 @@ -//===-- dfsan.cc ----------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of DataFlowSanitizer. -// -// This file defines the custom functions listed in done_abilist.txt. 
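[Editor's illustration, not part of the patch] Before the custom wrappers below, a short hedged sketch of how a client program exercises the dfsan_ interface implemented above. The calls mirror sanitizer/dfsan_interface.h; the program itself is illustrative and assumes a -fsanitize=dataflow build.

  #include <assert.h>
  #include <sanitizer/dfsan_interface.h>

  int main(void) {
    int i = 1, j = 2;
    dfsan_label i_label = dfsan_create_label("i", 0);
    dfsan_label j_label = dfsan_create_label("j", 0);
    dfsan_set_label(i_label, &i, sizeof(i));
    dfsan_set_label(j_label, &j, sizeof(j));

    int k = i + j;  // the instrumentation unions the operand labels
    dfsan_label k_label = dfsan_get_label(k);
    assert(dfsan_has_label(k_label, i_label));
    assert(dfsan_has_label(k_label, j_label));
    return 0;
  }

Runtime behaviour is tuned through the DFSAN_OPTIONS environment variable parsed by InitializeFlags() above, for example DFSAN_OPTIONS=dump_labels_at_exit=/tmp/labels.txt to have dfsan_fini() write the label table out at exit (the file path here is only an example).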
-//===----------------------------------------------------------------------===// - -#include "sanitizer_common/sanitizer_common.h" -#include "sanitizer_common/sanitizer_internal_defs.h" -#include "sanitizer_common/sanitizer_linux.h" - -#include "dfsan/dfsan.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -using namespace __dfsan; - -#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...) \ - do { \ - if (f) \ - f(__VA_ARGS__); \ - } while (false) -#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...) \ -SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void f(__VA_ARGS__); - -extern "C" { -SANITIZER_INTERFACE_ATTRIBUTE int -__dfsw_stat(const char *path, struct stat *buf, dfsan_label path_label, - dfsan_label buf_label, dfsan_label *ret_label) { - int ret = stat(path, buf); - if (ret == 0) - dfsan_set_label(0, buf, sizeof(struct stat)); - *ret_label = 0; - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_fstat(int fd, struct stat *buf, - dfsan_label fd_label, - dfsan_label buf_label, - dfsan_label *ret_label) { - int ret = fstat(fd, buf); - if (ret == 0) - dfsan_set_label(0, buf, sizeof(struct stat)); - *ret_label = 0; - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strchr(const char *s, int c, - dfsan_label s_label, - dfsan_label c_label, - dfsan_label *ret_label) { - for (size_t i = 0;; ++i) { - if (s[i] == c || s[i] == 0) { - if (flags().strict_data_dependencies) { - *ret_label = s_label; - } else { - *ret_label = dfsan_union(dfsan_read_label(s, i + 1), - dfsan_union(s_label, c_label)); - } - return s[i] == 0 ? nullptr : const_cast(s+i); - } - } -} - -DECLARE_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_memcmp, uptr caller_pc, - const void *s1, const void *s2, size_t n, - dfsan_label s1_label, dfsan_label s2_label, - dfsan_label n_label) - -SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_memcmp(const void *s1, const void *s2, - size_t n, dfsan_label s1_label, - dfsan_label s2_label, - dfsan_label n_label, - dfsan_label *ret_label) { - CALL_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_memcmp, GET_CALLER_PC(), s1, s2, n, - s1_label, s2_label, n_label); - const char *cs1 = (const char *) s1, *cs2 = (const char *) s2; - for (size_t i = 0; i != n; ++i) { - if (cs1[i] != cs2[i]) { - if (flags().strict_data_dependencies) { - *ret_label = 0; - } else { - *ret_label = dfsan_union(dfsan_read_label(cs1, i + 1), - dfsan_read_label(cs2, i + 1)); - } - return cs1[i] - cs2[i]; - } - } - - if (flags().strict_data_dependencies) { - *ret_label = 0; - } else { - *ret_label = dfsan_union(dfsan_read_label(cs1, n), - dfsan_read_label(cs2, n)); - } - return 0; -} - -DECLARE_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_strcmp, uptr caller_pc, - const char *s1, const char *s2, - dfsan_label s1_label, dfsan_label s2_label) - -SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_strcmp(const char *s1, const char *s2, - dfsan_label s1_label, - dfsan_label s2_label, - dfsan_label *ret_label) { - CALL_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_strcmp, GET_CALLER_PC(), s1, s2, - s1_label, s2_label); - for (size_t i = 0;; ++i) { - if (s1[i] != s2[i] || s1[i] == 0 || s2[i] == 0) { - if (flags().strict_data_dependencies) { - *ret_label = 0; - } else { - *ret_label = dfsan_union(dfsan_read_label(s1, i + 1), - dfsan_read_label(s2, i + 1)); - } - return s1[i] - s2[i]; - } - } - return 0; -} - -SANITIZER_INTERFACE_ATTRIBUTE int -__dfsw_strcasecmp(const char *s1, const char *s2, 
dfsan_label s1_label, - dfsan_label s2_label, dfsan_label *ret_label) { - for (size_t i = 0;; ++i) { - if (tolower(s1[i]) != tolower(s2[i]) || s1[i] == 0 || s2[i] == 0) { - if (flags().strict_data_dependencies) { - *ret_label = 0; - } else { - *ret_label = dfsan_union(dfsan_read_label(s1, i + 1), - dfsan_read_label(s2, i + 1)); - } - return s1[i] - s2[i]; - } - } - return 0; -} - -DECLARE_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_strncmp, uptr caller_pc, - const char *s1, const char *s2, size_t n, - dfsan_label s1_label, dfsan_label s2_label, - dfsan_label n_label) - -SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_strncmp(const char *s1, const char *s2, - size_t n, dfsan_label s1_label, - dfsan_label s2_label, - dfsan_label n_label, - dfsan_label *ret_label) { - if (n == 0) { - *ret_label = 0; - return 0; - } - - CALL_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_strncmp, GET_CALLER_PC(), s1, s2, - n, s1_label, s2_label, n_label); - - for (size_t i = 0;; ++i) { - if (s1[i] != s2[i] || s1[i] == 0 || s2[i] == 0 || i == n - 1) { - if (flags().strict_data_dependencies) { - *ret_label = 0; - } else { - *ret_label = dfsan_union(dfsan_read_label(s1, i + 1), - dfsan_read_label(s2, i + 1)); - } - return s1[i] - s2[i]; - } - } - return 0; -} - -SANITIZER_INTERFACE_ATTRIBUTE int -__dfsw_strncasecmp(const char *s1, const char *s2, size_t n, - dfsan_label s1_label, dfsan_label s2_label, - dfsan_label n_label, dfsan_label *ret_label) { - if (n == 0) { - *ret_label = 0; - return 0; - } - - for (size_t i = 0;; ++i) { - if (tolower(s1[i]) != tolower(s2[i]) || s1[i] == 0 || s2[i] == 0 || - i == n - 1) { - if (flags().strict_data_dependencies) { - *ret_label = 0; - } else { - *ret_label = dfsan_union(dfsan_read_label(s1, i + 1), - dfsan_read_label(s2, i + 1)); - } - return s1[i] - s2[i]; - } - } - return 0; -} - -SANITIZER_INTERFACE_ATTRIBUTE void *__dfsw_calloc(size_t nmemb, size_t size, - dfsan_label nmemb_label, - dfsan_label size_label, - dfsan_label *ret_label) { - void *p = calloc(nmemb, size); - dfsan_set_label(0, p, nmemb * size); - *ret_label = 0; - return p; -} - -SANITIZER_INTERFACE_ATTRIBUTE size_t -__dfsw_strlen(const char *s, dfsan_label s_label, dfsan_label *ret_label) { - size_t ret = strlen(s); - if (flags().strict_data_dependencies) { - *ret_label = 0; - } else { - *ret_label = dfsan_read_label(s, ret + 1); - } - return ret; -} - - -static void *dfsan_memcpy(void *dest, const void *src, size_t n) { - dfsan_label *sdest = shadow_for(dest); - const dfsan_label *ssrc = shadow_for(src); - internal_memcpy((void *)sdest, (const void *)ssrc, n * sizeof(dfsan_label)); - return internal_memcpy(dest, src, n); -} - -static void dfsan_memset(void *s, int c, dfsan_label c_label, size_t n) { - internal_memset(s, c, n); - dfsan_set_label(c_label, s, n); -} - -SANITIZER_INTERFACE_ATTRIBUTE -void *__dfsw_memcpy(void *dest, const void *src, size_t n, - dfsan_label dest_label, dfsan_label src_label, - dfsan_label n_label, dfsan_label *ret_label) { - *ret_label = dest_label; - return dfsan_memcpy(dest, src, n); -} - -SANITIZER_INTERFACE_ATTRIBUTE -void *__dfsw_memset(void *s, int c, size_t n, - dfsan_label s_label, dfsan_label c_label, - dfsan_label n_label, dfsan_label *ret_label) { - dfsan_memset(s, c, c_label, n); - *ret_label = s_label; - return s; -} - -SANITIZER_INTERFACE_ATTRIBUTE char * -__dfsw_strdup(const char *s, dfsan_label s_label, dfsan_label *ret_label) { - size_t len = strlen(s); - void *p = malloc(len+1); - dfsan_memcpy(p, s, len+1); - *ret_label = 0; - return static_cast(p); -} - -SANITIZER_INTERFACE_ATTRIBUTE 
char * -__dfsw_strncpy(char *s1, const char *s2, size_t n, dfsan_label s1_label, - dfsan_label s2_label, dfsan_label n_label, - dfsan_label *ret_label) { - size_t len = strlen(s2); - if (len < n) { - dfsan_memcpy(s1, s2, len+1); - dfsan_memset(s1+len+1, 0, 0, n-len-1); - } else { - dfsan_memcpy(s1, s2, n); - } - - *ret_label = s1_label; - return s1; -} - -SANITIZER_INTERFACE_ATTRIBUTE ssize_t -__dfsw_pread(int fd, void *buf, size_t count, off_t offset, - dfsan_label fd_label, dfsan_label buf_label, - dfsan_label count_label, dfsan_label offset_label, - dfsan_label *ret_label) { - ssize_t ret = pread(fd, buf, count, offset); - if (ret > 0) - dfsan_set_label(0, buf, ret); - *ret_label = 0; - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE ssize_t -__dfsw_read(int fd, void *buf, size_t count, - dfsan_label fd_label, dfsan_label buf_label, - dfsan_label count_label, - dfsan_label *ret_label) { - ssize_t ret = read(fd, buf, count); - if (ret > 0) - dfsan_set_label(0, buf, ret); - *ret_label = 0; - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_clock_gettime(clockid_t clk_id, - struct timespec *tp, - dfsan_label clk_id_label, - dfsan_label tp_label, - dfsan_label *ret_label) { - int ret = clock_gettime(clk_id, tp); - if (ret == 0) - dfsan_set_label(0, tp, sizeof(struct timespec)); - *ret_label = 0; - return ret; -} - -static void unpoison(const void *ptr, uptr size) { - dfsan_set_label(0, const_cast(ptr), size); -} - -// dlopen() ultimately calls mmap() down inside the loader, which generally -// doesn't participate in dynamic symbol resolution. Therefore we won't -// intercept its calls to mmap, and we have to hook it here. -SANITIZER_INTERFACE_ATTRIBUTE void * -__dfsw_dlopen(const char *filename, int flag, dfsan_label filename_label, - dfsan_label flag_label, dfsan_label *ret_label) { - void *handle = dlopen(filename, flag); - link_map *map = GET_LINK_MAP_BY_DLOPEN_HANDLE(handle); - if (map) - ForEachMappedRegion(map, unpoison); - *ret_label = 0; - return handle; -} - -struct pthread_create_info { - void *(*start_routine_trampoline)(void *, void *, dfsan_label, dfsan_label *); - void *start_routine; - void *arg; -}; - -static void *pthread_create_cb(void *p) { - pthread_create_info pci(*(pthread_create_info *)p); - free(p); - dfsan_label ret_label; - return pci.start_routine_trampoline(pci.start_routine, pci.arg, 0, - &ret_label); -} - -SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_pthread_create( - pthread_t *thread, const pthread_attr_t *attr, - void *(*start_routine_trampoline)(void *, void *, dfsan_label, - dfsan_label *), - void *start_routine, void *arg, dfsan_label thread_label, - dfsan_label attr_label, dfsan_label start_routine_label, - dfsan_label arg_label, dfsan_label *ret_label) { - pthread_create_info *pci = - (pthread_create_info *)malloc(sizeof(pthread_create_info)); - pci->start_routine_trampoline = start_routine_trampoline; - pci->start_routine = start_routine; - pci->arg = arg; - int rv = pthread_create(thread, attr, pthread_create_cb, (void *)pci); - if (rv != 0) - free(pci); - *ret_label = 0; - return rv; -} - -struct dl_iterate_phdr_info { - int (*callback_trampoline)(void *callback, struct dl_phdr_info *info, - size_t size, void *data, dfsan_label info_label, - dfsan_label size_label, dfsan_label data_label, - dfsan_label *ret_label); - void *callback; - void *data; -}; - -int dl_iterate_phdr_cb(struct dl_phdr_info *info, size_t size, void *data) { - dl_iterate_phdr_info *dipi = (dl_iterate_phdr_info *)data; - dfsan_set_label(0, *info); - dfsan_set_label(0, 
const_cast(info->dlpi_name), - strlen(info->dlpi_name) + 1); - dfsan_set_label( - 0, const_cast(reinterpret_cast(info->dlpi_phdr)), - sizeof(*info->dlpi_phdr) * info->dlpi_phnum); - dfsan_label ret_label; - return dipi->callback_trampoline(dipi->callback, info, size, dipi->data, 0, 0, - 0, &ret_label); -} - -SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_dl_iterate_phdr( - int (*callback_trampoline)(void *callback, struct dl_phdr_info *info, - size_t size, void *data, dfsan_label info_label, - dfsan_label size_label, dfsan_label data_label, - dfsan_label *ret_label), - void *callback, void *data, dfsan_label callback_label, - dfsan_label data_label, dfsan_label *ret_label) { - dl_iterate_phdr_info dipi = { callback_trampoline, callback, data }; - *ret_label = 0; - return dl_iterate_phdr(dl_iterate_phdr_cb, &dipi); -} - -SANITIZER_INTERFACE_ATTRIBUTE -char *__dfsw_ctime_r(const time_t *timep, char *buf, dfsan_label timep_label, - dfsan_label buf_label, dfsan_label *ret_label) { - char *ret = ctime_r(timep, buf); - if (ret) { - dfsan_set_label(dfsan_read_label(timep, sizeof(time_t)), buf, - strlen(buf) + 1); - *ret_label = buf_label; - } else { - *ret_label = 0; - } - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE -char *__dfsw_fgets(char *s, int size, FILE *stream, dfsan_label s_label, - dfsan_label size_label, dfsan_label stream_label, - dfsan_label *ret_label) { - char *ret = fgets(s, size, stream); - if (ret) { - dfsan_set_label(0, ret, strlen(ret) + 1); - *ret_label = s_label; - } else { - *ret_label = 0; - } - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE -char *__dfsw_getcwd(char *buf, size_t size, dfsan_label buf_label, - dfsan_label size_label, dfsan_label *ret_label) { - char *ret = getcwd(buf, size); - if (ret) { - dfsan_set_label(0, ret, strlen(ret) + 1); - *ret_label = buf_label; - } else { - *ret_label = 0; - } - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE -char *__dfsw_get_current_dir_name(dfsan_label *ret_label) { - char *ret = get_current_dir_name(); - if (ret) { - dfsan_set_label(0, ret, strlen(ret) + 1); - } - *ret_label = 0; - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE -int __dfsw_gethostname(char *name, size_t len, dfsan_label name_label, - dfsan_label len_label, dfsan_label *ret_label) { - int ret = gethostname(name, len); - if (ret == 0) { - dfsan_set_label(0, name, strlen(name) + 1); - } - *ret_label = 0; - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE -int __dfsw_getrlimit(int resource, struct rlimit *rlim, - dfsan_label resource_label, dfsan_label rlim_label, - dfsan_label *ret_label) { - int ret = getrlimit(resource, rlim); - if (ret == 0) { - dfsan_set_label(0, rlim, sizeof(struct rlimit)); - } - *ret_label = 0; - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE -int __dfsw_getrusage(int who, struct rusage *usage, dfsan_label who_label, - dfsan_label usage_label, dfsan_label *ret_label) { - int ret = getrusage(who, usage); - if (ret == 0) { - dfsan_set_label(0, usage, sizeof(struct rusage)); - } - *ret_label = 0; - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE -char *__dfsw_strcpy(char *dest, const char *src, dfsan_label dst_label, - dfsan_label src_label, dfsan_label *ret_label) { - char *ret = strcpy(dest, src); - if (ret) { - internal_memcpy(shadow_for(dest), shadow_for(src), - sizeof(dfsan_label) * (strlen(src) + 1)); - } - *ret_label = dst_label; - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE -long int __dfsw_strtol(const char *nptr, char **endptr, int base, - dfsan_label nptr_label, dfsan_label endptr_label, - dfsan_label base_label, dfsan_label 
*ret_label) { - char *tmp_endptr; - long int ret = strtol(nptr, &tmp_endptr, base); - if (endptr) { - *endptr = tmp_endptr; - } - if (tmp_endptr > nptr) { - // If *tmp_endptr is '\0' include its label as well. - *ret_label = dfsan_union( - base_label, - dfsan_read_label(nptr, tmp_endptr - nptr + (*tmp_endptr ? 0 : 1))); - } else { - *ret_label = 0; - } - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE -double __dfsw_strtod(const char *nptr, char **endptr, - dfsan_label nptr_label, dfsan_label endptr_label, - dfsan_label *ret_label) { - char *tmp_endptr; - double ret = strtod(nptr, &tmp_endptr); - if (endptr) { - *endptr = tmp_endptr; - } - if (tmp_endptr > nptr) { - // If *tmp_endptr is '\0' include its label as well. - *ret_label = dfsan_read_label( - nptr, - tmp_endptr - nptr + (*tmp_endptr ? 0 : 1)); - } else { - *ret_label = 0; - } - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE -long long int __dfsw_strtoll(const char *nptr, char **endptr, int base, - dfsan_label nptr_label, dfsan_label endptr_label, - dfsan_label base_label, dfsan_label *ret_label) { - char *tmp_endptr; - long long int ret = strtoll(nptr, &tmp_endptr, base); - if (endptr) { - *endptr = tmp_endptr; - } - if (tmp_endptr > nptr) { - // If *tmp_endptr is '\0' include its label as well. - *ret_label = dfsan_union( - base_label, - dfsan_read_label(nptr, tmp_endptr - nptr + (*tmp_endptr ? 0 : 1))); - } else { - *ret_label = 0; - } - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE -unsigned long int __dfsw_strtoul(const char *nptr, char **endptr, int base, - dfsan_label nptr_label, dfsan_label endptr_label, - dfsan_label base_label, dfsan_label *ret_label) { - char *tmp_endptr; - unsigned long int ret = strtoul(nptr, &tmp_endptr, base); - if (endptr) { - *endptr = tmp_endptr; - } - if (tmp_endptr > nptr) { - // If *tmp_endptr is '\0' include its label as well. - *ret_label = dfsan_union( - base_label, - dfsan_read_label(nptr, tmp_endptr - nptr + (*tmp_endptr ? 0 : 1))); - } else { - *ret_label = 0; - } - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE -long long unsigned int __dfsw_strtoull(const char *nptr, char **endptr, - dfsan_label nptr_label, - int base, dfsan_label endptr_label, - dfsan_label base_label, - dfsan_label *ret_label) { - char *tmp_endptr; - long long unsigned int ret = strtoull(nptr, &tmp_endptr, base); - if (endptr) { - *endptr = tmp_endptr; - } - if (tmp_endptr > nptr) { - // If *tmp_endptr is '\0' include its label as well. - *ret_label = dfsan_union( - base_label, - dfsan_read_label(nptr, tmp_endptr - nptr + (*tmp_endptr ? 0 : 1))); - } else { - *ret_label = 0; - } - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE -time_t __dfsw_time(time_t *t, dfsan_label t_label, dfsan_label *ret_label) { - time_t ret = time(t); - if (ret != (time_t) -1 && t) { - dfsan_set_label(0, t, sizeof(time_t)); - } - *ret_label = 0; - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE -int __dfsw_inet_pton(int af, const char *src, void *dst, dfsan_label af_label, - dfsan_label src_label, dfsan_label dst_label, - dfsan_label *ret_label) { - int ret = inet_pton(af, src, dst); - if (ret == 1) { - dfsan_set_label(dfsan_read_label(src, strlen(src) + 1), dst, - af == AF_INET ? 
sizeof(struct in_addr) : sizeof(in6_addr)); - } - *ret_label = 0; - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE -struct tm *__dfsw_localtime_r(const time_t *timep, struct tm *result, - dfsan_label timep_label, dfsan_label result_label, - dfsan_label *ret_label) { - struct tm *ret = localtime_r(timep, result); - if (ret) { - dfsan_set_label(dfsan_read_label(timep, sizeof(time_t)), result, - sizeof(struct tm)); - *ret_label = result_label; - } else { - *ret_label = 0; - } - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE -int __dfsw_getpwuid_r(id_t uid, struct passwd *pwd, - char *buf, size_t buflen, struct passwd **result, - dfsan_label uid_label, dfsan_label pwd_label, - dfsan_label buf_label, dfsan_label buflen_label, - dfsan_label result_label, dfsan_label *ret_label) { - // Store the data in pwd, the strings referenced from pwd in buf, and the - // address of pwd in *result. On failure, NULL is stored in *result. - int ret = getpwuid_r(uid, pwd, buf, buflen, result); - if (ret == 0) { - dfsan_set_label(0, pwd, sizeof(struct passwd)); - dfsan_set_label(0, buf, strlen(buf) + 1); - } - *ret_label = 0; - dfsan_set_label(0, result, sizeof(struct passwd*)); - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE -int __dfsw_poll(struct pollfd *fds, nfds_t nfds, int timeout, - dfsan_label dfs_label, dfsan_label nfds_label, - dfsan_label timeout_label, dfsan_label *ret_label) { - int ret = poll(fds, nfds, timeout); - if (ret >= 0) { - for (; nfds > 0; --nfds) { - dfsan_set_label(0, &fds[nfds - 1].revents, sizeof(fds[nfds - 1].revents)); - } - } - *ret_label = 0; - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE -int __dfsw_select(int nfds, fd_set *readfds, fd_set *writefds, - fd_set *exceptfds, struct timeval *timeout, - dfsan_label nfds_label, dfsan_label readfds_label, - dfsan_label writefds_label, dfsan_label exceptfds_label, - dfsan_label timeout_label, dfsan_label *ret_label) { - int ret = select(nfds, readfds, writefds, exceptfds, timeout); - // Clear everything (also on error) since their content is either set or - // undefined. 
- if (readfds) { - dfsan_set_label(0, readfds, sizeof(fd_set)); - } - if (writefds) { - dfsan_set_label(0, writefds, sizeof(fd_set)); - } - if (exceptfds) { - dfsan_set_label(0, exceptfds, sizeof(fd_set)); - } - dfsan_set_label(0, timeout, sizeof(struct timeval)); - *ret_label = 0; - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE -int __dfsw_sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask, - dfsan_label pid_label, - dfsan_label cpusetsize_label, - dfsan_label mask_label, dfsan_label *ret_label) { - int ret = sched_getaffinity(pid, cpusetsize, mask); - if (ret == 0) { - dfsan_set_label(0, mask, cpusetsize); - } - *ret_label = 0; - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE -int __dfsw_sigemptyset(sigset_t *set, dfsan_label set_label, - dfsan_label *ret_label) { - int ret = sigemptyset(set); - dfsan_set_label(0, set, sizeof(sigset_t)); - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE -int __dfsw_sigaction(int signum, const struct sigaction *act, - struct sigaction *oldact, dfsan_label signum_label, - dfsan_label act_label, dfsan_label oldact_label, - dfsan_label *ret_label) { - int ret = sigaction(signum, act, oldact); - if (oldact) { - dfsan_set_label(0, oldact, sizeof(struct sigaction)); - } - *ret_label = 0; - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE -int __dfsw_gettimeofday(struct timeval *tv, struct timezone *tz, - dfsan_label tv_label, dfsan_label tz_label, - dfsan_label *ret_label) { - int ret = gettimeofday(tv, tz); - if (tv) { - dfsan_set_label(0, tv, sizeof(struct timeval)); - } - if (tz) { - dfsan_set_label(0, tz, sizeof(struct timezone)); - } - *ret_label = 0; - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE void *__dfsw_memchr(void *s, int c, size_t n, - dfsan_label s_label, - dfsan_label c_label, - dfsan_label n_label, - dfsan_label *ret_label) { - void *ret = memchr(s, c, n); - if (flags().strict_data_dependencies) { - *ret_label = ret ? s_label : 0; - } else { - size_t len = - ret ? reinterpret_cast(ret) - reinterpret_cast(s) + 1 - : n; - *ret_label = - dfsan_union(dfsan_read_label(s, len), dfsan_union(s_label, c_label)); - } - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strrchr(char *s, int c, - dfsan_label s_label, - dfsan_label c_label, - dfsan_label *ret_label) { - char *ret = strrchr(s, c); - if (flags().strict_data_dependencies) { - *ret_label = ret ? s_label : 0; - } else { - *ret_label = - dfsan_union(dfsan_read_label(s, strlen(s) + 1), - dfsan_union(s_label, c_label)); - } - - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strstr(char *haystack, char *needle, - dfsan_label haystack_label, - dfsan_label needle_label, - dfsan_label *ret_label) { - char *ret = strstr(haystack, needle); - if (flags().strict_data_dependencies) { - *ret_label = ret ? haystack_label : 0; - } else { - size_t len = ret ? ret + strlen(needle) - haystack : strlen(haystack) + 1; - *ret_label = - dfsan_union(dfsan_read_label(haystack, len), - dfsan_union(dfsan_read_label(needle, strlen(needle) + 1), - dfsan_union(haystack_label, needle_label))); - } - - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_nanosleep(const struct timespec *req, - struct timespec *rem, - dfsan_label req_label, - dfsan_label rem_label, - dfsan_label *ret_label) { - int ret = nanosleep(req, rem); - *ret_label = 0; - if (ret == -1) { - // Interrupted by a signal, rem is filled with the remaining time. 
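[Editor's illustration, not part of the patch] The string wrappers above (strchr, memcmp, strcmp, memchr, strrchr, strstr) all branch on flags().strict_data_dependencies: in strict mode the result label depends only on the operand labels, otherwise it also unions the labels of every byte the call actually examined. A hedged sketch of the observable difference, under the same instrumented-build assumption as the earlier example:

  #include <string.h>
  #include <sanitizer/dfsan_interface.h>

  int main(void) {
    char buf[] = "hello";
    dfsan_label l = dfsan_create_label("e", 0);
    dfsan_set_label(l, &buf[1], 1);      // taint only the 'e'

    char *p = strchr(buf, 'l');          // __dfsw_strchr scans bytes 0..2
    dfsan_label rl = dfsan_get_label((long)p);

    // strict_data_dependencies enabled: rl is just the label of the pointer
    //   argument, so it stays 0 here.
    // strict_data_dependencies=0 (via DFSAN_OPTIONS): rl also unions
    //   dfsan_read_label(buf, 3), so it would include l.
    (void)rl;
    return 0;
  }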
- dfsan_set_label(0, rem, sizeof(struct timespec)); - } - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE int -__dfsw_socketpair(int domain, int type, int protocol, int sv[2], - dfsan_label domain_label, dfsan_label type_label, - dfsan_label protocol_label, dfsan_label sv_label, - dfsan_label *ret_label) { - int ret = socketpair(domain, type, protocol, sv); - *ret_label = 0; - if (ret == 0) { - dfsan_set_label(0, sv, sizeof(*sv) * 2); - } - return ret; -} - -// Type of the trampoline function passed to the custom version of -// dfsan_set_write_callback. -typedef void (*write_trampoline_t)( - void *callback, - int fd, const void *buf, ssize_t count, - dfsan_label fd_label, dfsan_label buf_label, dfsan_label count_label); - -// Calls to dfsan_set_write_callback() set the values in this struct. -// Calls to the custom version of write() read (and invoke) them. -static struct { - write_trampoline_t write_callback_trampoline = nullptr; - void *write_callback = nullptr; -} write_callback_info; - -SANITIZER_INTERFACE_ATTRIBUTE void -__dfsw_dfsan_set_write_callback( - write_trampoline_t write_callback_trampoline, - void *write_callback, - dfsan_label write_callback_label, - dfsan_label *ret_label) { - write_callback_info.write_callback_trampoline = write_callback_trampoline; - write_callback_info.write_callback = write_callback; -} - -SANITIZER_INTERFACE_ATTRIBUTE int -__dfsw_write(int fd, const void *buf, size_t count, - dfsan_label fd_label, dfsan_label buf_label, - dfsan_label count_label, dfsan_label *ret_label) { - if (write_callback_info.write_callback) { - write_callback_info.write_callback_trampoline( - write_callback_info.write_callback, - fd, buf, count, - fd_label, buf_label, count_label); - } - - *ret_label = 0; - return write(fd, buf, count); -} -} // namespace __dfsan - -// Type used to extract a dfsan_label with va_arg() -typedef int dfsan_label_va; - -// Formats a chunk either a constant string or a single format directive (e.g., -// '%.3f'). -struct Formatter { - Formatter(char *str_, const char *fmt_, size_t size_) - : str(str_), str_off(0), size(size_), fmt_start(fmt_), fmt_cur(fmt_), - width(-1) {} - - int format() { - char *tmp_fmt = build_format_string(); - int retval = - snprintf(str + str_off, str_off < size ? size - str_off : 0, tmp_fmt, - 0 /* used only to avoid warnings */); - free(tmp_fmt); - return retval; - } - - template int format(T arg) { - char *tmp_fmt = build_format_string(); - int retval; - if (width >= 0) { - retval = snprintf(str + str_off, str_off < size ? size - str_off : 0, - tmp_fmt, width, arg); - } else { - retval = snprintf(str + str_off, str_off < size ? size - str_off : 0, - tmp_fmt, arg); - } - free(tmp_fmt); - return retval; - } - - char *build_format_string() { - size_t fmt_size = fmt_cur - fmt_start + 1; - char *new_fmt = (char *)malloc(fmt_size + 1); - assert(new_fmt); - internal_memcpy(new_fmt, fmt_start, fmt_size); - new_fmt[fmt_size] = '\0'; - return new_fmt; - } - - char *str_cur() { return str + str_off; } - - size_t num_written_bytes(int retval) { - if (retval < 0) { - return 0; - } - - size_t num_avail = str_off < size ? size - str_off : 0; - if (num_avail == 0) { - return 0; - } - - size_t num_written = retval; - // A return value of {v,}snprintf of size or more means that the output was - // truncated. 
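[Editor's illustration, not part of the patch] The write_callback_info struct and trampoline above back the public dfsan_set_write_callback() hook, which __dfsw_write consults before every write(). A hedged usage sketch; the callback prototype follows sanitizer/dfsan_interface.h, but treat the exact signature as an assumption of this example.

  #include <stdio.h>
  #include <sanitizer/dfsan_interface.h>

  // Invoked by __dfsw_write just before the underlying write(); here it only
  // reports whether the outgoing buffer carries any taint.
  static void log_labeled_write(int fd, const void *buf, size_t count) {
    if (dfsan_read_label(buf, count) != 0)
      fprintf(stderr, "labeled write: %zu bytes to fd %d\n", count, fd);
  }

  int main(void) {
    dfsan_set_write_callback(log_labeled_write);
    // ... rest of the program; writes of tainted data are now reported ...
    return 0;
  }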
- if (num_written >= num_avail) { - num_written -= num_avail; - } - - return num_written; - } - - char *str; - size_t str_off; - size_t size; - const char *fmt_start; - const char *fmt_cur; - int width; -}; - -// Formats the input and propagates the input labels to the output. The output -// is stored in 'str'. 'size' bounds the number of output bytes. 'format' and -// 'ap' are the format string and the list of arguments for formatting. Returns -// the return value vsnprintf would return. -// -// The function tokenizes the format string in chunks representing either a -// constant string or a single format directive (e.g., '%.3f') and formats each -// chunk independently into the output string. This approach allows to figure -// out which bytes of the output string depends on which argument and thus to -// propagate labels more precisely. -// -// WARNING: This implementation does not support conversion specifiers with -// positional arguments. -static int format_buffer(char *str, size_t size, const char *fmt, - dfsan_label *va_labels, dfsan_label *ret_label, - va_list ap) { - Formatter formatter(str, fmt, size); - - while (*formatter.fmt_cur) { - formatter.fmt_start = formatter.fmt_cur; - formatter.width = -1; - int retval = 0; - - if (*formatter.fmt_cur != '%') { - // Ordinary character. Consume all the characters until a '%' or the end - // of the string. - for (; *(formatter.fmt_cur + 1) && *(formatter.fmt_cur + 1) != '%'; - ++formatter.fmt_cur) {} - retval = formatter.format(); - dfsan_set_label(0, formatter.str_cur(), - formatter.num_written_bytes(retval)); - } else { - // Conversion directive. Consume all the characters until a conversion - // specifier or the end of the string. - bool end_fmt = false; - for (; *formatter.fmt_cur && !end_fmt; ) { - switch (*++formatter.fmt_cur) { - case 'd': - case 'i': - case 'o': - case 'u': - case 'x': - case 'X': - switch (*(formatter.fmt_cur - 1)) { - case 'h': - // Also covers the 'hh' case (since the size of the arg is still - // an int). 
- retval = formatter.format(va_arg(ap, int)); - break; - case 'l': - if (formatter.fmt_cur - formatter.fmt_start >= 2 && - *(formatter.fmt_cur - 2) == 'l') { - retval = formatter.format(va_arg(ap, long long int)); - } else { - retval = formatter.format(va_arg(ap, long int)); - } - break; - case 'q': - retval = formatter.format(va_arg(ap, long long int)); - break; - case 'j': - retval = formatter.format(va_arg(ap, intmax_t)); - break; - case 'z': - case 't': - retval = formatter.format(va_arg(ap, size_t)); - break; - default: - retval = formatter.format(va_arg(ap, int)); - } - dfsan_set_label(*va_labels++, formatter.str_cur(), - formatter.num_written_bytes(retval)); - end_fmt = true; - break; - - case 'a': - case 'A': - case 'e': - case 'E': - case 'f': - case 'F': - case 'g': - case 'G': - if (*(formatter.fmt_cur - 1) == 'L') { - retval = formatter.format(va_arg(ap, long double)); - } else { - retval = formatter.format(va_arg(ap, double)); - } - dfsan_set_label(*va_labels++, formatter.str_cur(), - formatter.num_written_bytes(retval)); - end_fmt = true; - break; - - case 'c': - retval = formatter.format(va_arg(ap, int)); - dfsan_set_label(*va_labels++, formatter.str_cur(), - formatter.num_written_bytes(retval)); - end_fmt = true; - break; - - case 's': { - char *arg = va_arg(ap, char *); - retval = formatter.format(arg); - va_labels++; - internal_memcpy(shadow_for(formatter.str_cur()), shadow_for(arg), - sizeof(dfsan_label) * - formatter.num_written_bytes(retval)); - end_fmt = true; - break; - } - - case 'p': - retval = formatter.format(va_arg(ap, void *)); - dfsan_set_label(*va_labels++, formatter.str_cur(), - formatter.num_written_bytes(retval)); - end_fmt = true; - break; - - case 'n': { - int *ptr = va_arg(ap, int *); - *ptr = (int)formatter.str_off; - va_labels++; - dfsan_set_label(0, ptr, sizeof(ptr)); - end_fmt = true; - break; - } - - case '%': - retval = formatter.format(); - dfsan_set_label(0, formatter.str_cur(), - formatter.num_written_bytes(retval)); - end_fmt = true; - break; - - case '*': - formatter.width = va_arg(ap, int); - va_labels++; - break; - - default: - break; - } - } - } - - if (retval < 0) { - return retval; - } - - formatter.fmt_cur++; - formatter.str_off += retval; - } - - *ret_label = 0; - - // Number of bytes written in total. - return formatter.str_off; -} - -extern "C" { -SANITIZER_INTERFACE_ATTRIBUTE -int __dfsw_sprintf(char *str, const char *format, dfsan_label str_label, - dfsan_label format_label, dfsan_label *va_labels, - dfsan_label *ret_label, ...) { - va_list ap; - va_start(ap, ret_label); - int ret = format_buffer(str, ~0ul, format, va_labels, ret_label, ap); - va_end(ap); - return ret; -} - -SANITIZER_INTERFACE_ATTRIBUTE -int __dfsw_snprintf(char *str, size_t size, const char *format, - dfsan_label str_label, dfsan_label size_label, - dfsan_label format_label, dfsan_label *va_labels, - dfsan_label *ret_label, ...) { - va_list ap; - va_start(ap, ret_label); - int ret = format_buffer(str, size, format, va_labels, ret_label, ap); - va_end(ap); - return ret; -} - -// Default empty implementations (weak). Users should redefine them. 
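[Editor's illustration, not part of the patch] Because format_buffer() above formats the string one chunk at a time, each output byte inherits the label of the argument that produced it, while literal format text stays unlabeled. A hedged sketch of the effect, under the same instrumented-build assumptions as the earlier examples:

  #include <assert.h>
  #include <stdio.h>
  #include <sanitizer/dfsan_interface.h>

  int main(void) {
    char out[16];
    int v = 42;
    dfsan_label lv = dfsan_create_label("v", 0);
    dfsan_set_label(lv, &v, sizeof(v));

    sprintf(out, "v=%d", v);                     // handled by __dfsw_sprintf

    assert(dfsan_read_label(out, 2) == 0);       // literal "v=" stays unlabeled
    assert(dfsan_read_label(out + 2, 2) == lv);  // digits from %d carry lv
    return 0;
  }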
-SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard, u32 *) {} -SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard_init, u32 *, - u32 *) {} -SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_pcs_init, void) {} -SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_indir, void) {} - -SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_cmp, void) {} -SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_cmp1, void) {} -SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_cmp2, void) {} -SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_cmp4, void) {} -SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_cmp8, void) {} -SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_const_cmp1, - void) {} -SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_const_cmp2, - void) {} -SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_const_cmp4, - void) {} -SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_const_cmp8, - void) {} -SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_switch, void) {} -} // extern "C" diff --git a/lib/dfsan/dfsan_custom.cpp b/lib/dfsan/dfsan_custom.cpp new file mode 100644 index 000000000000..84f0271b15e0 --- /dev/null +++ b/lib/dfsan/dfsan_custom.cpp @@ -0,0 +1,1156 @@ +//===-- dfsan.cpp ---------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of DataFlowSanitizer. +// +// This file defines the custom functions listed in done_abilist.txt. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_internal_defs.h" +#include "sanitizer_common/sanitizer_linux.h" + +#include "dfsan/dfsan.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace __dfsan; + +#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...) \ + do { \ + if (f) \ + f(__VA_ARGS__); \ + } while (false) +#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...) 
\ +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void f(__VA_ARGS__); + +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE int +__dfsw_stat(const char *path, struct stat *buf, dfsan_label path_label, + dfsan_label buf_label, dfsan_label *ret_label) { + int ret = stat(path, buf); + if (ret == 0) + dfsan_set_label(0, buf, sizeof(struct stat)); + *ret_label = 0; + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_fstat(int fd, struct stat *buf, + dfsan_label fd_label, + dfsan_label buf_label, + dfsan_label *ret_label) { + int ret = fstat(fd, buf); + if (ret == 0) + dfsan_set_label(0, buf, sizeof(struct stat)); + *ret_label = 0; + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strchr(const char *s, int c, + dfsan_label s_label, + dfsan_label c_label, + dfsan_label *ret_label) { + for (size_t i = 0;; ++i) { + if (s[i] == c || s[i] == 0) { + if (flags().strict_data_dependencies) { + *ret_label = s_label; + } else { + *ret_label = dfsan_union(dfsan_read_label(s, i + 1), + dfsan_union(s_label, c_label)); + } + return s[i] == 0 ? nullptr : const_cast(s+i); + } + } +} + +DECLARE_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_memcmp, uptr caller_pc, + const void *s1, const void *s2, size_t n, + dfsan_label s1_label, dfsan_label s2_label, + dfsan_label n_label) + +SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_memcmp(const void *s1, const void *s2, + size_t n, dfsan_label s1_label, + dfsan_label s2_label, + dfsan_label n_label, + dfsan_label *ret_label) { + CALL_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_memcmp, GET_CALLER_PC(), s1, s2, n, + s1_label, s2_label, n_label); + const char *cs1 = (const char *) s1, *cs2 = (const char *) s2; + for (size_t i = 0; i != n; ++i) { + if (cs1[i] != cs2[i]) { + if (flags().strict_data_dependencies) { + *ret_label = 0; + } else { + *ret_label = dfsan_union(dfsan_read_label(cs1, i + 1), + dfsan_read_label(cs2, i + 1)); + } + return cs1[i] - cs2[i]; + } + } + + if (flags().strict_data_dependencies) { + *ret_label = 0; + } else { + *ret_label = dfsan_union(dfsan_read_label(cs1, n), + dfsan_read_label(cs2, n)); + } + return 0; +} + +DECLARE_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_strcmp, uptr caller_pc, + const char *s1, const char *s2, + dfsan_label s1_label, dfsan_label s2_label) + +SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_strcmp(const char *s1, const char *s2, + dfsan_label s1_label, + dfsan_label s2_label, + dfsan_label *ret_label) { + CALL_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_strcmp, GET_CALLER_PC(), s1, s2, + s1_label, s2_label); + for (size_t i = 0;; ++i) { + if (s1[i] != s2[i] || s1[i] == 0 || s2[i] == 0) { + if (flags().strict_data_dependencies) { + *ret_label = 0; + } else { + *ret_label = dfsan_union(dfsan_read_label(s1, i + 1), + dfsan_read_label(s2, i + 1)); + } + return s1[i] - s2[i]; + } + } + return 0; +} + +SANITIZER_INTERFACE_ATTRIBUTE int +__dfsw_strcasecmp(const char *s1, const char *s2, dfsan_label s1_label, + dfsan_label s2_label, dfsan_label *ret_label) { + for (size_t i = 0;; ++i) { + if (tolower(s1[i]) != tolower(s2[i]) || s1[i] == 0 || s2[i] == 0) { + if (flags().strict_data_dependencies) { + *ret_label = 0; + } else { + *ret_label = dfsan_union(dfsan_read_label(s1, i + 1), + dfsan_read_label(s2, i + 1)); + } + return s1[i] - s2[i]; + } + } + return 0; +} + +DECLARE_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_strncmp, uptr caller_pc, + const char *s1, const char *s2, size_t n, + dfsan_label s1_label, dfsan_label s2_label, + dfsan_label n_label) + +SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_strncmp(const char *s1, const char *s2, + size_t n, 
dfsan_label s1_label, + dfsan_label s2_label, + dfsan_label n_label, + dfsan_label *ret_label) { + if (n == 0) { + *ret_label = 0; + return 0; + } + + CALL_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_strncmp, GET_CALLER_PC(), s1, s2, + n, s1_label, s2_label, n_label); + + for (size_t i = 0;; ++i) { + if (s1[i] != s2[i] || s1[i] == 0 || s2[i] == 0 || i == n - 1) { + if (flags().strict_data_dependencies) { + *ret_label = 0; + } else { + *ret_label = dfsan_union(dfsan_read_label(s1, i + 1), + dfsan_read_label(s2, i + 1)); + } + return s1[i] - s2[i]; + } + } + return 0; +} + +SANITIZER_INTERFACE_ATTRIBUTE int +__dfsw_strncasecmp(const char *s1, const char *s2, size_t n, + dfsan_label s1_label, dfsan_label s2_label, + dfsan_label n_label, dfsan_label *ret_label) { + if (n == 0) { + *ret_label = 0; + return 0; + } + + for (size_t i = 0;; ++i) { + if (tolower(s1[i]) != tolower(s2[i]) || s1[i] == 0 || s2[i] == 0 || + i == n - 1) { + if (flags().strict_data_dependencies) { + *ret_label = 0; + } else { + *ret_label = dfsan_union(dfsan_read_label(s1, i + 1), + dfsan_read_label(s2, i + 1)); + } + return s1[i] - s2[i]; + } + } + return 0; +} + +SANITIZER_INTERFACE_ATTRIBUTE void *__dfsw_calloc(size_t nmemb, size_t size, + dfsan_label nmemb_label, + dfsan_label size_label, + dfsan_label *ret_label) { + void *p = calloc(nmemb, size); + dfsan_set_label(0, p, nmemb * size); + *ret_label = 0; + return p; +} + +SANITIZER_INTERFACE_ATTRIBUTE size_t +__dfsw_strlen(const char *s, dfsan_label s_label, dfsan_label *ret_label) { + size_t ret = strlen(s); + if (flags().strict_data_dependencies) { + *ret_label = 0; + } else { + *ret_label = dfsan_read_label(s, ret + 1); + } + return ret; +} + + +static void *dfsan_memcpy(void *dest, const void *src, size_t n) { + dfsan_label *sdest = shadow_for(dest); + const dfsan_label *ssrc = shadow_for(src); + internal_memcpy((void *)sdest, (const void *)ssrc, n * sizeof(dfsan_label)); + return internal_memcpy(dest, src, n); +} + +static void dfsan_memset(void *s, int c, dfsan_label c_label, size_t n) { + internal_memset(s, c, n); + dfsan_set_label(c_label, s, n); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void *__dfsw_memcpy(void *dest, const void *src, size_t n, + dfsan_label dest_label, dfsan_label src_label, + dfsan_label n_label, dfsan_label *ret_label) { + *ret_label = dest_label; + return dfsan_memcpy(dest, src, n); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void *__dfsw_memset(void *s, int c, size_t n, + dfsan_label s_label, dfsan_label c_label, + dfsan_label n_label, dfsan_label *ret_label) { + dfsan_memset(s, c, c_label, n); + *ret_label = s_label; + return s; +} + +SANITIZER_INTERFACE_ATTRIBUTE char * +__dfsw_strdup(const char *s, dfsan_label s_label, dfsan_label *ret_label) { + size_t len = strlen(s); + void *p = malloc(len+1); + dfsan_memcpy(p, s, len+1); + *ret_label = 0; + return static_cast(p); +} + +SANITIZER_INTERFACE_ATTRIBUTE char * +__dfsw_strncpy(char *s1, const char *s2, size_t n, dfsan_label s1_label, + dfsan_label s2_label, dfsan_label n_label, + dfsan_label *ret_label) { + size_t len = strlen(s2); + if (len < n) { + dfsan_memcpy(s1, s2, len+1); + dfsan_memset(s1+len+1, 0, 0, n-len-1); + } else { + dfsan_memcpy(s1, s2, n); + } + + *ret_label = s1_label; + return s1; +} + +SANITIZER_INTERFACE_ATTRIBUTE ssize_t +__dfsw_pread(int fd, void *buf, size_t count, off_t offset, + dfsan_label fd_label, dfsan_label buf_label, + dfsan_label count_label, dfsan_label offset_label, + dfsan_label *ret_label) { + ssize_t ret = pread(fd, buf, count, offset); + if (ret > 0) + 
dfsan_set_label(0, buf, ret); + *ret_label = 0; + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE ssize_t +__dfsw_read(int fd, void *buf, size_t count, + dfsan_label fd_label, dfsan_label buf_label, + dfsan_label count_label, + dfsan_label *ret_label) { + ssize_t ret = read(fd, buf, count); + if (ret > 0) + dfsan_set_label(0, buf, ret); + *ret_label = 0; + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_clock_gettime(clockid_t clk_id, + struct timespec *tp, + dfsan_label clk_id_label, + dfsan_label tp_label, + dfsan_label *ret_label) { + int ret = clock_gettime(clk_id, tp); + if (ret == 0) + dfsan_set_label(0, tp, sizeof(struct timespec)); + *ret_label = 0; + return ret; +} + +static void unpoison(const void *ptr, uptr size) { + dfsan_set_label(0, const_cast(ptr), size); +} + +// dlopen() ultimately calls mmap() down inside the loader, which generally +// doesn't participate in dynamic symbol resolution. Therefore we won't +// intercept its calls to mmap, and we have to hook it here. +SANITIZER_INTERFACE_ATTRIBUTE void * +__dfsw_dlopen(const char *filename, int flag, dfsan_label filename_label, + dfsan_label flag_label, dfsan_label *ret_label) { + void *handle = dlopen(filename, flag); + link_map *map = GET_LINK_MAP_BY_DLOPEN_HANDLE(handle); + if (map) + ForEachMappedRegion(map, unpoison); + *ret_label = 0; + return handle; +} + +struct pthread_create_info { + void *(*start_routine_trampoline)(void *, void *, dfsan_label, dfsan_label *); + void *start_routine; + void *arg; +}; + +static void *pthread_create_cb(void *p) { + pthread_create_info pci(*(pthread_create_info *)p); + free(p); + dfsan_label ret_label; + return pci.start_routine_trampoline(pci.start_routine, pci.arg, 0, + &ret_label); +} + +SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_pthread_create( + pthread_t *thread, const pthread_attr_t *attr, + void *(*start_routine_trampoline)(void *, void *, dfsan_label, + dfsan_label *), + void *start_routine, void *arg, dfsan_label thread_label, + dfsan_label attr_label, dfsan_label start_routine_label, + dfsan_label arg_label, dfsan_label *ret_label) { + pthread_create_info *pci = + (pthread_create_info *)malloc(sizeof(pthread_create_info)); + pci->start_routine_trampoline = start_routine_trampoline; + pci->start_routine = start_routine; + pci->arg = arg; + int rv = pthread_create(thread, attr, pthread_create_cb, (void *)pci); + if (rv != 0) + free(pci); + *ret_label = 0; + return rv; +} + +struct dl_iterate_phdr_info { + int (*callback_trampoline)(void *callback, struct dl_phdr_info *info, + size_t size, void *data, dfsan_label info_label, + dfsan_label size_label, dfsan_label data_label, + dfsan_label *ret_label); + void *callback; + void *data; +}; + +int dl_iterate_phdr_cb(struct dl_phdr_info *info, size_t size, void *data) { + dl_iterate_phdr_info *dipi = (dl_iterate_phdr_info *)data; + dfsan_set_label(0, *info); + dfsan_set_label(0, const_cast(info->dlpi_name), + strlen(info->dlpi_name) + 1); + dfsan_set_label( + 0, const_cast(reinterpret_cast(info->dlpi_phdr)), + sizeof(*info->dlpi_phdr) * info->dlpi_phnum); + dfsan_label ret_label; + return dipi->callback_trampoline(dipi->callback, info, size, dipi->data, 0, 0, + 0, &ret_label); +} + +SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_dl_iterate_phdr( + int (*callback_trampoline)(void *callback, struct dl_phdr_info *info, + size_t size, void *data, dfsan_label info_label, + dfsan_label size_label, dfsan_label data_label, + dfsan_label *ret_label), + void *callback, void *data, dfsan_label callback_label, + dfsan_label data_label, 
dfsan_label *ret_label) { + dl_iterate_phdr_info dipi = { callback_trampoline, callback, data }; + *ret_label = 0; + return dl_iterate_phdr(dl_iterate_phdr_cb, &dipi); +} + +SANITIZER_INTERFACE_ATTRIBUTE +char *__dfsw_ctime_r(const time_t *timep, char *buf, dfsan_label timep_label, + dfsan_label buf_label, dfsan_label *ret_label) { + char *ret = ctime_r(timep, buf); + if (ret) { + dfsan_set_label(dfsan_read_label(timep, sizeof(time_t)), buf, + strlen(buf) + 1); + *ret_label = buf_label; + } else { + *ret_label = 0; + } + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE +char *__dfsw_fgets(char *s, int size, FILE *stream, dfsan_label s_label, + dfsan_label size_label, dfsan_label stream_label, + dfsan_label *ret_label) { + char *ret = fgets(s, size, stream); + if (ret) { + dfsan_set_label(0, ret, strlen(ret) + 1); + *ret_label = s_label; + } else { + *ret_label = 0; + } + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE +char *__dfsw_getcwd(char *buf, size_t size, dfsan_label buf_label, + dfsan_label size_label, dfsan_label *ret_label) { + char *ret = getcwd(buf, size); + if (ret) { + dfsan_set_label(0, ret, strlen(ret) + 1); + *ret_label = buf_label; + } else { + *ret_label = 0; + } + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE +char *__dfsw_get_current_dir_name(dfsan_label *ret_label) { + char *ret = get_current_dir_name(); + if (ret) { + dfsan_set_label(0, ret, strlen(ret) + 1); + } + *ret_label = 0; + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE +int __dfsw_gethostname(char *name, size_t len, dfsan_label name_label, + dfsan_label len_label, dfsan_label *ret_label) { + int ret = gethostname(name, len); + if (ret == 0) { + dfsan_set_label(0, name, strlen(name) + 1); + } + *ret_label = 0; + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE +int __dfsw_getrlimit(int resource, struct rlimit *rlim, + dfsan_label resource_label, dfsan_label rlim_label, + dfsan_label *ret_label) { + int ret = getrlimit(resource, rlim); + if (ret == 0) { + dfsan_set_label(0, rlim, sizeof(struct rlimit)); + } + *ret_label = 0; + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE +int __dfsw_getrusage(int who, struct rusage *usage, dfsan_label who_label, + dfsan_label usage_label, dfsan_label *ret_label) { + int ret = getrusage(who, usage); + if (ret == 0) { + dfsan_set_label(0, usage, sizeof(struct rusage)); + } + *ret_label = 0; + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE +char *__dfsw_strcpy(char *dest, const char *src, dfsan_label dst_label, + dfsan_label src_label, dfsan_label *ret_label) { + char *ret = strcpy(dest, src); // NOLINT + if (ret) { + internal_memcpy(shadow_for(dest), shadow_for(src), + sizeof(dfsan_label) * (strlen(src) + 1)); + } + *ret_label = dst_label; + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE +long int __dfsw_strtol(const char *nptr, char **endptr, int base, + dfsan_label nptr_label, dfsan_label endptr_label, + dfsan_label base_label, dfsan_label *ret_label) { + char *tmp_endptr; + long int ret = strtol(nptr, &tmp_endptr, base); + if (endptr) { + *endptr = tmp_endptr; + } + if (tmp_endptr > nptr) { + // If *tmp_endptr is '\0' include its label as well. + *ret_label = dfsan_union( + base_label, + dfsan_read_label(nptr, tmp_endptr - nptr + (*tmp_endptr ? 
0 : 1))); + } else { + *ret_label = 0; + } + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE +double __dfsw_strtod(const char *nptr, char **endptr, + dfsan_label nptr_label, dfsan_label endptr_label, + dfsan_label *ret_label) { + char *tmp_endptr; + double ret = strtod(nptr, &tmp_endptr); + if (endptr) { + *endptr = tmp_endptr; + } + if (tmp_endptr > nptr) { + // If *tmp_endptr is '\0' include its label as well. + *ret_label = dfsan_read_label( + nptr, + tmp_endptr - nptr + (*tmp_endptr ? 0 : 1)); + } else { + *ret_label = 0; + } + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE +long long int __dfsw_strtoll(const char *nptr, char **endptr, int base, + dfsan_label nptr_label, dfsan_label endptr_label, + dfsan_label base_label, dfsan_label *ret_label) { + char *tmp_endptr; + long long int ret = strtoll(nptr, &tmp_endptr, base); + if (endptr) { + *endptr = tmp_endptr; + } + if (tmp_endptr > nptr) { + // If *tmp_endptr is '\0' include its label as well. + *ret_label = dfsan_union( + base_label, + dfsan_read_label(nptr, tmp_endptr - nptr + (*tmp_endptr ? 0 : 1))); + } else { + *ret_label = 0; + } + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE +unsigned long int __dfsw_strtoul(const char *nptr, char **endptr, int base, + dfsan_label nptr_label, dfsan_label endptr_label, + dfsan_label base_label, dfsan_label *ret_label) { + char *tmp_endptr; + unsigned long int ret = strtoul(nptr, &tmp_endptr, base); + if (endptr) { + *endptr = tmp_endptr; + } + if (tmp_endptr > nptr) { + // If *tmp_endptr is '\0' include its label as well. + *ret_label = dfsan_union( + base_label, + dfsan_read_label(nptr, tmp_endptr - nptr + (*tmp_endptr ? 0 : 1))); + } else { + *ret_label = 0; + } + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE +long long unsigned int __dfsw_strtoull(const char *nptr, char **endptr, + dfsan_label nptr_label, + int base, dfsan_label endptr_label, + dfsan_label base_label, + dfsan_label *ret_label) { + char *tmp_endptr; + long long unsigned int ret = strtoull(nptr, &tmp_endptr, base); + if (endptr) { + *endptr = tmp_endptr; + } + if (tmp_endptr > nptr) { + // If *tmp_endptr is '\0' include its label as well. + *ret_label = dfsan_union( + base_label, + dfsan_read_label(nptr, tmp_endptr - nptr + (*tmp_endptr ? 0 : 1))); + } else { + *ret_label = 0; + } + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE +time_t __dfsw_time(time_t *t, dfsan_label t_label, dfsan_label *ret_label) { + time_t ret = time(t); + if (ret != (time_t) -1 && t) { + dfsan_set_label(0, t, sizeof(time_t)); + } + *ret_label = 0; + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE +int __dfsw_inet_pton(int af, const char *src, void *dst, dfsan_label af_label, + dfsan_label src_label, dfsan_label dst_label, + dfsan_label *ret_label) { + int ret = inet_pton(af, src, dst); + if (ret == 1) { + dfsan_set_label(dfsan_read_label(src, strlen(src) + 1), dst, + af == AF_INET ? 
sizeof(struct in_addr) : sizeof(in6_addr)); + } + *ret_label = 0; + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE +struct tm *__dfsw_localtime_r(const time_t *timep, struct tm *result, + dfsan_label timep_label, dfsan_label result_label, + dfsan_label *ret_label) { + struct tm *ret = localtime_r(timep, result); + if (ret) { + dfsan_set_label(dfsan_read_label(timep, sizeof(time_t)), result, + sizeof(struct tm)); + *ret_label = result_label; + } else { + *ret_label = 0; + } + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE +int __dfsw_getpwuid_r(id_t uid, struct passwd *pwd, + char *buf, size_t buflen, struct passwd **result, + dfsan_label uid_label, dfsan_label pwd_label, + dfsan_label buf_label, dfsan_label buflen_label, + dfsan_label result_label, dfsan_label *ret_label) { + // Store the data in pwd, the strings referenced from pwd in buf, and the + // address of pwd in *result. On failure, NULL is stored in *result. + int ret = getpwuid_r(uid, pwd, buf, buflen, result); + if (ret == 0) { + dfsan_set_label(0, pwd, sizeof(struct passwd)); + dfsan_set_label(0, buf, strlen(buf) + 1); + } + *ret_label = 0; + dfsan_set_label(0, result, sizeof(struct passwd*)); + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE +int __dfsw_poll(struct pollfd *fds, nfds_t nfds, int timeout, + dfsan_label dfs_label, dfsan_label nfds_label, + dfsan_label timeout_label, dfsan_label *ret_label) { + int ret = poll(fds, nfds, timeout); + if (ret >= 0) { + for (; nfds > 0; --nfds) { + dfsan_set_label(0, &fds[nfds - 1].revents, sizeof(fds[nfds - 1].revents)); + } + } + *ret_label = 0; + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE +int __dfsw_select(int nfds, fd_set *readfds, fd_set *writefds, + fd_set *exceptfds, struct timeval *timeout, + dfsan_label nfds_label, dfsan_label readfds_label, + dfsan_label writefds_label, dfsan_label exceptfds_label, + dfsan_label timeout_label, dfsan_label *ret_label) { + int ret = select(nfds, readfds, writefds, exceptfds, timeout); + // Clear everything (also on error) since their content is either set or + // undefined. 
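The custom wrappers above and below follow one convention throughout: bytes that the kernel or libc writes into caller-supplied buffers (stat results, read() data, timespec and fd_set contents) are reset to label 0, while string and memory routines propagate the labels of the bytes they actually examine, with the strict_data_dependencies flag deciding whether return values inherit those labels. A minimal usage sketch of the public interface from sanitizer/dfsan_interface.h, assuming a Linux x86_64 build with clang++ -fsanitize=dataflow, illustrates the effect (the program and its labels are invented for the example):

    #include <sanitizer/dfsan_interface.h>
    #include <cassert>
    #include <cstring>

    int main() {
      char buf[16] = "hello";
      dfsan_label lbl = dfsan_create_label("input", nullptr);
      dfsan_set_label(lbl, buf, sizeof(buf));

      // strlen() is routed to __dfsw_strlen: with strict_data_dependencies
      // enabled the returned length carries no label, otherwise it carries
      // the label of the bytes that were read; the assertion covers both.
      size_t len = strlen(buf);
      assert(dfsan_get_label(len) == 0 ||
             dfsan_has_label(dfsan_get_label(len), lbl));

      // memcpy() is routed to __dfsw_memcpy: the destination's shadow is
      // copied from the source, so the labels travel with the data.
      char copy[16];
      memcpy(copy, buf, sizeof(buf));
      assert(dfsan_has_label(dfsan_read_label(copy, sizeof(copy)), lbl));
      return 0;
    }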
+ if (readfds) { + dfsan_set_label(0, readfds, sizeof(fd_set)); + } + if (writefds) { + dfsan_set_label(0, writefds, sizeof(fd_set)); + } + if (exceptfds) { + dfsan_set_label(0, exceptfds, sizeof(fd_set)); + } + dfsan_set_label(0, timeout, sizeof(struct timeval)); + *ret_label = 0; + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE +int __dfsw_sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask, + dfsan_label pid_label, + dfsan_label cpusetsize_label, + dfsan_label mask_label, dfsan_label *ret_label) { + int ret = sched_getaffinity(pid, cpusetsize, mask); + if (ret == 0) { + dfsan_set_label(0, mask, cpusetsize); + } + *ret_label = 0; + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE +int __dfsw_sigemptyset(sigset_t *set, dfsan_label set_label, + dfsan_label *ret_label) { + int ret = sigemptyset(set); + dfsan_set_label(0, set, sizeof(sigset_t)); + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE +int __dfsw_sigaction(int signum, const struct sigaction *act, + struct sigaction *oldact, dfsan_label signum_label, + dfsan_label act_label, dfsan_label oldact_label, + dfsan_label *ret_label) { + int ret = sigaction(signum, act, oldact); + if (oldact) { + dfsan_set_label(0, oldact, sizeof(struct sigaction)); + } + *ret_label = 0; + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE +int __dfsw_gettimeofday(struct timeval *tv, struct timezone *tz, + dfsan_label tv_label, dfsan_label tz_label, + dfsan_label *ret_label) { + int ret = gettimeofday(tv, tz); + if (tv) { + dfsan_set_label(0, tv, sizeof(struct timeval)); + } + if (tz) { + dfsan_set_label(0, tz, sizeof(struct timezone)); + } + *ret_label = 0; + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE void *__dfsw_memchr(void *s, int c, size_t n, + dfsan_label s_label, + dfsan_label c_label, + dfsan_label n_label, + dfsan_label *ret_label) { + void *ret = memchr(s, c, n); + if (flags().strict_data_dependencies) { + *ret_label = ret ? s_label : 0; + } else { + size_t len = + ret ? reinterpret_cast(ret) - reinterpret_cast(s) + 1 + : n; + *ret_label = + dfsan_union(dfsan_read_label(s, len), dfsan_union(s_label, c_label)); + } + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strrchr(char *s, int c, + dfsan_label s_label, + dfsan_label c_label, + dfsan_label *ret_label) { + char *ret = strrchr(s, c); + if (flags().strict_data_dependencies) { + *ret_label = ret ? s_label : 0; + } else { + *ret_label = + dfsan_union(dfsan_read_label(s, strlen(s) + 1), + dfsan_union(s_label, c_label)); + } + + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strstr(char *haystack, char *needle, + dfsan_label haystack_label, + dfsan_label needle_label, + dfsan_label *ret_label) { + char *ret = strstr(haystack, needle); + if (flags().strict_data_dependencies) { + *ret_label = ret ? haystack_label : 0; + } else { + size_t len = ret ? ret + strlen(needle) - haystack : strlen(haystack) + 1; + *ret_label = + dfsan_union(dfsan_read_label(haystack, len), + dfsan_union(dfsan_read_label(needle, strlen(needle) + 1), + dfsan_union(haystack_label, needle_label))); + } + + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_nanosleep(const struct timespec *req, + struct timespec *rem, + dfsan_label req_label, + dfsan_label rem_label, + dfsan_label *ret_label) { + int ret = nanosleep(req, rem); + *ret_label = 0; + if (ret == -1) { + // Interrupted by a signal, rem is filled with the remaining time. 
+ dfsan_set_label(0, rem, sizeof(struct timespec)); + } + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE int +__dfsw_socketpair(int domain, int type, int protocol, int sv[2], + dfsan_label domain_label, dfsan_label type_label, + dfsan_label protocol_label, dfsan_label sv_label, + dfsan_label *ret_label) { + int ret = socketpair(domain, type, protocol, sv); + *ret_label = 0; + if (ret == 0) { + dfsan_set_label(0, sv, sizeof(*sv) * 2); + } + return ret; +} + +// Type of the trampoline function passed to the custom version of +// dfsan_set_write_callback. +typedef void (*write_trampoline_t)( + void *callback, + int fd, const void *buf, ssize_t count, + dfsan_label fd_label, dfsan_label buf_label, dfsan_label count_label); + +// Calls to dfsan_set_write_callback() set the values in this struct. +// Calls to the custom version of write() read (and invoke) them. +static struct { + write_trampoline_t write_callback_trampoline = nullptr; + void *write_callback = nullptr; +} write_callback_info; + +SANITIZER_INTERFACE_ATTRIBUTE void +__dfsw_dfsan_set_write_callback( + write_trampoline_t write_callback_trampoline, + void *write_callback, + dfsan_label write_callback_label, + dfsan_label *ret_label) { + write_callback_info.write_callback_trampoline = write_callback_trampoline; + write_callback_info.write_callback = write_callback; +} + +SANITIZER_INTERFACE_ATTRIBUTE int +__dfsw_write(int fd, const void *buf, size_t count, + dfsan_label fd_label, dfsan_label buf_label, + dfsan_label count_label, dfsan_label *ret_label) { + if (write_callback_info.write_callback) { + write_callback_info.write_callback_trampoline( + write_callback_info.write_callback, + fd, buf, count, + fd_label, buf_label, count_label); + } + + *ret_label = 0; + return write(fd, buf, count); +} +} // namespace __dfsan + +// Type used to extract a dfsan_label with va_arg() +typedef int dfsan_label_va; + +// Formats a chunk either a constant string or a single format directive (e.g., +// '%.3f'). +struct Formatter { + Formatter(char *str_, const char *fmt_, size_t size_) + : str(str_), str_off(0), size(size_), fmt_start(fmt_), fmt_cur(fmt_), + width(-1) {} + + int format() { + char *tmp_fmt = build_format_string(); + int retval = + snprintf(str + str_off, str_off < size ? size - str_off : 0, tmp_fmt, + 0 /* used only to avoid warnings */); + free(tmp_fmt); + return retval; + } + + template int format(T arg) { + char *tmp_fmt = build_format_string(); + int retval; + if (width >= 0) { + retval = snprintf(str + str_off, str_off < size ? size - str_off : 0, + tmp_fmt, width, arg); + } else { + retval = snprintf(str + str_off, str_off < size ? size - str_off : 0, + tmp_fmt, arg); + } + free(tmp_fmt); + return retval; + } + + char *build_format_string() { + size_t fmt_size = fmt_cur - fmt_start + 1; + char *new_fmt = (char *)malloc(fmt_size + 1); + assert(new_fmt); + internal_memcpy(new_fmt, fmt_start, fmt_size); + new_fmt[fmt_size] = '\0'; + return new_fmt; + } + + char *str_cur() { return str + str_off; } + + size_t num_written_bytes(int retval) { + if (retval < 0) { + return 0; + } + + size_t num_avail = str_off < size ? size - str_off : 0; + if (num_avail == 0) { + return 0; + } + + size_t num_written = retval; + // A return value of {v,}snprintf of size or more means that the output was + // truncated. 
+ if (num_written >= num_avail) { + num_written -= num_avail; + } + + return num_written; + } + + char *str; + size_t str_off; + size_t size; + const char *fmt_start; + const char *fmt_cur; + int width; +}; + +// Formats the input and propagates the input labels to the output. The output +// is stored in 'str'. 'size' bounds the number of output bytes. 'format' and +// 'ap' are the format string and the list of arguments for formatting. Returns +// the return value vsnprintf would return. +// +// The function tokenizes the format string in chunks representing either a +// constant string or a single format directive (e.g., '%.3f') and formats each +// chunk independently into the output string. This approach allows to figure +// out which bytes of the output string depends on which argument and thus to +// propagate labels more precisely. +// +// WARNING: This implementation does not support conversion specifiers with +// positional arguments. +static int format_buffer(char *str, size_t size, const char *fmt, + dfsan_label *va_labels, dfsan_label *ret_label, + va_list ap) { + Formatter formatter(str, fmt, size); + + while (*formatter.fmt_cur) { + formatter.fmt_start = formatter.fmt_cur; + formatter.width = -1; + int retval = 0; + + if (*formatter.fmt_cur != '%') { + // Ordinary character. Consume all the characters until a '%' or the end + // of the string. + for (; *(formatter.fmt_cur + 1) && *(formatter.fmt_cur + 1) != '%'; + ++formatter.fmt_cur) {} + retval = formatter.format(); + dfsan_set_label(0, formatter.str_cur(), + formatter.num_written_bytes(retval)); + } else { + // Conversion directive. Consume all the characters until a conversion + // specifier or the end of the string. + bool end_fmt = false; + for (; *formatter.fmt_cur && !end_fmt; ) { + switch (*++formatter.fmt_cur) { + case 'd': + case 'i': + case 'o': + case 'u': + case 'x': + case 'X': + switch (*(formatter.fmt_cur - 1)) { + case 'h': + // Also covers the 'hh' case (since the size of the arg is still + // an int). 
+ retval = formatter.format(va_arg(ap, int)); + break; + case 'l': + if (formatter.fmt_cur - formatter.fmt_start >= 2 && + *(formatter.fmt_cur - 2) == 'l') { + retval = formatter.format(va_arg(ap, long long int)); + } else { + retval = formatter.format(va_arg(ap, long int)); + } + break; + case 'q': + retval = formatter.format(va_arg(ap, long long int)); + break; + case 'j': + retval = formatter.format(va_arg(ap, intmax_t)); + break; + case 'z': + case 't': + retval = formatter.format(va_arg(ap, size_t)); + break; + default: + retval = formatter.format(va_arg(ap, int)); + } + dfsan_set_label(*va_labels++, formatter.str_cur(), + formatter.num_written_bytes(retval)); + end_fmt = true; + break; + + case 'a': + case 'A': + case 'e': + case 'E': + case 'f': + case 'F': + case 'g': + case 'G': + if (*(formatter.fmt_cur - 1) == 'L') { + retval = formatter.format(va_arg(ap, long double)); + } else { + retval = formatter.format(va_arg(ap, double)); + } + dfsan_set_label(*va_labels++, formatter.str_cur(), + formatter.num_written_bytes(retval)); + end_fmt = true; + break; + + case 'c': + retval = formatter.format(va_arg(ap, int)); + dfsan_set_label(*va_labels++, formatter.str_cur(), + formatter.num_written_bytes(retval)); + end_fmt = true; + break; + + case 's': { + char *arg = va_arg(ap, char *); + retval = formatter.format(arg); + va_labels++; + internal_memcpy(shadow_for(formatter.str_cur()), shadow_for(arg), + sizeof(dfsan_label) * + formatter.num_written_bytes(retval)); + end_fmt = true; + break; + } + + case 'p': + retval = formatter.format(va_arg(ap, void *)); + dfsan_set_label(*va_labels++, formatter.str_cur(), + formatter.num_written_bytes(retval)); + end_fmt = true; + break; + + case 'n': { + int *ptr = va_arg(ap, int *); + *ptr = (int)formatter.str_off; + va_labels++; + dfsan_set_label(0, ptr, sizeof(ptr)); + end_fmt = true; + break; + } + + case '%': + retval = formatter.format(); + dfsan_set_label(0, formatter.str_cur(), + formatter.num_written_bytes(retval)); + end_fmt = true; + break; + + case '*': + formatter.width = va_arg(ap, int); + va_labels++; + break; + + default: + break; + } + } + } + + if (retval < 0) { + return retval; + } + + formatter.fmt_cur++; + formatter.str_off += retval; + } + + *ret_label = 0; + + // Number of bytes written in total. + return formatter.str_off; +} + +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +int __dfsw_sprintf(char *str, const char *format, dfsan_label str_label, + dfsan_label format_label, dfsan_label *va_labels, + dfsan_label *ret_label, ...) { + va_list ap; + va_start(ap, ret_label); + int ret = format_buffer(str, ~0ul, format, va_labels, ret_label, ap); + va_end(ap); + return ret; +} + +SANITIZER_INTERFACE_ATTRIBUTE +int __dfsw_snprintf(char *str, size_t size, const char *format, + dfsan_label str_label, dfsan_label size_label, + dfsan_label format_label, dfsan_label *va_labels, + dfsan_label *ret_label, ...) { + va_list ap; + va_start(ap, ret_label); + int ret = format_buffer(str, size, format, va_labels, ret_label, ap); + va_end(ap); + return ret; +} + +// Default empty implementations (weak). Users should redefine them. 
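Because format_buffer() labels each output chunk from the label of the argument that produced it, taint introduced through a single conversion directive stays confined to the bytes that directive wrote. A hypothetical check of that behaviour through the __dfsw_sprintf wrapper above, again built with -fsanitize=dataflow:

    #include <sanitizer/dfsan_interface.h>
    #include <cassert>
    #include <cstdio>

    int main() {
      int secret = 42;
      dfsan_label lbl = dfsan_create_label("secret", nullptr);
      dfsan_set_label(lbl, &secret, sizeof(secret));

      char out[64];
      sprintf(out, "id=%d!", secret);  // dispatched to __dfsw_sprintf

      // The constant chunks "id=" and "!" come out unlabeled; only the two
      // bytes produced by %d carry the label of 'secret'.
      assert(dfsan_read_label(out, 3) == 0);
      assert(dfsan_has_label(dfsan_read_label(out + 3, 2), lbl));
      return 0;
    }

The weak __sanitizer_cov_* stubs that follow are empty defaults meant to be overridden by tools that consume these coverage callbacks.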
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard, u32 *) {} +SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard_init, u32 *, + u32 *) {} +SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_pcs_init, void) {} +SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_indir, void) {} + +SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_cmp, void) {} +SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_cmp1, void) {} +SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_cmp2, void) {} +SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_cmp4, void) {} +SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_cmp8, void) {} +SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_const_cmp1, + void) {} +SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_const_cmp2, + void) {} +SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_const_cmp4, + void) {} +SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_const_cmp8, + void) {} +SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_switch, void) {} +} // extern "C" diff --git a/lib/dfsan/dfsan_interceptors.cc b/lib/dfsan/dfsan_interceptors.cc deleted file mode 100644 index f4b4babc65c2..000000000000 --- a/lib/dfsan/dfsan_interceptors.cc +++ /dev/null @@ -1,45 +0,0 @@ -//===-- dfsan_interceptors.cc ---------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of DataFlowSanitizer. -// -// Interceptors for standard library functions. -//===----------------------------------------------------------------------===// - -#include "dfsan/dfsan.h" -#include "interception/interception.h" -#include "sanitizer_common/sanitizer_common.h" - -using namespace __sanitizer; - -INTERCEPTOR(void *, mmap, void *addr, SIZE_T length, int prot, int flags, - int fd, OFF_T offset) { - void *res = REAL(mmap)(addr, length, prot, flags, fd, offset); - if (res != (void*)-1) - dfsan_set_label(0, res, RoundUpTo(length, GetPageSize())); - return res; -} - -INTERCEPTOR(void *, mmap64, void *addr, SIZE_T length, int prot, int flags, - int fd, OFF64_T offset) { - void *res = REAL(mmap64)(addr, length, prot, flags, fd, offset); - if (res != (void*)-1) - dfsan_set_label(0, res, RoundUpTo(length, GetPageSize())); - return res; -} - -namespace __dfsan { -void InitializeInterceptors() { - static int inited = 0; - CHECK_EQ(inited, 0); - - INTERCEPT_FUNCTION(mmap); - INTERCEPT_FUNCTION(mmap64); - inited = 1; -} -} // namespace __dfsan diff --git a/lib/dfsan/dfsan_interceptors.cpp b/lib/dfsan/dfsan_interceptors.cpp new file mode 100644 index 000000000000..673171c46f5a --- /dev/null +++ b/lib/dfsan/dfsan_interceptors.cpp @@ -0,0 +1,45 @@ +//===-- dfsan_interceptors.cpp --------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of DataFlowSanitizer. +// +// Interceptors for standard library functions. 
+//===----------------------------------------------------------------------===// + +#include "dfsan/dfsan.h" +#include "interception/interception.h" +#include "sanitizer_common/sanitizer_common.h" + +using namespace __sanitizer; + +INTERCEPTOR(void *, mmap, void *addr, SIZE_T length, int prot, int flags, + int fd, OFF_T offset) { + void *res = REAL(mmap)(addr, length, prot, flags, fd, offset); + if (res != (void*)-1) + dfsan_set_label(0, res, RoundUpTo(length, GetPageSize())); + return res; +} + +INTERCEPTOR(void *, mmap64, void *addr, SIZE_T length, int prot, int flags, + int fd, OFF64_T offset) { + void *res = REAL(mmap64)(addr, length, prot, flags, fd, offset); + if (res != (void*)-1) + dfsan_set_label(0, res, RoundUpTo(length, GetPageSize())); + return res; +} + +namespace __dfsan { +void InitializeInterceptors() { + static int inited = 0; + CHECK_EQ(inited, 0); + + INTERCEPT_FUNCTION(mmap); + INTERCEPT_FUNCTION(mmap64); + inited = 1; +} +} // namespace __dfsan diff --git a/lib/fuzzer/FuzzerBuiltinsMsvc.h b/lib/fuzzer/FuzzerBuiltinsMsvc.h index 82709cfe7b40..bc65c60098be 100644 --- a/lib/fuzzer/FuzzerBuiltinsMsvc.h +++ b/lib/fuzzer/FuzzerBuiltinsMsvc.h @@ -15,9 +15,6 @@ #include "FuzzerDefs.h" #if LIBFUZZER_MSVC -#if !defined(_M_ARM) && !defined(_M_X64) -#error "_BitScanReverse64 unavailable on this platform so MSVC is unsupported." -#endif #include #include #include @@ -40,7 +37,18 @@ inline uint64_t Bswap(uint64_t x) { return _byteswap_uint64(x); } // outside of Windows. inline uint32_t Clzll(uint64_t X) { unsigned long LeadZeroIdx = 0; + +#if !defined(_M_ARM) && !defined(_M_X64) + // Scan the high 32 bits. + if (_BitScanReverse(&LeadZeroIdx, static_cast(X >> 32))) + return static_cast(63 - (LeadZeroIdx + 32)); // Create a bit offset from the MSB. + // Scan the low 32 bits. + if (_BitScanReverse(&LeadZeroIdx, static_cast(X))) + return static_cast(63 - LeadZeroIdx); + +#else if (_BitScanReverse64(&LeadZeroIdx, X)) return 63 - LeadZeroIdx; +#endif return 64; } @@ -50,7 +58,13 @@ inline uint32_t Clz(uint32_t X) { return 32; } -inline int Popcountll(unsigned long long X) { return __popcnt64(X); } +inline int Popcountll(unsigned long long X) { +#if !defined(_M_ARM) && !defined(_M_X64) + return __popcnt(X) + __popcnt(X >> 32); +#else + return __popcnt64(X); +#endif +} } // namespace fuzzer diff --git a/lib/fuzzer/FuzzerDefs.h b/lib/fuzzer/FuzzerDefs.h index 320b37d5f8e3..5dc2d8e1ac09 100644 --- a/lib/fuzzer/FuzzerDefs.h +++ b/lib/fuzzer/FuzzerDefs.h @@ -15,10 +15,11 @@ #include #include #include +#include +#include #include #include -#include -#include + // Platform detection. 
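On 32-bit MSVC targets the change above stops rejecting the build outright and instead emulates the missing 64-bit intrinsics: _BitScanReverse64 becomes two _BitScanReverse calls over the high and low words, and __popcnt64 becomes the sum of two 32-bit __popcnt calls. A small stand-alone check of the same decomposition, written with the GCC/Clang builtins purely for illustration:

    #include <cassert>
    #include <cstdint>

    // Leading-zero count of a 64-bit value using only 32-bit scans,
    // mirroring the two _BitScanReverse calls in the new MSVC path.
    static uint32_t Clzll32(uint64_t X) {
      uint32_t Hi = static_cast<uint32_t>(X >> 32);
      uint32_t Lo = static_cast<uint32_t>(X);
      if (Hi) return __builtin_clz(Hi);
      if (Lo) return 32 + __builtin_clz(Lo);
      return 64;  // matches Clzll's "no bit set" result
    }

    // 64-bit population count as two 32-bit popcounts.
    static int Popcountll32(uint64_t X) {
      return __builtin_popcount(static_cast<uint32_t>(X)) +
             __builtin_popcount(static_cast<uint32_t>(X >> 32));
    }

    int main() {
      for (uint64_t X : {1ull, 0x80000000ull, 0x100000000ull, ~0ull}) {
        assert(Clzll32(X) == static_cast<uint32_t>(__builtin_clzll(X)));
        assert(Popcountll32(X) == __builtin_popcountll(X));
      }
      assert(Clzll32(0) == 64 && Popcountll32(0) == 0);
      return 0;
    }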
#ifdef __linux__ diff --git a/lib/fuzzer/FuzzerDriver.cpp b/lib/fuzzer/FuzzerDriver.cpp index 54c7ff079585..44c90655b932 100644 --- a/lib/fuzzer/FuzzerDriver.cpp +++ b/lib/fuzzer/FuzzerDriver.cpp @@ -708,7 +708,6 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) { Options.FeaturesDir = Flags.features_dir; if (Flags.collect_data_flow) Options.CollectDataFlow = Flags.collect_data_flow; - Options.LazyCounters = Flags.lazy_counters; if (Flags.stop_file) Options.StopFile = Flags.stop_file; diff --git a/lib/fuzzer/FuzzerExtFunctions.def b/lib/fuzzer/FuzzerExtFunctions.def index 41fa0fd2b748..51edf8444e94 100644 --- a/lib/fuzzer/FuzzerExtFunctions.def +++ b/lib/fuzzer/FuzzerExtFunctions.def @@ -16,12 +16,12 @@ // Optional user functions EXT_FUNC(LLVMFuzzerInitialize, int, (int *argc, char ***argv), false); EXT_FUNC(LLVMFuzzerCustomMutator, size_t, - (uint8_t * Data, size_t Size, size_t MaxSize, unsigned int Seed), + (uint8_t *Data, size_t Size, size_t MaxSize, unsigned int Seed), false); EXT_FUNC(LLVMFuzzerCustomCrossOver, size_t, - (const uint8_t * Data1, size_t Size1, - const uint8_t * Data2, size_t Size2, - uint8_t * Out, size_t MaxOutSize, unsigned int Seed), + (const uint8_t *Data1, size_t Size1, + const uint8_t *Data2, size_t Size2, + uint8_t *Out, size_t MaxOutSize, unsigned int Seed), false); // Sanitizer functions @@ -33,8 +33,9 @@ EXT_FUNC(__sanitizer_install_malloc_and_free_hooks, int, (void (*malloc_hook)(const volatile void *, size_t), void (*free_hook)(const volatile void *)), false); +EXT_FUNC(__sanitizer_log_write, void, (const char *buf, size_t len), false); EXT_FUNC(__sanitizer_purge_allocator, void, (), false); -EXT_FUNC(__sanitizer_print_memory_profile, int, (size_t, size_t), false); +EXT_FUNC(__sanitizer_print_memory_profile, void, (size_t, size_t), false); EXT_FUNC(__sanitizer_print_stack_trace, void, (), true); EXT_FUNC(__sanitizer_symbolize_pc, void, (void *, const char *fmt, char *out_buf, size_t out_buf_size), false); diff --git a/lib/fuzzer/FuzzerFlags.def b/lib/fuzzer/FuzzerFlags.def index a11cfe4405f3..0e19a9cde6ca 100644 --- a/lib/fuzzer/FuzzerFlags.def +++ b/lib/fuzzer/FuzzerFlags.def @@ -123,9 +123,6 @@ FUZZER_FLAG_INT(handle_term, 1, "If 1, try to intercept SIGTERM.") FUZZER_FLAG_INT(handle_xfsz, 1, "If 1, try to intercept SIGXFSZ.") FUZZER_FLAG_INT(handle_usr1, 1, "If 1, try to intercept SIGUSR1.") FUZZER_FLAG_INT(handle_usr2, 1, "If 1, try to intercept SIGUSR2.") -FUZZER_FLAG_INT(lazy_counters, 0, "If 1, a performance optimization is" - "enabled for the 8bit inline counters. " - "Requires that libFuzzer successfully installs its SEGV handler") FUZZER_FLAG_INT(close_fd_mask, 0, "If 1, close stdout at startup; " "if 2, close stderr; if 3, close both. " "Be careful, this will also close e.g. 
stderr of asan.") diff --git a/lib/fuzzer/FuzzerInternal.h b/lib/fuzzer/FuzzerInternal.h index f2a4c437de38..31096ce804bc 100644 --- a/lib/fuzzer/FuzzerInternal.h +++ b/lib/fuzzer/FuzzerInternal.h @@ -98,7 +98,8 @@ private: void ReportNewCoverage(InputInfo *II, const Unit &U); void PrintPulseAndReportSlowInput(const uint8_t *Data, size_t Size); void WriteUnitToFileWithPrefix(const Unit &U, const char *Prefix); - void PrintStats(const char *Where, const char *End = "\n", size_t Units = 0); + void PrintStats(const char *Where, const char *End = "\n", size_t Units = 0, + size_t Features = 0); void PrintStatusForNewUnit(const Unit &U, const char *Text); void CheckExitOnSrcPosOrItem(); diff --git a/lib/fuzzer/FuzzerLoop.cpp b/lib/fuzzer/FuzzerLoop.cpp index f773f9a13398..451a4c173167 100644 --- a/lib/fuzzer/FuzzerLoop.cpp +++ b/lib/fuzzer/FuzzerLoop.cpp @@ -273,9 +273,9 @@ void Fuzzer::InterruptCallback() { NO_SANITIZE_MEMORY void Fuzzer::AlarmCallback() { assert(Options.UnitTimeoutSec > 0); - // In Windows Alarm callback is executed by a different thread. + // In Windows and Fuchsia, Alarm callback is executed by a different thread. // NetBSD's current behavior needs this change too. -#if !LIBFUZZER_WINDOWS && !LIBFUZZER_NETBSD +#if !LIBFUZZER_WINDOWS && !LIBFUZZER_NETBSD && !LIBFUZZER_FUCHSIA if (!InFuzzingThread()) return; #endif @@ -319,14 +319,15 @@ void Fuzzer::RssLimitCallback() { _Exit(Options.OOMExitCode); // Stop right now. } -void Fuzzer::PrintStats(const char *Where, const char *End, size_t Units) { +void Fuzzer::PrintStats(const char *Where, const char *End, size_t Units, + size_t Features) { size_t ExecPerSec = execPerSec(); if (!Options.Verbosity) return; Printf("#%zd\t%s", TotalNumberOfRuns, Where); if (size_t N = TPC.GetTotalPCCoverage()) Printf(" cov: %zd", N); - if (size_t N = Corpus.NumFeatures()) + if (size_t N = Features ? Features : Corpus.NumFeatures()) Printf(" ft: %zd", N); if (!Corpus.empty()) { Printf(" corp: %zd", Corpus.NumActiveUnits()); @@ -512,10 +513,12 @@ size_t Fuzzer::GetCurrentUnitInFuzzingThead(const uint8_t **Data) const { } void Fuzzer::CrashOnOverwrittenData() { - Printf("==%d== ERROR: libFuzzer: fuzz target overwrites it's const input\n", + Printf("==%d== ERROR: libFuzzer: fuzz target overwrites its const input\n", GetPid()); + PrintStackTrace(); + Printf("SUMMARY: libFuzzer: overwrites-const-input\n"); DumpCurrentUnit("crash-"); - Printf("SUMMARY: libFuzzer: out-of-memory\n"); + PrintFinalStats(); _Exit(Options.ErrorExitCode); // Stop right now. } @@ -739,10 +742,6 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector &CorporaFiles) { uint8_t dummy = 0; ExecuteCallback(&dummy, 0); - // Protect lazy counters here, after the once-init code has been executed. - if (Options.LazyCounters) - TPC.ProtectLazyCounters(); - if (CorporaFiles.empty()) { Printf("INFO: A corpus is not provided, starting from an empty corpus\n"); Unit U({'\n'}); // Valid ASCII input. 
diff --git a/lib/fuzzer/FuzzerMerge.cpp b/lib/fuzzer/FuzzerMerge.cpp index 75b2b5d59b9c..e3ad8b3851e7 100644 --- a/lib/fuzzer/FuzzerMerge.cpp +++ b/lib/fuzzer/FuzzerMerge.cpp @@ -19,6 +19,7 @@ #include #include #include +#include namespace fuzzer { @@ -210,6 +211,9 @@ void Fuzzer::CrashResistantMergeInternalStep(const std::string &CFPath) { std::ofstream OF(CFPath, std::ofstream::out | std::ofstream::app); Set AllFeatures; + auto PrintStatsWrapper = [this, &AllFeatures](const char* Where) { + this->PrintStats(Where, "\n", 0, AllFeatures.size()); + }; Set AllPCs; for (size_t i = M.FirstNotProcessedFile; i < M.Files.size(); i++) { Fuzzer::MaybeExitGracefully(); @@ -218,7 +222,7 @@ void Fuzzer::CrashResistantMergeInternalStep(const std::string &CFPath) { U.resize(MaxInputLen); U.shrink_to_fit(); } - std::ostringstream StartedLine; + // Write the pre-run marker. OF << "STARTED " << i << " " << U.size() << "\n"; OF.flush(); // Flush is important since Command::Execute may crash. @@ -238,7 +242,9 @@ void Fuzzer::CrashResistantMergeInternalStep(const std::string &CFPath) { TPC.UpdateObservedPCs(); // Show stats. if (!(TotalNumberOfRuns & (TotalNumberOfRuns - 1))) - PrintStats("pulse "); + PrintStatsWrapper("pulse "); + if (TotalNumberOfRuns == M.NumFilesInFirstCorpus) + PrintStatsWrapper("LOADED"); // Write the post-run marker and the coverage. OF << "FT " << i; for (size_t F : UniqFeatures) @@ -252,25 +258,42 @@ void Fuzzer::CrashResistantMergeInternalStep(const std::string &CFPath) { OF << "\n"; OF.flush(); } - PrintStats("DONE "); + PrintStatsWrapper("DONE "); } -static void WriteNewControlFile(const std::string &CFPath, - const Vector &OldCorpus, - const Vector &NewCorpus) { - RemoveFile(CFPath); - std::ofstream ControlFile(CFPath); - ControlFile << (OldCorpus.size() + NewCorpus.size()) << "\n"; - ControlFile << OldCorpus.size() << "\n"; +static size_t WriteNewControlFile(const std::string &CFPath, + const Vector &OldCorpus, + const Vector &NewCorpus, + const Vector &KnownFiles) { + std::unordered_set FilesToSkip; + for (auto &SF: KnownFiles) + FilesToSkip.insert(SF.Name); + + Vector FilesToUse; + auto MaybeUseFile = [=, &FilesToUse](std::string Name) { + if (FilesToSkip.find(Name) == FilesToSkip.end()) + FilesToUse.push_back(Name); + }; for (auto &SF: OldCorpus) - ControlFile << SF.File << "\n"; + MaybeUseFile(SF.File); + auto FilesToUseFromOldCorpus = FilesToUse.size(); for (auto &SF: NewCorpus) - ControlFile << SF.File << "\n"; + MaybeUseFile(SF.File); + + RemoveFile(CFPath); + std::ofstream ControlFile(CFPath); + ControlFile << FilesToUse.size() << "\n"; + ControlFile << FilesToUseFromOldCorpus << "\n"; + for (auto &FN: FilesToUse) + ControlFile << FN << "\n"; + if (!ControlFile) { Printf("MERGE-OUTER: failed to write to the control file: %s\n", CFPath.c_str()); exit(1); } + + return FilesToUse.size(); } // Outer process. Does not call the target code and thus should not fail. @@ -286,12 +309,13 @@ void CrashResistantMerge(const Vector &Args, bool V /*Verbose*/) { if (NewCorpus.empty() && OldCorpus.empty()) return; // Nothing to merge. 
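WriteNewControlFile() now takes the list of already-processed files (KnownFiles), skips them when emitting the file list, and returns how many entries it actually wrote, which becomes the number of merge attempts. The layout of the control file is unchanged: a count of files, the number of those that belong to the initial corpus, one path per line, and then the records the inner process appends as it replays each file. An illustrative control file (paths invented for the example) might look like:

    3
    2
    corpus_old/a
    corpus_old/b
    corpus_new/c
    STARTED 0 1024
    FT 0 11 23 42
    STARTED 1 4096
    ...

Here "STARTED i size" is the pre-run marker written before executing file i, and "FT i ..." lists the features that file produced; coverage records are appended in the same style.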
size_t NumAttempts = 0; + Vector KnownFiles; if (FileSize(CFPath)) { VPrintf(V, "MERGE-OUTER: non-empty control file provided: '%s'\n", CFPath.c_str()); Merger M; std::ifstream IF(CFPath); - if (M.Parse(IF, /*ParseCoverage=*/false)) { + if (M.Parse(IF, /*ParseCoverage=*/true)) { VPrintf(V, "MERGE-OUTER: control file ok, %zd files total," " first not processed file %zd\n", M.Files.size(), M.FirstNotProcessedFile); @@ -300,12 +324,25 @@ void CrashResistantMerge(const Vector &Args, "(merge has stumbled on it the last time)\n", M.LastFailure.c_str()); if (M.FirstNotProcessedFile >= M.Files.size()) { + // Merge has already been completed with the given merge control file. + if (M.Files.size() == OldCorpus.size() + NewCorpus.size()) { + VPrintf( + V, + "MERGE-OUTER: nothing to do, merge has been completed before\n"); + exit(0); + } + + // Number of input files likely changed, start merge from scratch, but + // reuse coverage information from the given merge control file. VPrintf( - V, "MERGE-OUTER: nothing to do, merge has been completed before\n"); - exit(0); + V, + "MERGE-OUTER: starting merge from scratch, but reusing coverage " + "information from the given control file\n"); + KnownFiles = M.Files; + } else { + // There is a merge in progress, continue. + NumAttempts = M.Files.size() - M.FirstNotProcessedFile; } - - NumAttempts = M.Files.size() - M.FirstNotProcessedFile; } else { VPrintf(V, "MERGE-OUTER: bad control file, will overwrite it\n"); } @@ -313,10 +350,11 @@ void CrashResistantMerge(const Vector &Args, if (!NumAttempts) { // The supplied control file is empty or bad, create a fresh one. - NumAttempts = OldCorpus.size() + NewCorpus.size(); - VPrintf(V, "MERGE-OUTER: %zd files, %zd in the initial corpus\n", - NumAttempts, OldCorpus.size()); - WriteNewControlFile(CFPath, OldCorpus, NewCorpus); + VPrintf(V, "MERGE-OUTER: " + "%zd files, %zd in the initial corpus, %zd processed earlier\n", + OldCorpus.size() + NewCorpus.size(), OldCorpus.size(), + KnownFiles.size()); + NumAttempts = WriteNewControlFile(CFPath, OldCorpus, NewCorpus, KnownFiles); } // Execute the inner process until it passes. @@ -353,6 +391,8 @@ void CrashResistantMerge(const Vector &Args, VPrintf(V, "MERGE-OUTER: consumed %zdMb (%zdMb rss) to parse the control file\n", M.ApproximateMemoryConsumption() >> 20, GetPeakRSSMb()); + + M.Files.insert(M.Files.end(), KnownFiles.begin(), KnownFiles.end()); M.Merge(InitialFeatures, NewFeatures, InitialCov, NewCov, NewFiles); VPrintf(V, "MERGE-OUTER: %zd new files with %zd new features added; " "%zd new coverage edges\n", diff --git a/lib/fuzzer/FuzzerOptions.h b/lib/fuzzer/FuzzerOptions.h index ad3df015bc77..beecc980380b 100644 --- a/lib/fuzzer/FuzzerOptions.h +++ b/lib/fuzzer/FuzzerOptions.h @@ -75,7 +75,6 @@ struct FuzzingOptions { bool HandleXfsz = false; bool HandleUsr1 = false; bool HandleUsr2 = false; - bool LazyCounters = false; }; } // namespace fuzzer diff --git a/lib/fuzzer/FuzzerTracePC.cpp b/lib/fuzzer/FuzzerTracePC.cpp index 4a1308de5504..f03be7a39502 100644 --- a/lib/fuzzer/FuzzerTracePC.cpp +++ b/lib/fuzzer/FuzzerTracePC.cpp @@ -67,45 +67,6 @@ void TracePC::HandleInline8bitCountersInit(uint8_t *Start, uint8_t *Stop) { NumInline8bitCounters += M.Size(); } -// Mark all full page counter regions as PROT_NONE and set Enabled=false. -// The first time the instrumented code hits such a protected/disabled -// counter region we should catch a SEGV and call UnprotectLazyCounters, -// which will mark the page as PROT_READ|PROT_WRITE and set Enabled=true. 
-// -// Whenever other functions iterate over the counters they should ignore -// regions with Enabled=false. -void TracePC::ProtectLazyCounters() { - size_t NumPagesProtected = 0; - IterateCounterRegions([&](Module::Region &R) { - if (!R.OneFullPage) return; - if (Mprotect(R.Start, R.Stop - R.Start, false)) { - R.Enabled = false; - NumPagesProtected++; - } - }); - if (NumPagesProtected) - Printf("INFO: %zd pages of counters where protected;" - " libFuzzer's SEGV handler must be installed\n", - NumPagesProtected); -} - -bool TracePC::UnprotectLazyCounters(void *CounterPtr) { - // Printf("UnprotectLazyCounters: %p\n", CounterPtr); - if (!CounterPtr) - return false; - bool Done = false; - uint8_t *Addr = reinterpret_cast(CounterPtr); - IterateCounterRegions([&](Module::Region &R) { - if (!R.OneFullPage || R.Enabled || Done) return; - if (Addr >= R.Start && Addr < R.Stop) - if (Mprotect(R.Start, R.Stop - R.Start, true)) { - R.Enabled = true; - Done = true; - } - }); - return Done; -} - void TracePC::HandlePCsInit(const uintptr_t *Start, const uintptr_t *Stop) { const PCTableEntry *B = reinterpret_cast(Start); const PCTableEntry *E = reinterpret_cast(Stop); @@ -173,7 +134,7 @@ inline ALWAYS_INLINE uintptr_t GetPreviousInstructionPc(uintptr_t PC) { } /// \return the address of the next instruction. -/// Note: the logic is copied from `sanitizer_common/sanitizer_stacktrace.cc` +/// Note: the logic is copied from `sanitizer_common/sanitizer_stacktrace.cpp` ALWAYS_INLINE uintptr_t TracePC::GetNextInstructionPc(uintptr_t PC) { #if defined(__mips__) return PC + 8; diff --git a/lib/fuzzer/FuzzerTracePC.h b/lib/fuzzer/FuzzerTracePC.h index 4f5ebeb047a1..501f3b544971 100644 --- a/lib/fuzzer/FuzzerTracePC.h +++ b/lib/fuzzer/FuzzerTracePC.h @@ -119,9 +119,6 @@ class TracePC { void SetFocusFunction(const std::string &FuncName); bool ObservedFocusFunction(); - void ProtectLazyCounters(); - bool UnprotectLazyCounters(void *CounterPtr); - struct PCTableEntry { uintptr_t PC, PCFlags; }; diff --git a/lib/fuzzer/FuzzerUtil.h b/lib/fuzzer/FuzzerUtil.h index 0a127911df3c..85c5571d684f 100644 --- a/lib/fuzzer/FuzzerUtil.h +++ b/lib/fuzzer/FuzzerUtil.h @@ -52,8 +52,6 @@ void SetSignalHandler(const FuzzingOptions& Options); void SleepSeconds(int Seconds); -bool Mprotect(void *Ptr, size_t Size, bool AllowReadWrite); - unsigned long GetPid(); size_t GetPeakRSSMb(); diff --git a/lib/fuzzer/FuzzerUtilFuchsia.cpp b/lib/fuzzer/FuzzerUtilFuchsia.cpp index 1f04b33c154e..79fd950bbf97 100644 --- a/lib/fuzzer/FuzzerUtilFuchsia.cpp +++ b/lib/fuzzer/FuzzerUtilFuchsia.cpp @@ -305,12 +305,19 @@ void CrashHandler(zx_handle_t *Event) { } // namespace -bool Mprotect(void *Ptr, size_t Size, bool AllowReadWrite) { - return false; // UNIMPLEMENTED -} - // Platform specific functions. void SetSignalHandler(const FuzzingOptions &Options) { + // Make sure information from libFuzzer and the sanitizers are easy to + // reassemble. `__sanitizer_log_write` has the added benefit of ensuring the + // DSO map is always available for the symbolizer. + // A uint64_t fits in 20 chars, so 64 is plenty. + char Buf[64]; + memset(Buf, 0, sizeof(Buf)); + snprintf(Buf, sizeof(Buf), "==%lu== INFO: libFuzzer starting.\n", GetPid()); + if (EF->__sanitizer_log_write) + __sanitizer_log_write(Buf, sizeof(Buf)); + Printf("%s", Buf); + // Set up alarm handler if needed. 
if (Options.UnitTimeoutSec > 0) { std::thread T(AlarmHandler, Options.UnitTimeoutSec / 2 + 1); @@ -400,13 +407,14 @@ int ExecuteCommand(const Command &Cmd) { // that lacks a mutable working directory. Fortunately, when this is the case // a mutable output directory must be specified using "-artifact_prefix=...", // so write the log file(s) there. + // However, we don't want to apply this logic for absolute paths. int FdOut = STDOUT_FILENO; if (Cmd.hasOutputFile()) { - std::string Path; - if (Cmd.hasFlag("artifact_prefix")) - Path = Cmd.getFlagValue("artifact_prefix") + "/" + Cmd.getOutputFile(); - else - Path = Cmd.getOutputFile(); + std::string Path = Cmd.getOutputFile(); + bool IsAbsolutePath = Path.length() > 1 && Path[0] == '/'; + if (!IsAbsolutePath && Cmd.hasFlag("artifact_prefix")) + Path = Cmd.getFlagValue("artifact_prefix") + "/" + Path; + FdOut = open(Path.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0); if (FdOut == -1) { Printf("libFuzzer: failed to open %s: %s\n", Path.c_str(), diff --git a/lib/fuzzer/FuzzerUtilPosix.cpp b/lib/fuzzer/FuzzerUtilPosix.cpp index 110785d87413..cefe7ae181e7 100644 --- a/lib/fuzzer/FuzzerUtilPosix.cpp +++ b/lib/fuzzer/FuzzerUtilPosix.cpp @@ -37,7 +37,6 @@ static void (*upstream_segv_handler)(int, siginfo_t *, void *); static void SegvHandler(int sig, siginfo_t *si, void *ucontext) { assert(si->si_signo == SIGSEGV); - if (TPC.UnprotectLazyCounters(si->si_addr)) return; if (upstream_segv_handler) return upstream_segv_handler(sig, si, ucontext); Fuzzer::StaticCrashSignalCallback(); @@ -98,11 +97,6 @@ void SetTimer(int Seconds) { SetSigaction(SIGALRM, AlarmHandler); } -bool Mprotect(void *Ptr, size_t Size, bool AllowReadWrite) { - return 0 == mprotect(Ptr, Size, - AllowReadWrite ? (PROT_READ | PROT_WRITE) : PROT_NONE); -} - void SetSignalHandler(const FuzzingOptions& Options) { if (Options.UnitTimeoutSec > 0) SetTimer(Options.UnitTimeoutSec / 2 + 1); diff --git a/lib/fuzzer/FuzzerUtilWindows.cpp b/lib/fuzzer/FuzzerUtilWindows.cpp index 074e1eb42309..ed90044c3f83 100644 --- a/lib/fuzzer/FuzzerUtilWindows.cpp +++ b/lib/fuzzer/FuzzerUtilWindows.cpp @@ -111,10 +111,6 @@ static TimerQ Timer; static void CrashHandler(int) { Fuzzer::StaticCrashSignalCallback(); } -bool Mprotect(void *Ptr, size_t Size, bool AllowReadWrite) { - return false; // UNIMPLEMENTED -} - void SetSignalHandler(const FuzzingOptions& Options) { HandlerOpt = &Options; diff --git a/lib/fuzzer/utils/FuzzedDataProvider.h b/lib/fuzzer/utils/FuzzedDataProvider.h deleted file mode 100644 index 1b5b4bb01269..000000000000 --- a/lib/fuzzer/utils/FuzzedDataProvider.h +++ /dev/null @@ -1,245 +0,0 @@ -//===- FuzzedDataProvider.h - Utility header for fuzz targets ---*- C++ -* ===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// A single header library providing an utility class to break up an array of -// bytes. Whenever run on the same input, provides the same output, as long as -// its methods are called in the same order, with the same arguments. 
-//===----------------------------------------------------------------------===// - -#ifndef LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_ -#define LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_ - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -class FuzzedDataProvider { -public: - // |data| is an array of length |size| that the FuzzedDataProvider wraps to - // provide more granular access. |data| must outlive the FuzzedDataProvider. - FuzzedDataProvider(const uint8_t *data, size_t size) - : data_ptr_(data), remaining_bytes_(size) {} - ~FuzzedDataProvider() = default; - - // Returns a std::vector containing |num_bytes| of input data. If fewer than - // |num_bytes| of data remain, returns a shorter std::vector containing all - // of the data that's left. Can be used with any byte sized type, such as - // char, unsigned char, uint8_t, etc. - template std::vector ConsumeBytes(size_t num_bytes) { - num_bytes = std::min(num_bytes, remaining_bytes_); - return ConsumeBytes(num_bytes, num_bytes); - } - - // Similar to |ConsumeBytes|, but also appends the terminator value at the end - // of the resulting vector. Useful, when a mutable null-terminated C-string is - // needed, for example. But that is a rare case. Better avoid it, if possible, - // and prefer using |ConsumeBytes| or |ConsumeBytesAsString| methods. - template - std::vector ConsumeBytesWithTerminator(size_t num_bytes, - T terminator = 0) { - num_bytes = std::min(num_bytes, remaining_bytes_); - std::vector result = ConsumeBytes(num_bytes + 1, num_bytes); - result.back() = terminator; - return result; - } - - // Returns a std::string containing |num_bytes| of input data. Using this and - // |.c_str()| on the resulting string is the best way to get an immutable - // null-terminated C string. If fewer than |num_bytes| of data remain, returns - // a shorter std::string containing all of the data that's left. - std::string ConsumeBytesAsString(size_t num_bytes) { - static_assert(sizeof(std::string::value_type) == sizeof(uint8_t), - "ConsumeBytesAsString cannot convert the data to a string."); - - num_bytes = std::min(num_bytes, remaining_bytes_); - std::string result( - reinterpret_cast(data_ptr_), - num_bytes); - Advance(num_bytes); - return result; - } - - // Returns a number in the range [min, max] by consuming bytes from the - // input data. The value might not be uniformly distributed in the given - // range. If there's no input data left, always returns |min|. |min| must - // be less than or equal to |max|. - template T ConsumeIntegralInRange(T min, T max) { - static_assert(std::is_integral::value, "An integral type is required."); - static_assert(sizeof(T) <= sizeof(uint64_t), "Unsupported integral type."); - - if (min > max) - abort(); - - // Use the biggest type possible to hold the range and the result. - uint64_t range = static_cast(max) - min; - uint64_t result = 0; - size_t offset = 0; - - while (offset < sizeof(T) * CHAR_BIT && (range >> offset) > 0 && - remaining_bytes_ != 0) { - // Pull bytes off the end of the seed data. Experimentally, this seems to - // allow the fuzzer to more easily explore the input space. This makes - // sense, since it works by modifying inputs that caused new code to run, - // and this data is often used to encode length of data read by - // |ConsumeBytes|. Separating out read lengths makes it easier modify the - // contents of the data that is actually read. 
- --remaining_bytes_; - result = (result << CHAR_BIT) | data_ptr_[remaining_bytes_]; - offset += CHAR_BIT; - } - - // Avoid division by 0, in case |range + 1| results in overflow. - if (range != std::numeric_limits::max()) - result = result % (range + 1); - - return static_cast(min + result); - } - - // Returns a std::string of length from 0 to |max_length|. When it runs out of - // input data, returns what remains of the input. Designed to be more stable - // with respect to a fuzzer inserting characters than just picking a random - // length and then consuming that many bytes with |ConsumeBytes|. - std::string ConsumeRandomLengthString(size_t max_length) { - // Reads bytes from the start of |data_ptr_|. Maps "\\" to "\", and maps "\" - // followed by anything else to the end of the string. As a result of this - // logic, a fuzzer can insert characters into the string, and the string - // will be lengthened to include those new characters, resulting in a more - // stable fuzzer than picking the length of a string independently from - // picking its contents. - std::string result; - - // Reserve the anticipated capaticity to prevent several reallocations. - result.reserve(std::min(max_length, remaining_bytes_)); - for (size_t i = 0; i < max_length && remaining_bytes_ != 0; ++i) { - char next = ConvertUnsignedToSigned(data_ptr_[0]); - Advance(1); - if (next == '\\' && remaining_bytes_ != 0) { - next = ConvertUnsignedToSigned(data_ptr_[0]); - Advance(1); - if (next != '\\') - break; - } - result += next; - } - - result.shrink_to_fit(); - return result; - } - - // Returns a std::vector containing all remaining bytes of the input data. - template std::vector ConsumeRemainingBytes() { - return ConsumeBytes(remaining_bytes_); - } - - // Prefer using |ConsumeRemainingBytes| unless you actually need a std::string - // object. - // Returns a std::vector containing all remaining bytes of the input data. - std::string ConsumeRemainingBytesAsString() { - return ConsumeBytesAsString(remaining_bytes_); - } - - // Returns a number in the range [Type's min, Type's max]. The value might - // not be uniformly distributed in the given range. If there's no input data - // left, always returns |min|. - template T ConsumeIntegral() { - return ConsumeIntegralInRange(std::numeric_limits::min(), - std::numeric_limits::max()); - } - - // Reads one byte and returns a bool, or false when no data remains. - bool ConsumeBool() { return 1 & ConsumeIntegral(); } - - // Returns a copy of a value selected from a fixed-size |array|. - template - T PickValueInArray(const T (&array)[size]) { - static_assert(size > 0, "The array must be non empty."); - return array[ConsumeIntegralInRange(0, size - 1)]; - } - - template - T PickValueInArray(std::initializer_list list) { - // static_assert(list.size() > 0, "The array must be non empty."); - return *(list.begin() + ConsumeIntegralInRange(0, list.size() - 1)); - } - - // Return an enum value. The enum must start at 0 and be contiguous. It must - // also contain |kMaxValue| aliased to its largest (inclusive) value. Such as: - // enum class Foo { SomeValue, OtherValue, kMaxValue = OtherValue }; - template T ConsumeEnum() { - static_assert(std::is_enum::value, "|T| must be an enum type."); - return static_cast(ConsumeIntegralInRange( - 0, static_cast(T::kMaxValue))); - } - - // Reports the remaining bytes available for fuzzed input. 
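A hypothetical fuzz target using the class shown above; the include path is illustrative and depends on where the relocated header is installed:

    #include <fuzzer/FuzzedDataProvider.h>
    #include <cstdint>
    #include <string>
    #include <vector>

    extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
      FuzzedDataProvider Provider(Data, Size);
      // Derive several independent values from one flat byte array.
      int Port = Provider.ConsumeIntegralInRange<int>(1, 65535);
      bool Verbose = Provider.ConsumeBool();
      std::string Host = Provider.ConsumeRandomLengthString(64);
      std::vector<uint8_t> Payload = Provider.ConsumeRemainingBytes<uint8_t>();
      // Feed Port/Verbose/Host/Payload into the code under test here.
      (void)Port; (void)Verbose; (void)Host; (void)Payload;
      return 0;
    }

Integral values are pulled from the end of the data while strings and byte runs come from the front, which, per the comments above, makes it easier for the fuzzer to mutate the consumed contents without disturbing the derived lengths.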
- size_t remaining_bytes() { return remaining_bytes_; } - -private: - FuzzedDataProvider(const FuzzedDataProvider &) = delete; - FuzzedDataProvider &operator=(const FuzzedDataProvider &) = delete; - - void Advance(size_t num_bytes) { - if (num_bytes > remaining_bytes_) - abort(); - - data_ptr_ += num_bytes; - remaining_bytes_ -= num_bytes; - } - - template - std::vector ConsumeBytes(size_t size, size_t num_bytes_to_consume) { - static_assert(sizeof(T) == sizeof(uint8_t), "Incompatible data type."); - - // The point of using the size-based constructor below is to increase the - // odds of having a vector object with capacity being equal to the length. - // That part is always implementation specific, but at least both libc++ and - // libstdc++ allocate the requested number of bytes in that constructor, - // which seems to be a natural choice for other implementations as well. - // To increase the odds even more, we also call |shrink_to_fit| below. - std::vector result(size); - std::memcpy(result.data(), data_ptr_, num_bytes_to_consume); - Advance(num_bytes_to_consume); - - // Even though |shrink_to_fit| is also implementation specific, we expect it - // to provide an additional assurance in case vector's constructor allocated - // a buffer which is larger than the actual amount of data we put inside it. - result.shrink_to_fit(); - return result; - } - - template TS ConvertUnsignedToSigned(TU value) { - static_assert(sizeof(TS) == sizeof(TU), "Incompatible data types."); - static_assert(!std::numeric_limits::is_signed, - "Source type must be unsigned."); - - // TODO(Dor1s): change to `if constexpr` once C++17 becomes mainstream. - if (std::numeric_limits::is_modulo) - return static_cast(value); - - // Avoid using implementation-defined unsigned to signer conversions. - // To learn more, see https://stackoverflow.com/questions/13150449. - if (value <= std::numeric_limits::max()) - return static_cast(value); - else { - constexpr auto TS_min = std::numeric_limits::min(); - return TS_min + static_cast(value - TS_min); - } - } - - const uint8_t *data_ptr_; - size_t remaining_bytes_; -}; - -#endif // LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_ diff --git a/lib/gwp_asan/guarded_pool_allocator.cpp b/lib/gwp_asan/guarded_pool_allocator.cpp index 7e3628eba6ff..ef497336025f 100644 --- a/lib/gwp_asan/guarded_pool_allocator.cpp +++ b/lib/gwp_asan/guarded_pool_allocator.cpp @@ -13,7 +13,7 @@ // RHEL creates the PRIu64 format macro (for printing uint64_t's) only when this // macro is defined before including . #ifndef __STDC_FORMAT_MACROS - #define __STDC_FORMAT_MACROS 1 +#define __STDC_FORMAT_MACROS 1 #endif #include @@ -44,11 +44,12 @@ private: bool &Bool; }; -void defaultPrintStackTrace(uintptr_t *Trace, options::Printf_t Printf) { - if (Trace[0] == 0) +void defaultPrintStackTrace(uintptr_t *Trace, size_t TraceLength, + options::Printf_t Printf) { + if (TraceLength == 0) Printf(" \n"); - for (size_t i = 0; Trace[i] != 0; ++i) { + for (size_t i = 0; i < TraceLength; ++i) { Printf(" #%zu 0x%zx in \n", i, Trace[i]); } Printf("\n"); @@ -68,12 +69,18 @@ void GuardedPoolAllocator::AllocationMetadata::RecordAllocation( // TODO(hctim): Ask the caller to provide the thread ID, so we don't waste // other thread's time getting the thread ID under lock. 
AllocationTrace.ThreadID = getThreadID(); + AllocationTrace.TraceSize = 0; + DeallocationTrace.TraceSize = 0; DeallocationTrace.ThreadID = kInvalidThreadID; - if (Backtrace) - Backtrace(AllocationTrace.Trace, kMaximumStackFrames); - else - AllocationTrace.Trace[0] = 0; - DeallocationTrace.Trace[0] = 0; + + if (Backtrace) { + uintptr_t UncompressedBuffer[kMaxTraceLengthToCollect]; + size_t BacktraceLength = + Backtrace(UncompressedBuffer, kMaxTraceLengthToCollect); + AllocationTrace.TraceSize = compression::pack( + UncompressedBuffer, BacktraceLength, AllocationTrace.CompressedTrace, + kStackFrameStorageBytes); + } } void GuardedPoolAllocator::AllocationMetadata::RecordDeallocation( @@ -81,11 +88,16 @@ void GuardedPoolAllocator::AllocationMetadata::RecordDeallocation( IsDeallocated = true; // Ensure that the unwinder is not called if the recursive flag is set, // otherwise non-reentrant unwinders may deadlock. + DeallocationTrace.TraceSize = 0; if (Backtrace && !ThreadLocals.RecursiveGuard) { ScopedBoolean B(ThreadLocals.RecursiveGuard); - Backtrace(DeallocationTrace.Trace, kMaximumStackFrames); - } else { - DeallocationTrace.Trace[0] = 0; + + uintptr_t UncompressedBuffer[kMaxTraceLengthToCollect]; + size_t BacktraceLength = + Backtrace(UncompressedBuffer, kMaxTraceLengthToCollect); + DeallocationTrace.TraceSize = compression::pack( + UncompressedBuffer, BacktraceLength, DeallocationTrace.CompressedTrace, + kStackFrameStorageBytes); } DeallocationTrace.ThreadID = getThreadID(); } @@ -161,7 +173,7 @@ void GuardedPoolAllocator::init(const options::Options &Opts) { // Ensure that signal handlers are installed as late as possible, as the class // is not thread-safe until init() is finished, and thus a SIGSEGV may cause a - // race to members if recieved during init(). + // race to members if received during init(). if (Opts.InstallSignalHandlers) installSignalHandlers(); } @@ -373,7 +385,7 @@ void printErrorType(Error E, uintptr_t AccessPtr, AllocationMetadata *Meta, case Error::UNKNOWN: ErrorString = "GWP-ASan couldn't automatically determine the source of " "the memory error. It was likely caused by a wild memory " - "access into the GWP-ASan pool. The error occured"; + "access into the GWP-ASan pool. 
The error occurred"; break; case Error::USE_AFTER_FREE: ErrorString = "Use after free"; @@ -442,7 +454,13 @@ void printAllocDeallocTraces(uintptr_t AccessPtr, AllocationMetadata *Meta, Printf("0x%zx was deallocated by thread %zu here:\n", AccessPtr, Meta->DeallocationTrace.ThreadID); - PrintBacktrace(Meta->DeallocationTrace.Trace, Printf); + uintptr_t UncompressedTrace[AllocationMetadata::kMaxTraceLengthToCollect]; + size_t UncompressedLength = compression::unpack( + Meta->DeallocationTrace.CompressedTrace, + Meta->DeallocationTrace.TraceSize, UncompressedTrace, + AllocationMetadata::kMaxTraceLengthToCollect); + + PrintBacktrace(UncompressedTrace, UncompressedLength, Printf); } if (Meta->AllocationTrace.ThreadID == GuardedPoolAllocator::kInvalidThreadID) @@ -451,7 +469,12 @@ void printAllocDeallocTraces(uintptr_t AccessPtr, AllocationMetadata *Meta, Printf("0x%zx was allocated by thread %zu here:\n", Meta->Addr, Meta->AllocationTrace.ThreadID); - PrintBacktrace(Meta->AllocationTrace.Trace, Printf); + uintptr_t UncompressedTrace[AllocationMetadata::kMaxTraceLengthToCollect]; + size_t UncompressedLength = compression::unpack( + Meta->AllocationTrace.CompressedTrace, Meta->AllocationTrace.TraceSize, + UncompressedTrace, AllocationMetadata::kMaxTraceLengthToCollect); + + PrintBacktrace(UncompressedTrace, UncompressedLength, Printf); } struct ScopedEndOfReportDecorator { @@ -491,11 +514,11 @@ void GuardedPoolAllocator::reportErrorInternal(uintptr_t AccessPtr, Error E) { uint64_t ThreadID = getThreadID(); printErrorType(E, AccessPtr, Meta, Printf, ThreadID); if (Backtrace) { - static constexpr unsigned kMaximumStackFramesForCrashTrace = 128; + static constexpr unsigned kMaximumStackFramesForCrashTrace = 512; uintptr_t Trace[kMaximumStackFramesForCrashTrace]; - Backtrace(Trace, kMaximumStackFramesForCrashTrace); + size_t TraceLength = Backtrace(Trace, kMaximumStackFramesForCrashTrace); - PrintBacktrace(Trace, Printf); + PrintBacktrace(Trace, TraceLength, Printf); } else { Printf(" \n\n"); } diff --git a/lib/gwp_asan/guarded_pool_allocator.h b/lib/gwp_asan/guarded_pool_allocator.h index 28a41110faed..57ad61e9cf4f 100644 --- a/lib/gwp_asan/guarded_pool_allocator.h +++ b/lib/gwp_asan/guarded_pool_allocator.h @@ -13,6 +13,7 @@ #include "gwp_asan/mutex.h" #include "gwp_asan/options.h" #include "gwp_asan/random.h" +#include "gwp_asan/stack_trace_compressor.h" #include #include @@ -39,9 +40,15 @@ public: }; struct AllocationMetadata { - // Maximum number of stack trace frames to collect for allocations + frees. - // TODO(hctim): Implement stack frame compression, a-la Chromium. - static constexpr size_t kMaximumStackFrames = 64; + // The number of bytes used to store a compressed stack frame. On 64-bit + // platforms, assuming a compression ratio of 50%, this should allow us to + // store ~64 frames per trace. + static constexpr size_t kStackFrameStorageBytes = 256; + + // Maximum number of stack frames to collect on allocation/deallocation. The + // actual number of collected frames may be less than this as the stack + // frames are compressed into a fixed memory range. + static constexpr size_t kMaxTraceLengthToCollect = 128; // Records the given allocation metadata into this struct. void RecordAllocation(uintptr_t Addr, size_t Size, @@ -51,11 +58,13 @@ public: void RecordDeallocation(options::Backtrace_t Backtrace); struct CallSiteInfo { - // The backtrace to the allocation/deallocation. If the first value is - // zero, we did not collect a trace. 
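// Worked numbers behind kStackFrameStorageBytes above (not part of the patch):
// a raw 64-bit frame is 8 bytes, so 64 frames occupy 512 bytes uncompressed;
// at the assumed ~50% compression ratio they fit in the 256-byte
// CompressedTrace buffer. Collection itself is capped at
// kMaxTraceLengthToCollect (128) frames, and pack() simply stops early once
// the storage buffer is full.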
- uintptr_t Trace[kMaximumStackFrames] = {}; + // The compressed backtrace to the allocation/deallocation. + uint8_t CompressedTrace[kStackFrameStorageBytes]; // The thread ID for this trace, or kInvalidThreadID if not available. uint64_t ThreadID = kInvalidThreadID; + // The size of the compressed trace (in bytes). Zero indicates that no + // trace was collected. + size_t TraceSize = 0; }; // The address of this allocation. diff --git a/lib/gwp_asan/optional/backtrace.h b/lib/gwp_asan/optional/backtrace.h index 2700970e5e8e..6c9ee9f6506d 100644 --- a/lib/gwp_asan/optional/backtrace.h +++ b/lib/gwp_asan/optional/backtrace.h @@ -14,7 +14,12 @@ namespace gwp_asan { namespace options { // Functions to get the platform-specific and implementation-specific backtrace -// and backtrace printing functions. +// and backtrace printing functions when RTGwpAsanBacktraceLibc or +// RTGwpAsanBacktraceSanitizerCommon are linked. Use these functions to get the +// backtrace function for populating the Options::Backtrace and +// Options::PrintBacktrace when initialising the GuardedPoolAllocator. Please +// note any thread-safety descriptions for the implementation of these functions +// that you use. Backtrace_t getBacktraceFunction(); PrintBacktrace_t getPrintBacktraceFunction(); } // namespace options diff --git a/lib/gwp_asan/optional/backtrace_linux_libc.cpp b/lib/gwp_asan/optional/backtrace_linux_libc.cpp index f20a3100927e..a656c9b41d5d 100644 --- a/lib/gwp_asan/optional/backtrace_linux_libc.cpp +++ b/lib/gwp_asan/optional/backtrace_linux_libc.cpp @@ -17,33 +17,23 @@ #include "gwp_asan/options.h" namespace { -void Backtrace(uintptr_t *TraceBuffer, size_t Size) { - // Grab (what seems to be) one more trace than we need. TraceBuffer needs to - // be null-terminated, but we wish to remove the frame of this function call. +size_t Backtrace(uintptr_t *TraceBuffer, size_t Size) { static_assert(sizeof(uintptr_t) == sizeof(void *), "uintptr_t is not void*"); - int NumTraces = - backtrace(reinterpret_cast(TraceBuffer), Size); - // Now shift the entire trace one place to the left and null-terminate. 
- memmove(TraceBuffer, TraceBuffer + 1, NumTraces * sizeof(void *)); - TraceBuffer[NumTraces - 1] = 0; + return backtrace(reinterpret_cast(TraceBuffer), Size); } -static void PrintBacktrace(uintptr_t *Trace, +static void PrintBacktrace(uintptr_t *Trace, size_t TraceLength, gwp_asan::options::Printf_t Printf) { - size_t NumTraces = 0; - for (; Trace[NumTraces] != 0; ++NumTraces) { - } - - if (NumTraces == 0) { + if (TraceLength == 0) { Printf(" \n\n"); return; } char **BacktraceSymbols = - backtrace_symbols(reinterpret_cast(Trace), NumTraces); + backtrace_symbols(reinterpret_cast(Trace), TraceLength); - for (size_t i = 0; i < NumTraces; ++i) { + for (size_t i = 0; i < TraceLength; ++i) { if (!BacktraceSymbols) Printf(" #%zu %p\n", i, Trace[i]); else diff --git a/lib/gwp_asan/optional/backtrace_sanitizer_common.cpp b/lib/gwp_asan/optional/backtrace_sanitizer_common.cpp index 7d17eec0da2f..5e07fd6f465a 100644 --- a/lib/gwp_asan/optional/backtrace_sanitizer_common.cpp +++ b/lib/gwp_asan/optional/backtrace_sanitizer_common.cpp @@ -13,6 +13,9 @@ #include "gwp_asan/optional/backtrace.h" #include "gwp_asan/options.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_flag_parser.h" +#include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_stacktrace.h" void __sanitizer::BufferedStackTrace::UnwindImpl(uptr pc, uptr bp, @@ -26,7 +29,7 @@ void __sanitizer::BufferedStackTrace::UnwindImpl(uptr pc, uptr bp, } namespace { -void Backtrace(uintptr_t *TraceBuffer, size_t Size) { +size_t Backtrace(uintptr_t *TraceBuffer, size_t Size) { __sanitizer::BufferedStackTrace Trace; Trace.Reset(); if (Size > __sanitizer::kStackTraceMax) @@ -38,19 +41,14 @@ void Backtrace(uintptr_t *TraceBuffer, size_t Size) { /* fast unwind */ true, Size - 1); memcpy(TraceBuffer, Trace.trace, Trace.size * sizeof(uintptr_t)); - TraceBuffer[Trace.size] = 0; + return Trace.size; } -static void PrintBacktrace(uintptr_t *Trace, +static void PrintBacktrace(uintptr_t *Trace, size_t TraceLength, gwp_asan::options::Printf_t Printf) { __sanitizer::StackTrace StackTrace; StackTrace.trace = reinterpret_cast<__sanitizer::uptr *>(Trace); - - for (StackTrace.size = 0; StackTrace.size < __sanitizer::kStackTraceMax; - ++StackTrace.size) { - if (Trace[StackTrace.size] == 0) - break; - } + StackTrace.size = TraceLength; if (StackTrace.size == 0) { Printf(" \n\n"); @@ -63,7 +61,18 @@ static void PrintBacktrace(uintptr_t *Trace, namespace gwp_asan { namespace options { -Backtrace_t getBacktraceFunction() { return Backtrace; } +// This function is thread-compatible. It must be synchronised in respect to any +// other calls to getBacktraceFunction(), calls to getPrintBacktraceFunction(), +// and calls to either of the functions that they return. Furthermore, this may +// require synchronisation with any calls to sanitizer_common that use flags. +// Generally, this function will be called during the initialisation of the +// allocator, which is done in a thread-compatible manner. +Backtrace_t getBacktraceFunction() { + // The unwinder requires the default flags to be set. 
+ __sanitizer::SetCommonFlagsDefaults(); + __sanitizer::InitializeCommonFlags(); + return Backtrace; +} PrintBacktrace_t getPrintBacktraceFunction() { return PrintBacktrace; } } // namespace options } // namespace gwp_asan diff --git a/lib/gwp_asan/options.h b/lib/gwp_asan/options.h index 6423e16526f4..ae3f3d45e946 100644 --- a/lib/gwp_asan/options.h +++ b/lib/gwp_asan/options.h @@ -14,22 +14,63 @@ namespace gwp_asan { namespace options { -// The function pointer type for printf(). Follows the standard format from the -// sanitizers library. If the supported allocator exposes printing via a -// different function signature, please provide a wrapper which has this -// printf() signature, and pass the wrapper instead. +// ================================ Requirements =============================== +// This function is required to be implemented by the supporting allocator. The +// sanitizer::Printf() function can be simply used here. +// ================================ Description ================================ +// This function shall produce output according to a strict subset of the C +// standard library's printf() family. This function must support printing the +// following formats: +// 1. integers: "%([0-9]*)?(z|ll)?{d,u,x,X}" +// 2. pointers: "%p" +// 3. strings: "%[-]([0-9]*)?(\\.\\*)?s" +// 4. chars: "%c" +// This function must be implemented in a signal-safe manner. +// =================================== Notes =================================== +// This function has a slightly different signature than the C standard +// library's printf(). Notably, it returns 'void' rather than 'int'. typedef void (*Printf_t)(const char *Format, ...); -// The function pointer type for backtrace information. Required to be -// implemented by the supporting allocator. The callee should elide itself and -// all frames below itself from TraceBuffer, i.e. the caller's frame should be -// in TraceBuffer[0], and subsequent frames 1..n into TraceBuffer[1..n], where a -// maximum of `MaximumDepth - 1` frames are stored. TraceBuffer should be -// nullptr-terminated (i.e. if there are 5 frames; TraceBuffer[5] == nullptr). -// If the allocator cannot supply backtrace information, it should set -// TraceBuffer[0] == nullptr. -typedef void (*Backtrace_t)(uintptr_t *TraceBuffer, size_t Size); -typedef void (*PrintBacktrace_t)(uintptr_t *TraceBuffer, Printf_t Print); +// ================================ Requirements =============================== +// This function is required to be either implemented by the supporting +// allocator, or one of the two provided implementations may be used +// (RTGwpAsanBacktraceLibc or RTGwpAsanBacktraceSanitizerCommon). +// ================================ Description ================================ +// This function shall collect the backtrace for the calling thread and place +// the result in `TraceBuffer`. This function should elide itself and all frames +// below itself from `TraceBuffer`, i.e. the caller's frame should be in +// TraceBuffer[0], and subsequent frames 1..n into TraceBuffer[1..n], where a +// maximum of `Size` frames are stored. Returns the number of frames stored into +// `TraceBuffer`, and zero on failure. If the return value of this function is +// equal to `Size`, it may indicate that the backtrace is truncated. +// =================================== Notes =================================== +// This function may directly or indirectly call malloc(), as the +// GuardedPoolAllocator contains a reentrancy barrier to prevent infinite +// recursion. 
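// A wiring sketch (not part of the patch) showing how an embedding allocator
// could populate these options with the optional backtrace support described
// in backtrace.h above. initGwpAsan() and the caller-supplied Printf are
// assumptions for illustration; the option fields and the two getter
// functions are the ones this patch documents. Fields not set here keep
// whatever defaults the options header provides.
#include "gwp_asan/guarded_pool_allocator.h"
#include "gwp_asan/optional/backtrace.h"
#include "gwp_asan/options.h"

void initGwpAsan(gwp_asan::options::Printf_t Printf) {
  gwp_asan::options::Options Opts;
  Opts.Printf = Printf;
  Opts.Backtrace = gwp_asan::options::getBacktraceFunction();
  Opts.PrintBacktrace = gwp_asan::options::getPrintBacktraceFunction();
  static gwp_asan::GuardedPoolAllocator GPA;
  GPA.init(Opts);
}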
Any allocation made inside this function will be served by the +// supporting allocator, and will not have GWP-ASan protections. +typedef size_t (*Backtrace_t)(uintptr_t *TraceBuffer, size_t Size); + +// ================================ Requirements =============================== +// This function is optional for the supporting allocator, but one of the two +// provided implementations may be used (RTGwpAsanBacktraceLibc or +// RTGwpAsanBacktraceSanitizerCommon). If not provided, a default implementation +// is used which prints the raw pointers only. +// ================================ Description ================================ +// This function shall take the backtrace provided in `TraceBuffer`, and print +// it in a human-readable format using `Print`. Generally, this function shall +// resolve raw pointers to section offsets and print them with the following +// sanitizer-common format: +// " #{frame_number} {pointer} in {function name} ({binary name}+{offset}" +// e.g. " #5 0x420459 in _start (/tmp/uaf+0x420459)" +// This format allows the backtrace to be symbolized offline successfully using +// llvm-symbolizer. +// =================================== Notes =================================== +// This function may directly or indirectly call malloc(), as the +// GuardedPoolAllocator contains a reentrancy barrier to prevent infinite +// recursion. Any allocation made inside this function will be served by the +// supporting allocator, and will not have GWP-ASan protections. +typedef void (*PrintBacktrace_t)(uintptr_t *TraceBuffer, size_t TraceLength, + Printf_t Print); struct Options { Printf_t Printf = nullptr; diff --git a/lib/gwp_asan/options.inc b/lib/gwp_asan/options.inc index 9042b11895ae..df6c46e6e98f 100644 --- a/lib/gwp_asan/options.inc +++ b/lib/gwp_asan/options.inc @@ -21,9 +21,9 @@ GWP_ASAN_OPTION( "byte buffer-overflows for multibyte allocations at the cost of " "performance, and may be incompatible with some architectures.") -GWP_ASAN_OPTION( - int, MaxSimultaneousAllocations, 16, - "Number of usable guarded slots in the allocation pool. Defaults to 16.") +GWP_ASAN_OPTION(int, MaxSimultaneousAllocations, 16, + "Number of simultaneously-guarded allocations available in the " + "pool. Defaults to 16.") GWP_ASAN_OPTION(int, SampleRate, 5000, "The probability (1 / SampleRate) that an allocation is " diff --git a/lib/gwp_asan/scripts/symbolize.sh b/lib/gwp_asan/scripts/symbolize.sh new file mode 100755 index 000000000000..fad9620a676e --- /dev/null +++ b/lib/gwp_asan/scripts/symbolize.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# The lines that we're looking to symbolize look like this: + #0 ./a.out(_foo+0x3e6) [0x55a52e64c696] +# ... which come from the backtrace_symbols() symbolisation function used by +# default in Scudo's implementation of GWP-ASan. + +while read -r line; do + # Check that this line needs symbolization. + should_symbolize="$(echo $line |\ + grep -E '^[ ]*\#.*\(.*\+0x[0-9a-f]+\) \[0x[0-9a-f]+\]$')" + + if [ -z "$should_symbolize" ]; then + echo "$line" + continue + fi + + # Carve up the input line into sections. + binary_name="$(echo $line | grep -oE ' .*\(' | rev | cut -c2- | rev |\ + cut -c2-)" + function_name="$(echo $line | grep -oE '\([^+]*' | cut -c2-)" + function_offset="$(echo $line | grep -oE '\(.*\)' | grep -oE '\+.*\)' |\ + cut -c2- | rev | cut -c2- | rev)" + frame_number="$(echo $line | grep -oE '\#[0-9]+ ')" + + if [ -z "$function_name" ]; then + # If the offset is binary-relative, just resolve that. 
+ symbolized="$(echo $function_offset | addr2line -e $binary_name)" + else + # Otherwise, the offset is function-relative. Get the address of the + # function, and add it to the offset, then symbolize. + function_addr="0x$(echo $function_offset |\ + nm --defined-only $binary_name 2> /dev/null |\ + grep -E " $function_name$" | cut -d' ' -f1)" + + # Check that we could get the function address from nm. + if [ -z "$function_addr" ]; then + echo "$line" + continue + fi + + # Add the function address and offset to get the offset into the binary. + binary_offset="$(printf "0x%X" "$((function_addr+function_offset))")" + symbolized="$(echo $binary_offset | addr2line -e $binary_name)" + fi + + # Check that it symbolized properly. If it didn't, output the old line. + echo $symbolized | grep -E ".*\?.*:" > /dev/null + if [ "$?" -eq "0" ]; then + echo "$line" + continue + else + echo "${frame_number}${symbolized}" + fi +done diff --git a/lib/gwp_asan/stack_trace_compressor.cpp b/lib/gwp_asan/stack_trace_compressor.cpp new file mode 100644 index 000000000000..ca3167fb83a8 --- /dev/null +++ b/lib/gwp_asan/stack_trace_compressor.cpp @@ -0,0 +1,111 @@ +//===-- stack_trace_compressor.cpp ------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "gwp_asan/stack_trace_compressor.h" + +namespace gwp_asan { +namespace compression { +namespace { +// Encodes `Value` as a variable-length integer to `Out`. Returns zero if there +// was not enough space in the output buffer to write the complete varInt. +// Otherwise returns the length of the encoded integer. +size_t varIntEncode(uintptr_t Value, uint8_t *Out, size_t OutLen) { + for (size_t i = 0; i < OutLen; ++i) { + Out[i] = Value & 0x7f; + Value >>= 7; + if (!Value) + return i + 1; + + Out[i] |= 0x80; + } + + return 0; +} + +// Decodes a variable-length integer to `Out`. Returns zero if the integer was +// too large to be represented in a uintptr_t, or if the input buffer finished +// before the integer was decoded (either case meaning that the `In` does not +// point to a valid varInt buffer). Otherwise, returns the number of bytes that +// were used to store the decoded integer. +size_t varIntDecode(const uint8_t *In, size_t InLen, uintptr_t *Out) { + *Out = 0; + uint8_t Shift = 0; + + for (size_t i = 0; i < InLen; ++i) { + *Out |= (static_cast(In[i]) & 0x7f) << Shift; + + if (In[i] < 0x80) + return i + 1; + + Shift += 7; + + // Disallow overflowing the range of the output integer. 
+ if (Shift >= sizeof(uintptr_t) * 8) + return 0; + } + return 0; +} + +uintptr_t zigzagEncode(uintptr_t Value) { + uintptr_t Encoded = Value << 1; + if (static_cast(Value) >= 0) + return Encoded; + return ~Encoded; +} + +uintptr_t zigzagDecode(uintptr_t Value) { + uintptr_t Decoded = Value >> 1; + if (!(Value & 1)) + return Decoded; + return ~Decoded; +} +} // anonymous namespace + +size_t pack(const uintptr_t *Unpacked, size_t UnpackedSize, uint8_t *Packed, + size_t PackedMaxSize) { + size_t Index = 0; + for (size_t CurrentDepth = 0; CurrentDepth < UnpackedSize; CurrentDepth++) { + uintptr_t Diff = Unpacked[CurrentDepth]; + if (CurrentDepth > 0) + Diff -= Unpacked[CurrentDepth - 1]; + size_t EncodedLength = + varIntEncode(zigzagEncode(Diff), Packed + Index, PackedMaxSize - Index); + if (!EncodedLength) + break; + + Index += EncodedLength; + } + + return Index; +} + +size_t unpack(const uint8_t *Packed, size_t PackedSize, uintptr_t *Unpacked, + size_t UnpackedMaxSize) { + size_t CurrentDepth; + size_t Index = 0; + for (CurrentDepth = 0; CurrentDepth < UnpackedMaxSize; CurrentDepth++) { + uintptr_t EncodedDiff; + size_t DecodedLength = + varIntDecode(Packed + Index, PackedSize - Index, &EncodedDiff); + if (!DecodedLength) + break; + Index += DecodedLength; + + Unpacked[CurrentDepth] = zigzagDecode(EncodedDiff); + if (CurrentDepth > 0) + Unpacked[CurrentDepth] += Unpacked[CurrentDepth - 1]; + } + + if (Index != PackedSize && CurrentDepth != UnpackedMaxSize) + return 0; + + return CurrentDepth; +} + +} // namespace compression +} // namespace gwp_asan diff --git a/lib/gwp_asan/stack_trace_compressor.h b/lib/gwp_asan/stack_trace_compressor.h new file mode 100644 index 000000000000..dcbd9a3c1f0a --- /dev/null +++ b/lib/gwp_asan/stack_trace_compressor.h @@ -0,0 +1,38 @@ +//===-- stack_trace_compressor.h --------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef GWP_ASAN_STACK_TRACE_COMPRESSOR_ +#define GWP_ASAN_STACK_TRACE_COMPRESSOR_ + +#include +#include + +// These functions implement stack frame compression and decompression. We store +// the zig-zag encoded pointer difference between frame[i] and frame[i - 1] as +// a variable-length integer. This can reduce the memory overhead of stack +// traces by 50%. + +namespace gwp_asan { +namespace compression { + +// For the stack trace in `Unpacked` with length `UnpackedSize`, pack it into +// the buffer `Packed` maximum length `PackedMaxSize`. The return value is the +// number of bytes that were written to the output buffer. +size_t pack(const uintptr_t *Unpacked, size_t UnpackedSize, uint8_t *Packed, + size_t PackedMaxSize); + +// From the packed stack trace in `Packed` of length `PackedSize`, write the +// unpacked stack trace of maximum length `UnpackedMaxSize` into `Unpacked`. +// Returns the number of full entries unpacked, or zero on error. 
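// A round-trip sketch (not part of the patch) for the compressor above. The
// sample addresses are arbitrary; pack() stores each frame as the zig-zag
// encoded delta to the previous frame, written as a base-128 varint (for
// example, a delta of 300 zig-zags to 600 and is stored as the bytes
// 0xD8 0x04).
#include <cassert>
#include <cstdint>
#include "gwp_asan/stack_trace_compressor.h"

int main() {
  const uintptr_t Trace[3] = {0x400123, 0x400456, 0x400789};
  uint8_t Packed[64];
  size_t PackedBytes =
      gwp_asan::compression::pack(Trace, 3, Packed, sizeof(Packed));

  uintptr_t Unpacked[8];
  size_t Frames =
      gwp_asan::compression::unpack(Packed, PackedBytes, Unpacked, 8);

  assert(Frames == 3);
  for (size_t I = 0; I < Frames; ++I)
    assert(Unpacked[I] == Trace[I]);
  return 0;
}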
+size_t unpack(const uint8_t *Packed, size_t PackedSize, uintptr_t *Unpacked, + size_t UnpackedMaxSize); + +} // namespace compression +} // namespace gwp_asan + +#endif // GWP_ASAN_STACK_TRACE_COMPRESSOR_ diff --git a/lib/hwasan/hwasan.cpp b/lib/hwasan/hwasan.cpp index 6f2246552164..7b5c6c694be9 100644 --- a/lib/hwasan/hwasan.cpp +++ b/lib/hwasan/hwasan.cpp @@ -193,27 +193,12 @@ void UpdateMemoryUsage() { void UpdateMemoryUsage() {} #endif -// Prepare to run instrumented code on the main thread. -void InitInstrumentation() { - if (hwasan_instrumentation_inited) return; - - if (!InitShadow()) { - Printf("FATAL: HWAddressSanitizer cannot mmap the shadow memory.\n"); - DumpProcessMap(); - Die(); - } - - InitThreads(); - hwasanThreadList().CreateCurrentThread(); - - hwasan_instrumentation_inited = 1; -} - } // namespace __hwasan +using namespace __hwasan; + void __sanitizer::BufferedStackTrace::UnwindImpl( uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) { - using namespace __hwasan; Thread *t = GetCurrentThread(); if (!t) { // the thread is still being created. @@ -231,9 +216,117 @@ void __sanitizer::BufferedStackTrace::UnwindImpl( Unwind(max_depth, pc, 0, context, 0, 0, false); } -// Interface. +struct hwasan_global { + s32 gv_relptr; + u32 info; +}; + +static void InitGlobals(const hwasan_global *begin, const hwasan_global *end) { + for (auto *desc = begin; desc != end; ++desc) { + uptr gv = reinterpret_cast(desc) + desc->gv_relptr; + uptr size = desc->info & 0xffffff; + uptr full_granule_size = RoundDownTo(size, 16); + u8 tag = desc->info >> 24; + TagMemoryAligned(gv, full_granule_size, tag); + if (size % 16) + TagMemoryAligned(gv + full_granule_size, 16, size % 16); + } +} -using namespace __hwasan; +enum { NT_LLVM_HWASAN_GLOBALS = 3 }; + +struct hwasan_global_note { + s32 begin_relptr; + s32 end_relptr; +}; + +// Check that the given library meets the code model requirements for tagged +// globals. These properties are not checked at link time so they need to be +// checked at runtime. +static void CheckCodeModel(ElfW(Addr) base, const ElfW(Phdr) * phdr, + ElfW(Half) phnum) { + ElfW(Addr) min_addr = -1ull, max_addr = 0; + for (unsigned i = 0; i != phnum; ++i) { + if (phdr[i].p_type != PT_LOAD) + continue; + ElfW(Addr) lo = base + phdr[i].p_vaddr, hi = lo + phdr[i].p_memsz; + if (min_addr > lo) + min_addr = lo; + if (max_addr < hi) + max_addr = hi; + } + + if (max_addr - min_addr > 1ull << 32) { + Report("FATAL: HWAddressSanitizer: library size exceeds 2^32\n"); + Die(); + } + if (max_addr > 1ull << 48) { + Report("FATAL: HWAddressSanitizer: library loaded above address 2^48\n"); + Die(); + } +} + +static void InitGlobalsFromPhdrs(ElfW(Addr) base, const ElfW(Phdr) * phdr, + ElfW(Half) phnum) { + for (unsigned i = 0; i != phnum; ++i) { + if (phdr[i].p_type != PT_NOTE) + continue; + const char *note = reinterpret_cast(base + phdr[i].p_vaddr); + const char *nend = note + phdr[i].p_memsz; + while (note < nend) { + auto *nhdr = reinterpret_cast(note); + const char *name = note + sizeof(ElfW(Nhdr)); + const char *desc = name + RoundUpTo(nhdr->n_namesz, 4); + if (nhdr->n_type != NT_LLVM_HWASAN_GLOBALS || + internal_strcmp(name, "LLVM") != 0) { + note = desc + RoundUpTo(nhdr->n_descsz, 4); + continue; + } + + // Only libraries with instrumented globals need to be checked against the + // code model since they use relocations that aren't checked at link time. 
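// Worked example for the global descriptor decoding in InitGlobals() above
// (not part of the patch): for desc->info == 0x2A000013 the tag is 0x2A and
// the size is 0x13 (19) bytes, so the first 16-byte granule's shadow gets tag
// 0x2A and the trailing partial granule's shadow gets the short-granule
// marker 3 (19 % 16), i.e. the number of addressable bytes in that granule.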
+ CheckCodeModel(base, phdr, phnum); + + auto *global_note = reinterpret_cast(desc); + auto *global_begin = reinterpret_cast( + note + global_note->begin_relptr); + auto *global_end = reinterpret_cast( + note + global_note->end_relptr); + InitGlobals(global_begin, global_end); + return; + } + } +} + +static void InitLoadedGlobals() { + dl_iterate_phdr( + [](dl_phdr_info *info, size_t size, void *data) { + InitGlobalsFromPhdrs(info->dlpi_addr, info->dlpi_phdr, + info->dlpi_phnum); + return 0; + }, + nullptr); +} + +// Prepare to run instrumented code on the main thread. +static void InitInstrumentation() { + if (hwasan_instrumentation_inited) return; + + InitPrctl(); + + if (!InitShadow()) { + Printf("FATAL: HWAddressSanitizer cannot mmap the shadow memory.\n"); + DumpProcessMap(); + Die(); + } + + InitThreads(); + hwasanThreadList().CreateCurrentThread(); + + hwasan_instrumentation_inited = 1; +} + +// Interface. uptr __hwasan_shadow_memory_dynamic_address; // Global interface symbol. @@ -244,6 +337,17 @@ void __hwasan_init_frames(uptr beg, uptr end) {} void __hwasan_init_static() { InitShadowGOT(); InitInstrumentation(); + + // In the non-static code path we call dl_iterate_phdr here. But at this point + // libc might not have been initialized enough for dl_iterate_phdr to work. + // Fortunately, since this is a statically linked executable we can use the + // linker-defined symbol __ehdr_start to find the only relevant set of phdrs. + extern ElfW(Ehdr) __ehdr_start; + InitGlobalsFromPhdrs( + 0, + reinterpret_cast( + reinterpret_cast(&__ehdr_start) + __ehdr_start.e_phoff), + __ehdr_start.e_phnum); } void __hwasan_init() { @@ -267,6 +371,7 @@ void __hwasan_init() { DisableCoreDumperIfNecessary(); InitInstrumentation(); + InitLoadedGlobals(); // Needs to be called here because flags()->random_tags might not have been // initialized when InitInstrumentation() was called. 
@@ -301,6 +406,18 @@ void __hwasan_init() { hwasan_inited = 1; } +void __hwasan_library_loaded(ElfW(Addr) base, const ElfW(Phdr) * phdr, + ElfW(Half) phnum) { + InitGlobalsFromPhdrs(base, phdr, phnum); +} + +void __hwasan_library_unloaded(ElfW(Addr) base, const ElfW(Phdr) * phdr, + ElfW(Half) phnum) { + for (; phnum != 0; ++phdr, --phnum) + if (phdr->p_type == PT_LOAD) + TagMemory(base + phdr->p_vaddr, phdr->p_memsz, 0); +} + void __hwasan_print_shadow(const void *p, uptr sz) { uptr ptr_raw = UntagAddr(reinterpret_cast(p)); uptr shadow_first = MemToShadow(ptr_raw); diff --git a/lib/hwasan/hwasan.h b/lib/hwasan/hwasan.h index 465e56c3a8cc..9e0ced93b55d 100644 --- a/lib/hwasan/hwasan.h +++ b/lib/hwasan/hwasan.h @@ -74,8 +74,8 @@ extern int hwasan_report_count; bool ProtectRange(uptr beg, uptr end); bool InitShadow(); +void InitPrctl(); void InitThreads(); -void InitInstrumentation(); void MadviseShadow(); char *GetProcSelfMaps(); void InitializeInterceptors(); diff --git a/lib/hwasan/hwasan_allocator.cpp b/lib/hwasan/hwasan_allocator.cpp index b4fae5820d0a..81a57d3afd4d 100644 --- a/lib/hwasan/hwasan_allocator.cpp +++ b/lib/hwasan/hwasan_allocator.cpp @@ -22,11 +22,6 @@ #include "hwasan_thread.h" #include "hwasan_report.h" -#if HWASAN_WITH_INTERCEPTORS -DEFINE_REAL(void *, realloc, void *ptr, uptr size) -DEFINE_REAL(void, free, void *ptr) -#endif - namespace __hwasan { static Allocator allocator; @@ -301,14 +296,6 @@ void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) { void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) { if (!ptr) return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false)); - -#if HWASAN_WITH_INTERCEPTORS - // A tag of 0 means that this is a system allocator allocation, so we must use - // the system allocator to realloc it. - if (!flags()->disable_allocator_tagging && GetTagFromPointer((uptr)ptr) == 0) - return REAL(realloc)(ptr, size); -#endif - if (size == 0) { HwasanDeallocate(stack, ptr); return nullptr; @@ -381,13 +368,6 @@ int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size, } void hwasan_free(void *ptr, StackTrace *stack) { -#if HWASAN_WITH_INTERCEPTORS - // A tag of 0 means that this is a system allocator allocation, so we must use - // the system allocator to free it. - if (!flags()->disable_allocator_tagging && GetTagFromPointer((uptr)ptr) == 0) - return REAL(free)(ptr); -#endif - return HwasanDeallocate(stack, ptr); } @@ -400,15 +380,6 @@ void __hwasan_enable_allocator_tagging() { } void __hwasan_disable_allocator_tagging() { -#if HWASAN_WITH_INTERCEPTORS - // Allocator tagging must be enabled for the system allocator fallback to work - // correctly. This means that we can't disable it at runtime if it was enabled - // at startup since that might result in our deallocations going to the system - // allocator. If tagging was disabled at startup we avoid this problem by - // disabling the fallback altogether. 
- CHECK(flags()->disable_allocator_tagging); -#endif - atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0); } diff --git a/lib/hwasan/hwasan_allocator.h b/lib/hwasan/hwasan_allocator.h index 3a50a11f3526..f62be2696021 100644 --- a/lib/hwasan/hwasan_allocator.h +++ b/lib/hwasan/hwasan_allocator.h @@ -13,7 +13,6 @@ #ifndef HWASAN_ALLOCATOR_H #define HWASAN_ALLOCATOR_H -#include "interception/interception.h" #include "sanitizer_common/sanitizer_allocator.h" #include "sanitizer_common/sanitizer_allocator_checks.h" #include "sanitizer_common/sanitizer_allocator_interface.h" @@ -26,11 +25,6 @@ #error Unsupported platform #endif -#if HWASAN_WITH_INTERCEPTORS -DECLARE_REAL(void *, realloc, void *ptr, uptr size) -DECLARE_REAL(void, free, void *ptr) -#endif - namespace __hwasan { struct Metadata { diff --git a/lib/hwasan/hwasan_exceptions.cpp b/lib/hwasan/hwasan_exceptions.cpp new file mode 100644 index 000000000000..169e7876cb58 --- /dev/null +++ b/lib/hwasan/hwasan_exceptions.cpp @@ -0,0 +1,67 @@ +//===-- hwasan_exceptions.cpp ---------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of HWAddressSanitizer. +// +// HWAddressSanitizer runtime. +//===----------------------------------------------------------------------===// + +#include "hwasan_poisoning.h" +#include "sanitizer_common/sanitizer_common.h" + +#include + +using namespace __hwasan; +using namespace __sanitizer; + +typedef _Unwind_Reason_Code PersonalityFn(int version, _Unwind_Action actions, + uint64_t exception_class, + _Unwind_Exception* unwind_exception, + _Unwind_Context* context); + +// Pointers to the _Unwind_GetGR and _Unwind_GetCFA functions are passed in +// instead of being called directly. This is to handle cases where the unwinder +// is statically linked and the sanitizer runtime and the program are linked +// against different unwinders. The _Unwind_Context data structure is opaque so +// it may be incompatible between unwinders. +typedef _Unwind_Word GetGRFn(_Unwind_Context* context, int index); +typedef _Unwind_Word GetCFAFn(_Unwind_Context* context); + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE _Unwind_Reason_Code +__hwasan_personality_wrapper(int version, _Unwind_Action actions, + uint64_t exception_class, + _Unwind_Exception* unwind_exception, + _Unwind_Context* context, + PersonalityFn* real_personality, GetGRFn* get_gr, + GetCFAFn* get_cfa) { + _Unwind_Reason_Code rc; + if (real_personality) + rc = real_personality(version, actions, exception_class, unwind_exception, + context); + else + rc = _URC_CONTINUE_UNWIND; + + // We only untag frames without a landing pad because landing pads are + // responsible for untagging the stack themselves if they resume. + // + // Here we assume that the frame record appears after any locals. This is not + // required by AAPCS but is a requirement for HWASAN instrumented functions. 
+ if ((actions & _UA_CLEANUP_PHASE) && rc == _URC_CONTINUE_UNWIND) { +#if defined(__x86_64__) + uptr fp = get_gr(context, 6); // rbp +#elif defined(__aarch64__) + uptr fp = get_gr(context, 29); // x29 +#else +#error Unsupported architecture +#endif + uptr sp = get_cfa(context); + TagMemory(sp, fp - sp, 0); + } + + return rc; +} diff --git a/lib/hwasan/hwasan_flags.inc b/lib/hwasan/hwasan_flags.inc index 2dff2b9aca6e..dffbf56cb155 100644 --- a/lib/hwasan/hwasan_flags.inc +++ b/lib/hwasan/hwasan_flags.inc @@ -1,4 +1,4 @@ -//===-- hwasan_flags.inc ------------------------------------------*- C++ -*-===// +//===-- hwasan_flags.inc ----------------------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. diff --git a/lib/hwasan/hwasan_interceptors.cpp b/lib/hwasan/hwasan_interceptors.cpp index 47fed0fc9abb..95e2e865717d 100644 --- a/lib/hwasan/hwasan_interceptors.cpp +++ b/lib/hwasan/hwasan_interceptors.cpp @@ -260,8 +260,6 @@ void InitializeInterceptors() { #if !defined(__aarch64__) INTERCEPT_FUNCTION(pthread_create); #endif // __aarch64__ - INTERCEPT_FUNCTION(realloc); - INTERCEPT_FUNCTION(free); #endif inited = 1; diff --git a/lib/hwasan/hwasan_interface_internal.h b/lib/hwasan/hwasan_interface_internal.h index 1b10d76c78e9..ca57f0fe437b 100644 --- a/lib/hwasan/hwasan_interface_internal.h +++ b/lib/hwasan/hwasan_interface_internal.h @@ -16,6 +16,7 @@ #include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_platform_limits_posix.h" +#include extern "C" { @@ -25,6 +26,14 @@ void __hwasan_init_static(); SANITIZER_INTERFACE_ATTRIBUTE void __hwasan_init(); +SANITIZER_INTERFACE_ATTRIBUTE +void __hwasan_library_loaded(ElfW(Addr) base, const ElfW(Phdr) * phdr, + ElfW(Half) phnum); + +SANITIZER_INTERFACE_ATTRIBUTE +void __hwasan_library_unloaded(ElfW(Addr) base, const ElfW(Phdr) * phdr, + ElfW(Half) phnum); + using __sanitizer::uptr; using __sanitizer::sptr; using __sanitizer::uu64; diff --git a/lib/hwasan/hwasan_linux.cpp b/lib/hwasan/hwasan_linux.cpp index d932976489e9..948e40154fec 100644 --- a/lib/hwasan/hwasan_linux.cpp +++ b/lib/hwasan/hwasan_linux.cpp @@ -34,6 +34,8 @@ #include #include #include +#include +#include #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_procmaps.h" @@ -144,6 +146,43 @@ static void InitializeShadowBaseAddress(uptr shadow_size_bytes) { FindDynamicShadowStart(shadow_size_bytes); } +void InitPrctl() { +#define PR_SET_TAGGED_ADDR_CTRL 55 +#define PR_GET_TAGGED_ADDR_CTRL 56 +#define PR_TAGGED_ADDR_ENABLE (1UL << 0) + // Check we're running on a kernel that can use the tagged address ABI. + if (internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) == (uptr)-1 && + errno == EINVAL) { +#if SANITIZER_ANDROID + // Some older Android kernels have the tagged pointer ABI on + // unconditionally, and hence don't have the tagged-addr prctl while still + // allow the ABI. + // If targeting Android and the prctl is not around we assume this is the + // case. + return; +#else + Printf( + "FATAL: " + "HWAddressSanitizer requires a kernel with tagged address ABI.\n"); + Die(); +#endif + } + + // Turn on the tagged address ABI. 
+ if (internal_prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) == + (uptr)-1 || + !internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0)) { + Printf( + "FATAL: HWAddressSanitizer failed to enable tagged address syscall " + "ABI.\nSuggest check `sysctl abi.tagged_addr_disabled` " + "configuration.\n"); + Die(); + } +#undef PR_SET_TAGGED_ADDR_CTRL +#undef PR_GET_TAGGED_ADDR_CTRL +#undef PR_TAGGED_ADDR_ENABLE +} + bool InitShadow() { // Define the entire memory range. kHighMemEnd = GetHighMemEnd(); @@ -211,8 +250,7 @@ void InitThreads() { static void MadviseShadowRegion(uptr beg, uptr end) { uptr size = end - beg + 1; - if (common_flags()->no_huge_pages_for_shadow) - NoHugePagesInRegion(beg, size); + SetShadowRegionHugePageMode(beg, size); if (common_flags()->use_madv_dontdump) DontDumpShadowMemory(beg, size); } diff --git a/lib/hwasan/hwasan_new_delete.cpp b/lib/hwasan/hwasan_new_delete.cpp index 4a9c79fe41b3..191c17e56a74 100644 --- a/lib/hwasan/hwasan_new_delete.cpp +++ b/lib/hwasan/hwasan_new_delete.cpp @@ -20,7 +20,7 @@ #include -using namespace __hwasan; // NOLINT +using namespace __hwasan; // Fake std::nothrow_t to avoid including . namespace std { diff --git a/lib/hwasan/hwasan_report.cpp b/lib/hwasan/hwasan_report.cpp index 346889797888..19cb27554bc6 100644 --- a/lib/hwasan/hwasan_report.cpp +++ b/lib/hwasan/hwasan_report.cpp @@ -278,6 +278,31 @@ void PrintAddressDescription( Printf("%s", d.Default()); GetStackTraceFromId(chunk.GetAllocStackId()).Print(); num_descriptions_printed++; + } else { + // Check whether the address points into a loaded library. If so, this is + // most likely a global variable. + const char *module_name; + uptr module_address; + Symbolizer *sym = Symbolizer::GetOrInit(); + if (sym->GetModuleNameAndOffsetForPC(mem, &module_name, + &module_address)) { + DataInfo info; + if (sym->SymbolizeData(mem, &info) && info.start) { + Printf( + "%p is located %zd bytes to the %s of %zd-byte global variable " + "%s [%p,%p) in %s\n", + untagged_addr, + candidate == left ? untagged_addr - (info.start + info.size) + : info.start - untagged_addr, + candidate == left ? "right" : "left", info.size, info.name, + info.start, info.start + info.size, module_name); + } else { + Printf("%p is located to the %s of a global variable in (%s+0x%x)\n", + untagged_addr, candidate == left ? "right" : "left", + module_name, module_address); + } + num_descriptions_printed++; + } } } diff --git a/lib/hwasan/hwasan_tag_mismatch_aarch64.S b/lib/hwasan/hwasan_tag_mismatch_aarch64.S index 92f627480486..4c060a61e98e 100644 --- a/lib/hwasan/hwasan_tag_mismatch_aarch64.S +++ b/lib/hwasan/hwasan_tag_mismatch_aarch64.S @@ -51,14 +51,60 @@ // +---------------------------------+ <-- [x30 / SP] // This function takes two arguments: -// * x0: The address of read/write instruction that caused HWASan check fail. -// * x1: The tag size. +// * x0: The data address. +// * x1: The encoded access info for the failing access. +// This function has two entry points. The first, __hwasan_tag_mismatch, is used +// by clients that were compiled without short tag checks (i.e. binaries built +// by older compilers and binaries targeting older runtimes). In this case the +// outlined tag check will be missing the code handling short tags (which won't +// be used in the binary's own stack variables but may be used on the heap +// or stack variables in other binaries), so the check needs to be done here. +// +// The second, __hwasan_tag_mismatch_v2, is used by binaries targeting newer +// runtimes. 
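// A standalone sketch (not part of the patch) of the tagged-address-ABI prctl
// dance performed by InitPrctl() in the hwasan_linux.cpp hunk above, written
// for an ordinary user program on an AArch64 Linux 5.4+ kernel. The fallback
// #defines mirror the values used in the patch for headers that predate the
// interface.
#include <sys/prctl.h>

#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL 55
#endif
#ifndef PR_GET_TAGGED_ADDR_CTRL
#define PR_GET_TAGGED_ADDR_CTRL 56
#endif
#ifndef PR_TAGGED_ADDR_ENABLE
#define PR_TAGGED_ADDR_ENABLE (1UL << 0)
#endif

// Returns true if the kernel now accepts tagged pointers in syscall arguments.
static bool enableTaggedAddrABI() {
  if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) != 0)
    return false;  // prctl unsupported, or abi.tagged_addr_disabled is set
  long Ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  return Ctrl >= 0 && (Ctrl & PR_TAGGED_ADDR_ENABLE) != 0;
}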
This entry point bypasses the short tag check since it will have +// already been done as part of the outlined tag check. Since tag mismatches are +// uncommon, there isn't a significant performance benefit to being able to +// bypass the check; the main benefits are that we can sometimes avoid +// clobbering the x17 register in error reports, and that the program will have +// a runtime dependency on the __hwasan_tag_mismatch_v2 symbol therefore it will +// fail to start up given an older (i.e. incompatible) runtime. .section .text .file "hwasan_tag_mismatch_aarch64.S" .global __hwasan_tag_mismatch .type __hwasan_tag_mismatch, %function __hwasan_tag_mismatch: + // Compute the granule position one past the end of the access. + mov x16, #1 + and x17, x1, #0xf + lsl x16, x16, x17 + and x17, x0, #0xf + add x17, x16, x17 + + // Load the shadow byte again and check whether it is a short tag within the + // range of the granule position computed above. + ubfx x16, x0, #4, #52 + ldrb w16, [x9, x16] + cmp w16, #0xf + b.hi __hwasan_tag_mismatch_v2 + cmp w16, w17 + b.lo __hwasan_tag_mismatch_v2 + + // Load the real tag from the last byte of the granule and compare against + // the pointer tag. + orr x16, x0, #0xf + ldrb w16, [x16] + cmp x16, x0, lsr #56 + b.ne __hwasan_tag_mismatch_v2 + + // Restore x0, x1 and sp to their values from before the __hwasan_tag_mismatch + // call and resume execution. + ldp x0, x1, [sp], #256 + ret + +.global __hwasan_tag_mismatch_v2 +.type __hwasan_tag_mismatch_v2, %function +__hwasan_tag_mismatch_v2: CFI_STARTPROC // Set the CFA to be the return address for caller of __hwasan_check_*. Note diff --git a/lib/interception/interception.h b/lib/interception/interception.h index dacfa5ede28d..d27a8ccf92a8 100644 --- a/lib/interception/interception.h +++ b/lib/interception/interception.h @@ -272,9 +272,9 @@ const interpose_substitution substitution_##func_name[] \ // INTERCEPT_FUNCTION macro, only its name. namespace __interception { #if defined(_WIN64) -typedef unsigned long long uptr; // NOLINT +typedef unsigned long long uptr; #else -typedef unsigned long uptr; // NOLINT +typedef unsigned long uptr; #endif // _WIN64 } // namespace __interception diff --git a/lib/interception/interception_linux.cc b/lib/interception/interception_linux.cc deleted file mode 100644 index 4b27102a159c..000000000000 --- a/lib/interception/interception_linux.cc +++ /dev/null @@ -1,83 +0,0 @@ -//===-- interception_linux.cc -----------------------------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Linux-specific interception methods. 
-//===----------------------------------------------------------------------===// - -#include "interception.h" - -#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \ - SANITIZER_OPENBSD || SANITIZER_SOLARIS - -#include // for dlsym() and dlvsym() - -namespace __interception { - -#if SANITIZER_NETBSD -static int StrCmp(const char *s1, const char *s2) { - while (true) { - if (*s1 != *s2) - return false; - if (*s1 == 0) - return true; - s1++; - s2++; - } -} -#endif - -static void *GetFuncAddr(const char *name, uptr wrapper_addr) { -#if SANITIZER_NETBSD - // FIXME: Find a better way to handle renames - if (StrCmp(name, "sigaction")) - name = "__sigaction14"; -#endif - void *addr = dlsym(RTLD_NEXT, name); - if (!addr) { - // If the lookup using RTLD_NEXT failed, the sanitizer runtime library is - // later in the library search order than the DSO that we are trying to - // intercept, which means that we cannot intercept this function. We still - // want the address of the real definition, though, so look it up using - // RTLD_DEFAULT. - addr = dlsym(RTLD_DEFAULT, name); - - // In case `name' is not loaded, dlsym ends up finding the actual wrapper. - // We don't want to intercept the wrapper and have it point to itself. - if ((uptr)addr == wrapper_addr) - addr = nullptr; - } - return addr; -} - -bool InterceptFunction(const char *name, uptr *ptr_to_real, uptr func, - uptr wrapper) { - void *addr = GetFuncAddr(name, wrapper); - *ptr_to_real = (uptr)addr; - return addr && (func == wrapper); -} - -// Android and Solaris do not have dlvsym -#if !SANITIZER_ANDROID && !SANITIZER_SOLARIS && !SANITIZER_OPENBSD -static void *GetFuncAddr(const char *name, const char *ver) { - return dlvsym(RTLD_NEXT, name, ver); -} - -bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real, - uptr func, uptr wrapper) { - void *addr = GetFuncAddr(name, ver); - *ptr_to_real = (uptr)addr; - return addr && (func == wrapper); -} -#endif // !SANITIZER_ANDROID - -} // namespace __interception - -#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || - // SANITIZER_OPENBSD || SANITIZER_SOLARIS diff --git a/lib/interception/interception_linux.cpp b/lib/interception/interception_linux.cpp new file mode 100644 index 000000000000..950cd5126538 --- /dev/null +++ b/lib/interception/interception_linux.cpp @@ -0,0 +1,83 @@ +//===-- interception_linux.cpp ----------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Linux-specific interception methods. 
+//===----------------------------------------------------------------------===// + +#include "interception.h" + +#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \ + SANITIZER_OPENBSD || SANITIZER_SOLARIS + +#include // for dlsym() and dlvsym() + +namespace __interception { + +#if SANITIZER_NETBSD +static int StrCmp(const char *s1, const char *s2) { + while (true) { + if (*s1 != *s2) + return false; + if (*s1 == 0) + return true; + s1++; + s2++; + } +} +#endif + +static void *GetFuncAddr(const char *name, uptr wrapper_addr) { +#if SANITIZER_NETBSD + // FIXME: Find a better way to handle renames + if (StrCmp(name, "sigaction")) + name = "__sigaction14"; +#endif + void *addr = dlsym(RTLD_NEXT, name); + if (!addr) { + // If the lookup using RTLD_NEXT failed, the sanitizer runtime library is + // later in the library search order than the DSO that we are trying to + // intercept, which means that we cannot intercept this function. We still + // want the address of the real definition, though, so look it up using + // RTLD_DEFAULT. + addr = dlsym(RTLD_DEFAULT, name); + + // In case `name' is not loaded, dlsym ends up finding the actual wrapper. + // We don't want to intercept the wrapper and have it point to itself. + if ((uptr)addr == wrapper_addr) + addr = nullptr; + } + return addr; +} + +bool InterceptFunction(const char *name, uptr *ptr_to_real, uptr func, + uptr wrapper) { + void *addr = GetFuncAddr(name, wrapper); + *ptr_to_real = (uptr)addr; + return addr && (func == wrapper); +} + +// Android and Solaris do not have dlvsym +#if !SANITIZER_ANDROID && !SANITIZER_SOLARIS && !SANITIZER_OPENBSD +static void *GetFuncAddr(const char *name, const char *ver) { + return dlvsym(RTLD_NEXT, name, ver); +} + +bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real, + uptr func, uptr wrapper) { + void *addr = GetFuncAddr(name, ver); + *ptr_to_real = (uptr)addr; + return addr && (func == wrapper); +} +#endif // !SANITIZER_ANDROID + +} // namespace __interception + +#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || + // SANITIZER_OPENBSD || SANITIZER_SOLARIS diff --git a/lib/interception/interception_mac.cc b/lib/interception/interception_mac.cc deleted file mode 100644 index 5bfc1514d2b8..000000000000 --- a/lib/interception/interception_mac.cc +++ /dev/null @@ -1,18 +0,0 @@ -//===-- interception_mac.cc -------------------------------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Mac-specific interception methods. -//===----------------------------------------------------------------------===// - -#include "interception.h" - -#if SANITIZER_MAC - -#endif // SANITIZER_MAC diff --git a/lib/interception/interception_mac.cpp b/lib/interception/interception_mac.cpp new file mode 100644 index 000000000000..fb6eadcff597 --- /dev/null +++ b/lib/interception/interception_mac.cpp @@ -0,0 +1,18 @@ +//===-- interception_mac.cpp ------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Mac-specific interception methods. +//===----------------------------------------------------------------------===// + +#include "interception.h" + +#if SANITIZER_MAC + +#endif // SANITIZER_MAC diff --git a/lib/interception/interception_type_test.cc b/lib/interception/interception_type_test.cc deleted file mode 100644 index c00294a9b474..000000000000 --- a/lib/interception/interception_type_test.cc +++ /dev/null @@ -1,39 +0,0 @@ -//===-- interception_type_test.cc -------------------------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Compile-time tests of the internal type definitions. -//===----------------------------------------------------------------------===// - -#include "interception.h" - -#if SANITIZER_LINUX || SANITIZER_MAC - -#include -#include -#include - -COMPILER_CHECK(sizeof(::SIZE_T) == sizeof(size_t)); -COMPILER_CHECK(sizeof(::SSIZE_T) == sizeof(ssize_t)); -COMPILER_CHECK(sizeof(::PTRDIFF_T) == sizeof(ptrdiff_t)); -COMPILER_CHECK(sizeof(::INTMAX_T) == sizeof(intmax_t)); - -#if !SANITIZER_MAC -COMPILER_CHECK(sizeof(::OFF64_T) == sizeof(off64_t)); -#endif - -// The following are the cases when pread (and friends) is used instead of -// pread64. In those cases we need OFF_T to match off_t. We don't care about the -// rest (they depend on _FILE_OFFSET_BITS setting when building an application). -# if SANITIZER_ANDROID || !defined _FILE_OFFSET_BITS || \ - _FILE_OFFSET_BITS != 64 -COMPILER_CHECK(sizeof(::OFF_T) == sizeof(off_t)); -# endif - -#endif diff --git a/lib/interception/interception_type_test.cpp b/lib/interception/interception_type_test.cpp new file mode 100644 index 000000000000..a611604a700c --- /dev/null +++ b/lib/interception/interception_type_test.cpp @@ -0,0 +1,39 @@ +//===-- interception_type_test.cpp ------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Compile-time tests of the internal type definitions. +//===----------------------------------------------------------------------===// + +#include "interception.h" + +#if SANITIZER_LINUX || SANITIZER_MAC + +#include +#include +#include + +COMPILER_CHECK(sizeof(::SIZE_T) == sizeof(size_t)); +COMPILER_CHECK(sizeof(::SSIZE_T) == sizeof(ssize_t)); +COMPILER_CHECK(sizeof(::PTRDIFF_T) == sizeof(ptrdiff_t)); +COMPILER_CHECK(sizeof(::INTMAX_T) == sizeof(intmax_t)); + +#if !SANITIZER_MAC +COMPILER_CHECK(sizeof(::OFF64_T) == sizeof(off64_t)); +#endif + +// The following are the cases when pread (and friends) is used instead of +// pread64. In those cases we need OFF_T to match off_t. We don't care about the +// rest (they depend on _FILE_OFFSET_BITS setting when building an application). 
+# if SANITIZER_ANDROID || !defined _FILE_OFFSET_BITS || \ + _FILE_OFFSET_BITS != 64 +COMPILER_CHECK(sizeof(::OFF_T) == sizeof(off_t)); +# endif + +#endif diff --git a/lib/interception/interception_win.cc b/lib/interception/interception_win.cc deleted file mode 100644 index 40bde008052b..000000000000 --- a/lib/interception/interception_win.cc +++ /dev/null @@ -1,1022 +0,0 @@ -//===-- interception_linux.cc -----------------------------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Windows-specific interception methods. -// -// This file is implementing several hooking techniques to intercept calls -// to functions. The hooks are dynamically installed by modifying the assembly -// code. -// -// The hooking techniques are making assumptions on the way the code is -// generated and are safe under these assumptions. -// -// On 64-bit architecture, there is no direct 64-bit jump instruction. To allow -// arbitrary branching on the whole memory space, the notion of trampoline -// region is used. A trampoline region is a memory space withing 2G boundary -// where it is safe to add custom assembly code to build 64-bit jumps. -// -// Hooking techniques -// ================== -// -// 1) Detour -// -// The Detour hooking technique is assuming the presence of an header with -// padding and an overridable 2-bytes nop instruction (mov edi, edi). The -// nop instruction can safely be replaced by a 2-bytes jump without any need -// to save the instruction. A jump to the target is encoded in the function -// header and the nop instruction is replaced by a short jump to the header. -// -// head: 5 x nop head: jmp -// func: mov edi, edi --> func: jmp short -// [...] real: [...] -// -// This technique is only implemented on 32-bit architecture. -// Most of the time, Windows API are hookable with the detour technique. -// -// 2) Redirect Jump -// -// The redirect jump is applicable when the first instruction is a direct -// jump. The instruction is replaced by jump to the hook. -// -// func: jmp