author     Dimitry Andric <dim@FreeBSD.org>  2015-02-10 07:45:43 +0000
committer  Dimitry Andric <dim@FreeBSD.org>  2015-02-10 07:45:43 +0000
commit     476c4db3dc56bee43df384704c75ccc71cfa7a1d
tree       5d0dcec3cc12fc53532fc84029892b98711a2596  /lib/asan
parent     ca9211ecdede9bdedb812b2243a4abdb8dacd1b9
Diffstat (limited to 'lib/asan')
-rw-r--r--  lib/asan/CMakeLists.txt                  60
-rw-r--r--  lib/asan/asan_activation.cc             142
-rw-r--r--  lib/asan/asan_activation.h                2
-rw-r--r--  lib/asan/asan_activation_flags.inc       35
-rw-r--r--  lib/asan/asan_allocator.cc              909
-rw-r--r--  lib/asan/asan_allocator.h                24
-rw-r--r--  lib/asan/asan_allocator2.cc             792
-rw-r--r--  lib/asan/asan_debugging.cc                4
-rw-r--r--  lib/asan/asan_fake_stack.cc              23
-rw-r--r--  lib/asan/asan_flags.cc                  141
-rw-r--r--  lib/asan/asan_flags.h                    48
-rw-r--r--  lib/asan/asan_flags.inc                 144
-rw-r--r--  lib/asan/asan_globals.cc                  8
-rw-r--r--  lib/asan/asan_init_version.h              6
-rw-r--r--  lib/asan/asan_interceptors.cc            54
-rw-r--r--  lib/asan/asan_internal.h                  5
-rw-r--r--  lib/asan/asan_linux.cc                    4
-rw-r--r--  lib/asan/asan_mac.cc                    141
-rw-r--r--  lib/asan/asan_malloc_mac.cc              79
-rw-r--r--  lib/asan/asan_mapping.h                  31
-rw-r--r--  lib/asan/asan_poisoning.cc               19
-rw-r--r--  lib/asan/asan_poisoning.h                11
-rw-r--r--  lib/asan/asan_report.cc                   4
-rw-r--r--  lib/asan/asan_rtl.cc                    325
-rw-r--r--  lib/asan/asan_stack.cc                   15
-rw-r--r--  lib/asan/asan_stack.h                    10
-rwxr-xr-x  lib/asan/scripts/asan_device_setup        3
-rwxr-xr-x  lib/asan/scripts/asan_symbolize.py       72
-rw-r--r--  lib/asan/tests/CMakeLists.txt            37
-rw-r--r--  lib/asan/tests/asan_interface_test.cc     2
-rw-r--r--  lib/asan/tests/asan_noinst_test.cc        9
-rw-r--r--  lib/asan/tests/asan_test.cc              33
32 files changed, 1761 insertions(+), 1431 deletions(-)
diff --git a/lib/asan/CMakeLists.txt b/lib/asan/CMakeLists.txt
index 47486b7caf89..d4c5c17d36a2 100644
--- a/lib/asan/CMakeLists.txt
+++ b/lib/asan/CMakeLists.txt
@@ -1,10 +1,11 @@
# Build for the AddressSanitizer runtime support library.
set(ASAN_SOURCES
- asan_allocator2.cc
+ asan_allocator.cc
asan_activation.cc
asan_debugging.cc
asan_fake_stack.cc
+ asan_flags.cc
asan_globals.cc
asan_interceptors.cc
asan_linux.cc
@@ -64,8 +65,8 @@ if(APPLE)
add_compiler_rt_darwin_object_library(RTAsan ${os}
ARCH ${ASAN_SUPPORTED_ARCH}
SOURCES ${ASAN_SOURCES} ${ASAN_CXX_SOURCES}
- CFLAGS ${ASAN_CFLAGS}
- DEFS ${ASAN_COMMON_DEFINITIONS})
+ CFLAGS ${ASAN_DYNAMIC_CFLAGS}
+ DEFS ${ASAN_DYNAMIC_DEFINITIONS})
endforeach()
else()
foreach(arch ${ASAN_SUPPORTED_ARCH})
@@ -78,12 +79,10 @@ else()
add_compiler_rt_object_library(RTAsan_preinit ${arch}
SOURCES ${ASAN_PREINIT_SOURCES} CFLAGS ${ASAN_CFLAGS}
DEFS ${ASAN_COMMON_DEFINITIONS})
- if (COMPILER_RT_BUILD_SHARED_ASAN)
- add_compiler_rt_object_library(RTAsan_dynamic ${arch}
- SOURCES ${ASAN_SOURCES} ${ASAN_CXX_SOURCES}
- CFLAGS ${ASAN_DYNAMIC_CFLAGS}
- DEFS ${ASAN_DYNAMIC_DEFINITIONS})
- endif()
+ add_compiler_rt_object_library(RTAsan_dynamic ${arch}
+ SOURCES ${ASAN_SOURCES} ${ASAN_CXX_SOURCES}
+ CFLAGS ${ASAN_DYNAMIC_CFLAGS}
+ DEFS ${ASAN_DYNAMIC_DEFINITIONS})
endforeach()
endif()
@@ -97,8 +96,8 @@ if(APPLE)
$<TARGET_OBJECTS:RTInterception.${os}>
$<TARGET_OBJECTS:RTSanitizerCommon.${os}>
$<TARGET_OBJECTS:RTLSanCommon.${os}>
- CFLAGS ${ASAN_CFLAGS}
- DEFS ${ASAN_COMMON_DEFINITIONS})
+ CFLAGS ${ASAN_DYNAMIC_CFLAGS}
+ DEFS ${ASAN_DYNAMIC_DEFINITIONS})
add_dependencies(asan clang_rt.asan_${os}_dynamic)
endforeach()
else()
@@ -128,28 +127,25 @@ else()
DEFS ${ASAN_COMMON_DEFINITIONS})
add_dependencies(asan clang_rt.asan_cxx-${arch})
- if (COMPILER_RT_BUILD_SHARED_ASAN)
- add_compiler_rt_runtime(clang_rt.asan-preinit-${arch} ${arch} STATIC
- SOURCES $<TARGET_OBJECTS:RTAsan_preinit.${arch}>
- CFLAGS ${ASAN_CFLAGS}
- DEFS ${ASAN_COMMON_DEFINITIONS})
- add_dependencies(asan clang_rt.asan-preinit-${arch})
-
- if (WIN32)
- set(SHARED_ASAN_NAME clang_rt.asan_dynamic-${arch}${COMPILER_RT_OS_SUFFIX})
- else()
- set(SHARED_ASAN_NAME clang_rt.asan-${arch}${COMPILER_RT_OS_SUFFIX})
- endif()
-
- add_compiler_rt_runtime(clang_rt.asan-dynamic-${arch} ${arch} SHARED
- OUTPUT_NAME ${SHARED_ASAN_NAME}
- SOURCES $<TARGET_OBJECTS:RTAsan_dynamic.${arch}>
- ${ASAN_COMMON_RUNTIME_OBJECTS}
- CFLAGS ${ASAN_DYNAMIC_CFLAGS}
- DEFS ${ASAN_DYNAMIC_DEFINITIONS})
- target_link_libraries(clang_rt.asan-dynamic-${arch} ${ASAN_DYNAMIC_LIBS})
- add_dependencies(asan clang_rt.asan-dynamic-${arch})
+ add_compiler_rt_runtime(clang_rt.asan-preinit-${arch} ${arch} STATIC
+ SOURCES $<TARGET_OBJECTS:RTAsan_preinit.${arch}>
+ CFLAGS ${ASAN_CFLAGS}
+ DEFS ${ASAN_COMMON_DEFINITIONS})
+ add_dependencies(asan clang_rt.asan-preinit-${arch})
+
+ if (WIN32)
+ set(SHARED_ASAN_NAME clang_rt.asan_dynamic-${arch}${COMPILER_RT_OS_SUFFIX})
+ else()
+ set(SHARED_ASAN_NAME clang_rt.asan-${arch}${COMPILER_RT_OS_SUFFIX})
endif()
+ add_compiler_rt_runtime(clang_rt.asan-dynamic-${arch} ${arch} SHARED
+ OUTPUT_NAME ${SHARED_ASAN_NAME}
+ SOURCES $<TARGET_OBJECTS:RTAsan_dynamic.${arch}>
+ ${ASAN_COMMON_RUNTIME_OBJECTS}
+ CFLAGS ${ASAN_DYNAMIC_CFLAGS}
+ DEFS ${ASAN_DYNAMIC_DEFINITIONS})
+ target_link_libraries(clang_rt.asan-dynamic-${arch} ${ASAN_DYNAMIC_LIBS})
+ add_dependencies(asan clang_rt.asan-dynamic-${arch})
if (UNIX AND NOT ${arch} STREQUAL "i386" AND NOT ${arch} STREQUAL "i686")
add_sanitizer_rt_symbols(clang_rt.asan_cxx-${arch})
diff --git a/lib/asan/asan_activation.cc b/lib/asan/asan_activation.cc
index eb4a6db0b85e..3bc01984898d 100644
--- a/lib/asan/asan_activation.cc
+++ b/lib/asan/asan_activation.cc
@@ -16,40 +16,106 @@
#include "asan_allocator.h"
#include "asan_flags.h"
#include "asan_internal.h"
+#include "asan_poisoning.h"
+#include "asan_stack.h"
#include "sanitizer_common/sanitizer_flags.h"
namespace __asan {
static struct AsanDeactivatedFlags {
- int quarantine_size;
- int max_redzone;
+ AllocatorOptions allocator_options;
int malloc_context_size;
bool poison_heap;
- bool alloc_dealloc_mismatch;
- bool allocator_may_return_null;
+ bool coverage;
+ const char *coverage_dir;
+
+ void RegisterActivationFlags(FlagParser *parser, Flags *f, CommonFlags *cf) {
+#define ASAN_ACTIVATION_FLAG(Type, Name) \
+ RegisterFlag(parser, #Name, "", &f->Name);
+#define COMMON_ACTIVATION_FLAG(Type, Name) \
+ RegisterFlag(parser, #Name, "", &cf->Name);
+#include "asan_activation_flags.inc"
+#undef ASAN_ACTIVATION_FLAG
+#undef COMMON_ACTIVATION_FLAG
+
+ RegisterIncludeFlag(parser, cf);
+ }
+
+ void OverrideFromActivationFlags() {
+ Flags f;
+ CommonFlags cf;
+ FlagParser parser;
+ RegisterActivationFlags(&parser, &f, &cf);
+
+ // Copy the current activation flags.
+ allocator_options.CopyTo(&f, &cf);
+ cf.malloc_context_size = malloc_context_size;
+ f.poison_heap = poison_heap;
+ cf.coverage = coverage;
+ cf.coverage_dir = coverage_dir;
+ cf.verbosity = Verbosity();
+ cf.help = false; // this is activation-specific help
+
+ // Check if activation flags need to be overridden.
+ if (const char *env = GetEnv("ASAN_ACTIVATION_OPTIONS")) {
+ parser.ParseString(env);
+ }
+
+ // Override from getprop asan.options.
+ char buf[100];
+ GetExtraActivationFlags(buf, sizeof(buf));
+ parser.ParseString(buf);
+
+ SetVerbosity(cf.verbosity);
+
+ if (Verbosity()) ReportUnrecognizedFlags();
+
+ if (cf.help) parser.PrintFlagDescriptions();
+
+ allocator_options.SetFrom(&f, &cf);
+ malloc_context_size = cf.malloc_context_size;
+ poison_heap = f.poison_heap;
+ coverage = cf.coverage;
+ coverage_dir = cf.coverage_dir;
+ }
+
+ void Print() {
+ Report(
+ "quarantine_size_mb %d, max_redzone %d, poison_heap %d, "
+ "malloc_context_size %d, alloc_dealloc_mismatch %d, "
+ "allocator_may_return_null %d, coverage %d, coverage_dir %s\n",
+ allocator_options.quarantine_size_mb, allocator_options.max_redzone,
+ poison_heap, malloc_context_size,
+ allocator_options.alloc_dealloc_mismatch,
+ allocator_options.may_return_null, coverage, coverage_dir);
+ }
} asan_deactivated_flags;
static bool asan_is_deactivated;
-void AsanStartDeactivated() {
+void AsanDeactivate() {
+ CHECK(!asan_is_deactivated);
VReport(1, "Deactivating ASan\n");
- // Save flag values.
- asan_deactivated_flags.quarantine_size = flags()->quarantine_size;
- asan_deactivated_flags.max_redzone = flags()->max_redzone;
- asan_deactivated_flags.poison_heap = flags()->poison_heap;
- asan_deactivated_flags.malloc_context_size =
- common_flags()->malloc_context_size;
- asan_deactivated_flags.alloc_dealloc_mismatch =
- flags()->alloc_dealloc_mismatch;
- asan_deactivated_flags.allocator_may_return_null =
- common_flags()->allocator_may_return_null;
-
- flags()->quarantine_size = 0;
- flags()->max_redzone = 16;
- flags()->poison_heap = false;
- common_flags()->malloc_context_size = 0;
- flags()->alloc_dealloc_mismatch = false;
- common_flags()->allocator_may_return_null = true;
+
+ // Stash runtime state.
+ GetAllocatorOptions(&asan_deactivated_flags.allocator_options);
+ asan_deactivated_flags.malloc_context_size = GetMallocContextSize();
+ asan_deactivated_flags.poison_heap = CanPoisonMemory();
+ asan_deactivated_flags.coverage = common_flags()->coverage;
+ asan_deactivated_flags.coverage_dir = common_flags()->coverage_dir;
+
+ // Deactivate the runtime.
+ SetCanPoisonMemory(false);
+ SetMallocContextSize(1);
+ ReInitializeCoverage(false, nullptr);
+
+ AllocatorOptions disabled = asan_deactivated_flags.allocator_options;
+ disabled.quarantine_size_mb = 0;
+ disabled.min_redzone = 16; // Redzone must be at least 16 bytes long.
+ disabled.max_redzone = 16;
+ disabled.alloc_dealloc_mismatch = false;
+ disabled.may_return_null = true;
+ ReInitializeAllocator(disabled);
asan_is_deactivated = true;
}
@@ -58,31 +124,19 @@ void AsanActivate() {
if (!asan_is_deactivated) return;
VReport(1, "Activating ASan\n");
- // Restore flag values.
- // FIXME: this is not atomic, and there may be other threads alive.
- flags()->quarantine_size = asan_deactivated_flags.quarantine_size;
- flags()->max_redzone = asan_deactivated_flags.max_redzone;
- flags()->poison_heap = asan_deactivated_flags.poison_heap;
- common_flags()->malloc_context_size =
- asan_deactivated_flags.malloc_context_size;
- flags()->alloc_dealloc_mismatch =
- asan_deactivated_flags.alloc_dealloc_mismatch;
- common_flags()->allocator_may_return_null =
- asan_deactivated_flags.allocator_may_return_null;
-
- ParseExtraActivationFlags();
+ asan_deactivated_flags.OverrideFromActivationFlags();
- ReInitializeAllocator();
+ SetCanPoisonMemory(asan_deactivated_flags.poison_heap);
+ SetMallocContextSize(asan_deactivated_flags.malloc_context_size);
+ ReInitializeCoverage(asan_deactivated_flags.coverage,
+ asan_deactivated_flags.coverage_dir);
+ ReInitializeAllocator(asan_deactivated_flags.allocator_options);
asan_is_deactivated = false;
- VReport(
- 1,
- "quarantine_size %d, max_redzone %d, poison_heap %d, "
- "malloc_context_size %d, alloc_dealloc_mismatch %d, "
- "allocator_may_return_null %d\n",
- flags()->quarantine_size, flags()->max_redzone, flags()->poison_heap,
- common_flags()->malloc_context_size, flags()->alloc_dealloc_mismatch,
- common_flags()->allocator_may_return_null);
+ if (Verbosity()) {
+ Report("Activated with flags:\n");
+ asan_deactivated_flags.Print();
+ }
}
} // namespace __asan
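
The stash/override/restore flow introduced above can be seen in miniature below: a self-contained sketch of the same pattern (DemoOptions, its field defaults, and the deliberately crude option parsing are hypothetical stand-ins; only the ASAN_ACTIVATION_OPTIONS variable name comes from this patch).

#include <cstdio>
#include <cstdlib>

// Hypothetical stand-in for the runtime's tunable state.
struct DemoOptions {
  int quarantine_size_mb = 256;
  int max_redzone = 2048;
};

static DemoOptions stashed;  // saved at deactivation time
static DemoOptions active;   // what the "runtime" currently uses

void DemoDeactivate() {
  stashed = active;                 // stash runtime state
  active.quarantine_size_mb = 0;    // cheapest safe settings while dormant
  active.max_redzone = 16;
}

void DemoActivate() {
  DemoOptions restored = stashed;   // start from the stashed values
  // Let the environment override individual fields, as the runtime does
  // with ASAN_ACTIVATION_OPTIONS (real parsing is done by FlagParser).
  if (const char *env = std::getenv("ASAN_ACTIVATION_OPTIONS")) {
    int mb;
    if (std::sscanf(env, "quarantine_size_mb=%d", &mb) == 1)
      restored.quarantine_size_mb = mb;
  }
  active = restored;
  std::printf("activated: quarantine_size_mb=%d max_redzone=%d\n",
              active.quarantine_size_mb, active.max_redzone);
}

int main() {
  DemoDeactivate();
  DemoActivate();  // try: ASAN_ACTIVATION_OPTIONS=quarantine_size_mb=64 ./demo
}
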
diff --git a/lib/asan/asan_activation.h b/lib/asan/asan_activation.h
index dafb840a6042..d5e1ce433001 100644
--- a/lib/asan/asan_activation.h
+++ b/lib/asan/asan_activation.h
@@ -16,7 +16,7 @@
#define ASAN_ACTIVATION_H
namespace __asan {
-void AsanStartDeactivated();
+void AsanDeactivate();
void AsanActivate();
} // namespace __asan
diff --git a/lib/asan/asan_activation_flags.inc b/lib/asan/asan_activation_flags.inc
new file mode 100644
index 000000000000..d4c089ec6538
--- /dev/null
+++ b/lib/asan/asan_activation_flags.inc
@@ -0,0 +1,35 @@
+//===-- asan_activation_flags.inc -------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A subset of ASan (and common) runtime flags supported at activation time.
+//
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_ACTIVATION_FLAG
+# error "Define ASAN_ACTIVATION_FLAG prior to including this file!"
+#endif
+
+#ifndef COMMON_ACTIVATION_FLAG
+# error "Define COMMON_ACTIVATION_FLAG prior to including this file!"
+#endif
+
+// ASAN_ACTIVATION_FLAG(Type, Name)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+ASAN_ACTIVATION_FLAG(int, redzone)
+ASAN_ACTIVATION_FLAG(int, max_redzone)
+ASAN_ACTIVATION_FLAG(int, quarantine_size_mb)
+ASAN_ACTIVATION_FLAG(bool, alloc_dealloc_mismatch)
+ASAN_ACTIVATION_FLAG(bool, poison_heap)
+
+COMMON_ACTIVATION_FLAG(bool, allocator_may_return_null)
+COMMON_ACTIVATION_FLAG(int, malloc_context_size)
+COMMON_ACTIVATION_FLAG(bool, coverage)
+COMMON_ACTIVATION_FLAG(const char *, coverage_dir)
+COMMON_ACTIVATION_FLAG(int, verbosity)
+COMMON_ACTIVATION_FLAG(bool, help)
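
As the asan_activation.cc hunk above shows, this .inc file is consumed with the X-macro pattern: the includer defines ASAN_ACTIVATION_FLAG and COMMON_ACTIVATION_FLAG, includes the list, then undefines both. Below is a self-contained sketch of that pattern with the flag list inlined instead of living in a separate file (the struct layouts and the RegisterFlag stub are illustrative, not the runtime's).

#include <iostream>

struct Flags { int redzone = 16; bool poison_heap = true; };
struct CommonFlags { int verbosity = 0; };

// Stub registration: report which flag name maps to which storage location.
template <class T>
static void RegisterFlag(const char *name, T *storage) {
  std::cout << "register " << name << " -> "
            << static_cast<void *>(storage) << "\n";
}

static void RegisterActivationFlags(Flags *f, CommonFlags *cf) {
#define ASAN_ACTIVATION_FLAG(Type, Name) RegisterFlag(#Name, &f->Name);
#define COMMON_ACTIVATION_FLAG(Type, Name) RegisterFlag(#Name, &cf->Name);
  // In the runtime, the next lines come from
  // #include "asan_activation_flags.inc".
  ASAN_ACTIVATION_FLAG(int, redzone)
  ASAN_ACTIVATION_FLAG(bool, poison_heap)
  COMMON_ACTIVATION_FLAG(int, verbosity)
#undef ASAN_ACTIVATION_FLAG
#undef COMMON_ACTIVATION_FLAG
}

int main() {
  Flags f;
  CommonFlags cf;
  RegisterActivationFlags(&f, &cf);
}
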
diff --git a/lib/asan/asan_allocator.cc b/lib/asan/asan_allocator.cc
new file mode 100644
index 000000000000..fd63ac68c09e
--- /dev/null
+++ b/lib/asan/asan_allocator.cc
@@ -0,0 +1,909 @@
+//===-- asan_allocator.cc -------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Implementation of ASan's memory allocator, 2-nd version.
+// This variant uses the allocator from sanitizer_common, i.e. the one shared
+// with ThreadSanitizer and MemorySanitizer.
+//
+//===----------------------------------------------------------------------===//
+#include "asan_allocator.h"
+
+#include "asan_mapping.h"
+#include "asan_poisoning.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_list.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_quarantine.h"
+#include "lsan/lsan_common.h"
+
+namespace __asan {
+
+// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
+// We use adaptive redzones: for larger allocation larger redzones are used.
+static u32 RZLog2Size(u32 rz_log) {
+ CHECK_LT(rz_log, 8);
+ return 16 << rz_log;
+}
+
+static u32 RZSize2Log(u32 rz_size) {
+ CHECK_GE(rz_size, 16);
+ CHECK_LE(rz_size, 2048);
+ CHECK(IsPowerOfTwo(rz_size));
+ u32 res = Log2(rz_size) - 4;
+ CHECK_EQ(rz_size, RZLog2Size(res));
+ return res;
+}
+
+static AsanAllocator &get_allocator();
+
+// The memory chunk allocated from the underlying allocator looks like this:
+// L L L L L L H H U U U U U U R R
+// L -- left redzone words (0 or more bytes)
+// H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
+// U -- user memory.
+// R -- right redzone (0 or more bytes)
+// ChunkBase consists of ChunkHeader and other bytes that overlap with user
+// memory.
+
+// If the left redzone is greater than the ChunkHeader size we store a magic
+// value in the first uptr word of the memory block and store the address of
+// ChunkBase in the next uptr.
+// M B L L L L L L L L L H H U U U U U U
+// | ^
+// ---------------------|
+// M -- magic value kAllocBegMagic
+// B -- address of ChunkHeader pointing to the first 'H'
+static const uptr kAllocBegMagic = 0xCC6E96B9;
+
+struct ChunkHeader {
+ // 1-st 8 bytes.
+ u32 chunk_state : 8; // Must be first.
+ u32 alloc_tid : 24;
+
+ u32 free_tid : 24;
+ u32 from_memalign : 1;
+ u32 alloc_type : 2;
+ u32 rz_log : 3;
+ u32 lsan_tag : 2;
+ // 2-nd 8 bytes
+ // This field is used for small sizes. For large sizes it is equal to
+ // SizeClassMap::kMaxSize and the actual size is stored in the
+ // SecondaryAllocator's metadata.
+ u32 user_requested_size;
+ u32 alloc_context_id;
+};
+
+struct ChunkBase : ChunkHeader {
+ // Header2, intersects with user memory.
+ u32 free_context_id;
+};
+
+static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
+static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
+COMPILER_CHECK(kChunkHeaderSize == 16);
+COMPILER_CHECK(kChunkHeader2Size <= 16);
+
+// Every chunk of memory allocated by this allocator can be in one of 3 states:
+// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
+// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
+// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
+enum {
+ CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it.
+ CHUNK_ALLOCATED = 2,
+ CHUNK_QUARANTINE = 3
+};
+
+struct AsanChunk: ChunkBase {
+ uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
+ uptr UsedSize(bool locked_version = false) {
+ if (user_requested_size != SizeClassMap::kMaxSize)
+ return user_requested_size;
+ return *reinterpret_cast<uptr *>(
+ get_allocator().GetMetaData(AllocBeg(locked_version)));
+ }
+ void *AllocBeg(bool locked_version = false) {
+ if (from_memalign) {
+ if (locked_version)
+ return get_allocator().GetBlockBeginFastLocked(
+ reinterpret_cast<void *>(this));
+ return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
+ }
+ return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
+ }
+ bool AddrIsInside(uptr addr, bool locked_version = false) {
+ return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
+ }
+};
+
+struct QuarantineCallback {
+ explicit QuarantineCallback(AllocatorCache *cache)
+ : cache_(cache) {
+ }
+
+ void Recycle(AsanChunk *m) {
+ CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
+ atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
+ CHECK_NE(m->alloc_tid, kInvalidTid);
+ CHECK_NE(m->free_tid, kInvalidTid);
+ PoisonShadow(m->Beg(),
+ RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
+ kAsanHeapLeftRedzoneMagic);
+ void *p = reinterpret_cast<void *>(m->AllocBeg());
+ if (p != m) {
+ uptr *alloc_magic = reinterpret_cast<uptr *>(p);
+ CHECK_EQ(alloc_magic[0], kAllocBegMagic);
+ // Clear the magic value, as allocator internals may overwrite the
+ // contents of deallocated chunk, confusing GetAsanChunk lookup.
+ alloc_magic[0] = 0;
+ CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
+ }
+
+ // Statistics.
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.real_frees++;
+ thread_stats.really_freed += m->UsedSize();
+
+ get_allocator().Deallocate(cache_, p);
+ }
+
+ void *Allocate(uptr size) {
+ return get_allocator().Allocate(cache_, size, 1, false);
+ }
+
+ void Deallocate(void *p) {
+ get_allocator().Deallocate(cache_, p);
+ }
+
+ AllocatorCache *cache_;
+};
+
+typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
+typedef AsanQuarantine::Cache QuarantineCache;
+
+void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
+ PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
+ // Statistics.
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.mmaps++;
+ thread_stats.mmaped += size;
+}
+void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
+ PoisonShadow(p, size, 0);
+ // We are about to unmap a chunk of user memory.
+ // Mark the corresponding shadow memory as not needed.
+ FlushUnneededASanShadowMemory(p, size);
+ // Statistics.
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.munmaps++;
+ thread_stats.munmaped += size;
+}
+
+// We can not use THREADLOCAL because it is not supported on some of the
+// platforms we care about (OSX 10.6, Android).
+// static THREADLOCAL AllocatorCache cache;
+AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
+ CHECK(ms);
+ return &ms->allocator_cache;
+}
+
+QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
+ CHECK(ms);
+ CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
+ return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
+}
+
+void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
+ quarantine_size_mb = f->quarantine_size_mb;
+ min_redzone = f->redzone;
+ max_redzone = f->max_redzone;
+ may_return_null = cf->allocator_may_return_null;
+ alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
+}
+
+void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
+ f->quarantine_size_mb = quarantine_size_mb;
+ f->redzone = min_redzone;
+ f->max_redzone = max_redzone;
+ cf->allocator_may_return_null = may_return_null;
+ f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
+}
+
+struct Allocator {
+ static const uptr kMaxAllowedMallocSize =
+ FIRST_32_SECOND_64(3UL << 30, 64UL << 30);
+ static const uptr kMaxThreadLocalQuarantine =
+ FIRST_32_SECOND_64(1 << 18, 1 << 20);
+
+ AsanAllocator allocator;
+ AsanQuarantine quarantine;
+ StaticSpinMutex fallback_mutex;
+ AllocatorCache fallback_allocator_cache;
+ QuarantineCache fallback_quarantine_cache;
+
+ // ------------------- Options --------------------------
+ atomic_uint16_t min_redzone;
+ atomic_uint16_t max_redzone;
+ atomic_uint8_t alloc_dealloc_mismatch;
+
+ // ------------------- Initialization ------------------------
+ explicit Allocator(LinkerInitialized)
+ : quarantine(LINKER_INITIALIZED),
+ fallback_quarantine_cache(LINKER_INITIALIZED) {}
+
+ void CheckOptions(const AllocatorOptions &options) const {
+ CHECK_GE(options.min_redzone, 16);
+ CHECK_GE(options.max_redzone, options.min_redzone);
+ CHECK_LE(options.max_redzone, 2048);
+ CHECK(IsPowerOfTwo(options.min_redzone));
+ CHECK(IsPowerOfTwo(options.max_redzone));
+ }
+
+ void SharedInitCode(const AllocatorOptions &options) {
+ CheckOptions(options);
+ quarantine.Init((uptr)options.quarantine_size_mb << 20,
+ kMaxThreadLocalQuarantine);
+ atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
+ memory_order_release);
+ atomic_store(&min_redzone, options.min_redzone, memory_order_release);
+ atomic_store(&max_redzone, options.max_redzone, memory_order_release);
+ }
+
+ void Initialize(const AllocatorOptions &options) {
+ allocator.Init(options.may_return_null);
+ SharedInitCode(options);
+ }
+
+ void ReInitialize(const AllocatorOptions &options) {
+ allocator.SetMayReturnNull(options.may_return_null);
+ SharedInitCode(options);
+ }
+
+ void GetOptions(AllocatorOptions *options) const {
+ options->quarantine_size_mb = quarantine.GetSize() >> 20;
+ options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
+ options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
+ options->may_return_null = allocator.MayReturnNull();
+ options->alloc_dealloc_mismatch =
+ atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
+ }
+
+ // -------------------- Helper methods. -------------------------
+ uptr ComputeRZLog(uptr user_requested_size) {
+ u32 rz_log =
+ user_requested_size <= 64 - 16 ? 0 :
+ user_requested_size <= 128 - 32 ? 1 :
+ user_requested_size <= 512 - 64 ? 2 :
+ user_requested_size <= 4096 - 128 ? 3 :
+ user_requested_size <= (1 << 14) - 256 ? 4 :
+ user_requested_size <= (1 << 15) - 512 ? 5 :
+ user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
+ u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
+ u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
+ return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
+ }
+
+ // We have an address between two chunks, and we want to report just one.
+ AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
+ AsanChunk *right_chunk) {
+ // Prefer an allocated chunk over freed chunk and freed chunk
+ // over available chunk.
+ if (left_chunk->chunk_state != right_chunk->chunk_state) {
+ if (left_chunk->chunk_state == CHUNK_ALLOCATED)
+ return left_chunk;
+ if (right_chunk->chunk_state == CHUNK_ALLOCATED)
+ return right_chunk;
+ if (left_chunk->chunk_state == CHUNK_QUARANTINE)
+ return left_chunk;
+ if (right_chunk->chunk_state == CHUNK_QUARANTINE)
+ return right_chunk;
+ }
+ // Same chunk_state: choose based on offset.
+ sptr l_offset = 0, r_offset = 0;
+ CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
+ CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
+ if (l_offset < r_offset)
+ return left_chunk;
+ return right_chunk;
+ }
+
+ // -------------------- Allocation/Deallocation routines ---------------
+ void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
+ AllocType alloc_type, bool can_fill) {
+ if (UNLIKELY(!asan_inited))
+ AsanInitFromRtl();
+ Flags &fl = *flags();
+ CHECK(stack);
+ const uptr min_alignment = SHADOW_GRANULARITY;
+ if (alignment < min_alignment)
+ alignment = min_alignment;
+ if (size == 0) {
+ // We'd be happy to avoid allocating memory for zero-size requests, but
+ // some programs/tests depend on this behavior and assume that malloc
+ // would not return NULL even for zero-size allocations. Moreover, it
+ // looks like operator new should never return NULL, and results of
+ // consecutive "new" calls must be different even if the allocated size
+ // is zero.
+ size = 1;
+ }
+ CHECK(IsPowerOfTwo(alignment));
+ uptr rz_log = ComputeRZLog(size);
+ uptr rz_size = RZLog2Size(rz_log);
+ uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
+ uptr needed_size = rounded_size + rz_size;
+ if (alignment > min_alignment)
+ needed_size += alignment;
+ bool using_primary_allocator = true;
+ // If we are allocating from the secondary allocator, there will be no
+ // automatic right redzone, so add the right redzone manually.
+ if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
+ needed_size += rz_size;
+ using_primary_allocator = false;
+ }
+ CHECK(IsAligned(needed_size, min_alignment));
+ if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
+ Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
+ (void*)size);
+ return allocator.ReturnNullOrDie();
+ }
+
+ AsanThread *t = GetCurrentThread();
+ void *allocated;
+ bool check_rss_limit = true;
+ if (t) {
+ AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+ allocated =
+ allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
+ } else {
+ SpinMutexLock l(&fallback_mutex);
+ AllocatorCache *cache = &fallback_allocator_cache;
+ allocated =
+ allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
+ }
+
+ if (!allocated)
+ return allocator.ReturnNullOrDie();
+
+ if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
+ // Heap poisoning is enabled, but the allocator provides an unpoisoned
+ // chunk. This is possible if CanPoisonMemory() was false for some
+ // time, for example, due to flags()->start_disabled.
+ // Anyway, poison the block before using it for anything else.
+ uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
+ PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
+ }
+
+ uptr alloc_beg = reinterpret_cast<uptr>(allocated);
+ uptr alloc_end = alloc_beg + needed_size;
+ uptr beg_plus_redzone = alloc_beg + rz_size;
+ uptr user_beg = beg_plus_redzone;
+ if (!IsAligned(user_beg, alignment))
+ user_beg = RoundUpTo(user_beg, alignment);
+ uptr user_end = user_beg + size;
+ CHECK_LE(user_end, alloc_end);
+ uptr chunk_beg = user_beg - kChunkHeaderSize;
+ AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
+ m->alloc_type = alloc_type;
+ m->rz_log = rz_log;
+ u32 alloc_tid = t ? t->tid() : 0;
+ m->alloc_tid = alloc_tid;
+ CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield?
+ m->free_tid = kInvalidTid;
+ m->from_memalign = user_beg != beg_plus_redzone;
+ if (alloc_beg != chunk_beg) {
+ CHECK_LE(alloc_beg+ 2 * sizeof(uptr), chunk_beg);
+ reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
+ reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
+ }
+ if (using_primary_allocator) {
+ CHECK(size);
+ m->user_requested_size = size;
+ CHECK(allocator.FromPrimary(allocated));
+ } else {
+ CHECK(!allocator.FromPrimary(allocated));
+ m->user_requested_size = SizeClassMap::kMaxSize;
+ uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
+ meta[0] = size;
+ meta[1] = chunk_beg;
+ }
+
+ m->alloc_context_id = StackDepotPut(*stack);
+
+ uptr size_rounded_down_to_granularity =
+ RoundDownTo(size, SHADOW_GRANULARITY);
+ // Unpoison the bulk of the memory region.
+ if (size_rounded_down_to_granularity)
+ PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
+ // Deal with the end of the region if size is not aligned to granularity.
+ if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
+ u8 *shadow =
+ (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
+ *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
+ }
+
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.mallocs++;
+ thread_stats.malloced += size;
+ thread_stats.malloced_redzones += needed_size - size;
+ uptr class_id =
+ Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
+ thread_stats.malloced_by_size[class_id]++;
+ if (needed_size > SizeClassMap::kMaxSize)
+ thread_stats.malloc_large++;
+
+ void *res = reinterpret_cast<void *>(user_beg);
+ if (can_fill && fl.max_malloc_fill_size) {
+ uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
+ REAL(memset)(res, fl.malloc_fill_byte, fill_size);
+ }
+#if CAN_SANITIZE_LEAKS
+ m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
+ : __lsan::kDirectlyLeaked;
+#endif
+ // Must be the last mutation of metadata in this function.
+ atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
+ ASAN_MALLOC_HOOK(res, size);
+ return res;
+ }
+
+ void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr,
+ BufferedStackTrace *stack) {
+ u8 old_chunk_state = CHUNK_ALLOCATED;
+ // Flip the chunk_state atomically to avoid race on double-free.
+ if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
+ CHUNK_QUARANTINE, memory_order_acquire))
+ ReportInvalidFree(ptr, old_chunk_state, stack);
+ CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
+ }
+
+ // Expects the chunk to already be marked as quarantined by using
+ // AtomicallySetQuarantineFlag.
+ void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
+ AllocType alloc_type) {
+ CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
+
+ if (m->alloc_type != alloc_type) {
+ if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
+ ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
+ (AllocType)alloc_type);
+ }
+ }
+
+ CHECK_GE(m->alloc_tid, 0);
+ if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area.
+ CHECK_EQ(m->free_tid, kInvalidTid);
+ AsanThread *t = GetCurrentThread();
+ m->free_tid = t ? t->tid() : 0;
+ m->free_context_id = StackDepotPut(*stack);
+ // Poison the region.
+ PoisonShadow(m->Beg(),
+ RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
+ kAsanHeapFreeMagic);
+
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.frees++;
+ thread_stats.freed += m->UsedSize();
+
+ // Push into quarantine.
+ if (t) {
+ AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
+ AllocatorCache *ac = GetAllocatorCache(ms);
+ quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac), m,
+ m->UsedSize());
+ } else {
+ SpinMutexLock l(&fallback_mutex);
+ AllocatorCache *ac = &fallback_allocator_cache;
+ quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac), m,
+ m->UsedSize());
+ }
+ }
+
+ void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack,
+ AllocType alloc_type) {
+ uptr p = reinterpret_cast<uptr>(ptr);
+ if (p == 0) return;
+
+ uptr chunk_beg = p - kChunkHeaderSize;
+ AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
+ if (delete_size && flags()->new_delete_type_mismatch &&
+ delete_size != m->UsedSize()) {
+ ReportNewDeleteSizeMismatch(p, delete_size, stack);
+ }
+ ASAN_FREE_HOOK(ptr);
+ // Must mark the chunk as quarantined before any changes to its metadata.
+ AtomicallySetQuarantineFlag(m, ptr, stack);
+ QuarantineChunk(m, ptr, stack, alloc_type);
+ }
+
+ void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
+ CHECK(old_ptr && new_size);
+ uptr p = reinterpret_cast<uptr>(old_ptr);
+ uptr chunk_beg = p - kChunkHeaderSize;
+ AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
+
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.reallocs++;
+ thread_stats.realloced += new_size;
+
+ void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
+ if (new_ptr) {
+ u8 chunk_state = m->chunk_state;
+ if (chunk_state != CHUNK_ALLOCATED)
+ ReportInvalidFree(old_ptr, chunk_state, stack);
+ CHECK_NE(REAL(memcpy), (void*)0);
+ uptr memcpy_size = Min(new_size, m->UsedSize());
+ // If realloc() races with free(), we may start copying freed memory.
+ // However, we will report racy double-free later anyway.
+ REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
+ Deallocate(old_ptr, 0, stack, FROM_MALLOC);
+ }
+ return new_ptr;
+ }
+
+ void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
+ if (CallocShouldReturnNullDueToOverflow(size, nmemb))
+ return allocator.ReturnNullOrDie();
+ void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
+ // If the memory comes from the secondary allocator no need to clear it
+ // as it comes directly from mmap.
+ if (ptr && allocator.FromPrimary(ptr))
+ REAL(memset)(ptr, 0, nmemb * size);
+ return ptr;
+ }
+
+ void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
+ if (chunk_state == CHUNK_QUARANTINE)
+ ReportDoubleFree((uptr)ptr, stack);
+ else
+ ReportFreeNotMalloced((uptr)ptr, stack);
+ }
+
+ void CommitBack(AsanThreadLocalMallocStorage *ms) {
+ AllocatorCache *ac = GetAllocatorCache(ms);
+ quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac));
+ allocator.SwallowCache(ac);
+ }
+
+ // -------------------------- Chunk lookup ----------------------
+
+ // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
+ AsanChunk *GetAsanChunk(void *alloc_beg) {
+ if (!alloc_beg) return 0;
+ if (!allocator.FromPrimary(alloc_beg)) {
+ uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
+ AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
+ return m;
+ }
+ uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
+ if (alloc_magic[0] == kAllocBegMagic)
+ return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
+ return reinterpret_cast<AsanChunk *>(alloc_beg);
+ }
+
+ AsanChunk *GetAsanChunkByAddr(uptr p) {
+ void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
+ return GetAsanChunk(alloc_beg);
+ }
+
+ // Allocator must be locked when this function is called.
+ AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
+ void *alloc_beg =
+ allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
+ return GetAsanChunk(alloc_beg);
+ }
+
+ uptr AllocationSize(uptr p) {
+ AsanChunk *m = GetAsanChunkByAddr(p);
+ if (!m) return 0;
+ if (m->chunk_state != CHUNK_ALLOCATED) return 0;
+ if (m->Beg() != p) return 0;
+ return m->UsedSize();
+ }
+
+ AsanChunkView FindHeapChunkByAddress(uptr addr) {
+ AsanChunk *m1 = GetAsanChunkByAddr(addr);
+ if (!m1) return AsanChunkView(m1);
+ sptr offset = 0;
+ if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
+ // The address is in the chunk's left redzone, so maybe it is actually
+ // a right buffer overflow from the other chunk to the left.
+ // Search a bit to the left to see if there is another chunk.
+ AsanChunk *m2 = 0;
+ for (uptr l = 1; l < GetPageSizeCached(); l++) {
+ m2 = GetAsanChunkByAddr(addr - l);
+ if (m2 == m1) continue; // Still the same chunk.
+ break;
+ }
+ if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
+ m1 = ChooseChunk(addr, m2, m1);
+ }
+ return AsanChunkView(m1);
+ }
+
+ void PrintStats() {
+ allocator.PrintStats();
+ }
+
+ void ForceLock() {
+ allocator.ForceLock();
+ fallback_mutex.Lock();
+ }
+
+ void ForceUnlock() {
+ fallback_mutex.Unlock();
+ allocator.ForceUnlock();
+ }
+};
+
+static Allocator instance(LINKER_INITIALIZED);
+
+static AsanAllocator &get_allocator() {
+ return instance.allocator;
+}
+
+bool AsanChunkView::IsValid() {
+ return chunk_ != 0 && chunk_->chunk_state != CHUNK_AVAILABLE;
+}
+uptr AsanChunkView::Beg() { return chunk_->Beg(); }
+uptr AsanChunkView::End() { return Beg() + UsedSize(); }
+uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
+uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
+uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
+
+static StackTrace GetStackTraceFromId(u32 id) {
+ CHECK(id);
+ StackTrace res = StackDepotGet(id);
+ CHECK(res.trace);
+ return res;
+}
+
+StackTrace AsanChunkView::GetAllocStack() {
+ return GetStackTraceFromId(chunk_->alloc_context_id);
+}
+
+StackTrace AsanChunkView::GetFreeStack() {
+ return GetStackTraceFromId(chunk_->free_context_id);
+}
+
+void InitializeAllocator(const AllocatorOptions &options) {
+ instance.Initialize(options);
+}
+
+void ReInitializeAllocator(const AllocatorOptions &options) {
+ instance.ReInitialize(options);
+}
+
+void GetAllocatorOptions(AllocatorOptions *options) {
+ instance.GetOptions(options);
+}
+
+AsanChunkView FindHeapChunkByAddress(uptr addr) {
+ return instance.FindHeapChunkByAddress(addr);
+}
+
+void AsanThreadLocalMallocStorage::CommitBack() {
+ instance.CommitBack(this);
+}
+
+void PrintInternalAllocatorStats() {
+ instance.PrintStats();
+}
+
+void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
+ AllocType alloc_type) {
+ return instance.Allocate(size, alignment, stack, alloc_type, true);
+}
+
+void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
+ instance.Deallocate(ptr, 0, stack, alloc_type);
+}
+
+void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
+ AllocType alloc_type) {
+ instance.Deallocate(ptr, size, stack, alloc_type);
+}
+
+void *asan_malloc(uptr size, BufferedStackTrace *stack) {
+ return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
+}
+
+void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
+ return instance.Calloc(nmemb, size, stack);
+}
+
+void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
+ if (p == 0)
+ return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
+ if (size == 0) {
+ instance.Deallocate(p, 0, stack, FROM_MALLOC);
+ return 0;
+ }
+ return instance.Reallocate(p, size, stack);
+}
+
+void *asan_valloc(uptr size, BufferedStackTrace *stack) {
+ return instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
+}
+
+void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
+ uptr PageSize = GetPageSizeCached();
+ size = RoundUpTo(size, PageSize);
+ if (size == 0) {
+ // pvalloc(0) should allocate one page.
+ size = PageSize;
+ }
+ return instance.Allocate(size, PageSize, stack, FROM_MALLOC, true);
+}
+
+int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
+ BufferedStackTrace *stack) {
+ void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
+ CHECK(IsAligned((uptr)ptr, alignment));
+ *memptr = ptr;
+ return 0;
+}
+
+uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) {
+ if (ptr == 0) return 0;
+ uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
+ if (flags()->check_malloc_usable_size && (usable_size == 0)) {
+ GET_STACK_TRACE_FATAL(pc, bp);
+ ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
+ }
+ return usable_size;
+}
+
+uptr asan_mz_size(const void *ptr) {
+ return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
+}
+
+void asan_mz_force_lock() {
+ instance.ForceLock();
+}
+
+void asan_mz_force_unlock() {
+ instance.ForceUnlock();
+}
+
+void AsanSoftRssLimitExceededCallback(bool exceeded) {
+ instance.allocator.SetRssLimitIsExceeded(exceeded);
+}
+
+} // namespace __asan
+
+// --- Implementation of LSan-specific functions --- {{{1
+namespace __lsan {
+void LockAllocator() {
+ __asan::get_allocator().ForceLock();
+}
+
+void UnlockAllocator() {
+ __asan::get_allocator().ForceUnlock();
+}
+
+void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
+ *begin = (uptr)&__asan::get_allocator();
+ *end = *begin + sizeof(__asan::get_allocator());
+}
+
+uptr PointsIntoChunk(void* p) {
+ uptr addr = reinterpret_cast<uptr>(p);
+ __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
+ if (!m) return 0;
+ uptr chunk = m->Beg();
+ if (m->chunk_state != __asan::CHUNK_ALLOCATED)
+ return 0;
+ if (m->AddrIsInside(addr, /*locked_version=*/true))
+ return chunk;
+ if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
+ addr))
+ return chunk;
+ return 0;
+}
+
+uptr GetUserBegin(uptr chunk) {
+ __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
+ CHECK(m);
+ return m->Beg();
+}
+
+LsanMetadata::LsanMetadata(uptr chunk) {
+ metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
+}
+
+bool LsanMetadata::allocated() const {
+ __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+ return m->chunk_state == __asan::CHUNK_ALLOCATED;
+}
+
+ChunkTag LsanMetadata::tag() const {
+ __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+ return static_cast<ChunkTag>(m->lsan_tag);
+}
+
+void LsanMetadata::set_tag(ChunkTag value) {
+ __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+ m->lsan_tag = value;
+}
+
+uptr LsanMetadata::requested_size() const {
+ __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+ return m->UsedSize(/*locked_version=*/true);
+}
+
+u32 LsanMetadata::stack_trace_id() const {
+ __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+ return m->alloc_context_id;
+}
+
+void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ __asan::get_allocator().ForEachChunk(callback, arg);
+}
+
+IgnoreObjectResult IgnoreObjectLocked(const void *p) {
+ uptr addr = reinterpret_cast<uptr>(p);
+ __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
+ if (!m) return kIgnoreObjectInvalid;
+ if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
+ if (m->lsan_tag == kIgnored)
+ return kIgnoreObjectAlreadyIgnored;
+ m->lsan_tag = __lsan::kIgnored;
+ return kIgnoreObjectSuccess;
+ } else {
+ return kIgnoreObjectInvalid;
+ }
+}
+} // namespace __lsan
+
+// ---------------------- Interface ---------------- {{{1
+using namespace __asan; // NOLINT
+
+// ASan allocator doesn't reserve extra bytes, so normally we would
+// just return "size". We don't want to expose our redzone sizes, etc here.
+uptr __sanitizer_get_estimated_allocated_size(uptr size) {
+ return size;
+}
+
+int __sanitizer_get_ownership(const void *p) {
+ uptr ptr = reinterpret_cast<uptr>(p);
+ return instance.AllocationSize(ptr) > 0;
+}
+
+uptr __sanitizer_get_allocated_size(const void *p) {
+ if (p == 0) return 0;
+ uptr ptr = reinterpret_cast<uptr>(p);
+ uptr allocated_size = instance.AllocationSize(ptr);
+ // Die if p is not malloced or if it is already freed.
+ if (allocated_size == 0) {
+ GET_STACK_TRACE_FATAL_HERE;
+ ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
+ }
+ return allocated_size;
+}
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+// Provide default (no-op) implementation of malloc hooks.
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_malloc_hook(void *ptr, uptr size) {
+ (void)ptr;
+ (void)size;
+}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_free_hook(void *ptr) {
+ (void)ptr;
+}
+} // extern "C"
+#endif
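
To make the redzone encoding above concrete: rz_log occupies 3 bits of the chunk header and encodes redzone sizes of 16 << rz_log bytes, i.e. 16 through 2048. The sketch below re-derives the round trip (RZLog2Size and RZSize2Log are re-stated from the hunk above; main() and its loop are purely illustrative).

#include <cassert>
#include <cstdio>

// Redzone sizes 16, 32, ..., 2048 fit in 3 bits: size = 16 << log.
static unsigned RZLog2Size(unsigned rz_log) {
  assert(rz_log < 8);
  return 16u << rz_log;
}

// Inverse mapping, valid for powers of two in [16, 2048].
static unsigned RZSize2Log(unsigned rz_size) {
  unsigned log = 0;
  while ((16u << log) < rz_size) log++;
  assert(RZLog2Size(log) == rz_size);  // round-trip check
  return log;
}

int main() {
  for (unsigned log = 0; log < 8; log++) {
    unsigned size = RZLog2Size(log);
    std::printf("rz_log=%u -> redzone=%4u bytes -> rz_log=%u\n",
                log, size, RZSize2Log(size));
  }
}
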
diff --git a/lib/asan/asan_allocator.h b/lib/asan/asan_allocator.h
index 6d3a99282a4a..3208d1f950cd 100644
--- a/lib/asan/asan_allocator.h
+++ b/lib/asan/asan_allocator.h
@@ -9,12 +9,13 @@
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
-// ASan-private header for asan_allocator2.cc.
+// ASan-private header for asan_allocator.cc.
//===----------------------------------------------------------------------===//
#ifndef ASAN_ALLOCATOR_H
#define ASAN_ALLOCATOR_H
+#include "asan_flags.h"
#include "asan_internal.h"
#include "asan_interceptors.h"
#include "sanitizer_common/sanitizer_allocator.h"
@@ -31,8 +32,20 @@ enum AllocType {
static const uptr kNumberOfSizeClasses = 255;
struct AsanChunk;
-void InitializeAllocator();
-void ReInitializeAllocator();
+struct AllocatorOptions {
+ u32 quarantine_size_mb;
+ u16 min_redzone;
+ u16 max_redzone;
+ u8 may_return_null;
+ u8 alloc_dealloc_mismatch;
+
+ void SetFrom(const Flags *f, const CommonFlags *cf);
+ void CopyTo(Flags *f, CommonFlags *cf);
+};
+
+void InitializeAllocator(const AllocatorOptions &options);
+void ReInitializeAllocator(const AllocatorOptions &options);
+void GetAllocatorOptions(AllocatorOptions *options);
class AsanChunkView {
public:
@@ -127,12 +140,12 @@ typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 16,
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
- SecondaryAllocator> Allocator;
+ SecondaryAllocator> AsanAllocator;
struct AsanThreadLocalMallocStorage {
uptr quarantine_cache[16];
- AllocatorCache allocator2_cache;
+ AllocatorCache allocator_cache;
void CommitBack();
private:
// These objects are allocated via mmap() and are zero-initialized.
@@ -160,6 +173,7 @@ void asan_mz_force_lock();
void asan_mz_force_unlock();
void PrintInternalAllocatorStats();
+void AsanSoftRssLimitExceededCallback(bool exceeded);
} // namespace __asan
#endif // ASAN_ALLOCATOR_H
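
The ChunkHeader added in asan_allocator.cc packs chunk_state, the thread ids, and the from_memalign/alloc_type/rz_log/lsan_tag bits into exactly two 8-byte words, which COMPILER_CHECK(kChunkHeaderSize == 16) enforces. Below is a standalone re-statement of that layout with the equivalent static_assert (field widths are copied from the hunk; the struct name is a stand-in, and the packing holds on common ABIs where adjacent bit-fields share a 32-bit unit).

#include <cstdint>

struct DemoChunkHeader {
  // 1st 8 bytes: 8 + 24 bits, then 24 + 1 + 2 + 3 + 2 bits.
  uint32_t chunk_state : 8;   // must be first: flipped atomically on free
  uint32_t alloc_tid : 24;
  uint32_t free_tid : 24;
  uint32_t from_memalign : 1;
  uint32_t alloc_type : 2;
  uint32_t rz_log : 3;        // encodes redzone size as 16 << rz_log
  uint32_t lsan_tag : 2;
  // 2nd 8 bytes.
  uint32_t user_requested_size;
  uint32_t alloc_context_id;
};

static_assert(sizeof(DemoChunkHeader) == 16,
              "header must stay within the 16-byte left-redzone slot");

int main() { return 0; }
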
diff --git a/lib/asan/asan_allocator2.cc b/lib/asan/asan_allocator2.cc
deleted file mode 100644
index 52bdcf607f57..000000000000
--- a/lib/asan/asan_allocator2.cc
+++ /dev/null
@@ -1,792 +0,0 @@
-//===-- asan_allocator2.cc ------------------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of AddressSanitizer, an address sanity checker.
-//
-// Implementation of ASan's memory allocator, 2-nd version.
-// This variant uses the allocator from sanitizer_common, i.e. the one shared
-// with ThreadSanitizer and MemorySanitizer.
-//
-//===----------------------------------------------------------------------===//
-#include "asan_allocator.h"
-
-#include "asan_mapping.h"
-#include "asan_poisoning.h"
-#include "asan_report.h"
-#include "asan_stack.h"
-#include "asan_thread.h"
-#include "sanitizer_common/sanitizer_allocator_interface.h"
-#include "sanitizer_common/sanitizer_flags.h"
-#include "sanitizer_common/sanitizer_internal_defs.h"
-#include "sanitizer_common/sanitizer_list.h"
-#include "sanitizer_common/sanitizer_stackdepot.h"
-#include "sanitizer_common/sanitizer_quarantine.h"
-#include "lsan/lsan_common.h"
-
-namespace __asan {
-
-void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
- PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
- // Statistics.
- AsanStats &thread_stats = GetCurrentThreadStats();
- thread_stats.mmaps++;
- thread_stats.mmaped += size;
-}
-void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
- PoisonShadow(p, size, 0);
- // We are about to unmap a chunk of user memory.
- // Mark the corresponding shadow memory as not needed.
- FlushUnneededASanShadowMemory(p, size);
- // Statistics.
- AsanStats &thread_stats = GetCurrentThreadStats();
- thread_stats.munmaps++;
- thread_stats.munmaped += size;
-}
-
-// We can not use THREADLOCAL because it is not supported on some of the
-// platforms we care about (OSX 10.6, Android).
-// static THREADLOCAL AllocatorCache cache;
-AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
- CHECK(ms);
- return &ms->allocator2_cache;
-}
-
-static Allocator allocator;
-
-static const uptr kMaxAllowedMallocSize =
- FIRST_32_SECOND_64(3UL << 30, 64UL << 30);
-
-static const uptr kMaxThreadLocalQuarantine =
- FIRST_32_SECOND_64(1 << 18, 1 << 20);
-
-// Every chunk of memory allocated by this allocator can be in one of 3 states:
-// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
-// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
-// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
-enum {
- CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it.
- CHUNK_ALLOCATED = 2,
- CHUNK_QUARANTINE = 3
-};
-
-// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
-// We use adaptive redzones: for larger allocation larger redzones are used.
-static u32 RZLog2Size(u32 rz_log) {
- CHECK_LT(rz_log, 8);
- return 16 << rz_log;
-}
-
-static u32 RZSize2Log(u32 rz_size) {
- CHECK_GE(rz_size, 16);
- CHECK_LE(rz_size, 2048);
- CHECK(IsPowerOfTwo(rz_size));
- u32 res = Log2(rz_size) - 4;
- CHECK_EQ(rz_size, RZLog2Size(res));
- return res;
-}
-
-static uptr ComputeRZLog(uptr user_requested_size) {
- u32 rz_log =
- user_requested_size <= 64 - 16 ? 0 :
- user_requested_size <= 128 - 32 ? 1 :
- user_requested_size <= 512 - 64 ? 2 :
- user_requested_size <= 4096 - 128 ? 3 :
- user_requested_size <= (1 << 14) - 256 ? 4 :
- user_requested_size <= (1 << 15) - 512 ? 5 :
- user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
- return Min(Max(rz_log, RZSize2Log(flags()->redzone)),
- RZSize2Log(flags()->max_redzone));
-}
-
-// The memory chunk allocated from the underlying allocator looks like this:
-// L L L L L L H H U U U U U U R R
-// L -- left redzone words (0 or more bytes)
-// H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
-// U -- user memory.
-// R -- right redzone (0 or more bytes)
-// ChunkBase consists of ChunkHeader and other bytes that overlap with user
-// memory.
-
-// If the left redzone is greater than the ChunkHeader size we store a magic
-// value in the first uptr word of the memory block and store the address of
-// ChunkBase in the next uptr.
-// M B L L L L L L L L L H H U U U U U U
-// | ^
-// ---------------------|
-// M -- magic value kAllocBegMagic
-// B -- address of ChunkHeader pointing to the first 'H'
-static const uptr kAllocBegMagic = 0xCC6E96B9;
-
-struct ChunkHeader {
- // 1-st 8 bytes.
- u32 chunk_state : 8; // Must be first.
- u32 alloc_tid : 24;
-
- u32 free_tid : 24;
- u32 from_memalign : 1;
- u32 alloc_type : 2;
- u32 rz_log : 3;
- u32 lsan_tag : 2;
- // 2-nd 8 bytes
- // This field is used for small sizes. For large sizes it is equal to
- // SizeClassMap::kMaxSize and the actual size is stored in the
- // SecondaryAllocator's metadata.
- u32 user_requested_size;
- u32 alloc_context_id;
-};
-
-struct ChunkBase : ChunkHeader {
- // Header2, intersects with user memory.
- u32 free_context_id;
-};
-
-static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
-static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
-COMPILER_CHECK(kChunkHeaderSize == 16);
-COMPILER_CHECK(kChunkHeader2Size <= 16);
-
-struct AsanChunk: ChunkBase {
- uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
- uptr UsedSize(bool locked_version = false) {
- if (user_requested_size != SizeClassMap::kMaxSize)
- return user_requested_size;
- return *reinterpret_cast<uptr *>(
- allocator.GetMetaData(AllocBeg(locked_version)));
- }
- void *AllocBeg(bool locked_version = false) {
- if (from_memalign) {
- if (locked_version)
- return allocator.GetBlockBeginFastLocked(
- reinterpret_cast<void *>(this));
- return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
- }
- return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
- }
- bool AddrIsInside(uptr addr, bool locked_version = false) {
- return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
- }
-};
-
-bool AsanChunkView::IsValid() {
- return chunk_ != 0 && chunk_->chunk_state != CHUNK_AVAILABLE;
-}
-uptr AsanChunkView::Beg() { return chunk_->Beg(); }
-uptr AsanChunkView::End() { return Beg() + UsedSize(); }
-uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
-uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
-uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
-
-static StackTrace GetStackTraceFromId(u32 id) {
- CHECK(id);
- StackTrace res = StackDepotGet(id);
- CHECK(res.trace);
- return res;
-}
-
-StackTrace AsanChunkView::GetAllocStack() {
- return GetStackTraceFromId(chunk_->alloc_context_id);
-}
-
-StackTrace AsanChunkView::GetFreeStack() {
- return GetStackTraceFromId(chunk_->free_context_id);
-}
-
-struct QuarantineCallback;
-typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
-typedef AsanQuarantine::Cache QuarantineCache;
-static AsanQuarantine quarantine(LINKER_INITIALIZED);
-static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
-static AllocatorCache fallback_allocator_cache;
-static SpinMutex fallback_mutex;
-
-QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
- CHECK(ms);
- CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
- return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
-}
-
-struct QuarantineCallback {
- explicit QuarantineCallback(AllocatorCache *cache)
- : cache_(cache) {
- }
-
- void Recycle(AsanChunk *m) {
- CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
- atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
- CHECK_NE(m->alloc_tid, kInvalidTid);
- CHECK_NE(m->free_tid, kInvalidTid);
- PoisonShadow(m->Beg(),
- RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
- kAsanHeapLeftRedzoneMagic);
- void *p = reinterpret_cast<void *>(m->AllocBeg());
- if (p != m) {
- uptr *alloc_magic = reinterpret_cast<uptr *>(p);
- CHECK_EQ(alloc_magic[0], kAllocBegMagic);
- // Clear the magic value, as allocator internals may overwrite the
- // contents of deallocated chunk, confusing GetAsanChunk lookup.
- alloc_magic[0] = 0;
- CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
- }
-
- // Statistics.
- AsanStats &thread_stats = GetCurrentThreadStats();
- thread_stats.real_frees++;
- thread_stats.really_freed += m->UsedSize();
-
- allocator.Deallocate(cache_, p);
- }
-
- void *Allocate(uptr size) {
- return allocator.Allocate(cache_, size, 1, false);
- }
-
- void Deallocate(void *p) {
- allocator.Deallocate(cache_, p);
- }
-
- AllocatorCache *cache_;
-};
-
-void InitializeAllocator() {
- allocator.Init();
- quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
-}
-
-void ReInitializeAllocator() {
- quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
-}
-
-static void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
- AllocType alloc_type, bool can_fill) {
- if (UNLIKELY(!asan_inited))
- AsanInitFromRtl();
- Flags &fl = *flags();
- CHECK(stack);
- const uptr min_alignment = SHADOW_GRANULARITY;
- if (alignment < min_alignment)
- alignment = min_alignment;
- if (size == 0) {
- // We'd be happy to avoid allocating memory for zero-size requests, but
- // some programs/tests depend on this behavior and assume that malloc would
- // not return NULL even for zero-size allocations. Moreover, it looks like
- // operator new should never return NULL, and results of consecutive "new"
- // calls must be different even if the allocated size is zero.
- size = 1;
- }
- CHECK(IsPowerOfTwo(alignment));
- uptr rz_log = ComputeRZLog(size);
- uptr rz_size = RZLog2Size(rz_log);
- uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
- uptr needed_size = rounded_size + rz_size;
- if (alignment > min_alignment)
- needed_size += alignment;
- bool using_primary_allocator = true;
- // If we are allocating from the secondary allocator, there will be no
- // automatic right redzone, so add the right redzone manually.
- if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
- needed_size += rz_size;
- using_primary_allocator = false;
- }
- CHECK(IsAligned(needed_size, min_alignment));
- if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
- Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
- (void*)size);
- return AllocatorReturnNull();
- }
-
- AsanThread *t = GetCurrentThread();
- void *allocated;
- if (t) {
- AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
- allocated = allocator.Allocate(cache, needed_size, 8, false);
- } else {
- SpinMutexLock l(&fallback_mutex);
- AllocatorCache *cache = &fallback_allocator_cache;
- allocated = allocator.Allocate(cache, needed_size, 8, false);
- }
-
- if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && flags()->poison_heap) {
- // Heap poisoning is enabled, but the allocator provides an unpoisoned
- // chunk. This is possible if flags()->poison_heap was disabled for some
- // time, for example, due to flags()->start_deactivated.
- // Anyway, poison the block before using it for anything else.
- uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
- PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
- }
-
- uptr alloc_beg = reinterpret_cast<uptr>(allocated);
- uptr alloc_end = alloc_beg + needed_size;
- uptr beg_plus_redzone = alloc_beg + rz_size;
- uptr user_beg = beg_plus_redzone;
- if (!IsAligned(user_beg, alignment))
- user_beg = RoundUpTo(user_beg, alignment);
- uptr user_end = user_beg + size;
- CHECK_LE(user_end, alloc_end);
- uptr chunk_beg = user_beg - kChunkHeaderSize;
- AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
- m->alloc_type = alloc_type;
- m->rz_log = rz_log;
- u32 alloc_tid = t ? t->tid() : 0;
- m->alloc_tid = alloc_tid;
- CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield?
- m->free_tid = kInvalidTid;
- m->from_memalign = user_beg != beg_plus_redzone;
- if (alloc_beg != chunk_beg) {
- CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
- reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
- reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
- }
- if (using_primary_allocator) {
- CHECK(size);
- m->user_requested_size = size;
- CHECK(allocator.FromPrimary(allocated));
- } else {
- CHECK(!allocator.FromPrimary(allocated));
- m->user_requested_size = SizeClassMap::kMaxSize;
- uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
- meta[0] = size;
- meta[1] = chunk_beg;
- }
-
- m->alloc_context_id = StackDepotPut(*stack);
-
- uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
- // Unpoison the bulk of the memory region.
- if (size_rounded_down_to_granularity)
- PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
- // Deal with the end of the region if size is not aligned to granularity.
- if (size != size_rounded_down_to_granularity && fl.poison_heap) {
- u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
- *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
- }
-
- AsanStats &thread_stats = GetCurrentThreadStats();
- thread_stats.mallocs++;
- thread_stats.malloced += size;
- thread_stats.malloced_redzones += needed_size - size;
- uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
- thread_stats.malloced_by_size[class_id]++;
- if (needed_size > SizeClassMap::kMaxSize)
- thread_stats.malloc_large++;
-
- void *res = reinterpret_cast<void *>(user_beg);
- if (can_fill && fl.max_malloc_fill_size) {
- uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
- REAL(memset)(res, fl.malloc_fill_byte, fill_size);
- }
-#if CAN_SANITIZE_LEAKS
- m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
- : __lsan::kDirectlyLeaked;
-#endif
- // Must be the last mutation of metadata in this function.
- atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
- ASAN_MALLOC_HOOK(res, size);
- return res;
-}
-
-static void ReportInvalidFree(void *ptr, u8 chunk_state,
- BufferedStackTrace *stack) {
- if (chunk_state == CHUNK_QUARANTINE)
- ReportDoubleFree((uptr)ptr, stack);
- else
- ReportFreeNotMalloced((uptr)ptr, stack);
-}
-
-static void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr,
- BufferedStackTrace *stack) {
- u8 old_chunk_state = CHUNK_ALLOCATED;
- // Flip the chunk_state atomically to avoid race on double-free.
- if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
- CHUNK_QUARANTINE, memory_order_acquire))
- ReportInvalidFree(ptr, old_chunk_state, stack);
- CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
-}
-
-// Expects the chunk to already be marked as quarantined by using
-// AtomicallySetQuarantineFlag.
-static void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
- AllocType alloc_type) {
- CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
-
- if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
- ReportAllocTypeMismatch((uptr)ptr, stack,
- (AllocType)m->alloc_type, (AllocType)alloc_type);
-
- CHECK_GE(m->alloc_tid, 0);
- if (SANITIZER_WORDSIZE == 64) // On 32-bit targets this resides in the user area.
- CHECK_EQ(m->free_tid, kInvalidTid);
- AsanThread *t = GetCurrentThread();
- m->free_tid = t ? t->tid() : 0;
- m->free_context_id = StackDepotPut(*stack);
- // Poison the region.
- PoisonShadow(m->Beg(),
- RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
- kAsanHeapFreeMagic);
-
- AsanStats &thread_stats = GetCurrentThreadStats();
- thread_stats.frees++;
- thread_stats.freed += m->UsedSize();
-
- // Push into quarantine.
- if (t) {
- AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
- AllocatorCache *ac = GetAllocatorCache(ms);
- quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
- m, m->UsedSize());
- } else {
- SpinMutexLock l(&fallback_mutex);
- AllocatorCache *ac = &fallback_allocator_cache;
- quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
- m, m->UsedSize());
- }
-}
-
-static void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack,
- AllocType alloc_type) {
- uptr p = reinterpret_cast<uptr>(ptr);
- if (p == 0) return;
-
- uptr chunk_beg = p - kChunkHeaderSize;
- AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
- if (delete_size && flags()->new_delete_type_mismatch &&
- delete_size != m->UsedSize()) {
- ReportNewDeleteSizeMismatch(p, delete_size, stack);
- }
- ASAN_FREE_HOOK(ptr);
- // Must mark the chunk as quarantined before any changes to its metadata.
- AtomicallySetQuarantineFlag(m, ptr, stack);
- QuarantineChunk(m, ptr, stack, alloc_type);
-}
-
-static void *Reallocate(void *old_ptr, uptr new_size,
- BufferedStackTrace *stack) {
- CHECK(old_ptr && new_size);
- uptr p = reinterpret_cast<uptr>(old_ptr);
- uptr chunk_beg = p - kChunkHeaderSize;
- AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
-
- AsanStats &thread_stats = GetCurrentThreadStats();
- thread_stats.reallocs++;
- thread_stats.realloced += new_size;
-
- void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
- if (new_ptr) {
- u8 chunk_state = m->chunk_state;
- if (chunk_state != CHUNK_ALLOCATED)
- ReportInvalidFree(old_ptr, chunk_state, stack);
- CHECK_NE(REAL(memcpy), (void*)0);
- uptr memcpy_size = Min(new_size, m->UsedSize());
- // If realloc() races with free(), we may start copying freed memory.
- // However, we will report racy double-free later anyway.
- REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
- Deallocate(old_ptr, 0, stack, FROM_MALLOC);
- }
- return new_ptr;
-}
-
-// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
-static AsanChunk *GetAsanChunk(void *alloc_beg) {
- if (!alloc_beg) return 0;
- if (!allocator.FromPrimary(alloc_beg)) {
- uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
- AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
- return m;
- }
- uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
- if (alloc_magic[0] == kAllocBegMagic)
- return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
- return reinterpret_cast<AsanChunk *>(alloc_beg);
-}
-
-static AsanChunk *GetAsanChunkByAddr(uptr p) {
- void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
- return GetAsanChunk(alloc_beg);
-}
-
-// Allocator must be locked when this function is called.
-static AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
- void *alloc_beg =
- allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
- return GetAsanChunk(alloc_beg);
-}
-
-static uptr AllocationSize(uptr p) {
- AsanChunk *m = GetAsanChunkByAddr(p);
- if (!m) return 0;
- if (m->chunk_state != CHUNK_ALLOCATED) return 0;
- if (m->Beg() != p) return 0;
- return m->UsedSize();
-}
-
-// We have an address between two chunks, and we want to report just one.
-AsanChunk *ChooseChunk(uptr addr,
- AsanChunk *left_chunk, AsanChunk *right_chunk) {
- // Prefer an allocated chunk over a freed chunk, and a freed chunk
- // over an available chunk.
- if (left_chunk->chunk_state != right_chunk->chunk_state) {
- if (left_chunk->chunk_state == CHUNK_ALLOCATED)
- return left_chunk;
- if (right_chunk->chunk_state == CHUNK_ALLOCATED)
- return right_chunk;
- if (left_chunk->chunk_state == CHUNK_QUARANTINE)
- return left_chunk;
- if (right_chunk->chunk_state == CHUNK_QUARANTINE)
- return right_chunk;
- }
- // Same chunk_state: choose based on offset.
- sptr l_offset = 0, r_offset = 0;
- CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
- CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
- if (l_offset < r_offset)
- return left_chunk;
- return right_chunk;
-}
-
-AsanChunkView FindHeapChunkByAddress(uptr addr) {
- AsanChunk *m1 = GetAsanChunkByAddr(addr);
- if (!m1) return AsanChunkView(m1);
- sptr offset = 0;
- if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
- // The address is in the chunk's left redzone, so maybe it is actually
- // a right buffer overflow from the other chunk to the left.
- // Search a bit to the left to see if there is another chunk.
- AsanChunk *m2 = 0;
- for (uptr l = 1; l < GetPageSizeCached(); l++) {
- m2 = GetAsanChunkByAddr(addr - l);
- if (m2 == m1) continue; // Still the same chunk.
- break;
- }
- if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
- m1 = ChooseChunk(addr, m2, m1);
- }
- return AsanChunkView(m1);
-}
-
-void AsanThreadLocalMallocStorage::CommitBack() {
- AllocatorCache *ac = GetAllocatorCache(this);
- quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
- allocator.SwallowCache(GetAllocatorCache(this));
-}
-
-void PrintInternalAllocatorStats() {
- allocator.PrintStats();
-}
-
-void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
- AllocType alloc_type) {
- return Allocate(size, alignment, stack, alloc_type, true);
-}
-
-void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
- Deallocate(ptr, 0, stack, alloc_type);
-}
-
-void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
- AllocType alloc_type) {
- Deallocate(ptr, size, stack, alloc_type);
-}
-
-void *asan_malloc(uptr size, BufferedStackTrace *stack) {
- return Allocate(size, 8, stack, FROM_MALLOC, true);
-}
-
-void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
- if (CallocShouldReturnNullDueToOverflow(size, nmemb))
- return AllocatorReturnNull();
- void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
- // If the memory comes from the secondary allocator, there is no need to
- // clear it, as it comes directly from mmap.
- if (ptr && allocator.FromPrimary(ptr))
- REAL(memset)(ptr, 0, nmemb * size);
- return ptr;
-}
-
-void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
- if (p == 0)
- return Allocate(size, 8, stack, FROM_MALLOC, true);
- if (size == 0) {
- Deallocate(p, 0, stack, FROM_MALLOC);
- return 0;
- }
- return Reallocate(p, size, stack);
-}
-
-void *asan_valloc(uptr size, BufferedStackTrace *stack) {
- return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
-}
-
-void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
- uptr PageSize = GetPageSizeCached();
- size = RoundUpTo(size, PageSize);
- if (size == 0) {
- // pvalloc(0) should allocate one page.
- size = PageSize;
- }
- return Allocate(size, PageSize, stack, FROM_MALLOC, true);
-}
-
-int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
- BufferedStackTrace *stack) {
- void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true);
- CHECK(IsAligned((uptr)ptr, alignment));
- *memptr = ptr;
- return 0;
-}
-
-uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) {
- if (ptr == 0) return 0;
- uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
- if (flags()->check_malloc_usable_size && (usable_size == 0)) {
- GET_STACK_TRACE_FATAL(pc, bp);
- ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
- }
- return usable_size;
-}
-
-uptr asan_mz_size(const void *ptr) {
- return AllocationSize(reinterpret_cast<uptr>(ptr));
-}
-
-void asan_mz_force_lock() {
- allocator.ForceLock();
- fallback_mutex.Lock();
-}
-
-void asan_mz_force_unlock() {
- fallback_mutex.Unlock();
- allocator.ForceUnlock();
-}
-
-} // namespace __asan
-
-// --- Implementation of LSan-specific functions --- {{{1
-namespace __lsan {
-void LockAllocator() {
- __asan::allocator.ForceLock();
-}
-
-void UnlockAllocator() {
- __asan::allocator.ForceUnlock();
-}
-
-void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
- *begin = (uptr)&__asan::allocator;
- *end = *begin + sizeof(__asan::allocator);
-}
-
-uptr PointsIntoChunk(void* p) {
- uptr addr = reinterpret_cast<uptr>(p);
- __asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
- if (!m) return 0;
- uptr chunk = m->Beg();
- if (m->chunk_state != __asan::CHUNK_ALLOCATED)
- return 0;
- if (m->AddrIsInside(addr, /*locked_version=*/true))
- return chunk;
- if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
- addr))
- return chunk;
- return 0;
-}
-
-uptr GetUserBegin(uptr chunk) {
- __asan::AsanChunk *m =
- __asan::GetAsanChunkByAddrFastLocked(chunk);
- CHECK(m);
- return m->Beg();
-}
-
-LsanMetadata::LsanMetadata(uptr chunk) {
- metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
-}
-
-bool LsanMetadata::allocated() const {
- __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
- return m->chunk_state == __asan::CHUNK_ALLOCATED;
-}
-
-ChunkTag LsanMetadata::tag() const {
- __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
- return static_cast<ChunkTag>(m->lsan_tag);
-}
-
-void LsanMetadata::set_tag(ChunkTag value) {
- __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
- m->lsan_tag = value;
-}
-
-uptr LsanMetadata::requested_size() const {
- __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
- return m->UsedSize(/*locked_version=*/true);
-}
-
-u32 LsanMetadata::stack_trace_id() const {
- __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
- return m->alloc_context_id;
-}
-
-void ForEachChunk(ForEachChunkCallback callback, void *arg) {
- __asan::allocator.ForEachChunk(callback, arg);
-}
-
-IgnoreObjectResult IgnoreObjectLocked(const void *p) {
- uptr addr = reinterpret_cast<uptr>(p);
- __asan::AsanChunk *m = __asan::GetAsanChunkByAddr(addr);
- if (!m) return kIgnoreObjectInvalid;
- if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
- if (m->lsan_tag == kIgnored)
- return kIgnoreObjectAlreadyIgnored;
- m->lsan_tag = __lsan::kIgnored;
- return kIgnoreObjectSuccess;
- } else {
- return kIgnoreObjectInvalid;
- }
-}
-} // namespace __lsan
-
-// ---------------------- Interface ---------------- {{{1
-using namespace __asan; // NOLINT
-
-// ASan allocator doesn't reserve extra bytes, so normally we would
-// just return "size". We don't want to expose our redzone sizes, etc here.
-uptr __sanitizer_get_estimated_allocated_size(uptr size) {
- return size;
-}
-
-int __sanitizer_get_ownership(const void *p) {
- uptr ptr = reinterpret_cast<uptr>(p);
- return (AllocationSize(ptr) > 0);
-}
-
-uptr __sanitizer_get_allocated_size(const void *p) {
- if (p == 0) return 0;
- uptr ptr = reinterpret_cast<uptr>(p);
- uptr allocated_size = AllocationSize(ptr);
- // Die if p is not malloced or if it is already freed.
- if (allocated_size == 0) {
- GET_STACK_TRACE_FATAL_HERE;
- ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
- }
- return allocated_size;
-}
-
-#if !SANITIZER_SUPPORTS_WEAK_HOOKS
-// Provide default (no-op) implementation of malloc hooks.
-extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_malloc_hook(void *ptr, uptr size) {
- (void)ptr;
- (void)size;
-}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_free_hook(void *ptr) {
- (void)ptr;
-}
-} // extern "C"
-#endif
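The allocator code removed above centers its double-free defense on a tiny state machine: AtomicallySetQuarantineFlag flips the chunk state from CHUNK_ALLOCATED to CHUNK_QUARANTINE with a compare-and-swap, so of two racing free() calls exactly one wins and the loser is reported. A minimal, self-contained sketch of that state machine (names are simplified; the real chunk header packs many more fields around the state byte):

// chunk_state_sketch.cc -- illustrative only, not ASan source.
#include <atomic>
#include <cstdint>
#include <cstdio>

enum ChunkState : uint8_t { CHUNK_AVAILABLE, CHUNK_ALLOCATED, CHUNK_QUARANTINE };

struct Chunk {
  std::atomic<uint8_t> state{CHUNK_ALLOCATED};
};

// Returns false on a double-free: only one caller can win the CAS that flips
// ALLOCATED -> QUARANTINE; a racing second free() observes QUARANTINE.
bool FlipToQuarantine(Chunk *c) {
  uint8_t expected = CHUNK_ALLOCATED;
  return c->state.compare_exchange_strong(expected, CHUNK_QUARANTINE,
                                          std::memory_order_acquire);
}

int main() {
  Chunk c;
  std::printf("first free:  %s\n", FlipToQuarantine(&c) ? "ok" : "double-free");
  std::printf("second free: %s\n", FlipToQuarantine(&c) ? "ok" : "double-free");
}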
diff --git a/lib/asan/asan_debugging.cc b/lib/asan/asan_debugging.cc
index 2b66dd5265fc..6fc5b690de99 100644
--- a/lib/asan/asan_debugging.cc
+++ b/lib/asan/asan_debugging.cc
@@ -81,8 +81,8 @@ void AsanLocateAddress(uptr addr, AddressDescription *descr) {
GetInfoForHeapAddress(addr, descr);
}
-uptr AsanGetStack(uptr addr, uptr *trace, uptr size, u32 *thread_id,
- bool alloc_stack) {
+static uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
+ bool alloc_stack) {
AsanChunkView chunk = FindHeapChunkByAddress(addr);
if (!chunk.IsValid()) return 0;
diff --git a/lib/asan/asan_fake_stack.cc b/lib/asan/asan_fake_stack.cc
index c95bc11f6cd3..bf4f1eb4c781 100644
--- a/lib/asan/asan_fake_stack.cc
+++ b/lib/asan/asan_fake_stack.cc
@@ -60,7 +60,7 @@ FakeStack *FakeStack::Create(uptr stack_size_log) {
void FakeStack::Destroy(int tid) {
PoisonAll(0);
- if (common_flags()->verbosity >= 2) {
+ if (Verbosity() >= 2) {
InternalScopedString str(kNumberOfSizeClasses * 50);
for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
@@ -192,20 +192,19 @@ static FakeStack *GetFakeStackFast() {
return GetFakeStack();
}
-ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size, uptr real_stack) {
+ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
FakeStack *fs = GetFakeStackFast();
- if (!fs) return real_stack;
+ if (!fs) return 0;
+ uptr local_stack;
+ uptr real_stack = reinterpret_cast<uptr>(&local_stack);
FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
- if (!ff)
- return real_stack; // Out of fake stack, return the real one.
+ if (!ff) return 0; // Out of fake stack.
uptr ptr = reinterpret_cast<uptr>(ff);
SetShadow(ptr, size, class_id, 0);
return ptr;
}
-ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size, uptr real_stack) {
- if (ptr == real_stack)
- return;
+ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) {
FakeStack::Deallocate(ptr, class_id);
SetShadow(ptr, size, class_id, kMagic8);
}
@@ -216,12 +215,12 @@ ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size, uptr real_stack) {
using namespace __asan;
#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \
- __asan_stack_malloc_##class_id(uptr size, uptr real_stack) { \
- return OnMalloc(class_id, size, real_stack); \
+ __asan_stack_malloc_##class_id(uptr size) { \
+ return OnMalloc(class_id, size); \
} \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
- uptr ptr, uptr size, uptr real_stack) { \
- OnFree(ptr, class_id, size, real_stack); \
+ uptr ptr, uptr size) { \
+ OnFree(ptr, class_id, size); \
}
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)
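Under the new v5 contract above, __asan_stack_malloc_N returns 0 when no fake frame is available (instead of echoing back a caller-supplied real_stack), and the instrumented caller falls back to its real frame. A small repro that exercises this fake-stack machinery (assuming a clang with ASan support; the detect_stack_use_after_return flag is defined later in this patch):

// use_after_return.cc
// Build: clang++ -fsanitize=address -O0 use_after_return.cc
// Run:   ASAN_OPTIONS=detect_stack_use_after_return=1 ./a.out
int *escape() {
  int local = 42;
  return &local;  // frame lives on the fake stack; poisoned when escape() returns
}

int main() {
  int *p = escape();
  return *p;  // reported as stack-use-after-return
}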
diff --git a/lib/asan/asan_flags.cc b/lib/asan/asan_flags.cc
new file mode 100644
index 000000000000..1d82ab0e725f
--- /dev/null
+++ b/lib/asan/asan_flags.cc
@@ -0,0 +1,141 @@
+//===-- asan_flags.cc -------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan flag parsing logic.
+//===----------------------------------------------------------------------===//
+
+#include "asan_activation.h"
+#include "asan_flags.h"
+#include "asan_interface_internal.h"
+#include "asan_stack.h"
+#include "lsan/lsan_common.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+
+namespace __asan {
+
+Flags asan_flags_dont_use_directly; // use via flags().
+
+static const char *MaybeCallAsanDefaultOptions() {
+ return (&__asan_default_options) ? __asan_default_options() : "";
+}
+
+static const char *MaybeUseAsanDefaultOptionsCompileDefinition() {
+#ifdef ASAN_DEFAULT_OPTIONS
+// Stringize the macro value.
+# define ASAN_STRINGIZE(x) #x
+# define ASAN_STRINGIZE_OPTIONS(options) ASAN_STRINGIZE(options)
+ return ASAN_STRINGIZE_OPTIONS(ASAN_DEFAULT_OPTIONS);
+#else
+ return "";
+#endif
+}
+
+void Flags::SetDefaults() {
+#define ASAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "asan_flags.inc"
+#undef ASAN_FLAG
+}
+
+void RegisterAsanFlags(FlagParser *parser, Flags *f) {
+#define ASAN_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+#include "asan_flags.inc"
+#undef ASAN_FLAG
+}
+
+void InitializeFlags(Flags *f) {
+ FlagParser parser;
+ RegisterAsanFlags(&parser, f);
+ RegisterCommonFlags(&parser);
+
+ SetCommonFlagsDefaults();
+ {
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.detect_leaks = CAN_SANITIZE_LEAKS;
+ cf.external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH");
+ cf.malloc_context_size = kDefaultMallocContextSize;
+ cf.intercept_tls_get_addr = true;
+ OverrideCommonFlags(cf);
+ }
+
+ const int kDefaultQuarantineSizeMb = (ASAN_LOW_MEMORY) ? 1UL << 6 : 1UL << 8;
+ f->SetDefaults();
+
+ // Override from compile definition.
+ const char *compile_def = MaybeUseAsanDefaultOptionsCompileDefinition();
+ parser.ParseString(compile_def);
+
+ // Override from user-specified string.
+ const char *default_options = MaybeCallAsanDefaultOptions();
+ parser.ParseString(default_options);
+
+ // Override from the ASAN_OPTIONS environment variable.
+ const char *env = GetEnv("ASAN_OPTIONS");
+ if (env) parser.ParseString(env);
+
+ // Let activation flags override current settings. On Android they come
+ // from a system property. On other platforms this is a no-op.
+ if (!flags()->start_deactivated) {
+ char buf[100];
+ GetExtraActivationFlags(buf, sizeof(buf));
+ parser.ParseString(buf);
+ }
+
+ SetVerbosity(common_flags()->verbosity);
+
+ // TODO(eugenis): dump all flags at verbosity>=2?
+ if (Verbosity()) ReportUnrecognizedFlags();
+
+ if (common_flags()->help) parser.PrintFlagDescriptions();
+
+ // Flag validation:
+ if (!CAN_SANITIZE_LEAKS && common_flags()->detect_leaks) {
+ Report("%s: detect_leaks is not supported on this platform.\n",
+ SanitizerToolName);
+ Die();
+ }
+ // Make "strict_init_order" imply "check_initialization_order".
+ // TODO(samsonov): Use a single runtime flag for an init-order checker.
+ if (f->strict_init_order) {
+ f->check_initialization_order = true;
+ }
+ CHECK_LE((uptr)common_flags()->malloc_context_size, kStackTraceMax);
+ CHECK_LE(f->min_uar_stack_size_log, f->max_uar_stack_size_log);
+ CHECK_GE(f->redzone, 16);
+ CHECK_GE(f->max_redzone, f->redzone);
+ CHECK_LE(f->max_redzone, 2048);
+ CHECK(IsPowerOfTwo(f->redzone));
+ CHECK(IsPowerOfTwo(f->max_redzone));
+
+ // quarantine_size is deprecated but we still honor it.
+ // quarantine_size cannot be used together with quarantine_size_mb.
+ if (f->quarantine_size >= 0 && f->quarantine_size_mb >= 0) {
+ Report("%s: please use either 'quarantine_size' (deprecated) or "
+ "quarantine_size_mb, but not both\n", SanitizerToolName);
+ Die();
+ }
+ if (f->quarantine_size >= 0)
+ f->quarantine_size_mb = f->quarantine_size >> 20;
+ if (f->quarantine_size_mb < 0)
+ f->quarantine_size_mb = kDefaultQuarantineSizeMb;
+}
+
+} // namespace __asan
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char* __asan_default_options() { return ""; }
+} // extern "C"
+#endif
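Note the override order InitializeFlags establishes: compiled-in ASAN_DEFAULT_OPTIONS first, then the __asan_default_options() hook, then the ASAN_OPTIONS environment variable, then (unless start_deactivated) the activation flags. The weak definition at the end of the file is what makes the hook optional; a program opts in by providing a strong definition, for example (the flag values here are only illustrative):

// Baked-in defaults for a program that wants a bigger quarantine and redzone.
// ASAN_OPTIONS set in the environment still overrides these.
extern "C" const char *__asan_default_options() {
  return "quarantine_size_mb=64:redzone=32";  // same syntax as ASAN_OPTIONS
}

int main() { return 0; }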
diff --git a/lib/asan/asan_flags.h b/lib/asan/asan_flags.h
index 3df4dd3050bc..7653543f6d88 100644
--- a/lib/asan/asan_flags.h
+++ b/lib/asan/asan_flags.h
@@ -16,6 +16,7 @@
#define ASAN_FLAGS_H
#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
// ASan flag values can be defined in five ways:
// 1) initialized with default values at startup.
@@ -24,55 +25,24 @@
// 3) overridden from the string returned by the user-specified function
// __asan_default_options().
// 4) overridden from the env variable ASAN_OPTIONS.
+// 5) overridden during ASan activation (for now used on Android only).
namespace __asan {
struct Flags {
- // Flag descriptions are in asan_rtl.cc.
- int quarantine_size;
- int redzone;
- int max_redzone;
- bool debug;
- int report_globals;
- bool check_initialization_order;
- bool replace_str;
- bool replace_intrin;
- bool mac_ignore_invalid_free;
- bool detect_stack_use_after_return;
- int min_uar_stack_size_log;
- int max_uar_stack_size_log;
- bool uar_noreserve;
- int max_malloc_fill_size, malloc_fill_byte;
- int exitcode;
- bool allow_user_poisoning;
- int sleep_before_dying;
- bool check_malloc_usable_size;
- bool unmap_shadow_on_exit;
- bool abort_on_error;
- bool print_stats;
- bool print_legend;
- bool atexit;
- bool allow_reexec;
- bool print_full_thread_history;
- bool poison_heap;
- bool poison_partial;
- bool poison_array_cookie;
- bool alloc_dealloc_mismatch;
- bool new_delete_type_mismatch;
- bool strict_memcmp;
- bool strict_init_order;
- bool start_deactivated;
- int detect_invalid_pointer_pairs;
- bool detect_container_overflow;
- int detect_odr_violation;
- bool dump_instruction_bytes;
+#define ASAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "asan_flags.inc"
+#undef ASAN_FLAG
+
+ void SetDefaults();
};
extern Flags asan_flags_dont_use_directly;
inline Flags *flags() {
return &asan_flags_dont_use_directly;
}
-void InitializeFlags(Flags *f, const char *env);
+void RegisterAsanFlags(FlagParser *parser, Flags *f);
+void InitializeFlags(Flags *f);
} // namespace __asan
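The struct body is now generated with an X-macro: each ASAN_FLAG(Type, Name, DefaultValue, Description) entry in asan_flags.inc expands once into a field here, once into an assignment in SetDefaults(), and once into a RegisterFlag() call in RegisterAsanFlags(). A self-contained sketch of the pattern (DEMO_FLAG_LIST and its entries are illustrative stand-ins, not ASan code):

// xmacro_flags.cc -- illustrative only.
#include <cstdio>

#define DEMO_FLAG_LIST(X)                          \
  X(int, redzone, 16, "Minimal redzone size")      \
  X(bool, poison_heap, true, "Poison heap memory")

struct Flags {
#define DEMO_FLAG(Type, Name, Default, Desc) Type Name;
  DEMO_FLAG_LIST(DEMO_FLAG)
#undef DEMO_FLAG

  void SetDefaults() {
#define DEMO_FLAG(Type, Name, Default, Desc) Name = Default;
    DEMO_FLAG_LIST(DEMO_FLAG)
#undef DEMO_FLAG
  }
};

int main() {
  Flags f;
  f.SetDefaults();
  std::printf("redzone=%d poison_heap=%d\n", f.redzone, f.poison_heap);
}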
diff --git a/lib/asan/asan_flags.inc b/lib/asan/asan_flags.inc
new file mode 100644
index 000000000000..ec9d41980d9c
--- /dev/null
+++ b/lib/asan/asan_flags.inc
@@ -0,0 +1,144 @@
+//===-- asan_flags.inc ------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// ASan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_FLAG
+# error "Define ASAN_FLAG prior to including this file!"
+#endif
+
+// ASAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+ASAN_FLAG(int, quarantine_size, -1,
+ "Deprecated, please use quarantine_size_mb.")
+ASAN_FLAG(int, quarantine_size_mb, -1,
+ "Size (in Mb) of quarantine used to detect use-after-free "
+ "errors. Lower value may reduce memory usage but increase the "
+ "chance of false negatives.")
+ASAN_FLAG(int, redzone, 16,
+ "Minimal size (in bytes) of redzones around heap objects. "
+ "Requirement: redzone >= 16, is a power of two.")
+ASAN_FLAG(int, max_redzone, 2048,
+ "Maximal size (in bytes) of redzones around heap objects.")
+ASAN_FLAG(
+ bool, debug, false,
+ "If set, prints some debugging information and does additional checks.")
+ASAN_FLAG(
+ int, report_globals, 1,
+ "Controls the way to handle globals (0 - don't detect buffer overflow on "
+ "globals, 1 - detect buffer overflow, 2 - print data about registered "
+ "globals).")
+ASAN_FLAG(bool, check_initialization_order, false,
+ "If set, attempts to catch initialization order issues.")
+ASAN_FLAG(
+ bool, replace_str, true,
+ "If set, uses custom wrappers and replacements for libc string functions "
+ "to find more errors.")
+ASAN_FLAG(bool, replace_intrin, true,
+ "If set, uses custom wrappers for memset/memcpy/memmove intinsics.")
+ASAN_FLAG(bool, mac_ignore_invalid_free, false,
+ "Ignore invalid free() calls to work around some bugs. Used on OS X "
+ "only.")
+ASAN_FLAG(bool, detect_stack_use_after_return, false,
+ "Enables stack-use-after-return checking at run-time.")
+ASAN_FLAG(int, min_uar_stack_size_log, 16, // We can't do smaller anyway.
+ "Minimum fake stack size log.")
+ASAN_FLAG(int, max_uar_stack_size_log,
+ 20, // 1Mb per size class, i.e. ~11Mb per thread
+ "Maximum fake stack size log.")
+ASAN_FLAG(bool, uar_noreserve, false,
+ "Use mmap with 'noreserve' flag to allocate fake stack.")
+ASAN_FLAG(
+ int, max_malloc_fill_size, 0x1000, // By default, fill only the first 4K.
+ "ASan allocator flag. max_malloc_fill_size is the maximal amount of "
+ "bytes that will be filled with malloc_fill_byte on malloc.")
+ASAN_FLAG(int, malloc_fill_byte, 0xbe,
+ "Value used to fill the newly allocated memory.")
+ASAN_FLAG(int, exitcode, ASAN_DEFAULT_FAILURE_EXITCODE,
+ "Override the program exit status if the tool found an error.")
+ASAN_FLAG(bool, allow_user_poisoning, true,
+ "If set, user may manually mark memory regions as poisoned or "
+ "unpoisoned.")
+ASAN_FLAG(
+ int, sleep_before_dying, 0,
+ "Number of seconds to sleep between printing an error report and "
+ "terminating the program. Useful for debugging purposes (e.g. when one "
+ "needs to attach gdb).")
+ASAN_FLAG(bool, check_malloc_usable_size, true,
+ "Allows the users to work around the bug in Nvidia drivers prior to "
+ "295.*.")
+ASAN_FLAG(bool, unmap_shadow_on_exit, false,
+ "If set, explicitly unmaps the (huge) shadow at exit.")
+ASAN_FLAG(
+ bool, abort_on_error, false,
+ "If set, the tool calls abort() instead of _exit() after printing the "
+ "error report.")
+ASAN_FLAG(bool, print_stats, false,
+ "Print various statistics after printing an error message or if "
+ "atexit=1.")
+ASAN_FLAG(bool, print_legend, true, "Print the legend for the shadow bytes.")
+ASAN_FLAG(bool, atexit, false,
+ "If set, prints ASan exit stats even after program terminates "
+ "successfully.")
+ASAN_FLAG(
+ bool, print_full_thread_history, true,
+ "If set, prints thread creation stacks for the threads involved in the "
+ "report and their ancestors up to the main thread.")
+ASAN_FLAG(
+ bool, poison_heap, true,
+ "Poison (or not) the heap memory on [de]allocation. Zero value is useful "
+ "for benchmarking the allocator or instrumentator.")
+ASAN_FLAG(bool, poison_partial, true,
+ "If true, poison partially addressable 8-byte aligned words "
+ "(default=true). This flag affects heap and global buffers, but not "
+ "stack buffers.")
+ASAN_FLAG(bool, poison_array_cookie, true,
+ "Poison (or not) the array cookie after operator new[].")
+
+// Turn off alloc/dealloc mismatch checker on Mac and Windows for now.
+// https://code.google.com/p/address-sanitizer/issues/detail?id=131
+// https://code.google.com/p/address-sanitizer/issues/detail?id=309
+// TODO(glider,timurrrr): Fix known issues and enable this back.
+ASAN_FLAG(bool, alloc_dealloc_mismatch,
+ (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0),
+ "Report errors on malloc/delete, new/free, new/delete[], etc.")
+
+ASAN_FLAG(bool, new_delete_type_mismatch, true,
+ "Report errors on mismatch betwen size of new and delete.")
+ASAN_FLAG(bool, strict_memcmp, true,
+ "If true, assume that memcmp(p1, p2, n) always reads n bytes before "
+ "comparing p1 and p2.")
+ASAN_FLAG(
+ bool, strict_init_order, false,
+ "If true, assume that dynamic initializers can never access globals from "
+ "other modules, even if the latter are already initialized.")
+ASAN_FLAG(
+ bool, start_deactivated, false,
+ "If true, ASan tweaks a bunch of other flags (quarantine, redzone, heap "
+ "poisoning) to reduce memory consumption as much as possible, and "
+ "restores them to original values when the first instrumented module is "
+ "loaded into the process. This is mainly intended to be used on "
+ "Android. ")
+ASAN_FLAG(
+ int, detect_invalid_pointer_pairs, 0,
+ "If non-zero, try to detect operations like <, <=, >, >= and - on "
+ "invalid pointer pairs (e.g. when pointers belong to different objects). "
+ "The bigger the value the harder we try.")
+ASAN_FLAG(
+ bool, detect_container_overflow, true,
+ "If true, honor the container overflow annotations. "
+ "See https://code.google.com/p/address-sanitizer/wiki/ContainerOverflow")
+ASAN_FLAG(int, detect_odr_violation, 2,
+ "If >=2, detect violation of One-Definition-Rule (ODR); "
+ "If ==1, detect ODR-violation only if the two variables "
+ "have different sizes")
+ASAN_FLAG(bool, dump_instruction_bytes, false,
+ "If true, dump 16 bytes starting at the instruction that caused SEGV")
diff --git a/lib/asan/asan_globals.cc b/lib/asan/asan_globals.cc
index be111d4fb4cf..c4571953c408 100644
--- a/lib/asan/asan_globals.cc
+++ b/lib/asan/asan_globals.cc
@@ -164,7 +164,7 @@ static void RegisterGlobal(const Global *g) {
}
}
}
- if (flags()->poison_heap)
+ if (CanPoisonMemory())
PoisonRedZones(*g);
ListOfGlobals *l = new(allocator_for_globals) ListOfGlobals;
l->g = g;
@@ -186,7 +186,7 @@ static void UnregisterGlobal(const Global *g) {
CHECK(AddrIsInMem(g->beg));
CHECK(AddrIsAlignedByGranularity(g->beg));
CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
- if (flags()->poison_heap)
+ if (CanPoisonMemory())
PoisonShadowForGlobal(g, 0);
// We unpoison the shadow memory for the global but we do not remove it from
// the list because that would require O(n^2) time with the current list
@@ -249,7 +249,7 @@ void __asan_unregister_globals(__asan_global *globals, uptr n) {
// initializer can only touch global variables in the same TU.
void __asan_before_dynamic_init(const char *module_name) {
if (!flags()->check_initialization_order ||
- !flags()->poison_heap)
+ !CanPoisonMemory())
return;
bool strict_init_order = flags()->strict_init_order;
CHECK(dynamic_init_globals);
@@ -275,7 +275,7 @@ void __asan_before_dynamic_init(const char *module_name) {
// TU are poisoned. It simply unpoisons all dynamically initialized globals.
void __asan_after_dynamic_init() {
if (!flags()->check_initialization_order ||
- !flags()->poison_heap)
+ !CanPoisonMemory())
return;
CHECK(asan_inited);
BlockingMutexLock lock(&mu_for_globals);
diff --git a/lib/asan/asan_init_version.h b/lib/asan/asan_init_version.h
index 77aea81bd298..6cf57c4aa2a8 100644
--- a/lib/asan/asan_init_version.h
+++ b/lib/asan/asan_init_version.h
@@ -25,8 +25,10 @@ extern "C" {
// contains the function PC as the 3rd field (see
// DescribeAddressIfStack).
// v3=>v4: added '__asan_global_source_location' to __asan_global.
- #define __asan_init __asan_init_v4
- #define __asan_init_name "__asan_init_v4"
+ // v4=>v5: changed the semantics and format of __asan_stack_malloc_ and
+ // __asan_stack_free_ functions.
+ #define __asan_init __asan_init_v5
+ #define __asan_init_name "__asan_init_v5"
}
#endif // ASAN_INIT_VERSION_H
diff --git a/lib/asan/asan_interceptors.cc b/lib/asan/asan_interceptors.cc
index 910cd3addcb0..3dc7ec67a3e5 100644
--- a/lib/asan/asan_interceptors.cc
+++ b/lib/asan/asan_interceptors.cc
@@ -142,14 +142,17 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
ASAN_READ_RANGE(ctx, ptr, size)
#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
+ ASAN_INTERCEPTOR_ENTER(ctx, func); \
do { \
if (asan_init_is_running) \
return REAL(func)(__VA_ARGS__); \
- ASAN_INTERCEPTOR_ENTER(ctx, func); \
if (SANITIZER_MAC && UNLIKELY(!asan_inited)) \
return REAL(func)(__VA_ARGS__); \
ENSURE_ASAN_INITED(); \
} while (false)
+#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
+ do { \
+ } while (false)
#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
do { \
} while (false)
@@ -169,8 +172,9 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
} while (false)
#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
-#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res) CovUpdateMapping()
-#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() CovUpdateMapping()
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
+ CoverageUpdateMapping()
+#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() CoverageUpdateMapping()
#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited)
#include "sanitizer_common/sanitizer_common_interceptors.inc"
@@ -196,6 +200,12 @@ struct ThreadStartParam {
};
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
+#if SANITIZER_WINDOWS
+ // FIXME: this is a bandaid fix for PR22025.
+ AsanThread *t = (AsanThread*)arg;
+ SetCurrentThread(t);
+ return t->ThreadStart(GetTid(), /* signal_thread_is_registered */ nullptr);
+#else
ThreadStartParam *param = reinterpret_cast<ThreadStartParam *>(arg);
AsanThread *t = nullptr;
while ((t = reinterpret_cast<AsanThread *>(
@@ -203,6 +213,7 @@ static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
internal_sched_yield();
SetCurrentThread(t);
return t->ThreadStart(GetTid(), &param->is_registered);
+#endif
}
#if ASAN_INTERCEPT_PTHREAD_CREATE
@@ -236,22 +247,26 @@ INTERCEPTOR(int, pthread_create, void *thread,
}
return result;
}
+
+INTERCEPTOR(int, pthread_join, void *t, void **arg) {
+ return real_pthread_join(t, arg);
+}
+
+DEFINE_REAL_PTHREAD_FUNCTIONS
#endif // ASAN_INTERCEPT_PTHREAD_CREATE
#if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
#if SANITIZER_ANDROID
INTERCEPTOR(void*, bsd_signal, int signum, void *handler) {
- if (!AsanInterceptsSignal(signum) ||
- common_flags()->allow_user_segv_handler) {
+ if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
return REAL(bsd_signal)(signum, handler);
}
return 0;
}
#else
INTERCEPTOR(void*, signal, int signum, void *handler) {
- if (!AsanInterceptsSignal(signum) ||
- common_flags()->allow_user_segv_handler) {
+ if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
return REAL(signal)(signum, handler);
}
return 0;
@@ -260,8 +275,7 @@ INTERCEPTOR(void*, signal, int signum, void *handler) {
INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act,
struct sigaction *oldact) {
- if (!AsanInterceptsSignal(signum) ||
- common_flags()->allow_user_segv_handler) {
+ if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
return REAL(sigaction)(signum, act, oldact);
}
return 0;
@@ -802,23 +816,14 @@ INTERCEPTOR_WINAPI(DWORD, CreateThread,
if (flags()->strict_init_order)
StopInitOrderChecking();
GET_STACK_TRACE_THREAD;
+ // FIXME: The CreateThread interceptor is not the same as a pthread_create
+ // one. This is a bandaid fix for PR22025.
bool detached = false; // FIXME: how can we determine it on Windows?
- ThreadStartParam param;
- atomic_store(&param.t, 0, memory_order_relaxed);
- atomic_store(&param.is_registered, 0, memory_order_relaxed);
- DWORD result = REAL(CreateThread)(security, stack_size, asan_thread_start,
- &param, thr_flags, tid);
- if (result) {
- u32 current_tid = GetCurrentTidOrInvalid();
- AsanThread *t =
+ u32 current_tid = GetCurrentTidOrInvalid();
+ AsanThread *t =
AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
- atomic_store(&param.t, reinterpret_cast<uptr>(t), memory_order_release);
- // The pthread_create interceptor waits here, so we do the same for
- // consistency.
- while (atomic_load(&param.is_registered, memory_order_acquire) == 0)
- internal_sched_yield();
- }
- return result;
+ return REAL(CreateThread)(security, stack_size,
+ asan_thread_start, t, thr_flags, tid);
}
namespace __asan {
@@ -902,6 +907,7 @@ void InitializeAsanInterceptors() {
// Intercept threading-related functions
#if ASAN_INTERCEPT_PTHREAD_CREATE
ASAN_INTERCEPT_FUNC(pthread_create);
+ ASAN_INTERCEPT_FUNC(pthread_join);
#endif
// Intercept atexit function.
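The new pthread_join interceptor simply forwards to real_pthread_join (supplied by DEFINE_REAL_PTHREAD_FUNCTIONS); intercepting it at all is what lets the runtime observe the join. A hedged sketch of the general Linux mechanism the INTERCEPTOR machinery wraps (symbol interposition plus dlsym(RTLD_NEXT, ...)), not ASan's exact macro expansion:

// join_shim.cc -- build as a preloadable shim:
//   clang++ -shared -fPIC -o libjoin.so join_shim.cc -ldl
//   LD_PRELOAD=./libjoin.so ./your_program
#include <dlfcn.h>
#include <pthread.h>

extern "C" int pthread_join(pthread_t thread, void **retval) {
  using join_fn = int (*)(pthread_t, void **);
  // Resolve the next definition of the symbol (libc's) exactly once.
  static join_fn real = (join_fn)dlsym(RTLD_NEXT, "pthread_join");
  // A sanitizer would record bookkeeping about the joined thread here.
  return real(thread, retval);
}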
diff --git a/lib/asan/asan_internal.h b/lib/asan/asan_internal.h
index 65d4a47d3d9e..a8e23ce772ff 100644
--- a/lib/asan/asan_internal.h
+++ b/lib/asan/asan_internal.h
@@ -94,7 +94,6 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp);
void AsanOnSIGSEGV(int, void *siginfo, void *context);
void MaybeReexec();
-bool AsanInterceptsSignal(int signum);
void ReadContextStack(void *context, uptr *stack, uptr *ssize);
void AsanPlatformThreadInit();
void StopInitOrderChecking();
@@ -107,10 +106,10 @@ void PlatformTSDDtor(void *tsd);
void AppendToErrorMessageBuffer(const char *buffer);
-void ParseExtraActivationFlags();
-
void *AsanDlSymNext(const char *sym);
+void ReserveShadowMemoryRange(uptr beg, uptr end);
+
// Platform-specific options.
#if SANITIZER_MAC
bool PlatformHasDifferentMemcpyAndMemmove();
diff --git a/lib/asan/asan_linux.cc b/lib/asan/asan_linux.cc
index fdd009c960d8..65605009005f 100644
--- a/lib/asan/asan_linux.cc
+++ b/lib/asan/asan_linux.cc
@@ -220,10 +220,6 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
#endif
}
-bool AsanInterceptsSignal(int signum) {
- return signum == SIGSEGV && common_flags()->handle_segv;
-}
-
void AsanPlatformThreadInit() {
// Nothing here for now.
}
diff --git a/lib/asan/asan_mac.cc b/lib/asan/asan_mac.cc
index ae0fa15b6523..5c2caeae4934 100644
--- a/lib/asan/asan_mac.cc
+++ b/lib/asan/asan_mac.cc
@@ -102,7 +102,6 @@ void LeakyResetEnv(const char *name, const char *name_value) {
}
void MaybeReexec() {
- if (!flags()->allow_reexec) return;
// Make sure the dynamic ASan runtime library is preloaded so that the
// wrappers work. If it is not, set DYLD_INSERT_LIBRARIES and re-exec
// ourselves.
@@ -113,8 +112,10 @@ void MaybeReexec() {
uptr old_env_len = dyld_insert_libraries ?
internal_strlen(dyld_insert_libraries) : 0;
uptr fname_len = internal_strlen(info.dli_fname);
+ const char *dylib_name = StripModuleName(info.dli_fname);
+ uptr dylib_name_len = internal_strlen(dylib_name);
if (!dyld_insert_libraries ||
- !REAL(strstr)(dyld_insert_libraries, StripModuleName(info.dli_fname))) {
+ !REAL(strstr)(dyld_insert_libraries, dylib_name)) {
// DYLD_INSERT_LIBRARIES is not set or does not contain the runtime
// library.
char program_name[1024];
@@ -140,58 +141,74 @@ void MaybeReexec() {
VReport(1, "exec()-ing the program with\n");
VReport(1, "%s=%s\n", kDyldInsertLibraries, new_env);
VReport(1, "to enable ASan wrappers.\n");
- VReport(1, "Set ASAN_OPTIONS=allow_reexec=0 to disable this.\n");
execv(program_name, *_NSGetArgv());
- } else {
- // DYLD_INSERT_LIBRARIES is set and contains the runtime library.
- if (old_env_len == fname_len) {
- // It's just the runtime library name - fine to unset the variable.
- LeakyResetEnv(kDyldInsertLibraries, NULL);
+
+ // We get here only if execv() failed.
+ Report("ERROR: The process is launched without DYLD_INSERT_LIBRARIES, "
+ "which is required for ASan to work. ASan tried to set the "
+ "environment variable and re-execute itself, but execv() failed, "
+ "possibly because of sandbox restrictions. Make sure to launch the "
+ "executable with:\n%s=%s\n", kDyldInsertLibraries, new_env);
+ CHECK("execv failed" && 0);
+ }
+
+ // DYLD_INSERT_LIBRARIES is set and contains the runtime library. Let's remove
+ // the dylib from the environment variable, because interceptors are installed
+ // and we don't want our children to inherit the variable.
+
+ uptr env_name_len = internal_strlen(kDyldInsertLibraries);
+ // Allocate memory to hold the previous env var name, its value, the '='
+ // sign and the '\0' char.
+ char *new_env = (char*)allocator_for_env.Allocate(
+ old_env_len + 2 + env_name_len);
+ CHECK(new_env);
+ internal_memset(new_env, '\0', old_env_len + 2 + env_name_len);
+ internal_strncpy(new_env, kDyldInsertLibraries, env_name_len);
+ new_env[env_name_len] = '=';
+ char *new_env_pos = new_env + env_name_len + 1;
+
+ // Iterate over colon-separated pieces of |dyld_insert_libraries|.
+ char *piece_start = dyld_insert_libraries;
+ char *piece_end = NULL;
+ char *old_env_end = dyld_insert_libraries + old_env_len;
+ do {
+ if (piece_start[0] == ':') piece_start++;
+ piece_end = REAL(strchr)(piece_start, ':');
+ if (!piece_end) piece_end = dyld_insert_libraries + old_env_len;
+ if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break;
+ uptr piece_len = piece_end - piece_start;
+
+ char *filename_start =
+ (char *)internal_memrchr(piece_start, '/', piece_len);
+ uptr filename_len = piece_len;
+ if (filename_start) {
+ filename_start += 1;
+ filename_len = piece_len - (filename_start - piece_start);
} else {
- uptr env_name_len = internal_strlen(kDyldInsertLibraries);
- // Allocate memory to hold the previous env var name, its value, the '='
- // sign and the '\0' char.
- char *new_env = (char*)allocator_for_env.Allocate(
- old_env_len + 2 + env_name_len);
- CHECK(new_env);
- internal_memset(new_env, '\0', old_env_len + 2 + env_name_len);
- internal_strncpy(new_env, kDyldInsertLibraries, env_name_len);
- new_env[env_name_len] = '=';
- char *new_env_pos = new_env + env_name_len + 1;
-
- // Iterate over colon-separated pieces of |dyld_insert_libraries|.
- char *piece_start = dyld_insert_libraries;
- char *piece_end = NULL;
- char *old_env_end = dyld_insert_libraries + old_env_len;
- do {
- if (piece_start[0] == ':') piece_start++;
- piece_end = REAL(strchr)(piece_start, ':');
- if (!piece_end) piece_end = dyld_insert_libraries + old_env_len;
- if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break;
- uptr piece_len = piece_end - piece_start;
-
- // If the current piece isn't the runtime library name,
- // append it to new_env.
- if ((piece_len != fname_len) ||
- (internal_strncmp(piece_start, info.dli_fname, fname_len) != 0)) {
- if (new_env_pos != new_env + env_name_len + 1) {
- new_env_pos[0] = ':';
- new_env_pos++;
- }
- internal_strncpy(new_env_pos, piece_start, piece_len);
- }
- // Move on to the next piece.
- new_env_pos += piece_len;
- piece_start = piece_end;
- } while (piece_start < old_env_end);
-
- // Can't use setenv() here, because it requires the allocator to be
- // initialized.
- // FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in
- // a separate function called after InitializeAllocator().
- LeakyResetEnv(kDyldInsertLibraries, new_env);
+ filename_start = piece_start;
}
- }
+
+ // If the current piece isn't the runtime library name,
+ // append it to new_env.
+ if ((dylib_name_len != filename_len) ||
+ (internal_memcmp(filename_start, dylib_name, dylib_name_len) != 0)) {
+ if (new_env_pos != new_env + env_name_len + 1) {
+ new_env_pos[0] = ':';
+ new_env_pos++;
+ }
+ internal_strncpy(new_env_pos, piece_start, piece_len);
+ new_env_pos += piece_len;
+ }
+ // Move on to the next piece.
+ piece_start = piece_end;
+ } while (piece_start < old_env_end);
+
+ // Can't use setenv() here, because it requires the allocator to be
+ // initialized.
+ // FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in
+ // a separate function called after InitializeAllocator().
+ if (new_env_pos == new_env + env_name_len + 1) new_env = NULL;
+ LeakyResetEnv(kDyldInsertLibraries, new_env);
}
// No-op. Mac does not support static linkage anyway.
@@ -205,11 +222,6 @@ void AsanCheckDynamicRTPrereqs() {}
// No-op. Mac does not support static linkage anyway.
void AsanCheckIncompatibleRT() {}
-bool AsanInterceptsSignal(int signum) {
- return (signum == SIGSEGV || signum == SIGBUS) &&
- common_flags()->handle_segv;
-}
-
void AsanPlatformThreadInit() {
}
@@ -312,7 +324,7 @@ asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
dispatch_function_t func) { \
GET_STACK_TRACE_THREAD; \
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); \
- if (common_flags()->verbosity >= 2) { \
+ if (Verbosity() >= 2) { \
Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \
asan_ctxt, pthread_self()); \
PRINT_CURRENT_STACK(); \
@@ -330,7 +342,7 @@ INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
dispatch_function_t func) {
GET_STACK_TRACE_THREAD;
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
- if (common_flags()->verbosity >= 2) {
+ if (Verbosity() >= 2) {
Report("dispatch_after_f: %p\n", asan_ctxt);
PRINT_CURRENT_STACK();
}
@@ -343,7 +355,7 @@ INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
dispatch_function_t func) {
GET_STACK_TRACE_THREAD;
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
- if (common_flags()->verbosity >= 2) {
+ if (Verbosity() >= 2) {
Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n",
asan_ctxt, pthread_self());
PRINT_CURRENT_STACK();
@@ -373,13 +385,6 @@ void dispatch_source_set_event_handler(dispatch_source_t ds, void(^work)(void));
work(); \
}
-// Forces the compiler to generate a frame pointer in the function.
-#define ENABLE_FRAME_POINTER \
- do { \
- volatile uptr enable_fp; \
- enable_fp = GET_CURRENT_FRAME(); \
- } while (0)
-
INTERCEPTOR(void, dispatch_async,
dispatch_queue_t dq, void(^work)(void)) {
ENABLE_FRAME_POINTER;
@@ -403,6 +408,10 @@ INTERCEPTOR(void, dispatch_after,
INTERCEPTOR(void, dispatch_source_set_cancel_handler,
dispatch_source_t ds, void(^work)(void)) {
+ if (!work) {
+ REAL(dispatch_source_set_cancel_handler)(ds, work);
+ return;
+ }
ENABLE_FRAME_POINTER;
GET_ASAN_BLOCK(work);
REAL(dispatch_source_set_cancel_handler)(ds, asan_block);
diff --git a/lib/asan/asan_malloc_mac.cc b/lib/asan/asan_malloc_mac.cc
index 79b6dfae10f6..d7a6307c9bdc 100644
--- a/lib/asan/asan_malloc_mac.cc
+++ b/lib/asan/asan_malloc_mac.cc
@@ -152,13 +152,17 @@ INTERCEPTOR(int, posix_memalign, void **memptr, size_t alignment, size_t size) {
namespace {
-// TODO(glider): the mz_* functions should be united with the Linux wrappers,
-// as they are basically copied from there.
-size_t mz_size(malloc_zone_t* zone, const void* ptr) {
+// TODO(glider): the __asan_mz_* functions should be united with the Linux
+// wrappers, as they are basically copied from there.
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+size_t __asan_mz_size(malloc_zone_t* zone, const void* ptr) {
return asan_mz_size(ptr);
}
-void *mz_malloc(malloc_zone_t *zone, size_t size) {
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__asan_mz_malloc(malloc_zone_t *zone, uptr size) {
if (UNLIKELY(!asan_inited)) {
CHECK(system_malloc_zone);
return malloc_zone_malloc(system_malloc_zone, size);
@@ -167,7 +171,9 @@ void *mz_malloc(malloc_zone_t *zone, size_t size) {
return asan_malloc(size, &stack);
}
-void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__asan_mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
if (UNLIKELY(!asan_inited)) {
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
const size_t kCallocPoolSize = 1024;
@@ -183,7 +189,9 @@ void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
return asan_calloc(nmemb, size, &stack);
}
-void *mz_valloc(malloc_zone_t *zone, size_t size) {
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__asan_mz_valloc(malloc_zone_t *zone, size_t size) {
if (UNLIKELY(!asan_inited)) {
CHECK(system_malloc_zone);
return malloc_zone_valloc(system_malloc_zone, size);
@@ -210,11 +218,15 @@ void ALWAYS_INLINE free_common(void *context, void *ptr) {
}
// TODO(glider): the allocation callbacks need to be refactored.
-void mz_free(malloc_zone_t *zone, void *ptr) {
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_mz_free(malloc_zone_t *zone, void *ptr) {
free_common(zone, ptr);
}
-void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__asan_mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
if (!ptr) {
GET_STACK_TRACE_MALLOC;
return asan_malloc(size, &stack);
@@ -233,15 +245,16 @@ void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
}
}
-void mz_destroy(malloc_zone_t* zone) {
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_mz_destroy(malloc_zone_t* zone) {
// A no-op -- we will not be destroyed!
- Report("mz_destroy() called -- ignoring\n");
+ Report("__asan_mz_destroy() called -- ignoring\n");
}
- // from AvailabilityMacros.h
-#if defined(MAC_OS_X_VERSION_10_6) && \
- MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
-void *mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__asan_mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
if (UNLIKELY(!asan_inited)) {
CHECK(system_malloc_zone);
return malloc_zone_memalign(system_malloc_zone, align, size);
@@ -252,12 +265,12 @@ void *mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
// This function is currently unused, and we build with -Werror.
#if 0
-void mz_free_definite_size(malloc_zone_t* zone, void *ptr, size_t size) {
+void __asan_mz_free_definite_size(
+ malloc_zone_t* zone, void *ptr, size_t size) {
// TODO(glider): check that |size| is valid.
UNIMPLEMENTED();
}
#endif
-#endif
kern_return_t mi_enumerator(task_t task, void *,
unsigned type_mask, vm_address_t zone_address,
@@ -299,13 +312,10 @@ void mi_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
internal_memcpy(stats, &malloc_stats, sizeof(malloc_statistics_t));
}
-#if defined(MAC_OS_X_VERSION_10_6) && \
- MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
boolean_t mi_zone_locked(malloc_zone_t *zone) {
// UNIMPLEMENTED();
return false;
}
-#endif
} // unnamed namespace
@@ -324,32 +334,25 @@ void ReplaceSystemMalloc() {
asan_introspection.force_lock = &mi_force_lock;
asan_introspection.force_unlock = &mi_force_unlock;
asan_introspection.statistics = &mi_statistics;
+ asan_introspection.zone_locked = &mi_zone_locked;
internal_memset(&asan_zone, 0, sizeof(malloc_zone_t));
- // Start with a version 4 zone which is used for OS X 10.4 and 10.5.
- asan_zone.version = 4;
+ // Use version 6 for OSX >= 10.6.
+ asan_zone.version = 6;
asan_zone.zone_name = "asan";
- asan_zone.size = &mz_size;
- asan_zone.malloc = &mz_malloc;
- asan_zone.calloc = &mz_calloc;
- asan_zone.valloc = &mz_valloc;
- asan_zone.free = &mz_free;
- asan_zone.realloc = &mz_realloc;
- asan_zone.destroy = &mz_destroy;
+ asan_zone.size = &__asan_mz_size;
+ asan_zone.malloc = &__asan_mz_malloc;
+ asan_zone.calloc = &__asan_mz_calloc;
+ asan_zone.valloc = &__asan_mz_valloc;
+ asan_zone.free = &__asan_mz_free;
+ asan_zone.realloc = &__asan_mz_realloc;
+ asan_zone.destroy = &__asan_mz_destroy;
asan_zone.batch_malloc = 0;
asan_zone.batch_free = 0;
- asan_zone.introspect = &asan_introspection;
-
- // from AvailabilityMacros.h
-#if defined(MAC_OS_X_VERSION_10_6) && \
- MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
- // Switch to version 6 on OSX 10.6 to support memalign.
- asan_zone.version = 6;
asan_zone.free_definite_size = 0;
- asan_zone.memalign = &mz_memalign;
- asan_introspection.zone_locked = &mi_zone_locked;
-#endif
+ asan_zone.memalign = &__asan_mz_memalign;
+ asan_zone.introspect = &asan_introspection;
// Register the ASan zone.
malloc_zone_register(&asan_zone);
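Zone replacement works by filling a malloc_zone_t with wrapper entry points and registering it through Apple's malloc-zone API. A minimal sketch of the same plumbing that merely delegates to the default zone (illustrative only; a real replacement such as the ASan zone must supply every callback its declared version implies and route into its own allocator):

// zone_sketch.cc -- OS X only; illustrative, not a drop-in allocator.
#include <malloc/malloc.h>
#include <cstring>
#include <cstdio>

static void *demo_malloc(malloc_zone_t *, size_t size) {
  return malloc_zone_malloc(malloc_default_zone(), size);  // delegate
}
static void demo_free(malloc_zone_t *, void *ptr) {
  malloc_zone_free(malloc_default_zone(), ptr);
}
static size_t demo_size(malloc_zone_t *, const void *ptr) {
  malloc_zone_t *dz = malloc_default_zone();
  return dz->size(dz, ptr);  // ownership query, delegated to the default zone
}

int main() {
  malloc_zone_t demo_zone;
  std::memset(&demo_zone, 0, sizeof(demo_zone));
  demo_zone.version = 6;  // memalign-capable ABI, like the ASan zone above
  demo_zone.zone_name = "demo";
  demo_zone.size = demo_size;
  demo_zone.malloc = demo_malloc;
  demo_zone.free = demo_free;
  malloc_zone_register(&demo_zone);
  void *p = malloc_zone_malloc(&demo_zone, 16);
  std::printf("got %p, size %zu\n", p, malloc_size(p));
  malloc_zone_free(&demo_zone, p);
  return 0;
}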
diff --git a/lib/asan/asan_mapping.h b/lib/asan/asan_mapping.h
index 2746754152b6..5cb011d683d3 100644
--- a/lib/asan/asan_mapping.h
+++ b/lib/asan/asan_mapping.h
@@ -59,13 +59,20 @@
// || `[0x20000000, 0x23ffffff]` || LowShadow ||
// || `[0x00000000, 0x1fffffff]` || LowMem ||
//
-// Default Linux/MIPS mapping:
+// Default Linux/MIPS32 mapping:
// || `[0x2aaa0000, 0xffffffff]` || HighMem ||
// || `[0x0fff4000, 0x2aa9ffff]` || HighShadow ||
// || `[0x0bff4000, 0x0fff3fff]` || ShadowGap ||
// || `[0x0aaa0000, 0x0bff3fff]` || LowShadow ||
// || `[0x00000000, 0x0aa9ffff]` || LowMem ||
//
+// Default Linux/MIPS64 mapping:
+// || `[0x4000000000, 0xffffffffff]` || HighMem ||
+// || `[0x2800000000, 0x3fffffffff]` || HighShadow ||
+// || `[0x2400000000, 0x27ffffffff]` || ShadowGap ||
+// || `[0x2000000000, 0x23ffffffff]` || LowShadow ||
+// || `[0x0000000000, 0x1fffffffff]` || LowMem ||
+//
// Shadow mapping on FreeBSD/x86-64 with SHADOW_OFFSET == 0x400000000000:
// || `[0x500000000000, 0x7fffffffffff]` || HighMem ||
// || `[0x4a0000000000, 0x4fffffffffff]` || HighShadow ||
@@ -79,6 +86,15 @@
// || `[0x48000000, 0x4bffffff]` || ShadowGap ||
// || `[0x40000000, 0x47ffffff]` || LowShadow ||
// || `[0x00000000, 0x3fffffff]` || LowMem ||
+//
+// Default Windows/i386 mapping:
+// (the exact location of HighShadow/HighMem may vary depending
+// on WoW64, /LARGEADDRESSAWARE, etc).
+// || `[0x50000000, 0xffffffff]` || HighMem ||
+// || `[0x3a000000, 0x4fffffff]` || HighShadow ||
+// || `[0x36000000, 0x39ffffff]` || ShadowGap ||
+// || `[0x30000000, 0x35ffffff]` || LowShadow ||
+// || `[0x00000000, 0x2fffffff]` || LowMem ||
static const u64 kDefaultShadowScale = 3;
static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000
@@ -87,10 +103,11 @@ static const u64 kDefaultShadowOffset64 = 1ULL << 44;
static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G.
static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
-static const u64 kMIPS64_ShadowOffset64 = 1ULL << 36;
+static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
+static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
#define SHADOW_SCALE kDefaultShadowScale
#if SANITIZER_ANDROID
@@ -101,12 +118,12 @@ static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
# define SHADOW_OFFSET kMIPS32_ShadowOffset32
# elif SANITIZER_FREEBSD
# define SHADOW_OFFSET kFreeBSD_ShadowOffset32
+# elif SANITIZER_IOS
+# define SHADOW_OFFSET kIosShadowOffset32
+# elif SANITIZER_WINDOWS
+# define SHADOW_OFFSET kWindowsShadowOffset32
# else
-# if SANITIZER_IOS
-# define SHADOW_OFFSET kIosShadowOffset32
-# else
-# define SHADOW_OFFSET kDefaultShadowOffset32
-# endif
+# define SHADOW_OFFSET kDefaultShadowOffset32
# endif
# else
# if defined(__aarch64__)
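
Every table above follows from the same linear mapping, Shadow = (Mem >> SHADOW_SCALE) + SHADOW_OFFSET; the new MIPS64 rows, for instance, fall directly out of the 1ULL << 37 offset. A small standalone check, mirroring what the runtime's MEM_TO_SHADOW macro computes:

#include <cstdint>
#include <cstdio>

static const uint64_t kScale = 3;            // kDefaultShadowScale
static const uint64_t kOffset = 1ULL << 37;  // kMIPS64_ShadowOffset64

static uint64_t MemToShadow(uint64_t mem) { return (mem >> kScale) + kOffset; }

int main() {
  // LowMem ends at 0x1fffffffff; its shadow is LowShadow's end, 0x23ffffffff.
  std::printf("%llx\n", (unsigned long long)MemToShadow(0x1fffffffffULL));
  // HighMem starts at 0x4000000000; its shadow is HighShadow's start, 0x2800000000.
  std::printf("%llx\n", (unsigned long long)MemToShadow(0x4000000000ULL));
  return 0;
}
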
diff --git a/lib/asan/asan_poisoning.cc b/lib/asan/asan_poisoning.cc
index 1c6e92f69c65..e2b1f4dc4d5e 100644
--- a/lib/asan/asan_poisoning.cc
+++ b/lib/asan/asan_poisoning.cc
@@ -15,13 +15,24 @@
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
+#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_flags.h"
namespace __asan {
+static atomic_uint8_t can_poison_memory;
+
+void SetCanPoisonMemory(bool value) {
+ atomic_store(&can_poison_memory, value, memory_order_release);
+}
+
+bool CanPoisonMemory() {
+ return atomic_load(&can_poison_memory, memory_order_acquire);
+}
+
void PoisonShadow(uptr addr, uptr size, u8 value) {
- if (!flags()->poison_heap) return;
+ if (!CanPoisonMemory()) return;
CHECK(AddrIsAlignedByGranularity(addr));
CHECK(AddrIsInMem(addr));
CHECK(AddrIsAlignedByGranularity(addr + size));
@@ -34,7 +45,7 @@ void PoisonShadowPartialRightRedzone(uptr addr,
uptr size,
uptr redzone_size,
u8 value) {
- if (!flags()->poison_heap) return;
+ if (!CanPoisonMemory()) return;
CHECK(AddrIsAlignedByGranularity(addr));
CHECK(AddrIsInMem(addr));
FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value);
@@ -63,10 +74,10 @@ void FlushUnneededASanShadowMemory(uptr p, uptr size) {
void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
uptr end = ptr + size;
- if (common_flags()->verbosity) {
+ if (Verbosity()) {
Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n",
poison ? "" : "un", ptr, end, size);
- if (common_flags()->verbosity >= 2)
+ if (Verbosity() >= 2)
PRINT_CURRENT_STACK();
}
CHECK(size);
diff --git a/lib/asan/asan_poisoning.h b/lib/asan/asan_poisoning.h
index feda1a984544..3fc94649fb39 100644
--- a/lib/asan/asan_poisoning.h
+++ b/lib/asan/asan_poisoning.h
@@ -19,6 +19,10 @@
namespace __asan {
+// Enable/disable memory poisoning.
+void SetCanPoisonMemory(bool value);
+bool CanPoisonMemory();
+
// Poisons the shadow memory for "size" bytes starting from "addr".
void PoisonShadow(uptr addr, uptr size, u8 value);
@@ -34,7 +38,7 @@ void PoisonShadowPartialRightRedzone(uptr addr,
// performance-critical code with care.
ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
u8 value) {
- DCHECK(flags()->poison_heap);
+ DCHECK(CanPoisonMemory());
uptr shadow_beg = MEM_TO_SHADOW(aligned_beg);
uptr shadow_end = MEM_TO_SHADOW(
aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1;
@@ -60,15 +64,14 @@ ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
if (page_end != shadow_end) {
REAL(memset)((void *)page_end, 0, shadow_end - page_end);
}
- void *res = MmapFixedNoReserve(page_beg, page_end - page_beg);
- CHECK_EQ(page_beg, res);
+ ReserveShadowMemoryRange(page_beg, page_end - 1);
}
}
}
ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
uptr aligned_addr, uptr size, uptr redzone_size, u8 value) {
- DCHECK(flags()->poison_heap);
+ DCHECK(CanPoisonMemory());
bool poison_partial = flags()->poison_partial;
u8 *shadow = (u8*)MEM_TO_SHADOW(aligned_addr);
for (uptr i = 0; i < redzone_size; i += SHADOW_GRANULARITY, shadow++) {
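
The new CanPoisonMemory() gate replaces direct reads of flags()->poison_heap so that activation and deactivation can flip poisoning at runtime. A standalone sketch of the same release/acquire pattern, assuming std::atomic in place of sanitizer_common's atomic_uint8_t (function names here are illustrative):

#include <atomic>

static std::atomic<bool> can_poison_memory{true};

void SetCanPoison(bool value) {
  // release pairs with the acquire load below: flag updates made before
  // (de)activation become visible to the poisoning fast paths.
  can_poison_memory.store(value, std::memory_order_release);
}

bool CanPoison() {
  return can_poison_memory.load(std::memory_order_acquire);
}
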
diff --git a/lib/asan/asan_report.cc b/lib/asan/asan_report.cc
index 0fb50276186b..8706d5decc0b 100644
--- a/lib/asan/asan_report.cc
+++ b/lib/asan/asan_report.cc
@@ -53,7 +53,7 @@ void AppendToErrorMessageBuffer(const char *buffer) {
buffer, remaining);
error_message_buffer[error_message_buffer_size - 1] = '\0';
// FIXME: reallocate the buffer instead of truncating the message.
- error_message_buffer_pos += remaining > length ? length : remaining;
+ error_message_buffer_pos += Min(remaining, length);
}
}
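
The Min() form computes the same truncating advance as the old ternary: the cursor moves by however many bytes actually fit. A hypothetical standalone version of the append logic (the runtime uses its own buffer and internal helpers):

#include <algorithm>
#include <cstddef>
#include <cstring>

// Copy at most `remaining` bytes of msg and advance the cursor by what fit,
// always keeping the buffer NUL-terminated.
void AppendTruncated(char *buf, std::size_t cap, std::size_t &pos,
                     const char *msg) {
  if (pos + 1 >= cap) return;
  std::size_t remaining = cap - pos - 1;  // keep room for the terminator
  std::size_t length = std::strlen(msg);
  std::size_t n = std::min(remaining, length);
  std::memcpy(buf + pos, msg, n);
  pos += n;
  buf[pos] = '\0';
}
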
@@ -937,6 +937,8 @@ using namespace __asan; // NOLINT
void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
uptr access_size) {
+ ENABLE_FRAME_POINTER;
+
// Determine the error type.
const char *bug_descr = "unknown-crash";
if (AddrIsInMem(addr)) {
diff --git a/lib/asan/asan_rtl.cc b/lib/asan/asan_rtl.cc
index 34fb111d5607..0b2a23d40379 100644
--- a/lib/asan/asan_rtl.cc
+++ b/lib/asan/asan_rtl.cc
@@ -56,8 +56,6 @@ static void AsanDie() {
}
if (common_flags()->coverage)
__sanitizer_cov_dump();
- if (death_callback)
- death_callback();
if (flags()->abort_on_error)
Abort();
internal__exit(flags()->exitcode);
@@ -72,265 +70,9 @@ static void AsanCheckFailed(const char *file, int line, const char *cond,
Die();
}
-// -------------------------- Flags ------------------------- {{{1
-static const int kDefaultMallocContextSize = 30;
-
-Flags asan_flags_dont_use_directly; // use via flags().
-
-static const char *MaybeCallAsanDefaultOptions() {
- return (&__asan_default_options) ? __asan_default_options() : "";
-}
-
-static const char *MaybeUseAsanDefaultOptionsCompileDefinition() {
-#ifdef ASAN_DEFAULT_OPTIONS
-// Stringize the macro value.
-# define ASAN_STRINGIZE(x) #x
-# define ASAN_STRINGIZE_OPTIONS(options) ASAN_STRINGIZE(options)
- return ASAN_STRINGIZE_OPTIONS(ASAN_DEFAULT_OPTIONS);
-#else
- return "";
-#endif
-}
-
-static void ParseFlagsFromString(Flags *f, const char *str) {
- CommonFlags *cf = common_flags();
- ParseCommonFlagsFromString(cf, str);
- CHECK((uptr)cf->malloc_context_size <= kStackTraceMax);
- // Please write meaningful flag descriptions when adding new flags.
- ParseFlag(str, &f->quarantine_size, "quarantine_size",
- "Size (in bytes) of quarantine used to detect use-after-free "
- "errors. Lower value may reduce memory usage but increase the "
- "chance of false negatives.");
- ParseFlag(str, &f->redzone, "redzone",
- "Minimal size (in bytes) of redzones around heap objects. "
- "Requirement: redzone >= 16, is a power of two.");
- ParseFlag(str, &f->max_redzone, "max_redzone",
- "Maximal size (in bytes) of redzones around heap objects.");
- CHECK_GE(f->redzone, 16);
- CHECK_GE(f->max_redzone, f->redzone);
- CHECK_LE(f->max_redzone, 2048);
- CHECK(IsPowerOfTwo(f->redzone));
- CHECK(IsPowerOfTwo(f->max_redzone));
-
- ParseFlag(str, &f->debug, "debug",
- "If set, prints some debugging information and does additional checks.");
- ParseFlag(str, &f->report_globals, "report_globals",
- "Controls the way to handle globals (0 - don't detect buffer overflow on "
- "globals, 1 - detect buffer overflow, 2 - print data about registered "
- "globals).");
-
- ParseFlag(str, &f->check_initialization_order,
- "check_initialization_order",
- "If set, attempts to catch initialization order issues.");
-
- ParseFlag(str, &f->replace_str, "replace_str",
- "If set, uses custom wrappers and replacements for libc string functions "
- "to find more errors.");
-
- ParseFlag(str, &f->replace_intrin, "replace_intrin",
- "If set, uses custom wrappers for memset/memcpy/memmove intrinsics.");
- ParseFlag(str, &f->mac_ignore_invalid_free, "mac_ignore_invalid_free",
- "Ignore invalid free() calls to work around some bugs. Used on OS X "
- "only.");
- ParseFlag(str, &f->detect_stack_use_after_return,
- "detect_stack_use_after_return",
- "Enables stack-use-after-return checking at run-time.");
- ParseFlag(str, &f->min_uar_stack_size_log, "min_uar_stack_size_log",
- "Minimum fake stack size log.");
- ParseFlag(str, &f->max_uar_stack_size_log, "max_uar_stack_size_log",
- "Maximum fake stack size log.");
- ParseFlag(str, &f->uar_noreserve, "uar_noreserve",
- "Use mmap with 'noreserve' flag to allocate fake stack.");
- ParseFlag(str, &f->max_malloc_fill_size, "max_malloc_fill_size",
- "ASan allocator flag. max_malloc_fill_size is the maximal amount of "
- "bytes that will be filled with malloc_fill_byte on malloc.");
- ParseFlag(str, &f->malloc_fill_byte, "malloc_fill_byte",
- "Value used to fill the newly allocated memory.");
- ParseFlag(str, &f->exitcode, "exitcode",
- "Override the program exit status if the tool found an error.");
- ParseFlag(str, &f->allow_user_poisoning, "allow_user_poisoning",
- "If set, user may manually mark memory regions as poisoned or "
- "unpoisoned.");
- ParseFlag(str, &f->sleep_before_dying, "sleep_before_dying",
- "Number of seconds to sleep between printing an error report and "
- "terminating the program. Useful for debugging purposes (e.g. when one "
- "needs to attach gdb).");
-
- ParseFlag(str, &f->check_malloc_usable_size, "check_malloc_usable_size",
- "Allows users to work around a bug in Nvidia drivers prior to "
- "295.*.");
-
- ParseFlag(str, &f->unmap_shadow_on_exit, "unmap_shadow_on_exit",
- "If set, explicitly unmaps the (huge) shadow at exit.");
- ParseFlag(str, &f->abort_on_error, "abort_on_error",
- "If set, the tool calls abort() instead of _exit() after printing the "
- "error report.");
- ParseFlag(str, &f->print_stats, "print_stats",
- "Print various statistics after printing an error message or if "
- "atexit=1.");
- ParseFlag(str, &f->print_legend, "print_legend",
- "Print the legend for the shadow bytes.");
- ParseFlag(str, &f->atexit, "atexit",
- "If set, prints ASan exit stats even after program terminates "
- "successfully.");
-
- ParseFlag(str, &f->allow_reexec, "allow_reexec",
- "Allow the tool to re-exec the program. This may interfere badly with "
- "the debugger.");
-
- ParseFlag(str, &f->print_full_thread_history,
- "print_full_thread_history",
- "If set, prints thread creation stacks for the threads involved in the "
- "report and their ancestors up to the main thread.");
-
- ParseFlag(str, &f->poison_heap, "poison_heap",
- "Poison (or not) the heap memory on [de]allocation. Zero value is useful "
- "for benchmarking the allocator or the instrumentation.");
-
- ParseFlag(str, &f->poison_array_cookie, "poison_array_cookie",
- "Poison (or not) the array cookie after operator new[].");
-
- ParseFlag(str, &f->poison_partial, "poison_partial",
- "If true, poison partially addressable 8-byte aligned words "
- "(default=true). This flag affects heap and global buffers, but not "
- "stack buffers.");
-
- ParseFlag(str, &f->alloc_dealloc_mismatch, "alloc_dealloc_mismatch",
- "Report errors on malloc/delete, new/free, new/delete[], etc.");
-
- ParseFlag(str, &f->new_delete_type_mismatch, "new_delete_type_mismatch",
- "Report errors on mismatch between size of new and delete.");
-
- ParseFlag(str, &f->strict_memcmp, "strict_memcmp",
- "If true, assume that memcmp(p1, p2, n) always reads n bytes before "
- "comparing p1 and p2.");
-
- ParseFlag(str, &f->strict_init_order, "strict_init_order",
- "If true, assume that dynamic initializers can never access globals from "
- "other modules, even if the latter are already initialized.");
-
- ParseFlag(str, &f->start_deactivated, "start_deactivated",
- "If true, ASan tweaks a bunch of other flags (quarantine, redzone, heap "
- "poisoning) to reduce memory consumption as much as possible, and "
- "restores them to original values when the first instrumented module is "
- "loaded into the process. This is mainly intended to be used on "
- "Android. ");
-
- ParseFlag(str, &f->detect_invalid_pointer_pairs,
- "detect_invalid_pointer_pairs",
- "If non-zero, try to detect operations like <, <=, >, >= and - on "
- "invalid pointer pairs (e.g. when pointers belong to different objects). "
- "The bigger the value the harder we try.");
-
- ParseFlag(str, &f->detect_container_overflow,
- "detect_container_overflow",
- "If true, honor the container overflow annotations. "
- "See https://code.google.com/p/address-sanitizer/wiki/ContainerOverflow");
-
- ParseFlag(str, &f->detect_odr_violation, "detect_odr_violation",
- "If >=2, detect violation of One-Definition-Rule (ODR); "
- "If ==1, detect ODR-violation only if the two variables "
- "have different sizes");
-
- ParseFlag(str, &f->dump_instruction_bytes, "dump_instruction_bytes",
- "If true, dump 16 bytes starting at the instruction that caused SEGV");
-}
-
-void InitializeFlags(Flags *f, const char *env) {
- CommonFlags *cf = common_flags();
- SetCommonFlagsDefaults(cf);
- cf->detect_leaks = CAN_SANITIZE_LEAKS;
- cf->external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH");
- cf->malloc_context_size = kDefaultMallocContextSize;
- cf->intercept_tls_get_addr = true;
- cf->coverage = false;
-
- internal_memset(f, 0, sizeof(*f));
- f->quarantine_size = (ASAN_LOW_MEMORY) ? 1UL << 26 : 1UL << 28;
- f->redzone = 16;
- f->max_redzone = 2048;
- f->debug = false;
- f->report_globals = 1;
- f->check_initialization_order = false;
- f->replace_str = true;
- f->replace_intrin = true;
- f->mac_ignore_invalid_free = false;
- f->detect_stack_use_after_return = false; // Also needs the compiler flag.
- f->min_uar_stack_size_log = 16; // We can't do smaller anyway.
- f->max_uar_stack_size_log = 20; // 1Mb per size class, i.e. ~11Mb per thread.
- f->uar_noreserve = false;
- f->max_malloc_fill_size = 0x1000; // By default, fill only the first 4K.
- f->malloc_fill_byte = 0xbe;
- f->exitcode = ASAN_DEFAULT_FAILURE_EXITCODE;
- f->allow_user_poisoning = true;
- f->sleep_before_dying = 0;
- f->check_malloc_usable_size = true;
- f->unmap_shadow_on_exit = false;
- f->abort_on_error = false;
- f->print_stats = false;
- f->print_legend = true;
- f->atexit = false;
- f->allow_reexec = true;
- f->print_full_thread_history = true;
- f->poison_heap = true;
- f->poison_array_cookie = true;
- f->poison_partial = true;
- // Turn off alloc/dealloc mismatch checker on Mac and Windows for now.
- // https://code.google.com/p/address-sanitizer/issues/detail?id=131
- // https://code.google.com/p/address-sanitizer/issues/detail?id=309
- // TODO(glider,timurrrr): Fix known issues and enable this back.
- f->alloc_dealloc_mismatch = (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0);
- f->new_delete_type_mismatch = true;
- f->strict_memcmp = true;
- f->strict_init_order = false;
- f->start_deactivated = false;
- f->detect_invalid_pointer_pairs = 0;
- f->detect_container_overflow = true;
- f->detect_odr_violation = 2;
- f->dump_instruction_bytes = false;
-
- // Override from compile definition.
- ParseFlagsFromString(f, MaybeUseAsanDefaultOptionsCompileDefinition());
-
- // Override from user-specified string.
- ParseFlagsFromString(f, MaybeCallAsanDefaultOptions());
- VReport(1, "Using the defaults from __asan_default_options: %s\n",
- MaybeCallAsanDefaultOptions());
-
- // Override from command line.
- ParseFlagsFromString(f, env);
- if (common_flags()->help) {
- PrintFlagDescriptions();
- }
-
- if (!CAN_SANITIZE_LEAKS && cf->detect_leaks) {
- Report("%s: detect_leaks is not supported on this platform.\n",
- SanitizerToolName);
- cf->detect_leaks = false;
- }
-
- // Make "strict_init_order" imply "check_initialization_order".
- // TODO(samsonov): Use a single runtime flag for an init-order checker.
- if (f->strict_init_order) {
- f->check_initialization_order = true;
- }
-}
-
-// Parse flags that may change between startup and activation.
-// On Android they come from a system property.
-// On other platforms this is no-op.
-void ParseExtraActivationFlags() {
- char buf[100];
- GetExtraActivationFlags(buf, sizeof(buf));
- ParseFlagsFromString(flags(), buf);
- if (buf[0] != '\0')
- VReport(1, "Extra activation flags: %s\n", buf);
-}
-
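
The hand-rolled ParseFlag/InitializeFlags code removed above is superseded by a declarative flag table. A self-contained X-macro sketch of that style (the macro and list names here are assumptions about the pattern, not verbatim from this commit):

#include <cstdio>

#define ASAN_FLAG_LIST(X)                                          \
  X(int, redzone, 16, "Minimal size (in bytes) of heap redzones.") \
  X(bool, poison_heap, true, "Poison heap memory on [de]allocation.")

struct Flags {
  // Expand the table once to declare one field per flag, with its default.
#define DECLARE_FIELD(Type, Name, Default, Desc) Type Name = Default;
  ASAN_FLAG_LIST(DECLARE_FIELD)
#undef DECLARE_FIELD
};

int main() {
  Flags f;
  // Expand the same table again to print the descriptions.
#define PRINT_FLAG(Type, Name, Default, Desc) \
  std::printf("%s (default %s): %s\n", #Name, #Default, Desc);
  ASAN_FLAG_LIST(PRINT_FLAG)
#undef PRINT_FLAG
  std::printf("redzone=%d poison_heap=%d\n", f.redzone, (int)f.poison_heap);
  return 0;
}
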
// -------------------------- Globals --------------------- {{{1
int asan_inited;
bool asan_init_is_running;
-void (*death_callback)(void);
#if !ASAN_FIXED_MAPPING
uptr kHighMemEnd, kMidMemBeg, kMidMemEnd;
@@ -344,7 +86,8 @@ void ShowStatsAndAbort() {
// ---------------------- mmap -------------------- {{{1
// Reserve memory range [beg, end].
-static void ReserveShadowMemoryRange(uptr beg, uptr end) {
+// We need to use inclusive range because end+1 may not be representable.
+void ReserveShadowMemoryRange(uptr beg, uptr end) {
CHECK_EQ((beg % GetPageSizeCached()), 0);
CHECK_EQ(((end + 1) % GetPageSizeCached()), 0);
uptr size = end - beg + 1;
@@ -355,6 +98,10 @@ static void ReserveShadowMemoryRange(uptr beg, uptr end) {
"Perhaps you're using ulimit -v\n", size);
Abort();
}
+ if (common_flags()->no_huge_pages_for_shadow)
+ NoHugePagesInRegion(beg, size);
+ if (common_flags()->use_madv_dontdump)
+ DontDumpShadowMemory(beg, size);
}
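
The inclusive [beg, end] convention matters because an exclusive bound can overflow: for a region ending at the top of a 64-bit address space, end + 1 wraps to zero while end - beg + 1 stays representable. A two-line demonstration:

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t beg = 0xffffffffffff0000ULL, end = 0xffffffffffffffffULL;
  // An exclusive bound would need end + 1, which wraps to 0 here;
  // the inclusive form keeps the size computable.
  std::printf("size=%llx end+1=%llx\n",
              (unsigned long long)(end - beg + 1),
              (unsigned long long)(end + 1));
  return 0;
}
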
// --------------- LowLevelAllocateCallback ---------- {{{1
@@ -500,7 +247,13 @@ static void InitializeHighMemEnd() {
}
static void ProtectGap(uptr a, uptr size) {
- CHECK_EQ(a, (uptr)Mprotect(a, size));
+ void *res = Mprotect(a, size);
+ if (a == (uptr)res)
+ return;
+ Report("ERROR: Failed to protect the shadow gap. "
+ "ASan cannot proceed correctly. ABORTING.\n");
+ DumpProcessMap();
+ Die();
}
static void PrintAddressSpaceLayout() {
@@ -539,7 +292,7 @@ static void PrintAddressSpaceLayout() {
Printf("\n");
Printf("redzone=%zu\n", (uptr)flags()->redzone);
Printf("max_redzone=%zu\n", (uptr)flags()->max_redzone);
- Printf("quarantine_size=%zuM\n", (uptr)flags()->quarantine_size >> 20);
+ Printf("quarantine_size_mb=%zuM\n", (uptr)flags()->quarantine_size_mb);
Printf("malloc_context_size=%zu\n",
(uptr)common_flags()->malloc_context_size);
@@ -561,8 +314,10 @@ static void AsanInitInternal() {
// Initialize flags. This must be done early, because most of the
// initialization steps look at flags().
- const char *options = GetEnv("ASAN_OPTIONS");
- InitializeFlags(flags(), options);
+ InitializeFlags(flags());
+
+ SetCanPoisonMemory(flags()->poison_heap);
+ SetMallocContextSize(common_flags()->malloc_context_size);
InitializeHighMemEnd();
@@ -574,20 +329,11 @@ static void AsanInitInternal() {
SetCheckFailedCallback(AsanCheckFailed);
SetPrintfAndReportCallback(AppendToErrorMessageBuffer);
- if (!flags()->start_deactivated)
- ParseExtraActivationFlags();
-
__sanitizer_set_report_path(common_flags()->log_path);
+
+ // Enable UAR detection, if required.
__asan_option_detect_stack_use_after_return =
flags()->detect_stack_use_after_return;
- CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
-
- if (options) {
- VReport(1, "Parsed ASAN_OPTIONS: %s\n", options);
- }
-
- if (flags()->start_deactivated)
- AsanStartDeactivated();
// Re-exec ourselves if we need to set additional env or command line args.
MaybeReexec();
@@ -618,8 +364,7 @@ static void AsanInitInternal() {
}
#endif
- if (common_flags()->verbosity)
- PrintAddressSpaceLayout();
+ if (Verbosity()) PrintAddressSpaceLayout();
DisableCoreDumperIfNecessary();
@@ -649,6 +394,8 @@ static void AsanInitInternal() {
} else {
Report("Shadow memory range interleaves with an existing memory mapping. "
"ASan cannot proceed correctly. ABORTING.\n");
+ Report("ASan shadow was supposed to be located in the [%p-%p] range.\n",
+ shadow_start, kHighShadowEnd);
DumpProcessMap();
Die();
}
@@ -656,7 +403,12 @@ static void AsanInitInternal() {
AsanTSDInit(PlatformTSDDtor);
InstallDeadlySignalHandlers(AsanOnSIGSEGV);
- InitializeAllocator();
+ AllocatorOptions allocator_options;
+ allocator_options.SetFrom(flags(), common_flags());
+ InitializeAllocator(allocator_options);
+
+ MaybeStartBackgroudThread();
+ SetSoftRssLimitExceededCallback(AsanSoftRssLimitExceededCallback);
// On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited
// should be set to 1 prior to initializing the threads.
@@ -666,10 +418,12 @@ static void AsanInitInternal() {
if (flags()->atexit)
Atexit(asan_atexit);
- if (common_flags()->coverage) {
- __sanitizer_cov_init();
- Atexit(__sanitizer_cov_dump);
- }
+ InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
+
+ // Now that ASan runtime is (mostly) initialized, deactivate it if
+ // necessary, so that it can be re-activated when requested.
+ if (flags()->start_deactivated)
+ AsanDeactivate();
// interceptors
InitTlsSize();
@@ -724,13 +478,6 @@ static AsanInitializer asan_initializer;
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
-#if !SANITIZER_SUPPORTS_WEAK_HOOKS
-extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-const char* __asan_default_options() { return ""; }
-} // extern "C"
-#endif
-
int NOINLINE __asan_set_error_exit_code(int exit_code) {
int old = flags()->exitcode;
flags()->exitcode = exit_code;
@@ -764,7 +511,7 @@ void NOINLINE __asan_handle_no_return() {
}
void NOINLINE __asan_set_death_callback(void (*callback)(void)) {
- death_callback = callback;
+ SetUserDieCallback(callback);
}
// Initialize as requested from instrumented application code.
diff --git a/lib/asan/asan_stack.cc b/lib/asan/asan_stack.cc
index 8188f3b5b6e9..cf7a587fa65a 100644
--- a/lib/asan/asan_stack.cc
+++ b/lib/asan/asan_stack.cc
@@ -13,6 +13,21 @@
//===----------------------------------------------------------------------===//
#include "asan_internal.h"
#include "asan_stack.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+
+namespace __asan {
+
+static atomic_uint32_t malloc_context_size;
+
+void SetMallocContextSize(u32 size) {
+ atomic_store(&malloc_context_size, size, memory_order_release);
+}
+
+u32 GetMallocContextSize() {
+ return atomic_load(&malloc_context_size, memory_order_acquire);
+}
+
+} // namespace __asan
// ------------------ Interface -------------- {{{1
diff --git a/lib/asan/asan_stack.h b/lib/asan/asan_stack.h
index a995256212e1..122967a152f8 100644
--- a/lib/asan/asan_stack.h
+++ b/lib/asan/asan_stack.h
@@ -21,6 +21,11 @@
namespace __asan {
+static const u32 kDefaultMallocContextSize = 30;
+
+void SetMallocContextSize(u32 size);
+u32 GetMallocContextSize();
+
// Get the stack trace with the given pc and bp.
// The pc will be in the position 0 of the resulting stack trace.
// The bp may refer to the current frame or to the caller's frame.
@@ -93,9 +98,8 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth,
#define GET_STACK_TRACE_THREAD \
GET_STACK_TRACE(kStackTraceMax, true)
-#define GET_STACK_TRACE_MALLOC \
- GET_STACK_TRACE(common_flags()->malloc_context_size, \
- common_flags()->fast_unwind_on_malloc)
+#define GET_STACK_TRACE_MALLOC \
+ GET_STACK_TRACE(GetMallocContextSize(), common_flags()->fast_unwind_on_malloc)
#define GET_STACK_TRACE_FREE GET_STACK_TRACE_MALLOC
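
GET_STACK_TRACE_MALLOC now reads its unwind depth through the same atomic get/set pattern used for poisoning above, so activation can retune it at runtime. A standalone approximation of the depth plumbing, with std::atomic standing in for the runtime's atomics and kStackTraceMax standing in for the runtime's cap:

#include <atomic>
#include <cstdint>

static const uint32_t kStackTraceMax = 256;
static std::atomic<uint32_t> malloc_context_size{30};  // default context size

void SetDepth(uint32_t size) {
  malloc_context_size.store(size, std::memory_order_release);
}

// Callers capture at most this many frames per malloc/free.
uint32_t EffectiveDepth() {
  uint32_t d = malloc_context_size.load(std::memory_order_acquire);
  return d < kStackTraceMax ? d : kStackTraceMax;
}
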
diff --git a/lib/asan/scripts/asan_device_setup b/lib/asan/scripts/asan_device_setup
index a620f51b0836..c143b9fab13c 100755
--- a/lib/asan/scripts/asan_device_setup
+++ b/lib/asan/scripts/asan_device_setup
@@ -87,6 +87,7 @@ if [[ x$device != x ]]; then
fi
echo '>> Remounting /system rw'
+$ADB wait-for-device
$ADB root
$ADB wait-for-device
$ADB remount
@@ -184,7 +185,7 @@ cp "$ASAN_RT_PATH/$ASAN_RT" "$TMPDIR/"
ASAN_OPTIONS=start_deactivated=1,alloc_dealloc_mismatch=0
# On Android-L not allowing user segv handler breaks some applications.
-if $ADB shell 'echo $LD_PRELOAD' | grep libsigchain.so >&/dev/null; then
+if [[ PRE_L -eq 0 ]]; then
ASAN_OPTIONS="$ASAN_OPTIONS,allow_user_segv_handler=1"
fi
diff --git a/lib/asan/scripts/asan_symbolize.py b/lib/asan/scripts/asan_symbolize.py
index 5fca136b6950..59fceaaed814 100755
--- a/lib/asan/scripts/asan_symbolize.py
+++ b/lib/asan/scripts/asan_symbolize.py
@@ -11,11 +11,9 @@ import argparse
import bisect
import getopt
import os
-import pty
import re
import subprocess
import sys
-import termios
symbolizers = {}
DEBUG = False
@@ -171,6 +169,9 @@ class UnbufferedLineConverter(object):
output. Uses pty to trick the child into providing unbuffered output.
"""
def __init__(self, args, close_stderr=False):
+ # Local imports so that the script can start on Windows.
+ import pty
+ import termios
pid, fd = pty.fork()
if pid == 0:
# We're the child. Transfer control to command.
@@ -341,17 +342,23 @@ class BreakpadSymbolizer(Symbolizer):
class SymbolizationLoop(object):
def __init__(self, binary_name_filter=None, dsym_hint_producer=None):
- # Used by clients who may want to supply a different binary name.
- # E.g. in Chrome several binaries may share a single .dSYM.
- self.binary_name_filter = binary_name_filter
- self.dsym_hint_producer = dsym_hint_producer
- self.system = os.uname()[0]
- if self.system not in ['Linux', 'Darwin', 'FreeBSD']:
- raise Exception('Unknown system')
- self.llvm_symbolizers = {}
- self.last_llvm_symbolizer = None
- self.dsym_hints = set([])
- self.frame_no = 0
+ if sys.platform == 'win32':
+ # ASan on Windows uses dbghelp.dll to symbolize in-process, which works
+ # even in sandboxed processes. Nothing needs to be done here.
+ self.process_line = self.process_line_echo
+ else:
+ # Used by clients who may want to supply a different binary name.
+ # E.g. in Chrome several binaries may share a single .dSYM.
+ self.binary_name_filter = binary_name_filter
+ self.dsym_hint_producer = dsym_hint_producer
+ self.system = os.uname()[0]
+ if self.system not in ['Linux', 'Darwin', 'FreeBSD']:
+ raise Exception('Unknown system')
+ self.llvm_symbolizers = {}
+ self.last_llvm_symbolizer = None
+ self.dsym_hints = set([])
+ self.frame_no = 0
+ self.process_line = self.process_line_posix
def symbolize_address(self, addr, binary, offset):
# On non-Darwin (i.e. on platforms without .dSYM debug info) always use
@@ -366,12 +373,12 @@ class SymbolizationLoop(object):
# 3. otherwise create a new symbolizer and pass all currently known
# .dSYM hints to it.
if not binary in self.llvm_symbolizers:
- use_last_symbolizer = True
+ use_new_symbolizer = True
if self.system == 'Darwin' and self.dsym_hint_producer:
dsym_hints_for_binary = set(self.dsym_hint_producer(binary))
- use_last_symbolizer = bool(dsym_hints_for_binary - self.dsym_hints)
+ use_new_symbolizer = bool(dsym_hints_for_binary - self.dsym_hints)
self.dsym_hints |= dsym_hints_for_binary
- if self.last_llvm_symbolizer and use_last_symbolizer:
+ if self.last_llvm_symbolizer and not use_new_symbolizer:
self.llvm_symbolizers[binary] = self.last_llvm_symbolizer
else:
self.last_llvm_symbolizer = LLVMSymbolizerFactory(
@@ -405,14 +412,14 @@ class SymbolizationLoop(object):
def process_logfile(self):
self.frame_no = 0
- while True:
- line = logfile.readline()
- if not line:
- break
+ for line in logfile:
processed = self.process_line(line)
print '\n'.join(processed)
- def process_line(self, line):
+ def process_line_echo(self, line):
+ return [line.rstrip()]
+
+ def process_line_posix(self, line):
self.current_line = line.rstrip()
#0 0x7f6e35cf2e45 (/blah/foo.so+0x11fe45)
stack_trace_line_format = (
@@ -437,20 +444,23 @@ class SymbolizationLoop(object):
if __name__ == '__main__':
- parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
- description='ASan symbolization script',
- epilog='''Example of use:
- asan_symbolize.py -c "$HOME/opt/cross/bin/arm-linux-gnueabi-" -s "$HOME/SymbolFiles" < asan.log''')
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description='ASan symbolization script',
+ epilog='Example of use:\n'
+ 'asan_symbolize.py -c "$HOME/opt/cross/bin/arm-linux-gnueabi-" '
+ '-s "$HOME/SymbolFiles" < asan.log')
parser.add_argument('path_to_cut', nargs='*',
- help='pattern to be cut from the result file path ')
+ help='pattern to be cut from the result file path ')
parser.add_argument('-d','--demangle', action='store_true',
- help='demangle function names')
+ help='demangle function names')
parser.add_argument('-s', metavar='SYSROOT',
- help='set path to sysroot for sanitized binaries')
+ help='set path to sysroot for sanitized binaries')
parser.add_argument('-c', metavar='CROSS_COMPILE',
- help='set prefix for binutils')
- parser.add_argument('-l','--logfile', default=sys.stdin, type=argparse.FileType('r'),
- help='set log file name to parse, default is stdin')
+ help='set prefix for binutils')
+ parser.add_argument('-l','--logfile', default=sys.stdin,
+ type=argparse.FileType('r'),
+ help='set log file name to parse, default is stdin')
args = parser.parse_args()
if args.path_to_cut:
fix_filename_patterns = args.path_to_cut
diff --git a/lib/asan/tests/CMakeLists.txt b/lib/asan/tests/CMakeLists.txt
index 7b363714e8a5..513d1282f8a6 100644
--- a/lib/asan/tests/CMakeLists.txt
+++ b/lib/asan/tests/CMakeLists.txt
@@ -30,7 +30,8 @@ set(ASAN_UNITTEST_COMMON_CFLAGS
-fno-rtti
-O2
-Wno-format
- -Werror=sign-compare)
+ -Werror=sign-compare
+ -Wno-non-virtual-dtor)
append_list_if(COMPILER_RT_HAS_WVARIADIC_MACROS_FLAG -Wno-variadic-macros ASAN_UNITTEST_COMMON_CFLAGS)
# -gline-tables-only must be enough for ASan, so use it if possible.
@@ -46,6 +47,11 @@ list(APPEND ASAN_UNITTEST_COMMON_CFLAGS
-DASAN_HAS_EXCEPTIONS=1
-DASAN_UAR=0)
+if(APPLE)
+ list(APPEND ASAN_UNITTEST_COMMON_CFLAGS ${DARWIN_osx_CFLAGS})
+ list(APPEND ASAN_UNITTEST_COMMON_LINKFLAGS ${DARWIN_osx_LINKFLAGS})
+endif()
+
set(ASAN_BLACKLIST_FILE "${CMAKE_CURRENT_SOURCE_DIR}/asan_test.ignore")
set(ASAN_UNITTEST_INSTRUMENTED_CFLAGS
${ASAN_UNITTEST_COMMON_CFLAGS}
@@ -117,7 +123,7 @@ endmacro()
# Link ASan unit test for a given architecture from a set
# of objects in with given linker flags.
macro(add_asan_test test_suite test_name arch kind)
- parse_arguments(TEST "OBJECTS;LINKFLAGS" "WITH_TEST_RUNTIME" ${ARGN})
+ parse_arguments(TEST "OBJECTS;LINKFLAGS;SUBDIR" "WITH_TEST_RUNTIME" ${ARGN})
get_target_flags_for_arch(${arch} TARGET_LINK_FLAGS)
set(TEST_DEPS ${TEST_OBJECTS})
if(NOT COMPILER_RT_STANDALONE_BUILD)
@@ -132,6 +138,7 @@ macro(add_asan_test test_suite test_name arch kind)
endif()
endif()
add_compiler_rt_test(${test_suite} ${test_name}
+ SUBDIR ${TEST_SUBDIR}
OBJECTS ${TEST_OBJECTS}
DEPS ${TEST_DEPS}
LINK_FLAGS ${TEST_LINKFLAGS}
@@ -141,6 +148,11 @@ endmacro()
# Main AddressSanitizer unit tests.
add_custom_target(AsanUnitTests)
set_target_properties(AsanUnitTests PROPERTIES FOLDER "ASan unit tests")
+# AddressSanitizer unit tests with dynamic runtime (on platforms where it's
+# not the default).
+add_custom_target(AsanDynamicUnitTests)
+set_target_properties(AsanDynamicUnitTests
+ PROPERTIES FOLDER "ASan unit tests with dynamic runtime")
# ASan benchmarks (not actively used now).
add_custom_target(AsanBenchmarks)
set_target_properties(AsanBenchmarks PROPERTIES FOLDER "Asan benchmarks")
@@ -182,11 +194,15 @@ macro(add_asan_tests_for_arch_and_kind arch kind)
asan_compile(ASAN_INST_TEST_OBJECTS asan_mac_test_helpers.mm ${arch} ${kind}
${ASAN_UNITTEST_INSTRUMENTED_CFLAGS} -ObjC ${ARGN})
endif()
- add_asan_test(AsanUnitTests "Asan-${arch}${kind}-Test" ${arch} ${kind}
+ file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/default")
+ add_asan_test(AsanUnitTests "Asan-${arch}${kind}-Test"
+ ${arch} ${kind} SUBDIR "default"
OBJECTS ${ASAN_INST_TEST_OBJECTS}
LINKFLAGS ${ASAN_UNITTEST_INSTRUMENTED_LINKFLAGS})
- if(COMPILER_RT_BUILD_SHARED_ASAN)
- add_asan_test(AsanUnitTests "Asan-${arch}${kind}-Dynamic-Test" ${arch} ${kind}
+ if(COMPILER_RT_ASAN_HAS_STATIC_RUNTIME)
+ file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/dynamic")
+ add_asan_test(AsanDynamicUnitTests "Asan-${arch}${kind}-Dynamic-Test"
+ ${arch} ${kind} SUBDIR "dynamic"
OBJECTS ${ASAN_INST_TEST_OBJECTS}
LINKFLAGS ${ASAN_DYNAMIC_UNITTEST_INSTRUMENTED_LINKFLAGS})
endif()
@@ -220,7 +236,8 @@ macro(add_asan_tests_for_arch_and_kind arch kind)
asan_compile(ASAN_NOINST_TEST_OBJECTS ${src} ${arch} ${kind}
${ASAN_UNITTEST_COMMON_CFLAGS} ${ARGN})
endforeach()
- add_asan_test(AsanUnitTests "Asan-${arch}${kind}-Noinst-Test" ${arch} ${kind}
+ add_asan_test(AsanUnitTests "Asan-${arch}${kind}-Noinst-Test"
+ ${arch} ${kind} SUBDIR "default"
OBJECTS ${ASAN_NOINST_TEST_OBJECTS}
LINKFLAGS ${ASAN_UNITTEST_NOINST_LINKFLAGS}
WITH_TEST_RUNTIME)
@@ -231,14 +248,10 @@ macro(add_asan_tests_for_arch_and_kind arch kind)
asan_compile(ASAN_BENCHMARKS_OBJECTS ${src} ${arch} ${kind}
${ASAN_UNITTEST_INSTRUMENTED_CFLAGS} ${ARGN})
endforeach()
- add_asan_test(AsanBenchmarks "Asan-${arch}${kind}-Benchmark" ${arch} ${kind}
+ add_asan_test(AsanBenchmarks "Asan-${arch}${kind}-Benchmark"
+ ${arch} ${kind} SUBDIR "default"
OBJECTS ${ASAN_BENCHMARKS_OBJECTS}
LINKFLAGS ${ASAN_UNITTEST_INSTRUMENTED_LINKFLAGS})
- if(COMPILER_RT_BUILD_SHARED_ASAN)
- add_asan_test(AsanBenchmarks "Asan-${arch}${kind}-Dynamic-Benchmark" ${arch} ${kind}
- OBJECTS ${ASAN_BENCHMARKS_OBJECTS}
- LINKFLAGS ${ASAN_DYNAMIC_UNITTEST_INSTRUMENTED_LINKFLAGS})
- endif()
endmacro()
if(COMPILER_RT_CAN_EXECUTE_TESTS AND NOT ANDROID)
diff --git a/lib/asan/tests/asan_interface_test.cc b/lib/asan/tests/asan_interface_test.cc
index 50fdf1119f0b..a34c8528eae0 100644
--- a/lib/asan/tests/asan_interface_test.cc
+++ b/lib/asan/tests/asan_interface_test.cc
@@ -87,7 +87,7 @@ TEST(AddressSanitizerInterface, GetCurrentAllocatedBytesTest) {
}
TEST(AddressSanitizerInterface, GetHeapSizeTest) {
- // asan_allocator2 does not keep huge chunks in free list, but unmaps them.
+ // ASan allocator does not keep huge chunks in free list, but unmaps them.
// The chunk should be greater than the quarantine size,
// otherwise it will be stuck in quarantine instead of being unmapped.
static const size_t kLargeMallocSize = (1 << 28) + 1; // 256M
diff --git a/lib/asan/tests/asan_noinst_test.cc b/lib/asan/tests/asan_noinst_test.cc
index bb6af45bddf9..c469d62a6766 100644
--- a/lib/asan/tests/asan_noinst_test.cc
+++ b/lib/asan/tests/asan_noinst_test.cc
@@ -31,15 +31,6 @@
// in this test. The static runtime library is linked explicitly (without
// -fsanitize=address), thus the interceptors do not work correctly on OS X.
-#if !defined(_WIN32)
-extern "C" {
-// Set specific ASan options for uninstrumented unittest.
-const char* __asan_default_options() {
- return "allow_reexec=0";
-}
-} // extern "C"
-#endif
-
// Make sure __asan_init is called before any test case is run.
struct AsanInitCaller {
AsanInitCaller() { __asan_init(); }
diff --git a/lib/asan/tests/asan_test.cc b/lib/asan/tests/asan_test.cc
index 67bcbaca1e40..952b05e21fe2 100644
--- a/lib/asan/tests/asan_test.cc
+++ b/lib/asan/tests/asan_test.cc
@@ -603,7 +603,8 @@ NOINLINE void SigLongJmpFunc1(sigjmp_buf buf) {
}
#if !defined(__ANDROID__) && !defined(__arm__) && \
- !defined(__powerpc64__) && !defined(__powerpc__)
+ !defined(__powerpc64__) && !defined(__powerpc__) && \
+ !defined(__aarch64__)
// Does not work on Power and ARM:
// https://code.google.com/p/address-sanitizer/issues/detail?id=185
TEST(AddressSanitizer, BuiltinLongJmpTest) {
@@ -1284,3 +1285,33 @@ TEST(AddressSanitizer, pthread_getschedparam) {
ASSERT_EQ(0, res);
}
#endif
+
+#if SANITIZER_TEST_HAS_PRINTF_L
+static int vsnprintf_l_wrapper(char *s, size_t n,
+ locale_t l, const char *format, ...) {
+ va_list va;
+ va_start(va, format);
+ int res = vsnprintf_l(s, n, l, format, va);
+ va_end(va);
+ return res;
+}
+
+TEST(AddressSanitizer, snprintf_l) {
+ char buff[5];
+ // Check that snprintf_l() works fine with Asan.
+ int res = snprintf_l(buff, 5,
+ _LIBCPP_GET_C_LOCALE, "%s", "snprintf_l()");
+ EXPECT_EQ(12, res);
+ // Check that vsnprintf_l() works fine with Asan.
+ res = vsnprintf_l_wrapper(buff, 5,
+ _LIBCPP_GET_C_LOCALE, "%s", "vsnprintf_l()");
+ EXPECT_EQ(13, res);
+
+ EXPECT_DEATH(snprintf_l(buff, 10,
+ _LIBCPP_GET_C_LOCALE, "%s", "snprintf_l()"),
+ "AddressSanitizer: stack-buffer-overflow");
+ EXPECT_DEATH(vsnprintf_l_wrapper(buff, 10,
+ _LIBCPP_GET_C_LOCALE, "%s", "vsnprintf_l()"),
+ "AddressSanitizer: stack-buffer-overflow");
+}
+#endif
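
The expected values in this test rely on C99 snprintf semantics: the return value is the length the formatted string would have had, independent of truncation, which is why res is 12 and 13 even though buff holds only 5 bytes. A plain-snprintf illustration of the same point (no locale variant needed):

#include <cstdio>

int main() {
  char buff[5];
  int res = std::snprintf(buff, sizeof(buff), "%s", "snprintf_l()");
  // res == 12; buff now holds "snpr" plus the terminator.
  std::printf("res=%d buff=%s\n", res, buff);
  return 0;
}
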