Diffstat (limited to 'lib/sanitizer_common')
-rw-r--r--  lib/sanitizer_common/.clang-tidy | 4
-rw-r--r--  lib/sanitizer_common/CMakeLists.txt | 21
-rw-r--r--  lib/sanitizer_common/sanitizer_addrhashmap.h | 12
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator.cc | 34
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator.h | 1452
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_bytemap.h | 103
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_combined.h | 233
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_interface.h | 4
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_internal.h | 4
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_local_cache.h | 249
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_primary32.h | 310
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_primary64.h | 522
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_secondary.h | 282
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_size_class_map.h | 217
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_stats.h | 107
-rw-r--r--  lib/sanitizer_common/sanitizer_atomic.h | 5
-rw-r--r--  lib/sanitizer_common/sanitizer_common.cc | 25
-rw-r--r--  lib/sanitizer_common/sanitizer_common.h | 96
-rw-r--r--  lib/sanitizer_common/sanitizer_common_interceptors.inc | 330
-rw-r--r--  lib/sanitizer_common/sanitizer_common_interceptors_format.inc | 7
-rwxr-xr-x  lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc | 3
-rw-r--r--  lib/sanitizer_common/sanitizer_common_libcdep.cc | 22
-rw-r--r--  lib/sanitizer_common/sanitizer_common_nolibc.cc | 8
-rw-r--r--  lib/sanitizer_common/sanitizer_coverage_libcdep.cc | 45
-rw-r--r--  lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc | 165
-rw-r--r--  lib/sanitizer_common/sanitizer_dbghelp.h | 42
-rw-r--r--  lib/sanitizer_common/sanitizer_flags.cc | 5
-rw-r--r--  lib/sanitizer_common/sanitizer_flags.inc | 21
-rw-r--r--  lib/sanitizer_common/sanitizer_interface_internal.h | 11
-rw-r--r--  lib/sanitizer_common/sanitizer_internal_defs.h | 23
-rw-r--r--  lib/sanitizer_common/sanitizer_libc.h | 10
-rw-r--r--  lib/sanitizer_common/sanitizer_libignore.cc | 31
-rw-r--r--  lib/sanitizer_common/sanitizer_linux.cc | 24
-rw-r--r--  lib/sanitizer_common/sanitizer_linux.h | 2
-rw-r--r--  lib/sanitizer_common/sanitizer_linux_libcdep.cc | 9
-rw-r--r--  lib/sanitizer_common/sanitizer_linux_mips64.S | 23
-rw-r--r--  lib/sanitizer_common/sanitizer_mac.cc | 126
-rw-r--r--  lib/sanitizer_common/sanitizer_mac.h | 6
-rw-r--r--  lib/sanitizer_common/sanitizer_malloc_mac.inc | 36
-rw-r--r--  lib/sanitizer_common/sanitizer_platform.h | 8
-rw-r--r--  lib/sanitizer_common/sanitizer_platform_interceptors.h | 12
-rw-r--r--  lib/sanitizer_common/sanitizer_platform_limits_linux.cc | 6
-rw-r--r--  lib/sanitizer_common/sanitizer_platform_limits_posix.cc | 13
-rw-r--r--  lib/sanitizer_common/sanitizer_platform_limits_posix.h | 93
-rw-r--r--  lib/sanitizer_common/sanitizer_posix_libcdep.cc | 25
-rw-r--r--  lib/sanitizer_common/sanitizer_printf.cc | 2
-rw-r--r--  lib/sanitizer_common/sanitizer_procmaps.h | 13
-rw-r--r--  lib/sanitizer_common/sanitizer_procmaps_freebsd.cc | 4
-rw-r--r--  lib/sanitizer_common/sanitizer_procmaps_linux.cc | 4
-rw-r--r--  lib/sanitizer_common/sanitizer_procmaps_mac.cc | 91
-rw-r--r--  lib/sanitizer_common/sanitizer_quarantine.h | 1
-rw-r--r--  lib/sanitizer_common/sanitizer_stackdepot.cc | 6
-rw-r--r--  lib/sanitizer_common/sanitizer_stacktrace_libcdep.cc | 57
-rw-r--r--  lib/sanitizer_common/sanitizer_stacktrace_printer.cc | 29
-rw-r--r--  lib/sanitizer_common/sanitizer_stacktrace_printer.h | 7
-rw-r--r--  lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc | 8
-rw-r--r--  lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc | 32
-rw-r--r--  lib/sanitizer_common/sanitizer_symbolizer_mac.cc | 7
-rw-r--r--  lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc | 113
-rw-r--r--  lib/sanitizer_common/sanitizer_symbolizer_win.cc | 40
-rw-r--r--  lib/sanitizer_common/sanitizer_thread_registry.cc | 2
-rw-r--r--  lib/sanitizer_common/sanitizer_win.cc | 92
-rwxr-xr-x  lib/sanitizer_common/scripts/gen_dynamic_list.py | 8
-rw-r--r--  lib/sanitizer_common/symbolizer/sanitizer_symbolize.cc | 72
-rw-r--r--  lib/sanitizer_common/symbolizer/sanitizer_wrappers.cc | 175
-rwxr-xr-x  lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh | 187
-rw-r--r--  lib/sanitizer_common/symbolizer/scripts/global_symbols.txt | 137
-rw-r--r--  lib/sanitizer_common/tests/CMakeLists.txt | 5
-rw-r--r--  lib/sanitizer_common/tests/malloc_stress_transfer_test.cc | 37
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_allocator_test.cc | 267
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_allocator_testlib.cc | 37
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_common_test.cc | 53
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_format_interceptor_test.cc | 4
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_libc_test.cc | 2
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_nolibc_test_main.cc | 2
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_procmaps_test.cc | 21
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_test_main.cc | 2
77 files changed, 4464 insertions, 1839 deletions
diff --git a/lib/sanitizer_common/.clang-tidy b/lib/sanitizer_common/.clang-tidy
index aa695cc924a4..6c71abff0d38 100644
--- a/lib/sanitizer_common/.clang-tidy
+++ b/lib/sanitizer_common/.clang-tidy
@@ -8,5 +8,9 @@ CheckOptions:
value: CamelCase
- key: readability-identifier-naming.UnionCase
value: CamelCase
+ - key: readability-identifier-naming.GlobalConstantCase
+ value: CamelCase
+ - key: readability-identifier-naming.GlobalConstantPrefix
+ value: "k"
- key: readability-identifier-naming.VariableCase
value: lower_case
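
The two added keys extend the existing readability-identifier-naming checks so that global constants must be CamelCase with a "k" prefix, per sanitizer convention. A minimal sketch of what the check accepts and flags (identifiers are hypothetical; uptr stands in for the sanitizer typedef):

    typedef unsigned long uptr;
    static const uptr kMaxRegionSize = 1UL << 20;   // OK: "k" prefix + CamelCase
    static const uptr max_region_size = 1UL << 20;  // flagged: global constant, wrong case/prefix
    void f() {
      uptr bytes_in_use = 0;  // OK: ordinary variables stay lower_case
      (void)bytes_in_use;
    }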
diff --git a/lib/sanitizer_common/CMakeLists.txt b/lib/sanitizer_common/CMakeLists.txt
index 4af0009196e8..0d9a7f067a77 100644
--- a/lib/sanitizer_common/CMakeLists.txt
+++ b/lib/sanitizer_common/CMakeLists.txt
@@ -37,6 +37,8 @@ set(SANITIZER_SOURCES_NOTERMINATION
if(UNIX AND NOT APPLE)
list(APPEND SANITIZER_SOURCES_NOTERMINATION
sanitizer_linux_x86_64.S)
+ list(APPEND SANITIZER_SOURCES_NOTERMINATION
+ sanitizer_linux_mips64.S)
endif()
set(SANITIZER_SOURCES
@@ -51,6 +53,7 @@ set(SANITIZER_NOLIBC_SOURCES
set(SANITIZER_LIBCDEP_SOURCES
sanitizer_common_libcdep.cc
sanitizer_coverage_libcdep.cc
+ sanitizer_coverage_libcdep_new.cc
sanitizer_coverage_mapping_libcdep.cc
sanitizer_linux_libcdep.cc
sanitizer_posix_libcdep.cc
@@ -66,8 +69,16 @@ set(SANITIZER_LIBCDEP_SOURCES
set(SANITIZER_HEADERS
sanitizer_addrhashmap.h
sanitizer_allocator.h
+ sanitizer_allocator_bytemap.h
+ sanitizer_allocator_combined.h
sanitizer_allocator_interface.h
sanitizer_allocator_internal.h
+ sanitizer_allocator_local_cache.h
+ sanitizer_allocator_primary32.h
+ sanitizer_allocator_primary64.h
+ sanitizer_allocator_secondary.h
+ sanitizer_allocator_size_class_map.h
+ sanitizer_allocator_stats.h
sanitizer_atomic.h
sanitizer_atomic_clang.h
sanitizer_atomic_msvc.h
@@ -118,14 +129,6 @@ set(SANITIZER_HEADERS
set(SANITIZER_COMMON_DEFINITIONS)
-if(MSVC)
- list(APPEND SANITIZER_COMMON_DEFINITIONS
- SANITIZER_NEEDS_SEGV=0)
-else()
- list(APPEND SANITIZER_COMMON_DEFINITIONS
- SANITIZER_NEEDS_SEGV=1)
-endif()
-
include(CheckIncludeFile)
append_have_file_definition(rpc/xdr.h HAVE_RPC_XDR_H SANITIZER_COMMON_DEFINITIONS)
append_have_file_definition(tirpc/rpc/xdr.h HAVE_TIRPC_RPC_XDR_H SANITIZER_COMMON_DEFINITIONS)
@@ -147,6 +150,8 @@ if (LLVM_ENABLE_PEDANTIC AND UNIX AND NOT APPLE)
# CMAKE_C*_FLAGS and re-add as a source property to all the non-.S files).
set_source_files_properties(sanitizer_linux_x86_64.S
PROPERTIES COMPILE_FLAGS "-w")
+ set_source_files_properties(sanitizer_linux_mips64.S
+ PROPERTIES COMPILE_FLAGS "-w")
endif ()
if(APPLE)
diff --git a/lib/sanitizer_common/sanitizer_addrhashmap.h b/lib/sanitizer_common/sanitizer_addrhashmap.h
index e55fc4f95a9a..2ca3c405bff3 100644
--- a/lib/sanitizer_common/sanitizer_addrhashmap.h
+++ b/lib/sanitizer_common/sanitizer_addrhashmap.h
@@ -73,6 +73,8 @@ class AddrHashMap {
~Handle();
T *operator->();
+ T &operator*();
+ const T &operator*() const;
bool created() const;
bool exists() const;
@@ -136,6 +138,16 @@ T *AddrHashMap<T, kSize>::Handle::operator->() {
return &cell_->val;
}
+template <typename T, uptr kSize>
+const T &AddrHashMap<T, kSize>::Handle::operator*() const {
+ return cell_->val;
+}
+
+template <typename T, uptr kSize>
+T &AddrHashMap<T, kSize>::Handle::operator*() {
+ return cell_->val;
+}
+
template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::created() const {
return created_;
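
The added operator*() overloads let a Handle be read or assigned as a whole value instead of member-by-member through operator->(). A hedged usage sketch (Counter and Bump are hypothetical, not part of the patch):

    struct Counter { uptr hits; };
    typedef AddrHashMap<Counter, 11> CounterMap;

    void Bump(CounterMap *m, uptr addr) {
      CounterMap::Handle h(m, addr);
      if (h.created())
        *h = Counter{1};   // whole-value write via the new operator*()
      else
        h->hits++;         // field access still uses operator->()
    }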
diff --git a/lib/sanitizer_common/sanitizer_allocator.cc b/lib/sanitizer_common/sanitizer_allocator.cc
index df298c62271d..d47b5b41413c 100644
--- a/lib/sanitizer_common/sanitizer_allocator.cc
+++ b/lib/sanitizer_common/sanitizer_allocator.cc
@@ -13,27 +13,33 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_allocator.h"
+
#include "sanitizer_allocator_internal.h"
+#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
namespace __sanitizer {
// ThreadSanitizer for Go uses libc malloc/free.
-#if defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
+#if SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
+# if !SANITIZER_GO
extern "C" void *__libc_memalign(uptr alignment, uptr size);
+# endif
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
# else
# include <stdlib.h>
# define __libc_malloc malloc
+# if !SANITIZER_GO
static void *__libc_memalign(uptr alignment, uptr size) {
void *p;
uptr error = posix_memalign(&p, alignment, size);
if (error) return nullptr;
return p;
}
+# endif
# define __libc_realloc realloc
# define __libc_free free
# endif
@@ -41,10 +47,20 @@ static void *__libc_memalign(uptr alignment, uptr size) {
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
uptr alignment) {
(void)cache;
+#if !SANITIZER_GO
if (alignment == 0)
return __libc_malloc(size);
else
return __libc_memalign(alignment, size);
+#else
+ // Windows does not provide __libc_memalign/posix_memalign. It provides
+ // _aligned_malloc, but the allocated blocks can't be passed to free;
+ // they need to be passed to _aligned_free. The InternalAlloc interface
+ // does not account for such a requirement. Alignment does not seem to be
+ // used anywhere in the runtime, so just call __libc_malloc for now.
+ DCHECK_EQ(alignment, 0);
+ return __libc_malloc(size);
+#endif
}
static void *RawInternalRealloc(void *ptr, uptr size,
@@ -62,7 +78,7 @@ InternalAllocator *internal_allocator() {
return 0;
}
-#else // defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
+#else // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
@@ -78,7 +94,8 @@ InternalAllocator *internal_allocator() {
SpinMutexLock l(&internal_alloc_init_mu);
if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
0) {
- internal_allocator_instance->Init(/* may_return_null*/ false);
+ internal_allocator_instance->Init(
+ /* may_return_null */ false, kReleaseToOSIntervalNever);
atomic_store(&internal_allocator_initialized, 1, memory_order_release);
}
}
@@ -115,7 +132,7 @@ static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
internal_allocator()->Deallocate(cache, ptr);
}
-#endif // defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
+#endif // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
@@ -145,7 +162,7 @@ void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
if (CallocShouldReturnNullDueToOverflow(count, size))
- return internal_allocator()->ReturnNullOrDie();
+ return internal_allocator()->ReturnNullOrDieOnBadRequest();
void *p = InternalAlloc(count * size, cache);
if (p) internal_memset(p, 0, count * size);
return p;
@@ -192,7 +209,12 @@ bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
return (max / size) < n;
}
-void NORETURN ReportAllocatorCannotReturnNull() {
+static atomic_uint8_t reporting_out_of_memory = {0};
+
+bool IsReportingOOM() { return atomic_load_relaxed(&reporting_out_of_memory); }
+
+void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory) {
+ if (out_of_memory) atomic_store_relaxed(&reporting_out_of_memory, 1);
Report("%s's allocator is terminating the process instead of returning 0\n",
SanitizerToolName);
Report("If you don't like this behavior set allocator_may_return_null=1\n");
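
With this change the calloc overflow path dies via ReturnNullOrDieOnBadRequest() rather than the generic ReturnNullOrDie(), and genuine out-of-memory deaths become observable through IsReportingOOM(). The overflow predicate itself is plain arithmetic; a standalone sketch of the same (max / size) < n test, assuming a 64-bit uptr:

    #include <cstdint>
    // Mirrors CallocShouldReturnNullDueToOverflow(): true if n * size would
    // not fit in a 64-bit word, without performing the multiplication.
    bool CallocWouldOverflow(uint64_t size, uint64_t n) {
      if (size == 0) return false;        // 0 * n never overflows
      uint64_t max = ~(uint64_t)0;
      return (max / size) < n;
    }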
diff --git a/lib/sanitizer_common/sanitizer_allocator.h b/lib/sanitizer_common/sanitizer_allocator.h
index f0f002004709..9a37a2f2145f 100644
--- a/lib/sanitizer_common/sanitizer_allocator.h
+++ b/lib/sanitizer_common/sanitizer_allocator.h
@@ -20,271 +20,16 @@
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"
#include "sanitizer_lfstack.h"
+#include "sanitizer_procmaps.h"
namespace __sanitizer {
-// Prints error message and kills the program.
-void NORETURN ReportAllocatorCannotReturnNull();
-
-// SizeClassMap maps allocation sizes into size classes and back.
-// Class 0 corresponds to size 0.
-// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
-// Next 4 classes: 256 + i * 64 (i = 1 to 4).
-// Next 4 classes: 512 + i * 128 (i = 1 to 4).
-// ...
-// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).
-// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
-//
-// This structure of the size class map gives us:
-// - Efficient table-free class-to-size and size-to-class functions.
-// - Difference between two consecutive size classes is between 14% and 25%
-//
-// This class also gives a hint to a thread-caching allocator about the amount
-// of chunks that need to be cached per-thread:
-// - kMaxNumCached is the maximal number of chunks per size class.
-// - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
-//
-// Part of output of SizeClassMap::Print():
-// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
-// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
-// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2
-// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3
-// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4
-// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5
-// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6
-// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7
-//
-// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8
-// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9
-// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10
-// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11
-// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12
-// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13
-// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14
-// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
-//
-// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
-// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17
-// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18
-// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19
-//
-// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20
-// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21
-// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22
-// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23
-//
-// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24
-// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25
-// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26
-// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27
-//
-// ...
-//
-// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48
-// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49
-// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50
-// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51
-//
-// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52
-
-template <uptr kMaxSizeLog, uptr kMaxNumCachedT, uptr kMaxBytesCachedLog>
-class SizeClassMap {
- static const uptr kMinSizeLog = 4;
- static const uptr kMidSizeLog = kMinSizeLog + 4;
- static const uptr kMinSize = 1 << kMinSizeLog;
- static const uptr kMidSize = 1 << kMidSizeLog;
- static const uptr kMidClass = kMidSize / kMinSize;
- static const uptr S = 2;
- static const uptr M = (1 << S) - 1;
-
- public:
- static const uptr kMaxNumCached = kMaxNumCachedT;
- // We transfer chunks between central and thread-local free lists in batches.
- // For small size classes we allocate batches separately.
- // For large size classes we use one of the chunks to store the batch.
- struct TransferBatch {
- TransferBatch *next;
- uptr count;
- void *batch[kMaxNumCached];
- };
-
- static const uptr kMaxSize = 1UL << kMaxSizeLog;
- static const uptr kNumClasses =
- kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1;
- COMPILER_CHECK(kNumClasses >= 32 && kNumClasses <= 256);
- static const uptr kNumClassesRounded =
- kNumClasses == 32 ? 32 :
- kNumClasses <= 64 ? 64 :
- kNumClasses <= 128 ? 128 : 256;
-
- static uptr Size(uptr class_id) {
- if (class_id <= kMidClass)
- return kMinSize * class_id;
- class_id -= kMidClass;
- uptr t = kMidSize << (class_id >> S);
- return t + (t >> S) * (class_id & M);
- }
-
- static uptr ClassID(uptr size) {
- if (size <= kMidSize)
- return (size + kMinSize - 1) >> kMinSizeLog;
- if (size > kMaxSize) return 0;
- uptr l = MostSignificantSetBitIndex(size);
- uptr hbits = (size >> (l - S)) & M;
- uptr lbits = size & ((1 << (l - S)) - 1);
- uptr l1 = l - kMidSizeLog;
- return kMidClass + (l1 << S) + hbits + (lbits > 0);
- }
-
- static uptr MaxCached(uptr class_id) {
- if (class_id == 0) return 0;
- uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id);
- return Max<uptr>(1, Min(kMaxNumCached, n));
- }
-
- static void Print() {
- uptr prev_s = 0;
- uptr total_cached = 0;
- for (uptr i = 0; i < kNumClasses; i++) {
- uptr s = Size(i);
- if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
- Printf("\n");
- uptr d = s - prev_s;
- uptr p = prev_s ? (d * 100 / prev_s) : 0;
- uptr l = s ? MostSignificantSetBitIndex(s) : 0;
- uptr cached = MaxCached(i) * s;
- Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
- "cached: %zd %zd; id %zd\n",
- i, Size(i), d, p, l, MaxCached(i), cached, ClassID(s));
- total_cached += cached;
- prev_s = s;
- }
- Printf("Total cached: %zd\n", total_cached);
- }
-
- static bool SizeClassRequiresSeparateTransferBatch(uptr class_id) {
- return Size(class_id) < sizeof(TransferBatch) -
- sizeof(uptr) * (kMaxNumCached - MaxCached(class_id));
- }
-
- static void Validate() {
- for (uptr c = 1; c < kNumClasses; c++) {
- // Printf("Validate: c%zd\n", c);
- uptr s = Size(c);
- CHECK_NE(s, 0U);
- CHECK_EQ(ClassID(s), c);
- if (c != kNumClasses - 1)
- CHECK_EQ(ClassID(s + 1), c + 1);
- CHECK_EQ(ClassID(s - 1), c);
- if (c)
- CHECK_GT(Size(c), Size(c-1));
- }
- CHECK_EQ(ClassID(kMaxSize + 1), 0);
-
- for (uptr s = 1; s <= kMaxSize; s++) {
- uptr c = ClassID(s);
- // Printf("s%zd => c%zd\n", s, c);
- CHECK_LT(c, kNumClasses);
- CHECK_GE(Size(c), s);
- if (c > 0)
- CHECK_LT(Size(c-1), s);
- }
- }
-};
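
The class-to-size arithmetic above is easiest to trust with a worked check. A self-contained sketch that re-derives Size() and ClassID() for the parameters used here (kMinSizeLog = 4, kMidSizeLog = 8, S = 2) and verifies the c16/c17 boundary from the table in the comment:

    #include <cassert>
    #include <cstdint>
    typedef uint64_t uptr;
    static const uptr kMinSizeLog = 4, kMidSizeLog = 8;
    static const uptr kMinSize = 1 << kMinSizeLog, kMidSize = 1 << kMidSizeLog;
    static const uptr kMidClass = kMidSize / kMinSize;  // 16
    static const uptr S = 2, M = (1 << S) - 1;

    // Stand-in for MostSignificantSetBitIndex().
    uptr Msb(uptr x) { uptr r = 0; while (x >>= 1) r++; return r; }

    uptr Size(uptr c) {
      if (c <= kMidClass) return kMinSize * c;
      c -= kMidClass;
      uptr t = kMidSize << (c >> S);
      return t + (t >> S) * (c & M);
    }

    uptr ClassID(uptr size) {
      if (size <= kMidSize) return (size + kMinSize - 1) >> kMinSizeLog;
      uptr l = Msb(size);
      uptr hbits = (size >> (l - S)) & M;
      uptr lbits = size & ((1 << (l - S)) - 1);
      return kMidClass + ((l - kMidSizeLog) << S) + hbits + (lbits > 0);
    }

    int main() {
      assert(Size(17) == 320);     // 256 + 64: a 25% step from c16
      assert(ClassID(257) == 17);  // first size that no longer fits in c16
      assert(ClassID(320) == 17);
    }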
+// Returns true if ReportAllocatorCannotReturnNull(true) was called.
+// Can be used to avoid memory-hungry operations.
+bool IsReportingOOM();
-typedef SizeClassMap<17, 128, 16> DefaultSizeClassMap;
-typedef SizeClassMap<17, 64, 14> CompactSizeClassMap;
-template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache;
-
-// Memory allocator statistics
-enum AllocatorStat {
- AllocatorStatAllocated,
- AllocatorStatMapped,
- AllocatorStatCount
-};
-
-typedef uptr AllocatorStatCounters[AllocatorStatCount];
-
-// Per-thread stats, live in per-thread cache.
-class AllocatorStats {
- public:
- void Init() {
- internal_memset(this, 0, sizeof(*this));
- }
- void InitLinkerInitialized() {}
-
- void Add(AllocatorStat i, uptr v) {
- v += atomic_load(&stats_[i], memory_order_relaxed);
- atomic_store(&stats_[i], v, memory_order_relaxed);
- }
-
- void Sub(AllocatorStat i, uptr v) {
- v = atomic_load(&stats_[i], memory_order_relaxed) - v;
- atomic_store(&stats_[i], v, memory_order_relaxed);
- }
-
- void Set(AllocatorStat i, uptr v) {
- atomic_store(&stats_[i], v, memory_order_relaxed);
- }
-
- uptr Get(AllocatorStat i) const {
- return atomic_load(&stats_[i], memory_order_relaxed);
- }
-
- private:
- friend class AllocatorGlobalStats;
- AllocatorStats *next_;
- AllocatorStats *prev_;
- atomic_uintptr_t stats_[AllocatorStatCount];
-};
-
-// Global stats, used for aggregation and querying.
-class AllocatorGlobalStats : public AllocatorStats {
- public:
- void InitLinkerInitialized() {
- next_ = this;
- prev_ = this;
- }
- void Init() {
- internal_memset(this, 0, sizeof(*this));
- InitLinkerInitialized();
- }
-
- void Register(AllocatorStats *s) {
- SpinMutexLock l(&mu_);
- s->next_ = next_;
- s->prev_ = this;
- next_->prev_ = s;
- next_ = s;
- }
-
- void Unregister(AllocatorStats *s) {
- SpinMutexLock l(&mu_);
- s->prev_->next_ = s->next_;
- s->next_->prev_ = s->prev_;
- for (int i = 0; i < AllocatorStatCount; i++)
- Add(AllocatorStat(i), s->Get(AllocatorStat(i)));
- }
-
- void Get(AllocatorStatCounters s) const {
- internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
- SpinMutexLock l(&mu_);
- const AllocatorStats *stats = this;
- for (;;) {
- for (int i = 0; i < AllocatorStatCount; i++)
- s[i] += stats->Get(AllocatorStat(i));
- stats = stats->next_;
- if (stats == this)
- break;
- }
- // All stats must be non-negative.
- for (int i = 0; i < AllocatorStatCount; i++)
- s[i] = ((sptr)s[i]) >= 0 ? s[i] : 0;
- }
-
- private:
- mutable SpinMutex mu_;
-};
+// Prints error message and kills the program.
+void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory);
// Allocators call these callbacks on mmap/munmap.
struct NoOpMapUnmapCallback {
@@ -295,1185 +40,18 @@ struct NoOpMapUnmapCallback {
// Callback type for iterating over chunks.
typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
-// SizeClassAllocator64 -- allocator for 64-bit address space.
-//
-// Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.
-// If kSpaceBeg is ~0, then SpaceBeg is chosen dynamically by mmap.
-// Otherwise SpaceBeg=kSpaceBeg (fixed address).
-// kSpaceSize is a power of two.
-// At the beginning the entire space is mprotect-ed, then small parts of it
-// are mapped on demand.
-//
-// Region: a part of Space dedicated to a single size class.
-// There are kNumClasses Regions of equal size.
-//
-// UserChunk: a piece of memory returned to user.
-// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
-//
-// A Region looks like this:
-// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
-template <const uptr kSpaceBeg, const uptr kSpaceSize,
- const uptr kMetadataSize, class SizeClassMap,
- class MapUnmapCallback = NoOpMapUnmapCallback>
-class SizeClassAllocator64 {
- public:
- typedef typename SizeClassMap::TransferBatch Batch;
- typedef SizeClassAllocator64<kSpaceBeg, kSpaceSize, kMetadataSize,
- SizeClassMap, MapUnmapCallback> ThisT;
- typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;
-
- void Init() {
- uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
- if (kUsingConstantSpaceBeg) {
- CHECK_EQ(kSpaceBeg, reinterpret_cast<uptr>(
- MmapFixedNoAccess(kSpaceBeg, TotalSpaceSize)));
- } else {
- NonConstSpaceBeg =
- reinterpret_cast<uptr>(MmapNoAccess(TotalSpaceSize));
- CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
- }
- MapWithCallback(SpaceEnd(), AdditionalSize());
- }
-
- void MapWithCallback(uptr beg, uptr size) {
- CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
- MapUnmapCallback().OnMap(beg, size);
- }
-
- void UnmapWithCallback(uptr beg, uptr size) {
- MapUnmapCallback().OnUnmap(beg, size);
- UnmapOrDie(reinterpret_cast<void *>(beg), size);
- }
-
- static bool CanAllocate(uptr size, uptr alignment) {
- return size <= SizeClassMap::kMaxSize &&
- alignment <= SizeClassMap::kMaxSize;
- }
-
- NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
- uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- RegionInfo *region = GetRegionInfo(class_id);
- Batch *b = region->free_list.Pop();
- if (!b)
- b = PopulateFreeList(stat, c, class_id, region);
- region->n_allocated += b->count;
- return b;
- }
-
- NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
- RegionInfo *region = GetRegionInfo(class_id);
- CHECK_GT(b->count, 0);
- region->free_list.Push(b);
- region->n_freed += b->count;
- }
-
- bool PointerIsMine(const void *p) {
- uptr P = reinterpret_cast<uptr>(p);
- if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
- return P / kSpaceSize == kSpaceBeg / kSpaceSize;
- return P >= SpaceBeg() && P < SpaceEnd();
- }
-
- uptr GetSizeClass(const void *p) {
- if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
- return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded;
- return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) %
- kNumClassesRounded;
- }
-
- void *GetBlockBegin(const void *p) {
- uptr class_id = GetSizeClass(p);
- uptr size = SizeClassMap::Size(class_id);
- if (!size) return nullptr;
- uptr chunk_idx = GetChunkIdx((uptr)p, size);
- uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
- uptr beg = chunk_idx * size;
- uptr next_beg = beg + size;
- if (class_id >= kNumClasses) return nullptr;
- RegionInfo *region = GetRegionInfo(class_id);
- if (region->mapped_user >= next_beg)
- return reinterpret_cast<void*>(reg_beg + beg);
- return nullptr;
- }
-
- uptr GetActuallyAllocatedSize(void *p) {
- CHECK(PointerIsMine(p));
- return SizeClassMap::Size(GetSizeClass(p));
- }
-
- uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
-
- void *GetMetaData(const void *p) {
- uptr class_id = GetSizeClass(p);
- uptr size = SizeClassMap::Size(class_id);
- uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
- return reinterpret_cast<void *>(SpaceBeg() +
- (kRegionSize * (class_id + 1)) -
- (1 + chunk_idx) * kMetadataSize);
- }
-
- uptr TotalMemoryUsed() {
- uptr res = 0;
- for (uptr i = 0; i < kNumClasses; i++)
- res += GetRegionInfo(i)->allocated_user;
- return res;
- }
-
- // Test-only.
- void TestOnlyUnmap() {
- UnmapWithCallback(SpaceBeg(), kSpaceSize + AdditionalSize());
- }
-
- void PrintStats() {
- uptr total_mapped = 0;
- uptr n_allocated = 0;
- uptr n_freed = 0;
- for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
- RegionInfo *region = GetRegionInfo(class_id);
- total_mapped += region->mapped_user;
- n_allocated += region->n_allocated;
- n_freed += region->n_freed;
- }
- Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
- "remains %zd\n",
- total_mapped >> 20, n_allocated, n_allocated - n_freed);
- for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
- RegionInfo *region = GetRegionInfo(class_id);
- if (region->mapped_user == 0) continue;
- Printf(" %02zd (%zd): total: %zd K allocs: %zd remains: %zd\n",
- class_id,
- SizeClassMap::Size(class_id),
- region->mapped_user >> 10,
- region->n_allocated,
- region->n_allocated - region->n_freed);
- }
- }
-
- // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
- // introspection API.
- void ForceLock() {
- for (uptr i = 0; i < kNumClasses; i++) {
- GetRegionInfo(i)->mutex.Lock();
- }
- }
-
- void ForceUnlock() {
- for (int i = (int)kNumClasses - 1; i >= 0; i--) {
- GetRegionInfo(i)->mutex.Unlock();
- }
- }
-
- // Iterate over all existing chunks.
- // The allocator must be locked when calling this function.
- void ForEachChunk(ForEachChunkCallback callback, void *arg) {
- for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
- RegionInfo *region = GetRegionInfo(class_id);
- uptr chunk_size = SizeClassMap::Size(class_id);
- uptr region_beg = SpaceBeg() + class_id * kRegionSize;
- for (uptr chunk = region_beg;
- chunk < region_beg + region->allocated_user;
- chunk += chunk_size) {
- // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
- callback(chunk, arg);
- }
- }
- }
-
- static uptr AdditionalSize() {
- return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
- GetPageSizeCached());
- }
-
- typedef SizeClassMap SizeClassMapT;
- static const uptr kNumClasses = SizeClassMap::kNumClasses;
- static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
-
- private:
- static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
-
- static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0;
- uptr NonConstSpaceBeg;
- uptr SpaceBeg() const {
- return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
- }
- uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
- // kRegionSize must be >= 2^32.
- COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
- // Populate the free list with at most this number of bytes at once
- // or with one element if its size is greater.
- static const uptr kPopulateSize = 1 << 14;
- // Call mmap for user memory with at least this size.
- static const uptr kUserMapSize = 1 << 16;
- // Call mmap for metadata memory with at least this size.
- static const uptr kMetaMapSize = 1 << 16;
-
- struct RegionInfo {
- BlockingMutex mutex;
- LFStack<Batch> free_list;
- uptr allocated_user; // Bytes allocated for user memory.
- uptr allocated_meta; // Bytes allocated for metadata.
- uptr mapped_user; // Bytes mapped for user memory.
- uptr mapped_meta; // Bytes mapped for metadata.
- uptr n_allocated, n_freed; // Just stats.
- };
- COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);
-
- RegionInfo *GetRegionInfo(uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- RegionInfo *regions =
- reinterpret_cast<RegionInfo *>(SpaceBeg() + kSpaceSize);
- return &regions[class_id];
- }
-
- static uptr GetChunkIdx(uptr chunk, uptr size) {
- uptr offset = chunk % kRegionSize;
- // Here we divide by a non-constant. This is costly.
- // size always fits into 32-bits. If the offset fits too, use 32-bit div.
- if (offset >> (SANITIZER_WORDSIZE / 2))
- return offset / size;
- return (u32)offset / (u32)size;
- }
-
- NOINLINE Batch* PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
- uptr class_id, RegionInfo *region) {
- BlockingMutexLock l(&region->mutex);
- Batch *b = region->free_list.Pop();
- if (b)
- return b;
- uptr size = SizeClassMap::Size(class_id);
- uptr count = size < kPopulateSize ? SizeClassMap::MaxCached(class_id) : 1;
- uptr beg_idx = region->allocated_user;
- uptr end_idx = beg_idx + count * size;
- uptr region_beg = SpaceBeg() + kRegionSize * class_id;
- if (end_idx + size > region->mapped_user) {
- // Do the mmap for the user memory.
- uptr map_size = kUserMapSize;
- while (end_idx + size > region->mapped_user + map_size)
- map_size += kUserMapSize;
- CHECK_GE(region->mapped_user + map_size, end_idx);
- MapWithCallback(region_beg + region->mapped_user, map_size);
- stat->Add(AllocatorStatMapped, map_size);
- region->mapped_user += map_size;
- }
- uptr total_count = (region->mapped_user - beg_idx - size)
- / size / count * count;
- region->allocated_meta += total_count * kMetadataSize;
- if (region->allocated_meta > region->mapped_meta) {
- uptr map_size = kMetaMapSize;
- while (region->allocated_meta > region->mapped_meta + map_size)
- map_size += kMetaMapSize;
- // Do the mmap for the metadata.
- CHECK_GE(region->mapped_meta + map_size, region->allocated_meta);
- MapWithCallback(region_beg + kRegionSize -
- region->mapped_meta - map_size, map_size);
- region->mapped_meta += map_size;
- }
- CHECK_LE(region->allocated_meta, region->mapped_meta);
- if (region->mapped_user + region->mapped_meta > kRegionSize) {
- Printf("%s: Out of memory. Dying. ", SanitizerToolName);
- Printf("The process has exhausted %zuMB for size class %zu.\n",
- kRegionSize / 1024 / 1024, size);
- Die();
- }
- for (;;) {
- if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
- b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
- else
- b = (Batch*)(region_beg + beg_idx);
- b->count = count;
- for (uptr i = 0; i < count; i++)
- b->batch[i] = (void*)(region_beg + beg_idx + i * size);
- region->allocated_user += count * size;
- CHECK_LE(region->allocated_user, region->mapped_user);
- beg_idx += count * size;
- if (beg_idx + count * size + size > region->mapped_user)
- break;
- CHECK_GT(b->count, 0);
- region->free_list.Push(b);
- }
- return b;
- }
-};
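
Tools instantiate this template once with a fixed slice of the 64-bit address space. A hedged sketch of a typical instantiation (the space constants are illustrative, not any particular tool's layout):

    typedef SizeClassAllocator64<
        /* kSpaceBeg */     0x600000000000ULL,  // fixed base, or ~0 for dynamic
        /* kSpaceSize */    0x40000000000ULL,   // 4T, a power of two
        /* kMetadataSize */ 16,
        DefaultSizeClassMap>
        PrimaryAllocator;
    typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;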
-
-// Maps integers in range [0, kSize) to u8 values.
-template<u64 kSize>
-class FlatByteMap {
- public:
- void TestOnlyInit() {
- internal_memset(map_, 0, sizeof(map_));
- }
-
- void set(uptr idx, u8 val) {
- CHECK_LT(idx, kSize);
- CHECK_EQ(0U, map_[idx]);
- map_[idx] = val;
- }
- u8 operator[] (uptr idx) {
- CHECK_LT(idx, kSize);
- // FIXME: CHECK may be too expensive here.
- return map_[idx];
- }
- private:
- u8 map_[kSize];
-};
-
-// TwoLevelByteMap maps integers in range [0, kSize1*kSize2) to u8 values.
-// It is implemented as a two-dimensional array: array of kSize1 pointers
-// to kSize2-byte arrays. The secondary arrays are mmaped on demand.
-// Each value is initially zero and can be set to something else only once.
-// Setting and getting values from multiple threads is safe w/o extra locking.
-template <u64 kSize1, u64 kSize2, class MapUnmapCallback = NoOpMapUnmapCallback>
-class TwoLevelByteMap {
- public:
- void TestOnlyInit() {
- internal_memset(map1_, 0, sizeof(map1_));
- mu_.Init();
- }
-
- void TestOnlyUnmap() {
- for (uptr i = 0; i < kSize1; i++) {
- u8 *p = Get(i);
- if (!p) continue;
- MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2);
- UnmapOrDie(p, kSize2);
- }
- }
-
- uptr size() const { return kSize1 * kSize2; }
- uptr size1() const { return kSize1; }
- uptr size2() const { return kSize2; }
-
- void set(uptr idx, u8 val) {
- CHECK_LT(idx, kSize1 * kSize2);
- u8 *map2 = GetOrCreate(idx / kSize2);
- CHECK_EQ(0U, map2[idx % kSize2]);
- map2[idx % kSize2] = val;
- }
-
- u8 operator[] (uptr idx) const {
- CHECK_LT(idx, kSize1 * kSize2);
- u8 *map2 = Get(idx / kSize2);
- if (!map2) return 0;
- return map2[idx % kSize2];
- }
-
- private:
- u8 *Get(uptr idx) const {
- CHECK_LT(idx, kSize1);
- return reinterpret_cast<u8 *>(
- atomic_load(&map1_[idx], memory_order_acquire));
- }
-
- u8 *GetOrCreate(uptr idx) {
- u8 *res = Get(idx);
- if (!res) {
- SpinMutexLock l(&mu_);
- if (!(res = Get(idx))) {
- res = (u8*)MmapOrDie(kSize2, "TwoLevelByteMap");
- MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);
- atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
- memory_order_release);
- }
- }
- return res;
- }
-
- atomic_uintptr_t map1_[kSize1];
- StaticSpinMutex mu_;
-};
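
Both byte maps share a set-once contract: every cell starts at zero and may be written exactly once, which is what makes unsynchronized readers safe. A small usage sketch (the global map and its sizing are hypothetical):

    // 2^12 first-level pointers * 2^20-byte leaves = 2^32 addressable cells.
    static TwoLevelByteMap<1 << 12, 1 << 20> region_classes;

    void RememberRegionClass(uptr region_id, u8 class_id) {
      region_classes.set(region_id, class_id);  // CHECKs the old value was 0
    }
    u8 RegionClass(uptr region_id) {
      return region_classes[region_id];  // 0 means "region never registered"
    }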
-
-// SizeClassAllocator32 -- allocator for 32-bit address space.
-// This allocator can theoretically be used on 64-bit arch, but there it is less
-// efficient than SizeClassAllocator64.
-//
-// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
-// be returned by MmapOrDie().
-//
-// Region:
-// a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
-// Since the regions are aligned by kRegionSize, there are exactly
-// kNumPossibleRegions possible regions in the address space and so we keep
-// a ByteMap possible_regions to store the size classes of each Region.
-// 0 size class means the region is not used by the allocator.
-//
-// One Region is used to allocate chunks of a single size class.
-// A Region looks like this:
-// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
-//
-// In order to avoid false sharing the objects of this class should be
-// cache-line aligned.
-template <const uptr kSpaceBeg, const u64 kSpaceSize,
- const uptr kMetadataSize, class SizeClassMap,
- const uptr kRegionSizeLog,
- class ByteMap,
- class MapUnmapCallback = NoOpMapUnmapCallback>
-class SizeClassAllocator32 {
- public:
- typedef typename SizeClassMap::TransferBatch Batch;
- typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
- SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT;
- typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;
-
- void Init() {
- possible_regions.TestOnlyInit();
- internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
- }
-
- void *MapWithCallback(uptr size) {
- size = RoundUpTo(size, GetPageSizeCached());
- void *res = MmapOrDie(size, "SizeClassAllocator32");
- MapUnmapCallback().OnMap((uptr)res, size);
- return res;
- }
-
- void UnmapWithCallback(uptr beg, uptr size) {
- MapUnmapCallback().OnUnmap(beg, size);
- UnmapOrDie(reinterpret_cast<void *>(beg), size);
- }
-
- static bool CanAllocate(uptr size, uptr alignment) {
- return size <= SizeClassMap::kMaxSize &&
- alignment <= SizeClassMap::kMaxSize;
- }
-
- void *GetMetaData(const void *p) {
- CHECK(PointerIsMine(p));
- uptr mem = reinterpret_cast<uptr>(p);
- uptr beg = ComputeRegionBeg(mem);
- uptr size = SizeClassMap::Size(GetSizeClass(p));
- u32 offset = mem - beg;
- uptr n = offset / (u32)size; // 32-bit division
- uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
- return reinterpret_cast<void*>(meta);
- }
-
- NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
- uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- SizeClassInfo *sci = GetSizeClassInfo(class_id);
- SpinMutexLock l(&sci->mutex);
- if (sci->free_list.empty())
- PopulateFreeList(stat, c, sci, class_id);
- CHECK(!sci->free_list.empty());
- Batch *b = sci->free_list.front();
- sci->free_list.pop_front();
- return b;
- }
-
- NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
- CHECK_LT(class_id, kNumClasses);
- SizeClassInfo *sci = GetSizeClassInfo(class_id);
- SpinMutexLock l(&sci->mutex);
- CHECK_GT(b->count, 0);
- sci->free_list.push_front(b);
- }
-
- bool PointerIsMine(const void *p) {
- uptr mem = reinterpret_cast<uptr>(p);
- if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
- return false;
- return GetSizeClass(p) != 0;
- }
-
- uptr GetSizeClass(const void *p) {
- return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
- }
-
- void *GetBlockBegin(const void *p) {
- CHECK(PointerIsMine(p));
- uptr mem = reinterpret_cast<uptr>(p);
- uptr beg = ComputeRegionBeg(mem);
- uptr size = SizeClassMap::Size(GetSizeClass(p));
- u32 offset = mem - beg;
- u32 n = offset / (u32)size; // 32-bit division
- uptr res = beg + (n * (u32)size);
- return reinterpret_cast<void*>(res);
- }
-
- uptr GetActuallyAllocatedSize(void *p) {
- CHECK(PointerIsMine(p));
- return SizeClassMap::Size(GetSizeClass(p));
- }
-
- uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
-
- uptr TotalMemoryUsed() {
- // No need to lock here.
- uptr res = 0;
- for (uptr i = 0; i < kNumPossibleRegions; i++)
- if (possible_regions[i])
- res += kRegionSize;
- return res;
- }
-
- void TestOnlyUnmap() {
- for (uptr i = 0; i < kNumPossibleRegions; i++)
- if (possible_regions[i])
- UnmapWithCallback((i * kRegionSize), kRegionSize);
- }
-
- // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
- // introspection API.
- void ForceLock() {
- for (uptr i = 0; i < kNumClasses; i++) {
- GetSizeClassInfo(i)->mutex.Lock();
- }
- }
-
- void ForceUnlock() {
- for (int i = kNumClasses - 1; i >= 0; i--) {
- GetSizeClassInfo(i)->mutex.Unlock();
- }
- }
-
- // Iterate over all existing chunks.
- // The allocator must be locked when calling this function.
- void ForEachChunk(ForEachChunkCallback callback, void *arg) {
- for (uptr region = 0; region < kNumPossibleRegions; region++)
- if (possible_regions[region]) {
- uptr chunk_size = SizeClassMap::Size(possible_regions[region]);
- uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
- uptr region_beg = region * kRegionSize;
- for (uptr chunk = region_beg;
- chunk < region_beg + max_chunks_in_region * chunk_size;
- chunk += chunk_size) {
- // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
- callback(chunk, arg);
- }
- }
- }
-
- void PrintStats() {
- }
-
- static uptr AdditionalSize() {
- return 0;
- }
-
- typedef SizeClassMap SizeClassMapT;
- static const uptr kNumClasses = SizeClassMap::kNumClasses;
-
- private:
- static const uptr kRegionSize = 1 << kRegionSizeLog;
- static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
-
- struct SizeClassInfo {
- SpinMutex mutex;
- IntrusiveList<Batch> free_list;
- char padding[kCacheLineSize - sizeof(uptr) - sizeof(IntrusiveList<Batch>)];
- };
- COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);
-
- uptr ComputeRegionId(uptr mem) {
- uptr res = mem >> kRegionSizeLog;
- CHECK_LT(res, kNumPossibleRegions);
- return res;
- }
-
- uptr ComputeRegionBeg(uptr mem) {
- return mem & ~(kRegionSize - 1);
- }
-
- uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
- "SizeClassAllocator32"));
- MapUnmapCallback().OnMap(res, kRegionSize);
- stat->Add(AllocatorStatMapped, kRegionSize);
- CHECK_EQ(0U, (res & (kRegionSize - 1)));
- possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
- return res;
- }
-
- SizeClassInfo *GetSizeClassInfo(uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- return &size_class_info_array[class_id];
- }
-
- void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
- SizeClassInfo *sci, uptr class_id) {
- uptr size = SizeClassMap::Size(class_id);
- uptr reg = AllocateRegion(stat, class_id);
- uptr n_chunks = kRegionSize / (size + kMetadataSize);
- uptr max_count = SizeClassMap::MaxCached(class_id);
- Batch *b = nullptr;
- for (uptr i = reg; i < reg + n_chunks * size; i += size) {
- if (!b) {
- if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
- b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
- else
- b = (Batch*)i;
- b->count = 0;
- }
- b->batch[b->count++] = (void*)i;
- if (b->count == max_count) {
- CHECK_GT(b->count, 0);
- sci->free_list.push_back(b);
- b = nullptr;
- }
- }
- if (b) {
- CHECK_GT(b->count, 0);
- sci->free_list.push_back(b);
- }
- }
-
- ByteMap possible_regions;
- SizeClassInfo size_class_info_array[kNumClasses];
-};
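
The 32-bit allocator's pointer classification is pure arithmetic on the region grid. A worked example with kRegionSizeLog = 20 (the pointer value is illustrative):

    // kRegionSize = 1 << 20, so each 1M-aligned megabyte is one region.
    // For p = 0x4abcd123:
    //   ComputeRegionId(p)  == 0x4abcd123 >> 20      == 0x4ab
    //   ComputeRegionBeg(p) == 0x4abcd123 & ~0xfffff == 0x4ab00000
    // possible_regions[0x4ab] then holds the size class of every chunk in
    // [0x4ab00000, 0x4ac00000), and 0 means the region is not ours.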
-
-// Objects of this type should be used as local caches for SizeClassAllocator64
-// or SizeClassAllocator32. Since the typical use of this class is to have one
-// object per thread in TLS, it has to be POD.
-template<class SizeClassAllocator>
-struct SizeClassAllocatorLocalCache {
- typedef SizeClassAllocator Allocator;
- static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
-
- void Init(AllocatorGlobalStats *s) {
- stats_.Init();
- if (s)
- s->Register(&stats_);
- }
-
- void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
- Drain(allocator);
- if (s)
- s->Unregister(&stats_);
- }
-
- void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
- CHECK_NE(class_id, 0UL);
- CHECK_LT(class_id, kNumClasses);
- stats_.Add(AllocatorStatAllocated, SizeClassMap::Size(class_id));
- PerClass *c = &per_class_[class_id];
- if (UNLIKELY(c->count == 0))
- Refill(allocator, class_id);
- void *res = c->batch[--c->count];
- PREFETCH(c->batch[c->count - 1]);
- return res;
- }
-
- void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
- CHECK_NE(class_id, 0UL);
- CHECK_LT(class_id, kNumClasses);
- // If the first allocator call on a new thread is a deallocation, then
- // max_count will be zero, leading to check failure.
- InitCache();
- stats_.Sub(AllocatorStatAllocated, SizeClassMap::Size(class_id));
- PerClass *c = &per_class_[class_id];
- CHECK_NE(c->max_count, 0UL);
- if (UNLIKELY(c->count == c->max_count))
- Drain(allocator, class_id);
- c->batch[c->count++] = p;
- }
-
- void Drain(SizeClassAllocator *allocator) {
- for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
- PerClass *c = &per_class_[class_id];
- while (c->count > 0)
- Drain(allocator, class_id);
- }
- }
-
- // private:
- typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
- typedef typename SizeClassMap::TransferBatch Batch;
- struct PerClass {
- uptr count;
- uptr max_count;
- void *batch[2 * SizeClassMap::kMaxNumCached];
- };
- PerClass per_class_[kNumClasses];
- AllocatorStats stats_;
-
- void InitCache() {
- if (per_class_[1].max_count)
- return;
- for (uptr i = 0; i < kNumClasses; i++) {
- PerClass *c = &per_class_[i];
- c->max_count = 2 * SizeClassMap::MaxCached(i);
- }
- }
-
- NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
- InitCache();
- PerClass *c = &per_class_[class_id];
- Batch *b = allocator->AllocateBatch(&stats_, this, class_id);
- CHECK_GT(b->count, 0);
- for (uptr i = 0; i < b->count; i++)
- c->batch[i] = b->batch[i];
- c->count = b->count;
- if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
- Deallocate(allocator, SizeClassMap::ClassID(sizeof(Batch)), b);
- }
-
- NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
- InitCache();
- PerClass *c = &per_class_[class_id];
- Batch *b;
- if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
- b = (Batch*)Allocate(allocator, SizeClassMap::ClassID(sizeof(Batch)));
- else
- b = (Batch*)c->batch[0];
- uptr cnt = Min(c->max_count / 2, c->count);
- for (uptr i = 0; i < cnt; i++) {
- b->batch[i] = c->batch[i];
- c->batch[i] = c->batch[i + c->max_count / 2];
- }
- b->count = cnt;
- c->count -= cnt;
- CHECK_GT(b->count, 0);
- allocator->DeallocateBatch(&stats_, class_id, b);
- }
-};
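
The point of the cache is that the common malloc/free path touches only thread-local state; locks are taken only inside Refill()/Drain() when a batch moves to or from the shared allocator. A hedged wiring sketch, reusing the hypothetical PrimaryAllocator/AllocatorCache typedefs from the sketch above (cache setup via InitCache()/DestroyCache() omitted):

    static PrimaryAllocator primary;           // shared, internally locked
    static THREADLOCAL AllocatorCache cache;   // per-thread, lock-free fast path

    void *ToolMalloc(uptr size) {
      return cache.Allocate(&primary, primary.ClassID(size));  // Refill() on miss
    }
    void ToolFree(void *p) {
      cache.Deallocate(&primary, primary.GetSizeClass(p), p);  // Drain() when full
    }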
-
-// This class can (de)allocate only large chunks of memory using mmap/unmap.
-// The main purpose of this allocator is to cover large and rare allocation
-// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
-template <class MapUnmapCallback = NoOpMapUnmapCallback>
-class LargeMmapAllocator {
- public:
- void InitLinkerInitialized(bool may_return_null) {
- page_size_ = GetPageSizeCached();
- atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
- }
-
- void Init(bool may_return_null) {
- internal_memset(this, 0, sizeof(*this));
- InitLinkerInitialized(may_return_null);
- }
-
- void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
- CHECK(IsPowerOfTwo(alignment));
- uptr map_size = RoundUpMapSize(size);
- if (alignment > page_size_)
- map_size += alignment;
- // Overflow.
- if (map_size < size)
- return ReturnNullOrDie();
- uptr map_beg = reinterpret_cast<uptr>(
- MmapOrDie(map_size, "LargeMmapAllocator"));
- CHECK(IsAligned(map_beg, page_size_));
- MapUnmapCallback().OnMap(map_beg, map_size);
- uptr map_end = map_beg + map_size;
- uptr res = map_beg + page_size_;
- if (res & (alignment - 1)) // Align.
- res += alignment - (res & (alignment - 1));
- CHECK(IsAligned(res, alignment));
- CHECK(IsAligned(res, page_size_));
- CHECK_GE(res + size, map_beg);
- CHECK_LE(res + size, map_end);
- Header *h = GetHeader(res);
- h->size = size;
- h->map_beg = map_beg;
- h->map_size = map_size;
- uptr size_log = MostSignificantSetBitIndex(map_size);
- CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
- {
- SpinMutexLock l(&mutex_);
- uptr idx = n_chunks_++;
- chunks_sorted_ = false;
- CHECK_LT(idx, kMaxNumChunks);
- h->chunk_idx = idx;
- chunks_[idx] = h;
- stats.n_allocs++;
- stats.currently_allocated += map_size;
- stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
- stats.by_size_log[size_log]++;
- stat->Add(AllocatorStatAllocated, map_size);
- stat->Add(AllocatorStatMapped, map_size);
- }
- return reinterpret_cast<void*>(res);
- }
-
- void *ReturnNullOrDie() {
- if (atomic_load(&may_return_null_, memory_order_acquire))
- return nullptr;
- ReportAllocatorCannotReturnNull();
- }
-
- void SetMayReturnNull(bool may_return_null) {
- atomic_store(&may_return_null_, may_return_null, memory_order_release);
- }
-
- void Deallocate(AllocatorStats *stat, void *p) {
- Header *h = GetHeader(p);
- {
- SpinMutexLock l(&mutex_);
- uptr idx = h->chunk_idx;
- CHECK_EQ(chunks_[idx], h);
- CHECK_LT(idx, n_chunks_);
- chunks_[idx] = chunks_[n_chunks_ - 1];
- chunks_[idx]->chunk_idx = idx;
- n_chunks_--;
- chunks_sorted_ = false;
- stats.n_frees++;
- stats.currently_allocated -= h->map_size;
- stat->Sub(AllocatorStatAllocated, h->map_size);
- stat->Sub(AllocatorStatMapped, h->map_size);
- }
- MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
- UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
- }
-
- uptr TotalMemoryUsed() {
- SpinMutexLock l(&mutex_);
- uptr res = 0;
- for (uptr i = 0; i < n_chunks_; i++) {
- Header *h = chunks_[i];
- CHECK_EQ(h->chunk_idx, i);
- res += RoundUpMapSize(h->size);
- }
- return res;
- }
-
- bool PointerIsMine(const void *p) {
- return GetBlockBegin(p) != nullptr;
- }
-
- uptr GetActuallyAllocatedSize(void *p) {
- return RoundUpTo(GetHeader(p)->size, page_size_);
- }
-
-  // At least page_size_/2 metadata bytes are available.
- void *GetMetaData(const void *p) {
- // Too slow: CHECK_EQ(p, GetBlockBegin(p));
- if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
- Printf("%s: bad pointer %p\n", SanitizerToolName, p);
- CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
- }
- return GetHeader(p) + 1;
- }
-
- void *GetBlockBegin(const void *ptr) {
- uptr p = reinterpret_cast<uptr>(ptr);
- SpinMutexLock l(&mutex_);
- uptr nearest_chunk = 0;
- // Cache-friendly linear search.
- for (uptr i = 0; i < n_chunks_; i++) {
- uptr ch = reinterpret_cast<uptr>(chunks_[i]);
- if (p < ch) continue; // p is at left to this chunk, skip it.
- if (p - ch < p - nearest_chunk)
- nearest_chunk = ch;
- }
- if (!nearest_chunk)
- return nullptr;
- Header *h = reinterpret_cast<Header *>(nearest_chunk);
- CHECK_GE(nearest_chunk, h->map_beg);
- CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
- CHECK_LE(nearest_chunk, p);
- if (h->map_beg + h->map_size <= p)
- return nullptr;
- return GetUser(h);
- }
-
- // This function does the same as GetBlockBegin, but is much faster.
- // Must be called with the allocator locked.
- void *GetBlockBeginFastLocked(void *ptr) {
- mutex_.CheckLocked();
- uptr p = reinterpret_cast<uptr>(ptr);
- uptr n = n_chunks_;
- if (!n) return nullptr;
- if (!chunks_sorted_) {
- // Do one-time sort. chunks_sorted_ is reset in Allocate/Deallocate.
- SortArray(reinterpret_cast<uptr*>(chunks_), n);
- for (uptr i = 0; i < n; i++)
- chunks_[i]->chunk_idx = i;
- chunks_sorted_ = true;
- min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
- max_mmap_ = reinterpret_cast<uptr>(chunks_[n - 1]) +
- chunks_[n - 1]->map_size;
- }
- if (p < min_mmap_ || p >= max_mmap_)
- return nullptr;
- uptr beg = 0, end = n - 1;
- // This loop is a log(n) lower_bound. It does not check for the exact match
- // to avoid expensive cache-thrashing loads.
- while (end - beg >= 2) {
- uptr mid = (beg + end) / 2; // Invariant: mid >= beg + 1
- if (p < reinterpret_cast<uptr>(chunks_[mid]))
- end = mid - 1; // We are not interested in chunks_[mid].
- else
- beg = mid; // chunks_[mid] may still be what we want.
- }
-
- if (beg < end) {
- CHECK_EQ(beg + 1, end);
- // There are 2 chunks left, choose one.
- if (p >= reinterpret_cast<uptr>(chunks_[end]))
- beg = end;
- }
-
- Header *h = chunks_[beg];
- if (h->map_beg + h->map_size <= p || p < h->map_beg)
- return nullptr;
- return GetUser(h);
- }
-
- void PrintStats() {
- Printf("Stats: LargeMmapAllocator: allocated %zd times, "
- "remains %zd (%zd K) max %zd M; by size logs: ",
- stats.n_allocs, stats.n_allocs - stats.n_frees,
- stats.currently_allocated >> 10, stats.max_allocated >> 20);
- for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
- uptr c = stats.by_size_log[i];
- if (!c) continue;
- Printf("%zd:%zd; ", i, c);
- }
- Printf("\n");
- }
-
- // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
- // introspection API.
- void ForceLock() {
- mutex_.Lock();
- }
-
- void ForceUnlock() {
- mutex_.Unlock();
- }
-
- // Iterate over all existing chunks.
- // The allocator must be locked when calling this function.
- void ForEachChunk(ForEachChunkCallback callback, void *arg) {
- for (uptr i = 0; i < n_chunks_; i++)
- callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
- }
-
- private:
- static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
- struct Header {
- uptr map_beg;
- uptr map_size;
- uptr size;
- uptr chunk_idx;
- };
-
- Header *GetHeader(uptr p) {
- CHECK(IsAligned(p, page_size_));
- return reinterpret_cast<Header*>(p - page_size_);
- }
- Header *GetHeader(const void *p) {
- return GetHeader(reinterpret_cast<uptr>(p));
- }
-
- void *GetUser(Header *h) {
- CHECK(IsAligned((uptr)h, page_size_));
- return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
- }
-
- uptr RoundUpMapSize(uptr size) {
- return RoundUpTo(size, page_size_) + page_size_;
- }
-
- uptr page_size_;
- Header *chunks_[kMaxNumChunks];
- uptr n_chunks_;
- uptr min_mmap_, max_mmap_;
- bool chunks_sorted_;
- struct Stats {
- uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
- } stats;
- atomic_uint8_t may_return_null_;
- SpinMutex mutex_;
-};
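
Secondary chunks keep their Header exactly one page below the user pointer, so metadata lookup is a single subtraction. A worked layout example with a hypothetical 4K page and pointer value:

    // page_size_ = 0x1000, user pointer p = 0x7f0000001000 (page-aligned)
    //   GetHeader(p)   == (Header *)(p - 0x1000) == 0x7f0000000000 == map_beg
    //   GetMetaData(p) == GetHeader(p) + 1   // tool metadata occupies the
    //                                        // rest of the header page
    // map layout: [header page][user chunk, size rounded up to whole pages]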
-
-// This class implements a complete memory allocator by using two
-// internal allocators:
-// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
-// When allocating 2^x bytes it should return 2^x aligned chunk.
-// PrimaryAllocator is used via a local AllocatorCache.
-// SecondaryAllocator can allocate anything, but is not efficient.
-template <class PrimaryAllocator, class AllocatorCache,
- class SecondaryAllocator> // NOLINT
-class CombinedAllocator {
- public:
- void InitCommon(bool may_return_null) {
- primary_.Init();
- atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
- }
-
- void InitLinkerInitialized(bool may_return_null) {
- secondary_.InitLinkerInitialized(may_return_null);
- stats_.InitLinkerInitialized();
- InitCommon(may_return_null);
- }
-
- void Init(bool may_return_null) {
- secondary_.Init(may_return_null);
- stats_.Init();
- InitCommon(may_return_null);
- }
-
- void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
- bool cleared = false, bool check_rss_limit = false) {
- // Returning 0 on malloc(0) may break a lot of code.
- if (size == 0)
- size = 1;
- if (size + alignment < size)
- return ReturnNullOrDie();
- if (check_rss_limit && RssLimitIsExceeded())
- return ReturnNullOrDie();
- if (alignment > 8)
- size = RoundUpTo(size, alignment);
- void *res;
- bool from_primary = primary_.CanAllocate(size, alignment);
- if (from_primary)
- res = cache->Allocate(&primary_, primary_.ClassID(size));
- else
- res = secondary_.Allocate(&stats_, size, alignment);
- if (alignment > 8)
- CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
- if (cleared && res && from_primary)
- internal_bzero_aligned16(res, RoundUpTo(size, 16));
- return res;
- }
-
- bool MayReturnNull() const {
- return atomic_load(&may_return_null_, memory_order_acquire);
- }
-
- void *ReturnNullOrDie() {
- if (MayReturnNull())
- return nullptr;
- ReportAllocatorCannotReturnNull();
- }
-
- void SetMayReturnNull(bool may_return_null) {
- secondary_.SetMayReturnNull(may_return_null);
- atomic_store(&may_return_null_, may_return_null, memory_order_release);
- }
-
- bool RssLimitIsExceeded() {
- return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
- }
-
- void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
- atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded,
- memory_order_release);
- }
-
- void Deallocate(AllocatorCache *cache, void *p) {
- if (!p) return;
- if (primary_.PointerIsMine(p))
- cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
- else
- secondary_.Deallocate(&stats_, p);
- }
-
- void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
- uptr alignment) {
- if (!p)
- return Allocate(cache, new_size, alignment);
- if (!new_size) {
- Deallocate(cache, p);
- return nullptr;
- }
- CHECK(PointerIsMine(p));
- uptr old_size = GetActuallyAllocatedSize(p);
- uptr memcpy_size = Min(new_size, old_size);
- void *new_p = Allocate(cache, new_size, alignment);
- if (new_p)
- internal_memcpy(new_p, p, memcpy_size);
- Deallocate(cache, p);
- return new_p;
- }
-
- bool PointerIsMine(void *p) {
- if (primary_.PointerIsMine(p))
- return true;
- return secondary_.PointerIsMine(p);
- }
-
- bool FromPrimary(void *p) {
- return primary_.PointerIsMine(p);
- }
-
- void *GetMetaData(const void *p) {
- if (primary_.PointerIsMine(p))
- return primary_.GetMetaData(p);
- return secondary_.GetMetaData(p);
- }
-
- void *GetBlockBegin(const void *p) {
- if (primary_.PointerIsMine(p))
- return primary_.GetBlockBegin(p);
- return secondary_.GetBlockBegin(p);
- }
-
- // This function does the same as GetBlockBegin, but is much faster.
- // Must be called with the allocator locked.
- void *GetBlockBeginFastLocked(void *p) {
- if (primary_.PointerIsMine(p))
- return primary_.GetBlockBegin(p);
- return secondary_.GetBlockBeginFastLocked(p);
- }
-
- uptr GetActuallyAllocatedSize(void *p) {
- if (primary_.PointerIsMine(p))
- return primary_.GetActuallyAllocatedSize(p);
- return secondary_.GetActuallyAllocatedSize(p);
- }
-
- uptr TotalMemoryUsed() {
- return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
- }
-
- void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
-
- void InitCache(AllocatorCache *cache) {
- cache->Init(&stats_);
- }
-
- void DestroyCache(AllocatorCache *cache) {
- cache->Destroy(&primary_, &stats_);
- }
-
- void SwallowCache(AllocatorCache *cache) {
- cache->Drain(&primary_);
- }
-
- void GetStats(AllocatorStatCounters s) const {
- stats_.Get(s);
- }
-
- void PrintStats() {
- primary_.PrintStats();
- secondary_.PrintStats();
- }
-
- // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
- // introspection API.
- void ForceLock() {
- primary_.ForceLock();
- secondary_.ForceLock();
- }
-
- void ForceUnlock() {
- secondary_.ForceUnlock();
- primary_.ForceUnlock();
- }
-
- // Iterate over all existing chunks.
- // The allocator must be locked when calling this function.
- void ForEachChunk(ForEachChunkCallback callback, void *arg) {
- primary_.ForEachChunk(callback, arg);
- secondary_.ForEachChunk(callback, arg);
- }
-
- private:
- PrimaryAllocator primary_;
- SecondaryAllocator secondary_;
- AllocatorGlobalStats stats_;
- atomic_uint8_t may_return_null_;
- atomic_uint8_t rss_limit_is_exceeded_;
-};
-
// Returns true if calloc(size, n) should return 0 due to overflow in size*n.
bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n);
+#include "sanitizer_allocator_size_class_map.h"
+#include "sanitizer_allocator_stats.h"
+#include "sanitizer_allocator_primary64.h"
+#include "sanitizer_allocator_bytemap.h"
+#include "sanitizer_allocator_primary32.h"
+#include "sanitizer_allocator_local_cache.h"
+#include "sanitizer_allocator_secondary.h"
+#include "sanitizer_allocator_combined.h"
+
} // namespace __sanitizer
#endif // SANITIZER_ALLOCATOR_H
diff --git a/lib/sanitizer_common/sanitizer_allocator_bytemap.h b/lib/sanitizer_common/sanitizer_allocator_bytemap.h
new file mode 100644
index 000000000000..92472cdf5150
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator_bytemap.h
@@ -0,0 +1,103 @@
+//===-- sanitizer_allocator_bytemap.h ---------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// Maps integers in range [0, kSize) to u8 values.
+template<u64 kSize>
+class FlatByteMap {
+ public:
+ void TestOnlyInit() {
+ internal_memset(map_, 0, sizeof(map_));
+ }
+
+ void set(uptr idx, u8 val) {
+ CHECK_LT(idx, kSize);
+ CHECK_EQ(0U, map_[idx]);
+ map_[idx] = val;
+ }
+ u8 operator[] (uptr idx) {
+ CHECK_LT(idx, kSize);
+ // FIXME: CHECK may be too expensive here.
+ return map_[idx];
+ }
+ private:
+ u8 map_[kSize];
+};
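+// Example (illustrative): with kSize == (1 << 12), a FlatByteMap can back a
+// 32-bit allocator whose address space splits into 4096 regions;
+// set(i, class_id) is called once when region i is claimed, and
+// map[i] == 0 means region i is unused.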
+
+// TwoLevelByteMap maps integers in range [0, kSize1*kSize2) to u8 values.
+// It is implemented as a two-dimensional array: array of kSize1 pointers
+// to kSize2-byte arrays. The secondary arrays are mmaped on demand.
+// Each value is initially zero and can be set to something else only once.
+// Setting and getting values from multiple threads is safe w/o extra locking.
+template <u64 kSize1, u64 kSize2, class MapUnmapCallback = NoOpMapUnmapCallback>
+class TwoLevelByteMap {
+ public:
+ void TestOnlyInit() {
+ internal_memset(map1_, 0, sizeof(map1_));
+ mu_.Init();
+ }
+
+ void TestOnlyUnmap() {
+ for (uptr i = 0; i < kSize1; i++) {
+ u8 *p = Get(i);
+ if (!p) continue;
+ MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2);
+ UnmapOrDie(p, kSize2);
+ }
+ }
+
+ uptr size() const { return kSize1 * kSize2; }
+ uptr size1() const { return kSize1; }
+ uptr size2() const { return kSize2; }
+
+ void set(uptr idx, u8 val) {
+ CHECK_LT(idx, kSize1 * kSize2);
+ u8 *map2 = GetOrCreate(idx / kSize2);
+ CHECK_EQ(0U, map2[idx % kSize2]);
+ map2[idx % kSize2] = val;
+ }
+
+ u8 operator[] (uptr idx) const {
+ CHECK_LT(idx, kSize1 * kSize2);
+ u8 *map2 = Get(idx / kSize2);
+ if (!map2) return 0;
+ return map2[idx % kSize2];
+ }
+
+ private:
+ u8 *Get(uptr idx) const {
+ CHECK_LT(idx, kSize1);
+ return reinterpret_cast<u8 *>(
+ atomic_load(&map1_[idx], memory_order_acquire));
+ }
+
+ u8 *GetOrCreate(uptr idx) {
+ u8 *res = Get(idx);
+ if (!res) {
+ SpinMutexLock l(&mu_);
+ if (!(res = Get(idx))) {
+ res = (u8*)MmapOrDie(kSize2, "TwoLevelByteMap");
+ MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);
+ atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
+ memory_order_release);
+ }
+ }
+ return res;
+ }
+
+ atomic_uintptr_t map1_[kSize1];
+ StaticSpinMutex mu_;
+};
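+// Note: GetOrCreate() above is double-checked locking. The acquire load in
+// Get() pairs with the release store made under mu_, so a reader observes
+// either nullptr or a fully mapped second-level array; this is what makes
+// concurrent set() and operator[] safe without extra locking.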
+
diff --git a/lib/sanitizer_common/sanitizer_allocator_combined.h b/lib/sanitizer_common/sanitizer_allocator_combined.h
new file mode 100644
index 000000000000..19e1ae9b9f75
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -0,0 +1,233 @@
+//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// This class implements a complete memory allocator by using two
+// internal allocators:
+// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
+// When allocating 2^x bytes it should return a 2^x-aligned chunk.
+// PrimaryAllocator is used via a local AllocatorCache.
+// SecondaryAllocator can allocate anything, but is not efficient.
+template <class PrimaryAllocator, class AllocatorCache,
+ class SecondaryAllocator> // NOLINT
+class CombinedAllocator {
+ public:
+ void InitCommon(bool may_return_null, s32 release_to_os_interval_ms) {
+ primary_.Init(release_to_os_interval_ms);
+ atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
+ }
+
+ void InitLinkerInitialized(
+ bool may_return_null, s32 release_to_os_interval_ms) {
+ secondary_.InitLinkerInitialized(may_return_null);
+ stats_.InitLinkerInitialized();
+ InitCommon(may_return_null, release_to_os_interval_ms);
+ }
+
+ void Init(bool may_return_null, s32 release_to_os_interval_ms) {
+ secondary_.Init(may_return_null);
+ stats_.Init();
+ InitCommon(may_return_null, release_to_os_interval_ms);
+ }
+
+ void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
+ bool cleared = false, bool check_rss_limit = false) {
+ // Returning 0 on malloc(0) may break a lot of code.
+ if (size == 0)
+ size = 1;
+ if (size + alignment < size) return ReturnNullOrDieOnBadRequest();
+ if (check_rss_limit && RssLimitIsExceeded()) return ReturnNullOrDieOnOOM();
+ uptr original_size = size;
+ // If alignment requirements are to be fulfilled by the frontend allocator
+ // rather than by the primary or secondary, passing an alignment lower than
+ // or equal to 8 will prevent any further rounding up, as well as the later
+ // alignment check.
+ if (alignment > 8)
+ size = RoundUpTo(size, alignment);
+ void *res;
+ bool from_primary = primary_.CanAllocate(size, alignment);
+ // The primary allocator should return a 2^x aligned allocation when
+ // requested 2^x bytes, hence using the rounded up 'size' when being
+ // serviced by the primary (this is no longer true when the primary is
+ // using a non-fixed base address). The secondary takes care of the
+ // alignment without such requirement, and allocating 'size' would use
+ // extraneous memory, so we employ 'original_size'.
+ if (from_primary)
+ res = cache->Allocate(&primary_, primary_.ClassID(size));
+ else
+ res = secondary_.Allocate(&stats_, original_size, alignment);
+ if (alignment > 8)
+ CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
+ // When serviced by the secondary, the chunk comes from an mmap allocation
+ // and will be zeroed out anyway. We only need to clear out the chunk if
+ // it was serviced by the primary, hence using the rounded up 'size'.
+ if (cleared && res && from_primary)
+ internal_bzero_aligned16(res, RoundUpTo(size, 16));
+ return res;
+ }
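+ // Example (illustrative): Allocate(cache, 24, 32) rounds the size up to
+ // 32 and serves it from the 32-byte class, which yields a 32-byte aligned
+ // chunk under the fixed-base assumption discussed above, satisfying the
+ // alignment CHECK in Allocate(). With alignment <= 8 neither the rounding
+ // nor the check applies.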
+
+ bool MayReturnNull() const {
+ return atomic_load(&may_return_null_, memory_order_acquire);
+ }
+
+ void *ReturnNullOrDieOnBadRequest() {
+ if (MayReturnNull())
+ return nullptr;
+ ReportAllocatorCannotReturnNull(false);
+ }
+
+ void *ReturnNullOrDieOnOOM() {
+ if (MayReturnNull()) return nullptr;
+ ReportAllocatorCannotReturnNull(true);
+ }
+
+ void SetMayReturnNull(bool may_return_null) {
+ secondary_.SetMayReturnNull(may_return_null);
+ atomic_store(&may_return_null_, may_return_null, memory_order_release);
+ }
+
+ s32 ReleaseToOSIntervalMs() const {
+ return primary_.ReleaseToOSIntervalMs();
+ }
+
+ void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
+ primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
+ }
+
+ bool RssLimitIsExceeded() {
+ return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
+ }
+
+ void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
+ atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded,
+ memory_order_release);
+ }
+
+ void Deallocate(AllocatorCache *cache, void *p) {
+ if (!p) return;
+ if (primary_.PointerIsMine(p))
+ cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
+ else
+ secondary_.Deallocate(&stats_, p);
+ }
+
+ void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
+ uptr alignment) {
+ if (!p)
+ return Allocate(cache, new_size, alignment);
+ if (!new_size) {
+ Deallocate(cache, p);
+ return nullptr;
+ }
+ CHECK(PointerIsMine(p));
+ uptr old_size = GetActuallyAllocatedSize(p);
+ uptr memcpy_size = Min(new_size, old_size);
+ void *new_p = Allocate(cache, new_size, alignment);
+ if (new_p)
+ internal_memcpy(new_p, p, memcpy_size);
+ Deallocate(cache, p);
+ return new_p;
+ }
+
+ bool PointerIsMine(void *p) {
+ if (primary_.PointerIsMine(p))
+ return true;
+ return secondary_.PointerIsMine(p);
+ }
+
+ bool FromPrimary(void *p) {
+ return primary_.PointerIsMine(p);
+ }
+
+ void *GetMetaData(const void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetMetaData(p);
+ return secondary_.GetMetaData(p);
+ }
+
+ void *GetBlockBegin(const void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetBlockBegin(p);
+ return secondary_.GetBlockBegin(p);
+ }
+
+ // This function does the same as GetBlockBegin, but is much faster.
+ // Must be called with the allocator locked.
+ void *GetBlockBeginFastLocked(void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetBlockBegin(p);
+ return secondary_.GetBlockBeginFastLocked(p);
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetActuallyAllocatedSize(p);
+ return secondary_.GetActuallyAllocatedSize(p);
+ }
+
+ uptr TotalMemoryUsed() {
+ return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
+ }
+
+ void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
+
+ void InitCache(AllocatorCache *cache) {
+ cache->Init(&stats_);
+ }
+
+ void DestroyCache(AllocatorCache *cache) {
+ cache->Destroy(&primary_, &stats_);
+ }
+
+ void SwallowCache(AllocatorCache *cache) {
+ cache->Drain(&primary_);
+ }
+
+ void GetStats(AllocatorStatCounters s) const {
+ stats_.Get(s);
+ }
+
+ void PrintStats() {
+ primary_.PrintStats();
+ secondary_.PrintStats();
+ }
+
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ primary_.ForceLock();
+ secondary_.ForceLock();
+ }
+
+ void ForceUnlock() {
+ secondary_.ForceUnlock();
+ primary_.ForceUnlock();
+ }
+
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ primary_.ForEachChunk(callback, arg);
+ secondary_.ForEachChunk(callback, arg);
+ }
+
+ private:
+ PrimaryAllocator primary_;
+ SecondaryAllocator secondary_;
+ AllocatorGlobalStats stats_;
+ atomic_uint8_t may_return_null_;
+ atomic_uint8_t rss_limit_is_exceeded_;
+};
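+// Illustrative composition (hypothetical parameter values, not taken from
+// any particular tool):
+//   struct ExampleParams {
+//     static const uptr kSpaceBeg = ~(uptr)0;  // Pick the base via mmap.
+//     static const uptr kSpaceSize = 0x40000000000ULL;  // 4T.
+//     static const uptr kMetadataSize = 0;
+//     typedef DefaultSizeClassMap SizeClassMap;
+//     typedef NoOpMapUnmapCallback MapUnmapCallback;
+//     static const uptr kFlags = 0;
+//   };
+//   typedef SizeClassAllocator64<ExampleParams> Primary;
+//   typedef SizeClassAllocator64LocalCache<Primary> Cache;
+//   typedef LargeMmapAllocator<> Secondary;
+//   typedef CombinedAllocator<Primary, Cache, Secondary> Allocator;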
+
diff --git a/lib/sanitizer_common/sanitizer_allocator_interface.h b/lib/sanitizer_common/sanitizer_allocator_interface.h
index 797c38a79885..5ff6edba0a1a 100644
--- a/lib/sanitizer_common/sanitizer_allocator_interface.h
+++ b/lib/sanitizer_common/sanitizer_allocator_interface.h
@@ -37,6 +37,10 @@ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ void __sanitizer_malloc_hook(void *ptr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ void __sanitizer_free_hook(void *ptr);
+
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_print_memory_profile(int top_percent);
} // extern "C"
#endif // SANITIZER_ALLOCATOR_INTERFACE_H
diff --git a/lib/sanitizer_common/sanitizer_allocator_internal.h b/lib/sanitizer_common/sanitizer_allocator_internal.h
index a7ea454ff17b..e939cbe01c3c 100644
--- a/lib/sanitizer_common/sanitizer_allocator_internal.h
+++ b/lib/sanitizer_common/sanitizer_allocator_internal.h
@@ -61,8 +61,8 @@ enum InternalAllocEnum {
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
- InternalAllocEnum) {
- return InternalAlloc(size);
+ __sanitizer::InternalAllocEnum) {
+ return __sanitizer::InternalAlloc(size);
}
#endif // SANITIZER_ALLOCATOR_INTERNAL_H
diff --git a/lib/sanitizer_common/sanitizer_allocator_local_cache.h b/lib/sanitizer_common/sanitizer_allocator_local_cache.h
new file mode 100644
index 000000000000..e1172e0c2820
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator_local_cache.h
@@ -0,0 +1,249 @@
+//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// Objects of this type should be used as local caches for SizeClassAllocator64
+// or SizeClassAllocator32. Since the typical use of this class is to have one
+// object per thread in TLS, it has to be POD.
+template<class SizeClassAllocator>
+struct SizeClassAllocatorLocalCache
+ : SizeClassAllocator::AllocatorCache {
+};
+
+// Cache used by SizeClassAllocator64.
+template <class SizeClassAllocator>
+struct SizeClassAllocator64LocalCache {
+ typedef SizeClassAllocator Allocator;
+ static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
+ typedef typename Allocator::SizeClassMapT SizeClassMap;
+ typedef typename Allocator::CompactPtrT CompactPtrT;
+
+ void Init(AllocatorGlobalStats *s) {
+ stats_.Init();
+ if (s)
+ s->Register(&stats_);
+ }
+
+ void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
+ Drain(allocator);
+ if (s)
+ s->Unregister(&stats_);
+ }
+
+ void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
+ PerClass *c = &per_class_[class_id];
+ if (UNLIKELY(c->count == 0))
+ Refill(c, allocator, class_id);
+ CHECK_GT(c->count, 0);
+ CompactPtrT chunk = c->chunks[--c->count];
+ void *res = reinterpret_cast<void *>(allocator->CompactPtrToPointer(
+ allocator->GetRegionBeginBySizeClass(class_id), chunk));
+ return res;
+ }
+
+ void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ // If the first allocator call on a new thread is a deallocation, then
+ // max_count will be zero, leading to check failure.
+ InitCache();
+ stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
+ PerClass *c = &per_class_[class_id];
+ CHECK_NE(c->max_count, 0UL);
+ if (UNLIKELY(c->count == c->max_count))
+ Drain(c, allocator, class_id, c->max_count / 2);
+ CompactPtrT chunk = allocator->PointerToCompactPtr(
+ allocator->GetRegionBeginBySizeClass(class_id),
+ reinterpret_cast<uptr>(p));
+ c->chunks[c->count++] = chunk;
+ }
+
+ void Drain(SizeClassAllocator *allocator) {
+ for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
+ PerClass *c = &per_class_[class_id];
+ while (c->count > 0)
+ Drain(c, allocator, class_id, c->count);
+ }
+ }
+
+ // private:
+ struct PerClass {
+ u32 count;
+ u32 max_count;
+ CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
+ };
+ PerClass per_class_[kNumClasses];
+ AllocatorStats stats_;
+
+ void InitCache() {
+ if (per_class_[1].max_count)
+ return;
+ for (uptr i = 0; i < kNumClasses; i++) {
+ PerClass *c = &per_class_[i];
+ c->max_count = 2 * SizeClassMap::MaxCachedHint(i);
+ }
+ }
+
+ NOINLINE void Refill(PerClass *c, SizeClassAllocator *allocator,
+ uptr class_id) {
+ InitCache();
+ uptr num_requested_chunks = SizeClassMap::MaxCachedHint(class_id);
+ allocator->GetFromAllocator(&stats_, class_id, c->chunks,
+ num_requested_chunks);
+ c->count = num_requested_chunks;
+ }
+
+ NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
+ uptr count) {
+ InitCache();
+ CHECK_GE(c->count, count);
+ uptr first_idx_to_drain = c->count - count;
+ c->count -= count;
+ allocator->ReturnToAllocator(&stats_, class_id,
+ &c->chunks[first_idx_to_drain], count);
+ }
+};
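+// Sizing note: Refill() fetches MaxCachedHint(class_id) chunks while
+// max_count is twice that, and a full cache drains only half. This
+// hysteresis keeps a thread whose live chunk count oscillates around the
+// cache capacity from hitting the central free list on every call.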
+
+// Cache used by SizeClassAllocator32.
+template <class SizeClassAllocator>
+struct SizeClassAllocator32LocalCache {
+ typedef SizeClassAllocator Allocator;
+ typedef typename Allocator::TransferBatch TransferBatch;
+ static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
+
+ void Init(AllocatorGlobalStats *s) {
+ stats_.Init();
+ if (s)
+ s->Register(&stats_);
+ }
+
+ void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
+ Drain(allocator);
+ if (s)
+ s->Unregister(&stats_);
+ }
+
+ void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
+ PerClass *c = &per_class_[class_id];
+ if (UNLIKELY(c->count == 0))
+ Refill(allocator, class_id);
+ void *res = c->batch[--c->count];
+ PREFETCH(c->batch[c->count - 1]);
+ return res;
+ }
+
+ void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ // If the first allocator call on a new thread is a deallocation, then
+ // max_count will be zero, leading to check failure.
+ InitCache();
+ stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
+ PerClass *c = &per_class_[class_id];
+ CHECK_NE(c->max_count, 0UL);
+ if (UNLIKELY(c->count == c->max_count))
+ Drain(allocator, class_id);
+ c->batch[c->count++] = p;
+ }
+
+ void Drain(SizeClassAllocator *allocator) {
+ for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
+ PerClass *c = &per_class_[class_id];
+ while (c->count > 0)
+ Drain(allocator, class_id);
+ }
+ }
+
+ // private:
+ typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
+ struct PerClass {
+ uptr count;
+ uptr max_count;
+ void *batch[2 * TransferBatch::kMaxNumCached];
+ };
+ PerClass per_class_[kNumClasses];
+ AllocatorStats stats_;
+
+ void InitCache() {
+ if (per_class_[1].max_count)
+ return;
+ for (uptr i = 0; i < kNumClasses; i++) {
+ PerClass *c = &per_class_[i];
+ c->max_count = 2 * TransferBatch::MaxCached(i);
+ }
+ }
+
+ // TransferBatch class is declared in SizeClassAllocator.
+ // We transfer chunks between central and thread-local free lists in batches.
+ // For small size classes we allocate batches separately.
+ // For large size classes we may use one of the chunks to store the batch.
+ // sizeof(TransferBatch) must be a power of 2 for more efficient allocation.
+ static uptr SizeClassForTransferBatch(uptr class_id) {
+ if (Allocator::ClassIdToSize(class_id) <
+ TransferBatch::AllocationSizeRequiredForNElements(
+ TransferBatch::MaxCached(class_id)))
+ return SizeClassMap::ClassID(sizeof(TransferBatch));
+ return 0;
+ }
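+ // Example (illustrative): if class_id holds 32-byte chunks but a full
+ // TransferBatch for it occupies 512 bytes, batches are carved from the
+ // size class that fits sizeof(TransferBatch); for classes whose chunks
+ // already fit a batch, 0 is returned and a chunk stores its own batch.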
+
+ // Returns a TransferBatch suitable for class_id.
+ // For small size classes allocates the batch from the allocator.
+ // For large size classes simply returns b.
+ TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
+ TransferBatch *b) {
+ if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
+ return (TransferBatch*)Allocate(allocator, batch_class_id);
+ return b;
+ }
+
+ // Destroys TransferBatch b.
+ // For small size classes deallocates b to the allocator.
+ // Does nothing for large size classes.
+ void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
+ TransferBatch *b) {
+ if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
+ Deallocate(allocator, batch_class_id, b);
+ }
+
+ NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
+ InitCache();
+ PerClass *c = &per_class_[class_id];
+ TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
+ CHECK_GT(b->Count(), 0);
+ b->CopyToArray(c->batch);
+ c->count = b->Count();
+ DestroyBatch(class_id, allocator, b);
+ }
+
+ NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
+ InitCache();
+ PerClass *c = &per_class_[class_id];
+ uptr cnt = Min(c->max_count / 2, c->count);
+ uptr first_idx_to_drain = c->count - cnt;
+ TransferBatch *b = CreateBatch(
+ class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
+ b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
+ &c->batch[first_idx_to_drain], cnt);
+ c->count -= cnt;
+ allocator->DeallocateBatch(&stats_, class_id, b);
+ }
+};
+
diff --git a/lib/sanitizer_common/sanitizer_allocator_primary32.h b/lib/sanitizer_common/sanitizer_allocator_primary32.h
new file mode 100644
index 000000000000..2882afd1fe1d
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -0,0 +1,310 @@
+//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;
+
+// SizeClassAllocator32 -- allocator for 32-bit address space.
+// This allocator can theoretically be used on 64-bit arch, but there it is less
+// efficient than SizeClassAllocator64.
+//
+// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
+// be returned by MmapOrDie().
+//
+// Region:
+// a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
+// Since the regions are aligned by kRegionSize, there are exactly
+// kNumPossibleRegions possible regions in the address space and so we keep
+// a ByteMap possible_regions to store the size classes of each Region.
+// 0 size class means the region is not used by the allocator.
+//
+// One Region is used to allocate chunks of a single size class.
+// A Region looks like this:
+// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
+//
+// In order to avoid false sharing the objects of this class should be
+// cache-line aligned.
+template <const uptr kSpaceBeg, const u64 kSpaceSize,
+ const uptr kMetadataSize, class SizeClassMap,
+ const uptr kRegionSizeLog,
+ class ByteMap,
+ class MapUnmapCallback = NoOpMapUnmapCallback>
+class SizeClassAllocator32 {
+ public:
+ struct TransferBatch {
+ static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
+ void SetFromArray(uptr region_beg_unused, void *batch[], uptr count) {
+ count_ = count;
+ CHECK_LE(count_, kMaxNumCached);
+ for (uptr i = 0; i < count; i++)
+ batch_[i] = batch[i];
+ }
+ uptr Count() const { return count_; }
+ void Clear() { count_ = 0; }
+ void Add(void *ptr) {
+ batch_[count_++] = ptr;
+ CHECK_LE(count_, kMaxNumCached);
+ }
+ void CopyToArray(void *to_batch[]) {
+ for (uptr i = 0, n = Count(); i < n; i++)
+ to_batch[i] = batch_[i];
+ }
+
+ // How much memory do we need for a batch containing n elements.
+ static uptr AllocationSizeRequiredForNElements(uptr n) {
+ return sizeof(uptr) * 2 + sizeof(void *) * n;
+ }
+ static uptr MaxCached(uptr class_id) {
+ return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(class_id));
+ }
+
+ TransferBatch *next;
+
+ private:
+ uptr count_;
+ void *batch_[kMaxNumCached];
+ };
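+ // Size check example (illustrative, 64-bit): with kMaxNumCachedHint == 64,
+ // kMaxNumCached is 62 and sizeof(TransferBatch) == 2 * 8 + 62 * 8 == 512,
+ // the power of two that the COMPILER_CHECKs below rely on.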
+
+ static const uptr kBatchSize = sizeof(TransferBatch);
+ COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
+ COMPILER_CHECK(sizeof(TransferBatch) ==
+ SizeClassMap::kMaxNumCachedHint * sizeof(uptr));
+
+ static uptr ClassIdToSize(uptr class_id) {
+ return SizeClassMap::Size(class_id);
+ }
+
+ typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
+ SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT;
+ typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;
+
+ void Init(s32 release_to_os_interval_ms) {
+ possible_regions.TestOnlyInit();
+ internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
+ }
+
+ s32 ReleaseToOSIntervalMs() const {
+ return kReleaseToOSIntervalNever;
+ }
+
+ void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
+ // This is empty here. Currently only implemented in 64-bit allocator.
+ }
+
+ void *MapWithCallback(uptr size) {
+ size = RoundUpTo(size, GetPageSizeCached());
+ void *res = MmapOrDie(size, "SizeClassAllocator32");
+ MapUnmapCallback().OnMap((uptr)res, size);
+ return res;
+ }
+
+ void UnmapWithCallback(uptr beg, uptr size) {
+ MapUnmapCallback().OnUnmap(beg, size);
+ UnmapOrDie(reinterpret_cast<void *>(beg), size);
+ }
+
+ static bool CanAllocate(uptr size, uptr alignment) {
+ return size <= SizeClassMap::kMaxSize &&
+ alignment <= SizeClassMap::kMaxSize;
+ }
+
+ void *GetMetaData(const void *p) {
+ CHECK(PointerIsMine(p));
+ uptr mem = reinterpret_cast<uptr>(p);
+ uptr beg = ComputeRegionBeg(mem);
+ uptr size = ClassIdToSize(GetSizeClass(p));
+ u32 offset = mem - beg;
+ uptr n = offset / (u32)size; // 32-bit division
+ uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
+ return reinterpret_cast<void*>(meta);
+ }
+
+ NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
+ uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ SizeClassInfo *sci = GetSizeClassInfo(class_id);
+ SpinMutexLock l(&sci->mutex);
+ if (sci->free_list.empty())
+ PopulateFreeList(stat, c, sci, class_id);
+ CHECK(!sci->free_list.empty());
+ TransferBatch *b = sci->free_list.front();
+ sci->free_list.pop_front();
+ return b;
+ }
+
+ NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
+ TransferBatch *b) {
+ CHECK_LT(class_id, kNumClasses);
+ SizeClassInfo *sci = GetSizeClassInfo(class_id);
+ SpinMutexLock l(&sci->mutex);
+ CHECK_GT(b->Count(), 0);
+ sci->free_list.push_front(b);
+ }
+
+ uptr GetRegionBeginBySizeClass(uptr class_id) { return 0; }
+
+ bool PointerIsMine(const void *p) {
+ uptr mem = reinterpret_cast<uptr>(p);
+ if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
+ return false;
+ return GetSizeClass(p) != 0;
+ }
+
+ uptr GetSizeClass(const void *p) {
+ return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
+ }
+
+ void *GetBlockBegin(const void *p) {
+ CHECK(PointerIsMine(p));
+ uptr mem = reinterpret_cast<uptr>(p);
+ uptr beg = ComputeRegionBeg(mem);
+ uptr size = ClassIdToSize(GetSizeClass(p));
+ u32 offset = mem - beg;
+ u32 n = offset / (u32)size; // 32-bit division
+ uptr res = beg + (n * (u32)size);
+ return reinterpret_cast<void*>(res);
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ CHECK(PointerIsMine(p));
+ return ClassIdToSize(GetSizeClass(p));
+ }
+
+ uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
+
+ uptr TotalMemoryUsed() {
+ // No need to lock here.
+ uptr res = 0;
+ for (uptr i = 0; i < kNumPossibleRegions; i++)
+ if (possible_regions[i])
+ res += kRegionSize;
+ return res;
+ }
+
+ void TestOnlyUnmap() {
+ for (uptr i = 0; i < kNumPossibleRegions; i++)
+ if (possible_regions[i])
+ UnmapWithCallback((i * kRegionSize), kRegionSize);
+ }
+
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ for (uptr i = 0; i < kNumClasses; i++) {
+ GetSizeClassInfo(i)->mutex.Lock();
+ }
+ }
+
+ void ForceUnlock() {
+ for (int i = kNumClasses - 1; i >= 0; i--) {
+ GetSizeClassInfo(i)->mutex.Unlock();
+ }
+ }
+
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ for (uptr region = 0; region < kNumPossibleRegions; region++)
+ if (possible_regions[region]) {
+ uptr chunk_size = ClassIdToSize(possible_regions[region]);
+ uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
+ uptr region_beg = region * kRegionSize;
+ for (uptr chunk = region_beg;
+ chunk < region_beg + max_chunks_in_region * chunk_size;
+ chunk += chunk_size) {
+ // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
+ callback(chunk, arg);
+ }
+ }
+ }
+
+ void PrintStats() {
+ }
+
+ static uptr AdditionalSize() {
+ return 0;
+ }
+
+ typedef SizeClassMap SizeClassMapT;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses;
+
+ private:
+ static const uptr kRegionSize = 1 << kRegionSizeLog;
+ static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
+
+ struct SizeClassInfo {
+ SpinMutex mutex;
+ IntrusiveList<TransferBatch> free_list;
+ char padding[kCacheLineSize - sizeof(uptr) -
+ sizeof(IntrusiveList<TransferBatch>)];
+ };
+ COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);
+
+ uptr ComputeRegionId(uptr mem) {
+ uptr res = mem >> kRegionSizeLog;
+ CHECK_LT(res, kNumPossibleRegions);
+ return res;
+ }
+
+ uptr ComputeRegionBeg(uptr mem) {
+ return mem & ~(kRegionSize - 1);
+ }
+
+ uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
+ "SizeClassAllocator32"));
+ MapUnmapCallback().OnMap(res, kRegionSize);
+ stat->Add(AllocatorStatMapped, kRegionSize);
+ CHECK_EQ(0U, (res & (kRegionSize - 1)));
+ possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
+ return res;
+ }
+
+ SizeClassInfo *GetSizeClassInfo(uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ return &size_class_info_array[class_id];
+ }
+
+ void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
+ SizeClassInfo *sci, uptr class_id) {
+ uptr size = ClassIdToSize(class_id);
+ uptr reg = AllocateRegion(stat, class_id);
+ uptr n_chunks = kRegionSize / (size + kMetadataSize);
+ uptr max_count = TransferBatch::MaxCached(class_id);
+ TransferBatch *b = nullptr;
+ for (uptr i = reg; i < reg + n_chunks * size; i += size) {
+ if (!b) {
+ b = c->CreateBatch(class_id, this, (TransferBatch*)i);
+ b->Clear();
+ }
+ b->Add((void*)i);
+ if (b->Count() == max_count) {
+ CHECK_GT(b->Count(), 0);
+ sci->free_list.push_back(b);
+ b = nullptr;
+ }
+ }
+ if (b) {
+ CHECK_GT(b->Count(), 0);
+ sci->free_list.push_back(b);
+ }
+ }
+
+ ByteMap possible_regions;
+ SizeClassInfo size_class_info_array[kNumClasses];
+};
+
+
diff --git a/lib/sanitizer_common/sanitizer_allocator_primary64.h b/lib/sanitizer_common/sanitizer_allocator_primary64.h
new file mode 100644
index 000000000000..f2d94a07a523
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -0,0 +1,522 @@
+//===-- sanitizer_allocator_primary64.h -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+template<class SizeClassAllocator> struct SizeClassAllocator64LocalCache;
+
+// SizeClassAllocator64 -- allocator for 64-bit address space.
+// The template parameter Params is a class containing the actual parameters.
+//
+// Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.
+// If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically by mmap.
+// Otherwise SpaceBeg=kSpaceBeg (fixed address).
+// kSpaceSize is a power of two.
+// At the beginning the entire space is mprotect-ed, then small parts of it
+// are mapped on demand.
+//
+// Region: a part of Space dedicated to a single size class.
+// There are kNumClasses Regions of equal size.
+//
+// UserChunk: a piece of memory returned to user.
+// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
+
+// FreeArray is an array of free-d chunks (stored as 4-byte offsets).
+//
+// A Region looks like this:
+// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1 FreeArray
+
+struct SizeClassAllocator64FlagMasks { // Bit masks.
+ enum {
+ kRandomShuffleChunks = 1,
+ };
+};
+
+template <class Params>
+class SizeClassAllocator64 {
+ public:
+ static const uptr kSpaceBeg = Params::kSpaceBeg;
+ static const uptr kSpaceSize = Params::kSpaceSize;
+ static const uptr kMetadataSize = Params::kMetadataSize;
+ typedef typename Params::SizeClassMap SizeClassMap;
+ typedef typename Params::MapUnmapCallback MapUnmapCallback;
+
+ static const bool kRandomShuffleChunks =
+ Params::kFlags & SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
+
+ typedef SizeClassAllocator64<Params> ThisT;
+ typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache;
+
+ // When we know the size class (the region base) we can represent a pointer
+ // as a 4-byte integer (offset from the region start shifted right by 4).
+ typedef u32 CompactPtrT;
+ static const uptr kCompactPtrScale = 4;
+ CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) {
+ return static_cast<CompactPtrT>((ptr - base) >> kCompactPtrScale);
+ }
+ uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) {
+ return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
+ }
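+ // Worked example (illustrative): for ptr == base + 0x120,
+ // PointerToCompactPtr(base, ptr) == 0x12, and CompactPtrToPointer(base,
+ // 0x12) restores ptr. The round trip is lossless assuming chunk offsets
+ // are multiples of 16 (kCompactPtrScale == 4).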
+
+ void Init(s32 release_to_os_interval_ms) {
+ uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
+ if (kUsingConstantSpaceBeg) {
+ CHECK_EQ(kSpaceBeg, reinterpret_cast<uptr>(
+ MmapFixedNoAccess(kSpaceBeg, TotalSpaceSize)));
+ } else {
+ NonConstSpaceBeg =
+ reinterpret_cast<uptr>(MmapNoAccess(TotalSpaceSize));
+ CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
+ }
+ SetReleaseToOSIntervalMs(release_to_os_interval_ms);
+ MapWithCallback(SpaceEnd(), AdditionalSize());
+ }
+
+ s32 ReleaseToOSIntervalMs() const {
+ return atomic_load(&release_to_os_interval_ms_, memory_order_relaxed);
+ }
+
+ void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
+ atomic_store(&release_to_os_interval_ms_, release_to_os_interval_ms,
+ memory_order_relaxed);
+ }
+
+ void MapWithCallback(uptr beg, uptr size) {
+ CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
+ MapUnmapCallback().OnMap(beg, size);
+ }
+
+ void UnmapWithCallback(uptr beg, uptr size) {
+ MapUnmapCallback().OnUnmap(beg, size);
+ UnmapOrDie(reinterpret_cast<void *>(beg), size);
+ }
+
+ static bool CanAllocate(uptr size, uptr alignment) {
+ return size <= SizeClassMap::kMaxSize &&
+ alignment <= SizeClassMap::kMaxSize;
+ }
+
+ NOINLINE void ReturnToAllocator(AllocatorStats *stat, uptr class_id,
+ const CompactPtrT *chunks, uptr n_chunks) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ CompactPtrT *free_array = GetFreeArray(region_beg);
+
+ BlockingMutexLock l(&region->mutex);
+ uptr old_num_chunks = region->num_freed_chunks;
+ uptr new_num_freed_chunks = old_num_chunks + n_chunks;
+ EnsureFreeArraySpace(region, region_beg, new_num_freed_chunks);
+ for (uptr i = 0; i < n_chunks; i++)
+ free_array[old_num_chunks + i] = chunks[i];
+ region->num_freed_chunks = new_num_freed_chunks;
+ region->n_freed += n_chunks;
+
+ MaybeReleaseToOS(class_id);
+ }
+
+ NOINLINE void GetFromAllocator(AllocatorStats *stat, uptr class_id,
+ CompactPtrT *chunks, uptr n_chunks) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ CompactPtrT *free_array = GetFreeArray(region_beg);
+
+ BlockingMutexLock l(&region->mutex);
+ if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
+ PopulateFreeArray(stat, class_id, region,
+ n_chunks - region->num_freed_chunks);
+ CHECK_GE(region->num_freed_chunks, n_chunks);
+ }
+ region->num_freed_chunks -= n_chunks;
+ uptr base_idx = region->num_freed_chunks;
+ for (uptr i = 0; i < n_chunks; i++)
+ chunks[i] = free_array[base_idx + i];
+ region->n_allocated += n_chunks;
+ }
+
+
+ bool PointerIsMine(const void *p) {
+ uptr P = reinterpret_cast<uptr>(p);
+ if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
+ return P / kSpaceSize == kSpaceBeg / kSpaceSize;
+ return P >= SpaceBeg() && P < SpaceEnd();
+ }
+
+ uptr GetRegionBegin(const void *p) {
+ if (kUsingConstantSpaceBeg)
+ return reinterpret_cast<uptr>(p) & ~(kRegionSize - 1);
+ uptr space_beg = SpaceBeg();
+ return ((reinterpret_cast<uptr>(p) - space_beg) & ~(kRegionSize - 1)) +
+ space_beg;
+ }
+
+ uptr GetRegionBeginBySizeClass(uptr class_id) {
+ return SpaceBeg() + kRegionSize * class_id;
+ }
+
+ uptr GetSizeClass(const void *p) {
+ if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
+ return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded;
+ return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) %
+ kNumClassesRounded;
+ }
+
+ void *GetBlockBegin(const void *p) {
+ uptr class_id = GetSizeClass(p);
+ uptr size = ClassIdToSize(class_id);
+ if (!size) return nullptr;
+ uptr chunk_idx = GetChunkIdx((uptr)p, size);
+ uptr reg_beg = GetRegionBegin(p);
+ uptr beg = chunk_idx * size;
+ uptr next_beg = beg + size;
+ if (class_id >= kNumClasses) return nullptr;
+ RegionInfo *region = GetRegionInfo(class_id);
+ if (region->mapped_user >= next_beg)
+ return reinterpret_cast<void*>(reg_beg + beg);
+ return nullptr;
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ CHECK(PointerIsMine(p));
+ return ClassIdToSize(GetSizeClass(p));
+ }
+
+ uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
+
+ void *GetMetaData(const void *p) {
+ uptr class_id = GetSizeClass(p);
+ uptr size = ClassIdToSize(class_id);
+ uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
+ uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ return reinterpret_cast<void *>(GetMetadataEnd(region_beg) -
+ (1 + chunk_idx) * kMetadataSize);
+ }
+
+ uptr TotalMemoryUsed() {
+ uptr res = 0;
+ for (uptr i = 0; i < kNumClasses; i++)
+ res += GetRegionInfo(i)->allocated_user;
+ return res;
+ }
+
+ // Test-only.
+ void TestOnlyUnmap() {
+ UnmapWithCallback(SpaceBeg(), kSpaceSize + AdditionalSize());
+ }
+
+ static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats,
+ uptr stats_size) {
+ for (uptr class_id = 0; class_id < stats_size; class_id++)
+ if (stats[class_id] == start)
+ stats[class_id] = rss;
+ }
+
+ void PrintStats(uptr class_id, uptr rss) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ if (region->mapped_user == 0) return;
+ uptr in_use = region->n_allocated - region->n_freed;
+ uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id);
+ Printf(
+ " %02zd (%zd): mapped: %zdK allocs: %zd frees: %zd inuse: %zd "
+ "num_freed_chunks %zd"
+ " avail: %zd rss: %zdK releases: %zd\n",
+ class_id, ClassIdToSize(class_id), region->mapped_user >> 10,
+ region->n_allocated, region->n_freed, in_use,
+ region->num_freed_chunks, avail_chunks, rss >> 10,
+ region->rtoi.num_releases);
+ }
+
+ void PrintStats() {
+ uptr total_mapped = 0;
+ uptr n_allocated = 0;
+ uptr n_freed = 0;
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ total_mapped += region->mapped_user;
+ n_allocated += region->n_allocated;
+ n_freed += region->n_freed;
+ }
+ Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
+ "remains %zd\n",
+ total_mapped >> 20, n_allocated, n_allocated - n_freed);
+ uptr rss_stats[kNumClasses];
+ for (uptr class_id = 0; class_id < kNumClasses; class_id++)
+ rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id;
+ GetMemoryProfile(FillMemoryProfile, rss_stats, kNumClasses);
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++)
+ PrintStats(class_id, rss_stats[class_id]);
+ }
+
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ for (uptr i = 0; i < kNumClasses; i++) {
+ GetRegionInfo(i)->mutex.Lock();
+ }
+ }
+
+ void ForceUnlock() {
+ for (int i = (int)kNumClasses - 1; i >= 0; i--) {
+ GetRegionInfo(i)->mutex.Unlock();
+ }
+ }
+
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ uptr chunk_size = ClassIdToSize(class_id);
+ uptr region_beg = SpaceBeg() + class_id * kRegionSize;
+ for (uptr chunk = region_beg;
+ chunk < region_beg + region->allocated_user;
+ chunk += chunk_size) {
+ // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
+ callback(chunk, arg);
+ }
+ }
+ }
+
+ static uptr ClassIdToSize(uptr class_id) {
+ return SizeClassMap::Size(class_id);
+ }
+
+ static uptr AdditionalSize() {
+ return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
+ GetPageSizeCached());
+ }
+
+ typedef SizeClassMap SizeClassMapT;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses;
+ static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
+
+ private:
+ static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
+ // FreeArray is the array of free-d chunks (stored as 4-byte offsets).
+ // In the worst case it may require kRegionSize/SizeClassMap::kMinSize
+ // elements, but in reality this will not happen. For simplicity we
+ // dedicate 1/8 of the region's virtual space to FreeArray.
+ static const uptr kFreeArraySize = kRegionSize / 8;
+
+ static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0;
+ uptr NonConstSpaceBeg;
+ uptr SpaceBeg() const {
+ return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
+ }
+ uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
+ // kRegionSize must be >= 2^32.
+ COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
+ // kRegionSize must be <= 2^36, see CompactPtrT.
+ COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
+ // Call mmap for user memory with at least this size.
+ static const uptr kUserMapSize = 1 << 16;
+ // Call mmap for metadata memory with at least this size.
+ static const uptr kMetaMapSize = 1 << 16;
+ // Call mmap for free array memory with at least this size.
+ static const uptr kFreeArrayMapSize = 1 << 16;
+
+ atomic_sint32_t release_to_os_interval_ms_;
+
+ struct ReleaseToOsInfo {
+ uptr n_freed_at_last_release;
+ uptr num_releases;
+ u64 last_release_at_ns;
+ };
+
+ struct RegionInfo {
+ BlockingMutex mutex;
+ uptr num_freed_chunks; // Number of elements in the freearray.
+ uptr mapped_free_array; // Bytes mapped for freearray.
+ uptr allocated_user; // Bytes allocated for user memory.
+ uptr allocated_meta; // Bytes allocated for metadata.
+ uptr mapped_user; // Bytes mapped for user memory.
+ uptr mapped_meta; // Bytes mapped for metadata.
+ u32 rand_state; // Seed for random shuffle, used if kRandomShuffleChunks.
+ uptr n_allocated, n_freed; // Just stats.
+ ReleaseToOsInfo rtoi;
+ };
+ COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);
+
+ u32 Rand(u32 *state) { // ANSI C linear congruential PRNG.
+ return (*state = *state * 1103515245 + 12345) >> 16;
+ }
+
+ u32 RandN(u32 *state, u32 n) { return Rand(state) % n; } // [0, n)
+
+ void RandomShuffle(u32 *a, u32 n, u32 *rand_state) {
+ if (n <= 1) return;
+ for (u32 i = n - 1; i > 0; i--)
+ Swap(a[i], a[RandN(rand_state, i + 1)]);
+ }
+
+ RegionInfo *GetRegionInfo(uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ RegionInfo *regions =
+ reinterpret_cast<RegionInfo *>(SpaceBeg() + kSpaceSize);
+ return &regions[class_id];
+ }
+
+ uptr GetMetadataEnd(uptr region_beg) {
+ return region_beg + kRegionSize - kFreeArraySize;
+ }
+
+ uptr GetChunkIdx(uptr chunk, uptr size) {
+ if (!kUsingConstantSpaceBeg)
+ chunk -= SpaceBeg();
+
+ uptr offset = chunk % kRegionSize;
+ // Here we divide by a non-constant. This is costly.
+ // size always fits into 32-bits. If the offset fits too, use 32-bit div.
+ if (offset >> (SANITIZER_WORDSIZE / 2))
+ return offset / size;
+ return (u32)offset / (u32)size;
+ }
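+ // E.g. (illustrative) for size == 48 and offset == 36864 the offset fits
+ // in 32 bits, so the cheap (u32)36864 / (u32)48 == 768 path is taken.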
+
+ CompactPtrT *GetFreeArray(uptr region_beg) {
+ return reinterpret_cast<CompactPtrT *>(region_beg + kRegionSize -
+ kFreeArraySize);
+ }
+
+ void EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
+ uptr num_freed_chunks) {
+ uptr needed_space = num_freed_chunks * sizeof(CompactPtrT);
+ if (region->mapped_free_array < needed_space) {
+ CHECK_LE(needed_space, kFreeArraySize);
+ uptr new_mapped_free_array = RoundUpTo(needed_space, kFreeArrayMapSize);
+ uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) +
+ region->mapped_free_array;
+ uptr new_map_size = new_mapped_free_array - region->mapped_free_array;
+ MapWithCallback(current_map_end, new_map_size);
+ region->mapped_free_array = new_mapped_free_array;
+ }
+ }
+
+
+ NOINLINE void PopulateFreeArray(AllocatorStats *stat, uptr class_id,
+ RegionInfo *region, uptr requested_count) {
+ // region->mutex is held.
+ uptr size = ClassIdToSize(class_id);
+ uptr beg_idx = region->allocated_user;
+ uptr end_idx = beg_idx + requested_count * size;
+ uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ if (end_idx > region->mapped_user) {
+ if (!kUsingConstantSpaceBeg && region->mapped_user == 0)
+ region->rand_state = static_cast<u32>(region_beg >> 12); // From ASLR.
+ // Do the mmap for the user memory.
+ uptr map_size = kUserMapSize;
+ while (end_idx > region->mapped_user + map_size)
+ map_size += kUserMapSize;
+ CHECK_GE(region->mapped_user + map_size, end_idx);
+ MapWithCallback(region_beg + region->mapped_user, map_size);
+ stat->Add(AllocatorStatMapped, map_size);
+ region->mapped_user += map_size;
+ }
+ CompactPtrT *free_array = GetFreeArray(region_beg);
+ uptr total_count = (region->mapped_user - beg_idx) / size;
+ uptr num_freed_chunks = region->num_freed_chunks;
+ EnsureFreeArraySpace(region, region_beg, num_freed_chunks + total_count);
+ for (uptr i = 0; i < total_count; i++) {
+ uptr chunk = beg_idx + i * size;
+ free_array[num_freed_chunks + total_count - 1 - i] =
+ PointerToCompactPtr(0, chunk);
+ }
+ if (kRandomShuffleChunks)
+ RandomShuffle(&free_array[num_freed_chunks], total_count,
+ &region->rand_state);
+ region->num_freed_chunks += total_count;
+ region->allocated_user += total_count * size;
+ CHECK_LE(region->allocated_user, region->mapped_user);
+
+ region->allocated_meta += total_count * kMetadataSize;
+ if (region->allocated_meta > region->mapped_meta) {
+ uptr map_size = kMetaMapSize;
+ while (region->allocated_meta > region->mapped_meta + map_size)
+ map_size += kMetaMapSize;
+ // Do the mmap for the metadata.
+ CHECK_GE(region->mapped_meta + map_size, region->allocated_meta);
+ MapWithCallback(GetMetadataEnd(region_beg) -
+ region->mapped_meta - map_size, map_size);
+ region->mapped_meta += map_size;
+ }
+ CHECK_LE(region->allocated_meta, region->mapped_meta);
+ if (region->mapped_user + region->mapped_meta >
+ kRegionSize - kFreeArraySize) {
+ Printf("%s: Out of memory. Dying. ", SanitizerToolName);
+ Printf("The process has exhausted %zuMB for size class %zu.\n",
+ kRegionSize / 1024 / 1024, size);
+ Die();
+ }
+ }
+
+ void MaybeReleaseChunkRange(uptr region_beg, uptr chunk_size,
+ CompactPtrT first, CompactPtrT last) {
+ uptr beg_ptr = CompactPtrToPointer(region_beg, first);
+ uptr end_ptr = CompactPtrToPointer(region_beg, last) + chunk_size;
+ ReleaseMemoryPagesToOS(beg_ptr, end_ptr);
+ }
+
+ // Attempts to release some RAM back to OS. The region is expected to be
+ // locked.
+ // Algorithm:
+ // * Sort the chunks.
+ // * Find ranges fully covered by free-d chunks
+ // * Release them to OS with madvise.
+ void MaybeReleaseToOS(uptr class_id) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ const uptr chunk_size = ClassIdToSize(class_id);
+ const uptr page_size = GetPageSizeCached();
+
+ uptr n = region->num_freed_chunks;
+ if (n * chunk_size < page_size)
+ return; // No chance to release anything.
+ if ((region->n_freed - region->rtoi.n_freed_at_last_release) * chunk_size <
+ page_size) {
+ return; // Nothing new to release.
+ }
+
+ s32 interval_ms = ReleaseToOSIntervalMs();
+ if (interval_ms < 0)
+ return;
+
+ u64 now_ns = NanoTime();
+ if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL > now_ns)
+ return; // Memory was returned recently.
+ region->rtoi.last_release_at_ns = now_ns;
+
+ uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ CompactPtrT *free_array = GetFreeArray(region_beg);
+ SortArray(free_array, n);
+
+ const uptr scaled_chunk_size = chunk_size >> kCompactPtrScale;
+ const uptr kScaledGranularity = page_size >> kCompactPtrScale;
+
+ uptr range_beg = free_array[0];
+ uptr prev = free_array[0];
+ for (uptr i = 1; i < n; i++) {
+ uptr chunk = free_array[i];
+ CHECK_GT(chunk, prev);
+ if (chunk - prev != scaled_chunk_size) {
+ CHECK_GT(chunk - prev, scaled_chunk_size);
+ if (prev + scaled_chunk_size - range_beg >= kScaledGranularity) {
+ MaybeReleaseChunkRange(region_beg, chunk_size, range_beg, prev);
+ region->rtoi.n_freed_at_last_release = region->n_freed;
+ region->rtoi.num_releases++;
+ }
+ range_beg = chunk;
+ }
+ prev = chunk;
+ }
+ }
+};
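+// Release timing example (illustrative): with ReleaseToOSIntervalMs() ==
+// 5000, MaybeReleaseToOS() madvises a region's fully-free page ranges at
+// most once per 5 seconds; a negative interval disables releasing entirely.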
+
+
diff --git a/lib/sanitizer_common/sanitizer_allocator_secondary.h b/lib/sanitizer_common/sanitizer_allocator_secondary.h
new file mode 100644
index 000000000000..2e98e591b432
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator_secondary.h
@@ -0,0 +1,282 @@
+//===-- sanitizer_allocator_secondary.h -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// This class can (de)allocate only large chunks of memory using mmap/unmap.
+// The main purpose of this allocator is to cover large and rare allocation
+// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
+template <class MapUnmapCallback = NoOpMapUnmapCallback>
+class LargeMmapAllocator {
+ public:
+ void InitLinkerInitialized(bool may_return_null) {
+ page_size_ = GetPageSizeCached();
+ atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
+ }
+
+ void Init(bool may_return_null) {
+ internal_memset(this, 0, sizeof(*this));
+ InitLinkerInitialized(may_return_null);
+ }
+
+ void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
+ CHECK(IsPowerOfTwo(alignment));
+ uptr map_size = RoundUpMapSize(size);
+ if (alignment > page_size_)
+ map_size += alignment;
+ // Overflow.
+ if (map_size < size) return ReturnNullOrDieOnBadRequest();
+ uptr map_beg = reinterpret_cast<uptr>(
+ MmapOrDie(map_size, "LargeMmapAllocator"));
+ CHECK(IsAligned(map_beg, page_size_));
+ MapUnmapCallback().OnMap(map_beg, map_size);
+ uptr map_end = map_beg + map_size;
+ uptr res = map_beg + page_size_;
+ if (res & (alignment - 1)) // Align.
+ res += alignment - (res & (alignment - 1));
+ CHECK(IsAligned(res, alignment));
+ CHECK(IsAligned(res, page_size_));
+ CHECK_GE(res + size, map_beg);
+ CHECK_LE(res + size, map_end);
+ Header *h = GetHeader(res);
+ h->size = size;
+ h->map_beg = map_beg;
+ h->map_size = map_size;
+ uptr size_log = MostSignificantSetBitIndex(map_size);
+ CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
+ {
+ SpinMutexLock l(&mutex_);
+ uptr idx = n_chunks_++;
+ chunks_sorted_ = false;
+ CHECK_LT(idx, kMaxNumChunks);
+ h->chunk_idx = idx;
+ chunks_[idx] = h;
+ stats.n_allocs++;
+ stats.currently_allocated += map_size;
+ stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
+ stats.by_size_log[size_log]++;
+ stat->Add(AllocatorStatAllocated, map_size);
+ stat->Add(AllocatorStatMapped, map_size);
+ }
+ return reinterpret_cast<void*>(res);
+ }
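+ // Example (illustrative, 4K pages): Allocate(stat, 100000, 8) maps
+ // RoundUpMapSize(100000) == 102400 + 4096 == 106496 bytes; the first page
+ // holds the Header, so the user pointer is map_beg + page_size_.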
+
+ bool MayReturnNull() const {
+ return atomic_load(&may_return_null_, memory_order_acquire);
+ }
+
+ void *ReturnNullOrDieOnBadRequest() {
+ if (MayReturnNull()) return nullptr;
+ ReportAllocatorCannotReturnNull(false);
+ }
+
+ void *ReturnNullOrDieOnOOM() {
+ if (MayReturnNull()) return nullptr;
+ ReportAllocatorCannotReturnNull(true);
+ }
+
+ void SetMayReturnNull(bool may_return_null) {
+ atomic_store(&may_return_null_, may_return_null, memory_order_release);
+ }
+
+ void Deallocate(AllocatorStats *stat, void *p) {
+ Header *h = GetHeader(p);
+ {
+ SpinMutexLock l(&mutex_);
+ uptr idx = h->chunk_idx;
+ CHECK_EQ(chunks_[idx], h);
+ CHECK_LT(idx, n_chunks_);
+ chunks_[idx] = chunks_[n_chunks_ - 1];
+ chunks_[idx]->chunk_idx = idx;
+ n_chunks_--;
+ chunks_sorted_ = false;
+ stats.n_frees++;
+ stats.currently_allocated -= h->map_size;
+ stat->Sub(AllocatorStatAllocated, h->map_size);
+ stat->Sub(AllocatorStatMapped, h->map_size);
+ }
+ MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
+ UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
+ }
+
+ uptr TotalMemoryUsed() {
+ SpinMutexLock l(&mutex_);
+ uptr res = 0;
+ for (uptr i = 0; i < n_chunks_; i++) {
+ Header *h = chunks_[i];
+ CHECK_EQ(h->chunk_idx, i);
+ res += RoundUpMapSize(h->size);
+ }
+ return res;
+ }
+
+ bool PointerIsMine(const void *p) {
+ return GetBlockBegin(p) != nullptr;
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ return RoundUpTo(GetHeader(p)->size, page_size_);
+ }
+
+ // At least page_size_/2 metadata bytes are available.
+ void *GetMetaData(const void *p) {
+ // Too slow: CHECK_EQ(p, GetBlockBegin(p));
+ if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
+ Printf("%s: bad pointer %p\n", SanitizerToolName, p);
+ CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
+ }
+ return GetHeader(p) + 1;
+ }
+
+ void *GetBlockBegin(const void *ptr) {
+ uptr p = reinterpret_cast<uptr>(ptr);
+ SpinMutexLock l(&mutex_);
+ uptr nearest_chunk = 0;
+ // Cache-friendly linear search.
+ for (uptr i = 0; i < n_chunks_; i++) {
+ uptr ch = reinterpret_cast<uptr>(chunks_[i]);
+ if (p < ch) continue; // p is to the left of this chunk, skip it.
+ if (p - ch < p - nearest_chunk)
+ nearest_chunk = ch;
+ }
+ if (!nearest_chunk)
+ return nullptr;
+ Header *h = reinterpret_cast<Header *>(nearest_chunk);
+ CHECK_GE(nearest_chunk, h->map_beg);
+ CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
+ CHECK_LE(nearest_chunk, p);
+ if (h->map_beg + h->map_size <= p)
+ return nullptr;
+ return GetUser(h);
+ }
+
+ void EnsureSortedChunks() {
+ if (chunks_sorted_) return;
+ SortArray(reinterpret_cast<uptr*>(chunks_), n_chunks_);
+ for (uptr i = 0; i < n_chunks_; i++)
+ chunks_[i]->chunk_idx = i;
+ chunks_sorted_ = true;
+ }
+
+ // This function does the same as GetBlockBegin, but is much faster.
+ // Must be called with the allocator locked.
+ void *GetBlockBeginFastLocked(void *ptr) {
+ mutex_.CheckLocked();
+ uptr p = reinterpret_cast<uptr>(ptr);
+ uptr n = n_chunks_;
+ if (!n) return nullptr;
+ EnsureSortedChunks();
+ auto min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
+ auto max_mmap_ =
+ reinterpret_cast<uptr>(chunks_[n - 1]) + chunks_[n - 1]->map_size;
+ if (p < min_mmap_ || p >= max_mmap_)
+ return nullptr;
+ uptr beg = 0, end = n - 1;
+ // This loop is a log(n) lower_bound. It does not check for the exact match
+ // to avoid expensive cache-thrashing loads.
+ while (end - beg >= 2) {
+ uptr mid = (beg + end) / 2; // Invariant: mid >= beg + 1
+ if (p < reinterpret_cast<uptr>(chunks_[mid]))
+ end = mid - 1; // We are not interested in chunks_[mid].
+ else
+ beg = mid; // chunks_[mid] may still be what we want.
+ }
+
+ if (beg < end) {
+ CHECK_EQ(beg + 1, end);
+ // There are 2 chunks left, choose one.
+ if (p >= reinterpret_cast<uptr>(chunks_[end]))
+ beg = end;
+ }
+
+ Header *h = chunks_[beg];
+ if (h->map_beg + h->map_size <= p || p < h->map_beg)
+ return nullptr;
+ return GetUser(h);
+ }
+
+ void PrintStats() {
+ Printf("Stats: LargeMmapAllocator: allocated %zd times, "
+ "remains %zd (%zd K) max %zd M; by size logs: ",
+ stats.n_allocs, stats.n_allocs - stats.n_frees,
+ stats.currently_allocated >> 10, stats.max_allocated >> 20);
+ for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
+ uptr c = stats.by_size_log[i];
+ if (!c) continue;
+ Printf("%zd:%zd; ", i, c);
+ }
+ Printf("\n");
+ }
+
+ // ForceLock() and ForceUnlock() are needed to implement the Darwin malloc
+ // zone introspection API.
+ void ForceLock() {
+ mutex_.Lock();
+ }
+
+ void ForceUnlock() {
+ mutex_.Unlock();
+ }
+
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ EnsureSortedChunks(); // Avoid doing the sort while iterating.
+ for (uptr i = 0; i < n_chunks_; i++) {
+ auto t = chunks_[i];
+ callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
+ // Consistency check: verify that the array did not change.
+ CHECK_EQ(chunks_[i], t);
+ CHECK_EQ(chunks_[i]->chunk_idx, i);
+ }
+ }
+
+ private:
+ static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
+ struct Header {
+ uptr map_beg;
+ uptr map_size;
+ uptr size;
+ uptr chunk_idx;
+ };
+
+ Header *GetHeader(uptr p) {
+ CHECK(IsAligned(p, page_size_));
+ return reinterpret_cast<Header*>(p - page_size_);
+ }
+ Header *GetHeader(const void *p) {
+ return GetHeader(reinterpret_cast<uptr>(p));
+ }
+
+ void *GetUser(Header *h) {
+ CHECK(IsAligned((uptr)h, page_size_));
+ return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
+ }
+
+ uptr RoundUpMapSize(uptr size) {
+ return RoundUpTo(size, page_size_) + page_size_;
+ }
+
+ uptr page_size_;
+ Header *chunks_[kMaxNumChunks];
+ uptr n_chunks_;
+ bool chunks_sorted_;
+ struct Stats {
+ uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
+ } stats;
+ atomic_uint8_t may_return_null_;
+ SpinMutex mutex_;
+};
+
+
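GetBlockBeginFastLocked above is the interesting lookup in this class: a log(n) lower bound over the sorted header addresses that deliberately skips exact-match probes, followed by a single containment check against the winning chunk's mmap range. Below is a standalone sketch of the same two-phase search, under the assumption that the chunk array is kept sorted (as EnsureSortedChunks guarantees); Header and the std::vector container are illustrative stand-ins, not the allocator's own types.

    #include <cstdint>
    #include <vector>

    struct Header {
      uintptr_t map_beg;   // start of the underlying mmap region
      uintptr_t map_size;  // size of that region
    };

    // Returns the chunk whose mapping contains p, or nullptr.
    // Assumes 'chunks' is sorted by header address.
    Header* FindChunk(const std::vector<Header*>& chunks, uintptr_t p) {
      if (chunks.empty()) return nullptr;
      size_t beg = 0, end = chunks.size() - 1;
      // log(n) lower bound with no equality probes: shrink [beg, end]
      // until at most two candidates remain.
      while (end - beg >= 2) {
        size_t mid = (beg + end) / 2;  // invariant: mid >= beg + 1
        if (p < reinterpret_cast<uintptr_t>(chunks[mid]))
          end = mid - 1;  // chunks[mid] starts past p.
        else
          beg = mid;      // chunks[mid] may still contain p.
      }
      if (beg < end && p >= reinterpret_cast<uintptr_t>(chunks[end]))
        beg = end;  // two candidates left; take the later one if p reaches it
      Header* h = chunks[beg];
      if (p < h->map_beg || p >= h->map_beg + h->map_size) return nullptr;
      return h;
    }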
diff --git a/lib/sanitizer_common/sanitizer_allocator_size_class_map.h b/lib/sanitizer_common/sanitizer_allocator_size_class_map.h
new file mode 100644
index 000000000000..7151a4636056
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator_size_class_map.h
@@ -0,0 +1,217 @@
+//===-- sanitizer_allocator_size_class_map.h --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// SizeClassMap maps allocation sizes into size classes and back.
+// Class 0 always corresponds to size 0.
+// The other sizes are controlled by the template parameters:
+// kMinSizeLog: defines class 1 as 2^kMinSizeLog.
+// kMaxSizeLog: defines the last class as 2^kMaxSizeLog.
+// kMidSizeLog: the sizes of classes starting from 1 increase in steps of
+// 2^kMinSizeLog until they reach 2^kMidSizeLog.
+// kNumBits: the number of non-zero bits in sizes after 2^kMidSizeLog.
+// E.g. with kNumBits==3 all size classes after 2^kMidSizeLog
+// look like 0b1xx0..0, where x is either 0 or 1.
+//
+// Example: kNumBits=3, kMinSizeLog=4, kMidSizeLog=8, kMaxSizeLog=17:
+//
+// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
+// Next 4 classes: 256 + i * 64 (i = 1 to 4).
+// Next 4 classes: 512 + i * 128 (i = 1 to 4).
+// ...
+// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).
+// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
+//
+// This structure of the size class map gives us:
+// - Efficient table-free class-to-size and size-to-class functions.
+// - The difference between two consecutive size classes is between 14% and 25%.
+//
+// This class also gives a hint to a thread-caching allocator about the number
+// of chunks that need to be cached per-thread:
+// - kMaxNumCachedHint is a hint for maximal number of chunks per size class.
+// The actual number is computed in TransferBatch.
+// - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
+//
+// Part of output of SizeClassMap::Print():
+// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
+// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
+// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2
+// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3
+// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4
+// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5
+// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6
+// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7
+//
+// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8
+// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9
+// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10
+// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11
+// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12
+// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13
+// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14
+// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
+//
+// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
+// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17
+// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18
+// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19
+//
+// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20
+// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21
+// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22
+// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23
+//
+// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24
+// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25
+// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26
+// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27
+//
+// ...
+//
+// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48
+// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49
+// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50
+// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51
+//
+// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52
+//
+//
+// Another example (kNumBits=2):
+// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
+// c01 => s: 32 diff: +32 00% l 5 cached: 64 2048; id 1
+// c02 => s: 64 diff: +32 100% l 6 cached: 64 4096; id 2
+// c03 => s: 96 diff: +32 50% l 6 cached: 64 6144; id 3
+// c04 => s: 128 diff: +32 33% l 7 cached: 64 8192; id 4
+// c05 => s: 160 diff: +32 25% l 7 cached: 64 10240; id 5
+// c06 => s: 192 diff: +32 20% l 7 cached: 64 12288; id 6
+// c07 => s: 224 diff: +32 16% l 7 cached: 64 14336; id 7
+// c08 => s: 256 diff: +32 14% l 8 cached: 64 16384; id 8
+// c09 => s: 384 diff: +128 50% l 8 cached: 42 16128; id 9
+// c10 => s: 512 diff: +128 33% l 9 cached: 32 16384; id 10
+// c11 => s: 768 diff: +256 50% l 9 cached: 21 16128; id 11
+// c12 => s: 1024 diff: +256 33% l 10 cached: 16 16384; id 12
+// c13 => s: 1536 diff: +512 50% l 10 cached: 10 15360; id 13
+// c14 => s: 2048 diff: +512 33% l 11 cached: 8 16384; id 14
+// c15 => s: 3072 diff: +1024 50% l 11 cached: 5 15360; id 15
+// c16 => s: 4096 diff: +1024 33% l 12 cached: 4 16384; id 16
+// c17 => s: 6144 diff: +2048 50% l 12 cached: 2 12288; id 17
+// c18 => s: 8192 diff: +2048 33% l 13 cached: 2 16384; id 18
+// c19 => s: 12288 diff: +4096 50% l 13 cached: 1 12288; id 19
+// c20 => s: 16384 diff: +4096 33% l 14 cached: 1 16384; id 20
+// c21 => s: 24576 diff: +8192 50% l 14 cached: 1 24576; id 21
+// c22 => s: 32768 diff: +8192 33% l 15 cached: 1 32768; id 22
+// c23 => s: 49152 diff: +16384 50% l 15 cached: 1 49152; id 23
+// c24 => s: 65536 diff: +16384 33% l 16 cached: 1 65536; id 24
+// c25 => s: 98304 diff: +32768 50% l 16 cached: 1 98304; id 25
+// c26 => s: 131072 diff: +32768 33% l 17 cached: 1 131072; id 26
+
+template <uptr kNumBits, uptr kMinSizeLog, uptr kMidSizeLog, uptr kMaxSizeLog,
+ uptr kMaxNumCachedHintT, uptr kMaxBytesCachedLog>
+class SizeClassMap {
+ static const uptr kMinSize = 1 << kMinSizeLog;
+ static const uptr kMidSize = 1 << kMidSizeLog;
+ static const uptr kMidClass = kMidSize / kMinSize;
+ static const uptr S = kNumBits - 1;
+ static const uptr M = (1 << S) - 1;
+
+ public:
+ // kMaxNumCachedHintT is a power of two. It serves as a hint
+ // for the size of TransferBatch; the actual size could be a bit smaller.
+ static const uptr kMaxNumCachedHint = kMaxNumCachedHintT;
+ COMPILER_CHECK((kMaxNumCachedHint & (kMaxNumCachedHint - 1)) == 0);
+
+ static const uptr kMaxSize = 1UL << kMaxSizeLog;
+ static const uptr kNumClasses =
+ kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1;
+ static const uptr kLargestClassID = kNumClasses - 2;
+ COMPILER_CHECK(kNumClasses >= 16 && kNumClasses <= 256);
+ static const uptr kNumClassesRounded =
+ kNumClasses <= 32 ? 32 :
+ kNumClasses <= 64 ? 64 :
+ kNumClasses <= 128 ? 128 : 256;
+
+ static uptr Size(uptr class_id) {
+ if (class_id <= kMidClass)
+ return kMinSize * class_id;
+ class_id -= kMidClass;
+ uptr t = kMidSize << (class_id >> S);
+ return t + (t >> S) * (class_id & M);
+ }
+
+ static uptr ClassID(uptr size) {
+ if (size <= kMidSize)
+ return (size + kMinSize - 1) >> kMinSizeLog;
+ if (size > kMaxSize) return 0;
+ uptr l = MostSignificantSetBitIndex(size);
+ uptr hbits = (size >> (l - S)) & M;
+ uptr lbits = size & ((1 << (l - S)) - 1);
+ uptr l1 = l - kMidSizeLog;
+ return kMidClass + (l1 << S) + hbits + (lbits > 0);
+ }
+
+ static uptr MaxCachedHint(uptr class_id) {
+ if (class_id == 0) return 0;
+ uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id);
+ return Max<uptr>(1, Min(kMaxNumCachedHint, n));
+ }
+
+ static void Print() {
+ uptr prev_s = 0;
+ uptr total_cached = 0;
+ for (uptr i = 0; i < kNumClasses; i++) {
+ uptr s = Size(i);
+ if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
+ Printf("\n");
+ uptr d = s - prev_s;
+ uptr p = prev_s ? (d * 100 / prev_s) : 0;
+ uptr l = s ? MostSignificantSetBitIndex(s) : 0;
+ uptr cached = MaxCachedHint(i) * s;
+ Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
+ "cached: %zd %zd; id %zd\n",
+ i, Size(i), d, p, l, MaxCachedHint(i), cached, ClassID(s));
+ total_cached += cached;
+ prev_s = s;
+ }
+ Printf("Total cached: %zd\n", total_cached);
+ }
+
+ static void Validate() {
+ for (uptr c = 1; c < kNumClasses; c++) {
+ // Printf("Validate: c%zd\n", c);
+ uptr s = Size(c);
+ CHECK_NE(s, 0U);
+ CHECK_EQ(ClassID(s), c);
+ if (c != kNumClasses - 1)
+ CHECK_EQ(ClassID(s + 1), c + 1);
+ CHECK_EQ(ClassID(s - 1), c);
+ if (c)
+ CHECK_GT(Size(c), Size(c-1));
+ }
+ CHECK_EQ(ClassID(kMaxSize + 1), 0);
+
+ for (uptr s = 1; s <= kMaxSize; s++) {
+ uptr c = ClassID(s);
+ // Printf("s%zd => c%zd\n", s, c);
+ CHECK_LT(c, kNumClasses);
+ CHECK_GE(Size(c), s);
+ if (c > 0)
+ CHECK_LT(Size(c-1), s);
+ }
+ }
+};
+
+typedef SizeClassMap<3, 4, 8, 17, 128, 16> DefaultSizeClassMap;
+typedef SizeClassMap<3, 4, 8, 17, 64, 14> CompactSizeClassMap;
+typedef SizeClassMap<2, 5, 9, 16, 64, 14> VeryCompactSizeClassMap;
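Size() and ClassID() above are inverse bit manipulations, and the class table quoted in the header comment can be reproduced directly from them. The sketch below reinstantiates the formulas with the DefaultSizeClassMap parameters (kNumBits=3, kMinSizeLog=4, kMidSizeLog=8, kMaxSizeLog=17) and checks a few rows against that table; __builtin_clzll is a GCC/Clang stand-in for MostSignificantSetBitIndex, everything else follows the code verbatim.

    #include <cassert>
    #include <cstdint>

    constexpr uint64_t kMinSizeLog = 4, kMidSizeLog = 8, kMaxSizeLog = 17;
    constexpr uint64_t kNumBits = 3, S = kNumBits - 1, M = (1 << S) - 1;
    constexpr uint64_t kMinSize = 1 << kMinSizeLog, kMidSize = 1 << kMidSizeLog;
    constexpr uint64_t kMidClass = kMidSize / kMinSize;  // 16
    constexpr uint64_t kMaxSize = 1ULL << kMaxSizeLog;

    uint64_t Size(uint64_t class_id) {
      if (class_id <= kMidClass) return kMinSize * class_id;
      class_id -= kMidClass;
      uint64_t t = kMidSize << (class_id >> S);   // enclosing power of two
      return t + (t >> S) * (class_id & M);       // plus 0..3 quarter-steps
    }

    uint64_t ClassID(uint64_t size) {
      if (size <= kMidSize) return (size + kMinSize - 1) >> kMinSizeLog;
      if (size > kMaxSize) return 0;
      uint64_t l = 63 - __builtin_clzll(size);    // MostSignificantSetBitIndex
      uint64_t hbits = (size >> (l - S)) & M;
      uint64_t lbits = size & ((1ULL << (l - S)) - 1);
      return kMidClass + ((l - kMidSizeLog) << S) + hbits + (lbits > 0);
    }

    int main() {
      assert(Size(16) == 256 && ClassID(256) == 16);  // last 16-byte step
      assert(Size(17) == 320 && ClassID(320) == 17);  // 256 + 1 * 64
      assert(ClassID(257) == 17);                     // rounds up, never down
      assert(Size(20) == 512 && Size(21) == 640);     // next power of two
    }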
diff --git a/lib/sanitizer_common/sanitizer_allocator_stats.h b/lib/sanitizer_common/sanitizer_allocator_stats.h
new file mode 100644
index 000000000000..38b088b8446e
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator_stats.h
@@ -0,0 +1,107 @@
+//===-- sanitizer_allocator_stats.h -----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// Memory allocator statistics
+enum AllocatorStat {
+ AllocatorStatAllocated,
+ AllocatorStatMapped,
+ AllocatorStatCount
+};
+
+typedef uptr AllocatorStatCounters[AllocatorStatCount];
+
+// Per-thread stats, live in per-thread cache.
+class AllocatorStats {
+ public:
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ }
+ void InitLinkerInitialized() {}
+
+ void Add(AllocatorStat i, uptr v) {
+ v += atomic_load(&stats_[i], memory_order_relaxed);
+ atomic_store(&stats_[i], v, memory_order_relaxed);
+ }
+
+ void Sub(AllocatorStat i, uptr v) {
+ v = atomic_load(&stats_[i], memory_order_relaxed) - v;
+ atomic_store(&stats_[i], v, memory_order_relaxed);
+ }
+
+ void Set(AllocatorStat i, uptr v) {
+ atomic_store(&stats_[i], v, memory_order_relaxed);
+ }
+
+ uptr Get(AllocatorStat i) const {
+ return atomic_load(&stats_[i], memory_order_relaxed);
+ }
+
+ private:
+ friend class AllocatorGlobalStats;
+ AllocatorStats *next_;
+ AllocatorStats *prev_;
+ atomic_uintptr_t stats_[AllocatorStatCount];
+};
+
+// Global stats, used for aggregation and querying.
+class AllocatorGlobalStats : public AllocatorStats {
+ public:
+ void InitLinkerInitialized() {
+ next_ = this;
+ prev_ = this;
+ }
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ InitLinkerInitialized();
+ }
+
+ void Register(AllocatorStats *s) {
+ SpinMutexLock l(&mu_);
+ s->next_ = next_;
+ s->prev_ = this;
+ next_->prev_ = s;
+ next_ = s;
+ }
+
+ void Unregister(AllocatorStats *s) {
+ SpinMutexLock l(&mu_);
+ s->prev_->next_ = s->next_;
+ s->next_->prev_ = s->prev_;
+ for (int i = 0; i < AllocatorStatCount; i++)
+ Add(AllocatorStat(i), s->Get(AllocatorStat(i)));
+ }
+
+ void Get(AllocatorStatCounters s) const {
+ internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
+ SpinMutexLock l(&mu_);
+ const AllocatorStats *stats = this;
+ for (;;) {
+ for (int i = 0; i < AllocatorStatCount; i++)
+ s[i] += stats->Get(AllocatorStat(i));
+ stats = stats->next_;
+ if (stats == this)
+ break;
+ }
+ // All stats must be non-negative.
+ for (int i = 0; i < AllocatorStatCount; i++)
+ s[i] = ((sptr)s[i]) >= 0 ? s[i] : 0;
+ }
+
+ private:
+ mutable SpinMutex mu_;
+};
+
+
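AllocatorGlobalStats anchors a circular doubly-linked list of per-thread AllocatorStats nodes: Register splices a node in behind the anchor, Unregister folds the departing thread's totals into the anchor, and Get walks the whole ring. A condensed sketch of the same ring, with std::atomic and std::mutex standing in for the sanitizer primitives:

    #include <atomic>
    #include <mutex>

    struct Stats {
      Stats *next = this, *prev = this;  // node in a circular list
      std::atomic<size_t> allocated{0};
      void Add(size_t v) { allocated.fetch_add(v, std::memory_order_relaxed); }
    };

    struct GlobalStats : Stats {
      std::mutex mu;
      void Register(Stats* s) {
        std::lock_guard<std::mutex> l(mu);
        s->next = next; s->prev = this;
        next->prev = s; next = s;
      }
      void Unregister(Stats* s) {  // fold the thread's totals into the anchor
        std::lock_guard<std::mutex> l(mu);
        s->prev->next = s->next; s->next->prev = s->prev;
        Add(s->allocated.load(std::memory_order_relaxed));
      }
      size_t Total() {             // walk the entire ring, anchor included
        std::lock_guard<std::mutex> l(mu);
        size_t sum = 0;
        const Stats* p = this;
        do {
          sum += p->allocated.load(std::memory_order_relaxed);
          p = p->next;
        } while (p != this);
        return sum;
      }
    };

Note one deliberate difference: the original Add/Sub use a relaxed load plus a relaxed store rather than an atomic read-modify-write. That is safe there because each per-thread node is only ever written by its owning thread; readers merely tolerate slightly stale sums.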
diff --git a/lib/sanitizer_common/sanitizer_atomic.h b/lib/sanitizer_common/sanitizer_atomic.h
index b26693e24f8d..8f400acc999c 100644
--- a/lib/sanitizer_common/sanitizer_atomic.h
+++ b/lib/sanitizer_common/sanitizer_atomic.h
@@ -37,6 +37,11 @@ struct atomic_uint16_t {
volatile Type val_dont_use;
};
+struct atomic_sint32_t {
+ typedef s32 Type;
+ volatile Type val_dont_use;
+};
+
struct atomic_uint32_t {
typedef u32 Type;
volatile Type val_dont_use;
diff --git a/lib/sanitizer_common/sanitizer_common.cc b/lib/sanitizer_common/sanitizer_common.cc
index 79fcbb1183f9..1c6fc3ef86a3 100644
--- a/lib/sanitizer_common/sanitizer_common.cc
+++ b/lib/sanitizer_common/sanitizer_common.cc
@@ -114,7 +114,7 @@ void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
Report("ERROR: %s failed to "
"%s 0x%zx (%zd) bytes of %s (error code: %d)\n",
SanitizerToolName, mmap_type, size, size, mem_type, err);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
DumpProcessMap();
#endif
UNREACHABLE("unable to mmap");
@@ -157,6 +157,7 @@ bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
}
typedef bool UptrComparisonFunction(const uptr &a, const uptr &b);
+typedef bool U32ComparisonFunction(const u32 &a, const u32 &b);
template<class T>
static inline bool CompareLess(const T &a, const T &b) {
@@ -167,6 +168,10 @@ void SortArray(uptr *array, uptr size) {
InternalSort<uptr*, UptrComparisonFunction>(&array, size, CompareLess);
}
+void SortArray(u32 *array, uptr size) {
+ InternalSort<u32*, U32ComparisonFunction>(&array, size, CompareLess);
+}
+
const char *StripPathPrefix(const char *filepath,
const char *strip_path_prefix) {
if (!filepath) return nullptr;
@@ -202,7 +207,7 @@ void ReportErrorSummary(const char *error_message) {
__sanitizer_report_error_summary(buff.data());
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void ReportErrorSummary(const char *error_type, const AddressInfo &info) {
if (!common_flags()->print_summary)
return;
@@ -254,9 +259,18 @@ void LoadedModule::set(const char *module_name, uptr base_address) {
base_address_ = base_address;
}
+void LoadedModule::set(const char *module_name, uptr base_address,
+ ModuleArch arch, u8 uuid[kModuleUUIDSize]) {
+ set(module_name, base_address);
+ arch_ = arch;
+ internal_memcpy(uuid_, uuid, sizeof(uuid_));
+}
+
void LoadedModule::clear() {
InternalFree(full_name_);
full_name_ = nullptr;
+ arch_ = kModuleArchUnknown;
+ internal_memset(uuid_, 0, kModuleUUIDSize);
while (!ranges_.empty()) {
AddressRange *r = ranges_.front();
ranges_.pop_front();
@@ -483,4 +497,11 @@ int __sanitizer_install_malloc_and_free_hooks(void (*malloc_hook)(const void *,
void (*free_hook)(const void *)) {
return InstallMallocFreeHooks(malloc_hook, free_hook);
}
+
+#if !SANITIZER_GO && !SANITIZER_SUPPORTS_WEAK_HOOKS
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_print_memory_profile(int top_percent) {
+ (void)top_percent;
+}
+#endif
} // extern "C"
diff --git a/lib/sanitizer_common/sanitizer_common.h b/lib/sanitizer_common/sanitizer_common.h
index 6c1d6a00a10c..66c2d26fa4f5 100644
--- a/lib/sanitizer_common/sanitizer_common.h
+++ b/lib/sanitizer_common/sanitizer_common.h
@@ -98,9 +98,14 @@ void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);
+// Find an available address space.
+uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding);
+
// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
-void FlushUnneededShadowMemory(uptr addr, uptr size);
+// Releases memory pages entirely within the [beg, end] address range. Noop if
+// the provided range does not contain at least one entire page.
+void ReleaseMemoryPagesToOS(uptr beg, uptr end);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
@@ -115,16 +120,14 @@ void RunFreeHooks(const void *ptr);
// keep frame size low.
// FIXME: use InternalAlloc instead of MmapOrDie once
// InternalAlloc is made libc-free.
-template<typename T>
+template <typename T>
class InternalScopedBuffer {
public:
explicit InternalScopedBuffer(uptr cnt) {
cnt_ = cnt;
- ptr_ = (T*)MmapOrDie(cnt * sizeof(T), "InternalScopedBuffer");
- }
- ~InternalScopedBuffer() {
- UnmapOrDie(ptr_, cnt_ * sizeof(T));
+ ptr_ = (T *)MmapOrDie(cnt * sizeof(T), "InternalScopedBuffer");
}
+ ~InternalScopedBuffer() { UnmapOrDie(ptr_, cnt_ * sizeof(T)); }
T &operator[](uptr i) { return ptr_[i]; }
T *data() { return ptr_; }
uptr size() { return cnt_ * sizeof(T); }
@@ -132,9 +135,11 @@ class InternalScopedBuffer {
private:
T *ptr_;
uptr cnt_;
- // Disallow evil constructors.
- InternalScopedBuffer(const InternalScopedBuffer&);
- void operator=(const InternalScopedBuffer&);
+ // Disallow copies and moves.
+ InternalScopedBuffer(const InternalScopedBuffer &) = delete;
+ InternalScopedBuffer &operator=(const InternalScopedBuffer &) = delete;
+ InternalScopedBuffer(InternalScopedBuffer &&) = delete;
+ InternalScopedBuffer &operator=(InternalScopedBuffer &&) = delete;
};
class InternalScopedString : public InternalScopedBuffer<char> {
@@ -330,6 +335,7 @@ void SleepForMillis(int millis);
u64 NanoTime();
int Atexit(void (*function)(void));
void SortArray(uptr *array, uptr size);
+void SortArray(u32 *array, uptr size);
bool TemplateMatch(const char *templ, const char *str);
// Exit
@@ -389,7 +395,7 @@ void ReportErrorSummary(const char *error_message);
// error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info);
// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
-void ReportErrorSummary(const char *error_type, StackTrace *trace);
+void ReportErrorSummary(const char *error_type, const StackTrace *trace);
// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
@@ -446,8 +452,8 @@ INLINE uptr RoundUpToPowerOfTwo(uptr size) {
if (IsPowerOfTwo(size)) return size;
uptr up = MostSignificantSetBitIndex(size);
- CHECK(size < (1ULL << (up + 1)));
- CHECK(size > (1ULL << up));
+ CHECK_LT(size, (1ULL << (up + 1)));
+ CHECK_GT(size, (1ULL << up));
return 1ULL << (up + 1);
}
@@ -541,6 +547,13 @@ class InternalMmapVectorNoCtor {
uptr capacity() const {
return capacity_;
}
+ void resize(uptr new_size) {
+ Resize(new_size);
+ if (new_size > size_) {
+ internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
+ }
+ size_ = new_size;
+ }
void clear() { size_ = 0; }
bool empty() const { return size() == 0; }
@@ -625,34 +638,55 @@ void InternalSort(Container *v, uptr size, Compare comp) {
}
}
-template<class Container, class Value, class Compare>
-uptr InternalBinarySearch(const Container &v, uptr first, uptr last,
- const Value &val, Compare comp) {
- uptr not_found = last + 1;
- while (last >= first) {
+// Works like std::lower_bound: finds the first element that is not less
+// than the val.
+template <class Container, class Value, class Compare>
+uptr InternalLowerBound(const Container &v, uptr first, uptr last,
+ const Value &val, Compare comp) {
+ while (last > first) {
uptr mid = (first + last) / 2;
if (comp(v[mid], val))
first = mid + 1;
- else if (comp(val, v[mid]))
- last = mid - 1;
else
- return mid;
+ last = mid;
}
- return not_found;
+ return first;
}
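Replacing InternalBinarySearch with a lower bound changes the contract, not just the loop: the function now always returns an insertion point over the half-open range [first, last), and equality testing moves to the caller. A self-contained restatement with an assert-based usage check (illustrative names, same logic):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // First index in [first, last) whose element is not less than val,
    // or last if there is none.
    template <class Container, class Value, class Compare>
    size_t LowerBound(const Container& v, size_t first, size_t last,
                      const Value& val, Compare comp) {
      while (last > first) {
        size_t mid = (first + last) / 2;
        if (comp(v[mid], val))
          first = mid + 1;  // v[mid] < val: the answer is to the right.
        else
          last = mid;       // v[mid] >= val: mid is still a candidate.
      }
      return first;
    }

    int main() {
      std::vector<unsigned> v = {2, 4, 4, 8};
      auto less = [](const unsigned& a, const unsigned& b) { return a < b; };
      assert(LowerBound(v, 0, v.size(), 4u, less) == 1);  // first 4
      assert(LowerBound(v, 0, v.size(), 5u, less) == 3);  // first >= 5
      assert(LowerBound(v, 0, v.size(), 9u, less) == 4);  // past the end
      // Unlike the old InternalBinarySearch, an exact-match test is now the
      // caller's job: idx < v.size() && !less(val, v[idx]).
    }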
+enum ModuleArch {
+ kModuleArchUnknown,
+ kModuleArchI386,
+ kModuleArchX86_64,
+ kModuleArchX86_64H,
+ kModuleArchARMV6,
+ kModuleArchARMV7,
+ kModuleArchARMV7S,
+ kModuleArchARMV7K,
+ kModuleArchARM64
+};
+
+const uptr kModuleUUIDSize = 16;
+
// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
public:
- LoadedModule() : full_name_(nullptr), base_address_(0) { ranges_.clear(); }
+ LoadedModule()
+ : full_name_(nullptr), base_address_(0), arch_(kModuleArchUnknown) {
+ internal_memset(uuid_, 0, kModuleUUIDSize);
+ ranges_.clear();
+ }
void set(const char *module_name, uptr base_address);
+ void set(const char *module_name, uptr base_address, ModuleArch arch,
+ u8 uuid[kModuleUUIDSize]);
void clear();
void addAddressRange(uptr beg, uptr end, bool executable);
bool containsAddress(uptr address) const;
const char *full_name() const { return full_name_; }
uptr base_address() const { return base_address_; }
+ ModuleArch arch() const { return arch_; }
+ const u8 *uuid() const { return uuid_; }
struct AddressRange {
AddressRange *next;
@@ -669,6 +703,8 @@ class LoadedModule {
private:
char *full_name_; // Owned.
uptr base_address_;
+ ModuleArch arch_;
+ u8 uuid_[kModuleUUIDSize];
IntrusiveList<AddressRange> ranges_;
};
@@ -789,6 +825,8 @@ struct SignalContext {
is_memory_access(is_memory_access),
write_flag(write_flag) {}
+ static void DumpAllRegisters(void *context);
+
// Creates signal context in a platform-specific manner.
static SignalContext Create(void *siginfo, void *context);
@@ -827,6 +865,15 @@ void AvoidCVE_2016_2143();
INLINE void AvoidCVE_2016_2143() {}
#endif
+struct StackDepotStats {
+ uptr n_uniq_ids;
+ uptr allocated;
+};
+
+// The default value for the allocator_release_to_os_interval_ms common flag,
+// indicating that the sanitizer allocator should not attempt to release
+// memory to the OS.
+const s32 kReleaseToOSIntervalNever = -1;
+
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
@@ -834,9 +881,4 @@ inline void *operator new(__sanitizer::operator_new_size_type size,
return alloc.Allocate(size);
}
-struct StackDepotStats {
- uptr n_uniq_ids;
- uptr allocated;
-};
-
#endif // SANITIZER_COMMON_H
diff --git a/lib/sanitizer_common/sanitizer_common_interceptors.inc b/lib/sanitizer_common/sanitizer_common_interceptors.inc
index c95b3580af2c..ca571d1a9fd5 100644
--- a/lib/sanitizer_common/sanitizer_common_interceptors.inc
+++ b/lib/sanitizer_common/sanitizer_common_interceptors.inc
@@ -30,6 +30,9 @@
// COMMON_INTERCEPTOR_SET_PTHREAD_NAME
// COMMON_INTERCEPTOR_HANDLE_RECVMSG
// COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED
+// COMMON_INTERCEPTOR_MEMSET_IMPL
+// COMMON_INTERCEPTOR_MEMMOVE_IMPL
+// COMMON_INTERCEPTOR_MEMCPY_IMPL
//===----------------------------------------------------------------------===//
#include "interception/interception.h"
@@ -67,6 +70,19 @@
#define iconv __bsd_iconv
#endif
+// Platform-specific options.
+#if SANITIZER_MAC
+namespace __sanitizer {
+bool PlatformHasDifferentMemcpyAndMemmove();
+}
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE \
+ (__sanitizer::PlatformHasDifferentMemcpyAndMemmove())
+#elif SANITIZER_WINDOWS64
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE false
+#else
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE true
+#endif // SANITIZER_MAC
+
#ifndef COMMON_INTERCEPTOR_INITIALIZE_RANGE
#define COMMON_INTERCEPTOR_INITIALIZE_RANGE(p, size) {}
#endif
@@ -163,6 +179,47 @@
COMMON_INTERCEPT_FUNCTION(fn)
#endif
+#ifndef COMMON_INTERCEPTOR_MEMSET_IMPL
+#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
+ return internal_memset(dst, v, size); \
+ COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size); \
+ if (common_flags()->intercept_intrin) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ return REAL(memset)(dst, v, size); \
+ }
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MEMMOVE_IMPL
+#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
+ return internal_memmove(dst, src, size); \
+ COMMON_INTERCEPTOR_ENTER(ctx, memmove, dst, src, size); \
+ if (common_flags()->intercept_intrin) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
+ } \
+ return REAL(memmove)(dst, src, size); \
+ }
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MEMCPY_IMPL
+#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) { \
+ return internal_memmove(dst, src, size); \
+ } \
+ COMMON_INTERCEPTOR_ENTER(ctx, memcpy, dst, src, size); \
+ if (common_flags()->intercept_intrin) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
+ } \
+ return REAL(memcpy)(dst, src, size); \
+ }
+#endif
+
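The *_IMPL macros above are statement blocks ending in return, so they can only be expanded inside an interceptor body; that is exactly what lets memset, the __aeabi_* variants, and __bzero later in this patch share one implementation without plumbing return values through helper functions. A toy reduction of the pattern, with printf standing in for the real ENTER/WRITE_RANGE hooks (all names here are made up):

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    // The macro body ends in 'return', so it must be expanded inside a
    // function with a matching return type -- the same trick the
    // interceptors use.
    #define TOY_MEMSET_IMPL(dst, v, size)                      \
      {                                                        \
        std::printf("memset-like call: %zu bytes\n", (size_t)(size)); \
        return std::memset(dst, v, size);                      \
      }

    void* toy_memset(void* dst, int v, size_t size) {
      TOY_MEMSET_IMPL(dst, v, size);
    }

    // AEABI argument order puts the size before the fill byte, matching
    // __aeabi_memset in this patch.
    void* toy_aeabi_memset(void* dst, size_t size, int v) {
      TOY_MEMSET_IMPL(dst, v, size);
    }

    int main() {
      char buf[8];
      toy_memset(buf, 0, sizeof(buf));
      toy_aeabi_memset(buf, sizeof(buf), 0xFF);
    }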
struct FileMetadata {
// For open_memstream().
char **addr;
@@ -304,8 +361,14 @@ INTERCEPTOR(int, strncmp, const char *s1, const char *s2, uptr size) {
c2 = (unsigned char)s2[i];
if (c1 != c2 || c1 == '\0') break;
}
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, Min(i + 1, size));
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, Min(i + 1, size));
+ uptr i1 = i;
+ uptr i2 = i;
+ if (common_flags()->strict_string_checks) {
+ for (; i1 < size && s1[i1]; i1++) {}
+ for (; i2 < size && s2[i2]; i2++) {}
+ }
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s1), Min(i1 + 1, size));
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s2), Min(i2 + 1, size));
int result = CharCmpX(c1, c2);
CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncmp, GET_CALLER_PC(), s1,
s2, size, result);
@@ -348,24 +411,30 @@ INTERCEPTOR(int, strcasecmp, const char *s1, const char *s2) {
}
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncasecmp, uptr called_pc,
- const char *s1, const char *s2, uptr n,
+ const char *s1, const char *s2, uptr size,
int result)
-INTERCEPTOR(int, strncasecmp, const char *s1, const char *s2, SIZE_T n) {
+INTERCEPTOR(int, strncasecmp, const char *s1, const char *s2, SIZE_T size) {
void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, strncasecmp, s1, s2, n);
+ COMMON_INTERCEPTOR_ENTER(ctx, strncasecmp, s1, s2, size);
unsigned char c1 = 0, c2 = 0;
uptr i;
- for (i = 0; i < n; i++) {
+ for (i = 0; i < size; i++) {
c1 = (unsigned char)s1[i];
c2 = (unsigned char)s2[i];
if (CharCaseCmp(c1, c2) != 0 || c1 == '\0') break;
}
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, Min(i + 1, n));
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, Min(i + 1, n));
+ uptr i1 = i;
+ uptr i2 = i;
+ if (common_flags()->strict_string_checks) {
+ for (; i1 < size && s1[i1]; i1++) {}
+ for (; i2 < size && s2[i2]; i2++) {}
+ }
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s1), Min(i1 + 1, size));
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s2), Min(i2 + 1, size));
int result = CharCaseCmp(c1, c2);
CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncasecmp, GET_CALLER_PC(),
- s1, s2, n, result);
+ s1, s2, size, result);
return result;
}
@@ -390,7 +459,7 @@ static inline void StrstrCheck(void *ctx, char *r, const char *s1,
#if SANITIZER_INTERCEPT_STRSTR
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strstr, uptr called_pc,
- const char *s1, const char *s2, char *result);
+ const char *s1, const char *s2, char *result)
INTERCEPTOR(char*, strstr, const char *s1, const char *s2) {
if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
@@ -413,7 +482,7 @@ INTERCEPTOR(char*, strstr, const char *s1, const char *s2) {
#if SANITIZER_INTERCEPT_STRCASESTR
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcasestr, uptr called_pc,
- const char *s1, const char *s2, char *result);
+ const char *s1, const char *s2, char *result)
INTERCEPTOR(char*, strcasestr, const char *s1, const char *s2) {
void *ctx;
@@ -434,7 +503,7 @@ INTERCEPTOR(char*, strcasestr, const char *s1, const char *s2) {
#if SANITIZER_INTERCEPT_MEMMEM
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memmem, uptr called_pc,
const void *s1, SIZE_T len1, const void *s2,
- SIZE_T len2, void *result);
+ SIZE_T len2, void *result)
INTERCEPTOR(void*, memmem, const void *s1, SIZE_T len1, const void *s2,
SIZE_T len2) {
@@ -553,14 +622,9 @@ INTERCEPTOR(char *, strpbrk, const char *s1, const char *s2) {
#endif
#if SANITIZER_INTERCEPT_MEMSET
-INTERCEPTOR(void*, memset, void *dst, int v, uptr size) {
- if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
- return internal_memset(dst, v, size);
+INTERCEPTOR(void *, memset, void *dst, int v, uptr size) {
void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size);
- if (common_flags()->intercept_intrin)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size);
- return REAL(memset)(dst, v, size);
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size);
}
#define INIT_MEMSET COMMON_INTERCEPT_FUNCTION(memset)
@@ -569,16 +633,9 @@ INTERCEPTOR(void*, memset, void *dst, int v, uptr size) {
#endif
#if SANITIZER_INTERCEPT_MEMMOVE
-INTERCEPTOR(void*, memmove, void *dst, const void *src, uptr size) {
- if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
- return internal_memmove(dst, src, size);
+INTERCEPTOR(void *, memmove, void *dst, const void *src, uptr size) {
void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, memmove, dst, src, size);
- if (common_flags()->intercept_intrin) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size);
- }
- return REAL(memmove)(dst, src, size);
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
}
#define INIT_MEMMOVE COMMON_INTERCEPT_FUNCTION(memmove)
@@ -587,25 +644,30 @@ INTERCEPTOR(void*, memmove, void *dst, const void *src, uptr size) {
#endif
#if SANITIZER_INTERCEPT_MEMCPY
-INTERCEPTOR(void*, memcpy, void *dst, const void *src, uptr size) {
- if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) {
- // On OS X, calling internal_memcpy here will cause memory corruptions,
- // because memcpy and memmove are actually aliases of the same
- // implementation. We need to use internal_memmove here.
- return internal_memmove(dst, src, size);
- }
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, memcpy, dst, src, size);
- if (common_flags()->intercept_intrin) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size);
- }
+INTERCEPTOR(void *, memcpy, void *dst, const void *src, uptr size) {
+ // On OS X, calling internal_memcpy here will cause memory corruptions,
+ // because memcpy and memmove are actually aliases of the same
+ // implementation. We need to use internal_memmove here.
// N.B.: If we switch this to internal_ we'll have to use internal_memmove
// due to memcpy being an alias of memmove on OS X.
- return REAL(memcpy)(dst, src, size);
+ void *ctx;
+ if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) {
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
+ } else {
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
+ }
}
-#define INIT_MEMCPY COMMON_INTERCEPT_FUNCTION(memcpy)
+#define INIT_MEMCPY \
+ do { \
+ if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) { \
+ COMMON_INTERCEPT_FUNCTION(memcpy); \
+ } else { \
+ ASSIGN_REAL(memcpy, memmove); \
+ } \
+ CHECK(REAL(memcpy)); \
+ } while (false)
+
#else
#define INIT_MEMCPY
#endif
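INIT_MEMCPY now decides at initialization time whether memcpy gets its own interceptor or simply reuses memmove's REAL pointer, matching the OS X situation where both symbols resolve to one implementation. A sketch of the same dispatch with plain function pointers; the predicate here is an assumption standing in for the Mac runtime query, and the wrappers only keep the sketch self-contained:

    #include <cstddef>
    #include <cstring>

    using MemFn = void* (*)(void*, const void*, size_t);

    static void* RealMemmove(void* d, const void* s, size_t n) {
      return memmove(d, s, n);
    }
    static void* RealMemcpy(void* d, const void* s, size_t n) {
      return memcpy(d, s, n);
    }

    static MemFn real_memcpy;               // REAL(memcpy) stand-in
    static MemFn real_memmove = RealMemmove;

    // Assumption: in the real code this is queried at runtime on Mac and
    // is a compile-time constant elsewhere.
    static bool HasDistinctMemcpyAndMemmove() { return true; }

    static void InitMemcpy() {  // mirrors the INIT_MEMCPY do/while above
      real_memcpy = HasDistinctMemcpyAndMemmove() ? RealMemcpy
                                                  : real_memmove;
    }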
@@ -663,7 +725,16 @@ INTERCEPTOR(void*, memchr, const void *s, int c, SIZE_T n) {
return internal_memchr(s, c, n);
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, memchr, s, c, n);
+#if SANITIZER_WINDOWS
+ void *res;
+ if (REAL(memchr)) {
+ res = REAL(memchr)(s, c, n);
+ } else {
+ res = internal_memchr(s, c, n);
+ }
+#else
void *res = REAL(memchr)(s, c, n);
+#endif
uptr len = res ? (char *)res - (const char *)s + 1 : n;
COMMON_INTERCEPTOR_READ_RANGE(ctx, s, len);
return res;
@@ -1218,12 +1289,12 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
#if SANITIZER_INTERCEPT_SCANF
#define INIT_SCANF \
- COMMON_INTERCEPT_FUNCTION(scanf); \
- COMMON_INTERCEPT_FUNCTION(sscanf); \
- COMMON_INTERCEPT_FUNCTION(fscanf); \
- COMMON_INTERCEPT_FUNCTION(vscanf); \
- COMMON_INTERCEPT_FUNCTION(vsscanf); \
- COMMON_INTERCEPT_FUNCTION(vfscanf);
+ COMMON_INTERCEPT_FUNCTION_LDBL(scanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(sscanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(fscanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vscanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vsscanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vfscanf);
#else
#define INIT_SCANF
#endif
@@ -1396,16 +1467,16 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_snprintf, __isoc99_vsnprintf, str, size,
#if SANITIZER_INTERCEPT_PRINTF
#define INIT_PRINTF \
- COMMON_INTERCEPT_FUNCTION(printf); \
- COMMON_INTERCEPT_FUNCTION(sprintf); \
- COMMON_INTERCEPT_FUNCTION(snprintf); \
- COMMON_INTERCEPT_FUNCTION(asprintf); \
- COMMON_INTERCEPT_FUNCTION(fprintf); \
- COMMON_INTERCEPT_FUNCTION(vprintf); \
- COMMON_INTERCEPT_FUNCTION(vsprintf); \
- COMMON_INTERCEPT_FUNCTION(vsnprintf); \
- COMMON_INTERCEPT_FUNCTION(vasprintf); \
- COMMON_INTERCEPT_FUNCTION(vfprintf);
+ COMMON_INTERCEPT_FUNCTION_LDBL(printf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(sprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(snprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(asprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(fprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vsprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vsnprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vasprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vfprintf);
#else
#define INIT_PRINTF
#endif
@@ -4174,6 +4245,20 @@ INTERCEPTOR(char *, tmpnam_r, char *s) {
#define INIT_TMPNAM_R
#endif
+#if SANITIZER_INTERCEPT_TTYNAME_R
+INTERCEPTOR(int, ttyname_r, int fd, char *name, SIZE_T namesize) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ttyname_r, fd, name, namesize);
+ int res = REAL(ttyname_r)(fd, name, namesize);
+ if (res == 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ return res;
+}
+#define INIT_TTYNAME_R COMMON_INTERCEPT_FUNCTION(ttyname_r);
+#else
+#define INIT_TTYNAME_R
+#endif
+
#if SANITIZER_INTERCEPT_TEMPNAM
INTERCEPTOR(char *, tempnam, char *dir, char *pfx) {
void *ctx;
@@ -4802,47 +4887,67 @@ INTERCEPTOR(int, capset, void *hdrp, const void *datap) {
#endif
#if SANITIZER_INTERCEPT_AEABI_MEM
-DECLARE_REAL_AND_INTERCEPTOR(void *, memmove, void *, const void *, uptr)
-DECLARE_REAL_AND_INTERCEPTOR(void *, memcpy, void *, const void *, uptr)
-DECLARE_REAL_AND_INTERCEPTOR(void *, memset, void *, int, uptr)
-
INTERCEPTOR(void *, __aeabi_memmove, void *to, const void *from, uptr size) {
- return WRAP(memmove)(to, from, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
}
+
INTERCEPTOR(void *, __aeabi_memmove4, void *to, const void *from, uptr size) {
- return WRAP(memmove)(to, from, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
}
+
INTERCEPTOR(void *, __aeabi_memmove8, void *to, const void *from, uptr size) {
- return WRAP(memmove)(to, from, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
}
+
INTERCEPTOR(void *, __aeabi_memcpy, void *to, const void *from, uptr size) {
- return WRAP(memcpy)(to, from, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
}
+
INTERCEPTOR(void *, __aeabi_memcpy4, void *to, const void *from, uptr size) {
- return WRAP(memcpy)(to, from, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
}
+
INTERCEPTOR(void *, __aeabi_memcpy8, void *to, const void *from, uptr size) {
- return WRAP(memcpy)(to, from, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
}
+
// Note the argument order.
INTERCEPTOR(void *, __aeabi_memset, void *block, uptr size, int c) {
- return WRAP(memset)(block, c, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
}
+
INTERCEPTOR(void *, __aeabi_memset4, void *block, uptr size, int c) {
- return WRAP(memset)(block, c, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
}
+
INTERCEPTOR(void *, __aeabi_memset8, void *block, uptr size, int c) {
- return WRAP(memset)(block, c, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
}
+
INTERCEPTOR(void *, __aeabi_memclr, void *block, uptr size) {
- return WRAP(memset)(block, 0, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
}
+
INTERCEPTOR(void *, __aeabi_memclr4, void *block, uptr size) {
- return WRAP(memset)(block, 0, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
}
+
INTERCEPTOR(void *, __aeabi_memclr8, void *block, uptr size) {
- return WRAP(memset)(block, 0, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
}
+
#define INIT_AEABI_MEM \
COMMON_INTERCEPT_FUNCTION(__aeabi_memmove); \
COMMON_INTERCEPT_FUNCTION(__aeabi_memmove4); \
@@ -4861,11 +4966,11 @@ INTERCEPTOR(void *, __aeabi_memclr8, void *block, uptr size) {
#endif // SANITIZER_INTERCEPT_AEABI_MEM
#if SANITIZER_INTERCEPT___BZERO
-DECLARE_REAL_AND_INTERCEPTOR(void *, memset, void *, int, uptr);
-
INTERCEPTOR(void *, __bzero, void *block, uptr size) {
- return WRAP(memset)(block, 0, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
}
+
#define INIT___BZERO COMMON_INTERCEPT_FUNCTION(__bzero);
#else
#define INIT___BZERO
@@ -5855,6 +5960,72 @@ INTERCEPTOR(int, __lxstat64, int version, const char *path, void *buf) {
// FIXME: add other *stat interceptor
+#if SANITIZER_INTERCEPT_UTMP
+INTERCEPTOR(void *, getutent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutent, dummy);
+ void *res = REAL(getutent)(dummy);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmp_sz);
+ return res;
+}
+INTERCEPTOR(void *, getutid, void *ut) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutid, ut);
+ void *res = REAL(getutid)(ut);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmp_sz);
+ return res;
+}
+INTERCEPTOR(void *, getutline, void *ut) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutline, ut);
+ void *res = REAL(getutline)(ut);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmp_sz);
+ return res;
+}
+#define INIT_UTMP \
+ COMMON_INTERCEPT_FUNCTION(getutent); \
+ COMMON_INTERCEPT_FUNCTION(getutid); \
+ COMMON_INTERCEPT_FUNCTION(getutline);
+#else
+#define INIT_UTMP
+#endif
+
+#if SANITIZER_INTERCEPT_UTMPX
+INTERCEPTOR(void *, getutxent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutxent, dummy);
+ void *res = REAL(getutxent)(dummy);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmpx_sz);
+ return res;
+}
+INTERCEPTOR(void *, getutxid, void *ut) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutxid, ut);
+ void *res = REAL(getutxid)(ut);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmpx_sz);
+ return res;
+}
+INTERCEPTOR(void *, getutxline, void *ut) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutxline, ut);
+ void *res = REAL(getutxline)(ut);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmpx_sz);
+ return res;
+}
+#define INIT_UTMPX \
+ COMMON_INTERCEPT_FUNCTION(getutxent); \
+ COMMON_INTERCEPT_FUNCTION(getutxid); \
+ COMMON_INTERCEPT_FUNCTION(getutxline);
+#else
+#define INIT_UTMPX
+#endif
+
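All six getut* interceptors above follow one shape: call the real function and, if it handed back a record, mark struct_utmp_sz (or struct_utmpx_sz) bytes of it as initialized so shadow-tracking tools accept subsequent reads. A toy version of that shape; MarkInitialized is a hypothetical stand-in for COMMON_INTERCEPTOR_INITIALIZE_RANGE and the struct size is illustrative:

    #include <cstddef>
    #include <cstdio>

    struct utmp_like { char data[384]; };  // stands in for struct_utmp_sz bytes

    static utmp_like* RealGetutent() {     // stand-in for REAL(getutent)
      static utmp_like u;
      return &u;
    }

    // Hypothetical stand-in for COMMON_INTERCEPTOR_INITIALIZE_RANGE.
    static void MarkInitialized(void* p, size_t n) {
      std::printf("mark %zu bytes at %p as initialized\n", n, p);
    }

    utmp_like* InterceptedGetutent() {
      utmp_like* res = RealGetutent();
      if (res) MarkInitialized(res, sizeof(*res));  // annotate only on success
      return res;
    }

    int main() { InterceptedGetutent(); }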
static void InitializeCommonInterceptors() {
static u64 metadata_mem[sizeof(MetadataHashMap) / sizeof(u64) + 1];
interceptor_metadata_map = new((void *)&metadata_mem) MetadataHashMap();
@@ -5999,6 +6170,7 @@ static void InitializeCommonInterceptors() {
INIT_PTHREAD_BARRIERATTR_GETPSHARED;
INIT_TMPNAM;
INIT_TMPNAM_R;
+ INIT_TTYNAME_R;
INIT_TEMPNAM;
INIT_PTHREAD_SETNAME_NP;
INIT_SINCOS;
@@ -6050,4 +6222,6 @@ static void InitializeCommonInterceptors() {
INIT___LXSTAT;
INIT___LXSTAT64;
// FIXME: add other *stat interceptors.
+ INIT_UTMP;
+ INIT_UTMPX;
}
diff --git a/lib/sanitizer_common/sanitizer_common_interceptors_format.inc b/lib/sanitizer_common/sanitizer_common_interceptors_format.inc
index 92318cda35fd..12563499c515 100644
--- a/lib/sanitizer_common/sanitizer_common_interceptors_format.inc
+++ b/lib/sanitizer_common/sanitizer_common_interceptors_format.inc
@@ -435,10 +435,6 @@ static const char *printf_parse_next(const char *p, PrintfDirective *dir) {
}
static int printf_get_value_size(PrintfDirective *dir) {
- if (dir->convSpecifier == 'm') {
- return sizeof(char *);
- }
-
if (char_is_one_of(dir->convSpecifier, "cCsS")) {
unsigned charSize =
format_get_char_size(dir->convSpecifier, dir->lengthModifier);
@@ -519,6 +515,9 @@ static void printf_common(void *ctx, const char *format, va_list aq) {
// Dynamic precision
SKIP_SCALAR_ARG(&aq, 'd', sizeof(int));
}
+ // %m does not consume an argument; it prints strerror(errno).
+ if (dir.convSpecifier == 'm')
+ continue;
int size = printf_get_value_size(&dir);
if (size == FSS_INVALID) {
Report("WARNING: unexpected format specifier in printf "
diff --git a/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc b/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
index 959c622a32f9..4ed9afedf84a 100755
--- a/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
+++ b/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
@@ -583,7 +583,8 @@ static void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d,
return;
if (request == IOCTL_SIOCGIFCONF) {
struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;
- COMMON_INTERCEPTOR_READ_RANGE(ctx, &ifc->ifc_len, sizeof(ifc->ifc_len));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, (char*)&ifc->ifc_len,
+ sizeof(ifc->ifc_len));
}
}
diff --git a/lib/sanitizer_common/sanitizer_common_libcdep.cc b/lib/sanitizer_common/sanitizer_common_libcdep.cc
index 596f5bcd3173..49ca961f3cb0 100644
--- a/lib/sanitizer_common/sanitizer_common_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_common_libcdep.cc
@@ -13,6 +13,7 @@
#include "sanitizer_common.h"
+#include "sanitizer_allocator_interface.h"
#include "sanitizer_flags.h"
#include "sanitizer_stackdepot.h"
#include "sanitizer_stacktrace.h"
@@ -46,7 +47,7 @@ void SetSandboxingCallback(void (*f)()) {
sandboxing_callback = f;
}
-void ReportErrorSummary(const char *error_type, StackTrace *stack) {
+void ReportErrorSummary(const char *error_type, const StackTrace *stack) {
#if !SANITIZER_GO
if (!common_flags()->print_summary)
return;
@@ -69,12 +70,15 @@ void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) {
SoftRssLimitExceededCallback = Callback;
}
+#if SANITIZER_LINUX && !SANITIZER_GO
void BackgroundThread(void *arg) {
uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;
uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb;
+ bool heap_profile = common_flags()->heap_profile;
uptr prev_reported_rss = 0;
uptr prev_reported_stack_depot_size = 0;
bool reached_soft_rss_limit = false;
+ uptr rss_during_last_reported_profile = 0;
while (true) {
SleepForMillis(100);
uptr current_rss_mb = GetRSS() >> 20;
@@ -116,8 +120,15 @@ void BackgroundThread(void *arg) {
SoftRssLimitExceededCallback(false);
}
}
+ if (heap_profile &&
+ current_rss_mb > rss_during_last_reported_profile * 1.1) {
+ Printf("\n\nHEAP PROFILE at RSS %zdMb\n", current_rss_mb);
+ __sanitizer_print_memory_profile(90);
+ rss_during_last_reported_profile = current_rss_mb;
+ }
}
}
+#endif
void WriteToSyslog(const char *msg) {
InternalScopedString msg_copy(kErrorMessageBufferSize);
@@ -142,7 +153,8 @@ void MaybeStartBackgroudThread() {
!SANITIZER_GO // Need to implement/test on other platforms.
// Start the background thread if one of the rss limits is given.
if (!common_flags()->hard_rss_limit_mb &&
- !common_flags()->soft_rss_limit_mb) return;
+ !common_flags()->soft_rss_limit_mb &&
+ !common_flags()->heap_profile) return;
if (!&real_pthread_create) return; // Can't spawn the thread anyway.
internal_start_thread(BackgroundThread, nullptr);
#endif
@@ -152,7 +164,7 @@ void MaybeStartBackgroudThread() {
void NOINLINE
__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args) {
- PrepareForSandboxing(args);
- if (sandboxing_callback)
- sandboxing_callback();
+ __sanitizer::PrepareForSandboxing(args);
+ if (__sanitizer::sandboxing_callback)
+ __sanitizer::sandboxing_callback();
}
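The heap_profile path added to BackgroundThread is a simple growth trigger: poll RSS every 100 ms and re-dump the profile only after more than 10% growth since the last dump, so a steady-state heap produces no output. A minimal standalone sketch of that trigger; GetRSSMb and DumpProfile are hypothetical stand-ins for GetRSS() >> 20 and __sanitizer_print_memory_profile(90):

    #include <chrono>
    #include <cstddef>
    #include <cstdio>
    #include <thread>

    size_t GetRSSMb() { return 0; }  // stand-in for GetRSS() >> 20
    void DumpProfile() {}            // stand-in for the profile dump

    void BackgroundThread(bool heap_profile) {
      size_t rss_at_last_profile = 0;
      while (true) {
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        size_t rss_mb = GetRSSMb();
        // Re-dump only after >10% growth; a stable heap stays quiet.
        if (heap_profile && rss_mb > rss_at_last_profile * 1.1) {
          std::printf("HEAP PROFILE at RSS %zuMb\n", rss_mb);
          DumpProfile();
          rss_at_last_profile = rss_mb;
        }
      }
    }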
diff --git a/lib/sanitizer_common/sanitizer_common_nolibc.cc b/lib/sanitizer_common/sanitizer_common_nolibc.cc
index e24cf998ec69..ba54c739a9e0 100644
--- a/lib/sanitizer_common/sanitizer_common_nolibc.cc
+++ b/lib/sanitizer_common/sanitizer_common_nolibc.cc
@@ -17,6 +17,9 @@
namespace __sanitizer {
+// The Windows implementations of these functions use the win32 API directly,
+// bypassing libc.
+#if !SANITIZER_WINDOWS
#if SANITIZER_LINUX
bool ShouldLogAfterPrintf() { return false; }
void LogMessageOnPrintf(const char *str) {}
@@ -24,5 +27,10 @@ void LogMessageOnPrintf(const char *str) {}
void WriteToSyslog(const char *buffer) {}
void Abort() { internal__exit(1); }
void SleepForSeconds(int seconds) { internal_sleep(seconds); }
+#endif // !SANITIZER_WINDOWS
+
+#if !SANITIZER_WINDOWS && !SANITIZER_MAC
+void ListOfModules::init() {}
+#endif
} // namespace __sanitizer
diff --git a/lib/sanitizer_common/sanitizer_coverage_libcdep.cc b/lib/sanitizer_common/sanitizer_coverage_libcdep.cc
index 51b53d345ab8..ebdee33d7d5b 100644
--- a/lib/sanitizer_common/sanitizer_coverage_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_coverage_libcdep.cc
@@ -47,6 +47,8 @@
#include "sanitizer_symbolizer.h"
#include "sanitizer_flags.h"
+using namespace __sanitizer;
+
static const u64 kMagic64 = 0xC0BFFFFFFFFFFF64ULL;
static const u64 kMagic32 = 0xC0BFFFFFFFFFFF32ULL;
static const uptr kNumWordsForMagic = SANITIZER_WORDSIZE == 64 ? 1 : 2;
@@ -110,7 +112,6 @@ class CoverageData {
uptr *data();
uptr size() const;
- uptr *buffer() const { return pc_buffer; }
private:
struct NamedPcRange {
@@ -125,9 +126,8 @@ class CoverageData {
// Maximal size pc array may ever grow.
// We MmapNoReserve this space to ensure that the array is contiguous.
- static const uptr kPcArrayMaxSize = FIRST_32_SECOND_64(
- 1 << (SANITIZER_ANDROID ? 24 : (SANITIZER_WINDOWS ? 27 : 26)),
- 1 << 27);
+ static const uptr kPcArrayMaxSize =
+ FIRST_32_SECOND_64(1 << (SANITIZER_ANDROID ? 24 : 26), 1 << 27);
// The amount file mapping for the pc array is grown by.
static const uptr kPcArrayMmapSize = 64 * 1024;
@@ -143,8 +143,6 @@ class CoverageData {
// Descriptor of the file mapped pc array.
fd_t pc_fd;
- uptr *pc_buffer;
-
// Vector of coverage guard arrays, protected by mu.
InternalMmapVectorNoCtor<s32*> guard_array_vec;
@@ -216,11 +214,6 @@ void CoverageData::Enable() {
atomic_store(&pc_array_size, kPcArrayMaxSize, memory_order_relaxed);
}
- pc_buffer = nullptr;
- if (common_flags()->coverage_pc_buffer)
- pc_buffer = reinterpret_cast<uptr *>(MmapNoReserveOrDie(
- sizeof(uptr) * kPcArrayMaxSize, "CovInit::pc_buffer"));
-
cc_array = reinterpret_cast<uptr **>(MmapNoReserveOrDie(
sizeof(uptr *) * kCcArrayMaxSize, "CovInit::cc_array"));
atomic_store(&cc_array_size, kCcArrayMaxSize, memory_order_relaxed);
@@ -258,10 +251,6 @@ void CoverageData::Disable() {
UnmapOrDie(cc_array, sizeof(uptr *) * kCcArrayMaxSize);
cc_array = nullptr;
}
- if (pc_buffer) {
- UnmapOrDie(pc_buffer, sizeof(uptr) * kPcArrayMaxSize);
- pc_buffer = nullptr;
- }
if (tr_event_array) {
UnmapOrDie(tr_event_array,
sizeof(tr_event_array[0]) * kTrEventArrayMaxSize +
@@ -430,7 +419,6 @@ void CoverageData::Add(uptr pc, u32 *guard) {
atomic_load(&pc_array_size, memory_order_acquire));
uptr counter = atomic_fetch_add(&coverage_counter, 1, memory_order_relaxed);
pc_array[idx] = BundlePcAndCounter(pc, counter);
- if (pc_buffer) pc_buffer[counter] = pc;
}
// Registers a pair caller=>callee.
@@ -966,6 +954,7 @@ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() {
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {
coverage_data.DumpAll();
+ __sanitizer_dump_trace_pc_guard_coverage();
}
SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_cov_module_init(s32 *guards, uptr npcs, u8 *counters,
@@ -1019,12 +1008,6 @@ uptr __sanitizer_get_coverage_guards(uptr **data) {
}
SANITIZER_INTERFACE_ATTRIBUTE
-uptr __sanitizer_get_coverage_pc_buffer(uptr **data) {
- *data = coverage_data.buffer();
- return __sanitizer_get_total_unique_coverage();
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_number_of_counters() {
return coverage_data.GetNumberOf8bitCounters();
}
@@ -1034,8 +1017,26 @@ uptr __sanitizer_update_counter_bitset_and_clear_counters(u8 *bitset) {
return coverage_data.Update8bitCounterBitsetAndClearCounters(bitset);
}
// Default empty implementations (weak). Users should redefine them.
+#if !SANITIZER_WINDOWS // weak does not work on Windows.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp() {}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_cmp1() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_cmp2() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_cmp4() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_cmp8() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_switch() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_div4() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_div8() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_gep() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_pc_indir() {}
+#endif // !SANITIZER_WINDOWS
} // extern "C"
diff --git a/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc b/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc
new file mode 100644
index 000000000000..d83b77917bda
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc
@@ -0,0 +1,165 @@
+//===-- sanitizer_coverage_libcdep_new.cc ---------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Sanitizer Coverage Controller for Trace PC Guard.
+
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_common.h"
+#include "sanitizer_symbolizer.h"
+
+using namespace __sanitizer;
+
+using AddressRange = LoadedModule::AddressRange;
+
+namespace {
+
+static const u64 Magic64 = 0xC0BFFFFFFFFFFF64ULL;
+static const u64 Magic32 = 0xC0BFFFFFFFFFFF32ULL;
+static const u64 Magic = SANITIZER_WORDSIZE == 64 ? Magic64 : Magic32;
+
+static fd_t OpenFile(const char* path) {
+ error_t err;
+ fd_t fd = OpenFile(path, WrOnly, &err);
+ if (fd == kInvalidFd)
+ Report("SanitizerCoverage: failed to open %s for writing (reason: %d)\n",
+ path, err);
+ return fd;
+}
+
+static void GetCoverageFilename(char* path, const char* name,
+ const char* extension) {
+ CHECK(name);
+ internal_snprintf(path, kMaxPathLength, "%s/%s.%zd.%s",
+ common_flags()->coverage_dir, name, internal_getpid(),
+ extension);
+}
+
+static void WriteModuleCoverage(char* file_path, const char* module_name,
+ const uptr* pcs, uptr len) {
+ GetCoverageFilename(file_path, StripModuleName(module_name), "sancov");
+ fd_t fd = OpenFile(file_path);
+ WriteToFile(fd, &Magic, sizeof(Magic));
+ WriteToFile(fd, pcs, len * sizeof(*pcs));
+ CloseFile(fd);
+ Printf("SanitizerCoverage: %s %zd PCs written\n", file_path, len);
+}
+
+static void SanitizerDumpCoverage(const uptr* unsorted_pcs, uptr len) {
+ if (!len) return;
+
+ char* file_path = static_cast<char*>(InternalAlloc(kMaxPathLength));
+ char* module_name = static_cast<char*>(InternalAlloc(kMaxPathLength));
+ uptr* pcs = static_cast<uptr*>(InternalAlloc(len * sizeof(uptr)));
+
+ internal_memcpy(pcs, unsorted_pcs, len * sizeof(uptr));
+ SortArray(pcs, len);
+
+ bool module_found = false;
+ uptr last_base = 0;
+ uptr module_start_idx = 0;
+
+ for (uptr i = 0; i < len; ++i) {
+ const uptr pc = pcs[i];
+ if (!pc) continue;
+
+ if (!__sanitizer_get_module_and_offset_for_pc(pc, nullptr, 0, &pcs[i])) {
+ Printf("ERROR: bad pc %x\n", pc);
+ continue;
+ }
+ uptr module_base = pc - pcs[i];
+
+ if (module_base != last_base || !module_found) {
+ if (module_found) {
+ WriteModuleCoverage(file_path, module_name, &pcs[module_start_idx],
+ i - module_start_idx);
+ }
+
+ last_base = module_base;
+ module_start_idx = i;
+ module_found = true;
+ __sanitizer_get_module_and_offset_for_pc(pc, module_name, kMaxPathLength,
+ &pcs[i]);
+ }
+ }
+
+ if (module_found) {
+ WriteModuleCoverage(file_path, module_name, &pcs[module_start_idx],
+ len - module_start_idx);
+ }
+
+ InternalFree(file_path);
+ InternalFree(module_name);
+ InternalFree(pcs);
+}
+
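SanitizerDumpCoverage above is a sort-then-group pass: sort the raw PCs, translate each into a module offset, and emit one .sancov file per maximal run of PCs that share a module base. The run-splitting is the only subtle part; here is a standalone sketch of it over pre-resolved (base, offset) pairs (types and names are illustrative):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Pc { uint64_t module_base, offset; };

    // Stand-in for WriteModuleCoverage: one output record per module.
    void WriteModule(uint64_t base, const Pc* begin, size_t n) {
      std::printf("module %#llx: %zu PCs, first offset %#llx\n",
                  (unsigned long long)base, n,
                  (unsigned long long)begin->offset);
    }

    void DumpByModule(std::vector<Pc> pcs) {
      std::sort(pcs.begin(), pcs.end(), [](const Pc& a, const Pc& b) {
        return a.module_base < b.module_base;
      });
      size_t run_start = 0;
      for (size_t i = 1; i <= pcs.size(); i++) {
        // Flush at the end of each run of equal module bases.
        if (i == pcs.size() ||
            pcs[i].module_base != pcs[run_start].module_base) {
          WriteModule(pcs[run_start].module_base, &pcs[run_start],
                      i - run_start);
          run_start = i;
        }
      }
    }

    int main() {
      DumpByModule({{0x400000, 0x10}, {0x400000, 0x20}, {0x7f0000, 0x8}});
    }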
+// Collects trace-pc guard coverage.
+// This class relies on zero-initialization.
+class TracePcGuardController {
+ public:
+ void Initialize() {
+ CHECK(!initialized);
+
+ initialized = true;
+ pc_vector.Initialize(0);
+ }
+
+ void InitTracePcGuard(u32* start, u32* end) {
+ if (!initialized) Initialize();
+ CHECK(!*start);
+ CHECK_NE(start, end);
+
+ u32 i = pc_vector.size();
+ for (u32* p = start; p < end; p++) *p = ++i;
+ pc_vector.resize(i);
+ }
+
+ void TracePcGuard(u32* guard, uptr pc) {
+ atomic_uint32_t* guard_ptr = reinterpret_cast<atomic_uint32_t*>(guard);
+ u32 idx = atomic_exchange(guard_ptr, 0, memory_order_relaxed);
+ if (!idx) return;
+ // We start indices from 1.
+ pc_vector[idx - 1] = pc;
+ }
+
+ void Dump() {
+ if (!initialized || !common_flags()->coverage) return;
+ __sanitizer_dump_coverage(pc_vector.data(), pc_vector.size());
+ }
+
+ private:
+ bool initialized;
+ InternalMmapVectorNoCtor<uptr> pc_vector;
+};
+
+static TracePcGuardController pc_guard_controller;
+
+} // namespace
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage( // NOLINT
+ const uptr* pcs, uptr len) {
+ return SanitizerDumpCoverage(pcs, len);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_pc_guard(u32* guard) {
+ if (!*guard) return;
+ pc_guard_controller.TracePcGuard(guard, GET_CALLER_PC() - 1);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_pc_guard_init(u32* start, u32* end) {
+ if (start == end || *start) return;
+ pc_guard_controller.InitTracePcGuard(start, end);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage() {
+ pc_guard_controller.Dump();
+}
+} // extern "C"
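The guard protocol in this new file has two halves: __sanitizer_cov_trace_pc_guard_init numbers each module's zero-initialized guards 1..N exactly once, and the hot path atomically exchanges a guard with 0, so each edge records a PC at most once and later hits cost a single relaxed load. A standalone sketch of that once-per-edge protocol; std::atomic replaces the sanitizer atomics, and the reinterpret_cast mirrors the original while assuming atomic<u32> is layout-compatible with a plain u32:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    static std::vector<uintptr_t> pcs;  // pc_vector stand-in; ids are 1-based

    void GuardInit(uint32_t* start, uint32_t* end) {
      uint32_t i = static_cast<uint32_t>(pcs.size());
      for (uint32_t* p = start; p < end; p++) *p = ++i;  // number guards 1..N
      pcs.resize(i);
    }

    void TracePcGuard(uint32_t* guard, uintptr_t pc) {
      // First thread through this edge wins the exchange and records its
      // PC; everyone after sees 0 and returns early.
      auto* g = reinterpret_cast<std::atomic<uint32_t>*>(guard);
      uint32_t idx = g->exchange(0, std::memory_order_relaxed);
      if (!idx) return;
      pcs[idx - 1] = pc;
    }

    int main() {
      uint32_t guards[3] = {};  // zero-initialized, as the compiler emits them
      GuardInit(guards, guards + 3);
      TracePcGuard(&guards[1], 0x401000);
      TracePcGuard(&guards[1], 0x401000);  // no-op: guard already consumed
      for (size_t i = 0; i < pcs.size(); i++)
        if (pcs[i])
          std::printf("edge %zu -> pc %#zx\n", i + 1, (size_t)pcs[i]);
    }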
diff --git a/lib/sanitizer_common/sanitizer_dbghelp.h b/lib/sanitizer_common/sanitizer_dbghelp.h
new file mode 100644
index 000000000000..1689edbf92db
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_dbghelp.h
@@ -0,0 +1,42 @@
+//===-- sanitizer_dbghelp.h ------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Wrappers for lazily loaded dbghelp.dll. Provides function pointers and a
+// callback to initialize them.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_DBGHELP_H
+#define SANITIZER_DBGHELP_H
+
+#if !SANITIZER_WINDOWS
+#error "sanitizer_dbghelp.h is a Windows-only header"
+#endif
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include <dbghelp.h>
+
+namespace __sanitizer {
+
+extern decltype(::StackWalk64) *StackWalk64;
+extern decltype(::SymCleanup) *SymCleanup;
+extern decltype(::SymFromAddr) *SymFromAddr;
+extern decltype(::SymFunctionTableAccess64) *SymFunctionTableAccess64;
+extern decltype(::SymGetLineFromAddr64) *SymGetLineFromAddr64;
+extern decltype(::SymGetModuleBase64) *SymGetModuleBase64;
+extern decltype(::SymGetSearchPathW) *SymGetSearchPathW;
+extern decltype(::SymInitialize) *SymInitialize;
+extern decltype(::SymSetOptions) *SymSetOptions;
+extern decltype(::SymSetSearchPathW) *SymSetSearchPathW;
+extern decltype(::UnDecorateSymbolName) *UnDecorateSymbolName;
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_DBGHELP_H
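A sketch of how the pointers above are presumably bound by the initialization callback the header comment mentions; the dbghelp export names are real, but the helper itself is hypothetical:

    void InitializeDbgHelpIfNeeded() {  // hypothetical helper
      HMODULE dbghelp = LoadLibraryA("dbghelp.dll");
      CHECK(dbghelp && "failed to load dbghelp.dll");
      StackWalk64 =
          (decltype(::StackWalk64) *)GetProcAddress(dbghelp, "StackWalk64");
      SymInitialize =
          (decltype(::SymInitialize) *)GetProcAddress(dbghelp, "SymInitialize");
      // ...and likewise for the remaining entry points declared above.
    }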
diff --git a/lib/sanitizer_common/sanitizer_flags.cc b/lib/sanitizer_common/sanitizer_flags.cc
index c2f19d425bdb..913ce3cb423e 100644
--- a/lib/sanitizer_common/sanitizer_flags.cc
+++ b/lib/sanitizer_common/sanitizer_flags.cc
@@ -30,11 +30,6 @@ struct FlagDescription {
IntrusiveList<FlagDescription> flag_descriptions;
-// If set, the tool will install its own SEGV signal handler by default.
-#ifndef SANITIZER_NEEDS_SEGV
-# define SANITIZER_NEEDS_SEGV 1
-#endif
-
void CommonFlags::SetDefaults() {
#define COMMON_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "sanitizer_flags.inc"
diff --git a/lib/sanitizer_common/sanitizer_flags.inc b/lib/sanitizer_common/sanitizer_flags.inc
index 203f41ca3d2a..43900f87b330 100644
--- a/lib/sanitizer_common/sanitizer_flags.inc
+++ b/lib/sanitizer_common/sanitizer_flags.inc
@@ -75,7 +75,7 @@ COMMON_FLAG(bool, print_summary, true,
"If false, disable printing error summaries in addition to error "
"reports.")
COMMON_FLAG(bool, check_printf, true, "Check printf arguments.")
-COMMON_FLAG(bool, handle_segv, SANITIZER_NEEDS_SEGV,
+COMMON_FLAG(bool, handle_segv, true,
"If set, registers the tool's custom SIGSEGV/SIGBUS handler.")
COMMON_FLAG(bool, handle_abort, false,
"If set, registers the tool's custom SIGABRT handler.")
@@ -118,6 +118,12 @@ COMMON_FLAG(uptr, soft_rss_limit_mb, 0,
" until the RSS goes below the soft limit."
" This limit does not affect memory allocations other than"
" malloc/new.")
+COMMON_FLAG(bool, heap_profile, false, "Experimental heap profiler, ASan-only.")
+COMMON_FLAG(s32, allocator_release_to_os_interval_ms, kReleaseToOSIntervalNever,
+ "Experimental. Only affects a 64-bit allocator. If set, tries to "
+ "release unused memory to the OS, but not more often than this "
+ "interval (in milliseconds). Negative values mean do not attempt "
+ "to release memory to the OS.\n")
COMMON_FLAG(bool, can_use_proc_maps_statm, true,
"If false, do not attempt to read /proc/maps/statm."
" Mostly useful for testing sanitizers.")
@@ -144,19 +150,16 @@ COMMON_FLAG(bool, coverage_direct, SANITIZER_ANDROID,
COMMON_FLAG(const char *, coverage_dir, ".",
"Target directory for coverage dumps. Defaults to the current "
"directory.")
-COMMON_FLAG(bool, coverage_pc_buffer, true,
- "If set (and if 'coverage' is set too), the pcs would be collected "
- "in a buffer.")
COMMON_FLAG(bool, full_address_space, false,
"Sanitize complete address space; "
"by default kernel area on 32-bit platforms will not be sanitized")
COMMON_FLAG(bool, print_suppressions, true,
"Print matched suppressions at exit.")
COMMON_FLAG(
- bool, disable_coredump, (SANITIZER_WORDSIZE == 64),
- "Disable core dumping. By default, disable_core=1 on 64-bit to avoid "
- "dumping a 16T+ core file. Ignored on OSes that don't dump core by"
- "default and for sanitizers that don't reserve lots of virtual memory.")
+ bool, disable_coredump, (SANITIZER_WORDSIZE == 64) && !SANITIZER_GO,
+ "Disable core dumping. By default, disable_coredump=1 on 64-bit to avoid"
+ " dumping a 16T+ core file. Ignored on OSes that don't dump core by"
+ " default and for sanitizers that don't reserve lots of virtual memory.")
COMMON_FLAG(bool, use_madv_dontdump, true,
"If set, instructs kernel to not store the (huge) shadow "
"in core file.")
@@ -216,7 +219,7 @@ COMMON_FLAG(bool, decorate_proc_maps, false, "If set, decorate sanitizer "
COMMON_FLAG(int, exitcode, 1, "Override the program exit status if the tool "
"found an error")
COMMON_FLAG(
- bool, abort_on_error, SANITIZER_MAC,
+ bool, abort_on_error, SANITIZER_ANDROID || SANITIZER_MAC,
"If set, the tool calls abort() instead of _exit() after printing the "
"error report.")
COMMON_FLAG(bool, suppress_equal_pcs, true,
diff --git a/lib/sanitizer_common/sanitizer_interface_internal.h b/lib/sanitizer_common/sanitizer_interface_internal.h
index 7f43c84c2e7d..174d5e92ba44 100644
--- a/lib/sanitizer_common/sanitizer_interface_internal.h
+++ b/lib/sanitizer_common/sanitizer_interface_internal.h
@@ -46,8 +46,12 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_report_error_summary(const char *error_summary);
- SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init();
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage(
+ const __sanitizer::uptr *pcs, const __sanitizer::uptr len);
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage();
+
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(__sanitizer::u32 *guard);
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_annotate_contiguous_container(const void *beg,
@@ -60,6 +64,11 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
const void *__sanitizer_contiguous_container_find_bad_address(
const void *beg, const void *mid, const void *end);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ int __sanitizer_get_module_and_offset_for_pc(
+ __sanitizer::uptr pc, char *module_path,
+ __sanitizer::uptr module_path_len, __sanitizer::uptr *pc_offset);
} // extern "C"
#endif // SANITIZER_INTERFACE_INTERNAL_H
diff --git a/lib/sanitizer_common/sanitizer_internal_defs.h b/lib/sanitizer_common/sanitizer_internal_defs.h
index 720672d2908a..02a1e527312e 100644
--- a/lib/sanitizer_common/sanitizer_internal_defs.h
+++ b/lib/sanitizer_common/sanitizer_internal_defs.h
@@ -24,7 +24,7 @@
# define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllexport)
// FIXME find out what we need on Windows, if anything.
# define SANITIZER_WEAK_ATTRIBUTE
-#elif defined(SANITIZER_GO)
+#elif SANITIZER_GO
# define SANITIZER_INTERFACE_ATTRIBUTE
# define SANITIZER_WEAK_ATTRIBUTE
#else
@@ -32,7 +32,7 @@
# define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak))
#endif
-#if (SANITIZER_LINUX || SANITIZER_WINDOWS) && !defined(SANITIZER_GO)
+#if (SANITIZER_LINUX || SANITIZER_WINDOWS) && !SANITIZER_GO
# define SANITIZER_SUPPORTS_WEAK_HOOKS 1
#else
# define SANITIZER_SUPPORTS_WEAK_HOOKS 0
@@ -296,12 +296,12 @@ inline void Trap() {
}
#else
extern "C" void* _ReturnAddress(void);
+extern "C" void* _AddressOfReturnAddress(void);
# pragma intrinsic(_ReturnAddress)
+# pragma intrinsic(_AddressOfReturnAddress)
# define GET_CALLER_PC() (uptr)_ReturnAddress()
// CaptureStackBackTrace doesn't need to know BP on Windows.
-// FIXME: This macro is still used when printing error reports though it's not
-// clear if the BP value is needed in the ASan reports on Windows.
-# define GET_CURRENT_FRAME() (uptr)0xDEADBEEF
+# define GET_CURRENT_FRAME() (((uptr)_AddressOfReturnAddress()) + sizeof(uptr))
extern "C" void __ud2(void);
# pragma intrinsic(__ud2)
@@ -328,6 +328,17 @@ inline void Trap() {
} // namespace __sanitizer
-using namespace __sanitizer; // NOLINT
+namespace __asan { using namespace __sanitizer; } // NOLINT
+namespace __dsan { using namespace __sanitizer; } // NOLINT
+namespace __dfsan { using namespace __sanitizer; } // NOLINT
+namespace __esan { using namespace __sanitizer; } // NOLINT
+namespace __lsan { using namespace __sanitizer; } // NOLINT
+namespace __msan { using namespace __sanitizer; } // NOLINT
+namespace __tsan { using namespace __sanitizer; } // NOLINT
+namespace __scudo { using namespace __sanitizer; } // NOLINT
+namespace __ubsan { using namespace __sanitizer; } // NOLINT
+namespace __xray { using namespace __sanitizer; } // NOLINT
+namespace __interception { using namespace __sanitizer; } // NOLINT
+
#endif // SANITIZER_DEFS_H
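The per-tool using-directives replace the old global one, so unqualified __sanitizer names still resolve inside each tool's namespace while the global namespace of every including translation unit stays clean. The effect, sketched:

    namespace __asan {
    // Finds __sanitizer::RoundUpTo/GetPageSizeCached via the directive above.
    uptr RoundToPage(uptr p) { return RoundUpTo(p, GetPageSizeCached()); }
    }  // namespace __asan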
diff --git a/lib/sanitizer_common/sanitizer_libc.h b/lib/sanitizer_common/sanitizer_libc.h
index 155bbc49a548..9c11fb0ad2be 100644
--- a/lib/sanitizer_common/sanitizer_libc.h
+++ b/lib/sanitizer_common/sanitizer_libc.h
@@ -62,10 +62,12 @@ int internal_snprintf(char *buffer, uptr length, const char *format, ...);
bool mem_is_zero(const char *mem, uptr size);
// I/O
-const fd_t kInvalidFd = (fd_t)-1;
-const fd_t kStdinFd = 0;
-const fd_t kStdoutFd = (fd_t)1;
-const fd_t kStderrFd = (fd_t)2;
+// Define these as macros so we can use them in linker initialized global
+// structs without dynamic initialization.
+#define kInvalidFd ((fd_t)-1)
+#define kStdinFd ((fd_t)0)
+#define kStdoutFd ((fd_t)1)
+#define kStderrFd ((fd_t)2)
uptr internal_ftruncate(fd_t fd, uptr size);
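What the comment above the macros is getting at, made concrete (the struct layout here is hypothetical): a file-scope object built from these constants can now be fully initialized by the linker, with no dynamic initializer running at startup.

    struct ReportFile {  // illustrative layout only
      fd_t fd;
      const char *path;
    };
    static ReportFile report_file = {kStderrFd, nullptr};  // pure static init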
diff --git a/lib/sanitizer_common/sanitizer_libignore.cc b/lib/sanitizer_common/sanitizer_libignore.cc
index 545393966b38..33a1763d28ea 100644
--- a/lib/sanitizer_common/sanitizer_libignore.cc
+++ b/lib/sanitizer_common/sanitizer_libignore.cc
@@ -50,23 +50,23 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
}
// Scan suppressions list and find newly loaded and unloaded libraries.
- MemoryMappingLayout proc_maps(/*cache_enabled*/false);
- InternalScopedString module(kMaxPathLength);
+ ListOfModules modules;
+ modules.init();
for (uptr i = 0; i < count_; i++) {
Lib *lib = &libs_[i];
bool loaded = false;
- proc_maps.Reset();
- uptr b, e, off, prot;
- while (proc_maps.Next(&b, &e, &off, module.data(), module.size(), &prot)) {
- if ((prot & MemoryMappingLayout::kProtectionExecute) == 0)
- continue;
- if (TemplateMatch(lib->templ, module.data()) ||
- (lib->real_name &&
- internal_strcmp(lib->real_name, module.data()) == 0)) {
+ for (const auto &mod : modules) {
+ for (const auto &range : mod.ranges()) {
+ if (!range.executable)
+ continue;
+ if (!TemplateMatch(lib->templ, mod.full_name()) &&
+ !(lib->real_name &&
+ internal_strcmp(lib->real_name, mod.full_name()) == 0))
+ continue;
if (loaded) {
Report("%s: called_from_lib suppression '%s' is matched against"
" 2 libraries: '%s' and '%s'\n",
- SanitizerToolName, lib->templ, lib->name, module.data());
+ SanitizerToolName, lib->templ, lib->name, mod.full_name());
Die();
}
loaded = true;
@@ -75,13 +75,14 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
VReport(1,
"Matched called_from_lib suppression '%s' against library"
" '%s'\n",
- lib->templ, module.data());
+ lib->templ, mod.full_name());
lib->loaded = true;
- lib->name = internal_strdup(module.data());
+ lib->name = internal_strdup(mod.full_name());
const uptr idx = atomic_load(&loaded_count_, memory_order_relaxed);
- code_ranges_[idx].begin = b;
- code_ranges_[idx].end = e;
+ code_ranges_[idx].begin = range.beg;
+ code_ranges_[idx].end = range.end;
atomic_store(&loaded_count_, idx + 1, memory_order_release);
+ break;
}
}
if (lib->loaded && !loaded) {
diff --git a/lib/sanitizer_common/sanitizer_linux.cc b/lib/sanitizer_common/sanitizer_linux.cc
index 5c5a1a61a657..76cdc72a0f0c 100644
--- a/lib/sanitizer_common/sanitizer_linux.cc
+++ b/lib/sanitizer_common/sanitizer_linux.cc
@@ -99,7 +99,7 @@ const int FUTEX_WAKE = 1;
# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0
#endif
-#if defined(__x86_64__)
+#if defined(__x86_64__) || SANITIZER_MIPS64
extern "C" {
extern void internal_sigreturn();
}
@@ -671,7 +671,7 @@ int internal_sigaction_norestorer(int signum, const void *act, void *oldact) {
// Invokes sigaction via a raw syscall with a restorer, but does not support
// all platforms yet.
// We disable for Go simply because we have not yet added to buildgo.sh.
-#if defined(__x86_64__) && !SANITIZER_GO
+#if (defined(__x86_64__) || SANITIZER_MIPS64) && !SANITIZER_GO
int internal_sigaction_syscall(int signum, const void *act, void *oldact) {
if (act == nullptr)
return internal_sigaction_norestorer(signum, act, oldact);
@@ -801,8 +801,9 @@ bool ThreadLister::GetDirectoryEntries() {
uptr GetPageSize() {
// Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array.
-#if (SANITIZER_LINUX && (defined(__x86_64__) || defined(__i386__))) || \
- SANITIZER_ANDROID
+#if SANITIZER_ANDROID
+ return 4096;
+#elif SANITIZER_LINUX && (defined(__x86_64__) || defined(__i386__))
return EXEC_PAGESIZE;
#else
return sysconf(_SC_PAGESIZE); // EXEC_PAGESIZE may not be trustworthy.
@@ -1229,7 +1230,7 @@ bool IsHandledDeadlySignal(int signum) {
return (signum == SIGSEGV || signum == SIGBUS) && common_flags()->handle_segv;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void *internal_start_thread(void(*func)(void *arg), void *arg) {
// Start the thread with signals blocked, otherwise it can steal user signals.
__sanitizer_sigset_t set, old;
@@ -1291,10 +1292,6 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag(void *context) {
#elif defined(__arm__)
static const uptr FSR_WRITE = 1U << 11;
uptr fsr = ucontext->uc_mcontext.error_code;
- // FSR bits 5:0 describe the abort type, and are never 0 (or so it seems).
- // Zero FSR indicates an older kernel that does not pass this information to
- // the userspace.
- if (fsr == 0) return UNKNOWN;
return fsr & FSR_WRITE ? WRITE : READ;
#elif defined(__aarch64__)
static const u64 ESR_ELx_WNR = 1U << 6;
@@ -1307,6 +1304,10 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag(void *context) {
#endif
}
+void SignalContext::DumpAllRegisters(void *context) {
+ // FIXME: Implement this.
+}
+
void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
#if defined(__arm__)
ucontext_t *ucontext = (ucontext_t*)context;
@@ -1392,6 +1393,11 @@ void MaybeReexec() {
// No need to re-exec on Linux.
}
+uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding) {
+ UNREACHABLE("FindAvailableMemoryRange is not available");
+ return 0;
+}
+
} // namespace __sanitizer
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
diff --git a/lib/sanitizer_common/sanitizer_linux.h b/lib/sanitizer_common/sanitizer_linux.h
index 526fa4426e34..d4d0f47eed02 100644
--- a/lib/sanitizer_common/sanitizer_linux.h
+++ b/lib/sanitizer_common/sanitizer_linux.h
@@ -42,7 +42,7 @@ uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);
// (like the process-wide error reporting SEGV handler) must use
// internal_sigaction instead.
int internal_sigaction_norestorer(int signum, const void *act, void *oldact);
-#if defined(__x86_64__) && !SANITIZER_GO
+#if (defined(__x86_64__) || SANITIZER_MIPS64) && !SANITIZER_GO
// Uses a raw system call to avoid interceptors.
int internal_sigaction_syscall(int signum, const void *act, void *oldact);
#endif
diff --git a/lib/sanitizer_common/sanitizer_linux_libcdep.cc b/lib/sanitizer_common/sanitizer_linux_libcdep.cc
index a37bdf118d3c..eb14c970a7fb 100644
--- a/lib/sanitizer_common/sanitizer_linux_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_linux_libcdep.cc
@@ -34,6 +34,7 @@
#include <pthread.h>
#include <signal.h>
#include <sys/resource.h>
+#include <syslog.h>
#if SANITIZER_FREEBSD
#include <pthread_np.h>
@@ -51,8 +52,6 @@
#if SANITIZER_ANDROID && __ANDROID_API__ < 21
#include <android/log.h>
-#else
-#include <syslog.h>
#endif
#if !SANITIZER_ANDROID
@@ -299,7 +298,10 @@ uptr ThreadSelf() {
rdhwr %0,$29;\
.set pop" : "=r" (thread_pointer));
descr_addr = thread_pointer - kTlsTcbOffset - TlsPreTcbSize();
-# elif defined(__aarch64__) || defined(__s390__)
+# elif defined(__aarch64__)
+ descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
+ ThreadDescriptorSize();
+# elif defined(__s390__)
descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer());
# elif defined(__powerpc64__)
// PPC64LE uses TLS variant I. The thread pointer (in GPR 13)
@@ -521,6 +523,7 @@ uptr GetRSS() {
static atomic_uint8_t android_log_initialized;
void AndroidLogInit() {
+ openlog(GetProcessName(), 0, LOG_USER);
atomic_store(&android_log_initialized, 1, memory_order_release);
}
diff --git a/lib/sanitizer_common/sanitizer_linux_mips64.S b/lib/sanitizer_common/sanitizer_linux_mips64.S
new file mode 100644
index 000000000000..8729642aa654
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_linux_mips64.S
@@ -0,0 +1,23 @@
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+
+// Avoid being marked as needing an executable stack:
+#if defined(__linux__) && defined(__ELF__)
+.section .note.GNU-stack,"",%progbits
+#endif
+
+// Further contents are mips64 only:
+#if defined(__linux__) && defined(__mips64)
+
+.section .text
+.set noreorder
+.globl internal_sigreturn
+.type internal_sigreturn, @function
+internal_sigreturn:
+
+ li $v0,5211 // #5211 is for SYS_rt_sigreturn
+ syscall
+
+.size internal_sigreturn, .-internal_sigreturn
+
+#endif // defined(__linux__) && defined(__mips64)
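The intended consumer is internal_sigaction_syscall in the sanitizer_linux.cc hunk above: the stub is installed as the kernel-level restorer so the raw rt_sigaction syscall has a valid return trampoline. A sketch of that call site:

    __sanitizer_kernel_sigaction_t k_act;
    // ...handler, flags and mask copied from the caller's struct...
    k_act.sa_restorer = &internal_sigreturn;  // the stub defined above
    internal_syscall(SYSCALL(rt_sigaction), (uptr)signum, (uptr)&k_act,
                     (uptr)nullptr, sizeof(__sanitizer_kernel_sigset_t));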
diff --git a/lib/sanitizer_common/sanitizer_mac.cc b/lib/sanitizer_common/sanitizer_mac.cc
index 69178c69ebc0..73e018c857e8 100644
--- a/lib/sanitizer_common/sanitizer_mac.cc
+++ b/lib/sanitizer_common/sanitizer_mac.cc
@@ -72,12 +72,23 @@ extern "C" {
#include <unistd.h>
#include <util.h>
-// from <crt_externs.h>, but we don't have that file on iOS
+// From <crt_externs.h>, but we don't have that file on iOS.
extern "C" {
extern char ***_NSGetArgv(void);
extern char ***_NSGetEnviron(void);
}
+// From <mach/mach_vm.h>, but we don't have that file on iOS.
+extern "C" {
+ extern kern_return_t mach_vm_region_recurse(
+ vm_map_t target_task,
+ mach_vm_address_t *address,
+ mach_vm_size_t *size,
+ natural_t *nesting_depth,
+ vm_region_recurse_info_t info,
+ mach_msg_type_number_t *infoCnt);
+}
+
namespace __sanitizer {
#include "sanitizer_syscall_generic.inc"
@@ -362,7 +373,7 @@ void InitTlsSize() {
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
uptr *tls_addr, uptr *tls_size) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
uptr stack_top, stack_bottom;
GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
*stk_addr = stack_bottom;
@@ -387,6 +398,10 @@ bool IsHandledDeadlySignal(int signum) {
if ((SANITIZER_WATCHOS || SANITIZER_TVOS) && !(SANITIZER_IOSSIM))
// Handling fatal signals on watchOS and tvOS devices is disallowed.
return false;
+ if (common_flags()->handle_abort && signum == SIGABRT)
+ return true;
+ if (common_flags()->handle_sigill && signum == SIGILL)
+ return true;
return (signum == SIGSEGV || signum == SIGBUS) && common_flags()->handle_segv;
}
@@ -433,6 +448,15 @@ MacosVersion GetMacosVersion() {
return result;
}
+bool PlatformHasDifferentMemcpyAndMemmove() {
+ // On OS X 10.7 memcpy() and memmove() are both resolved
+ // into memmove$VARIANT$sse42.
+ // See also https://github.com/google/sanitizers/issues/34.
+ // TODO(glider): need to check dynamically that memcpy() and memmove() are
+ // actually the same function.
+ return GetMacosVersion() == MACOS_VERSION_SNOW_LEOPARD;
+}
+
uptr GetRSS() {
struct task_basic_info info;
unsigned count = TASK_BASIC_INFO_COUNT;
@@ -458,12 +482,12 @@ void *internal_start_thread(void(*func)(void *arg), void *arg) {
void internal_join_thread(void *th) { pthread_join((pthread_t)th, 0); }
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static BlockingMutex syslog_lock(LINKER_INITIALIZED);
#endif
void WriteOneLineToSyslog(const char *s) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
syslog_lock.CheckLocked();
asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", s);
#endif
@@ -476,7 +500,7 @@ void LogMessageOnPrintf(const char *str) {
}
void LogFullErrorReport(const char *buffer) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Log with os_trace. This will make it into the crash log.
#if SANITIZER_OS_TRACE
if (GetMacosVersion() >= MACOS_VERSION_YOSEMITE) {
@@ -549,7 +573,7 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
# endif
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static const char kDyldInsertLibraries[] = "DYLD_INSERT_LIBRARIES";
LowLevelAllocator allocator_for_env;
@@ -740,6 +764,96 @@ char **GetArgv() {
return *_NSGetArgv();
}
+uptr FindAvailableMemoryRange(uptr shadow_size,
+ uptr alignment,
+ uptr left_padding) {
+ typedef vm_region_submap_short_info_data_64_t RegionInfo;
+ enum { kRegionInfoSize = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 };
+ // Start searching for available memory region past PAGEZERO, which is
+ // 4KB on 32-bit and 4GB on 64-bit.
+ mach_vm_address_t start_address =
+ (SANITIZER_WORDSIZE == 32) ? 0x000000001000 : 0x000100000000;
+
+ mach_vm_address_t address = start_address;
+ mach_vm_address_t free_begin = start_address;
+ kern_return_t kr = KERN_SUCCESS;
+ while (kr == KERN_SUCCESS) {
+ mach_vm_size_t vmsize = 0;
+ natural_t depth = 0;
+ RegionInfo vminfo;
+ mach_msg_type_number_t count = kRegionInfoSize;
+ kr = mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth,
+ (vm_region_info_t)&vminfo, &count);
+ if (free_begin != address) {
+ // We found a free region [free_begin..address-1].
+ uptr shadow_address = RoundUpTo((uptr)free_begin + left_padding,
+ alignment);
+ if (shadow_address + shadow_size < (uptr)address) {
+ return shadow_address;
+ }
+ }
+ // Move to the next region.
+ address += vmsize;
+ free_begin = address;
+ }
+
+ // We looked at all free regions and could not find one large enough.
+ return 0;
+}
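An illustrative call site (ReserveShadowMemoryRange is hypothetical here): probe for a gap before committing shadow memory.

    uptr shadow = FindAvailableMemoryRange(shadow_size, alignment, left_padding);
    if (!shadow) {
      Report("ERROR: no address range large enough for shadow memory\n");
      Die();
    }
    ReserveShadowMemoryRange(shadow, shadow + shadow_size - 1);  // hypothetical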
+
+// FIXME implement on this platform.
+void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { }
+
+void SignalContext::DumpAllRegisters(void *context) {
+ Report("Register values:\n");
+
+ ucontext_t *ucontext = (ucontext_t*)context;
+# define DUMPREG64(r) \
+ Printf("%s = 0x%016llx ", #r, ucontext->uc_mcontext->__ss.__ ## r);
+# define DUMPREG32(r) \
+ Printf("%s = 0x%08x ", #r, ucontext->uc_mcontext->__ss.__ ## r);
+# define DUMPREG_(r) Printf(" "); DUMPREG(r);
+# define DUMPREG__(r) Printf(" "); DUMPREG(r);
+# define DUMPREG___(r) Printf(" "); DUMPREG(r);
+
+# if defined(__x86_64__)
+# define DUMPREG(r) DUMPREG64(r)
+ DUMPREG(rax); DUMPREG(rbx); DUMPREG(rcx); DUMPREG(rdx); Printf("\n");
+ DUMPREG(rdi); DUMPREG(rsi); DUMPREG(rbp); DUMPREG(rsp); Printf("\n");
+ DUMPREG_(r8); DUMPREG_(r9); DUMPREG(r10); DUMPREG(r11); Printf("\n");
+ DUMPREG(r12); DUMPREG(r13); DUMPREG(r14); DUMPREG(r15); Printf("\n");
+# elif defined(__i386__)
+# define DUMPREG(r) DUMPREG32(r)
+ DUMPREG(eax); DUMPREG(ebx); DUMPREG(ecx); DUMPREG(edx); Printf("\n");
+ DUMPREG(edi); DUMPREG(esi); DUMPREG(ebp); DUMPREG(esp); Printf("\n");
+# elif defined(__aarch64__)
+# define DUMPREG(r) DUMPREG64(r)
+ DUMPREG_(x[0]); DUMPREG_(x[1]); DUMPREG_(x[2]); DUMPREG_(x[3]); Printf("\n");
+ DUMPREG_(x[4]); DUMPREG_(x[5]); DUMPREG_(x[6]); DUMPREG_(x[7]); Printf("\n");
+ DUMPREG_(x[8]); DUMPREG_(x[9]); DUMPREG(x[10]); DUMPREG(x[11]); Printf("\n");
+ DUMPREG(x[12]); DUMPREG(x[13]); DUMPREG(x[14]); DUMPREG(x[15]); Printf("\n");
+ DUMPREG(x[16]); DUMPREG(x[17]); DUMPREG(x[18]); DUMPREG(x[19]); Printf("\n");
+ DUMPREG(x[20]); DUMPREG(x[21]); DUMPREG(x[22]); DUMPREG(x[23]); Printf("\n");
+ DUMPREG(x[24]); DUMPREG(x[25]); DUMPREG(x[26]); DUMPREG(x[27]); Printf("\n");
+ DUMPREG(x[28]); DUMPREG___(fp); DUMPREG___(lr); DUMPREG___(sp); Printf("\n");
+# elif defined(__arm__)
+# define DUMPREG(r) DUMPREG32(r)
+ DUMPREG_(r[0]); DUMPREG_(r[1]); DUMPREG_(r[2]); DUMPREG_(r[3]); Printf("\n");
+ DUMPREG_(r[4]); DUMPREG_(r[5]); DUMPREG_(r[6]); DUMPREG_(r[7]); Printf("\n");
+ DUMPREG_(r[8]); DUMPREG_(r[9]); DUMPREG(r[10]); DUMPREG(r[11]); Printf("\n");
+ DUMPREG(r[12]); DUMPREG___(sp); DUMPREG___(lr); DUMPREG___(pc); Printf("\n");
+# else
+# error "Unknown architecture"
+# endif
+
+# undef DUMPREG64
+# undef DUMPREG32
+# undef DUMPREG_
+# undef DUMPREG__
+# undef DUMPREG___
+# undef DUMPREG
+}
+
} // namespace __sanitizer
#endif // SANITIZER_MAC
diff --git a/lib/sanitizer_common/sanitizer_mac.h b/lib/sanitizer_common/sanitizer_mac.h
index 6e2b84f432e5..636d9bfeac8c 100644
--- a/lib/sanitizer_common/sanitizer_mac.h
+++ b/lib/sanitizer_common/sanitizer_mac.h
@@ -39,17 +39,21 @@ char **GetEnviron();
} // namespace __sanitizer
extern "C" {
-static char __crashreporter_info_buff__[kErrorMessageBufferSize] = {};
+static char __crashreporter_info_buff__[__sanitizer::kErrorMessageBufferSize] =
+ {};
static const char *__crashreporter_info__ __attribute__((__used__)) =
&__crashreporter_info_buff__[0];
asm(".desc ___crashreporter_info__, 0x10");
} // extern "C"
+
+namespace __sanitizer {
static BlockingMutex crashreporter_info_mutex(LINKER_INITIALIZED);
INLINE void CRAppendCrashLogMessage(const char *msg) {
BlockingMutexLock l(&crashreporter_info_mutex);
internal_strlcat(__crashreporter_info_buff__, msg,
sizeof(__crashreporter_info_buff__)); }
+} // namespace __sanitizer
#endif // SANITIZER_MAC
#endif // SANITIZER_MAC_H
diff --git a/lib/sanitizer_common/sanitizer_malloc_mac.inc b/lib/sanitizer_common/sanitizer_malloc_mac.inc
index 149857c168c6..6fbee07c16cc 100644
--- a/lib/sanitizer_common/sanitizer_malloc_mac.inc
+++ b/lib/sanitizer_common/sanitizer_malloc_mac.inc
@@ -46,9 +46,45 @@ INTERCEPTOR(malloc_zone_t *, malloc_create_zone,
// This matches the behavior of malloc_create_zone() on OSX 10.7 and higher.
mprotect(new_zone, allocated_size, PROT_READ);
}
+ // We're explicitly *NOT* registering the zone.
return new_zone;
}
+INTERCEPTOR(void, malloc_destroy_zone, malloc_zone_t *zone) {
+ COMMON_MALLOC_ENTER();
+ // We don't need to do anything here. We're not registering new zones, so we
+ // don't need to unregister. Just un-mprotect and free() the zone.
+ if (GetMacosVersion() >= MACOS_VERSION_LION) {
+ uptr page_size = GetPageSizeCached();
+ uptr allocated_size = RoundUpTo(sizeof(sanitizer_zone), page_size);
+ mprotect(zone, allocated_size, PROT_READ | PROT_WRITE);
+ }
+ COMMON_MALLOC_FREE(zone);
+}
+
+extern unsigned malloc_num_zones;
+extern malloc_zone_t **malloc_zones;
+
+// We need to make sure that sanitizer_zone is registered as malloc_zones[0]. If
+// libmalloc tries to set up a different zone as malloc_zones[0], it will call
+// mprotect(malloc_zones, ..., PROT_READ). This interceptor will catch that and
+// make sure we are still the first (default) zone.
+INTERCEPTOR(int, mprotect, void *addr, size_t len, int prot) {
+ if (addr == malloc_zones && prot == PROT_READ) {
+ if (malloc_num_zones > 1 && malloc_zones[0] != &sanitizer_zone) {
+ for (unsigned i = 1; i < malloc_num_zones; i++) {
+ if (malloc_zones[i] == &sanitizer_zone) {
+ // Swap malloc_zones[0] and malloc_zones[i].
+ malloc_zones[i] = malloc_zones[0];
+ malloc_zones[0] = &sanitizer_zone;
+ break;
+ }
+ }
+ }
+ }
+ return REAL(mprotect)(addr, len, prot);
+}
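The invariant the interceptor preserves can be stated as one check (illustrative, not in the patch): whenever libmalloc finishes a re-registration that hits the guarded mprotect, the sanitizer zone is back at index 0, so default-zone lookups keep resolving to it.

    CHECK(malloc_num_zones == 0 || malloc_zones[0] == &sanitizer_zone);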
+
INTERCEPTOR(malloc_zone_t *, malloc_default_zone, void) {
COMMON_MALLOC_ENTER();
return &sanitizer_zone;
diff --git a/lib/sanitizer_common/sanitizer_platform.h b/lib/sanitizer_common/sanitizer_platform.h
index 0ce23077a5ea..d9a8e8df1573 100644
--- a/lib/sanitizer_common/sanitizer_platform.h
+++ b/lib/sanitizer_common/sanitizer_platform.h
@@ -168,7 +168,9 @@
// For such platforms build this code with -DSANITIZER_CAN_USE_ALLOCATOR64=0 or
// change the definition of SANITIZER_CAN_USE_ALLOCATOR64 here.
#ifndef SANITIZER_CAN_USE_ALLOCATOR64
-# if defined(__mips64) || defined(__aarch64__)
+# if SANITIZER_ANDROID && defined(__aarch64__)
+# define SANITIZER_CAN_USE_ALLOCATOR64 1
+# elif defined(__mips64) || defined(__aarch64__)
# define SANITIZER_CAN_USE_ALLOCATOR64 0
# else
# define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64)
@@ -247,4 +249,8 @@
#define SANITIZER_NLDBL_VERSION "GLIBC_2.4"
#endif
+#ifndef SANITIZER_GO
+# define SANITIZER_GO 0
+#endif
+
#endif // SANITIZER_PLATFORM_H
diff --git a/lib/sanitizer_common/sanitizer_platform_interceptors.h b/lib/sanitizer_common/sanitizer_platform_interceptors.h
index a4afc0f12bdb..c4f90aec942e 100644
--- a/lib/sanitizer_common/sanitizer_platform_interceptors.h
+++ b/lib/sanitizer_common/sanitizer_platform_interceptors.h
@@ -85,14 +85,7 @@
#define SANITIZER_INTERCEPT_MEMCMP 1
// FIXME: enable memmem on Windows.
#define SANITIZER_INTERCEPT_MEMMEM SI_NOT_WINDOWS
-// The function memchr() contains a jump in the first 6 bytes
-// that is problematic to intercept correctly on Win64.
-// Disable memchr() interception for Win64.
-#if SANITIZER_WINDOWS64
-#define SANITIZER_INTERCEPT_MEMCHR 0
-#else
#define SANITIZER_INTERCEPT_MEMCHR 1
-#endif
#define SANITIZER_INTERCEPT_MEMRCHR SI_FREEBSD || SI_LINUX
#define SANITIZER_INTERCEPT_READ SI_NOT_WINDOWS
@@ -242,6 +235,7 @@
#define SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_TMPNAM SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_TMPNAM_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_TTYNAME_R SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_TEMPNAM SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_SINCOS SI_LINUX
#define SANITIZER_INTERCEPT_REMQUO SI_NOT_WINDOWS
@@ -318,4 +312,8 @@
#define SANITIZER_INTERCEPT___XSTAT64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT___LXSTAT SANITIZER_INTERCEPT___XSTAT
#define SANITIZER_INTERCEPT___LXSTAT64 SI_LINUX_NOT_ANDROID
+
+#define SANITIZER_INTERCEPT_UTMP SI_NOT_WINDOWS && !SI_MAC && !SI_FREEBSD
+#define SANITIZER_INTERCEPT_UTMPX SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD
+
#endif // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H
diff --git a/lib/sanitizer_common/sanitizer_platform_limits_linux.cc b/lib/sanitizer_common/sanitizer_platform_limits_linux.cc
index ed16f639c4e6..46e3b181304e 100644
--- a/lib/sanitizer_common/sanitizer_platform_limits_linux.cc
+++ b/lib/sanitizer_common/sanitizer_platform_limits_linux.cc
@@ -38,6 +38,7 @@
#define uid_t __kernel_uid_t
#define gid_t __kernel_gid_t
#define off_t __kernel_off_t
+#define time_t __kernel_time_t
// This header seems to contain the definitions of _kernel_ stat* structs.
#include <asm/stat.h>
#undef ino_t
@@ -55,6 +56,8 @@
#include <linux/perf_event.h>
#endif
+using namespace __sanitizer;
+
namespace __sanitizer {
#if !SANITIZER_ANDROID
unsigned struct_statfs64_sz = sizeof(struct statfs64);
@@ -62,7 +65,8 @@ namespace __sanitizer {
} // namespace __sanitizer
#if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__aarch64__)\
- && !defined(__mips__) && !defined(__s390__)
+ && !defined(__mips__) && !defined(__s390__)\
+ && !defined(__sparc__)
COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
#endif
diff --git a/lib/sanitizer_common/sanitizer_platform_limits_posix.cc b/lib/sanitizer_common/sanitizer_platform_limits_posix.cc
index 137cd9a3c52f..fbde5e17dc63 100644
--- a/lib/sanitizer_common/sanitizer_platform_limits_posix.cc
+++ b/lib/sanitizer_common/sanitizer_platform_limits_posix.cc
@@ -51,6 +51,9 @@
#include <termios.h>
#include <time.h>
#include <wchar.h>
+#if !SANITIZER_MAC && !SANITIZER_FREEBSD
+#include <utmp.h>
+#endif
#if !SANITIZER_IOS
#include <net/route.h>
@@ -59,6 +62,7 @@
#if !SANITIZER_ANDROID
#include <sys/mount.h>
#include <sys/timeb.h>
+#include <utmpx.h>
#endif
#if SANITIZER_LINUX
@@ -284,6 +288,13 @@ namespace __sanitizer {
int shmctl_shm_stat = (int)SHM_STAT;
#endif
+#if !SANITIZER_MAC && !SANITIZER_FREEBSD
+ unsigned struct_utmp_sz = sizeof(struct utmp);
+#endif
+#if !SANITIZER_ANDROID
+ unsigned struct_utmpx_sz = sizeof(struct utmpx);
+#endif
+
int map_fixed = MAP_FIXED;
int af_inet = (int)AF_INET;
@@ -937,6 +948,8 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
const int si_SEGV_ACCERR = SEGV_ACCERR;
} // namespace __sanitizer
+using namespace __sanitizer;
+
COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t));
COMPILER_CHECK(sizeof(socklen_t) == sizeof(unsigned));
diff --git a/lib/sanitizer_common/sanitizer_platform_limits_posix.h b/lib/sanitizer_common/sanitizer_platform_limits_posix.h
index 14bc75046a54..477a0ecbe0e4 100644
--- a/lib/sanitizer_common/sanitizer_platform_limits_posix.h
+++ b/lib/sanitizer_common/sanitizer_platform_limits_posix.h
@@ -20,13 +20,17 @@
#if SANITIZER_FREEBSD
// FreeBSD's dlopen() returns a pointer to an Obj_Entry structure that
-// incroporates the map structure.
+// incorporates the map structure.
# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
((link_map*)((handle) == nullptr ? nullptr : ((char*)(handle) + 544)))
#else
# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) ((link_map*)(handle))
#endif // !SANITIZER_FREEBSD
+#ifndef __GLIBC_PREREQ
+#define __GLIBC_PREREQ(x, y) 0
+#endif
+
namespace __sanitizer {
extern unsigned struct_utsname_sz;
extern unsigned struct_stat_sz;
@@ -87,6 +91,14 @@ namespace __sanitizer {
#elif defined(__s390x__)
const unsigned struct_kernel_stat_sz = 144;
const unsigned struct_kernel_stat64_sz = 0;
+#elif defined(__sparc__) && defined(__arch64__)
+ const unsigned struct___old_kernel_stat_sz = 0;
+ const unsigned struct_kernel_stat_sz = 104;
+ const unsigned struct_kernel_stat64_sz = 144;
+#elif defined(__sparc__) && !defined(__arch64__)
+ const unsigned struct___old_kernel_stat_sz = 0;
+ const unsigned struct_kernel_stat_sz = 64;
+ const unsigned struct_kernel_stat64_sz = 104;
#endif
struct __sanitizer_perf_event_attr {
unsigned type;
@@ -109,7 +121,7 @@ namespace __sanitizer {
#if defined(__powerpc64__) || defined(__s390__)
const unsigned struct___old_kernel_stat_sz = 0;
-#else
+#elif !defined(__sparc__)
const unsigned struct___old_kernel_stat_sz = 32;
#endif
@@ -194,6 +206,18 @@ namespace __sanitizer {
unsigned __seq;
u64 __unused1;
u64 __unused2;
+#elif defined(__sparc__)
+#if defined(__arch64__)
+ unsigned mode;
+ unsigned short __pad1;
+#else
+ unsigned short __pad1;
+ unsigned short mode;
+ unsigned short __pad2;
+#endif
+ unsigned short __seq;
+ unsigned long long __unused1;
+ unsigned long long __unused2;
#elif defined(__mips__) || defined(__aarch64__) || defined(__s390x__)
unsigned int mode;
unsigned short __seq;
@@ -217,6 +241,26 @@ namespace __sanitizer {
struct __sanitizer_shmid_ds {
__sanitizer_ipc_perm shm_perm;
+ #if defined(__sparc__)
+ #if !defined(__arch64__)
+ u32 __pad1;
+ #endif
+ long shm_atime;
+ #if !defined(__arch64__)
+ u32 __pad2;
+ #endif
+ long shm_dtime;
+ #if !defined(__arch64__)
+ u32 __pad3;
+ #endif
+ long shm_ctime;
+ uptr shm_segsz;
+ int shm_cpid;
+ int shm_lpid;
+ unsigned long shm_nattch;
+ unsigned long __glibc_reserved1;
+ unsigned long __glibc_reserved2;
+ #else
#ifndef __powerpc__
uptr shm_segsz;
#elif !defined(__powerpc64__)
@@ -254,6 +298,7 @@ namespace __sanitizer {
uptr __unused4;
uptr __unused5;
#endif
+#endif
};
#elif SANITIZER_FREEBSD
struct __sanitizer_ipc_perm {
@@ -588,7 +633,18 @@ namespace __sanitizer {
__sanitizer_sigset_t sa_mask;
#endif
#ifndef __mips__
+#if defined(__sparc__)
+#if __GLIBC_PREREQ (2, 20)
+ // On sparc glibc 2.19 and earlier sa_flags was unsigned long, and
+ // __glibc_reserved0 didn't exist.
+ int __glibc_reserved0;
int sa_flags;
+#else
+ unsigned long sa_flags;
+#endif
+#else
+ int sa_flags;
+#endif
#endif
#endif
#if SANITIZER_LINUX
@@ -607,7 +663,7 @@ namespace __sanitizer {
typedef __sanitizer_sigset_t __sanitizer_kernel_sigset_t;
#elif defined(__mips__)
struct __sanitizer_kernel_sigset_t {
- u8 sig[16];
+ uptr sig[2];
};
#else
struct __sanitizer_kernel_sigset_t {
@@ -616,6 +672,17 @@ namespace __sanitizer {
#endif
// Linux system headers define the 'sa_handler' and 'sa_sigaction' macros.
+#if SANITIZER_MIPS
+ struct __sanitizer_kernel_sigaction_t {
+ unsigned int sa_flags;
+ union {
+ void (*handler)(int signo);
+ void (*sigaction)(int signo, void *info, void *ctx);
+ };
+ __sanitizer_kernel_sigset_t sa_mask;
+ void (*sa_restorer)(void);
+ };
+#else
struct __sanitizer_kernel_sigaction_t {
union {
void (*handler)(int signo);
@@ -625,6 +692,7 @@ namespace __sanitizer {
void (*sa_restorer)(void);
__sanitizer_kernel_sigset_t sa_mask;
};
+#endif
extern uptr sig_ign;
extern uptr sig_dfl;
@@ -794,6 +862,13 @@ namespace __sanitizer {
extern int shmctl_shm_stat;
#endif
+#if !SANITIZER_MAC && !SANITIZER_FREEBSD
+ extern unsigned struct_utmp_sz;
+#endif
+#if !SANITIZER_ANDROID
+ extern unsigned struct_utmpx_sz;
+#endif
+
extern int map_fixed;
// ioctl arguments
@@ -839,7 +914,8 @@ struct __sanitizer_cookie_io_functions_t {
#define IOC_NRBITS 8
#define IOC_TYPEBITS 8
-#if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__)
+#if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__) || \
+ defined(__sparc__)
#define IOC_SIZEBITS 13
#define IOC_DIRBITS 3
#define IOC_NONE 1U
@@ -869,7 +945,16 @@ struct __sanitizer_cookie_io_functions_t {
#define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)
#define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)
#define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)
+
+#if defined(__sparc__)
+// In sparc the 14 bits SIZE field overlaps with the
+// least significant bit of DIR, so either IOC_READ or
+// IOC_WRITE shall be 1 in order to get a non-zero SIZE.
+#define IOC_SIZE(nr) \
+ ((((((nr) >> 29) & 0x7) & (4U | 2U)) == 0) ? 0 : (((nr) >> 16) & 0x3fff))
+#else
#define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)
+#endif
extern unsigned struct_ifreq_sz;
extern unsigned struct_termios_sz;
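A worked example of the sparc IOC_SIZE macro above, assuming the kernel's sparc encoding of IOC_READ == 2U:

    // dir = IOC_READ, size = 8, type 't', nr = 5:
    unsigned nr = (2U << 29) | (8U << 16) | ('t' << 8) | 5;
    // (nr >> 29) & 0x7 == 2, which intersects (4U | 2U), so
    // IOC_SIZE(nr) == (nr >> 16) & 0x3fff == 8.
    // With dir == IOC_NONE (1U) the macro yields 0 instead: bit 29 is shared
    // between the 14-bit SIZE read and DIR's low bit, so a direction-less
    // request would otherwise appear to have size 0x2000.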
diff --git a/lib/sanitizer_common/sanitizer_posix_libcdep.cc b/lib/sanitizer_common/sanitizer_posix_libcdep.cc
index f1e8b50a2cf6..dd62140b5e07 100644
--- a/lib/sanitizer_common/sanitizer_posix_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_posix_libcdep.cc
@@ -44,6 +44,8 @@
#define MAP_NORESERVE 0
#endif
+typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
+
namespace __sanitizer {
u32 GetUid() {
@@ -54,8 +56,12 @@ uptr GetThreadSelf() {
return (uptr)pthread_self();
}
-void FlushUnneededShadowMemory(uptr addr, uptr size) {
- madvise((void*)addr, size, MADV_DONTNEED);
+void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
+ uptr page_size = GetPageSizeCached();
+ uptr beg_aligned = RoundUpTo(beg, page_size);
+ uptr end_aligned = RoundDownTo(end, page_size);
+ if (beg_aligned < end_aligned)
+ madvise((void*)beg_aligned, end_aligned - beg_aligned, MADV_DONTNEED);
}
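For example, with a 4096-byte page size, ReleaseMemoryPagesToOS(5000, 13000) rounds inward to [8192, 12288) and releases exactly that one fully contained page; the partially covered pages at either end stay mapped, since madvise(MADV_DONTNEED) operates on whole pages.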
void NoHugePagesInRegion(uptr addr, uptr size) {
@@ -126,11 +132,21 @@ void SleepForMillis(int millis) {
}
void Abort() {
+#if !SANITIZER_GO
+ // If we are handling SIGABRT, unhandle it first.
+ if (IsHandledDeadlySignal(SIGABRT)) {
+ struct sigaction sigact;
+ internal_memset(&sigact, 0, sizeof(sigact));
+ sigact.sa_sigaction = (sa_sigaction_t)SIG_DFL;
+ internal_sigaction(SIGABRT, &sigact, nullptr);
+ }
+#endif
+
abort();
}
int Atexit(void (*function)(void)) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
return atexit(function);
#else
return 0;
@@ -141,7 +157,7 @@ bool SupportsColoredOutput(fd_t fd) {
return isatty(fd) != 0;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// TODO(glider): different tools may require different altstack size.
static const uptr kAltStackSize = SIGSTKSZ * 4; // SIGSTKSZ is not enough.
@@ -170,7 +186,6 @@ void UnsetAlternateSignalStack() {
UnmapOrDie(oldstack.ss_sp, oldstack.ss_size);
}
-typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
static void MaybeInstallSigaction(int signum,
SignalHandlerType handler) {
if (!IsHandledDeadlySignal(signum))
diff --git a/lib/sanitizer_common/sanitizer_printf.cc b/lib/sanitizer_common/sanitizer_printf.cc
index 434ebb93dffa..f394e75b05e6 100644
--- a/lib/sanitizer_common/sanitizer_printf.cc
+++ b/lib/sanitizer_common/sanitizer_printf.cc
@@ -213,7 +213,7 @@ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void OnPrint(const char *str) {
(void)str;
}
-#elif defined(SANITIZER_GO) && defined(TSAN_EXTERNAL_HOOKS)
+#elif SANITIZER_GO && defined(TSAN_EXTERNAL_HOOKS)
void OnPrint(const char *str);
#else
void OnPrint(const char *str) {
diff --git a/lib/sanitizer_common/sanitizer_procmaps.h b/lib/sanitizer_common/sanitizer_procmaps.h
index 1fe59ab89532..5c26fb77e686 100644
--- a/lib/sanitizer_common/sanitizer_procmaps.h
+++ b/lib/sanitizer_common/sanitizer_procmaps.h
@@ -35,8 +35,9 @@ class MemoryMappingLayout {
public:
explicit MemoryMappingLayout(bool cache_enabled);
~MemoryMappingLayout();
- bool Next(uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size, uptr *protection);
+ bool Next(uptr *start, uptr *end, uptr *offset, char filename[],
+ uptr filename_size, uptr *protection, ModuleArch *arch = nullptr,
+ u8 *uuid = nullptr);
void Reset();
// In some cases, e.g. when running under a sandbox on Linux, ASan is unable
// to obtain the memory mappings. It should fall back to pre-cached data
@@ -65,13 +66,15 @@ class MemoryMappingLayout {
static ProcSelfMapsBuff cached_proc_self_maps_;
static StaticSpinMutex cache_lock_; // protects cached_proc_self_maps_.
# elif SANITIZER_MAC
- template<u32 kLCSegment, typename SegmentCommand>
- bool NextSegmentLoad(uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size,
+ template <u32 kLCSegment, typename SegmentCommand>
+ bool NextSegmentLoad(uptr *start, uptr *end, uptr *offset, char filename[],
+ uptr filename_size, ModuleArch *arch, u8 *uuid,
uptr *protection);
int current_image_;
u32 current_magic_;
u32 current_filetype_;
+ ModuleArch current_arch_;
+ u8 current_uuid_[kModuleUUIDSize];
int current_load_cmd_count_;
char *current_load_cmd_addr_;
# endif
diff --git a/lib/sanitizer_common/sanitizer_procmaps_freebsd.cc b/lib/sanitizer_common/sanitizer_procmaps_freebsd.cc
index 5011b1ff14b2..30216456330e 100644
--- a/lib/sanitizer_common/sanitizer_procmaps_freebsd.cc
+++ b/lib/sanitizer_common/sanitizer_procmaps_freebsd.cc
@@ -50,7 +50,9 @@ void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
char filename[], uptr filename_size,
- uptr *protection) {
+ uptr *protection, ModuleArch *arch, u8 *uuid) {
+ CHECK(!arch && "not implemented");
+ CHECK(!uuid && "not implemented");
char *last = proc_self_maps_.data + proc_self_maps_.len;
if (current_ >= last) return false;
uptr dummy;
diff --git a/lib/sanitizer_common/sanitizer_procmaps_linux.cc b/lib/sanitizer_common/sanitizer_procmaps_linux.cc
index b6fb7034ded4..fdf85b77a680 100644
--- a/lib/sanitizer_common/sanitizer_procmaps_linux.cc
+++ b/lib/sanitizer_common/sanitizer_procmaps_linux.cc
@@ -28,7 +28,9 @@ static bool IsOneOf(char c, char c1, char c2) {
bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
char filename[], uptr filename_size,
- uptr *protection) {
+ uptr *protection, ModuleArch *arch, u8 *uuid) {
+ CHECK(!arch && "not implemented");
+ CHECK(!uuid && "not implemented");
char *last = proc_self_maps_.data + proc_self_maps_.len;
if (current_ >= last) return false;
uptr dummy;
diff --git a/lib/sanitizer_common/sanitizer_procmaps_mac.cc b/lib/sanitizer_common/sanitizer_procmaps_mac.cc
index 417cc908e247..0dc299c0553a 100644
--- a/lib/sanitizer_common/sanitizer_procmaps_mac.cc
+++ b/lib/sanitizer_common/sanitizer_procmaps_mac.cc
@@ -19,6 +19,20 @@
#include <mach-o/dyld.h>
#include <mach-o/loader.h>
+// These are not available in older macOS SDKs.
+#ifndef CPU_SUBTYPE_X86_64_H
+#define CPU_SUBTYPE_X86_64_H ((cpu_subtype_t)8) /* Haswell */
+#endif
+#ifndef CPU_SUBTYPE_ARM_V7S
+#define CPU_SUBTYPE_ARM_V7S ((cpu_subtype_t)11) /* Swift */
+#endif
+#ifndef CPU_SUBTYPE_ARM_V7K
+#define CPU_SUBTYPE_ARM_V7K ((cpu_subtype_t)12)
+#endif
+#ifndef CPU_TYPE_ARM64
+#define CPU_TYPE_ARM64 (CPU_TYPE_ARM | CPU_ARCH_ABI64)
+#endif
+
namespace __sanitizer {
MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
@@ -53,6 +67,8 @@ void MemoryMappingLayout::Reset() {
current_load_cmd_addr_ = 0;
current_magic_ = 0;
current_filetype_ = 0;
+ current_arch_ = kModuleArchUnknown;
+ internal_memset(current_uuid_, 0, kModuleUUIDSize);
}
// static
@@ -71,11 +87,12 @@ void MemoryMappingLayout::LoadFromCache() {
// and returns the start and end addresses and file offset of the corresponding
// segment.
// Note that the segment addresses are not necessarily sorted.
-template<u32 kLCSegment, typename SegmentCommand>
-bool MemoryMappingLayout::NextSegmentLoad(
- uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size, uptr *protection) {
- const char* lc = current_load_cmd_addr_;
+template <u32 kLCSegment, typename SegmentCommand>
+bool MemoryMappingLayout::NextSegmentLoad(uptr *start, uptr *end, uptr *offset,
+ char filename[], uptr filename_size,
+ ModuleArch *arch, u8 *uuid,
+ uptr *protection) {
+ const char *lc = current_load_cmd_addr_;
current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize;
if (((const load_command *)lc)->cmd == kLCSegment) {
const sptr dlloff = _dyld_get_image_vmaddr_slide(current_image_);
@@ -97,14 +114,61 @@ bool MemoryMappingLayout::NextSegmentLoad(
internal_strncpy(filename, _dyld_get_image_name(current_image_),
filename_size);
}
+ if (arch) {
+ *arch = current_arch_;
+ }
+ if (uuid) {
+ internal_memcpy(uuid, current_uuid_, kModuleUUIDSize);
+ }
return true;
}
return false;
}
+ModuleArch ModuleArchFromCpuType(cpu_type_t cputype, cpu_subtype_t cpusubtype) {
+ cpusubtype = cpusubtype & ~CPU_SUBTYPE_MASK;
+ switch (cputype) {
+ case CPU_TYPE_I386:
+ return kModuleArchI386;
+ case CPU_TYPE_X86_64:
+ if (cpusubtype == CPU_SUBTYPE_X86_64_ALL) return kModuleArchX86_64;
+ if (cpusubtype == CPU_SUBTYPE_X86_64_H) return kModuleArchX86_64H;
+ CHECK(0 && "Invalid subtype of x86_64");
+ return kModuleArchUnknown;
+ case CPU_TYPE_ARM:
+ if (cpusubtype == CPU_SUBTYPE_ARM_V6) return kModuleArchARMV6;
+ if (cpusubtype == CPU_SUBTYPE_ARM_V7) return kModuleArchARMV7;
+ if (cpusubtype == CPU_SUBTYPE_ARM_V7S) return kModuleArchARMV7S;
+ if (cpusubtype == CPU_SUBTYPE_ARM_V7K) return kModuleArchARMV7K;
+ CHECK(0 && "Invalid subtype of ARM");
+ return kModuleArchUnknown;
+ case CPU_TYPE_ARM64:
+ return kModuleArchARM64;
+ default:
+ CHECK(0 && "Invalid CPU type");
+ return kModuleArchUnknown;
+ }
+}
+
+static void FindUUID(const load_command *first_lc, u8 *uuid_output) {
+ const load_command *current_lc = first_lc;
+ while (1) {
+ if (current_lc->cmd == 0) return;
+ if (current_lc->cmd == LC_UUID) {
+ const uuid_command *uuid_lc = (const uuid_command *)current_lc;
+ const uint8_t *uuid = &uuid_lc->uuid[0];
+ internal_memcpy(uuid_output, uuid, kModuleUUIDSize);
+ return;
+ }
+
+ current_lc =
+ (const load_command *)(((char *)current_lc) + current_lc->cmdsize);
+ }
+}
+
bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
char filename[], uptr filename_size,
- uptr *protection) {
+ uptr *protection, ModuleArch *arch, u8 *uuid) {
for (; current_image_ >= 0; current_image_--) {
const mach_header* hdr = _dyld_get_image_header(current_image_);
if (!hdr) continue;
@@ -113,6 +177,7 @@ bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
current_load_cmd_count_ = hdr->ncmds;
current_magic_ = hdr->magic;
current_filetype_ = hdr->filetype;
+ current_arch_ = ModuleArchFromCpuType(hdr->cputype, hdr->cpusubtype);
switch (current_magic_) {
#ifdef MH_MAGIC_64
case MH_MAGIC_64: {
@@ -130,20 +195,24 @@ bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
}
}
+ FindUUID((const load_command *)current_load_cmd_addr_, &current_uuid_[0]);
+
for (; current_load_cmd_count_ >= 0; current_load_cmd_count_--) {
switch (current_magic_) {
// current_magic_ may be only one of MH_MAGIC, MH_MAGIC_64.
#ifdef MH_MAGIC_64
case MH_MAGIC_64: {
if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(
- start, end, offset, filename, filename_size, protection))
+ start, end, offset, filename, filename_size, arch, uuid,
+ protection))
return true;
break;
}
#endif
case MH_MAGIC: {
if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(
- start, end, offset, filename, filename_size, protection))
+ start, end, offset, filename, filename_size, arch, uuid,
+ protection))
return true;
break;
}
@@ -159,9 +228,11 @@ void MemoryMappingLayout::DumpListOfModules(
InternalMmapVector<LoadedModule> *modules) {
Reset();
uptr cur_beg, cur_end, prot;
+ ModuleArch cur_arch;
+ u8 cur_uuid[kModuleUUIDSize];
InternalScopedString module_name(kMaxPathLength);
for (uptr i = 0; Next(&cur_beg, &cur_end, 0, module_name.data(),
- module_name.size(), &prot);
+ module_name.size(), &prot, &cur_arch, &cur_uuid[0]);
i++) {
const char *cur_name = module_name.data();
if (cur_name[0] == '\0')
@@ -173,7 +244,7 @@ void MemoryMappingLayout::DumpListOfModules(
} else {
modules->push_back(LoadedModule());
cur_module = &modules->back();
- cur_module->set(cur_name, cur_beg);
+ cur_module->set(cur_name, cur_beg, cur_arch, cur_uuid);
}
cur_module->addAddressRange(cur_beg, cur_end, prot & kProtectionExecute);
}
diff --git a/lib/sanitizer_common/sanitizer_quarantine.h b/lib/sanitizer_common/sanitizer_quarantine.h
index ccc22bf0133c..ff8f3fa302da 100644
--- a/lib/sanitizer_common/sanitizer_quarantine.h
+++ b/lib/sanitizer_common/sanitizer_quarantine.h
@@ -56,6 +56,7 @@ class Quarantine {
}
uptr GetSize() const { return atomic_load(&max_size_, memory_order_acquire); }
+ uptr GetCacheSize() const { return max_cache_size_; }
void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
c->Enqueue(cb, ptr, size);
diff --git a/lib/sanitizer_common/sanitizer_stackdepot.cc b/lib/sanitizer_common/sanitizer_stackdepot.cc
index 985193d1ed5e..214dda56df34 100644
--- a/lib/sanitizer_common/sanitizer_stackdepot.cc
+++ b/lib/sanitizer_common/sanitizer_stackdepot.cc
@@ -153,9 +153,9 @@ StackTrace StackDepotReverseMap::Get(u32 id) {
if (!map_.size())
return StackTrace();
IdDescPair pair = {id, nullptr};
- uptr idx = InternalBinarySearch(map_, 0, map_.size(), pair,
- IdDescPair::IdComparator);
- if (idx > map_.size())
+ uptr idx =
+ InternalLowerBound(map_, 0, map_.size(), pair, IdDescPair::IdComparator);
+ if (idx >= map_.size() || map_[idx].id != id)
return StackTrace();
return map_[idx].desc->load();
}
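Unlike the old binary search, InternalLowerBound returns the first index whose id is not less than the key (map_.size() when every id is smaller), so the explicit map_[idx].id comparison is what rejects absent ids. For ids {3, 7, 9}, sketched:

    // lower_bound(7)  == 1: map_[1].id == 7  -> hit, return its trace
    // lower_bound(8)  == 2: map_[2].id == 9  -> miss, return StackTrace()
    // lower_bound(10) == 3 == map_.size()    -> miss, caught by bounds check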
diff --git a/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cc b/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cc
index 59ca927fa5b6..36c98d057bd3 100644
--- a/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cc
@@ -82,4 +82,61 @@ void BufferedStackTrace::Unwind(u32 max_depth, uptr pc, uptr bp, void *context,
}
}
+static int GetModuleAndOffsetForPc(uptr pc, char *module_name,
+ uptr module_name_len, uptr *pc_offset) {
+ const char *found_module_name = nullptr;
+ bool ok = Symbolizer::GetOrInit()->GetModuleNameAndOffsetForPC(
+ pc, &found_module_name, pc_offset);
+
+ if (!ok) return false;
+
+ if (module_name && module_name_len) {
+ internal_strncpy(module_name, found_module_name, module_name_len);
+ module_name[module_name_len - 1] = '\x00';
+ }
+ return true;
+}
+
} // namespace __sanitizer
+using namespace __sanitizer;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_symbolize_pc(uptr pc, const char *fmt, char *out_buf,
+ uptr out_buf_size) {
+ if (!out_buf_size) return;
+ pc = StackTrace::GetPreviousInstructionPc(pc);
+ SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
+ if (!frame) {
+ internal_strncpy(out_buf, "<can't symbolize>", out_buf_size);
+ out_buf[out_buf_size - 1] = 0;
+ return;
+ }
+ InternalScopedString frame_desc(GetPageSizeCached());
+ RenderFrame(&frame_desc, fmt, 0, frame->info,
+ common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
+ internal_strncpy(out_buf, frame_desc.data(), out_buf_size);
+ out_buf[out_buf_size - 1] = 0;
+}
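A usage sketch for the new interface; the format specifiers follow RenderFrame (%p PC, %f function, %s file, %l line), and the buffer size is an arbitrary choice:

    char buf[256];
    __sanitizer_symbolize_pc((uptr)__builtin_return_address(0),
                             "%p in %f at %s:%l", buf, sizeof(buf));
    Printf("caller: %s\n", buf);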
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_symbolize_global(uptr data_addr, const char *fmt,
+ char *out_buf, uptr out_buf_size) {
+ if (!out_buf_size) return;
+ out_buf[0] = 0;
+ DataInfo DI;
+ if (!Symbolizer::GetOrInit()->SymbolizeData(data_addr, &DI)) return;
+ InternalScopedString data_desc(GetPageSizeCached());
+ RenderData(&data_desc, fmt, &DI, common_flags()->strip_path_prefix);
+ internal_strncpy(out_buf, data_desc.data(), out_buf_size);
+ out_buf[out_buf_size - 1] = 0;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_get_module_and_offset_for_pc( // NOLINT
+ uptr pc, char *module_name, uptr module_name_len, uptr *pc_offset) {
+ return __sanitizer::GetModuleAndOffsetForPc(pc, module_name, module_name_len,
+ pc_offset);
+}
+} // extern "C"
diff --git a/lib/sanitizer_common/sanitizer_stacktrace_printer.cc b/lib/sanitizer_common/sanitizer_stacktrace_printer.cc
index 669b0ba28265..6fba581bd964 100644
--- a/lib/sanitizer_common/sanitizer_stacktrace_printer.cc
+++ b/lib/sanitizer_common/sanitizer_stacktrace_printer.cc
@@ -116,6 +116,35 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
}
}
+void RenderData(InternalScopedString *buffer, const char *format,
+ const DataInfo *DI, const char *strip_path_prefix) {
+ for (const char *p = format; *p != '\0'; p++) {
+ if (*p != '%') {
+ buffer->append("%c", *p);
+ continue;
+ }
+ p++;
+ switch (*p) {
+ case '%':
+ buffer->append("%%");
+ break;
+ case 's':
+ buffer->append("%s", StripPathPrefix(DI->file, strip_path_prefix));
+ break;
+ case 'l':
+ buffer->append("%d", DI->line);
+ break;
+ case 'g':
+ buffer->append("%s", DI->name);
+ break;
+ default:
+ Report("Unsupported specifier in stack frame format: %c (0x%zx)!\n", *p,
+ *p);
+ Die();
+ }
+ }
+}
+
void RenderSourceLocation(InternalScopedString *buffer, const char *file,
int line, int column, bool vs_style,
const char *strip_path_prefix) {
diff --git a/lib/sanitizer_common/sanitizer_stacktrace_printer.h b/lib/sanitizer_common/sanitizer_stacktrace_printer.h
index 7f6c5c73b85d..7be1d1977dce 100644
--- a/lib/sanitizer_common/sanitizer_stacktrace_printer.h
+++ b/lib/sanitizer_common/sanitizer_stacktrace_printer.h
@@ -59,6 +59,13 @@ void RenderSourceLocation(InternalScopedString *buffer, const char *file,
void RenderModuleLocation(InternalScopedString *buffer, const char *module,
uptr offset, const char *strip_path_prefix);
+// Same as RenderFrame, but for data section (global variables).
+// Accepts %s, %l from above.
+// Also accepts:
+// %g - name of the global variable.
+void RenderData(InternalScopedString *buffer, const char *format,
+ const DataInfo *DI, const char *strip_path_prefix = "");
+
} // namespace __sanitizer
#endif // SANITIZER_STACKTRACE_PRINTER_H
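A usage sketch for RenderData, assuming a DataInfo already populated by Symbolizer::SymbolizeData:

    DataInfo DI;
    if (Symbolizer::GetOrInit()->SymbolizeData(addr, &DI)) {
      InternalScopedString desc(kMaxPathLength);
      RenderData(&desc, "'%g' is defined at %s:%l", &DI);
      Printf("%s\n", desc.data());
    }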
diff --git a/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc b/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
index 1f8861f0516b..eb4c403d3de0 100644
--- a/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
@@ -79,10 +79,10 @@
// thread-local variables used by libc will be shared between the tracer task
// and the thread which spawned it.
-COMPILER_CHECK(sizeof(SuspendedThreadID) == sizeof(pid_t));
-
namespace __sanitizer {
+COMPILER_CHECK(sizeof(SuspendedThreadID) == sizeof(pid_t));
+
// Structure for passing arguments into the tracer thread.
struct TracerThreadArgument {
StopTheWorldCallback callback;
@@ -190,6 +190,7 @@ void ThreadSuspender::KillAllThreads() {
bool ThreadSuspender::SuspendAllThreads() {
ThreadLister thread_lister(pid_);
bool added_threads;
+ bool first_iteration = true;
do {
// Run through the directory entries once.
added_threads = false;
@@ -199,12 +200,13 @@ bool ThreadSuspender::SuspendAllThreads() {
added_threads = true;
tid = thread_lister.GetNextTID();
}
- if (thread_lister.error()) {
+ if (thread_lister.error() || (first_iteration && !added_threads)) {
// Detach threads and fail.
ResumeAllThreads();
return false;
}
thread_lister.Reset();
+ first_iteration = false;
} while (added_threads);
return true;
}
diff --git a/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc b/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc
index 36b4fa91f545..31506fe5c834 100644
--- a/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc
@@ -242,25 +242,21 @@ static const char *ParseFileLineInfo(AddressInfo *info, const char *str) {
char *file_line_info = 0;
str = ExtractToken(str, "\n", &file_line_info);
CHECK(file_line_info);
- // Parse the last :<int>, which must be there.
- char *last_colon = internal_strrchr(file_line_info, ':');
- CHECK(last_colon);
- int line_or_column = internal_atoll(last_colon + 1);
- // Truncate the string at the last colon and find the next-to-last colon.
- *last_colon = '\0';
- last_colon = internal_strrchr(file_line_info, ':');
- if (last_colon && IsDigit(last_colon[1])) {
- // If the second-to-last colon is followed by a digit, it must be the line
- // number, and the previous parsed number was a column.
- info->line = internal_atoll(last_colon + 1);
- info->column = line_or_column;
- *last_colon = '\0';
- } else {
- // Otherwise, we have line info but no column info.
- info->line = line_or_column;
- info->column = 0;
+
+ if (uptr size = internal_strlen(file_line_info)) {
+ char *back = file_line_info + size - 1;
+ for (int i = 0; i < 2; ++i) {
+ while (back > file_line_info && IsDigit(*back)) --back;
+ if (*back != ':' || !IsDigit(back[1])) break;
+ info->column = info->line;
+ info->line = internal_atoll(back + 1);
+ // Truncate the string at the colon to keep only filename.
+ *back = '\0';
+ --back;
+ }
+ ExtractToken(file_line_info, "", &info->file);
}
- ExtractToken(file_line_info, "", &info->file);
+
InternalFree(file_line_info);
return str;
}
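
The rewritten parser scans backwards and peels off at most two trailing ":<digits>" groups, so "file.cc:10:5", "file.cc:10" and a bare "file.cc" are all handled, as is an empty string. The same scan as a self-contained sketch (a hypothetical helper, not the runtime's API):

#include <cctype>
#include <cstdlib>
#include <string>

void ParseFileLineColumn(std::string s, std::string *file, int *line,
                         int *column) {
  *line = *column = 0;
  for (int i = 0; i < 2; ++i) {
    size_t pos = s.size();
    while (pos > 0 && std::isdigit((unsigned char)s[pos - 1])) --pos;
    // Stop unless a non-empty run of trailing digits is preceded by ':'.
    if (pos == 0 || pos == s.size() || s[pos - 1] != ':') break;
    *column = *line;                   // the previous number was the column
    *line = std::atoi(s.c_str() + pos);
    s.erase(pos - 1);                  // drop ":<digits>" and try once more
  }
  *file = s;
}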
diff --git a/lib/sanitizer_common/sanitizer_symbolizer_mac.cc b/lib/sanitizer_common/sanitizer_symbolizer_mac.cc
index d591abca15df..f08cb9f97cf9 100644
--- a/lib/sanitizer_common/sanitizer_symbolizer_mac.cc
+++ b/lib/sanitizer_common/sanitizer_symbolizer_mac.cc
@@ -19,8 +19,6 @@
#include "sanitizer_mac.h"
#include "sanitizer_symbolizer_mac.h"
-namespace __sanitizer {
-
#include <dlfcn.h>
#include <errno.h>
#include <stdlib.h>
@@ -28,12 +26,15 @@ namespace __sanitizer {
#include <unistd.h>
#include <util.h>
+namespace __sanitizer {
+
bool DlAddrSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
Dl_info info;
int result = dladdr((const void *)addr, &info);
if (!result) return false;
const char *demangled = DemangleSwiftAndCXX(info.dli_sname);
- stack->info.function = demangled ? internal_strdup(demangled) : nullptr;
+ if (!demangled) return false;
+ stack->info.function = internal_strdup(demangled);
return true;
}
diff --git a/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc b/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
index 7028da656e15..f50d8b1840ab 100644
--- a/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
@@ -55,7 +55,7 @@ const char *DemangleCXXABI(const char *name) {
// own demangler (libc++abi's implementation could be adapted so that
// it does not allocate). For now, we just call it anyway, and we leak
// the returned value.
- if (__cxxabiv1::__cxa_demangle)
+ if (&__cxxabiv1::__cxa_demangle)
if (const char *demangled_name =
__cxxabiv1::__cxa_demangle(name, 0, 0, 0))
return demangled_name;
@@ -101,6 +101,46 @@ const char *DemangleSwiftAndCXX(const char *name) {
return DemangleCXXABI(name);
}
+static bool CreateTwoHighNumberedPipes(int *infd_, int *outfd_) {
+ int *infd = NULL;
+ int *outfd = NULL;
+ // The client program may close its stdin and/or stdout and/or stderr
+ // thus allowing pipe() to reuse file descriptors 0, 1 or 2.
+ // In this case the communication between the forked processes may be
+ // broken if either the parent or the child tries to close or duplicate
+ // these descriptors. The loop below produces two pairs of file
+ // descriptors, each greater than 2 (stderr).
+ int sock_pair[5][2];
+ for (int i = 0; i < 5; i++) {
+ if (pipe(sock_pair[i]) == -1) {
+ for (int j = 0; j < i; j++) {
+ internal_close(sock_pair[j][0]);
+ internal_close(sock_pair[j][1]);
+ }
+ return false;
+ } else if (sock_pair[i][0] > 2 && sock_pair[i][1] > 2) {
+ if (infd == NULL) {
+ infd = sock_pair[i];
+ } else {
+ outfd = sock_pair[i];
+ for (int j = 0; j < i; j++) {
+ if (sock_pair[j] == infd) continue;
+ internal_close(sock_pair[j][0]);
+ internal_close(sock_pair[j][1]);
+ }
+ break;
+ }
+ }
+ }
+ CHECK(infd);
+ CHECK(outfd);
+ infd_[0] = infd[0];
+ infd_[1] = infd[1];
+ outfd_[0] = outfd[0];
+ outfd_[1] = outfd[1];
+ return true;
+}
+
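
The both-ends-greater-than-2 requirement guards against a client that has closed a standard descriptor: pipe() then hands back 0, 1 or 2, and the dup2/close choreography in the forked child below would clobber a pipe end. A minimal demonstration of the hazard, assuming the usual lowest-free-descriptor reuse:

#include <unistd.h>

int main() {
  close(0);        // simulate a client that closed its stdin
  int fds[2];
  if (pipe(fds) == 0) {
    // fds[0] is now almost certainly 0. In a child, dup2(fds[0], 0)
    // becomes a no-op and a later close(fds[0]) closes stdin itself.
  }
  return 0;
}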
bool SymbolizerProcess::StartSymbolizerSubprocess() {
if (!FileExists(path_)) {
if (!reported_invalid_path_) {
@@ -110,7 +150,18 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() {
return false;
}
- int pid;
+ int pid = -1;
+
+ int infd[2];
+ internal_memset(&infd, 0, sizeof(infd));
+ int outfd[2];
+ internal_memset(&outfd, 0, sizeof(outfd));
+ if (!CreateTwoHighNumberedPipes(infd, outfd)) {
+ Report("WARNING: Can't create a socket pair to start "
+ "external symbolizer (errno: %d)\n", errno);
+ return false;
+ }
+
if (use_forkpty_) {
#if SANITIZER_MAC
fd_t fd = kInvalidFd;
@@ -121,6 +172,10 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() {
int saved_stderr = dup(STDERR_FILENO);
CHECK_GE(saved_stderr, 0);
+ // We only need one pipe, for stdin of the child.
+ close(outfd[0]);
+ close(outfd[1]);
+
// Use forkpty to disable buffering in the new terminal.
pid = internal_forkpty(&fd);
if (pid == -1) {
@@ -131,6 +186,13 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() {
} else if (pid == 0) {
// Child subprocess.
+ // infd[0] is the child's reading end.
+ close(infd[1]);
+
+ // Set up stdin to read from the pipe.
+ CHECK_GE(dup2(infd[0], STDIN_FILENO), 0);
+ close(infd[0]);
+
// Restore stderr.
CHECK_GE(dup2(saved_stderr, STDERR_FILENO), 0);
close(saved_stderr);
@@ -141,8 +203,12 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() {
internal__exit(1);
}
+ // Input for the child, infd[1] is our writing end.
+ output_fd_ = infd[1];
+ close(infd[0]);
+
// Continue execution in parent process.
- input_fd_ = output_fd_ = fd;
+ input_fd_ = fd;
close(saved_stderr);
@@ -156,41 +222,6 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() {
UNIMPLEMENTED();
#endif // SANITIZER_MAC
} else {
- int *infd = NULL;
- int *outfd = NULL;
- // The client program may close its stdin and/or stdout and/or stderr
- // thus allowing socketpair to reuse file descriptors 0, 1 or 2.
- // In this case the communication between the forked processes may be
- // broken if either the parent or the child tries to close or duplicate
- // these descriptors. The loop below produces two pairs of file
- // descriptors, each greater than 2 (stderr).
- int sock_pair[5][2];
- for (int i = 0; i < 5; i++) {
- if (pipe(sock_pair[i]) == -1) {
- for (int j = 0; j < i; j++) {
- internal_close(sock_pair[j][0]);
- internal_close(sock_pair[j][1]);
- }
- Report("WARNING: Can't create a socket pair to start "
- "external symbolizer (errno: %d)\n", errno);
- return false;
- } else if (sock_pair[i][0] > 2 && sock_pair[i][1] > 2) {
- if (infd == NULL) {
- infd = sock_pair[i];
- } else {
- outfd = sock_pair[i];
- for (int j = 0; j < i; j++) {
- if (sock_pair[j] == infd) continue;
- internal_close(sock_pair[j][0]);
- internal_close(sock_pair[j][1]);
- }
- break;
- }
- }
- }
- CHECK(infd);
- CHECK(outfd);
-
const char *argv[kArgVMax];
GetArgV(path_, argv);
pid = StartSubprocess(path_, argv, /* stdin */ outfd[0],
@@ -205,6 +236,8 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() {
output_fd_ = outfd[1];
}
+ CHECK_GT(pid, 0);
+
// Check that symbolizer subprocess started successfully.
SleepForMillis(kSymbolizerStartupTimeMillis);
if (!IsProcessRunning(pid)) {
@@ -463,7 +496,9 @@ static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
VReport(2, "Symbolizer is disabled.\n");
return;
}
- if (SymbolizerTool *tool = InternalSymbolizer::get(allocator)) {
+ if (IsReportingOOM()) {
+ VReport(2, "Cannot use internal symbolizer: out of memory\n");
+ } else if (SymbolizerTool *tool = InternalSymbolizer::get(allocator)) {
VReport(2, "Using internal symbolizer.\n");
list->push_back(tool);
return;
diff --git a/lib/sanitizer_common/sanitizer_symbolizer_win.cc b/lib/sanitizer_common/sanitizer_symbolizer_win.cc
index 3cb7e487012d..135823b157de 100644
--- a/lib/sanitizer_common/sanitizer_symbolizer_win.cc
+++ b/lib/sanitizer_common/sanitizer_symbolizer_win.cc
@@ -14,15 +14,24 @@
#include "sanitizer_platform.h"
#if SANITIZER_WINDOWS
-#define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-#include <dbghelp.h>
-#pragma comment(lib, "dbghelp.lib")
+#include "sanitizer_dbghelp.h"
#include "sanitizer_symbolizer_internal.h"
namespace __sanitizer {
+decltype(::StackWalk64) *StackWalk64;
+decltype(::SymCleanup) *SymCleanup;
+decltype(::SymFromAddr) *SymFromAddr;
+decltype(::SymFunctionTableAccess64) *SymFunctionTableAccess64;
+decltype(::SymGetLineFromAddr64) *SymGetLineFromAddr64;
+decltype(::SymGetModuleBase64) *SymGetModuleBase64;
+decltype(::SymGetSearchPathW) *SymGetSearchPathW;
+decltype(::SymInitialize) *SymInitialize;
+decltype(::SymSetOptions) *SymSetOptions;
+decltype(::SymSetSearchPathW) *SymSetSearchPathW;
+decltype(::UnDecorateSymbolName) *UnDecorateSymbolName;
+
namespace {
class WinSymbolizerTool : public SymbolizerTool {
@@ -50,6 +59,29 @@ bool TrySymInitialize() {
void InitializeDbgHelpIfNeeded() {
if (is_dbghelp_initialized)
return;
+
+ HMODULE dbghelp = LoadLibraryA("dbghelp.dll");
+ CHECK(dbghelp && "failed to load dbghelp.dll");
+
+#define DBGHELP_IMPORT(name) \
+ do { \
+ name = \
+ reinterpret_cast<decltype(::name) *>(GetProcAddress(dbghelp, #name)); \
+ CHECK(name != nullptr); \
+ } while (0)
+ DBGHELP_IMPORT(StackWalk64);
+ DBGHELP_IMPORT(SymCleanup);
+ DBGHELP_IMPORT(SymFromAddr);
+ DBGHELP_IMPORT(SymFunctionTableAccess64);
+ DBGHELP_IMPORT(SymGetLineFromAddr64);
+ DBGHELP_IMPORT(SymGetModuleBase64);
+ DBGHELP_IMPORT(SymGetSearchPathW);
+ DBGHELP_IMPORT(SymInitialize);
+ DBGHELP_IMPORT(SymSetOptions);
+ DBGHELP_IMPORT(SymSetSearchPathW);
+ DBGHELP_IMPORT(UnDecorateSymbolName);
+#undef DBGHELP_IMPORT
+
if (!TrySymInitialize()) {
// OK, maybe the client app has called SymInitialize already.
// That's a bit unfortunate for us as all the DbgHelp functions are
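
Binding the dbghelp entry points at run time replaces the static dbghelp.lib dependency that the deleted #pragma comment carried. The DBGHELP_IMPORT pattern, reduced to a single function as a hedged standalone sketch:

#include <windows.h>
#include <dbghelp.h>

static decltype(::SymInitialize) *pSymInitialize;

static bool LoadSymInitialize() {
  HMODULE dbghelp = LoadLibraryA("dbghelp.dll");
  if (!dbghelp) return false;
  pSymInitialize = reinterpret_cast<decltype(::SymInitialize) *>(
      GetProcAddress(dbghelp, "SymInitialize"));
  return pSymInitialize != nullptr;
}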
diff --git a/lib/sanitizer_common/sanitizer_thread_registry.cc b/lib/sanitizer_common/sanitizer_thread_registry.cc
index 6e7ddfa64d4b..c2b75e652ce9 100644
--- a/lib/sanitizer_common/sanitizer_thread_registry.cc
+++ b/lib/sanitizer_common/sanitizer_thread_registry.cc
@@ -131,7 +131,7 @@ u32 ThreadRegistry::CreateThread(uptr user_id, bool detached, u32 parent_tid,
tctx = context_factory_(tid);
threads_[tid] = tctx;
} else {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
Report("%s: Thread limit (%u threads) exceeded. Dying.\n",
SanitizerToolName, max_threads_);
#else
diff --git a/lib/sanitizer_common/sanitizer_win.cc b/lib/sanitizer_common/sanitizer_win.cc
index f762731cb639..c4a57f0870cc 100644
--- a/lib/sanitizer_common/sanitizer_win.cc
+++ b/lib/sanitizer_common/sanitizer_win.cc
@@ -18,15 +18,16 @@
#define WIN32_LEAN_AND_MEAN
#define NOGDI
#include <windows.h>
-#include <dbghelp.h>
#include <io.h>
#include <psapi.h>
#include <stdlib.h>
#include "sanitizer_common.h"
+#include "sanitizer_dbghelp.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"
+#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"
@@ -173,10 +174,10 @@ void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
// FIXME: is this really "NoReserve"? On Win32 this does not matter much,
// but on Win64 it does.
(void)name; // unsupported
-#if SANITIZER_WINDOWS64
- // On Windows64, use MEM_COMMIT would result in error
+#if !SANITIZER_GO && SANITIZER_WINDOWS64
+ // On asan/Windows64, using MEM_COMMIT would result in error
// 1455:ERROR_COMMITMENT_LIMIT.
- // We use exception handler to commit page on demand.
+ // Asan uses exception handler to commit page on demand.
void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE, PAGE_READWRITE);
#else
void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE | MEM_COMMIT,
@@ -220,8 +221,12 @@ void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
}
void *MmapNoAccess(uptr size) {
- // FIXME: unsupported.
- return nullptr;
+ void *res = VirtualAlloc(nullptr, size, MEM_RESERVE, PAGE_NOACCESS);
+ if (res == 0)
+ Report("WARNING: %s failed to "
+ "mprotect %p (%zd) bytes (error code: %d)\n",
+ SanitizerToolName, size, size, GetLastError());
+ return res;
}
bool MprotectNoAccess(uptr addr, uptr size) {
@@ -229,14 +234,13 @@ bool MprotectNoAccess(uptr addr, uptr size) {
return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection);
}
-
-void FlushUnneededShadowMemory(uptr addr, uptr size) {
+void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
// This is almost useless on 32-bits.
// FIXME: add madvise-analog when we move to 64-bits.
}
void NoHugePagesInRegion(uptr addr, uptr size) {
- // FIXME: probably similar to FlushUnneededShadowMemory.
+ // FIXME: probably similar to ReleaseMemoryPagesToOS.
}
void DontDumpShadowMemory(uptr addr, uptr length) {
@@ -244,6 +248,26 @@ void DontDumpShadowMemory(uptr addr, uptr length) {
// FIXME: add madvise-analog when we move to 64-bits.
}
+uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding) {
+ uptr address = 0;
+ while (true) {
+ MEMORY_BASIC_INFORMATION info;
+ if (!::VirtualQuery((void*)address, &info, sizeof(info)))
+ return 0;
+
+ if (info.State == MEM_FREE) {
+ uptr shadow_address = RoundUpTo((uptr)info.BaseAddress + left_padding,
+ alignment);
+ if (shadow_address + size < (uptr)info.BaseAddress + info.RegionSize)
+ return shadow_address;
+ }
+
+ // Move to the next region.
+ address = (uptr)info.BaseAddress + info.RegionSize;
+ }
+ return 0;
+}
+
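
A hedged usage sketch for the VirtualQuery walk above: find the first free region large enough for a shadow mapping and reserve it. The size constant and the page-size helper are illustrative assumptions, and the follow-up VirtualAlloc can still race with other allocations:

uptr ReserveShadow() {
  const uptr kShadowSize = 1ULL << 30;  // 1G, illustrative only
  uptr beg = FindAvailableMemoryRange(kShadowSize, GetPageSizeCached(),
                                      /*left_padding=*/0);
  if (!beg) return 0;
  return (uptr)VirtualAlloc((LPVOID)beg, kShadowSize, MEM_RESERVE,
                            PAGE_READWRITE);
}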
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
MEMORY_BASIC_INFORMATION mbi;
CHECK(VirtualQuery((void *)range_start, &mbi, sizeof(mbi)));
@@ -309,7 +333,7 @@ struct ModuleInfo {
uptr end_address;
};
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
int CompareModulesBase(const void *pl, const void *pr) {
const ModuleInfo *l = (ModuleInfo *)pl, *r = (ModuleInfo *)pr;
if (l->base_address < r->base_address)
@@ -319,7 +343,7 @@ int CompareModulesBase(const void *pl, const void *pr) {
#endif
} // namespace
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void DumpProcessMap() {
Report("Dumping process modules:\n");
ListOfModules modules;
@@ -329,8 +353,8 @@ void DumpProcessMap() {
InternalScopedBuffer<ModuleInfo> module_infos(num_modules);
for (size_t i = 0; i < num_modules; ++i) {
module_infos[i].filepath = modules[i].full_name();
- module_infos[i].base_address = modules[i].base_address();
- module_infos[i].end_address = modules[i].ranges().front()->end;
+ module_infos[i].base_address = modules[i].ranges().front()->beg;
+ module_infos[i].end_address = modules[i].ranges().back()->end;
}
qsort(module_infos.data(), num_modules, sizeof(ModuleInfo),
CompareModulesBase);
@@ -400,12 +424,10 @@ u64 NanoTime() {
}
void Abort() {
- if (::IsDebuggerPresent())
- __debugbreak();
internal__exit(3);
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Read the file to extract the ImageBase field from the PE header. If ASLR is
// disabled and this virtual address is available, the loader will typically
// load the image at this address. Therefore, we call it the preferred base. Any
@@ -631,7 +653,13 @@ uptr internal_sched_yield() {
}
void internal__exit(int exitcode) {
- ExitProcess(exitcode);
+ // ExitProcess runs some finalizers, so use TerminateProcess to avoid that.
+ // The debugger doesn't stop on TerminateProcess like it does on ExitProcess,
+ // so add our own breakpoint here.
+ if (::IsDebuggerPresent())
+ __debugbreak();
+ TerminateProcess(GetCurrentProcess(), exitcode);
+ __assume(0);
}
uptr internal_ftruncate(fd_t fd, uptr size) {
@@ -698,7 +726,7 @@ void InitTlsSize() {
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
uptr *tls_addr, uptr *tls_size) {
-#ifdef SANITIZER_GO
+#if SANITIZER_GO
*stk_addr = 0;
*stk_size = 0;
*tls_addr = 0;
@@ -735,6 +763,8 @@ void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
STACKFRAME64 stack_frame;
memset(&stack_frame, 0, sizeof(stack_frame));
+ InitializeDbgHelpIfNeeded();
+
size = 0;
#if defined(_WIN64)
int machine_type = IMAGE_FILE_MACHINE_AMD64;
@@ -751,8 +781,8 @@ void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
stack_frame.AddrFrame.Mode = AddrModeFlat;
stack_frame.AddrStack.Mode = AddrModeFlat;
while (StackWalk64(machine_type, GetCurrentProcess(), GetCurrentThread(),
- &stack_frame, &ctx, NULL, &SymFunctionTableAccess64,
- &SymGetModuleBase64, NULL) &&
+ &stack_frame, &ctx, NULL, SymFunctionTableAccess64,
+ SymGetModuleBase64, NULL) &&
size < Min(max_depth, kStackTraceMax)) {
trace_buffer[size++] = (uptr)stack_frame.AddrPC.Offset;
}
@@ -842,6 +872,10 @@ SignalContext SignalContext::Create(void *siginfo, void *context) {
write_flag);
}
+void SignalContext::DumpAllRegisters(void *context) {
+ // FIXME: Implement this.
+}
+
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
// FIXME: Actually implement this function.
CHECK_GT(buf_len, 0);
@@ -882,6 +916,24 @@ bool IsProcessRunning(pid_t pid) {
int WaitForProcess(pid_t pid) { return -1; }
+// FIXME implement on this platform.
+void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { }
+
+
} // namespace __sanitizer
+#if !SANITIZER_GO
+// Workaround to implement weak hooks on Windows. COFF doesn't directly support
+// weak symbols, but it does support /alternatename, which is similar. If the
+// user does not override the hook, we will use this default definition instead
+// of null.
+extern "C" void __sanitizer_print_memory_profile(int top_percent) {}
+
+#ifdef _WIN64
+#pragma comment(linker, "/alternatename:__sanitizer_print_memory_profile=__sanitizer_default_print_memory_profile") // NOLINT
+#else
+#pragma comment(linker, "/alternatename:___sanitizer_print_memory_profile=___sanitizer_default_print_memory_profile") // NOLINT
+#endif
+#endif
+
#endif // _WIN32
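
The /alternatename trick in isolation: with MSVC's link.exe, it makes an otherwise undefined symbol resolve to a fallback definition, the closest COFF analog of a weak symbol. A minimal sketch with hypothetical names (x64 C mangling assumed, hence no leading underscore):

#include <cstdio>

extern "C" void my_hook_default() { std::puts("default hook"); }
// If no other object file defines my_hook, the linker binds it to the default.
#pragma comment(linker, "/alternatename:my_hook=my_hook_default")
extern "C" void my_hook();

int main() { my_hook(); }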
diff --git a/lib/sanitizer_common/scripts/gen_dynamic_list.py b/lib/sanitizer_common/scripts/gen_dynamic_list.py
index b8b79b5994b7..1d4230607b92 100755
--- a/lib/sanitizer_common/scripts/gen_dynamic_list.py
+++ b/lib/sanitizer_common/scripts/gen_dynamic_list.py
@@ -19,6 +19,7 @@ import os
import re
import subprocess
import sys
+import platform
new_delete = set([
'_Znam', '_ZnamRKSt9nothrow_t', # operator new[](unsigned long)
@@ -42,14 +43,15 @@ versioned_functions = set(['memcpy', 'pthread_attr_getaffinity_np',
def get_global_functions(library):
functions = []
- nm_proc = subprocess.Popen(['nm', library], stdout=subprocess.PIPE,
+ nm = os.environ.get('NM', 'nm')
+ nm_proc = subprocess.Popen([nm, library], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
nm_out = nm_proc.communicate()[0].decode().split('\n')
if nm_proc.returncode != 0:
- raise subprocess.CalledProcessError(nm_proc.returncode, 'nm')
+ raise subprocess.CalledProcessError(nm_proc.returncode, nm)
func_symbols = ['T', 'W']
# On PowerPC, nm prints function descriptors from .data section.
- if os.uname()[4] in ["powerpc", "ppc64"]:
+ if platform.uname()[4] in ["powerpc", "ppc64"]:
func_symbols += ['D']
for line in nm_out:
cols = line.split(' ')
diff --git a/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cc b/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cc
new file mode 100644
index 000000000000..bd315a0c9bd4
--- /dev/null
+++ b/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cc
@@ -0,0 +1,72 @@
+//===-- sanitizer_symbolize.cc ----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of weak hooks from sanitizer_symbolizer_posix_libcdep.cc.
+//
+//===----------------------------------------------------------------------===//
+
+#include <stdio.h>
+#include <string>
+
+#include "llvm/DebugInfo/Symbolize/DIPrinter.h"
+#include "llvm/DebugInfo/Symbolize/Symbolize.h"
+
+static llvm::symbolize::LLVMSymbolizer *getDefaultSymbolizer() {
+ static llvm::symbolize::LLVMSymbolizer DefaultSymbolizer;
+ return &DefaultSymbolizer;
+}
+
+namespace __sanitizer {
+int internal_snprintf(char *buffer, unsigned long length, const char *format,
+ ...);
+} // namespace __sanitizer
+
+extern "C" {
+
+typedef uint64_t u64;
+
+bool __sanitizer_symbolize_code(const char *ModuleName, uint64_t ModuleOffset,
+ char *Buffer, int MaxLength) {
+ std::string Result;
+ {
+ llvm::raw_string_ostream OS(Result);
+ llvm::symbolize::DIPrinter Printer(OS);
+ auto ResOrErr =
+ getDefaultSymbolizer()->symbolizeInlinedCode(ModuleName, ModuleOffset);
+ Printer << (ResOrErr ? ResOrErr.get() : llvm::DIInliningInfo());
+ }
+ __sanitizer::internal_snprintf(Buffer, MaxLength, "%s", Result.c_str());
+ return true;
+}
+
+bool __sanitizer_symbolize_data(const char *ModuleName, uint64_t ModuleOffset,
+ char *Buffer, int MaxLength) {
+ std::string Result;
+ {
+ llvm::raw_string_ostream OS(Result);
+ llvm::symbolize::DIPrinter Printer(OS);
+ auto ResOrErr =
+ getDefaultSymbolizer()->symbolizeData(ModuleName, ModuleOffset);
+ Printer << (ResOrErr ? ResOrErr.get() : llvm::DIGlobal());
+ }
+ __sanitizer::internal_snprintf(Buffer, MaxLength, "%s", Result.c_str());
+ return true;
+}
+
+void __sanitizer_symbolize_flush() { getDefaultSymbolizer()->flush(); }
+
+int __sanitizer_symbolize_demangle(const char *Name, char *Buffer,
+ int MaxLength) {
+ std::string Result =
+ llvm::symbolize::LLVMSymbolizer::DemangleName(Name, nullptr);
+ __sanitizer::internal_snprintf(Buffer, MaxLength, "%s", Result.c_str());
+ return static_cast<int>(Result.size() + 1);
+}
+
+} // extern "C"
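
A hedged caller-side sketch of the exported hook; the module name and offset are placeholders, and the prototype simply mirrors the definition above:

#include <cstdint>
#include <cstdio>

extern "C" bool __sanitizer_symbolize_code(const char *ModuleName,
                                           uint64_t ModuleOffset, char *Buffer,
                                           int MaxLength);

int main() {
  char buf[1024];
  // Hypothetical module/offset; real callers pass a loaded module and a
  // code address relative to its base.
  if (__sanitizer_symbolize_code("/bin/ls", 0x1234, buf, sizeof(buf)))
    std::printf("%s", buf);  // "function\nfile:line:col\n"-style output
  return 0;
}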
diff --git a/lib/sanitizer_common/symbolizer/sanitizer_wrappers.cc b/lib/sanitizer_common/symbolizer/sanitizer_wrappers.cc
new file mode 100644
index 000000000000..0a796d91a3d0
--- /dev/null
+++ b/lib/sanitizer_common/symbolizer/sanitizer_wrappers.cc
@@ -0,0 +1,175 @@
+//===-- sanitizer_wrappers.cc -----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Redirect some functions to sanitizer interceptors.
+//
+//===----------------------------------------------------------------------===//
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <tuple>
+
+// Need to match ../sanitizer_common/sanitizer_internal_defs.h
+#if defined(ARCH_PPC)
+#define OFF_T unsigned long
+#else
+#define OFF_T unsigned long long
+#endif
+
+namespace __sanitizer {
+unsigned long internal_open(const char *filename, int flags);
+unsigned long internal_open(const char *filename, int flags, unsigned mode);
+unsigned long internal_close(int fd);
+unsigned long internal_stat(const char *path, void *buf);
+unsigned long internal_lstat(const char *path, void *buf);
+unsigned long internal_fstat(int fd, void *buf);
+size_t internal_strlen(const char *s);
+unsigned long internal_mmap(void *addr, unsigned long length, int prot,
+ int flags, int fd, OFF_T offset);
+void *internal_memcpy(void *dest, const void *src, unsigned long n);
+// Used to propagate errno.
+bool internal_iserror(unsigned long retval, int *rverrno = 0);
+} // namespace __sanitizer
+
+namespace {
+
+template <typename T>
+struct GetTypes;
+
+template <typename R, typename... Args>
+struct GetTypes<R(Args...)> {
+ using Result = R;
+ template <size_t i>
+ struct Arg {
+ using Type = typename std::tuple_element<i, std::tuple<Args...>>::type;
+ };
+};
+
+#define LLVM_SYMBOLIZER_GET_FUNC(Function) \
+ ((__interceptor_##Function) \
+ ? (__interceptor_##Function) \
+ : reinterpret_cast<decltype(&Function)>(dlsym(RTLD_NEXT, #Function)))
+
+#define LLVM_SYMBOLIZER_INTERCEPTOR1(Function, ...) \
+ GetTypes<__VA_ARGS__>::Result __interceptor_##Function( \
+ GetTypes<__VA_ARGS__>::Arg<0>::Type) __attribute__((weak)); \
+ GetTypes<__VA_ARGS__>::Result Function( \
+ GetTypes<__VA_ARGS__>::Arg<0>::Type arg0) { \
+ return LLVM_SYMBOLIZER_GET_FUNC(Function)(arg0); \
+ }
+
+#define LLVM_SYMBOLIZER_INTERCEPTOR2(Function, ...) \
+ GetTypes<__VA_ARGS__>::Result __interceptor_##Function( \
+ GetTypes<__VA_ARGS__>::Arg<0>::Type, \
+ GetTypes<__VA_ARGS__>::Arg<1>::Type) __attribute__((weak)); \
+ GetTypes<__VA_ARGS__>::Result Function( \
+ GetTypes<__VA_ARGS__>::Arg<0>::Type arg0, \
+ GetTypes<__VA_ARGS__>::Arg<1>::Type arg1) { \
+ return LLVM_SYMBOLIZER_GET_FUNC(Function)(arg0, arg1); \
+ }
+
+#define LLVM_SYMBOLIZER_INTERCEPTOR3(Function, ...) \
+ GetTypes<__VA_ARGS__>::Result __interceptor_##Function( \
+ GetTypes<__VA_ARGS__>::Arg<0>::Type, \
+ GetTypes<__VA_ARGS__>::Arg<1>::Type, \
+ GetTypes<__VA_ARGS__>::Arg<2>::Type) __attribute__((weak)); \
+ GetTypes<__VA_ARGS__>::Result Function( \
+ GetTypes<__VA_ARGS__>::Arg<0>::Type arg0, \
+ GetTypes<__VA_ARGS__>::Arg<1>::Type arg1, \
+ GetTypes<__VA_ARGS__>::Arg<2>::Type arg2) { \
+ return LLVM_SYMBOLIZER_GET_FUNC(Function)(arg0, arg1, arg2); \
+ }
+
+#define LLVM_SYMBOLIZER_INTERCEPTOR4(Function, ...) \
+ GetTypes<__VA_ARGS__>::Result __interceptor_##Function( \
+ GetTypes<__VA_ARGS__>::Arg<0>::Type, \
+ GetTypes<__VA_ARGS__>::Arg<1>::Type, \
+ GetTypes<__VA_ARGS__>::Arg<2>::Type, \
+ GetTypes<__VA_ARGS__>::Arg<3>::Type) __attribute__((weak)); \
+ GetTypes<__VA_ARGS__>::Result Function( \
+ GetTypes<__VA_ARGS__>::Arg<0>::Type arg0, \
+ GetTypes<__VA_ARGS__>::Arg<1>::Type arg1, \
+ GetTypes<__VA_ARGS__>::Arg<2>::Type arg2, \
+ GetTypes<__VA_ARGS__>::Arg<3>::Type arg3) { \
+ return LLVM_SYMBOLIZER_GET_FUNC(Function)(arg0, arg1, arg2, arg3); \
+ }
+
+} // namespace
+
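
GetTypes is a small trait that splits a function type into its result and parameter types, which is what lets each LLVM_SYMBOLIZER_INTERCEPTORn macro declare both the weak __interceptor_ prototype and the forwarding definition from a single function type. The trait in isolation, renamed to mark it as a demonstration:

#include <cstddef>
#include <tuple>
#include <type_traits>

template <typename T> struct GetTypesDemo;
template <typename R, typename... Args> struct GetTypesDemo<R(Args...)> {
  using Result = R;
  template <std::size_t i> struct Arg {
    using Type = typename std::tuple_element<i, std::tuple<Args...>>::type;
  };
};

// ssize_t read(int, void *, size_t) decomposes as:
static_assert(
    std::is_same<GetTypesDemo<long(int, void *, std::size_t)>::Arg<1>::Type,
                 void *>::value,
    "second parameter is void *");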
+// C-style interface around internal sanitizer libc functions.
+extern "C" {
+
+#define RETURN_OR_SET_ERRNO(T, res) \
+ int rverrno; \
+ if (__sanitizer::internal_iserror(res, &rverrno)) { \
+ errno = rverrno; \
+ return (T)-1; \
+ } \
+ return (T)res;
+
+int open(const char *filename, int flags, ...) {
+ unsigned long res;
+ if (flags & O_CREAT) {
+ va_list va;
+ va_start(va, flags);
+ unsigned mode = va_arg(va, unsigned);
+ va_end(va);
+ res = __sanitizer::internal_open(filename, flags, mode);
+ } else {
+ res = __sanitizer::internal_open(filename, flags);
+ }
+ RETURN_OR_SET_ERRNO(int, res);
+}
+
+int close(int fd) {
+ unsigned long res = __sanitizer::internal_close(fd);
+ RETURN_OR_SET_ERRNO(int, res);
+}
+
+#define STAT(func, arg, buf) \
+ unsigned long res = __sanitizer::internal_##func(arg, buf); \
+ RETURN_OR_SET_ERRNO(int, res);
+
+int stat(const char *path, struct stat *buf) { STAT(stat, path, buf); }
+
+int lstat(const char *path, struct stat *buf) { STAT(lstat, path, buf); }
+
+int fstat(int fd, struct stat *buf) { STAT(fstat, fd, buf); }
+
+// Redirect versioned stat functions to the __sanitizer::internal() as well.
+int __xstat(int version, const char *path, struct stat *buf) {
+ STAT(stat, path, buf);
+}
+
+int __lxstat(int version, const char *path, struct stat *buf) {
+ STAT(lstat, path, buf);
+}
+
+int __fxstat(int version, int fd, struct stat *buf) { STAT(fstat, fd, buf); }
+
+size_t strlen(const char *s) { return __sanitizer::internal_strlen(s); }
+
+void *mmap(void *addr, size_t length, int prot, int flags, int fd,
+ off_t offset) {
+ unsigned long res = __sanitizer::internal_mmap(
+ addr, (unsigned long)length, prot, flags, fd, (unsigned long long)offset);
+ RETURN_OR_SET_ERRNO(void *, res);
+}
+
+LLVM_SYMBOLIZER_INTERCEPTOR3(read, ssize_t(int, void *, size_t))
+LLVM_SYMBOLIZER_INTERCEPTOR4(pread, ssize_t(int, void *, size_t, off_t))
+LLVM_SYMBOLIZER_INTERCEPTOR4(pread64, ssize_t(int, void *, size_t, off64_t))
+LLVM_SYMBOLIZER_INTERCEPTOR2(realpath, char *(const char *, char *))
+
+} // extern "C"
diff --git a/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh b/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
new file mode 100755
index 000000000000..07239eb50587
--- /dev/null
+++ b/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
@@ -0,0 +1,187 @@
+#!/bin/bash -eu
+#
+# Run as: CLANG=bin/clang ZLIB_SRC=src/zlib \
+# build_symbolizer.sh runtime_build/lib/clang/4.0.0/lib/linux/
+# zlib can be downloaded from http://www.zlib.net.
+#
+# The script compiles a self-contained object file with symbolization code and
+# injects it into the given set of runtime libraries. It updates only libraries
+# that have unresolved __sanitizer_symbolize_* symbols and match the
+# architecture. The object file is compiled from LLVM sources together with
+# dependencies such as libc++ and zlib. The script then internalizes the
+# symbols in the file so that it can be linked into arbitrary programs without
+# conflicting with or depending on the program's own symbols. The only
+# acceptable dependencies are libc and __sanitizer::internal_* from the
+# sanitizer runtime.
+#
+# Symbols exported by the object file will be used by Sanitizer runtime
+# libraries to symbolize code/data in-process.
+#
+# The script will modify the output directory which is given as the first
+# argument to the script.
+#
+# FIXME: We should really be using a simpler approach to building this object
+# file, and it should be available as a regular cmake rule. Conceptually, we
+# want to be doing "ld -r" followed by "objcopy -G" to create a relocatable
+# object file with only our entry points exposed. However, this does not work at
+# present, see PR30750.
+
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+SRC_DIR=$(readlink -f $SCRIPT_DIR/..)
+TARGET_DIR=$(readlink -f $1)
+
+LLVM_SRC="${LLVM_SRC:-$SCRIPT_DIR/../../../../../..}"
+LLVM_SRC=$(readlink -f $LLVM_SRC)
+
+if [[ ! -d "${LLVM_SRC}/projects/libcxxabi" ||
+ ! -d "${LLVM_SRC}/projects/libcxx" ]]; then
+ echo "Missing or incomplete LLVM_SRC"
+ exit 1
+fi
+
+if [[ "$ZLIB_SRC" == "" ||
+ ! -x "${ZLIB_SRC}/configure" ||
+ ! -f "${ZLIB_SRC}/zlib.h" ]]; then
+ echo "Missing or incomplete ZLIB_SRC"
+ exit 1
+fi
+ZLIB_SRC=$(readlink -f $ZLIB_SRC)
+
+J="${J:-50}"
+
+CLANG="${CLANG:-`which clang`}"
+CLANG_DIR=$(readlink -f $(dirname "$CLANG"))
+
+BUILD_DIR=$(readlink -f ./symbolizer)
+mkdir -p $BUILD_DIR
+cd $BUILD_DIR
+
+CC=$CLANG_DIR/clang
+CXX=$CLANG_DIR/clang++
+TBLGEN=$CLANG_DIR/llvm-tblgen
+LINK=$CLANG_DIR/llvm-link
+OPT=$CLANG_DIR/opt
+AR=$CLANG_DIR/llvm-ar
+
+for F in $CC $CXX $TBLGEN $LINK $OPT $AR; do
+ if [[ ! -x "$F" ]]; then
+ echo "Missing $F"
+ exit 1
+ fi
+done
+
+ZLIB_BUILD=${BUILD_DIR}/zlib
+LIBCXX_BUILD=${BUILD_DIR}/libcxx
+LLVM_BUILD=${BUILD_DIR}/llvm
+SYMBOLIZER_BUILD=${BUILD_DIR}/symbolizer
+
+FLAGS=${FLAGS:-}
+FLAGS="$FLAGS -fPIC -flto -Os -g0 -DNDEBUG"
+
+# Build zlib.
+mkdir -p ${ZLIB_BUILD}
+cd ${ZLIB_BUILD}
+cp -r ${ZLIB_SRC}/* .
+CC=$CC CFLAGS="$FLAGS" RANLIB=/bin/true ./configure --static
+make -j${J} libz.a
+
+# Build and install libcxxabi and libcxx.
+if [[ ! -d ${LIBCXX_BUILD} ]]; then
+ mkdir -p ${LIBCXX_BUILD}
+ cd ${LIBCXX_BUILD}
+ LIBCXX_FLAGS="${FLAGS} -Wno-macro-redefined -I${LLVM_SRC}/projects/libcxxabi/include"
+ cmake -GNinja \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DCMAKE_C_COMPILER=$CC \
+ -DCMAKE_CXX_COMPILER=$CXX \
+ -DCMAKE_C_FLAGS_RELEASE="${LIBCXX_FLAGS}" \
+ -DCMAKE_CXX_FLAGS_RELEASE="${LIBCXX_FLAGS}" \
+ -DLIBCXXABI_ENABLE_ASSERTIONS=OFF \
+ -DLIBCXXABI_ENABLE_EXCEPTIONS=OFF \
+ -DLIBCXXABI_ENABLE_SHARED=OFF \
+ -DLIBCXXABI_ENABLE_THREADS=OFF \
+ -DLIBCXX_ENABLE_ASSERTIONS=OFF \
+ -DLIBCXX_ENABLE_EXCEPTIONS=OFF \
+ -DLIBCXX_ENABLE_RTTI=OFF \
+ -DLIBCXX_ENABLE_SHARED=OFF \
+ -DLIBCXX_ENABLE_THREADS=OFF \
+ $LLVM_SRC
+fi
+cd ${LIBCXX_BUILD}
+ninja cxx cxxabi
+
+FLAGS="${FLAGS} -fno-rtti -fno-exceptions"
+
+# Build LLVM.
+if [[ ! -d ${LLVM_BUILD} ]]; then
+ mkdir -p ${LLVM_BUILD}
+ cd ${LLVM_BUILD}
+ LLVM_FLAGS="${FLAGS} -I${ZLIB_BUILD} -I${LIBCXX_BUILD}/include/c++/v1"
+ cmake -GNinja \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DCMAKE_C_COMPILER=$CC \
+ -DCMAKE_CXX_COMPILER=$CXX \
+ -DCMAKE_C_FLAGS_RELEASE="${LLVM_FLAGS}" \
+ -DCMAKE_CXX_FLAGS_RELEASE="${LLVM_FLAGS}" \
+ -DLLVM_TABLEGEN=$TBLGEN \
+ -DLLVM_ENABLE_ZLIB=ON \
+ -DLLVM_ENABLE_TERMINFO=OFF \
+ -DLLVM_ENABLE_THREADS=OFF \
+ $LLVM_SRC
+fi
+cd ${LLVM_BUILD}
+ninja LLVMSymbolize LLVMObject LLVMDebugInfoDWARF LLVMSupport LLVMDebugInfoPDB LLVMMC
+
+cd ${BUILD_DIR}
+rm -rf ${SYMBOLIZER_BUILD}
+mkdir ${SYMBOLIZER_BUILD}
+cd ${SYMBOLIZER_BUILD}
+
+for A in $LIBCXX_BUILD/lib/libc++.a \
+ $LIBCXX_BUILD/lib/libc++abi.a \
+ $LLVM_BUILD/lib/libLLVMSymbolize.a \
+ $LLVM_BUILD/lib/libLLVMObject.a \
+ $LLVM_BUILD/lib/libLLVMDebugInfoDWARF.a \
+ $LLVM_BUILD/lib/libLLVMSupport.a \
+ $LLVM_BUILD/lib/libLLVMDebugInfoPDB.a \
+ $LLVM_BUILD/lib/libLLVMMC.a \
+ $ZLIB_BUILD/libz.a ; do
+ for O in $($AR t $A); do
+ $AR x $A $O
+ mv -f $O "$(basename $A).$O" # Rename to avoid collisions between libs.
+ done
+done
+
+echo "Compiling..."
+SYMBOLIZER_FLAGS="$FLAGS -std=c++11 -I${LLVM_SRC}/include -I${LLVM_BUILD}/include -I${LIBCXX_BUILD}/include/c++/v1"
+$CXX $SYMBOLIZER_FLAGS ${SRC_DIR}/sanitizer_symbolize.cc ${SRC_DIR}/sanitizer_wrappers.cc -c
+
+SYMBOLIZER_API_LIST=__sanitizer_symbolize_code,__sanitizer_symbolize_data,__sanitizer_symbolize_flush,__sanitizer_symbolize_demangle
+
+# Merge all the object files together and copy the resulting library back.
+$LINK *.o -o all.bc
+echo "Optimizing..."
+$OPT -internalize -internalize-public-api-list=${SYMBOLIZER_API_LIST} all.bc -o opt.bc
+$CC $FLAGS -fno-lto -c opt.bc -o symbolizer.o
+
+echo "Checking undefined symbols..."
+nm -f posix -g symbolizer.o | cut -f 1,2 -d \ | LC_COLLATE=C sort -u > undefined.new
+(diff -u $SCRIPT_DIR/global_symbols.txt undefined.new | grep -E "^\+[^+]") && \
+ (echo "Failed: unexpected symbols"; exit 1)
+
+arch() {
+ objdump -f $1 | grep -m1 -Po "(?<=file format ).*$"
+}
+
+SYMBOLIZER_FORMAT=$(arch symbolizer.o)
+echo "Injecting $SYMBOLIZER_FORMAT symbolizer..."
+for A in $TARGET_DIR/libclang_rt.*san*.a; do
+ A_FORMAT=$(arch $A)
+ if [[ "$A_FORMAT" != "$SYMBOLIZER_FORMAT" ]] ; then
+ continue
+ fi
+ (nm -u $A 2>/dev/null | grep -E "__sanitizer_symbolize_code" >/dev/null) || continue
+ echo "$A"
+ $AR rcs $A symbolizer.o
+done
+
+echo "Success!"
diff --git a/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt b/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt
new file mode 100644
index 000000000000..033acf7f202a
--- /dev/null
+++ b/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt
@@ -0,0 +1,137 @@
+_GLOBAL_OFFSET_TABLE_ U
+_ZN11__sanitizer13internal_mmapEPvmiiiy U
+_ZN11__sanitizer13internal_openEPKcij U
+_ZN11__sanitizer13internal_statEPKcPv U
+_ZN11__sanitizer14internal_closeEi U
+_ZN11__sanitizer14internal_fstatEiPv U
+_ZN11__sanitizer14internal_lstatEPKcPv U
+_ZN11__sanitizer15internal_strlenEPKc U
+_ZN11__sanitizer16internal_iserrorEmPi U
+_ZN11__sanitizer17internal_snprintfEPcmPKcz U
+__ctype_b_loc U
+__ctype_get_mb_cur_max U
+__cxa_atexit U
+__divdi3 U
+__dso_handle U
+__errno_location U
+__interceptor_pread w
+__interceptor_read w
+__interceptor_realpath w
+__moddi3 U
+__sanitizer_symbolize_code T
+__sanitizer_symbolize_data T
+__sanitizer_symbolize_demangle T
+__sanitizer_symbolize_flush T
+__strdup U
+__udivdi3 U
+__umoddi3 U
+_exit U
+abort U
+access U
+calloc U
+catclose U
+catgets U
+catopen U
+ceil U
+clock_gettime U
+cfgetospeed U
+dl_iterate_phdr U
+dlsym U
+dup2 U
+environ U
+execv U
+exit U
+fclose U
+fflush U
+fileno U
+fopen U
+fork U
+fprintf U
+fputc U
+free U
+freelocale U
+fwrite U
+getc U
+getcwd U
+getenv U
+getpagesize U
+getpid U
+gettimeofday U
+ioctl U
+isatty U
+isprint U
+isupper U
+isxdigit U
+log10 U
+lseek U
+lseek64 U
+malloc U
+mbrlen U
+mbrtowc U
+mbsnrtowcs U
+mbsrtowcs U
+mbtowc U
+memchr U
+memcmp U
+memcpy U
+memmove U
+memset U
+mkdir U
+munmap U
+newlocale U
+perror U
+posix_spawn U
+posix_spawn_file_actions_adddup2 U
+posix_spawn_file_actions_addopen U
+posix_spawn_file_actions_destroy U
+posix_spawn_file_actions_init U
+qsort U
+rand U
+readlink U
+realloc U
+remove U
+setvbuf U
+sigfillset U
+sigprocmask U
+snprintf U
+sprintf U
+srand U
+sscanf U
+stderr U
+stdin U
+stdout U
+strcat U
+strchr U
+strcmp U
+strcpy U
+strdup U
+strerror U
+strerror_r U
+strftime_l U
+strncmp U
+strncpy U
+strrchr U
+strsep U
+strtod_l U
+strtof_l U
+strtol U
+strtold_l U
+strtoll_l U
+strtoull_l U
+tcgetattr U
+uname U
+ungetc U
+unlink U
+uselocale U
+vasprintf U
+vfprintf U
+vsnprintf U
+vsscanf U
+waitpid U
+wcrtomb U
+wcslen U
+wcsnrtombs U
+wmemcpy U
+wmemmove U
+wmemset U
+write U
diff --git a/lib/sanitizer_common/tests/CMakeLists.txt b/lib/sanitizer_common/tests/CMakeLists.txt
index 0a828dc13486..b66f7563b01e 100644
--- a/lib/sanitizer_common/tests/CMakeLists.txt
+++ b/lib/sanitizer_common/tests/CMakeLists.txt
@@ -78,6 +78,11 @@ if(ANDROID)
list(APPEND SANITIZER_TEST_LINK_FLAGS_COMMON -pie)
endif()
+if(APPLE)
+ list(APPEND SANITIZER_TEST_CFLAGS_COMMON ${DARWIN_osx_CFLAGS})
+ list(APPEND SANITIZER_TEST_LINK_FLAGS_COMMON ${DARWIN_osx_LINKFLAGS})
+endif()
+
# MSVC linker is allocating 1M for the stack by default, which is not
# enough for the unittests. Some unittests require more than 2M.
# The default stack size for clang is 8M.
diff --git a/lib/sanitizer_common/tests/malloc_stress_transfer_test.cc b/lib/sanitizer_common/tests/malloc_stress_transfer_test.cc
new file mode 100644
index 000000000000..3e03c4bddfd7
--- /dev/null
+++ b/lib/sanitizer_common/tests/malloc_stress_transfer_test.cc
@@ -0,0 +1,37 @@
+#include <thread>
+#include <iostream>
+
+const size_t kAllocSize = 16;
+const size_t kInitialNumAllocs = 1 << 10;
+const size_t kPeriodicNumAllocs = 1 << 10;
+const size_t kNumIterations = 1 << 7;
+const size_t kNumThreads = 16;
+
+void Thread() {
+ // int sp;
+ // std::cerr << "Thread starting, sp = " << &sp << std::endl;
+ char *InitialAllocations[kInitialNumAllocs];
+ char *PeriodicAllocations[kPeriodicNumAllocs];
+ for (auto &p : InitialAllocations) p = new char[kAllocSize];
+ for (size_t i = 0; i < kNumIterations; i++) {
+ for (size_t j = 0; j < kPeriodicNumAllocs; j++) {
+ for (auto &p : PeriodicAllocations) {
+ p = new char[kAllocSize];
+ *p = 0;
+ }
+ for (auto p : PeriodicAllocations) delete [] p;
+ }
+ }
+ for (auto p : InitialAllocations) delete [] p;
+}
+
+int main() {
+// Thread();
+// return 0;
+ std::thread *Threads[kNumThreads];
+ for (auto &T : Threads) T = new std::thread(&Thread);
+ for (auto T : Threads) {
+ T->join();
+ delete T;
+ }
+}
diff --git a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
index 31eec19c3632..8df5efda674e 100644
--- a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -25,25 +25,73 @@
#include <vector>
#include <set>
+using namespace __sanitizer;
+
// Too slow for debug build
#if !SANITIZER_DEBUG
#if SANITIZER_CAN_USE_ALLOCATOR64
#if SANITIZER_WINDOWS
-static const uptr kAllocatorSpace = 0x10000000000ULL;
-static const uptr kAllocatorSize = 0x10000000000ULL; // 1T.
-static const u64 kAddressSpaceSize = 1ULL << 40;
+// On Windows 64-bit there is no easy way to find a large enough fixed address
+// space that is always available. Thus, a dynamically allocated address space
+// is used instead (i.e. ~(uptr)0).
+static const uptr kAllocatorSpace = ~(uptr)0;
+static const uptr kAllocatorSize = 0x8000000000ULL; // 500G
+static const u64 kAddressSpaceSize = 1ULL << 47;
+typedef DefaultSizeClassMap SizeClassMap;
+#elif SANITIZER_ANDROID && defined(__aarch64__)
+static const uptr kAllocatorSpace = 0x3000000000ULL;
+static const uptr kAllocatorSize = 0x2000000000ULL;
+static const u64 kAddressSpaceSize = 1ULL << 39;
+typedef VeryCompactSizeClassMap SizeClassMap;
#else
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize = 0x010000000000ULL; // 1T.
static const u64 kAddressSpaceSize = 1ULL << 47;
+typedef DefaultSizeClassMap SizeClassMap;
#endif
-typedef SizeClassAllocator64<
- kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap> Allocator64;
+struct AP64 { // Allocator Params. Short name for shorter demangled names.
+ static const uptr kSpaceBeg = kAllocatorSpace;
+ static const uptr kSpaceSize = kAllocatorSize;
+ static const uptr kMetadataSize = 16;
+ typedef ::SizeClassMap SizeClassMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+
+struct AP64Dyn {
+ static const uptr kSpaceBeg = ~(uptr)0;
+ static const uptr kSpaceSize = kAllocatorSize;
+ static const uptr kMetadataSize = 16;
+ typedef ::SizeClassMap SizeClassMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
-typedef SizeClassAllocator64<
- kAllocatorSpace, kAllocatorSize, 16, CompactSizeClassMap> Allocator64Compact;
+struct AP64Compact {
+ static const uptr kSpaceBeg = ~(uptr)0;
+ static const uptr kSpaceSize = kAllocatorSize;
+ static const uptr kMetadataSize = 16;
+ typedef CompactSizeClassMap SizeClassMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+
+struct AP64VeryCompact {
+ static const uptr kSpaceBeg = ~(uptr)0;
+ static const uptr kSpaceSize = 1ULL << 37;
+ static const uptr kMetadataSize = 16;
+ typedef VeryCompactSizeClassMap SizeClassMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+
+
+typedef SizeClassAllocator64<AP64> Allocator64;
+typedef SizeClassAllocator64<AP64Dyn> Allocator64Dynamic;
+typedef SizeClassAllocator64<AP64Compact> Allocator64Compact;
+typedef SizeClassAllocator64<AP64VeryCompact> Allocator64VeryCompact;
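
These params structs replace the old SizeClassAllocator64<space, size, metadata, size-class map, callback> signature, so adding a knob such as kFlags no longer touches every instantiation, and kSpaceBeg = ~(uptr)0 requests a dynamically mmap-ed base instead of a fixed address. A further configuration is just one more struct of the same shape, e.g. (values illustrative, not used by these tests):

struct AP64Tiny {  // hypothetical configuration for illustration only
  static const uptr kSpaceBeg = ~(uptr)0;     // dynamic base
  static const uptr kSpaceSize = 1ULL << 36;  // 64G, illustrative
  static const uptr kMetadataSize = 0;
  typedef CompactSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator64<AP64Tiny> Allocator64Tiny;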
#elif defined(__mips64)
static const u64 kAddressSpaceSize = 1ULL << 40;
#elif defined(__aarch64__)
@@ -70,7 +118,7 @@ typedef SizeClassAllocator32<
template <class SizeClassMap>
void TestSizeClassMap() {
typedef SizeClassMap SCMap;
- // SCMap::Print();
+ SCMap::Print();
SCMap::Validate();
}
@@ -82,6 +130,10 @@ TEST(SanitizerCommon, CompactSizeClassMap) {
TestSizeClassMap<CompactSizeClassMap>();
}
+TEST(SanitizerCommon, VeryCompactSizeClassMap) {
+ TestSizeClassMap<VeryCompactSizeClassMap>();
+}
+
TEST(SanitizerCommon, InternalSizeClassMap) {
TestSizeClassMap<InternalSizeClassMap>();
}
@@ -89,13 +141,15 @@ TEST(SanitizerCommon, InternalSizeClassMap) {
template <class Allocator>
void TestSizeClassAllocator() {
Allocator *a = new Allocator;
- a->Init();
+ a->Init(kReleaseToOSIntervalNever);
SizeClassAllocatorLocalCache<Allocator> cache;
memset(&cache, 0, sizeof(cache));
cache.Init(0);
- static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
- 50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};
+ static const uptr sizes[] = {
+ 1, 16, 30, 40, 100, 1000, 10000,
+ 50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000
+ };
std::vector<void *> allocated;
@@ -154,15 +208,29 @@ void TestSizeClassAllocator() {
}
#if SANITIZER_CAN_USE_ALLOCATOR64
+// These tests can fail on Windows if memory is somewhat full and lit happens
+// to run them all at the same time. FIXME: Make them not flaky and reenable.
+#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64) {
TestSizeClassAllocator<Allocator64>();
}
+TEST(SanitizerCommon, SizeClassAllocator64Dynamic) {
+ TestSizeClassAllocator<Allocator64Dynamic>();
+}
+
+#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64Compact) {
TestSizeClassAllocator<Allocator64Compact>();
}
#endif
+TEST(SanitizerCommon, SizeClassAllocator64VeryCompact) {
+ TestSizeClassAllocator<Allocator64VeryCompact>();
+}
+#endif
+#endif
+
TEST(SanitizerCommon, SizeClassAllocator32Compact) {
TestSizeClassAllocator<Allocator32Compact>();
}
@@ -170,7 +238,7 @@ TEST(SanitizerCommon, SizeClassAllocator32Compact) {
template <class Allocator>
void SizeClassAllocatorMetadataStress() {
Allocator *a = new Allocator;
- a->Init();
+ a->Init(kReleaseToOSIntervalNever);
SizeClassAllocatorLocalCache<Allocator> cache;
memset(&cache, 0, sizeof(cache));
cache.Init(0);
@@ -179,7 +247,7 @@ void SizeClassAllocatorMetadataStress() {
void *allocated[kNumAllocs];
void *meta[kNumAllocs];
for (uptr i = 0; i < kNumAllocs; i++) {
- void *x = cache.Allocate(a, 1 + i % 50);
+ void *x = cache.Allocate(a, 1 + i % (Allocator::kNumClasses - 1));
allocated[i] = x;
meta[i] = a->GetMetaData(x);
}
@@ -190,7 +258,7 @@ void SizeClassAllocatorMetadataStress() {
EXPECT_EQ(m, meta[idx]);
}
for (uptr i = 0; i < kNumAllocs; i++) {
- cache.Deallocate(a, 1 + i % 50, allocated[i]);
+ cache.Deallocate(a, 1 + i % (Allocator::kNumClasses - 1), allocated[i]);
}
a->TestOnlyUnmap();
@@ -198,31 +266,41 @@ void SizeClassAllocatorMetadataStress() {
}
#if SANITIZER_CAN_USE_ALLOCATOR64
+// These tests can fail on Windows if memory is somewhat full and lit happens
+// to run them all at the same time. FIXME: Make them not flaky and reenable.
+#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
SizeClassAllocatorMetadataStress<Allocator64>();
}
+TEST(SanitizerCommon, SizeClassAllocator64DynamicMetadataStress) {
+ SizeClassAllocatorMetadataStress<Allocator64Dynamic>();
+}
+
+#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
+#endif
+
+#endif
#endif // SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
SizeClassAllocatorMetadataStress<Allocator32Compact>();
}
template <class Allocator>
-void SizeClassAllocatorGetBlockBeginStress() {
+void SizeClassAllocatorGetBlockBeginStress(u64 TotalSize) {
Allocator *a = new Allocator;
- a->Init();
+ a->Init(kReleaseToOSIntervalNever);
SizeClassAllocatorLocalCache<Allocator> cache;
memset(&cache, 0, sizeof(cache));
cache.Init(0);
- uptr max_size_class = Allocator::kNumClasses - 1;
+ uptr max_size_class = Allocator::SizeClassMapT::kLargestClassID;
uptr size = Allocator::SizeClassMapT::Size(max_size_class);
- u64 G8 = 1ULL << 33;
// Make sure we correctly compute GetBlockBegin() w/o overflow.
- for (size_t i = 0; i <= G8 / size; i++) {
+ for (size_t i = 0; i <= TotalSize / size; i++) {
void *x = cache.Allocate(a, max_size_class);
void *beg = a->GetBlockBegin(x);
// if ((i & (i - 1)) == 0)
@@ -235,15 +313,30 @@ void SizeClassAllocatorGetBlockBeginStress() {
}
#if SANITIZER_CAN_USE_ALLOCATOR64
+// These tests can fail on Windows if memory is somewhat full and lit happens
+// to run them all at the same time. FIXME: Make them not flaky and reenable.
+#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
- SizeClassAllocatorGetBlockBeginStress<Allocator64>();
+ SizeClassAllocatorGetBlockBeginStress<Allocator64>(
+ 1ULL << (SANITIZER_ANDROID ? 31 : 33));
}
+TEST(SanitizerCommon, SizeClassAllocator64DynamicGetBlockBegin) {
+ SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
+ 1ULL << (SANITIZER_ANDROID ? 31 : 33));
+}
+#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
- SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>();
+ SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>(1ULL << 33);
+}
+#endif
+TEST(SanitizerCommon, SizeClassAllocator64VeryCompactGetBlockBegin) {
+ // Does not have > 4Gb for each class.
+ SizeClassAllocatorGetBlockBeginStress<Allocator64VeryCompact>(1ULL << 31);
}
TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
- SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>();
+ SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>(1ULL << 33);
}
+#endif
#endif // SANITIZER_CAN_USE_ALLOCATOR64
struct TestMapUnmapCallback {
@@ -255,27 +348,42 @@ int TestMapUnmapCallback::map_count;
int TestMapUnmapCallback::unmap_count;
#if SANITIZER_CAN_USE_ALLOCATOR64
+// These tests can fail on Windows if memory is somewhat full and lit happens
+// to run them all at the same time. FIXME: Make them not flaky and reenable.
+#if !SANITIZER_WINDOWS
+
+struct AP64WithCallback {
+ static const uptr kSpaceBeg = kAllocatorSpace;
+ static const uptr kSpaceSize = kAllocatorSize;
+ static const uptr kMetadataSize = 16;
+ typedef ::SizeClassMap SizeClassMap;
+ typedef TestMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+
TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
TestMapUnmapCallback::map_count = 0;
TestMapUnmapCallback::unmap_count = 0;
- typedef SizeClassAllocator64<
- kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap,
- TestMapUnmapCallback> Allocator64WithCallBack;
+ typedef SizeClassAllocator64<AP64WithCallback> Allocator64WithCallBack;
Allocator64WithCallBack *a = new Allocator64WithCallBack;
- a->Init();
+ a->Init(kReleaseToOSIntervalNever);
EXPECT_EQ(TestMapUnmapCallback::map_count, 1); // Allocator state.
SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
memset(&cache, 0, sizeof(cache));
cache.Init(0);
AllocatorStats stats;
stats.Init();
- a->AllocateBatch(&stats, &cache, 32);
- EXPECT_EQ(TestMapUnmapCallback::map_count, 3); // State + alloc + metadata.
+ const size_t kNumChunks = 128;
+ uint32_t chunks[kNumChunks];
+ a->GetFromAllocator(&stats, 30, chunks, kNumChunks);
+ // State + alloc + metadata + freearray.
+ EXPECT_EQ(TestMapUnmapCallback::map_count, 4);
a->TestOnlyUnmap();
EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1); // The whole thing.
delete a;
}
#endif
+#endif
TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
TestMapUnmapCallback::map_count = 0;
@@ -289,7 +397,7 @@ TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
TestMapUnmapCallback>
Allocator32WithCallBack;
Allocator32WithCallBack *a = new Allocator32WithCallBack;
- a->Init();
+ a->Init(kReleaseToOSIntervalNever);
EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
SizeClassAllocatorLocalCache<Allocator32WithCallBack> cache;
memset(&cache, 0, sizeof(cache));
@@ -322,20 +430,24 @@ TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
template<class Allocator>
void FailInAssertionOnOOM() {
Allocator a;
- a.Init();
+ a.Init(kReleaseToOSIntervalNever);
SizeClassAllocatorLocalCache<Allocator> cache;
memset(&cache, 0, sizeof(cache));
cache.Init(0);
AllocatorStats stats;
stats.Init();
+ const size_t kNumChunks = 128;
+ uint32_t chunks[kNumChunks];
for (int i = 0; i < 1000000; i++) {
- a.AllocateBatch(&stats, &cache, 52);
+ a.GetFromAllocator(&stats, 52, chunks, kNumChunks);
}
a.TestOnlyUnmap();
}
-#if SANITIZER_CAN_USE_ALLOCATOR64
+// Don't test OOM conditions on Win64 because it causes other tests on the same
+// machine to OOM.
+#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
}
@@ -390,8 +502,10 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
}
CHECK_EQ(a.TotalMemoryUsed(), 0);
- // Test alignments.
- uptr max_alignment = SANITIZER_WORDSIZE == 64 ? (1 << 28) : (1 << 24);
+ // Test alignments. Test with 512MB alignment on x64 non-Windows machines.
+ // Windows doesn't overcommit, and many machines do not have 51.2GB of swap.
+ uptr max_alignment =
+ (SANITIZER_WORDSIZE == 64 && !SANITIZER_WINDOWS) ? (1 << 28) : (1 << 24);
for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
const uptr kNumAlignedAllocs = 100;
for (uptr i = 0; i < kNumAlignedAllocs; i++) {
@@ -424,7 +538,7 @@ void TestCombinedAllocator() {
CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
Allocator;
Allocator *a = new Allocator;
- a->Init(/* may_return_null */ true);
+ a->Init(/* may_return_null */ true, kReleaseToOSIntervalNever);
AllocatorCache cache;
memset(&cache, 0, sizeof(cache));
@@ -480,6 +594,13 @@ TEST(SanitizerCommon, CombinedAllocator64) {
SizeClassAllocatorLocalCache<Allocator64> > ();
}
+TEST(SanitizerCommon, CombinedAllocator64Dynamic) {
+ TestCombinedAllocator<Allocator64Dynamic,
+ LargeMmapAllocator<>,
+ SizeClassAllocatorLocalCache<Allocator64Dynamic> > ();
+}
+
+#if !SANITIZER_ANDROID
TEST(SanitizerCommon, CombinedAllocator64Compact) {
TestCombinedAllocator<Allocator64Compact,
LargeMmapAllocator<>,
@@ -487,6 +608,13 @@ TEST(SanitizerCommon, CombinedAllocator64Compact) {
}
#endif
+TEST(SanitizerCommon, CombinedAllocator64VeryCompact) {
+ TestCombinedAllocator<Allocator64VeryCompact,
+ LargeMmapAllocator<>,
+ SizeClassAllocatorLocalCache<Allocator64VeryCompact> > ();
+}
+#endif
+
TEST(SanitizerCommon, CombinedAllocator32Compact) {
TestCombinedAllocator<Allocator32Compact,
LargeMmapAllocator<>,
@@ -499,7 +627,7 @@ void TestSizeClassAllocatorLocalCache() {
typedef typename AllocatorCache::Allocator Allocator;
Allocator *a = new Allocator();
- a->Init();
+ a->Init(kReleaseToOSIntervalNever);
memset(&cache, 0, sizeof(cache));
cache.Init(0);
@@ -528,16 +656,31 @@ void TestSizeClassAllocatorLocalCache() {
}
#if SANITIZER_CAN_USE_ALLOCATOR64
+// These tests can fail on Windows if memory is somewhat full and lit happens
+// to run them all at the same time. FIXME: Make them not flaky and reenable.
+#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
TestSizeClassAllocatorLocalCache<
SizeClassAllocatorLocalCache<Allocator64> >();
}
+TEST(SanitizerCommon, SizeClassAllocator64DynamicLocalCache) {
+ TestSizeClassAllocatorLocalCache<
+ SizeClassAllocatorLocalCache<Allocator64Dynamic> >();
+}
+
+#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
TestSizeClassAllocatorLocalCache<
SizeClassAllocatorLocalCache<Allocator64Compact> >();
}
#endif
+TEST(SanitizerCommon, SizeClassAllocator64VeryCompactLocalCache) {
+ TestSizeClassAllocatorLocalCache<
+ SizeClassAllocatorLocalCache<Allocator64VeryCompact> >();
+}
+#endif
+#endif
TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
TestSizeClassAllocatorLocalCache<
@@ -559,7 +702,7 @@ void *AllocatorLeakTestWorker(void *arg) {
TEST(SanitizerCommon, AllocatorLeakTest) {
typedef AllocatorCache::Allocator Allocator;
Allocator a;
- a.Init();
+ a.Init(kReleaseToOSIntervalNever);
uptr total_used_memory = 0;
for (int i = 0; i < 100; i++) {
pthread_t t;
@@ -592,7 +735,7 @@ static void *DeallocNewThreadWorker(void *arg) {
// able to call Deallocate on a zeroed cache, and it will self-initialize.
TEST(Allocator, AllocatorCacheDeallocNewThread) {
AllocatorCache::Allocator allocator;
- allocator.Init();
+ allocator.Init(kReleaseToOSIntervalNever);
AllocatorCache main_cache;
AllocatorCache child_cache;
memset(&main_cache, 0, sizeof(main_cache));
@@ -663,7 +806,7 @@ void IterationTestCallback(uptr chunk, void *arg) {
template <class Allocator>
void TestSizeClassAllocatorIteration() {
Allocator *a = new Allocator;
- a->Init();
+ a->Init(kReleaseToOSIntervalNever);
SizeClassAllocatorLocalCache<Allocator> cache;
memset(&cache, 0, sizeof(cache));
cache.Init(0);
@@ -703,9 +846,16 @@ void TestSizeClassAllocatorIteration() {
}
#if SANITIZER_CAN_USE_ALLOCATOR64
+// These tests can fail on Windows if memory is somewhat full and lit happens
+// to run them all at the same time. FIXME: Make them not flaky and reenable.
+#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
TestSizeClassAllocatorIteration<Allocator64>();
}
+TEST(SanitizerCommon, SizeClassAllocator64DynamicIteration) {
+ TestSizeClassAllocatorIteration<Allocator64Dynamic>();
+}
+#endif
#endif
TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
@@ -777,33 +927,60 @@ TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
}
-#if SANITIZER_CAN_USE_ALLOCATOR64
+// Don't test OOM conditions on Win64 because it causes other tests on the same
+// machine to OOM.
+#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
+typedef SizeClassMap<3, 4, 8, 63, 128, 16> SpecialSizeClassMap;
+struct AP64_SpecialSizeClassMap {
+ static const uptr kSpaceBeg = kAllocatorSpace;
+ static const uptr kSpaceSize = kAllocatorSize;
+ static const uptr kMetadataSize = 0;
+ typedef SpecialSizeClassMap SizeClassMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+
// Regression test for out-of-memory condition in PopulateFreeList().
TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
// In a world where regions are small and chunks are huge...
- typedef SizeClassMap<63, 128, 16> SpecialSizeClassMap;
- typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
- SpecialSizeClassMap> SpecialAllocator64;
+ typedef SizeClassAllocator64<AP64_SpecialSizeClassMap> SpecialAllocator64;
const uptr kRegionSize =
kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
SpecialAllocator64 *a = new SpecialAllocator64;
- a->Init();
+ a->Init(kReleaseToOSIntervalNever);
SizeClassAllocatorLocalCache<SpecialAllocator64> cache;
memset(&cache, 0, sizeof(cache));
cache.Init(0);
// ...one man is on a mission to overflow a region with a series of
// successive allocations.
+
const uptr kClassID = 107;
- const uptr kAllocationSize = DefaultSizeClassMap::Size(kClassID);
+ const uptr kAllocationSize = SpecialSizeClassMap::Size(kClassID);
ASSERT_LT(2 * kAllocationSize, kRegionSize);
ASSERT_GT(3 * kAllocationSize, kRegionSize);
cache.Allocate(a, kClassID);
EXPECT_DEATH(cache.Allocate(a, kClassID) && cache.Allocate(a, kClassID),
"The process has exhausted");
+
+ const uptr Class2 = 100;
+ const uptr Size2 = SpecialSizeClassMap::Size(Class2);
+ ASSERT_EQ(Size2 * 8, kRegionSize);
+ char *p[7];
+ for (int i = 0; i < 7; i++) {
+ p[i] = (char*)cache.Allocate(a, Class2);
+ fprintf(stderr, "p[%d] %p s = %lx\n", i, (void*)p[i], Size2);
+ p[i][Size2 - 1] = 42;
+ if (i) ASSERT_LT(p[i - 1], p[i]);
+ }
+ EXPECT_DEATH(cache.Allocate(a, Class2), "The process has exhausted");
+ cache.Deallocate(a, Class2, p[0]);
+ cache.Drain(a);
+ ASSERT_EQ(p[6][Size2 - 1], 42);
a->TestOnlyUnmap();
delete a;
}
+
#endif
TEST(SanitizerCommon, TwoLevelByteMap) {
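The recurring theme in the hunks above: SizeClassAllocator64 now takes a single
allocator-parameters struct instead of a list of template arguments, and Init()
takes a release-to-OS interval. A minimal sketch of the pattern, mirroring
AP64_SpecialSizeClassMap; the struct name and the space constants here are
illustrative, not values any test uses:

// Sketch of the new parameter-struct idiom (AP64_Example is hypothetical).
struct AP64_Example {
  static const uptr kSpaceBeg = 0x600000000000ULL;
  static const uptr kSpaceSize = 0x10000000000ULL;  // 1T region.
  static const uptr kMetadataSize = 0;
  typedef DefaultSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator64<AP64_Example> ExampleAllocator;

void InitExample() {
  static ExampleAllocator a;
  // Init() now takes the release-to-OS interval; the tests pass
  // kReleaseToOSIntervalNever to keep their behavior deterministic.
  a.Init(kReleaseToOSIntervalNever);
}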
diff --git a/lib/sanitizer_common/tests/sanitizer_allocator_testlib.cc b/lib/sanitizer_common/tests/sanitizer_allocator_testlib.cc
index 038d9c543bb7..c6dd3c4bb20c 100644
--- a/lib/sanitizer_common/tests/sanitizer_allocator_testlib.cc
+++ b/lib/sanitizer_common/tests/sanitizer_allocator_testlib.cc
@@ -14,6 +14,7 @@
clang++ -std=c++11 -fno-exceptions -g -fPIC -I. -I../include -Isanitizer \
sanitizer_common/tests/sanitizer_allocator_testlib.cc \
$(\ls sanitizer_common/sanitizer_*.cc | grep -v sanitizer_common_nolibc.cc) \
+ sanitizer_common/sanitizer_linux_x86_64.S \
-shared -lpthread -o testmalloc.so
LD_PRELOAD=`pwd`/testmalloc.so /your/app
*/
@@ -33,13 +34,22 @@ LD_PRELOAD=`pwd`/testmalloc.so /your/app
# define SANITIZER_FREE_HOOK(p)
#endif
-namespace {
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x10000000000ULL; // 1T.
-// typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
-typedef SizeClassAllocator64<~(uptr)0, kAllocatorSize, 0,
- CompactSizeClassMap> PrimaryAllocator;
+struct __AP64 {
+ static const uptr kSpaceBeg = ~(uptr)0;
+ static const uptr kSpaceSize = kAllocatorSize;
+ static const uptr kMetadataSize = 0;
+ typedef CompactSizeClassMap SizeClassMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags =
+ SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
+};
+
+namespace {
+
+typedef SizeClassAllocator64<__AP64> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
@@ -59,6 +69,25 @@ static void thread_dtor(void *v) {
allocator.SwallowCache(&cache);
}
+static size_t GetRss() {
+ if (FILE *f = fopen("/proc/self/statm", "r")) {
+ size_t size = 0, rss = 0;
+ fscanf(f, "%zd %zd", &size, &rss);
+ fclose(f);
+ return rss << 12; // rss is in pages; assumes 4 KiB pages.
+ }
+ return 0;
+}
+
+struct AtExit {
+ ~AtExit() {
+ allocator.PrintStats();
+ Printf("RSS: %zdM\n", GetRss() >> 20);
+ }
+};
+
+static AtExit at_exit;
+
static void NOINLINE thread_init() {
if (!global_inited) {
global_inited = true;
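GetRss() above reads /proc/self/statm, whose second field is the resident set
measured in pages, and hard-codes a 4 KiB page size. A variant that queries the
page size instead, assuming only POSIX sysconf; a sketch, not part of the
patch:

#include <stdio.h>
#include <unistd.h>

// Like GetRss() above, but scales by the actual page size.
static size_t GetRssPortable() {
  size_t pages = 0;
  if (FILE *f = fopen("/proc/self/statm", "r")) {
    size_t size = 0;
    if (fscanf(f, "%zu %zu", &size, &pages) != 2) pages = 0;
    fclose(f);
  }
  return pages * (size_t)sysconf(_SC_PAGESIZE);  // resident bytes
}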
diff --git a/lib/sanitizer_common/tests/sanitizer_common_test.cc b/lib/sanitizer_common/tests/sanitizer_common_test.cc
index 6fc308ad14d4..ebc885db7525 100644
--- a/lib/sanitizer_common/tests/sanitizer_common_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_common_test.cc
@@ -10,6 +10,8 @@
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//
+#include <algorithm>
+
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
@@ -170,15 +172,54 @@ bool UptrLess(uptr a, uptr b) {
return a < b;
}
-TEST(SanitizerCommon, InternalBinarySearch) {
+TEST(SanitizerCommon, InternalLowerBound) {
static const uptr kSize = 5;
- uptr arr[kSize];
- for (uptr i = 0; i < kSize; i++) arr[i] = i * i;
+ int arr[kSize];
+ arr[0] = 1;
+ arr[1] = 3;
+ arr[2] = 5;
+ arr[3] = 7;
+ arr[4] = 11;
+
+ EXPECT_EQ(0u, InternalLowerBound(arr, 0, kSize, 0, UptrLess));
+ EXPECT_EQ(0u, InternalLowerBound(arr, 0, kSize, 1, UptrLess));
+ EXPECT_EQ(1u, InternalLowerBound(arr, 0, kSize, 2, UptrLess));
+ EXPECT_EQ(1u, InternalLowerBound(arr, 0, kSize, 3, UptrLess));
+ EXPECT_EQ(2u, InternalLowerBound(arr, 0, kSize, 4, UptrLess));
+ EXPECT_EQ(2u, InternalLowerBound(arr, 0, kSize, 5, UptrLess));
+ EXPECT_EQ(3u, InternalLowerBound(arr, 0, kSize, 6, UptrLess));
+ EXPECT_EQ(3u, InternalLowerBound(arr, 0, kSize, 7, UptrLess));
+ EXPECT_EQ(4u, InternalLowerBound(arr, 0, kSize, 8, UptrLess));
+ EXPECT_EQ(4u, InternalLowerBound(arr, 0, kSize, 9, UptrLess));
+ EXPECT_EQ(4u, InternalLowerBound(arr, 0, kSize, 10, UptrLess));
+ EXPECT_EQ(4u, InternalLowerBound(arr, 0, kSize, 11, UptrLess));
+ EXPECT_EQ(5u, InternalLowerBound(arr, 0, kSize, 12, UptrLess));
+}
- for (uptr i = 0; i < kSize; i++)
- ASSERT_EQ(InternalBinarySearch(arr, 0, kSize, i * i, UptrLess), i);
+TEST(SanitizerCommon, InternalLowerBoundVsStdLowerBound) {
+ std::vector<int> data;
+ auto create_item = [] (size_t i, size_t j) {
+ auto v = i * 10000 + j;
+ return ((v << 6) + (v >> 6) + 0x9e3779b9) % 100;
+ };
+ for (size_t i = 0; i < 1000; ++i) {
+ data.resize(i);
+ for (size_t j = 0; j < i; ++j) {
+ data[j] = create_item(i, j);
+ }
- ASSERT_EQ(InternalBinarySearch(arr, 0, kSize, 7, UptrLess), kSize + 1);
+ std::sort(data.begin(), data.end());
+
+ for (size_t j = 0; j < i; ++j) {
+ int val = create_item(i, j);
+ for (auto to_find : {val - 1, val, val + 1}) {
+ uptr expected =
+ std::lower_bound(data.begin(), data.end(), to_find) - data.begin();
+ EXPECT_EQ(expected, InternalLowerBound(data.data(), 0, data.size(),
+ to_find, std::less<int>()));
+ }
+ }
+ }
}
#if SANITIZER_LINUX && !SANITIZER_ANDROID
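The property these tests pin down: InternalLowerBound returns the first index
in [first, last) whose element is not less than the sought value, i.e. the
index form of std::lower_bound, which the second test cross-checks against the
standard library on randomized sorted data. A sketch of the classic binary
search that satisfies this contract (illustrative, not necessarily the
library's implementation):

template <class T, class Compare>
uptr LowerBoundSketch(const T *arr, uptr first, uptr last, const T &val,
                      Compare comp) {
  while (first < last) {
    uptr mid = first + (last - first) / 2;
    if (comp(arr[mid], val))
      first = mid + 1;  // arr[mid] < val: the answer lies strictly above mid.
    else
      last = mid;       // arr[mid] >= val: mid is a candidate answer.
  }
  return first;  // first index whose element is not less than val.
}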
diff --git a/lib/sanitizer_common/tests/sanitizer_format_interceptor_test.cc b/lib/sanitizer_common/tests/sanitizer_format_interceptor_test.cc
index 13918aff1009..2f0494f82b0a 100644
--- a/lib/sanitizer_common/tests/sanitizer_format_interceptor_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_format_interceptor_test.cc
@@ -256,4 +256,8 @@ TEST(SanitizerCommonInterceptors, Printf) {
// Checks for wide-character strings are not implemented yet.
testPrintf("%ls", 1, 0);
+
+ testPrintf("%m", 0);
+ testPrintf("%m%s", 1, test_buf_size);
+ testPrintf("%s%m%s", 2, test_buf_size, test_buf_size);
}
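The new cases cover glibc's %m directive, which printf expands to
strerror(errno) without consuming a variadic argument; that is why
testPrintf("%m", 0) expects no argument sizes while the %s directives around it
do. A tiny glibc-specific illustration:

#include <errno.h>
#include <stdio.h>

void DemoPercentM() {
  errno = ENOENT;
  // Prints "open failed: No such file or directory"; no vararg is read.
  printf("open failed: %m\n");
}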
diff --git a/lib/sanitizer_common/tests/sanitizer_libc_test.cc b/lib/sanitizer_common/tests/sanitizer_libc_test.cc
index 015e32a09e37..625257622bf2 100644
--- a/lib/sanitizer_common/tests/sanitizer_libc_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_libc_test.cc
@@ -25,6 +25,8 @@
# include "sanitizer_common/sanitizer_posix.h"
#endif
+using namespace __sanitizer;
+
// A regression test for internal_memmove() implementation.
TEST(SanitizerCommon, InternalMemmoveRegression) {
char src[] = "Hello World";
diff --git a/lib/sanitizer_common/tests/sanitizer_nolibc_test_main.cc b/lib/sanitizer_common/tests/sanitizer_nolibc_test_main.cc
index 72df621d07ff..e761f00c56fc 100644
--- a/lib/sanitizer_common/tests/sanitizer_nolibc_test_main.cc
+++ b/lib/sanitizer_common/tests/sanitizer_nolibc_test_main.cc
@@ -15,5 +15,5 @@
#include "sanitizer_common/sanitizer_libc.h"
extern "C" void _start() {
- internal__exit(0);
+ __sanitizer::internal__exit(0);
}
diff --git a/lib/sanitizer_common/tests/sanitizer_procmaps_test.cc b/lib/sanitizer_common/tests/sanitizer_procmaps_test.cc
index ae7c5d531ae7..4ac55c706d6c 100644
--- a/lib/sanitizer_common/tests/sanitizer_procmaps_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_procmaps_test.cc
@@ -52,5 +52,26 @@ TEST(MemoryMappingLayout, DumpListOfModules) {
EXPECT_TRUE(found);
}
+TEST(MemoryMapping, LoadedModuleArchAndUUID) {
+ if (SANITIZER_MAC) {
+ MemoryMappingLayout memory_mapping(false);
+ const uptr kMaxModules = 100;
+ InternalMmapVector<LoadedModule> modules(kMaxModules);
+ memory_mapping.DumpListOfModules(&modules);
+ for (uptr i = 0; i < modules.size(); ++i) {
+ ModuleArch arch = modules[i].arch();
+ // Darwin unit tests are only run on i386/x86_64/x86_64h.
+ if (SANITIZER_WORDSIZE == 32) {
+ EXPECT_EQ(arch, kModuleArchI386);
+ } else if (SANITIZER_WORDSIZE == 64) {
+ EXPECT_TRUE(arch == kModuleArchX86_64 || arch == kModuleArchX86_64H);
+ }
+ const u8 *uuid = modules[i].uuid();
+ u8 null_uuid[kModuleUUIDSize] = {0};
+ EXPECT_NE(memcmp(null_uuid, uuid, kModuleUUIDSize), 0);
+ }
+ }
+}
+
} // namespace __sanitizer
#endif // !defined(_WIN32)
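The new Darwin test asserts that every loaded module reports a plausible
architecture and a non-null Mach-O UUID (kModuleUUIDSize bytes). A hypothetical
helper of the kind one might use to render such a UUID while debugging; it is
not part of the patch:

#include <stdio.h>

// Prints a 16-byte UUID in the canonical 8-4-4-4-12 hex grouping.
void PrintModuleUUID(const unsigned char *uuid, unsigned size) {
  for (unsigned i = 0; i < size; i++)
    printf("%02x%s", uuid[i],
           (i == 3 || i == 5 || i == 7 || i == 9) ? "-" : "");
  printf("\n");
}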
diff --git a/lib/sanitizer_common/tests/sanitizer_test_main.cc b/lib/sanitizer_common/tests/sanitizer_test_main.cc
index 20f8f53975d0..0da886120c31 100644
--- a/lib/sanitizer_common/tests/sanitizer_test_main.cc
+++ b/lib/sanitizer_common/tests/sanitizer_test_main.cc
@@ -19,6 +19,6 @@ int main(int argc, char **argv) {
argv0 = argv[0];
testing::GTEST_FLAG(death_test_style) = "threadsafe";
testing::InitGoogleTest(&argc, argv);
- SetCommonFlagsDefaults();
+ __sanitizer::SetCommonFlagsDefaults();
return RUN_ALL_TESTS();
}