Diffstat (limited to 'lib/lsan')
-rw-r--r--  lib/lsan/CMakeLists.txt        |  39
-rw-r--r--  lib/lsan/lsan.cc               |   1
-rw-r--r--  lib/lsan/lsan.h                |   7
-rw-r--r--  lib/lsan/lsan_allocator.cc     |  77
-rw-r--r--  lib/lsan/lsan_allocator.h      |  49
-rw-r--r--  lib/lsan/lsan_common.cc        |  48
-rw-r--r--  lib/lsan/lsan_common.h         |  54
-rw-r--r--  lib/lsan/lsan_common_linux.cc  |  26
-rw-r--r--  lib/lsan/lsan_common_mac.cc    | 126
-rw-r--r--  lib/lsan/lsan_flags.inc        |   2
-rw-r--r--  lib/lsan/lsan_interceptors.cc  |  84
-rw-r--r--  lib/lsan/lsan_linux.cc         |  33
-rw-r--r--  lib/lsan/lsan_malloc_mac.cc    |  55
-rw-r--r--  lib/lsan/lsan_thread.cc        |  15
-rw-r--r--  lib/lsan/weak_symbols.txt      |   2
15 files changed, 486 insertions(+), 132 deletions(-)
diff --git a/lib/lsan/CMakeLists.txt b/lib/lsan/CMakeLists.txt
index b782f2421dc2b..55c825f5c6ea9 100644
--- a/lib/lsan/CMakeLists.txt
+++ b/lib/lsan/CMakeLists.txt
@@ -5,12 +5,15 @@ append_rtti_flag(OFF LSAN_CFLAGS)
set(LSAN_COMMON_SOURCES
lsan_common.cc
- lsan_common_linux.cc)
+ lsan_common_linux.cc
+ lsan_common_mac.cc)
set(LSAN_SOURCES
lsan.cc
lsan_allocator.cc
+ lsan_linux.cc
lsan_interceptors.cc
+ lsan_malloc_mac.cc
lsan_preinit.cc
lsan_thread.cc)
@@ -24,16 +27,34 @@ add_compiler_rt_object_libraries(RTLSanCommon
if(COMPILER_RT_HAS_LSAN)
add_compiler_rt_component(lsan)
- foreach(arch ${LSAN_SUPPORTED_ARCH})
+ if(APPLE)
+ add_weak_symbols("lsan" WEAK_SYMBOL_LINK_FLAGS)
+ add_weak_symbols("sanitizer_common" WEAK_SYMBOL_LINK_FLAGS)
+
add_compiler_rt_runtime(clang_rt.lsan
- STATIC
- ARCHS ${arch}
+ SHARED
+ OS ${SANITIZER_COMMON_SUPPORTED_OS}
+ ARCHS ${LSAN_SUPPORTED_ARCH}
SOURCES ${LSAN_SOURCES}
- $<TARGET_OBJECTS:RTInterception.${arch}>
- $<TARGET_OBJECTS:RTSanitizerCommon.${arch}>
- $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}>
- $<TARGET_OBJECTS:RTLSanCommon.${arch}>
+ OBJECT_LIBS RTLSanCommon
+ RTInterception
+ RTSanitizerCommon
+ RTSanitizerCommonLibc
CFLAGS ${LSAN_CFLAGS}
+ LINK_FLAGS ${WEAK_SYMBOL_LINK_FLAGS}
PARENT_TARGET lsan)
- endforeach()
+ else()
+ foreach(arch ${LSAN_SUPPORTED_ARCH})
+ add_compiler_rt_runtime(clang_rt.lsan
+ STATIC
+ ARCHS ${arch}
+ SOURCES ${LSAN_SOURCES}
+ $<TARGET_OBJECTS:RTInterception.${arch}>
+ $<TARGET_OBJECTS:RTSanitizerCommon.${arch}>
+ $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}>
+ $<TARGET_OBJECTS:RTLSanCommon.${arch}>
+ CFLAGS ${LSAN_CFLAGS}
+ PARENT_TARGET lsan)
+ endforeach()
+ endif()
endif()
diff --git a/lib/lsan/lsan.cc b/lib/lsan/lsan.cc
index c7c34299147db..6c4767d612522 100644
--- a/lib/lsan/lsan.cc
+++ b/lib/lsan/lsan.cc
@@ -76,6 +76,7 @@ extern "C" void __lsan_init() {
InitializeFlags();
InitCommonLsan();
InitializeAllocator();
+ ReplaceSystemMalloc();
InitTlsSize();
InitializeInterceptors();
InitializeThreadRegistry();
diff --git a/lib/lsan/lsan.h b/lib/lsan/lsan.h
index ec5eb93dc155e..1061d2fcfde75 100644
--- a/lib/lsan/lsan.h
+++ b/lib/lsan/lsan.h
@@ -41,6 +41,13 @@
namespace __lsan {
void InitializeInterceptors();
+void ReplaceSystemMalloc();
+
+#define ENSURE_LSAN_INITED do { \
+ CHECK(!lsan_init_is_running); \
+ if (!lsan_inited) \
+ __lsan_init(); \
+} while (0)
} // namespace __lsan
diff --git a/lib/lsan/lsan_allocator.cc b/lib/lsan/lsan_allocator.cc
index c805a39e1cc5b..011979eee396e 100644
--- a/lib/lsan/lsan_allocator.cc
+++ b/lib/lsan/lsan_allocator.cc
@@ -24,44 +24,18 @@
extern "C" void *memset(void *ptr, int value, uptr num);
namespace __lsan {
-
-struct ChunkMetadata {
- u8 allocated : 8; // Must be first.
- ChunkTag tag : 2;
- uptr requested_size : 54;
- u32 stack_trace_id;
-};
-
-#if defined(__mips64) || defined(__aarch64__)
+#if defined(__i386__) || defined(__arm__)
+static const uptr kMaxAllowedMallocSize = 1UL << 30;
+#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4UL << 30;
-static const uptr kRegionSizeLog = 20;
-static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
-typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
-typedef CompactSizeClassMap SizeClassMap;
-typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
- sizeof(ChunkMetadata), SizeClassMap, kRegionSizeLog, ByteMap>
- PrimaryAllocator;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
-
-struct AP64 { // Allocator64 parameters. Deliberately using a short name.
- static const uptr kSpaceBeg = 0x600000000000ULL;
- static const uptr kSpaceSize = 0x40000000000ULL; // 4T.
- static const uptr kMetadataSize = sizeof(ChunkMetadata);
- typedef DefaultSizeClassMap SizeClassMap;
- typedef NoOpMapUnmapCallback MapUnmapCallback;
- static const uptr kFlags = 0;
-};
-
-typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
-typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
SecondaryAllocator> Allocator;
static Allocator allocator;
-static THREADLOCAL AllocatorCache cache;
void InitializeAllocator() {
allocator.InitLinkerInitialized(
@@ -70,7 +44,7 @@ void InitializeAllocator() {
}
void AllocatorThreadFinish() {
- allocator.SwallowCache(&cache);
+ allocator.SwallowCache(GetAllocatorCache());
}
static ChunkMetadata *Metadata(const void *p) {
@@ -102,7 +76,7 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
return nullptr;
}
- void *p = allocator.Allocate(&cache, size, alignment, false);
+ void *p = allocator.Allocate(GetAllocatorCache(), size, alignment, false);
// Do not rely on the allocator to clear the memory (it's slow).
if (cleared && allocator.FromPrimary(p))
memset(p, 0, size);
@@ -116,7 +90,7 @@ void Deallocate(void *p) {
if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
RunFreeHooks(p);
RegisterDeallocation(p);
- allocator.Deallocate(&cache, p);
+ allocator.Deallocate(GetAllocatorCache(), p);
}
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
@@ -124,17 +98,17 @@ void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
RegisterDeallocation(p);
if (new_size > kMaxAllowedMallocSize) {
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
- allocator.Deallocate(&cache, p);
+ allocator.Deallocate(GetAllocatorCache(), p);
return nullptr;
}
- p = allocator.Reallocate(&cache, p, new_size, alignment);
+ p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
RegisterAllocation(stack, p, new_size);
return p;
}
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
- *begin = (uptr)&cache;
- *end = *begin + sizeof(cache);
+ *begin = (uptr)GetAllocatorCache();
+ *end = *begin + sizeof(AllocatorCache);
}
uptr GetMallocUsableSize(const void *p) {
@@ -143,6 +117,37 @@ uptr GetMallocUsableSize(const void *p) {
return m->requested_size;
}
+void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
+ return Allocate(stack, size, alignment, kAlwaysClearMemory);
+}
+
+void *lsan_malloc(uptr size, const StackTrace &stack) {
+ return Allocate(stack, size, 1, kAlwaysClearMemory);
+}
+
+void lsan_free(void *p) {
+ Deallocate(p);
+}
+
+void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
+ return Reallocate(stack, p, size, 1);
+}
+
+void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
+ size *= nmemb;
+ return Allocate(stack, size, 1, true);
+}
+
+void *lsan_valloc(uptr size, const StackTrace &stack) {
+ if (size == 0)
+ size = GetPageSizeCached();
+ return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
+}
+
+uptr lsan_mz_size(const void *p) {
+ return GetMallocUsableSize(p);
+}
+
///// Interface to the common LSan module. /////
void LockAllocator() {
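A note on lsan_calloc above: it multiplies nmemb by size without its own overflow guard, relying on the calloc interceptor (see lsan_interceptors.cc further down) to reject overflowing requests via CallocShouldReturnNullDueToOverflow first. A minimal sketch of that kind of guard, with a hypothetical name standing in for the sanitizer's helper:

    #include <cstdint>

    // Hypothetical stand-in for CallocShouldReturnNullDueToOverflow:
    // true iff nmemb * size would wrap around a 64-bit size.
    static bool CallocWouldOverflow(uint64_t nmemb, uint64_t size) {
      return size != 0 && nmemb > UINT64_MAX / size;
    }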
diff --git a/lib/lsan/lsan_allocator.h b/lib/lsan/lsan_allocator.h
index f564601193bd3..e5def17d4ee94 100644
--- a/lib/lsan/lsan_allocator.h
+++ b/lib/lsan/lsan_allocator.h
@@ -15,8 +15,10 @@
#ifndef LSAN_ALLOCATOR_H
#define LSAN_ALLOCATOR_H
+#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "lsan_common.h"
namespace __lsan {
@@ -34,6 +36,53 @@ void GetAllocatorCacheRange(uptr *begin, uptr *end);
void AllocatorThreadFinish();
void InitializeAllocator();
+const bool kAlwaysClearMemory = true;
+
+struct ChunkMetadata {
+ u8 allocated : 8; // Must be first.
+ ChunkTag tag : 2;
+#if SANITIZER_WORDSIZE == 64
+ uptr requested_size : 54;
+#else
+ uptr requested_size : 32;
+ uptr padding : 22;
+#endif
+ u32 stack_trace_id;
+};
+
+#if defined(__mips64) || defined(__aarch64__) || defined(__i386__) || \
+ defined(__arm__)
+static const uptr kRegionSizeLog = 20;
+static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
+typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
+typedef CompactSizeClassMap SizeClassMap;
+typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
+ sizeof(ChunkMetadata), SizeClassMap, kRegionSizeLog, ByteMap>
+ PrimaryAllocator;
+#elif defined(__x86_64__)
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = 0x600000000000ULL;
+ static const uptr kSpaceSize = 0x40000000000ULL; // 4T.
+ static const uptr kMetadataSize = sizeof(ChunkMetadata);
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+#endif
+typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
+
+AllocatorCache *GetAllocatorCache();
+
+void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack);
+void *lsan_malloc(uptr size, const StackTrace &stack);
+void lsan_free(void *p);
+void *lsan_realloc(void *p, uptr size, const StackTrace &stack);
+void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack);
+void *lsan_valloc(uptr size, const StackTrace &stack);
+uptr lsan_mz_size(const void *p);
+
} // namespace __lsan
#endif // LSAN_ALLOCATOR_H
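The bit-field widths in ChunkMetadata are picked so that the header packs into two machine words on LP64 targets (8 + 2 + 54 bits fill one 64-bit word, followed by the 32-bit stack_trace_id). A rough standalone illustration of the packing, a sketch rather than anything from the patch:

    #include <cstdint>
    #include <cstdio>

    struct ChunkMetadataSketch {
      uint8_t allocated : 8;         // Must be first.
      uint64_t tag : 2;              // ChunkTag needs only 2 bits (4 values).
      uint64_t requested_size : 54;  // 8 + 2 + 54 = 64 bits.
      uint32_t stack_trace_id;
    };

    int main() {
      // Typically prints 16 on LP64 (one packed word + u32 + padding).
      // Bit-field layout is implementation-defined, which is why the real
      // header splits the widths differently per word size.
      printf("sizeof = %zu\n", sizeof(ChunkMetadataSketch));
    }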
diff --git a/lib/lsan/lsan_common.cc b/lib/lsan/lsan_common.cc
index f0554526b76f7..6cc73749812bd 100644
--- a/lib/lsan/lsan_common.cc
+++ b/lib/lsan/lsan_common.cc
@@ -32,20 +32,15 @@ namespace __lsan {
// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);
-__attribute__((tls_model("initial-exec")))
-THREADLOCAL int disable_counter;
-bool DisabledInThisThread() { return disable_counter > 0; }
-void DisableInThisThread() { disable_counter++; }
-void EnableInThisThread() {
- if (!disable_counter && common_flags()->detect_leaks) {
+Flags lsan_flags;
+
+void DisableCounterUnderflow() {
+ if (common_flags()->detect_leaks) {
Report("Unmatched call to __lsan_enable().\n");
Die();
}
- disable_counter--;
}
-Flags lsan_flags;
-
void Flags::SetDefaults() {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
@@ -180,6 +175,23 @@ void ScanRangeForPointers(uptr begin, uptr end,
}
}
+// Scans a global range for heap pointers, skipping the allocator's own
+// global storage.
+void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
+ uptr allocator_begin = 0, allocator_end = 0;
+ GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
+ if (begin <= allocator_begin && allocator_begin < end) {
+ CHECK_LE(allocator_begin, allocator_end);
+ CHECK_LE(allocator_end, end);
+ if (begin < allocator_begin)
+ ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
+ kReachable);
+ if (allocator_end < end)
+ ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
+ } else {
+ ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
+ }
+}
+
void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
Frontier *frontier = reinterpret_cast<Frontier *>(arg);
ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
@@ -206,11 +218,13 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
continue;
}
uptr sp;
- bool have_registers =
- (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
- if (!have_registers) {
- Report("Unable to get registers from thread %d.\n");
- // If unable to get SP, consider the entire stack to be reachable.
+ PtraceRegistersStatus have_registers =
+ suspended_threads.GetRegistersAndSP(i, registers.data(), &sp);
+ if (have_registers != REGISTERS_AVAILABLE) {
+ Report("Unable to get registers from thread %d.\n", os_id);
+ // If unable to get SP, consider the entire stack to be reachable unless
+ // GetRegistersAndSP failed with ESRCH.
+ if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue;
sp = stack_begin;
}
@@ -258,7 +272,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
if (tls_end > cache_end)
ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
}
- if (dtls) {
+ if (dtls && !DTLSInDestruction(dtls)) {
for (uptr j = 0; j < dtls->dtv_size; ++j) {
uptr dtls_beg = dtls->dtv[j].beg;
uptr dtls_end = dtls_beg + dtls->dtv[j].size;
@@ -268,6 +282,10 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
kReachable);
}
}
+ } else {
+    // This thread's DTLS is under destruction; log it and carry on.
+ LOG_THREADS("Thread %d has DTLS under destruction.\n", os_id);
}
}
}
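The new ScanGlobalRange helper carves the allocator's own global storage out of the scanned interval so that the allocator's internal pointers are not treated as roots. A self-contained rendering of that interval logic, with a logging stub standing in for ScanRangeForPointers:

    #include <cstdint>
    #include <cstdio>

    using uptr = uintptr_t;

    // Stub for ScanRangeForPointers: just logs the range it would scan.
    static void ScanRange(uptr b, uptr e, const char *tag) {
      printf("scan [%#lx, %#lx) as %s\n", (unsigned long)b, (unsigned long)e, tag);
    }

    static void ScanGlobalRangeSketch(uptr begin, uptr end,
                                      uptr alloc_begin, uptr alloc_end) {
      if (begin <= alloc_begin && alloc_begin < end) {
        // Allocator globals sit inside this region: scan around them.
        if (begin < alloc_begin) ScanRange(begin, alloc_begin, "GLOBAL");
        if (alloc_end < end) ScanRange(alloc_end, end, "GLOBAL");
      } else {
        ScanRange(begin, end, "GLOBAL");
      }
    }

    int main() {
      // A region [0x1000, 0x9000) whose allocator globals occupy
      // [0x4000, 0x5000) is scanned as two pieces.
      ScanGlobalRangeSketch(0x1000, 0x9000, 0x4000, 0x5000);
    }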
diff --git a/lib/lsan/lsan_common.h b/lib/lsan/lsan_common.h
index 890ce6562c82a..919be0ec2662a 100644
--- a/lib/lsan/lsan_common.h
+++ b/lib/lsan/lsan_common.h
@@ -22,8 +22,23 @@
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
-#if (SANITIZER_LINUX && !SANITIZER_ANDROID) && (SANITIZER_WORDSIZE == 64) \
- && (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__))
+// LeakSanitizer relies on some Glibc internals (e.g. the TLS machinery), so
+// historically it has been supported on Linux only. Also, LSan works poorly
+// on 32-bit architectures, because their "small" (4-byte) pointers lead to a
+// high false-negative ratio on large leaks. But we still want it on some
+// 32-bit architectures (e.g. x86); see
+// https://github.com/google/sanitizers/issues/403.
+// To enable LeakSanitizer on a new architecture, one needs to implement the
+// internal_clone function and (probably) adjust the TLS machinery for that
+// architecture inside the sanitizer library.
+#if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \
+ (SANITIZER_WORDSIZE == 64) && \
+ (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__))
+#define CAN_SANITIZE_LEAKS 1
+#elif defined(__i386__) && \
+ (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC)
+#define CAN_SANITIZE_LEAKS 1
+#elif defined(__arm__) && \
+ SANITIZER_LINUX && !SANITIZER_ANDROID
#define CAN_SANITIZE_LEAKS 1
#else
#define CAN_SANITIZE_LEAKS 0
@@ -44,6 +59,8 @@ enum ChunkTag {
kIgnored = 3
};
+const u32 kInvalidTid = (u32) -1;
+
struct Flags {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "lsan_flags.inc"
@@ -107,6 +124,7 @@ void DoStopTheWorld(StopTheWorldCallback callback, void* argument);
void ScanRangeForPointers(uptr begin, uptr end,
Frontier *frontier,
const char *region_type, ChunkTag tag);
+void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier);
enum IgnoreObjectResult {
kIgnoreObjectSuccess,
@@ -117,6 +135,7 @@ enum IgnoreObjectResult {
// Functions called from the parent tool.
void InitCommonLsan();
void DoLeakCheck();
+void DisableCounterUnderflow();
bool DisabledInThisThread();
// Used to implement __lsan::ScopedDisabler.
@@ -129,13 +148,36 @@ struct ScopedInterceptorDisabler {
~ScopedInterceptorDisabler() { EnableInThisThread(); }
};
+// According to the Itanium C++ ABI, an array cookie is a single word
+// containing the size of the allocated array.
+static inline bool IsItaniumABIArrayCookie(uptr chunk_beg, uptr chunk_size,
+ uptr addr) {
+ return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
+ *reinterpret_cast<uptr *>(chunk_beg) == 0;
+}
+
+// According to the ARM C++ ABI, an array cookie consists of two words:
+// struct array_cookie {
+// std::size_t element_size; // element_size != 0
+// std::size_t element_count;
+// };
+static inline bool IsARMABIArrayCookie(uptr chunk_beg, uptr chunk_size,
+ uptr addr) {
+ return chunk_size == 2 * sizeof(uptr) && chunk_beg + chunk_size == addr &&
+ *reinterpret_cast<uptr *>(chunk_beg + sizeof(uptr)) == 0;
+}
+
// Special case for "new T[0]" where T is a type with DTOR.
-// new T[0] will allocate one word for the array size (0) and store a pointer
-// to the end of allocated chunk.
+// new T[0] will allocate a cookie (one or two words) for the array size (0)
+// and store a pointer to the end of allocated chunk. The actual cookie layout
+// varies between platforms according to their C++ ABI implementation.
inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
uptr addr) {
- return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
- *reinterpret_cast<uptr *>(chunk_beg) == 0;
+#if defined(__arm__)
+ return IsARMABIArrayCookie(chunk_beg, chunk_size, addr);
+#else
+ return IsItaniumABIArrayCookie(chunk_beg, chunk_size, addr);
+#endif
}
// The following must be implemented in the parent tool.
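For context on the cookie check: under the Itanium C++ ABI, new T[0] for a T with a destructor allocates one word for the element count (zero) and returns a pointer just past it, which is exactly the end of the chunk. A small illustration, assuming an Itanium-ABI platform:

    #include <cstdint>
    #include <cstdio>

    struct T { ~T() {} };  // Non-trivial destructor forces a cookie.

    int main() {
      T *p = new T[0];
      // The cookie is the word immediately before p; it stores the element
      // count, here 0 (peeking at it is purely for illustration).
      uintptr_t *cookie = reinterpret_cast<uintptr_t *>(p) - 1;
      printf("count stored in cookie: %lu\n", (unsigned long)*cookie);
      delete[] p;
    }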
diff --git a/lib/lsan/lsan_common_linux.cc b/lib/lsan/lsan_common_linux.cc
index f6154d8b97d18..0d1e998a5cfec 100644
--- a/lib/lsan/lsan_common_linux.cc
+++ b/lib/lsan/lsan_common_linux.cc
@@ -34,6 +34,17 @@ static bool IsLinker(const char* full_name) {
return LibraryNameIs(full_name, kLinkerName);
}
+__attribute__((tls_model("initial-exec")))
+THREADLOCAL int disable_counter;
+bool DisabledInThisThread() { return disable_counter > 0; }
+void DisableInThisThread() { disable_counter++; }
+void EnableInThisThread() {
+ if (disable_counter == 0) {
+ DisableCounterUnderflow();
+ }
+ disable_counter--;
+}
+
void InitializePlatformSpecificModules() {
ListOfModules modules;
modules.init();
@@ -67,20 +78,7 @@ static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
continue;
uptr begin = info->dlpi_addr + phdr->p_vaddr;
uptr end = begin + phdr->p_memsz;
- uptr allocator_begin = 0, allocator_end = 0;
- GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
- if (begin <= allocator_begin && allocator_begin < end) {
- CHECK_LE(allocator_begin, allocator_end);
- CHECK_LE(allocator_end, end);
- if (begin < allocator_begin)
- ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
- kReachable);
- if (allocator_end < end)
- ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL",
- kReachable);
- } else {
- ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
- }
+ ScanGlobalRange(begin, end, frontier);
}
return 0;
}
diff --git a/lib/lsan/lsan_common_mac.cc b/lib/lsan/lsan_common_mac.cc
new file mode 100644
index 0000000000000..022e73937895f
--- /dev/null
+++ b/lib/lsan/lsan_common_mac.cc
@@ -0,0 +1,126 @@
+//=-- lsan_common_mac.cc --------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Implementation of common leak checking functionality. Darwin-specific code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#include "lsan_common.h"
+
+#if CAN_SANITIZE_LEAKS && SANITIZER_MAC
+
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "lsan_allocator.h"
+
+#include <pthread.h>
+
+namespace __lsan {
+
+typedef struct {
+ int disable_counter;
+ u32 current_thread_id;
+ AllocatorCache cache;
+} thread_local_data_t;
+
+static pthread_key_t key;
+static pthread_once_t key_once = PTHREAD_ONCE_INIT;
+
+// The main thread's destructor requires the current thread id, so the value
+// can't be destroyed until it has been used and reset to kInvalidTid.
+void restore_tid_data(void *ptr) {
+ thread_local_data_t *data = (thread_local_data_t *)ptr;
+ if (data->current_thread_id != kInvalidTid)
+ pthread_setspecific(key, data);
+}
+
+static void make_tls_key() {
+ CHECK_EQ(pthread_key_create(&key, restore_tid_data), 0);
+}
+
+static thread_local_data_t *get_tls_val(bool alloc) {
+ pthread_once(&key_once, make_tls_key);
+
+ thread_local_data_t *ptr = (thread_local_data_t *)pthread_getspecific(key);
+ if (ptr == NULL && alloc) {
+ ptr = (thread_local_data_t *)InternalAlloc(sizeof(*ptr));
+ ptr->disable_counter = 0;
+ ptr->current_thread_id = kInvalidTid;
+ ptr->cache = AllocatorCache();
+ pthread_setspecific(key, ptr);
+ }
+
+ return ptr;
+}
+
+bool DisabledInThisThread() {
+ thread_local_data_t *data = get_tls_val(false);
+ return data ? data->disable_counter > 0 : false;
+}
+
+void DisableInThisThread() { ++get_tls_val(true)->disable_counter; }
+
+void EnableInThisThread() {
+ int *disable_counter = &get_tls_val(true)->disable_counter;
+ if (*disable_counter == 0) {
+ DisableCounterUnderflow();
+ }
+ --*disable_counter;
+}
+
+u32 GetCurrentThread() {
+ thread_local_data_t *data = get_tls_val(false);
+ CHECK(data);
+ return data->current_thread_id;
+}
+
+void SetCurrentThread(u32 tid) { get_tls_val(true)->current_thread_id = tid; }
+
+AllocatorCache *GetAllocatorCache() { return &get_tls_val(true)->cache; }
+
+// Required on Linux for initialization of TLS behavior, but should not be
+// required on Darwin.
+void InitializePlatformSpecificModules() {
+ if (flags()->use_tls) {
+ Report("use_tls=1 is not supported on Darwin.\n");
+ Die();
+ }
+}
+
+// Scans global variables for heap pointers.
+void ProcessGlobalRegions(Frontier *frontier) {
+ MemoryMappingLayout memory_mapping(false);
+ InternalMmapVector<LoadedModule> modules(/*initial_capacity*/ 128);
+ memory_mapping.DumpListOfModules(&modules);
+ for (uptr i = 0; i < modules.size(); ++i) {
+    // Even when global scanning is disabled, we still need to scan system
+    // libraries for stashed pointers, so only instrumented modules are
+    // skipped.
+ if (!flags()->use_globals && modules[i].instrumented()) continue;
+
+ for (const __sanitizer::LoadedModule::AddressRange &range :
+ modules[i].ranges()) {
+ if (range.executable) continue;
+
+ ScanGlobalRange(range.beg, range.end, frontier);
+ }
+ }
+}
+
+void ProcessPlatformSpecificAllocations(Frontier *frontier) {
+ CHECK(0 && "unimplemented");
+}
+
+void DoStopTheWorld(StopTheWorldCallback callback, void *argument) {
+ CHECK(0 && "unimplemented");
+}
+
+} // namespace __lsan
+
+#endif // CAN_SANITIZE_LEAKS && SANITIZER_MAC
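The pthread-specific-data pattern above deserves a closer look: pthread_once guards one-time key creation, and restore_tid_data defers destruction by re-installing the value while the thread id is still valid (POSIX re-runs key destructors for values reset to non-NULL, up to PTHREAD_DESTRUCTOR_ITERATIONS passes). A condensed, standalone sketch of that deferral trick:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_key_t key;
    static pthread_once_t once = PTHREAD_ONCE_INIT;

    static void destructor(void *ptr) {
      int *data = (int *)ptr;
      if (*data != -1) {
        // Still needed: consume the value, mark it invalid, and re-install
        // it so pthreads calls this destructor again on the next pass.
        printf("late use of thread value: %d\n", *data);
        *data = -1;
        pthread_setspecific(key, data);
        return;
      }
      free(data);  // Second pass: the value was invalidated, release it.
    }

    static void make_key() { pthread_key_create(&key, destructor); }

    static void *worker(void *) {
      pthread_once(&once, make_key);
      int *data = (int *)malloc(sizeof(int));
      *data = 42;  // "in use" at thread exit
      pthread_setspecific(key, data);
      return NULL;
    }

    int main() {
      pthread_t t;
      pthread_create(&t, NULL, worker, NULL);
      pthread_join(t, NULL);
    }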
diff --git a/lib/lsan/lsan_flags.inc b/lib/lsan/lsan_flags.inc
index e390e2ae5a1b6..8135bdcff01a3 100644
--- a/lib/lsan/lsan_flags.inc
+++ b/lib/lsan/lsan_flags.inc
@@ -30,7 +30,7 @@ LSAN_FLAG(bool, use_globals, true,
"Root set: include global variables (.data and .bss)")
LSAN_FLAG(bool, use_stacks, true, "Root set: include thread stacks")
LSAN_FLAG(bool, use_registers, true, "Root set: include thread registers")
-LSAN_FLAG(bool, use_tls, true,
+LSAN_FLAG(bool, use_tls, !SANITIZER_MAC,
"Root set: include TLS and thread-specific storage")
LSAN_FLAG(bool, use_root_regions, true,
"Root set: include regions added via __lsan_register_root_region().")
diff --git a/lib/lsan/lsan_interceptors.cc b/lib/lsan/lsan_interceptors.cc
index 12190175949fb..fe1f49bcdebaa 100644
--- a/lib/lsan/lsan_interceptors.cc
+++ b/lib/lsan/lsan_interceptors.cc
@@ -21,12 +21,15 @@
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_interceptors.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
+#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan.h"
#include "lsan_allocator.h"
#include "lsan_common.h"
#include "lsan_thread.h"
+#include <stddef.h>
+
using namespace __lsan;
extern "C" {
@@ -37,29 +40,22 @@ int pthread_key_create(unsigned *key, void (*destructor)(void* v));
int pthread_setspecific(unsigned key, const void *v);
}
-#define ENSURE_LSAN_INITED do { \
- CHECK(!lsan_init_is_running); \
- if (!lsan_inited) \
- __lsan_init(); \
-} while (0)
-
///// Malloc/free interceptors. /////
-const bool kAlwaysClearMemory = true;
-
namespace std {
struct nothrow_t;
}
+#if !SANITIZER_MAC
INTERCEPTOR(void*, malloc, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- return Allocate(stack, size, 1, kAlwaysClearMemory);
+ return lsan_malloc(size, stack);
}
INTERCEPTOR(void, free, void *p) {
ENSURE_LSAN_INITED;
- Deallocate(p);
+ lsan_free(p);
}
INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
@@ -77,28 +73,42 @@ INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return nullptr;
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- size *= nmemb;
- return Allocate(stack, size, 1, true);
+ return lsan_calloc(nmemb, size, stack);
}
INTERCEPTOR(void*, realloc, void *q, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- return Reallocate(stack, q, size, 1);
+ return lsan_realloc(q, size, stack);
+}
+
+INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ *memptr = lsan_memalign(alignment, size, stack);
+ // FIXME: Return ENOMEM if user requested more than max alloc size.
+ return 0;
+}
+
+INTERCEPTOR(void*, valloc, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_valloc(size, stack);
}
+#endif
#if SANITIZER_INTERCEPT_MEMALIGN
INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- return Allocate(stack, size, alignment, kAlwaysClearMemory);
+ return lsan_memalign(alignment, size, stack);
}
#define LSAN_MAYBE_INTERCEPT_MEMALIGN INTERCEPT_FUNCTION(memalign)
INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- void *res = Allocate(stack, size, alignment, kAlwaysClearMemory);
+ void *res = lsan_memalign(alignment, size, stack);
DTLS_on_libc_memalign(res, size);
return res;
}
@@ -108,32 +118,27 @@ INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) {
#define LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN
#endif // SANITIZER_INTERCEPT_MEMALIGN
+#if SANITIZER_INTERCEPT_ALIGNED_ALLOC
INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- return Allocate(stack, size, alignment, kAlwaysClearMemory);
-}
-
-INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
- ENSURE_LSAN_INITED;
- GET_STACK_TRACE_MALLOC;
- *memptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
- // FIXME: Return ENOMEM if user requested more than max alloc size.
- return 0;
-}
-
-INTERCEPTOR(void*, valloc, uptr size) {
- ENSURE_LSAN_INITED;
- GET_STACK_TRACE_MALLOC;
- if (size == 0)
- size = GetPageSizeCached();
- return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
+ return lsan_memalign(alignment, size, stack);
}
+#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC INTERCEPT_FUNCTION(aligned_alloc)
+#else
+#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC
+#endif
+#if SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE
INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
ENSURE_LSAN_INITED;
return GetMallocUsableSize(ptr);
}
+#define LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE \
+ INTERCEPT_FUNCTION(malloc_usable_size)
+#else
+#define LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE
+#endif
#if SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
struct fake_mallinfo {
@@ -186,13 +191,13 @@ INTERCEPTOR(void, cfree, void *p) ALIAS(WRAPPER_NAME(free));
return Allocate(stack, size, 1, kAlwaysClearMemory);
INTERCEPTOR_ATTRIBUTE
-void *operator new(uptr size) { OPERATOR_NEW_BODY; }
+void *operator new(size_t size) { OPERATOR_NEW_BODY; }
INTERCEPTOR_ATTRIBUTE
-void *operator new[](uptr size) { OPERATOR_NEW_BODY; }
+void *operator new[](size_t size) { OPERATOR_NEW_BODY; }
INTERCEPTOR_ATTRIBUTE
-void *operator new(uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
+void *operator new(size_t size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
INTERCEPTOR_ATTRIBUTE
-void *operator new[](uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
+void *operator new[](size_t size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
#define OPERATOR_DELETE_BODY \
ENSURE_LSAN_INITED; \
@@ -277,7 +282,8 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr,
res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p);
}
if (res == 0) {
- int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th, detached);
+ int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th,
+ IsStateDetached(detached));
CHECK_NE(tid, 0);
atomic_store(&p.tid, tid, memory_order_release);
while (atomic_load(&p.tid, memory_order_acquire) != 0)
@@ -307,11 +313,11 @@ void InitializeInterceptors() {
INTERCEPT_FUNCTION(realloc);
LSAN_MAYBE_INTERCEPT_MEMALIGN;
LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN;
- INTERCEPT_FUNCTION(aligned_alloc);
+ LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC;
INTERCEPT_FUNCTION(posix_memalign);
INTERCEPT_FUNCTION(valloc);
LSAN_MAYBE_INTERCEPT_PVALLOC;
- INTERCEPT_FUNCTION(malloc_usable_size);
+ LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE;
LSAN_MAYBE_INTERCEPT_MALLINFO;
LSAN_MAYBE_INTERCEPT_MALLOPT;
INTERCEPT_FUNCTION(pthread_create);
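For readers unfamiliar with the INTERCEPTOR machinery used throughout this file: conceptually, each interceptor is a wrapper that runs first and forwards to the original libc symbol via REAL(). A greatly simplified, hypothetical Linux-only sketch (the real sanitizer_common implementation additionally handles aliases, initialization order, and the dlsym re-entrancy this toy ignores):

    #define _GNU_SOURCE
    #include <dlfcn.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef void *(*malloc_fn)(size_t);

    // REAL(malloc): a lazily resolved pointer to the next malloc in
    // link order, i.e. libc's.
    static void *real_malloc(size_t size) {
      static malloc_fn fn;
      if (!fn) fn = (malloc_fn)dlsym(RTLD_NEXT, "malloc");
      return fn(size);
    }

    // With this object preloaded (e.g. via LD_PRELOAD), calls to malloc
    // land here first, mirroring what INTERCEPTOR(void*, malloc, ...) does.
    void *malloc(size_t size) {
      void *p = real_malloc(size);
      fprintf(stderr, "intercepted malloc(%zu) -> %p\n", size, p);
      return p;
    }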
diff --git a/lib/lsan/lsan_linux.cc b/lib/lsan/lsan_linux.cc
new file mode 100644
index 0000000000000..c9749c7456551
--- /dev/null
+++ b/lib/lsan/lsan_linux.cc
@@ -0,0 +1,33 @@
+//=-- lsan_linux.cc -------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer. Linux-specific code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if SANITIZER_LINUX
+
+#include "lsan_allocator.h"
+
+namespace __lsan {
+
+static THREADLOCAL u32 current_thread_tid = kInvalidTid;
+u32 GetCurrentThread() { return current_thread_tid; }
+void SetCurrentThread(u32 tid) { current_thread_tid = tid; }
+
+static THREADLOCAL AllocatorCache allocator_cache;
+AllocatorCache *GetAllocatorCache() { return &allocator_cache; }
+
+void ReplaceSystemMalloc() {}
+
+} // namespace __lsan
+
+#endif // SANITIZER_LINUX
diff --git a/lib/lsan/lsan_malloc_mac.cc b/lib/lsan/lsan_malloc_mac.cc
new file mode 100644
index 0000000000000..9c1dacc055bd2
--- /dev/null
+++ b/lib/lsan/lsan_malloc_mac.cc
@@ -0,0 +1,55 @@
+//===-- lsan_malloc_mac.cc ------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer (LSan), a memory leak detector.
+//
+// Mac-specific malloc interception.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "lsan.h"
+#include "lsan_allocator.h"
+#include "lsan_thread.h"
+
+using namespace __lsan;
+#define COMMON_MALLOC_ZONE_NAME "lsan"
+#define COMMON_MALLOC_ENTER() ENSURE_LSAN_INITED
+#define COMMON_MALLOC_SANITIZER_INITIALIZED lsan_inited
+#define COMMON_MALLOC_FORCE_LOCK()
+#define COMMON_MALLOC_FORCE_UNLOCK()
+#define COMMON_MALLOC_MEMALIGN(alignment, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_memalign(alignment, size, stack)
+#define COMMON_MALLOC_MALLOC(size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_malloc(size, stack)
+#define COMMON_MALLOC_REALLOC(ptr, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_realloc(ptr, size, stack)
+#define COMMON_MALLOC_CALLOC(count, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_calloc(count, size, stack)
+#define COMMON_MALLOC_VALLOC(size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_valloc(size, stack)
+#define COMMON_MALLOC_FREE(ptr) \
+ lsan_free(ptr)
+#define COMMON_MALLOC_SIZE(ptr) \
+ uptr size = lsan_mz_size(ptr)
+#define COMMON_MALLOC_FILL_STATS(zone, stats)
+#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
+ (void)zone_name; \
+ Report("mz_realloc(%p) -- attempting to realloc unallocated memory.\n", ptr);
+#define COMMON_MALLOC_NAMESPACE __lsan
+
+#include "sanitizer_common/sanitizer_malloc_mac.inc"
+
+#endif // SANITIZER_MAC
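Background for this file: Darwin's libmalloc dispatches through malloc_zone_t function tables rather than through individually interposable symbols, which is why LSan supplies a whole zone via sanitizer_malloc_mac.inc (driven by the COMMON_MALLOC_* macros above) instead of plain interceptors. A minimal sketch of the standard zone API from <malloc/malloc.h>, not LSan code:

    #include <malloc/malloc.h>
    #include <stdio.h>

    int main() {
      malloc_zone_t *zone = malloc_default_zone();
      printf("default zone name: %s\n", malloc_get_zone_name(zone));
      void *p = malloc_zone_malloc(zone, 64);  // allocate from that zone
      printf("zone size(p) = %zu\n", malloc_size(p));
      malloc_zone_free(zone, p);
    }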
diff --git a/lib/lsan/lsan_thread.cc b/lib/lsan/lsan_thread.cc
index 5dff4f7481064..09eeb9c249821 100644
--- a/lib/lsan/lsan_thread.cc
+++ b/lib/lsan/lsan_thread.cc
@@ -19,13 +19,11 @@
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan_allocator.h"
+#include "lsan_common.h"
namespace __lsan {
-const u32 kInvalidTid = (u32) -1;
-
static ThreadRegistry *thread_registry;
-static THREADLOCAL u32 current_thread_tid = kInvalidTid;
static ThreadContextBase *CreateThreadContext(u32 tid) {
void *mem = MmapOrDie(sizeof(ThreadContext), "ThreadContext");
@@ -41,14 +39,6 @@ void InitializeThreadRegistry() {
ThreadRegistry(CreateThreadContext, kMaxThreads, kThreadQuarantineSize);
}
-u32 GetCurrentThread() {
- return current_thread_tid;
-}
-
-void SetCurrentThread(u32 tid) {
- current_thread_tid = tid;
-}
-
ThreadContext::ThreadContext(int tid)
: ThreadContextBase(tid),
stack_begin_(0),
@@ -97,11 +87,12 @@ void ThreadStart(u32 tid, uptr os_id) {
args.tls_end = args.tls_begin + tls_size;
GetAllocatorCacheRange(&args.cache_begin, &args.cache_end);
args.dtls = DTLS_Get();
- thread_registry->StartThread(tid, os_id, &args);
+ thread_registry->StartThread(tid, os_id, /*workerthread*/ false, &args);
}
void ThreadFinish() {
thread_registry->FinishThread(GetCurrentThread());
+ SetCurrentThread(kInvalidTid);
}
ThreadContext *CurrentThreadContext() {
diff --git a/lib/lsan/weak_symbols.txt b/lib/lsan/weak_symbols.txt
new file mode 100644
index 0000000000000..da4f994da8653
--- /dev/null
+++ b/lib/lsan/weak_symbols.txt
@@ -0,0 +1,2 @@
+___lsan_default_suppressions
+___lsan_is_turned_off
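These are the Mach-O names (note the extra leading underscore that Mach-O prepends to C identifiers) of LSan's user-overridable hooks, exported weakly so a client binary may define them. A sketch of the override pattern the weak exports enable, using the public functions declared in <sanitizer/lsan_interface.h>:

    // Defined in the user's program; the runtime checks the weak reference
    // before calling either hook.
    extern "C" int __lsan_is_turned_off() {
      // Returning nonzero asks LSan to skip the leak check at exit.
      return 0;
    }

    extern "C" const char *__lsan_default_suppressions() {
      // One suppression per line, "leak:<pattern>".
      return "leak:libdispatch";
    }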