Diffstat (limited to 'lib/lsan')
-rw-r--r--  lib/lsan/.clang-format         |  1
-rw-r--r--  lib/lsan/CMakeLists.txt        | 14
-rw-r--r--  lib/lsan/lsan.cc               |  2
-rw-r--r--  lib/lsan/lsan_allocator.cc     | 74
-rw-r--r--  lib/lsan/lsan_allocator.h      |  4
-rw-r--r--  lib/lsan/lsan_common.cc        | 52
-rw-r--r--  lib/lsan/lsan_common.h         |  7
-rw-r--r--  lib/lsan/lsan_common_mac.cc    | 12
-rw-r--r--  lib/lsan/lsan_interceptors.cc  | 57
-rw-r--r--  lib/lsan/lsan_malloc_mac.cc    |  3
-rw-r--r--  lib/lsan/lsan_thread.cc        |  5
11 files changed, 183 insertions, 48 deletions
diff --git a/lib/lsan/.clang-format b/lib/lsan/.clang-format
index f6cb8ad931f5..560308c91dee 100644
--- a/lib/lsan/.clang-format
+++ b/lib/lsan/.clang-format
@@ -1 +1,2 @@
 BasedOnStyle: Google
+AllowShortIfStatementsOnASingleLine: false
diff --git a/lib/lsan/CMakeLists.txt b/lib/lsan/CMakeLists.txt
index 60da3e186871..34f686135ac4 100644
--- a/lib/lsan/CMakeLists.txt
+++ b/lib/lsan/CMakeLists.txt
@@ -18,12 +18,20 @@ set(LSAN_SOURCES
   lsan_preinit.cc
   lsan_thread.cc)
 
+set(LSAN_HEADERS
+  lsan.h
+  lsan_allocator.h
+  lsan_common.h
+  lsan_flags.inc
+  lsan_thread.h)
+
 set(LSAN_SRC_DIR ${CMAKE_CURRENT_SOURCE_DIR})
 
 add_compiler_rt_object_libraries(RTLSanCommon
   OS ${SANITIZER_COMMON_SUPPORTED_OS}
   ARCHS ${LSAN_COMMON_SUPPORTED_ARCH}
   SOURCES ${LSAN_COMMON_SOURCES}
+  ADDITIONAL_HEADERS ${LSAN_HEADERS}
   CFLAGS ${LSAN_CFLAGS})
 
 if(COMPILER_RT_HAS_LSAN)
@@ -39,10 +47,13 @@ if(COMPILER_RT_HAS_LSAN)
     OS ${SANITIZER_COMMON_SUPPORTED_OS}
     ARCHS ${LSAN_SUPPORTED_ARCH}
     SOURCES ${LSAN_SOURCES}
+    ADDITIONAL_HEADERS ${LSAN_HEADERS}
     OBJECT_LIBS RTLSanCommon
                 RTInterception
                 RTSanitizerCommon
                 RTSanitizerCommonLibc
+                RTSanitizerCommonCoverage
+                RTSanitizerCommonSymbolizer
     CFLAGS ${LSAN_CFLAGS}
     LINK_FLAGS ${SANITIZER_COMMON_LINK_FLAGS} ${WEAK_SYMBOL_LINK_FLAGS}
     LINK_LIBS ${LSAN_LINK_LIBS}
@@ -56,7 +67,10 @@ if(COMPILER_RT_HAS_LSAN)
       $<TARGET_OBJECTS:RTInterception.${arch}>
       $<TARGET_OBJECTS:RTSanitizerCommon.${arch}>
      $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}>
+      $<TARGET_OBJECTS:RTSanitizerCommonCoverage.${arch}>
+      $<TARGET_OBJECTS:RTSanitizerCommonSymbolizer.${arch}>
       $<TARGET_OBJECTS:RTLSanCommon.${arch}>
+      ADDITIONAL_HEADERS ${LSAN_HEADERS}
       CFLAGS ${LSAN_CFLAGS}
       PARENT_TARGET lsan)
   endforeach()
diff --git a/lib/lsan/lsan.cc b/lib/lsan/lsan.cc
index a9f7e399e14c..93bced0459c2 100644
--- a/lib/lsan/lsan.cc
+++ b/lib/lsan/lsan.cc
@@ -66,6 +66,8 @@ static void InitializeFlags() {
 
   if (Verbosity()) ReportUnrecognizedFlags();
   if (common_flags()->help) parser.PrintFlagDescriptions();
+
+  __sanitizer_set_report_path(common_flags()->log_path);
 }
 
 static void OnStackUnwind(const SignalContext &sig, const void *,
diff --git a/lib/lsan/lsan_allocator.cc b/lib/lsan/lsan_allocator.cc
index 2df58b44f6b8..c58c3548002f 100644
--- a/lib/lsan/lsan_allocator.cc
+++ b/lib/lsan/lsan_allocator.cc
@@ -17,6 +17,7 @@
 #include "sanitizer_common/sanitizer_allocator.h"
 #include "sanitizer_common/sanitizer_allocator_checks.h"
 #include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
 #include "sanitizer_common/sanitizer_errno.h"
 #include "sanitizer_common/sanitizer_internal_defs.h"
 #include "sanitizer_common/sanitizer_stackdepot.h"
@@ -70,15 +71,27 @@ static void RegisterDeallocation(void *p) {
   atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
 }
 
+static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) {
+  if (AllocatorMayReturnNull()) {
+    Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size);
+    return nullptr;
+  }
+  ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, &stack);
+}
+
 void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
                bool cleared) {
   if (size == 0)
     size = 1;
-  if (size > kMaxAllowedMallocSize) {
-    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
-    return Allocator::FailureHandler::OnBadRequest();
-  }
+  if (size > kMaxAllowedMallocSize)
+    return ReportAllocationSizeTooBig(size, stack);
   void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
+  if (UNLIKELY(!p)) {
+    SetAllocatorOutOfMemory();
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    ReportOutOfMemory(size, &stack);
+  }
   // Do not rely on the allocator to clear the memory (it's slow).
   if (cleared && allocator.FromPrimary(p))
     memset(p, 0, size);
@@ -89,8 +102,11 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
 }
 
 static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
-  if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
-    return Allocator::FailureHandler::OnBadRequest();
+  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    ReportCallocOverflow(nmemb, size, &stack);
+  }
   size *= nmemb;
   return Allocate(stack, size, 1, true);
 }
@@ -106,9 +122,8 @@ void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                  uptr alignment) {
   RegisterDeallocation(p);
   if (new_size > kMaxAllowedMallocSize) {
-    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
     allocator.Deallocate(GetAllocatorCache(), p);
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReportAllocationSizeTooBig(new_size, stack);
   }
   p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
   RegisterAllocation(stack, p, new_size);
@@ -126,10 +141,38 @@ uptr GetMallocUsableSize(const void *p) {
   return m->requested_size;
 }
 
+int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
+                        const StackTrace &stack) {
+  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
+    if (AllocatorMayReturnNull())
+      return errno_EINVAL;
+    ReportInvalidPosixMemalignAlignment(alignment, &stack);
+  }
+  void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
+  if (UNLIKELY(!ptr))
+    // OOM error is already taken care of by Allocate.
+    return errno_ENOMEM;
+  CHECK(IsAligned((uptr)ptr, alignment));
+  *memptr = ptr;
+  return 0;
+}
+
+void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) {
+  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
+    errno = errno_EINVAL;
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
+  }
+  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
+}
+
 void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
   if (UNLIKELY(!IsPowerOfTwo(alignment))) {
     errno = errno_EINVAL;
-    return Allocator::FailureHandler::OnBadRequest();
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    ReportInvalidAllocationAlignment(alignment, &stack);
   }
   return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
 }
@@ -155,6 +198,19 @@ void *lsan_valloc(uptr size, const StackTrace &stack) {
       Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
 }
 
+void *lsan_pvalloc(uptr size, const StackTrace &stack) {
+  uptr PageSize = GetPageSizeCached();
+  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
+    errno = errno_ENOMEM;
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    ReportPvallocOverflow(size, &stack);
+  }
+  // pvalloc(0) should allocate one page.
+  size = size ? RoundUpTo(size, PageSize) : PageSize;
+  return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory));
+}
+
 uptr lsan_mz_size(const void *p) {
   return GetMallocUsableSize(p);
 }
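Every failure path the lsan_allocator.cc hunks above introduce has the same shape: consult allocator_may_return_null first, return nullptr gracefully if allowed, and only report-and-die otherwise. A minimal standalone sketch of that pattern follows; the hard-coded flag and HandleAllocationSizeTooBig are simplified stand-ins, not the actual sanitizer_common implementation.

#include <cstdio>
#include <cstdlib>

// Stand-in for the allocator_may_return_null runtime flag (set via
// LSAN_OPTIONS in the real runtime; hard-coded here for the demo).
static bool allocator_may_return_null = true;

static void *HandleAllocationSizeTooBig(size_t size) {
  if (allocator_may_return_null) {
    // Graceful failure: the caller sees an ordinary malloc failure.
    fprintf(stderr, "WARNING: failed to allocate 0x%zx bytes\n", size);
    return nullptr;
  }
  // Hard failure: report and abort, as the Report*() helpers do.
  fprintf(stderr, "ERROR: allocation size too big: 0x%zx bytes\n", size);
  abort();
}

int main() {
  const size_t kMaxAllowedMallocSize = size_t(1) << 40;
  size_t request = kMaxAllowedMallocSize + 1;
  void *p = request > kMaxAllowedMallocSize
                ? HandleAllocationSizeTooBig(request)
                : malloc(request);
  printf("returned %p\n", p);
  return 0;
}

Centralizing the decision in one helper is what lets the diff delete the scattered Allocator::FailureHandler::OnBadRequest() calls.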
diff --git a/lib/lsan/lsan_allocator.h b/lib/lsan/lsan_allocator.h
index 4006f7929269..7c70bb6d9766 100644
--- a/lib/lsan/lsan_allocator.h
+++ b/lib/lsan/lsan_allocator.h
@@ -90,12 +90,16 @@ typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
 
 AllocatorCache *GetAllocatorCache();
 
+int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
+                        const StackTrace &stack);
+void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack);
 void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack);
 void *lsan_malloc(uptr size, const StackTrace &stack);
 void lsan_free(void *p);
 void *lsan_realloc(void *p, uptr size, const StackTrace &stack);
 void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack);
 void *lsan_valloc(uptr size, const StackTrace &stack);
+void *lsan_pvalloc(uptr size, const StackTrace &stack);
 uptr lsan_mz_size(const void *p);
 
 }  // namespace __lsan
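The alignment validation behind the new lsan_posix_memalign entry point boils down to the POSIX rule that the alignment be a power of two and a multiple of sizeof(void *). The sketch below mirrors CheckPosixMemalignAlignment from sanitizer_common/sanitizer_allocator_checks.h as a standalone copy for illustration:

#include <cstdint>
#include <cstdio>

static inline bool IsPowerOfTwo(uintptr_t x) {
  return x != 0 && (x & (x - 1)) == 0;
}

// POSIX: alignment must be a power of two and a multiple of sizeof(void *).
static inline bool CheckPosixMemalignAlignment(uintptr_t alignment) {
  return IsPowerOfTwo(alignment) && (alignment % sizeof(void *)) == 0;
}

int main() {
  for (unsigned long long a : {4ULL, 8ULL, 16ULL, 24ULL, 32ULL})
    printf("alignment %llu -> %s\n", a,
           CheckPosixMemalignAlignment((uintptr_t)a) ? "ok" : "EINVAL");
  return 0;
}

On LP64 targets this accepts 8, 16, and 32 but rejects 4 (not a multiple of sizeof(void *)) and 24 (not a power of two), which is exactly the case the old FIXME in the posix_memalign interceptor never handled.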
diff --git a/lib/lsan/lsan_common.cc b/lib/lsan/lsan_common.cc
index 69ffda539a26..012a673c3b25 100644
--- a/lib/lsan/lsan_common.cc
+++ b/lib/lsan/lsan_common.cc
@@ -15,14 +15,15 @@
 #include "lsan_common.h"
 
 #include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_flags.h"
 #include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_flags.h"
 #include "sanitizer_common/sanitizer_placement_new.h"
 #include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_report_decorator.h"
 #include "sanitizer_common/sanitizer_stackdepot.h"
 #include "sanitizer_common/sanitizer_stacktrace.h"
 #include "sanitizer_common/sanitizer_suppressions.h"
-#include "sanitizer_common/sanitizer_report_decorator.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
 #include "sanitizer_common/sanitizer_tls_get_addr.h"
 
 #if CAN_SANITIZE_LEAKS
@@ -99,12 +100,14 @@ static SuppressionContext *GetSuppressionContext() {
 
 static InternalMmapVector<RootRegion> *root_regions;
 
+static uptr initialized_for_pid;
+
 InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; }
 
 void InitializeRootRegions() {
   CHECK(!root_regions);
   ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
-  root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
+  root_regions = new (placeholder) InternalMmapVector<RootRegion>();  // NOLINT
 }
 
 const char *MaybeCallLsanDefaultOptions() {
@@ -112,6 +115,7 @@ const char *MaybeCallLsanDefaultOptions() {
 }
 
 void InitCommonLsan() {
+  initialized_for_pid = internal_getpid();
   InitializeRootRegions();
   if (common_flags()->detect_leaks) {
     // Initialization which can fail or print warnings should only be done if
@@ -214,9 +218,10 @@ void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
 // Scans thread data (stacks and TLS) for heap pointers.
 static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                            Frontier *frontier) {
-  InternalScopedBuffer<uptr> registers(suspended_threads.RegisterCount());
+  InternalMmapVector<uptr> registers(suspended_threads.RegisterCount());
   uptr registers_begin = reinterpret_cast<uptr>(registers.data());
-  uptr registers_end = registers_begin + registers.size();
+  uptr registers_end =
+      reinterpret_cast<uptr>(registers.data() + registers.size());
   for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
     tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
     LOG_THREADS("Processing thread %d.\n", os_id);
@@ -444,7 +449,7 @@ void ProcessPC(Frontier *frontier) {
 // Sets the appropriate tag on each chunk.
 static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
   // Holds the flood fill frontier.
-  Frontier frontier(1);
+  Frontier frontier;
   ForEachChunk(CollectIgnoredCb, &frontier);
   ProcessGlobalRegions(&frontier);
   ProcessThreads(suspended_threads, &frontier);
@@ -506,7 +511,7 @@ static void CollectLeaksCb(uptr chunk, void *arg) {
 }
 
 static void PrintMatchedSuppressions() {
-  InternalMmapVector<Suppression *> matched(1);
+  InternalMmapVector<Suppression *> matched;
   GetSuppressionContext()->GetMatched(&matched);
   if (!matched.size()) return;
@@ -525,11 +530,36 @@ struct CheckForLeaksParam {
   LeakReport leak_report;
 };
 
+static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
+  const InternalMmapVector<tid_t> &suspended_threads =
+      *(const InternalMmapVector<tid_t> *)arg;
+  if (tctx->status == ThreadStatusRunning) {
+    uptr i = InternalLowerBound(suspended_threads, 0, suspended_threads.size(),
+                                tctx->os_id, CompareLess<int>());
+    if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
+      Report("Running thread %d was not suspended. False leaks are possible.\n",
+             tctx->os_id);
+  };
+}
+
+static void ReportUnsuspendedThreads(
+    const SuspendedThreadsList &suspended_threads) {
+  InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
+  for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
+    threads[i] = suspended_threads.GetThreadID(i);
+
+  Sort(threads.data(), threads.size());
+
+  GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
+      &ReportIfNotSuspended, &threads);
+}
+
 static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                   void *arg) {
   CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
   CHECK(param);
   CHECK(!param->success);
+  ReportUnsuspendedThreads(suspended_threads);
   ClassifyAllChunks(suspended_threads);
   ForEachChunk(CollectLeaksCb, &param->leak_report);
   // Clean up for subsequent leak checks. This assumes we did not overwrite any
@@ -541,6 +571,12 @@ static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
 static bool CheckForLeaks() {
   if (&__lsan_is_turned_off && __lsan_is_turned_off())
     return false;
+  if (initialized_for_pid != internal_getpid()) {
+    // If process was forked and it had threads we fail to detect references
+    // from other threads.
+    Report("WARNING: LeakSanitizer is disabled in forked process.\n");
+    return false;
+  }
   EnsureMainThreadIDIsCorrect();
   CheckForLeaksParam param;
   param.success = false;
@@ -684,7 +720,7 @@ void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
   uptr unsuppressed_count = UnsuppressedLeakCount();
   if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
     Printf("The %zu top leak(s):\n", num_leaks_to_report);
-  InternalSort(&leaks_, leaks_.size(), LeakComparator);
+  Sort(leaks_.data(), leaks_.size(), &LeakComparator);
   uptr leaks_reported = 0;
   for (uptr i = 0; i < leaks_.size(); i++) {
     if (leaks_[i].is_suppressed) continue;
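ReportUnsuspendedThreads above relies on a sorted-vector membership test: collect the suspended thread ids once, sort them, then answer each "was this thread suspended?" query with a binary search. The same logic in standard C++, with std::sort and std::lower_bound standing in for the runtime's internal Sort and InternalLowerBound helpers:

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

using tid_t = uint64_t;

// O(log n) membership test against a sorted id list.
static bool WasSuspended(const std::vector<tid_t> &sorted_tids, tid_t os_id) {
  auto it = std::lower_bound(sorted_tids.begin(), sorted_tids.end(), os_id);
  return it != sorted_tids.end() && *it == os_id;
}

int main() {
  std::vector<tid_t> suspended = {4242, 17, 993};
  std::sort(suspended.begin(), suspended.end());  // sort once...
  for (tid_t running : {17, 25, 993})             // ...query many times
    printf("thread %llu %s\n", (unsigned long long)running,
           WasSuspended(suspended, running) ? "was suspended"
                                            : "was NOT suspended");
  return 0;
}

A thread found running but not in the suspended list is exactly the case where the scan may miss live references, hence the "False leaks are possible" warning rather than a hard error.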
diff --git a/lib/lsan/lsan_common.h b/lib/lsan/lsan_common.h
index f3863309d893..1d1e1e462435 100644
--- a/lib/lsan/lsan_common.h
+++ b/lib/lsan/lsan_common.h
@@ -47,6 +47,7 @@
 namespace __sanitizer {
 class FlagParser;
+class ThreadRegistry;
 struct DTLS;
 }
 
 namespace __lsan {
@@ -95,7 +96,7 @@ struct LeakedObject {
 // Aggregates leaks by stack trace prefix.
 class LeakReport {
  public:
-  LeakReport() : next_id_(0), leaks_(1), leaked_objects_(1) {}
+  LeakReport() {}
   void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size,
                       ChunkTag tag);
   void ReportTopLeaks(uptr max_leaks);
@@ -103,12 +104,11 @@ class LeakReport {
   void ApplySuppressions();
   uptr UnsuppressedLeakCount();
 
-
  private:
   void PrintReportForLeak(uptr index);
   void PrintLeakedObjectsForLeak(uptr index);
 
-  u32 next_id_;
+  u32 next_id_ = 0;
   InternalMmapVector<Leak> leaks_;
   InternalMmapVector<LeakedObject> leaked_objects_;
 };
@@ -205,6 +205,7 @@ bool WordIsPoisoned(uptr addr);
 // Wrappers for ThreadRegistry access.
 void LockThreadRegistry();
 void UnlockThreadRegistry();
+ThreadRegistry *GetThreadRegistryLocked();
 bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                            uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                            uptr *cache_end, DTLS **dtls);
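The LeakReport hunk swaps a hand-written constructor initializer list for an in-class default member initializer, letting the vectors default-construct. A hypothetical mirror of the change, with std::vector standing in for InternalMmapVector (note that for InternalMmapVector the removed "(1)" argument was an initial capacity; for std::vector it would be a size, so the analogy is approximate):

#include <vector>

class LeakReportLike {
 public:
  LeakReportLike() {}  // members are initialized by the defaults below

 private:
  unsigned next_id_ = 0;             // replaces ": next_id_(0)"
  std::vector<int> leaks_;           // default-constructed, no forced capacity
  std::vector<int> leaked_objects_;  // likewise
};

int main() {
  LeakReportLike report;
  (void)report;
  return 0;
}

The same cleanup motivates the Frontier, matched, and root_regions changes in lsan_common.cc: the containers no longer take a dummy capacity of 1.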
+ Report("WARNING: LeakSanitizer is disabled in forked process.\n"); + return false; + } EnsureMainThreadIDIsCorrect(); CheckForLeaksParam param; param.success = false; @@ -684,7 +720,7 @@ void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) { uptr unsuppressed_count = UnsuppressedLeakCount(); if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count) Printf("The %zu top leak(s):\n", num_leaks_to_report); - InternalSort(&leaks_, leaks_.size(), LeakComparator); + Sort(leaks_.data(), leaks_.size(), &LeakComparator); uptr leaks_reported = 0; for (uptr i = 0; i < leaks_.size(); i++) { if (leaks_[i].is_suppressed) continue; diff --git a/lib/lsan/lsan_common.h b/lib/lsan/lsan_common.h index f3863309d893..1d1e1e462435 100644 --- a/lib/lsan/lsan_common.h +++ b/lib/lsan/lsan_common.h @@ -47,6 +47,7 @@ namespace __sanitizer { class FlagParser; +class ThreadRegistry; struct DTLS; } @@ -95,7 +96,7 @@ struct LeakedObject { // Aggregates leaks by stack trace prefix. class LeakReport { public: - LeakReport() : next_id_(0), leaks_(1), leaked_objects_(1) {} + LeakReport() {} void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size, ChunkTag tag); void ReportTopLeaks(uptr max_leaks); @@ -103,12 +104,11 @@ class LeakReport { void ApplySuppressions(); uptr UnsuppressedLeakCount(); - private: void PrintReportForLeak(uptr index); void PrintLeakedObjectsForLeak(uptr index); - u32 next_id_; + u32 next_id_ = 0; InternalMmapVector<Leak> leaks_; InternalMmapVector<LeakedObject> leaked_objects_; }; @@ -205,6 +205,7 @@ bool WordIsPoisoned(uptr addr); // Wrappers for ThreadRegistry access. void LockThreadRegistry(); void UnlockThreadRegistry(); +ThreadRegistry *GetThreadRegistryLocked(); bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end, uptr *tls_begin, uptr *tls_end, uptr *cache_begin, uptr *cache_end, DTLS **dtls); diff --git a/lib/lsan/lsan_common_mac.cc b/lib/lsan/lsan_common_mac.cc index ac27c7af6e35..2508c1dbd873 100644 --- a/lib/lsan/lsan_common_mac.cc +++ b/lib/lsan/lsan_common_mac.cc @@ -24,6 +24,13 @@ #include <mach/mach.h> +// Only introduced in Mac OS X 10.9. +#ifdef VM_MEMORY_OS_ALLOC_ONCE +static const int kSanitizerVmMemoryOsAllocOnce = VM_MEMORY_OS_ALLOC_ONCE; +#else +static const int kSanitizerVmMemoryOsAllocOnce = 73; +#endif + namespace __lsan { typedef struct { @@ -112,7 +119,8 @@ void ProcessGlobalRegions(Frontier *frontier) { for (auto name : kSkippedSecNames) CHECK(ARRAY_SIZE(name) < kMaxSegName); MemoryMappingLayout memory_mapping(false); - InternalMmapVector<LoadedModule> modules(/*initial_capacity*/ 128); + InternalMmapVector<LoadedModule> modules; + modules.reserve(128); memory_mapping.DumpListOfModules(&modules); for (uptr i = 0; i < modules.size(); ++i) { // Even when global scanning is disabled, we still need to scan @@ -157,7 +165,7 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) { // libxpc stashes some pointers in the Kernel Alloc Once page, // make sure not to report those as leaks. 
diff --git a/lib/lsan/lsan_interceptors.cc b/lib/lsan/lsan_interceptors.cc
index b3e73e3896d5..fde52e496164 100644
--- a/lib/lsan/lsan_interceptors.cc
+++ b/lib/lsan/lsan_interceptors.cc
@@ -14,6 +14,7 @@
 
 #include "interception/interception.h"
 #include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
 #include "sanitizer_common/sanitizer_atomic.h"
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_flags.h"
@@ -86,9 +87,7 @@ INTERCEPTOR(void*, realloc, void *q, uptr size) {
 INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
   ENSURE_LSAN_INITED;
   GET_STACK_TRACE_MALLOC;
-  *memptr = lsan_memalign(alignment, size, stack);
-  // FIXME: Return ENOMEM if user requested more than max alloc size.
-  return 0;
+  return lsan_posix_memalign(memptr, alignment, size, stack);
 }
 
 INTERCEPTOR(void*, valloc, uptr size) {
@@ -123,7 +122,7 @@ INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) {
 INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) {
   ENSURE_LSAN_INITED;
   GET_STACK_TRACE_MALLOC;
-  return lsan_memalign(alignment, size, stack);
+  return lsan_aligned_alloc(alignment, size, stack);
 }
 #define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC INTERCEPT_FUNCTION(aligned_alloc)
 #else
@@ -166,13 +165,7 @@ INTERCEPTOR(int, mallopt, int cmd, int value) {
 INTERCEPTOR(void*, pvalloc, uptr size) {
   ENSURE_LSAN_INITED;
   GET_STACK_TRACE_MALLOC;
-  uptr PageSize = GetPageSizeCached();
-  size = RoundUpTo(size, PageSize);
-  if (size == 0) {
-    // pvalloc(0) should allocate one page.
-    size = PageSize;
-  }
-  return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
+  return lsan_pvalloc(size, stack);
 }
 #define LSAN_MAYBE_INTERCEPT_PVALLOC INTERCEPT_FUNCTION(pvalloc)
 #else
@@ -202,21 +195,21 @@ INTERCEPTOR(int, mprobe, void *ptr) {
 
 // TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
-#define OPERATOR_NEW_BODY(nothrow) \
-  ENSURE_LSAN_INITED; \
-  GET_STACK_TRACE_MALLOC; \
-  void *res = lsan_malloc(size, stack); \
-  if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM(); \
+#define OPERATOR_NEW_BODY(nothrow)\
+  ENSURE_LSAN_INITED;\
+  GET_STACK_TRACE_MALLOC;\
+  void *res = lsan_malloc(size, stack);\
+  if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
   return res;
-#define OPERATOR_NEW_BODY_ALIGN(nothrow) \
-  ENSURE_LSAN_INITED; \
-  GET_STACK_TRACE_MALLOC; \
-  void *res = lsan_memalign((uptr)align, size, stack); \
-  if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM(); \
+#define OPERATOR_NEW_BODY_ALIGN(nothrow)\
+  ENSURE_LSAN_INITED;\
+  GET_STACK_TRACE_MALLOC;\
+  void *res = lsan_memalign((uptr)align, size, stack);\
+  if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
   return res;
 
-#define OPERATOR_DELETE_BODY \
-  ENSURE_LSAN_INITED; \
+#define OPERATOR_DELETE_BODY\
+  ENSURE_LSAN_INITED;\
   lsan_free(ptr);
 
 // On OS X it's not enough to just provide our own 'operator new' and
@@ -309,7 +302,7 @@ INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
 
 ///// Thread initialization and finalization. /////
 
-#if !SANITIZER_NETBSD
+#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD
 static unsigned g_thread_finalize_key;
 
 static void thread_finalize(void *v) {
@@ -336,6 +329,17 @@ INTERCEPTOR(void, _lwp_exit) {
 #define LSAN_MAYBE_INTERCEPT__LWP_EXIT
 #endif
 
+#if SANITIZER_INTERCEPT_THR_EXIT
+INTERCEPTOR(void, thr_exit, tid_t *state) {
+  ENSURE_LSAN_INITED;
+  ThreadFinish();
+  REAL(thr_exit)(state);
+}
+#define LSAN_MAYBE_INTERCEPT_THR_EXIT INTERCEPT_FUNCTION(thr_exit)
+#else
+#define LSAN_MAYBE_INTERCEPT_THR_EXIT
+#endif
+
 struct ThreadParam {
   void *(*callback)(void *arg);
   void *param;
@@ -348,7 +352,7 @@ extern "C" void *__lsan_thread_start_func(void *arg) {
   void *param = p->param;
   // Wait until the last iteration to maximize the chance that we are the last
   // destructor to run.
-#if !SANITIZER_NETBSD
+#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD
   if (pthread_setspecific(g_thread_finalize_key,
                           (void*)GetPthreadDestructorIterations())) {
     Report("LeakSanitizer: failed to set thread key.\n");
@@ -443,8 +447,9 @@ void InitializeInterceptors() {
   INTERCEPT_FUNCTION(_exit);
 
   LSAN_MAYBE_INTERCEPT__LWP_EXIT;
+  LSAN_MAYBE_INTERCEPT_THR_EXIT;
 
-#if !SANITIZER_NETBSD
+#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD
   if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) {
     Report("LeakSanitizer: failed to create thread key.\n");
     Die();
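The g_thread_finalize_key machinery that these hunks now compile out on FreeBSD (in favor of intercepting thr_exit directly) is built on a standard POSIX facility: a pthread_key destructor runs when a thread exits with a non-null key value. A minimal standalone demo of just that mechanism, with hypothetical names (compile with -lpthread):

#include <cstdio>
#include <pthread.h>

static pthread_key_t finalize_key;

// Runs automatically when a thread that set a non-null value exits.
static void thread_finalize(void *) {
  puts("thread is finishing; LSan would call ThreadFinish() here");
}

static void *worker(void *) {
  // A non-null value is required, or the destructor is skipped.
  pthread_setspecific(finalize_key, (void *)1);
  return nullptr;
}

int main() {
  pthread_key_create(&finalize_key, &thread_finalize);
  pthread_t t;
  pthread_create(&t, nullptr, &worker, nullptr);
  pthread_join(t, nullptr);
  return 0;
}

An explicit exit interceptor like thr_exit (or NetBSD's _lwp_exit) is more reliable than key destructors on platforms where libc's TLS teardown order is awkward, which is why the diff prefers it where SANITIZER_INTERCEPT_THR_EXIT is available.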
diff --git a/lib/lsan/lsan_malloc_mac.cc b/lib/lsan/lsan_malloc_mac.cc
index 9c1dacc055bd..94ffb6d02539 100644
--- a/lib/lsan/lsan_malloc_mac.cc
+++ b/lib/lsan/lsan_malloc_mac.cc
@@ -37,6 +37,9 @@ using namespace __lsan;
 #define COMMON_MALLOC_CALLOC(count, size) \
   GET_STACK_TRACE_MALLOC; \
   void *p = lsan_calloc(count, size, stack)
+#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \
+  GET_STACK_TRACE_MALLOC; \
+  int res = lsan_posix_memalign(memptr, alignment, size, stack)
 #define COMMON_MALLOC_VALLOC(size) \
   GET_STACK_TRACE_MALLOC; \
   void *p = lsan_valloc(size, stack)
diff --git a/lib/lsan/lsan_thread.cc b/lib/lsan/lsan_thread.cc
index 4404c8cc51d2..a25aff379961 100644
--- a/lib/lsan/lsan_thread.cc
+++ b/lib/lsan/lsan_thread.cc
@@ -155,4 +155,9 @@ void UnlockThreadRegistry() {
   thread_registry->Unlock();
 }
 
+ThreadRegistry *GetThreadRegistryLocked() {
+  thread_registry->CheckLocked();
+  return thread_registry;
+}
+
 }  // namespace __lsan
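GetThreadRegistryLocked pairs an accessor with a held-lock assertion, so a caller that forgot LockThreadRegistry fails loudly instead of racing. A sketch of the same idiom using std::mutex and a manually tracked flag (the runtime's mutex provides CheckLocked directly; std::mutex offers no such query, so the flag is an assumption of this demo):

#include <cassert>
#include <mutex>

struct ThreadRegistryLike {
  std::mutex mu;
  bool locked = false;  // tracked by hand purely for the assertion

  void Lock() { mu.lock(); locked = true; }
  void Unlock() { locked = false; mu.unlock(); }
  void CheckLocked() const { assert(locked && "registry lock must be held"); }
};

static ThreadRegistryLike g_registry;

// Accessor that enforces its own locking contract.
ThreadRegistryLike *GetThreadRegistryLocked() {
  g_registry.CheckLocked();
  return &g_registry;
}

int main() {
  g_registry.Lock();
  GetThreadRegistryLocked();  // fine: lock is held
  g_registry.Unlock();
  return 0;
}

This is what lets ReportUnsuspendedThreads in lsan_common.cc iterate the registry safely: it runs inside the stop-the-world callback, where the registry lock is already held.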