Diffstat (limited to 'lib/asan/asan_allocator2.cc')
-rw-r--r--  lib/asan/asan_allocator2.cc | 265
1 file changed, 188 insertions(+), 77 deletions(-)
diff --git a/lib/asan/asan_allocator2.cc b/lib/asan/asan_allocator2.cc
index 42d8b29afd6b1..d74aa553a288c 100644
--- a/lib/asan/asan_allocator2.cc
+++ b/lib/asan/asan_allocator2.cc
@@ -13,21 +13,20 @@
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
-// Status: under development, not enabled by default yet.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
-#if ASAN_ALLOCATOR_VERSION == 2
#include "asan_mapping.h"
+#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_thread.h"
-#include "asan_thread_registry.h"
-#include "sanitizer/asan_interface.h"
#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"
+#include "lsan/lsan_common.h"
namespace __asan {
@@ -35,7 +34,7 @@ struct AsanMapUnmapCallback {
void OnMap(uptr p, uptr size) const {
PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
// Statistics.
- AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
+ AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.mmaps++;
thread_stats.mmaped += size;
}
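OnMap poisons the shadow of every region the allocator maps. For readers new to the mapping itself, here is a minimal standalone sketch of the direct shadow computation that PoisonShadow builds on; the offset constant is illustrative, and the real, platform-specific value lives in asan_mapping.h:

```cpp
#include <cstdint>

// Each 8-byte granule of application memory owns one shadow byte located at
// (addr >> kShadowScale) + kShadowOffset. The offset below is illustrative.
static const uintptr_t kShadowScale  = 3;            // 8-byte granules
static const uintptr_t kShadowOffset = 0x7fff8000u;  // illustrative value

static inline uint8_t *MemToShadowSketch(uintptr_t addr) {
  return reinterpret_cast<uint8_t *>((addr >> kShadowScale) + kShadowOffset);
}
```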
@@ -50,23 +49,32 @@ struct AsanMapUnmapCallback {
uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
// Statistics.
- AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
+ AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.munmaps++;
thread_stats.munmaped += size;
}
};
#if SANITIZER_WORDSIZE == 64
+#if defined(__powerpc64__)
+const uptr kAllocatorSpace = 0xa0000000000ULL;
+const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
+#else
const uptr kAllocatorSpace = 0x600000000000ULL;
-const uptr kAllocatorSize = 0x10000000000ULL; // 1T.
+const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+#endif
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef CompactSizeClassMap SizeClassMap;
+static const uptr kRegionSizeLog = 20;
+static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
- SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
+ SizeClassMap, kRegionSizeLog,
+ FlatByteMap<kFlatByteMapSize>,
+ AsanMapUnmapCallback> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
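On 64-bit targets the primary allocator now reserves a 4T region (2T on powerpc64, whose usable address space is smaller), while 32-bit targets cover the whole 4G space with a flat byte map of 1M regions (kRegionSizeLog = 20). One payoff of a fixed contiguous range is that classifying a pointer as primary-allocator memory is a constant-time range check, roughly as sketched below (a simplification of the check sanitizer_common performs; constants match the 64-bit non-PowerPC values above):

```cpp
#include <cstdint>

// 64-bit sketch only: the primary allocator owns one fixed virtual range,
// so pointer classification is a single range comparison.
static const uintptr_t kSpace = 0x600000000000ULL;
static const uintptr_t kSize  = 0x40000000000ULL;  // 4T

static bool PointerIsFromPrimarySketch(const void *p) {
  uintptr_t a = reinterpret_cast<uintptr_t>(p);
  return a >= kSpace && a < kSpace + kSize;
}
```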
@@ -91,8 +99,6 @@ static const uptr kMaxAllowedMallocSize =
static const uptr kMaxThreadLocalQuarantine =
FIRST_32_SECOND_64(1 << 18, 1 << 20);
-static const uptr kReturnOnZeroMalloc = 2048; // Zero page is protected.
-
// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
@@ -114,7 +120,7 @@ static u32 RZSize2Log(u32 rz_size) {
CHECK_GE(rz_size, 16);
CHECK_LE(rz_size, 2048);
CHECK(IsPowerOfTwo(rz_size));
- u32 res = __builtin_ctz(rz_size) - 4;
+ u32 res = Log2(rz_size) - 4;
CHECK_EQ(rz_size, RZLog2Size(res));
return res;
}
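The patch swaps the GCC-specific __builtin_ctz for the portable Log2 helper. The invariant being checked is that rz_log in [0, 7] and rz_size = 16 << rz_log are exact inverses; a self-contained sketch, assuming RZLog2Size is the usual 16 << rz_log mapping:

```cpp
#include <cassert>
#include <cstdint>

// rz_log in [0, 7] encodes redzone sizes 16 << rz_log, i.e. 16..2048 bytes.
static uint32_t RZLog2Size(uint32_t rz_log) {
  assert(rz_log < 8);
  return 16u << rz_log;
}

static uint32_t Log2(uint32_t x) {   // x must be a power of two
  uint32_t res = 0;
  while ((1u << res) < x) res++;
  return res;
}

static uint32_t RZSize2Log(uint32_t rz_size) {
  assert(rz_size >= 16 && rz_size <= 2048);
  uint32_t res = Log2(rz_size) - 4;  // 16 == 1 << 4
  assert(RZLog2Size(res) == rz_size);
  return res;
}

int main() {
  for (uint32_t log = 0; log < 8; log++)
    assert(RZSize2Log(RZLog2Size(log)) == log);
}
```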
@@ -158,6 +164,7 @@ struct ChunkHeader {
u32 from_memalign : 1;
u32 alloc_type : 2;
u32 rz_log : 3;
+ u32 lsan_tag : 2;
// 2-nd 8 bytes
// This field is used for small sizes. For large sizes it is equal to
// SizeClassMap::kMaxSize and the actual size is stored in the
@@ -168,7 +175,6 @@ struct ChunkHeader {
struct ChunkBase : ChunkHeader {
// Header2, intersects with user memory.
- AsanChunk *next;
u32 free_context_id;
};
@@ -189,7 +195,8 @@ struct AsanChunk: ChunkBase {
return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
}
- // We store the alloc/free stack traces in the chunk itself.
+ // If we don't use stack depot, we store the alloc/free stack traces
+ // in the chunk itself.
u32 *AllocStackBeg() {
return (u32*)(Beg() - RZLog2Size(rz_log));
}
@@ -205,6 +212,9 @@ struct AsanChunk: ChunkBase {
uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY);
return (available - kChunkHeader2Size) / sizeof(u32);
}
+ bool AddrIsInside(uptr addr) {
+ return (addr >= Beg()) && (addr < Beg() + UsedSize());
+ }
};
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
@@ -258,8 +268,8 @@ struct QuarantineCallback {
}
void Recycle(AsanChunk *m) {
- CHECK(m->chunk_state == CHUNK_QUARANTINE);
- m->chunk_state = CHUNK_AVAILABLE;
+ CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
+ atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
CHECK_NE(m->alloc_tid, kInvalidTid);
CHECK_NE(m->free_tid, kInvalidTid);
PoisonShadow(m->Beg(),
@@ -273,7 +283,7 @@ struct QuarantineCallback {
}
// Statistics.
- AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
+ AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.real_frees++;
thread_stats.really_freed += m->UsedSize();
@@ -291,34 +301,32 @@ struct QuarantineCallback {
AllocatorCache *cache_;
};
-static void Init() {
- static int inited = 0;
- if (inited) return;
- __asan_init();
- inited = true; // this must happen before any threads are created.
+void InitializeAllocator() {
allocator.Init();
quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}
static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
- AllocType alloc_type) {
- Init();
+ AllocType alloc_type, bool can_fill) {
+ if (!asan_inited)
+ __asan_init();
+ Flags &fl = *flags();
CHECK(stack);
const uptr min_alignment = SHADOW_GRANULARITY;
if (alignment < min_alignment)
alignment = min_alignment;
if (size == 0) {
- if (alignment <= kReturnOnZeroMalloc)
- return reinterpret_cast<void *>(kReturnOnZeroMalloc);
- else
- return 0; // 0 bytes with large alignment requested. Just return 0.
+ // We'd be happy to avoid allocating memory for zero-size requests, but
+ // some programs/tests depend on this behavior and assume that malloc would
+ // not return NULL even for zero-size allocations. Moreover, it looks like
+ // operator new should never return NULL, and results of consecutive "new"
+ // calls must be different even if the allocated size is zero.
+ size = 1;
}
CHECK(IsPowerOfTwo(alignment));
uptr rz_log = ComputeRZLog(size);
uptr rz_size = RZLog2Size(rz_log);
- uptr rounded_size = RoundUpTo(size, alignment);
- if (rounded_size < kChunkHeader2Size)
- rounded_size = kChunkHeader2Size;
+ uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
uptr needed_size = rounded_size + rz_size;
if (alignment > min_alignment)
needed_size += alignment;
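The zero-size branch above replaces the old kReturnOnZeroMalloc sentinel, which handed every zero-byte malloc the same protected address, with a real 1-byte allocation. That matches the C++ rule that distinct live allocations compare unequal even at size zero, which the shared sentinel violated. A usage-level check:

```cpp
#include <cassert>

// Under the new scheme every zero-size allocation gets its own chunk, so
// consecutive zero-size news compare unequal (the old kReturnOnZeroMalloc
// sentinel aliased them all).
int main() {
  char *a = new char[0];
  char *b = new char[0];
  assert(a && b && a != b);
  delete[] a;
  delete[] b;
}
```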
@@ -336,7 +344,7 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
return 0;
}
- AsanThread *t = asanThreadRegistry().GetCurrent();
+ AsanThread *t = GetCurrentThread();
void *allocated;
if (t) {
AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
@@ -358,7 +366,6 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
CHECK_LE(user_end, alloc_end);
uptr chunk_beg = user_beg - kChunkHeaderSize;
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
- m->chunk_state = CHUNK_ALLOCATED;
m->alloc_type = alloc_type;
m->rz_log = rz_log;
u32 alloc_tid = t ? t->tid() : 0;
@@ -384,7 +391,7 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
meta[1] = chunk_beg;
}
- if (flags()->use_stack_depot) {
+ if (fl.use_stack_depot) {
m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
} else {
m->alloc_context_id = 0;
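With use_stack_depot set, the allocation stack is stored as a 32-bit handle in the header instead of being compressed into the redzone. An illustrative stand-in for the idea (not the real StackDepot API, which is lock-free and hash-based): identical traces map to one id, so the per-chunk cost is constant and traces are deduplicated globally.

```cpp
#include <cstddef>
#include <cstdint>
#include <map>
#include <vector>

// Illustrative stand-in for the stack depot: identical traces are stored
// once and referenced by a 32-bit id kept in the chunk header.
static std::map<std::vector<uintptr_t>, uint32_t> depot;

uint32_t StackDepotPutSketch(const uintptr_t *trace, size_t size) {
  std::vector<uintptr_t> key(trace, trace + size);
  auto it = depot.find(key);
  if (it != depot.end()) return it->second;
  uint32_t id = static_cast<uint32_t>(depot.size()) + 1;  // 0 = "no trace"
  depot.emplace(std::move(key), id);
  return id;
}
```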
@@ -396,12 +403,12 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
if (size_rounded_down_to_granularity)
PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
// Deal with the end of the region if size is not aligned to granularity.
- if (size != size_rounded_down_to_granularity && flags()->poison_heap) {
+ if (size != size_rounded_down_to_granularity && fl.poison_heap) {
u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
*shadow = size & (SHADOW_GRANULARITY - 1);
}
- AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
+ AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.mallocs++;
thread_stats.malloced += size;
thread_stats.malloced_redzones += needed_size - size;
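The shadow byte written for a trailing partial granule encodes how many leading bytes of that 8-byte granule are addressable. A minimal sketch of the check the instrumentation performs against such a byte (0 means fully addressable; negative values are redzone magics):

```cpp
#include <cassert>
#include <cstdint>

// Shadow value k in [1,7]: only the first k bytes of the granule are valid.
static bool AddressIsPoisoned(uint8_t shadow, uintptr_t addr) {
  int8_t s = (int8_t)shadow;
  if (s == 0) return false;                 // whole granule addressable
  return s < 0 || (int8_t)(addr & 7) >= s;  // redzone, or past the last byte
}

int main() {
  assert(!AddressIsPoisoned(0, 0x1000));    // fully addressable granule
  assert(!AddressIsPoisoned(5, 0x1004));    // byte 4 of 5 valid bytes
  assert(AddressIsPoisoned(5, 0x1005));     // byte 5 is past the end
}
```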
@@ -411,25 +418,42 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
thread_stats.malloc_large++;
void *res = reinterpret_cast<void *>(user_beg);
+ if (can_fill && fl.max_malloc_fill_size) {
+ uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
+ REAL(memset)(res, fl.malloc_fill_byte, fill_size);
+ }
+ // Must be the last mutation of metadata in this function.
+ atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
ASAN_MALLOC_HOOK(res, size);
return res;
}
-static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
- uptr p = reinterpret_cast<uptr>(ptr);
- if (p == 0 || p == kReturnOnZeroMalloc) return;
- uptr chunk_beg = p - kChunkHeaderSize;
- AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
-
+static void AtomicallySetQuarantineFlag(AsanChunk *m,
+ void *ptr, StackTrace *stack) {
+ u8 old_chunk_state = CHUNK_ALLOCATED;
// Flip the chunk_state atomically to avoid race on double-free.
- u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
- memory_order_relaxed);
-
- if (old_chunk_state == CHUNK_QUARANTINE)
- ReportDoubleFree((uptr)ptr, stack);
- else if (old_chunk_state != CHUNK_ALLOCATED)
- ReportFreeNotMalloced((uptr)ptr, stack);
- CHECK(old_chunk_state == CHUNK_ALLOCATED);
+ if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
+ CHUNK_QUARANTINE, memory_order_acquire)) {
+ if (old_chunk_state == CHUNK_QUARANTINE)
+ ReportDoubleFree((uptr)ptr, stack);
+ else
+ ReportFreeNotMalloced((uptr)ptr, stack);
+ }
+ CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
+}
+
+// Expects the chunk to already be marked as quarantined by using
+// AtomicallySetQuarantineFlag.
+static void QuarantineChunk(AsanChunk *m, void *ptr,
+ StackTrace *stack, AllocType alloc_type) {
+ CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
+
+ // FIXME: if the free hook produces an ASan report (e.g. due to a bug),
+ // printing the report may crash as the AsanChunk free-related fields have not
+ // been updated yet. We might need to introduce yet another chunk state to
+ // handle this correctly, but don't want to yet.
+ ASAN_FREE_HOOK(ptr);
+
if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
ReportAllocTypeMismatch((uptr)ptr, stack,
(AllocType)m->alloc_type, (AllocType)alloc_type);
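This hunk contains both halves of the new chunk-state protocol: Allocate publishes a fully initialized header with a release store, and the free path claims the chunk with a single compare-exchange, so exactly one of two racing frees can move it from allocated to quarantined and the loser is reported as a double free. A standalone sketch with std::atomic standing in for the sanitizer atomics (state values are illustrative):

```cpp
#include <atomic>
#include <cstdint>

enum : uint8_t { AVAILABLE = 0, ALLOCATED = 2, QUARANTINE = 3 };

struct ChunkSketch { std::atomic<uint8_t> chunk_state{AVAILABLE}; };

void Publish(ChunkSketch *m) {
  // ...all other header fields are written first...
  m->chunk_state.store(ALLOCATED, std::memory_order_release);
}

bool TryQuarantine(ChunkSketch *m) {
  uint8_t expected = ALLOCATED;
  // Acquire pairs with the release in Publish; a false return means either
  // a double free (QUARANTINE) or a free of a never-allocated address.
  return m->chunk_state.compare_exchange_strong(
      expected, QUARANTINE, std::memory_order_acquire);
}
```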
@@ -437,7 +461,7 @@ static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
CHECK_GE(m->alloc_tid, 0);
if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area.
CHECK_EQ(m->free_tid, kInvalidTid);
- AsanThread *t = asanThreadRegistry().GetCurrent();
+ AsanThread *t = GetCurrentThread();
m->free_tid = t ? t->tid() : 0;
if (flags()->use_stack_depot) {
m->free_context_id = StackDepotPut(stack->trace, stack->size);
@@ -445,13 +469,12 @@ static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
m->free_context_id = 0;
StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
}
- CHECK(m->chunk_state == CHUNK_QUARANTINE);
// Poison the region.
PoisonShadow(m->Beg(),
RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
kAsanHeapFreeMagic);
- AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
+ AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.frees++;
thread_stats.freed += m->UsedSize();
@@ -467,8 +490,17 @@ static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
m, m->UsedSize());
}
+}
- ASAN_FREE_HOOK(ptr);
+static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
+ uptr p = reinterpret_cast<uptr>(ptr);
+ if (p == 0) return;
+
+ uptr chunk_beg = p - kChunkHeaderSize;
+ AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
+ // Must mark the chunk as quarantined before any changes to its metadata.
+ AtomicallySetQuarantineFlag(m, ptr, stack);
+ QuarantineChunk(m, ptr, stack, alloc_type);
}
static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
@@ -477,18 +509,21 @@ static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
uptr chunk_beg = p - kChunkHeaderSize;
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
- AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
+ AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.reallocs++;
thread_stats.realloced += new_size;
- CHECK(m->chunk_state == CHUNK_ALLOCATED);
+ // Must mark the chunk as quarantined before any changes to its metadata.
+ // This also ensures that other threads can't deallocate it in the meantime.
+ AtomicallySetQuarantineFlag(m, old_ptr, stack);
+
uptr old_size = m->UsedSize();
uptr memcpy_size = Min(new_size, old_size);
- void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC);
+ void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
if (new_ptr) {
- CHECK(REAL(memcpy) != 0);
+ CHECK_NE(REAL(memcpy), (void*)0);
REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
- Deallocate(old_ptr, stack, FROM_MALLOC);
+ QuarantineChunk(m, old_ptr, stack, FROM_MALLOC);
}
return new_ptr;
}
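Reallocate now claims the old chunk up front via the same CAS, which both locks out concurrent frees during the copy and explains why it calls QuarantineChunk directly: going through Deallocate again would re-run the CAS and report a spurious double free. The shape of the path, as a sketch with libc calls standing in for the allocator entry points:

```cpp
#include <cstdlib>
#include <cstring>

// Sketch only: claim the old block first, copy, then retire it.
void *ReallocSketch(void *old_ptr, size_t old_size, size_t new_size) {
  // (1) old chunk has already been claimed via the CAS, blocking racing frees
  void *new_ptr = malloc(new_size);  // stand-in for Allocate(...)
  if (new_ptr) {
    memcpy(new_ptr, old_ptr, old_size < new_size ? old_size : new_size);
    free(old_ptr);                   // stand-in for QuarantineChunk(...)
  }
  return new_ptr;  // on failure the old block stays claimed, as in the patch
}
```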
@@ -548,7 +583,7 @@ AsanChunk *ChooseChunk(uptr addr,
return right_chunk;
}
// Same chunk_state: choose based on offset.
- uptr l_offset = 0, r_offset = 0;
+ sptr l_offset = 0, r_offset = 0;
CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
if (l_offset < r_offset)
@@ -559,7 +594,7 @@ AsanChunk *ChooseChunk(uptr addr,
AsanChunkView FindHeapChunkByAddress(uptr addr) {
AsanChunk *m1 = GetAsanChunkByAddr(addr);
if (!m1) return AsanChunkView(m1);
- uptr offset = 0;
+ sptr offset = 0;
if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
// The address is in the chunk's left redzone, so maybe it is actually
// a right buffer overflow from the other chunk to the left.
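The two hunks above change the offset out-parameters from uptr to sptr, presumably because the AddrIsAtLeft/AddrIsAtRight helpers can produce signed distances; with unsigned storage a negative offset wraps to a huge value and the l_offset < r_offset comparison in ChooseChunk inverts. A two-line demonstration of the failure mode:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  intptr_t l = -8, r = 4;
  assert(l < r);                           // signed: correct ordering
  assert(!(uintptr_t(l) < uintptr_t(r)));  // unsigned: wraps and inverts
}
```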
@@ -589,7 +624,7 @@ void PrintInternalAllocatorStats() {
SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
AllocType alloc_type) {
- return Allocate(size, alignment, stack, alloc_type);
+ return Allocate(size, alignment, stack, alloc_type, true);
}
SANITIZER_INTERFACE_ATTRIBUTE
@@ -599,19 +634,22 @@ void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
SANITIZER_INTERFACE_ATTRIBUTE
void *asan_malloc(uptr size, StackTrace *stack) {
- return Allocate(size, 8, stack, FROM_MALLOC);
+ return Allocate(size, 8, stack, FROM_MALLOC, true);
}
void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
- void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC);
- if (ptr)
+ if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0;
+ void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
+ // If the memory comes from the secondary allocator, there is no need to
+ // clear it, as it comes directly from mmap.
+ if (ptr && allocator.FromPrimary(ptr))
REAL(memset)(ptr, 0, nmemb * size);
return ptr;
}
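asan_calloc gains two refinements: an up-front overflow check on nmemb * size, and skipping the memset when the block comes from the secondary allocator, since fresh mmap memory is already zeroed. A sketch of the overflow test CallocShouldReturnNullDueToOverflow is assumed to perform (the real helper may be written differently):

```cpp
#include <cstddef>
#include <cstdint>

// nmemb * size must not wrap, otherwise calloc would allocate a short buffer
// that the caller believes holds nmemb * size bytes.
static bool CallocOverflows(size_t nmemb, size_t size) {
  if (nmemb == 0) return false;
  return size > SIZE_MAX / nmemb;  // equivalent to nmemb * size > SIZE_MAX
}
```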
void *asan_realloc(void *p, uptr size, StackTrace *stack) {
if (p == 0)
- return Allocate(size, 8, stack, FROM_MALLOC);
+ return Allocate(size, 8, stack, FROM_MALLOC, true);
if (size == 0) {
Deallocate(p, stack, FROM_MALLOC);
return 0;
@@ -620,7 +658,7 @@ void *asan_realloc(void *p, uptr size, StackTrace *stack) {
}
void *asan_valloc(uptr size, StackTrace *stack) {
- return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC);
+ return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
}
void *asan_pvalloc(uptr size, StackTrace *stack) {
@@ -630,12 +668,12 @@ void *asan_pvalloc(uptr size, StackTrace *stack) {
// pvalloc(0) should allocate one page.
size = PageSize;
}
- return Allocate(size, PageSize, stack, FROM_MALLOC);
+ return Allocate(size, PageSize, stack, FROM_MALLOC, true);
}
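pvalloc rounds the request up to a whole page (with the zero-size special case shown above). A sketch of the power-of-two rounding assumed here, matching the usual sanitizer_common RoundUpTo:

```cpp
#include <cassert>
#include <cstdint>

static uintptr_t RoundUpToSketch(uintptr_t size, uintptr_t boundary) {
  assert((boundary & (boundary - 1)) == 0);  // boundary is a power of two
  return (size + boundary - 1) & ~(boundary - 1);
}

int main() {
  const uintptr_t kPageSize = 4096;
  assert(RoundUpToSketch(1, kPageSize) == kPageSize);
  assert(RoundUpToSketch(4096, kPageSize) == kPageSize);  // already aligned
  assert(RoundUpToSketch(4097, kPageSize) == 2 * kPageSize);
}
```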
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
StackTrace *stack) {
- void *ptr = Allocate(size, alignment, stack, FROM_MALLOC);
+ void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true);
CHECK(IsAligned((uptr)ptr, alignment));
*memptr = ptr;
return 0;
@@ -651,20 +689,96 @@ uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
}
uptr asan_mz_size(const void *ptr) {
- UNIMPLEMENTED();
- return 0;
+ return AllocationSize(reinterpret_cast<uptr>(ptr));
}
void asan_mz_force_lock() {
- UNIMPLEMENTED();
+ allocator.ForceLock();
+ fallback_mutex.Lock();
}
void asan_mz_force_unlock() {
- UNIMPLEMENTED();
+ fallback_mutex.Unlock();
+ allocator.ForceUnlock();
}
} // namespace __asan
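asan_mz_force_lock/unlock now really lock: they take the allocator lock and the fallback mutex in a fixed order and release them in reverse, the standard discipline for stop-the-world callers (malloc zone enumeration, fork) to avoid deadlocking against a thread that holds only one of them. The shape of it, with std::mutex as a stand-in for the real locks:

```cpp
#include <mutex>

// Locks are acquired in a fixed order and released in reverse order.
std::mutex allocator_lock, fallback_lock;  // stand-ins for the real locks

void ForceLock()   { allocator_lock.lock(); fallback_lock.lock(); }
void ForceUnlock() { fallback_lock.unlock(); allocator_lock.unlock(); }
```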
+// --- Implementation of LSan-specific functions --- {{{1
+namespace __lsan {
+void LockAllocator() {
+ __asan::allocator.ForceLock();
+}
+
+void UnlockAllocator() {
+ __asan::allocator.ForceUnlock();
+}
+
+void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
+ *begin = (uptr)&__asan::allocator;
+ *end = *begin + sizeof(__asan::allocator);
+}
+
+void *PointsIntoChunk(void* p) {
+ uptr addr = reinterpret_cast<uptr>(p);
+ __asan::AsanChunk *m = __asan::GetAsanChunkByAddr(addr);
+ if (!m) return 0;
+ uptr chunk = m->Beg();
+ if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr))
+ return reinterpret_cast<void *>(chunk);
+ return 0;
+}
+
+void *GetUserBegin(void *p) {
+ __asan::AsanChunk *m = __asan::GetAsanChunkByAddr(reinterpret_cast<uptr>(p));
+ CHECK(m);
+ return reinterpret_cast<void *>(m->Beg());
+}
+
+LsanMetadata::LsanMetadata(void *chunk) {
+ uptr addr = reinterpret_cast<uptr>(chunk);
+ metadata_ = reinterpret_cast<void *>(addr - __asan::kChunkHeaderSize);
+}
+
+bool LsanMetadata::allocated() const {
+ __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+ return m->chunk_state == __asan::CHUNK_ALLOCATED;
+}
+
+ChunkTag LsanMetadata::tag() const {
+ __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+ return static_cast<ChunkTag>(m->lsan_tag);
+}
+
+void LsanMetadata::set_tag(ChunkTag value) {
+ __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+ m->lsan_tag = value;
+}
+
+uptr LsanMetadata::requested_size() const {
+ __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+ return m->UsedSize();
+}
+
+u32 LsanMetadata::stack_trace_id() const {
+ __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+ return m->alloc_context_id;
+}
+
+template <typename Callable> void ForEachChunk(Callable const &callback) {
+ __asan::allocator.ForEachChunk(callback);
+}
+#if CAN_SANITIZE_LEAKS
+template void ForEachChunk<ProcessPlatformSpecificAllocationsCb>(
+ ProcessPlatformSpecificAllocationsCb const &callback);
+template void ForEachChunk<PrintLeakedCb>(PrintLeakedCb const &callback);
+template void ForEachChunk<CollectLeaksCb>(CollectLeaksCb const &callback);
+template void ForEachChunk<MarkIndirectlyLeakedCb>(
+ MarkIndirectlyLeakedCb const &callback);
+template void ForEachChunk<ClearTagCb>(ClearTagCb const &callback);
+#endif // CAN_SANITIZE_LEAKS
+} // namespace __lsan
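The LsanMetadata accessors all rely on one piece of pointer arithmetic: LSan passes the user-begin address of a chunk, and the ASan ChunkHeader, which now carries the 2-bit lsan_tag, sits immediately before it. A sketch of that recovery step (the header size constant is illustrative; the real one follows from the ChunkHeader layout):

```cpp
#include <cstdint>

// Illustrative header-size constant; the real value is kChunkHeaderSize.
static const uintptr_t kChunkHeaderSizeSketch = 16;

struct HeaderSketch {
  uint32_t chunk_state : 8;  // CHUNK_AVAILABLE / ALLOCATED / QUARANTINE
  uint32_t lsan_tag    : 2;  // the tag LSan reads and writes
};

static HeaderSketch *HeaderFromUserBegin(uintptr_t user_begin) {
  // The header ends exactly where the user memory begins.
  return reinterpret_cast<HeaderSketch *>(user_begin - kChunkHeaderSizeSketch);
}
```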
+
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
@@ -676,7 +790,7 @@ uptr __asan_get_estimated_allocated_size(uptr size) {
bool __asan_get_ownership(const void *p) {
uptr ptr = reinterpret_cast<uptr>(p);
- return (ptr == kReturnOnZeroMalloc) || (AllocationSize(ptr) > 0);
+ return (AllocationSize(ptr) > 0);
}
uptr __asan_get_allocated_size(const void *p) {
@@ -684,7 +798,7 @@ uptr __asan_get_allocated_size(const void *p) {
uptr ptr = reinterpret_cast<uptr>(p);
uptr allocated_size = AllocationSize(ptr);
// Die if p is not malloced or if it is already freed.
- if (allocated_size == 0 && ptr != kReturnOnZeroMalloc) {
+ if (allocated_size == 0) {
GET_STACK_TRACE_FATAL_HERE;
ReportAsanGetAllocatedSizeNotOwned(ptr, &stack);
}
@@ -705,6 +819,3 @@ void __asan_free_hook(void *ptr) {
}
} // extern "C"
#endif
-
-
-#endif // ASAN_ALLOCATOR_VERSION