Diffstat (limited to 'lib/sanitizer_common')
-rw-r--r--   lib/sanitizer_common/sanitizer_allocator.cc             |   8
-rw-r--r--   lib/sanitizer_common/sanitizer_allocator.h              |  10
-rw-r--r--   lib/sanitizer_common/sanitizer_allocator_local_cache.h  |  14
-rw-r--r--   lib/sanitizer_common/sanitizer_allocator_primary32.h    |   7
-rw-r--r--   lib/sanitizer_common/sanitizer_allocator_primary64.h    | 191
-rw-r--r--   lib/sanitizer_common/sanitizer_common.h                 |   3
-rw-r--r--   lib/sanitizer_common/sanitizer_common_interceptors.inc  |  73
-rw-r--r--   lib/sanitizer_common/sanitizer_platform_interceptors.h  |   1
-rw-r--r--   lib/sanitizer_common/sanitizer_posix.cc                 |  34
-rw-r--r--   lib/sanitizer_common/sanitizer_win.cc                   |  12
-rw-r--r--   lib/sanitizer_common/tests/sanitizer_allocator_test.cc  |  34
11 files changed, 256 insertions, 131 deletions
diff --git a/lib/sanitizer_common/sanitizer_allocator.cc b/lib/sanitizer_common/sanitizer_allocator.cc
index db3ebb0336a9f..2f8f6e3f9aa72 100644
--- a/lib/sanitizer_common/sanitizer_allocator.cc
+++ b/lib/sanitizer_common/sanitizer_allocator.cc
@@ -160,7 +160,7 @@ void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
 }
 
 void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
-  if (CallocShouldReturnNullDueToOverflow(count, size))
+  if (CheckForCallocOverflow(count, size))
     return InternalAllocator::FailureHandler::OnBadRequest();
   void *p = InternalAlloc(count * size, cache);
   if (p) internal_memset(p, 0, count * size);
@@ -202,7 +202,7 @@ void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
   low_level_alloc_callback = callback;
 }
 
-bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
+bool CheckForCallocOverflow(uptr size, uptr n) {
   if (!size) return false;
   uptr max = (uptr)-1L;
   return (max / size) < n;
@@ -246,11 +246,11 @@ void *ReturnNullOrDieOnFailure::OnOOM() {
   ReportAllocatorCannotReturnNull();
 }
 
-void *DieOnFailure::OnBadRequest() {
+void NORETURN *DieOnFailure::OnBadRequest() {
   ReportAllocatorCannotReturnNull();
 }
 
-void *DieOnFailure::OnOOM() {
+void NORETURN *DieOnFailure::OnOOM() {
   atomic_store_relaxed(&allocator_out_of_memory, 1);
   ReportAllocatorCannotReturnNull();
 }
diff --git a/lib/sanitizer_common/sanitizer_allocator.h b/lib/sanitizer_common/sanitizer_allocator.h
index f59c13d1c5a56..0fb8a087ed6be 100644
--- a/lib/sanitizer_common/sanitizer_allocator.h
+++ b/lib/sanitizer_common/sanitizer_allocator.h
@@ -39,8 +39,8 @@ struct ReturnNullOrDieOnFailure {
 };
 // Always dies on the failure.
 struct DieOnFailure {
-  static void *OnBadRequest();
-  static void *OnOOM();
+  static void NORETURN *OnBadRequest();
+  static void NORETURN *OnOOM();
 };
 
 // Returns true if allocator detected OOM condition. Can be used to avoid memory
@@ -56,8 +56,10 @@ struct NoOpMapUnmapCallback {
 // Callback type for iterating over chunks.
 typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
 
-// Returns true if calloc(size, n) should return 0 due to overflow in size*n.
-bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n);
+// Returns true if calloc(size, n) call overflows on size*n calculation.
+// The caller should "return POLICY::OnBadRequest();" where POLICY is the
+// current allocator failure handling policy.
+bool CheckForCallocOverflow(uptr size, uptr n);
 
 #include "sanitizer_allocator_size_class_map.h"
 #include "sanitizer_allocator_stats.h"
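The renamed CheckForCallocOverflow() is meant to be paired with the failure-handling policies above; the new header comment prescribes the calling pattern. A minimal stand-alone sketch of that pattern, using size_t in place of uptr and a hypothetical ReturnNullPolicy (neither is defined by this patch):

    #include <cstdint>
    #include <cstdlib>
    #include <cstring>

    // Same arithmetic as CheckForCallocOverflow(): true iff size * n wraps.
    static bool CallocOverflows(size_t size, size_t n) {
      if (!size) return false;
      return (SIZE_MAX / size) < n;
    }

    // Hypothetical stand-in for DieOnFailure / ReturnNullOrDieOnFailure.
    struct ReturnNullPolicy {
      static void *OnBadRequest() { return nullptr; }
    };

    template <class Policy>
    void *CallocSketch(size_t count, size_t size) {
      if (CallocOverflows(size, count))
        return Policy::OnBadRequest();  // the pattern the new comment asks for
      void *p = malloc(count * size);   // the product cannot wrap past this point
      if (p) memset(p, 0, count * size);
      return p;
    }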
diff --git a/lib/sanitizer_common/sanitizer_allocator_local_cache.h b/lib/sanitizer_common/sanitizer_allocator_local_cache.h
index 8fa62a3bf8292..ec0742c20be2e 100644
--- a/lib/sanitizer_common/sanitizer_allocator_local_cache.h
+++ b/lib/sanitizer_common/sanitizer_allocator_local_cache.h
@@ -46,8 +46,10 @@ struct SizeClassAllocator64LocalCache {
     CHECK_NE(class_id, 0UL);
     CHECK_LT(class_id, kNumClasses);
     PerClass *c = &per_class_[class_id];
-    if (UNLIKELY(c->count == 0))
-      Refill(c, allocator, class_id);
+    if (UNLIKELY(c->count == 0)) {
+      if (UNLIKELY(!Refill(c, allocator, class_id)))
+        return nullptr;
+    }
     stats_.Add(AllocatorStatAllocated, c->class_size);
     CHECK_GT(c->count, 0);
     CompactPtrT chunk = c->chunks[--c->count];
@@ -101,13 +103,15 @@ struct SizeClassAllocator64LocalCache {
     }
   }
 
-  NOINLINE void Refill(PerClass *c, SizeClassAllocator *allocator,
+  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                        uptr class_id) {
     InitCache();
     uptr num_requested_chunks = c->max_count / 2;
-    allocator->GetFromAllocator(&stats_, class_id, c->chunks,
-                                num_requested_chunks);
+    if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
+                                              num_requested_chunks)))
+      return false;
     c->count = num_requested_chunks;
+    return true;
   }
 
   NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
diff --git a/lib/sanitizer_common/sanitizer_allocator_primary32.h b/lib/sanitizer_common/sanitizer_allocator_primary32.h
index d3949cc057345..e85821543d271 100644
--- a/lib/sanitizer_common/sanitizer_allocator_primary32.h
+++ b/lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -118,7 +118,6 @@ class SizeClassAllocator32 {
   }
 
   void *MapWithCallback(uptr size) {
-    size = RoundUpTo(size, GetPageSizeCached());
     void *res = MmapOrDie(size, "SizeClassAllocator32");
     MapUnmapCallback().OnMap((uptr)res, size);
     return res;
@@ -285,7 +284,7 @@ class SizeClassAllocator32 {
       return 0;
     MapUnmapCallback().OnMap(res, kRegionSize);
     stat->Add(AllocatorStatMapped, kRegionSize);
-    CHECK_EQ(0U, (res & (kRegionSize - 1)));
+    CHECK(IsAligned(res, kRegionSize));
     possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
     return res;
   }
@@ -303,17 +302,17 @@ class SizeClassAllocator32 {
       return false;
     uptr n_chunks = kRegionSize / (size + kMetadataSize);
     uptr max_count = TransferBatch::MaxCached(class_id);
+    CHECK_GT(max_count, 0);
     TransferBatch *b = nullptr;
     for (uptr i = reg; i < reg + n_chunks * size; i += size) {
       if (!b) {
         b = c->CreateBatch(class_id, this, (TransferBatch*)i);
-        if (!b)
+        if (UNLIKELY(!b))
           return false;
         b->Clear();
       }
       b->Add((void*)i);
       if (b->Count() == max_count) {
-        CHECK_GT(b->Count(), 0);
         sci->free_list.push_back(b);
         b = nullptr;
       }
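With Refill() and GetFromAllocator() now returning bool, an out-of-memory condition propagates up to Allocate() as nullptr instead of terminating inside the primary allocator. A schematic of the new control flow using toy types (these are not the real sanitizer classes):

    #include <cstddef>
    #include <cstdint>

    struct ToyCache {
      static const size_t kMaxCount = 64;
      uintptr_t chunks[kMaxCount];
      size_t count = 0;

      // Stand-in for SizeClassAllocator64::GetFromAllocator(); always fails
      // here, modeling an exhausted size class.
      bool GetFromAllocator(uintptr_t *out, size_t n) { return false; }

      bool Refill() {
        size_t num_requested = kMaxCount / 2;
        if (!GetFromAllocator(chunks, num_requested))
          return false;  // propagate the failure instead of CHECK-failing
        count = num_requested;
        return true;
      }

      void *Allocate() {
        if (count == 0 && !Refill())
          return nullptr;  // surfaced to the caller's failure policy
        return reinterpret_cast<void *>(chunks[--count]);
      }
    };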
diff --git a/lib/sanitizer_common/sanitizer_allocator_primary64.h b/lib/sanitizer_common/sanitizer_allocator_primary64.h
index 035d92b98cfca..0c2e72ce7eb18 100644
--- a/lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ b/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -80,7 +80,7 @@ class SizeClassAllocator64 {
       CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
     }
     SetReleaseToOSIntervalMs(release_to_os_interval_ms);
-    MapWithCallback(SpaceEnd(), AdditionalSize());
+    MapWithCallbackOrDie(SpaceEnd(), AdditionalSize());
   }
 
   s32 ReleaseToOSIntervalMs() const {
@@ -92,16 +92,6 @@ class SizeClassAllocator64 {
                  memory_order_relaxed);
   }
 
-  void MapWithCallback(uptr beg, uptr size) {
-    CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
-    MapUnmapCallback().OnMap(beg, size);
-  }
-
-  void UnmapWithCallback(uptr beg, uptr size) {
-    MapUnmapCallback().OnUnmap(beg, size);
-    UnmapOrDie(reinterpret_cast<void *>(beg), size);
-  }
-
   static bool CanAllocate(uptr size, uptr alignment) {
     return size <= SizeClassMap::kMaxSize &&
            alignment <= SizeClassMap::kMaxSize;
@@ -116,16 +106,20 @@ class SizeClassAllocator64 {
     BlockingMutexLock l(&region->mutex);
     uptr old_num_chunks = region->num_freed_chunks;
     uptr new_num_freed_chunks = old_num_chunks + n_chunks;
-    EnsureFreeArraySpace(region, region_beg, new_num_freed_chunks);
+    // Failure to allocate free array space while releasing memory is non
+    // recoverable.
+    if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg,
+                                       new_num_freed_chunks)))
+      DieOnFailure::OnOOM();
     for (uptr i = 0; i < n_chunks; i++)
       free_array[old_num_chunks + i] = chunks[i];
     region->num_freed_chunks = new_num_freed_chunks;
-    region->n_freed += n_chunks;
+    region->stats.n_freed += n_chunks;
     MaybeReleaseToOS(class_id);
   }
 
-  NOINLINE void GetFromAllocator(AllocatorStats *stat, uptr class_id,
+  NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
                                  CompactPtrT *chunks, uptr n_chunks) {
     RegionInfo *region = GetRegionInfo(class_id);
     uptr region_beg = GetRegionBeginBySizeClass(class_id);
@@ -133,18 +127,19 @@ class SizeClassAllocator64 {
     BlockingMutexLock l(&region->mutex);
     if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
-      PopulateFreeArray(stat, class_id, region,
-                        n_chunks - region->num_freed_chunks);
+      if (UNLIKELY(!PopulateFreeArray(stat, class_id, region,
+                                      n_chunks - region->num_freed_chunks)))
+        return false;
       CHECK_GE(region->num_freed_chunks, n_chunks);
     }
     region->num_freed_chunks -= n_chunks;
     uptr base_idx = region->num_freed_chunks;
     for (uptr i = 0; i < n_chunks; i++)
       chunks[i] = free_array[base_idx + i];
-    region->n_allocated += n_chunks;
+    region->stats.n_allocated += n_chunks;
+    return true;
   }
 
-
   bool PointerIsMine(const void *p) {
     uptr P = reinterpret_cast<uptr>(p);
     if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
@@ -211,7 +206,7 @@ class SizeClassAllocator64 {
 
   // Test-only.
   void TestOnlyUnmap() {
-    UnmapWithCallback(SpaceBeg(), kSpaceSize + AdditionalSize());
+    UnmapWithCallbackOrDie(SpaceBeg(), kSpaceSize + AdditionalSize());
   }
 
   static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats,
@@ -224,15 +219,15 @@ class SizeClassAllocator64 {
   void PrintStats(uptr class_id, uptr rss) {
     RegionInfo *region = GetRegionInfo(class_id);
     if (region->mapped_user == 0) return;
-    uptr in_use = region->n_allocated - region->n_freed;
+    uptr in_use = region->stats.n_allocated - region->stats.n_freed;
     uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id);
     Printf(
-        "  %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd "
+        "%s %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd "
         "num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd\n",
-        class_id, ClassIdToSize(class_id), region->mapped_user >> 10,
-        region->n_allocated, region->n_freed, in_use,
-        region->num_freed_chunks, avail_chunks, rss >> 10,
-        region->rtoi.num_releases);
+        region->exhausted ? "F" : " ", class_id, ClassIdToSize(class_id),
+        region->mapped_user >> 10, region->stats.n_allocated,
+        region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks,
+        rss >> 10, region->rtoi.num_releases);
   }
 
   void PrintStats() {
@@ -242,8 +237,8 @@ class SizeClassAllocator64 {
     for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
       RegionInfo *region = GetRegionInfo(class_id);
       total_mapped += region->mapped_user;
-      n_allocated += region->n_allocated;
-      n_freed += region->n_freed;
+      n_allocated += region->stats.n_allocated;
+      n_freed += region->stats.n_freed;
     }
     Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
            "remains %zd\n",
@@ -326,6 +321,11 @@ class SizeClassAllocator64 {
 
   atomic_sint32_t release_to_os_interval_ms_;
 
+  struct Stats {
+    uptr n_allocated;
+    uptr n_freed;
+  };
+
   struct ReleaseToOsInfo {
     uptr n_freed_at_last_release;
     uptr num_releases;
@@ -340,8 +340,9 @@ class SizeClassAllocator64 {
     uptr allocated_meta;  // Bytes allocated for metadata.
     uptr mapped_user;  // Bytes mapped for user memory.
    uptr mapped_meta;  // Bytes mapped for metadata.
-    u32 rand_state; // Seed for random shuffle, used if kRandomShuffleChunks.
-    uptr n_allocated, n_freed;  // Just stats.
+    u32 rand_state;   // Seed for random shuffle, used if kRandomShuffleChunks.
+    bool exhausted;   // Whether region is out of space for new chunks.
+    Stats stats;
     ReleaseToOsInfo rtoi;
   };
   COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);
@@ -386,7 +387,26 @@ class SizeClassAllocator64 {
                 kFreeArraySize);
   }
 
-  void EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
+  bool MapWithCallback(uptr beg, uptr size) {
+    uptr mapped = reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(beg, size));
+    if (UNLIKELY(!mapped))
+      return false;
+    CHECK_EQ(beg, mapped);
+    MapUnmapCallback().OnMap(beg, size);
+    return true;
+  }
+
+  void MapWithCallbackOrDie(uptr beg, uptr size) {
+    CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
+    MapUnmapCallback().OnMap(beg, size);
+  }
+
+  void UnmapWithCallbackOrDie(uptr beg, uptr size) {
+    MapUnmapCallback().OnUnmap(beg, size);
+    UnmapOrDie(reinterpret_cast<void *>(beg), size);
+  }
+
+  bool EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
                             uptr num_freed_chunks) {
     uptr needed_space = num_freed_chunks * sizeof(CompactPtrT);
     if (region->mapped_free_array < needed_space) {
@@ -395,66 +415,87 @@ class SizeClassAllocator64 {
       uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) +
                              region->mapped_free_array;
       uptr new_map_size = new_mapped_free_array - region->mapped_free_array;
-      MapWithCallback(current_map_end, new_map_size);
+      if (UNLIKELY(!MapWithCallback(current_map_end, new_map_size)))
+        return false;
       region->mapped_free_array = new_mapped_free_array;
     }
+    return true;
   }
 
-
-  NOINLINE void PopulateFreeArray(AllocatorStats *stat, uptr class_id,
+  NOINLINE bool PopulateFreeArray(AllocatorStats *stat, uptr class_id,
                                   RegionInfo *region, uptr requested_count) {
     // region->mutex is held.
-    uptr size = ClassIdToSize(class_id);
-    uptr beg_idx = region->allocated_user;
-    uptr end_idx = beg_idx + requested_count * size;
-    uptr region_beg = GetRegionBeginBySizeClass(class_id);
-    if (end_idx > region->mapped_user) {
+    const uptr size = ClassIdToSize(class_id);
+    const uptr new_space_beg = region->allocated_user;
+    const uptr new_space_end = new_space_beg + requested_count * size;
+    const uptr region_beg = GetRegionBeginBySizeClass(class_id);
+
+    // Map more space for chunks, if necessary.
+    if (new_space_end > region->mapped_user) {
       if (!kUsingConstantSpaceBeg && region->mapped_user == 0)
         region->rand_state = static_cast<u32>(region_beg >> 12);  // From ASLR.
       // Do the mmap for the user memory.
       uptr map_size = kUserMapSize;
-      while (end_idx > region->mapped_user + map_size)
+      while (new_space_end > region->mapped_user + map_size)
         map_size += kUserMapSize;
-      CHECK_GE(region->mapped_user + map_size, end_idx);
-      MapWithCallback(region_beg + region->mapped_user, map_size);
+      CHECK_GE(region->mapped_user + map_size, new_space_end);
+      if (UNLIKELY(!MapWithCallback(region_beg + region->mapped_user,
+                                    map_size)))
+        return false;
       stat->Add(AllocatorStatMapped, map_size);
       region->mapped_user += map_size;
     }
-    CompactPtrT *free_array = GetFreeArray(region_beg);
-    uptr total_count = (region->mapped_user - beg_idx) / size;
-    uptr num_freed_chunks = region->num_freed_chunks;
-    EnsureFreeArraySpace(region, region_beg, num_freed_chunks + total_count);
-    for (uptr i = 0; i < total_count; i++) {
-      uptr chunk = beg_idx + i * size;
-      free_array[num_freed_chunks + total_count - 1 - i] =
-          PointerToCompactPtr(0, chunk);
+    const uptr new_chunks_count = (region->mapped_user - new_space_beg) / size;
+
+    // Calculate the required space for metadata.
+    const uptr requested_allocated_meta =
+        region->allocated_meta + new_chunks_count * kMetadataSize;
+    uptr requested_mapped_meta = region->mapped_meta;
+    while (requested_allocated_meta > requested_mapped_meta)
+      requested_mapped_meta += kMetaMapSize;
+    // Check whether this size class is exhausted.
+    if (region->mapped_user + requested_mapped_meta >
+        kRegionSize - kFreeArraySize) {
+      if (!region->exhausted) {
+        region->exhausted = true;
+        Printf("%s: Out of memory. ", SanitizerToolName);
+        Printf("The process has exhausted %zuMB for size class %zu.\n",
+               kRegionSize >> 20, size);
+      }
+      return false;
+    }
+    // Map more space for metadata, if necessary.
+    if (requested_mapped_meta > region->mapped_meta) {
+      if (UNLIKELY(!MapWithCallback(
+              GetMetadataEnd(region_beg) - requested_mapped_meta,
+              requested_mapped_meta - region->mapped_meta)))
+        return false;
+      region->mapped_meta = requested_mapped_meta;
     }
+
+    // If necessary, allocate more space for the free array and populate it
+    // with newly allocated chunks.
+    const uptr total_freed_chunks = region->num_freed_chunks + new_chunks_count;
+    if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, total_freed_chunks)))
+      return false;
+    CompactPtrT *free_array = GetFreeArray(region_beg);
+    for (uptr i = 0, chunk = new_space_beg; i < new_chunks_count;
+         i++, chunk += size)
+      free_array[total_freed_chunks - 1 - i] = PointerToCompactPtr(0, chunk);
     if (kRandomShuffleChunks)
-      RandomShuffle(&free_array[num_freed_chunks], total_count,
+      RandomShuffle(&free_array[region->num_freed_chunks], new_chunks_count,
                     &region->rand_state);
-    region->num_freed_chunks += total_count;
-    region->allocated_user += total_count * size;
-    CHECK_LE(region->allocated_user, region->mapped_user);
-    region->allocated_meta += total_count * kMetadataSize;
-    if (region->allocated_meta > region->mapped_meta) {
-      uptr map_size = kMetaMapSize;
-      while (region->allocated_meta > region->mapped_meta + map_size)
-        map_size += kMetaMapSize;
-      // Do the mmap for the metadata.
-      CHECK_GE(region->mapped_meta + map_size, region->allocated_meta);
-      MapWithCallback(GetMetadataEnd(region_beg) -
-                      region->mapped_meta - map_size, map_size);
-      region->mapped_meta += map_size;
-    }
+
+    // All necessary memory is mapped and now it is safe to advance all
+    // 'allocated_*' counters.
+    region->num_freed_chunks += new_chunks_count;
+    region->allocated_user += new_chunks_count * size;
+    CHECK_LE(region->allocated_user, region->mapped_user);
+    region->allocated_meta = requested_allocated_meta;
     CHECK_LE(region->allocated_meta, region->mapped_meta);
-    if (region->mapped_user + region->mapped_meta >
-        kRegionSize - kFreeArraySize) {
-      Printf("%s: Out of memory. Dying. ", SanitizerToolName);
-      Printf("The process has exhausted %zuMB for size class %zu.\n",
-             kRegionSize / 1024 / 1024, size);
-      Die();
-    }
+    region->exhausted = false;
+
+    return true;
   }
 
   void MaybeReleaseChunkRange(uptr region_beg, uptr chunk_size,
@@ -478,8 +519,8 @@ class SizeClassAllocator64 {
     uptr n = region->num_freed_chunks;
     if (n * chunk_size < page_size)
      return;  // No chance to release anything.
-    if ((region->n_freed - region->rtoi.n_freed_at_last_release) * chunk_size <
-        page_size) {
+    if ((region->stats.n_freed -
+         region->rtoi.n_freed_at_last_release) * chunk_size < page_size) {
      return;  // Nothing new to release.
    }
@@ -508,7 +549,7 @@ class SizeClassAllocator64 {
         CHECK_GT(chunk - prev, scaled_chunk_size);
         if (prev + scaled_chunk_size - range_beg >= kScaledGranularity) {
           MaybeReleaseChunkRange(region_beg, chunk_size, range_beg, prev);
-          region->rtoi.n_freed_at_last_release = region->n_freed;
+          region->rtoi.n_freed_at_last_release = region->stats.n_freed;
           region->rtoi.num_releases++;
         }
         range_beg = chunk;
@@ -517,5 +558,3 @@ class SizeClassAllocator64 {
     }
   }
 };
-
-
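PopulateFreeArray() now treats a full region as a recoverable condition: user chunks grow up from the region base, metadata grows down from the top, and the topmost kFreeArraySize bytes stay reserved for the free array. A toy version of the bound check with illustrative constants (the real kRegionSize and kFreeArraySize depend on the size-class map, so these values are assumptions):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t kRegionSize    = 1ULL << 32;  // illustrative only
      const uint64_t kFreeArraySize = 1ULL << 28;  // illustrative only

      uint64_t mapped_user = 7ULL << 29;  // 3.5 GiB of user chunks (bottom up)
      uint64_t mapped_meta = 1ULL << 29;  // 0.5 GiB of metadata (top down)

      // Mirrors: region->mapped_user + requested_mapped_meta >
      //              kRegionSize - kFreeArraySize
      bool exhausted = mapped_user + mapped_meta > kRegionSize - kFreeArraySize;
      printf("exhausted: %s\n", exhausted ? "yes" : "no");  // yes: 4.0 > 3.75 GiB
      return 0;
    }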
diff --git a/lib/sanitizer_common/sanitizer_common.h b/lib/sanitizer_common/sanitizer_common.h
index 560c53b6400e5..d44c715138968 100644
--- a/lib/sanitizer_common/sanitizer_common.h
+++ b/lib/sanitizer_common/sanitizer_common.h
@@ -92,6 +92,9 @@ void *MmapFixedNoReserve(uptr fixed_addr, uptr size,
                          const char *name = nullptr);
 void *MmapNoReserveOrDie(uptr size, const char *mem_type);
 void *MmapFixedOrDie(uptr fixed_addr, uptr size);
+// Behaves just like MmapFixedOrDie, but tolerates out of memory condition, in
+// that case returns nullptr.
+void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size);
 void *MmapFixedNoAccess(uptr fixed_addr, uptr size,
                         const char *name = nullptr);
 void *MmapNoAccess(uptr size);
 // Map aligned chunk of address space; size and alignment are powers of two.
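The contract of the new MmapFixedOrDieOnFatalError() is: map at the fixed address, return nullptr on ENOMEM, die on any other error; the sanitizer_posix.cc change later in this diff implements it via MmapFixedImpl(). A rough stand-alone approximation using raw mmap on Linux (page rounding and the sanitizer reporting machinery are omitted):

    #include <sys/mman.h>
    #include <cerrno>
    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    // Sketch of the nullptr-on-ENOMEM contract; not the sanitizer's code.
    void *MmapFixedTolerateOOM(uintptr_t fixed_addr, size_t size) {
      void *p = mmap(reinterpret_cast<void *>(fixed_addr), size,
                     PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
      if (p == MAP_FAILED) {
        if (errno == ENOMEM) return nullptr;  // tolerated: caller handles it
        abort();                              // any other errno stays fatal
      }
      return p;
    }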
diff --git a/lib/sanitizer_common/sanitizer_common_interceptors.inc b/lib/sanitizer_common/sanitizer_common_interceptors.inc
index 6ca431d8ad822..459530aa95bac 100644
--- a/lib/sanitizer_common/sanitizer_common_interceptors.inc
+++ b/lib/sanitizer_common/sanitizer_common_interceptors.inc
@@ -224,16 +224,16 @@ bool PlatformHasDifferentMemcpyAndMemmove();
 #endif
 
 #ifndef COMMON_INTERCEPTOR_STRNDUP_IMPL
-#define COMMON_INTERCEPTOR_STRNDUP_IMPL(ctx, s, size)                         \
-  COMMON_INTERCEPTOR_ENTER(ctx, strndup, s, size);                            \
-  uptr copy_length = internal_strnlen(s, size);                               \
-  char *new_mem = (char *)WRAP(malloc)(copy_length + 1);                      \
-  if (common_flags()->intercept_strndup) {                                    \
-    COMMON_INTERCEPTOR_READ_STRING(ctx, s, Min(size, copy_length + 1));       \
-  }                                                                           \
-  COMMON_INTERCEPTOR_COPY_STRING(ctx, new_mem, s, copy_length);               \
-  internal_memcpy(new_mem, s, copy_length);                                   \
-  new_mem[copy_length] = '\0';                                                \
+#define COMMON_INTERCEPTOR_STRNDUP_IMPL(ctx, s, size)                          \
+  COMMON_INTERCEPTOR_ENTER(ctx, strndup, s, size);                             \
+  uptr copy_length = internal_strnlen(s, size);                                \
+  char *new_mem = (char *)WRAP(malloc)(copy_length + 1);                       \
+  if (common_flags()->intercept_strndup) {                                     \
+    COMMON_INTERCEPTOR_READ_STRING(ctx, s, Min(size, copy_length + 1));        \
+  }                                                                            \
+  COMMON_INTERCEPTOR_COPY_STRING(ctx, new_mem, s, copy_length);                \
+  internal_memcpy(new_mem, s, copy_length);                                    \
+  new_mem[copy_length] = '\0';                                                 \
   return new_mem;
 #endif
@@ -6199,6 +6199,57 @@ INTERCEPTOR(int, mprobe, void *ptr) {
 }
 #endif
 
+INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) {
+  void *ctx;
+  COMMON_INTERCEPTOR_ENTER(ctx, wcslen, s);
+  SIZE_T res = REAL(wcslen)(s);
+  COMMON_INTERCEPTOR_READ_RANGE(ctx, s, sizeof(wchar_t) * (res + 1));
+  return res;
+}
+
+INTERCEPTOR(SIZE_T, wcsnlen, const wchar_t *s, SIZE_T n) {
+  void *ctx;
+  COMMON_INTERCEPTOR_ENTER(ctx, wcsnlen, s, n);
+  SIZE_T res = REAL(wcsnlen)(s, n);
+  COMMON_INTERCEPTOR_READ_RANGE(ctx, s, sizeof(wchar_t) * Min(res + 1, n));
+  return res;
+}
+#define INIT_WCSLEN                                                           \
+  COMMON_INTERCEPT_FUNCTION(wcslen);                                          \
+  COMMON_INTERCEPT_FUNCTION(wcsnlen);
+
+#if SANITIZER_INTERCEPT_WCSCAT
+INTERCEPTOR(wchar_t *, wcscat, wchar_t *dst, const wchar_t *src) {
+  void *ctx;
+  COMMON_INTERCEPTOR_ENTER(ctx, wcscat, dst, src);
+  SIZE_T src_size = REAL(wcslen)(src);
+  SIZE_T dst_size = REAL(wcslen)(dst);
+  COMMON_INTERCEPTOR_READ_RANGE(ctx, src, (src_size + 1) * sizeof(wchar_t));
+  COMMON_INTERCEPTOR_READ_RANGE(ctx, dst, (dst_size + 1) * sizeof(wchar_t));
+  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst + dst_size,
+                                 (src_size + 1) * sizeof(wchar_t));
+  return REAL(wcscat)(dst, src);  // NOLINT
+}
+
+INTERCEPTOR(wchar_t *, wcsncat, wchar_t *dst, const wchar_t *src, SIZE_T n) {
+  void *ctx;
+  COMMON_INTERCEPTOR_ENTER(ctx, wcsncat, dst, src, n);
+  SIZE_T src_size = REAL(wcsnlen)(src, n);
+  SIZE_T dst_size = REAL(wcslen)(dst);
+  COMMON_INTERCEPTOR_READ_RANGE(ctx, src,
+                                Min(src_size + 1, n) * sizeof(wchar_t));
+  COMMON_INTERCEPTOR_READ_RANGE(ctx, dst, (dst_size + 1) * sizeof(wchar_t));
+  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst + dst_size,
+                                 (src_size + 1) * sizeof(wchar_t));
+  return REAL(wcsncat)(dst, src, n);  // NOLINT
+}
+#define INIT_WCSCAT                                                           \
+  COMMON_INTERCEPT_FUNCTION(wcscat);                                          \
+  COMMON_INTERCEPT_FUNCTION(wcsncat);
+#else
+#define INIT_WCSCAT
+#endif
+
 static void InitializeCommonInterceptors() {
   static u64 metadata_mem[sizeof(MetadataHashMap) / sizeof(u64) + 1];
   interceptor_metadata_map = new((void *)&metadata_mem) MetadataHashMap();
@@ -6403,4 +6454,6 @@ static void InitializeCommonInterceptors() {
   INIT_UTMP;
   INIT_UTMPX;
   INIT_GETLOADAVG;
+  INIT_WCSLEN;
+  INIT_WCSCAT;
 }
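The wcsnlen interceptor marks Min(res + 1, n) wide characters as read: the terminator counts only when it was actually reached within the first n characters. A small demonstration of those bounds with the POSIX.1-2008 wcsnlen:

    #include <cstdio>
    #include <cwchar>

    int main() {
      const wchar_t *s = L"abc";  // 3 characters plus the terminator
      size_t a = wcsnlen(s, 10);  // res = 3; up to Min(3 + 1, 10) = 4 chars read
      size_t b = wcsnlen(s, 2);   // res = 2; up to Min(2 + 1, 2)  = 2 chars read
      printf("%zu %zu\n", a, b);  // prints: 3 2
      return 0;
    }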
diff --git a/lib/sanitizer_common/sanitizer_platform_interceptors.h b/lib/sanitizer_common/sanitizer_platform_interceptors.h
index a95497467d61b..1bc43e817230b 100644
--- a/lib/sanitizer_common/sanitizer_platform_interceptors.h
+++ b/lib/sanitizer_common/sanitizer_platform_interceptors.h
@@ -354,5 +354,6 @@
 #define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC)
 #define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC)
 #define SANITIZER_INTERCEPT_MCHECK_MPROBE SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_WCSCAT SI_NOT_WINDOWS
 
 #endif  // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H
diff --git a/lib/sanitizer_common/sanitizer_posix.cc b/lib/sanitizer_common/sanitizer_posix.cc
index 87c5b9add5cf7..63f1bf713b245 100644
--- a/lib/sanitizer_common/sanitizer_posix.cc
+++ b/lib/sanitizer_common/sanitizer_posix.cc
@@ -129,7 +129,7 @@ void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
                              PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANON, -1, 0);
   int reserrno;
-  if (internal_iserror(res, &reserrno))
+  if (UNLIKELY(internal_iserror(res, &reserrno)))
     ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno, raw_report);
   IncreaseTotalMmap(size);
   return (void *)res;
@@ -138,7 +138,7 @@ void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
 void UnmapOrDie(void *addr, uptr size) {
   if (!addr || !size) return;
   uptr res = internal_munmap(addr, size);
-  if (internal_iserror(res)) {
+  if (UNLIKELY(internal_iserror(res))) {
     Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
            SanitizerToolName, size, size, addr);
     CHECK("unable to unmap" && 0);
@@ -152,7 +152,7 @@ void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
                              PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANON, -1, 0);
   int reserrno;
-  if (internal_iserror(res, &reserrno)) {
+  if (UNLIKELY(internal_iserror(res, &reserrno))) {
     if (reserrno == ENOMEM)
       return nullptr;
     ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno);
@@ -170,15 +170,15 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
   CHECK(IsPowerOfTwo(alignment));
   uptr map_size = size + alignment;
   uptr map_res = (uptr)MmapOrDieOnFatalError(map_size, mem_type);
-  if (!map_res)
+  if (UNLIKELY(!map_res))
     return nullptr;
   uptr map_end = map_res + map_size;
   uptr res = map_res;
-  if (res & (alignment - 1))  // Not aligned.
-    res = (map_res + alignment) & ~(alignment - 1);
-  uptr end = res + size;
-  if (res != map_res)
+  if (!IsAligned(res, alignment)) {
+    res = (map_res + alignment - 1) & ~(alignment - 1);
     UnmapOrDie((void*)map_res, res - map_res);
+  }
+  uptr end = res + size;
   if (end != map_end)
     UnmapOrDie((void*)end, map_end - end);
   return (void*)res;
@@ -192,13 +192,13 @@ void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
                              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                              -1, 0);
   int reserrno;
-  if (internal_iserror(p, &reserrno))
+  if (UNLIKELY(internal_iserror(p, &reserrno)))
     ReportMmapFailureAndDie(size, mem_type, "allocate noreserve", reserrno);
   IncreaseTotalMmap(size);
   return (void *)p;
 }
 
-void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
+void *MmapFixedImpl(uptr fixed_addr, uptr size, bool tolerate_enomem) {
   uptr PageSize = GetPageSizeCached();
   uptr p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)),
                          RoundUpTo(size, PageSize),
@@ -206,8 +206,10 @@ void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
                          MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                          -1, 0);
   int reserrno;
-  if (internal_iserror(p, &reserrno)) {
-    char mem_type[30];
+  if (UNLIKELY(internal_iserror(p, &reserrno))) {
+    if (tolerate_enomem && reserrno == ENOMEM)
+      return nullptr;
+    char mem_type[40];
     internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
                       fixed_addr);
     ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno);
@@ -216,6 +218,14 @@ void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
   return (void *)p;
 }
 
+void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
+  return MmapFixedImpl(fixed_addr, size, false /*tolerate_enomem*/);
+}
+
+void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) {
+  return MmapFixedImpl(fixed_addr, size, true /*tolerate_enomem*/);
+}
+
 bool MprotectNoAccess(uptr addr, uptr size) {
   return 0 == internal_mprotect((void*)addr, size, PROT_NONE);
 }
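The rewritten fix-up in MmapAlignedOrDieOnFatalError() rounds the mapping base up with the usual power-of-two idiom, (x + a - 1) & ~(a - 1), and unmaps the leading slack only when the base was actually misaligned. A quick check of the arithmetic:

    #include <cstdint>
    #include <cstdio>

    // Round x up to the next multiple of a power-of-two alignment a.
    static uint64_t RoundUpToSketch(uint64_t x, uint64_t a) {
      return (x + a - 1) & ~(a - 1);
    }

    int main() {
      printf("%#llx\n", (unsigned long long)RoundUpToSketch(0x1234, 0x1000));  // 0x2000
      printf("%#llx\n", (unsigned long long)RoundUpToSketch(0x2000, 0x1000));  // 0x2000 (already aligned)
      return 0;
    }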
diff --git a/lib/sanitizer_common/sanitizer_win.cc b/lib/sanitizer_common/sanitizer_win.cc
index c6a146553412c..89d9cf61c3e49 100644
--- a/lib/sanitizer_common/sanitizer_win.cc
+++ b/lib/sanitizer_common/sanitizer_win.cc
@@ -235,6 +235,18 @@ void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
   return p;
 }
 
+void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) {
+  void *p = VirtualAlloc((LPVOID)fixed_addr, size,
+                         MEM_COMMIT, PAGE_READWRITE);
+  if (p == 0) {
+    char mem_type[30];
+    internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
+                      fixed_addr);
+    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
+  }
+  return p;
+}
+
 void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
   // FIXME: make this really NoReserve?
   return MmapOrDie(size, mem_type);
diff --git a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
index f256d8776d80d..0def8ee0fd70d 100644
--- a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -436,30 +436,31 @@ TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
   EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
 }
 
-template<class Allocator>
-void FailInAssertionOnOOM() {
-  Allocator a;
+// Don't test OOM conditions on Win64 because it causes other tests on the same
+// machine to OOM.
+#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
+TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
+  Allocator64 a;
   a.Init(kReleaseToOSIntervalNever);
-  SizeClassAllocatorLocalCache<Allocator> cache;
+  SizeClassAllocatorLocalCache<Allocator64> cache;
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);
   AllocatorStats stats;
   stats.Init();
+
   const size_t kNumChunks = 128;
   uint32_t chunks[kNumChunks];
+  bool allocation_failed = false;
   for (int i = 0; i < 1000000; i++) {
-    a.GetFromAllocator(&stats, 52, chunks, kNumChunks);
+    if (!a.GetFromAllocator(&stats, 52, chunks, kNumChunks)) {
+      allocation_failed = true;
+      break;
+    }
   }
+  EXPECT_EQ(allocation_failed, true);
 
   a.TestOnlyUnmap();
 }
-
-// Don't test OOM conditions on Win64 because it causes other tests on the same
-// machine to OOM.
-#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
-TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
-  EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
-}
 #endif
 
 TEST(SanitizerCommon, LargeMmapAllocator) {
@@ -970,9 +971,9 @@ TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
   const uptr kAllocationSize = SpecialSizeClassMap::Size(kClassID);
   ASSERT_LT(2 * kAllocationSize, kRegionSize);
   ASSERT_GT(3 * kAllocationSize, kRegionSize);
-  cache.Allocate(a, kClassID);
-  EXPECT_DEATH(cache.Allocate(a, kClassID) && cache.Allocate(a, kClassID),
-               "The process has exhausted");
+  EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
+  EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
+  EXPECT_EQ(cache.Allocate(a, kClassID), nullptr);
 
   const uptr Class2 = 100;
   const uptr Size2 = SpecialSizeClassMap::Size(Class2);
@@ -980,11 +981,12 @@ TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
   char *p[7];
   for (int i = 0; i < 7; i++) {
     p[i] = (char*)cache.Allocate(a, Class2);
+    EXPECT_NE(p[i], nullptr);
     fprintf(stderr, "p[%d] %p s = %lx\n", i, (void*)p[i], Size2);
     p[i][Size2 - 1] = 42;
     if (i) ASSERT_LT(p[i - 1], p[i]);
   }
-  EXPECT_DEATH(cache.Allocate(a, Class2), "The process has exhausted");
+  EXPECT_EQ(cache.Allocate(a, Class2), nullptr);
   cache.Deallocate(a, Class2, p[0]);
   cache.Drain(a);
   ASSERT_EQ(p[6][Size2 - 1], 42);