Diffstat (limited to 'lib/sanitizer_common/sanitizer_allocator_primary32.h')
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_primary32.h  | 21
1 file changed, 15 insertions, 6 deletions
diff --git a/lib/sanitizer_common/sanitizer_allocator_primary32.h b/lib/sanitizer_common/sanitizer_allocator_primary32.h
index e13510ba33b91..d3949cc057345 100644
--- a/lib/sanitizer_common/sanitizer_allocator_primary32.h
+++ b/lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -24,7 +24,8 @@ template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;
// be returned by MmapOrDie().
//
// Region:
-// a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
+// a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
+// kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a ByteMap possible_regions to store the size classes of each Region.
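The comment change above tracks the switch from MmapAlignedOrDie(), which aborts on any mapping failure, to MmapAlignedOrDieOnFatalError(), which can return null when the request simply cannot be satisfied (the later hunk checks for exactly that). A minimal standalone sketch of that contract, with std::aligned_alloc standing in for the real page-mapping call and AlignedMapOrNull being a hypothetical name rather than the sanitizer API:

// Sketch only: aligned_alloc is a stand-in for the OS mapping primitive.
#include <cstdlib>

static void *AlignedMapOrNull(std::size_t size, std::size_t alignment) {
  // Return nullptr when the request cannot be satisfied, instead of
  // aborting the process the way an "OrDie" helper would.
  return std::aligned_alloc(alignment, size);
}

int main() {
  const std::size_t kRegionSize = 1 << 20;  // illustrative region size
  void *region = AlignedMapOrNull(kRegionSize, kRegionSize);
  if (!region)
    return 1;  // graceful out-of-memory path; the caller decides what to do
  std::free(region);
  return 0;
}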
@@ -149,8 +150,9 @@ class SizeClassAllocator32 {
CHECK_LT(class_id, kNumClasses);
SizeClassInfo *sci = GetSizeClassInfo(class_id);
SpinMutexLock l(&sci->mutex);
- if (sci->free_list.empty())
- PopulateFreeList(stat, c, sci, class_id);
+ if (sci->free_list.empty() &&
+ UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
+ return nullptr;
CHECK(!sci->free_list.empty());
TransferBatch *b = sci->free_list.front();
sci->free_list.pop_front();
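With PopulateFreeList() now returning bool, the allocation path above refills the free list only when it is empty and bails out with nullptr if the refill itself fails. A compressed sketch of that refill-or-fail shape, using stand-in container and callback types rather than the sanitizer's TransferBatch machinery:

#include <functional>
#include <vector>

// Illustrative only: mirrors "empty() && !PopulateFreeList(...) => nullptr".
void *PopOrNull(std::vector<void *> &free_list,
                const std::function<bool()> &populate) {
  // Refill only when empty; a failed refill means out of memory, so the
  // failure is reported to the caller rather than CHECK-failing.
  if (free_list.empty() && !populate())
    return nullptr;
  void *chunk = free_list.back();
  free_list.pop_back();
  return chunk;
}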
@@ -277,8 +279,10 @@ class SizeClassAllocator32 {
uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
CHECK_LT(class_id, kNumClasses);
- uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
- "SizeClassAllocator32"));
+ uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
+ kRegionSize, kRegionSize, "SizeClassAllocator32"));
+ if (UNLIKELY(!res))
+ return 0;
MapUnmapCallback().OnMap(res, kRegionSize);
stat->Add(AllocatorStatMapped, kRegionSize);
CHECK_EQ(0U, (res & (kRegionSize - 1)));
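AllocateRegion() now signals a failed mapping by returning 0 instead of dying, and the new checks are wrapped in UNLIKELY() to keep the out-of-memory branch off the hot path. The sanitizer runtime ships its own definition of that hint; the usual shape of such a macro is:

#if defined(__GNUC__) || defined(__clang__)
# define UNLIKELY(x) __builtin_expect(!!(x), 0)
#else
# define UNLIKELY(x) (x)
#endif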
@@ -291,16 +295,20 @@ class SizeClassAllocator32 {
return &size_class_info_array[class_id];
}
- void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
+ bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
SizeClassInfo *sci, uptr class_id) {
uptr size = ClassIdToSize(class_id);
uptr reg = AllocateRegion(stat, class_id);
+ if (UNLIKELY(!reg))
+ return false;
uptr n_chunks = kRegionSize / (size + kMetadataSize);
uptr max_count = TransferBatch::MaxCached(class_id);
TransferBatch *b = nullptr;
for (uptr i = reg; i < reg + n_chunks * size; i += size) {
if (!b) {
b = c->CreateBatch(class_id, this, (TransferBatch*)i);
+ if (!b)
+ return false;
b->Clear();
}
b->Add((void*)i);
@@ -314,6 +322,7 @@ class SizeClassAllocator32 {
CHECK_GT(b->Count(), 0);
sci->free_list.push_back(b);
}
+ return true;
}
ByteMap possible_regions;
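Taken together, the hunks turn a hard abort into a propagated failure: a failed mapping makes AllocateRegion() return 0, PopulateFreeList() then returns false, and Allocate() surfaces nullptr to its caller. A self-contained sketch of that chain, with std::malloc standing in for the aligned mapping and a plain deque standing in for the transfer-batch free list (none of these names are the real allocator's):

#include <cstdint>
#include <cstdlib>
#include <deque>

using uptr = std::uintptr_t;

struct MiniAllocator {
  std::deque<void *> free_list;

  uptr AllocateRegion(std::size_t region_size) {
    void *p = std::malloc(region_size);        // stand-in for the aligned mmap
    return p ? reinterpret_cast<uptr>(p) : 0;  // 0 signals failure upward
  }

  bool PopulateFreeList(std::size_t region_size, std::size_t chunk_size) {
    uptr reg = AllocateRegion(region_size);
    if (!reg)
      return false;                            // propagate the OOM
    for (uptr p = reg; p + chunk_size <= reg + region_size; p += chunk_size)
      free_list.push_back(reinterpret_cast<void *>(p));
    return true;
  }

  void *Allocate(std::size_t region_size, std::size_t chunk_size) {
    if (free_list.empty() && !PopulateFreeList(region_size, chunk_size))
      return nullptr;                          // surfaced to the caller
    void *chunk = free_list.front();
    free_list.pop_front();
    return chunk;
  }
};

A caller of Allocate() can then choose to report the failure or hand null back to the user, which is the kind of policy decision (e.g. an allocator_may_return_null-style flag) that this plumbing makes possible.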