Diffstat (limited to 'lib/sanitizer_common/sanitizer_allocator_secondary.h')
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_secondary.h | 35 +++++++++--------------------------
1 file changed, 9 insertions(+), 26 deletions(-)
diff --git a/lib/sanitizer_common/sanitizer_allocator_secondary.h b/lib/sanitizer_common/sanitizer_allocator_secondary.h
index 2c69f47ec4e68..261dfb5e1a285 100644
--- a/lib/sanitizer_common/sanitizer_allocator_secondary.h
+++ b/lib/sanitizer_common/sanitizer_allocator_secondary.h
@@ -17,17 +17,19 @@
 // This class can (de)allocate only large chunks of memory using mmap/unmap.
 // The main purpose of this allocator is to cover large and rare allocation
 // sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
-template <class MapUnmapCallback = NoOpMapUnmapCallback>
+template <class MapUnmapCallback = NoOpMapUnmapCallback,
+          class FailureHandlerT = ReturnNullOrDieOnFailure>
 class LargeMmapAllocator {
  public:
-  void InitLinkerInitialized(bool may_return_null) {
+  typedef FailureHandlerT FailureHandler;
+
+  void InitLinkerInitialized() {
     page_size_ = GetPageSizeCached();
-    atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
   }

-  void Init(bool may_return_null) {
+  void Init() {
     internal_memset(this, 0, sizeof(*this));
-    InitLinkerInitialized(may_return_null);
+    InitLinkerInitialized();
   }

   void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
@@ -37,11 +39,11 @@ class LargeMmapAllocator {
       map_size += alignment;
     // Overflow.
     if (map_size < size)
-      return ReturnNullOrDieOnBadRequest();
+      return FailureHandler::OnBadRequest();
     uptr map_beg = reinterpret_cast<uptr>(
         MmapOrDieOnFatalError(map_size, "LargeMmapAllocator"));
     if (!map_beg)
-      return ReturnNullOrDieOnOOM();
+      return FailureHandler::OnOOM();
     CHECK(IsAligned(map_beg, page_size_));
     MapUnmapCallback().OnMap(map_beg, map_size);
     uptr map_end = map_beg + map_size;
@@ -75,24 +77,6 @@ class LargeMmapAllocator {
     return reinterpret_cast<void*>(res);
   }

-  bool MayReturnNull() const {
-    return atomic_load(&may_return_null_, memory_order_acquire);
-  }
-
-  void *ReturnNullOrDieOnBadRequest() {
-    if (MayReturnNull()) return nullptr;
-    ReportAllocatorCannotReturnNull(false);
-  }
-
-  void *ReturnNullOrDieOnOOM() {
-    if (MayReturnNull()) return nullptr;
-    ReportAllocatorCannotReturnNull(true);
-  }
-
-  void SetMayReturnNull(bool may_return_null) {
-    atomic_store(&may_return_null_, may_return_null, memory_order_release);
-  }
-
   void Deallocate(AllocatorStats *stat, void *p) {
     Header *h = GetHeader(p);
     {
@@ -278,7 +262,6 @@
   struct Stats {
     uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
   } stats;
-  atomic_uint8_t may_return_null_;
   SpinMutex mutex_;
 };
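
Note: this change replaces LargeMmapAllocator's runtime may_return_null_ atomic with a compile-time failure-handler policy. The allocation path now calls static hooks on the FailureHandlerT template parameter (FailureHandler::OnBadRequest() on size overflow, FailureHandler::OnOOM() when the mapping fails) instead of branching on an atomic flag. Below is a minimal standalone sketch of that pattern; ToyLargeAllocator, ReturnNullOnFailure, and DieOnFailure are illustrative names, not part of sanitizer_common. Only the static OnBadRequest()/OnOOM() interface is taken from the diff.

// Sketch of the failure-handler-as-template-policy pattern in this patch.
// Hypothetical names; only the OnBadRequest()/OnOOM() shape comes from the diff.
#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Soft policy: report failure by returning null, like the old
// may_return_null == true behavior.
struct ReturnNullOnFailure {
  static void *OnBadRequest() { return nullptr; }
  static void *OnOOM() { return nullptr; }
};

// Hard policy: abort on failure, like the old may_return_null == false path
// that called ReportAllocatorCannotReturnNull().
struct DieOnFailure {
  static void *OnBadRequest() {
    std::fprintf(stderr, "allocator: invalid allocation request\n");
    std::abort();
  }
  static void *OnOOM() {
    std::fprintf(stderr, "allocator: out of memory\n");
    std::abort();
  }
};

// Toy allocator shaped like the patched LargeMmapAllocator: the failure
// policy is a type parameter, so the error paths compile down to direct
// calls with no atomic load.
template <class FailureHandler = ReturnNullOnFailure>
class ToyLargeAllocator {
 public:
  void *Allocate(std::size_t size, std::size_t alignment) {
    std::size_t map_size = size + alignment;
    if (map_size < size)                    // overflow check, as in the patch
      return FailureHandler::OnBadRequest();
    void *p = std::malloc(map_size);        // stand-in for MmapOrDieOnFatalError
    if (!p)
      return FailureHandler::OnOOM();
    return p;
  }
};

int main() {
  ToyLargeAllocator<ReturnNullOnFailure> tolerant;  // may return null
  ToyLargeAllocator<DieOnFailure> strict;           // aborts instead
  void *p = tolerant.Allocate(1 << 20, 4096);
  std::fprintf(stdout, "allocation %s\n", p ? "succeeded" : "returned null");
  std::free(p);
  (void)strict;
  return 0;
}

Baking the policy into the type removes the atomic load from the allocation path and lets the compiler inline the failure branch, at the cost of the runtime toggle the deleted SetMayReturnNull() used to provide.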