Diffstat (limited to 'lib/sanitizer_common')
19 files changed, 298 insertions, 145 deletions
diff --git a/lib/sanitizer_common/sanitizer_allocator.cc b/lib/sanitizer_common/sanitizer_allocator.cc
index d47b5b41413cc..db3ebb0336a9f 100644
--- a/lib/sanitizer_common/sanitizer_allocator.cc
+++ b/lib/sanitizer_common/sanitizer_allocator.cc
@@ -94,8 +94,7 @@ InternalAllocator *internal_allocator() {
     SpinMutexLock l(&internal_alloc_init_mu);
     if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
         0) {
-      internal_allocator_instance->Init(
-          /* may_return_null */ false, kReleaseToOSIntervalNever);
+      internal_allocator_instance->Init(kReleaseToOSIntervalNever);
       atomic_store(&internal_allocator_initialized, 1, memory_order_release);
     }
   }
@@ -108,9 +107,9 @@ static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
   if (cache == 0) {
     SpinMutexLock l(&internal_allocator_cache_mu);
     return internal_allocator()->Allocate(&internal_allocator_cache, size,
-                                          alignment, false);
+                                          alignment);
   }
-  return internal_allocator()->Allocate(cache, size, alignment, false);
+  return internal_allocator()->Allocate(cache, size, alignment);
 }
 
 static void *RawInternalRealloc(void *ptr, uptr size,
@@ -162,7 +161,7 @@ void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
 
 void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
   if (CallocShouldReturnNullDueToOverflow(count, size))
-    return internal_allocator()->ReturnNullOrDieOnBadRequest();
+    return InternalAllocator::FailureHandler::OnBadRequest();
   void *p = InternalAlloc(count * size, cache);
   if (p) internal_memset(p, 0, count * size);
   return p;
@@ -209,12 +208,15 @@ bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
   return (max / size) < n;
 }
 
-static atomic_uint8_t reporting_out_of_memory = {0};
+static atomic_uint8_t allocator_out_of_memory = {0};
+static atomic_uint8_t allocator_may_return_null = {0};
 
-bool IsReportingOOM() { return atomic_load_relaxed(&reporting_out_of_memory); }
+bool IsAllocatorOutOfMemory() {
+  return atomic_load_relaxed(&allocator_out_of_memory);
+}
 
-void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory) {
-  if (out_of_memory) atomic_store_relaxed(&reporting_out_of_memory, 1);
+// Prints error message and kills the program.
+void NORETURN ReportAllocatorCannotReturnNull() {
   Report("%s's allocator is terminating the process instead of returning 0\n",
          SanitizerToolName);
   Report("If you don't like this behavior set allocator_may_return_null=1\n");
@@ -222,4 +224,35 @@ void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory) {
   Die();
 }
 
+bool AllocatorMayReturnNull() {
+  return atomic_load(&allocator_may_return_null, memory_order_relaxed);
+}
+
+void SetAllocatorMayReturnNull(bool may_return_null) {
+  atomic_store(&allocator_may_return_null, may_return_null,
+               memory_order_relaxed);
+}
+
+void *ReturnNullOrDieOnFailure::OnBadRequest() {
+  if (AllocatorMayReturnNull())
+    return nullptr;
+  ReportAllocatorCannotReturnNull();
+}
+
+void *ReturnNullOrDieOnFailure::OnOOM() {
+  atomic_store_relaxed(&allocator_out_of_memory, 1);
+  if (AllocatorMayReturnNull())
+    return nullptr;
+  ReportAllocatorCannotReturnNull();
+}
+
+void *DieOnFailure::OnBadRequest() {
+  ReportAllocatorCannotReturnNull();
+}
+
+void *DieOnFailure::OnOOM() {
+  atomic_store_relaxed(&allocator_out_of_memory, 1);
+  ReportAllocatorCannotReturnNull();
+}
+
 } // namespace __sanitizer
diff --git a/lib/sanitizer_common/sanitizer_allocator.h b/lib/sanitizer_common/sanitizer_allocator.h
index 9a37a2f2145ff..f59c13d1c5a56 100644
--- a/lib/sanitizer_common/sanitizer_allocator.h
+++ b/lib/sanitizer_common/sanitizer_allocator.h
@@ -24,12 +24,28 @@
 
 namespace __sanitizer {
 
-// Returns true if ReportAllocatorCannotReturnNull(true) was called.
-// Can be use to avoid memory hungry operations.
-bool IsReportingOOM();
+// Since flags are immutable and allocator behavior can be changed at runtime
+// (unit tests or ASan on Android are some examples), allocator_may_return_null
+// flag value is cached here and can be altered later.
+bool AllocatorMayReturnNull();
+void SetAllocatorMayReturnNull(bool may_return_null);
 
-// Prints error message and kills the program.
-void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory);
+// Allocator failure handling policies:
+// Implements AllocatorMayReturnNull policy, returns null when the flag is set,
+// dies otherwise.
+struct ReturnNullOrDieOnFailure {
+  static void *OnBadRequest();
+  static void *OnOOM();
+};
+// Always dies on the failure.
+struct DieOnFailure {
+  static void *OnBadRequest();
+  static void *OnOOM();
+};
+
+// Returns true if allocator detected OOM condition. Can be used to avoid memory
+// hungry operations. Set when AllocatorReturnNullOrDieOnOOM() is called.
+bool IsAllocatorOutOfMemory();
 
 // Allocators call these callbacks on mmap/munmap.
 struct NoOpMapUnmapCallback {
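ReturnNullOrDieOnFailure and DieOnFailure are stateless policy types: an allocator accepts one as a template parameter and invokes its static members, so the failure behavior is fixed at compile time with no per-instance state. A minimal sketch of the pattern outside the sanitizer codebase; ToyAllocator, ReturnNullPolicy, and AbortPolicy are hypothetical names, and malloc stands in for the real mmap-based path:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Simplified stand-ins for the two policies declared above.
struct ReturnNullPolicy {
  static void *OnOOM() { return nullptr; }
};
struct AbortPolicy {
  static void *OnOOM() {
    std::fputs("allocator is terminating the process\n", stderr);
    std::abort();
  }
};

// A toy allocator parameterized the same way LargeMmapAllocator takes
// FailureHandlerT: the policy is consulted only on allocation failure.
template <class FailureHandler>
struct ToyAllocator {
  static void *Allocate(std::size_t size) {
    void *p = std::malloc(size);  // stand-in for the real mapping primitive
    return p ? p : FailureHandler::OnOOM();
  }
};

int main() {
  void *p = ToyAllocator<ReturnNullPolicy>::Allocate(1 << 20);  // null on OOM
  std::free(p);
  // ToyAllocator<AbortPolicy>::Allocate(...) would die instead of returning.
}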
diff --git a/lib/sanitizer_common/sanitizer_allocator_combined.h b/lib/sanitizer_common/sanitizer_allocator_combined.h
index 2c2390b3d2c6e..efd25cadfe74a 100644
--- a/lib/sanitizer_common/sanitizer_allocator_combined.h
+++ b/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -24,31 +24,26 @@ template <class PrimaryAllocator, class AllocatorCache,
           class SecondaryAllocator>  // NOLINT
 class CombinedAllocator {
  public:
-  void InitCommon(bool may_return_null, s32 release_to_os_interval_ms) {
-    primary_.Init(release_to_os_interval_ms);
-    atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
-  }
+  typedef typename SecondaryAllocator::FailureHandler FailureHandler;
 
-  void InitLinkerInitialized(
-      bool may_return_null, s32 release_to_os_interval_ms) {
-    secondary_.InitLinkerInitialized(may_return_null);
+  void InitLinkerInitialized(s32 release_to_os_interval_ms) {
+    primary_.Init(release_to_os_interval_ms);
+    secondary_.InitLinkerInitialized();
     stats_.InitLinkerInitialized();
-    InitCommon(may_return_null, release_to_os_interval_ms);
   }
 
-  void Init(bool may_return_null, s32 release_to_os_interval_ms) {
-    secondary_.Init(may_return_null);
+  void Init(s32 release_to_os_interval_ms) {
+    primary_.Init(release_to_os_interval_ms);
+    secondary_.Init();
     stats_.Init();
-    InitCommon(may_return_null, release_to_os_interval_ms);
   }
 
-  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
-                 bool cleared = false) {
+  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
     // Returning 0 on malloc(0) may break a lot of code.
     if (size == 0)
       size = 1;
     if (size + alignment < size)
-      return ReturnNullOrDieOnBadRequest();
+      return FailureHandler::OnBadRequest();
     uptr original_size = size;
     // If alignment requirements are to be fulfilled by the frontend allocator
     // rather than by the primary or secondary, passing an alignment lower than
@@ -56,49 +51,24 @@ class CombinedAllocator {
     // alignment check.
     if (alignment > 8)
       size = RoundUpTo(size, alignment);
-    void *res;
-    bool from_primary = primary_.CanAllocate(size, alignment);
     // The primary allocator should return a 2^x aligned allocation when
     // requested 2^x bytes, hence using the rounded up 'size' when being
     // serviced by the primary (this is no longer true when the primary is
     // using a non-fixed base address). The secondary takes care of the
     // alignment without such requirement, and allocating 'size' would use
     // extraneous memory, so we employ 'original_size'.
-    if (from_primary)
+    void *res;
+    if (primary_.CanAllocate(size, alignment))
       res = cache->Allocate(&primary_, primary_.ClassID(size));
     else
       res = secondary_.Allocate(&stats_, original_size, alignment);
+    if (!res)
+      return FailureHandler::OnOOM();
     if (alignment > 8)
       CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
-    // When serviced by the secondary, the chunk comes from a mmap allocation
-    // and will be zero'd out anyway. We only need to clear our the chunk if
-    // it was serviced by the primary, hence using the rounded up 'size'.
-    if (cleared && res && from_primary)
-      internal_bzero_aligned16(res, RoundUpTo(size, 16));
     return res;
   }
 
-  bool MayReturnNull() const {
-    return atomic_load(&may_return_null_, memory_order_acquire);
-  }
-
-  void *ReturnNullOrDieOnBadRequest() {
-    if (MayReturnNull())
-      return nullptr;
-    ReportAllocatorCannotReturnNull(false);
-  }
-
-  void *ReturnNullOrDieOnOOM() {
-    if (MayReturnNull())
-      return nullptr;
-    ReportAllocatorCannotReturnNull(true);
-  }
-
-  void SetMayReturnNull(bool may_return_null) {
-    secondary_.SetMayReturnNull(may_return_null);
-    atomic_store(&may_return_null_, may_return_null, memory_order_release);
-  }
-
   s32 ReleaseToOSIntervalMs() const {
     return primary_.ReleaseToOSIntervalMs();
   }
@@ -219,6 +189,5 @@ class CombinedAllocator {
   PrimaryAllocator primary_;
   SecondaryAllocator secondary_;
   AllocatorGlobalStats stats_;
-  atomic_uint8_t may_return_null_;
 };
 
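With the policy threaded through as SecondaryAllocator::FailureHandler, CombinedAllocator::Allocate now has a single failure path: try the primary, fall back to the secondary, and hand any null result to the policy. A condensed sketch of that control flow, using hypothetical Primary/Secondary stand-ins rather than the real size-class and large-mmap allocators:

#include <cstddef>
#include <cstdlib>

// Illustrative stand-ins only; thresholds and malloc are placeholders.
struct Primary {
  bool CanAllocate(std::size_t size, std::size_t) { return size <= 4096; }
  void *Allocate(std::size_t size) { return std::malloc(size); }
};
struct Secondary {
  void *Allocate(std::size_t size, std::size_t) { return std::malloc(size); }
};
struct ReturnNullHandler {
  static void *OnOOM() { return nullptr; }
};

template <class FailureHandler>
void *CombinedAllocate(Primary &primary, Secondary &secondary,
                       std::size_t size, std::size_t alignment) {
  void *res = primary.CanAllocate(size, alignment)
                  ? primary.Allocate(size)
                  : secondary.Allocate(size, alignment);
  // Both backends report failure by returning null; the policy then decides
  // whether the caller sees nullptr or the process dies.
  return res ? res : FailureHandler::OnOOM();
}

// Usage: void *p = CombinedAllocate<ReturnNullHandler>(primary, secondary, n, 8);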
diff --git a/lib/sanitizer_common/sanitizer_allocator_internal.h b/lib/sanitizer_common/sanitizer_allocator_internal.h
index d1890f20f8104..a791d0d948946 100644
--- a/lib/sanitizer_common/sanitizer_allocator_internal.h
+++ b/lib/sanitizer_common/sanitizer_allocator_internal.h
@@ -47,7 +47,8 @@ typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator>
     InternalAllocatorCache;
 
 typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache,
-                          LargeMmapAllocator<> > InternalAllocator;
+                          LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure>
+                          > InternalAllocator;
 
 void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr,
                     uptr alignment = 0);
diff --git a/lib/sanitizer_common/sanitizer_allocator_local_cache.h b/lib/sanitizer_common/sanitizer_allocator_local_cache.h
index b3729bf55dbb8..8fa62a3bf8292 100644
--- a/lib/sanitizer_common/sanitizer_allocator_local_cache.h
+++ b/lib/sanitizer_common/sanitizer_allocator_local_cache.h
@@ -144,8 +144,10 @@ struct SizeClassAllocator32LocalCache {
     CHECK_NE(class_id, 0UL);
     CHECK_LT(class_id, kNumClasses);
     PerClass *c = &per_class_[class_id];
-    if (UNLIKELY(c->count == 0))
-      Refill(allocator, class_id);
+    if (UNLIKELY(c->count == 0)) {
+      if (UNLIKELY(!Refill(allocator, class_id)))
+        return nullptr;
+    }
     stats_.Add(AllocatorStatAllocated, c->class_size);
     void *res = c->batch[--c->count];
     PREFETCH(c->batch[c->count - 1]);
@@ -227,14 +229,17 @@ struct SizeClassAllocator32LocalCache {
     Deallocate(allocator, batch_class_id, b);
   }
 
-  NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
+  NOINLINE bool Refill(SizeClassAllocator *allocator, uptr class_id) {
     InitCache();
     PerClass *c = &per_class_[class_id];
     TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
+    if (UNLIKELY(!b))
+      return false;
     CHECK_GT(b->Count(), 0);
     b->CopyToArray(c->batch);
     c->count = b->Count();
     DestroyBatch(class_id, allocator, b);
+    return true;
   }
 
   NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
@@ -244,6 +249,10 @@ struct SizeClassAllocator32LocalCache {
     uptr first_idx_to_drain = c->count - cnt;
     TransferBatch *b = CreateBatch(
         class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
+    // Failure to allocate a batch while releasing memory is non recoverable.
+    // TODO(alekseys): Figure out how to do it without allocating a new batch.
+    if (UNLIKELY(!b))
+      DieOnFailure::OnOOM();
     b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
                     &c->batch[first_idx_to_drain], cnt);
     c->count -= cnt;
diff --git a/lib/sanitizer_common/sanitizer_allocator_primary32.h b/lib/sanitizer_common/sanitizer_allocator_primary32.h
index e13510ba33b91..d3949cc057345 100644
--- a/lib/sanitizer_common/sanitizer_allocator_primary32.h
+++ b/lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -24,7 +24,8 @@ template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;
 // be returned by MmapOrDie().
 //
 // Region:
-// a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
+// a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
+// kRegionSize).
 // Since the regions are aligned by kRegionSize, there are exactly
 // kNumPossibleRegions possible regions in the address space and so we keep
 // a ByteMap possible_regions to store the size classes of each Region.
@@ -149,8 +150,9 @@ class SizeClassAllocator32 {
     CHECK_LT(class_id, kNumClasses);
     SizeClassInfo *sci = GetSizeClassInfo(class_id);
     SpinMutexLock l(&sci->mutex);
-    if (sci->free_list.empty())
-      PopulateFreeList(stat, c, sci, class_id);
+    if (sci->free_list.empty() &&
+        UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
+      return nullptr;
     CHECK(!sci->free_list.empty());
     TransferBatch *b = sci->free_list.front();
     sci->free_list.pop_front();
@@ -277,8 +279,10 @@ class SizeClassAllocator32 {
 
   uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
     CHECK_LT(class_id, kNumClasses);
-    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
-                                      "SizeClassAllocator32"));
+    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
+        kRegionSize, kRegionSize, "SizeClassAllocator32"));
+    if (UNLIKELY(!res))
+      return 0;
     MapUnmapCallback().OnMap(res, kRegionSize);
     stat->Add(AllocatorStatMapped, kRegionSize);
     CHECK_EQ(0U, (res & (kRegionSize - 1)));
@@ -291,16 +295,20 @@ class SizeClassAllocator32 {
     return &size_class_info_array[class_id];
   }
 
-  void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
+  bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                         SizeClassInfo *sci, uptr class_id) {
     uptr size = ClassIdToSize(class_id);
     uptr reg = AllocateRegion(stat, class_id);
+    if (UNLIKELY(!reg))
+      return false;
     uptr n_chunks = kRegionSize / (size + kMetadataSize);
     uptr max_count = TransferBatch::MaxCached(class_id);
     TransferBatch *b = nullptr;
     for (uptr i = reg; i < reg + n_chunks * size; i += size) {
       if (!b) {
         b = c->CreateBatch(class_id, this, (TransferBatch*)i);
+        if (!b)
+          return false;
         b->Clear();
       }
       b->Add((void*)i);
@@ -314,6 +322,7 @@ class SizeClassAllocator32 {
       CHECK_GT(b->Count(), 0);
       sci->free_list.push_back(b);
     }
+    return true;
   }
 
   ByteMap possible_regions;
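The same discipline now runs through the whole 32-bit primary: AllocateBatch, Refill, PopulateFreeList, and AllocateRegion each report failure upward instead of assuming that mmap succeeds, so a failed mapping can surface to the caller as a null allocation. A schematic of that bottom-up propagation under POSIX mmap; the names and the std::vector free list are illustrative, not the real SizeClassAllocator32 internals:

#include <cstddef>
#include <vector>
#include <sys/mman.h>

static std::vector<void *> free_list;  // stand-in for the per-class free list

static bool PopulateFreeList(std::size_t region_size, std::size_t chunk_size) {
  void *reg = mmap(nullptr, region_size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (reg == MAP_FAILED)
    return false;  // propagate the failure instead of dying here
  char *p = static_cast<char *>(reg);
  for (std::size_t off = 0; off + chunk_size <= region_size; off += chunk_size)
    free_list.push_back(p + off);
  return true;
}

void *AllocateChunk(std::size_t region_size, std::size_t chunk_size) {
  if (free_list.empty() && !PopulateFreeList(region_size, chunk_size))
    return nullptr;  // eventually surfaces as FailureHandler::OnOOM()
  void *res = free_list.back();
  free_list.pop_back();
  return res;
}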
diff --git a/lib/sanitizer_common/sanitizer_allocator_secondary.h b/lib/sanitizer_common/sanitizer_allocator_secondary.h
index 2c69f47ec4e68..261dfb5e1a285 100644
--- a/lib/sanitizer_common/sanitizer_allocator_secondary.h
+++ b/lib/sanitizer_common/sanitizer_allocator_secondary.h
@@ -17,17 +17,19 @@
 // This class can (de)allocate only large chunks of memory using mmap/unmap.
 // The main purpose of this allocator is to cover large and rare allocation
 // sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
-template <class MapUnmapCallback = NoOpMapUnmapCallback>
+template <class MapUnmapCallback = NoOpMapUnmapCallback,
+          class FailureHandlerT = ReturnNullOrDieOnFailure>
 class LargeMmapAllocator {
  public:
-  void InitLinkerInitialized(bool may_return_null) {
+  typedef FailureHandlerT FailureHandler;
+
+  void InitLinkerInitialized() {
     page_size_ = GetPageSizeCached();
-    atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
   }
 
-  void Init(bool may_return_null) {
+  void Init() {
     internal_memset(this, 0, sizeof(*this));
-    InitLinkerInitialized(may_return_null);
+    InitLinkerInitialized();
   }
 
   void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
@@ -37,11 +39,11 @@ class LargeMmapAllocator {
       map_size += alignment;
     // Overflow.
     if (map_size < size)
-      return ReturnNullOrDieOnBadRequest();
+      return FailureHandler::OnBadRequest();
     uptr map_beg = reinterpret_cast<uptr>(
         MmapOrDieOnFatalError(map_size, "LargeMmapAllocator"));
     if (!map_beg)
-      return ReturnNullOrDieOnOOM();
+      return FailureHandler::OnOOM();
     CHECK(IsAligned(map_beg, page_size_));
     MapUnmapCallback().OnMap(map_beg, map_size);
     uptr map_end = map_beg + map_size;
@@ -75,24 +77,6 @@ class LargeMmapAllocator {
     return reinterpret_cast<void*>(res);
   }
 
-  bool MayReturnNull() const {
-    return atomic_load(&may_return_null_, memory_order_acquire);
-  }
-
-  void *ReturnNullOrDieOnBadRequest() {
-    if (MayReturnNull()) return nullptr;
-    ReportAllocatorCannotReturnNull(false);
-  }
-
-  void *ReturnNullOrDieOnOOM() {
-    if (MayReturnNull()) return nullptr;
-    ReportAllocatorCannotReturnNull(true);
-  }
-
-  void SetMayReturnNull(bool may_return_null) {
-    atomic_store(&may_return_null_, may_return_null, memory_order_release);
-  }
-
   void Deallocate(AllocatorStats *stat, void *p) {
     Header *h = GetHeader(p);
     {
@@ -278,7 +262,6 @@ class LargeMmapAllocator {
   struct Stats {
     uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
   } stats;
-  atomic_uint8_t may_return_null_;
   SpinMutex mutex_;
 };
diff --git a/lib/sanitizer_common/sanitizer_atomic_clang.h b/lib/sanitizer_common/sanitizer_atomic_clang.h
index 38363e8755606..65b3a38f0d515 100644
--- a/lib/sanitizer_common/sanitizer_atomic_clang.h
+++ b/lib/sanitizer_common/sanitizer_atomic_clang.h
@@ -71,16 +71,25 @@ INLINE typename T::Type atomic_exchange(volatile T *a,
   return v;
 }
 
-template<typename T>
-INLINE bool atomic_compare_exchange_strong(volatile T *a,
-                                           typename T::Type *cmp,
+template <typename T>
+INLINE bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
                                            typename T::Type xchg,
                                            memory_order mo) {
   typedef typename T::Type Type;
   Type cmpv = *cmp;
-  Type prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
-  if (prev == cmpv)
-    return true;
+  Type prev;
+#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
+  if (sizeof(*a) == 8) {
+    Type volatile *val_ptr = const_cast<Type volatile *>(&a->val_dont_use);
+    prev = __mips_sync_val_compare_and_swap<u64>(
+        reinterpret_cast<u64 volatile *>(val_ptr), (u64)cmpv, (u64)xchg);
+  } else {
+    prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
+  }
+#else
+  prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
+#endif
+  if (prev == cmpv) return true;
   *cmp = prev;
   return false;
 }
diff --git a/lib/sanitizer_common/sanitizer_atomic_clang_other.h b/lib/sanitizer_common/sanitizer_atomic_clang_other.h
index 099b9f7ec8c69..d2acc311bf7da 100644
--- a/lib/sanitizer_common/sanitizer_atomic_clang_other.h
+++ b/lib/sanitizer_common/sanitizer_atomic_clang_other.h
@@ -17,6 +17,56 @@
 
 namespace __sanitizer {
 
+// MIPS32 does not support atomics > 4 bytes. To address this lack of
+// functionality, the sanitizer library provides helper methods which use an
+// internal spin lock mechanism to emulate atomic operations when the size is
+// 8 bytes.
+#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
+static void __spin_lock(volatile int *lock) {
+  while (__sync_lock_test_and_set(lock, 1))
+    while (*lock) {
+    }
+}
+
+static void __spin_unlock(volatile int *lock) { __sync_lock_release(lock); }
+
+
+// Make sure the lock is on its own cache line to prevent false sharing.
+// Put it inside a struct that is aligned and padded to the typical MIPS
+// cacheline which is 32 bytes.
+static struct {
+  int lock;
+  char pad[32 - sizeof(int)];
+} __attribute__((aligned(32))) lock = {0};
+
+template <class T>
+T __mips_sync_fetch_and_add(volatile T *ptr, T val) {
+  T ret;
+
+  __spin_lock(&lock.lock);
+
+  ret = *ptr;
+  *ptr = ret + val;
+
+  __spin_unlock(&lock.lock);
+
+  return ret;
+}
+
+template <class T>
+T __mips_sync_val_compare_and_swap(volatile T *ptr, T oldval, T newval) {
+  T ret;
+
+  __spin_lock(&lock.lock);
+
+  ret = *ptr;
+  if (ret == oldval) *ptr = newval;
+
+  __spin_unlock(&lock.lock);
+
+  return ret;
+}
+#endif
+
 INLINE void proc_yield(int cnt) {
   __asm__ __volatile__("" ::: "memory");
 }
@@ -53,8 +103,15 @@ INLINE typename T::Type atomic_load(
     // 64-bit load on 32-bit platform.
     // Gross, but simple and reliable.
     // Assume that it is not in read-only memory.
+#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
+    typename T::Type volatile *val_ptr =
+        const_cast<typename T::Type volatile *>(&a->val_dont_use);
+    v = __mips_sync_fetch_and_add<u64>(
+        reinterpret_cast<u64 volatile *>(val_ptr), 0);
+#else
     v = __sync_fetch_and_add(
         const_cast<typename T::Type volatile *>(&a->val_dont_use), 0);
+#endif
   }
   return v;
 }
@@ -84,7 +141,14 @@ INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
     typename T::Type cmp = a->val_dont_use;
     typename T::Type cur;
     for (;;) {
+#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
+      typename T::Type volatile *val_ptr =
+          const_cast<typename T::Type volatile *>(&a->val_dont_use);
+      cur = __mips_sync_val_compare_and_swap<u64>(
+          reinterpret_cast<u64 volatile *>(val_ptr), (u64)cmp, (u64)v);
+#else
       cur = __sync_val_compare_and_swap(&a->val_dont_use, cmp, v);
+#endif
       if (cmp == v)
         break;
       cmp = cur;
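The helpers above apply a general technique: when the ISA has no native wide atomics, a lock built from a narrower atomic primitive can serialize every access to the wide value. A portable C++11 rendering of the same idea, as a sketch only; the patch itself uses __sync builtins and a cache-line-padded lock, and correctness requires that all readers and writers of the value go through the same lock:

#include <atomic>
#include <cstdint>

static std::atomic_flag wide_lock = ATOMIC_FLAG_INIT;

// Emulated 64-bit compare-and-swap: returns the previous value, like
// __mips_sync_val_compare_and_swap; the caller checks prev == oldval.
static std::uint64_t EmulatedCas64(volatile std::uint64_t *ptr,
                                   std::uint64_t oldval,
                                   std::uint64_t newval) {
  while (wide_lock.test_and_set(std::memory_order_acquire)) {
    // spin until the holder releases the lock
  }
  std::uint64_t prev = *ptr;
  if (prev == oldval) *ptr = newval;
  wide_lock.clear(std::memory_order_release);
  return prev;
}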
diff --git a/lib/sanitizer_common/sanitizer_common.h b/lib/sanitizer_common/sanitizer_common.h
index 875a46009f494..560c53b6400e5 100644
--- a/lib/sanitizer_common/sanitizer_common.h
+++ b/lib/sanitizer_common/sanitizer_common.h
@@ -95,7 +95,9 @@ void *MmapFixedOrDie(uptr fixed_addr, uptr size);
 void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
 void *MmapNoAccess(uptr size);
 // Map aligned chunk of address space; size and alignment are powers of two.
-void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
+// Dies on all but out of memory errors, in the latter case returns nullptr.
+void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
+                                   const char *mem_type);
 // Disallow access to a memory range. Use MmapFixedNoAccess to allocate an
 // unaccessible memory.
 bool MprotectNoAccess(uptr addr, uptr size);
@@ -808,8 +810,11 @@ INLINE void LogMessageOnPrintf(const char *str) {}
 #if SANITIZER_LINUX
 // Initialize Android logging. Any writes before this are silently lost.
 void AndroidLogInit();
+void SetAbortMessage(const char *);
 #else
 INLINE void AndroidLogInit() {}
+// FIXME: MacOS implementation could use CRSetCrashLogMessage.
+INLINE void SetAbortMessage(const char *) {}
 #endif
 
 #if SANITIZER_ANDROID
@@ -919,6 +924,10 @@ const s32 kReleaseToOSIntervalNever = -1;
 
 void CheckNoDeepBind(const char *filename, int flag);
 
+// Returns the requested amount of random data (up to 256 bytes) that can then
+// be used to seed a PRNG.
+bool GetRandom(void *buffer, uptr length);
+
 } // namespace __sanitizer
 
 inline void *operator new(__sanitizer::operator_new_size_type size,
diff --git a/lib/sanitizer_common/sanitizer_linux.cc b/lib/sanitizer_common/sanitizer_linux.cc
index d31c49d694aef..a94a63c7f16de 100644
--- a/lib/sanitizer_common/sanitizer_linux.cc
+++ b/lib/sanitizer_common/sanitizer_linux.cc
@@ -1604,6 +1604,32 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding) {
   return 0;
 }
 
+bool GetRandom(void *buffer, uptr length) {
+  if (!buffer || !length || length > 256)
+    return false;
+#if defined(__NR_getrandom)
+  static atomic_uint8_t skip_getrandom_syscall;
+  if (!atomic_load_relaxed(&skip_getrandom_syscall)) {
+    // Up to 256 bytes, getrandom will not be interrupted.
+    uptr res = internal_syscall(SYSCALL(getrandom), buffer, length, 0);
+    int rverrno = 0;
+    if (internal_iserror(res, &rverrno) && rverrno == ENOSYS)
+      atomic_store_relaxed(&skip_getrandom_syscall, 1);
+    else if (res == length)
+      return true;
+  }
+#endif
+  uptr fd = internal_open("/dev/urandom", O_RDONLY);
+  if (internal_iserror(fd))
+    return false;
+  // internal_read deals with EINTR.
+  uptr res = internal_read(fd, buffer, length);
+  if (internal_iserror(res))
+    return false;
+  internal_close(fd);
+  return true;
+}
+
 } // namespace __sanitizer
 
 #endif // SANITIZER_FREEBSD || SANITIZER_LINUX
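GetRandom tries the getrandom(2) syscall first and permanently falls back to /dev/urandom once the kernel reports ENOSYS; requests are capped at 256 bytes, the size up to which getrandom is not interrupted. A userspace analogue of the same fallback strategy; GetRandomBytes is a hypothetical name, since the runtime's own version uses internal_syscall/internal_open rather than libc:

#include <cerrno>
#include <cstddef>
#include <fcntl.h>
#include <sys/syscall.h>
#include <unistd.h>

bool GetRandomBytes(void *buffer, std::size_t length) {
  if (!buffer || !length || length > 256)
    return false;
#if defined(SYS_getrandom)
  // Up to 256 bytes, getrandom will not be interrupted.
  long res = syscall(SYS_getrandom, buffer, length, 0);
  if (res == static_cast<long>(length))
    return true;
  if (res < 0 && errno != ENOSYS)
    return false;  // real failure; ENOSYS means fall through to urandom
#endif
  int fd = open("/dev/urandom", O_RDONLY);
  if (fd < 0)
    return false;
  ssize_t n = read(fd, buffer, length);
  close(fd);
  return n == static_cast<ssize_t>(length);
}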
diff --git a/lib/sanitizer_common/sanitizer_linux_libcdep.cc b/lib/sanitizer_common/sanitizer_linux_libcdep.cc
index 894013ddd8806..b9a48a1e496b8 100644
--- a/lib/sanitizer_common/sanitizer_linux_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_linux_libcdep.cc
@@ -551,6 +551,13 @@ void LogMessageOnPrintf(const char *str) {
   WriteToSyslog(str);
 }
 
+#if SANITIZER_ANDROID && __ANDROID_API__ >= 21
+extern "C" void android_set_abort_message(const char *msg);
+void SetAbortMessage(const char *str) { android_set_abort_message(str); }
+#else
+void SetAbortMessage(const char *str) {}
+#endif
+
 #endif // SANITIZER_LINUX
 
 } // namespace __sanitizer
diff --git a/lib/sanitizer_common/sanitizer_mac.cc b/lib/sanitizer_common/sanitizer_mac.cc
index a788a091592f9..b48238106dd9b 100644
--- a/lib/sanitizer_common/sanitizer_mac.cc
+++ b/lib/sanitizer_common/sanitizer_mac.cc
@@ -923,6 +923,11 @@ void CheckNoDeepBind(const char *filename, int flag) {
   // Do nothing.
 }
 
+// FIXME: implement on this platform.
+bool GetRandom(void *buffer, uptr length) {
+  UNIMPLEMENTED();
+}
+
 } // namespace __sanitizer
 
 #endif // SANITIZER_MAC
diff --git a/lib/sanitizer_common/sanitizer_posix.cc b/lib/sanitizer_common/sanitizer_posix.cc
index 4184a84c73f83..87c5b9add5cf7 100644
--- a/lib/sanitizer_common/sanitizer_posix.cc
+++ b/lib/sanitizer_common/sanitizer_posix.cc
@@ -164,11 +164,14 @@ void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
 // We want to map a chunk of address space aligned to 'alignment'.
 // We do it by maping a bit more and then unmaping redundant pieces.
 // We probably can do it with fewer syscalls in some OS-dependent way.
-void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
+void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
+                                   const char *mem_type) {
   CHECK(IsPowerOfTwo(size));
   CHECK(IsPowerOfTwo(alignment));
   uptr map_size = size + alignment;
-  uptr map_res = (uptr)MmapOrDie(map_size, mem_type);
+  uptr map_res = (uptr)MmapOrDieOnFatalError(map_size, mem_type);
+  if (!map_res)
+    return nullptr;
   uptr map_end = map_res + map_size;
   uptr res = map_res;
   if (res & (alignment - 1))  // Not aligned.
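The POSIX path keeps the classic over-map-and-trim approach to alignment: reserve size + alignment bytes, round the base up to the alignment, and release the unused head and tail; the behavioral change is only that an out-of-memory mmap now yields nullptr instead of dying. A standalone sketch of the address arithmetic, assuming POSIX mmap/munmap and power-of-two arguments:

#include <cstddef>
#include <cstdint>
#include <sys/mman.h>

// Map 'size' bytes aligned to 'alignment' (both powers of two), returning
// nullptr on OOM, mirroring MmapAlignedOrDieOnFatalError's new contract.
// Sketch only, not the runtime's implementation.
void *MapAligned(std::size_t size, std::size_t alignment) {
  std::size_t map_size = size + alignment;
  void *raw = mmap(nullptr, map_size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (raw == MAP_FAILED) return nullptr;
  std::uintptr_t map_res = reinterpret_cast<std::uintptr_t>(raw);
  std::uintptr_t res = (map_res + alignment - 1) & ~(alignment - 1);
  if (res != map_res)                        // unmap the misaligned head
    munmap(raw, res - map_res);
  std::uintptr_t map_end = map_res + map_size;
  if (map_end != res + size)                 // unmap the unused tail
    munmap(reinterpret_cast<void *>(res + size), map_end - (res + size));
  return reinterpret_cast<void *>(res);
}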
diff --git a/lib/sanitizer_common/sanitizer_posix_libcdep.cc b/lib/sanitizer_common/sanitizer_posix_libcdep.cc
index 5b1d53698707e..e113fb1093d43 100644
--- a/lib/sanitizer_common/sanitizer_posix_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_posix_libcdep.cc
@@ -189,25 +189,7 @@ void UnsetAlternateSignalStack() {
 
 static void MaybeInstallSigaction(int signum,
                                   SignalHandlerType handler) {
-  switch (GetHandleSignalMode(signum)) {
-    case kHandleSignalNo:
-      return;
-    case kHandleSignalYes: {
-      struct sigaction sigact;
-      internal_memset(&sigact, 0, sizeof(sigact));
-      CHECK_EQ(0, internal_sigaction(signum, nullptr, &sigact));
-      if (sigact.sa_flags & SA_SIGINFO) {
-        if (sigact.sa_sigaction) return;
-      } else {
-        if (sigact.sa_handler != SIG_DFL && sigact.sa_handler != SIG_IGN &&
-            sigact.sa_handler != SIG_ERR)
-          return;
-      }
-      break;
-    }
-    case kHandleSignalExclusive:
-      break;
-  }
+  if (GetHandleSignalMode(signum) == kHandleSignalNo) return;
 
   struct sigaction sigact;
   internal_memset(&sigact, 0, sizeof(sigact));
diff --git a/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc b/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
index d3c77b510d359..60ec7506c312f 100644
--- a/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
@@ -495,7 +495,7 @@ static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
     VReport(2, "Symbolizer is disabled.\n");
     return;
   }
-  if (IsReportingOOM()) {
+  if (IsAllocatorOutOfMemory()) {
     VReport(2, "Cannot use internal symbolizer: out of memory\n");
   } else if (SymbolizerTool *tool = InternalSymbolizer::get(allocator)) {
     VReport(2, "Using internal symbolizer.\n");
diff --git a/lib/sanitizer_common/sanitizer_win.cc b/lib/sanitizer_common/sanitizer_win.cc
index 506e7374a3298..c6a146553412c 100644
--- a/lib/sanitizer_common/sanitizer_win.cc
+++ b/lib/sanitizer_common/sanitizer_win.cc
@@ -131,18 +131,24 @@ void UnmapOrDie(void *addr, uptr size) {
   }
 }
 
+static void *ReturnNullptrOnOOMOrDie(uptr size, const char *mem_type,
+                                     const char *mmap_type) {
+  error_t last_error = GetLastError();
+  if (last_error == ERROR_NOT_ENOUGH_MEMORY)
+    return nullptr;
+  ReportMmapFailureAndDie(size, mem_type, mmap_type, last_error);
+}
+
 void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
   void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
-  if (rv == 0) {
-    error_t last_error = GetLastError();
-    if (last_error != ERROR_NOT_ENOUGH_MEMORY)
-      ReportMmapFailureAndDie(size, mem_type, "allocate", last_error);
-  }
+  if (rv == 0)
+    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
   return rv;
 }
 
 // We want to map a chunk of address space aligned to 'alignment'.
-void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
+void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
+                                   const char *mem_type) {
   CHECK(IsPowerOfTwo(size));
   CHECK(IsPowerOfTwo(alignment));
 
@@ -152,7 +158,7 @@ void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
   uptr mapped_addr =
       (uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
   if (!mapped_addr)
-    ReportMmapFailureAndDie(size, mem_type, "allocate aligned", GetLastError());
+    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");
 
   // If we got it right on the first try, return. Otherwise, unmap it and go to
   // the slow path.
@@ -172,8 +178,7 @@ void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
     mapped_addr =
         (uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
     if (!mapped_addr)
-      ReportMmapFailureAndDie(size, mem_type, "allocate aligned",
-                              GetLastError());
+      return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");
 
     // Find the aligned address.
     uptr aligned_addr = RoundUpTo(mapped_addr, alignment);
@@ -191,7 +196,7 @@ void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
 
   // Fail if we can't make this work quickly.
   if (retries == kMaxRetries && mapped_addr == 0)
-    ReportMmapFailureAndDie(size, mem_type, "allocate aligned", GetLastError());
+    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");
 
   return (void *)mapped_addr;
 }
@@ -1002,6 +1007,11 @@ void CheckNoDeepBind(const char *filename, int flag) {
   // Do nothing.
 }
 
+// FIXME: implement on this platform.
+bool GetRandom(void *buffer, uptr length) {
+  UNIMPLEMENTED();
+}
+
 } // namespace __sanitizer
 
 #endif // _WIN32
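On Windows the OOM check keys off the specific error code: only ERROR_NOT_ENOUGH_MEMORY is treated as a recoverable out-of-memory condition, and any other VirtualAlloc failure still kills the process. A minimal sketch of that pattern; TryAlloc is a hypothetical wrapper, assuming only the documented Win32 API:

#include <windows.h>
#include <cstdio>
#include <cstdlib>

// Return nullptr only for out-of-memory; treat every other failure as
// fatal, mirroring ReturnNullptrOnOOMOrDie above.
void *TryAlloc(SIZE_T size) {
  void *rv = VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT,
                          PAGE_READWRITE);
  if (rv) return rv;
  DWORD last_error = GetLastError();
  if (last_error == ERROR_NOT_ENOUGH_MEMORY)
    return nullptr;  // recoverable: the caller's failure policy decides
  std::fprintf(stderr, "fatal VirtualAlloc error: %lu\n",
               static_cast<unsigned long>(last_error));
  std::abort();
}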
diff --git a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
index b28159a2adafa..f256d8776d80d 100644
--- a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -426,8 +426,8 @@ TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
 TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
   TestMapUnmapCallback::map_count = 0;
   TestMapUnmapCallback::unmap_count = 0;
-  LargeMmapAllocator<TestMapUnmapCallback> a;
-  a.Init(/* may_return_null */ false);
+  LargeMmapAllocator<TestMapUnmapCallback, DieOnFailure> a;
+  a.Init();
   AllocatorStats stats;
   stats.Init();
   void *x = a.Allocate(&stats, 1 << 20, 1);
@@ -463,8 +463,8 @@ TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
 #endif
 
 TEST(SanitizerCommon, LargeMmapAllocator) {
-  LargeMmapAllocator<> a;
-  a.Init(/* may_return_null */ false);
+  LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a;
+  a.Init();
   AllocatorStats stats;
   stats.Init();
 
@@ -546,8 +546,9 @@ void TestCombinedAllocator() {
   typedef
       CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
       Allocator;
+  SetAllocatorMayReturnNull(true);
   Allocator *a = new Allocator;
-  a->Init(/* may_return_null */ true, kReleaseToOSIntervalNever);
+  a->Init(kReleaseToOSIntervalNever);
   std::mt19937 r;
 
   AllocatorCache cache;
@@ -561,7 +562,7 @@ void TestCombinedAllocator() {
   EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);
 
   // Set to false
-  a->SetMayReturnNull(false);
+  SetAllocatorMayReturnNull(false);
   EXPECT_DEATH(a->Allocate(&cache, -1, 1),
                "allocator is terminating the process");
 
@@ -873,8 +874,8 @@ TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
 }
 
 TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
-  LargeMmapAllocator<> a;
-  a.Init(/* may_return_null */ false);
+  LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a;
+  a.Init();
   AllocatorStats stats;
   stats.Init();
 
@@ -900,8 +901,8 @@ TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
 }
 
 TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
-  LargeMmapAllocator<> a;
-  a.Init(/* may_return_null */ false);
+  LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a;
+  a.Init();
   AllocatorStats stats;
   stats.Init();
 
diff --git a/lib/sanitizer_common/tests/sanitizer_common_test.cc b/lib/sanitizer_common/tests/sanitizer_common_test.cc
index ebc885db75258..93a8794eeb8f7 100644
--- a/lib/sanitizer_common/tests/sanitizer_common_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_common_test.cc
@@ -72,12 +72,12 @@ TEST(SanitizerCommon, SortTest) {
   EXPECT_TRUE(IsSorted(array, 2));
 }
 
-TEST(SanitizerCommon, MmapAlignedOrDie) {
+TEST(SanitizerCommon, MmapAlignedOrDieOnFatalError) {
   uptr PageSize = GetPageSizeCached();
   for (uptr size = 1; size <= 32; size *= 2) {
     for (uptr alignment = 1; alignment <= 32; alignment *= 2) {
       for (int iter = 0; iter < 100; iter++) {
-        uptr res = (uptr)MmapAlignedOrDie(
+        uptr res = (uptr)MmapAlignedOrDieOnFatalError(
             size * PageSize, alignment * PageSize, "MmapAlignedOrDieTest");
         EXPECT_EQ(0U, res % (alignment * PageSize));
         internal_memset((void*)res, 1, size * PageSize);
@@ -300,4 +300,21 @@ TEST(SanitizerCommon, InternalScopedString) {
   EXPECT_STREQ("012345678", str.data());
 }
 
+#if SANITIZER_LINUX
+TEST(SanitizerCommon, GetRandom) {
+  u8 buffer_1[32], buffer_2[32];
+  EXPECT_FALSE(GetRandom(nullptr, 32));
+  EXPECT_FALSE(GetRandom(buffer_1, 0));
+  EXPECT_FALSE(GetRandom(buffer_1, 512));
+  EXPECT_EQ(ARRAY_SIZE(buffer_1), ARRAY_SIZE(buffer_2));
+  for (uptr size = 4; size <= ARRAY_SIZE(buffer_1); size += 4) {
+    for (uptr i = 0; i < 100; i++) {
+      EXPECT_TRUE(GetRandom(buffer_1, size));
+      EXPECT_TRUE(GetRandom(buffer_2, size));
+      EXPECT_NE(internal_memcmp(buffer_1, buffer_2, size), 0);
+    }
+  }
+}
+#endif
+
 } // namespace __sanitizer
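The test updates illustrate the point of caching allocator_may_return_null in sanitizer_common: the null-or-die decision is one process-wide setting that can be flipped at runtime via SetAllocatorMayReturnNull(), rather than a per-allocator constructor argument. A self-contained analogue of that mechanism; the names are local to this sketch, not the runtime's API:

#include <atomic>
#include <cstdio>
#include <cstdlib>

// One process-wide atomic that any allocator consults at failure time,
// mirroring AllocatorMayReturnNull()/SetAllocatorMayReturnNull() above.
static std::atomic<bool> allocator_may_return_null{false};

void SetMayReturnNull(bool v) {
  allocator_may_return_null.store(v, std::memory_order_relaxed);
}

void *OnOOM() {
  if (allocator_may_return_null.load(std::memory_order_relaxed))
    return nullptr;
  std::fputs("allocator is terminating the process\n", stderr);
  std::abort();
}

int main() {
  SetMayReturnNull(true);
  void *p = OnOOM();        // simulated failure: returns nullptr
  std::printf("%p\n", p);
  SetMayReturnNull(false);  // now the same failure would abort
}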