summaryrefslogtreecommitdiff
path: root/lib/scudo/standalone
diff options
context:
space:
mode:
Diffstat (limited to 'lib/scudo/standalone')
-rw-r--r--lib/scudo/standalone/allocator_config.h4
-rw-r--r--lib/scudo/standalone/checksum.cpp (renamed from lib/scudo/standalone/checksum.cc)2
-rw-r--r--lib/scudo/standalone/chunk.h26
-rw-r--r--lib/scudo/standalone/combined.h131
-rw-r--r--lib/scudo/standalone/common.cpp (renamed from lib/scudo/standalone/common.cc)2
-rw-r--r--lib/scudo/standalone/crc32_hw.cpp (renamed from lib/scudo/standalone/crc32_hw.cc)2
-rw-r--r--lib/scudo/standalone/flags.cpp (renamed from lib/scudo/standalone/flags.cc)2
-rw-r--r--lib/scudo/standalone/flags_parser.cpp (renamed from lib/scudo/standalone/flags_parser.cc)2
-rw-r--r--lib/scudo/standalone/fuchsia.cpp (renamed from lib/scudo/standalone/fuchsia.cc)14
-rw-r--r--lib/scudo/standalone/internal_defs.h4
-rw-r--r--lib/scudo/standalone/linux.cpp (renamed from lib/scudo/standalone/linux.cc)4
-rw-r--r--lib/scudo/standalone/linux.h2
-rw-r--r--lib/scudo/standalone/list.h12
-rw-r--r--lib/scudo/standalone/local_cache.h16
-rw-r--r--lib/scudo/standalone/mutex.h6
-rw-r--r--lib/scudo/standalone/primary32.h60
-rw-r--r--lib/scudo/standalone/primary64.h89
-rw-r--r--lib/scudo/standalone/quarantine.h20
-rw-r--r--lib/scudo/standalone/report.cpp (renamed from lib/scudo/standalone/report.cc)2
-rw-r--r--lib/scudo/standalone/secondary.cpp (renamed from lib/scudo/standalone/secondary.cc)27
-rw-r--r--lib/scudo/standalone/secondary.h5
-rw-r--r--lib/scudo/standalone/size_class_map.h16
-rw-r--r--lib/scudo/standalone/stats.h2
-rw-r--r--lib/scudo/standalone/string_utils.cpp (renamed from lib/scudo/standalone/string_utils.cc)24
-rw-r--r--lib/scudo/standalone/string_utils.h1
-rw-r--r--lib/scudo/standalone/tsd_exclusive.h4
-rw-r--r--lib/scudo/standalone/tsd_shared.h5
-rw-r--r--lib/scudo/standalone/wrappers_c.cpp (renamed from lib/scudo/standalone/wrappers_c.cc)2
-rw-r--r--lib/scudo/standalone/wrappers_c.inc16
-rw-r--r--lib/scudo/standalone/wrappers_c_bionic.cpp (renamed from lib/scudo/standalone/wrappers_c_bionic.cc)2
-rw-r--r--lib/scudo/standalone/wrappers_cpp.cpp (renamed from lib/scudo/standalone/wrappers_cpp.cc)2
31 files changed, 292 insertions, 214 deletions
diff --git a/lib/scudo/standalone/allocator_config.h b/lib/scudo/standalone/allocator_config.h
index 06ec4f3f795ad..62c6f2875106e 100644
--- a/lib/scudo/standalone/allocator_config.h
+++ b/lib/scudo/standalone/allocator_config.h
@@ -53,8 +53,8 @@ struct AndroidSvelteConfig {
// 512MB regions
typedef SizeClassAllocator64<SizeClassMap, 29U> Primary;
#else
- // 256KB regions
- typedef SizeClassAllocator32<SizeClassMap, 18U> Primary;
+ // 64KB regions
+ typedef SizeClassAllocator32<SizeClassMap, 16U> Primary;
#endif
template <class A>
using TSDRegistryT = TSDRegistrySharedT<A, 1U>; // Shared, only 1 TSD.
diff --git a/lib/scudo/standalone/checksum.cc b/lib/scudo/standalone/checksum.cpp
index 0896d5bdccd5a..f713f5a816093 100644
--- a/lib/scudo/standalone/checksum.cc
+++ b/lib/scudo/standalone/checksum.cpp
@@ -1,4 +1,4 @@
-//===-- checksum.cc ---------------------------------------------*- C++ -*-===//
+//===-- checksum.cpp --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/lib/scudo/standalone/chunk.h b/lib/scudo/standalone/chunk.h
index 76ef661b0dc55..9ae75823ba778 100644
--- a/lib/scudo/standalone/chunk.h
+++ b/lib/scudo/standalone/chunk.h
@@ -22,22 +22,22 @@ extern Checksum HashAlgorithm;
INLINE u16 computeChecksum(u32 Seed, uptr Value, uptr *Array, uptr ArraySize) {
// If the hardware CRC32 feature is defined here, it was enabled everywhere,
- // as opposed to only for crc32_hw.cc. This means that other hardware specific
- // instructions were likely emitted at other places, and as a result there is
- // no reason to not use it here.
+ // as opposed to only for crc32_hw.cpp. This means that other hardware
+ // specific instructions were likely emitted at other places, and as a result
+ // there is no reason to not use it here.
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
u32 Crc = static_cast<u32>(CRC32_INTRINSIC(Seed, Value));
for (uptr I = 0; I < ArraySize; I++)
Crc = static_cast<u32>(CRC32_INTRINSIC(Crc, Array[I]));
- return static_cast<u16>((Crc & 0xffff) ^ (Crc >> 16));
+ return static_cast<u16>(Crc ^ (Crc >> 16));
#else
if (HashAlgorithm == Checksum::HardwareCRC32) {
u32 Crc = computeHardwareCRC32(Seed, Value);
for (uptr I = 0; I < ArraySize; I++)
Crc = computeHardwareCRC32(Crc, Array[I]);
- return static_cast<u16>((Crc & 0xffff) ^ (Crc >> 16));
+ return static_cast<u16>(Crc ^ (Crc >> 16));
} else {
- u16 Checksum = computeBSDChecksum(static_cast<u16>(Seed & 0xffff), Value);
+ u16 Checksum = computeBSDChecksum(static_cast<u16>(Seed), Value);
for (uptr I = 0; I < ArraySize; I++)
Checksum = computeBSDChecksum(Checksum, Array[I]);
return Checksum;
@@ -63,24 +63,24 @@ enum State : u8 { Available = 0, Allocated = 1, Quarantined = 2 };
typedef u64 PackedHeader;
// Update the 'Mask' constants to reflect changes in this structure.
struct UnpackedHeader {
- u64 Checksum : 16;
- u64 ClassId : 8;
- u64 SizeOrUnusedBytes : 20;
+ uptr ClassId : 8;
u8 State : 2;
u8 Origin : 2;
- u64 Offset : 16;
+ uptr SizeOrUnusedBytes : 20;
+ uptr Offset : 16;
+ uptr Checksum : 16;
};
typedef atomic_u64 AtomicPackedHeader;
COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));
// Those constants are required to silence some -Werror=conversion errors when
// assigning values to the related bitfield variables.
-constexpr uptr ChecksumMask = (1UL << 16) - 1;
constexpr uptr ClassIdMask = (1UL << 8) - 1;
+constexpr u8 StateMask = (1U << 2) - 1;
+constexpr u8 OriginMask = (1U << 2) - 1;
constexpr uptr SizeOrUnusedBytesMask = (1UL << 20) - 1;
-constexpr uptr StateMask = (1UL << 2) - 1;
-constexpr uptr OriginMask = (1UL << 2) - 1;
constexpr uptr OffsetMask = (1UL << 16) - 1;
+constexpr uptr ChecksumMask = (1UL << 16) - 1;
constexpr uptr getHeaderSize() {
return roundUpTo(sizeof(PackedHeader), 1U << SCUDO_MIN_ALIGNMENT_LOG);
diff --git a/lib/scudo/standalone/combined.h b/lib/scudo/standalone/combined.h
index 4c1c1196bf8f7..60be1dd20d398 100644
--- a/lib/scudo/standalone/combined.h
+++ b/lib/scudo/standalone/combined.h
@@ -46,8 +46,8 @@ public:
Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
void *BlockBegin = Allocator::getBlockBegin(Ptr, &NewHeader);
- const uptr ClassId = Header.ClassId;
- if (ClassId)
+ const uptr ClassId = NewHeader.ClassId;
+ if (LIKELY(ClassId))
Cache.deallocate(ClassId, BlockBegin);
else
Allocator.Secondary.deallocate(BlockBegin);
@@ -123,14 +123,16 @@ public:
Options.ZeroContents = getFlags()->zero_contents;
Options.DeallocTypeMismatch = getFlags()->dealloc_type_mismatch;
Options.DeleteSizeMismatch = getFlags()->delete_size_mismatch;
- Options.QuarantineMaxChunkSize = getFlags()->quarantine_max_chunk_size;
+ Options.QuarantineMaxChunkSize =
+ static_cast<u32>(getFlags()->quarantine_max_chunk_size);
Stats.initLinkerInitialized();
Primary.initLinkerInitialized(getFlags()->release_to_os_interval_ms);
Secondary.initLinkerInitialized(&Stats);
- Quarantine.init(getFlags()->quarantine_size_kb << 10,
- getFlags()->thread_local_quarantine_size_kb << 10);
+ Quarantine.init(
+ static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
+ static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
}
void reset() { memset(this, 0, sizeof(*this)); }
@@ -165,16 +167,17 @@ public:
return nullptr;
reportAlignmentTooBig(Alignment, MaxAlignment);
}
- if (UNLIKELY(Alignment < MinAlignment))
+ if (Alignment < MinAlignment)
Alignment = MinAlignment;
// If the requested size happens to be 0 (more common than you might think),
- // allocate 1 byte on top of the header. Then add the extra bytes required
- // to fulfill the alignment requirements: we allocate enough to be sure that
- // there will be an address in the block that will satisfy the alignment.
+ // allocate MinAlignment bytes on top of the header. Then add the extra
+ // bytes required to fulfill the alignment requirements: we allocate enough
+ // to be sure that there will be an address in the block that will satisfy
+ // the alignment.
const uptr NeededSize =
- Chunk::getHeaderSize() + roundUpTo(Size ? Size : 1, MinAlignment) +
- ((Alignment > MinAlignment) ? (Alignment - Chunk::getHeaderSize()) : 0);
+ roundUpTo(Size, MinAlignment) +
+ ((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());
// Takes care of extravagantly large sizes as well as integer overflows.
if (UNLIKELY(Size >= MaxAllowedMallocSize ||
@@ -186,9 +189,10 @@ public:
void *Block;
uptr ClassId;
- uptr BlockEnd = 0;
- if (PrimaryT::canAllocate(NeededSize)) {
+ uptr BlockEnd;
+ if (LIKELY(PrimaryT::canAllocate(NeededSize))) {
ClassId = SizeClassMap::getClassIdBySize(NeededSize);
+ DCHECK_NE(ClassId, 0U);
bool UnlockRequired;
auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
Block = TSD->Cache.allocate(ClassId);
@@ -205,17 +209,17 @@ public:
reportOutOfMemory(NeededSize);
}
- // We only need to zero the contents for Primary backed allocations.
- if ((ZeroContents || Options.ZeroContents) && ClassId)
+ // We only need to zero the contents for Primary backed allocations. This
+ // condition is not necessarily unlikely, but since memset is costly, we
+ // might as well mark it as such.
+ if (UNLIKELY((ZeroContents || Options.ZeroContents) && ClassId))
memset(Block, 0, PrimaryT::getSizeByClassId(ClassId));
Chunk::UnpackedHeader Header = {};
uptr UserPtr = reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
- // The following condition isn't necessarily "UNLIKELY".
- if (!isAligned(UserPtr, Alignment)) {
+ if (UNLIKELY(!isAligned(UserPtr, Alignment))) {
const uptr AlignedUserPtr = roundUpTo(UserPtr, Alignment);
const uptr Offset = AlignedUserPtr - UserPtr;
- Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
DCHECK_GT(Offset, 2 * sizeof(u32));
// The BlockMarker has no security purpose, but is specifically meant for
// the chunk iteration function that can be used in debugging situations.
@@ -224,16 +228,13 @@ public:
reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
UserPtr = AlignedUserPtr;
+ Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
}
+ Header.ClassId = ClassId & Chunk::ClassIdMask;
Header.State = Chunk::State::Allocated;
Header.Origin = Origin & Chunk::OriginMask;
- if (ClassId) {
- Header.ClassId = ClassId & Chunk::ClassIdMask;
- Header.SizeOrUnusedBytes = Size & Chunk::SizeOrUnusedBytesMask;
- } else {
- Header.SizeOrUnusedBytes =
- (BlockEnd - (UserPtr + Size)) & Chunk::SizeOrUnusedBytesMask;
- }
+ Header.SizeOrUnusedBytes = (ClassId ? Size : BlockEnd - (UserPtr + Size)) &
+ Chunk::SizeOrUnusedBytesMask;
void *Ptr = reinterpret_cast<void *>(UserPtr);
Chunk::storeHeader(Cookie, Ptr, &Header);
@@ -310,18 +311,30 @@ public:
OldHeader.Origin, Chunk::Origin::Malloc);
}
- const uptr OldSize = getSize(OldPtr, &OldHeader);
- // If the new size is identical to the old one, or lower but within an
- // acceptable range, we just keep the old chunk, and update its header.
- if (NewSize == OldSize)
- return OldPtr;
- if (NewSize < OldSize) {
- const uptr Delta = OldSize - NewSize;
- if (Delta < (SizeClassMap::MaxSize / 2)) {
+ void *BlockBegin = getBlockBegin(OldPtr, &OldHeader);
+ uptr BlockEnd;
+ uptr OldSize;
+ const uptr ClassId = OldHeader.ClassId;
+ if (LIKELY(ClassId)) {
+ BlockEnd = reinterpret_cast<uptr>(BlockBegin) +
+ SizeClassMap::getSizeByClassId(ClassId);
+ OldSize = OldHeader.SizeOrUnusedBytes;
+ } else {
+ BlockEnd = SecondaryT::getBlockEnd(BlockBegin);
+ OldSize = BlockEnd -
+ (reinterpret_cast<uptr>(OldPtr) + OldHeader.SizeOrUnusedBytes);
+ }
+ // If the new chunk still fits in the previously allocated block (with a
+ // reasonable delta), we just keep the old block, and update the chunk
+ // header to reflect the size change.
+ if (reinterpret_cast<uptr>(OldPtr) + NewSize <= BlockEnd) {
+ const uptr Delta =
+ OldSize < NewSize ? NewSize - OldSize : OldSize - NewSize;
+ if (Delta <= SizeClassMap::MaxSize / 2) {
Chunk::UnpackedHeader NewHeader = OldHeader;
NewHeader.SizeOrUnusedBytes =
- (OldHeader.ClassId ? NewHeader.SizeOrUnusedBytes - Delta
- : NewHeader.SizeOrUnusedBytes + Delta) &
+ (ClassId ? NewSize
+ : BlockEnd - (reinterpret_cast<uptr>(OldPtr) + NewSize)) &
Chunk::SizeOrUnusedBytesMask;
Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
return OldPtr;
@@ -334,6 +347,7 @@ public:
// are currently unclear.
void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
if (NewPtr) {
+ const uptr OldSize = getSize(OldPtr, &OldHeader);
memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
}
@@ -355,12 +369,31 @@ public:
Primary.enable();
}
+ // The function returns the amount of bytes required to store the statistics,
+ // which might be larger than the amount of bytes provided. Note that the
+ // statistics buffer is not necessarily constant between calls to this
+ // function. This can be called with a null buffer or zero size for buffer
+ // sizing purposes.
+ uptr getStats(char *Buffer, uptr Size) {
+ ScopedString Str(1024);
+ disable();
+ const uptr Length = getStats(&Str) + 1;
+ enable();
+ if (Length < Size)
+ Size = Length;
+ if (Buffer && Size) {
+ memcpy(Buffer, Str.data(), Size);
+ Buffer[Size - 1] = '\0';
+ }
+ return Length;
+ }
+
void printStats() {
+ ScopedString Str(1024);
disable();
- Primary.printStats();
- Secondary.printStats();
- Quarantine.printStats();
+ getStats(&Str);
enable();
+ Str.output();
}
void releaseToOS() { Primary.releaseToOS(); }
@@ -374,7 +407,7 @@ public:
const uptr From = Base;
const uptr To = Base + Size;
auto Lambda = [this, From, To, Callback, Arg](uptr Block) {
- if (Block < From || Block > To)
+ if (Block < From || Block >= To)
return;
uptr ChunkSize;
const uptr ChunkBase = getChunkFromBlock(Block, &ChunkSize);
@@ -471,8 +504,7 @@ private:
// last and last class sizes, as well as the dynamic base for the Primary.
// The following is an over-approximation that works for our needs.
const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
- Header.SizeOrUnusedBytes =
- MaxSizeOrUnusedBytes & Chunk::SizeOrUnusedBytesMask;
+ Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
if (UNLIKELY(Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes))
reportSanityCheckError("size (or unused bytes)");
@@ -484,15 +516,15 @@ private:
static INLINE void *getBlockBegin(const void *Ptr,
Chunk::UnpackedHeader *Header) {
- return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
- Chunk::getHeaderSize() -
- (Header->Offset << MinAlignmentLog));
+ return reinterpret_cast<void *>(
+ reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
+ (static_cast<uptr>(Header->Offset) << MinAlignmentLog));
}
// Return the size of a chunk as requested during its allocation.
INLINE uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
- if (Header->ClassId)
+ if (LIKELY(Header->ClassId))
return SizeOrUnusedBytes;
return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
@@ -514,7 +546,7 @@ private:
Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
void *BlockBegin = getBlockBegin(Ptr, &NewHeader);
const uptr ClassId = NewHeader.ClassId;
- if (ClassId) {
+ if (LIKELY(ClassId)) {
bool UnlockRequired;
auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
TSD->Cache.deallocate(ClassId, BlockBegin);
@@ -550,6 +582,13 @@ private:
*Size = getSize(Ptr, &Header);
return P;
}
+
+ uptr getStats(ScopedString *Str) {
+ Primary.getStats(Str);
+ Secondary.getStats(Str);
+ Quarantine.getStats(Str);
+ return Str->length();
+ }
};
} // namespace scudo
diff --git a/lib/scudo/standalone/common.cc b/lib/scudo/standalone/common.cpp
index 2a26efbb9c890..d93bfc59b3ca3 100644
--- a/lib/scudo/standalone/common.cc
+++ b/lib/scudo/standalone/common.cpp
@@ -1,4 +1,4 @@
-//===-- common.cc -----------------------------------------------*- C++ -*-===//
+//===-- common.cpp ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/lib/scudo/standalone/crc32_hw.cc b/lib/scudo/standalone/crc32_hw.cpp
index f4dae7b5fea83..62841ba510199 100644
--- a/lib/scudo/standalone/crc32_hw.cc
+++ b/lib/scudo/standalone/crc32_hw.cpp
@@ -1,4 +1,4 @@
-//===-- crc32_hw.h ----------------------------------------------*- C++ -*-===//
+//===-- crc32_hw.cpp --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/lib/scudo/standalone/flags.cc b/lib/scudo/standalone/flags.cpp
index 21144f2111026..1e970ae495056 100644
--- a/lib/scudo/standalone/flags.cc
+++ b/lib/scudo/standalone/flags.cpp
@@ -1,4 +1,4 @@
-//===-- flags.cc ------------------------------------------------*- C++ -*-===//
+//===-- flags.cpp -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/lib/scudo/standalone/flags_parser.cc b/lib/scudo/standalone/flags_parser.cpp
index 5f1253f58d52f..070c08b019384 100644
--- a/lib/scudo/standalone/flags_parser.cc
+++ b/lib/scudo/standalone/flags_parser.cpp
@@ -1,4 +1,4 @@
-//===-- flags_parser.cc -----------------------------------------*- C++ -*-===//
+//===-- flags_parser.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/lib/scudo/standalone/fuchsia.cc b/lib/scudo/standalone/fuchsia.cpp
index 896d346e7e721..0a9483ae1dd0d 100644
--- a/lib/scudo/standalone/fuchsia.cc
+++ b/lib/scudo/standalone/fuchsia.cpp
@@ -1,4 +1,4 @@
-//===-- fuchsia.cc ----------------------------------------------*- C++ -*-===//
+//===-- fuchsia.cpp ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -40,7 +40,7 @@ static void *allocateVmar(uptr Size, MapPlatformData *Data, bool AllowNoMem) {
_zx_vmar_root_self(),
ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
Size, &Data->Vmar, &Data->VmarBase);
- if (Status != ZX_OK) {
+ if (UNLIKELY(Status != ZX_OK)) {
if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
return nullptr;
@@ -78,7 +78,7 @@ void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
} else {
// Otherwise, create a Vmo and set its name.
Status = _zx_vmo_create(Size, ZX_VMO_RESIZABLE, &Vmo);
- if (Status != ZX_OK) {
+ if (UNLIKELY(Status != ZX_OK)) {
if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
return nullptr;
@@ -102,7 +102,7 @@ void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
} else {
CHECK_EQ(_zx_handle_close(Vmo), ZX_OK);
}
- if (Status != ZX_OK) {
+ if (UNLIKELY(Status != ZX_OK)) {
if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
return nullptr;
@@ -125,7 +125,7 @@ void unmap(void *Addr, uptr Size, uptr Flags, MapPlatformData *Data) {
const zx_handle_t Vmar = Data ? Data->Vmar : _zx_vmar_root_self();
const zx_status_t Status =
_zx_vmar_unmap(Vmar, reinterpret_cast<uintptr_t>(Addr), Size);
- if (Status != ZX_OK)
+ if (UNLIKELY(Status != ZX_OK))
dieOnMapUnmapError();
}
if (Data) {
@@ -170,9 +170,9 @@ u64 getMonotonicTime() { return _zx_clock_get_monotonic(); }
u32 getNumberOfCPUs() { return _zx_system_get_num_cpus(); }
-bool getRandom(void *Buffer, uptr Length, bool Blocking) {
+bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) {
COMPILER_CHECK(MaxRandomLength <= ZX_CPRNG_DRAW_MAX_LEN);
- if (!Buffer || !Length || Length > MaxRandomLength)
+ if (UNLIKELY(!Buffer || !Length || Length > MaxRandomLength))
return false;
_zx_cprng_draw(Buffer, Length);
return true;
diff --git a/lib/scudo/standalone/internal_defs.h b/lib/scudo/standalone/internal_defs.h
index 901eac372b360..64ed238ebfec4 100644
--- a/lib/scudo/standalone/internal_defs.h
+++ b/lib/scudo/standalone/internal_defs.h
@@ -55,11 +55,11 @@
namespace scudo {
typedef unsigned long uptr;
-typedef signed long sptr;
typedef unsigned char u8;
typedef unsigned short u16;
typedef unsigned int u32;
typedef unsigned long long u64;
+typedef signed long sptr;
typedef signed char s8;
typedef signed short s16;
typedef signed int s32;
@@ -128,8 +128,6 @@ void NORETURN reportCheckFailed(const char *File, int Line,
#define COMPILER_CHECK(Pred) static_assert(Pred, "")
-enum LinkerInitialized { LINKER_INITIALIZED = 0 };
-
} // namespace scudo
#endif // SCUDO_INTERNAL_DEFS_H_
diff --git a/lib/scudo/standalone/linux.cc b/lib/scudo/standalone/linux.cpp
index 049477bba8b0d..8266a528f42c5 100644
--- a/lib/scudo/standalone/linux.cc
+++ b/lib/scudo/standalone/linux.cpp
@@ -1,4 +1,4 @@
-//===-- linux.cc ------------------------------------------------*- C++ -*-===//
+//===-- linux.cpp -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -43,7 +43,7 @@ void NORETURN die() { abort(); }
void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
UNUSED MapPlatformData *Data) {
- int MmapFlags = MAP_PRIVATE | MAP_ANON;
+ int MmapFlags = MAP_PRIVATE | MAP_ANONYMOUS;
int MmapProt;
if (Flags & MAP_NOACCESS) {
MmapFlags |= MAP_NORESERVE;
diff --git a/lib/scudo/standalone/linux.h b/lib/scudo/standalone/linux.h
index 92c9eb5e97ee6..c8e41484c8515 100644
--- a/lib/scudo/standalone/linux.h
+++ b/lib/scudo/standalone/linux.h
@@ -55,7 +55,7 @@ struct MapPlatformData {};
// The Android Bionic team has allocated a TLS slot for sanitizers starting
// with Q, given that Android currently doesn't support ELF TLS. It is used to
// store sanitizer thread specific data.
-static const int TLS_SLOT_SANITIZER = 8; // TODO(kostyak): 6 for Q!!
+static const int TLS_SLOT_SANITIZER = 6;
ALWAYS_INLINE uptr *getAndroidTlsPtr() {
return reinterpret_cast<uptr *>(&__get_tls()[TLS_SLOT_SANITIZER]);
diff --git a/lib/scudo/standalone/list.h b/lib/scudo/standalone/list.h
index 139e73eff5ad2..6a7b9bd747a71 100644
--- a/lib/scudo/standalone/list.h
+++ b/lib/scudo/standalone/list.h
@@ -106,17 +106,17 @@ template <class Item> struct IntrusiveList {
void checkConsistency() {
if (Size == 0) {
- CHECK_EQ(First, 0);
- CHECK_EQ(Last, 0);
+ CHECK_EQ(First, nullptr);
+ CHECK_EQ(Last, nullptr);
} else {
- uptr count = 0;
+ uptr Count = 0;
for (Item *I = First;; I = I->Next) {
- count++;
+ Count++;
if (I == Last)
break;
}
- CHECK_EQ(size(), count);
- CHECK_EQ(Last->Next, 0);
+ CHECK_EQ(size(), Count);
+ CHECK_EQ(Last->Next, nullptr);
}
}
diff --git a/lib/scudo/standalone/local_cache.h b/lib/scudo/standalone/local_cache.h
index 2acc288740158..b08abd3e5d9b0 100644
--- a/lib/scudo/standalone/local_cache.h
+++ b/lib/scudo/standalone/local_cache.h
@@ -22,9 +22,8 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
static const u32 MaxNumCached = SizeClassMap::MaxNumCachedHint;
void setFromArray(void **Array, u32 N) {
DCHECK_LE(N, MaxNumCached);
- for (u32 I = 0; I < N; I++)
- Batch[I] = Array[I];
Count = N;
+ memcpy(Batch, Array, sizeof(void *) * Count);
}
void clear() { Count = 0; }
void add(void *P) {
@@ -32,8 +31,7 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
Batch[Count++] = P;
}
void copyToArray(void **Array) const {
- for (u32 I = 0; I < Count; I++)
- Array[I] = Batch[I];
+ memcpy(Array, Batch, sizeof(void *) * Count);
}
u32 getCount() const { return Count; }
void *get(u32 I) const {
@@ -52,7 +50,7 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
void initLinkerInitialized(GlobalStats *S, SizeClassAllocator *A) {
Stats.initLinkerInitialized();
- if (S)
+ if (LIKELY(S))
S->link(&Stats);
Allocator = A;
}
@@ -64,12 +62,12 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
void destroy(GlobalStats *S) {
drain();
- if (S)
+ if (LIKELY(S))
S->unlink(&Stats);
}
void *allocate(uptr ClassId) {
- CHECK_LT(ClassId, NumClasses);
+ DCHECK_LT(ClassId, NumClasses);
PerClass *C = &PerClassArray[ClassId];
if (C->Count == 0) {
if (UNLIKELY(!refill(C, ClassId)))
@@ -85,6 +83,7 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
// performance. It definitely decreases performance on Android though.
// if (!SCUDO_ANDROID) PREFETCH(P);
Stats.add(StatAllocated, ClassSize);
+ Stats.sub(StatFree, ClassSize);
return P;
}
@@ -100,6 +99,7 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
const uptr ClassSize = C->ClassSize;
C->Chunks[C->Count++] = P;
Stats.sub(StatAllocated, ClassSize);
+ Stats.add(StatFree, ClassSize);
}
void drain() {
@@ -157,8 +157,8 @@ private:
if (UNLIKELY(!B))
return false;
DCHECK_GT(B->getCount(), 0);
- B->copyToArray(C->Chunks);
C->Count = B->getCount();
+ B->copyToArray(C->Chunks);
destroyBatch(ClassId, B);
return true;
}
diff --git a/lib/scudo/standalone/mutex.h b/lib/scudo/standalone/mutex.h
index b6dc9188d3471..b26b2df06627d 100644
--- a/lib/scudo/standalone/mutex.h
+++ b/lib/scudo/standalone/mutex.h
@@ -25,7 +25,7 @@ public:
void init() { memset(this, 0, sizeof(*this)); }
bool tryLock();
NOINLINE void lock() {
- if (tryLock())
+ if (LIKELY(tryLock()))
return;
// The compiler may try to fully unroll the loop, ending up in a
// NumberOfTries*NumberOfYields block of pauses mixed with tryLocks. This
@@ -44,8 +44,8 @@ public:
void unlock();
private:
- static constexpr u8 NumberOfTries = 10U;
- static constexpr u8 NumberOfYields = 10U;
+ static constexpr u8 NumberOfTries = 8U;
+ static constexpr u8 NumberOfYields = 8U;
#if SCUDO_LINUX
atomic_u32 M;
diff --git a/lib/scudo/standalone/primary32.h b/lib/scudo/standalone/primary32.h
index 2b2fa8b3d7939..9123d07b49b95 100644
--- a/lib/scudo/standalone/primary32.h
+++ b/lib/scudo/standalone/primary32.h
@@ -72,7 +72,7 @@ public:
SizeClassInfo *Sci = getSizeClassInfo(I);
Sci->RandState = getRandomU32(&Seed);
// See comment in the 64-bit primary about releasing smaller size classes.
- Sci->CanRelease = (ReleaseToOsInterval > 0) &&
+ Sci->CanRelease = (ReleaseToOsInterval >= 0) &&
(I != SizeClassMap::BatchClassId) &&
(getSizeByClassId(I) >= (PageSize / 32));
}
@@ -99,9 +99,9 @@ public:
SizeClassInfo *Sci = getSizeClassInfo(ClassId);
ScopedLock L(Sci->Mutex);
TransferBatch *B = Sci->FreeList.front();
- if (B)
+ if (B) {
Sci->FreeList.pop_front();
- else {
+ } else {
B = populateFreeList(C, ClassId, Sci);
if (UNLIKELY(!B))
return nullptr;
@@ -129,7 +129,7 @@ public:
void enable() {
for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--)
- getSizeClassInfo(I)->Mutex.unlock();
+ getSizeClassInfo(static_cast<uptr>(I))->Mutex.unlock();
}
template <typename F> void iterateOverBlocks(F Callback) {
@@ -143,7 +143,7 @@ public:
}
}
- void printStats() {
+ void getStats(ScopedString *Str) {
// TODO(kostyak): get the RSS per region.
uptr TotalMapped = 0;
uptr PoppedBlocks = 0;
@@ -154,21 +154,23 @@ public:
PoppedBlocks += Sci->Stats.PoppedBlocks;
PushedBlocks += Sci->Stats.PushedBlocks;
}
- Printf("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
- "remains %zu\n",
- TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks);
+ Str->append("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
+ "remains %zu\n",
+ TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks);
for (uptr I = 0; I < NumClasses; I++)
- printStats(I, 0);
+ getStats(Str, I, 0);
}
- void releaseToOS() {
+ uptr releaseToOS() {
+ uptr TotalReleasedBytes = 0;
for (uptr I = 0; I < NumClasses; I++) {
if (I == SizeClassMap::BatchClassId)
continue;
SizeClassInfo *Sci = getSizeClassInfo(I);
ScopedLock L(Sci->Mutex);
- releaseToOSMaybe(Sci, I, /*Force=*/true);
+ TotalReleasedBytes += releaseToOSMaybe(Sci, I, /*Force=*/true);
}
+ return TotalReleasedBytes;
}
private:
@@ -318,53 +320,59 @@ private:
}
DCHECK(B);
DCHECK_GT(B->getCount(), 0);
+
+ C->getStats().add(StatFree, AllocatedUser);
Sci->AllocatedUser += AllocatedUser;
if (Sci->CanRelease)
Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
return B;
}
- void printStats(uptr ClassId, uptr Rss) {
+ void getStats(ScopedString *Str, uptr ClassId, uptr Rss) {
SizeClassInfo *Sci = getSizeClassInfo(ClassId);
if (Sci->AllocatedUser == 0)
return;
const uptr InUse = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks;
const uptr AvailableChunks = Sci->AllocatedUser / getSizeByClassId(ClassId);
- Printf(" %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu inuse: %6zu"
- " avail: %6zu rss: %6zuK\n",
- ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
- Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks, InUse,
- AvailableChunks, Rss >> 10);
+ Str->append(" %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
+ "inuse: %6zu avail: %6zu rss: %6zuK\n",
+ ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
+ Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks, InUse,
+ AvailableChunks, Rss >> 10);
}
- NOINLINE void releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
+ NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
bool Force = false) {
const uptr BlockSize = getSizeByClassId(ClassId);
const uptr PageSize = getPageSizeCached();
CHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
- const uptr N = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks;
- if (N * BlockSize < PageSize)
- return; // No chance to release anything.
+ const uptr BytesInFreeList =
+ Sci->AllocatedUser -
+ (Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks) * BlockSize;
+ if (BytesInFreeList < PageSize)
+ return 0; // No chance to release anything.
if ((Sci->Stats.PushedBlocks - Sci->ReleaseInfo.PushedBlocksAtLastRelease) *
BlockSize <
PageSize) {
- return; // Nothing new to release.
+ return 0; // Nothing new to release.
}
if (!Force) {
const s32 IntervalMs = ReleaseToOsIntervalMs;
if (IntervalMs < 0)
- return;
- if (Sci->ReleaseInfo.LastReleaseAtNs + IntervalMs * 1000000ULL >
+ return 0;
+ if (Sci->ReleaseInfo.LastReleaseAtNs +
+ static_cast<uptr>(IntervalMs) * 1000000ULL >
getMonotonicTime()) {
- return; // Memory was returned recently.
+ return 0; // Memory was returned recently.
}
}
// TODO(kostyak): currently not ideal as we loop over all regions and
// iterate multiple times over the same freelist if a ClassId spans multiple
// regions. But it will have to do for now.
+ uptr TotalReleasedBytes = 0;
for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
if (PossibleRegions[I] == ClassId) {
ReleaseRecorder Recorder(I * RegionSize);
@@ -374,10 +382,12 @@ private:
Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
+ TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
}
}
}
Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
+ return TotalReleasedBytes;
}
SizeClassInfo SizeClassInfoArray[NumClasses];
diff --git a/lib/scudo/standalone/primary64.h b/lib/scudo/standalone/primary64.h
index 035182b33ef4c..8f443ea7fa3f3 100644
--- a/lib/scudo/standalone/primary64.h
+++ b/lib/scudo/standalone/primary64.h
@@ -36,7 +36,7 @@ namespace scudo {
// freelist to the thread specific freelist, and back.
//
// The memory used by this allocator is never unmapped, but can be partially
-// released it the platform allows for it.
+// released if the platform allows for it.
template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator64 {
public:
@@ -79,7 +79,7 @@ public:
// memory accesses which ends up being fairly costly. The current lower
// limit is mostly arbitrary and based on empirical observations.
// TODO(kostyak): make the lower limit a runtime option
- Region->CanRelease = (ReleaseToOsInterval > 0) &&
+ Region->CanRelease = (ReleaseToOsInterval >= 0) &&
(I != SizeClassMap::BatchClassId) &&
(getSizeByClassId(I) >= (PageSize / 32));
Region->RandState = getRandomU32(&Seed);
@@ -102,9 +102,9 @@ public:
RegionInfo *Region = getRegionInfo(ClassId);
ScopedLock L(Region->Mutex);
TransferBatch *B = Region->FreeList.front();
- if (B)
+ if (B) {
Region->FreeList.pop_front();
- else {
+ } else {
B = populateFreeList(C, ClassId, Region);
if (UNLIKELY(!B))
return nullptr;
@@ -131,11 +131,13 @@ public:
void enable() {
for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--)
- getRegionInfo(I)->Mutex.unlock();
+ getRegionInfo(static_cast<uptr>(I))->Mutex.unlock();
}
template <typename F> void iterateOverBlocks(F Callback) const {
- for (uptr I = 1; I < NumClasses; I++) {
+ for (uptr I = 0; I < NumClasses; I++) {
+ if (I == SizeClassMap::BatchClassId)
+ continue;
const RegionInfo *Region = getRegionInfo(I);
const uptr BlockSize = getSizeByClassId(I);
const uptr From = Region->RegionBeg;
@@ -145,7 +147,7 @@ public:
}
}
- void printStats() const {
+ void getStats(ScopedString *Str) const {
// TODO(kostyak): get the RSS per region.
uptr TotalMapped = 0;
uptr PoppedBlocks = 0;
@@ -157,22 +159,25 @@ public:
PoppedBlocks += Region->Stats.PoppedBlocks;
PushedBlocks += Region->Stats.PushedBlocks;
}
- Printf("Stats: Primary64: %zuM mapped (%zuM rss) in %zu allocations; "
- "remains %zu\n",
- TotalMapped >> 20, 0, PoppedBlocks, PoppedBlocks - PushedBlocks);
+ Str->append("Stats: SizeClassAllocator64: %zuM mapped (%zuM rss) in %zu "
+ "allocations; remains %zu\n",
+ TotalMapped >> 20, 0, PoppedBlocks,
+ PoppedBlocks - PushedBlocks);
for (uptr I = 0; I < NumClasses; I++)
- printStats(I, 0);
+ getStats(Str, I, 0);
}
- void releaseToOS() {
+ uptr releaseToOS() {
+ uptr TotalReleasedBytes = 0;
for (uptr I = 0; I < NumClasses; I++) {
if (I == SizeClassMap::BatchClassId)
continue;
RegionInfo *Region = getRegionInfo(I);
ScopedLock L(Region->Mutex);
- releaseToOSMaybe(Region, I, /*Force=*/true);
+ TotalReleasedBytes += releaseToOSMaybe(Region, I, /*Force=*/true);
}
+ return TotalReleasedBytes;
}
private:
@@ -181,7 +186,7 @@ private:
static const uptr PrimarySize = RegionSize * NumClasses;
// Call map for user memory with at least this size.
- static const uptr MapSizeIncrement = 1UL << 16;
+ static const uptr MapSizeIncrement = 1UL << 17;
struct RegionStats {
uptr PoppedBlocks;
@@ -257,7 +262,7 @@ private:
const uptr MappedUser = Region->MappedUser;
const uptr TotalUserBytes = Region->AllocatedUser + MaxCount * Size;
// Map more space for blocks, if necessary.
- if (LIKELY(TotalUserBytes > MappedUser)) {
+ if (TotalUserBytes > MappedUser) {
// Do the mmap for the user memory.
const uptr UserMapSize =
roundUpTo(TotalUserBytes - MappedUser, MapSizeIncrement);
@@ -265,14 +270,16 @@ private:
if (UNLIKELY(RegionBase + MappedUser + UserMapSize > RegionSize)) {
if (!Region->Exhausted) {
Region->Exhausted = true;
- printStats();
- Printf(
+ ScopedString Str(1024);
+ getStats(&Str);
+ Str.append(
              "Scudo OOM: The process has exhausted %zuM for size class %zu.\n",
RegionSize >> 20, Size);
+ Str.output();
}
return nullptr;
}
- if (MappedUser == 0)
+ if (UNLIKELY(MappedUser == 0))
Region->Data = Data;
if (UNLIKELY(!map(reinterpret_cast<void *>(RegionBeg + MappedUser),
UserMapSize, "scudo:primary",
@@ -307,8 +314,9 @@ private:
return nullptr;
}
DCHECK(B);
- CHECK_GT(B->getCount(), 0);
+ DCHECK_GT(B->getCount(), 0);
+ C->getStats().add(StatFree, AllocatedUser);
Region->AllocatedUser += AllocatedUser;
Region->Exhausted = false;
if (Region->CanRelease)
@@ -317,47 +325,49 @@ private:
return B;
}
- void printStats(uptr ClassId, uptr Rss) const {
+ void getStats(ScopedString *Str, uptr ClassId, uptr Rss) const {
RegionInfo *Region = getRegionInfo(ClassId);
if (Region->MappedUser == 0)
return;
const uptr InUse = Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks;
- const uptr AvailableChunks =
- Region->AllocatedUser / getSizeByClassId(ClassId);
- Printf("%s %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu inuse: "
- "%6zu avail: %6zu rss: %6zuK releases: %6zu last released: %6zuK "
- "region: 0x%zx (0x%zx)\n",
- Region->Exhausted ? "F" : " ", ClassId, getSizeByClassId(ClassId),
- Region->MappedUser >> 10, Region->Stats.PoppedBlocks,
- Region->Stats.PushedBlocks, InUse, AvailableChunks, Rss >> 10,
- Region->ReleaseInfo.RangesReleased,
- Region->ReleaseInfo.LastReleasedBytes >> 10, Region->RegionBeg,
- getRegionBaseByClassId(ClassId));
+ const uptr TotalChunks = Region->AllocatedUser / getSizeByClassId(ClassId);
+ Str->append("%s %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
+ "inuse: %6zu total: %6zu rss: %6zuK releases: %6zu last "
+ "released: %6zuK region: 0x%zx (0x%zx)\n",
+ Region->Exhausted ? "F" : " ", ClassId,
+ getSizeByClassId(ClassId), Region->MappedUser >> 10,
+ Region->Stats.PoppedBlocks, Region->Stats.PushedBlocks, InUse,
+ TotalChunks, Rss >> 10, Region->ReleaseInfo.RangesReleased,
+ Region->ReleaseInfo.LastReleasedBytes >> 10, Region->RegionBeg,
+ getRegionBaseByClassId(ClassId));
}
- NOINLINE void releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
+ NOINLINE uptr releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
bool Force = false) {
const uptr BlockSize = getSizeByClassId(ClassId);
const uptr PageSize = getPageSizeCached();
CHECK_GE(Region->Stats.PoppedBlocks, Region->Stats.PushedBlocks);
- const uptr N = Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks;
- if (N * BlockSize < PageSize)
- return; // No chance to release anything.
+ const uptr BytesInFreeList =
+ Region->AllocatedUser -
+ (Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks) * BlockSize;
+ if (BytesInFreeList < PageSize)
+ return 0; // No chance to release anything.
if ((Region->Stats.PushedBlocks -
Region->ReleaseInfo.PushedBlocksAtLastRelease) *
BlockSize <
PageSize) {
- return; // Nothing new to release.
+ return 0; // Nothing new to release.
}
if (!Force) {
const s32 IntervalMs = ReleaseToOsIntervalMs;
if (IntervalMs < 0)
- return;
- if (Region->ReleaseInfo.LastReleaseAtNs + IntervalMs * 1000000ULL >
+ return 0;
+ if (Region->ReleaseInfo.LastReleaseAtNs +
+ static_cast<uptr>(IntervalMs) * 1000000ULL >
getMonotonicTime()) {
- return; // Memory was returned recently.
+ return 0; // Memory was returned recently.
}
}
@@ -373,6 +383,7 @@ private:
Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
}
Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
+ return Recorder.getReleasedBytes();
}
};
diff --git a/lib/scudo/standalone/quarantine.h b/lib/scudo/standalone/quarantine.h
index bac36e01c1ddc..35fd0bc197ea6 100644
--- a/lib/scudo/standalone/quarantine.h
+++ b/lib/scudo/standalone/quarantine.h
@@ -130,7 +130,7 @@ public:
subFromSize(ExtractedSize);
}
- void printStats() const {
+ void getStats(ScopedString *Str) const {
uptr BatchCount = 0;
uptr TotalOverheadBytes = 0;
uptr TotalBytes = 0;
@@ -152,11 +152,11 @@ public:
(TotalQuarantinedBytes == 0)
? 0
: TotalOverheadBytes * 100 / TotalQuarantinedBytes;
- Printf("Global quarantine stats: batches: %zd; bytes: %zd (user: %zd); "
- "chunks: %zd (capacity: %zd); %zd%% chunks used; %zd%% memory "
- "overhead\n",
- BatchCount, TotalBytes, TotalQuarantinedBytes, TotalQuarantineChunks,
- QuarantineChunksCapacity, ChunksUsagePercent, MemoryOverheadPercent);
+ Str->append(
+ "Stats: Quarantine: batches: %zu; bytes: %zu (user: %zu); chunks: %zu "
+ "(capacity: %zu); %zu%% chunks used; %zu%% memory overhead\n",
+ BatchCount, TotalBytes, TotalQuarantinedBytes, TotalQuarantineChunks,
+ QuarantineChunksCapacity, ChunksUsagePercent, MemoryOverheadPercent);
}
private:
@@ -218,11 +218,11 @@ public:
recycle(0, Cb);
}
- void printStats() const {
+ void getStats(ScopedString *Str) const {
    // It assumes that the world is stopped, just as the allocator's getStats.
- Printf("Quarantine limits: global: %zdM; thread local: %zdK\n",
- getMaxSize() >> 20, getCacheSize() >> 10);
- Cache.printStats();
+ Cache.getStats(Str);
+ Str->append("Quarantine limits: global: %zuK; thread local: %zuK\n",
+ getMaxSize() >> 10, getCacheSize() >> 10);
}
private:
diff --git a/lib/scudo/standalone/report.cc b/lib/scudo/standalone/report.cpp
index 47cd951e8ed4f..12d851ff019ad 100644
--- a/lib/scudo/standalone/report.cc
+++ b/lib/scudo/standalone/report.cpp
@@ -1,4 +1,4 @@
-//===-- report.cc -----------------------------------------------*- C++ -*-===//
+//===-- report.cpp ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/lib/scudo/standalone/secondary.cc b/lib/scudo/standalone/secondary.cpp
index 75f9171f1617b..db7361d7134aa 100644
--- a/lib/scudo/standalone/secondary.cc
+++ b/lib/scudo/standalone/secondary.cpp
@@ -1,4 +1,4 @@
-//===-- secondary.cc --------------------------------------------*- C++ -*-===//
+//===-- secondary.cpp -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -32,14 +32,14 @@ void *MapAllocator::allocate(uptr Size, uptr AlignmentHint, uptr *BlockEnd) {
uptr MapBase =
reinterpret_cast<uptr>(map(nullptr, MapSize, "scudo:secondary",
MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
- if (!MapBase)
+ if (UNLIKELY(!MapBase))
return nullptr;
uptr CommitBase = MapBase + PageSize;
uptr MapEnd = MapBase + MapSize;
// In the unlikely event of alignments larger than a page, adjust the amount
// of memory we want to commit, and trim the extra memory.
- if (AlignmentHint >= PageSize) {
+ if (UNLIKELY(AlignmentHint >= PageSize)) {
// For alignments greater than or equal to a page, the user pointer (eg: the
// pointer that is returned by the C or C++ allocation APIs) ends up on a
    // page boundary, and our headers will live in the preceding page.
@@ -73,13 +73,11 @@ void *MapAllocator::allocate(uptr Size, uptr AlignmentHint, uptr *BlockEnd) {
H->Data = Data;
{
ScopedLock L(Mutex);
- if (!Tail) {
- Tail = H;
- } else {
+ if (LIKELY(Tail)) {
Tail->Next = H;
H->Prev = Tail;
- Tail = H;
}
+ Tail = H;
AllocatedBytes += CommitSize;
if (LargestSize < CommitSize)
LargestSize = CommitSize;
@@ -106,7 +104,7 @@ void MapAllocator::deallocate(void *Ptr) {
CHECK_EQ(Next->Prev, H);
Next->Prev = Prev;
}
- if (Tail == H) {
+ if (UNLIKELY(Tail == H)) {
CHECK(!Next);
Tail = Prev;
} else {
@@ -125,12 +123,13 @@ void MapAllocator::deallocate(void *Ptr) {
unmap(Addr, Size, UNMAP_ALL, &Data);
}
-void MapAllocator::printStats() const {
- Printf("Stats: MapAllocator: allocated %zd times (%zdK), freed %zd times "
- "(%zdK), remains %zd (%zdK) max %zdM\n",
- NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees, FreedBytes >> 10,
- NumberOfAllocs - NumberOfFrees, (AllocatedBytes - FreedBytes) >> 10,
- LargestSize >> 20);
+void MapAllocator::getStats(ScopedString *Str) const {
+ Str->append(
+ "Stats: MapAllocator: allocated %zu times (%zuK), freed %zu times "
+ "(%zuK), remains %zu (%zuK) max %zuM\n",
+ NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees, FreedBytes >> 10,
+ NumberOfAllocs - NumberOfFrees, (AllocatedBytes - FreedBytes) >> 10,
+ LargestSize >> 20);
}
} // namespace scudo
diff --git a/lib/scudo/standalone/secondary.h b/lib/scudo/standalone/secondary.h
index 9124e2a41c6ad..9d074a57c7722 100644
--- a/lib/scudo/standalone/secondary.h
+++ b/lib/scudo/standalone/secondary.h
@@ -12,6 +12,7 @@
#include "common.h"
#include "mutex.h"
#include "stats.h"
+#include "string_utils.h"
namespace scudo {
@@ -50,7 +51,7 @@ class MapAllocator {
public:
void initLinkerInitialized(GlobalStats *S) {
Stats.initLinkerInitialized();
- if (S)
+ if (LIKELY(S))
S->link(&Stats);
}
void init(GlobalStats *S) {
@@ -70,7 +71,7 @@ public:
return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
}
- void printStats() const;
+ void getStats(ScopedString *Str) const;
void disable() { Mutex.lock(); }
diff --git a/lib/scudo/standalone/size_class_map.h b/lib/scudo/standalone/size_class_map.h
index b7df54cf80982..dfef0865b9d93 100644
--- a/lib/scudo/standalone/size_class_map.h
+++ b/lib/scudo/standalone/size_class_map.h
@@ -86,6 +86,7 @@ public:
}
static void print() {
+ ScopedString Buffer(1024);
uptr PrevS = 0;
uptr TotalCached = 0;
for (uptr I = 0; I < NumClasses; I++) {
@@ -93,19 +94,20 @@ public:
continue;
const uptr S = getSizeByClassId(I);
if (S >= MidSize / 2 && (S & (S - 1)) == 0)
- Printf("\n");
+ Buffer.append("\n");
const uptr D = S - PrevS;
const uptr P = PrevS ? (D * 100 / PrevS) : 0;
const uptr L = S ? getMostSignificantSetBitIndex(S) : 0;
const uptr Cached = getMaxCachedHint(S) * S;
- Printf(
+ Buffer.append(
"C%02zu => S: %zu diff: +%zu %02zu%% L %zu Cached: %zu %zu; id %zu\n",
I, getSizeByClassId(I), D, P, L, getMaxCachedHint(S), Cached,
getClassIdBySize(S));
TotalCached += Cached;
PrevS = S;
}
- Printf("Total Cached: %zu\n", TotalCached);
+ Buffer.append("Total Cached: %zu\n", TotalCached);
+ Buffer.output();
}
static void validate() {
@@ -137,11 +139,11 @@ typedef SizeClassMap<3, 5, 8, 17, 8, 10> DefaultSizeClassMap;
// TODO(kostyak): further tune class maps for Android & Fuchsia.
#if SCUDO_WORDSIZE == 64U
-typedef SizeClassMap<3, 5, 8, 15, 8, 10> SvelteSizeClassMap;
-typedef SizeClassMap<3, 5, 8, 16, 14, 12> AndroidSizeClassMap;
+typedef SizeClassMap<4, 4, 8, 14, 4, 10> SvelteSizeClassMap;
+typedef SizeClassMap<3, 5, 8, 17, 14, 14> AndroidSizeClassMap;
#else
-typedef SizeClassMap<3, 4, 7, 15, 8, 10> SvelteSizeClassMap;
-typedef SizeClassMap<3, 4, 7, 16, 14, 12> AndroidSizeClassMap;
+typedef SizeClassMap<4, 3, 7, 14, 5, 10> SvelteSizeClassMap;
+typedef SizeClassMap<3, 5, 8, 17, 14, 14> AndroidSizeClassMap;
#endif
} // namespace scudo
diff --git a/lib/scudo/standalone/stats.h b/lib/scudo/standalone/stats.h
index 12436756226b2..16ef5b89b854c 100644
--- a/lib/scudo/standalone/stats.h
+++ b/lib/scudo/standalone/stats.h
@@ -17,7 +17,7 @@
namespace scudo {
// Memory allocator statistics
-enum StatType { StatAllocated, StatMapped, StatCount };
+enum StatType { StatAllocated, StatFree, StatMapped, StatCount };
typedef uptr StatCounters[StatCount];
diff --git a/lib/scudo/standalone/string_utils.cc b/lib/scudo/standalone/string_utils.cpp
index f0068afc1e8b2..5de8b57bfcd12 100644
--- a/lib/scudo/standalone/string_utils.cc
+++ b/lib/scudo/standalone/string_utils.cpp
@@ -1,4 +1,4 @@
-//===-- string_utils.cc -----------------------------------------*- C++ -*-===//
+//===-- string_utils.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -9,7 +9,6 @@
#include "string_utils.h"
#include "common.h"
-#include <ctype.h>
#include <stdarg.h>
#include <string.h>
@@ -44,7 +43,7 @@ static int appendNumber(char **Buffer, const char *BufferEnd, u64 AbsoluteValue,
do {
RAW_CHECK_MSG(static_cast<uptr>(Pos) < MaxLen,
"appendNumber buffer overflow");
- NumBuffer[Pos++] = AbsoluteValue % Base;
+ NumBuffer[Pos++] = static_cast<uptr>(AbsoluteValue % Base);
AbsoluteValue /= Base;
} while (AbsoluteValue > 0);
if (Pos < MinNumberLength) {
@@ -117,7 +116,7 @@ static int appendPointer(char **Buffer, const char *BufferEnd, u64 ptr_value) {
int formatString(char *Buffer, uptr BufferLength, const char *Format,
va_list Args) {
- UNUSED static const char *PrintfFormatsHelp =
+ static const char *PrintfFormatsHelp =
"Supported formatString formats: %([0-9]*)?(z|ll)?{d,u,x,X}; %p; "
"%[-]([0-9]*)?(\\.\\*)?s; %c\n";
RAW_CHECK(Format);
@@ -209,9 +208,18 @@ int formatString(char *Buffer, uptr BufferLength, const char *Format,
}
void ScopedString::append(const char *Format, va_list Args) {
- CHECK_LT(Length, String.size());
- formatString(String.data() + Length, String.size() - Length, Format, Args);
- Length += strlen(String.data() + Length);
+ DCHECK_LT(Length, String.size());
+ va_list ArgsCopy;
+ va_copy(ArgsCopy, Args);
+ // formatString doesn't currently support a null buffer or zero buffer length,
+ // so in order to get the resulting formatted string length, we use a one-char
+ // buffer.
+ char C[1];
+ const uptr AdditionalLength =
+ static_cast<uptr>(formatString(C, sizeof(C), Format, Args)) + 1;
+ String.resize(Length + AdditionalLength);
+ formatString(String.data() + Length, AdditionalLength, Format, ArgsCopy);
+ Length = strlen(String.data());
CHECK_LT(Length, String.size());
}
@@ -227,7 +235,7 @@ FORMAT(1, 2)
void Printf(const char *Format, ...) {
va_list Args;
va_start(Args, Format);
- ScopedString Msg(512);
+ ScopedString Msg(1024);
Msg.append(Format, Args);
outputRaw(Msg.data());
va_end(Args);
diff --git a/lib/scudo/standalone/string_utils.h b/lib/scudo/standalone/string_utils.h
index aea7b3ffd7a50..acd60bda9d8d3 100644
--- a/lib/scudo/standalone/string_utils.h
+++ b/lib/scudo/standalone/string_utils.h
@@ -29,6 +29,7 @@ public:
}
void append(const char *Format, va_list Args);
void append(const char *Format, ...);
+ void output() const { outputRaw(String.data()); }
private:
Vector<char> String;
diff --git a/lib/scudo/standalone/tsd_exclusive.h b/lib/scudo/standalone/tsd_exclusive.h
index 18cce1c56af86..971ae4857fcab 100644
--- a/lib/scudo/standalone/tsd_exclusive.h
+++ b/lib/scudo/standalone/tsd_exclusive.h
@@ -61,7 +61,7 @@ template <class Allocator> struct TSDRegistryExT {
private:
void initOnceMaybe(Allocator *Instance) {
ScopedLock L(Mutex);
- if (Initialized)
+ if (LIKELY(Initialized))
return;
initLinkerInitialized(Instance); // Sets Initialized.
}
@@ -71,7 +71,7 @@ private:
// used instead.
NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
initOnceMaybe(Instance);
- if (MinimalInit)
+ if (UNLIKELY(MinimalInit))
return;
CHECK_EQ(
pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
diff --git a/lib/scudo/standalone/tsd_shared.h b/lib/scudo/standalone/tsd_shared.h
index 0f0a83a3eed48..da88a897b8f52 100644
--- a/lib/scudo/standalone/tsd_shared.h
+++ b/lib/scudo/standalone/tsd_shared.h
@@ -95,7 +95,7 @@ private:
void initOnceMaybe(Allocator *Instance) {
ScopedLock L(Mutex);
- if (Initialized)
+ if (LIKELY(Initialized))
return;
initLinkerInitialized(Instance); // Sets Initialized.
}
@@ -112,8 +112,7 @@ private:
// Use the Precedence of the current TSD as our random seed. Since we are
// in the slow path, it means that tryLock failed, and as a result it's
// very likely that said Precedence is non-zero.
- u32 RandState = static_cast<u32>(CurrentTSD->getPrecedence());
- const u32 R = getRandomU32(&RandState);
+ const u32 R = static_cast<u32>(CurrentTSD->getPrecedence());
const u32 Inc = CoPrimes[R % NumberOfCoPrimes];
u32 Index = R % NumberOfTSDs;
uptr LowestPrecedence = UINTPTR_MAX;
diff --git a/lib/scudo/standalone/wrappers_c.cc b/lib/scudo/standalone/wrappers_c.cpp
index 5908c600be333..dffd7cc26fe83 100644
--- a/lib/scudo/standalone/wrappers_c.cc
+++ b/lib/scudo/standalone/wrappers_c.cpp
@@ -1,4 +1,4 @@
-//===-- wrappers_c.cc -------------------------------------------*- C++ -*-===//
+//===-- wrappers_c.cpp ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/lib/scudo/standalone/wrappers_c.inc b/lib/scudo/standalone/wrappers_c.inc
index 2beddc7248001..a9adbc83588ba 100644
--- a/lib/scudo/standalone/wrappers_c.inc
+++ b/lib/scudo/standalone/wrappers_c.inc
@@ -38,8 +38,17 @@ INTERFACE WEAK struct SCUDO_MALLINFO SCUDO_PREFIX(mallinfo)(void) {
struct SCUDO_MALLINFO Info = {};
scudo::StatCounters Stats;
SCUDO_ALLOCATOR.getStats(Stats);
+ // Space allocated in mmapped regions (bytes)
+ Info.hblkhd = static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatMapped]);
+ // Maximum total allocated space (bytes)
+ Info.usmblks = Info.hblkhd;
+ // Space in freed fastbin blocks (bytes)
+ Info.fsmblks = static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatFree]);
+ // Total allocated space (bytes)
Info.uordblks =
static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatAllocated]);
+ // Total free space (bytes)
+ Info.fordblks = Info.fsmblks;
return Info;
}
@@ -170,7 +179,8 @@ INTERFACE WEAK void *SCUDO_PREFIX(aligned_alloc)(size_t alignment,
SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc, alignment));
}
-INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(int, FILE *) {
- errno = ENOTSUP;
- return -1;
+INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(UNUSED int options, FILE *stream) {
+ fputs("<malloc version=\"scudo-1\">", stream);
+ fputs("</malloc>", stream);
+ return 0;
}
diff --git a/lib/scudo/standalone/wrappers_c_bionic.cc b/lib/scudo/standalone/wrappers_c_bionic.cpp
index f6e863deb9736..fa4145c066b64 100644
--- a/lib/scudo/standalone/wrappers_c_bionic.cc
+++ b/lib/scudo/standalone/wrappers_c_bionic.cpp
@@ -1,4 +1,4 @@
-//===-- wrappers_c_bionic.cc ------------------------------------*- C++ -*-===//
+//===-- wrappers_c_bionic.cpp -----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/lib/scudo/standalone/wrappers_cpp.cc b/lib/scudo/standalone/wrappers_cpp.cpp
index 3ae1cdc05a06e..72235e9c98209 100644
--- a/lib/scudo/standalone/wrappers_cpp.cc
+++ b/lib/scudo/standalone/wrappers_cpp.cpp
@@ -1,4 +1,4 @@
-//===-- wrappers_cpp.cc -----------------------------------------*- C++ -*-===//
+//===-- wrappers_cpp.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.