path: root/contrib/llvm-project/compiler-rt/lib/scudo/standalone
Diffstat (limited to 'contrib/llvm-project/compiler-rt/lib/scudo/standalone')
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_common.h  92
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config.def  131
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config.h  211
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h  149
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/atomic_helpers.h  145
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/bytemap.h  43
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/checksum.cpp  99
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/checksum.h  63
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/chunk.h  143
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/combined.h  1736
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.cpp  24
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.h  236
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable.h  44
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable_base.h  56
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable_linux.cpp  52
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable_linux.h  38
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/crc32_hw.cpp  29
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags.cpp  76
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags.h  38
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags.inc  51
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags_parser.cpp  178
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags_parser.h  56
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.cpp  236
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.h  32
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp  52
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/include/scudo/interface.h  182
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/internal_defs.h  166
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.cpp  242
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.h  25
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/list.h  240
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/local_cache.h  189
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map.cpp  84
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map.h  92
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_base.h  129
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp  261
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.h  75
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_linux.cpp  153
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_linux.h  67
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/memtag.h  335
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/mutex.h  97
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/options.h  74
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/platform.h  94
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary32.h  1177
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary64.h  1737
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/quarantine.h  309
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.cpp  17
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.h  701
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.cpp  192
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.h  60
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/report_linux.cpp  55
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/report_linux.h  34
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/secondary.h  734
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/size_class_map.h  353
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/stack_depot.h  208
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/stats.h  102
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.cpp  241
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.h  50
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/thread_annotations.h  70
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/timing.cpp  29
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/timing.h  239
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/trusty.cpp  118
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/trusty.h  24
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd.h  90
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_exclusive.h  198
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_shared.h  269
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/vector.h  143
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.cpp  40
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.h  62
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.inc  377
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp  75
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h  70
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp  150
72 files changed, 14439 insertions, 0 deletions
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_common.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_common.h
new file mode 100644
index 000000000000..2b77516ad11c
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_common.h
@@ -0,0 +1,92 @@
+//===-- allocator_common.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ALLOCATOR_COMMON_H_
+#define SCUDO_ALLOCATOR_COMMON_H_
+
+#include "common.h"
+#include "list.h"
+
+namespace scudo {
+
+template <class SizeClassAllocator> struct TransferBatch {
+ typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
+ typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;
+
+ static const u16 MaxNumCached = SizeClassMap::MaxNumCachedHint;
+ void setFromArray(CompactPtrT *Array, u16 N) {
+ DCHECK_LE(N, MaxNumCached);
+ Count = N;
+ memcpy(Batch, Array, sizeof(Batch[0]) * Count);
+ }
+ void appendFromArray(CompactPtrT *Array, u16 N) {
+ DCHECK_LE(N, MaxNumCached - Count);
+ memcpy(Batch + Count, Array, sizeof(Batch[0]) * N);
+ // u16 will be promoted to int by arithmetic type conversion.
+ Count = static_cast<u16>(Count + N);
+ }
+ void appendFromTransferBatch(TransferBatch *B, u16 N) {
+ DCHECK_LE(N, MaxNumCached - Count);
+ DCHECK_GE(B->Count, N);
+ // Append from the back of `B`.
+ memcpy(Batch + Count, B->Batch + (B->Count - N), sizeof(Batch[0]) * N);
+ // u16 will be promoted to int by arithmetic type conversion.
+ Count = static_cast<u16>(Count + N);
+ B->Count = static_cast<u16>(B->Count - N);
+ }
+ void clear() { Count = 0; }
+ bool empty() { return Count == 0; }
+ void add(CompactPtrT P) {
+ DCHECK_LT(Count, MaxNumCached);
+ Batch[Count++] = P;
+ }
+ void moveToArray(CompactPtrT *Array) {
+ memcpy(Array, Batch, sizeof(Batch[0]) * Count);
+ clear();
+ }
+
+ void moveNToArray(CompactPtrT *Array, u16 N) {
+ DCHECK_LE(N, Count);
+ memcpy(Array, Batch + Count - N, sizeof(Batch[0]) * N);
+ Count = static_cast<u16>(Count - N);
+ }
+ u16 getCount() const { return Count; }
+ bool isEmpty() const { return Count == 0U; }
+ CompactPtrT get(u16 I) const {
+ DCHECK_LE(I, Count);
+ return Batch[I];
+ }
+ TransferBatch *Next;
+
+private:
+ CompactPtrT Batch[MaxNumCached];
+ u16 Count;
+};
+
+// A BatchGroup is used to collect blocks. Each group has a group id to
+// identify the group kind of contained blocks.
+template <class SizeClassAllocator> struct BatchGroup {
+ // `Next` is used by IntrusiveList.
+ BatchGroup *Next;
+ // The compact base address of each group
+ uptr CompactPtrGroupBase;
+ // Cache value of SizeClassAllocatorLocalCache::getMaxCached()
+ u16 MaxCachedPerBatch;
+ // Number of blocks pushed into this group. This is an increment-only
+ // counter.
+ uptr PushedBlocks;
+ // This is used to track how many bytes are not in-use since last time we
+ // tried to release pages.
+ uptr BytesInBGAtLastCheckpoint;
+ // Blocks are managed by TransferBatch in a list.
+ SinglyLinkedList<TransferBatch<SizeClassAllocator>> Batches;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_ALLOCATOR_COMMON_H_
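
TransferBatch is a fixed-capacity array of compact pointers with an intrusive Next link, and BatchGroup strings such batches together per group. A minimal usage sketch follows; the include path and the StubSizeClassMap/StubSizeClassAllocator types are hypothetical stand-ins that only provide the two members the template requires.

    #include "allocator_common.h" // scudo::TransferBatch (include path assumed)
    #include <cassert>

    // Hypothetical stub satisfying TransferBatch's template requirements.
    struct StubSizeClassMap {
      static const scudo::u16 MaxNumCachedHint = 8;
    };
    struct StubSizeClassAllocator {
      using SizeClassMap = StubSizeClassMap;
      using CompactPtrT = scudo::uptr;
    };

    int main() {
      scudo::TransferBatch<StubSizeClassAllocator> Batch;
      Batch.clear();

      scudo::uptr Blocks[4] = {0x10, 0x20, 0x30, 0x40};
      Batch.setFromArray(Blocks, 4); // Count == 4.
      assert(Batch.getCount() == 4);

      // moveNToArray() pops elements from the back of the batch.
      scudo::uptr Out[2];
      Batch.moveNToArray(Out, 2);
      assert(Batch.getCount() == 2);
      assert(Out[0] == 0x30 && Out[1] == 0x40);
      return 0;
    }
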
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config.def b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config.def
new file mode 100644
index 000000000000..ce37b1cfaedc
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config.def
@@ -0,0 +1,131 @@
+//===-- allocator_config.def ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all the flags and types supported in Scudo. For optional
+// flags and types, only explicitly define them when interested (i.e., unused
+// optional flags or types can be skipped).
+
+#ifndef BASE_REQUIRED_TEMPLATE_TYPE
+#define BASE_REQUIRED_TEMPLATE_TYPE(...)
+#endif
+#ifndef BASE_OPTIONAL
+#define BASE_OPTIONAL(...)
+#endif
+#ifndef PRIMARY_REQUIRED_TYPE
+#define PRIMARY_REQUIRED_TYPE(...)
+#endif
+#ifndef PRIMARY_REQUIRED
+#define PRIMARY_REQUIRED(...)
+#endif
+#ifndef PRIMARY_OPTIONAL
+#define PRIMARY_OPTIONAL(...)
+#endif
+#ifndef PRIMARY_OPTIONAL_TYPE
+#define PRIMARY_OPTIONAL_TYPE(...)
+#endif
+#ifndef SECONDARY_REQUIRED_TEMPLATE_TYPE
+#define SECONDARY_REQUIRED_TEMPLATE_TYPE(...)
+#endif
+#ifndef SECONDARY_CACHE_OPTIONAL
+#define SECONDARY_CACHE_OPTIONAL(...)
+#endif
+
+// BASE_REQUIRED_TEMPLATE_TYPE(NAME)
+//
+// Thread-Specific Data Registry used, shared or exclusive.
+BASE_REQUIRED_TEMPLATE_TYPE(TSDRegistryT)
+
+// Defines the type of Primary allocator to use.
+BASE_REQUIRED_TEMPLATE_TYPE(PrimaryT)
+
+// Defines the type of Secondary allocator to use.
+BASE_REQUIRED_TEMPLATE_TYPE(SecondaryT)
+
+// BASE_OPTIONAL(TYPE, NAME, DEFAULT)
+//
+// Indicates possible support for Memory Tagging.
+BASE_OPTIONAL(const bool, MaySupportMemoryTagging, false)
+
+// PRIMARY_REQUIRED_TYPE(NAME)
+//
+// SizeClassMap to use with the Primary.
+PRIMARY_REQUIRED_TYPE(SizeClassMap)
+
+// PRIMARY_REQUIRED(TYPE, NAME)
+//
+// Log2 of the size of a size class region, as used by the Primary.
+PRIMARY_REQUIRED(const uptr, RegionSizeLog)
+
+// Conceptually, a region will be divided into groups based on the address
+// range. Each allocation consumes blocks in the same group until exhaustion,
+// then it pops blocks from a new group. Therefore, `GroupSizeLog` is always
+// smaller than or equal to `RegionSizeLog`. Note that `GroupSizeLog` needs to be
+// equal to `RegionSizeLog` for SizeClassAllocator32 because of certain
+// constraints.
+PRIMARY_REQUIRED(const uptr, GroupSizeLog)
+
+// Call map for user memory with at least this size. Only used with primary64.
+PRIMARY_REQUIRED(const uptr, MapSizeIncrement)
+
+// Defines the minimal & maximal release interval that can be set.
+PRIMARY_REQUIRED(const s32, MinReleaseToOsIntervalMs)
+PRIMARY_REQUIRED(const s32, MaxReleaseToOsIntervalMs)
+
+// PRIMARY_OPTIONAL(TYPE, NAME, DEFAULT)
+//
+// The scale of a compact pointer. E.g., Ptr = Base + (CompactPtr << Scale).
+PRIMARY_OPTIONAL(const uptr, CompactPtrScale, SCUDO_MIN_ALIGNMENT_LOG)
+
+// Indicates support for offsetting the start of a region by a random number of
+// pages. This is only used if `EnableContiguousRegions` is enabled.
+PRIMARY_OPTIONAL(const bool, EnableRandomOffset, false)
+PRIMARY_OPTIONAL(const s32, DefaultReleaseToOsIntervalMs, INT32_MIN)
+
+// When `EnableContiguousRegions` is true, all regions will be arranged
+// adjacently. This will reduce the fragmentation caused by region allocations
+// but may require a huge amount of contiguous pages at initialization.
+PRIMARY_OPTIONAL(const bool, EnableContiguousRegions, true)
+
+// PRIMARY_OPTIONAL_TYPE(NAME, DEFAULT)
+//
+// Use a condition variable to shorten the wait for a freelist refill. Note
+// that this depends on each platform's condition-variable implementation, so
+// the performance may vary and a benefit is not guaranteed.
+PRIMARY_OPTIONAL_TYPE(ConditionVariableT, ConditionVariableDummy)
+
+// Defines the type and scale of a compact pointer. A compact pointer can
+// be understood as the offset of a pointer within the region it belongs
+// to, in increments of a power-of-2 scale. See `CompactPtrScale` also.
+PRIMARY_OPTIONAL_TYPE(CompactPtrT, uptr)
+
+// SECONDARY_REQUIRED_TEMPLATE_TYPE(NAME)
+//
+// Defines the type of Secondary Cache to use.
+SECONDARY_REQUIRED_TEMPLATE_TYPE(CacheT)
+
+// SECONDARY_CACHE_OPTIONAL(TYPE, NAME, DEFAULT)
+//
+// Defines the type of cache used by the Secondary. Some additional
+// configuration entries can be necessary depending on the Cache.
+SECONDARY_CACHE_OPTIONAL(const u32, EntriesArraySize, 0)
+SECONDARY_CACHE_OPTIONAL(const u32, QuarantineSize, 0)
+SECONDARY_CACHE_OPTIONAL(const u32, DefaultMaxEntriesCount, 0)
+SECONDARY_CACHE_OPTIONAL(const uptr, DefaultMaxEntrySize, 0)
+SECONDARY_CACHE_OPTIONAL(const s32, MinReleaseToOsIntervalMs, INT32_MIN)
+SECONDARY_CACHE_OPTIONAL(const s32, MaxReleaseToOsIntervalMs, INT32_MAX)
+SECONDARY_CACHE_OPTIONAL(const s32, DefaultReleaseToOsIntervalMs, INT32_MIN)
+
+#undef SECONDARY_CACHE_OPTIONAL
+#undef SECONDARY_REQUIRED_TEMPLATE_TYPE
+#undef PRIMARY_OPTIONAL_TYPE
+#undef PRIMARY_OPTIONAL
+#undef PRIMARY_REQUIRED
+#undef PRIMARY_REQUIRED_TYPE
+#undef BASE_OPTIONAL
+#undef BASE_REQUIRED_TEMPLATE_TYPE
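
allocator_config.def is an X-macro list: a consumer defines only the macro families it cares about, includes the file, and every entry expands through those definitions; undefined families fall back to the empty defaults at the top and everything is #undef'd at the end. A small sketch of the pattern that merely prints the optional Primary entries, assuming the .def file is on the include path:

    #include <cstdio>

    int main() {
      printf("Optional Primary flags:\n");
      // Expand only PRIMARY_OPTIONAL; the other families expand to nothing.
    #define PRIMARY_OPTIONAL(TYPE, NAME, DEFAULT)                                \
      printf("  %s %s (default: %s)\n", #TYPE, #NAME, #DEFAULT);
    #include "allocator_config.def"
      return 0;
    }

The same trick, with accessor-generating macro bodies instead of printf calls, is what allocator_config_wrapper.h does below.
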
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config.h
new file mode 100644
index 000000000000..60f59bdd2f4c
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config.h
@@ -0,0 +1,211 @@
+//===-- allocator_config.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ALLOCATOR_CONFIG_H_
+#define SCUDO_ALLOCATOR_CONFIG_H_
+
+#include "combined.h"
+#include "common.h"
+#include "condition_variable.h"
+#include "flags.h"
+#include "primary32.h"
+#include "primary64.h"
+#include "secondary.h"
+#include "size_class_map.h"
+#include "tsd_exclusive.h"
+#include "tsd_shared.h"
+
+// To import a custom configuration, define `SCUDO_USE_CUSTOM_CONFIG` and
+// alias the `Config` like:
+//
+// namespace scudo {
+// // The instance of Scudo will be initiated with `Config`.
+// typedef CustomConfig Config;
+// // Aliasing as default configuration to run the tests with this config.
+// typedef CustomConfig DefaultConfig;
+// } // namespace scudo
+//
+// Put them in the header `custom_scudo_config.h`; the custom configuration
+// will then be used and all the tests can still be run.
+#ifdef SCUDO_USE_CUSTOM_CONFIG
+#include "custom_scudo_config.h"
+#endif
+
+namespace scudo {
+
+// Scudo uses a structure as a template argument that specifies the
+// configuration options for the various subcomponents of the allocator. See the
+// following configs as examples and check `allocator_config.def` for all the
+// available options.
+
+#ifndef SCUDO_USE_CUSTOM_CONFIG
+
+// Default configurations for various platforms. Note this is only enabled when
+// there's no custom configuration in the build system.
+struct DefaultConfig {
+ static const bool MaySupportMemoryTagging = true;
+ template <class A> using TSDRegistryT = TSDRegistryExT<A>; // Exclusive
+
+ struct Primary {
+ using SizeClassMap = DefaultSizeClassMap;
+#if SCUDO_CAN_USE_PRIMARY64
+ static const uptr RegionSizeLog = 32U;
+ static const uptr GroupSizeLog = 21U;
+ typedef uptr CompactPtrT;
+ static const uptr CompactPtrScale = 0;
+ static const bool EnableRandomOffset = true;
+ static const uptr MapSizeIncrement = 1UL << 18;
+#else
+ static const uptr RegionSizeLog = 19U;
+ static const uptr GroupSizeLog = 19U;
+ typedef uptr CompactPtrT;
+#endif
+ static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
+ static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+ };
+#if SCUDO_CAN_USE_PRIMARY64
+ template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
+#else
+ template <typename Config> using PrimaryT = SizeClassAllocator32<Config>;
+#endif
+
+ struct Secondary {
+ struct Cache {
+ static const u32 EntriesArraySize = 32U;
+ static const u32 QuarantineSize = 0U;
+ static const u32 DefaultMaxEntriesCount = 32U;
+ static const uptr DefaultMaxEntrySize = 1UL << 19;
+ static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
+ static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+ };
+ template <typename Config> using CacheT = MapAllocatorCache<Config>;
+ };
+
+ template <typename Config> using SecondaryT = MapAllocator<Config>;
+};
+
+#endif // SCUDO_USE_CUSTOM_CONFIG
+
+struct AndroidConfig {
+ static const bool MaySupportMemoryTagging = true;
+ template <class A>
+ using TSDRegistryT = TSDRegistrySharedT<A, 8U, 2U>; // Shared, max 8 TSDs.
+
+ struct Primary {
+ using SizeClassMap = AndroidSizeClassMap;
+#if SCUDO_CAN_USE_PRIMARY64
+ static const uptr RegionSizeLog = 28U;
+ typedef u32 CompactPtrT;
+ static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+ static const uptr GroupSizeLog = 20U;
+ static const bool EnableRandomOffset = true;
+ static const uptr MapSizeIncrement = 1UL << 18;
+#else
+ static const uptr RegionSizeLog = 18U;
+ static const uptr GroupSizeLog = 18U;
+ typedef uptr CompactPtrT;
+#endif
+ static const s32 MinReleaseToOsIntervalMs = 1000;
+ static const s32 MaxReleaseToOsIntervalMs = 1000;
+ };
+#if SCUDO_CAN_USE_PRIMARY64
+ template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
+#else
+ template <typename Config> using PrimaryT = SizeClassAllocator32<Config>;
+#endif
+
+ struct Secondary {
+ struct Cache {
+ static const u32 EntriesArraySize = 256U;
+ static const u32 QuarantineSize = 32U;
+ static const u32 DefaultMaxEntriesCount = 32U;
+ static const uptr DefaultMaxEntrySize = 2UL << 20;
+ static const s32 MinReleaseToOsIntervalMs = 0;
+ static const s32 MaxReleaseToOsIntervalMs = 1000;
+ };
+ template <typename Config> using CacheT = MapAllocatorCache<Config>;
+ };
+
+ template <typename Config> using SecondaryT = MapAllocator<Config>;
+};
+
+#if SCUDO_CAN_USE_PRIMARY64
+struct FuchsiaConfig {
+ static const bool MaySupportMemoryTagging = false;
+ template <class A>
+ using TSDRegistryT = TSDRegistrySharedT<A, 8U, 4U>; // Shared, max 8 TSDs.
+
+ struct Primary {
+ using SizeClassMap = FuchsiaSizeClassMap;
+#if SCUDO_RISCV64
+ // Support 39-bit VMA for riscv-64
+ static const uptr RegionSizeLog = 28U;
+ static const uptr GroupSizeLog = 19U;
+ static const bool EnableContiguousRegions = false;
+#else
+ static const uptr RegionSizeLog = 30U;
+ static const uptr GroupSizeLog = 21U;
+#endif
+ typedef u32 CompactPtrT;
+ static const bool EnableRandomOffset = true;
+ static const uptr MapSizeIncrement = 1UL << 18;
+ static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+ static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
+ static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+ };
+ template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
+
+ struct Secondary {
+ template <typename Config> using CacheT = MapAllocatorNoCache<Config>;
+ };
+ template <typename Config> using SecondaryT = MapAllocator<Config>;
+};
+
+struct TrustyConfig {
+ static const bool MaySupportMemoryTagging = true;
+ template <class A>
+ using TSDRegistryT = TSDRegistrySharedT<A, 1U, 1U>; // Shared, max 1 TSD.
+
+ struct Primary {
+ using SizeClassMap = TrustySizeClassMap;
+ static const uptr RegionSizeLog = 28U;
+ static const uptr GroupSizeLog = 20U;
+ typedef u32 CompactPtrT;
+ static const bool EnableRandomOffset = false;
+ static const uptr MapSizeIncrement = 1UL << 12;
+ static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+ static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
+ static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+ };
+ template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
+
+ struct Secondary {
+ template <typename Config> using CacheT = MapAllocatorNoCache<Config>;
+ };
+
+ template <typename Config> using SecondaryT = MapAllocator<Config>;
+};
+#endif
+
+#ifndef SCUDO_USE_CUSTOM_CONFIG
+
+#if SCUDO_ANDROID
+typedef AndroidConfig Config;
+#elif SCUDO_FUCHSIA
+typedef FuchsiaConfig Config;
+#elif SCUDO_TRUSTY
+typedef TrustyConfig Config;
+#else
+typedef DefaultConfig Config;
+#endif
+
+#endif // SCUDO_USE_CUSTOM_CONFIG
+
+} // namespace scudo
+
+#endif // SCUDO_ALLOCATOR_CONFIG_H_
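
For reference, a hypothetical `custom_scudo_config.h` of the shape described at the top of this file could look as follows. The values are illustrative only, all required entries from allocator_config.def must be present, and the sketch assumes a 64-bit target where SCUDO_CAN_USE_PRIMARY64 holds.

    // custom_scudo_config.h -- hypothetical example, enabled by building with
    // -DSCUDO_USE_CUSTOM_CONFIG and putting this header on the include path.
    namespace scudo {

    struct CustomConfig {
      static const bool MaySupportMemoryTagging = false;
      template <class A> using TSDRegistryT = TSDRegistryExT<A>; // Exclusive

      struct Primary {
        using SizeClassMap = DefaultSizeClassMap;
        static const uptr RegionSizeLog = 30U;
        static const uptr GroupSizeLog = 20U;
        typedef uptr CompactPtrT;
        static const uptr CompactPtrScale = 0;
        static const bool EnableRandomOffset = true;
        static const uptr MapSizeIncrement = 1UL << 18;
        static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
        static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
      };
      template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;

      struct Secondary {
        // No secondary cache in this sketch; see DefaultConfig for a cached setup.
        template <typename Config> using CacheT = MapAllocatorNoCache<Config>;
      };
      template <typename Config> using SecondaryT = MapAllocator<Config>;
    };

    // The instance of Scudo will be initiated with `Config`.
    typedef CustomConfig Config;
    // Aliasing as default configuration to run the tests with this config.
    typedef CustomConfig DefaultConfig;

    } // namespace scudo
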
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h
new file mode 100644
index 000000000000..5477236ac1f3
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h
@@ -0,0 +1,149 @@
+//===-- allocator_config_wrapper.h ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ALLOCATOR_CONFIG_WRAPPER_H_
+#define SCUDO_ALLOCATOR_CONFIG_WRAPPER_H_
+
+#include "condition_variable.h"
+#include "internal_defs.h"
+#include "secondary.h"
+
+namespace {
+
+template <typename T> struct removeConst {
+ using type = T;
+};
+template <typename T> struct removeConst<const T> {
+ using type = T;
+};
+
+// This is only used for SFINAE when detecting if a type is defined.
+template <typename T> struct voidAdaptor {
+ using type = void;
+};
+
+// This is used to detect a flag defined with the wrong type; such a flag is
+// treated as an undefined optional flag.
+template <typename L, typename R> struct assertSameType {
+ template <typename, typename> struct isSame {
+ static constexpr bool value = false;
+ };
+ template <typename T> struct isSame<T, T> {
+ static constexpr bool value = true;
+ };
+ static_assert(isSame<L, R>::value, "Flag type mismatches");
+ using type = R;
+};
+
+} // namespace
+
+namespace scudo {
+
+#define OPTIONAL_TEMPLATE(TYPE, NAME, DEFAULT, MEMBER) \
+ template <typename Config, typename = TYPE> struct NAME##State { \
+ static constexpr removeConst<TYPE>::type getValue() { return DEFAULT; } \
+ }; \
+ template <typename Config> \
+ struct NAME##State< \
+ Config, typename assertSameType<decltype(Config::MEMBER), TYPE>::type> { \
+ static constexpr removeConst<TYPE>::type getValue() { \
+ return Config::MEMBER; \
+ } \
+ };
+
+#define OPTIONAL_TYPE_TEMPLATE(NAME, DEFAULT, MEMBER) \
+ template <typename Config, typename Void = void> struct NAME##Type { \
+ static constexpr bool enabled() { return false; } \
+ using NAME = DEFAULT; \
+ }; \
+ template <typename Config> \
+ struct NAME##Type<Config, \
+ typename voidAdaptor<typename Config::MEMBER>::type> { \
+ static constexpr bool enabled() { return true; } \
+ using NAME = typename Config::MEMBER; \
+ };
+
+template <typename AllocatorConfig> struct BaseConfig {
+#define BASE_REQUIRED_TEMPLATE_TYPE(NAME) \
+ template <typename T> using NAME = typename AllocatorConfig::template NAME<T>;
+
+#define BASE_OPTIONAL(TYPE, NAME, DEFAULT) \
+ OPTIONAL_TEMPLATE(TYPE, NAME, DEFAULT, NAME) \
+ static constexpr removeConst<TYPE>::type get##NAME() { \
+ return NAME##State<AllocatorConfig>::getValue(); \
+ }
+
+#include "allocator_config.def"
+}; // BaseConfig
+
+template <typename AllocatorConfig> struct PrimaryConfig {
+ // TODO: Pass this flag through template argument to remove this hard-coded
+ // function.
+ static constexpr bool getMaySupportMemoryTagging() {
+ return BaseConfig<AllocatorConfig>::getMaySupportMemoryTagging();
+ }
+
+#define PRIMARY_REQUIRED_TYPE(NAME) \
+ using NAME = typename AllocatorConfig::Primary::NAME;
+
+#define PRIMARY_REQUIRED(TYPE, NAME) \
+ static constexpr removeConst<TYPE>::type get##NAME() { \
+ return AllocatorConfig::Primary::NAME; \
+ }
+
+#define PRIMARY_OPTIONAL(TYPE, NAME, DEFAULT) \
+ OPTIONAL_TEMPLATE(TYPE, NAME, DEFAULT, NAME) \
+ static constexpr removeConst<TYPE>::type get##NAME() { \
+ return NAME##State<typename AllocatorConfig::Primary>::getValue(); \
+ }
+
+#define PRIMARY_OPTIONAL_TYPE(NAME, DEFAULT) \
+ OPTIONAL_TYPE_TEMPLATE(NAME, DEFAULT, NAME) \
+ static constexpr bool has##NAME() { \
+ return NAME##Type<typename AllocatorConfig::Primary>::enabled(); \
+ } \
+ using NAME = typename NAME##Type<typename AllocatorConfig::Primary>::NAME;
+
+#include "allocator_config.def"
+
+}; // PrimaryConfig
+
+template <typename AllocatorConfig> struct SecondaryConfig {
+ // TODO: Pass this flag through template argument to remove this hard-coded
+ // function.
+ static constexpr bool getMaySupportMemoryTagging() {
+ return BaseConfig<AllocatorConfig>::getMaySupportMemoryTagging();
+ }
+
+#define SECONDARY_REQUIRED_TEMPLATE_TYPE(NAME) \
+ template <typename T> \
+ using NAME = typename AllocatorConfig::Secondary::template NAME<T>;
+#include "allocator_config.def"
+
+ struct CacheConfig {
+ // TODO: Pass this flag through template argument to remove this hard-coded
+ // function.
+ static constexpr bool getMaySupportMemoryTagging() {
+ return BaseConfig<AllocatorConfig>::getMaySupportMemoryTagging();
+ }
+
+#define SECONDARY_CACHE_OPTIONAL(TYPE, NAME, DEFAULT) \
+ OPTIONAL_TEMPLATE(TYPE, NAME, DEFAULT, Cache::NAME) \
+ static constexpr removeConst<TYPE>::type get##NAME() { \
+ return NAME##State<typename AllocatorConfig::Secondary>::getValue(); \
+ }
+#include "allocator_config.def"
+ }; // CacheConfig
+}; // SecondaryConfig
+
+#undef OPTIONAL_TEMPLATE
+#undef OPTIONAL_TYPE_TEMPLATE
+
+} // namespace scudo
+
+#endif // SCUDO_ALLOCATOR_CONFIG_WRAPPER_H_
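
The OPTIONAL_TEMPLATE machinery above is the classic detection idiom: a partial specialization is chosen only when `Config::MEMBER` exists with the expected type, otherwise the primary template supplies the default. A self-contained sketch of the same idiom, with hypothetical names and outside of Scudo:

    #include <cstdio>

    // Primary template: used when Config::Verbose does not exist.
    template <typename Config, typename = const bool> struct VerboseState {
      static constexpr bool getValue() { return false; } // default
    };
    // Specialization: selected when Config::Verbose exists and is `const bool`.
    template <typename Config>
    struct VerboseState<Config, decltype(Config::Verbose)> {
      static constexpr bool getValue() { return Config::Verbose; }
    };

    struct WithFlag { static const bool Verbose = true; };
    struct WithoutFlag {};

    int main() {
      printf("WithFlag: %d\n", VerboseState<WithFlag>::getValue());       // 1
      printf("WithoutFlag: %d\n", VerboseState<WithoutFlag>::getValue()); // 0
      return 0;
    }

The scudo version additionally routes the member type through assertSameType so that a flag defined with the wrong type becomes a hard error at the static_assert rather than silently falling back to the default.
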
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/atomic_helpers.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/atomic_helpers.h
new file mode 100644
index 000000000000..a68ffd16291c
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/atomic_helpers.h
@@ -0,0 +1,145 @@
+//===-- atomic_helpers.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ATOMIC_H_
+#define SCUDO_ATOMIC_H_
+
+#include "internal_defs.h"
+
+namespace scudo {
+
+enum memory_order {
+ memory_order_relaxed = 0,
+ memory_order_consume = 1,
+ memory_order_acquire = 2,
+ memory_order_release = 3,
+ memory_order_acq_rel = 4,
+ memory_order_seq_cst = 5
+};
+static_assert(memory_order_relaxed == __ATOMIC_RELAXED, "");
+static_assert(memory_order_consume == __ATOMIC_CONSUME, "");
+static_assert(memory_order_acquire == __ATOMIC_ACQUIRE, "");
+static_assert(memory_order_release == __ATOMIC_RELEASE, "");
+static_assert(memory_order_acq_rel == __ATOMIC_ACQ_REL, "");
+static_assert(memory_order_seq_cst == __ATOMIC_SEQ_CST, "");
+
+struct atomic_u8 {
+ typedef u8 Type;
+ volatile Type ValDoNotUse;
+};
+
+struct atomic_u16 {
+ typedef u16 Type;
+ volatile Type ValDoNotUse;
+};
+
+struct atomic_s32 {
+ typedef s32 Type;
+ volatile Type ValDoNotUse;
+};
+
+struct atomic_u32 {
+ typedef u32 Type;
+ volatile Type ValDoNotUse;
+};
+
+struct atomic_u64 {
+ typedef u64 Type;
+ // On 32-bit platforms u64 is not necessarily aligned on 8 bytes.
+ alignas(8) volatile Type ValDoNotUse;
+};
+
+struct atomic_uptr {
+ typedef uptr Type;
+ volatile Type ValDoNotUse;
+};
+
+template <typename T>
+inline typename T::Type atomic_load(const volatile T *A, memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ typename T::Type V;
+ __atomic_load(&A->ValDoNotUse, &V, MO);
+ return V;
+}
+
+template <typename T>
+inline void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ __atomic_store(&A->ValDoNotUse, &V, MO);
+}
+
+inline void atomic_thread_fence(memory_order) { __sync_synchronize(); }
+
+template <typename T>
+inline typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
+ memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ return __atomic_fetch_add(&A->ValDoNotUse, V, MO);
+}
+
+template <typename T>
+inline typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
+ memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ return __atomic_fetch_sub(&A->ValDoNotUse, V, MO);
+}
+
+template <typename T>
+inline typename T::Type atomic_fetch_and(volatile T *A, typename T::Type V,
+ memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ return __atomic_fetch_and(&A->ValDoNotUse, V, MO);
+}
+
+template <typename T>
+inline typename T::Type atomic_fetch_or(volatile T *A, typename T::Type V,
+ memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ return __atomic_fetch_or(&A->ValDoNotUse, V, MO);
+}
+
+template <typename T>
+inline typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
+ memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ typename T::Type R;
+ __atomic_exchange(&A->ValDoNotUse, &V, &R, MO);
+ return R;
+}
+
+template <typename T>
+inline bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
+ typename T::Type Xchg,
+ memory_order MO) {
+ return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, false, MO,
+ __ATOMIC_RELAXED);
+}
+
+// Clutter-reducing helpers.
+
+template <typename T>
+inline typename T::Type atomic_load_relaxed(const volatile T *A) {
+ return atomic_load(A, memory_order_relaxed);
+}
+
+template <typename T>
+inline void atomic_store_relaxed(volatile T *A, typename T::Type V) {
+ atomic_store(A, V, memory_order_relaxed);
+}
+
+template <typename T>
+inline typename T::Type
+atomic_compare_exchange_strong(volatile T *A, typename T::Type Cmp,
+ typename T::Type Xchg, memory_order MO) {
+ atomic_compare_exchange_strong(A, &Cmp, Xchg, MO);
+ return Cmp;
+}
+
+} // namespace scudo
+
+#endif // SCUDO_ATOMIC_H_
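
A brief usage sketch of these wrappers; the include path is an assumption, and the header only depends on internal_defs.h:

    #include "atomic_helpers.h" // include path assumed
    #include <cstdio>

    int main() {
      scudo::atomic_u32 Counter = {};
      scudo::atomic_store_relaxed(&Counter, 41U);
      scudo::atomic_fetch_add(&Counter, 1U, scudo::memory_order_relaxed);

      // Strong CAS: succeeds only if the current value matches `Expected`.
      scudo::u32 Expected = 42U;
      const bool Swapped = scudo::atomic_compare_exchange_strong(
          &Counter, &Expected, 100U, scudo::memory_order_acq_rel);
      printf("swapped=%d value=%u\n", Swapped,
             scudo::atomic_load_relaxed(&Counter)); // swapped=1 value=100
      return 0;
    }
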
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/bytemap.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/bytemap.h
new file mode 100644
index 000000000000..248e096d07b6
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/bytemap.h
@@ -0,0 +1,43 @@
+//===-- bytemap.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_BYTEMAP_H_
+#define SCUDO_BYTEMAP_H_
+
+#include "atomic_helpers.h"
+#include "common.h"
+#include "mutex.h"
+
+namespace scudo {
+
+template <uptr Size> class FlatByteMap {
+public:
+ void init() { DCHECK(Size == 0 || Map[0] == 0); }
+
+ void unmapTestOnly() { memset(Map, 0, Size); }
+
+ void set(uptr Index, u8 Value) {
+ DCHECK_LT(Index, Size);
+ DCHECK_EQ(0U, Map[Index]);
+ Map[Index] = Value;
+ }
+ u8 operator[](uptr Index) {
+ DCHECK_LT(Index, Size);
+ return Map[Index];
+ }
+
+ void disable() {}
+ void enable() {}
+
+private:
+ u8 Map[Size] = {};
+};
+
+} // namespace scudo
+
+#endif // SCUDO_BYTEMAP_H_
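
A tiny usage sketch of FlatByteMap; the map size of 64 is hypothetical (in Scudo it comes from the Primary configuration) and the include path is an assumption:

    #include "bytemap.h" // include path assumed
    #include <cassert>

    int main() {
      static scudo::FlatByteMap<64> Map;
      Map.init();
      Map.set(3, 42);      // Each index may only be set once (DCHECK'd).
      assert(Map[3] == 42);
      assert(Map[4] == 0); // Unset entries read as zero.
      return 0;
    }
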
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/checksum.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/checksum.cpp
new file mode 100644
index 000000000000..efa4055bcbc1
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/checksum.cpp
@@ -0,0 +1,99 @@
+//===-- checksum.cpp --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "checksum.h"
+#include "atomic_helpers.h"
+#include "chunk.h"
+
+#if defined(__x86_64__) || defined(__i386__)
+#include <cpuid.h>
+#elif defined(__arm__) || defined(__aarch64__)
+#if SCUDO_FUCHSIA
+#include <zircon/features.h>
+#include <zircon/syscalls.h>
+#else
+#include <sys/auxv.h>
+#endif
+#elif defined(__loongarch__)
+#include <sys/auxv.h>
+#endif
+
+namespace scudo {
+
+Checksum HashAlgorithm = {Checksum::BSD};
+
+#if defined(__x86_64__) || defined(__i386__)
+// i386 and x86_64 specific code to detect CRC32 hardware support via CPUID.
+// CRC32 requires the SSE 4.2 instruction set.
+#ifndef bit_SSE4_2
+#define bit_SSE4_2 bit_SSE42 // clang and gcc have different defines.
+#endif
+
+#ifndef signature_HYGON_ebx // They are not defined in gcc.
+// HYGON: "HygonGenuine".
+#define signature_HYGON_ebx 0x6f677948
+#define signature_HYGON_edx 0x6e65476e
+#define signature_HYGON_ecx 0x656e6975
+#endif
+
+bool hasHardwareCRC32() {
+ u32 Eax, Ebx = 0, Ecx = 0, Edx = 0;
+ __get_cpuid(0, &Eax, &Ebx, &Ecx, &Edx);
+ const bool IsIntel = (Ebx == signature_INTEL_ebx) &&
+ (Edx == signature_INTEL_edx) &&
+ (Ecx == signature_INTEL_ecx);
+ const bool IsAMD = (Ebx == signature_AMD_ebx) && (Edx == signature_AMD_edx) &&
+ (Ecx == signature_AMD_ecx);
+ const bool IsHygon = (Ebx == signature_HYGON_ebx) &&
+ (Edx == signature_HYGON_edx) &&
+ (Ecx == signature_HYGON_ecx);
+ if (!IsIntel && !IsAMD && !IsHygon)
+ return false;
+ __get_cpuid(1, &Eax, &Ebx, &Ecx, &Edx);
+ return !!(Ecx & bit_SSE4_2);
+}
+#elif defined(__arm__) || defined(__aarch64__)
+#ifndef AT_HWCAP
+#define AT_HWCAP 16
+#endif
+#ifndef HWCAP_CRC32
+#define HWCAP_CRC32 (1U << 7) // HWCAP_CRC32 is missing on older platforms.
+#endif
+
+bool hasHardwareCRC32() {
+#if SCUDO_FUCHSIA
+ u32 HWCap;
+ const zx_status_t Status =
+ zx_system_get_features(ZX_FEATURE_KIND_CPU, &HWCap);
+ if (Status != ZX_OK)
+ return false;
+ return !!(HWCap & ZX_ARM64_FEATURE_ISA_CRC32);
+#else
+ return !!(getauxval(AT_HWCAP) & HWCAP_CRC32);
+#endif // SCUDO_FUCHSIA
+}
+#elif defined(__loongarch__)
+// The definition is only pulled in by <sys/auxv.h> since glibc 2.38, so
+// supply it if missing.
+#ifndef HWCAP_LOONGARCH_CRC32
+#define HWCAP_LOONGARCH_CRC32 (1 << 6)
+#endif
+// Query HWCAP for platform capability, according to *Software Development and
+// Build Convention for LoongArch Architectures* v0.1, Section 9.1.
+//
+// Link:
+// https://github.com/loongson/la-softdev-convention/blob/v0.1/la-softdev-convention.adoc#kernel-development
+bool hasHardwareCRC32() {
+ return !!(getauxval(AT_HWCAP) & HWCAP_LOONGARCH_CRC32);
+}
+#else
+// No hardware CRC32 implemented in Scudo for other architectures.
+bool hasHardwareCRC32() { return false; }
+#endif // defined(__x86_64__) || defined(__i386__)
+
+} // namespace scudo
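
A sketch of how this runtime detection is meant to be consumed, mirroring the check done in Allocator::init() in combined.h: the weak computeHardwareCRC32 reference is only non-null when crc32_hw.cpp was built with the required target flags. Linking against checksum.cpp is assumed.

    #include "checksum.h" // include path assumed
    #include <cstdio>

    int main() {
      // Use hardware CRC32 only if the weak symbol is present and the CPU
      // advertises support at runtime.
      const bool UseHardwareCRC32 =
          &scudo::computeHardwareCRC32 && scudo::hasHardwareCRC32();
      printf("hardware CRC32 available: %s\n", UseHardwareCRC32 ? "yes" : "no");
      return 0;
    }
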
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/checksum.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/checksum.h
new file mode 100644
index 000000000000..32ca372b097f
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/checksum.h
@@ -0,0 +1,63 @@
+//===-- checksum.h ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CHECKSUM_H_
+#define SCUDO_CHECKSUM_H_
+
+#include "internal_defs.h"
+
+// Hardware CRC32 is supported at compilation via the following:
+// - for i386 & x86_64: -mcrc32 (earlier: -msse4.2)
+// - for ARM & AArch64: -march=armv8-a+crc or -mcrc
+// An additional check must be performed at runtime as well to make sure the
+// emitted instructions are valid on the target host.
+
+#if defined(__CRC32__)
+// NB: clang has <crc32intrin.h> but GCC does not
+#include <smmintrin.h>
+#define CRC32_INTRINSIC \
+ FIRST_32_SECOND_64(__builtin_ia32_crc32si, __builtin_ia32_crc32di)
+#elif defined(__SSE4_2__)
+#include <smmintrin.h>
+#define CRC32_INTRINSIC FIRST_32_SECOND_64(_mm_crc32_u32, _mm_crc32_u64)
+#endif
+#ifdef __ARM_FEATURE_CRC32
+#include <arm_acle.h>
+#define CRC32_INTRINSIC FIRST_32_SECOND_64(__crc32cw, __crc32cd)
+#endif
+#ifdef __loongarch__
+#include <larchintrin.h>
+#define CRC32_INTRINSIC FIRST_32_SECOND_64(__crcc_w_w_w, __crcc_w_d_w)
+#endif
+
+namespace scudo {
+
+enum class Checksum : u8 {
+ BSD = 0,
+ HardwareCRC32 = 1,
+};
+
+// BSD checksum, unlike a software CRC32, doesn't use any array lookup. We save
+// significantly on memory accesses, as well as 1K of CRC32 table, on platforms
+// that do not support hardware CRC32. The checksum itself is 16-bit, which is at
+// odds with CRC32, but enough for our needs.
+inline u16 computeBSDChecksum(u16 Sum, uptr Data) {
+ for (u8 I = 0; I < sizeof(Data); I++) {
+ Sum = static_cast<u16>((Sum >> 1) | ((Sum & 1) << 15));
+ Sum = static_cast<u16>(Sum + (Data & 0xff));
+ Data >>= 8;
+ }
+ return Sum;
+}
+
+bool hasHardwareCRC32();
+WEAK u32 computeHardwareCRC32(u32 Crc, uptr Data);
+
+} // namespace scudo
+
+#endif // SCUDO_CHECKSUM_H_
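
A tiny usage sketch of computeBSDChecksum(), folding two words into a 16-bit sum starting from a zero seed (the include path is an assumption):

    #include "checksum.h" // include path assumed
    #include <cstdio>

    int main() {
      scudo::u16 Sum = 0;
      Sum = scudo::computeBSDChecksum(Sum, 0xdeadbeef);
      Sum = scudo::computeBSDChecksum(Sum, 0x0badcafe);
      printf("checksum=0x%04x\n", static_cast<unsigned>(Sum));
      return 0;
    }
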
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/chunk.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/chunk.h
new file mode 100644
index 000000000000..9228df047189
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/chunk.h
@@ -0,0 +1,143 @@
+//===-- chunk.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CHUNK_H_
+#define SCUDO_CHUNK_H_
+
+#include "platform.h"
+
+#include "atomic_helpers.h"
+#include "checksum.h"
+#include "common.h"
+#include "report.h"
+
+namespace scudo {
+
+extern Checksum HashAlgorithm;
+
+inline u16 computeChecksum(u32 Seed, uptr Value, uptr *Array, uptr ArraySize) {
+ // If the hardware CRC32 feature is defined here, it was enabled everywhere,
+ // as opposed to only for crc32_hw.cpp. This means that other hardware
+ // specific instructions were likely emitted at other places, and as a result
+ // there is no reason to not use it here.
+#if defined(__CRC32__) || defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+ u32 Crc = static_cast<u32>(CRC32_INTRINSIC(Seed, Value));
+ for (uptr I = 0; I < ArraySize; I++)
+ Crc = static_cast<u32>(CRC32_INTRINSIC(Crc, Array[I]));
+ return static_cast<u16>(Crc ^ (Crc >> 16));
+#else
+ if (HashAlgorithm == Checksum::HardwareCRC32) {
+ u32 Crc = computeHardwareCRC32(Seed, Value);
+ for (uptr I = 0; I < ArraySize; I++)
+ Crc = computeHardwareCRC32(Crc, Array[I]);
+ return static_cast<u16>(Crc ^ (Crc >> 16));
+ } else {
+ u16 Checksum = computeBSDChecksum(static_cast<u16>(Seed), Value);
+ for (uptr I = 0; I < ArraySize; I++)
+ Checksum = computeBSDChecksum(Checksum, Array[I]);
+ return Checksum;
+ }
+#endif // defined(__CRC32__) || defined(__SSE4_2__) ||
+ // defined(__ARM_FEATURE_CRC32)
+}
+
+namespace Chunk {
+
+// Note that in an ideal world, `State` and `Origin` should be `enum class`, and
+// the associated `UnpackedHeader` fields of their respective enum class type,
+// but https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61414 prevents it from
+// happening, as it will error out, complaining that the number of bits is not
+// enough.
+enum Origin : u8 {
+ Malloc = 0,
+ New = 1,
+ NewArray = 2,
+ Memalign = 3,
+};
+
+enum State : u8 { Available = 0, Allocated = 1, Quarantined = 2 };
+
+typedef u64 PackedHeader;
+// Update the 'Mask' constants to reflect changes in this structure.
+struct UnpackedHeader {
+ uptr ClassId : 8;
+ u8 State : 2;
+ // Origin if State == Allocated, or WasZeroed otherwise.
+ u8 OriginOrWasZeroed : 2;
+ uptr SizeOrUnusedBytes : 20;
+ uptr Offset : 16;
+ uptr Checksum : 16;
+};
+typedef atomic_u64 AtomicPackedHeader;
+static_assert(sizeof(UnpackedHeader) == sizeof(PackedHeader), "");
+
+// Those constants are required to silence some -Werror=conversion errors when
+// assigning values to the related bitfield variables.
+constexpr uptr ClassIdMask = (1UL << 8) - 1;
+constexpr u8 StateMask = (1U << 2) - 1;
+constexpr u8 OriginMask = (1U << 2) - 1;
+constexpr uptr SizeOrUnusedBytesMask = (1UL << 20) - 1;
+constexpr uptr OffsetMask = (1UL << 16) - 1;
+constexpr uptr ChecksumMask = (1UL << 16) - 1;
+
+constexpr uptr getHeaderSize() {
+ return roundUp(sizeof(PackedHeader), 1U << SCUDO_MIN_ALIGNMENT_LOG);
+}
+
+inline AtomicPackedHeader *getAtomicHeader(void *Ptr) {
+ return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
+ getHeaderSize());
+}
+
+inline const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
+ return reinterpret_cast<const AtomicPackedHeader *>(
+ reinterpret_cast<uptr>(Ptr) - getHeaderSize());
+}
+
+// We do not need a cryptographically strong hash for the checksum, but a CRC
+// type function that can alert us in the event a header is invalid or
+// corrupted. Ideally slightly better than a simple xor of all fields.
+static inline u16 computeHeaderChecksum(u32 Cookie, const void *Ptr,
+ UnpackedHeader *Header) {
+ UnpackedHeader ZeroChecksumHeader = *Header;
+ ZeroChecksumHeader.Checksum = 0;
+ uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
+ memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
+ return computeChecksum(Cookie, reinterpret_cast<uptr>(Ptr), HeaderHolder,
+ ARRAY_SIZE(HeaderHolder));
+}
+
+inline void storeHeader(u32 Cookie, void *Ptr,
+ UnpackedHeader *NewUnpackedHeader) {
+ NewUnpackedHeader->Checksum =
+ computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader);
+ PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
+ atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
+}
+
+inline void loadHeader(u32 Cookie, const void *Ptr,
+ UnpackedHeader *NewUnpackedHeader) {
+ PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr));
+ *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
+ if (UNLIKELY(NewUnpackedHeader->Checksum !=
+ computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader)))
+ reportHeaderCorruption(const_cast<void *>(Ptr));
+}
+
+inline bool isValid(u32 Cookie, const void *Ptr,
+ UnpackedHeader *NewUnpackedHeader) {
+ PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr));
+ *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
+ return NewUnpackedHeader->Checksum ==
+ computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader);
+}
+
+} // namespace Chunk
+
+} // namespace scudo
+
+#endif // SCUDO_CHUNK_H_
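
The six bitfields above (8 + 2 + 2 + 20 + 16 + 16 bits) pack exactly into the 64-bit PackedHeader that is stored and loaded atomically in front of the user pointer. A sketch of a header round-trip on a fake block follows; the cookie value is hypothetical (Scudo derives it from a random source), and linking against the other scudo standalone sources (at least checksum.cpp and report.cpp) is assumed.

    #include "chunk.h" // include path assumed
    #include <cassert>
    #include <cstdlib>

    int main() {
      // The bitfields pack into a single u64.
      static_assert(sizeof(scudo::Chunk::UnpackedHeader) == sizeof(scudo::u64), "");

      const scudo::u32 Cookie = 0x12345678; // hypothetical cookie
      void *Block = calloc(1, 128);
      void *Ptr = reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(Block) +
                                           scudo::Chunk::getHeaderSize());
      scudo::Chunk::UnpackedHeader Header = {};
      Header.ClassId = 3 & scudo::Chunk::ClassIdMask;
      Header.State = scudo::Chunk::State::Allocated;
      Header.SizeOrUnusedBytes = 64 & scudo::Chunk::SizeOrUnusedBytesMask;
      scudo::Chunk::storeHeader(Cookie, Ptr, &Header);

      scudo::Chunk::UnpackedHeader Loaded;
      scudo::Chunk::loadHeader(Cookie, Ptr, &Loaded); // Verifies the checksum.
      assert(Loaded.ClassId == 3 && Loaded.SizeOrUnusedBytes == 64);
      free(Block);
      return 0;
    }
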
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/combined.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/combined.h
new file mode 100644
index 000000000000..fcf65652c5fc
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/combined.h
@@ -0,0 +1,1736 @@
+//===-- combined.h ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_COMBINED_H_
+#define SCUDO_COMBINED_H_
+
+#include "allocator_config_wrapper.h"
+#include "atomic_helpers.h"
+#include "chunk.h"
+#include "common.h"
+#include "flags.h"
+#include "flags_parser.h"
+#include "local_cache.h"
+#include "mem_map.h"
+#include "memtag.h"
+#include "mutex.h"
+#include "options.h"
+#include "quarantine.h"
+#include "report.h"
+#include "secondary.h"
+#include "stack_depot.h"
+#include "string_utils.h"
+#include "tsd.h"
+
+#include "scudo/interface.h"
+
+#ifdef GWP_ASAN_HOOKS
+#include "gwp_asan/guarded_pool_allocator.h"
+#include "gwp_asan/optional/backtrace.h"
+#include "gwp_asan/optional/segv_handler.h"
+#endif // GWP_ASAN_HOOKS
+
+extern "C" inline void EmptyCallback() {}
+
+#ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
+// This function is not part of the NDK so it does not appear in any public
+// header files. We only declare/use it when targeting the platform.
+extern "C" size_t android_unsafe_frame_pointer_chase(scudo::uptr *buf,
+ size_t num_entries);
+#endif
+
+namespace scudo {
+
+template <class Config, void (*PostInitCallback)(void) = EmptyCallback>
+class Allocator {
+public:
+ using AllocatorConfig = BaseConfig<Config>;
+ using PrimaryT =
+ typename AllocatorConfig::template PrimaryT<PrimaryConfig<Config>>;
+ using SecondaryT =
+ typename AllocatorConfig::template SecondaryT<SecondaryConfig<Config>>;
+ using CacheT = typename PrimaryT::CacheT;
+ typedef Allocator<Config, PostInitCallback> ThisT;
+ typedef typename AllocatorConfig::template TSDRegistryT<ThisT> TSDRegistryT;
+
+ void callPostInitCallback() {
+ pthread_once(&PostInitNonce, PostInitCallback);
+ }
+
+ struct QuarantineCallback {
+ explicit QuarantineCallback(ThisT &Instance, CacheT &LocalCache)
+ : Allocator(Instance), Cache(LocalCache) {}
+
+ // Chunk recycling function, returns a quarantined chunk to the backend,
+ // first making sure it hasn't been tampered with.
+ void recycle(void *Ptr) {
+ Chunk::UnpackedHeader Header;
+ Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
+ if (UNLIKELY(Header.State != Chunk::State::Quarantined))
+ reportInvalidChunkState(AllocatorAction::Recycling, Ptr);
+
+ Header.State = Chunk::State::Available;
+ Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
+
+ if (allocatorSupportsMemoryTagging<AllocatorConfig>())
+ Ptr = untagPointer(Ptr);
+ void *BlockBegin = Allocator::getBlockBegin(Ptr, &Header);
+ Cache.deallocate(Header.ClassId, BlockBegin);
+ }
+
+ // We take a shortcut when allocating a quarantine batch by working with the
+ // appropriate class ID instead of using Size. The compiler should optimize
+ // the class ID computation and work with the associated cache directly.
+ void *allocate(UNUSED uptr Size) {
+ const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
+ sizeof(QuarantineBatch) + Chunk::getHeaderSize());
+ void *Ptr = Cache.allocate(QuarantineClassId);
+ // Quarantine batch allocation failure is fatal.
+ if (UNLIKELY(!Ptr))
+ reportOutOfMemory(SizeClassMap::getSizeByClassId(QuarantineClassId));
+
+ Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) +
+ Chunk::getHeaderSize());
+ Chunk::UnpackedHeader Header = {};
+ Header.ClassId = QuarantineClassId & Chunk::ClassIdMask;
+ Header.SizeOrUnusedBytes = sizeof(QuarantineBatch);
+ Header.State = Chunk::State::Allocated;
+ Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
+
+ // Reset tag to 0 as this chunk may have been previously used for a tagged
+ // user allocation.
+ if (UNLIKELY(useMemoryTagging<AllocatorConfig>(
+ Allocator.Primary.Options.load())))
+ storeTags(reinterpret_cast<uptr>(Ptr),
+ reinterpret_cast<uptr>(Ptr) + sizeof(QuarantineBatch));
+
+ return Ptr;
+ }
+
+ void deallocate(void *Ptr) {
+ const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
+ sizeof(QuarantineBatch) + Chunk::getHeaderSize());
+ Chunk::UnpackedHeader Header;
+ Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
+
+ if (UNLIKELY(Header.State != Chunk::State::Allocated))
+ reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
+ DCHECK_EQ(Header.ClassId, QuarantineClassId);
+ DCHECK_EQ(Header.Offset, 0);
+ DCHECK_EQ(Header.SizeOrUnusedBytes, sizeof(QuarantineBatch));
+
+ Header.State = Chunk::State::Available;
+ Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
+ Cache.deallocate(QuarantineClassId,
+ reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
+ Chunk::getHeaderSize()));
+ }
+
+ private:
+ ThisT &Allocator;
+ CacheT &Cache;
+ };
+
+ typedef GlobalQuarantine<QuarantineCallback, void> QuarantineT;
+ typedef typename QuarantineT::CacheT QuarantineCacheT;
+
+ void init() {
+ performSanityChecks();
+
+ // Check if hardware CRC32 is supported in the binary and by the platform,
+ // if so, opt for the CRC32 hardware version of the checksum.
+ if (&computeHardwareCRC32 && hasHardwareCRC32())
+ HashAlgorithm = Checksum::HardwareCRC32;
+
+ if (UNLIKELY(!getRandom(&Cookie, sizeof(Cookie))))
+ Cookie = static_cast<u32>(getMonotonicTime() ^
+ (reinterpret_cast<uptr>(this) >> 4));
+
+ initFlags();
+ reportUnrecognizedFlags();
+
+ // Store some flags locally.
+ if (getFlags()->may_return_null)
+ Primary.Options.set(OptionBit::MayReturnNull);
+ if (getFlags()->zero_contents)
+ Primary.Options.setFillContentsMode(ZeroFill);
+ else if (getFlags()->pattern_fill_contents)
+ Primary.Options.setFillContentsMode(PatternOrZeroFill);
+ if (getFlags()->dealloc_type_mismatch)
+ Primary.Options.set(OptionBit::DeallocTypeMismatch);
+ if (getFlags()->delete_size_mismatch)
+ Primary.Options.set(OptionBit::DeleteSizeMismatch);
+ if (allocatorSupportsMemoryTagging<AllocatorConfig>() &&
+ systemSupportsMemoryTagging())
+ Primary.Options.set(OptionBit::UseMemoryTagging);
+
+ QuarantineMaxChunkSize =
+ static_cast<u32>(getFlags()->quarantine_max_chunk_size);
+
+ Stats.init();
+ // TODO(chiahungduan): Given that we support setting the default value in
+ // the PrimaryConfig and CacheConfig, consider deprecating the use of the
+ // `release_to_os_interval_ms` flag.
+ const s32 ReleaseToOsIntervalMs = getFlags()->release_to_os_interval_ms;
+ Primary.init(ReleaseToOsIntervalMs);
+ Secondary.init(&Stats, ReleaseToOsIntervalMs);
+ Quarantine.init(
+ static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
+ static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
+ }
+
+ void enableRingBuffer() NO_THREAD_SAFETY_ANALYSIS {
+ AllocationRingBuffer *RB = getRingBuffer();
+ if (RB)
+ RB->Depot->enable();
+ RingBufferInitLock.unlock();
+ }
+
+ void disableRingBuffer() NO_THREAD_SAFETY_ANALYSIS {
+ RingBufferInitLock.lock();
+ AllocationRingBuffer *RB = getRingBuffer();
+ if (RB)
+ RB->Depot->disable();
+ }
+
+ // Initialize the embedded GWP-ASan instance. Requires the main allocator to
+ // be functional, best called from PostInitCallback.
+ void initGwpAsan() {
+#ifdef GWP_ASAN_HOOKS
+ gwp_asan::options::Options Opt;
+ Opt.Enabled = getFlags()->GWP_ASAN_Enabled;
+ Opt.MaxSimultaneousAllocations =
+ getFlags()->GWP_ASAN_MaxSimultaneousAllocations;
+ Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate;
+ Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers;
+ Opt.Recoverable = getFlags()->GWP_ASAN_Recoverable;
+ // Embedded GWP-ASan is locked through the Scudo atfork handler (via
+ // Allocator::disable calling GWPASan.disable). Disable GWP-ASan's atfork
+ // handler.
+ Opt.InstallForkHandlers = false;
+ Opt.Backtrace = gwp_asan::backtrace::getBacktraceFunction();
+ GuardedAlloc.init(Opt);
+
+ if (Opt.InstallSignalHandlers)
+ gwp_asan::segv_handler::installSignalHandlers(
+ &GuardedAlloc, Printf,
+ gwp_asan::backtrace::getPrintBacktraceFunction(),
+ gwp_asan::backtrace::getSegvBacktraceFunction(),
+ Opt.Recoverable);
+
+ GuardedAllocSlotSize =
+ GuardedAlloc.getAllocatorState()->maximumAllocationSize();
+ Stats.add(StatFree, static_cast<uptr>(Opt.MaxSimultaneousAllocations) *
+ GuardedAllocSlotSize);
+#endif // GWP_ASAN_HOOKS
+ }
+
+#ifdef GWP_ASAN_HOOKS
+ const gwp_asan::AllocationMetadata *getGwpAsanAllocationMetadata() {
+ return GuardedAlloc.getMetadataRegion();
+ }
+
+ const gwp_asan::AllocatorState *getGwpAsanAllocatorState() {
+ return GuardedAlloc.getAllocatorState();
+ }
+#endif // GWP_ASAN_HOOKS
+
+ ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
+ TSDRegistry.initThreadMaybe(this, MinimalInit);
+ }
+
+ void unmapTestOnly() {
+ unmapRingBuffer();
+ TSDRegistry.unmapTestOnly(this);
+ Primary.unmapTestOnly();
+ Secondary.unmapTestOnly();
+#ifdef GWP_ASAN_HOOKS
+ if (getFlags()->GWP_ASAN_InstallSignalHandlers)
+ gwp_asan::segv_handler::uninstallSignalHandlers();
+ GuardedAlloc.uninitTestOnly();
+#endif // GWP_ASAN_HOOKS
+ }
+
+ TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
+ QuarantineT *getQuarantine() { return &Quarantine; }
+
+ // The Cache must be provided zero-initialized.
+ void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); }
+
+ // Release the resources used by a TSD, which involves:
+ // - draining the local quarantine cache to the global quarantine;
+ // - releasing the cached pointers back to the Primary;
+ // - unlinking the local stats from the global ones (destroying the cache does
+ // the last two items).
+ void commitBack(TSD<ThisT> *TSD) {
+ TSD->assertLocked(/*BypassCheck=*/true);
+ Quarantine.drain(&TSD->getQuarantineCache(),
+ QuarantineCallback(*this, TSD->getCache()));
+ TSD->getCache().destroy(&Stats);
+ }
+
+ void drainCache(TSD<ThisT> *TSD) {
+ TSD->assertLocked(/*BypassCheck=*/true);
+ Quarantine.drainAndRecycle(&TSD->getQuarantineCache(),
+ QuarantineCallback(*this, TSD->getCache()));
+ TSD->getCache().drain();
+ }
+ void drainCaches() { TSDRegistry.drainCaches(this); }
+
+ ALWAYS_INLINE void *getHeaderTaggedPointer(void *Ptr) {
+ if (!allocatorSupportsMemoryTagging<AllocatorConfig>())
+ return Ptr;
+ auto UntaggedPtr = untagPointer(Ptr);
+ if (UntaggedPtr != Ptr)
+ return UntaggedPtr;
+ // Secondary, or pointer allocated while memory tagging is unsupported or
+ // disabled. The tag mismatch is okay in the latter case because tags will
+ // not be checked.
+ return addHeaderTag(Ptr);
+ }
+
+ ALWAYS_INLINE uptr addHeaderTag(uptr Ptr) {
+ if (!allocatorSupportsMemoryTagging<AllocatorConfig>())
+ return Ptr;
+ return addFixedTag(Ptr, 2);
+ }
+
+ ALWAYS_INLINE void *addHeaderTag(void *Ptr) {
+ return reinterpret_cast<void *>(addHeaderTag(reinterpret_cast<uptr>(Ptr)));
+ }
+
+ NOINLINE u32 collectStackTrace(UNUSED StackDepot *Depot) {
+#ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
+ // Discard collectStackTrace() frame and allocator function frame.
+ constexpr uptr DiscardFrames = 2;
+ uptr Stack[MaxTraceSize + DiscardFrames];
+ uptr Size =
+ android_unsafe_frame_pointer_chase(Stack, MaxTraceSize + DiscardFrames);
+ Size = Min<uptr>(Size, MaxTraceSize + DiscardFrames);
+ return Depot->insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
+#else
+ return 0;
+#endif
+ }
+
+ uptr computeOddEvenMaskForPointerMaybe(const Options &Options, uptr Ptr,
+ uptr ClassId) {
+ if (!Options.get(OptionBit::UseOddEvenTags))
+ return 0;
+
+ // If a chunk's tag is odd, we want the tags of the surrounding blocks to be
+ // even, and vice versa. Blocks are laid out Size bytes apart, and adding
+ // Size to Ptr will flip the least significant set bit of Size in Ptr, so
+ // that bit will have the pattern 010101... for consecutive blocks, which we
+ // can use to determine which tag mask to use.
+ return 0x5555U << ((Ptr >> SizeClassMap::getSizeLSBByClassId(ClassId)) & 1);
+ }
+
+ NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
+ uptr Alignment = MinAlignment,
+ bool ZeroContents = false) NO_THREAD_SAFETY_ANALYSIS {
+ initThreadMaybe();
+
+ const Options Options = Primary.Options.load();
+ if (UNLIKELY(Alignment > MaxAlignment)) {
+ if (Options.get(OptionBit::MayReturnNull))
+ return nullptr;
+ reportAlignmentTooBig(Alignment, MaxAlignment);
+ }
+ if (Alignment < MinAlignment)
+ Alignment = MinAlignment;
+
+#ifdef GWP_ASAN_HOOKS
+ if (UNLIKELY(GuardedAlloc.shouldSample())) {
+ if (void *Ptr = GuardedAlloc.allocate(Size, Alignment)) {
+ Stats.lock();
+ Stats.add(StatAllocated, GuardedAllocSlotSize);
+ Stats.sub(StatFree, GuardedAllocSlotSize);
+ Stats.unlock();
+ return Ptr;
+ }
+ }
+#endif // GWP_ASAN_HOOKS
+
+ const FillContentsMode FillContents = ZeroContents ? ZeroFill
+ : TSDRegistry.getDisableMemInit()
+ ? NoFill
+ : Options.getFillContentsMode();
+
+ // If the requested size happens to be 0 (more common than you might think),
+ // allocate MinAlignment bytes on top of the header. Then add the extra
+ // bytes required to fulfill the alignment requirements: we allocate enough
+ // to be sure that there will be an address in the block that will satisfy
+ // the alignment.
+ const uptr NeededSize =
+ roundUp(Size, MinAlignment) +
+ ((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());
+
+ // Takes care of extravagantly large sizes as well as integer overflows.
+ static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
+ if (UNLIKELY(Size >= MaxAllowedMallocSize)) {
+ if (Options.get(OptionBit::MayReturnNull))
+ return nullptr;
+ reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
+ }
+ DCHECK_LE(Size, NeededSize);
+
+ void *Block = nullptr;
+ uptr ClassId = 0;
+ uptr SecondaryBlockEnd = 0;
+ if (LIKELY(PrimaryT::canAllocate(NeededSize))) {
+ ClassId = SizeClassMap::getClassIdBySize(NeededSize);
+ DCHECK_NE(ClassId, 0U);
+ typename TSDRegistryT::ScopedTSD TSD(TSDRegistry);
+ Block = TSD->getCache().allocate(ClassId);
+      // If the allocation failed, retry in each successively larger class until
+      // it fits. If it fails to fit in the largest class, fall back to the
+      // Secondary.
+ if (UNLIKELY(!Block)) {
+ while (ClassId < SizeClassMap::LargestClassId && !Block)
+ Block = TSD->getCache().allocate(++ClassId);
+ if (!Block)
+ ClassId = 0;
+ }
+ }
+ if (UNLIKELY(ClassId == 0)) {
+ Block = Secondary.allocate(Options, Size, Alignment, &SecondaryBlockEnd,
+ FillContents);
+ }
+
+ if (UNLIKELY(!Block)) {
+ if (Options.get(OptionBit::MayReturnNull))
+ return nullptr;
+ printStats();
+ reportOutOfMemory(NeededSize);
+ }
+
+ const uptr UserPtr = roundUp(
+ reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize(), Alignment);
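+    // Primary backed chunks record the requested Size in the header; Secondary
+    // backed chunks record the number of unused bytes at the end of the block,
+    // from which getSize() later recovers the requested size.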
+ const uptr SizeOrUnusedBytes =
+ ClassId ? Size : SecondaryBlockEnd - (UserPtr + Size);
+
+ if (LIKELY(!useMemoryTagging<AllocatorConfig>(Options))) {
+ return initChunk(ClassId, Origin, Block, UserPtr, SizeOrUnusedBytes,
+ FillContents);
+ }
+
+ return initChunkWithMemoryTagging(ClassId, Origin, Block, UserPtr, Size,
+ SizeOrUnusedBytes, FillContents);
+ }
+
+ NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
+ UNUSED uptr Alignment = MinAlignment) {
+ if (UNLIKELY(!Ptr))
+ return;
+
+ // For a deallocation, we only ensure minimal initialization, meaning thread
+ // local data will be left uninitialized for now (when using ELF TLS). The
+ // fallback cache will be used instead. This is a workaround for a situation
+ // where the only heap operation performed in a thread would be a free past
+ // the TLS destructors, ending up in initialized thread specific data never
+ // being destroyed properly. Any other heap operation will do a full init.
+ initThreadMaybe(/*MinimalInit=*/true);
+
+#ifdef GWP_ASAN_HOOKS
+ if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
+ GuardedAlloc.deallocate(Ptr);
+ Stats.lock();
+ Stats.add(StatFree, GuardedAllocSlotSize);
+ Stats.sub(StatAllocated, GuardedAllocSlotSize);
+ Stats.unlock();
+ return;
+ }
+#endif // GWP_ASAN_HOOKS
+
+ if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
+ reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);
+
+ void *TaggedPtr = Ptr;
+ Ptr = getHeaderTaggedPointer(Ptr);
+
+ Chunk::UnpackedHeader Header;
+ Chunk::loadHeader(Cookie, Ptr, &Header);
+
+ if (UNLIKELY(Header.State != Chunk::State::Allocated))
+ reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
+
+ const Options Options = Primary.Options.load();
+ if (Options.get(OptionBit::DeallocTypeMismatch)) {
+ if (UNLIKELY(Header.OriginOrWasZeroed != Origin)) {
+        // With the exception of memalign'd chunks, which can still be free'd.
+ if (Header.OriginOrWasZeroed != Chunk::Origin::Memalign ||
+ Origin != Chunk::Origin::Malloc)
+ reportDeallocTypeMismatch(AllocatorAction::Deallocating, Ptr,
+ Header.OriginOrWasZeroed, Origin);
+ }
+ }
+
+ const uptr Size = getSize(Ptr, &Header);
+ if (DeleteSize && Options.get(OptionBit::DeleteSizeMismatch)) {
+ if (UNLIKELY(DeleteSize != Size))
+ reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
+ }
+
+ quarantineOrDeallocateChunk(Options, TaggedPtr, &Header, Size);
+ }
+
+ void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
+ initThreadMaybe();
+
+ const Options Options = Primary.Options.load();
+ if (UNLIKELY(NewSize >= MaxAllowedMallocSize)) {
+ if (Options.get(OptionBit::MayReturnNull))
+ return nullptr;
+ reportAllocationSizeTooBig(NewSize, 0, MaxAllowedMallocSize);
+ }
+
+ // The following cases are handled by the C wrappers.
+ DCHECK_NE(OldPtr, nullptr);
+ DCHECK_NE(NewSize, 0);
+
+#ifdef GWP_ASAN_HOOKS
+ if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
+ uptr OldSize = GuardedAlloc.getSize(OldPtr);
+ void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
+ if (NewPtr)
+ memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
+ GuardedAlloc.deallocate(OldPtr);
+ Stats.lock();
+ Stats.add(StatFree, GuardedAllocSlotSize);
+ Stats.sub(StatAllocated, GuardedAllocSlotSize);
+ Stats.unlock();
+ return NewPtr;
+ }
+#endif // GWP_ASAN_HOOKS
+
+ void *OldTaggedPtr = OldPtr;
+ OldPtr = getHeaderTaggedPointer(OldPtr);
+
+ if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
+ reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);
+
+ Chunk::UnpackedHeader Header;
+ Chunk::loadHeader(Cookie, OldPtr, &Header);
+
+ if (UNLIKELY(Header.State != Chunk::State::Allocated))
+ reportInvalidChunkState(AllocatorAction::Reallocating, OldPtr);
+
+ // Pointer has to be allocated with a malloc-type function. Some
+ // applications think that it is OK to realloc a memalign'ed pointer, which
+ // will trigger this check. It really isn't.
+ if (Options.get(OptionBit::DeallocTypeMismatch)) {
+ if (UNLIKELY(Header.OriginOrWasZeroed != Chunk::Origin::Malloc))
+ reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
+ Header.OriginOrWasZeroed,
+ Chunk::Origin::Malloc);
+ }
+
+ void *BlockBegin = getBlockBegin(OldTaggedPtr, &Header);
+ uptr BlockEnd;
+ uptr OldSize;
+ const uptr ClassId = Header.ClassId;
+ if (LIKELY(ClassId)) {
+ BlockEnd = reinterpret_cast<uptr>(BlockBegin) +
+ SizeClassMap::getSizeByClassId(ClassId);
+ OldSize = Header.SizeOrUnusedBytes;
+ } else {
+ BlockEnd = SecondaryT::getBlockEnd(BlockBegin);
+ OldSize = BlockEnd - (reinterpret_cast<uptr>(OldTaggedPtr) +
+ Header.SizeOrUnusedBytes);
+ }
+ // If the new chunk still fits in the previously allocated block (with a
+ // reasonable delta), we just keep the old block, and update the chunk
+ // header to reflect the size change.
+ if (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize <= BlockEnd) {
+ if (NewSize > OldSize || (OldSize - NewSize) < getPageSizeCached()) {
+ // If we have reduced the size, set the extra bytes to the fill value
+ // so that we are ready to grow it again in the future.
+ if (NewSize < OldSize) {
+ const FillContentsMode FillContents =
+ TSDRegistry.getDisableMemInit() ? NoFill
+ : Options.getFillContentsMode();
+ if (FillContents != NoFill) {
+ memset(reinterpret_cast<char *>(OldTaggedPtr) + NewSize,
+ FillContents == ZeroFill ? 0 : PatternFillByte,
+ OldSize - NewSize);
+ }
+ }
+
+ Header.SizeOrUnusedBytes =
+ (ClassId ? NewSize
+ : BlockEnd -
+ (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize)) &
+ Chunk::SizeOrUnusedBytesMask;
+ Chunk::storeHeader(Cookie, OldPtr, &Header);
+ if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Options))) {
+ if (ClassId) {
+ resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
+ reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
+ NewSize, untagPointer(BlockEnd));
+ storePrimaryAllocationStackMaybe(Options, OldPtr);
+ } else {
+ storeSecondaryAllocationStackMaybe(Options, OldPtr, NewSize);
+ }
+ }
+ return OldTaggedPtr;
+ }
+ }
+
+ // Otherwise we allocate a new one, and deallocate the old one. Some
+ // allocators will allocate an even larger chunk (by a fixed factor) to
+ // allow for potential further in-place realloc. The gains of such a trick
+ // are currently unclear.
+ void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
+ if (LIKELY(NewPtr)) {
+ memcpy(NewPtr, OldTaggedPtr, Min(NewSize, OldSize));
+ quarantineOrDeallocateChunk(Options, OldTaggedPtr, &Header, OldSize);
+ }
+ return NewPtr;
+ }
+
+ // TODO(kostyak): disable() is currently best-effort. There are some small
+ // windows of time when an allocation could still succeed after
+ // this function finishes. We will revisit that later.
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
+ initThreadMaybe();
+#ifdef GWP_ASAN_HOOKS
+ GuardedAlloc.disable();
+#endif
+ TSDRegistry.disable();
+ Stats.disable();
+ Quarantine.disable();
+ Primary.disable();
+ Secondary.disable();
+ disableRingBuffer();
+ }
+
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
+ initThreadMaybe();
+ enableRingBuffer();
+ Secondary.enable();
+ Primary.enable();
+ Quarantine.enable();
+ Stats.enable();
+ TSDRegistry.enable();
+#ifdef GWP_ASAN_HOOKS
+ GuardedAlloc.enable();
+#endif
+ }
+
+  // The function returns the number of bytes required to store the statistics,
+  // which might be larger than the number of bytes provided. Note that the
+  // statistics buffer is not necessarily constant between calls to this
+  // function. This can be called with a null buffer or zero size for buffer
+  // sizing purposes.
+ uptr getStats(char *Buffer, uptr Size) {
+ ScopedString Str;
+ const uptr Length = getStats(&Str) + 1;
+ if (Length < Size)
+ Size = Length;
+ if (Buffer && Size) {
+ memcpy(Buffer, Str.data(), Size);
+ Buffer[Size - 1] = '\0';
+ }
+ return Length;
+ }
+
+ void printStats() {
+ ScopedString Str;
+ getStats(&Str);
+ Str.output();
+ }
+
+ void printFragmentationInfo() {
+ ScopedString Str;
+ Primary.getFragmentationInfo(&Str);
+ // Secondary allocator dumps the fragmentation data in getStats().
+ Str.output();
+ }
+
+ void releaseToOS(ReleaseToOS ReleaseType) {
+ initThreadMaybe();
+ if (ReleaseType == ReleaseToOS::ForceAll)
+ drainCaches();
+ Primary.releaseToOS(ReleaseType);
+ Secondary.releaseToOS();
+ }
+
+ // Iterate over all chunks and call a callback for all busy chunks located
+ // within the provided memory range. Said callback must not use this allocator
+ // or a deadlock can ensue. This fits Android's malloc_iterate() needs.
+ void iterateOverChunks(uptr Base, uptr Size, iterate_callback Callback,
+ void *Arg) {
+ initThreadMaybe();
+ if (archSupportsMemoryTagging())
+ Base = untagPointer(Base);
+ const uptr From = Base;
+ const uptr To = Base + Size;
+ bool MayHaveTaggedPrimary =
+ allocatorSupportsMemoryTagging<AllocatorConfig>() &&
+ systemSupportsMemoryTagging();
+ auto Lambda = [this, From, To, MayHaveTaggedPrimary, Callback,
+ Arg](uptr Block) {
+ if (Block < From || Block >= To)
+ return;
+ uptr Chunk;
+ Chunk::UnpackedHeader Header;
+ if (MayHaveTaggedPrimary) {
+ // A chunk header can either have a zero tag (tagged primary) or the
+ // header tag (secondary, or untagged primary). We don't know which so
+ // try both.
+ ScopedDisableMemoryTagChecks x;
+ if (!getChunkFromBlock(Block, &Chunk, &Header) &&
+ !getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
+ return;
+ } else {
+ if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
+ return;
+ }
+ if (Header.State == Chunk::State::Allocated) {
+ uptr TaggedChunk = Chunk;
+ if (allocatorSupportsMemoryTagging<AllocatorConfig>())
+ TaggedChunk = untagPointer(TaggedChunk);
+ if (useMemoryTagging<AllocatorConfig>(Primary.Options.load()))
+ TaggedChunk = loadTag(Chunk);
+ Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
+ Arg);
+ }
+ };
+ Primary.iterateOverBlocks(Lambda);
+ Secondary.iterateOverBlocks(Lambda);
+#ifdef GWP_ASAN_HOOKS
+ GuardedAlloc.iterate(reinterpret_cast<void *>(Base), Size, Callback, Arg);
+#endif
+ }
+
+ bool canReturnNull() {
+ initThreadMaybe();
+ return Primary.Options.load().get(OptionBit::MayReturnNull);
+ }
+
+ bool setOption(Option O, sptr Value) {
+ initThreadMaybe();
+ if (O == Option::MemtagTuning) {
+ // Enabling odd/even tags involves a tradeoff between use-after-free
+ // detection and buffer overflow detection. Odd/even tags make it more
+ // likely for buffer overflows to be detected by increasing the size of
+ // the guaranteed "red zone" around the allocation, but on the other hand
+ // use-after-free is less likely to be detected because the tag space for
+ // any particular chunk is cut in half. Therefore we use this tuning
+ // setting to control whether odd/even tags are enabled.
+ if (Value == M_MEMTAG_TUNING_BUFFER_OVERFLOW)
+ Primary.Options.set(OptionBit::UseOddEvenTags);
+ else if (Value == M_MEMTAG_TUNING_UAF)
+ Primary.Options.clear(OptionBit::UseOddEvenTags);
+ return true;
+ } else {
+      // We leave it to the various sub-components to decide whether or not they
+      // want to handle the option, but we do not want to short-circuit
+      // execution if one of the setOption() calls were to return false.
+ const bool PrimaryResult = Primary.setOption(O, Value);
+ const bool SecondaryResult = Secondary.setOption(O, Value);
+ const bool RegistryResult = TSDRegistry.setOption(O, Value);
+ return PrimaryResult && SecondaryResult && RegistryResult;
+ }
+ return false;
+ }
+
+ // Return the usable size for a given chunk. Technically we lie, as we just
+ // report the actual size of a chunk. This is done to counteract code actively
+ // writing past the end of a chunk (like sqlite3) when the usable size allows
+ // for it, which then forces realloc to copy the usable size of a chunk as
+ // opposed to its actual size.
+ uptr getUsableSize(const void *Ptr) {
+ if (UNLIKELY(!Ptr))
+ return 0;
+
+ return getAllocSize(Ptr);
+ }
+
+ uptr getAllocSize(const void *Ptr) {
+ initThreadMaybe();
+
+#ifdef GWP_ASAN_HOOKS
+ if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
+ return GuardedAlloc.getSize(Ptr);
+#endif // GWP_ASAN_HOOKS
+
+ Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
+ Chunk::UnpackedHeader Header;
+ Chunk::loadHeader(Cookie, Ptr, &Header);
+
+ // Getting the alloc size of a chunk only makes sense if it's allocated.
+ if (UNLIKELY(Header.State != Chunk::State::Allocated))
+ reportInvalidChunkState(AllocatorAction::Sizing, const_cast<void *>(Ptr));
+
+ return getSize(Ptr, &Header);
+ }
+
+ void getStats(StatCounters S) {
+ initThreadMaybe();
+ Stats.get(S);
+ }
+
+ // Returns true if the pointer provided was allocated by the current
+ // allocator instance, which is compliant with tcmalloc's ownership concept.
+ // A corrupted chunk will not be reported as owned, which is WAI.
+ bool isOwned(const void *Ptr) {
+ initThreadMaybe();
+#ifdef GWP_ASAN_HOOKS
+ if (GuardedAlloc.pointerIsMine(Ptr))
+ return true;
+#endif // GWP_ASAN_HOOKS
+ if (!Ptr || !isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment))
+ return false;
+ Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
+ Chunk::UnpackedHeader Header;
+ return Chunk::isValid(Cookie, Ptr, &Header) &&
+ Header.State == Chunk::State::Allocated;
+ }
+
+ bool useMemoryTaggingTestOnly() const {
+ return useMemoryTagging<AllocatorConfig>(Primary.Options.load());
+ }
+ void disableMemoryTagging() {
+ // If we haven't been initialized yet, we need to initialize now in order to
+ // prevent a future call to initThreadMaybe() from enabling memory tagging
+ // based on feature detection. But don't call initThreadMaybe() because it
+ // may end up calling the allocator (via pthread_atfork, via the post-init
+ // callback), which may cause mappings to be created with memory tagging
+ // enabled.
+ TSDRegistry.initOnceMaybe(this);
+ if (allocatorSupportsMemoryTagging<AllocatorConfig>()) {
+ Secondary.disableMemoryTagging();
+ Primary.Options.clear(OptionBit::UseMemoryTagging);
+ }
+ }
+
+ void setTrackAllocationStacks(bool Track) {
+ initThreadMaybe();
+ if (getFlags()->allocation_ring_buffer_size <= 0) {
+ DCHECK(!Primary.Options.load().get(OptionBit::TrackAllocationStacks));
+ return;
+ }
+
+ if (Track) {
+ initRingBufferMaybe();
+ Primary.Options.set(OptionBit::TrackAllocationStacks);
+ } else
+ Primary.Options.clear(OptionBit::TrackAllocationStacks);
+ }
+
+ void setFillContents(FillContentsMode FillContents) {
+ initThreadMaybe();
+ Primary.Options.setFillContentsMode(FillContents);
+ }
+
+ void setAddLargeAllocationSlack(bool AddSlack) {
+ initThreadMaybe();
+ if (AddSlack)
+ Primary.Options.set(OptionBit::AddLargeAllocationSlack);
+ else
+ Primary.Options.clear(OptionBit::AddLargeAllocationSlack);
+ }
+
+ const char *getStackDepotAddress() {
+ initThreadMaybe();
+ AllocationRingBuffer *RB = getRingBuffer();
+ return RB ? reinterpret_cast<char *>(RB->Depot) : nullptr;
+ }
+
+ uptr getStackDepotSize() {
+ initThreadMaybe();
+ AllocationRingBuffer *RB = getRingBuffer();
+ return RB ? RB->StackDepotSize : 0;
+ }
+
+ const char *getRegionInfoArrayAddress() const {
+ return Primary.getRegionInfoArrayAddress();
+ }
+
+ static uptr getRegionInfoArraySize() {
+ return PrimaryT::getRegionInfoArraySize();
+ }
+
+ const char *getRingBufferAddress() {
+ initThreadMaybe();
+ return reinterpret_cast<char *>(getRingBuffer());
+ }
+
+ uptr getRingBufferSize() {
+ initThreadMaybe();
+ AllocationRingBuffer *RB = getRingBuffer();
+ return RB && RB->RingBufferElements
+ ? ringBufferSizeInBytes(RB->RingBufferElements)
+ : 0;
+ }
+
+ static const uptr MaxTraceSize = 64;
+
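+  // Copies the stack trace identified by Hash from the depot into Trace,
+  // keeping at most MaxTraceSize frames.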
+ static void collectTraceMaybe(const StackDepot *Depot,
+ uintptr_t (&Trace)[MaxTraceSize], u32 Hash) {
+ uptr RingPos, Size;
+ if (!Depot->find(Hash, &RingPos, &Size))
+ return;
+ for (unsigned I = 0; I != Size && I != MaxTraceSize; ++I)
+ Trace[I] = static_cast<uintptr_t>(Depot->at(RingPos + I));
+ }
+
+ static void getErrorInfo(struct scudo_error_info *ErrorInfo,
+ uintptr_t FaultAddr, const char *DepotPtr,
+ size_t DepotSize, const char *RegionInfoPtr,
+ const char *RingBufferPtr, size_t RingBufferSize,
+ const char *Memory, const char *MemoryTags,
+ uintptr_t MemoryAddr, size_t MemorySize) {
+ // N.B. we need to support corrupted data in any of the buffers here. We get
+ // this information from an external process (the crashing process) that
+ // should not be able to crash the crash dumper (crash_dump on Android).
+ // See also the get_error_info_fuzzer.
+ *ErrorInfo = {};
+ if (!allocatorSupportsMemoryTagging<AllocatorConfig>() ||
+ MemoryAddr + MemorySize < MemoryAddr)
+ return;
+
+ const StackDepot *Depot = nullptr;
+ if (DepotPtr) {
+      // Check for a corrupted StackDepot. First we need to check whether we
+      // can read the metadata, then whether the metadata matches the size.
+ if (DepotSize < sizeof(*Depot))
+ return;
+ Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
+ if (!Depot->isValid(DepotSize))
+ return;
+ }
+
+ size_t NextErrorReport = 0;
+
+ // Check for OOB in the current block and the two surrounding blocks. Beyond
+ // that, UAF is more likely.
+ if (extractTag(FaultAddr) != 0)
+ getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
+ RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
+ MemorySize, 0, 2);
+
+ // Check the ring buffer. For primary allocations this will only find UAF;
+ // for secondary allocations we can find either UAF or OOB.
+ getRingBufferErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
+ RingBufferPtr, RingBufferSize);
+
+ // Check for OOB in the 28 blocks surrounding the 3 we checked earlier.
+ // Beyond that we are likely to hit false positives.
+ if (extractTag(FaultAddr) != 0)
+ getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
+ RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
+ MemorySize, 2, 16);
+ }
+
+private:
+ typedef typename PrimaryT::SizeClassMap SizeClassMap;
+
+ static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
+ static const uptr MaxAlignmentLog = 24U; // 16 MB seems reasonable.
+ static const uptr MinAlignment = 1UL << MinAlignmentLog;
+ static const uptr MaxAlignment = 1UL << MaxAlignmentLog;
+ static const uptr MaxAllowedMallocSize =
+ FIRST_32_SECOND_64(1UL << 31, 1ULL << 40);
+
+ static_assert(MinAlignment >= sizeof(Chunk::PackedHeader),
+ "Minimal alignment must at least cover a chunk header.");
+ static_assert(!allocatorSupportsMemoryTagging<AllocatorConfig>() ||
+ MinAlignment >= archMemoryTagGranuleSize(),
+ "");
+
+ static const u32 BlockMarker = 0x44554353U;
+
+ // These are indexes into an "array" of 32-bit values that store information
+ // inline with a chunk that is relevant to diagnosing memory tag faults, where
+ // 0 corresponds to the address of the user memory. This means that only
+ // negative indexes may be used. The smallest index that may be used is -2,
+ // which corresponds to 8 bytes before the user memory, because the chunk
+ // header size is 8 bytes and in allocators that support memory tagging the
+ // minimum alignment is at least the tag granule size (16 on aarch64).
+ static const sptr MemTagAllocationTraceIndex = -2;
+ static const sptr MemTagAllocationTidIndex = -1;
+
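+  // Cookie is passed to Chunk::storeHeader/loadHeader to checksum and
+  // validate chunk headers.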
+ u32 Cookie = 0;
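+  // Chunks larger than this (or of size 0) bypass the quarantine.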
+ u32 QuarantineMaxChunkSize = 0;
+
+ GlobalStats Stats;
+ PrimaryT Primary;
+ SecondaryT Secondary;
+ QuarantineT Quarantine;
+ TSDRegistryT TSDRegistry;
+ pthread_once_t PostInitNonce = PTHREAD_ONCE_INIT;
+
+#ifdef GWP_ASAN_HOOKS
+ gwp_asan::GuardedPoolAllocator GuardedAlloc;
+ uptr GuardedAllocSlotSize = 0;
+#endif // GWP_ASAN_HOOKS
+
+ struct AllocationRingBuffer {
+ struct Entry {
+ atomic_uptr Ptr;
+ atomic_uptr AllocationSize;
+ atomic_u32 AllocationTrace;
+ atomic_u32 AllocationTid;
+ atomic_u32 DeallocationTrace;
+ atomic_u32 DeallocationTid;
+ };
+ StackDepot *Depot = nullptr;
+ uptr StackDepotSize = 0;
+ MemMapT RawRingBufferMap;
+ MemMapT RawStackDepotMap;
+ u32 RingBufferElements = 0;
+ atomic_uptr Pos;
+    // An array of RingBufferElements (at least one) entries of type Entry
+    // immediately follows this struct.
+ };
+ static_assert(sizeof(AllocationRingBuffer) %
+ alignof(typename AllocationRingBuffer::Entry) ==
+ 0,
+ "invalid alignment");
+
+ // Lock to initialize the RingBuffer
+ HybridMutex RingBufferInitLock;
+
+  // Pointer to the memory-mapped area starting with the AllocationRingBuffer
+  // struct, immediately followed by RingBufferElements entries of type Entry.
+ atomic_uptr RingBufferAddress = {};
+
+ AllocationRingBuffer *getRingBuffer() {
+ return reinterpret_cast<AllocationRingBuffer *>(
+ atomic_load(&RingBufferAddress, memory_order_acquire));
+ }
+
+ // The following might get optimized out by the compiler.
+ NOINLINE void performSanityChecks() {
+ // Verify that the header offset field can hold the maximum offset. In the
+ // case of the Secondary allocator, it takes care of alignment and the
+ // offset will always be small. In the case of the Primary, the worst case
+ // scenario happens in the last size class, when the backend allocation
+ // would already be aligned on the requested alignment, which would happen
+ // to be the maximum alignment that would fit in that size class. As a
+ // result, the maximum offset will be at most the maximum alignment for the
+ // last size class minus the header size, in multiples of MinAlignment.
+ Chunk::UnpackedHeader Header = {};
+ const uptr MaxPrimaryAlignment = 1UL << getMostSignificantSetBitIndex(
+ SizeClassMap::MaxSize - MinAlignment);
+ const uptr MaxOffset =
+ (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
+ Header.Offset = MaxOffset & Chunk::OffsetMask;
+ if (UNLIKELY(Header.Offset != MaxOffset))
+ reportSanityCheckError("offset");
+
+ // Verify that we can fit the maximum size or amount of unused bytes in the
+ // header. Given that the Secondary fits the allocation to a page, the worst
+ // case scenario happens in the Primary. It will depend on the second to
+ // last and last class sizes, as well as the dynamic base for the Primary.
+ // The following is an over-approximation that works for our needs.
+ const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
+ Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
+ if (UNLIKELY(Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes))
+ reportSanityCheckError("size (or unused bytes)");
+
+ const uptr LargestClassId = SizeClassMap::LargestClassId;
+ Header.ClassId = LargestClassId;
+ if (UNLIKELY(Header.ClassId != LargestClassId))
+ reportSanityCheckError("class ID");
+ }
+
+ static inline void *getBlockBegin(const void *Ptr,
+ Chunk::UnpackedHeader *Header) {
+ return reinterpret_cast<void *>(
+ reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
+ (static_cast<uptr>(Header->Offset) << MinAlignmentLog));
+ }
+
+ // Return the size of a chunk as requested during its allocation.
+ inline uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
+ const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
+ if (LIKELY(Header->ClassId))
+ return SizeOrUnusedBytes;
+ if (allocatorSupportsMemoryTagging<AllocatorConfig>())
+ Ptr = untagPointer(const_cast<void *>(Ptr));
+ return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
+ reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
+ }
+
+ ALWAYS_INLINE void *initChunk(const uptr ClassId, const Chunk::Origin Origin,
+ void *Block, const uptr UserPtr,
+ const uptr SizeOrUnusedBytes,
+ const FillContentsMode FillContents) {
+ // Compute the default pointer before adding the header tag
+ const uptr DefaultAlignedPtr =
+ reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
+
+ Block = addHeaderTag(Block);
+ // Only do content fill when it's from primary allocator because secondary
+ // allocator has filled the content.
+ if (ClassId != 0 && UNLIKELY(FillContents != NoFill)) {
+ // This condition is not necessarily unlikely, but since memset is
+ // costly, we might as well mark it as such.
+ memset(Block, FillContents == ZeroFill ? 0 : PatternFillByte,
+ PrimaryT::getSizeByClassId(ClassId));
+ }
+
+ Chunk::UnpackedHeader Header = {};
+
+ if (UNLIKELY(DefaultAlignedPtr != UserPtr)) {
+ const uptr Offset = UserPtr - DefaultAlignedPtr;
+ DCHECK_GE(Offset, 2 * sizeof(u32));
+ // The BlockMarker has no security purpose, but is specifically meant for
+ // the chunk iteration function that can be used in debugging situations.
+ // It is the only situation where we have to locate the start of a chunk
+ // based on its block address.
+ reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
+ reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
+ Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
+ }
+
+ Header.ClassId = ClassId & Chunk::ClassIdMask;
+ Header.State = Chunk::State::Allocated;
+ Header.OriginOrWasZeroed = Origin & Chunk::OriginMask;
+ Header.SizeOrUnusedBytes = SizeOrUnusedBytes & Chunk::SizeOrUnusedBytesMask;
+ Chunk::storeHeader(Cookie, reinterpret_cast<void *>(addHeaderTag(UserPtr)),
+ &Header);
+
+ return reinterpret_cast<void *>(UserPtr);
+ }
+
+ NOINLINE void *
+ initChunkWithMemoryTagging(const uptr ClassId, const Chunk::Origin Origin,
+ void *Block, const uptr UserPtr, const uptr Size,
+ const uptr SizeOrUnusedBytes,
+ const FillContentsMode FillContents) {
+ const Options Options = Primary.Options.load();
+ DCHECK(useMemoryTagging<AllocatorConfig>(Options));
+
+ // Compute the default pointer before adding the header tag
+ const uptr DefaultAlignedPtr =
+ reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
+
+ void *Ptr = reinterpret_cast<void *>(UserPtr);
+ void *TaggedPtr = Ptr;
+
+ if (LIKELY(ClassId)) {
+ // Init the primary chunk.
+ //
+ // We only need to zero or tag the contents for Primary backed
+ // allocations. We only set tags for primary allocations in order to avoid
+ // faulting potentially large numbers of pages for large secondary
+ // allocations. We assume that guard pages are enough to protect these
+ // allocations.
+ //
+ // FIXME: When the kernel provides a way to set the background tag of a
+ // mapping, we should be able to tag secondary allocations as well.
+ //
+ // When memory tagging is enabled, zeroing the contents is done as part of
+ // setting the tag.
+
+ Chunk::UnpackedHeader Header;
+ const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId);
+ const uptr BlockUptr = reinterpret_cast<uptr>(Block);
+ const uptr BlockEnd = BlockUptr + BlockSize;
+ // If possible, try to reuse the UAF tag that was set by deallocate().
+ // For simplicity, only reuse tags if we have the same start address as
+ // the previous allocation. This handles the majority of cases since
+ // most allocations will not be more aligned than the minimum alignment.
+ //
+ // We need to handle situations involving reclaimed chunks, and retag
+ // the reclaimed portions if necessary. In the case where the chunk is
+ // fully reclaimed, the chunk's header will be zero, which will trigger
+ // the code path for new mappings and invalid chunks that prepares the
+ // chunk from scratch. There are three possibilities for partial
+ // reclaiming:
+ //
+ // (1) Header was reclaimed, data was partially reclaimed.
+ // (2) Header was not reclaimed, all data was reclaimed (e.g. because
+ // data started on a page boundary).
+ // (3) Header was not reclaimed, data was partially reclaimed.
+ //
+ // Case (1) will be handled in the same way as for full reclaiming,
+ // since the header will be zero.
+ //
+ // We can detect case (2) by loading the tag from the start
+ // of the chunk. If it is zero, it means that either all data was
+ // reclaimed (since we never use zero as the chunk tag), or that the
+ // previous allocation was of size zero. Either way, we need to prepare
+ // a new chunk from scratch.
+ //
+ // We can detect case (3) by moving to the next page (if covered by the
+ // chunk) and loading the tag of its first granule. If it is zero, it
+ // means that all following pages may need to be retagged. On the other
+ // hand, if it is nonzero, we can assume that all following pages are
+ // still tagged, according to the logic that if any of the pages
+ // following the next page were reclaimed, the next page would have been
+ // reclaimed as well.
+ uptr TaggedUserPtr;
+ uptr PrevUserPtr;
+ if (getChunkFromBlock(BlockUptr, &PrevUserPtr, &Header) &&
+ PrevUserPtr == UserPtr &&
+ (TaggedUserPtr = loadTag(UserPtr)) != UserPtr) {
+ uptr PrevEnd = TaggedUserPtr + Header.SizeOrUnusedBytes;
+ const uptr NextPage = roundUp(TaggedUserPtr, getPageSizeCached());
+ if (NextPage < PrevEnd && loadTag(NextPage) != NextPage)
+ PrevEnd = NextPage;
+ TaggedPtr = reinterpret_cast<void *>(TaggedUserPtr);
+ resizeTaggedChunk(PrevEnd, TaggedUserPtr + Size, Size, BlockEnd);
+ if (UNLIKELY(FillContents != NoFill && !Header.OriginOrWasZeroed)) {
+ // If an allocation needs to be zeroed (i.e. calloc) we can normally
+ // avoid zeroing the memory now since we can rely on memory having
+ // been zeroed on free, as this is normally done while setting the
+ // UAF tag. But if tagging was disabled per-thread when the memory
+ // was freed, it would not have been retagged and thus zeroed, and
+ // therefore it needs to be zeroed now.
+ memset(TaggedPtr, 0,
+ Min(Size, roundUp(PrevEnd - TaggedUserPtr,
+ archMemoryTagGranuleSize())));
+ } else if (Size) {
+ // Clear any stack metadata that may have previously been stored in
+ // the chunk data.
+ memset(TaggedPtr, 0, archMemoryTagGranuleSize());
+ }
+ } else {
+ const uptr OddEvenMask =
+ computeOddEvenMaskForPointerMaybe(Options, BlockUptr, ClassId);
+ TaggedPtr = prepareTaggedChunk(Ptr, Size, OddEvenMask, BlockEnd);
+ }
+ storePrimaryAllocationStackMaybe(Options, Ptr);
+ } else {
+ // Init the secondary chunk.
+
+ Block = addHeaderTag(Block);
+ Ptr = addHeaderTag(Ptr);
+ storeTags(reinterpret_cast<uptr>(Block), reinterpret_cast<uptr>(Ptr));
+ storeSecondaryAllocationStackMaybe(Options, Ptr, Size);
+ }
+
+ Chunk::UnpackedHeader Header = {};
+
+ if (UNLIKELY(DefaultAlignedPtr != UserPtr)) {
+ const uptr Offset = UserPtr - DefaultAlignedPtr;
+ DCHECK_GE(Offset, 2 * sizeof(u32));
+ // The BlockMarker has no security purpose, but is specifically meant for
+ // the chunk iteration function that can be used in debugging situations.
+ // It is the only situation where we have to locate the start of a chunk
+ // based on its block address.
+ reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
+ reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
+ Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
+ }
+
+ Header.ClassId = ClassId & Chunk::ClassIdMask;
+ Header.State = Chunk::State::Allocated;
+ Header.OriginOrWasZeroed = Origin & Chunk::OriginMask;
+ Header.SizeOrUnusedBytes = SizeOrUnusedBytes & Chunk::SizeOrUnusedBytesMask;
+ Chunk::storeHeader(Cookie, Ptr, &Header);
+
+ return TaggedPtr;
+ }
+
+ void quarantineOrDeallocateChunk(const Options &Options, void *TaggedPtr,
+ Chunk::UnpackedHeader *Header,
+ uptr Size) NO_THREAD_SAFETY_ANALYSIS {
+ void *Ptr = getHeaderTaggedPointer(TaggedPtr);
+    // If the quarantine is disabled, if the chunk size is 0 or larger than the
+    // maximum allowed, or if the chunk is Secondary backed, return the chunk
+    // directly to the backend. The (Size - 1) comparison purposefully
+    // underflows for Size == 0.
+ const bool BypassQuarantine = !Quarantine.getCacheSize() ||
+ ((Size - 1) >= QuarantineMaxChunkSize) ||
+ !Header->ClassId;
+ if (BypassQuarantine)
+ Header->State = Chunk::State::Available;
+ else
+ Header->State = Chunk::State::Quarantined;
+
+ void *BlockBegin;
+ if (LIKELY(!useMemoryTagging<AllocatorConfig>(Options))) {
+ Header->OriginOrWasZeroed = 0U;
+ if (BypassQuarantine && allocatorSupportsMemoryTagging<AllocatorConfig>())
+ Ptr = untagPointer(Ptr);
+ BlockBegin = getBlockBegin(Ptr, Header);
+ } else {
+ Header->OriginOrWasZeroed =
+ Header->ClassId && !TSDRegistry.getDisableMemInit();
+ BlockBegin =
+ retagBlock(Options, TaggedPtr, Ptr, Header, Size, BypassQuarantine);
+ }
+
+ Chunk::storeHeader(Cookie, Ptr, Header);
+
+ if (BypassQuarantine) {
+ const uptr ClassId = Header->ClassId;
+ if (LIKELY(ClassId)) {
+ bool CacheDrained;
+ {
+ typename TSDRegistryT::ScopedTSD TSD(TSDRegistry);
+ CacheDrained = TSD->getCache().deallocate(ClassId, BlockBegin);
+ }
+ // When we have drained some blocks back to the Primary from TSD, that
+ // implies that we may have the chance to release some pages as well.
+ // Note that in order not to block other thread's accessing the TSD,
+ // release the TSD first then try the page release.
+ if (CacheDrained)
+ Primary.tryReleaseToOS(ClassId, ReleaseToOS::Normal);
+ } else {
+ Secondary.deallocate(Options, BlockBegin);
+ }
+ } else {
+ typename TSDRegistryT::ScopedTSD TSD(TSDRegistry);
+ Quarantine.put(&TSD->getQuarantineCache(),
+ QuarantineCallback(*this, TSD->getCache()), Ptr, Size);
+ }
+ }
+
+ NOINLINE void *retagBlock(const Options &Options, void *TaggedPtr, void *&Ptr,
+ Chunk::UnpackedHeader *Header, const uptr Size,
+ bool BypassQuarantine) {
+ DCHECK(useMemoryTagging<AllocatorConfig>(Options));
+
+ const u8 PrevTag = extractTag(reinterpret_cast<uptr>(TaggedPtr));
+ storeDeallocationStackMaybe(Options, Ptr, PrevTag, Size);
+ if (Header->ClassId && !TSDRegistry.getDisableMemInit()) {
+ uptr TaggedBegin, TaggedEnd;
+ const uptr OddEvenMask = computeOddEvenMaskForPointerMaybe(
+ Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, Header)),
+ Header->ClassId);
+ // Exclude the previous tag so that immediate use after free is
+ // detected 100% of the time.
+ setRandomTag(Ptr, Size, OddEvenMask | (1UL << PrevTag), &TaggedBegin,
+ &TaggedEnd);
+ }
+
+ Ptr = untagPointer(Ptr);
+ void *BlockBegin = getBlockBegin(Ptr, Header);
+ if (BypassQuarantine && !Header->ClassId) {
+ storeTags(reinterpret_cast<uptr>(BlockBegin),
+ reinterpret_cast<uptr>(Ptr));
+ }
+
+ return BlockBegin;
+ }
+
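+  // Recovers the chunk address from a block address, using the offset stored
+  // next to the BlockMarker when present, and validates the chunk header.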
+ bool getChunkFromBlock(uptr Block, uptr *Chunk,
+ Chunk::UnpackedHeader *Header) {
+ *Chunk =
+ Block + getChunkOffsetFromBlock(reinterpret_cast<const char *>(Block));
+ return Chunk::isValid(Cookie, reinterpret_cast<void *>(*Chunk), Header);
+ }
+
+ static uptr getChunkOffsetFromBlock(const char *Block) {
+ u32 Offset = 0;
+ if (reinterpret_cast<const u32 *>(Block)[0] == BlockMarker)
+ Offset = reinterpret_cast<const u32 *>(Block)[1];
+ return Offset + Chunk::getHeaderSize();
+ }
+
+ // Set the tag of the granule past the end of the allocation to 0, to catch
+ // linear overflows even if a previous larger allocation used the same block
+ // and tag. Only do this if the granule past the end is in our block, because
+ // this would otherwise lead to a SEGV if the allocation covers the entire
+ // block and our block is at the end of a mapping. The tag of the next block's
+ // header granule will be set to 0, so it will serve the purpose of catching
+ // linear overflows in this case.
+ //
+ // For allocations of size 0 we do not end up storing the address tag to the
+ // memory tag space, which getInlineErrorInfo() normally relies on to match
+ // address tags against chunks. To allow matching in this case we store the
+ // address tag in the first byte of the chunk.
+ void storeEndMarker(uptr End, uptr Size, uptr BlockEnd) {
+ DCHECK_EQ(BlockEnd, untagPointer(BlockEnd));
+ uptr UntaggedEnd = untagPointer(End);
+ if (UntaggedEnd != BlockEnd) {
+ storeTag(UntaggedEnd);
+ if (Size == 0)
+ *reinterpret_cast<u8 *>(UntaggedEnd) = extractTag(End);
+ }
+ }
+
+ void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
+ uptr BlockEnd) {
+ // Prepare the granule before the chunk to store the chunk header by setting
+ // its tag to 0. Normally its tag will already be 0, but in the case where a
+ // chunk holding a low alignment allocation is reused for a higher alignment
+ // allocation, the chunk may already have a non-zero tag from the previous
+ // allocation.
+ storeTag(reinterpret_cast<uptr>(Ptr) - archMemoryTagGranuleSize());
+
+ uptr TaggedBegin, TaggedEnd;
+ setRandomTag(Ptr, Size, ExcludeMask, &TaggedBegin, &TaggedEnd);
+
+ storeEndMarker(TaggedEnd, Size, BlockEnd);
+ return reinterpret_cast<void *>(TaggedBegin);
+ }
+
+ void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr NewSize,
+ uptr BlockEnd) {
+ uptr RoundOldPtr = roundUp(OldPtr, archMemoryTagGranuleSize());
+ uptr RoundNewPtr;
+ if (RoundOldPtr >= NewPtr) {
+ // If the allocation is shrinking we just need to set the tag past the end
+ // of the allocation to 0. See explanation in storeEndMarker() above.
+ RoundNewPtr = roundUp(NewPtr, archMemoryTagGranuleSize());
+ } else {
+ // Set the memory tag of the region
+ // [RoundOldPtr, roundUp(NewPtr, archMemoryTagGranuleSize()))
+ // to the pointer tag stored in OldPtr.
+ RoundNewPtr = storeTags(RoundOldPtr, NewPtr);
+ }
+ storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
+ }
+
+ void storePrimaryAllocationStackMaybe(const Options &Options, void *Ptr) {
+ if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
+ return;
+ AllocationRingBuffer *RB = getRingBuffer();
+ if (!RB)
+ return;
+ auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
+ Ptr32[MemTagAllocationTraceIndex] = collectStackTrace(RB->Depot);
+ Ptr32[MemTagAllocationTidIndex] = getThreadID();
+ }
+
+ void storeRingBufferEntry(AllocationRingBuffer *RB, void *Ptr,
+ u32 AllocationTrace, u32 AllocationTid,
+ uptr AllocationSize, u32 DeallocationTrace,
+ u32 DeallocationTid) {
+ uptr Pos = atomic_fetch_add(&RB->Pos, 1, memory_order_relaxed);
+ typename AllocationRingBuffer::Entry *Entry =
+ getRingBufferEntry(RB, Pos % RB->RingBufferElements);
+
+ // First invalidate our entry so that we don't attempt to interpret a
+ // partially written state in getSecondaryErrorInfo(). The fences below
+ // ensure that the compiler does not move the stores to Ptr in between the
+ // stores to the other fields.
+ atomic_store_relaxed(&Entry->Ptr, 0);
+
+ __atomic_signal_fence(__ATOMIC_SEQ_CST);
+ atomic_store_relaxed(&Entry->AllocationTrace, AllocationTrace);
+ atomic_store_relaxed(&Entry->AllocationTid, AllocationTid);
+ atomic_store_relaxed(&Entry->AllocationSize, AllocationSize);
+ atomic_store_relaxed(&Entry->DeallocationTrace, DeallocationTrace);
+ atomic_store_relaxed(&Entry->DeallocationTid, DeallocationTid);
+ __atomic_signal_fence(__ATOMIC_SEQ_CST);
+
+ atomic_store_relaxed(&Entry->Ptr, reinterpret_cast<uptr>(Ptr));
+ }
+
+ void storeSecondaryAllocationStackMaybe(const Options &Options, void *Ptr,
+ uptr Size) {
+ if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
+ return;
+ AllocationRingBuffer *RB = getRingBuffer();
+ if (!RB)
+ return;
+ u32 Trace = collectStackTrace(RB->Depot);
+ u32 Tid = getThreadID();
+
+ auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
+ Ptr32[MemTagAllocationTraceIndex] = Trace;
+ Ptr32[MemTagAllocationTidIndex] = Tid;
+
+ storeRingBufferEntry(RB, untagPointer(Ptr), Trace, Tid, Size, 0, 0);
+ }
+
+ void storeDeallocationStackMaybe(const Options &Options, void *Ptr,
+ u8 PrevTag, uptr Size) {
+ if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
+ return;
+ AllocationRingBuffer *RB = getRingBuffer();
+ if (!RB)
+ return;
+ auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
+ u32 AllocationTrace = Ptr32[MemTagAllocationTraceIndex];
+ u32 AllocationTid = Ptr32[MemTagAllocationTidIndex];
+
+ u32 DeallocationTrace = collectStackTrace(RB->Depot);
+ u32 DeallocationTid = getThreadID();
+
+ storeRingBufferEntry(RB, addFixedTag(untagPointer(Ptr), PrevTag),
+ AllocationTrace, AllocationTid, Size,
+ DeallocationTrace, DeallocationTid);
+ }
+
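+  // Number of report slots available in a scudo_error_info structure.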
+ static const size_t NumErrorReports =
+ sizeof(((scudo_error_info *)nullptr)->reports) /
+ sizeof(((scudo_error_info *)nullptr)->reports[0]);
+
+ static void getInlineErrorInfo(struct scudo_error_info *ErrorInfo,
+ size_t &NextErrorReport, uintptr_t FaultAddr,
+ const StackDepot *Depot,
+ const char *RegionInfoPtr, const char *Memory,
+ const char *MemoryTags, uintptr_t MemoryAddr,
+ size_t MemorySize, size_t MinDistance,
+ size_t MaxDistance) {
+ uptr UntaggedFaultAddr = untagPointer(FaultAddr);
+ u8 FaultAddrTag = extractTag(FaultAddr);
+ BlockInfo Info =
+ PrimaryT::findNearestBlock(RegionInfoPtr, UntaggedFaultAddr);
+
+ auto GetGranule = [&](uptr Addr, const char **Data, uint8_t *Tag) -> bool {
+ if (Addr < MemoryAddr || Addr + archMemoryTagGranuleSize() < Addr ||
+ Addr + archMemoryTagGranuleSize() > MemoryAddr + MemorySize)
+ return false;
+ *Data = &Memory[Addr - MemoryAddr];
+ *Tag = static_cast<u8>(
+ MemoryTags[(Addr - MemoryAddr) / archMemoryTagGranuleSize()]);
+ return true;
+ };
+
+ auto ReadBlock = [&](uptr Addr, uptr *ChunkAddr,
+ Chunk::UnpackedHeader *Header, const u32 **Data,
+ u8 *Tag) {
+ const char *BlockBegin;
+ u8 BlockBeginTag;
+ if (!GetGranule(Addr, &BlockBegin, &BlockBeginTag))
+ return false;
+ uptr ChunkOffset = getChunkOffsetFromBlock(BlockBegin);
+ *ChunkAddr = Addr + ChunkOffset;
+
+ const char *ChunkBegin;
+ if (!GetGranule(*ChunkAddr, &ChunkBegin, Tag))
+ return false;
+ *Header = *reinterpret_cast<const Chunk::UnpackedHeader *>(
+ ChunkBegin - Chunk::getHeaderSize());
+ *Data = reinterpret_cast<const u32 *>(ChunkBegin);
+
+ // Allocations of size 0 will have stashed the tag in the first byte of
+ // the chunk, see storeEndMarker().
+ if (Header->SizeOrUnusedBytes == 0)
+ *Tag = static_cast<u8>(*ChunkBegin);
+
+ return true;
+ };
+
+ if (NextErrorReport == NumErrorReports)
+ return;
+
+ auto CheckOOB = [&](uptr BlockAddr) {
+ if (BlockAddr < Info.RegionBegin || BlockAddr >= Info.RegionEnd)
+ return false;
+
+ uptr ChunkAddr;
+ Chunk::UnpackedHeader Header;
+ const u32 *Data;
+ uint8_t Tag;
+ if (!ReadBlock(BlockAddr, &ChunkAddr, &Header, &Data, &Tag) ||
+ Header.State != Chunk::State::Allocated || Tag != FaultAddrTag)
+ return false;
+
+ auto *R = &ErrorInfo->reports[NextErrorReport++];
+ R->error_type =
+ UntaggedFaultAddr < ChunkAddr ? BUFFER_UNDERFLOW : BUFFER_OVERFLOW;
+ R->allocation_address = ChunkAddr;
+ R->allocation_size = Header.SizeOrUnusedBytes;
+ if (Depot) {
+ collectTraceMaybe(Depot, R->allocation_trace,
+ Data[MemTagAllocationTraceIndex]);
+ }
+ R->allocation_tid = Data[MemTagAllocationTidIndex];
+ return NextErrorReport == NumErrorReports;
+ };
+
+ if (MinDistance == 0 && CheckOOB(Info.BlockBegin))
+ return;
+
+ for (size_t I = Max<size_t>(MinDistance, 1); I != MaxDistance; ++I)
+ if (CheckOOB(Info.BlockBegin + I * Info.BlockSize) ||
+ CheckOOB(Info.BlockBegin - I * Info.BlockSize))
+ return;
+ }
+
+ static void getRingBufferErrorInfo(struct scudo_error_info *ErrorInfo,
+ size_t &NextErrorReport,
+ uintptr_t FaultAddr,
+ const StackDepot *Depot,
+ const char *RingBufferPtr,
+ size_t RingBufferSize) {
+ auto *RingBuffer =
+ reinterpret_cast<const AllocationRingBuffer *>(RingBufferPtr);
+ size_t RingBufferElements = ringBufferElementsFromBytes(RingBufferSize);
+ if (!RingBuffer || RingBufferElements == 0 || !Depot)
+ return;
+ uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);
+
+ for (uptr I = Pos - 1; I != Pos - 1 - RingBufferElements &&
+ NextErrorReport != NumErrorReports;
+ --I) {
+ auto *Entry = getRingBufferEntry(RingBuffer, I % RingBufferElements);
+ uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr);
+ if (!EntryPtr)
+ continue;
+
+ uptr UntaggedEntryPtr = untagPointer(EntryPtr);
+ uptr EntrySize = atomic_load_relaxed(&Entry->AllocationSize);
+ u32 AllocationTrace = atomic_load_relaxed(&Entry->AllocationTrace);
+ u32 AllocationTid = atomic_load_relaxed(&Entry->AllocationTid);
+ u32 DeallocationTrace = atomic_load_relaxed(&Entry->DeallocationTrace);
+ u32 DeallocationTid = atomic_load_relaxed(&Entry->DeallocationTid);
+
+ if (DeallocationTid) {
+ // For UAF we only consider in-bounds fault addresses because
+ // out-of-bounds UAF is rare and attempting to detect it is very likely
+ // to result in false positives.
+ if (FaultAddr < EntryPtr || FaultAddr >= EntryPtr + EntrySize)
+ continue;
+ } else {
+ // Ring buffer OOB is only possible with secondary allocations. In this
+ // case we are guaranteed a guard region of at least a page on either
+ // side of the allocation (guard page on the right, guard page + tagged
+ // region on the left), so ignore any faults outside of that range.
+ if (FaultAddr < EntryPtr - getPageSizeCached() ||
+ FaultAddr >= EntryPtr + EntrySize + getPageSizeCached())
+ continue;
+
+ // For UAF the ring buffer will contain two entries, one for the
+ // allocation and another for the deallocation. Don't report buffer
+ // overflow/underflow using the allocation entry if we have already
+ // collected a report from the deallocation entry.
+ bool Found = false;
+ for (uptr J = 0; J != NextErrorReport; ++J) {
+ if (ErrorInfo->reports[J].allocation_address == UntaggedEntryPtr) {
+ Found = true;
+ break;
+ }
+ }
+ if (Found)
+ continue;
+ }
+
+ auto *R = &ErrorInfo->reports[NextErrorReport++];
+ if (DeallocationTid)
+ R->error_type = USE_AFTER_FREE;
+ else if (FaultAddr < EntryPtr)
+ R->error_type = BUFFER_UNDERFLOW;
+ else
+ R->error_type = BUFFER_OVERFLOW;
+
+ R->allocation_address = UntaggedEntryPtr;
+ R->allocation_size = EntrySize;
+ collectTraceMaybe(Depot, R->allocation_trace, AllocationTrace);
+ R->allocation_tid = AllocationTid;
+ collectTraceMaybe(Depot, R->deallocation_trace, DeallocationTrace);
+ R->deallocation_tid = DeallocationTid;
+ }
+ }
+
+ uptr getStats(ScopedString *Str) {
+ Primary.getStats(Str);
+ Secondary.getStats(Str);
+ Quarantine.getStats(Str);
+ TSDRegistry.getStats(Str);
+ return Str->length();
+ }
+
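+  // Ring buffer entries are laid out immediately after the
+  // AllocationRingBuffer header; return a pointer to the N-th entry.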
+ static typename AllocationRingBuffer::Entry *
+ getRingBufferEntry(AllocationRingBuffer *RB, uptr N) {
+ char *RBEntryStart =
+ &reinterpret_cast<char *>(RB)[sizeof(AllocationRingBuffer)];
+ return &reinterpret_cast<typename AllocationRingBuffer::Entry *>(
+ RBEntryStart)[N];
+ }
+ static const typename AllocationRingBuffer::Entry *
+ getRingBufferEntry(const AllocationRingBuffer *RB, uptr N) {
+ const char *RBEntryStart =
+ &reinterpret_cast<const char *>(RB)[sizeof(AllocationRingBuffer)];
+ return &reinterpret_cast<const typename AllocationRingBuffer::Entry *>(
+ RBEntryStart)[N];
+ }
+
+ void initRingBufferMaybe() {
+ ScopedLock L(RingBufferInitLock);
+ if (getRingBuffer() != nullptr)
+ return;
+
+ int ring_buffer_size = getFlags()->allocation_ring_buffer_size;
+ if (ring_buffer_size <= 0)
+ return;
+
+ u32 AllocationRingBufferSize = static_cast<u32>(ring_buffer_size);
+
+ // We store alloc and free stacks for each entry.
+ constexpr u32 kStacksPerRingBufferEntry = 2;
+ constexpr u32 kMaxU32Pow2 = ~(UINT32_MAX >> 1);
+ static_assert(isPowerOfTwo(kMaxU32Pow2));
+ // On Android we always have 3 frames at the bottom: __start_main,
+ // __libc_init, main, and 3 at the top: malloc, scudo_malloc and
+ // Allocator::allocate. This leaves 10 frames for the user app. The next
+ // smallest power of two (8) would only leave 2, which is clearly too
+ // little.
+ constexpr u32 kFramesPerStack = 16;
+ static_assert(isPowerOfTwo(kFramesPerStack));
+
+ if (AllocationRingBufferSize > kMaxU32Pow2 / kStacksPerRingBufferEntry)
+ return;
+ u32 TabSize = static_cast<u32>(roundUpPowerOfTwo(kStacksPerRingBufferEntry *
+ AllocationRingBufferSize));
+ if (TabSize > UINT32_MAX / kFramesPerStack)
+ return;
+ u32 RingSize = static_cast<u32>(TabSize * kFramesPerStack);
+
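+    // The depot is laid out as a StackDepot header followed by RingSize
+    // 64-bit frame slots and TabSize 32-bit hash-table buckets.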
+ uptr StackDepotSize = sizeof(StackDepot) + sizeof(atomic_u64) * RingSize +
+ sizeof(atomic_u32) * TabSize;
+ MemMapT DepotMap;
+ DepotMap.map(
+ /*Addr=*/0U, roundUp(StackDepotSize, getPageSizeCached()),
+ "scudo:stack_depot");
+ auto *Depot = reinterpret_cast<StackDepot *>(DepotMap.getBase());
+ Depot->init(RingSize, TabSize);
+
+ MemMapT MemMap;
+ MemMap.map(
+ /*Addr=*/0U,
+ roundUp(ringBufferSizeInBytes(AllocationRingBufferSize),
+ getPageSizeCached()),
+ "scudo:ring_buffer");
+ auto *RB = reinterpret_cast<AllocationRingBuffer *>(MemMap.getBase());
+ RB->RawRingBufferMap = MemMap;
+ RB->RingBufferElements = AllocationRingBufferSize;
+ RB->Depot = Depot;
+ RB->StackDepotSize = StackDepotSize;
+ RB->RawStackDepotMap = DepotMap;
+
+ atomic_store(&RingBufferAddress, reinterpret_cast<uptr>(RB),
+ memory_order_release);
+ }
+
+ void unmapRingBuffer() {
+ AllocationRingBuffer *RB = getRingBuffer();
+ if (RB == nullptr)
+ return;
+    // N.B. the RawStackDepotMap object is stored within the ring buffer
+    // mapping, so the depot must be unmapped before the ring buffer itself;
+    // the order is very important.
+ RB->RawStackDepotMap.unmap(RB->RawStackDepotMap.getBase(),
+ RB->RawStackDepotMap.getCapacity());
+ // Note that the `RB->RawRingBufferMap` is stored on the pages managed by
+ // itself. Take over the ownership before calling unmap() so that any
+ // operation along with unmap() won't touch inaccessible pages.
+ MemMapT RawRingBufferMap = RB->RawRingBufferMap;
+ RawRingBufferMap.unmap(RawRingBufferMap.getBase(),
+ RawRingBufferMap.getCapacity());
+ atomic_store(&RingBufferAddress, 0, memory_order_release);
+ }
+
+ static constexpr size_t ringBufferSizeInBytes(u32 RingBufferElements) {
+ return sizeof(AllocationRingBuffer) +
+ RingBufferElements * sizeof(typename AllocationRingBuffer::Entry);
+ }
+
+ static constexpr size_t ringBufferElementsFromBytes(size_t Bytes) {
+ if (Bytes < sizeof(AllocationRingBuffer)) {
+ return 0;
+ }
+ return (Bytes - sizeof(AllocationRingBuffer)) /
+ sizeof(typename AllocationRingBuffer::Entry);
+ }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_COMBINED_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.cpp
new file mode 100644
index 000000000000..06e930638f6f
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.cpp
@@ -0,0 +1,24 @@
+//===-- common.cpp ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "common.h"
+#include "atomic_helpers.h"
+#include "string_utils.h"
+
+namespace scudo {
+
+uptr PageSizeCached;
+uptr getPageSize();
+
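+// Queries the platform page size and caches it; getPageSizeCached() in
+// common.h uses PageSizeCached to fast-path subsequent lookups.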
+uptr getPageSizeSlow() {
+ PageSizeCached = getPageSize();
+ CHECK_NE(PageSizeCached, 0);
+ return PageSizeCached;
+}
+
+} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.h
new file mode 100644
index 000000000000..151fbd317e74
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.h
@@ -0,0 +1,236 @@
+//===-- common.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_COMMON_H_
+#define SCUDO_COMMON_H_
+
+#include "internal_defs.h"
+
+#include "fuchsia.h"
+#include "linux.h"
+#include "trusty.h"
+
+#include <stddef.h>
+#include <string.h>
+#include <unistd.h>
+
+namespace scudo {
+
+template <class Dest, class Source> inline Dest bit_cast(const Source &S) {
+ static_assert(sizeof(Dest) == sizeof(Source), "");
+ Dest D;
+ memcpy(&D, &S, sizeof(D));
+ return D;
+}
+
+inline constexpr bool isPowerOfTwo(uptr X) {
+ if (X == 0)
+ return false;
+ return (X & (X - 1)) == 0;
+}
+
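+// roundUp/roundDown/isAligned require a power-of-two Boundary or Alignment;
+// the *Slow variants accept arbitrary values.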
+inline constexpr uptr roundUp(uptr X, uptr Boundary) {
+ DCHECK(isPowerOfTwo(Boundary));
+ return (X + Boundary - 1) & ~(Boundary - 1);
+}
+inline constexpr uptr roundUpSlow(uptr X, uptr Boundary) {
+ return ((X + Boundary - 1) / Boundary) * Boundary;
+}
+
+inline constexpr uptr roundDown(uptr X, uptr Boundary) {
+ DCHECK(isPowerOfTwo(Boundary));
+ return X & ~(Boundary - 1);
+}
+inline constexpr uptr roundDownSlow(uptr X, uptr Boundary) {
+ return (X / Boundary) * Boundary;
+}
+
+inline constexpr bool isAligned(uptr X, uptr Alignment) {
+ DCHECK(isPowerOfTwo(Alignment));
+ return (X & (Alignment - 1)) == 0;
+}
+inline constexpr bool isAlignedSlow(uptr X, uptr Alignment) {
+ return X % Alignment == 0;
+}
+
+template <class T> constexpr T Min(T A, T B) { return A < B ? A : B; }
+
+template <class T> constexpr T Max(T A, T B) { return A > B ? A : B; }
+
+template <class T> void Swap(T &A, T &B) {
+ T Tmp = A;
+ A = B;
+ B = Tmp;
+}
+
+inline uptr getMostSignificantSetBitIndex(uptr X) {
+ DCHECK_NE(X, 0U);
+ return SCUDO_WORDSIZE - 1U - static_cast<uptr>(__builtin_clzl(X));
+}
+
+inline uptr roundUpPowerOfTwo(uptr Size) {
+ DCHECK(Size);
+ if (isPowerOfTwo(Size))
+ return Size;
+ const uptr Up = getMostSignificantSetBitIndex(Size);
+ DCHECK_LT(Size, (1UL << (Up + 1)));
+ DCHECK_GT(Size, (1UL << Up));
+ return 1UL << (Up + 1);
+}
+
+inline uptr getLeastSignificantSetBitIndex(uptr X) {
+ DCHECK_NE(X, 0U);
+ return static_cast<uptr>(__builtin_ctzl(X));
+}
+
+inline uptr getLog2(uptr X) {
+ DCHECK(isPowerOfTwo(X));
+ return getLeastSignificantSetBitIndex(X);
+}
+
+inline u32 getRandomU32(u32 *State) {
+ // ANSI C linear congruential PRNG (16-bit output).
+ // return (*State = *State * 1103515245 + 12345) >> 16;
+ // XorShift (32-bit output).
+ *State ^= *State << 13;
+ *State ^= *State >> 17;
+ *State ^= *State << 5;
+ return *State;
+}
+
+inline u32 getRandomModN(u32 *State, u32 N) {
+ return getRandomU32(State) % N; // [0, N)
+}
+
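+// Fisher-Yates shuffle driven by the xorshift PRNG above.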
+template <typename T> inline void shuffle(T *A, u32 N, u32 *RandState) {
+ if (N <= 1)
+ return;
+ u32 State = *RandState;
+ for (u32 I = N - 1; I > 0; I--)
+ Swap(A[I], A[getRandomModN(&State, I + 1)]);
+ *RandState = State;
+}
+
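+// Expresses Numerator / Denominator as a percentage, split into an integral
+// part and a rounded two-digit fractional part; reports 100 when the
+// denominator is 0.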
+inline void computePercentage(uptr Numerator, uptr Denominator, uptr *Integral,
+ uptr *Fractional) {
+ constexpr uptr Digits = 100;
+ if (Denominator == 0) {
+ *Integral = 100;
+ *Fractional = 0;
+ return;
+ }
+
+ *Integral = Numerator * Digits / Denominator;
+ *Fractional =
+ (((Numerator * Digits) % Denominator) * Digits + Denominator / 2) /
+ Denominator;
+}
+
+// Platform specific functions.
+
+extern uptr PageSizeCached;
+uptr getPageSizeSlow();
+inline uptr getPageSizeCached() {
+#if SCUDO_ANDROID && defined(PAGE_SIZE)
+ // Most Android builds have a build-time constant page size.
+ return PAGE_SIZE;
+#endif
+ if (LIKELY(PageSizeCached))
+ return PageSizeCached;
+ return getPageSizeSlow();
+}
+
+// Returns 0 if the number of CPUs could not be determined.
+u32 getNumberOfCPUs();
+
+const char *getEnv(const char *Name);
+
+u64 getMonotonicTime();
+// Gets the time faster but with less accuracy. Can call getMonotonicTime
+// if no fast version is available.
+u64 getMonotonicTimeFast();
+
+u32 getThreadID();
+
+// Our randomness gathering function is limited to 256 bytes to ensure we get
+// as many bytes as requested, and avoid interruptions (on Linux).
+constexpr uptr MaxRandomLength = 256U;
+bool getRandom(void *Buffer, uptr Length, bool Blocking = false);
+
+// Platform memory mapping functions.
+
+#define MAP_ALLOWNOMEM (1U << 0)
+#define MAP_NOACCESS (1U << 1)
+#define MAP_RESIZABLE (1U << 2)
+#define MAP_MEMTAG (1U << 3)
+#define MAP_PRECOMMIT (1U << 4)
+
+// Our platform memory mapping use is restricted to 3 scenarios:
+// - reserve memory at a random address (MAP_NOACCESS);
+// - commit memory in a previously reserved space;
+// - commit memory at a random address.
+// As such, only a subset of parameter combinations is valid, which is checked
+// by the function implementation. The Data parameter allows passing opaque
+// platform-specific data to the function.
+// On error, returns nullptr if MAP_ALLOWNOMEM is specified, and dies otherwise.
+void *map(void *Addr, uptr Size, const char *Name, uptr Flags = 0,
+ MapPlatformData *Data = nullptr);
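+// An illustrative sketch of the reserve-then-commit pattern described above
+// (the names and sizes here are hypothetical):
+//   MapPlatformData Data = {};
+//   void *Base = map(nullptr, ReservedSize, "scudo:reserved",
+//                    MAP_NOACCESS | MAP_ALLOWNOMEM, &Data);
+//   if (Base)
+//     map(Base, CommitSize, "scudo:primary", 0, &Data);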
+
+// Indicates that we are getting rid of the whole mapping, which might have
+// further consequences on Data, depending on the platform.
+#define UNMAP_ALL (1U << 0)
+
+void unmap(void *Addr, uptr Size, uptr Flags = 0,
+ MapPlatformData *Data = nullptr);
+
+void setMemoryPermission(uptr Addr, uptr Size, uptr Flags,
+ MapPlatformData *Data = nullptr);
+
+void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
+ MapPlatformData *Data = nullptr);
+
+// Logging related functions.
+
+void setAbortMessage(const char *Message);
+
+struct BlockInfo {
+ uptr BlockBegin;
+ uptr BlockSize;
+ uptr RegionBegin;
+ uptr RegionEnd;
+};
+
+enum class Option : u8 {
+ ReleaseInterval, // Release to OS interval in milliseconds.
+ MemtagTuning, // Whether to tune tagging for UAF or overflow.
+ ThreadDisableMemInit, // Whether to disable automatic heap initialization and,
+ // where possible, memory tagging, on this thread.
+ MaxCacheEntriesCount, // Maximum number of blocks that can be cached.
+ MaxCacheEntrySize, // Maximum size of a block that can be cached.
+ MaxTSDsCount, // Number of usable TSDs for the shared registry.
+};
+
+enum class ReleaseToOS : u8 {
+ Normal, // Follow the normal rules for releasing pages to the OS
+ Force, // Force release pages to the OS, but avoid cases that take too long.
+ ForceAll, // Force release every page possible regardless of how long it will
+ // take.
+};
+
+constexpr unsigned char PatternFillByte = 0xAB;
+
+enum FillContentsMode {
+ NoFill = 0,
+ ZeroFill = 1,
+ PatternOrZeroFill = 2 // Pattern fill unless the memory is known to be
+ // zero-initialized already.
+};
+
+} // namespace scudo
+
+#endif // SCUDO_COMMON_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable.h
new file mode 100644
index 000000000000..3f16c86651e7
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable.h
@@ -0,0 +1,44 @@
+//===-- condition_variable.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CONDITION_VARIABLE_H_
+#define SCUDO_CONDITION_VARIABLE_H_
+
+#include "condition_variable_base.h"
+
+#include "common.h"
+#include "platform.h"
+
+#include "condition_variable_linux.h"
+
+namespace scudo {
+
+// A default implementation of a condition variable. It doesn't do a real
+// `wait`; instead it only spins for a short amount of time.
+class ConditionVariableDummy
+ : public ConditionVariableBase<ConditionVariableDummy> {
+public:
+ void notifyAllImpl(UNUSED HybridMutex &M) REQUIRES(M) {}
+
+ void waitImpl(UNUSED HybridMutex &M) REQUIRES(M) {
+ M.unlock();
+
+ constexpr u32 SpinTimes = 64;
+ volatile u32 V = 0;
+ for (u32 I = 0; I < SpinTimes; ++I) {
+ u32 Tmp = V + 1;
+ V = Tmp;
+ }
+
+ M.lock();
+ }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_CONDITION_VARIABLE_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable_base.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable_base.h
new file mode 100644
index 000000000000..416c327fed49
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable_base.h
@@ -0,0 +1,56 @@
+//===-- condition_variable_base.h -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CONDITION_VARIABLE_BASE_H_
+#define SCUDO_CONDITION_VARIABLE_BASE_H_
+
+#include "mutex.h"
+#include "thread_annotations.h"
+
+namespace scudo {
+
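+// Usage sketch (CV, M and Ready are hypothetical): both the waiter and the
+// notifier hold the same HybridMutex around the calls, e.g.
+//   CV.bindTestOnly(M);                       // optional, debug-only checking
+//   waiter:   { ScopedLock L(M); while (!Ready) CV.wait(M); }
+//   notifier: { ScopedLock L(M); Ready = true; CV.notifyAll(M); }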
+template <typename Derived> class ConditionVariableBase {
+public:
+ constexpr ConditionVariableBase() = default;
+
+ void bindTestOnly(HybridMutex &Mutex) {
+#if SCUDO_DEBUG
+ boundMutex = &Mutex;
+#else
+ (void)Mutex;
+#endif
+ }
+
+ void notifyAll(HybridMutex &M) REQUIRES(M) {
+#if SCUDO_DEBUG
+ CHECK_EQ(&M, boundMutex);
+#endif
+ getDerived()->notifyAllImpl(M);
+ }
+
+ void wait(HybridMutex &M) REQUIRES(M) {
+#if SCUDO_DEBUG
+ CHECK_EQ(&M, boundMutex);
+#endif
+ getDerived()->waitImpl(M);
+ }
+
+protected:
+ Derived *getDerived() { return static_cast<Derived *>(this); }
+
+#if SCUDO_DEBUG
+ // Because thread-safety analysis doesn't support pointer aliasing, we are not
+ // able to mark the proper annotations without false positive. Instead, we
+ // pass the lock and do the same-lock check separately.
+ HybridMutex *boundMutex = nullptr;
+#endif
+};
+
+} // namespace scudo
+
+#endif // SCUDO_CONDITION_VARIABLE_BASE_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable_linux.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable_linux.cpp
new file mode 100644
index 000000000000..e6d9bd1771a4
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable_linux.cpp
@@ -0,0 +1,52 @@
+//===-- condition_variable_linux.cpp ----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+#if SCUDO_LINUX
+
+#include "condition_variable_linux.h"
+
+#include "atomic_helpers.h"
+
+#include <limits.h>
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+namespace scudo {
+
+void ConditionVariableLinux::notifyAllImpl(UNUSED HybridMutex &M) {
+ const u32 V = atomic_load_relaxed(&Counter);
+ atomic_store_relaxed(&Counter, V + 1);
+
+  // TODO(chiahungduan): Move the waiters from the futex waiting queue
+  // `Counter` to the futex waiting queue `M` so that the awoken threads won't
+  // immediately block again on `M`, which is still held by the current thread.
+ if (LastNotifyAll != V) {
+ syscall(SYS_futex, reinterpret_cast<uptr>(&Counter), FUTEX_WAKE_PRIVATE,
+ INT_MAX, nullptr, nullptr, 0);
+ }
+
+ LastNotifyAll = V + 1;
+}
+
+void ConditionVariableLinux::waitImpl(HybridMutex &M) {
+ const u32 V = atomic_load_relaxed(&Counter) + 1;
+ atomic_store_relaxed(&Counter, V);
+
+ // TODO: Use ScopedUnlock when it's supported.
+ M.unlock();
+ syscall(SYS_futex, reinterpret_cast<uptr>(&Counter), FUTEX_WAIT_PRIVATE, V,
+ nullptr, nullptr, 0);
+ M.lock();
+}
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable_linux.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable_linux.h
new file mode 100644
index 000000000000..cd073287326d
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable_linux.h
@@ -0,0 +1,38 @@
+//===-- condition_variable_linux.h ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CONDITION_VARIABLE_LINUX_H_
+#define SCUDO_CONDITION_VARIABLE_LINUX_H_
+
+#include "platform.h"
+
+#if SCUDO_LINUX
+
+#include "atomic_helpers.h"
+#include "condition_variable_base.h"
+#include "thread_annotations.h"
+
+namespace scudo {
+
+class ConditionVariableLinux
+ : public ConditionVariableBase<ConditionVariableLinux> {
+public:
+ void notifyAllImpl(HybridMutex &M) REQUIRES(M);
+
+ void waitImpl(HybridMutex &M) REQUIRES(M);
+
+private:
+ u32 LastNotifyAll = 0;
+ atomic_u32 Counter = {};
+};
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX
+
+#endif // SCUDO_CONDITION_VARIABLE_LINUX_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/crc32_hw.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/crc32_hw.cpp
new file mode 100644
index 000000000000..910cf9460313
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/crc32_hw.cpp
@@ -0,0 +1,29 @@
+//===-- crc32_hw.cpp --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "checksum.h"
+
+namespace scudo {
+
+#if defined(__CRC32__) || defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+u32 computeHardwareCRC32(u32 Crc, uptr Data) {
+ return static_cast<u32>(CRC32_INTRINSIC(Crc, Data));
+}
+#endif // defined(__CRC32__) || defined(__SSE4_2__) ||
+ // defined(__ARM_FEATURE_CRC32)
+
+#if defined(__loongarch__)
+u32 computeHardwareCRC32(u32 Crc, uptr Data) {
+ // The LoongArch CRC intrinsics have the two input arguments swapped, and
+ // expect them to be signed.
+ return static_cast<u32>(
+ CRC32_INTRINSIC(static_cast<long>(Data), static_cast<int>(Crc)));
+}
+#endif // defined(__loongarch__)
+
+} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags.cpp
new file mode 100644
index 000000000000..f498edfbd326
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags.cpp
@@ -0,0 +1,76 @@
+//===-- flags.cpp -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "flags.h"
+#include "common.h"
+#include "flags_parser.h"
+
+#include "scudo/interface.h"
+
+namespace scudo {
+
+Flags *getFlags() {
+ static Flags F;
+ return &F;
+}
+
+void Flags::setDefaults() {
+#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "flags.inc"
+#undef SCUDO_FLAG
+
+#ifdef GWP_ASAN_HOOKS
+#define GWP_ASAN_OPTION(Type, Name, DefaultValue, Description) \
+ GWP_ASAN_##Name = DefaultValue;
+#include "gwp_asan/options.inc"
+#undef GWP_ASAN_OPTION
+#endif // GWP_ASAN_HOOKS
+}
+
+void registerFlags(FlagParser *Parser, Flags *F) {
+#define SCUDO_FLAG(Type, Name, DefaultValue, Description) \
+ Parser->registerFlag(#Name, Description, FlagType::FT_##Type, \
+ reinterpret_cast<void *>(&F->Name));
+#include "flags.inc"
+#undef SCUDO_FLAG
+
+#ifdef GWP_ASAN_HOOKS
+#define GWP_ASAN_OPTION(Type, Name, DefaultValue, Description) \
+ Parser->registerFlag("GWP_ASAN_" #Name, Description, FlagType::FT_##Type, \
+ reinterpret_cast<void *>(&F->GWP_ASAN_##Name));
+#include "gwp_asan/options.inc"
+#undef GWP_ASAN_OPTION
+#endif // GWP_ASAN_HOOKS
+}
+
+static const char *getCompileDefinitionScudoDefaultOptions() {
+#ifdef SCUDO_DEFAULT_OPTIONS
+ return STRINGIFY(SCUDO_DEFAULT_OPTIONS);
+#else
+ return "";
+#endif
+}
+
+static const char *getScudoDefaultOptions() {
+ return (&__scudo_default_options) ? __scudo_default_options() : "";
+}
+
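+// Flags are parsed, in increasing order of precedence, from the compile-time
+// SCUDO_DEFAULT_OPTIONS, the weak __scudo_default_options() callback, and the
+// SCUDO_OPTIONS environment variable, e.g.
+//   SCUDO_OPTIONS=delete_size_mismatch=false:release_to_os_interval_ms=-1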
+void initFlags() {
+ Flags *F = getFlags();
+ F->setDefaults();
+ FlagParser Parser;
+ registerFlags(&Parser, F);
+ Parser.parseString(getCompileDefinitionScudoDefaultOptions());
+ Parser.parseString(getScudoDefaultOptions());
+ Parser.parseString(getEnv("SCUDO_OPTIONS"));
+ if (const char *V = getEnv("SCUDO_ALLOCATION_RING_BUFFER_SIZE")) {
+ Parser.parseStringPair("allocation_ring_buffer_size", V);
+ }
+}
+
+} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags.h
new file mode 100644
index 000000000000..2cd0a5b1334b
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags.h
@@ -0,0 +1,38 @@
+//===-- flags.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FLAGS_H_
+#define SCUDO_FLAGS_H_
+
+#include "internal_defs.h"
+
+namespace scudo {
+
+struct Flags {
+#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "flags.inc"
+#undef SCUDO_FLAG
+
+#ifdef GWP_ASAN_HOOKS
+#define GWP_ASAN_OPTION(Type, Name, DefaultValue, Description) \
+ Type GWP_ASAN_##Name;
+#include "gwp_asan/options.inc"
+#undef GWP_ASAN_OPTION
+#endif // GWP_ASAN_HOOKS
+
+ void setDefaults();
+};
+
+Flags *getFlags();
+void initFlags();
+class FlagParser;
+void registerFlags(FlagParser *Parser, Flags *F);
+
+} // namespace scudo
+
+#endif // SCUDO_FLAGS_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags.inc b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags.inc
new file mode 100644
index 000000000000..ff0c28e1db7c
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags.inc
@@ -0,0 +1,51 @@
+//===-- flags.inc -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FLAG
+#error "Define SCUDO_FLAG prior to including this file!"
+#endif
+
+SCUDO_FLAG(int, quarantine_size_kb, 0,
+ "Size (in kilobytes) of quarantine used to delay the actual "
+ "deallocation of chunks. Lower value may reduce memory usage but "
+ "decrease the effectiveness of the mitigation.")
+
+SCUDO_FLAG(int, thread_local_quarantine_size_kb, 0,
+ "Size (in kilobytes) of per-thread cache used to offload the global "
+ "quarantine. Lower value may reduce memory usage but might increase "
+ "the contention on the global quarantine.")
+
+SCUDO_FLAG(int, quarantine_max_chunk_size, 0,
+ "Size (in bytes) up to which chunks will be quarantined (if lower "
+ "than or equal to).")
+
+SCUDO_FLAG(bool, dealloc_type_mismatch, false,
+ "Terminate on a type mismatch in allocation-deallocation functions, "
+ "eg: malloc/delete, new/free, new/delete[], etc.")
+
+SCUDO_FLAG(bool, delete_size_mismatch, true,
+ "Terminate on a size mismatch between a sized-delete and the actual "
+ "size of a chunk (as provided to new/new[]).")
+
+SCUDO_FLAG(bool, zero_contents, false, "Zero chunk contents on allocation.")
+
+SCUDO_FLAG(bool, pattern_fill_contents, false,
+ "Pattern fill chunk contents on allocation.")
+
+SCUDO_FLAG(bool, may_return_null, true,
+ "Indicate whether the allocator should terminate instead of "
+ "returning NULL in otherwise non-fatal error scenarios, eg: OOM, "
+ "invalid allocation alignments, etc.")
+
+SCUDO_FLAG(int, release_to_os_interval_ms, 5000,
+ "Interval (in milliseconds) at which to attempt release of unused "
+ "memory to the OS. Negative values disable the feature.")
+
+SCUDO_FLAG(int, allocation_ring_buffer_size, 32768,
+           "Number of entries to keep in the allocation ring buffer for "
+           "scudo. Values less than or equal to zero disable the buffer.")
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags_parser.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags_parser.cpp
new file mode 100644
index 000000000000..3d8c6f3789b4
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags_parser.cpp
@@ -0,0 +1,178 @@
+//===-- flags_parser.cpp ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "flags_parser.h"
+#include "common.h"
+#include "report.h"
+
+#include <errno.h>
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+
+namespace scudo {
+
+class UnknownFlagsRegistry {
+ static const u32 MaxUnknownFlags = 16;
+ const char *UnknownFlagsNames[MaxUnknownFlags];
+ u32 NumberOfUnknownFlags;
+
+public:
+ void add(const char *Name) {
+ CHECK_LT(NumberOfUnknownFlags, MaxUnknownFlags);
+ UnknownFlagsNames[NumberOfUnknownFlags++] = Name;
+ }
+
+ void report() {
+ if (!NumberOfUnknownFlags)
+ return;
+ Printf("Scudo WARNING: found %d unrecognized flag(s):\n",
+ NumberOfUnknownFlags);
+ for (u32 I = 0; I < NumberOfUnknownFlags; ++I)
+ Printf(" %s\n", UnknownFlagsNames[I]);
+ NumberOfUnknownFlags = 0;
+ }
+};
+static UnknownFlagsRegistry UnknownFlags;
+
+void reportUnrecognizedFlags() { UnknownFlags.report(); }
+
+void FlagParser::printFlagDescriptions() {
+ Printf("Available flags for Scudo:\n");
+ for (u32 I = 0; I < NumberOfFlags; ++I)
+ Printf("\t%s\n\t\t- %s\n", Flags[I].Name, Flags[I].Desc);
+}
+
+static bool isSeparator(char C) {
+ return C == ' ' || C == ',' || C == ':' || C == '\n' || C == '\t' ||
+ C == '\r';
+}
+
+static bool isSeparatorOrNull(char C) { return !C || isSeparator(C); }
+
+void FlagParser::skipWhitespace() {
+ while (isSeparator(Buffer[Pos]))
+ ++Pos;
+}
+
+void FlagParser::parseFlag() {
+ const uptr NameStart = Pos;
+ while (Buffer[Pos] != '=' && !isSeparatorOrNull(Buffer[Pos]))
+ ++Pos;
+ if (Buffer[Pos] != '=')
+ reportError("expected '='");
+ const char *Name = Buffer + NameStart;
+ const uptr ValueStart = ++Pos;
+ const char *Value;
+ if (Buffer[Pos] == '\'' || Buffer[Pos] == '"') {
+ const char Quote = Buffer[Pos++];
+ while (Buffer[Pos] != 0 && Buffer[Pos] != Quote)
+ ++Pos;
+ if (Buffer[Pos] == 0)
+ reportError("unterminated string");
+ Value = Buffer + ValueStart + 1;
+ ++Pos; // consume the closing quote
+ } else {
+ while (!isSeparatorOrNull(Buffer[Pos]))
+ ++Pos;
+ Value = Buffer + ValueStart;
+ }
+ if (!runHandler(Name, Value, '='))
+ reportError("flag parsing failed.");
+}
+
+void FlagParser::parseFlags() {
+ while (true) {
+ skipWhitespace();
+ if (Buffer[Pos] == 0)
+ break;
+ parseFlag();
+ }
+}
+
+void FlagParser::parseString(const char *S) {
+ if (!S)
+ return;
+  // Back up the current parser state to allow nested parseString() calls.
+ const char *OldBuffer = Buffer;
+ const uptr OldPos = Pos;
+ Buffer = S;
+ Pos = 0;
+
+ parseFlags();
+
+ Buffer = OldBuffer;
+ Pos = OldPos;
+}
+
+inline bool parseBool(const char *Value, bool *b) {
+ if (strncmp(Value, "0", 1) == 0 || strncmp(Value, "no", 2) == 0 ||
+ strncmp(Value, "false", 5) == 0) {
+ *b = false;
+ return true;
+ }
+ if (strncmp(Value, "1", 1) == 0 || strncmp(Value, "yes", 3) == 0 ||
+ strncmp(Value, "true", 4) == 0) {
+ *b = true;
+ return true;
+ }
+ return false;
+}
+
+void FlagParser::parseStringPair(const char *Name, const char *Value) {
+ if (!runHandler(Name, Value, '\0'))
+ reportError("flag parsing failed.");
+}
+
+bool FlagParser::runHandler(const char *Name, const char *Value,
+ const char Sep) {
+ for (u32 I = 0; I < NumberOfFlags; ++I) {
+ const uptr Len = strlen(Flags[I].Name);
+ if (strncmp(Name, Flags[I].Name, Len) != 0 || Name[Len] != Sep)
+ continue;
+ bool Ok = false;
+ switch (Flags[I].Type) {
+ case FlagType::FT_bool:
+ Ok = parseBool(Value, reinterpret_cast<bool *>(Flags[I].Var));
+ if (!Ok)
+ reportInvalidFlag("bool", Value);
+ break;
+ case FlagType::FT_int:
+ char *ValueEnd;
+ errno = 0;
+ long V = strtol(Value, &ValueEnd, 10);
+ if (errno != 0 || // strtol failed (over or underflow)
+ V > INT_MAX || V < INT_MIN || // overflows integer
+ // contains unexpected characters
+ (*ValueEnd != '"' && *ValueEnd != '\'' &&
+ !isSeparatorOrNull(*ValueEnd))) {
+ reportInvalidFlag("int", Value);
+ break;
+ }
+ *reinterpret_cast<int *>(Flags[I].Var) = static_cast<int>(V);
+ Ok = true;
+ break;
+ }
+ return Ok;
+ }
+  // Unrecognized flag. This is not a fatal error; we may print a warning later.
+ UnknownFlags.add(Name);
+ return true;
+}
+
+void FlagParser::registerFlag(const char *Name, const char *Desc, FlagType Type,
+ void *Var) {
+ CHECK_LT(NumberOfFlags, MaxFlags);
+ Flags[NumberOfFlags].Name = Name;
+ Flags[NumberOfFlags].Desc = Desc;
+ Flags[NumberOfFlags].Type = Type;
+ Flags[NumberOfFlags].Var = Var;
+ ++NumberOfFlags;
+}
+
+} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags_parser.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags_parser.h
new file mode 100644
index 000000000000..ded496fda3b9
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags_parser.h
@@ -0,0 +1,56 @@
+//===-- flags_parser.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FLAGS_PARSER_H_
+#define SCUDO_FLAGS_PARSER_H_
+
+#include "report.h"
+#include "string_utils.h"
+
+#include <stddef.h>
+
+namespace scudo {
+
+enum class FlagType : u8 {
+ FT_bool,
+ FT_int,
+};
+
+class FlagParser {
+public:
+ void registerFlag(const char *Name, const char *Desc, FlagType Type,
+ void *Var);
+ void parseString(const char *S);
+ void printFlagDescriptions();
+ void parseStringPair(const char *Name, const char *Value);
+
+private:
+ static const u32 MaxFlags = 20;
+ struct Flag {
+ const char *Name;
+ const char *Desc;
+ FlagType Type;
+ void *Var;
+ } Flags[MaxFlags];
+
+ u32 NumberOfFlags = 0;
+ const char *Buffer = nullptr;
+ uptr Pos = 0;
+
+ void reportFatalError(const char *Error);
+ void skipWhitespace();
+ void parseFlags();
+ void parseFlag();
+ bool runHandler(const char *Name, const char *Value, char Sep);
+};
+
+void reportUnrecognizedFlags();
+
+} // namespace scudo
+
+#endif // SCUDO_FLAGS_PARSER_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.cpp
new file mode 100644
index 000000000000..2144f1b63f89
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.cpp
@@ -0,0 +1,236 @@
+//===-- fuchsia.cpp ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+#if SCUDO_FUCHSIA
+
+#include "common.h"
+#include "mutex.h"
+#include "string_utils.h"
+
+#include <lib/sync/mutex.h> // for sync_mutex_t
+#include <stdlib.h> // for getenv()
+#include <zircon/compiler.h>
+#include <zircon/process.h>
+#include <zircon/sanitizer.h>
+#include <zircon/status.h>
+#include <zircon/syscalls.h>
+
+namespace scudo {
+
+uptr getPageSize() { return _zx_system_get_page_size(); }
+
+void NORETURN die() { __builtin_trap(); }
+
+// We zero-initialize the Data parameter of map(); make sure this is consistent
+// with ZX_HANDLE_INVALID.
+static_assert(ZX_HANDLE_INVALID == 0, "");
+
+static void NORETURN dieOnError(zx_status_t Status, const char *FnName,
+ uptr Size) {
+ ScopedString Error;
+ Error.append("SCUDO ERROR: %s failed with size %zuKB (%s)", FnName,
+ Size >> 10, zx_status_get_string(Status));
+ outputRaw(Error.data());
+ die();
+}
+
+static void *allocateVmar(uptr Size, MapPlatformData *Data, bool AllowNoMem) {
+ // Only scenario so far.
+ DCHECK(Data);
+ DCHECK_EQ(Data->Vmar, ZX_HANDLE_INVALID);
+
+ const zx_status_t Status = _zx_vmar_allocate(
+ _zx_vmar_root_self(),
+ ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
+ Size, &Data->Vmar, &Data->VmarBase);
+ if (UNLIKELY(Status != ZX_OK)) {
+ if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+ dieOnError(Status, "zx_vmar_allocate", Size);
+ return nullptr;
+ }
+ return reinterpret_cast<void *>(Data->VmarBase);
+}
+
+void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
+ MapPlatformData *Data) {
+ DCHECK_EQ(Size % getPageSizeCached(), 0);
+ const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);
+
+ // For MAP_NOACCESS, just allocate a Vmar and return.
+ if (Flags & MAP_NOACCESS)
+ return allocateVmar(Size, Data, AllowNoMem);
+
+ const zx_handle_t Vmar = (Data && Data->Vmar != ZX_HANDLE_INVALID)
+ ? Data->Vmar
+ : _zx_vmar_root_self();
+
+ zx_status_t Status;
+ zx_handle_t Vmo;
+ uint64_t VmoSize = 0;
+ if (Data && Data->Vmo != ZX_HANDLE_INVALID) {
+ // If a Vmo was specified, it's a resize operation.
+ CHECK(Addr);
+ DCHECK(Flags & MAP_RESIZABLE);
+ Vmo = Data->Vmo;
+ VmoSize = Data->VmoSize;
+ Status = _zx_vmo_set_size(Vmo, VmoSize + Size);
+ if (Status != ZX_OK) {
+ if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+ dieOnError(Status, "zx_vmo_set_size", VmoSize + Size);
+ return nullptr;
+ }
+ } else {
+ // Otherwise, create a Vmo and set its name.
+ Status = _zx_vmo_create(Size, ZX_VMO_RESIZABLE, &Vmo);
+ if (UNLIKELY(Status != ZX_OK)) {
+ if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+ dieOnError(Status, "zx_vmo_create", Size);
+ return nullptr;
+ }
+ _zx_object_set_property(Vmo, ZX_PROP_NAME, Name, strlen(Name));
+ }
+
+ uintptr_t P;
+ zx_vm_option_t MapFlags =
+ ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_ALLOW_FAULTS;
+ if (Addr)
+ DCHECK(Data);
+ const uint64_t Offset =
+ Addr ? reinterpret_cast<uintptr_t>(Addr) - Data->VmarBase : 0;
+ if (Offset)
+ MapFlags |= ZX_VM_SPECIFIC;
+ Status = _zx_vmar_map(Vmar, MapFlags, Offset, Vmo, VmoSize, Size, &P);
+ if (UNLIKELY(Status != ZX_OK)) {
+ if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+ dieOnError(Status, "zx_vmar_map", Size);
+ return nullptr;
+ }
+
+ if (Flags & MAP_PRECOMMIT) {
+ Status = _zx_vmar_op_range(Vmar, ZX_VMAR_OP_COMMIT, P, Size,
+ /*buffer=*/nullptr, /*buffer_size=*/0);
+ }
+
+  // No need to track the Vmo if we don't intend to resize it. Close it.
+ if (Flags & MAP_RESIZABLE) {
+ DCHECK(Data);
+ if (Data->Vmo == ZX_HANDLE_INVALID)
+ Data->Vmo = Vmo;
+ else
+ DCHECK_EQ(Data->Vmo, Vmo);
+ } else {
+ CHECK_EQ(_zx_handle_close(Vmo), ZX_OK);
+ }
+ if (UNLIKELY(Status != ZX_OK)) {
+ if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+ dieOnError(Status, "zx_vmar_op_range", Size);
+ return nullptr;
+ }
+
+ if (Data)
+ Data->VmoSize += Size;
+
+ return reinterpret_cast<void *>(P);
+}
+
+void unmap(void *Addr, uptr Size, uptr Flags, MapPlatformData *Data) {
+ if (Flags & UNMAP_ALL) {
+ DCHECK_NE(Data, nullptr);
+ const zx_handle_t Vmar = Data->Vmar;
+ DCHECK_NE(Vmar, _zx_vmar_root_self());
+ // Destroying the vmar effectively unmaps the whole mapping.
+ CHECK_EQ(_zx_vmar_destroy(Vmar), ZX_OK);
+ CHECK_EQ(_zx_handle_close(Vmar), ZX_OK);
+ } else {
+ const zx_handle_t Vmar = (Data && Data->Vmar != ZX_HANDLE_INVALID)
+ ? Data->Vmar
+ : _zx_vmar_root_self();
+ const zx_status_t Status =
+ _zx_vmar_unmap(Vmar, reinterpret_cast<uintptr_t>(Addr), Size);
+ if (UNLIKELY(Status != ZX_OK))
+ dieOnError(Status, "zx_vmar_unmap", Size);
+ }
+ if (Data) {
+ if (Data->Vmo != ZX_HANDLE_INVALID)
+ CHECK_EQ(_zx_handle_close(Data->Vmo), ZX_OK);
+ memset(Data, 0, sizeof(*Data));
+ }
+}
+
+void setMemoryPermission(UNUSED uptr Addr, UNUSED uptr Size, UNUSED uptr Flags,
+ UNUSED MapPlatformData *Data) {
+ const zx_vm_option_t Prot =
+ (Flags & MAP_NOACCESS) ? 0 : (ZX_VM_PERM_READ | ZX_VM_PERM_WRITE);
+ DCHECK(Data);
+ DCHECK_NE(Data->Vmar, ZX_HANDLE_INVALID);
+ const zx_status_t Status = _zx_vmar_protect(Data->Vmar, Prot, Addr, Size);
+ if (Status != ZX_OK)
+ dieOnError(Status, "zx_vmar_protect", Size);
+}
+
+void releasePagesToOS(UNUSED uptr BaseAddress, uptr Offset, uptr Size,
+ MapPlatformData *Data) {
+ // TODO: DCHECK the BaseAddress is consistent with the data in
+ // MapPlatformData.
+ DCHECK(Data);
+ DCHECK_NE(Data->Vmar, ZX_HANDLE_INVALID);
+ DCHECK_NE(Data->Vmo, ZX_HANDLE_INVALID);
+ const zx_status_t Status =
+ _zx_vmo_op_range(Data->Vmo, ZX_VMO_OP_DECOMMIT, Offset, Size, NULL, 0);
+ CHECK_EQ(Status, ZX_OK);
+}
+
+const char *getEnv(const char *Name) { return getenv(Name); }
+
+// Note: we need to flag these methods with __TA_NO_THREAD_SAFETY_ANALYSIS
+// because the Fuchsia implementation of sync_mutex_t has clang thread safety
+// annotations. Were we to apply proper capability annotations to the top level
+// HybridMutex class itself, they would not be needed. As it stands, the
+// thread analysis thinks that we are locking the mutex and accidentally leaving
+// it locked on the way out.
+bool HybridMutex::tryLock() __TA_NO_THREAD_SAFETY_ANALYSIS {
+ // Size and alignment must be compatible between both types.
+ return sync_mutex_trylock(&M) == ZX_OK;
+}
+
+void HybridMutex::lockSlow() __TA_NO_THREAD_SAFETY_ANALYSIS {
+ sync_mutex_lock(&M);
+}
+
+void HybridMutex::unlock() __TA_NO_THREAD_SAFETY_ANALYSIS {
+ sync_mutex_unlock(&M);
+}
+
+void HybridMutex::assertHeldImpl() __TA_NO_THREAD_SAFETY_ANALYSIS {}
+
+u64 getMonotonicTime() { return _zx_clock_get_monotonic(); }
+u64 getMonotonicTimeFast() { return _zx_clock_get_monotonic(); }
+
+u32 getNumberOfCPUs() { return _zx_system_get_num_cpus(); }
+
+u32 getThreadID() { return 0; }
+
+bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) {
+ static_assert(MaxRandomLength <= ZX_CPRNG_DRAW_MAX_LEN, "");
+ if (UNLIKELY(!Buffer || !Length || Length > MaxRandomLength))
+ return false;
+ _zx_cprng_draw(Buffer, Length);
+ return true;
+}
+
+void outputRaw(const char *Buffer) {
+ __sanitizer_log_write(Buffer, strlen(Buffer));
+}
+
+void setAbortMessage(const char *Message) {}
+
+} // namespace scudo
+
+#endif // SCUDO_FUCHSIA
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.h
new file mode 100644
index 000000000000..c1dfd7638ec5
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.h
@@ -0,0 +1,32 @@
+//===-- fuchsia.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FUCHSIA_H_
+#define SCUDO_FUCHSIA_H_
+
+#include "platform.h"
+
+#if SCUDO_FUCHSIA
+
+#include <stdint.h>
+#include <zircon/types.h>
+
+namespace scudo {
+
+struct MapPlatformData {
+ zx_handle_t Vmar;
+ zx_handle_t Vmo;
+ uintptr_t VmarBase;
+ uint64_t VmoSize;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_FUCHSIA
+
+#endif // SCUDO_FUCHSIA_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
new file mode 100644
index 000000000000..2cef1c44fadc
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
@@ -0,0 +1,52 @@
+//===-- get_error_info_fuzzer.cpp -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define SCUDO_FUZZ
+#include "allocator_config.h"
+#include "combined.h"
+#include "common.h"
+
+#include <fuzzer/FuzzedDataProvider.h>
+
+#include <string>
+#include <vector>
+
+extern "C" int LLVMFuzzerTestOneInput(uint8_t *Data, size_t Size) {
+ using AllocatorT = scudo::Allocator<scudo::AndroidConfig>;
+ FuzzedDataProvider FDP(Data, Size);
+
+ uintptr_t FaultAddr = FDP.ConsumeIntegral<uintptr_t>();
+ uintptr_t MemoryAddr = FDP.ConsumeIntegral<uintptr_t>();
+
+ std::string MemoryAndTags =
+ FDP.ConsumeRandomLengthString(FDP.remaining_bytes());
+ const char *Memory = MemoryAndTags.c_str();
+ // Assume 16-byte alignment.
+ size_t MemorySize = (MemoryAndTags.length() / 17) * 16;
+ const char *MemoryTags = Memory + MemorySize;
+
+ std::string StackDepotBytes =
+ FDP.ConsumeRandomLengthString(FDP.remaining_bytes());
+
+ std::string RegionInfoBytes =
+ FDP.ConsumeRandomLengthString(FDP.remaining_bytes());
+ std::vector<char> RegionInfo(AllocatorT::getRegionInfoArraySize(), 0);
+ for (size_t i = 0; i < RegionInfoBytes.length() && i < RegionInfo.size();
+ ++i) {
+ RegionInfo[i] = RegionInfoBytes[i];
+ }
+
+ std::string RingBufferBytes = FDP.ConsumeRemainingBytesAsString();
+
+ scudo_error_info ErrorInfo;
+ AllocatorT::getErrorInfo(&ErrorInfo, FaultAddr, StackDepotBytes.data(),
+ StackDepotBytes.size(), RegionInfo.data(),
+ RingBufferBytes.data(), RingBufferBytes.size(),
+ Memory, MemoryTags, MemoryAddr, MemorySize);
+ return 0;
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/include/scudo/interface.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/include/scudo/interface.h
new file mode 100644
index 000000000000..a2dedea910cc
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/include/scudo/interface.h
@@ -0,0 +1,182 @@
+//===-- scudo/interface.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_INTERFACE_H_
+#define SCUDO_INTERFACE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+extern "C" {
+
+__attribute__((weak)) const char *__scudo_default_options(void);
+
+// Post-allocation & pre-deallocation hooks.
+__attribute__((weak)) void __scudo_allocate_hook(void *ptr, size_t size);
+__attribute__((weak)) void __scudo_deallocate_hook(void *ptr);
+
+// `realloc` involves both a deallocation and an allocation, but they are not
+// reported atomically. A consumer taking a snapshot right in the middle of a
+// `realloc`, between the deallocation and allocation reports, may therefore
+// miss the memory held by `realloc` and be confused. To alleviate that case,
+// the two `realloc` hooks below convey that the bundled hook calls belong to
+// one `realloc`. These hooks are optional and should only be used when a hooks
+// user wants to track reallocs more closely.
+//
+// See more details in the comment of `realloc` in wrapper_c.inc.
+__attribute__((weak)) void
+__scudo_realloc_allocate_hook(void *old_ptr, void *new_ptr, size_t size);
+__attribute__((weak)) void __scudo_realloc_deallocate_hook(void *old_ptr);
+
+void __scudo_print_stats(void);
+
+typedef void (*iterate_callback)(uintptr_t base, size_t size, void *arg);
+
+// Determine the likely cause of a tag check fault or other memory protection
+// error on a system with memory tagging support. The results are returned via
+// the error_info data structure. Up to three possible causes are returned in
+// the reports array, in decreasing order of probability. The remaining elements
+// of reports are zero-initialized.
+//
+// This function may be called from a different process from the one that
+// crashed. In this case, various data structures must be copied from the
+// crashing process to the process that analyzes the crash.
+//
+// This interface is not guaranteed to be stable and may change at any time.
+// Furthermore, the version of scudo in the crashing process must be the same as
+// the version in the process that analyzes the crash.
+//
+// fault_addr is the fault address. On aarch64 this is available in the system
+// register FAR_ELx, or siginfo.si_addr in Linux 5.11 or above. This address
+// must include the pointer tag; this is available if SA_EXPOSE_TAGBITS was set
+// in sigaction.sa_flags when the signal handler was registered. Note that the
+// kernel strips the tag from the field sigcontext.fault_address, so this
+// address is not suitable to be passed as fault_addr.
+//
+// stack_depot is a pointer to the stack depot data structure, which may be
+// obtained by calling the function __scudo_get_stack_depot_addr() in the
+// crashing process. The size of the stack depot is available by calling the
+// function __scudo_get_stack_depot_size().
+//
+// region_info is a pointer to the region info data structure, which may be
+// obtained by calling the function __scudo_get_region_info_addr() in the
+// crashing process. The size of the region info is available by calling the
+// function __scudo_get_region_info_size().
+//
+// memory is a pointer to a region of memory surrounding the fault address.
+// The more memory available via this pointer, the more likely it is that the
+// function will be able to analyze a crash correctly. It is recommended to
+// provide an amount of memory equal to 16 * the primary allocator's largest
+// size class on either side of the fault address.
+//
+// memory_tags is a pointer to an array of memory tags for the memory accessed
+// via memory. Each byte of this array corresponds to a region of memory of size
+// equal to the architecturally defined memory tag granule size (16 on aarch64).
+//
+// memory_addr is the start address of memory in the crashing process's address
+// space.
+//
+// memory_size is the size of the memory region referred to by the memory
+// pointer.
+void __scudo_get_error_info(struct scudo_error_info *error_info,
+ uintptr_t fault_addr, const char *stack_depot,
+ size_t stack_depot_size, const char *region_info,
+ const char *ring_buffer, size_t ring_buffer_size,
+ const char *memory, const char *memory_tags,
+ uintptr_t memory_addr, size_t memory_size);
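+// An illustrative call from an out-of-process analyzer (how the buffers are
+// collected from the crashing process is outside the scope of this header):
+//   struct scudo_error_info info = {};
+//   __scudo_get_error_info(&info, fault_addr, stack_depot, stack_depot_size,
+//                          region_info, ring_buffer, ring_buffer_size, memory,
+//                          memory_tags, memory_addr, memory_size);
+//   // info.reports[] now holds up to three candidate causes, most likely
+//   // first; unused entries have error_type == UNKNOWN.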
+
+enum scudo_error_type {
+ UNKNOWN,
+ USE_AFTER_FREE,
+ BUFFER_OVERFLOW,
+ BUFFER_UNDERFLOW,
+};
+
+struct scudo_error_report {
+ enum scudo_error_type error_type;
+
+ uintptr_t allocation_address;
+ uintptr_t allocation_size;
+
+ uint32_t allocation_tid;
+ uintptr_t allocation_trace[64];
+
+ uint32_t deallocation_tid;
+ uintptr_t deallocation_trace[64];
+};
+
+struct scudo_error_info {
+ struct scudo_error_report reports[3];
+};
+
+const char *__scudo_get_stack_depot_addr(void);
+size_t __scudo_get_stack_depot_size(void);
+
+const char *__scudo_get_region_info_addr(void);
+size_t __scudo_get_region_info_size(void);
+
+const char *__scudo_get_ring_buffer_addr(void);
+size_t __scudo_get_ring_buffer_size(void);
+
+#ifndef M_DECAY_TIME
+#define M_DECAY_TIME -100
+#endif
+
+#ifndef M_PURGE
+#define M_PURGE -101
+#endif
+
+#ifndef M_PURGE_ALL
+#define M_PURGE_ALL -104
+#endif
+
+// Tune the allocator's choice of memory tags to make it more likely that
+// a certain class of memory errors will be detected. The value argument should
+// be one of the M_MEMTAG_TUNING_* constants below.
+#ifndef M_MEMTAG_TUNING
+#define M_MEMTAG_TUNING -102
+#endif
+
+// Per-thread memory initialization tuning. The value argument should be one of:
+// 1: Disable automatic heap initialization and, where possible, memory tagging,
+// on this thread.
+// 0: Normal behavior.
+#ifndef M_THREAD_DISABLE_MEM_INIT
+#define M_THREAD_DISABLE_MEM_INIT -103
+#endif
+
+#ifndef M_CACHE_COUNT_MAX
+#define M_CACHE_COUNT_MAX -200
+#endif
+
+#ifndef M_CACHE_SIZE_MAX
+#define M_CACHE_SIZE_MAX -201
+#endif
+
+#ifndef M_TSDS_COUNT_MAX
+#define M_TSDS_COUNT_MAX -202
+#endif
+
+// Tune for buffer overflows.
+#ifndef M_MEMTAG_TUNING_BUFFER_OVERFLOW
+#define M_MEMTAG_TUNING_BUFFER_OVERFLOW 0
+#endif
+
+// Tune for use-after-free.
+#ifndef M_MEMTAG_TUNING_UAF
+#define M_MEMTAG_TUNING_UAF 1
+#endif
+
+// Print internal stats to the log.
+#ifndef M_LOG_STATS
+#define M_LOG_STATS -205
+#endif
+
+} // extern "C"
+
+#endif // SCUDO_INTERFACE_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/internal_defs.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/internal_defs.h
new file mode 100644
index 000000000000..27c6b451ffe7
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/internal_defs.h
@@ -0,0 +1,166 @@
+//===-- internal_defs.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_INTERNAL_DEFS_H_
+#define SCUDO_INTERNAL_DEFS_H_
+
+#include "platform.h"
+
+#include <stdint.h>
+
+#ifndef SCUDO_DEBUG
+#define SCUDO_DEBUG 0
+#endif
+
+#define ARRAY_SIZE(A) (sizeof(A) / sizeof((A)[0]))
+
+// String related macros.
+
+#define STRINGIFY_(S) #S
+#define STRINGIFY(S) STRINGIFY_(S)
+#define CONCATENATE_(S, C) S##C
+#define CONCATENATE(S, C) CONCATENATE_(S, C)
+
+// Attributes & builtins related macros.
+
+#define INTERFACE __attribute__((visibility("default")))
+#define HIDDEN __attribute__((visibility("hidden")))
+#define WEAK __attribute__((weak))
+#define ALWAYS_INLINE inline __attribute__((always_inline))
+#define ALIAS(X) __attribute__((alias(X)))
+#define FORMAT(F, A) __attribute__((format(printf, F, A)))
+#define NOINLINE __attribute__((noinline))
+#define NORETURN __attribute__((noreturn))
+#define LIKELY(X) __builtin_expect(!!(X), 1)
+#define UNLIKELY(X) __builtin_expect(!!(X), 0)
+#if defined(__i386__) || defined(__x86_64__)
+// __builtin_prefetch(X) generates prefetcht0 on x86; use prefetchnta instead.
+#define PREFETCH(X) __asm__("prefetchnta (%0)" : : "r"(X))
+#else
+#define PREFETCH(X) __builtin_prefetch(X)
+#endif
+#define UNUSED __attribute__((unused))
+#define USED __attribute__((used))
+#define NOEXCEPT noexcept
+
+// This check is only available on Clang. This is essentially an alias of
+// C++20's 'constinit' specifier which will take care of this when (if?) we can
+// ask all libc's that use Scudo to compile us with C++20. Dynamic
+// initialization is bad; Scudo is designed to be lazily initialized on the
+// first call to malloc/free (and friends), and this generally happens in the
+// loader somewhere in libdl's init. After the loader is done, control is
+// transferred to libc's initialization, and the dynamic initializers are run.
+// If there's a dynamic initializer for Scudo, then it will clobber the
+// already-initialized Scudo, and re-initialize all its members back to default
+// values, causing various explosions. Unfortunately, marking
+// scudo::Allocator<>'s constructor as 'constexpr' isn't sufficient to prevent
+// dynamic initialization, as default initialization is fine under 'constexpr'
+// (but not 'constinit'). Clang at -O0, and gcc at all opt levels will emit a
+// dynamic initializer for any constant-initialized variables if there is a mix
+// of default-initialized and constant-initialized variables.
+//
+// If you're looking at this because your build failed, you probably introduced
+// a new member to scudo::Allocator<> (possibly transiently) that didn't have an
+// initializer. The fix is easy - just add one.
+#if defined(__has_attribute)
+#if __has_attribute(require_constant_initialization)
+#define SCUDO_REQUIRE_CONSTANT_INITIALIZATION \
+ __attribute__((__require_constant_initialization__))
+#else
+#define SCUDO_REQUIRE_CONSTANT_INITIALIZATION
+#endif
+#endif
+
+namespace scudo {
+
+typedef uintptr_t uptr;
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+typedef intptr_t sptr;
+typedef int8_t s8;
+typedef int16_t s16;
+typedef int32_t s32;
+typedef int64_t s64;
+
+// The following two functions have platform specific implementations.
+void outputRaw(const char *Buffer);
+void NORETURN die();
+
+#define RAW_CHECK_MSG(Expr, Msg) \
+ do { \
+ if (UNLIKELY(!(Expr))) { \
+ outputRaw(Msg); \
+ die(); \
+ } \
+ } while (false)
+
+#define RAW_CHECK(Expr) RAW_CHECK_MSG(Expr, #Expr)
+
+void NORETURN reportCheckFailed(const char *File, int Line,
+ const char *Condition, u64 Value1, u64 Value2);
+#define CHECK_IMPL(C1, Op, C2) \
+ do { \
+ if (UNLIKELY(!(C1 Op C2))) { \
+ scudo::reportCheckFailed(__FILE__, __LINE__, #C1 " " #Op " " #C2, \
+ (scudo::u64)C1, (scudo::u64)C2); \
+ scudo::die(); \
+ } \
+ } while (false)
+
+#define CHECK(A) CHECK_IMPL((A), !=, 0)
+#define CHECK_EQ(A, B) CHECK_IMPL((A), ==, (B))
+#define CHECK_NE(A, B) CHECK_IMPL((A), !=, (B))
+#define CHECK_LT(A, B) CHECK_IMPL((A), <, (B))
+#define CHECK_LE(A, B) CHECK_IMPL((A), <=, (B))
+#define CHECK_GT(A, B) CHECK_IMPL((A), >, (B))
+#define CHECK_GE(A, B) CHECK_IMPL((A), >=, (B))
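+// For example, CHECK_LE(Size, MaxSize) is a cheap branch when the condition
+// holds; otherwise it reports the failing condition together with both values
+// and the file/line via reportCheckFailed(), then die()s.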
+
+#if SCUDO_DEBUG
+#define DCHECK(A) CHECK(A)
+#define DCHECK_EQ(A, B) CHECK_EQ(A, B)
+#define DCHECK_NE(A, B) CHECK_NE(A, B)
+#define DCHECK_LT(A, B) CHECK_LT(A, B)
+#define DCHECK_LE(A, B) CHECK_LE(A, B)
+#define DCHECK_GT(A, B) CHECK_GT(A, B)
+#define DCHECK_GE(A, B) CHECK_GE(A, B)
+#else
+#define DCHECK(A) \
+ do { \
+ } while (false && (A))
+#define DCHECK_EQ(A, B) \
+ do { \
+ } while (false && (A) == (B))
+#define DCHECK_NE(A, B) \
+ do { \
+ } while (false && (A) != (B))
+#define DCHECK_LT(A, B) \
+ do { \
+ } while (false && (A) < (B))
+#define DCHECK_LE(A, B) \
+ do { \
+ } while (false && (A) <= (B))
+#define DCHECK_GT(A, B) \
+ do { \
+ } while (false && (A) > (B))
+#define DCHECK_GE(A, B) \
+ do { \
+ } while (false && (A) >= (B))
+#endif
+
+// The superfluous die() call effectively makes this macro NORETURN.
+#define UNREACHABLE(Msg) \
+ do { \
+ CHECK(0 && Msg); \
+ die(); \
+ } while (0)
+
+} // namespace scudo
+
+#endif // SCUDO_INTERNAL_DEFS_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.cpp
new file mode 100644
index 000000000000..274695108109
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.cpp
@@ -0,0 +1,242 @@
+//===-- linux.cpp -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+#if SCUDO_LINUX
+
+#include "common.h"
+#include "internal_defs.h"
+#include "linux.h"
+#include "mutex.h"
+#include "report_linux.h"
+#include "string_utils.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/futex.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <time.h>
+#include <unistd.h>
+
+#if SCUDO_ANDROID
+#include <sys/prctl.h>
+// Definitions of prctl arguments to set a vma name in Android kernels.
+#define ANDROID_PR_SET_VMA 0x53564d41
+#define ANDROID_PR_SET_VMA_ANON_NAME 0
+#endif
+
+namespace scudo {
+
+uptr getPageSize() { return static_cast<uptr>(sysconf(_SC_PAGESIZE)); }
+
+void NORETURN die() { abort(); }
+
+// TODO: Will be deprecated. Use the interfaces in MemMapLinux instead.
+void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
+ UNUSED MapPlatformData *Data) {
+ int MmapFlags = MAP_PRIVATE | MAP_ANONYMOUS;
+ int MmapProt;
+ if (Flags & MAP_NOACCESS) {
+ MmapFlags |= MAP_NORESERVE;
+ MmapProt = PROT_NONE;
+ } else {
+ MmapProt = PROT_READ | PROT_WRITE;
+ }
+#if defined(__aarch64__)
+#ifndef PROT_MTE
+#define PROT_MTE 0x20
+#endif
+ if (Flags & MAP_MEMTAG)
+ MmapProt |= PROT_MTE;
+#endif
+ if (Addr)
+ MmapFlags |= MAP_FIXED;
+ void *P = mmap(Addr, Size, MmapProt, MmapFlags, -1, 0);
+ if (P == MAP_FAILED) {
+ if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
+ reportMapError(errno == ENOMEM ? Size : 0);
+ return nullptr;
+ }
+#if SCUDO_ANDROID
+ if (Name)
+ prctl(ANDROID_PR_SET_VMA, ANDROID_PR_SET_VMA_ANON_NAME, P, Size, Name);
+#endif
+ return P;
+}
+
+// TODO: Will be deprecated. Use the interfaces in MemMapLinux instead.
+void unmap(void *Addr, uptr Size, UNUSED uptr Flags,
+ UNUSED MapPlatformData *Data) {
+ if (munmap(Addr, Size) != 0)
+ reportUnmapError(reinterpret_cast<uptr>(Addr), Size);
+}
+
+// TODO: Will be deprecated. Use the interfaces in MemMapLinux instead.
+void setMemoryPermission(uptr Addr, uptr Size, uptr Flags,
+ UNUSED MapPlatformData *Data) {
+ int Prot = (Flags & MAP_NOACCESS) ? PROT_NONE : (PROT_READ | PROT_WRITE);
+ if (mprotect(reinterpret_cast<void *>(Addr), Size, Prot) != 0)
+ reportProtectError(Addr, Size, Prot);
+}
+
+// TODO: Will be deprecated. Use the interfaces in MemMapLinux instead.
+void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
+ UNUSED MapPlatformData *Data) {
+ void *Addr = reinterpret_cast<void *>(BaseAddress + Offset);
+
+ while (madvise(Addr, Size, MADV_DONTNEED) == -1 && errno == EAGAIN) {
+ }
+}
+
+// Calling getenv should be fine (c)(tm) at any time.
+const char *getEnv(const char *Name) { return getenv(Name); }
+
+namespace {
+enum State : u32 { Unlocked = 0, Locked = 1, Sleeping = 2 };
+}
+
+bool HybridMutex::tryLock() {
+ return atomic_compare_exchange_strong(&M, Unlocked, Locked,
+ memory_order_acquire) == Unlocked;
+}
+
+// The following is based on https://akkadia.org/drepper/futex.pdf.
+void HybridMutex::lockSlow() {
+ u32 V = atomic_compare_exchange_strong(&M, Unlocked, Locked,
+ memory_order_acquire);
+ if (V == Unlocked)
+ return;
+ if (V != Sleeping)
+ V = atomic_exchange(&M, Sleeping, memory_order_acquire);
+ while (V != Unlocked) {
+ syscall(SYS_futex, reinterpret_cast<uptr>(&M), FUTEX_WAIT_PRIVATE, Sleeping,
+ nullptr, nullptr, 0);
+ V = atomic_exchange(&M, Sleeping, memory_order_acquire);
+ }
+}
+
+void HybridMutex::unlock() {
+ if (atomic_fetch_sub(&M, 1U, memory_order_release) != Locked) {
+ atomic_store(&M, Unlocked, memory_order_release);
+ syscall(SYS_futex, reinterpret_cast<uptr>(&M), FUTEX_WAKE_PRIVATE, 1,
+ nullptr, nullptr, 0);
+ }
+}
+
+void HybridMutex::assertHeldImpl() {
+ CHECK(atomic_load(&M, memory_order_acquire) != Unlocked);
+}
+
+u64 getMonotonicTime() {
+ timespec TS;
+ clock_gettime(CLOCK_MONOTONIC, &TS);
+ return static_cast<u64>(TS.tv_sec) * (1000ULL * 1000 * 1000) +
+ static_cast<u64>(TS.tv_nsec);
+}
+
+u64 getMonotonicTimeFast() {
+#if defined(CLOCK_MONOTONIC_COARSE)
+ timespec TS;
+ clock_gettime(CLOCK_MONOTONIC_COARSE, &TS);
+ return static_cast<u64>(TS.tv_sec) * (1000ULL * 1000 * 1000) +
+ static_cast<u64>(TS.tv_nsec);
+#else
+ return getMonotonicTime();
+#endif
+}
+
+u32 getNumberOfCPUs() {
+ cpu_set_t CPUs;
+ // sched_getaffinity can fail for a variety of legitimate reasons (lack of
+ // CAP_SYS_NICE, syscall filtering, etc), in which case we shall return 0.
+ if (sched_getaffinity(0, sizeof(cpu_set_t), &CPUs) != 0)
+ return 0;
+ return static_cast<u32>(CPU_COUNT(&CPUs));
+}
+
+u32 getThreadID() {
+#if SCUDO_ANDROID
+ return static_cast<u32>(gettid());
+#else
+ return static_cast<u32>(syscall(SYS_gettid));
+#endif
+}
+
+// Blocking is possibly unused if the getrandom block is not compiled in.
+bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) {
+ if (!Buffer || !Length || Length > MaxRandomLength)
+ return false;
+ ssize_t ReadBytes;
+#if defined(SYS_getrandom)
+#if !defined(GRND_NONBLOCK)
+#define GRND_NONBLOCK 1
+#endif
+ // Up to 256 bytes, getrandom will not be interrupted.
+ ReadBytes =
+ syscall(SYS_getrandom, Buffer, Length, Blocking ? 0 : GRND_NONBLOCK);
+ if (ReadBytes == static_cast<ssize_t>(Length))
+ return true;
+#endif // defined(SYS_getrandom)
+ // Up to 256 bytes, a read off /dev/urandom will not be interrupted.
+ // Blocking is moot here, O_NONBLOCK has no effect when opening /dev/urandom.
+ const int FileDesc = open("/dev/urandom", O_RDONLY);
+ if (FileDesc == -1)
+ return false;
+ ReadBytes = read(FileDesc, Buffer, Length);
+ close(FileDesc);
+ return (ReadBytes == static_cast<ssize_t>(Length));
+}
+
+// Allocation free syslog-like API.
+extern "C" WEAK int async_safe_write_log(int pri, const char *tag,
+ const char *msg);
+
+void outputRaw(const char *Buffer) {
+ if (&async_safe_write_log) {
+ constexpr s32 AndroidLogInfo = 4;
+ constexpr uptr MaxLength = 1024U;
+ char LocalBuffer[MaxLength];
+ while (strlen(Buffer) > MaxLength) {
+ uptr P;
+ for (P = MaxLength - 1; P > 0; P--) {
+ if (Buffer[P] == '\n') {
+ memcpy(LocalBuffer, Buffer, P);
+ LocalBuffer[P] = '\0';
+ async_safe_write_log(AndroidLogInfo, "scudo", LocalBuffer);
+ Buffer = &Buffer[P + 1];
+ break;
+ }
+ }
+ // If no newline was found, just log the buffer.
+ if (P == 0)
+ break;
+ }
+ async_safe_write_log(AndroidLogInfo, "scudo", Buffer);
+ } else {
+ (void)write(2, Buffer, strlen(Buffer));
+ }
+}
+
+extern "C" WEAK void android_set_abort_message(const char *);
+
+void setAbortMessage(const char *Message) {
+ if (&android_set_abort_message)
+ android_set_abort_message(Message);
+}
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.h
new file mode 100644
index 000000000000..72acb6da83a7
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.h
@@ -0,0 +1,25 @@
+//===-- linux.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_LINUX_H_
+#define SCUDO_LINUX_H_
+
+#include "platform.h"
+
+#if SCUDO_LINUX
+
+namespace scudo {
+
+// MapPlatformData is unused on Linux, define it as a minimally sized structure.
+struct MapPlatformData {};
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX
+
+#endif // SCUDO_LINUX_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/list.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/list.h
new file mode 100644
index 000000000000..0137667d1dcf
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/list.h
@@ -0,0 +1,240 @@
+//===-- list.h --------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_LIST_H_
+#define SCUDO_LIST_H_
+
+#include "internal_defs.h"
+
+namespace scudo {
+
+// Intrusive POD singly and doubly linked lists.
+// An object with all zero fields should represent a valid empty list. clear()
+// should be called on all non-zero-initialized objects before use.
+
+template <class T> class IteratorBase {
+public:
+ explicit IteratorBase(T *CurrentT) : Current(CurrentT) {}
+ IteratorBase &operator++() {
+ Current = Current->Next;
+ return *this;
+ }
+ bool operator!=(IteratorBase Other) const { return Current != Other.Current; }
+ T &operator*() { return *Current; }
+
+private:
+ T *Current;
+};
+
+template <class T> struct IntrusiveList {
+ bool empty() const { return Size == 0; }
+ uptr size() const { return Size; }
+
+ T *front() { return First; }
+ const T *front() const { return First; }
+ T *back() { return Last; }
+ const T *back() const { return Last; }
+
+ void clear() {
+ First = Last = nullptr;
+ Size = 0;
+ }
+
+ typedef IteratorBase<T> Iterator;
+ typedef IteratorBase<const T> ConstIterator;
+
+ Iterator begin() { return Iterator(First); }
+ Iterator end() { return Iterator(nullptr); }
+
+ ConstIterator begin() const { return ConstIterator(First); }
+ ConstIterator end() const { return ConstIterator(nullptr); }
+
+ void checkConsistency() const;
+
+protected:
+ uptr Size = 0;
+ T *First = nullptr;
+ T *Last = nullptr;
+};
+
+template <class T> void IntrusiveList<T>::checkConsistency() const {
+ if (Size == 0) {
+ CHECK_EQ(First, nullptr);
+ CHECK_EQ(Last, nullptr);
+ } else {
+ uptr Count = 0;
+ for (T *I = First;; I = I->Next) {
+ Count++;
+ if (I == Last)
+ break;
+ }
+ CHECK_EQ(this->size(), Count);
+ CHECK_EQ(Last->Next, nullptr);
+ }
+}
+
+template <class T> struct SinglyLinkedList : public IntrusiveList<T> {
+ using IntrusiveList<T>::First;
+ using IntrusiveList<T>::Last;
+ using IntrusiveList<T>::Size;
+ using IntrusiveList<T>::empty;
+
+ void push_back(T *X) {
+ X->Next = nullptr;
+ if (empty())
+ First = X;
+ else
+ Last->Next = X;
+ Last = X;
+ Size++;
+ }
+
+ void push_front(T *X) {
+ if (empty())
+ Last = X;
+ X->Next = First;
+ First = X;
+ Size++;
+ }
+
+ void pop_front() {
+ DCHECK(!empty());
+ First = First->Next;
+ if (!First)
+ Last = nullptr;
+ Size--;
+ }
+
+ // Inserts X immediately after Prev.
+ void insert(T *Prev, T *X) {
+ DCHECK(!empty());
+ DCHECK_NE(Prev, nullptr);
+ DCHECK_NE(X, nullptr);
+ X->Next = Prev->Next;
+ Prev->Next = X;
+ if (Last == Prev)
+ Last = X;
+ ++Size;
+ }
+
+ void extract(T *Prev, T *X) {
+ DCHECK(!empty());
+ DCHECK_NE(Prev, nullptr);
+ DCHECK_NE(X, nullptr);
+ DCHECK_EQ(Prev->Next, X);
+ Prev->Next = X->Next;
+ if (Last == X)
+ Last = Prev;
+ Size--;
+ }
+
+ void append_back(SinglyLinkedList<T> *L) {
+ DCHECK_NE(this, L);
+ if (L->empty())
+ return;
+ if (empty()) {
+ *this = *L;
+ } else {
+ Last->Next = L->First;
+ Last = L->Last;
+ Size += L->size();
+ }
+ L->clear();
+ }
+};
+
+template <class T> struct DoublyLinkedList : IntrusiveList<T> {
+ using IntrusiveList<T>::First;
+ using IntrusiveList<T>::Last;
+ using IntrusiveList<T>::Size;
+ using IntrusiveList<T>::empty;
+
+ void push_front(T *X) {
+ X->Prev = nullptr;
+ if (empty()) {
+ Last = X;
+ } else {
+ DCHECK_EQ(First->Prev, nullptr);
+ First->Prev = X;
+ }
+ X->Next = First;
+ First = X;
+ Size++;
+ }
+
+ // Inserts X before Y.
+ void insert(T *X, T *Y) {
+ if (Y == First)
+ return push_front(X);
+ T *Prev = Y->Prev;
+ // This is a hard CHECK to ensure consistency in the event of an intentional
+ // corruption of Y->Prev, to prevent a potential write-{4,8}.
+ CHECK_EQ(Prev->Next, Y);
+ Prev->Next = X;
+ X->Prev = Prev;
+ X->Next = Y;
+ Y->Prev = X;
+ Size++;
+ }
+
+ void push_back(T *X) {
+ X->Next = nullptr;
+ if (empty()) {
+ First = X;
+ } else {
+ DCHECK_EQ(Last->Next, nullptr);
+ Last->Next = X;
+ }
+ X->Prev = Last;
+ Last = X;
+ Size++;
+ }
+
+ void pop_front() {
+ DCHECK(!empty());
+ First = First->Next;
+ if (!First)
+ Last = nullptr;
+ else
+ First->Prev = nullptr;
+ Size--;
+ }
+
+ // The consistency of the adjacent links is aggressively checked in order to
+ // catch potential corruption attempts that could yield a mirrored
+ // write-{4,8} primitive. nullptr checks are deemed less vital.
+ void remove(T *X) {
+ T *Prev = X->Prev;
+ T *Next = X->Next;
+ if (Prev) {
+ CHECK_EQ(Prev->Next, X);
+ Prev->Next = Next;
+ }
+ if (Next) {
+ CHECK_EQ(Next->Prev, X);
+ Next->Prev = Prev;
+ }
+ if (First == X) {
+ DCHECK_EQ(Prev, nullptr);
+ First = Next;
+ } else {
+ DCHECK_NE(Prev, nullptr);
+ }
+ if (Last == X) {
+ DCHECK_EQ(Next, nullptr);
+ Last = Prev;
+ } else {
+ DCHECK_NE(Next, nullptr);
+ }
+ Size--;
+ }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_LIST_H_
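
A minimal usage sketch of the intrusive lists above, assuming an in-tree build where "list.h" resolves. The Node type and example() are hypothetical; element types only need to provide the Next (and, for DoublyLinkedList, Prev) fields that the list manipulates.

#include "list.h"

namespace {

// Hypothetical POD element: an intrusive list never allocates nodes, it only
// links the Next field of the objects handed to it.
struct Node {
  Node *Next;
  int Value;
};

void example() {
  scudo::SinglyLinkedList<Node> List; // Default-initialized == valid empty list.
  Node A = {nullptr, 1};
  Node B = {nullptr, 2};
  List.push_back(&A);
  List.push_back(&B);  // List is now A -> B; front() == &A, back() == &B.
  for (Node &N : List) // Iterates via IteratorBase over the Next pointers.
    (void)N.Value;
  List.pop_front();    // Unlinks A; B is now both front() and back().
}

} // namespace
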
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/local_cache.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/local_cache.h
new file mode 100644
index 000000000000..46d6affdc033
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/local_cache.h
@@ -0,0 +1,189 @@
+//===-- local_cache.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_LOCAL_CACHE_H_
+#define SCUDO_LOCAL_CACHE_H_
+
+#include "internal_defs.h"
+#include "list.h"
+#include "platform.h"
+#include "report.h"
+#include "stats.h"
+#include "string_utils.h"
+
+namespace scudo {
+
+template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
+ typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
+ typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;
+
+ void init(GlobalStats *S, SizeClassAllocator *A) {
+ DCHECK(isEmpty());
+ Stats.init();
+ if (LIKELY(S))
+ S->link(&Stats);
+ Allocator = A;
+ initCache();
+ }
+
+ void destroy(GlobalStats *S) {
+ drain();
+ if (LIKELY(S))
+ S->unlink(&Stats);
+ }
+
+ void *allocate(uptr ClassId) {
+ DCHECK_LT(ClassId, NumClasses);
+ PerClass *C = &PerClassArray[ClassId];
+ if (C->Count == 0) {
+ // Refill with half of the maximum number of cached blocks.
+ DCHECK_GT(C->MaxCount / 2, 0U);
+ if (UNLIKELY(!refill(C, ClassId, C->MaxCount / 2)))
+ return nullptr;
+ DCHECK_GT(C->Count, 0);
+ }
+ // We read ClassSize before accessing Chunks because it's adjacent to Count,
+ // while Chunks might be further off (depending on Count). That keeps the
+ // memory accesses in close quarters.
+ const uptr ClassSize = C->ClassSize;
+ CompactPtrT CompactP = C->Chunks[--C->Count];
+ Stats.add(StatAllocated, ClassSize);
+ Stats.sub(StatFree, ClassSize);
+ return Allocator->decompactPtr(ClassId, CompactP);
+ }
+
+ bool deallocate(uptr ClassId, void *P) {
+ CHECK_LT(ClassId, NumClasses);
+ PerClass *C = &PerClassArray[ClassId];
+
+ // If the cache is full, drain half of the blocks back to the main allocator.
+ const bool NeedToDrainCache = C->Count == C->MaxCount;
+ if (NeedToDrainCache)
+ drain(C, ClassId);
+ // See comment in allocate() about memory accesses.
+ const uptr ClassSize = C->ClassSize;
+ C->Chunks[C->Count++] =
+ Allocator->compactPtr(ClassId, reinterpret_cast<uptr>(P));
+ Stats.sub(StatAllocated, ClassSize);
+ Stats.add(StatFree, ClassSize);
+
+ return NeedToDrainCache;
+ }
+
+ bool isEmpty() const {
+ for (uptr I = 0; I < NumClasses; ++I)
+ if (PerClassArray[I].Count)
+ return false;
+ return true;
+ }
+
+ void drain() {
+ // Drain BatchClassId last as it may be needed while draining normal blocks.
+ for (uptr I = 0; I < NumClasses; ++I) {
+ if (I == BatchClassId)
+ continue;
+ while (PerClassArray[I].Count > 0)
+ drain(&PerClassArray[I], I);
+ }
+ while (PerClassArray[BatchClassId].Count > 0)
+ drain(&PerClassArray[BatchClassId], BatchClassId);
+ DCHECK(isEmpty());
+ }
+
+ void *getBatchClassBlock() {
+ void *B = allocate(BatchClassId);
+ if (UNLIKELY(!B))
+ reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
+ return B;
+ }
+
+ LocalStats &getStats() { return Stats; }
+
+ void getStats(ScopedString *Str) {
+ bool EmptyCache = true;
+ for (uptr I = 0; I < NumClasses; ++I) {
+ if (PerClassArray[I].Count == 0)
+ continue;
+
+ EmptyCache = false;
+ // The size of BatchClass is set to 0 intentionally. See the comment in
+ // initCache() for more details.
+ const uptr ClassSize = I == BatchClassId
+ ? SizeClassAllocator::getSizeByClassId(I)
+ : PerClassArray[I].ClassSize;
+ // Note that the string utils don't support printing u16, so we cast it to
+ // the commonly used type uptr.
+ Str->append(" %02zu (%6zu): cached: %4zu max: %4zu\n", I, ClassSize,
+ static_cast<uptr>(PerClassArray[I].Count),
+ static_cast<uptr>(PerClassArray[I].MaxCount));
+ }
+
+ if (EmptyCache)
+ Str->append(" No block is cached.\n");
+ }
+
+ static u16 getMaxCached(uptr Size) {
+ return Min(SizeClassMap::MaxNumCachedHint,
+ SizeClassMap::getMaxCachedHint(Size));
+ }
+
+private:
+ static const uptr NumClasses = SizeClassMap::NumClasses;
+ static const uptr BatchClassId = SizeClassMap::BatchClassId;
+ struct alignas(SCUDO_CACHE_LINE_SIZE) PerClass {
+ u16 Count;
+ u16 MaxCount;
+ // Note: ClassSize is zero for the transfer batch.
+ uptr ClassSize;
+ CompactPtrT Chunks[2 * SizeClassMap::MaxNumCachedHint];
+ };
+ PerClass PerClassArray[NumClasses] = {};
+ LocalStats Stats;
+ SizeClassAllocator *Allocator = nullptr;
+
+ NOINLINE void initCache() {
+ for (uptr I = 0; I < NumClasses; I++) {
+ PerClass *P = &PerClassArray[I];
+ const uptr Size = SizeClassAllocator::getSizeByClassId(I);
+ P->MaxCount = static_cast<u16>(2 * getMaxCached(Size));
+ if (I != BatchClassId) {
+ P->ClassSize = Size;
+ } else {
+ // ClassSize in this struct is only used for malloc/free stats, which
+ // should only track user allocations, not internal movements.
+ P->ClassSize = 0;
+ }
+ }
+ }
+
+ void destroyBatch(uptr ClassId, void *B) {
+ if (ClassId != BatchClassId)
+ deallocate(BatchClassId, B);
+ }
+
+ NOINLINE bool refill(PerClass *C, uptr ClassId, u16 MaxRefill) {
+ const u16 NumBlocksRefilled =
+ Allocator->popBlocks(this, ClassId, C->Chunks, MaxRefill);
+ DCHECK_LE(NumBlocksRefilled, MaxRefill);
+ C->Count = static_cast<u16>(C->Count + NumBlocksRefilled);
+ return NumBlocksRefilled != 0;
+ }
+
+ NOINLINE void drain(PerClass *C, uptr ClassId) {
+ const u16 Count = Min(static_cast<u16>(C->MaxCount / 2), C->Count);
+ Allocator->pushBlocks(this, ClassId, &C->Chunks[0], Count);
+ // u16 will be promoted to int by arithmetic type conversion.
+ C->Count = static_cast<u16>(C->Count - Count);
+ for (u16 I = 0; I < C->Count; I++)
+ C->Chunks[I] = C->Chunks[I + Count];
+ }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_LOCAL_CACHE_H_
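
The drain() above pushes the oldest half of the cached chunks back to the primary allocator and shifts the remainder to the front of the array. A standalone sketch of that shuffling, using plain C++ types and illustrative numbers:

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  // Mimics PerClass::Chunks with MaxCount == 8 and Count == 8 (cache full).
  std::uint16_t MaxCount = 8, Count = 8;
  unsigned Chunks[8] = {1, 2, 3, 4, 5, 6, 7, 8};

  // drain(): hand the first Min(MaxCount / 2, Count) chunks back to the
  // primary allocator (here we just print them), then shift the rest down.
  const std::uint16_t ToDrain = std::min<std::uint16_t>(MaxCount / 2, Count);
  for (std::uint16_t I = 0; I < ToDrain; I++)
    std::printf("drained %u\n", Chunks[I]);
  Count = static_cast<std::uint16_t>(Count - ToDrain);
  for (std::uint16_t I = 0; I < Count; I++)
    Chunks[I] = Chunks[I + ToDrain];
  // Chunks now starts with {5, 6, 7, 8} and Count == 4, leaving room for the
  // block currently being deallocated.
  return 0;
}
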
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map.cpp
new file mode 100644
index 000000000000..115cc34e7060
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map.cpp
@@ -0,0 +1,84 @@
+//===-- mem_map.cpp ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mem_map.h"
+
+#include "common.h"
+
+namespace scudo {
+
+bool MemMapDefault::mapImpl(uptr Addr, uptr Size, const char *Name,
+ uptr Flags) {
+ void *MappedAddr =
+ ::scudo::map(reinterpret_cast<void *>(Addr), Size, Name, Flags, &Data);
+ if (MappedAddr == nullptr)
+ return false;
+ Base = reinterpret_cast<uptr>(MappedAddr);
+ MappedBase = Base;
+ Capacity = Size;
+ return true;
+}
+
+void MemMapDefault::unmapImpl(uptr Addr, uptr Size) {
+ if (Size == Capacity) {
+ Base = MappedBase = Capacity = 0;
+ } else {
+ if (Base == Addr) {
+ Base = Addr + Size;
+ MappedBase = MappedBase == 0 ? Base : Max(MappedBase, Base);
+ }
+ Capacity -= Size;
+ }
+
+ ::scudo::unmap(reinterpret_cast<void *>(Addr), Size, UNMAP_ALL, &Data);
+}
+
+bool MemMapDefault::remapImpl(uptr Addr, uptr Size, const char *Name,
+ uptr Flags) {
+ void *RemappedPtr =
+ ::scudo::map(reinterpret_cast<void *>(Addr), Size, Name, Flags, &Data);
+ const uptr RemappedAddr = reinterpret_cast<uptr>(RemappedPtr);
+ MappedBase = MappedBase == 0 ? RemappedAddr : Min(MappedBase, RemappedAddr);
+ return RemappedAddr == Addr;
+}
+
+void MemMapDefault::releaseAndZeroPagesToOSImpl(uptr From, uptr Size) {
+ DCHECK_NE(MappedBase, 0U);
+ DCHECK_GE(From, MappedBase);
+ return ::scudo::releasePagesToOS(MappedBase, From - MappedBase, Size, &Data);
+}
+
+void MemMapDefault::setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags) {
+ return ::scudo::setMemoryPermission(Addr, Size, Flags);
+}
+
+void ReservedMemoryDefault::releaseImpl() {
+ ::scudo::unmap(reinterpret_cast<void *>(Base), Capacity, UNMAP_ALL, &Data);
+}
+
+bool ReservedMemoryDefault::createImpl(uptr Addr, uptr Size, const char *Name,
+ uptr Flags) {
+ void *Reserved = ::scudo::map(reinterpret_cast<void *>(Addr), Size, Name,
+ Flags | MAP_NOACCESS, &Data);
+ if (Reserved == nullptr)
+ return false;
+
+ Base = reinterpret_cast<uptr>(Reserved);
+ Capacity = Size;
+
+ return true;
+}
+
+ReservedMemoryDefault::MemMapT ReservedMemoryDefault::dispatchImpl(uptr Addr,
+ uptr Size) {
+ ReservedMemoryDefault::MemMapT NewMap(Addr, Size);
+ NewMap.setMapPlatformData(Data);
+ return NewMap;
+}
+
+} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map.h
new file mode 100644
index 000000000000..b92216cf271d
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map.h
@@ -0,0 +1,92 @@
+//===-- mem_map.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_MEM_MAP_H_
+#define SCUDO_MEM_MAP_H_
+
+#include "mem_map_base.h"
+
+#include "common.h"
+#include "internal_defs.h"
+
+// TODO: This is only used for `MapPlatformData`. Remove these includes when we
+// have all three platform specific `MemMap` and `ReservedMemory`
+// implementations.
+#include "fuchsia.h"
+#include "linux.h"
+#include "trusty.h"
+
+#include "mem_map_fuchsia.h"
+#include "mem_map_linux.h"
+
+namespace scudo {
+
+// This will be deprecated once every allocator is supported by each platform's
+// `MemMap` implementation.
+class MemMapDefault final : public MemMapBase<MemMapDefault> {
+public:
+ constexpr MemMapDefault() = default;
+ MemMapDefault(uptr Base, uptr Capacity) : Base(Base), Capacity(Capacity) {}
+
+ // Impls for base functions.
+ bool mapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
+ void unmapImpl(uptr Addr, uptr Size);
+ bool remapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
+ void setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags);
+ void releasePagesToOSImpl(uptr From, uptr Size) {
+ return releaseAndZeroPagesToOSImpl(From, Size);
+ }
+ void releaseAndZeroPagesToOSImpl(uptr From, uptr Size);
+ uptr getBaseImpl() { return Base; }
+ uptr getCapacityImpl() { return Capacity; }
+
+ void setMapPlatformData(MapPlatformData &NewData) { Data = NewData; }
+
+private:
+ uptr Base = 0;
+ uptr Capacity = 0;
+ uptr MappedBase = 0;
+ MapPlatformData Data = {};
+};
+
+// This will be deprecated once every allocator is supported by each platform's
+// `MemMap` implementation.
+class ReservedMemoryDefault final
+ : public ReservedMemory<ReservedMemoryDefault, MemMapDefault> {
+public:
+ constexpr ReservedMemoryDefault() = default;
+
+ bool createImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
+ void releaseImpl();
+ MemMapT dispatchImpl(uptr Addr, uptr Size);
+ uptr getBaseImpl() { return Base; }
+ uptr getCapacityImpl() { return Capacity; }
+
+private:
+ uptr Base = 0;
+ uptr Capacity = 0;
+ MapPlatformData Data = {};
+};
+
+#if SCUDO_LINUX
+using ReservedMemoryT = ReservedMemoryLinux;
+using MemMapT = ReservedMemoryT::MemMapT;
+#elif SCUDO_FUCHSIA
+using ReservedMemoryT = ReservedMemoryFuchsia;
+using MemMapT = ReservedMemoryT::MemMapT;
+#elif SCUDO_TRUSTY
+using ReservedMemoryT = ReservedMemoryDefault;
+using MemMapT = ReservedMemoryT::MemMapT;
+#else
+#error \
+ "Unsupported platform, please implement the ReservedMemory for your platform!"
+#endif
+
+} // namespace scudo
+
+#endif // SCUDO_MEM_MAP_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_base.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_base.h
new file mode 100644
index 000000000000..99ab0cba604f
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_base.h
@@ -0,0 +1,129 @@
+//===-- mem_map_base.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_MEM_MAP_BASE_H_
+#define SCUDO_MEM_MAP_BASE_H_
+
+#include "common.h"
+
+namespace scudo {
+
+// In Scudo, every memory operation is fulfilled through a platform-specific
+// `MemMap` instance. The essential APIs are listed in `MemMapBase` below.
+// This is implemented with CRTP, so each implementation has to provide all of
+// the 'Impl'-suffixed functions.
+template <class Derived> class MemMapBase {
+public:
+ constexpr MemMapBase() = default;
+
+ // This is used to map a new set of contiguous pages. Note that the `Addr` is
+ // only a suggestion to the system.
+ bool map(uptr Addr, uptr Size, const char *Name, uptr Flags = 0) {
+ DCHECK(!isAllocated());
+ return invokeImpl(&Derived::mapImpl, Addr, Size, Name, Flags);
+ }
+
+ // This is used to unmap partial/full pages from the beginning or the end,
+ // i.e., the remaining pages are expected to still be contiguous.
+ void unmap(uptr Addr, uptr Size) {
+ DCHECK(isAllocated());
+ DCHECK((Addr == getBase()) || (Addr + Size == getBase() + getCapacity()));
+ invokeImpl(&Derived::unmapImpl, Addr, Size);
+ }
+
+ // This is used to remap a mapped range (either from map() or dispatched from
+ // ReservedMemory). For example, we have reserved several pages and then we
+ // want to remap them with different accessibility.
+ bool remap(uptr Addr, uptr Size, const char *Name, uptr Flags = 0) {
+ DCHECK(isAllocated());
+ DCHECK((Addr >= getBase()) && (Addr + Size <= getBase() + getCapacity()));
+ return invokeImpl(&Derived::remapImpl, Addr, Size, Name, Flags);
+ }
+
+ // This is used to update the pages' access permission. For example, mark
+ // pages as no read/write permission.
+ void setMemoryPermission(uptr Addr, uptr Size, uptr Flags) {
+ DCHECK(isAllocated());
+ DCHECK((Addr >= getBase()) && (Addr + Size <= getBase() + getCapacity()));
+ return invokeImpl(&Derived::setMemoryPermissionImpl, Addr, Size, Flags);
+ }
+
+ // Suggest releasing a set of contiguous physical pages back to the OS. Note
+ // that only physical pages are supposed to be released. Any release of
+ // virtual pages may lead to undefined behavior.
+ void releasePagesToOS(uptr From, uptr Size) {
+ DCHECK(isAllocated());
+ DCHECK((From >= getBase()) && (From + Size <= getBase() + getCapacity()));
+ invokeImpl(&Derived::releasePagesToOSImpl, From, Size);
+ }
+ // This is similar to the above one except that any subsequent access to the
+ // released pages will return with zero-filled pages.
+ void releaseAndZeroPagesToOS(uptr From, uptr Size) {
+ DCHECK(isAllocated());
+ DCHECK((From >= getBase()) && (From + Size <= getBase() + getCapacity()));
+ invokeImpl(&Derived::releaseAndZeroPagesToOSImpl, From, Size);
+ }
+
+ uptr getBase() { return invokeImpl(&Derived::getBaseImpl); }
+ uptr getCapacity() { return invokeImpl(&Derived::getCapacityImpl); }
+
+ bool isAllocated() { return getBase() != 0U; }
+
+protected:
+ template <typename R, typename... Args>
+ R invokeImpl(R (Derived::*MemFn)(Args...), Args... args) {
+ return (static_cast<Derived *>(this)->*MemFn)(args...);
+ }
+};
+
+// `ReservedMemory` is a special memory handle which can be viewed as a page
+// allocator. `ReservedMemory` reserves a contiguous range of pages, and later
+// page requests can be fulfilled at designated addresses within that range.
+// This is used when we want to ensure the virtual address of the MemMap will
+// be in a known range. This is implemented with CRTP, so each implementation
+// has to provide all of the 'Impl'-suffixed functions.
+template <class Derived, typename MemMapTy> class ReservedMemory {
+public:
+ using MemMapT = MemMapTy;
+ constexpr ReservedMemory() = default;
+
+ // Reserve a chunk of memory at a suggested address.
+ bool create(uptr Addr, uptr Size, const char *Name, uptr Flags = 0) {
+ DCHECK(!isCreated());
+ return invokeImpl(&Derived::createImpl, Addr, Size, Name, Flags);
+ }
+
+ // Release the entire reserved memory.
+ void release() {
+ DCHECK(isCreated());
+ invokeImpl(&Derived::releaseImpl);
+ }
+
+ // Dispatch a sub-range of reserved memory. Note that any fragmentation of
+ // the reserved pages is managed by each implementation.
+ MemMapT dispatch(uptr Addr, uptr Size) {
+ DCHECK(isCreated());
+ DCHECK((Addr >= getBase()) && (Addr + Size <= getBase() + getCapacity()));
+ return invokeImpl(&Derived::dispatchImpl, Addr, Size);
+ }
+
+ uptr getBase() { return invokeImpl(&Derived::getBaseImpl); }
+ uptr getCapacity() { return invokeImpl(&Derived::getCapacityImpl); }
+
+ bool isCreated() { return getBase() != 0U; }
+
+protected:
+ template <typename R, typename... Args>
+ R invokeImpl(R (Derived::*MemFn)(Args...), Args... args) {
+ return (static_cast<Derived *>(this)->*MemFn)(args...);
+ }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_MEM_MAP_BASE_H_
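
A minimal sketch of what a CRTP implementation of MemMapBase looks like: a toy backend that provides every *Impl function the base forwards to, but performs no real system calls. MemMapNull is a hypothetical name, not a real platform backend.

#include "mem_map_base.h"

namespace scudo {

// Toy CRTP backend: it satisfies the contract by defining every *Impl
// function MemMapBase::invokeImpl dispatches to.
class MemMapNull final : public MemMapBase<MemMapNull> {
public:
  constexpr MemMapNull() = default;

  bool mapImpl(uptr Addr, uptr Size, UNUSED const char *Name,
               UNUSED uptr Flags) {
    Base = Addr ? Addr : 0x1000; // Pretend the system honored the hint.
    Capacity = Size;
    return true;
  }
  void unmapImpl(uptr Addr, uptr Size) {
    if (Size == Capacity) {
      Base = Capacity = 0;
    } else {
      if (Addr == Base)
        Base += Size;
      Capacity -= Size;
    }
  }
  bool remapImpl(UNUSED uptr Addr, UNUSED uptr Size, UNUSED const char *Name,
                 UNUSED uptr Flags) {
    return true;
  }
  void setMemoryPermissionImpl(UNUSED uptr Addr, UNUSED uptr Size,
                               UNUSED uptr Flags) {}
  void releasePagesToOSImpl(uptr From, uptr Size) {
    releaseAndZeroPagesToOSImpl(From, Size);
  }
  void releaseAndZeroPagesToOSImpl(UNUSED uptr From, UNUSED uptr Size) {}
  uptr getBaseImpl() { return Base; }
  uptr getCapacityImpl() { return Capacity; }

private:
  uptr Base = 0;
  uptr Capacity = 0;
};

} // namespace scudo

With this in place, `MemMapNull M; M.map(0, 4096, "test");` goes through MemMapBase::map(), which checks the state and then dispatches to mapImpl() via invokeImpl().
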
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp
new file mode 100644
index 000000000000..9d6df2bc6999
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp
@@ -0,0 +1,261 @@
+//===-- mem_map_fuchsia.cpp -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mem_map_fuchsia.h"
+
+#include "atomic_helpers.h"
+#include "common.h"
+#include "string_utils.h"
+
+#if SCUDO_FUCHSIA
+
+#include <zircon/process.h>
+#include <zircon/status.h>
+#include <zircon/syscalls.h>
+
+namespace scudo {
+
+static void NORETURN dieOnError(zx_status_t Status, const char *FnName,
+ uptr Size) {
+ ScopedString Error;
+ Error.append("SCUDO ERROR: %s failed with size %zuKB (%s)", FnName,
+ Size >> 10, _zx_status_get_string(Status));
+ outputRaw(Error.data());
+ die();
+}
+
+static void setVmoName(zx_handle_t Vmo, const char *Name) {
+ size_t Len = strlen(Name);
+ DCHECK_LT(Len, ZX_MAX_NAME_LEN);
+ zx_status_t Status = _zx_object_set_property(Vmo, ZX_PROP_NAME, Name, Len);
+ CHECK_EQ(Status, ZX_OK);
+}
+
+// Returns the (cached) base address of the root VMAR.
+static uptr getRootVmarBase() {
+ static atomic_uptr CachedResult = {0};
+
+ uptr Result = atomic_load(&CachedResult, memory_order_acquire);
+ if (UNLIKELY(!Result)) {
+ zx_info_vmar_t VmarInfo;
+ zx_status_t Status =
+ _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &VmarInfo,
+ sizeof(VmarInfo), nullptr, nullptr);
+ CHECK_EQ(Status, ZX_OK);
+ CHECK_NE(VmarInfo.base, 0);
+
+ atomic_store(&CachedResult, VmarInfo.base, memory_order_release);
+ Result = VmarInfo.base;
+ }
+
+ return Result;
+}
+
+// Lazily creates and then always returns the same zero-sized VMO.
+static zx_handle_t getPlaceholderVmo() {
+ static atomic_u32 StoredVmo = {ZX_HANDLE_INVALID};
+
+ zx_handle_t Vmo = atomic_load(&StoredVmo, memory_order_acquire);
+ if (UNLIKELY(Vmo == ZX_HANDLE_INVALID)) {
+ // Create a zero-sized placeholder VMO.
+ zx_status_t Status = _zx_vmo_create(0, 0, &Vmo);
+ if (UNLIKELY(Status != ZX_OK))
+ dieOnError(Status, "zx_vmo_create", 0);
+
+ setVmoName(Vmo, "scudo:reserved");
+
+ // Atomically store its handle. If some other thread wins the race, use its
+ // handle and discard ours.
+ zx_handle_t OldValue = atomic_compare_exchange_strong(
+ &StoredVmo, ZX_HANDLE_INVALID, Vmo, memory_order_acq_rel);
+ if (UNLIKELY(OldValue != ZX_HANDLE_INVALID)) {
+ Status = _zx_handle_close(Vmo);
+ CHECK_EQ(Status, ZX_OK);
+
+ Vmo = OldValue;
+ }
+ }
+
+ return Vmo;
+}
+
+// Checks if MAP_ALLOWNOMEM allows the given error code.
+static bool IsNoMemError(zx_status_t Status) {
+ // Note: _zx_vmar_map returns ZX_ERR_NO_RESOURCES if the VMAR does not contain
+ // a suitable free spot.
+ return Status == ZX_ERR_NO_MEMORY || Status == ZX_ERR_NO_RESOURCES;
+}
+
+// Note: this constructor is only called by ReservedMemoryFuchsia::dispatch.
+MemMapFuchsia::MemMapFuchsia(uptr Base, uptr Capacity)
+ : MapAddr(Base), WindowBase(Base), WindowSize(Capacity) {
+ // Create the VMO.
+ zx_status_t Status = _zx_vmo_create(Capacity, 0, &Vmo);
+ if (UNLIKELY(Status != ZX_OK))
+ dieOnError(Status, "zx_vmo_create", Capacity);
+
+ setVmoName(Vmo, "scudo:dispatched");
+}
+
+bool MemMapFuchsia::mapImpl(UNUSED uptr Addr, uptr Size, const char *Name,
+ uptr Flags) {
+ const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);
+ const bool PreCommit = !!(Flags & MAP_PRECOMMIT);
+ const bool NoAccess = !!(Flags & MAP_NOACCESS);
+
+ // Create the VMO.
+ zx_status_t Status = _zx_vmo_create(Size, 0, &Vmo);
+ if (UNLIKELY(Status != ZX_OK)) {
+ if (AllowNoMem && IsNoMemError(Status))
+ return false;
+ dieOnError(Status, "zx_vmo_create", Size);
+ }
+
+ if (Name != nullptr)
+ setVmoName(Vmo, Name);
+
+ // Map it.
+ zx_vm_option_t MapFlags = ZX_VM_ALLOW_FAULTS;
+ if (!NoAccess)
+ MapFlags |= ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
+ Status =
+ _zx_vmar_map(_zx_vmar_root_self(), MapFlags, 0, Vmo, 0, Size, &MapAddr);
+ if (UNLIKELY(Status != ZX_OK)) {
+ if (AllowNoMem && IsNoMemError(Status)) {
+ Status = _zx_handle_close(Vmo);
+ CHECK_EQ(Status, ZX_OK);
+
+ MapAddr = 0;
+ Vmo = ZX_HANDLE_INVALID;
+ return false;
+ }
+ dieOnError(Status, "zx_vmar_map", Size);
+ }
+
+ if (PreCommit) {
+ Status = _zx_vmar_op_range(_zx_vmar_root_self(), ZX_VMAR_OP_COMMIT, MapAddr,
+ Size, nullptr, 0);
+ CHECK_EQ(Status, ZX_OK);
+ }
+
+ WindowBase = MapAddr;
+ WindowSize = Size;
+ return true;
+}
+
+void MemMapFuchsia::unmapImpl(uptr Addr, uptr Size) {
+ zx_status_t Status;
+
+ if (Size == WindowSize) {
+ // NOTE: Closing first and then unmapping seems slightly faster than doing
+ // the same operations in the opposite order.
+ Status = _zx_handle_close(Vmo);
+ CHECK_EQ(Status, ZX_OK);
+ Status = _zx_vmar_unmap(_zx_vmar_root_self(), Addr, Size);
+ CHECK_EQ(Status, ZX_OK);
+
+ MapAddr = WindowBase = WindowSize = 0;
+ Vmo = ZX_HANDLE_INVALID;
+ } else {
+ // Unmap the subrange.
+ Status = _zx_vmar_unmap(_zx_vmar_root_self(), Addr, Size);
+ CHECK_EQ(Status, ZX_OK);
+
+ // Decommit the pages that we just unmapped.
+ Status = _zx_vmo_op_range(Vmo, ZX_VMO_OP_DECOMMIT, Addr - MapAddr, Size,
+ nullptr, 0);
+ CHECK_EQ(Status, ZX_OK);
+
+ if (Addr == WindowBase)
+ WindowBase += Size;
+ WindowSize -= Size;
+ }
+}
+
+bool MemMapFuchsia::remapImpl(uptr Addr, uptr Size, const char *Name,
+ uptr Flags) {
+ const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);
+ const bool PreCommit = !!(Flags & MAP_PRECOMMIT);
+ const bool NoAccess = !!(Flags & MAP_NOACCESS);
+
+ // NOTE: This will rename the *whole* VMO, not only the requested portion of
+ // it. But we cannot do better than this given the MemMap API. In practice,
+ // the upper layers of Scudo always pass the same Name for a given MemMap.
+ if (Name != nullptr)
+ setVmoName(Vmo, Name);
+
+ uptr MappedAddr;
+ zx_vm_option_t MapFlags = ZX_VM_ALLOW_FAULTS | ZX_VM_SPECIFIC_OVERWRITE;
+ if (!NoAccess)
+ MapFlags |= ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
+ zx_status_t Status =
+ _zx_vmar_map(_zx_vmar_root_self(), MapFlags, Addr - getRootVmarBase(),
+ Vmo, Addr - MapAddr, Size, &MappedAddr);
+ if (UNLIKELY(Status != ZX_OK)) {
+ if (AllowNoMem && IsNoMemError(Status))
+ return false;
+ dieOnError(Status, "zx_vmar_map", Size);
+ }
+ DCHECK_EQ(Addr, MappedAddr);
+
+ if (PreCommit) {
+ Status = _zx_vmar_op_range(_zx_vmar_root_self(), ZX_VMAR_OP_COMMIT, MapAddr,
+ Size, nullptr, 0);
+ CHECK_EQ(Status, ZX_OK);
+ }
+
+ return true;
+}
+
+void MemMapFuchsia::releaseAndZeroPagesToOSImpl(uptr From, uptr Size) {
+ zx_status_t Status = _zx_vmo_op_range(Vmo, ZX_VMO_OP_DECOMMIT, From - MapAddr,
+ Size, nullptr, 0);
+ CHECK_EQ(Status, ZX_OK);
+}
+
+void MemMapFuchsia::setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags) {
+ const bool NoAccess = !!(Flags & MAP_NOACCESS);
+
+ zx_vm_option_t MapFlags = 0;
+ if (!NoAccess)
+ MapFlags |= ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
+ zx_status_t Status =
+ _zx_vmar_protect(_zx_vmar_root_self(), MapFlags, Addr, Size);
+ CHECK_EQ(Status, ZX_OK);
+}
+
+bool ReservedMemoryFuchsia::createImpl(UNUSED uptr Addr, uptr Size,
+ UNUSED const char *Name, uptr Flags) {
+ const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);
+
+ // Reserve memory by mapping the placeholder VMO without any permission.
+ zx_status_t Status = _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_ALLOW_FAULTS, 0,
+ getPlaceholderVmo(), 0, Size, &Base);
+ if (UNLIKELY(Status != ZX_OK)) {
+ if (AllowNoMem && IsNoMemError(Status))
+ return false;
+ dieOnError(Status, "zx_vmar_map", Size);
+ }
+
+ Capacity = Size;
+ return true;
+}
+
+void ReservedMemoryFuchsia::releaseImpl() {
+ zx_status_t Status = _zx_vmar_unmap(_zx_vmar_root_self(), Base, Capacity);
+ CHECK_EQ(Status, ZX_OK);
+}
+
+ReservedMemoryFuchsia::MemMapT ReservedMemoryFuchsia::dispatchImpl(uptr Addr,
+ uptr Size) {
+ return ReservedMemoryFuchsia::MemMapT(Addr, Size);
+}
+
+} // namespace scudo
+
+#endif // SCUDO_FUCHSIA
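
getPlaceholderVmo() above follows a lock-free lazy-initialization pattern: create the resource speculatively, publish it with a compare-exchange, and discard the local copy if another thread won the race. A standalone sketch of the same pattern with std::atomic; makeResource/destroyResource are stand-ins rather than Zircon calls.

#include <atomic>
#include <cstdint>

using Handle = std::uint32_t;
constexpr Handle InvalidHandle = 0;

Handle makeResource() { return 42; } // Stand-in for e.g. _zx_vmo_create().
void destroyResource(Handle) {}      // Stand-in for e.g. _zx_handle_close().

Handle getSharedResource() {
  static std::atomic<Handle> Stored{InvalidHandle};

  Handle H = Stored.load(std::memory_order_acquire);
  if (H == InvalidHandle) {
    // Create speculatively; several threads may reach this point at once.
    H = makeResource();
    Handle Expected = InvalidHandle;
    if (!Stored.compare_exchange_strong(Expected, H,
                                        std::memory_order_acq_rel,
                                        std::memory_order_acquire)) {
      // Another thread published its handle first: discard ours, use theirs.
      destroyResource(H);
      H = Expected;
    }
  }
  return H;
}
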
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.h
new file mode 100644
index 000000000000..2e66f89cfca5
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.h
@@ -0,0 +1,75 @@
+//===-- mem_map_fuchsia.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_MEM_MAP_FUCHSIA_H_
+#define SCUDO_MEM_MAP_FUCHSIA_H_
+
+#include "mem_map_base.h"
+
+#if SCUDO_FUCHSIA
+
+#include <stdint.h>
+#include <zircon/types.h>
+
+namespace scudo {
+
+class MemMapFuchsia final : public MemMapBase<MemMapFuchsia> {
+public:
+ constexpr MemMapFuchsia() = default;
+
+ // Impls for base functions.
+ bool mapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
+ void unmapImpl(uptr Addr, uptr Size);
+ bool remapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
+ void setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags);
+ void releasePagesToOSImpl(uptr From, uptr Size) {
+ return releaseAndZeroPagesToOSImpl(From, Size);
+ }
+ void releaseAndZeroPagesToOSImpl(uptr From, uptr Size);
+ uptr getBaseImpl() { return WindowBase; }
+ uptr getCapacityImpl() { return WindowSize; }
+
+private:
+ friend class ReservedMemoryFuchsia;
+
+ // Used by ReservedMemoryFuchsia::dispatch.
+ MemMapFuchsia(uptr Base, uptr Capacity);
+
+ // Virtual memory address corresponding to VMO offset 0.
+ uptr MapAddr = 0;
+
+ // Virtual memory base address and size of the VMO subrange that is still in
+ // use. unmapImpl() can shrink this range, either at the beginning or at the
+ // end.
+ uptr WindowBase = 0;
+ uptr WindowSize = 0;
+
+ zx_handle_t Vmo = ZX_HANDLE_INVALID;
+};
+
+class ReservedMemoryFuchsia final
+ : public ReservedMemory<ReservedMemoryFuchsia, MemMapFuchsia> {
+public:
+ constexpr ReservedMemoryFuchsia() = default;
+
+ bool createImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
+ void releaseImpl();
+ MemMapT dispatchImpl(uptr Addr, uptr Size);
+ uptr getBaseImpl() { return Base; }
+ uptr getCapacityImpl() { return Capacity; }
+
+private:
+ uptr Base = 0;
+ uptr Capacity = 0;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_FUCHSIA
+
+#endif // SCUDO_MEM_MAP_FUCHSIA_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_linux.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_linux.cpp
new file mode 100644
index 000000000000..783c4f0d9ab0
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_linux.cpp
@@ -0,0 +1,153 @@
+//===-- mem_map_linux.cpp ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+#if SCUDO_LINUX
+
+#include "mem_map_linux.h"
+
+#include "common.h"
+#include "internal_defs.h"
+#include "linux.h"
+#include "mutex.h"
+#include "report_linux.h"
+#include "string_utils.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/futex.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <time.h>
+#include <unistd.h>
+
+#if SCUDO_ANDROID
+// TODO(chiahungduan): Review if we still need the following macros.
+#include <sys/prctl.h>
+// Definitions of prctl arguments to set a vma name in Android kernels.
+#define ANDROID_PR_SET_VMA 0x53564d41
+#define ANDROID_PR_SET_VMA_ANON_NAME 0
+#endif
+
+namespace scudo {
+
+static void *mmapWrapper(uptr Addr, uptr Size, const char *Name, uptr Flags) {
+ int MmapFlags = MAP_PRIVATE | MAP_ANONYMOUS;
+ int MmapProt;
+ if (Flags & MAP_NOACCESS) {
+ MmapFlags |= MAP_NORESERVE;
+ MmapProt = PROT_NONE;
+ } else {
+ MmapProt = PROT_READ | PROT_WRITE;
+ }
+#if defined(__aarch64__)
+#ifndef PROT_MTE
+#define PROT_MTE 0x20
+#endif
+ if (Flags & MAP_MEMTAG)
+ MmapProt |= PROT_MTE;
+#endif
+ if (Addr)
+ MmapFlags |= MAP_FIXED;
+ void *P =
+ mmap(reinterpret_cast<void *>(Addr), Size, MmapProt, MmapFlags, -1, 0);
+ if (P == MAP_FAILED) {
+ if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
+ reportMapError(errno == ENOMEM ? Size : 0);
+ return nullptr;
+ }
+#if SCUDO_ANDROID
+ if (Name)
+ prctl(ANDROID_PR_SET_VMA, ANDROID_PR_SET_VMA_ANON_NAME, P, Size, Name);
+#else
+ (void)Name;
+#endif
+
+ return P;
+}
+
+bool MemMapLinux::mapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags) {
+ void *P = mmapWrapper(Addr, Size, Name, Flags);
+ if (P == nullptr)
+ return false;
+
+ MapBase = reinterpret_cast<uptr>(P);
+ MapCapacity = Size;
+ return true;
+}
+
+void MemMapLinux::unmapImpl(uptr Addr, uptr Size) {
+ // If we unmap all the pages, also set `MapBase` to 0 to indicate an invalid
+ // status.
+ if (Size == MapCapacity) {
+ MapBase = MapCapacity = 0;
+ } else {
+ // This is a partial unmap; if the pages are unmapped from the beginning,
+ // shift `MapBase` to the new base.
+ if (MapBase == Addr)
+ MapBase = Addr + Size;
+ MapCapacity -= Size;
+ }
+
+ if (munmap(reinterpret_cast<void *>(Addr), Size) != 0)
+ reportUnmapError(Addr, Size);
+}
+
+bool MemMapLinux::remapImpl(uptr Addr, uptr Size, const char *Name,
+ uptr Flags) {
+ void *P = mmapWrapper(Addr, Size, Name, Flags);
+ if (reinterpret_cast<uptr>(P) != Addr)
+ reportMapError();
+ return true;
+}
+
+void MemMapLinux::setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags) {
+ int Prot = (Flags & MAP_NOACCESS) ? PROT_NONE : (PROT_READ | PROT_WRITE);
+ if (mprotect(reinterpret_cast<void *>(Addr), Size, Prot) != 0)
+ reportProtectError(Addr, Size, Prot);
+}
+
+void MemMapLinux::releaseAndZeroPagesToOSImpl(uptr From, uptr Size) {
+ void *Addr = reinterpret_cast<void *>(From);
+
+ while (madvise(Addr, Size, MADV_DONTNEED) == -1 && errno == EAGAIN) {
+ }
+}
+
+bool ReservedMemoryLinux::createImpl(uptr Addr, uptr Size, const char *Name,
+ uptr Flags) {
+ ReservedMemoryLinux::MemMapT MemMap;
+ if (!MemMap.map(Addr, Size, Name, Flags | MAP_NOACCESS))
+ return false;
+
+ MapBase = MemMap.getBase();
+ MapCapacity = MemMap.getCapacity();
+
+ return true;
+}
+
+void ReservedMemoryLinux::releaseImpl() {
+ if (munmap(reinterpret_cast<void *>(getBase()), getCapacity()) != 0)
+ reportUnmapError(getBase(), getCapacity());
+}
+
+ReservedMemoryLinux::MemMapT ReservedMemoryLinux::dispatchImpl(uptr Addr,
+ uptr Size) {
+ return ReservedMemoryLinux::MemMapT(Addr, Size);
+}
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_linux.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_linux.h
new file mode 100644
index 000000000000..7a89b3bff5ed
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_linux.h
@@ -0,0 +1,67 @@
+//===-- mem_map_linux.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_MEM_MAP_LINUX_H_
+#define SCUDO_MEM_MAP_LINUX_H_
+
+#include "platform.h"
+
+#if SCUDO_LINUX
+
+#include "common.h"
+#include "mem_map_base.h"
+
+namespace scudo {
+
+class MemMapLinux final : public MemMapBase<MemMapLinux> {
+public:
+ constexpr MemMapLinux() = default;
+ MemMapLinux(uptr Base, uptr Capacity)
+ : MapBase(Base), MapCapacity(Capacity) {}
+
+ // Impls for base functions.
+ bool mapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags = 0);
+ void unmapImpl(uptr Addr, uptr Size);
+ bool remapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags = 0);
+ void setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags);
+ void releasePagesToOSImpl(uptr From, uptr Size) {
+ return releaseAndZeroPagesToOSImpl(From, Size);
+ }
+ void releaseAndZeroPagesToOSImpl(uptr From, uptr Size);
+ uptr getBaseImpl() { return MapBase; }
+ uptr getCapacityImpl() { return MapCapacity; }
+
+private:
+ uptr MapBase = 0;
+ uptr MapCapacity = 0;
+};
+
+// This will be deprecated once every allocator is supported by each platform's
+// `MemMap` implementation.
+class ReservedMemoryLinux final
+ : public ReservedMemory<ReservedMemoryLinux, MemMapLinux> {
+public:
+ // The following two are the Impls for functions in `MemMapBase`.
+ uptr getBaseImpl() { return MapBase; }
+ uptr getCapacityImpl() { return MapCapacity; }
+
+ // These three are specific to `ReservedMemory`.
+ bool createImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
+ void releaseImpl();
+ MemMapT dispatchImpl(uptr Addr, uptr Size);
+
+private:
+ uptr MapBase = 0;
+ uptr MapCapacity = 0;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX
+
+#endif // SCUDO_MEM_MAP_LINUX_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/memtag.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/memtag.h
new file mode 100644
index 000000000000..1f6983e99404
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/memtag.h
@@ -0,0 +1,335 @@
+//===-- memtag.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_MEMTAG_H_
+#define SCUDO_MEMTAG_H_
+
+#include "internal_defs.h"
+
+#if SCUDO_CAN_USE_MTE
+#include <sys/auxv.h>
+#include <sys/prctl.h>
+#endif
+
+namespace scudo {
+
+#if (__clang_major__ >= 12 && defined(__aarch64__) && !defined(__ILP32__)) || \
+ defined(SCUDO_FUZZ)
+
+// We assume that Top-Byte Ignore is enabled if the architecture supports memory
+// tagging. Not all operating systems enable TBI, so we only claim architectural
+// support for memory tagging if the operating system enables TBI.
+// HWASan uses the top byte for its own purpose and Scudo should not touch it.
+#if SCUDO_CAN_USE_MTE && !defined(SCUDO_DISABLE_TBI) && \
+ !__has_feature(hwaddress_sanitizer)
+inline constexpr bool archSupportsMemoryTagging() { return true; }
+#else
+inline constexpr bool archSupportsMemoryTagging() { return false; }
+#endif
+
+inline constexpr uptr archMemoryTagGranuleSize() { return 16; }
+
+inline uptr untagPointer(uptr Ptr) { return Ptr & ((1ULL << 56) - 1); }
+
+inline uint8_t extractTag(uptr Ptr) { return (Ptr >> 56) & 0xf; }
+
+#else
+
+inline constexpr bool archSupportsMemoryTagging() { return false; }
+
+inline NORETURN uptr archMemoryTagGranuleSize() {
+ UNREACHABLE("memory tagging not supported");
+}
+
+inline NORETURN uptr untagPointer(uptr Ptr) {
+ (void)Ptr;
+ UNREACHABLE("memory tagging not supported");
+}
+
+inline NORETURN uint8_t extractTag(uptr Ptr) {
+ (void)Ptr;
+ UNREACHABLE("memory tagging not supported");
+}
+
+#endif
+
+#if __clang_major__ >= 12 && defined(__aarch64__) && !defined(__ILP32__)
+
+#if SCUDO_CAN_USE_MTE
+
+inline bool systemSupportsMemoryTagging() {
+#ifndef HWCAP2_MTE
+#define HWCAP2_MTE (1 << 18)
+#endif
+ return getauxval(AT_HWCAP2) & HWCAP2_MTE;
+}
+
+inline bool systemDetectsMemoryTagFaultsTestOnly() {
+#ifndef PR_SET_TAGGED_ADDR_CTRL
+#define PR_SET_TAGGED_ADDR_CTRL 54
+#endif
+#ifndef PR_GET_TAGGED_ADDR_CTRL
+#define PR_GET_TAGGED_ADDR_CTRL 56
+#endif
+#ifndef PR_TAGGED_ADDR_ENABLE
+#define PR_TAGGED_ADDR_ENABLE (1UL << 0)
+#endif
+#ifndef PR_MTE_TCF_SHIFT
+#define PR_MTE_TCF_SHIFT 1
+#endif
+#ifndef PR_MTE_TAG_SHIFT
+#define PR_MTE_TAG_SHIFT 3
+#endif
+#ifndef PR_MTE_TCF_NONE
+#define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
+#endif
+#ifndef PR_MTE_TCF_SYNC
+#define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
+#endif
+#ifndef PR_MTE_TCF_MASK
+#define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
+#endif
+ int res = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
+ if (res == -1)
+ return false;
+ return (static_cast<unsigned long>(res) & PR_MTE_TCF_MASK) != PR_MTE_TCF_NONE;
+}
+
+inline void enableSystemMemoryTaggingTestOnly() {
+ prctl(PR_SET_TAGGED_ADDR_CTRL,
+ PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC | (0xfffe << PR_MTE_TAG_SHIFT),
+ 0, 0, 0);
+}
+
+#else // !SCUDO_CAN_USE_MTE
+
+inline bool systemSupportsMemoryTagging() { return false; }
+
+inline NORETURN bool systemDetectsMemoryTagFaultsTestOnly() {
+ UNREACHABLE("memory tagging not supported");
+}
+
+inline NORETURN void enableSystemMemoryTaggingTestOnly() {
+ UNREACHABLE("memory tagging not supported");
+}
+
+#endif // SCUDO_CAN_USE_MTE
+
+class ScopedDisableMemoryTagChecks {
+ uptr PrevTCO;
+
+public:
+ ScopedDisableMemoryTagChecks() {
+ __asm__ __volatile__(
+ R"(
+ .arch_extension memtag
+ mrs %0, tco
+ msr tco, #1
+ )"
+ : "=r"(PrevTCO));
+ }
+
+ ~ScopedDisableMemoryTagChecks() {
+ __asm__ __volatile__(
+ R"(
+ .arch_extension memtag
+ msr tco, %0
+ )"
+ :
+ : "r"(PrevTCO));
+ }
+};
+
+inline uptr selectRandomTag(uptr Ptr, uptr ExcludeMask) {
+ ExcludeMask |= 1; // Always exclude Tag 0.
+ uptr TaggedPtr;
+ __asm__ __volatile__(
+ R"(
+ .arch_extension memtag
+ irg %[TaggedPtr], %[Ptr], %[ExcludeMask]
+ )"
+ : [TaggedPtr] "=r"(TaggedPtr)
+ : [Ptr] "r"(Ptr), [ExcludeMask] "r"(ExcludeMask));
+ return TaggedPtr;
+}
+
+inline uptr addFixedTag(uptr Ptr, uptr Tag) {
+ DCHECK_LT(Tag, 16);
+ DCHECK_EQ(untagPointer(Ptr), Ptr);
+ return Ptr | (Tag << 56);
+}
+
+inline uptr storeTags(uptr Begin, uptr End) {
+ DCHECK_EQ(0, Begin % 16);
+ uptr LineSize, Next, Tmp;
+ __asm__ __volatile__(
+ R"(
+ .arch_extension memtag
+
+ // Compute the cache line size in bytes (DCZID_EL0 stores it as the log2
+ // of the number of 4-byte words) and bail out to the slow path if DCZID_EL0
+ // indicates that the DC instructions are unavailable.
+ DCZID .req %[Tmp]
+ mrs DCZID, dczid_el0
+ tbnz DCZID, #4, 3f
+ and DCZID, DCZID, #15
+ mov %[LineSize], #4
+ lsl %[LineSize], %[LineSize], DCZID
+ .unreq DCZID
+
+ // Our main loop doesn't handle the case where we don't need to perform any
+ // DC GZVA operations. If the size of our tagged region is less than
+ // twice the cache line size, bail out to the slow path since it's not
+ // guaranteed that we'll be able to do a DC GZVA.
+ Size .req %[Tmp]
+ sub Size, %[End], %[Cur]
+ cmp Size, %[LineSize], lsl #1
+ b.lt 3f
+ .unreq Size
+
+ LineMask .req %[Tmp]
+ sub LineMask, %[LineSize], #1
+
+ // STZG until the start of the next cache line.
+ orr %[Next], %[Cur], LineMask
+ 1:
+ stzg %[Cur], [%[Cur]], #16
+ cmp %[Cur], %[Next]
+ b.lt 1b
+
+ // DC GZVA cache lines until we have no more full cache lines.
+ bic %[Next], %[End], LineMask
+ .unreq LineMask
+ 2:
+ dc gzva, %[Cur]
+ add %[Cur], %[Cur], %[LineSize]
+ cmp %[Cur], %[Next]
+ b.lt 2b
+
+ // STZG until the end of the tagged region. This loop is also used to handle
+ // slow path cases.
+ 3:
+ cmp %[Cur], %[End]
+ b.ge 4f
+ stzg %[Cur], [%[Cur]], #16
+ b 3b
+
+ 4:
+ )"
+ : [Cur] "+&r"(Begin), [LineSize] "=&r"(LineSize), [Next] "=&r"(Next),
+ [Tmp] "=&r"(Tmp)
+ : [End] "r"(End)
+ : "memory");
+ DCHECK_EQ(0, Begin % 16);
+ return Begin;
+}
+
+inline void storeTag(uptr Ptr) {
+ DCHECK_EQ(0, Ptr % 16);
+ __asm__ __volatile__(R"(
+ .arch_extension memtag
+ stg %0, [%0]
+ )"
+ :
+ : "r"(Ptr)
+ : "memory");
+}
+
+inline uptr loadTag(uptr Ptr) {
+ DCHECK_EQ(0, Ptr % 16);
+ uptr TaggedPtr = Ptr;
+ __asm__ __volatile__(
+ R"(
+ .arch_extension memtag
+ ldg %0, [%0]
+ )"
+ : "+r"(TaggedPtr)
+ :
+ : "memory");
+ return TaggedPtr;
+}
+
+#else
+
+inline NORETURN bool systemSupportsMemoryTagging() {
+ UNREACHABLE("memory tagging not supported");
+}
+
+inline NORETURN bool systemDetectsMemoryTagFaultsTestOnly() {
+ UNREACHABLE("memory tagging not supported");
+}
+
+inline NORETURN void enableSystemMemoryTaggingTestOnly() {
+ UNREACHABLE("memory tagging not supported");
+}
+
+struct ScopedDisableMemoryTagChecks {
+ ScopedDisableMemoryTagChecks() {}
+};
+
+inline NORETURN uptr selectRandomTag(uptr Ptr, uptr ExcludeMask) {
+ (void)Ptr;
+ (void)ExcludeMask;
+ UNREACHABLE("memory tagging not supported");
+}
+
+inline NORETURN uptr addFixedTag(uptr Ptr, uptr Tag) {
+ (void)Ptr;
+ (void)Tag;
+ UNREACHABLE("memory tagging not supported");
+}
+
+inline NORETURN uptr storeTags(uptr Begin, uptr End) {
+ (void)Begin;
+ (void)End;
+ UNREACHABLE("memory tagging not supported");
+}
+
+inline NORETURN void storeTag(uptr Ptr) {
+ (void)Ptr;
+ UNREACHABLE("memory tagging not supported");
+}
+
+inline NORETURN uptr loadTag(uptr Ptr) {
+ (void)Ptr;
+ UNREACHABLE("memory tagging not supported");
+}
+
+#endif
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmissing-noreturn"
+inline void setRandomTag(void *Ptr, uptr Size, uptr ExcludeMask,
+ uptr *TaggedBegin, uptr *TaggedEnd) {
+ *TaggedBegin = selectRandomTag(reinterpret_cast<uptr>(Ptr), ExcludeMask);
+ *TaggedEnd = storeTags(*TaggedBegin, *TaggedBegin + Size);
+}
+#pragma GCC diagnostic pop
+
+inline void *untagPointer(void *Ptr) {
+ return reinterpret_cast<void *>(untagPointer(reinterpret_cast<uptr>(Ptr)));
+}
+
+inline void *loadTag(void *Ptr) {
+ return reinterpret_cast<void *>(loadTag(reinterpret_cast<uptr>(Ptr)));
+}
+
+inline void *addFixedTag(void *Ptr, uptr Tag) {
+ return reinterpret_cast<void *>(
+ addFixedTag(reinterpret_cast<uptr>(Ptr), Tag));
+}
+
+template <typename Config>
+inline constexpr bool allocatorSupportsMemoryTagging() {
+ return archSupportsMemoryTagging() && Config::getMaySupportMemoryTagging() &&
+ (1 << SCUDO_MIN_ALIGNMENT_LOG) >= archMemoryTagGranuleSize();
+}
+
+} // namespace scudo
+
+#endif
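
A sketch of how the helpers above are typically combined when tagging a freshly mapped block. The tagging path only runs on an MTE-capable AArch64 system with tagged memory (e.g. mapped with MAP_MEMTAG); tagNewBlock is an illustrative wrapper, not part of Scudo.

#include "memtag.h"

namespace scudo {

// Illustrative only: tag Size bytes at Ptr with a random non-zero tag and
// return the tagged pointer to hand out. Ptr must be aligned to the 16-byte
// tag granule and Size must be a multiple of archMemoryTagGranuleSize().
inline void *tagNewBlock(void *Ptr, uptr Size) {
  if (!archSupportsMemoryTagging() || !systemSupportsMemoryTagging())
    return Ptr; // No MTE available: hand out the untagged pointer as-is.

  uptr TaggedBegin, TaggedEnd;
  // Picks a random tag (tag 0 is always excluded) and stores it over
  // [Ptr, Ptr + Size), zeroing the granules via STZG/DC GZVA as a side effect.
  setRandomTag(Ptr, Size, /*ExcludeMask=*/0, &TaggedBegin, &TaggedEnd);
  DCHECK_EQ(TaggedEnd, TaggedBegin + Size);
  return reinterpret_cast<void *>(TaggedBegin);
}

} // namespace scudo
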
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mutex.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mutex.h
new file mode 100644
index 000000000000..4caa945219b5
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mutex.h
@@ -0,0 +1,97 @@
+//===-- mutex.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_MUTEX_H_
+#define SCUDO_MUTEX_H_
+
+#include "atomic_helpers.h"
+#include "common.h"
+#include "thread_annotations.h"
+
+#include <string.h>
+
+#if SCUDO_FUCHSIA
+#include <lib/sync/mutex.h> // for sync_mutex_t
+#endif
+
+namespace scudo {
+
+class CAPABILITY("mutex") HybridMutex {
+public:
+ bool tryLock() TRY_ACQUIRE(true);
+ NOINLINE void lock() ACQUIRE() {
+ if (LIKELY(tryLock()))
+ return;
+ // The compiler may try to fully unroll the loop, ending up in a
+ // NumberOfTries*NumberOfYields block of pauses mixed with tryLocks. This
+ // is large, ugly and unneeded; a compact loop is better for our purpose
+ // here. Use a pragma to tell the compiler not to unroll the loop.
+#ifdef __clang__
+#pragma nounroll
+#endif
+ for (u8 I = 0U; I < NumberOfTries; I++) {
+ delayLoop();
+ if (tryLock())
+ return;
+ }
+ lockSlow();
+ }
+ void unlock() RELEASE();
+
+ // TODO(chiahungduan): In general, we may want to assert the owner of the
+ // lock as well. Given the current uses of HybridMutex, it's acceptable
+ // without asserting the owner. Re-evaluate this when we have scenarios that
+ // require more fine-grained lock granularity.
+ ALWAYS_INLINE void assertHeld() ASSERT_CAPABILITY(this) {
+ if (SCUDO_DEBUG)
+ assertHeldImpl();
+ }
+
+private:
+ void delayLoop() {
+ // The value approximates the average time spent accessing caches (the
+ // fastest operations), so that we are unlikely to wait too long for fast
+ // operations.
+ constexpr u32 SpinTimes = 16;
+ volatile u32 V = 0;
+ for (u32 I = 0; I < SpinTimes; ++I) {
+ u32 Tmp = V + 1;
+ V = Tmp;
+ }
+ }
+
+ void assertHeldImpl();
+
+ // TODO(chiahungduan): Adapt this value based on the scenario. E.g., the
+ // primary and secondary allocators have different allocation times.
+ static constexpr u8 NumberOfTries = 32U;
+
+#if SCUDO_LINUX
+ atomic_u32 M = {};
+#elif SCUDO_FUCHSIA
+ sync_mutex_t M = {};
+#endif
+
+ void lockSlow() ACQUIRE();
+};
+
+class SCOPED_CAPABILITY ScopedLock {
+public:
+ explicit ScopedLock(HybridMutex &M) ACQUIRE(M) : Mutex(M) { Mutex.lock(); }
+ ~ScopedLock() RELEASE() { Mutex.unlock(); }
+
+private:
+ HybridMutex &Mutex;
+
+ ScopedLock(const ScopedLock &) = delete;
+ void operator=(const ScopedLock &) = delete;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_MUTEX_H_
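
A short sketch of the intended usage of HybridMutex and ScopedLock, together with the GUARDED_BY annotation from thread_annotations.h that the CAPABILITY machinery is designed to work with; the Counter struct is hypothetical.

#include "mutex.h"
#include "thread_annotations.h"

namespace {

// Hypothetical counter protected by a HybridMutex. GUARDED_BY lets clang's
// -Wthread-safety verify that Count is only touched while the mutex is held.
struct Counter {
  scudo::HybridMutex Mutex;
  scudo::uptr Count GUARDED_BY(Mutex) = 0;

  void increment() {
    scudo::ScopedLock L(Mutex); // Locks here, unlocks at the end of the scope.
    Count++;
  }
};

} // namespace
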
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/options.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/options.h
new file mode 100644
index 000000000000..b20142a41590
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/options.h
@@ -0,0 +1,74 @@
+//===-- options.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_OPTIONS_H_
+#define SCUDO_OPTIONS_H_
+
+#include "atomic_helpers.h"
+#include "common.h"
+#include "memtag.h"
+
+namespace scudo {
+
+enum class OptionBit {
+ MayReturnNull,
+ FillContents0of2,
+ FillContents1of2,
+ DeallocTypeMismatch,
+ DeleteSizeMismatch,
+ TrackAllocationStacks,
+ UseOddEvenTags,
+ UseMemoryTagging,
+ AddLargeAllocationSlack,
+};
+
+struct Options {
+ u32 Val;
+
+ bool get(OptionBit Opt) const { return Val & (1U << static_cast<u32>(Opt)); }
+
+ FillContentsMode getFillContentsMode() const {
+ return static_cast<FillContentsMode>(
+ (Val >> static_cast<u32>(OptionBit::FillContents0of2)) & 3);
+ }
+};
+
+template <typename Config> bool useMemoryTagging(const Options &Options) {
+ return allocatorSupportsMemoryTagging<Config>() &&
+ Options.get(OptionBit::UseMemoryTagging);
+}
+
+struct AtomicOptions {
+ atomic_u32 Val = {};
+
+ Options load() const { return Options{atomic_load_relaxed(&Val)}; }
+
+ void clear(OptionBit Opt) {
+ atomic_fetch_and(&Val, ~(1U << static_cast<u32>(Opt)),
+ memory_order_relaxed);
+ }
+
+ void set(OptionBit Opt) {
+ atomic_fetch_or(&Val, 1U << static_cast<u32>(Opt), memory_order_relaxed);
+ }
+
+ void setFillContentsMode(FillContentsMode FillContents) {
+ u32 Opts = atomic_load_relaxed(&Val), NewOpts;
+ do {
+ NewOpts = Opts;
+ NewOpts &= ~(3U << static_cast<u32>(OptionBit::FillContents0of2));
+ NewOpts |= static_cast<u32>(FillContents)
+ << static_cast<u32>(OptionBit::FillContents0of2);
+ } while (!atomic_compare_exchange_strong(&Val, &Opts, NewOpts,
+ memory_order_relaxed));
+ }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_OPTIONS_H_
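
A small sketch of how the option bits are driven: boolean options occupy one bit each, while the fill mode spans the two FillContents0of2/FillContents1of2 bits. It assumes the FillContentsMode enum (NoFill, ZeroFill, ...) declared in common.h; exampleOptions() is illustrative only.

#include "options.h"

namespace scudo {

// Illustrative only: configure an AtomicOptions instance and read it back.
inline Options exampleOptions() {
  AtomicOptions O;
  O.set(OptionBit::MayReturnNull); // A single boolean option bit.
  O.setFillContentsMode(ZeroFill); // Occupies the two FillContents bits.

  const Options Snapshot = O.load(); // Relaxed-atomic snapshot of all bits.
  DCHECK(Snapshot.get(OptionBit::MayReturnNull));
  DCHECK_EQ(Snapshot.getFillContentsMode(), ZeroFill);
  return Snapshot;
}

} // namespace scudo
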
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/platform.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/platform.h
new file mode 100644
index 000000000000..5af1275e32d2
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/platform.h
@@ -0,0 +1,94 @@
+//===-- platform.h ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_PLATFORM_H_
+#define SCUDO_PLATFORM_H_
+
+// Transitive includes of stdint.h specify some of the defines checked below.
+#include <stdint.h>
+
+#if defined(__linux__) && !defined(__TRUSTY__)
+#define SCUDO_LINUX 1
+#else
+#define SCUDO_LINUX 0
+#endif
+
+// See https://android.googlesource.com/platform/bionic/+/master/docs/defines.md
+#if defined(__BIONIC__)
+#define SCUDO_ANDROID 1
+#else
+#define SCUDO_ANDROID 0
+#endif
+
+#if defined(__Fuchsia__)
+#define SCUDO_FUCHSIA 1
+#else
+#define SCUDO_FUCHSIA 0
+#endif
+
+#if defined(__TRUSTY__)
+#define SCUDO_TRUSTY 1
+#else
+#define SCUDO_TRUSTY 0
+#endif
+
+#if defined(__riscv) && (__riscv_xlen == 64)
+#define SCUDO_RISCV64 1
+#else
+#define SCUDO_RISCV64 0
+#endif
+
+#if defined(__LP64__)
+#define SCUDO_WORDSIZE 64U
+#else
+#define SCUDO_WORDSIZE 32U
+#endif
+
+#if SCUDO_WORDSIZE == 64U
+#define FIRST_32_SECOND_64(a, b) (b)
+#else
+#define FIRST_32_SECOND_64(a, b) (a)
+#endif
+
+#ifndef SCUDO_CAN_USE_PRIMARY64
+#define SCUDO_CAN_USE_PRIMARY64 (SCUDO_WORDSIZE == 64U)
+#endif
+
+#ifndef SCUDO_CAN_USE_MTE
+#define SCUDO_CAN_USE_MTE (SCUDO_LINUX || SCUDO_TRUSTY)
+#endif
+
+#ifndef SCUDO_ENABLE_HOOKS
+#define SCUDO_ENABLE_HOOKS 0
+#endif
+
+#ifndef SCUDO_MIN_ALIGNMENT_LOG
+// We force malloc-type functions to be aligned to std::max_align_t, but there
+// is no reason why the minimum alignment for all other functions can't be 8
+// bytes. Except obviously for applications making incorrect assumptions.
+// TODO(kostyak): define SCUDO_MIN_ALIGNMENT_LOG 3
+#define SCUDO_MIN_ALIGNMENT_LOG FIRST_32_SECOND_64(3, 4)
+#endif
+
+#if defined(__aarch64__)
+#define SCUDO_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48)
+#else
+#define SCUDO_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
+#endif
+
+// Older gcc versions have issues aligning to a constexpr and require an integer.
+// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56859 among others.
+#if defined(__powerpc__) || defined(__powerpc64__)
+#define SCUDO_CACHE_LINE_SIZE 128
+#else
+#define SCUDO_CACHE_LINE_SIZE 64
+#endif
+
+#define SCUDO_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12)
+
+#endif // SCUDO_PLATFORM_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary32.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary32.h
new file mode 100644
index 000000000000..ebfb8dfe0a31
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary32.h
@@ -0,0 +1,1177 @@
+//===-- primary32.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_PRIMARY32_H_
+#define SCUDO_PRIMARY32_H_
+
+#include "allocator_common.h"
+#include "bytemap.h"
+#include "common.h"
+#include "list.h"
+#include "local_cache.h"
+#include "options.h"
+#include "release.h"
+#include "report.h"
+#include "stats.h"
+#include "string_utils.h"
+#include "thread_annotations.h"
+
+namespace scudo {
+
+// SizeClassAllocator32 is an allocator for 32- or 64-bit address spaces.
+//
+// It maps Regions of 2^RegionSizeLog bytes, aligned on a 2^RegionSizeLog-byte
+// boundary, and keeps a bytemap of the mappable address space to track the
+// size class each Region is associated with.
+//
+// Mapped regions are split into equally sized Blocks according to the size
+// class they belong to, and the associated pointers are shuffled to prevent any
+// predictable address pattern (the predictability increases with the block
+// size).
+//
+// Regions for size class 0 are special and used to hold TransferBatches, which
+// allow transferring arrays of pointers from the global size class freelist to
+// the thread specific freelist for said class, and back.
+//
+// Memory used by this allocator is never unmapped but can be partially
+// reclaimed if the platform allows for it.
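+//
+// As an illustration (the actual region size and size classes depend on the
+// Config): with 2^18 = 256 KB regions, a region assigned to a 64-byte size
+// class is carved into 4096 blocks, and the bytemap entry for that region
+// stores ClassId + 1.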
+
+template <typename Config> class SizeClassAllocator32 {
+public:
+ typedef typename Config::CompactPtrT CompactPtrT;
+ typedef typename Config::SizeClassMap SizeClassMap;
+ static const uptr GroupSizeLog = Config::getGroupSizeLog();
+ // The bytemap can only track UINT8_MAX - 1 classes.
+ static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), "");
+ // Regions should be large enough to hold the largest Block.
+ static_assert((1UL << Config::getRegionSizeLog()) >= SizeClassMap::MaxSize,
+ "");
+ typedef SizeClassAllocator32<Config> ThisT;
+ typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
+ typedef TransferBatch<ThisT> TransferBatchT;
+ typedef BatchGroup<ThisT> BatchGroupT;
+
+ static_assert(sizeof(BatchGroupT) <= sizeof(TransferBatchT),
+ "BatchGroupT uses the same class size as TransferBatchT");
+
+ static uptr getSizeByClassId(uptr ClassId) {
+ return (ClassId == SizeClassMap::BatchClassId)
+ ? sizeof(TransferBatchT)
+ : SizeClassMap::getSizeByClassId(ClassId);
+ }
+
+ static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
+
+ void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
+ if (SCUDO_FUCHSIA)
+ reportError("SizeClassAllocator32 is not supported on Fuchsia");
+
+ if (SCUDO_TRUSTY)
+ reportError("SizeClassAllocator32 is not supported on Trusty");
+
+ DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
+ PossibleRegions.init();
+ u32 Seed;
+ const u64 Time = getMonotonicTimeFast();
+ if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
+ Seed = static_cast<u32>(
+ Time ^ (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6));
+ for (uptr I = 0; I < NumClasses; I++) {
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ Sci->RandState = getRandomU32(&Seed);
+ // Sci->MaxRegionIndex is already initialized to 0.
+ Sci->MinRegionIndex = NumRegions;
+ Sci->ReleaseInfo.LastReleaseAtNs = Time;
+ }
+
+ // The default value in the primary config has the higher priority.
+ if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
+ ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
+ setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
+ }
+
+ void unmapTestOnly() {
+ {
+ ScopedLock L(RegionsStashMutex);
+ while (NumberOfStashedRegions > 0) {
+ unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
+ RegionSize);
+ }
+ }
+
+ uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ ScopedLock L(Sci->Mutex);
+ if (Sci->MinRegionIndex < MinRegionIndex)
+ MinRegionIndex = Sci->MinRegionIndex;
+ if (Sci->MaxRegionIndex > MaxRegionIndex)
+ MaxRegionIndex = Sci->MaxRegionIndex;
+ *Sci = {};
+ }
+
+ ScopedLock L(ByteMapMutex);
+ for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
+ if (PossibleRegions[I])
+ unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
+ PossibleRegions.unmapTestOnly();
+ }
+
+ // When all blocks are freed, it has to be the same size as `AllocatedUser`.
+ void verifyAllBlocksAreReleasedTestOnly() {
+ // `BatchGroup` and `TransferBatch` also use the blocks from BatchClass.
+ uptr BatchClassUsedInFreeLists = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ // We have to count BatchClassUsedInFreeLists in other regions first.
+ if (I == SizeClassMap::BatchClassId)
+ continue;
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ ScopedLock L1(Sci->Mutex);
+ uptr TotalBlocks = 0;
+ for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
+ // `BG::Batches` are `TransferBatches`. +1 for `BatchGroup`.
+ BatchClassUsedInFreeLists += BG.Batches.size() + 1;
+ for (const auto &It : BG.Batches)
+ TotalBlocks += It.getCount();
+ }
+
+ const uptr BlockSize = getSizeByClassId(I);
+ DCHECK_EQ(TotalBlocks, Sci->AllocatedUser / BlockSize);
+ DCHECK_EQ(Sci->FreeListInfo.PushedBlocks, Sci->FreeListInfo.PoppedBlocks);
+ }
+
+ SizeClassInfo *Sci = getSizeClassInfo(SizeClassMap::BatchClassId);
+ ScopedLock L1(Sci->Mutex);
+ uptr TotalBlocks = 0;
+ for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
+ if (LIKELY(!BG.Batches.empty())) {
+ for (const auto &It : BG.Batches)
+ TotalBlocks += It.getCount();
+ } else {
+ // `BatchGroup` with empty freelist doesn't have `TransferBatch` record
+ // itself.
+ ++TotalBlocks;
+ }
+ }
+
+ const uptr BlockSize = getSizeByClassId(SizeClassMap::BatchClassId);
+ DCHECK_EQ(TotalBlocks + BatchClassUsedInFreeLists,
+ Sci->AllocatedUser / BlockSize);
+ const uptr BlocksInUse =
+ Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
+ DCHECK_EQ(BlocksInUse, BatchClassUsedInFreeLists);
+ }
+
+ CompactPtrT compactPtr(UNUSED uptr ClassId, uptr Ptr) const {
+ return static_cast<CompactPtrT>(Ptr);
+ }
+
+ void *decompactPtr(UNUSED uptr ClassId, CompactPtrT CompactPtr) const {
+ return reinterpret_cast<void *>(static_cast<uptr>(CompactPtr));
+ }
+
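+  // Returns the group base of a compact pointer. For example, with a
+  // hypothetical GroupSizeLog of 20, the mask is 0xFFFFF, so both 0x12345678
+  // and 0x123FFFFF map to the group base 0x12300000.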
+ uptr compactPtrGroupBase(CompactPtrT CompactPtr) {
+ const uptr Mask = (static_cast<uptr>(1) << GroupSizeLog) - 1;
+ return CompactPtr & ~Mask;
+ }
+
+ uptr decompactGroupBase(uptr CompactPtrGroupBase) {
+ return CompactPtrGroupBase;
+ }
+
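+  // Heuristic block size categories used by the release logic below, e.g.
+  // with 4 KB pages, blocks under 256 bytes count as "small" and blocks over
+  // 4 KB count as "large".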
+ ALWAYS_INLINE static bool isSmallBlock(uptr BlockSize) {
+ const uptr PageSize = getPageSizeCached();
+ return BlockSize < PageSize / 16U;
+ }
+
+ ALWAYS_INLINE static bool isLargeBlock(uptr BlockSize) {
+ const uptr PageSize = getPageSizeCached();
+ return BlockSize > PageSize;
+ }
+
+ u16 popBlocks(CacheT *C, uptr ClassId, CompactPtrT *ToArray,
+ const u16 MaxBlockCount) {
+ DCHECK_LT(ClassId, NumClasses);
+ SizeClassInfo *Sci = getSizeClassInfo(ClassId);
+ ScopedLock L(Sci->Mutex);
+
+ u16 PopCount = popBlocksImpl(C, ClassId, Sci, ToArray, MaxBlockCount);
+ if (UNLIKELY(PopCount == 0)) {
+ if (UNLIKELY(!populateFreeList(C, ClassId, Sci)))
+ return 0U;
+ PopCount = popBlocksImpl(C, ClassId, Sci, ToArray, MaxBlockCount);
+ DCHECK_NE(PopCount, 0U);
+ }
+
+ return PopCount;
+ }
+
+ // Push the array of free blocks to the designated batch group.
+ void pushBlocks(CacheT *C, uptr ClassId, CompactPtrT *Array, u32 Size) {
+ DCHECK_LT(ClassId, NumClasses);
+ DCHECK_GT(Size, 0);
+
+ SizeClassInfo *Sci = getSizeClassInfo(ClassId);
+ if (ClassId == SizeClassMap::BatchClassId) {
+ ScopedLock L(Sci->Mutex);
+ pushBatchClassBlocks(Sci, Array, Size);
+ return;
+ }
+
+ // TODO(chiahungduan): Consider not doing grouping if the group size is not
+ // greater than the block size with a certain scale.
+
+ // Sort the blocks so that blocks belonging to the same group can be pushed
+ // together.
+ bool SameGroup = true;
+ for (u32 I = 1; I < Size; ++I) {
+ if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I]))
+ SameGroup = false;
+ CompactPtrT Cur = Array[I];
+ u32 J = I;
+ while (J > 0 &&
+ compactPtrGroupBase(Cur) < compactPtrGroupBase(Array[J - 1])) {
+ Array[J] = Array[J - 1];
+ --J;
+ }
+ Array[J] = Cur;
+ }
+
+ ScopedLock L(Sci->Mutex);
+ pushBlocksImpl(C, ClassId, Sci, Array, Size, SameGroup);
+ }
+
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
+ // The BatchClassId must be locked last since other classes can use it.
+ for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
+ if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
+ continue;
+ getSizeClassInfo(static_cast<uptr>(I))->Mutex.lock();
+ }
+ getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.lock();
+ RegionsStashMutex.lock();
+ ByteMapMutex.lock();
+ }
+
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
+ ByteMapMutex.unlock();
+ RegionsStashMutex.unlock();
+ getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
+ for (uptr I = 0; I < NumClasses; I++) {
+ if (I == SizeClassMap::BatchClassId)
+ continue;
+ getSizeClassInfo(I)->Mutex.unlock();
+ }
+ }
+
+ template <typename F> void iterateOverBlocks(F Callback) {
+ uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ // TODO: The call of `iterateOverBlocks` requires disabling
+ // SizeClassAllocator32. We may consider locking each region on demand
+ // only.
+ Sci->Mutex.assertHeld();
+ if (Sci->MinRegionIndex < MinRegionIndex)
+ MinRegionIndex = Sci->MinRegionIndex;
+ if (Sci->MaxRegionIndex > MaxRegionIndex)
+ MaxRegionIndex = Sci->MaxRegionIndex;
+ }
+
+ // SizeClassAllocator32 is disabled, i.e., ByteMapMutex is held.
+ ByteMapMutex.assertHeld();
+
+ for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
+ if (PossibleRegions[I] &&
+ (PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) {
+ const uptr BlockSize = getSizeByClassId(PossibleRegions[I] - 1U);
+ const uptr From = I * RegionSize;
+ const uptr To = From + (RegionSize / BlockSize) * BlockSize;
+ for (uptr Block = From; Block < To; Block += BlockSize)
+ Callback(Block);
+ }
+ }
+ }
+
+ void getStats(ScopedString *Str) {
+ // TODO(kostyak): get the RSS per region.
+ uptr TotalMapped = 0;
+ uptr PoppedBlocks = 0;
+ uptr PushedBlocks = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ ScopedLock L(Sci->Mutex);
+ TotalMapped += Sci->AllocatedUser;
+ PoppedBlocks += Sci->FreeListInfo.PoppedBlocks;
+ PushedBlocks += Sci->FreeListInfo.PushedBlocks;
+ }
+ Str->append("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
+ "remains %zu\n",
+ TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks);
+ for (uptr I = 0; I < NumClasses; I++) {
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ ScopedLock L(Sci->Mutex);
+ getStats(Str, I, Sci);
+ }
+ }
+
+ void getFragmentationInfo(ScopedString *Str) {
+ Str->append(
+ "Fragmentation Stats: SizeClassAllocator32: page size = %zu bytes\n",
+ getPageSizeCached());
+
+ for (uptr I = 1; I < NumClasses; I++) {
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ ScopedLock L(Sci->Mutex);
+ getSizeClassFragmentationInfo(Sci, I, Str);
+ }
+ }
+
+ bool setOption(Option O, sptr Value) {
+ if (O == Option::ReleaseInterval) {
+ const s32 Interval = Max(
+ Min(static_cast<s32>(Value), Config::getMaxReleaseToOsIntervalMs()),
+ Config::getMinReleaseToOsIntervalMs());
+ atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
+ return true;
+ }
+ // Not supported by the Primary, but not an error either.
+ return true;
+ }
+
+ uptr tryReleaseToOS(uptr ClassId, ReleaseToOS ReleaseType) {
+ SizeClassInfo *Sci = getSizeClassInfo(ClassId);
+ // TODO: Once we have separate locks like primary64, we may consider using
+ // tryLock() as well.
+ ScopedLock L(Sci->Mutex);
+ return releaseToOSMaybe(Sci, ClassId, ReleaseType);
+ }
+
+ uptr releaseToOS(ReleaseToOS ReleaseType) {
+ uptr TotalReleasedBytes = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ if (I == SizeClassMap::BatchClassId)
+ continue;
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ ScopedLock L(Sci->Mutex);
+ TotalReleasedBytes += releaseToOSMaybe(Sci, I, ReleaseType);
+ }
+ return TotalReleasedBytes;
+ }
+
+ const char *getRegionInfoArrayAddress() const { return nullptr; }
+ static uptr getRegionInfoArraySize() { return 0; }
+
+ static BlockInfo findNearestBlock(UNUSED const char *RegionInfoData,
+ UNUSED uptr Ptr) {
+ return {};
+ }
+
+ AtomicOptions Options;
+
+private:
+ static const uptr NumClasses = SizeClassMap::NumClasses;
+ static const uptr RegionSize = 1UL << Config::getRegionSizeLog();
+ static const uptr NumRegions = SCUDO_MMAP_RANGE_SIZE >>
+ Config::getRegionSizeLog();
+ static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
+ typedef FlatByteMap<NumRegions> ByteMap;
+
+ struct ReleaseToOsInfo {
+ uptr BytesInFreeListAtLastCheckpoint;
+ uptr RangesReleased;
+ uptr LastReleasedBytes;
+ u64 LastReleaseAtNs;
+ };
+
+ struct BlocksInfo {
+ SinglyLinkedList<BatchGroupT> BlockList = {};
+ uptr PoppedBlocks = 0;
+ uptr PushedBlocks = 0;
+ };
+
+ struct alignas(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
+ HybridMutex Mutex;
+ BlocksInfo FreeListInfo GUARDED_BY(Mutex);
+ uptr CurrentRegion GUARDED_BY(Mutex);
+ uptr CurrentRegionAllocated GUARDED_BY(Mutex);
+ u32 RandState;
+ uptr AllocatedUser GUARDED_BY(Mutex);
+ // Lowest & highest region index allocated for this size class, to avoid
+ // looping through the whole NumRegions.
+ uptr MinRegionIndex GUARDED_BY(Mutex);
+ uptr MaxRegionIndex GUARDED_BY(Mutex);
+ ReleaseToOsInfo ReleaseInfo GUARDED_BY(Mutex);
+ };
+ static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
+
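+  // Maps an address to its region index. For example, with a hypothetical
+  // RegionSizeLog of 18 (256 KB regions), address 0x12345678 falls in region
+  // 1165.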
+ uptr computeRegionId(uptr Mem) {
+ const uptr Id = Mem >> Config::getRegionSizeLog();
+ CHECK_LT(Id, NumRegions);
+ return Id;
+ }
+
+ uptr allocateRegionSlow() {
+ uptr MapSize = 2 * RegionSize;
+ const uptr MapBase = reinterpret_cast<uptr>(
+ map(nullptr, MapSize, "scudo:primary", MAP_ALLOWNOMEM));
+ if (!MapBase)
+ return 0;
+ const uptr MapEnd = MapBase + MapSize;
+ uptr Region = MapBase;
+ if (isAligned(Region, RegionSize)) {
+ ScopedLock L(RegionsStashMutex);
+ if (NumberOfStashedRegions < MaxStashedRegions)
+ RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize;
+ else
+ MapSize = RegionSize;
+ } else {
+ Region = roundUp(MapBase, RegionSize);
+ unmap(reinterpret_cast<void *>(MapBase), Region - MapBase);
+ MapSize = RegionSize;
+ }
+ const uptr End = Region + MapSize;
+ if (End != MapEnd)
+ unmap(reinterpret_cast<void *>(End), MapEnd - End);
+
+ DCHECK_EQ(Region % RegionSize, 0U);
+ static_assert(Config::getRegionSizeLog() == GroupSizeLog,
+ "Memory group should be the same size as Region");
+
+ return Region;
+ }
+
+ uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) REQUIRES(Sci->Mutex) {
+ DCHECK_LT(ClassId, NumClasses);
+ uptr Region = 0;
+ {
+ ScopedLock L(RegionsStashMutex);
+ if (NumberOfStashedRegions > 0)
+ Region = RegionsStash[--NumberOfStashedRegions];
+ }
+ if (!Region)
+ Region = allocateRegionSlow();
+ if (LIKELY(Region)) {
+      // Sci->Mutex is held by the caller, so updating the Min/Max is safe.
+ const uptr RegionIndex = computeRegionId(Region);
+ if (RegionIndex < Sci->MinRegionIndex)
+ Sci->MinRegionIndex = RegionIndex;
+ if (RegionIndex > Sci->MaxRegionIndex)
+ Sci->MaxRegionIndex = RegionIndex;
+ ScopedLock L(ByteMapMutex);
+ PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId + 1U));
+ }
+ return Region;
+ }
+
+ SizeClassInfo *getSizeClassInfo(uptr ClassId) {
+ DCHECK_LT(ClassId, NumClasses);
+ return &SizeClassInfoArray[ClassId];
+ }
+
+ void pushBatchClassBlocks(SizeClassInfo *Sci, CompactPtrT *Array, u32 Size)
+ REQUIRES(Sci->Mutex) {
+ DCHECK_EQ(Sci, getSizeClassInfo(SizeClassMap::BatchClassId));
+
+    // Free blocks are recorded by TransferBatches in the freelist of every
+    // size class. In addition, TransferBatches themselves are allocated from
+    // BatchClassId. To avoid using an additional block to record the free
+    // blocks of BatchClassId, they are made self-contained, i.e., a
+    // TransferBatch records its own block address. See the figure below:
+ //
+ // TransferBatch at 0xABCD
+ // +----------------------------+
+ // | Free blocks' addr |
+ // | +------+------+------+ |
+ // | |0xABCD|... |... | |
+ // | +------+------+------+ |
+ // +----------------------------+
+ //
+ // When we allocate all the free blocks in the TransferBatch, the block used
+ // by TransferBatch is also free for use. We don't need to recycle the
+ // TransferBatch. Note that the correctness is maintained by the invariant,
+ //
+ // Each popBlocks() request returns the entire TransferBatch. Returning
+ // part of the blocks in a TransferBatch is invalid.
+ //
+ // This ensures that TransferBatch won't leak the address itself while it's
+ // still holding other valid data.
+ //
+ // Besides, BatchGroup is also allocated from BatchClassId and has its
+ // address recorded in the TransferBatch too. To maintain the correctness,
+ //
+    //   The address of the BatchGroup is always recorded in the last
+    //   TransferBatch in the freelist (which also implies that the freelist
+    //   should only be updated with push_front). Once the last TransferBatch
+    //   is popped, the block used by the BatchGroup is also free for use.
+ //
+ // With this approach, the blocks used by BatchGroup and TransferBatch are
+ // reusable and don't need additional space for them.
+
+ Sci->FreeListInfo.PushedBlocks += Size;
+ BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
+
+ if (BG == nullptr) {
+ // Construct `BatchGroup` on the last element.
+ BG = reinterpret_cast<BatchGroupT *>(
+ decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
+ --Size;
+ BG->Batches.clear();
+      // BatchClass doesn't use memory groups. Use `0` to indicate there's no
+      // memory group here.
+ BG->CompactPtrGroupBase = 0;
+      // `BG` is also the block of BatchClassId. Note that this is different
+      // from `CreateGroup` in `pushBlocksImpl`.
+ BG->PushedBlocks = 1;
+ BG->BytesInBGAtLastCheckpoint = 0;
+ BG->MaxCachedPerBatch =
+ CacheT::getMaxCached(getSizeByClassId(SizeClassMap::BatchClassId));
+
+ Sci->FreeListInfo.BlockList.push_front(BG);
+ }
+
+ if (UNLIKELY(Size == 0))
+ return;
+
+    // This happens in two cases:
+    //   1. We have just allocated a new `BatchGroup`.
+    //   2. Only one block is pushed when the freelist is empty.
+ if (BG->Batches.empty()) {
+ // Construct the `TransferBatch` on the last element.
+ TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(
+ decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
+ TB->clear();
+ // As mentioned above, addresses of `TransferBatch` and `BatchGroup` are
+ // recorded in the TransferBatch.
+ TB->add(Array[Size - 1]);
+ TB->add(
+ compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(BG)));
+ --Size;
+ DCHECK_EQ(BG->PushedBlocks, 1U);
+ // `TB` is also the block of BatchClassId.
+ BG->PushedBlocks += 1;
+ BG->Batches.push_front(TB);
+ }
+
+ TransferBatchT *CurBatch = BG->Batches.front();
+ DCHECK_NE(CurBatch, nullptr);
+
+ for (u32 I = 0; I < Size;) {
+ u16 UnusedSlots =
+ static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
+ if (UnusedSlots == 0) {
+ CurBatch = reinterpret_cast<TransferBatchT *>(
+ decompactPtr(SizeClassMap::BatchClassId, Array[I]));
+ CurBatch->clear();
+ // Self-contained
+ CurBatch->add(Array[I]);
+ ++I;
+ // TODO(chiahungduan): Avoid the use of push_back() in `Batches` of
+ // BatchClassId.
+ BG->Batches.push_front(CurBatch);
+ UnusedSlots = static_cast<u16>(BG->MaxCachedPerBatch - 1);
+ }
+      // `UnusedSlots` is a u16 so the result will also fit in a u16.
+ const u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
+ CurBatch->appendFromArray(&Array[I], AppendSize);
+ I += AppendSize;
+ }
+
+ BG->PushedBlocks += Size;
+ }
+ // Push the blocks to their batch group. The layout will be like,
+ //
+ // FreeListInfo.BlockList - > BG -> BG -> BG
+ // | | |
+ // v v v
+ // TB TB TB
+ // |
+ // v
+ // TB
+ //
+  // Each BatchGroup (BG) is associated with a unique group id and its free
+  // blocks are managed by a list of TransferBatches (TB). To reduce the cost
+  // of inserting blocks, the BGs are kept sorted and the input `Array` is
+  // expected to be sorted as well, so that maintaining the sorted property is
+  // cheap. Pass `SameGroup=true` to indicate that all blocks in the array are
+  // from the same group, in which case the group id of each block is not
+  // checked.
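+  //
+  // For example, pushing the sorted array {A1, A2, B1} (hypothetical compact
+  // pointers where A1 and A2 share one group base and B1 belongs to another)
+  // appends A1 and A2 to the BatchGroup of the first group and B1 to the
+  // BatchGroup of the second, creating either BatchGroup if it is not already
+  // in the list.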
+ //
+ // The region mutex needs to be held while calling this method.
+ void pushBlocksImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci,
+ CompactPtrT *Array, u32 Size, bool SameGroup = false)
+ REQUIRES(Sci->Mutex) {
+ DCHECK_NE(ClassId, SizeClassMap::BatchClassId);
+ DCHECK_GT(Size, 0U);
+
+ auto CreateGroup = [&](uptr CompactPtrGroupBase) {
+ BatchGroupT *BG =
+ reinterpret_cast<BatchGroupT *>(C->getBatchClassBlock());
+ BG->Batches.clear();
+ TransferBatchT *TB =
+ reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
+ TB->clear();
+
+ BG->CompactPtrGroupBase = CompactPtrGroupBase;
+ BG->Batches.push_front(TB);
+ BG->PushedBlocks = 0;
+ BG->BytesInBGAtLastCheckpoint = 0;
+ BG->MaxCachedPerBatch = TransferBatchT::MaxNumCached;
+
+ return BG;
+ };
+
+ auto InsertBlocks = [&](BatchGroupT *BG, CompactPtrT *Array, u32 Size) {
+ SinglyLinkedList<TransferBatchT> &Batches = BG->Batches;
+ TransferBatchT *CurBatch = Batches.front();
+ DCHECK_NE(CurBatch, nullptr);
+
+ for (u32 I = 0; I < Size;) {
+ DCHECK_GE(BG->MaxCachedPerBatch, CurBatch->getCount());
+ u16 UnusedSlots =
+ static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
+ if (UnusedSlots == 0) {
+ CurBatch =
+ reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
+ CurBatch->clear();
+ Batches.push_front(CurBatch);
+ UnusedSlots = BG->MaxCachedPerBatch;
+ }
+        // `UnusedSlots` is a u16 so the result will also fit in a u16.
+ u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
+ CurBatch->appendFromArray(&Array[I], AppendSize);
+ I += AppendSize;
+ }
+
+ BG->PushedBlocks += Size;
+ };
+
+ Sci->FreeListInfo.PushedBlocks += Size;
+ BatchGroupT *Cur = Sci->FreeListInfo.BlockList.front();
+
+ // In the following, `Cur` always points to the BatchGroup for blocks that
+ // will be pushed next. `Prev` is the element right before `Cur`.
+ BatchGroupT *Prev = nullptr;
+
+ while (Cur != nullptr &&
+ compactPtrGroupBase(Array[0]) > Cur->CompactPtrGroupBase) {
+ Prev = Cur;
+ Cur = Cur->Next;
+ }
+
+ if (Cur == nullptr ||
+ compactPtrGroupBase(Array[0]) != Cur->CompactPtrGroupBase) {
+ Cur = CreateGroup(compactPtrGroupBase(Array[0]));
+ if (Prev == nullptr)
+ Sci->FreeListInfo.BlockList.push_front(Cur);
+ else
+ Sci->FreeListInfo.BlockList.insert(Prev, Cur);
+ }
+
+ // All the blocks are from the same group, just push without checking group
+ // id.
+ if (SameGroup) {
+ for (u32 I = 0; I < Size; ++I)
+ DCHECK_EQ(compactPtrGroupBase(Array[I]), Cur->CompactPtrGroupBase);
+
+ InsertBlocks(Cur, Array, Size);
+ return;
+ }
+
+    // The blocks are sorted by group id. Determine each group's segment and
+    // push the segments to their groups together.
+ u32 Count = 1;
+ for (u32 I = 1; I < Size; ++I) {
+ if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I])) {
+ DCHECK_EQ(compactPtrGroupBase(Array[I - 1]), Cur->CompactPtrGroupBase);
+ InsertBlocks(Cur, Array + I - Count, Count);
+
+ while (Cur != nullptr &&
+ compactPtrGroupBase(Array[I]) > Cur->CompactPtrGroupBase) {
+ Prev = Cur;
+ Cur = Cur->Next;
+ }
+
+ if (Cur == nullptr ||
+ compactPtrGroupBase(Array[I]) != Cur->CompactPtrGroupBase) {
+ Cur = CreateGroup(compactPtrGroupBase(Array[I]));
+ DCHECK_NE(Prev, nullptr);
+ Sci->FreeListInfo.BlockList.insert(Prev, Cur);
+ }
+
+ Count = 1;
+ } else {
+ ++Count;
+ }
+ }
+
+ InsertBlocks(Cur, Array + Size - Count, Count);
+ }
+
+ u16 popBlocksImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci,
+ CompactPtrT *ToArray, const u16 MaxBlockCount)
+ REQUIRES(Sci->Mutex) {
+ if (Sci->FreeListInfo.BlockList.empty())
+ return 0U;
+
+ SinglyLinkedList<TransferBatchT> &Batches =
+ Sci->FreeListInfo.BlockList.front()->Batches;
+
+ if (Batches.empty()) {
+ DCHECK_EQ(ClassId, SizeClassMap::BatchClassId);
+ BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
+ Sci->FreeListInfo.BlockList.pop_front();
+
+      // The block used by `BatchGroup` is from BatchClassId. Turn the block
+      // into a `TransferBatch` containing a single block.
+ TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(BG);
+ ToArray[0] =
+ compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB));
+ Sci->FreeListInfo.PoppedBlocks += 1;
+ return 1U;
+ }
+
+    // So far, instead of always filling blocks up to `MaxBlockCount`, we only
+    // examine a single `TransferBatch` to minimize the time spent on the
+    // primary allocator. Besides, the sizes of `TransferBatch` and
+ // `CacheT::getMaxCached()` may also impact the time spent on accessing the
+ // primary allocator.
+ // TODO(chiahungduan): Evaluate if we want to always prepare `MaxBlockCount`
+ // blocks and/or adjust the size of `TransferBatch` according to
+ // `CacheT::getMaxCached()`.
+ TransferBatchT *B = Batches.front();
+ DCHECK_NE(B, nullptr);
+ DCHECK_GT(B->getCount(), 0U);
+
+    // BatchClassId should always take all blocks in the TransferBatch. Read
+    // the comment in `pushBatchClassBlocks()` for more details.
+ const u16 PopCount = ClassId == SizeClassMap::BatchClassId
+ ? B->getCount()
+ : Min(MaxBlockCount, B->getCount());
+ B->moveNToArray(ToArray, PopCount);
+
+ // TODO(chiahungduan): The deallocation of unused BatchClassId blocks can be
+ // done without holding `Mutex`.
+ if (B->empty()) {
+ Batches.pop_front();
+ // `TransferBatch` of BatchClassId is self-contained, no need to
+ // deallocate. Read the comment in `pushBatchClassBlocks()` for more
+ // details.
+ if (ClassId != SizeClassMap::BatchClassId)
+ C->deallocate(SizeClassMap::BatchClassId, B);
+
+ if (Batches.empty()) {
+ BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
+ Sci->FreeListInfo.BlockList.pop_front();
+
+        // We don't keep a BatchGroup with zero blocks to avoid empty-checking
+        // while allocating. Note that the block used for constructing the
+        // BatchGroup is recorded as a free block in the last element of
+        // BatchGroup::Batches, which means that once we pop the last
+        // TransferBatch, that block is implicitly deallocated.
+ if (ClassId != SizeClassMap::BatchClassId)
+ C->deallocate(SizeClassMap::BatchClassId, BG);
+ }
+ }
+
+ Sci->FreeListInfo.PoppedBlocks += PopCount;
+ return PopCount;
+ }
+
+ NOINLINE bool populateFreeList(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
+ REQUIRES(Sci->Mutex) {
+ uptr Region;
+ uptr Offset;
+    // If the size-class currently has a region associated with it, use it.
+    // The newly created blocks will be located after the currently allocated
+    // memory for that region (up to RegionSize). Otherwise, create a new
+    // region, where the new blocks will be carved from the beginning.
+ if (Sci->CurrentRegion) {
+ Region = Sci->CurrentRegion;
+ DCHECK_GT(Sci->CurrentRegionAllocated, 0U);
+ Offset = Sci->CurrentRegionAllocated;
+ } else {
+ DCHECK_EQ(Sci->CurrentRegionAllocated, 0U);
+ Region = allocateRegion(Sci, ClassId);
+ if (UNLIKELY(!Region))
+ return false;
+ C->getStats().add(StatMapped, RegionSize);
+ Sci->CurrentRegion = Region;
+ Offset = 0;
+ }
+
+ const uptr Size = getSizeByClassId(ClassId);
+ const u16 MaxCount = CacheT::getMaxCached(Size);
+ DCHECK_GT(MaxCount, 0U);
+    // The maximum number of blocks we should carve in the region is dictated
+    // by the maximum number of batches we want to fill, and the amount of
+    // memory left in the current region (we use the lower of the two). This
+    // will not be 0 as we ensure that a region can hold at least one block
+    // (via the static_assert and the check at the end of this function).
+ const u32 NumberOfBlocks =
+ Min(MaxNumBatches * MaxCount,
+ static_cast<u32>((RegionSize - Offset) / Size));
+ DCHECK_GT(NumberOfBlocks, 0U);
+
+ constexpr u32 ShuffleArraySize =
+ MaxNumBatches * TransferBatchT::MaxNumCached;
+ // Fill the transfer batches and put them in the size-class freelist. We
+ // need to randomize the blocks for security purposes, so we first fill a
+ // local array that we then shuffle before populating the batches.
+ CompactPtrT ShuffleArray[ShuffleArraySize];
+ DCHECK_LE(NumberOfBlocks, ShuffleArraySize);
+
+ uptr P = Region + Offset;
+ for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
+ ShuffleArray[I] = reinterpret_cast<CompactPtrT>(P);
+
+ if (ClassId != SizeClassMap::BatchClassId) {
+ u32 N = 1;
+ uptr CurGroup = compactPtrGroupBase(ShuffleArray[0]);
+ for (u32 I = 1; I < NumberOfBlocks; I++) {
+ if (UNLIKELY(compactPtrGroupBase(ShuffleArray[I]) != CurGroup)) {
+ shuffle(ShuffleArray + I - N, N, &Sci->RandState);
+ pushBlocksImpl(C, ClassId, Sci, ShuffleArray + I - N, N,
+ /*SameGroup=*/true);
+ N = 1;
+ CurGroup = compactPtrGroupBase(ShuffleArray[I]);
+ } else {
+ ++N;
+ }
+ }
+
+ shuffle(ShuffleArray + NumberOfBlocks - N, N, &Sci->RandState);
+ pushBlocksImpl(C, ClassId, Sci, &ShuffleArray[NumberOfBlocks - N], N,
+ /*SameGroup=*/true);
+ } else {
+ pushBatchClassBlocks(Sci, ShuffleArray, NumberOfBlocks);
+ }
+
+    // Note that `PushedBlocks` and `PoppedBlocks` are supposed to only record
+    // the requests from `pushBlocks()` and `popBlocks()`, which are the
+    // external interfaces. `populateFreeList()` is an internal interface, so
+    // we adjust the value back to avoid incorrectly inflating the stats.
+ Sci->FreeListInfo.PushedBlocks -= NumberOfBlocks;
+
+ const uptr AllocatedUser = Size * NumberOfBlocks;
+ C->getStats().add(StatFree, AllocatedUser);
+ DCHECK_LE(Sci->CurrentRegionAllocated + AllocatedUser, RegionSize);
+    // If there is not enough room left in the currently associated region to
+    // fit more blocks, we deassociate the region by resetting CurrentRegion
+    // and CurrentRegionAllocated. Otherwise, update the allocated amount.
+ if (RegionSize - (Sci->CurrentRegionAllocated + AllocatedUser) < Size) {
+ Sci->CurrentRegion = 0;
+ Sci->CurrentRegionAllocated = 0;
+ } else {
+ Sci->CurrentRegionAllocated += AllocatedUser;
+ }
+ Sci->AllocatedUser += AllocatedUser;
+
+ return true;
+ }
+
+ void getStats(ScopedString *Str, uptr ClassId, SizeClassInfo *Sci)
+ REQUIRES(Sci->Mutex) {
+ if (Sci->AllocatedUser == 0)
+ return;
+ const uptr BlockSize = getSizeByClassId(ClassId);
+ const uptr InUse =
+ Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
+ const uptr BytesInFreeList = Sci->AllocatedUser - InUse * BlockSize;
+ uptr PushedBytesDelta = 0;
+ if (BytesInFreeList >= Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
+ PushedBytesDelta =
+ BytesInFreeList - Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
+ }
+ const uptr AvailableChunks = Sci->AllocatedUser / BlockSize;
+ Str->append(" %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
+ "inuse: %6zu avail: %6zu releases: %6zu last released: %6zuK "
+ "latest pushed bytes: %6zuK\n",
+ ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
+ Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks,
+ InUse, AvailableChunks, Sci->ReleaseInfo.RangesReleased,
+ Sci->ReleaseInfo.LastReleasedBytes >> 10,
+ PushedBytesDelta >> 10);
+ }
+
+ void getSizeClassFragmentationInfo(SizeClassInfo *Sci, uptr ClassId,
+ ScopedString *Str) REQUIRES(Sci->Mutex) {
+ const uptr BlockSize = getSizeByClassId(ClassId);
+ const uptr First = Sci->MinRegionIndex;
+ const uptr Last = Sci->MaxRegionIndex;
+ const uptr Base = First * RegionSize;
+ const uptr NumberOfRegions = Last - First + 1U;
+ auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
+ ScopedLock L(ByteMapMutex);
+ return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
+ };
+
+ FragmentationRecorder Recorder;
+ if (!Sci->FreeListInfo.BlockList.empty()) {
+ PageReleaseContext Context =
+ markFreeBlocks(Sci, ClassId, BlockSize, Base, NumberOfRegions,
+ ReleaseToOS::ForceAll);
+ releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
+ }
+
+ const uptr PageSize = getPageSizeCached();
+ const uptr TotalBlocks = Sci->AllocatedUser / BlockSize;
+ const uptr InUseBlocks =
+ Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
+ uptr AllocatedPagesCount = 0;
+ if (TotalBlocks != 0U) {
+ for (uptr I = 0; I < NumberOfRegions; ++I) {
+ if (SkipRegion(I))
+ continue;
+ AllocatedPagesCount += RegionSize / PageSize;
+ }
+
+ DCHECK_NE(AllocatedPagesCount, 0U);
+ }
+
+ DCHECK_GE(AllocatedPagesCount, Recorder.getReleasedPagesCount());
+ const uptr InUsePages =
+ AllocatedPagesCount - Recorder.getReleasedPagesCount();
+ const uptr InUseBytes = InUsePages * PageSize;
+
+ uptr Integral;
+ uptr Fractional;
+ computePercentage(BlockSize * InUseBlocks, InUsePages * PageSize, &Integral,
+ &Fractional);
+ Str->append(" %02zu (%6zu): inuse/total blocks: %6zu/%6zu inuse/total "
+ "pages: %6zu/%6zu inuse bytes: %6zuK util: %3zu.%02zu%%\n",
+ ClassId, BlockSize, InUseBlocks, TotalBlocks, InUsePages,
+ AllocatedPagesCount, InUseBytes >> 10, Integral, Fractional);
+ }
+
+ NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
+ ReleaseToOS ReleaseType = ReleaseToOS::Normal)
+ REQUIRES(Sci->Mutex) {
+ const uptr BlockSize = getSizeByClassId(ClassId);
+
+ DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
+ const uptr BytesInFreeList =
+ Sci->AllocatedUser -
+ (Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks) *
+ BlockSize;
+
+ if (UNLIKELY(BytesInFreeList == 0))
+ return 0;
+
+ // ====================================================================== //
+ // 1. Check if we have enough free blocks and if it's worth doing a page
+ // release.
+ // ====================================================================== //
+ if (ReleaseType != ReleaseToOS::ForceAll &&
+ !hasChanceToReleasePages(Sci, BlockSize, BytesInFreeList,
+ ReleaseType)) {
+ return 0;
+ }
+
+ const uptr First = Sci->MinRegionIndex;
+ const uptr Last = Sci->MaxRegionIndex;
+ DCHECK_NE(Last, 0U);
+ DCHECK_LE(First, Last);
+ uptr TotalReleasedBytes = 0;
+ const uptr Base = First * RegionSize;
+ const uptr NumberOfRegions = Last - First + 1U;
+
+ // ==================================================================== //
+    // 2. Mark the free blocks so that we can tell which pages are in-use by
+    //    querying `PageReleaseContext`.
+ // ==================================================================== //
+ PageReleaseContext Context = markFreeBlocks(Sci, ClassId, BlockSize, Base,
+ NumberOfRegions, ReleaseType);
+ if (!Context.hasBlockMarked())
+ return 0;
+
+ // ==================================================================== //
+ // 3. Release the unused physical pages back to the OS.
+ // ==================================================================== //
+ ReleaseRecorder Recorder(Base);
+ auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
+ ScopedLock L(ByteMapMutex);
+ return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
+ };
+ releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
+
+ if (Recorder.getReleasedRangesCount() > 0) {
+ Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
+ Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
+ Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
+ TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
+ }
+ Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();
+
+ return TotalReleasedBytes;
+ }
+
+ bool hasChanceToReleasePages(SizeClassInfo *Sci, uptr BlockSize,
+ uptr BytesInFreeList, ReleaseToOS ReleaseType)
+ REQUIRES(Sci->Mutex) {
+ DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
+ const uptr PageSize = getPageSizeCached();
+
+ if (BytesInFreeList <= Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint)
+ Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
+
+ // Always update `BytesInFreeListAtLastCheckpoint` with the smallest value
+ // so that we won't underestimate the releasable pages. For example, the
+ // following is the region usage,
+ //
+ // BytesInFreeListAtLastCheckpoint AllocatedUser
+ // v v
+ // |--------------------------------------->
+ // ^ ^
+ // BytesInFreeList ReleaseThreshold
+ //
+ // In general, if we have collected enough bytes and the amount of free
+ // bytes meets the ReleaseThreshold, we will try to do page release. If we
+ // don't update `BytesInFreeListAtLastCheckpoint` when the current
+    // `BytesInFreeList` is smaller, it may take longer to accumulate enough
+    // freed blocks because we miss the bytes between
+ // (BytesInFreeListAtLastCheckpoint - BytesInFreeList).
+ const uptr PushedBytesDelta =
+ BytesInFreeList - Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
+ if (PushedBytesDelta < PageSize)
+ return false;
+
+    // Releasing smaller blocks is expensive, so we want to make sure that a
+    // significant amount of memory is free, and that a good number of batches
+    // have been pushed to the freelist, before attempting a release.
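+    // Concretely, for a Normal release this means that at least
+    // AllocatedUser / 16 (roughly 6%) worth of bytes must have been pushed
+    // back since the last checkpoint.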
+ if (isSmallBlock(BlockSize) && ReleaseType == ReleaseToOS::Normal)
+ if (PushedBytesDelta < Sci->AllocatedUser / 16U)
+ return false;
+
+ if (ReleaseType == ReleaseToOS::Normal) {
+ const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
+ if (IntervalMs < 0)
+ return false;
+
+      // The constant 8 here was selected by profiling some apps: the number
+      // of unreleased pages in the large size classes is around 16 pages or
+      // more. We use half of that as a heuristic, which also avoids doing a
+      // page release on every pushBlocks() attempt by large blocks.
+ const bool ByPassReleaseInterval =
+ isLargeBlock(BlockSize) && PushedBytesDelta > 8 * PageSize;
+ if (!ByPassReleaseInterval) {
+ if (Sci->ReleaseInfo.LastReleaseAtNs +
+ static_cast<u64>(IntervalMs) * 1000000 >
+ getMonotonicTimeFast()) {
+ // Memory was returned recently.
+ return false;
+ }
+ }
+ } // if (ReleaseType == ReleaseToOS::Normal)
+
+ return true;
+ }
+
+ PageReleaseContext markFreeBlocks(SizeClassInfo *Sci, const uptr ClassId,
+ const uptr BlockSize, const uptr Base,
+ const uptr NumberOfRegions,
+ ReleaseToOS ReleaseType)
+ REQUIRES(Sci->Mutex) {
+ const uptr PageSize = getPageSizeCached();
+ const uptr GroupSize = (1UL << GroupSizeLog);
+ const uptr CurGroupBase =
+ compactPtrGroupBase(compactPtr(ClassId, Sci->CurrentRegion));
+
+ PageReleaseContext Context(BlockSize, NumberOfRegions,
+ /*ReleaseSize=*/RegionSize);
+
+ auto DecompactPtr = [](CompactPtrT CompactPtr) {
+ return reinterpret_cast<uptr>(CompactPtr);
+ };
+ for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
+ const uptr GroupBase = decompactGroupBase(BG.CompactPtrGroupBase);
+      // The `GroupSize` may not be evenly divisible by `BlockSize`, which
+      // means there may be unused space at the end of the Region. Exclude
+      // that space to avoid an unused page map entry.
+ uptr AllocatedGroupSize = GroupBase == CurGroupBase
+ ? Sci->CurrentRegionAllocated
+ : roundDownSlow(GroupSize, BlockSize);
+ if (AllocatedGroupSize == 0)
+ continue;
+
+      // TransferBatches are pushed to the front of BG.Batches, so the first
+      // one may not be full.
+ const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
+ BG.Batches.front()->getCount();
+ const uptr BytesInBG = NumBlocks * BlockSize;
+
+ if (ReleaseType != ReleaseToOS::ForceAll) {
+ if (BytesInBG <= BG.BytesInBGAtLastCheckpoint) {
+ BG.BytesInBGAtLastCheckpoint = BytesInBG;
+ continue;
+ }
+
+ const uptr PushedBytesDelta = BytesInBG - BG.BytesInBGAtLastCheckpoint;
+ if (PushedBytesDelta < PageSize)
+ continue;
+
+        // Given the randomness property, we only try to release the pages if
+        // the bytes used by free blocks exceed a certain proportion of the
+        // allocated space.
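+        // For a 32-byte class, for instance, that threshold works out to
+        // 100 - 1 - 32 / 16 = 97% of the allocated group size.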
+ if (isSmallBlock(BlockSize) && (BytesInBG * 100U) / AllocatedGroupSize <
+ (100U - 1U - BlockSize / 16U)) {
+ continue;
+ }
+ }
+
+ // TODO: Consider updating this after page release if `ReleaseRecorder`
+ // can tell the released bytes in each group.
+ BG.BytesInBGAtLastCheckpoint = BytesInBG;
+
+ const uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
+ const uptr RegionIndex = (GroupBase - Base) / RegionSize;
+
+ if (NumBlocks == MaxContainedBlocks) {
+ for (const auto &It : BG.Batches)
+ for (u16 I = 0; I < It.getCount(); ++I)
+ DCHECK_EQ(compactPtrGroupBase(It.get(I)), BG.CompactPtrGroupBase);
+
+ const uptr To = GroupBase + AllocatedGroupSize;
+ Context.markRangeAsAllCounted(GroupBase, To, GroupBase, RegionIndex,
+ AllocatedGroupSize);
+ } else {
+ DCHECK_LT(NumBlocks, MaxContainedBlocks);
+
+        // Note that we don't always visit the blocks in every BatchGroup, so
+        // we may miss the chance of releasing certain pages that cross
+        // BatchGroups.
+ Context.markFreeBlocksInRegion(BG.Batches, DecompactPtr, GroupBase,
+ RegionIndex, AllocatedGroupSize,
+ /*MayContainLastBlockInRegion=*/true);
+ }
+
+      // In the rare case that the PageMap allocation fails, we may not be
+      // able to do the page release.
+ if (UNLIKELY(!Context.hasBlockMarked()))
+ break;
+ }
+
+ return Context;
+ }
+
+ SizeClassInfo SizeClassInfoArray[NumClasses] = {};
+
+ HybridMutex ByteMapMutex;
+ // Track the regions in use, 0 is unused, otherwise store ClassId + 1.
+ ByteMap PossibleRegions GUARDED_BY(ByteMapMutex) = {};
+ atomic_s32 ReleaseToOsIntervalMs = {};
+ // Unless several threads request regions simultaneously from different size
+ // classes, the stash rarely contains more than 1 entry.
+ static constexpr uptr MaxStashedRegions = 4;
+ HybridMutex RegionsStashMutex;
+ uptr NumberOfStashedRegions GUARDED_BY(RegionsStashMutex) = 0;
+ uptr RegionsStash[MaxStashedRegions] GUARDED_BY(RegionsStashMutex) = {};
+};
+
+} // namespace scudo
+
+#endif // SCUDO_PRIMARY32_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary64.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary64.h
new file mode 100644
index 000000000000..8a583bacb4a9
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary64.h
@@ -0,0 +1,1737 @@
+//===-- primary64.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_PRIMARY64_H_
+#define SCUDO_PRIMARY64_H_
+
+#include "allocator_common.h"
+#include "bytemap.h"
+#include "common.h"
+#include "condition_variable.h"
+#include "list.h"
+#include "local_cache.h"
+#include "mem_map.h"
+#include "memtag.h"
+#include "options.h"
+#include "release.h"
+#include "stats.h"
+#include "string_utils.h"
+#include "thread_annotations.h"
+
+namespace scudo {
+
+// SizeClassAllocator64 is an allocator tuned for 64-bit address space.
+//
+// It starts by reserving NumClasses * 2^RegionSizeLog bytes, equally divided
+// into Regions, one per size class. Note that the base of that mapping is
+// random (depending on the platform specific map() capabilities). If
+// PrimaryEnableRandomOffset is set, each Region actually starts at a random
+// offset from its base.
+//
+// Regions are mapped incrementally on demand to fulfill allocation requests,
+// those mappings being split into equally sized Blocks based on the size class
+// they belong to. The Blocks created are shuffled to prevent predictable
+// address patterns (the predictability increases with the size of the Blocks).
+//
+// The 1st Region (for size class 0) holds the TransferBatches. This is a
+// structure used to transfer arrays of available pointers from the size class
+// freelist to the thread specific freelist, and back.
+//
+// The memory used by this allocator is never unmapped, but can be partially
+// released if the platform allows for it.
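+//
+// As an illustration (region size and class count depend on the Config): with
+// 2^28 = 256 MB regions and N size classes, roughly N * 256 MB of address
+// space is reserved up front when contiguous regions are enabled, while
+// physical pages are only mapped incrementally as allocations require them.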
+
+template <typename Config> class SizeClassAllocator64 {
+public:
+ typedef typename Config::CompactPtrT CompactPtrT;
+ typedef typename Config::SizeClassMap SizeClassMap;
+ typedef typename Config::ConditionVariableT ConditionVariableT;
+ static const uptr CompactPtrScale = Config::getCompactPtrScale();
+ static const uptr RegionSizeLog = Config::getRegionSizeLog();
+ static const uptr GroupSizeLog = Config::getGroupSizeLog();
+ static_assert(RegionSizeLog >= GroupSizeLog,
+ "Group size shouldn't be greater than the region size");
+ static const uptr GroupScale = GroupSizeLog - CompactPtrScale;
+ typedef SizeClassAllocator64<Config> ThisT;
+ typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
+ typedef TransferBatch<ThisT> TransferBatchT;
+ typedef BatchGroup<ThisT> BatchGroupT;
+
+ static_assert(sizeof(BatchGroupT) <= sizeof(TransferBatchT),
+ "BatchGroupT uses the same class size as TransferBatchT");
+
+ static uptr getSizeByClassId(uptr ClassId) {
+ return (ClassId == SizeClassMap::BatchClassId)
+ ? roundUp(sizeof(TransferBatchT), 1U << CompactPtrScale)
+ : SizeClassMap::getSizeByClassId(ClassId);
+ }
+
+ static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
+
+ static bool conditionVariableEnabled() {
+ return Config::hasConditionVariableT();
+ }
+
+ void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
+ DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
+
+ const uptr PageSize = getPageSizeCached();
+ const uptr GroupSize = (1UL << GroupSizeLog);
+ const uptr PagesInGroup = GroupSize / PageSize;
+ const uptr MinSizeClass = getSizeByClassId(1);
+    // When trying to release pages back to the OS, visiting smaller size
+    // classes is expensive. Therefore, we only try to release smaller size
+    // classes when the amount of free blocks goes over a certain threshold (see
+    // the comment in releaseToOSMaybe() for more details). For example, for
+    // size class 32, we only do the release when the size of free blocks is
+    // greater than 97% of pages in a group. However, this may introduce another
+    // issue if the number of free blocks bounces between 97% and 100%: we may
+    // attempt many page releases but only release very few pages (less than 3%
+    // of a group) each time. Even though `ReleaseToOsIntervalMs` slightly
+    // reduces the frequency of these calls, it is better to have another guard
+    // to mitigate this issue.
+ //
+ // Here we add another constraint on the minimum size requirement. The
+ // constraint is determined by the size of in-use blocks in the minimal size
+ // class. Take size class 32 as an example,
+ //
+ // +- one memory group -+
+ // +----------------------+------+
+ // | 97% of free blocks | |
+ // +----------------------+------+
+ // \ /
+ // 3% in-use blocks
+ //
+ // * The release size threshold is 97%.
+ //
+    // The 3% of a group is about 7 pages. Between two consecutive
+    // releaseToOSMaybe() calls, we require the difference in `PushedBlocks`
+    // to be greater than 7 pages. This mitigates the page release thrashing
+    // caused by memory usage bouncing around the threshold. The smallest size
+    // class takes the longest time to do a page release, so we use the size
+    // of its in-use blocks as the heuristic.
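+    //
+    // For instance, with 4 KB pages, 1 MB groups and a smallest size class of
+    // 32 bytes, the delta below works out to 256 * (1 + 2) / 100 = 7 pages.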
+ SmallerBlockReleasePageDelta =
+ PagesInGroup * (1 + MinSizeClass / 16U) / 100;
+
+ u32 Seed;
+ const u64 Time = getMonotonicTimeFast();
+ if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
+ Seed = static_cast<u32>(Time ^ (reinterpret_cast<uptr>(&Seed) >> 12));
+
+ for (uptr I = 0; I < NumClasses; I++)
+ getRegionInfo(I)->RandState = getRandomU32(&Seed);
+
+ if (Config::getEnableContiguousRegions()) {
+ ReservedMemoryT ReservedMemory = {};
+ // Reserve the space required for the Primary.
+ CHECK(ReservedMemory.create(/*Addr=*/0U, RegionSize * NumClasses,
+ "scudo:primary_reserve"));
+ const uptr PrimaryBase = ReservedMemory.getBase();
+
+ for (uptr I = 0; I < NumClasses; I++) {
+ MemMapT RegionMemMap = ReservedMemory.dispatch(
+ PrimaryBase + (I << RegionSizeLog), RegionSize);
+ RegionInfo *Region = getRegionInfo(I);
+
+ initRegion(Region, I, RegionMemMap, Config::getEnableRandomOffset());
+ }
+ shuffle(RegionInfoArray, NumClasses, &Seed);
+ }
+
+ // The binding should be done after region shuffling so that it won't bind
+ // the FLLock from the wrong region.
+ for (uptr I = 0; I < NumClasses; I++)
+ getRegionInfo(I)->FLLockCV.bindTestOnly(getRegionInfo(I)->FLLock);
+
+ // The default value in the primary config has the higher priority.
+ if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
+ ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
+ setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
+ }
+
+ void unmapTestOnly() {
+ for (uptr I = 0; I < NumClasses; I++) {
+ RegionInfo *Region = getRegionInfo(I);
+ {
+ ScopedLock ML(Region->MMLock);
+ MemMapT MemMap = Region->MemMapInfo.MemMap;
+ if (MemMap.isAllocated())
+ MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+ }
+ *Region = {};
+ }
+ }
+
+ // When all blocks are freed, it has to be the same size as `AllocatedUser`.
+ void verifyAllBlocksAreReleasedTestOnly() {
+ // `BatchGroup` and `TransferBatch` also use the blocks from BatchClass.
+ uptr BatchClassUsedInFreeLists = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ // We have to count BatchClassUsedInFreeLists in other regions first.
+ if (I == SizeClassMap::BatchClassId)
+ continue;
+ RegionInfo *Region = getRegionInfo(I);
+ ScopedLock ML(Region->MMLock);
+ ScopedLock FL(Region->FLLock);
+ const uptr BlockSize = getSizeByClassId(I);
+ uptr TotalBlocks = 0;
+ for (BatchGroupT &BG : Region->FreeListInfo.BlockList) {
+ // `BG::Batches` are `TransferBatches`. +1 for `BatchGroup`.
+ BatchClassUsedInFreeLists += BG.Batches.size() + 1;
+ for (const auto &It : BG.Batches)
+ TotalBlocks += It.getCount();
+ }
+
+ DCHECK_EQ(TotalBlocks, Region->MemMapInfo.AllocatedUser / BlockSize);
+ DCHECK_EQ(Region->FreeListInfo.PushedBlocks,
+ Region->FreeListInfo.PoppedBlocks);
+ }
+
+ RegionInfo *Region = getRegionInfo(SizeClassMap::BatchClassId);
+ ScopedLock ML(Region->MMLock);
+ ScopedLock FL(Region->FLLock);
+ const uptr BlockSize = getSizeByClassId(SizeClassMap::BatchClassId);
+ uptr TotalBlocks = 0;
+ for (BatchGroupT &BG : Region->FreeListInfo.BlockList) {
+ if (LIKELY(!BG.Batches.empty())) {
+ for (const auto &It : BG.Batches)
+ TotalBlocks += It.getCount();
+ } else {
+ // `BatchGroup` with empty freelist doesn't have `TransferBatch` record
+ // itself.
+ ++TotalBlocks;
+ }
+ }
+ DCHECK_EQ(TotalBlocks + BatchClassUsedInFreeLists,
+ Region->MemMapInfo.AllocatedUser / BlockSize);
+ DCHECK_GE(Region->FreeListInfo.PoppedBlocks,
+ Region->FreeListInfo.PushedBlocks);
+ const uptr BlocksInUse =
+ Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks;
+ DCHECK_EQ(BlocksInUse, BatchClassUsedInFreeLists);
+ }
+
+ u16 popBlocks(CacheT *C, uptr ClassId, CompactPtrT *ToArray,
+ const u16 MaxBlockCount) {
+ DCHECK_LT(ClassId, NumClasses);
+ RegionInfo *Region = getRegionInfo(ClassId);
+ u16 PopCount = 0;
+
+ {
+ ScopedLock L(Region->FLLock);
+ PopCount = popBlocksImpl(C, ClassId, Region, ToArray, MaxBlockCount);
+ if (PopCount != 0U)
+ return PopCount;
+ }
+
+ bool ReportRegionExhausted = false;
+
+ if (conditionVariableEnabled()) {
+ PopCount = popBlocksWithCV(C, ClassId, Region, ToArray, MaxBlockCount,
+ ReportRegionExhausted);
+ } else {
+ while (true) {
+ // When two threads compete for `Region->MMLock`, we only want one of
+        // them to call populateFreeListAndPopBlocks(). To avoid both of them
+ // doing that, always check the freelist before mapping new pages.
+ ScopedLock ML(Region->MMLock);
+ {
+ ScopedLock FL(Region->FLLock);
+ PopCount = popBlocksImpl(C, ClassId, Region, ToArray, MaxBlockCount);
+ if (PopCount != 0U)
+ return PopCount;
+ }
+
+ const bool RegionIsExhausted = Region->Exhausted;
+ if (!RegionIsExhausted) {
+ PopCount = populateFreeListAndPopBlocks(C, ClassId, Region, ToArray,
+ MaxBlockCount);
+ }
+ ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;
+ break;
+ }
+ }
+
+ if (UNLIKELY(ReportRegionExhausted)) {
+ Printf("Can't populate more pages for size class %zu.\n",
+ getSizeByClassId(ClassId));
+
+ // Theoretically, BatchClass shouldn't be used up. Abort immediately when
+ // it happens.
+ if (ClassId == SizeClassMap::BatchClassId)
+ reportOutOfBatchClass();
+ }
+
+ return PopCount;
+ }
+
+ // Push the array of free blocks to the designated batch group.
+ void pushBlocks(CacheT *C, uptr ClassId, CompactPtrT *Array, u32 Size) {
+ DCHECK_LT(ClassId, NumClasses);
+ DCHECK_GT(Size, 0);
+
+ RegionInfo *Region = getRegionInfo(ClassId);
+ if (ClassId == SizeClassMap::BatchClassId) {
+ ScopedLock L(Region->FLLock);
+ pushBatchClassBlocks(Region, Array, Size);
+ if (conditionVariableEnabled())
+ Region->FLLockCV.notifyAll(Region->FLLock);
+ return;
+ }
+
+ // TODO(chiahungduan): Consider not doing grouping if the group size is not
+ // greater than the block size with a certain scale.
+
+ bool SameGroup = true;
+ if (GroupSizeLog < RegionSizeLog) {
+ // Sort the blocks so that blocks belonging to the same group can be
+ // pushed together.
+ for (u32 I = 1; I < Size; ++I) {
+ if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I]))
+ SameGroup = false;
+ CompactPtrT Cur = Array[I];
+ u32 J = I;
+ while (J > 0 && compactPtrGroup(Cur) < compactPtrGroup(Array[J - 1])) {
+ Array[J] = Array[J - 1];
+ --J;
+ }
+ Array[J] = Cur;
+ }
+ }
+
+ {
+ ScopedLock L(Region->FLLock);
+ pushBlocksImpl(C, ClassId, Region, Array, Size, SameGroup);
+ if (conditionVariableEnabled())
+ Region->FLLockCV.notifyAll(Region->FLLock);
+ }
+ }
+
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
+ // The BatchClassId must be locked last since other classes can use it.
+ for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
+ if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
+ continue;
+ getRegionInfo(static_cast<uptr>(I))->MMLock.lock();
+ getRegionInfo(static_cast<uptr>(I))->FLLock.lock();
+ }
+ getRegionInfo(SizeClassMap::BatchClassId)->MMLock.lock();
+ getRegionInfo(SizeClassMap::BatchClassId)->FLLock.lock();
+ }
+
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
+ getRegionInfo(SizeClassMap::BatchClassId)->FLLock.unlock();
+ getRegionInfo(SizeClassMap::BatchClassId)->MMLock.unlock();
+ for (uptr I = 0; I < NumClasses; I++) {
+ if (I == SizeClassMap::BatchClassId)
+ continue;
+ getRegionInfo(I)->FLLock.unlock();
+ getRegionInfo(I)->MMLock.unlock();
+ }
+ }
+
+ template <typename F> void iterateOverBlocks(F Callback) {
+ for (uptr I = 0; I < NumClasses; I++) {
+ if (I == SizeClassMap::BatchClassId)
+ continue;
+ RegionInfo *Region = getRegionInfo(I);
+ // TODO: The call of `iterateOverBlocks` requires disabling
+ // SizeClassAllocator64. We may consider locking each region on demand
+ // only.
+ Region->FLLock.assertHeld();
+ Region->MMLock.assertHeld();
+ const uptr BlockSize = getSizeByClassId(I);
+ const uptr From = Region->RegionBeg;
+ const uptr To = From + Region->MemMapInfo.AllocatedUser;
+ for (uptr Block = From; Block < To; Block += BlockSize)
+ Callback(Block);
+ }
+ }
+
+ void getStats(ScopedString *Str) {
+ // TODO(kostyak): get the RSS per region.
+ uptr TotalMapped = 0;
+ uptr PoppedBlocks = 0;
+ uptr PushedBlocks = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ RegionInfo *Region = getRegionInfo(I);
+ {
+ ScopedLock L(Region->MMLock);
+ TotalMapped += Region->MemMapInfo.MappedUser;
+ }
+ {
+ ScopedLock L(Region->FLLock);
+ PoppedBlocks += Region->FreeListInfo.PoppedBlocks;
+ PushedBlocks += Region->FreeListInfo.PushedBlocks;
+ }
+ }
+ const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
+ Str->append("Stats: SizeClassAllocator64: %zuM mapped (%uM rss) in %zu "
+ "allocations; remains %zu; ReleaseToOsIntervalMs = %d\n",
+ TotalMapped >> 20, 0U, PoppedBlocks,
+ PoppedBlocks - PushedBlocks, IntervalMs >= 0 ? IntervalMs : -1);
+
+ for (uptr I = 0; I < NumClasses; I++) {
+ RegionInfo *Region = getRegionInfo(I);
+ ScopedLock L1(Region->MMLock);
+ ScopedLock L2(Region->FLLock);
+ getStats(Str, I, Region);
+ }
+ }
+
+ void getFragmentationInfo(ScopedString *Str) {
+ Str->append(
+ "Fragmentation Stats: SizeClassAllocator64: page size = %zu bytes\n",
+ getPageSizeCached());
+
+ for (uptr I = 1; I < NumClasses; I++) {
+ RegionInfo *Region = getRegionInfo(I);
+ ScopedLock L(Region->MMLock);
+ getRegionFragmentationInfo(Region, I, Str);
+ }
+ }
+
+ bool setOption(Option O, sptr Value) {
+ if (O == Option::ReleaseInterval) {
+ const s32 Interval = Max(
+ Min(static_cast<s32>(Value), Config::getMaxReleaseToOsIntervalMs()),
+ Config::getMinReleaseToOsIntervalMs());
+ atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
+ return true;
+ }
+ // Not supported by the Primary, but not an error either.
+ return true;
+ }
+
+ uptr tryReleaseToOS(uptr ClassId, ReleaseToOS ReleaseType) {
+ RegionInfo *Region = getRegionInfo(ClassId);
+    // Note that tryLock() may fail spuriously. Given that this should rarely
+    // happen and that skipping a page release is fine, we don't take extra
+    // measures to ensure the page release is done.
+ if (Region->MMLock.tryLock()) {
+ uptr BytesReleased = releaseToOSMaybe(Region, ClassId, ReleaseType);
+ Region->MMLock.unlock();
+ return BytesReleased;
+ }
+ return 0;
+ }
+
+ uptr releaseToOS(ReleaseToOS ReleaseType) {
+ uptr TotalReleasedBytes = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ if (I == SizeClassMap::BatchClassId)
+ continue;
+ RegionInfo *Region = getRegionInfo(I);
+ ScopedLock L(Region->MMLock);
+ TotalReleasedBytes += releaseToOSMaybe(Region, I, ReleaseType);
+ }
+ return TotalReleasedBytes;
+ }
+
+ const char *getRegionInfoArrayAddress() const {
+ return reinterpret_cast<const char *>(RegionInfoArray);
+ }
+
+ static uptr getRegionInfoArraySize() { return sizeof(RegionInfoArray); }
+
+ uptr getCompactPtrBaseByClassId(uptr ClassId) {
+ return getRegionInfo(ClassId)->RegionBeg;
+ }
+
+ CompactPtrT compactPtr(uptr ClassId, uptr Ptr) {
+ DCHECK_LE(ClassId, SizeClassMap::LargestClassId);
+ return compactPtrInternal(getCompactPtrBaseByClassId(ClassId), Ptr);
+ }
+
+ void *decompactPtr(uptr ClassId, CompactPtrT CompactPtr) {
+ DCHECK_LE(ClassId, SizeClassMap::LargestClassId);
+ return reinterpret_cast<void *>(
+ decompactPtrInternal(getCompactPtrBaseByClassId(ClassId), CompactPtr));
+ }
+
+ static BlockInfo findNearestBlock(const char *RegionInfoData,
+ uptr Ptr) NO_THREAD_SAFETY_ANALYSIS {
+ const RegionInfo *RegionInfoArray =
+ reinterpret_cast<const RegionInfo *>(RegionInfoData);
+
+ uptr ClassId;
+ uptr MinDistance = -1UL;
+ for (uptr I = 0; I != NumClasses; ++I) {
+ if (I == SizeClassMap::BatchClassId)
+ continue;
+ uptr Begin = RegionInfoArray[I].RegionBeg;
+      // TODO(chiahungduan): In fact, we need to lock RegionInfo::MMLock.
+      // However, RegionInfoData is passed with a const qualifier and locking
+      // the mutex requires modifying RegionInfoData, which means we would need
+      // to remove the const qualifier. This may lead to another undefined
+      // behavior (the first one is accessing `AllocatedUser` without locking).
+      // It's better to pass `RegionInfoData` as `void *` so we can lock the
+      // mutex properly.
+ uptr End = Begin + RegionInfoArray[I].MemMapInfo.AllocatedUser;
+ if (Begin > End || End - Begin < SizeClassMap::getSizeByClassId(I))
+ continue;
+ uptr RegionDistance;
+ if (Begin <= Ptr) {
+ if (Ptr < End)
+ RegionDistance = 0;
+ else
+ RegionDistance = Ptr - End;
+ } else {
+ RegionDistance = Begin - Ptr;
+ }
+
+ if (RegionDistance < MinDistance) {
+ MinDistance = RegionDistance;
+ ClassId = I;
+ }
+ }
+
+ BlockInfo B = {};
+ if (MinDistance <= 8192) {
+ B.RegionBegin = RegionInfoArray[ClassId].RegionBeg;
+ B.RegionEnd =
+ B.RegionBegin + RegionInfoArray[ClassId].MemMapInfo.AllocatedUser;
+ B.BlockSize = SizeClassMap::getSizeByClassId(ClassId);
+ B.BlockBegin =
+ B.RegionBegin + uptr(sptr(Ptr - B.RegionBegin) / sptr(B.BlockSize) *
+ sptr(B.BlockSize));
+ while (B.BlockBegin < B.RegionBegin)
+ B.BlockBegin += B.BlockSize;
+ while (B.RegionEnd < B.BlockBegin + B.BlockSize)
+ B.BlockBegin -= B.BlockSize;
+ }
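+    // A worked example of the computation above (an illustrative sketch with
+    // made-up numbers): with RegionBegin = 0x4000 and BlockSize = 48 (0x30),
+    // a Ptr of 0x40A0 gives (Ptr - RegionBegin) = 160, and 160 / 48 * 48 =
+    // 144, so BlockBegin is 0x4090, the start of the block containing Ptr.
+    // The signed division plus the two while loops above handle a Ptr that
+    // falls slightly before RegionBegin or beyond the last whole block.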
+ return B;
+ }
+
+ AtomicOptions Options;
+
+private:
+ static const uptr RegionSize = 1UL << RegionSizeLog;
+ static const uptr NumClasses = SizeClassMap::NumClasses;
+
+ static const uptr MapSizeIncrement = Config::getMapSizeIncrement();
+  // Fill at most this number of batches from the newly mapped memory.
+ static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
+
+ struct ReleaseToOsInfo {
+ uptr BytesInFreeListAtLastCheckpoint;
+ uptr RangesReleased;
+ uptr LastReleasedBytes;
+ u64 LastReleaseAtNs;
+ };
+
+ struct BlocksInfo {
+ SinglyLinkedList<BatchGroupT> BlockList = {};
+ uptr PoppedBlocks = 0;
+ uptr PushedBlocks = 0;
+ };
+
+ struct PagesInfo {
+ MemMapT MemMap = {};
+ // Bytes mapped for user memory.
+ uptr MappedUser = 0;
+ // Bytes allocated for user memory.
+ uptr AllocatedUser = 0;
+ };
+
+ struct UnpaddedRegionInfo {
+ // Mutex for operations on freelist
+ HybridMutex FLLock;
+ ConditionVariableT FLLockCV GUARDED_BY(FLLock);
+ // Mutex for memmap operations
+ HybridMutex MMLock ACQUIRED_BEFORE(FLLock);
+ // `RegionBeg` is initialized before thread creation and won't be changed.
+ uptr RegionBeg = 0;
+ u32 RandState GUARDED_BY(MMLock) = 0;
+ BlocksInfo FreeListInfo GUARDED_BY(FLLock);
+ PagesInfo MemMapInfo GUARDED_BY(MMLock);
+ // The minimum size of pushed blocks to trigger page release.
+ uptr TryReleaseThreshold GUARDED_BY(MMLock) = 0;
+ ReleaseToOsInfo ReleaseInfo GUARDED_BY(MMLock) = {};
+ bool Exhausted GUARDED_BY(MMLock) = false;
+ bool isPopulatingFreeList GUARDED_BY(FLLock) = false;
+ };
+ struct RegionInfo : UnpaddedRegionInfo {
+ char Padding[SCUDO_CACHE_LINE_SIZE -
+ (sizeof(UnpaddedRegionInfo) % SCUDO_CACHE_LINE_SIZE)] = {};
+ };
+ static_assert(sizeof(RegionInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
+
+ RegionInfo *getRegionInfo(uptr ClassId) {
+ DCHECK_LT(ClassId, NumClasses);
+ return &RegionInfoArray[ClassId];
+ }
+
+ uptr getRegionBaseByClassId(uptr ClassId) {
+ RegionInfo *Region = getRegionInfo(ClassId);
+ Region->MMLock.assertHeld();
+
+ if (!Config::getEnableContiguousRegions() &&
+ !Region->MemMapInfo.MemMap.isAllocated()) {
+ return 0U;
+ }
+ return Region->MemMapInfo.MemMap.getBase();
+ }
+
+ static CompactPtrT compactPtrInternal(uptr Base, uptr Ptr) {
+ return static_cast<CompactPtrT>((Ptr - Base) >> CompactPtrScale);
+ }
+
+ static uptr decompactPtrInternal(uptr Base, CompactPtrT CompactPtr) {
+ return Base + (static_cast<uptr>(CompactPtr) << CompactPtrScale);
+ }
+
+ static uptr compactPtrGroup(CompactPtrT CompactPtr) {
+ const uptr Mask = (static_cast<uptr>(1) << GroupScale) - 1;
+ return static_cast<uptr>(CompactPtr) & ~Mask;
+ }
+ static uptr decompactGroupBase(uptr Base, uptr CompactPtrGroupBase) {
+ DCHECK_EQ(CompactPtrGroupBase % (static_cast<uptr>(1) << (GroupScale)), 0U);
+ return Base + (CompactPtrGroupBase << CompactPtrScale);
+ }
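+
+  // Illustrative sketch (not part of the allocator): a worked example of the
+  // compact-pointer helpers above, assuming CompactPtrScale = 4, GroupScale =
+  // 16 and Base = 0x1000000 (the real values depend on Config). For
+  // Ptr = 0x1234560:
+  //   compactPtrInternal(Base, Ptr)       = 0x234560 >> 4        = 0x23456
+  //   compactPtrGroup(0x23456)            = 0x23456 & ~0xFFFF    = 0x20000
+  //   decompactGroupBase(Base, 0x20000)   = Base + (0x20000 << 4) = 0x1200000
+  //   decompactPtrInternal(Base, 0x23456) = Base + 0x234560       = Ptr
+  // That is, the group base is GroupSize-aligned relative to Base, and the
+  // compact/decompact round trip restores the original pointer.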
+
+ ALWAYS_INLINE static bool isSmallBlock(uptr BlockSize) {
+ const uptr PageSize = getPageSizeCached();
+ return BlockSize < PageSize / 16U;
+ }
+
+ ALWAYS_INLINE static bool isLargeBlock(uptr BlockSize) {
+ const uptr PageSize = getPageSizeCached();
+ return BlockSize > PageSize;
+ }
+
+ ALWAYS_INLINE void initRegion(RegionInfo *Region, uptr ClassId,
+ MemMapT MemMap, bool EnableRandomOffset)
+ REQUIRES(Region->MMLock) {
+ DCHECK(!Region->MemMapInfo.MemMap.isAllocated());
+ DCHECK(MemMap.isAllocated());
+
+ const uptr PageSize = getPageSizeCached();
+
+ Region->MemMapInfo.MemMap = MemMap;
+
+ Region->RegionBeg = MemMap.getBase();
+ if (EnableRandomOffset) {
+ Region->RegionBeg +=
+ (getRandomModN(&Region->RandState, 16) + 1) * PageSize;
+ }
+
+    // Releasing small blocks is expensive, so set a higher threshold to avoid
+    // frequent page releases.
+ if (isSmallBlock(getSizeByClassId(ClassId)))
+ Region->TryReleaseThreshold = PageSize * SmallerBlockReleasePageDelta;
+ else
+ Region->TryReleaseThreshold = PageSize;
+ }
+
+ void pushBatchClassBlocks(RegionInfo *Region, CompactPtrT *Array, u32 Size)
+ REQUIRES(Region->FLLock) {
+ DCHECK_EQ(Region, getRegionInfo(SizeClassMap::BatchClassId));
+
+    // Free blocks are recorded by TransferBatches in the freelist for all
+    // size classes. In addition, TransferBatches are allocated from
+    // BatchClassId. In order not to use an additional block to record the free
+    // blocks of BatchClassId, they are self-contained, i.e., a TransferBatch
+    // records its own block address. See the figure below:
+ //
+ // TransferBatch at 0xABCD
+ // +----------------------------+
+ // | Free blocks' addr |
+ // | +------+------+------+ |
+ // | |0xABCD|... |... | |
+ // | +------+------+------+ |
+ // +----------------------------+
+ //
+    // When we have handed out all the free blocks in a TransferBatch, the
+    // block used by the TransferBatch itself becomes free for use, so we don't
+    // need to recycle it. Note that correctness is maintained by the
+    // invariant,
+    //
+    //   Each popBlocks() request returns the entire TransferBatch. Returning
+    //   part of the blocks in a TransferBatch is invalid.
+    //
+    // This ensures that a TransferBatch won't hand out its own address while
+    // it's still holding other valid data.
+ //
+    // Besides, a BatchGroup is also allocated from BatchClassId and has its
+    // address recorded in a TransferBatch too. To maintain correctness,
+    //
+    //   The address of the BatchGroup is always recorded in the last
+    //   TransferBatch in the freelist (which also implies that the freelist
+    //   should only be updated with push_front). Once the last TransferBatch
+    //   is popped, the block used by the BatchGroup is also free for use.
+    //
+    // With this approach, the blocks used by BatchGroups and TransferBatches
+    // are reusable and no additional space is needed for them.
+
+ Region->FreeListInfo.PushedBlocks += Size;
+ BatchGroupT *BG = Region->FreeListInfo.BlockList.front();
+
+ if (BG == nullptr) {
+ // Construct `BatchGroup` on the last element.
+ BG = reinterpret_cast<BatchGroupT *>(
+ decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
+ --Size;
+ BG->Batches.clear();
+      // BatchClass doesn't use memory groups. Use `0` to indicate there's no
+      // memory group here.
+ BG->CompactPtrGroupBase = 0;
+ // `BG` is also the block of BatchClassId. Note that this is different
+ // from `CreateGroup` in `pushBlocksImpl`
+ BG->PushedBlocks = 1;
+ BG->BytesInBGAtLastCheckpoint = 0;
+ BG->MaxCachedPerBatch =
+ CacheT::getMaxCached(getSizeByClassId(SizeClassMap::BatchClassId));
+
+ Region->FreeListInfo.BlockList.push_front(BG);
+ }
+
+ if (UNLIKELY(Size == 0))
+ return;
+
+    // This happens in two cases:
+    // 1. We just allocated a new `BatchGroup`.
+    // 2. Only one block was pushed when the freelist was empty.
+ if (BG->Batches.empty()) {
+ // Construct the `TransferBatch` on the last element.
+ TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(
+ decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
+ TB->clear();
+ // As mentioned above, addresses of `TransferBatch` and `BatchGroup` are
+ // recorded in the TransferBatch.
+ TB->add(Array[Size - 1]);
+ TB->add(
+ compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(BG)));
+ --Size;
+ DCHECK_EQ(BG->PushedBlocks, 1U);
+ // `TB` is also the block of BatchClassId.
+ BG->PushedBlocks += 1;
+ BG->Batches.push_front(TB);
+ }
+
+ TransferBatchT *CurBatch = BG->Batches.front();
+ DCHECK_NE(CurBatch, nullptr);
+
+ for (u32 I = 0; I < Size;) {
+ u16 UnusedSlots =
+ static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
+ if (UnusedSlots == 0) {
+ CurBatch = reinterpret_cast<TransferBatchT *>(
+ decompactPtr(SizeClassMap::BatchClassId, Array[I]));
+ CurBatch->clear();
+ // Self-contained
+ CurBatch->add(Array[I]);
+ ++I;
+ // TODO(chiahungduan): Avoid the use of push_back() in `Batches` of
+ // BatchClassId.
+ BG->Batches.push_front(CurBatch);
+ UnusedSlots = static_cast<u16>(BG->MaxCachedPerBatch - 1);
+ }
+      // `UnusedSlots` is a u16 so the result will also fit in a u16.
+ const u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
+ CurBatch->appendFromArray(&Array[I], AppendSize);
+ I += AppendSize;
+ }
+
+ BG->PushedBlocks += Size;
+ }
+
+ // Push the blocks to their batch group. The layout will be like,
+ //
+ // FreeListInfo.BlockList - > BG -> BG -> BG
+ // | | |
+ // v v v
+ // TB TB TB
+ // |
+ // v
+ // TB
+ //
+  // Each BatchGroup (BG) is associated with a unique group id and its free
+  // blocks are managed by a list of TransferBatches (TB). To reduce the time
+  // spent inserting blocks, the BGs are kept sorted and the input `Array` is
+  // expected to be sorted as well, so that the sorted property can be
+  // maintained efficiently. Use `SameGroup=true` to indicate that all blocks
+  // in the array are from the same group; we then skip checking the group id
+  // of each block.
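+  //
+  // As an illustrative example (hypothetical group bases): if the freelist
+  // already holds BGs with bases {G1, G3} and the sorted `Array` contains
+  // blocks from {G2, G3}, the blocks of G2 create a new BG that is inserted
+  // between G1 and G3, while the blocks of G3 are appended to the existing G3
+  // BG, so the list stays sorted by CompactPtrGroupBase.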
+ void pushBlocksImpl(CacheT *C, uptr ClassId, RegionInfo *Region,
+ CompactPtrT *Array, u32 Size, bool SameGroup = false)
+ REQUIRES(Region->FLLock) {
+ DCHECK_NE(ClassId, SizeClassMap::BatchClassId);
+ DCHECK_GT(Size, 0U);
+
+ auto CreateGroup = [&](uptr CompactPtrGroupBase) {
+ BatchGroupT *BG =
+ reinterpret_cast<BatchGroupT *>(C->getBatchClassBlock());
+ BG->Batches.clear();
+ TransferBatchT *TB =
+ reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
+ TB->clear();
+
+ BG->CompactPtrGroupBase = CompactPtrGroupBase;
+ BG->Batches.push_front(TB);
+ BG->PushedBlocks = 0;
+ BG->BytesInBGAtLastCheckpoint = 0;
+ BG->MaxCachedPerBatch = TransferBatchT::MaxNumCached;
+
+ return BG;
+ };
+
+ auto InsertBlocks = [&](BatchGroupT *BG, CompactPtrT *Array, u32 Size) {
+ SinglyLinkedList<TransferBatchT> &Batches = BG->Batches;
+ TransferBatchT *CurBatch = Batches.front();
+ DCHECK_NE(CurBatch, nullptr);
+
+ for (u32 I = 0; I < Size;) {
+ DCHECK_GE(BG->MaxCachedPerBatch, CurBatch->getCount());
+ u16 UnusedSlots =
+ static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
+ if (UnusedSlots == 0) {
+ CurBatch =
+ reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
+ CurBatch->clear();
+ Batches.push_front(CurBatch);
+ UnusedSlots = BG->MaxCachedPerBatch;
+ }
+        // `UnusedSlots` is a u16 so the result will also fit in a u16.
+ u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
+ CurBatch->appendFromArray(&Array[I], AppendSize);
+ I += AppendSize;
+ }
+
+ BG->PushedBlocks += Size;
+ };
+
+ Region->FreeListInfo.PushedBlocks += Size;
+ BatchGroupT *Cur = Region->FreeListInfo.BlockList.front();
+
+ // In the following, `Cur` always points to the BatchGroup for blocks that
+ // will be pushed next. `Prev` is the element right before `Cur`.
+ BatchGroupT *Prev = nullptr;
+
+ while (Cur != nullptr &&
+ compactPtrGroup(Array[0]) > Cur->CompactPtrGroupBase) {
+ Prev = Cur;
+ Cur = Cur->Next;
+ }
+
+ if (Cur == nullptr ||
+ compactPtrGroup(Array[0]) != Cur->CompactPtrGroupBase) {
+ Cur = CreateGroup(compactPtrGroup(Array[0]));
+ if (Prev == nullptr)
+ Region->FreeListInfo.BlockList.push_front(Cur);
+ else
+ Region->FreeListInfo.BlockList.insert(Prev, Cur);
+ }
+
+ // All the blocks are from the same group, just push without checking group
+ // id.
+ if (SameGroup) {
+ for (u32 I = 0; I < Size; ++I)
+ DCHECK_EQ(compactPtrGroup(Array[I]), Cur->CompactPtrGroupBase);
+
+ InsertBlocks(Cur, Array, Size);
+ return;
+ }
+
+    // The blocks are sorted by group id. Determine the segments of blocks from
+    // the same group and push each segment to its group together.
+ u32 Count = 1;
+ for (u32 I = 1; I < Size; ++I) {
+ if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I])) {
+ DCHECK_EQ(compactPtrGroup(Array[I - 1]), Cur->CompactPtrGroupBase);
+ InsertBlocks(Cur, Array + I - Count, Count);
+
+ while (Cur != nullptr &&
+ compactPtrGroup(Array[I]) > Cur->CompactPtrGroupBase) {
+ Prev = Cur;
+ Cur = Cur->Next;
+ }
+
+ if (Cur == nullptr ||
+ compactPtrGroup(Array[I]) != Cur->CompactPtrGroupBase) {
+ Cur = CreateGroup(compactPtrGroup(Array[I]));
+ DCHECK_NE(Prev, nullptr);
+ Region->FreeListInfo.BlockList.insert(Prev, Cur);
+ }
+
+ Count = 1;
+ } else {
+ ++Count;
+ }
+ }
+
+ InsertBlocks(Cur, Array + Size - Count, Count);
+ }
+
+ u16 popBlocksWithCV(CacheT *C, uptr ClassId, RegionInfo *Region,
+ CompactPtrT *ToArray, const u16 MaxBlockCount,
+ bool &ReportRegionExhausted) {
+ u16 PopCount = 0;
+
+ while (true) {
+      // We only expect one thread to be refilling the freelist; the other
+      // threads will be waiting for either the completion of
+      // `populateFreeListAndPopBlocks()` or a `pushBlocks()` call from another
+      // thread.
+ bool PopulateFreeList = false;
+ {
+ ScopedLock FL(Region->FLLock);
+ if (!Region->isPopulatingFreeList) {
+ Region->isPopulatingFreeList = true;
+ PopulateFreeList = true;
+ }
+ }
+
+ if (PopulateFreeList) {
+ ScopedLock ML(Region->MMLock);
+
+ const bool RegionIsExhausted = Region->Exhausted;
+ if (!RegionIsExhausted) {
+ PopCount = populateFreeListAndPopBlocks(C, ClassId, Region, ToArray,
+ MaxBlockCount);
+ }
+ ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;
+
+ {
+          // Before reacquiring the `FLLock`, the freelist may be used up again
+          // and some threads may be waiting for the current thread to refill
+          // it. It's important to set `Region->isPopulatingFreeList` to false
+          // so the threads about to sleep will notice the status change.
+ ScopedLock FL(Region->FLLock);
+ Region->isPopulatingFreeList = false;
+ Region->FLLockCV.notifyAll(Region->FLLock);
+ }
+
+ break;
+ }
+
+      // At this point, two preconditions have to be met before waiting:
+      // 1. The freelist is empty.
+      // 2. Region->isPopulatingFreeList == true, i.e., someone is still doing
+      //    `populateFreeListAndPopBlocks()`.
+      //
+      // Note that there is a chance that the freelist is empty but
+      // Region->isPopulatingFreeList == false because all the newly populated
+      // blocks were used up right after the refill. Therefore, we have to
+      // check whether someone is still populating the freelist.
+ ScopedLock FL(Region->FLLock);
+ PopCount = popBlocksImpl(C, ClassId, Region, ToArray, MaxBlockCount);
+ if (PopCount != 0U)
+ break;
+
+ if (!Region->isPopulatingFreeList)
+ continue;
+
+      // Now the freelist is empty and someone is refilling it. We will wait
+      // until the freelist is refilled or someone finishes
+      // `populateFreeListAndPopBlocks()`. The refill can be done by
+      // `populateFreeListAndPopBlocks()`, `pushBlocks()`,
+      // `pushBatchClassBlocks()` and `mergeGroupsToReleaseBack()`.
+ Region->FLLockCV.wait(Region->FLLock);
+
+ PopCount = popBlocksImpl(C, ClassId, Region, ToArray, MaxBlockCount);
+ if (PopCount != 0U)
+ break;
+ }
+
+ return PopCount;
+ }
+
+ u16 popBlocksImpl(CacheT *C, uptr ClassId, RegionInfo *Region,
+ CompactPtrT *ToArray, const u16 MaxBlockCount)
+ REQUIRES(Region->FLLock) {
+ if (Region->FreeListInfo.BlockList.empty())
+ return 0U;
+
+ SinglyLinkedList<TransferBatchT> &Batches =
+ Region->FreeListInfo.BlockList.front()->Batches;
+
+ if (Batches.empty()) {
+ DCHECK_EQ(ClassId, SizeClassMap::BatchClassId);
+ BatchGroupT *BG = Region->FreeListInfo.BlockList.front();
+ Region->FreeListInfo.BlockList.pop_front();
+
+      // The block used by the `BatchGroup` is from BatchClassId. Turn the
+      // block into a `TransferBatch` with a single block.
+ TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(BG);
+ ToArray[0] =
+ compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB));
+ Region->FreeListInfo.PoppedBlocks += 1;
+ return 1U;
+ }
+
+    // So far, instead of always filling blocks to `MaxBlockCount`, we only
+    // examine a single `TransferBatch` to minimize the time spent in the
+    // primary allocator. Besides, the sizes of `TransferBatch` and
+ // `CacheT::getMaxCached()` may also impact the time spent on accessing the
+ // primary allocator.
+ // TODO(chiahungduan): Evaluate if we want to always prepare `MaxBlockCount`
+ // blocks and/or adjust the size of `TransferBatch` according to
+ // `CacheT::getMaxCached()`.
+ TransferBatchT *B = Batches.front();
+ DCHECK_NE(B, nullptr);
+ DCHECK_GT(B->getCount(), 0U);
+
+    // BatchClassId should always take all blocks in the TransferBatch. Read
+    // the comment in `pushBatchClassBlocks()` for more details.
+ const u16 PopCount = ClassId == SizeClassMap::BatchClassId
+ ? B->getCount()
+ : Min(MaxBlockCount, B->getCount());
+ B->moveNToArray(ToArray, PopCount);
+
+ // TODO(chiahungduan): The deallocation of unused BatchClassId blocks can be
+ // done without holding `FLLock`.
+ if (B->empty()) {
+ Batches.pop_front();
+ // `TransferBatch` of BatchClassId is self-contained, no need to
+ // deallocate. Read the comment in `pushBatchClassBlocks()` for more
+ // details.
+ if (ClassId != SizeClassMap::BatchClassId)
+ C->deallocate(SizeClassMap::BatchClassId, B);
+
+ if (Batches.empty()) {
+ BatchGroupT *BG = Region->FreeListInfo.BlockList.front();
+ Region->FreeListInfo.BlockList.pop_front();
+
+        // We don't keep a BatchGroup with zero blocks, to avoid empty-checking
+        // while allocating. Note that the block used for constructing the
+        // BatchGroup is recorded as a free block in the last element of
+        // BatchGroup::Batches, which means that once we pop the last
+        // TransferBatch, the block is implicitly deallocated.
+ if (ClassId != SizeClassMap::BatchClassId)
+ C->deallocate(SizeClassMap::BatchClassId, BG);
+ }
+ }
+
+ Region->FreeListInfo.PoppedBlocks += PopCount;
+
+ return PopCount;
+ }
+
+ NOINLINE u16 populateFreeListAndPopBlocks(CacheT *C, uptr ClassId,
+ RegionInfo *Region,
+ CompactPtrT *ToArray,
+ const u16 MaxBlockCount)
+ REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
+ if (!Config::getEnableContiguousRegions() &&
+ !Region->MemMapInfo.MemMap.isAllocated()) {
+ ReservedMemoryT ReservedMemory;
+ if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, RegionSize,
+ "scudo:primary_reserve",
+ MAP_ALLOWNOMEM))) {
+ Printf("Can't reserve pages for size class %zu.\n",
+ getSizeByClassId(ClassId));
+ return 0U;
+ }
+ initRegion(Region, ClassId,
+ ReservedMemory.dispatch(ReservedMemory.getBase(),
+ ReservedMemory.getCapacity()),
+ /*EnableRandomOffset=*/false);
+ }
+
+ DCHECK(Region->MemMapInfo.MemMap.isAllocated());
+ const uptr Size = getSizeByClassId(ClassId);
+ const u16 MaxCount = CacheT::getMaxCached(Size);
+ const uptr RegionBeg = Region->RegionBeg;
+ const uptr MappedUser = Region->MemMapInfo.MappedUser;
+ const uptr TotalUserBytes =
+ Region->MemMapInfo.AllocatedUser + MaxCount * Size;
+ // Map more space for blocks, if necessary.
+ if (TotalUserBytes > MappedUser) {
+ // Do the mmap for the user memory.
+ const uptr MapSize =
+ roundUp(TotalUserBytes - MappedUser, MapSizeIncrement);
+ const uptr RegionBase = RegionBeg - getRegionBaseByClassId(ClassId);
+ if (UNLIKELY(RegionBase + MappedUser + MapSize > RegionSize)) {
+ Region->Exhausted = true;
+ return 0U;
+ }
+
+ if (UNLIKELY(!Region->MemMapInfo.MemMap.remap(
+ RegionBeg + MappedUser, MapSize, "scudo:primary",
+ MAP_ALLOWNOMEM | MAP_RESIZABLE |
+ (useMemoryTagging<Config>(Options.load()) ? MAP_MEMTAG
+ : 0)))) {
+ return 0U;
+ }
+ Region->MemMapInfo.MappedUser += MapSize;
+ C->getStats().add(StatMapped, MapSize);
+ }
+
+ const u32 NumberOfBlocks =
+ Min(MaxNumBatches * MaxCount,
+ static_cast<u32>((Region->MemMapInfo.MappedUser -
+ Region->MemMapInfo.AllocatedUser) /
+ Size));
+ DCHECK_GT(NumberOfBlocks, 0);
+
+ constexpr u32 ShuffleArraySize =
+ MaxNumBatches * TransferBatchT::MaxNumCached;
+ CompactPtrT ShuffleArray[ShuffleArraySize];
+ DCHECK_LE(NumberOfBlocks, ShuffleArraySize);
+
+ const uptr CompactPtrBase = getCompactPtrBaseByClassId(ClassId);
+ uptr P = RegionBeg + Region->MemMapInfo.AllocatedUser;
+ for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
+ ShuffleArray[I] = compactPtrInternal(CompactPtrBase, P);
+
+ ScopedLock L(Region->FLLock);
+
+ if (ClassId != SizeClassMap::BatchClassId) {
+ u32 N = 1;
+ uptr CurGroup = compactPtrGroup(ShuffleArray[0]);
+ for (u32 I = 1; I < NumberOfBlocks; I++) {
+ if (UNLIKELY(compactPtrGroup(ShuffleArray[I]) != CurGroup)) {
+ shuffle(ShuffleArray + I - N, N, &Region->RandState);
+ pushBlocksImpl(C, ClassId, Region, ShuffleArray + I - N, N,
+ /*SameGroup=*/true);
+ N = 1;
+ CurGroup = compactPtrGroup(ShuffleArray[I]);
+ } else {
+ ++N;
+ }
+ }
+
+ shuffle(ShuffleArray + NumberOfBlocks - N, N, &Region->RandState);
+ pushBlocksImpl(C, ClassId, Region, &ShuffleArray[NumberOfBlocks - N], N,
+ /*SameGroup=*/true);
+ } else {
+ pushBatchClassBlocks(Region, ShuffleArray, NumberOfBlocks);
+ }
+
+ const u16 PopCount =
+ popBlocksImpl(C, ClassId, Region, ToArray, MaxBlockCount);
+ DCHECK_NE(PopCount, 0U);
+
+    // Note that `PushedBlocks` and `PoppedBlocks` are supposed to only record
+    // the requests from `pushBlocks()` and `popBlocks()`, which are the
+    // external interfaces. `populateFreeListAndPopBlocks()` is an internal
+    // interface, so we set the values back to avoid skewing the stats.
+ Region->FreeListInfo.PushedBlocks -= NumberOfBlocks;
+
+ const uptr AllocatedUser = Size * NumberOfBlocks;
+ C->getStats().add(StatFree, AllocatedUser);
+ Region->MemMapInfo.AllocatedUser += AllocatedUser;
+
+ return PopCount;
+ }
+
+ void getStats(ScopedString *Str, uptr ClassId, RegionInfo *Region)
+ REQUIRES(Region->MMLock, Region->FLLock) {
+ if (Region->MemMapInfo.MappedUser == 0)
+ return;
+ const uptr BlockSize = getSizeByClassId(ClassId);
+ const uptr InUseBlocks =
+ Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks;
+ const uptr BytesInFreeList =
+ Region->MemMapInfo.AllocatedUser - InUseBlocks * BlockSize;
+ uptr RegionPushedBytesDelta = 0;
+ if (BytesInFreeList >=
+ Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
+ RegionPushedBytesDelta =
+ BytesInFreeList - Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
+ }
+ const uptr TotalChunks = Region->MemMapInfo.AllocatedUser / BlockSize;
+ Str->append(
+ "%s %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
+ "inuse: %6zu total: %6zu releases: %6zu last "
+ "released: %6zuK latest pushed bytes: %6zuK region: 0x%zx (0x%zx)\n",
+ Region->Exhausted ? "E" : " ", ClassId, getSizeByClassId(ClassId),
+ Region->MemMapInfo.MappedUser >> 10, Region->FreeListInfo.PoppedBlocks,
+ Region->FreeListInfo.PushedBlocks, InUseBlocks, TotalChunks,
+ Region->ReleaseInfo.RangesReleased,
+ Region->ReleaseInfo.LastReleasedBytes >> 10,
+ RegionPushedBytesDelta >> 10, Region->RegionBeg,
+ getRegionBaseByClassId(ClassId));
+ }
+
+ void getRegionFragmentationInfo(RegionInfo *Region, uptr ClassId,
+ ScopedString *Str) REQUIRES(Region->MMLock) {
+ const uptr BlockSize = getSizeByClassId(ClassId);
+ const uptr AllocatedUserEnd =
+ Region->MemMapInfo.AllocatedUser + Region->RegionBeg;
+
+ SinglyLinkedList<BatchGroupT> GroupsToRelease;
+ {
+ ScopedLock L(Region->FLLock);
+ GroupsToRelease = Region->FreeListInfo.BlockList;
+ Region->FreeListInfo.BlockList.clear();
+ }
+
+ FragmentationRecorder Recorder;
+ if (!GroupsToRelease.empty()) {
+ PageReleaseContext Context =
+ markFreeBlocks(Region, BlockSize, AllocatedUserEnd,
+ getCompactPtrBaseByClassId(ClassId), GroupsToRelease);
+ auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
+ releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
+
+ mergeGroupsToReleaseBack(Region, GroupsToRelease);
+ }
+
+ ScopedLock L(Region->FLLock);
+ const uptr PageSize = getPageSizeCached();
+ const uptr TotalBlocks = Region->MemMapInfo.AllocatedUser / BlockSize;
+ const uptr InUseBlocks =
+ Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks;
+ const uptr AllocatedPagesCount =
+ roundUp(Region->MemMapInfo.AllocatedUser, PageSize) / PageSize;
+ DCHECK_GE(AllocatedPagesCount, Recorder.getReleasedPagesCount());
+ const uptr InUsePages =
+ AllocatedPagesCount - Recorder.getReleasedPagesCount();
+ const uptr InUseBytes = InUsePages * PageSize;
+
+ uptr Integral;
+ uptr Fractional;
+ computePercentage(BlockSize * InUseBlocks, InUsePages * PageSize, &Integral,
+ &Fractional);
+ Str->append(" %02zu (%6zu): inuse/total blocks: %6zu/%6zu inuse/total "
+ "pages: %6zu/%6zu inuse bytes: %6zuK util: %3zu.%02zu%%\n",
+ ClassId, BlockSize, InUseBlocks, TotalBlocks, InUsePages,
+ AllocatedPagesCount, InUseBytes >> 10, Integral, Fractional);
+ }
+
+ NOINLINE uptr releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
+ ReleaseToOS ReleaseType = ReleaseToOS::Normal)
+ REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
+ const uptr BlockSize = getSizeByClassId(ClassId);
+ uptr BytesInFreeList;
+ const uptr AllocatedUserEnd =
+ Region->MemMapInfo.AllocatedUser + Region->RegionBeg;
+ SinglyLinkedList<BatchGroupT> GroupsToRelease;
+
+ {
+ ScopedLock L(Region->FLLock);
+
+ BytesInFreeList = Region->MemMapInfo.AllocatedUser -
+ (Region->FreeListInfo.PoppedBlocks -
+ Region->FreeListInfo.PushedBlocks) *
+ BlockSize;
+ if (UNLIKELY(BytesInFreeList == 0))
+        return 0;
+
+ // ==================================================================== //
+ // 1. Check if we have enough free blocks and if it's worth doing a page
+ // release.
+ // ==================================================================== //
+ if (ReleaseType != ReleaseToOS::ForceAll &&
+ !hasChanceToReleasePages(Region, BlockSize, BytesInFreeList,
+ ReleaseType)) {
+ return 0;
+ }
+
+ // ==================================================================== //
+ // 2. Determine which groups can release the pages. Use a heuristic to
+ // gather groups that are candidates for doing a release.
+ // ==================================================================== //
+ if (ReleaseType == ReleaseToOS::ForceAll) {
+ GroupsToRelease = Region->FreeListInfo.BlockList;
+ Region->FreeListInfo.BlockList.clear();
+ } else {
+ GroupsToRelease =
+ collectGroupsToRelease(Region, BlockSize, AllocatedUserEnd,
+ getCompactPtrBaseByClassId(ClassId));
+ }
+ if (GroupsToRelease.empty())
+ return 0;
+ }
+
+ // Note that we have extracted the `GroupsToRelease` from region freelist.
+ // It's safe to let pushBlocks()/popBlocks() access the remaining region
+ // freelist. In the steps 3 and 4, we will temporarily release the FLLock
+ // and lock it again before step 5.
+
+ // ==================================================================== //
+ // 3. Mark the free blocks in `GroupsToRelease` in the `PageReleaseContext`.
+ // Then we can tell which pages are in-use by querying
+ // `PageReleaseContext`.
+ // ==================================================================== //
+ PageReleaseContext Context =
+ markFreeBlocks(Region, BlockSize, AllocatedUserEnd,
+ getCompactPtrBaseByClassId(ClassId), GroupsToRelease);
+ if (UNLIKELY(!Context.hasBlockMarked())) {
+ mergeGroupsToReleaseBack(Region, GroupsToRelease);
+ return 0;
+ }
+
+ // ==================================================================== //
+ // 4. Release the unused physical pages back to the OS.
+ // ==================================================================== //
+ RegionReleaseRecorder<MemMapT> Recorder(&Region->MemMapInfo.MemMap,
+ Region->RegionBeg,
+ Context.getReleaseOffset());
+ auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
+ releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
+ if (Recorder.getReleasedRangesCount() > 0) {
+ Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
+ Region->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
+ Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
+ }
+ Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();
+
+ // ====================================================================== //
+ // 5. Merge the `GroupsToRelease` back to the freelist.
+ // ====================================================================== //
+ mergeGroupsToReleaseBack(Region, GroupsToRelease);
+
+ return Recorder.getReleasedBytes();
+ }
+
+ bool hasChanceToReleasePages(RegionInfo *Region, uptr BlockSize,
+ uptr BytesInFreeList, ReleaseToOS ReleaseType)
+ REQUIRES(Region->MMLock, Region->FLLock) {
+ DCHECK_GE(Region->FreeListInfo.PoppedBlocks,
+ Region->FreeListInfo.PushedBlocks);
+ const uptr PageSize = getPageSizeCached();
+
+ // Always update `BytesInFreeListAtLastCheckpoint` with the smallest value
+ // so that we won't underestimate the releasable pages. For example, the
+ // following is the region usage,
+ //
+ // BytesInFreeListAtLastCheckpoint AllocatedUser
+ // v v
+ // |--------------------------------------->
+ // ^ ^
+ // BytesInFreeList ReleaseThreshold
+ //
+    // In general, if we have collected enough bytes and the amount of free
+    // bytes meets the ReleaseThreshold, we will try to do a page release. If
+    // we don't update `BytesInFreeListAtLastCheckpoint` when the current
+    // `BytesInFreeList` is smaller, it may take longer to accumulate enough
+    // freed blocks because we miss the bytes between
+    // (BytesInFreeListAtLastCheckpoint - BytesInFreeList).
+ if (BytesInFreeList <=
+ Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
+ Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
+ }
+
+ const uptr RegionPushedBytesDelta =
+ BytesInFreeList - Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
+ if (RegionPushedBytesDelta < PageSize)
+ return false;
+
+    // Releasing smaller blocks is expensive, so we want to make sure that a
+    // significant number of bytes is free, and that a good number of batches
+    // has been pushed to the freelist, before attempting to release.
+ if (isSmallBlock(BlockSize) && ReleaseType == ReleaseToOS::Normal)
+ if (RegionPushedBytesDelta < Region->TryReleaseThreshold)
+ return false;
+
+ if (ReleaseType == ReleaseToOS::Normal) {
+ const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
+ if (IntervalMs < 0)
+ return false;
+
+      // The constant 8 here is selected from profiling some apps, where the
+      // number of unreleased pages in the large size classes is around 16
+      // pages or more. Half of that is chosen as a heuristic, which also
+      // avoids attempting a page release on every pushBlocks() call for large
+      // blocks.
+ const bool ByPassReleaseInterval =
+ isLargeBlock(BlockSize) && RegionPushedBytesDelta > 8 * PageSize;
+ if (!ByPassReleaseInterval) {
+ if (Region->ReleaseInfo.LastReleaseAtNs +
+ static_cast<u64>(IntervalMs) * 1000000 >
+ getMonotonicTimeFast()) {
+ // Memory was returned recently.
+ return false;
+ }
+ }
+ } // if (ReleaseType == ReleaseToOS::Normal)
+
+ return true;
+ }
+
+ SinglyLinkedList<BatchGroupT>
+ collectGroupsToRelease(RegionInfo *Region, const uptr BlockSize,
+ const uptr AllocatedUserEnd, const uptr CompactPtrBase)
+ REQUIRES(Region->MMLock, Region->FLLock) {
+ const uptr GroupSize = (1UL << GroupSizeLog);
+ const uptr PageSize = getPageSizeCached();
+ SinglyLinkedList<BatchGroupT> GroupsToRelease;
+
+ // We are examining each group and will take the minimum distance to the
+ // release threshold as the next Region::TryReleaseThreshold(). Note that if
+ // the size of free blocks has reached the release threshold, the distance
+ // to the next release will be PageSize * SmallerBlockReleasePageDelta. See
+ // the comment on `SmallerBlockReleasePageDelta` for more details.
+ uptr MinDistToThreshold = GroupSize;
+
+ for (BatchGroupT *BG = Region->FreeListInfo.BlockList.front(),
+ *Prev = nullptr;
+ BG != nullptr;) {
+ // Group boundary is always GroupSize-aligned from CompactPtr base. The
+ // layout of memory groups is like,
+ //
+ // (CompactPtrBase)
+ // #1 CompactPtrGroupBase #2 CompactPtrGroupBase ...
+ // | | |
+ // v v v
+ // +-----------------------+-----------------------+
+ // \ / \ /
+ // --- GroupSize --- --- GroupSize ---
+ //
+      // After decompacting the CompactPtrGroupBase, we expect the alignment
+      // property to hold as well.
+ const uptr BatchGroupBase =
+ decompactGroupBase(CompactPtrBase, BG->CompactPtrGroupBase);
+ DCHECK_LE(Region->RegionBeg, BatchGroupBase);
+ DCHECK_GE(AllocatedUserEnd, BatchGroupBase);
+ DCHECK_EQ((Region->RegionBeg - BatchGroupBase) % GroupSize, 0U);
+      // TransferBatches are pushed to the front of BG.Batches, so the first
+      // one may not be full.
+ const uptr NumBlocks = (BG->Batches.size() - 1) * BG->MaxCachedPerBatch +
+ BG->Batches.front()->getCount();
+ const uptr BytesInBG = NumBlocks * BlockSize;
+
+ if (BytesInBG <= BG->BytesInBGAtLastCheckpoint) {
+ BG->BytesInBGAtLastCheckpoint = BytesInBG;
+ Prev = BG;
+ BG = BG->Next;
+ continue;
+ }
+
+ const uptr PushedBytesDelta = BytesInBG - BG->BytesInBGAtLastCheckpoint;
+
+      // Given the randomness property, we try to release the pages only if the
+      // bytes used by free blocks exceed a certain proportion of the group
+      // size. Note that this heuristic only applies when all the space in a
+      // BatchGroup has been allocated.
+ if (isSmallBlock(BlockSize)) {
+ const uptr BatchGroupEnd = BatchGroupBase + GroupSize;
+ const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
+ ? GroupSize
+ : AllocatedUserEnd - BatchGroupBase;
+ const uptr ReleaseThreshold =
+ (AllocatedGroupSize * (100 - 1U - BlockSize / 16U)) / 100U;
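+        // A worked example with assumed numbers: for BlockSize = 64,
+        // BlockSize / 16U == 4, so ReleaseThreshold is 95% of the allocated
+        // group size; the smaller the block, the higher the density of free
+        // bytes required before a release is attempted.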
+ const bool HighDensity = BytesInBG >= ReleaseThreshold;
+ const bool MayHaveReleasedAll = NumBlocks >= (GroupSize / BlockSize);
+ // If all blocks in the group are released, we will do range marking
+ // which is fast. Otherwise, we will wait until we have accumulated
+ // a certain amount of free memory.
+ const bool ReachReleaseDelta =
+ MayHaveReleasedAll
+ ? true
+ : PushedBytesDelta >= PageSize * SmallerBlockReleasePageDelta;
+
+ if (!HighDensity) {
+ DCHECK_LE(BytesInBG, ReleaseThreshold);
+          // The following is the usage of a memory group,
+ //
+ // BytesInBG ReleaseThreshold
+ // / \ v
+ // +---+---------------------------+-----+
+ // | | | | |
+ // +---+---------------------------+-----+
+ // \ / ^
+ // PushedBytesDelta GroupEnd
+ MinDistToThreshold =
+ Min(MinDistToThreshold,
+ ReleaseThreshold - BytesInBG + PushedBytesDelta);
+ } else {
+          // If it reaches high density in this round, the next release attempt
+          // will be based on SmallerBlockReleasePageDelta.
+ MinDistToThreshold =
+ Min(MinDistToThreshold, PageSize * SmallerBlockReleasePageDelta);
+ }
+
+ if (!HighDensity || !ReachReleaseDelta) {
+ Prev = BG;
+ BG = BG->Next;
+ continue;
+ }
+ }
+
+ // If `BG` is the first BatchGroupT in the list, we only need to advance
+ // `BG` and call FreeListInfo.BlockList::pop_front(). No update is needed
+ // for `Prev`.
+ //
+ // (BG) (BG->Next)
+ // Prev Cur BG
+ // | | |
+ // v v v
+ // nil +--+ +--+
+ // |X | -> | | -> ...
+ // +--+ +--+
+ //
+ // Otherwise, `Prev` will be used to extract the `Cur` from the
+ // `FreeListInfo.BlockList`.
+ //
+ // (BG) (BG->Next)
+ // Prev Cur BG
+ // | | |
+ // v v v
+ // +--+ +--+ +--+
+ // | | -> |X | -> | | -> ...
+ // +--+ +--+ +--+
+ //
+ // After FreeListInfo.BlockList::extract(),
+ //
+ // Prev Cur BG
+ // | | |
+ // v v v
+ // +--+ +--+ +--+
+ // | |-+ |X | +->| | -> ...
+ // +--+ | +--+ | +--+
+ // +--------+
+ //
+ // Note that we need to advance before pushing this BatchGroup to
+ // GroupsToRelease because it's a destructive operation.
+
+ BatchGroupT *Cur = BG;
+ BG = BG->Next;
+
+ // Ideally, we may want to update this only after successful release.
+ // However, for smaller blocks, each block marking is a costly operation.
+ // Therefore, we update it earlier.
+ // TODO: Consider updating this after releasing pages if `ReleaseRecorder`
+ // can tell the released bytes in each group.
+ Cur->BytesInBGAtLastCheckpoint = BytesInBG;
+
+ if (Prev != nullptr)
+ Region->FreeListInfo.BlockList.extract(Prev, Cur);
+ else
+ Region->FreeListInfo.BlockList.pop_front();
+ GroupsToRelease.push_back(Cur);
+ }
+
+ // Only small blocks have the adaptive `TryReleaseThreshold`.
+ if (isSmallBlock(BlockSize)) {
+      // If MinDistToThreshold was not updated, that means each memory group
+      // had less than a page size worth of bytes pushed. In that case, just
+      // set it back to the default.
+ if (MinDistToThreshold == GroupSize)
+ MinDistToThreshold = PageSize * SmallerBlockReleasePageDelta;
+ Region->TryReleaseThreshold = MinDistToThreshold;
+ }
+
+ return GroupsToRelease;
+ }
+
+ PageReleaseContext
+ markFreeBlocks(RegionInfo *Region, const uptr BlockSize,
+ const uptr AllocatedUserEnd, const uptr CompactPtrBase,
+ SinglyLinkedList<BatchGroupT> &GroupsToRelease)
+ REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
+ const uptr GroupSize = (1UL << GroupSizeLog);
+ auto DecompactPtr = [CompactPtrBase](CompactPtrT CompactPtr) {
+ return decompactPtrInternal(CompactPtrBase, CompactPtr);
+ };
+
+ const uptr ReleaseBase = decompactGroupBase(
+ CompactPtrBase, GroupsToRelease.front()->CompactPtrGroupBase);
+ const uptr LastGroupEnd =
+ Min(decompactGroupBase(CompactPtrBase,
+ GroupsToRelease.back()->CompactPtrGroupBase) +
+ GroupSize,
+ AllocatedUserEnd);
+    // The last block may straddle the group boundary. Round up to BlockSize to
+    // get the exact range.
+ const uptr ReleaseEnd =
+ roundUpSlow(LastGroupEnd - Region->RegionBeg, BlockSize) +
+ Region->RegionBeg;
+ const uptr ReleaseRangeSize = ReleaseEnd - ReleaseBase;
+ const uptr ReleaseOffset = ReleaseBase - Region->RegionBeg;
+
+ PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
+ ReleaseRangeSize, ReleaseOffset);
+    // In the rare case that the PageMap allocation fails, we may not be able
+    // to do the page release.
+ if (UNLIKELY(!Context.ensurePageMapAllocated()))
+ return Context;
+
+ for (BatchGroupT &BG : GroupsToRelease) {
+ const uptr BatchGroupBase =
+ decompactGroupBase(CompactPtrBase, BG.CompactPtrGroupBase);
+ const uptr BatchGroupEnd = BatchGroupBase + GroupSize;
+ const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
+ ? GroupSize
+ : AllocatedUserEnd - BatchGroupBase;
+ const uptr BatchGroupUsedEnd = BatchGroupBase + AllocatedGroupSize;
+ const bool MayContainLastBlockInRegion =
+ BatchGroupUsedEnd == AllocatedUserEnd;
+ const bool BlockAlignedWithUsedEnd =
+ (BatchGroupUsedEnd - Region->RegionBeg) % BlockSize == 0;
+
+ uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
+ if (!BlockAlignedWithUsedEnd)
+ ++MaxContainedBlocks;
+
+ const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
+ BG.Batches.front()->getCount();
+
+ if (NumBlocks == MaxContainedBlocks) {
+ for (const auto &It : BG.Batches) {
+ if (&It != BG.Batches.front())
+ DCHECK_EQ(It.getCount(), BG.MaxCachedPerBatch);
+ for (u16 I = 0; I < It.getCount(); ++I)
+ DCHECK_EQ(compactPtrGroup(It.get(I)), BG.CompactPtrGroupBase);
+ }
+
+ Context.markRangeAsAllCounted(BatchGroupBase, BatchGroupUsedEnd,
+ Region->RegionBeg, /*RegionIndex=*/0,
+ Region->MemMapInfo.AllocatedUser);
+ } else {
+ DCHECK_LT(NumBlocks, MaxContainedBlocks);
+        // Note that we don't always visit all blocks in each BatchGroup, so we
+        // may miss the chance of releasing certain pages that cross
+        // BatchGroups.
+ Context.markFreeBlocksInRegion(
+ BG.Batches, DecompactPtr, Region->RegionBeg, /*RegionIndex=*/0,
+ Region->MemMapInfo.AllocatedUser, MayContainLastBlockInRegion);
+ }
+ }
+
+ DCHECK(Context.hasBlockMarked());
+
+ return Context;
+ }
+
+ void mergeGroupsToReleaseBack(RegionInfo *Region,
+ SinglyLinkedList<BatchGroupT> &GroupsToRelease)
+ REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
+ ScopedLock L(Region->FLLock);
+
+ // After merging two freelists, we may have redundant `BatchGroup`s that
+ // need to be recycled. The number of unused `BatchGroup`s is expected to be
+ // small. Pick a constant which is inferred from real programs.
+ constexpr uptr MaxUnusedSize = 8;
+ CompactPtrT Blocks[MaxUnusedSize];
+ u32 Idx = 0;
+ RegionInfo *BatchClassRegion = getRegionInfo(SizeClassMap::BatchClassId);
+    // We can't call pushBatchClassBlocks() to recycle the unused `BatchGroup`s
+    // when we are manipulating the freelist of `BatchClassRegion`. Instead, we
+    // should just push them back to the freelist when we merge two
+    // `BatchGroup`s. This logic hasn't been implemented because we don't yet
+    // support releasing pages in `BatchClassRegion`.
+ DCHECK_NE(BatchClassRegion, Region);
+
+ // Merge GroupsToRelease back to the Region::FreeListInfo.BlockList. Note
+ // that both `Region->FreeListInfo.BlockList` and `GroupsToRelease` are
+ // sorted.
+ for (BatchGroupT *BG = Region->FreeListInfo.BlockList.front(),
+ *Prev = nullptr;
+ ;) {
+ if (BG == nullptr || GroupsToRelease.empty()) {
+ if (!GroupsToRelease.empty())
+ Region->FreeListInfo.BlockList.append_back(&GroupsToRelease);
+ break;
+ }
+
+ DCHECK(!BG->Batches.empty());
+
+ if (BG->CompactPtrGroupBase <
+ GroupsToRelease.front()->CompactPtrGroupBase) {
+ Prev = BG;
+ BG = BG->Next;
+ continue;
+ }
+
+ BatchGroupT *Cur = GroupsToRelease.front();
+ TransferBatchT *UnusedTransferBatch = nullptr;
+ GroupsToRelease.pop_front();
+
+ if (BG->CompactPtrGroupBase == Cur->CompactPtrGroupBase) {
+ BG->PushedBlocks += Cur->PushedBlocks;
+ // We have updated `BatchGroup::BytesInBGAtLastCheckpoint` while
+ // collecting the `GroupsToRelease`.
+ BG->BytesInBGAtLastCheckpoint = Cur->BytesInBGAtLastCheckpoint;
+ const uptr MaxCachedPerBatch = BG->MaxCachedPerBatch;
+
+        // Note that the first TransferBatch in each of the two `Batches` lists
+        // may not be full, and only the first TransferBatch is allowed to be
+        // non-full. Thus we have to merge them before appending one list to
+        // the other.
+ if (Cur->Batches.front()->getCount() == MaxCachedPerBatch) {
+ BG->Batches.append_back(&Cur->Batches);
+ } else {
+ TransferBatchT *NonFullBatch = Cur->Batches.front();
+ Cur->Batches.pop_front();
+ const u16 NonFullBatchCount = NonFullBatch->getCount();
+ // The remaining Batches in `Cur` are full.
+ BG->Batches.append_back(&Cur->Batches);
+
+ if (BG->Batches.front()->getCount() == MaxCachedPerBatch) {
+ // Only 1 non-full TransferBatch, push it to the front.
+ BG->Batches.push_front(NonFullBatch);
+ } else {
+ const u16 NumBlocksToMove = static_cast<u16>(
+ Min(static_cast<u16>(MaxCachedPerBatch -
+ BG->Batches.front()->getCount()),
+ NonFullBatchCount));
+ BG->Batches.front()->appendFromTransferBatch(NonFullBatch,
+ NumBlocksToMove);
+ if (NonFullBatch->isEmpty())
+ UnusedTransferBatch = NonFullBatch;
+ else
+ BG->Batches.push_front(NonFullBatch);
+ }
+ }
+
+ const u32 NeededSlots = UnusedTransferBatch == nullptr ? 1U : 2U;
+ if (UNLIKELY(Idx + NeededSlots > MaxUnusedSize)) {
+ ScopedLock L(BatchClassRegion->FLLock);
+ pushBatchClassBlocks(BatchClassRegion, Blocks, Idx);
+ if (conditionVariableEnabled())
+ BatchClassRegion->FLLockCV.notifyAll(BatchClassRegion->FLLock);
+ Idx = 0;
+ }
+ Blocks[Idx++] =
+ compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(Cur));
+ if (UnusedTransferBatch) {
+ Blocks[Idx++] =
+ compactPtr(SizeClassMap::BatchClassId,
+ reinterpret_cast<uptr>(UnusedTransferBatch));
+ }
+ Prev = BG;
+ BG = BG->Next;
+ continue;
+ }
+
+      // At this point, `BG` is the first BatchGroup with a CompactPtrGroupBase
+      // larger than the first element in `GroupsToRelease`. We need to insert
+      // `GroupsToRelease::front()` (which is `Cur`) before `BG`.
+ //
+ // 1. If `Prev` is nullptr, we simply push `Cur` to the front of
+ // FreeListInfo.BlockList.
+ // 2. Otherwise, use `insert()` which inserts an element next to `Prev`.
+ //
+ // Afterwards, we don't need to advance `BG` because the order between
+ // `BG` and the new `GroupsToRelease::front()` hasn't been checked.
+ if (Prev == nullptr)
+ Region->FreeListInfo.BlockList.push_front(Cur);
+ else
+ Region->FreeListInfo.BlockList.insert(Prev, Cur);
+ DCHECK_EQ(Cur->Next, BG);
+ Prev = Cur;
+ }
+
+ if (Idx != 0) {
+ ScopedLock L(BatchClassRegion->FLLock);
+ pushBatchClassBlocks(BatchClassRegion, Blocks, Idx);
+ if (conditionVariableEnabled())
+ BatchClassRegion->FLLockCV.notifyAll(BatchClassRegion->FLLock);
+ }
+
+ if (SCUDO_DEBUG) {
+ BatchGroupT *Prev = Region->FreeListInfo.BlockList.front();
+ for (BatchGroupT *Cur = Prev->Next; Cur != nullptr;
+ Prev = Cur, Cur = Cur->Next) {
+ CHECK_LT(Prev->CompactPtrGroupBase, Cur->CompactPtrGroupBase);
+ }
+ }
+
+ if (conditionVariableEnabled())
+ Region->FLLockCV.notifyAll(Region->FLLock);
+ }
+
+  // The minimum size of pushed blocks at which we will try to release the
+  // pages in that size class.
+ uptr SmallerBlockReleasePageDelta = 0;
+ atomic_s32 ReleaseToOsIntervalMs = {};
+ alignas(SCUDO_CACHE_LINE_SIZE) RegionInfo RegionInfoArray[NumClasses];
+};
+
+} // namespace scudo
+
+#endif // SCUDO_PRIMARY64_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/quarantine.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/quarantine.h
new file mode 100644
index 000000000000..b5f8db0e87c2
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/quarantine.h
@@ -0,0 +1,309 @@
+//===-- quarantine.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_QUARANTINE_H_
+#define SCUDO_QUARANTINE_H_
+
+#include "list.h"
+#include "mutex.h"
+#include "string_utils.h"
+#include "thread_annotations.h"
+
+namespace scudo {
+
+struct QuarantineBatch {
+ // With the following count, a batch (and the header that protects it) occupy
+ // 4096 bytes on 32-bit platforms, and 8192 bytes on 64-bit.
+ static const u32 MaxCount = 1019;
+ QuarantineBatch *Next;
+ uptr Size;
+ u32 Count;
+ void *Batch[MaxCount];
+
+ void init(void *Ptr, uptr Size) {
+ Count = 1;
+ Batch[0] = Ptr;
+ this->Size = Size + sizeof(QuarantineBatch); // Account for the Batch Size.
+ }
+
+ // The total size of quarantined nodes recorded in this batch.
+ uptr getQuarantinedSize() const { return Size - sizeof(QuarantineBatch); }
+
+ void push_back(void *Ptr, uptr Size) {
+ DCHECK_LT(Count, MaxCount);
+ Batch[Count++] = Ptr;
+ this->Size += Size;
+ }
+
+ bool canMerge(const QuarantineBatch *const From) const {
+ return Count + From->Count <= MaxCount;
+ }
+
+ void merge(QuarantineBatch *const From) {
+ DCHECK_LE(Count + From->Count, MaxCount);
+ DCHECK_GE(Size, sizeof(QuarantineBatch));
+
+ for (uptr I = 0; I < From->Count; ++I)
+ Batch[Count + I] = From->Batch[I];
+ Count += From->Count;
+ Size += From->getQuarantinedSize();
+
+ From->Count = 0;
+ From->Size = sizeof(QuarantineBatch);
+ }
+
+ void shuffle(u32 State) { ::scudo::shuffle(Batch, Count, &State); }
+};
+
+static_assert(sizeof(QuarantineBatch) <= (1U << 13), ""); // 8Kb.
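+// A rough size check (a sketch; exact padding is ABI-dependent): on 64-bit,
+// Next (8) + Size (8) + Count (4) + 4 bytes of padding + Batch (1019 * 8 =
+// 8152) comes to 8176 bytes, which together with the allocator header stays
+// within the 8192 bytes mentioned above; the 32-bit layout similarly comes to
+// 4088 bytes.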
+
+// Per-thread cache of memory blocks.
+template <typename Callback> class QuarantineCache {
+public:
+ void init() { DCHECK_EQ(atomic_load_relaxed(&Size), 0U); }
+
+ // Total memory used, including internal accounting.
+ uptr getSize() const { return atomic_load_relaxed(&Size); }
+ // Memory used for internal accounting.
+ uptr getOverheadSize() const { return List.size() * sizeof(QuarantineBatch); }
+
+ void enqueue(Callback Cb, void *Ptr, uptr Size) {
+ if (List.empty() || List.back()->Count == QuarantineBatch::MaxCount) {
+ QuarantineBatch *B =
+ reinterpret_cast<QuarantineBatch *>(Cb.allocate(sizeof(*B)));
+ DCHECK(B);
+ B->init(Ptr, Size);
+ enqueueBatch(B);
+ } else {
+ List.back()->push_back(Ptr, Size);
+ addToSize(Size);
+ }
+ }
+
+ void transfer(QuarantineCache *From) {
+ List.append_back(&From->List);
+ addToSize(From->getSize());
+ atomic_store_relaxed(&From->Size, 0);
+ }
+
+ void enqueueBatch(QuarantineBatch *B) {
+ List.push_back(B);
+ addToSize(B->Size);
+ }
+
+ QuarantineBatch *dequeueBatch() {
+ if (List.empty())
+ return nullptr;
+ QuarantineBatch *B = List.front();
+ List.pop_front();
+ subFromSize(B->Size);
+ return B;
+ }
+
+ void mergeBatches(QuarantineCache *ToDeallocate) {
+ uptr ExtractedSize = 0;
+ QuarantineBatch *Current = List.front();
+ while (Current && Current->Next) {
+ if (Current->canMerge(Current->Next)) {
+ QuarantineBatch *Extracted = Current->Next;
+ // Move all the chunks into the current batch.
+ Current->merge(Extracted);
+ DCHECK_EQ(Extracted->Count, 0);
+ DCHECK_EQ(Extracted->Size, sizeof(QuarantineBatch));
+        // Remove the next batch from the list and account for its size.
+ List.extract(Current, Extracted);
+ ExtractedSize += Extracted->Size;
+ // Add it to deallocation list.
+ ToDeallocate->enqueueBatch(Extracted);
+ } else {
+ Current = Current->Next;
+ }
+ }
+ subFromSize(ExtractedSize);
+ }
+
+ void getStats(ScopedString *Str) const {
+ uptr BatchCount = 0;
+ uptr TotalOverheadBytes = 0;
+ uptr TotalBytes = 0;
+ uptr TotalQuarantineChunks = 0;
+ for (const QuarantineBatch &Batch : List) {
+ BatchCount++;
+ TotalBytes += Batch.Size;
+ TotalOverheadBytes += Batch.Size - Batch.getQuarantinedSize();
+ TotalQuarantineChunks += Batch.Count;
+ }
+ const uptr QuarantineChunksCapacity =
+ BatchCount * QuarantineBatch::MaxCount;
+ const uptr ChunksUsagePercent =
+ (QuarantineChunksCapacity == 0)
+ ? 0
+ : TotalQuarantineChunks * 100 / QuarantineChunksCapacity;
+ const uptr TotalQuarantinedBytes = TotalBytes - TotalOverheadBytes;
+ const uptr MemoryOverheadPercent =
+ (TotalQuarantinedBytes == 0)
+ ? 0
+ : TotalOverheadBytes * 100 / TotalQuarantinedBytes;
+ Str->append(
+ "Stats: Quarantine: batches: %zu; bytes: %zu (user: %zu); chunks: %zu "
+ "(capacity: %zu); %zu%% chunks used; %zu%% memory overhead\n",
+ BatchCount, TotalBytes, TotalQuarantinedBytes, TotalQuarantineChunks,
+ QuarantineChunksCapacity, ChunksUsagePercent, MemoryOverheadPercent);
+ }
+
+private:
+ SinglyLinkedList<QuarantineBatch> List;
+ atomic_uptr Size = {};
+
+ void addToSize(uptr add) { atomic_store_relaxed(&Size, getSize() + add); }
+ void subFromSize(uptr sub) { atomic_store_relaxed(&Size, getSize() - sub); }
+};
+
+// The callback interface is:
+// void Callback::recycle(Node *Ptr);
+// void *Callback::allocate(uptr Size);
+// void Callback::deallocate(void *Ptr);
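+//
+// A minimal illustrative Callback (hypothetical; `Allocator`, `Node` and
+// `ThreadLocalCache` below are placeholders, not names defined here) might
+// look like:
+//
+//   struct QuarantineCallback {
+//     void *allocate(uptr Size) { return Allocator.allocate(Size); }
+//     void deallocate(void *Ptr) { Allocator.deallocate(Ptr); }
+//     void recycle(Node *Ptr) { Allocator.deallocate(Ptr); }
+//   };
+//
+//   GlobalQuarantine<QuarantineCallback, Node> Quarantine;
+//   Quarantine.init(/*Size=*/1 << 20, /*CacheSize=*/1 << 16);
+//   Quarantine.put(&ThreadLocalCache, QuarantineCallback(), Ptr, Size);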
+template <typename Callback, typename Node> class GlobalQuarantine {
+public:
+ typedef QuarantineCache<Callback> CacheT;
+ using ThisT = GlobalQuarantine<Callback, Node>;
+
+ void init(uptr Size, uptr CacheSize) NO_THREAD_SAFETY_ANALYSIS {
+ DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
+ DCHECK_EQ(atomic_load_relaxed(&MaxSize), 0U);
+ DCHECK_EQ(atomic_load_relaxed(&MinSize), 0U);
+ DCHECK_EQ(atomic_load_relaxed(&MaxCacheSize), 0U);
+ // Thread local quarantine size can be zero only when global quarantine size
+ // is zero (it allows us to perform just one atomic read per put() call).
+ CHECK((Size == 0 && CacheSize == 0) || CacheSize != 0);
+
+ atomic_store_relaxed(&MaxSize, Size);
+ atomic_store_relaxed(&MinSize, Size / 10 * 9); // 90% of max size.
+ atomic_store_relaxed(&MaxCacheSize, CacheSize);
+
+ Cache.init();
+ }
+
+ uptr getMaxSize() const { return atomic_load_relaxed(&MaxSize); }
+ uptr getCacheSize() const { return atomic_load_relaxed(&MaxCacheSize); }
+
+ // This is supposed to be used in test only.
+ bool isEmpty() {
+ ScopedLock L(CacheMutex);
+ return Cache.getSize() == 0U;
+ }
+
+ void put(CacheT *C, Callback Cb, Node *Ptr, uptr Size) {
+ C->enqueue(Cb, Ptr, Size);
+ if (C->getSize() > getCacheSize())
+ drain(C, Cb);
+ }
+
+ void NOINLINE drain(CacheT *C, Callback Cb) EXCLUDES(CacheMutex) {
+ bool needRecycle = false;
+ {
+ ScopedLock L(CacheMutex);
+ Cache.transfer(C);
+ needRecycle = Cache.getSize() > getMaxSize();
+ }
+
+ if (needRecycle && RecycleMutex.tryLock())
+ recycle(atomic_load_relaxed(&MinSize), Cb);
+ }
+
+ void NOINLINE drainAndRecycle(CacheT *C, Callback Cb) EXCLUDES(CacheMutex) {
+ {
+ ScopedLock L(CacheMutex);
+ Cache.transfer(C);
+ }
+ RecycleMutex.lock();
+ recycle(0, Cb);
+ }
+
+ void getStats(ScopedString *Str) EXCLUDES(CacheMutex) {
+ ScopedLock L(CacheMutex);
+    // This assumes that the world is stopped, just like the allocator's
+    // printStats().
+ Cache.getStats(Str);
+ Str->append("Quarantine limits: global: %zuK; thread local: %zuK\n",
+ getMaxSize() >> 10, getCacheSize() >> 10);
+ }
+
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
+    // RecycleMutex must be locked first since we grab CacheMutex within
+    // recycle().
+ RecycleMutex.lock();
+ CacheMutex.lock();
+ }
+
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
+ CacheMutex.unlock();
+ RecycleMutex.unlock();
+ }
+
+private:
+ // Read-only data.
+ alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex CacheMutex;
+ CacheT Cache GUARDED_BY(CacheMutex);
+ alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecycleMutex;
+ atomic_uptr MinSize = {};
+ atomic_uptr MaxSize = {};
+ alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize = {};
+
+ void NOINLINE recycle(uptr MinSize, Callback Cb) RELEASE(RecycleMutex)
+ EXCLUDES(CacheMutex) {
+ CacheT Tmp;
+ Tmp.init();
+ {
+ ScopedLock L(CacheMutex);
+      // Go over the batches and merge partially filled ones to save some
+      // memory; otherwise the batches themselves (since the memory used by
+      // them is counted against the quarantine limit) can outweigh the actual
+      // user's quarantined chunks, which diminishes the purpose of the
+      // quarantine.
+ const uptr CacheSize = Cache.getSize();
+ const uptr OverheadSize = Cache.getOverheadSize();
+ DCHECK_GE(CacheSize, OverheadSize);
+      // Do the merge only when the overhead exceeds this predefined limit
+      // (might require some tuning). It saves us a merge attempt when the
+      // quarantine's batch list is unlikely to contain batches suitable for
+      // merging.
+ constexpr uptr OverheadThresholdPercents = 100;
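+      // With OverheadThresholdPercents == 100, the condition below reduces to
+      // OverheadSize > CacheSize - OverheadSize, i.e. we only merge when the
+      // batch bookkeeping outweighs the bytes actually quarantined.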
+ if (CacheSize > OverheadSize &&
+ OverheadSize * (100 + OverheadThresholdPercents) >
+ CacheSize * OverheadThresholdPercents) {
+ Cache.mergeBatches(&Tmp);
+ }
+ // Extract enough chunks from the quarantine to get below the max
+ // quarantine size and leave some leeway for the newly quarantined chunks.
+ while (Cache.getSize() > MinSize)
+ Tmp.enqueueBatch(Cache.dequeueBatch());
+ }
+ RecycleMutex.unlock();
+ doRecycle(&Tmp, Cb);
+ }
+
+ void NOINLINE doRecycle(CacheT *C, Callback Cb) {
+ while (QuarantineBatch *B = C->dequeueBatch()) {
+ const u32 Seed = static_cast<u32>(
+ (reinterpret_cast<uptr>(B) ^ reinterpret_cast<uptr>(C)) >> 4);
+ B->shuffle(Seed);
+ constexpr uptr NumberOfPrefetch = 8UL;
+ CHECK(NumberOfPrefetch <= ARRAY_SIZE(B->Batch));
+ for (uptr I = 0; I < NumberOfPrefetch; I++)
+ PREFETCH(B->Batch[I]);
+ for (uptr I = 0, Count = B->Count; I < Count; I++) {
+ if (I + NumberOfPrefetch < Count)
+ PREFETCH(B->Batch[I + NumberOfPrefetch]);
+ Cb.recycle(reinterpret_cast<Node *>(B->Batch[I]));
+ }
+ Cb.deallocate(B);
+ }
+ }
+};
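+
+// Illustrative sketch: the Callback type threaded through put()/drain()/
+// recycle() above is expected to provide three operations. A hypothetical
+// minimal shape (placeholder name, for exposition only) would be:
+//
+//   struct QuarantineCallback {
+//     void recycle(Node *Ptr);    // return the quarantined chunk for reuse
+//     void *allocate(uptr Size);  // backing storage for a QuarantineBatch
+//     void deallocate(void *Ptr); // release a drained QuarantineBatch
+//   };
+//
+// put() enqueues into the per-thread cache and drains into the global cache
+// once the local size exceeds getCacheSize(); recycle() then runs under
+// RecycleMutex and hands each chunk back through Cb.recycle().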
+
+} // namespace scudo
+
+#endif // SCUDO_QUARANTINE_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.cpp
new file mode 100644
index 000000000000..875a2b0c1c57
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.cpp
@@ -0,0 +1,17 @@
+//===-- release.cpp ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "release.h"
+
+namespace scudo {
+
+BufferPool<RegionPageMap::StaticBufferCount,
+ RegionPageMap::StaticBufferNumElements>
+ RegionPageMap::Buffers;
+
+} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.h
new file mode 100644
index 000000000000..b6f76a4d2058
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.h
@@ -0,0 +1,701 @@
+//===-- release.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_RELEASE_H_
+#define SCUDO_RELEASE_H_
+
+#include "common.h"
+#include "list.h"
+#include "mem_map.h"
+#include "mutex.h"
+#include "thread_annotations.h"
+
+namespace scudo {
+
+template <typename MemMapT> class RegionReleaseRecorder {
+public:
+ RegionReleaseRecorder(MemMapT *RegionMemMap, uptr Base, uptr Offset = 0)
+ : RegionMemMap(RegionMemMap), Base(Base), Offset(Offset) {}
+
+ uptr getReleasedRangesCount() const { return ReleasedRangesCount; }
+
+ uptr getReleasedBytes() const { return ReleasedBytes; }
+
+ uptr getBase() const { return Base; }
+
+  // Releases the [From, To) range of pages back to the OS. Note that `From`
+  // and `To` are offsets from `Base` + `Offset`.
+ void releasePageRangeToOS(uptr From, uptr To) {
+ const uptr Size = To - From;
+ RegionMemMap->releasePagesToOS(getBase() + Offset + From, Size);
+ ReleasedRangesCount++;
+ ReleasedBytes += Size;
+ }
+
+private:
+ uptr ReleasedRangesCount = 0;
+ uptr ReleasedBytes = 0;
+ MemMapT *RegionMemMap = nullptr;
+ uptr Base = 0;
+ // The release offset from Base. This is used when we know a given range after
+ // Base will not be released.
+ uptr Offset = 0;
+};
+
+class ReleaseRecorder {
+public:
+ ReleaseRecorder(uptr Base, uptr Offset = 0, MapPlatformData *Data = nullptr)
+ : Base(Base), Offset(Offset), Data(Data) {}
+
+ uptr getReleasedRangesCount() const { return ReleasedRangesCount; }
+
+ uptr getReleasedBytes() const { return ReleasedBytes; }
+
+ uptr getBase() const { return Base; }
+
+  // Releases the [From, To) range of pages back to the OS.
+ void releasePageRangeToOS(uptr From, uptr To) {
+ const uptr Size = To - From;
+ releasePagesToOS(Base, From + Offset, Size, Data);
+ ReleasedRangesCount++;
+ ReleasedBytes += Size;
+ }
+
+private:
+ uptr ReleasedRangesCount = 0;
+ uptr ReleasedBytes = 0;
+  // The starting address to release. Note that we may want to fold (Base +
+  // Offset) into a new Base. However, the Base is retrieved from
+  // `MapPlatformData` on Fuchsia, which would not be aware of the offset.
+  // Therefore, store them separately so this works on all platforms.
+ uptr Base = 0;
+ // The release offset from Base. This is used when we know a given range after
+ // Base will not be released.
+ uptr Offset = 0;
+ MapPlatformData *Data = nullptr;
+};
+
+class FragmentationRecorder {
+public:
+ FragmentationRecorder() = default;
+
+ uptr getReleasedPagesCount() const { return ReleasedPagesCount; }
+
+ void releasePageRangeToOS(uptr From, uptr To) {
+ DCHECK_EQ((To - From) % getPageSizeCached(), 0U);
+ ReleasedPagesCount += (To - From) / getPageSizeCached();
+ }
+
+private:
+ uptr ReleasedPagesCount = 0;
+};
+
+// A buffer pool which holds a fixed number of static buffers of `uptr` elements
+// for fast buffer allocation. If the request size is greater than
+// `StaticBufferNumElements` or if all the static buffers are in use, it'll
+// delegate the allocation to map().
+template <uptr StaticBufferCount, uptr StaticBufferNumElements>
+class BufferPool {
+public:
+  // Preserve 1 bit in the `Mask` so that we don't need to do a zero check
+  // while extracting the least significant bit from the `Mask`.
+ static_assert(StaticBufferCount < SCUDO_WORDSIZE, "");
+ static_assert(isAligned(StaticBufferNumElements * sizeof(uptr),
+ SCUDO_CACHE_LINE_SIZE),
+ "");
+
+ struct Buffer {
+ // Pointer to the buffer's memory, or nullptr if no buffer was allocated.
+ uptr *Data = nullptr;
+
+ // The index of the underlying static buffer, or StaticBufferCount if this
+ // buffer was dynamically allocated. This value is initially set to a poison
+ // value to aid debugging.
+ uptr BufferIndex = ~static_cast<uptr>(0);
+
+ // Only valid if BufferIndex == StaticBufferCount.
+ MemMapT MemMap = {};
+ };
+
+  // Return a zero-initialized buffer which can contain at least the given
+  // number of elements, or a Buffer with a null Data pointer on failure.
+ Buffer getBuffer(const uptr NumElements) {
+ if (UNLIKELY(NumElements > StaticBufferNumElements))
+ return getDynamicBuffer(NumElements);
+
+ uptr index;
+ {
+      // TODO: In general, we expect this operation to be fast, so the waiting
+      // thread won't be put to sleep. The HybridMutex does implement
+      // busy-waiting, but we may want to review the performance and see if we
+      // need an explicit spin lock here.
+ ScopedLock L(Mutex);
+ index = getLeastSignificantSetBitIndex(Mask);
+ if (index < StaticBufferCount)
+ Mask ^= static_cast<uptr>(1) << index;
+ }
+
+ if (index >= StaticBufferCount)
+ return getDynamicBuffer(NumElements);
+
+ Buffer Buf;
+ Buf.Data = &RawBuffer[index * StaticBufferNumElements];
+ Buf.BufferIndex = index;
+ memset(Buf.Data, 0, StaticBufferNumElements * sizeof(uptr));
+ return Buf;
+ }
+
+ void releaseBuffer(Buffer Buf) {
+ DCHECK_NE(Buf.Data, nullptr);
+ DCHECK_LE(Buf.BufferIndex, StaticBufferCount);
+ if (Buf.BufferIndex != StaticBufferCount) {
+ ScopedLock L(Mutex);
+ DCHECK_EQ((Mask & (static_cast<uptr>(1) << Buf.BufferIndex)), 0U);
+ Mask |= static_cast<uptr>(1) << Buf.BufferIndex;
+ } else {
+ Buf.MemMap.unmap(Buf.MemMap.getBase(), Buf.MemMap.getCapacity());
+ }
+ }
+
+ bool isStaticBufferTestOnly(const Buffer &Buf) {
+ DCHECK_NE(Buf.Data, nullptr);
+ DCHECK_LE(Buf.BufferIndex, StaticBufferCount);
+ return Buf.BufferIndex != StaticBufferCount;
+ }
+
+private:
+ Buffer getDynamicBuffer(const uptr NumElements) {
+    // When using a heap-based buffer, precommit the pages backing the
+    // Vmar by passing the |MAP_PRECOMMIT| flag. This allows an optimization
+    // where page fault exceptions are skipped as the allocated memory
+    // is accessed. So far, this is only enabled on Fuchsia; it hasn't shown a
+    // performance benefit on other platforms.
+ const uptr MmapFlags = MAP_ALLOWNOMEM | (SCUDO_FUCHSIA ? MAP_PRECOMMIT : 0);
+ const uptr MappedSize =
+ roundUp(NumElements * sizeof(uptr), getPageSizeCached());
+ Buffer Buf;
+ if (Buf.MemMap.map(/*Addr=*/0, MappedSize, "scudo:counters", MmapFlags)) {
+ Buf.Data = reinterpret_cast<uptr *>(Buf.MemMap.getBase());
+ Buf.BufferIndex = StaticBufferCount;
+ }
+ return Buf;
+ }
+
+ HybridMutex Mutex;
+  // A '1' bit means the buffer at that index is free; a '0' bit means it is in use.
+ uptr Mask GUARDED_BY(Mutex) = ~static_cast<uptr>(0);
+ uptr RawBuffer[StaticBufferCount * StaticBufferNumElements] GUARDED_BY(Mutex);
+};
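+
+// Illustrative usage sketch (parameter values are arbitrary): a pool with 2
+// static buffers of 512 uptr elements serves requests from the static storage
+// when a buffer is free and falls back to map() for larger requests or when
+// both static buffers are taken.
+//
+//   BufferPool<2, 512> Pool;
+//   BufferPool<2, 512>::Buffer Buf = Pool.getBuffer(/*NumElements=*/128);
+//   if (Buf.Data != nullptr) {
+//     // Buf.Data[0 .. 127] is zero-initialized and ready to use.
+//     Pool.releaseBuffer(Buf);
+//   }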
+
+// A RegionPageMap is used to record the usage of pages in the regions. It
+// implements a packed array of Counters. Each counter occupies 2^N bits,
+// enough to store the counter's MaxValue. The constructor tries to use a
+// static buffer first, and if that fails (the buffer is too small or already
+// locked), it allocates the required Buffer via map(). The caller is expected
+// to check whether the initialization was successful by checking the
+// isAllocated() result. For performance's sake, none of the accessors check
+// the validity of the arguments; it is assumed that Index is always in the
+// [0, N) range and that the value is not incremented past MaxValue.
+class RegionPageMap {
+public:
+ RegionPageMap()
+ : Regions(0), NumCounters(0), CounterSizeBitsLog(0), CounterMask(0),
+ PackingRatioLog(0), BitOffsetMask(0), SizePerRegion(0),
+ BufferNumElements(0) {}
+ RegionPageMap(uptr NumberOfRegions, uptr CountersPerRegion, uptr MaxValue) {
+ reset(NumberOfRegions, CountersPerRegion, MaxValue);
+ }
+ ~RegionPageMap() {
+ if (!isAllocated())
+ return;
+ Buffers.releaseBuffer(Buffer);
+ Buffer = {};
+ }
+
+  // The lock on the `StaticBuffer` is acquired conditionally and there's no
+  // easy way to specify the thread-safety attribute properly in the current
+  // code structure. Besides, it's the only place we may want to check thread
+  // safety. Therefore, it's fine to bypass the thread-safety analysis for now.
+ void reset(uptr NumberOfRegion, uptr CountersPerRegion, uptr MaxValue) {
+ DCHECK_GT(NumberOfRegion, 0);
+ DCHECK_GT(CountersPerRegion, 0);
+ DCHECK_GT(MaxValue, 0);
+
+ Regions = NumberOfRegion;
+ NumCounters = CountersPerRegion;
+
+ constexpr uptr MaxCounterBits = sizeof(*Buffer.Data) * 8UL;
+    // Rounding the counter storage size up to a power of two allows using bit
+    // shifts to calculate a particular counter's index and offset.
+ const uptr CounterSizeBits =
+ roundUpPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
+ DCHECK_LE(CounterSizeBits, MaxCounterBits);
+ CounterSizeBitsLog = getLog2(CounterSizeBits);
+ CounterMask = ~(static_cast<uptr>(0)) >> (MaxCounterBits - CounterSizeBits);
+
+ const uptr PackingRatio = MaxCounterBits >> CounterSizeBitsLog;
+ DCHECK_GT(PackingRatio, 0);
+ PackingRatioLog = getLog2(PackingRatio);
+ BitOffsetMask = PackingRatio - 1;
+
+ SizePerRegion =
+ roundUp(NumCounters, static_cast<uptr>(1U) << PackingRatioLog) >>
+ PackingRatioLog;
+ BufferNumElements = SizePerRegion * Regions;
+ Buffer = Buffers.getBuffer(BufferNumElements);
+ }
+
+ bool isAllocated() const { return Buffer.Data != nullptr; }
+
+ uptr getCount() const { return NumCounters; }
+
+ uptr get(uptr Region, uptr I) const {
+ DCHECK_LT(Region, Regions);
+ DCHECK_LT(I, NumCounters);
+ const uptr Index = I >> PackingRatioLog;
+ const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
+ return (Buffer.Data[Region * SizePerRegion + Index] >> BitOffset) &
+ CounterMask;
+ }
+
+ void inc(uptr Region, uptr I) const {
+ DCHECK_LT(get(Region, I), CounterMask);
+ const uptr Index = I >> PackingRatioLog;
+ const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
+ DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
+ DCHECK_EQ(isAllCounted(Region, I), false);
+ Buffer.Data[Region * SizePerRegion + Index] += static_cast<uptr>(1U)
+ << BitOffset;
+ }
+
+ void incN(uptr Region, uptr I, uptr N) const {
+ DCHECK_GT(N, 0U);
+ DCHECK_LE(N, CounterMask);
+ DCHECK_LE(get(Region, I), CounterMask - N);
+ const uptr Index = I >> PackingRatioLog;
+ const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
+ DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
+ DCHECK_EQ(isAllCounted(Region, I), false);
+ Buffer.Data[Region * SizePerRegion + Index] += N << BitOffset;
+ }
+
+ void incRange(uptr Region, uptr From, uptr To) const {
+ DCHECK_LE(From, To);
+ const uptr Top = Min(To + 1, NumCounters);
+ for (uptr I = From; I < Top; I++)
+ inc(Region, I);
+ }
+
+  // Set the counter to the max value. Note that the max number of blocks in a
+  // page may vary. To provide an easier way to tell whether all the blocks are
+  // counted for different pages, set them to the same max value to denote the
+  // all-counted status.
+ void setAsAllCounted(uptr Region, uptr I) const {
+ DCHECK_LE(get(Region, I), CounterMask);
+ const uptr Index = I >> PackingRatioLog;
+ const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
+ DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
+ Buffer.Data[Region * SizePerRegion + Index] |= CounterMask << BitOffset;
+ }
+ void setAsAllCountedRange(uptr Region, uptr From, uptr To) const {
+ DCHECK_LE(From, To);
+ const uptr Top = Min(To + 1, NumCounters);
+ for (uptr I = From; I < Top; I++)
+ setAsAllCounted(Region, I);
+ }
+
+ bool updateAsAllCountedIf(uptr Region, uptr I, uptr MaxCount) {
+ const uptr Count = get(Region, I);
+ if (Count == CounterMask)
+ return true;
+ if (Count == MaxCount) {
+ setAsAllCounted(Region, I);
+ return true;
+ }
+ return false;
+ }
+ bool isAllCounted(uptr Region, uptr I) const {
+ return get(Region, I) == CounterMask;
+ }
+
+ uptr getBufferNumElements() const { return BufferNumElements; }
+
+private:
+ // We may consider making this configurable if there are cases which may
+ // benefit from this.
+ static const uptr StaticBufferCount = 2U;
+ static const uptr StaticBufferNumElements = 512U;
+ using BufferPoolT = BufferPool<StaticBufferCount, StaticBufferNumElements>;
+ static BufferPoolT Buffers;
+
+ uptr Regions;
+ uptr NumCounters;
+ uptr CounterSizeBitsLog;
+ uptr CounterMask;
+ uptr PackingRatioLog;
+ uptr BitOffsetMask;
+
+ uptr SizePerRegion;
+ uptr BufferNumElements;
+ BufferPoolT::Buffer Buffer;
+};
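+
+// Worked example (illustrative, 64-bit words): with MaxValue = 10 the counter
+// width is roundUpPowerOfTwo(4) = 4 bits, so CounterMask = 0xF and one uptr
+// word packs 64 / 4 = 16 counters (PackingRatioLog = 4, BitOffsetMask = 15).
+// With NumCounters = 100 per region, SizePerRegion = roundUp(100, 16) / 16 = 7
+// words. get()/inc() then locate a counter with a shift and a mask:
+//
+//   Index     = I >> PackingRatioLog;                      // which word
+//   BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog; // which 4-bit slot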
+
+template <class ReleaseRecorderT> class FreePagesRangeTracker {
+public:
+ explicit FreePagesRangeTracker(ReleaseRecorderT &Recorder)
+ : Recorder(Recorder), PageSizeLog(getLog2(getPageSizeCached())) {}
+
+ void processNextPage(bool Released) {
+ if (Released) {
+ if (!InRange) {
+ CurrentRangeStatePage = CurrentPage;
+ InRange = true;
+ }
+ } else {
+ closeOpenedRange();
+ }
+ CurrentPage++;
+ }
+
+ void skipPages(uptr N) {
+ closeOpenedRange();
+ CurrentPage += N;
+ }
+
+ void finish() { closeOpenedRange(); }
+
+private:
+ void closeOpenedRange() {
+ if (InRange) {
+ Recorder.releasePageRangeToOS((CurrentRangeStatePage << PageSizeLog),
+ (CurrentPage << PageSizeLog));
+ InRange = false;
+ }
+ }
+
+ ReleaseRecorderT &Recorder;
+ const uptr PageSizeLog;
+ bool InRange = false;
+ uptr CurrentPage = 0;
+ uptr CurrentRangeStatePage = 0;
+};
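+
+// Illustrative example (assuming 4 KB pages and a ReleaseRecorder named
+// Recorder): feeding the tracker the released states {true, true, false, true}
+// reports two ranges, [0, 2 * 4096) when the non-released page closes the
+// first range, and [3 * 4096, 4 * 4096) when finish() closes the trailing one.
+//
+//   FreePagesRangeTracker<ReleaseRecorder> Tracker(Recorder);
+//   for (const bool Released : {true, true, false, true})
+//     Tracker.processNextPage(Released);
+//   Tracker.finish();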
+
+struct PageReleaseContext {
+ PageReleaseContext(uptr BlockSize, uptr NumberOfRegions, uptr ReleaseSize,
+ uptr ReleaseOffset = 0)
+ : BlockSize(BlockSize), NumberOfRegions(NumberOfRegions) {
+ PageSize = getPageSizeCached();
+ if (BlockSize <= PageSize) {
+ if (PageSize % BlockSize == 0) {
+        // Same number of chunks per page, no crossovers.
+ FullPagesBlockCountMax = PageSize / BlockSize;
+ SameBlockCountPerPage = true;
+ } else if (BlockSize % (PageSize % BlockSize) == 0) {
+ // Some chunks are crossing page boundaries, which means that the page
+ // contains one or two partial chunks, but all pages contain the same
+ // number of chunks.
+ FullPagesBlockCountMax = PageSize / BlockSize + 1;
+ SameBlockCountPerPage = true;
+ } else {
+ // Some chunks are crossing page boundaries, which means that the page
+ // contains one or two partial chunks.
+ FullPagesBlockCountMax = PageSize / BlockSize + 2;
+ SameBlockCountPerPage = false;
+ }
+ } else {
+ if (BlockSize % PageSize == 0) {
+        // One chunk covers multiple pages, no crossovers.
+ FullPagesBlockCountMax = 1;
+ SameBlockCountPerPage = true;
+ } else {
+        // One chunk covers multiple pages, and some chunks cross page
+        // boundaries. Some pages contain one chunk, some contain two.
+ FullPagesBlockCountMax = 2;
+ SameBlockCountPerPage = false;
+ }
+ }
+
+ // TODO: For multiple regions, it's more complicated to support partial
+ // region marking (which includes the complexity of how to handle the last
+ // block in a region). We may consider this after markFreeBlocks() accepts
+ // only free blocks from the same region.
+ if (NumberOfRegions != 1)
+ DCHECK_EQ(ReleaseOffset, 0U);
+
+ PagesCount = roundUp(ReleaseSize, PageSize) / PageSize;
+ PageSizeLog = getLog2(PageSize);
+ ReleasePageOffset = ReleaseOffset >> PageSizeLog;
+ }
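+
+  // Worked example (illustrative, PageSize = 4096): a BlockSize of 64 divides
+  // the page evenly, so FullPagesBlockCountMax = 64 and SameBlockCountPerPage
+  // is true. With BlockSize = 48, 4096 % 48 = 16 and 48 % 16 == 0, so every
+  // page holds at most 4096 / 48 + 1 = 86 blocks (one of them partial). With
+  // BlockSize = 96, 4096 % 96 = 64 and 96 % 64 != 0, so a page may hold up to
+  // 4096 / 96 + 2 = 44 blocks and the count varies from page to page.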
+
+ // PageMap is lazily allocated when markFreeBlocks() is invoked.
+ bool hasBlockMarked() const {
+ return PageMap.isAllocated();
+ }
+
+ bool ensurePageMapAllocated() {
+ if (PageMap.isAllocated())
+ return true;
+ PageMap.reset(NumberOfRegions, PagesCount, FullPagesBlockCountMax);
+ // TODO: Log some message when we fail on PageMap allocation.
+ return PageMap.isAllocated();
+ }
+
+  // Mark all the blocks in the given range [From, To). Instead of visiting all
+  // the blocks, we will just mark the pages as all counted. Note that `From`
+  // and `To` have to be page aligned, with one exception: if `To` is equal to
+  // the RegionSize, it doesn't need to be page aligned.
+ bool markRangeAsAllCounted(uptr From, uptr To, uptr Base,
+ const uptr RegionIndex, const uptr RegionSize) {
+ DCHECK_LT(From, To);
+ DCHECK_LE(To, Base + RegionSize);
+ DCHECK_EQ(From % PageSize, 0U);
+ DCHECK_LE(To - From, RegionSize);
+
+ if (!ensurePageMapAllocated())
+ return false;
+
+ uptr FromInRegion = From - Base;
+ uptr ToInRegion = To - Base;
+ uptr FirstBlockInRange = roundUpSlow(FromInRegion, BlockSize);
+
+    // The straddling block sits across the entire range.
+ if (FirstBlockInRange >= ToInRegion)
+ return true;
+
+    // The first block may not sit at the first page in the range; move
+    // `FromInRegion` to the page containing the first block.
+ FromInRegion = roundDown(FirstBlockInRange, PageSize);
+
+    // When the first block is not aligned to the range boundary, there is a
+    // block sitting across `From`, which looks like:
+ //
+ // From To
+ // V V
+ // +-----------------------------------------------+
+ // +-----+-----+-----+-----+
+ // | | | | | ...
+ // +-----+-----+-----+-----+
+ // |- first page -||- second page -||- ...
+ //
+    // Therefore, we can't just mark the first page as all counted. Instead, we
+    // increment the number of blocks in the first page in the page map and
+    // then round `From` up to the next page.
+ if (FirstBlockInRange != FromInRegion) {
+ DCHECK_GT(FromInRegion + PageSize, FirstBlockInRange);
+ uptr NumBlocksInFirstPage =
+ (FromInRegion + PageSize - FirstBlockInRange + BlockSize - 1) /
+ BlockSize;
+ PageMap.incN(RegionIndex, getPageIndex(FromInRegion),
+ NumBlocksInFirstPage);
+ FromInRegion = roundUp(FromInRegion + 1, PageSize);
+ }
+
+ uptr LastBlockInRange = roundDownSlow(ToInRegion - 1, BlockSize);
+
+    // Note that LastBlockInRange may be smaller than `FromInRegion` at this
+    // point because the range may contain only one block.
+
+ // When the last block sits across `To`, we can't just mark the pages
+ // occupied by the last block as all counted. Instead, we increment the
+ // counters of those pages by 1. The exception is that if it's the last
+ // block in the region, it's fine to mark those pages as all counted.
+ if (LastBlockInRange + BlockSize != RegionSize) {
+ DCHECK_EQ(ToInRegion % PageSize, 0U);
+ // The case below is like,
+ //
+ // From To
+ // V V
+ // +----------------------------------------+
+ // +-----+-----+-----+-----+
+ // | | | | | ...
+ // +-----+-----+-----+-----+
+ // ... -||- last page -||- next page -|
+ //
+      // The last block is not aligned to `To`, so we need to increment the
+      // counter of the `next page` by 1.
+ if (LastBlockInRange + BlockSize != ToInRegion) {
+ PageMap.incRange(RegionIndex, getPageIndex(ToInRegion),
+ getPageIndex(LastBlockInRange + BlockSize - 1));
+ }
+ } else {
+ ToInRegion = RegionSize;
+ }
+
+    // After handling the first page and the last block, it's safe to mark any
+    // page within the range [From, To) as all counted.
+ if (FromInRegion < ToInRegion) {
+ PageMap.setAsAllCountedRange(RegionIndex, getPageIndex(FromInRegion),
+ getPageIndex(ToInRegion - 1));
+ }
+
+ return true;
+ }
+
+ template <class TransferBatchT, typename DecompactPtrT>
+ bool markFreeBlocksInRegion(const IntrusiveList<TransferBatchT> &FreeList,
+ DecompactPtrT DecompactPtr, const uptr Base,
+ const uptr RegionIndex, const uptr RegionSize,
+ bool MayContainLastBlockInRegion) {
+ if (!ensurePageMapAllocated())
+ return false;
+
+ if (MayContainLastBlockInRegion) {
+ const uptr LastBlockInRegion =
+ ((RegionSize / BlockSize) - 1U) * BlockSize;
+      // The last block in a region may not use the entire page, so we mark the
+      // following "pretend" memory block(s) as free in advance.
+ //
+ // Region Boundary
+ // v
+ // -----+-----------------------+
+ // | Last Page | <- Rounded Region Boundary
+ // -----+-----------------------+
+ // |-----||- trailing blocks -|
+ // ^
+ // last block
+ const uptr RoundedRegionSize = roundUp(RegionSize, PageSize);
+ const uptr TrailingBlockBase = LastBlockInRegion + BlockSize;
+ // If the difference between `RoundedRegionSize` and
+ // `TrailingBlockBase` is larger than a page, that implies the reported
+ // `RegionSize` may not be accurate.
+ DCHECK_LT(RoundedRegionSize - TrailingBlockBase, PageSize);
+
+      // Only the last page touched by the last block needs the trailing blocks
+      // marked. Note that if the last "pretend" block straddles the boundary,
+      // we still have to count it so that the logic of counting the number of
+      // blocks in a page stays consistent.
+ uptr NumTrailingBlocks =
+ (roundUpSlow(RoundedRegionSize - TrailingBlockBase, BlockSize) +
+ BlockSize - 1) /
+ BlockSize;
+ if (NumTrailingBlocks > 0) {
+ PageMap.incN(RegionIndex, getPageIndex(TrailingBlockBase),
+ NumTrailingBlocks);
+ }
+ }
+
+ // Iterate over free chunks and count how many free chunks affect each
+ // allocated page.
+ if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
+ // Each chunk affects one page only.
+ for (const auto &It : FreeList) {
+ for (u16 I = 0; I < It.getCount(); I++) {
+ const uptr PInRegion = DecompactPtr(It.get(I)) - Base;
+ DCHECK_LT(PInRegion, RegionSize);
+ PageMap.inc(RegionIndex, getPageIndex(PInRegion));
+ }
+ }
+ } else {
+ // In all other cases chunks might affect more than one page.
+ DCHECK_GE(RegionSize, BlockSize);
+ for (const auto &It : FreeList) {
+ for (u16 I = 0; I < It.getCount(); I++) {
+ const uptr PInRegion = DecompactPtr(It.get(I)) - Base;
+ PageMap.incRange(RegionIndex, getPageIndex(PInRegion),
+ getPageIndex(PInRegion + BlockSize - 1));
+ }
+ }
+ }
+
+ return true;
+ }
+
+ uptr getPageIndex(uptr P) { return (P >> PageSizeLog) - ReleasePageOffset; }
+ uptr getReleaseOffset() { return ReleasePageOffset << PageSizeLog; }
+
+ uptr BlockSize;
+ uptr NumberOfRegions;
+  // For partial region marking, some pages at the front don't need to be
+  // counted.
+ uptr ReleasePageOffset;
+ uptr PageSize;
+ uptr PagesCount;
+ uptr PageSizeLog;
+ uptr FullPagesBlockCountMax;
+ bool SameBlockCountPerPage;
+ RegionPageMap PageMap;
+};
+
+// Try to release the pages which don't contain any in-use blocks, i.e., all of
+// their blocks are free. The `PageMap` records the number of free blocks in
+// each page.
+template <class ReleaseRecorderT, typename SkipRegionT>
+NOINLINE void
+releaseFreeMemoryToOS(PageReleaseContext &Context,
+ ReleaseRecorderT &Recorder, SkipRegionT SkipRegion) {
+ const uptr PageSize = Context.PageSize;
+ const uptr BlockSize = Context.BlockSize;
+ const uptr PagesCount = Context.PagesCount;
+ const uptr NumberOfRegions = Context.NumberOfRegions;
+ const uptr ReleasePageOffset = Context.ReleasePageOffset;
+ const uptr FullPagesBlockCountMax = Context.FullPagesBlockCountMax;
+ const bool SameBlockCountPerPage = Context.SameBlockCountPerPage;
+ RegionPageMap &PageMap = Context.PageMap;
+
+ // Iterate over pages detecting ranges of pages with chunk Counters equal
+ // to the expected number of chunks for the particular page.
+ FreePagesRangeTracker<ReleaseRecorderT> RangeTracker(Recorder);
+ if (SameBlockCountPerPage) {
+ // Fast path, every page has the same number of chunks affecting it.
+ for (uptr I = 0; I < NumberOfRegions; I++) {
+ if (SkipRegion(I)) {
+ RangeTracker.skipPages(PagesCount);
+ continue;
+ }
+ for (uptr J = 0; J < PagesCount; J++) {
+ const bool CanRelease =
+ PageMap.updateAsAllCountedIf(I, J, FullPagesBlockCountMax);
+ RangeTracker.processNextPage(CanRelease);
+ }
+ }
+ } else {
+    // Slow path, go through the pages keeping count of how many chunks affect
+    // each page.
+ const uptr Pn = BlockSize < PageSize ? PageSize / BlockSize : 1;
+ const uptr Pnc = Pn * BlockSize;
+ // The idea is to increment the current page pointer by the first chunk
+ // size, middle portion size (the portion of the page covered by chunks
+ // except the first and the last one) and then the last chunk size, adding
+ // up the number of chunks on the current page and checking on every step
+ // whether the page boundary was crossed.
+ for (uptr I = 0; I < NumberOfRegions; I++) {
+ if (SkipRegion(I)) {
+ RangeTracker.skipPages(PagesCount);
+ continue;
+ }
+ uptr PrevPageBoundary = 0;
+ uptr CurrentBoundary = 0;
+ if (ReleasePageOffset > 0) {
+ PrevPageBoundary = ReleasePageOffset * PageSize;
+ CurrentBoundary = roundUpSlow(PrevPageBoundary, BlockSize);
+ }
+ for (uptr J = 0; J < PagesCount; J++) {
+ const uptr PageBoundary = PrevPageBoundary + PageSize;
+ uptr BlocksPerPage = Pn;
+ if (CurrentBoundary < PageBoundary) {
+ if (CurrentBoundary > PrevPageBoundary)
+ BlocksPerPage++;
+ CurrentBoundary += Pnc;
+ if (CurrentBoundary < PageBoundary) {
+ BlocksPerPage++;
+ CurrentBoundary += BlockSize;
+ }
+ }
+ PrevPageBoundary = PageBoundary;
+ const bool CanRelease =
+ PageMap.updateAsAllCountedIf(I, J, BlocksPerPage);
+ RangeTracker.processNextPage(CanRelease);
+ }
+ }
+ }
+ RangeTracker.finish();
+}
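+
+// Illustrative call sequence (the names below are placeholders): a caller
+// builds a PageReleaseContext over a region, marks its free blocks, then hands
+// a recorder and a SkipRegion predicate to releaseFreeMemoryToOS().
+//
+//   PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U, RegionSize);
+//   Context.markFreeBlocksInRegion(FreeList, DecompactPtr, RegionBase,
+//                                  /*RegionIndex=*/0U, RegionSize,
+//                                  /*MayContainLastBlockInRegion=*/true);
+//   ReleaseRecorder Recorder(RegionBase);
+//   releaseFreeMemoryToOS(Context, Recorder,
+//                         [](uptr /*RegionIndex*/) { return false; });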
+
+} // namespace scudo
+
+#endif // SCUDO_RELEASE_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.cpp
new file mode 100644
index 000000000000..9cef0adc0bb3
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.cpp
@@ -0,0 +1,192 @@
+//===-- report.cpp ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "report.h"
+
+#include "atomic_helpers.h"
+#include "string_utils.h"
+
+#include <stdarg.h>
+
+namespace scudo {
+
+class ScopedErrorReport {
+public:
+ ScopedErrorReport() : Message() { Message.append("Scudo ERROR: "); }
+ void append(const char *Format, ...) {
+ va_list Args;
+ va_start(Args, Format);
+ Message.vappend(Format, Args);
+ va_end(Args);
+ }
+ NORETURN ~ScopedErrorReport() { reportRawError(Message.data()); }
+
+private:
+ ScopedString Message;
+};
+
+inline void NORETURN trap() { __builtin_trap(); }
+
+// This could potentially be called recursively if a CHECK fails in the reports.
+void NORETURN reportCheckFailed(const char *File, int Line,
+ const char *Condition, u64 Value1, u64 Value2) {
+ static atomic_u32 NumberOfCalls;
+ if (atomic_fetch_add(&NumberOfCalls, 1, memory_order_relaxed) > 2) {
+ // TODO(kostyak): maybe sleep here?
+ trap();
+ }
+ ScopedErrorReport Report;
+ Report.append("CHECK failed @ %s:%d %s ((u64)op1=%llu, (u64)op2=%llu)\n",
+ File, Line, Condition, Value1, Value2);
+}
+
+// Generic string fatal error message.
+void NORETURN reportError(const char *Message) {
+ ScopedErrorReport Report;
+ Report.append("%s\n", Message);
+}
+
+// Generic fatal error message without ScopedString.
+void NORETURN reportRawError(const char *Message) {
+ outputRaw(Message);
+ setAbortMessage(Message);
+ die();
+}
+
+void NORETURN reportInvalidFlag(const char *FlagType, const char *Value) {
+ ScopedErrorReport Report;
+ Report.append("invalid value for %s option: '%s'\n", FlagType, Value);
+}
+
+// The checksum of a chunk header is invalid. This could be caused by an
+// {over,under}write of the header, or a pointer that is not an actual chunk.
+void NORETURN reportHeaderCorruption(void *Ptr) {
+ ScopedErrorReport Report;
+ Report.append("corrupted chunk header at address %p\n", Ptr);
+}
+
+// The allocator was compiled with parameters that conflict with field size
+// requirements.
+void NORETURN reportSanityCheckError(const char *Field) {
+ ScopedErrorReport Report;
+ Report.append("maximum possible %s doesn't fit in header\n", Field);
+}
+
+// We enforce a maximum alignment, to keep fields smaller and generally prevent
+// integer overflows, or unexpected corner cases.
+void NORETURN reportAlignmentTooBig(uptr Alignment, uptr MaxAlignment) {
+ ScopedErrorReport Report;
+ Report.append("invalid allocation alignment: %zu exceeds maximum supported "
+ "alignment of %zu\n",
+ Alignment, MaxAlignment);
+}
+
+// See above, we also enforce a maximum size.
+void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
+ uptr MaxSize) {
+ ScopedErrorReport Report;
+ Report.append("requested allocation size %zu (%zu after adjustments) exceeds "
+ "maximum supported size of %zu\n",
+ UserSize, TotalSize, MaxSize);
+}
+
+void NORETURN reportOutOfBatchClass() {
+ ScopedErrorReport Report;
+ Report.append("BatchClass region is used up, can't hold any free block\n");
+}
+
+void NORETURN reportOutOfMemory(uptr RequestedSize) {
+ ScopedErrorReport Report;
+ Report.append("out of memory trying to allocate %zu bytes\n", RequestedSize);
+}
+
+static const char *stringifyAction(AllocatorAction Action) {
+ switch (Action) {
+ case AllocatorAction::Recycling:
+ return "recycling";
+ case AllocatorAction::Deallocating:
+ return "deallocating";
+ case AllocatorAction::Reallocating:
+ return "reallocating";
+ case AllocatorAction::Sizing:
+ return "sizing";
+ }
+ return "<invalid action>";
+}
+
+// The chunk is not in a state congruent with the operation we want to perform.
+// This is usually the case with a double-free or a realloc of a freed pointer.
+void NORETURN reportInvalidChunkState(AllocatorAction Action, void *Ptr) {
+ ScopedErrorReport Report;
+ Report.append("invalid chunk state when %s address %p\n",
+ stringifyAction(Action), Ptr);
+}
+
+void NORETURN reportMisalignedPointer(AllocatorAction Action, void *Ptr) {
+ ScopedErrorReport Report;
+ Report.append("misaligned pointer when %s address %p\n",
+ stringifyAction(Action), Ptr);
+}
+
+// The deallocation function used is at odds with the one used to allocate the
+// chunk (e.g., new[]/delete or malloc/delete, and so on).
+void NORETURN reportDeallocTypeMismatch(AllocatorAction Action, void *Ptr,
+ u8 TypeA, u8 TypeB) {
+ ScopedErrorReport Report;
+ Report.append("allocation type mismatch when %s address %p (%d vs %d)\n",
+ stringifyAction(Action), Ptr, TypeA, TypeB);
+}
+
+// The size specified to the delete operator does not match the one that was
+// passed to new when allocating the chunk.
+void NORETURN reportDeleteSizeMismatch(void *Ptr, uptr Size,
+ uptr ExpectedSize) {
+ ScopedErrorReport Report;
+ Report.append(
+ "invalid sized delete when deallocating address %p (%zu vs %zu)\n", Ptr,
+ Size, ExpectedSize);
+}
+
+void NORETURN reportAlignmentNotPowerOfTwo(uptr Alignment) {
+ ScopedErrorReport Report;
+ Report.append(
+ "invalid allocation alignment: %zu, alignment must be a power of two\n",
+ Alignment);
+}
+
+void NORETURN reportCallocOverflow(uptr Count, uptr Size) {
+ ScopedErrorReport Report;
+ Report.append("calloc parameters overflow: count * size (%zu * %zu) cannot "
+ "be represented with type size_t\n",
+ Count, Size);
+}
+
+void NORETURN reportInvalidPosixMemalignAlignment(uptr Alignment) {
+ ScopedErrorReport Report;
+ Report.append(
+ "invalid alignment requested in posix_memalign: %zu, alignment must be a "
+ "power of two and a multiple of sizeof(void *) == %zu\n",
+ Alignment, sizeof(void *));
+}
+
+void NORETURN reportPvallocOverflow(uptr Size) {
+ ScopedErrorReport Report;
+ Report.append("pvalloc parameters overflow: size %zu rounded up to system "
+ "page size %zu cannot be represented in type size_t\n",
+ Size, getPageSizeCached());
+}
+
+void NORETURN reportInvalidAlignedAllocAlignment(uptr Alignment, uptr Size) {
+ ScopedErrorReport Report;
+ Report.append("invalid alignment requested in aligned_alloc: %zu, alignment "
+ "must be a power of two and the requested size %zu must be a "
+ "multiple of alignment\n",
+ Alignment, Size);
+}
+
+} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.h
new file mode 100644
index 000000000000..a510fdaebb6d
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.h
@@ -0,0 +1,60 @@
+//===-- report.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_REPORT_H_
+#define SCUDO_REPORT_H_
+
+#include "internal_defs.h"
+
+namespace scudo {
+
+// Reports are *fatal* unless stated otherwise.
+
+// Generic error, adds newline to end of message.
+void NORETURN reportError(const char *Message);
+
+// Generic error, but the message is not modified.
+void NORETURN reportRawError(const char *Message);
+
+// Flags related errors.
+void NORETURN reportInvalidFlag(const char *FlagType, const char *Value);
+
+// Chunk header related errors.
+void NORETURN reportHeaderCorruption(void *Ptr);
+
+// Sanity checks related error.
+void NORETURN reportSanityCheckError(const char *Field);
+
+// Combined allocator errors.
+void NORETURN reportAlignmentTooBig(uptr Alignment, uptr MaxAlignment);
+void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
+ uptr MaxSize);
+void NORETURN reportOutOfBatchClass();
+void NORETURN reportOutOfMemory(uptr RequestedSize);
+enum class AllocatorAction : u8 {
+ Recycling,
+ Deallocating,
+ Reallocating,
+ Sizing,
+};
+void NORETURN reportInvalidChunkState(AllocatorAction Action, void *Ptr);
+void NORETURN reportMisalignedPointer(AllocatorAction Action, void *Ptr);
+void NORETURN reportDeallocTypeMismatch(AllocatorAction Action, void *Ptr,
+ u8 TypeA, u8 TypeB);
+void NORETURN reportDeleteSizeMismatch(void *Ptr, uptr Size, uptr ExpectedSize);
+
+// C wrappers errors.
+void NORETURN reportAlignmentNotPowerOfTwo(uptr Alignment);
+void NORETURN reportInvalidPosixMemalignAlignment(uptr Alignment);
+void NORETURN reportCallocOverflow(uptr Count, uptr Size);
+void NORETURN reportPvallocOverflow(uptr Size);
+void NORETURN reportInvalidAlignedAllocAlignment(uptr Alignment, uptr Size);
+
+} // namespace scudo
+
+#endif // SCUDO_REPORT_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report_linux.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report_linux.cpp
new file mode 100644
index 000000000000..432f6a016964
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report_linux.cpp
@@ -0,0 +1,55 @@
+//===-- report_linux.cpp ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+#if SCUDO_LINUX || SCUDO_TRUSTY
+
+#include "common.h"
+#include "internal_defs.h"
+#include "report.h"
+#include "report_linux.h"
+#include "string_utils.h"
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+namespace scudo {
+
+// Fatal internal map() error (potentially OOM related).
+void NORETURN reportMapError(uptr SizeIfOOM) {
+ ScopedString Error;
+ Error.append("Scudo ERROR: internal map failure (error desc=%s)",
+ strerror(errno));
+ if (SizeIfOOM)
+ Error.append(" requesting %zuKB", SizeIfOOM >> 10);
+ Error.append("\n");
+ reportRawError(Error.data());
+}
+
+void NORETURN reportUnmapError(uptr Addr, uptr Size) {
+ ScopedString Error;
+ Error.append("Scudo ERROR: internal unmap failure (error desc=%s) Addr 0x%zx "
+ "Size %zu\n",
+ strerror(errno), Addr, Size);
+ reportRawError(Error.data());
+}
+
+void NORETURN reportProtectError(uptr Addr, uptr Size, int Prot) {
+ ScopedString Error;
+ Error.append(
+ "Scudo ERROR: internal protect failure (error desc=%s) Addr 0x%zx "
+ "Size %zu Prot %x\n",
+ strerror(errno), Addr, Size, Prot);
+ reportRawError(Error.data());
+}
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX || SCUDO_TRUSTY
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report_linux.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report_linux.h
new file mode 100644
index 000000000000..aa0bb247e672
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report_linux.h
@@ -0,0 +1,34 @@
+//===-- report_linux.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_REPORT_LINUX_H_
+#define SCUDO_REPORT_LINUX_H_
+
+#include "platform.h"
+
+#if SCUDO_LINUX || SCUDO_TRUSTY
+
+#include "internal_defs.h"
+
+namespace scudo {
+
+// Report a fatal error when a map call fails. SizeIfOOM shall
+// hold the requested size on an out-of-memory error, 0 otherwise.
+void NORETURN reportMapError(uptr SizeIfOOM = 0);
+
+// Report a fatal error when an unmap call fails.
+void NORETURN reportUnmapError(uptr Addr, uptr Size);
+
+// Report a fatal error when an mprotect call fails.
+void NORETURN reportProtectError(uptr Addr, uptr Size, int Prot);
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX || SCUDO_TRUSTY
+
+#endif // SCUDO_REPORT_LINUX_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/secondary.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/secondary.h
new file mode 100644
index 000000000000..9a8e53be388b
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/secondary.h
@@ -0,0 +1,734 @@
+//===-- secondary.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_SECONDARY_H_
+#define SCUDO_SECONDARY_H_
+
+#include "chunk.h"
+#include "common.h"
+#include "list.h"
+#include "mem_map.h"
+#include "memtag.h"
+#include "mutex.h"
+#include "options.h"
+#include "stats.h"
+#include "string_utils.h"
+#include "thread_annotations.h"
+
+namespace scudo {
+
+// This allocator wraps the platform allocation primitives, and as such is on
+// the slower side and should preferably be used for larger-sized allocations.
+// Allocated blocks will be preceded and followed by a guard page, and hold
+// their own header that is not checksummed: the guard pages and the Combined
+// header should be enough for our purpose.
+
+namespace LargeBlock {
+
+struct alignas(Max<uptr>(archSupportsMemoryTagging()
+ ? archMemoryTagGranuleSize()
+ : 1,
+ 1U << SCUDO_MIN_ALIGNMENT_LOG)) Header {
+ LargeBlock::Header *Prev;
+ LargeBlock::Header *Next;
+ uptr CommitBase;
+ uptr CommitSize;
+ MemMapT MemMap;
+};
+
+static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
+static_assert(!archSupportsMemoryTagging() ||
+ sizeof(Header) % archMemoryTagGranuleSize() == 0,
+ "");
+
+constexpr uptr getHeaderSize() { return sizeof(Header); }
+
+template <typename Config> static uptr addHeaderTag(uptr Ptr) {
+ if (allocatorSupportsMemoryTagging<Config>())
+ return addFixedTag(Ptr, 1);
+ return Ptr;
+}
+
+template <typename Config> static Header *getHeader(uptr Ptr) {
+ return reinterpret_cast<Header *>(addHeaderTag<Config>(Ptr)) - 1;
+}
+
+template <typename Config> static Header *getHeader(const void *Ptr) {
+ return getHeader<Config>(reinterpret_cast<uptr>(Ptr));
+}
+
+} // namespace LargeBlock
+
+static inline void unmap(LargeBlock::Header *H) {
+  // Note that `H->MemMap` is stored on the pages it manages itself. Take over
+  // the ownership before unmap() so that any operation following unmap() won't
+  // touch inaccessible pages.
+ MemMapT MemMap = H->MemMap;
+ MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+}
+
+namespace {
+struct CachedBlock {
+ uptr CommitBase = 0;
+ uptr CommitSize = 0;
+ uptr BlockBegin = 0;
+ MemMapT MemMap = {};
+ u64 Time = 0;
+
+ bool isValid() { return CommitBase != 0; }
+
+ void invalidate() { CommitBase = 0; }
+};
+} // namespace
+
+template <typename Config> class MapAllocatorNoCache {
+public:
+ void init(UNUSED s32 ReleaseToOsInterval) {}
+ bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
+ UNUSED uptr HeadersSize, UNUSED LargeBlock::Header **H,
+ UNUSED bool *Zeroed) {
+ return false;
+ }
+ void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
+ bool canCache(UNUSED uptr Size) { return false; }
+ void disable() {}
+ void enable() {}
+ void releaseToOS() {}
+ void disableMemoryTagging() {}
+ void unmapTestOnly() {}
+ bool setOption(Option O, UNUSED sptr Value) {
+ if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
+ O == Option::MaxCacheEntrySize)
+ return false;
+ // Not supported by the Secondary Cache, but not an error either.
+ return true;
+ }
+
+ void getStats(UNUSED ScopedString *Str) {
+ Str->append("Secondary Cache Disabled\n");
+ }
+};
+
+static const uptr MaxUnusedCachePages = 4U;
+
+template <typename Config>
+bool mapSecondary(const Options &Options, uptr CommitBase, uptr CommitSize,
+ uptr AllocPos, uptr Flags, MemMapT &MemMap) {
+ Flags |= MAP_RESIZABLE;
+ Flags |= MAP_ALLOWNOMEM;
+
+ const uptr PageSize = getPageSizeCached();
+ if (SCUDO_TRUSTY) {
+ /*
+ * On Trusty we need AllocPos to be usable for shared memory, which cannot
+ * cross multiple mappings. This means we need to split around AllocPos
+ * and not over it. We can only do this if the address is page-aligned.
+ */
+ const uptr TaggedSize = AllocPos - CommitBase;
+ if (useMemoryTagging<Config>(Options) && isAligned(TaggedSize, PageSize)) {
+ DCHECK_GT(TaggedSize, 0);
+ return MemMap.remap(CommitBase, TaggedSize, "scudo:secondary",
+ MAP_MEMTAG | Flags) &&
+ MemMap.remap(AllocPos, CommitSize - TaggedSize, "scudo:secondary",
+ Flags);
+ } else {
+ const uptr RemapFlags =
+ (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
+ return MemMap.remap(CommitBase, CommitSize, "scudo:secondary",
+ RemapFlags);
+ }
+ }
+
+ const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * PageSize;
+ if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
+ const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
+ return MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary",
+ MAP_MEMTAG | Flags) &&
+ MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
+ "scudo:secondary", Flags);
+ } else {
+ const uptr RemapFlags =
+ (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
+ return MemMap.remap(CommitBase, CommitSize, "scudo:secondary", RemapFlags);
+ }
+}
+
+// Template specialization to avoid producing a zero-length array.
+template <typename T, size_t Size> class NonZeroLengthArray {
+public:
+ T &operator[](uptr Idx) { return values[Idx]; }
+
+private:
+ T values[Size];
+};
+template <typename T> class NonZeroLengthArray<T, 0> {
+public:
+ T &operator[](uptr UNUSED Idx) { UNREACHABLE("Unsupported!"); }
+};
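+
+// Illustrative note: MapAllocatorCache below declares its quarantine as
+// NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>, so a config
+// with a quarantine size of 0 still compiles without a zero-length array; the
+// specialization above traps if such an array is ever indexed.
+//
+//   NonZeroLengthArray<CachedBlock, 4> Blocks; // behaves like CachedBlock[4]
+//   Blocks[0].invalidate();
+//   NonZeroLengthArray<CachedBlock, 0> None;   // occupies no CachedBlock slots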
+
+template <typename Config> class MapAllocatorCache {
+public:
+ void getStats(ScopedString *Str) {
+ ScopedLock L(Mutex);
+ uptr Integral;
+ uptr Fractional;
+ computePercentage(SuccessfulRetrieves, CallsToRetrieve, &Integral,
+ &Fractional);
+ const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
+ Str->append(
+ "Stats: MapAllocatorCache: EntriesCount: %d, "
+ "MaxEntriesCount: %u, MaxEntrySize: %zu, ReleaseToOsIntervalMs = %d\n",
+ EntriesCount, atomic_load_relaxed(&MaxEntriesCount),
+ atomic_load_relaxed(&MaxEntrySize), Interval >= 0 ? Interval : -1);
+ Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
+ "(%zu.%02zu%%)\n",
+ SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
+ for (CachedBlock Entry : Entries) {
+ if (!Entry.isValid())
+ continue;
+ Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
+ "BlockSize: %zu %s\n",
+ Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
+ Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
+ }
+ }
+
+ // Ensure the default maximum specified fits the array.
+ static_assert(Config::getDefaultMaxEntriesCount() <=
+ Config::getEntriesArraySize(),
+ "");
+
+ void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
+ DCHECK_EQ(EntriesCount, 0U);
+ setOption(Option::MaxCacheEntriesCount,
+ static_cast<sptr>(Config::getDefaultMaxEntriesCount()));
+ setOption(Option::MaxCacheEntrySize,
+ static_cast<sptr>(Config::getDefaultMaxEntrySize()));
+    // The default value in the cache config takes priority.
+ if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
+ ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
+ setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
+ }
+
+ void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
+ if (!canCache(H->CommitSize))
+ return unmap(H);
+
+ bool EntryCached = false;
+ bool EmptyCache = false;
+ const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
+ const u64 Time = getMonotonicTimeFast();
+ const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
+ CachedBlock Entry;
+ Entry.CommitBase = H->CommitBase;
+ Entry.CommitSize = H->CommitSize;
+ Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
+ Entry.MemMap = H->MemMap;
+ Entry.Time = Time;
+ if (useMemoryTagging<Config>(Options)) {
+ if (Interval == 0 && !SCUDO_FUCHSIA) {
+ // Release the memory and make it inaccessible at the same time by
+ // creating a new MAP_NOACCESS mapping on top of the existing mapping.
+ // Fuchsia does not support replacing mappings by creating a new mapping
+ // on top so we just do the two syscalls there.
+ Entry.Time = 0;
+ mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize,
+ Entry.CommitBase, MAP_NOACCESS, Entry.MemMap);
+ } else {
+ Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
+ MAP_NOACCESS);
+ }
+ } else if (Interval == 0) {
+ Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
+ Entry.Time = 0;
+ }
+ do {
+ ScopedLock L(Mutex);
+ if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
+ // If we get here then memory tagging was disabled in between when we
+ // read Options and when we locked Mutex. We can't insert our entry into
+ // the quarantine or the cache because the permissions would be wrong so
+ // just unmap it.
+ break;
+ }
+ if (Config::getQuarantineSize() && useMemoryTagging<Config>(Options)) {
+ QuarantinePos =
+ (QuarantinePos + 1) % Max(Config::getQuarantineSize(), 1u);
+ if (!Quarantine[QuarantinePos].isValid()) {
+ Quarantine[QuarantinePos] = Entry;
+ return;
+ }
+ CachedBlock PrevEntry = Quarantine[QuarantinePos];
+ Quarantine[QuarantinePos] = Entry;
+ if (OldestTime == 0)
+ OldestTime = Entry.Time;
+ Entry = PrevEntry;
+ }
+ if (EntriesCount >= MaxCount) {
+ if (IsFullEvents++ == 4U)
+ EmptyCache = true;
+ } else {
+ for (u32 I = 0; I < MaxCount; I++) {
+ if (Entries[I].isValid())
+ continue;
+ if (I != 0)
+ Entries[I] = Entries[0];
+ Entries[0] = Entry;
+ EntriesCount++;
+ if (OldestTime == 0)
+ OldestTime = Entry.Time;
+ EntryCached = true;
+ break;
+ }
+ }
+ } while (0);
+ if (EmptyCache)
+ empty();
+ else if (Interval >= 0)
+ releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
+ if (!EntryCached)
+ Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
+ }
+
+ bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
+ LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
+ const uptr PageSize = getPageSizeCached();
+ const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
+ // 10% of the requested size proved to be the optimal choice for
+ // retrieving cached blocks after testing several options.
+ constexpr u32 FragmentedBytesDivisor = 10;
+ bool Found = false;
+ CachedBlock Entry;
+ uptr EntryHeaderPos = 0;
+ {
+ ScopedLock L(Mutex);
+ CallsToRetrieve++;
+ if (EntriesCount == 0)
+ return false;
+ u32 OptimalFitIndex = 0;
+ uptr MinDiff = UINTPTR_MAX;
+ for (u32 I = 0; I < MaxCount; I++) {
+ if (!Entries[I].isValid())
+ continue;
+ const uptr CommitBase = Entries[I].CommitBase;
+ const uptr CommitSize = Entries[I].CommitSize;
+ const uptr AllocPos =
+ roundDown(CommitBase + CommitSize - Size, Alignment);
+ const uptr HeaderPos = AllocPos - HeadersSize;
+ if (HeaderPos > CommitBase + CommitSize)
+ continue;
+ if (HeaderPos < CommitBase ||
+ AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
+ continue;
+ }
+ Found = true;
+ const uptr Diff = HeaderPos - CommitBase;
+        // Immediately use a cached block if its size is close enough to the
+        // requested size.
+ const uptr MaxAllowedFragmentedBytes =
+ (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
+ if (Diff <= MaxAllowedFragmentedBytes) {
+ OptimalFitIndex = I;
+ EntryHeaderPos = HeaderPos;
+ break;
+ }
+        // Keep track of the smallest cached block that is greater than
+        // (AllocSize + HeaderSize).
+ if (Diff > MinDiff)
+ continue;
+ OptimalFitIndex = I;
+ MinDiff = Diff;
+ EntryHeaderPos = HeaderPos;
+ }
+ if (Found) {
+ Entry = Entries[OptimalFitIndex];
+ Entries[OptimalFitIndex].invalidate();
+ EntriesCount--;
+ SuccessfulRetrieves++;
+ }
+ }
+ if (!Found)
+ return false;
+
+ *H = reinterpret_cast<LargeBlock::Header *>(
+ LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
+ *Zeroed = Entry.Time == 0;
+ if (useMemoryTagging<Config>(Options))
+ Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
+ uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
+ if (useMemoryTagging<Config>(Options)) {
+ if (*Zeroed) {
+ storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
+ NewBlockBegin);
+ } else if (Entry.BlockBegin < NewBlockBegin) {
+ storeTags(Entry.BlockBegin, NewBlockBegin);
+ } else {
+ storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
+ }
+ }
+ (*H)->CommitBase = Entry.CommitBase;
+ (*H)->CommitSize = Entry.CommitSize;
+ (*H)->MemMap = Entry.MemMap;
+ return true;
+ }
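+
+  // Worked example (illustrative): for an entry whose usable span
+  // (CommitBase + CommitSize - HeaderPos) is 1 MB, MaxAllowedFragmentedBytes
+  // is roughly 100 KB (1 MB / 10). An entry wasting at most that much before
+  // the header position is taken immediately; otherwise the loop keeps the
+  // entry with the smallest such waste (MinDiff) seen so far.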
+
+ bool canCache(uptr Size) {
+ return atomic_load_relaxed(&MaxEntriesCount) != 0U &&
+ Size <= atomic_load_relaxed(&MaxEntrySize);
+ }
+
+ bool setOption(Option O, sptr Value) {
+ if (O == Option::ReleaseInterval) {
+ const s32 Interval = Max(
+ Min(static_cast<s32>(Value), Config::getMaxReleaseToOsIntervalMs()),
+ Config::getMinReleaseToOsIntervalMs());
+ atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
+ return true;
+ }
+ if (O == Option::MaxCacheEntriesCount) {
+ if (Value < 0)
+ return false;
+ atomic_store_relaxed(
+ &MaxEntriesCount,
+ Min<u32>(static_cast<u32>(Value), Config::getEntriesArraySize()));
+ return true;
+ }
+ if (O == Option::MaxCacheEntrySize) {
+ atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
+ return true;
+ }
+ // Not supported by the Secondary Cache, but not an error either.
+ return true;
+ }
+
+ void releaseToOS() { releaseOlderThan(UINT64_MAX); }
+
+ void disableMemoryTagging() EXCLUDES(Mutex) {
+ ScopedLock L(Mutex);
+ for (u32 I = 0; I != Config::getQuarantineSize(); ++I) {
+ if (Quarantine[I].isValid()) {
+ MemMapT &MemMap = Quarantine[I].MemMap;
+ MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+ Quarantine[I].invalidate();
+ }
+ }
+ const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
+ for (u32 I = 0; I < MaxCount; I++) {
+ if (Entries[I].isValid()) {
+ Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+ Entries[I].CommitSize, 0);
+ }
+ }
+ QuarantinePos = -1U;
+ }
+
+ void disable() NO_THREAD_SAFETY_ANALYSIS { Mutex.lock(); }
+
+ void enable() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }
+
+ void unmapTestOnly() { empty(); }
+
+private:
+ void empty() {
+ MemMapT MapInfo[Config::getEntriesArraySize()];
+ uptr N = 0;
+ {
+ ScopedLock L(Mutex);
+ for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
+ if (!Entries[I].isValid())
+ continue;
+ MapInfo[N] = Entries[I].MemMap;
+ Entries[I].invalidate();
+ N++;
+ }
+ EntriesCount = 0;
+ IsFullEvents = 0;
+ }
+ for (uptr I = 0; I < N; I++) {
+ MemMapT &MemMap = MapInfo[I];
+ MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+ }
+ }
+
+ void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
+ if (!Entry.isValid() || !Entry.Time)
+ return;
+ if (Entry.Time > Time) {
+ if (OldestTime == 0 || Entry.Time < OldestTime)
+ OldestTime = Entry.Time;
+ return;
+ }
+ Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
+ Entry.Time = 0;
+ }
+
+ void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
+ ScopedLock L(Mutex);
+ if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
+ return;
+ OldestTime = 0;
+ for (uptr I = 0; I < Config::getQuarantineSize(); I++)
+ releaseIfOlderThan(Quarantine[I], Time);
+ for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
+ releaseIfOlderThan(Entries[I], Time);
+ }
+
+ HybridMutex Mutex;
+ u32 EntriesCount GUARDED_BY(Mutex) = 0;
+ u32 QuarantinePos GUARDED_BY(Mutex) = 0;
+ atomic_u32 MaxEntriesCount = {};
+ atomic_uptr MaxEntrySize = {};
+ u64 OldestTime GUARDED_BY(Mutex) = 0;
+ u32 IsFullEvents GUARDED_BY(Mutex) = 0;
+ atomic_s32 ReleaseToOsIntervalMs = {};
+ u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
+ u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;
+
+ CachedBlock Entries[Config::getEntriesArraySize()] GUARDED_BY(Mutex) = {};
+ NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
+ Quarantine GUARDED_BY(Mutex) = {};
+};
+
+template <typename Config> class MapAllocator {
+public:
+ void init(GlobalStats *S,
+ s32 ReleaseToOsInterval = -1) NO_THREAD_SAFETY_ANALYSIS {
+ DCHECK_EQ(AllocatedBytes, 0U);
+ DCHECK_EQ(FreedBytes, 0U);
+ Cache.init(ReleaseToOsInterval);
+ Stats.init();
+ if (LIKELY(S))
+ S->link(&Stats);
+ }
+
+ void *allocate(const Options &Options, uptr Size, uptr AlignmentHint = 0,
+ uptr *BlockEnd = nullptr,
+ FillContentsMode FillContents = NoFill);
+
+ void deallocate(const Options &Options, void *Ptr);
+
+ static uptr getBlockEnd(void *Ptr) {
+ auto *B = LargeBlock::getHeader<Config>(Ptr);
+ return B->CommitBase + B->CommitSize;
+ }
+
+ static uptr getBlockSize(void *Ptr) {
+ return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
+ }
+
+ static constexpr uptr getHeadersSize() {
+ return Chunk::getHeaderSize() + LargeBlock::getHeaderSize();
+ }
+
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
+ Mutex.lock();
+ Cache.disable();
+ }
+
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
+ Cache.enable();
+ Mutex.unlock();
+ }
+
+ template <typename F> void iterateOverBlocks(F Callback) const {
+ Mutex.assertHeld();
+
+ for (const auto &H : InUseBlocks) {
+ uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize();
+ if (allocatorSupportsMemoryTagging<Config>())
+ Ptr = untagPointer(Ptr);
+ Callback(Ptr);
+ }
+ }
+
+ bool canCache(uptr Size) { return Cache.canCache(Size); }
+
+ bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }
+
+ void releaseToOS() { Cache.releaseToOS(); }
+
+ void disableMemoryTagging() { Cache.disableMemoryTagging(); }
+
+ void unmapTestOnly() { Cache.unmapTestOnly(); }
+
+ void getStats(ScopedString *Str);
+
+private:
+ typename Config::template CacheT<typename Config::CacheConfig> Cache;
+
+ mutable HybridMutex Mutex;
+ DoublyLinkedList<LargeBlock::Header> InUseBlocks GUARDED_BY(Mutex);
+ uptr AllocatedBytes GUARDED_BY(Mutex) = 0;
+ uptr FreedBytes GUARDED_BY(Mutex) = 0;
+ uptr FragmentedBytes GUARDED_BY(Mutex) = 0;
+ uptr LargestSize GUARDED_BY(Mutex) = 0;
+ u32 NumberOfAllocs GUARDED_BY(Mutex) = 0;
+ u32 NumberOfFrees GUARDED_BY(Mutex) = 0;
+ LocalStats Stats GUARDED_BY(Mutex);
+};
+
+// As with the Primary, the size passed to this function includes any desired
+// alignment, so that the frontend can align the user allocation. The hint
+// parameter allows us to unmap spurious memory when dealing with larger
+// (greater than a page) alignments on 32-bit platforms.
+// Due to the sparsity of address space available on those platforms, requesting
+// an allocation from the Secondary with a large alignment would end up wasting
+// VA space (even though we are not committing the whole thing), hence the need
+// to trim off some of the reserved space.
+// For allocations requested with an alignment greater than or equal to a page,
+// the committed memory will amount to something close to Size - AlignmentHint
+// (pending rounding and headers).
+template <typename Config>
+void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
+ uptr Alignment, uptr *BlockEndPtr,
+ FillContentsMode FillContents) {
+ if (Options.get(OptionBit::AddLargeAllocationSlack))
+ Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
+ Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
+ const uptr PageSize = getPageSizeCached();
+
+  // Note that cached blocks may already have an aligned address. Thus we
+  // simply pass the required size (`Size` + `getHeadersSize()`) to do the
+  // cache lookup.
+ const uptr MinNeededSizeForCache = roundUp(Size + getHeadersSize(), PageSize);
+
+ if (Alignment < PageSize && Cache.canCache(MinNeededSizeForCache)) {
+ LargeBlock::Header *H;
+ bool Zeroed;
+ if (Cache.retrieve(Options, Size, Alignment, getHeadersSize(), &H,
+ &Zeroed)) {
+ const uptr BlockEnd = H->CommitBase + H->CommitSize;
+ if (BlockEndPtr)
+ *BlockEndPtr = BlockEnd;
+ uptr HInt = reinterpret_cast<uptr>(H);
+ if (allocatorSupportsMemoryTagging<Config>())
+ HInt = untagPointer(HInt);
+ const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
+ void *Ptr = reinterpret_cast<void *>(PtrInt);
+ if (FillContents && !Zeroed)
+ memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
+ BlockEnd - PtrInt);
+ {
+ ScopedLock L(Mutex);
+ InUseBlocks.push_back(H);
+ AllocatedBytes += H->CommitSize;
+ FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize;
+ NumberOfAllocs++;
+ Stats.add(StatAllocated, H->CommitSize);
+ Stats.add(StatMapped, H->MemMap.getCapacity());
+ }
+ return Ptr;
+ }
+ }
+
+ uptr RoundedSize =
+ roundUp(roundUp(Size, Alignment) + getHeadersSize(), PageSize);
+ if (Alignment > PageSize)
+ RoundedSize += Alignment - PageSize;
+
+ ReservedMemoryT ReservedMemory;
+ const uptr MapSize = RoundedSize + 2 * PageSize;
+ if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr,
+ MAP_ALLOWNOMEM))) {
+ return nullptr;
+ }
+
+  // Take full ownership of the reserved region.
+ MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
+ ReservedMemory.getCapacity());
+ uptr MapBase = MemMap.getBase();
+ uptr CommitBase = MapBase + PageSize;
+ uptr MapEnd = MapBase + MapSize;
+
+ // In the unlikely event of alignments larger than a page, adjust the amount
+ // of memory we want to commit, and trim the extra memory.
+ if (UNLIKELY(Alignment >= PageSize)) {
+    // For alignments greater than or equal to a page, the user pointer (e.g.
+    // the pointer returned by the C or C++ allocation APIs) ends up on a page
+    // boundary, and our headers will live in the preceding page.
+ CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
+ const uptr NewMapBase = CommitBase - PageSize;
+ DCHECK_GE(NewMapBase, MapBase);
+    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
+    // are less constrained memory-wise, and that saves us two syscalls.
+ if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
+ MemMap.unmap(MapBase, NewMapBase - MapBase);
+ MapBase = NewMapBase;
+ }
+ const uptr NewMapEnd =
+ CommitBase + PageSize + roundUp(Size, PageSize) + PageSize;
+ DCHECK_LE(NewMapEnd, MapEnd);
+ if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
+ MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
+ MapEnd = NewMapEnd;
+ }
+ }
+
+ const uptr CommitSize = MapEnd - PageSize - CommitBase;
+ const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
+ if (!mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0,
+ MemMap)) {
+ MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+ return nullptr;
+ }
+ const uptr HeaderPos = AllocPos - getHeadersSize();
+ LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
+ LargeBlock::addHeaderTag<Config>(HeaderPos));
+ if (useMemoryTagging<Config>(Options))
+ storeTags(LargeBlock::addHeaderTag<Config>(CommitBase),
+ reinterpret_cast<uptr>(H + 1));
+ H->CommitBase = CommitBase;
+ H->CommitSize = CommitSize;
+ H->MemMap = MemMap;
+ if (BlockEndPtr)
+ *BlockEndPtr = CommitBase + CommitSize;
+ {
+ ScopedLock L(Mutex);
+ InUseBlocks.push_back(H);
+ AllocatedBytes += CommitSize;
+ FragmentedBytes += H->MemMap.getCapacity() - CommitSize;
+ if (LargestSize < CommitSize)
+ LargestSize = CommitSize;
+ NumberOfAllocs++;
+ Stats.add(StatAllocated, CommitSize);
+ Stats.add(StatMapped, H->MemMap.getCapacity());
+ }
+ return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize());
+}
+
+template <typename Config>
+void MapAllocator<Config>::deallocate(const Options &Options, void *Ptr)
+ EXCLUDES(Mutex) {
+ LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
+ const uptr CommitSize = H->CommitSize;
+ {
+ ScopedLock L(Mutex);
+ InUseBlocks.remove(H);
+ FreedBytes += CommitSize;
+ FragmentedBytes -= H->MemMap.getCapacity() - CommitSize;
+ NumberOfFrees++;
+ Stats.sub(StatAllocated, CommitSize);
+ Stats.sub(StatMapped, H->MemMap.getCapacity());
+ }
+ Cache.store(Options, H);
+}
+
+template <typename Config>
+void MapAllocator<Config>::getStats(ScopedString *Str) EXCLUDES(Mutex) {
+ ScopedLock L(Mutex);
+ Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times "
+ "(%zuK), remains %u (%zuK) max %zuM, Fragmented %zuK\n",
+ NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
+ FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
+ (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20,
+ FragmentedBytes >> 10);
+ Cache.getStats(Str);
+}
+
+} // namespace scudo
+
+#endif // SCUDO_SECONDARY_H_
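
The allocate() path above reserves RoundedSize plus two extra pages, effectively leaving one uncommitted guard page on each side of the committed region, places the user allocation at the end of that region and puts the headers immediately in front of it. The standalone sketch below only re-derives those offsets with standard types, made-up request values and a placeholder HeadersSize (the real value comes from getHeadersSize()), to make the layout easier to follow.

    #include <cassert>
    #include <cstdint>

    namespace {
    constexpr uint64_t roundUpTo(uint64_t X, uint64_t B) { return (X + B - 1) / B * B; }
    constexpr uint64_t roundDownTo(uint64_t X, uint64_t B) { return X / B * B; }
    } // namespace

    int main() {
      // Hypothetical request; HeadersSize stands in for getHeadersSize().
      const uint64_t PageSize = 4096, HeadersSize = 48;
      const uint64_t Size = 100000, Alignment = 16;

      const uint64_t RoundedSize =
          roundUpTo(roundUpTo(Size, Alignment) + HeadersSize, PageSize);
      const uint64_t MapBase = 0x200000000;                // As if reserved.
      const uint64_t MapSize = RoundedSize + 2 * PageSize; // Guard page each side.
      const uint64_t MapEnd = MapBase + MapSize;
      const uint64_t CommitBase = MapBase + PageSize;      // Skip front guard page.
      const uint64_t CommitSize = MapEnd - PageSize - CommitBase;
      const uint64_t AllocPos =
          roundDownTo(CommitBase + CommitSize - Size, Alignment);
      const uint64_t HeaderPos = AllocPos - HeadersSize;

      // The user allocation sits at the end of the committed region, with the
      // headers immediately before it and the guard pages outside of it.
      assert(HeaderPos >= CommitBase);
      assert(AllocPos + Size <= CommitBase + CommitSize);
      return 0;
    }
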
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/size_class_map.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/size_class_map.h
new file mode 100644
index 000000000000..4138885de338
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/size_class_map.h
@@ -0,0 +1,353 @@
+//===-- size_class_map.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_SIZE_CLASS_MAP_H_
+#define SCUDO_SIZE_CLASS_MAP_H_
+
+#include "chunk.h"
+#include "common.h"
+#include "string_utils.h"
+
+namespace scudo {
+
+inline uptr scaledLog2(uptr Size, uptr ZeroLog, uptr LogBits) {
+ const uptr L = getMostSignificantSetBitIndex(Size);
+ const uptr LBits = (Size >> (L - LogBits)) - (1 << LogBits);
+ const uptr HBits = (L - ZeroLog) << LogBits;
+ return LBits + HBits;
+}
+
+template <typename Config> struct SizeClassMapBase {
+ static u16 getMaxCachedHint(uptr Size) {
+ DCHECK_NE(Size, 0);
+ u32 N;
+ // Force a 32-bit division if the template parameters allow for it.
+ if (Config::MaxBytesCachedLog > 31 || Config::MaxSizeLog > 31)
+ N = static_cast<u32>((1UL << Config::MaxBytesCachedLog) / Size);
+ else
+ N = (1U << Config::MaxBytesCachedLog) / static_cast<u32>(Size);
+
+ // Note that Config::MaxNumCachedHint is u16 so the result is guaranteed to
+ // fit in u16.
+ return static_cast<u16>(Max(1U, Min<u32>(Config::MaxNumCachedHint, N)));
+ }
+};
+
+// SizeClassMap maps allocation sizes into size classes and back, in an
+// efficient table-free manner.
+//
+// Class 0 is a special class that doesn't abide by the same rules as other
+// classes. The allocator uses it to hold batches.
+//
+// The other sizes are controlled by the template parameters:
+// - MinSizeLog: defines the first class as 2^MinSizeLog bytes.
+// - MaxSizeLog: defines the last class as 2^MaxSizeLog bytes.
+// - MidSizeLog: classes increase with step 2^MinSizeLog from 2^MinSizeLog to
+// 2^MidSizeLog bytes.
+// - NumBits: the number of non-zero bits in sizes after 2^MidSizeLog.
+//   e.g. with NumBits==3 all size classes after 2^MidSizeLog look like
+// 0b1xx0..0 (where x is either 0 or 1).
+//
+// This class also gives a hint to a thread-caching allocator about the number
+// of chunks that can be cached per thread:
+// - MaxNumCachedHint is a hint for the max number of chunks cached per class.
+// - 2^MaxBytesCachedLog is the max number of bytes cached per class.
+template <typename Config>
+class FixedSizeClassMap : public SizeClassMapBase<Config> {
+ typedef SizeClassMapBase<Config> Base;
+
+ static const uptr MinSize = 1UL << Config::MinSizeLog;
+ static const uptr MidSize = 1UL << Config::MidSizeLog;
+ static const uptr MidClass = MidSize / MinSize;
+ static const u8 S = Config::NumBits - 1;
+ static const uptr M = (1UL << S) - 1;
+
+public:
+ static const u16 MaxNumCachedHint = Config::MaxNumCachedHint;
+
+ static const uptr MaxSize = (1UL << Config::MaxSizeLog) + Config::SizeDelta;
+ static const uptr NumClasses =
+ MidClass + ((Config::MaxSizeLog - Config::MidSizeLog) << S) + 1;
+ static_assert(NumClasses <= 256, "");
+ static const uptr LargestClassId = NumClasses - 1;
+ static const uptr BatchClassId = 0;
+
+ static uptr getSizeByClassId(uptr ClassId) {
+ DCHECK_NE(ClassId, BatchClassId);
+ if (ClassId <= MidClass)
+ return (ClassId << Config::MinSizeLog) + Config::SizeDelta;
+ ClassId -= MidClass;
+ const uptr T = MidSize << (ClassId >> S);
+ return T + (T >> S) * (ClassId & M) + Config::SizeDelta;
+ }
+
+ static u8 getSizeLSBByClassId(uptr ClassId) {
+ return u8(getLeastSignificantSetBitIndex(getSizeByClassId(ClassId)));
+ }
+
+ static constexpr bool usesCompressedLSBFormat() { return false; }
+
+ static uptr getClassIdBySize(uptr Size) {
+ if (Size <= Config::SizeDelta + (1 << Config::MinSizeLog))
+ return 1;
+ Size -= Config::SizeDelta;
+ DCHECK_LE(Size, MaxSize);
+ if (Size <= MidSize)
+ return (Size + MinSize - 1) >> Config::MinSizeLog;
+ return MidClass + 1 + scaledLog2(Size - 1, Config::MidSizeLog, S);
+ }
+
+ static u16 getMaxCachedHint(uptr Size) {
+ DCHECK_LE(Size, MaxSize);
+ return Base::getMaxCachedHint(Size);
+ }
+};
+
+template <typename Config>
+class TableSizeClassMap : public SizeClassMapBase<Config> {
+ typedef SizeClassMapBase<Config> Base;
+
+ static const u8 S = Config::NumBits - 1;
+ static const uptr M = (1UL << S) - 1;
+ static const uptr ClassesSize =
+ sizeof(Config::Classes) / sizeof(Config::Classes[0]);
+
+ struct SizeTable {
+ constexpr SizeTable() {
+ uptr Pos = 1 << Config::MidSizeLog;
+ uptr Inc = 1 << (Config::MidSizeLog - S);
+ for (uptr i = 0; i != getTableSize(); ++i) {
+ Pos += Inc;
+ if ((Pos & (Pos - 1)) == 0)
+ Inc *= 2;
+ Tab[i] = computeClassId(Pos + Config::SizeDelta);
+ }
+ }
+
+ constexpr static u8 computeClassId(uptr Size) {
+ for (uptr i = 0; i != ClassesSize; ++i) {
+ if (Size <= Config::Classes[i])
+ return static_cast<u8>(i + 1);
+ }
+ return static_cast<u8>(-1);
+ }
+
+ constexpr static uptr getTableSize() {
+ return (Config::MaxSizeLog - Config::MidSizeLog) << S;
+ }
+
+ u8 Tab[getTableSize()] = {};
+ };
+
+ static constexpr SizeTable SzTable = {};
+
+ struct LSBTable {
+ constexpr LSBTable() {
+ u8 Min = 255, Max = 0;
+ for (uptr I = 0; I != ClassesSize; ++I) {
+ for (u8 Bit = 0; Bit != 64; ++Bit) {
+ if (Config::Classes[I] & (1 << Bit)) {
+ Tab[I] = Bit;
+ if (Bit < Min)
+ Min = Bit;
+ if (Bit > Max)
+ Max = Bit;
+ break;
+ }
+ }
+ }
+
+ if (Max - Min > 3 || ClassesSize > 32)
+ return;
+
+ UseCompressedFormat = true;
+ CompressedMin = Min;
+ for (uptr I = 0; I != ClassesSize; ++I)
+ CompressedValue |= u64(Tab[I] - Min) << (I * 2);
+ }
+
+ u8 Tab[ClassesSize] = {};
+
+ bool UseCompressedFormat = false;
+ u8 CompressedMin = 0;
+ u64 CompressedValue = 0;
+ };
+
+ static constexpr LSBTable LTable = {};
+
+public:
+ static const u16 MaxNumCachedHint = Config::MaxNumCachedHint;
+
+ static const uptr NumClasses = ClassesSize + 1;
+ static_assert(NumClasses < 256, "");
+ static const uptr LargestClassId = NumClasses - 1;
+ static const uptr BatchClassId = 0;
+ static const uptr MaxSize = Config::Classes[LargestClassId - 1];
+
+ static uptr getSizeByClassId(uptr ClassId) {
+ return Config::Classes[ClassId - 1];
+ }
+
+ static u8 getSizeLSBByClassId(uptr ClassId) {
+ if (LTable.UseCompressedFormat)
+ return ((LTable.CompressedValue >> ((ClassId - 1) * 2)) & 3) +
+ LTable.CompressedMin;
+ else
+ return LTable.Tab[ClassId - 1];
+ }
+
+ static constexpr bool usesCompressedLSBFormat() {
+ return LTable.UseCompressedFormat;
+ }
+
+ static uptr getClassIdBySize(uptr Size) {
+ if (Size <= Config::Classes[0])
+ return 1;
+ Size -= Config::SizeDelta;
+ DCHECK_LE(Size, MaxSize);
+ if (Size <= (1 << Config::MidSizeLog))
+ return ((Size - 1) >> Config::MinSizeLog) + 1;
+ return SzTable.Tab[scaledLog2(Size - 1, Config::MidSizeLog, S)];
+ }
+
+ static u16 getMaxCachedHint(uptr Size) {
+ DCHECK_LE(Size, MaxSize);
+ return Base::getMaxCachedHint(Size);
+ }
+};
+
+struct DefaultSizeClassConfig {
+ static const uptr NumBits = 3;
+ static const uptr MinSizeLog = 5;
+ static const uptr MidSizeLog = 8;
+ static const uptr MaxSizeLog = 17;
+ static const u16 MaxNumCachedHint = 14;
+ static const uptr MaxBytesCachedLog = 10;
+ static const uptr SizeDelta = 0;
+};
+
+typedef FixedSizeClassMap<DefaultSizeClassConfig> DefaultSizeClassMap;
+
+struct FuchsiaSizeClassConfig {
+ static const uptr NumBits = 3;
+ static const uptr MinSizeLog = 5;
+ static const uptr MidSizeLog = 8;
+ static const uptr MaxSizeLog = 17;
+ static const u16 MaxNumCachedHint = 12;
+ static const uptr MaxBytesCachedLog = 10;
+ static const uptr SizeDelta = Chunk::getHeaderSize();
+};
+
+typedef FixedSizeClassMap<FuchsiaSizeClassConfig> FuchsiaSizeClassMap;
+
+struct AndroidSizeClassConfig {
+#if SCUDO_WORDSIZE == 64U
+ static const uptr NumBits = 7;
+ static const uptr MinSizeLog = 4;
+ static const uptr MidSizeLog = 6;
+ static const uptr MaxSizeLog = 16;
+ static const u16 MaxNumCachedHint = 13;
+ static const uptr MaxBytesCachedLog = 13;
+
+ static constexpr uptr Classes[] = {
+ 0x00020, 0x00030, 0x00040, 0x00050, 0x00060, 0x00070, 0x00090, 0x000b0,
+ 0x000c0, 0x000e0, 0x00120, 0x00160, 0x001c0, 0x00250, 0x00320, 0x00450,
+ 0x00670, 0x00830, 0x00a10, 0x00c30, 0x01010, 0x01210, 0x01bd0, 0x02210,
+ 0x02d90, 0x03790, 0x04010, 0x04810, 0x05a10, 0x07310, 0x08210, 0x10010,
+ };
+ static const uptr SizeDelta = 16;
+#else
+ static const uptr NumBits = 8;
+ static const uptr MinSizeLog = 4;
+ static const uptr MidSizeLog = 7;
+ static const uptr MaxSizeLog = 16;
+ static const u16 MaxNumCachedHint = 14;
+ static const uptr MaxBytesCachedLog = 13;
+
+ static constexpr uptr Classes[] = {
+ 0x00020, 0x00030, 0x00040, 0x00050, 0x00060, 0x00070, 0x00080, 0x00090,
+ 0x000a0, 0x000b0, 0x000c0, 0x000e0, 0x000f0, 0x00110, 0x00120, 0x00130,
+ 0x00150, 0x00160, 0x00170, 0x00190, 0x001d0, 0x00210, 0x00240, 0x002a0,
+ 0x00330, 0x00370, 0x003a0, 0x00400, 0x00430, 0x004a0, 0x00530, 0x00610,
+ 0x00730, 0x00840, 0x00910, 0x009c0, 0x00a60, 0x00b10, 0x00ca0, 0x00e00,
+ 0x00fb0, 0x01030, 0x01130, 0x011f0, 0x01490, 0x01650, 0x01930, 0x02010,
+ 0x02190, 0x02490, 0x02850, 0x02d50, 0x03010, 0x03210, 0x03c90, 0x04090,
+ 0x04510, 0x04810, 0x05c10, 0x06f10, 0x07310, 0x08010, 0x0c010, 0x10010,
+ };
+ static const uptr SizeDelta = 16;
+#endif
+};
+
+typedef TableSizeClassMap<AndroidSizeClassConfig> AndroidSizeClassMap;
+
+#if SCUDO_WORDSIZE == 64U && defined(__clang__)
+static_assert(AndroidSizeClassMap::usesCompressedLSBFormat(), "");
+#endif
+
+struct TrustySizeClassConfig {
+ static const uptr NumBits = 1;
+ static const uptr MinSizeLog = 5;
+ static const uptr MidSizeLog = 5;
+ static const uptr MaxSizeLog = 15;
+ static const u16 MaxNumCachedHint = 12;
+ static const uptr MaxBytesCachedLog = 10;
+ static const uptr SizeDelta = 0;
+};
+
+typedef FixedSizeClassMap<TrustySizeClassConfig> TrustySizeClassMap;
+
+template <typename SCMap> inline void printMap() {
+ ScopedString Buffer;
+ uptr PrevS = 0;
+ uptr TotalCached = 0;
+ for (uptr I = 0; I < SCMap::NumClasses; I++) {
+ if (I == SCMap::BatchClassId)
+ continue;
+ const uptr S = SCMap::getSizeByClassId(I);
+ const uptr D = S - PrevS;
+ const uptr P = PrevS ? (D * 100 / PrevS) : 0;
+ const uptr L = S ? getMostSignificantSetBitIndex(S) : 0;
+ const uptr Cached = SCMap::getMaxCachedHint(S) * S;
+ Buffer.append(
+ "C%02zu => S: %zu diff: +%zu %02zu%% L %zu Cached: %u %zu; id %zu\n", I,
+ S, D, P, L, SCMap::getMaxCachedHint(S), Cached,
+ SCMap::getClassIdBySize(S));
+ TotalCached += Cached;
+ PrevS = S;
+ }
+ Buffer.append("Total Cached: %zu\n", TotalCached);
+ Buffer.output();
+}
+
+template <typename SCMap> static UNUSED void validateMap() {
+ for (uptr C = 0; C < SCMap::NumClasses; C++) {
+ if (C == SCMap::BatchClassId)
+ continue;
+ const uptr S = SCMap::getSizeByClassId(C);
+ CHECK_NE(S, 0U);
+ CHECK_EQ(SCMap::getClassIdBySize(S), C);
+ if (C < SCMap::LargestClassId)
+ CHECK_EQ(SCMap::getClassIdBySize(S + 1), C + 1);
+ CHECK_EQ(SCMap::getClassIdBySize(S - 1), C);
+ if (C - 1 != SCMap::BatchClassId)
+ CHECK_GT(SCMap::getSizeByClassId(C), SCMap::getSizeByClassId(C - 1));
+ }
+ // Do not perform the loop if the maximum size is too large.
+ if (SCMap::MaxSize > (1 << 19))
+ return;
+ for (uptr S = 1; S <= SCMap::MaxSize; S++) {
+ const uptr C = SCMap::getClassIdBySize(S);
+ CHECK_LT(C, SCMap::NumClasses);
+ CHECK_GE(SCMap::getSizeByClassId(C), S);
+ if (C - 1 != SCMap::BatchClassId)
+ CHECK_LT(SCMap::getSizeByClassId(C - 1), S);
+ }
+}
+} // namespace scudo
+
+#endif // SCUDO_SIZE_CLASS_MAP_H_
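
To make the FixedSizeClassMap layout described above concrete, the standalone sketch below re-implements the getSizeByClassId() arithmetic with the DefaultSizeClassConfig constants (NumBits = 3, MinSizeLog = 5, MidSizeLog = 8, SizeDelta = 0) and prints the first sixteen class sizes: classes 1-8 step by 32 bytes up to 256, and above that each power-of-two band is split into four classes, i.e. sizes of the form 0b1xx0...0.

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t MinSizeLog = 5, MidSizeLog = 8, NumBits = 3, SizeDelta = 0;
      const uint64_t MinSize = 1ULL << MinSizeLog; // 32
      const uint64_t MidSize = 1ULL << MidSizeLog; // 256
      const uint64_t MidClass = MidSize / MinSize; // 8
      const uint64_t S = NumBits - 1, M = (1ULL << S) - 1;
      for (uint64_t ClassId = 1; ClassId <= 16; ClassId++) {
        uint64_t Size;
        if (ClassId <= MidClass) {
          Size = (ClassId << MinSizeLog) + SizeDelta;
        } else {
          const uint64_t C = ClassId - MidClass;
          const uint64_t T = MidSize << (C >> S);
          Size = T + (T >> S) * (C & M) + SizeDelta;
        }
        printf("C%02llu => %llu\n", (unsigned long long)ClassId,
               (unsigned long long)Size);
      }
      // Prints 32 64 96 128 160 192 224 256 320 384 448 512 640 768 896 1024.
      return 0;
    }
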
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/stack_depot.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/stack_depot.h
new file mode 100644
index 000000000000..0176c40aa899
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/stack_depot.h
@@ -0,0 +1,208 @@
+//===-- stack_depot.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_STACK_DEPOT_H_
+#define SCUDO_STACK_DEPOT_H_
+
+#include "atomic_helpers.h"
+#include "common.h"
+#include "mutex.h"
+
+namespace scudo {
+
+class MurMur2HashBuilder {
+ static const u32 M = 0x5bd1e995;
+ static const u32 Seed = 0x9747b28c;
+ static const u32 R = 24;
+ u32 H;
+
+public:
+ explicit MurMur2HashBuilder(u32 Init = 0) { H = Seed ^ Init; }
+ void add(u32 K) {
+ K *= M;
+ K ^= K >> R;
+ K *= M;
+ H *= M;
+ H ^= K;
+ }
+ u32 get() {
+ u32 X = H;
+ X ^= X >> 13;
+ X *= M;
+ X ^= X >> 15;
+ return X;
+ }
+};
+
+class alignas(atomic_u64) StackDepot {
+ HybridMutex RingEndMu;
+ u32 RingEnd = 0;
+
+  // This data structure stores a stack trace for each allocation and
+  // deallocation when stack trace recording is enabled; a trace may be looked
+  // up using a hash of it. The lower bits of the hash are an index
+ // into the Tab array, which stores an index into the Ring array where the
+ // stack traces are stored. As the name implies, Ring is a ring buffer, so a
+ // stack trace may wrap around to the start of the array.
+ //
+ // Each stack trace in Ring is prefixed by a stack trace marker consisting of
+ // a fixed 1 bit in bit 0 (this allows disambiguation between stack frames
+ // and stack trace markers in the case where instruction pointers are 4-byte
+ // aligned, as they are on arm64), the stack trace hash in bits 1-32, and the
+ // size of the stack trace in bits 33-63.
+ //
+ // The insert() function is potentially racy in its accesses to the Tab and
+ // Ring arrays, but find() is resilient to races in the sense that, barring
+ // hash collisions, it will either return the correct stack trace or no stack
+ // trace at all, even if two instances of insert() raced with one another.
+ // This is achieved by re-checking the hash of the stack trace before
+ // returning the trace.
+
+ u32 RingSize = 0;
+ u32 RingMask = 0;
+ u32 TabMask = 0;
+ // This is immediately followed by RingSize atomic_u64 and
+ // (TabMask + 1) atomic_u32.
+
+ atomic_u64 *getRing() {
+ return reinterpret_cast<atomic_u64 *>(reinterpret_cast<char *>(this) +
+ sizeof(StackDepot));
+ }
+
+ atomic_u32 *getTab() {
+ return reinterpret_cast<atomic_u32 *>(reinterpret_cast<char *>(this) +
+ sizeof(StackDepot) +
+ sizeof(atomic_u64) * RingSize);
+ }
+
+ const atomic_u64 *getRing() const {
+ return reinterpret_cast<const atomic_u64 *>(
+ reinterpret_cast<const char *>(this) + sizeof(StackDepot));
+ }
+
+ const atomic_u32 *getTab() const {
+ return reinterpret_cast<const atomic_u32 *>(
+ reinterpret_cast<const char *>(this) + sizeof(StackDepot) +
+ sizeof(atomic_u64) * RingSize);
+ }
+
+public:
+ void init(u32 RingSz, u32 TabSz) {
+ DCHECK(isPowerOfTwo(RingSz));
+ DCHECK(isPowerOfTwo(TabSz));
+ RingSize = RingSz;
+ RingMask = RingSz - 1;
+ TabMask = TabSz - 1;
+ }
+
+ // Ensure that RingSize, RingMask and TabMask are set up in a way that
+ // all accesses are within range of BufSize.
+ bool isValid(uptr BufSize) const {
+ if (!isPowerOfTwo(RingSize))
+ return false;
+ uptr RingBytes = sizeof(atomic_u64) * RingSize;
+ if (RingMask + 1 != RingSize)
+ return false;
+
+ if (TabMask == 0)
+ return false;
+ uptr TabSize = TabMask + 1;
+ if (!isPowerOfTwo(TabSize))
+ return false;
+ uptr TabBytes = sizeof(atomic_u32) * TabSize;
+
+ // Subtract and detect underflow.
+ if (BufSize < sizeof(StackDepot))
+ return false;
+ BufSize -= sizeof(StackDepot);
+ if (BufSize < TabBytes)
+ return false;
+ BufSize -= TabBytes;
+ if (BufSize < RingBytes)
+ return false;
+ return BufSize == RingBytes;
+ }
+
+  // Insert the stack trace [Begin, End) into the stack depot, and return the
+  // hash of the trace.
+ u32 insert(uptr *Begin, uptr *End) {
+ auto *Tab = getTab();
+ auto *Ring = getRing();
+
+ MurMur2HashBuilder B;
+ for (uptr *I = Begin; I != End; ++I)
+ B.add(u32(*I) >> 2);
+ u32 Hash = B.get();
+
+ u32 Pos = Hash & TabMask;
+ u32 RingPos = atomic_load_relaxed(&Tab[Pos]);
+ u64 Entry = atomic_load_relaxed(&Ring[RingPos]);
+ u64 Id = (u64(End - Begin) << 33) | (u64(Hash) << 1) | 1;
+ if (Entry == Id)
+ return Hash;
+
+ ScopedLock Lock(RingEndMu);
+ RingPos = RingEnd;
+ atomic_store_relaxed(&Tab[Pos], RingPos);
+ atomic_store_relaxed(&Ring[RingPos], Id);
+ for (uptr *I = Begin; I != End; ++I) {
+ RingPos = (RingPos + 1) & RingMask;
+ atomic_store_relaxed(&Ring[RingPos], *I);
+ }
+ RingEnd = (RingPos + 1) & RingMask;
+ return Hash;
+ }
+
+  // Look up a stack trace by hash. Returns true if successful. The trace may
+  // be accessed via at(), passing indexes between *RingPosPtr and
+  // *RingPosPtr + *SizePtr.
+ bool find(u32 Hash, uptr *RingPosPtr, uptr *SizePtr) const {
+ auto *Tab = getTab();
+ auto *Ring = getRing();
+
+ u32 Pos = Hash & TabMask;
+ u32 RingPos = atomic_load_relaxed(&Tab[Pos]);
+ if (RingPos >= RingSize)
+ return false;
+ u64 Entry = atomic_load_relaxed(&Ring[RingPos]);
+ u64 HashWithTagBit = (u64(Hash) << 1) | 1;
+ if ((Entry & 0x1ffffffff) != HashWithTagBit)
+ return false;
+ u32 Size = u32(Entry >> 33);
+ if (Size >= RingSize)
+ return false;
+ *RingPosPtr = (RingPos + 1) & RingMask;
+ *SizePtr = Size;
+ MurMur2HashBuilder B;
+ for (uptr I = 0; I != Size; ++I) {
+ RingPos = (RingPos + 1) & RingMask;
+ B.add(u32(atomic_load_relaxed(&Ring[RingPos])) >> 2);
+ }
+ return B.get() == Hash;
+ }
+
+ u64 at(uptr RingPos) const {
+ auto *Ring = getRing();
+ return atomic_load_relaxed(&Ring[RingPos & RingMask]);
+ }
+
+ // This is done for the purpose of fork safety in multithreaded programs and
+ // does not fully disable StackDepot. In particular, find() still works and
+ // only insert() is blocked.
+ void disable() NO_THREAD_SAFETY_ANALYSIS { RingEndMu.lock(); }
+
+ void enable() NO_THREAD_SAFETY_ANALYSIS { RingEndMu.unlock(); }
+};
+
+// We need StackDepot to be aligned to 8 bytes so the ring stored after it is
+// correctly aligned.
+static_assert(sizeof(StackDepot) % alignof(atomic_u64) == 0);
+
+} // namespace scudo
+
+#endif // SCUDO_STACK_DEPOT_H_
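
The class comment above describes how a stack trace marker is packed into a single 64-bit ring entry: a fixed tag bit in bit 0, the 32-bit trace hash in bits 1-32 and the trace length in bits 33-63. The standalone sketch below (made-up hash and length values) encodes such a marker exactly as insert() does and checks it the way find() does.

    #include <cassert>
    #include <cstdint>

    static uint64_t encodeMarker(uint32_t Hash, uint32_t Size) {
      return (static_cast<uint64_t>(Size) << 33) |
             (static_cast<uint64_t>(Hash) << 1) | 1u;
    }

    int main() {
      const uint64_t Entry = encodeMarker(/*Hash=*/0xdeadbeef, /*Size=*/12);
      // find() compares the low 33 bits (tag bit + hash)...
      assert((Entry & 0x1ffffffff) ==
             ((static_cast<uint64_t>(0xdeadbeef) << 1) | 1));
      // ...and recovers the trace length from the high bits.
      assert(static_cast<uint32_t>(Entry >> 33) == 12);
      // Bit 0 distinguishes markers from 4-byte-aligned frame addresses.
      assert(Entry & 1);
      return 0;
    }
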
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/stats.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/stats.h
new file mode 100644
index 000000000000..658b75863ade
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/stats.h
@@ -0,0 +1,102 @@
+//===-- stats.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_STATS_H_
+#define SCUDO_STATS_H_
+
+#include "atomic_helpers.h"
+#include "list.h"
+#include "mutex.h"
+#include "thread_annotations.h"
+
+#include <string.h>
+
+namespace scudo {
+
+// Memory allocator statistics
+enum StatType { StatAllocated, StatFree, StatMapped, StatCount };
+
+typedef uptr StatCounters[StatCount];
+
+// Per-thread stats, living in the per-thread cache. We use atomics so that
+// the numbers themselves are consistent. But we don't use atomic_{add|sub} or
+// a lock, because those are expensive operations, and we only need the stats
+// to be "somewhat" correct: e.g. if we call GlobalStats::get while a thread
+// is in the middle of LocalStats::add, that is OK; we will still get a
+// meaningful number.
+class LocalStats {
+public:
+ void init() {
+ for (uptr I = 0; I < StatCount; I++)
+ DCHECK_EQ(get(static_cast<StatType>(I)), 0U);
+ }
+
+ void add(StatType I, uptr V) {
+ V += atomic_load_relaxed(&StatsArray[I]);
+ atomic_store_relaxed(&StatsArray[I], V);
+ }
+
+ void sub(StatType I, uptr V) {
+ V = atomic_load_relaxed(&StatsArray[I]) - V;
+ atomic_store_relaxed(&StatsArray[I], V);
+ }
+
+ void set(StatType I, uptr V) { atomic_store_relaxed(&StatsArray[I], V); }
+
+ uptr get(StatType I) const { return atomic_load_relaxed(&StatsArray[I]); }
+
+ LocalStats *Next = nullptr;
+ LocalStats *Prev = nullptr;
+
+private:
+ atomic_uptr StatsArray[StatCount] = {};
+};
+
+// Global stats, used for aggregation and querying.
+class GlobalStats : public LocalStats {
+public:
+ void init() { LocalStats::init(); }
+
+ void link(LocalStats *S) EXCLUDES(Mutex) {
+ ScopedLock L(Mutex);
+ StatsList.push_back(S);
+ }
+
+ void unlink(LocalStats *S) EXCLUDES(Mutex) {
+ ScopedLock L(Mutex);
+ StatsList.remove(S);
+ for (uptr I = 0; I < StatCount; I++)
+ add(static_cast<StatType>(I), S->get(static_cast<StatType>(I)));
+ }
+
+ void get(uptr *S) const EXCLUDES(Mutex) {
+ ScopedLock L(Mutex);
+ for (uptr I = 0; I < StatCount; I++)
+ S[I] = LocalStats::get(static_cast<StatType>(I));
+ for (const auto &Stats : StatsList) {
+ for (uptr I = 0; I < StatCount; I++)
+ S[I] += Stats.get(static_cast<StatType>(I));
+ }
+ // All stats must be non-negative.
+ for (uptr I = 0; I < StatCount; I++)
+ S[I] = static_cast<sptr>(S[I]) >= 0 ? S[I] : 0;
+ }
+
+ void lock() ACQUIRE(Mutex) { Mutex.lock(); }
+ void unlock() RELEASE(Mutex) { Mutex.unlock(); }
+
+ void disable() ACQUIRE(Mutex) { lock(); }
+ void enable() RELEASE(Mutex) { unlock(); }
+
+private:
+ mutable HybridMutex Mutex;
+ DoublyLinkedList<LocalStats> StatsList GUARDED_BY(Mutex);
+};
+
+} // namespace scudo
+
+#endif // SCUDO_STATS_H_
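
A minimal usage sketch for the two classes above, assuming this header is on the include path: a per-thread LocalStats is linked into the GlobalStats list, updated with relaxed stores, and aggregated by get(). The real call sites live elsewhere in the allocator; this only illustrates the intended flow.

    #include "stats.h"

    void statsExample() {
      scudo::GlobalStats Global;
      Global.init();

      scudo::LocalStats Local;
      Local.init();
      Global.link(&Local);

      Local.add(scudo::StatAllocated, 4096); // E.g. after an allocation.
      Local.sub(scudo::StatAllocated, 4096); // And after the matching free.

      scudo::uptr Counters[scudo::StatCount];
      Global.get(Counters); // Global counters plus every linked LocalStats.

      Global.unlink(&Local); // Folds the local counters back into GlobalStats.
    }
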
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.cpp
new file mode 100644
index 000000000000..e584bd806e57
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.cpp
@@ -0,0 +1,241 @@
+//===-- string_utils.cpp ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "string_utils.h"
+#include "common.h"
+
+#include <stdarg.h>
+#include <string.h>
+
+namespace scudo {
+
+// Appends a number in a given Base to the buffer. If its length is less than
+// |MinNumberLength|, it is padded with leading zeroes or spaces, depending
+// on the value of |PadWithZero|.
+void ScopedString::appendNumber(u64 AbsoluteValue, u8 Base, u8 MinNumberLength,
+ bool PadWithZero, bool Negative, bool Upper) {
+ constexpr uptr MaxLen = 30;
+ RAW_CHECK(Base == 10 || Base == 16);
+ RAW_CHECK(Base == 10 || !Negative);
+ RAW_CHECK(AbsoluteValue || !Negative);
+ RAW_CHECK(MinNumberLength < MaxLen);
+ if (Negative && MinNumberLength)
+ --MinNumberLength;
+ if (Negative && PadWithZero) {
+ String.push_back('-');
+ }
+ uptr NumBuffer[MaxLen];
+ int Pos = 0;
+ do {
+ RAW_CHECK_MSG(static_cast<uptr>(Pos) < MaxLen,
+ "appendNumber buffer overflow");
+ NumBuffer[Pos++] = static_cast<uptr>(AbsoluteValue % Base);
+ AbsoluteValue /= Base;
+ } while (AbsoluteValue > 0);
+ if (Pos < MinNumberLength) {
+ memset(&NumBuffer[Pos], 0,
+ sizeof(NumBuffer[0]) * static_cast<uptr>(MinNumberLength - Pos));
+ Pos = MinNumberLength;
+ }
+ RAW_CHECK(Pos > 0);
+ Pos--;
+ for (; Pos >= 0 && NumBuffer[Pos] == 0; Pos--) {
+ char c = (PadWithZero || Pos == 0) ? '0' : ' ';
+ String.push_back(c);
+ }
+ if (Negative && !PadWithZero)
+ String.push_back('-');
+ for (; Pos >= 0; Pos--) {
+ char Digit = static_cast<char>(NumBuffer[Pos]);
+ Digit = static_cast<char>((Digit < 10) ? '0' + Digit
+ : (Upper ? 'A' : 'a') + Digit - 10);
+ String.push_back(Digit);
+ }
+}
+
+void ScopedString::appendUnsigned(u64 Num, u8 Base, u8 MinNumberLength,
+ bool PadWithZero, bool Upper) {
+ appendNumber(Num, Base, MinNumberLength, PadWithZero, /*Negative=*/false,
+ Upper);
+}
+
+void ScopedString::appendSignedDecimal(s64 Num, u8 MinNumberLength,
+ bool PadWithZero) {
+ const bool Negative = (Num < 0);
+ const u64 UnsignedNum = (Num == INT64_MIN)
+ ? static_cast<u64>(INT64_MAX) + 1
+ : static_cast<u64>(Negative ? -Num : Num);
+ appendNumber(UnsignedNum, 10, MinNumberLength, PadWithZero, Negative,
+ /*Upper=*/false);
+}
+
+// Use the fact that explicitly requesting 0 Width (%0s) results in UB and
+// interpret Width == 0 as "no Width requested":
+// Width == 0 - no Width requested
+// Width < 0 - left-justify S within and pad it to -Width chars, if necessary
+// Width > 0 - right-justify S, not implemented yet
+void ScopedString::appendString(int Width, int MaxChars, const char *S) {
+ if (!S)
+ S = "<null>";
+ int NumChars = 0;
+ for (; *S; S++) {
+ if (MaxChars >= 0 && NumChars >= MaxChars)
+ break;
+ String.push_back(*S);
+ NumChars++;
+ }
+ if (Width < 0) {
+ // Only left justification supported.
+ Width = -Width - NumChars;
+ while (Width-- > 0)
+ String.push_back(' ');
+ }
+}
+
+void ScopedString::appendPointer(u64 ptr_value) {
+ appendString(0, -1, "0x");
+ appendUnsigned(ptr_value, 16, SCUDO_POINTER_FORMAT_LENGTH,
+ /*PadWithZero=*/true,
+ /*Upper=*/false);
+}
+
+void ScopedString::vappend(const char *Format, va_list &Args) {
+  // The string always holds a trailing '\0'; drop it here so that push_back
+  // appends in the right place (the terminator is restored at the end).
+ DCHECK(String.size() > 0);
+ String.resize(String.size() - 1);
+
+ static const char *PrintfFormatsHelp =
+ "Supported formats: %([0-9]*)?(z|ll)?{d,u,x,X}; %p; "
+ "%[-]([0-9]*)?(\\.\\*)?s; %c\n";
+ RAW_CHECK(Format);
+ const char *Cur = Format;
+ for (; *Cur; Cur++) {
+ if (*Cur != '%') {
+ String.push_back(*Cur);
+ continue;
+ }
+ Cur++;
+ const bool LeftJustified = *Cur == '-';
+ if (LeftJustified)
+ Cur++;
+ bool HaveWidth = (*Cur >= '0' && *Cur <= '9');
+ const bool PadWithZero = (*Cur == '0');
+ u8 Width = 0;
+ if (HaveWidth) {
+ while (*Cur >= '0' && *Cur <= '9')
+ Width = static_cast<u8>(Width * 10 + *Cur++ - '0');
+ }
+ const bool HavePrecision = (Cur[0] == '.' && Cur[1] == '*');
+ int Precision = -1;
+ if (HavePrecision) {
+ Cur += 2;
+ Precision = va_arg(Args, int);
+ }
+ const bool HaveZ = (*Cur == 'z');
+ Cur += HaveZ;
+ const bool HaveLL = !HaveZ && (Cur[0] == 'l' && Cur[1] == 'l');
+ Cur += HaveLL * 2;
+ s64 DVal;
+ u64 UVal;
+ const bool HaveLength = HaveZ || HaveLL;
+ const bool HaveFlags = HaveWidth || HaveLength;
+ // At the moment only %s supports precision and left-justification.
+ CHECK(!((Precision >= 0 || LeftJustified) && *Cur != 's'));
+ switch (*Cur) {
+ case 'd': {
+ DVal = HaveLL ? va_arg(Args, s64)
+ : HaveZ ? va_arg(Args, sptr)
+ : va_arg(Args, int);
+ appendSignedDecimal(DVal, Width, PadWithZero);
+ break;
+ }
+ case 'u':
+ case 'x':
+ case 'X': {
+ UVal = HaveLL ? va_arg(Args, u64)
+ : HaveZ ? va_arg(Args, uptr)
+ : va_arg(Args, unsigned);
+ const bool Upper = (*Cur == 'X');
+ appendUnsigned(UVal, (*Cur == 'u') ? 10 : 16, Width, PadWithZero, Upper);
+ break;
+ }
+ case 'p': {
+ RAW_CHECK_MSG(!HaveFlags, PrintfFormatsHelp);
+ appendPointer(va_arg(Args, uptr));
+ break;
+ }
+ case 's': {
+ RAW_CHECK_MSG(!HaveLength, PrintfFormatsHelp);
+ // Only left-justified Width is supported.
+ CHECK(!HaveWidth || LeftJustified);
+ appendString(LeftJustified ? -Width : Width, Precision,
+ va_arg(Args, char *));
+ break;
+ }
+ case 'c': {
+ RAW_CHECK_MSG(!HaveFlags, PrintfFormatsHelp);
+ String.push_back(static_cast<char>(va_arg(Args, int)));
+ break;
+ }
+    // In Scudo, `s64`/`u64` are supposed to use `lld` and `llu` respectively.
+    // However, `-Wformat` doesn't know we have a different parser for those
+    // placeholders and it keeps complaining about the type mismatch on 64-bit
+    // platforms, which use `ld`/`lu` for `s64`/`u64`. Therefore, in order to
+    // silence the warning, we use `PRId64`/`PRIu64` for printing `s64`/`u64`
+    // and handle the `ld`/`lu` cases here.
+ case 'l': {
+ ++Cur;
+ RAW_CHECK(*Cur == 'd' || *Cur == 'u');
+
+ if (*Cur == 'd') {
+ DVal = va_arg(Args, s64);
+ appendSignedDecimal(DVal, Width, PadWithZero);
+ } else {
+ UVal = va_arg(Args, u64);
+ appendUnsigned(UVal, 10, Width, PadWithZero, false);
+ }
+
+ break;
+ }
+ case '%': {
+ RAW_CHECK_MSG(!HaveFlags, PrintfFormatsHelp);
+ String.push_back('%');
+ break;
+ }
+ default: {
+ RAW_CHECK_MSG(false, PrintfFormatsHelp);
+ }
+ }
+ }
+ String.push_back('\0');
+ if (String.back() != '\0') {
+ // String truncated, make sure the string is terminated properly.
+ // This can happen if there is no more memory when trying to resize
+ // the string.
+ String.back() = '\0';
+ }
+}
+
+void ScopedString::append(const char *Format, ...) {
+ va_list Args;
+ va_start(Args, Format);
+ vappend(Format, Args);
+ va_end(Args);
+}
+
+void Printf(const char *Format, ...) {
+ va_list Args;
+ va_start(Args, Format);
+ ScopedString Msg;
+ Msg.vappend(Format, Args);
+ outputRaw(Msg.data());
+ va_end(Args);
+}
+
+} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.h
new file mode 100644
index 000000000000..cf61e150f20e
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.h
@@ -0,0 +1,50 @@
+//===-- string_utils.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_STRING_UTILS_H_
+#define SCUDO_STRING_UTILS_H_
+
+#include "internal_defs.h"
+#include "vector.h"
+
+#include <stdarg.h>
+
+namespace scudo {
+
+class ScopedString {
+public:
+ explicit ScopedString() { String.push_back('\0'); }
+ uptr length() { return String.size() - 1; }
+ const char *data() { return String.data(); }
+ void clear() {
+ String.clear();
+ String.push_back('\0');
+ }
+ void vappend(const char *Format, va_list &Args);
+ void append(const char *Format, ...) FORMAT(2, 3);
+ void output() const { outputRaw(String.data()); }
+ void reserve(size_t Size) { String.reserve(Size + 1); }
+ uptr capacity() { return String.capacity() - 1; }
+
+private:
+ void appendNumber(u64 AbsoluteValue, u8 Base, u8 MinNumberLength,
+ bool PadWithZero, bool Negative, bool Upper);
+ void appendUnsigned(u64 Num, u8 Base, u8 MinNumberLength, bool PadWithZero,
+ bool Upper);
+ void appendSignedDecimal(s64 Num, u8 MinNumberLength, bool PadWithZero);
+ void appendString(int Width, int MaxChars, const char *S);
+ void appendPointer(u64 ptr_value);
+
+ Vector<char, 256> String;
+};
+
+void Printf(const char *Format, ...) FORMAT(1, 2);
+
+} // namespace scudo
+
+#endif // SCUDO_STRING_UTILS_H_
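
A small usage sketch for ScopedString, assuming this header is on the include path and sticking to the format specifiers that vappend() actually parses (%d/%u/%x/%X with an optional z or ll length modifier, %s with optional width and .* precision, %p, %c and %%).

    #include "string_utils.h"

    void formatExample() {
      scudo::ScopedString Str;
      Str.append("allocated %zu bytes for \"%s\" at %p\n",
                 static_cast<scudo::uptr>(1024), "example",
                 static_cast<void *>(&Str));
      Str.output(); // Writes the accumulated buffer via outputRaw().
    }
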
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/thread_annotations.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/thread_annotations.h
new file mode 100644
index 000000000000..68a1087c2034
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/thread_annotations.h
@@ -0,0 +1,70 @@
+//===-- thread_annotations.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_THREAD_ANNOTATIONS_
+#define SCUDO_THREAD_ANNOTATIONS_
+
+// Enable thread safety attributes only with clang.
+// The attributes can be safely ignored when compiling with other compilers.
+#if defined(__clang__)
+#define THREAD_ANNOTATION_ATTRIBUTE_(x) __attribute__((x))
+#else
+#define THREAD_ANNOTATION_ATTRIBUTE_(x) // no-op
+#endif
+
+#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(capability(x))
+
+#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE_(scoped_lockable)
+
+#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE_(guarded_by(x))
+
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE_(pt_guarded_by(x))
+
+#define ACQUIRED_BEFORE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(acquired_before(__VA_ARGS__))
+
+#define ACQUIRED_AFTER(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(acquired_after(__VA_ARGS__))
+
+#define REQUIRES(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(requires_capability(__VA_ARGS__))
+
+#define REQUIRES_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(requires_shared_capability(__VA_ARGS__))
+
+#define ACQUIRE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(acquire_capability(__VA_ARGS__))
+
+#define ACQUIRE_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(acquire_shared_capability(__VA_ARGS__))
+
+#define RELEASE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(release_capability(__VA_ARGS__))
+
+#define RELEASE_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(release_shared_capability(__VA_ARGS__))
+
+#define TRY_ACQUIRE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(try_acquire_capability(__VA_ARGS__))
+
+#define TRY_ACQUIRE_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(try_acquire_shared_capability(__VA_ARGS__))
+
+#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE_(locks_excluded(__VA_ARGS__))
+
+#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(assert_capability(x))
+
+#define ASSERT_SHARED_CAPABILITY(x) \
+ THREAD_ANNOTATION_ATTRIBUTE_(assert_shared_capability(x))
+
+#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(lock_returned(x))
+
+#define NO_THREAD_SAFETY_ANALYSIS \
+ THREAD_ANNOTATION_ATTRIBUTE_(no_thread_safety_analysis)
+
+#endif // SCUDO_THREAD_ANNOTATIONS_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/timing.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/timing.cpp
new file mode 100644
index 000000000000..59ae21d10f0f
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/timing.cpp
@@ -0,0 +1,29 @@
+//===-- timing.cpp ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "timing.h"
+
+namespace scudo {
+
+Timer::~Timer() {
+ if (Manager)
+ Manager->report(*this);
+}
+
+ScopedTimer::ScopedTimer(TimingManager &Manager, const char *Name)
+ : Timer(Manager.getOrCreateTimer(Name)) {
+ start();
+}
+
+ScopedTimer::ScopedTimer(TimingManager &Manager, const Timer &Nest,
+ const char *Name)
+ : Timer(Manager.nest(Nest, Name)) {
+ start();
+}
+
+} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/timing.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/timing.h
new file mode 100644
index 000000000000..de741edbff5f
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/timing.h
@@ -0,0 +1,239 @@
+//===-- timing.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TIMING_H_
+#define SCUDO_TIMING_H_
+
+#include "common.h"
+#include "mutex.h"
+#include "string_utils.h"
+#include "thread_annotations.h"
+
+#include <inttypes.h>
+#include <string.h>
+
+namespace scudo {
+
+class TimingManager;
+
+// A simple timer for measuring the execution time of code snippets. It can be
+// used along with TimingManager or standalone.
+class Timer {
+public:
+  // A Timer that is not bound to a TimingManager is expected to handle the
+  // logging of its timing data manually. Otherwise, TimingManager does the
+  // logging for you.
+ Timer() = default;
+ Timer(Timer &&Other)
+ : StartTime(0), AccTime(Other.AccTime), Manager(Other.Manager),
+ HandleId(Other.HandleId) {
+ Other.Manager = nullptr;
+ }
+
+ Timer(const Timer &) = delete;
+
+ ~Timer();
+
+ void start() {
+ CHECK_EQ(StartTime, 0U);
+ StartTime = getMonotonicTime();
+ }
+ void stop() {
+ AccTime += getMonotonicTime() - StartTime;
+ StartTime = 0;
+ }
+ u64 getAccumulatedTime() const { return AccTime; }
+
+  // Unset the bound TimingManager so that we don't report the data back. This
+  // is useful if we only want to track a subset of certain scope events.
+ void ignore() {
+ StartTime = 0;
+ AccTime = 0;
+ Manager = nullptr;
+ }
+
+protected:
+ friend class TimingManager;
+ Timer(TimingManager &Manager, u32 HandleId)
+ : Manager(&Manager), HandleId(HandleId) {}
+
+ u64 StartTime = 0;
+ u64 AccTime = 0;
+ TimingManager *Manager = nullptr;
+ u32 HandleId;
+};
+
+// A RAII-style wrapper for easy scope execution measurement. Note that, in
+// order not to take additional space for data like `Name`, it only works with
+// TimingManager.
+class ScopedTimer : public Timer {
+public:
+ ScopedTimer(TimingManager &Manager, const char *Name);
+ ScopedTimer(TimingManager &Manager, const Timer &Nest, const char *Name);
+ ~ScopedTimer() { stop(); }
+};
+
+// In Scudo, the execution time of a single run of a code snippet may not be
+// useful; we are more interested in the average time across several runs.
+// TimingManager lets the registered timers report their data and periodically
+// reports the average execution time for each timer.
+class TimingManager {
+public:
+ TimingManager(u32 PrintingInterval = DefaultPrintingInterval)
+ : PrintingInterval(PrintingInterval) {}
+ ~TimingManager() {
+ if (NumAllocatedTimers != 0)
+ printAll();
+ }
+
+ Timer getOrCreateTimer(const char *Name) EXCLUDES(Mutex) {
+ ScopedLock L(Mutex);
+
+ CHECK_LT(strlen(Name), MaxLenOfTimerName);
+ for (u32 I = 0; I < NumAllocatedTimers; ++I) {
+ if (strncmp(Name, Timers[I].Name, MaxLenOfTimerName) == 0)
+ return Timer(*this, I);
+ }
+
+ CHECK_LT(NumAllocatedTimers, MaxNumberOfTimers);
+ strncpy(Timers[NumAllocatedTimers].Name, Name, MaxLenOfTimerName);
+ TimerRecords[NumAllocatedTimers].AccumulatedTime = 0;
+ TimerRecords[NumAllocatedTimers].Occurrence = 0;
+ TimerRecords[NumAllocatedTimers].MaxTime = 0;
+ return Timer(*this, NumAllocatedTimers++);
+ }
+
+  // Add a sub-Timer associated with another Timer. This is used when we want
+  // to break down the execution time within the scope of a Timer.
+ // For example,
+ // void Foo() {
+ // // T1 records the time spent in both first and second tasks.
+ // ScopedTimer T1(getTimingManager(), "Task1");
+ // {
+ // // T2 records the time spent in first task
+  //     ScopedTimer T2(getTimingManager(), T1, "Task2");
+ // // Do first task.
+ // }
+ // // Do second task.
+ // }
+ //
+ // The report will show proper indents to indicate the nested relation like,
+ // -- Average Operation Time -- -- Name (# of Calls) --
+ // 10.0(ns) Task1 (1)
+ // 5.0(ns) Task2 (1)
+ Timer nest(const Timer &T, const char *Name) EXCLUDES(Mutex) {
+ CHECK_EQ(T.Manager, this);
+ Timer Nesting = getOrCreateTimer(Name);
+
+ ScopedLock L(Mutex);
+ CHECK_NE(Nesting.HandleId, T.HandleId);
+ Timers[Nesting.HandleId].Nesting = T.HandleId;
+ return Nesting;
+ }
+
+ void report(const Timer &T) EXCLUDES(Mutex) {
+ ScopedLock L(Mutex);
+
+ const u32 HandleId = T.HandleId;
+ CHECK_LT(HandleId, MaxNumberOfTimers);
+ u64 AccTime = T.getAccumulatedTime();
+ TimerRecords[HandleId].AccumulatedTime += AccTime;
+ if (AccTime > TimerRecords[HandleId].MaxTime) {
+ TimerRecords[HandleId].MaxTime = AccTime;
+ }
+ ++TimerRecords[HandleId].Occurrence;
+ ++NumEventsReported;
+ if (NumEventsReported % PrintingInterval == 0) {
+ ScopedString Str;
+ getAllImpl(Str);
+ Str.output();
+ }
+ }
+
+ void printAll() EXCLUDES(Mutex) {
+ ScopedString Str;
+ getAll(Str);
+ Str.output();
+ }
+
+ void getAll(ScopedString &Str) EXCLUDES(Mutex) {
+ ScopedLock L(Mutex);
+ getAllImpl(Str);
+ }
+
+private:
+ void getAllImpl(ScopedString &Str) REQUIRES(Mutex) {
+ static char AvgHeader[] = "-- Average Operation Time --";
+ static char MaxHeader[] = "-- Maximum Operation Time --";
+ static char NameHeader[] = "-- Name (# of Calls) --";
+ Str.append("%-15s %-15s %-15s\n", AvgHeader, MaxHeader, NameHeader);
+
+ for (u32 I = 0; I < NumAllocatedTimers; ++I) {
+ if (Timers[I].Nesting != MaxNumberOfTimers)
+ continue;
+ getImpl(Str, I);
+ }
+ }
+
+ void getImpl(ScopedString &Str, const u32 HandleId, const u32 ExtraIndent = 0)
+ REQUIRES(Mutex) {
+ const u64 AccumulatedTime = TimerRecords[HandleId].AccumulatedTime;
+ const u64 Occurrence = TimerRecords[HandleId].Occurrence;
+ const u64 Integral = Occurrence == 0 ? 0 : AccumulatedTime / Occurrence;
+    // Keeping a single fractional digit is enough, and it makes the layout
+    // easier to maintain.
+ const u64 Fraction =
+ Occurrence == 0 ? 0
+ : ((AccumulatedTime % Occurrence) * 10) / Occurrence;
+
+ // Average time.
+ Str.append("%14" PRId64 ".%" PRId64 "(ns) %-8s", Integral, Fraction, " ");
+
+ // Maximum time.
+ Str.append("%16" PRId64 "(ns) %-11s", TimerRecords[HandleId].MaxTime, " ");
+
+ // Name and num occurrences.
+ for (u32 I = 0; I < ExtraIndent; ++I)
+ Str.append("%s", " ");
+ Str.append("%s (%" PRId64 ")\n", Timers[HandleId].Name, Occurrence);
+
+ for (u32 I = 0; I < NumAllocatedTimers; ++I)
+ if (Timers[I].Nesting == HandleId)
+ getImpl(Str, I, ExtraIndent + 1);
+ }
+
+  // Instead of dynamically allocating memory for timer registration, a static
+  // buffer is sufficient for most use cases in Scudo.
+ static constexpr u32 MaxNumberOfTimers = 50;
+ static constexpr u32 MaxLenOfTimerName = 50;
+ static constexpr u32 DefaultPrintingInterval = 100;
+
+ struct Record {
+ u64 AccumulatedTime = 0;
+ u64 Occurrence = 0;
+ u64 MaxTime = 0;
+ };
+
+ struct TimerInfo {
+ char Name[MaxLenOfTimerName + 1];
+ u32 Nesting = MaxNumberOfTimers;
+ };
+
+ HybridMutex Mutex;
+ // The frequency of proactively dumping the timer statistics. For example, the
+ // default setting is to dump the statistics every 100 reported events.
+ u32 PrintingInterval GUARDED_BY(Mutex);
+ u64 NumEventsReported GUARDED_BY(Mutex) = 0;
+ u32 NumAllocatedTimers GUARDED_BY(Mutex) = 0;
+ TimerInfo Timers[MaxNumberOfTimers] GUARDED_BY(Mutex);
+ Record TimerRecords[MaxNumberOfTimers] GUARDED_BY(Mutex);
+};
+
+} // namespace scudo
+
+#endif // SCUDO_TIMING_H_
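
The Timer comment notes that a timer which is not bound to a TimingManager must be driven manually; a minimal sketch of that standalone use, assuming this header is on the include path, is:

    #include "timing.h"

    scudo::u64 timeSomething() {
      scudo::Timer T; // Not bound to a TimingManager: no automatic reporting.
      T.start();
      // ... code under measurement ...
      T.stop();
      // Accumulated time, in the nanoseconds returned by getMonotonicTime().
      return T.getAccumulatedTime();
    }
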
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/trusty.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/trusty.cpp
new file mode 100644
index 000000000000..26b349c6e506
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/trusty.cpp
@@ -0,0 +1,118 @@
+//===-- trusty.cpp ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+#if SCUDO_TRUSTY
+
+#include "common.h"
+#include "mutex.h"
+#include "report_linux.h"
+#include "trusty.h"
+
+#include <errno.h> // for errno
+#include <lk/err_ptr.h> // for PTR_ERR and IS_ERR
+#include <stdio.h> // for printf()
+#include <stdlib.h> // for getenv()
+#include <sys/auxv.h> // for getauxval()
+#include <time.h> // for clock_gettime()
+#include <trusty_err.h> // for lk_err_to_errno()
+#include <trusty_syscalls.h> // for _trusty_mmap()
+#include <uapi/mm.h> // for MMAP flags
+
+namespace scudo {
+
+uptr getPageSize() { return getauxval(AT_PAGESZ); }
+
+void NORETURN die() { abort(); }
+
+void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
+ UNUSED MapPlatformData *Data) {
+ uint32_t MmapFlags =
+ MMAP_FLAG_ANONYMOUS | MMAP_FLAG_PROT_READ | MMAP_FLAG_PROT_WRITE;
+
+ // If the MAP_NOACCESS flag is set, Scudo tries to reserve
+ // a memory region without mapping physical pages. This corresponds
+ // to MMAP_FLAG_NO_PHYSICAL in Trusty.
+ if (Flags & MAP_NOACCESS)
+ MmapFlags |= MMAP_FLAG_NO_PHYSICAL;
+ if (Addr)
+ MmapFlags |= MMAP_FLAG_FIXED_NOREPLACE;
+
+ if (Flags & MAP_MEMTAG)
+ MmapFlags |= MMAP_FLAG_PROT_MTE;
+
+ void *P = (void *)_trusty_mmap(Addr, Size, MmapFlags, 0);
+
+ if (IS_ERR(P)) {
+ errno = lk_err_to_errno(PTR_ERR(P));
+ if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
+ reportMapError(Size);
+ return nullptr;
+ }
+
+ return P;
+}
+
+void unmap(void *Addr, uptr Size, UNUSED uptr Flags,
+           UNUSED MapPlatformData *Data) {
+ if (_trusty_munmap(Addr, Size) != 0)
+ reportUnmapError(reinterpret_cast<uptr>(Addr), Size);
+}
+
+void setMemoryPermission(UNUSED uptr Addr, UNUSED uptr Size, UNUSED uptr Flags,
+ UNUSED MapPlatformData *Data) {}
+
+void releasePagesToOS(UNUSED uptr BaseAddress, UNUSED uptr Offset,
+ UNUSED uptr Size, UNUSED MapPlatformData *Data) {}
+
+const char *getEnv(const char *Name) { return getenv(Name); }
+
+// All mutex operations are no-ops since Trusty doesn't currently support
+// threads.
+bool HybridMutex::tryLock() { return true; }
+
+void HybridMutex::lockSlow() {}
+
+void HybridMutex::unlock() {}
+
+void HybridMutex::assertHeldImpl() {}
+
+u64 getMonotonicTime() {
+ timespec TS;
+ clock_gettime(CLOCK_MONOTONIC, &TS);
+ return static_cast<u64>(TS.tv_sec) * (1000ULL * 1000 * 1000) +
+ static_cast<u64>(TS.tv_nsec);
+}
+
+u64 getMonotonicTimeFast() {
+#if defined(CLOCK_MONOTONIC_COARSE)
+ timespec TS;
+ clock_gettime(CLOCK_MONOTONIC_COARSE, &TS);
+ return static_cast<u64>(TS.tv_sec) * (1000ULL * 1000 * 1000) +
+ static_cast<u64>(TS.tv_nsec);
+#else
+ return getMonotonicTime();
+#endif
+}
+
+u32 getNumberOfCPUs() { return 0; }
+
+u32 getThreadID() { return 0; }
+
+bool getRandom(UNUSED void *Buffer, UNUSED uptr Length, UNUSED bool Blocking) {
+ return false;
+}
+
+void outputRaw(const char *Buffer) { printf("%s", Buffer); }
+
+void setAbortMessage(UNUSED const char *Message) {}
+
+} // namespace scudo
+
+#endif // SCUDO_TRUSTY
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/trusty.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/trusty.h
new file mode 100644
index 000000000000..50edd1c6fe63
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/trusty.h
@@ -0,0 +1,24 @@
+//===-- trusty.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TRUSTY_H_
+#define SCUDO_TRUSTY_H_
+
+#include "platform.h"
+
+#if SCUDO_TRUSTY
+
+namespace scudo {
+// MapPlatformData is unused on Trusty; define it as a minimally sized
+// structure.
+struct MapPlatformData {};
+} // namespace scudo
+
+#endif // SCUDO_TRUSTY
+
+#endif // SCUDO_TRUSTY_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd.h
new file mode 100644
index 000000000000..72773f2f72b1
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd.h
@@ -0,0 +1,90 @@
+//===-- tsd.h ---------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TSD_H_
+#define SCUDO_TSD_H_
+
+#include "atomic_helpers.h"
+#include "common.h"
+#include "mutex.h"
+#include "thread_annotations.h"
+
+#include <limits.h> // for PTHREAD_DESTRUCTOR_ITERATIONS
+#include <pthread.h>
+
+// With some build setups, this might still not be defined.
+#ifndef PTHREAD_DESTRUCTOR_ITERATIONS
+#define PTHREAD_DESTRUCTOR_ITERATIONS 4
+#endif
+
+namespace scudo {
+
+template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
+ using ThisT = TSD<Allocator>;
+ u8 DestructorIterations = 0;
+
+ void init(Allocator *Instance) NO_THREAD_SAFETY_ANALYSIS {
+ DCHECK_EQ(DestructorIterations, 0U);
+ DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
+ Instance->initCache(&Cache);
+ DestructorIterations = PTHREAD_DESTRUCTOR_ITERATIONS;
+ }
+
+ inline bool tryLock() NO_THREAD_SAFETY_ANALYSIS {
+ if (Mutex.tryLock()) {
+ atomic_store_relaxed(&Precedence, 0);
+ return true;
+ }
+ if (atomic_load_relaxed(&Precedence) == 0)
+ atomic_store_relaxed(
+ &Precedence,
+ static_cast<uptr>(getMonotonicTime() >> FIRST_32_SECOND_64(16, 0)));
+ return false;
+ }
+ inline void lock() NO_THREAD_SAFETY_ANALYSIS {
+ atomic_store_relaxed(&Precedence, 0);
+ Mutex.lock();
+ }
+ inline void unlock() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }
+ inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
+
+ void commitBack(Allocator *Instance) { Instance->commitBack(this); }
+
+  // As noted in the comments attached to `getCache()`, the TSD doesn't always
+  // need to be locked. In that case, we only skip the check until all paths
+  // have the TSD locked.
+ void assertLocked(bool BypassCheck) ASSERT_CAPABILITY(Mutex) {
+ if (SCUDO_DEBUG && !BypassCheck)
+ Mutex.assertHeld();
+ }
+
+  // Ideally, we would assert that all the operations on Cache/QuarantineCache
+  // are performed with the `Mutex` acquired. However, the current way of
+  // accessing the TSD doesn't cooperate easily with the thread-safety analysis
+  // because of pointer aliasing. So for now, we only add the assertion to the
+  // getters of Cache/QuarantineCache.
+ //
+ // TODO(chiahungduan): Ideally, we want to do `Mutex.assertHeld` but acquiring
+ // TSD doesn't always require holding the lock. Add this assertion while the
+ // lock is always acquired.
+ typename Allocator::CacheT &getCache() REQUIRES(Mutex) { return Cache; }
+ typename Allocator::QuarantineCacheT &getQuarantineCache() REQUIRES(Mutex) {
+ return QuarantineCache;
+ }
+
+private:
+ HybridMutex Mutex;
+ atomic_uptr Precedence = {};
+
+ typename Allocator::CacheT Cache GUARDED_BY(Mutex);
+ typename Allocator::QuarantineCacheT QuarantineCache GUARDED_BY(Mutex);
+};
+
+} // namespace scudo
+
+#endif // SCUDO_TSD_H_
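
One way to read the tryLock()/getPrecedence() pair above: a failed tryLock() stamps Precedence with a coarse monotonic timestamp and a successful lock clears it, so a TSD registry can compare precedences to prefer a less recently contended TSD. The sketch below shows that idea with a hypothetical two-TSD chooser (assuming this header is on the include path); it is not how Scudo's registries are actually written.

    #include "tsd.h"

    template <class Allocator>
    scudo::TSD<Allocator> *pickLessContended(scudo::TSD<Allocator> *A,
                                             scudo::TSD<Allocator> *B) {
      if (A->tryLock())
        return A;
      if (B->tryLock())
        return B;
      // Both are contended: block on the one whose contention stamp is older
      // (a precedence of 0 means it was never observed contended).
      scudo::TSD<Allocator> *Chosen =
          A->getPrecedence() <= B->getPrecedence() ? A : B;
      Chosen->lock();
      return Chosen;
    }
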
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_exclusive.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
new file mode 100644
index 000000000000..a58ba6505089
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
@@ -0,0 +1,198 @@
+//===-- tsd_exclusive.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TSD_EXCLUSIVE_H_
+#define SCUDO_TSD_EXCLUSIVE_H_
+
+#include "tsd.h"
+
+#include "string_utils.h"
+
+namespace scudo {
+
+struct ThreadState {
+ bool DisableMemInit : 1;
+ enum : unsigned {
+ NotInitialized = 0,
+ Initialized,
+ TornDown,
+ } InitState : 2;
+};
+
+template <class Allocator> void teardownThread(void *Ptr);
+
+template <class Allocator> struct TSDRegistryExT {
+ using ThisT = TSDRegistryExT<Allocator>;
+
+ struct ScopedTSD {
+ ALWAYS_INLINE ScopedTSD(ThisT &TSDRegistry) {
+ CurrentTSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
+ DCHECK_NE(CurrentTSD, nullptr);
+ }
+
+ ~ScopedTSD() {
+ if (UNLIKELY(UnlockRequired))
+ CurrentTSD->unlock();
+ }
+
+ TSD<Allocator> &operator*() { return *CurrentTSD; }
+
+ TSD<Allocator> *operator->() {
+ CurrentTSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
+ return CurrentTSD;
+ }
+
+ private:
+ TSD<Allocator> *CurrentTSD;
+ bool UnlockRequired;
+ };
+
+ void init(Allocator *Instance) REQUIRES(Mutex) {
+ DCHECK(!Initialized);
+ Instance->init();
+ CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
+ FallbackTSD.init(Instance);
+ Initialized = true;
+ }
+
+ void initOnceMaybe(Allocator *Instance) EXCLUDES(Mutex) {
+ ScopedLock L(Mutex);
+ if (LIKELY(Initialized))
+ return;
+ init(Instance); // Sets Initialized.
+ }
+
+ void unmapTestOnly(Allocator *Instance) EXCLUDES(Mutex) {
+ DCHECK(Instance);
+ if (reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey))) {
+ DCHECK_EQ(reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey)),
+ Instance);
+ ThreadTSD.commitBack(Instance);
+ ThreadTSD = {};
+ }
+ CHECK_EQ(pthread_key_delete(PThreadKey), 0);
+ PThreadKey = {};
+ FallbackTSD.commitBack(Instance);
+ FallbackTSD = {};
+ State = {};
+ ScopedLock L(Mutex);
+ Initialized = false;
+ }
+
+ void drainCaches(Allocator *Instance) {
+    // We have no way to iterate over all thread-local `ThreadTSD`s, so only
+    // drain the current thread's `ThreadTSD` and the `FallbackTSD`.
+ Instance->drainCache(&ThreadTSD);
+ FallbackTSD.lock();
+ Instance->drainCache(&FallbackTSD);
+ FallbackTSD.unlock();
+ }
+
+ ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
+ if (LIKELY(State.InitState != ThreadState::NotInitialized))
+ return;
+ initThread(Instance, MinimalInit);
+ }
+
+ // To disable the exclusive TSD registry, we effectively lock the fallback TSD
+ // and force all threads to attempt to use it instead of their local one.
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
+ Mutex.lock();
+ FallbackTSD.lock();
+ atomic_store(&Disabled, 1U, memory_order_release);
+ }
+
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
+ atomic_store(&Disabled, 0U, memory_order_release);
+ FallbackTSD.unlock();
+ Mutex.unlock();
+ }
+
+ bool setOption(Option O, sptr Value) {
+ if (O == Option::ThreadDisableMemInit)
+ State.DisableMemInit = Value;
+ if (O == Option::MaxTSDsCount)
+ return false;
+ return true;
+ }
+
+ bool getDisableMemInit() { return State.DisableMemInit; }
+
+ void getStats(ScopedString *Str) {
+    // We have no way to iterate over all thread-local `ThreadTSD`s. Printing
+    // only the current thread's `ThreadTSD` could be misleading, so we skip it
+    // entirely.
+    Str->append("Exclusive TSDs do not support iterating over each TSD\n");
+ }
+
+private:
+ ALWAYS_INLINE TSD<Allocator> *
+ getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
+ if (LIKELY(State.InitState == ThreadState::Initialized &&
+ !atomic_load(&Disabled, memory_order_acquire))) {
+ *UnlockRequired = false;
+ return &ThreadTSD;
+ }
+ FallbackTSD.lock();
+ *UnlockRequired = true;
+ return &FallbackTSD;
+ }
+
+ // Using minimal initialization allows for global initialization while keeping
+ // the thread specific structure untouched. The fallback structure will be
+ // used instead.
+ NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
+ initOnceMaybe(Instance);
+ if (UNLIKELY(MinimalInit))
+ return;
+ CHECK_EQ(
+ pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
+ ThreadTSD.init(Instance);
+ State.InitState = ThreadState::Initialized;
+ Instance->callPostInitCallback();
+ }
+
+ pthread_key_t PThreadKey = {};
+ bool Initialized GUARDED_BY(Mutex) = false;
+ atomic_u8 Disabled = {};
+ TSD<Allocator> FallbackTSD;
+ HybridMutex Mutex;
+ static thread_local ThreadState State;
+ static thread_local TSD<Allocator> ThreadTSD;
+
+ friend void teardownThread<Allocator>(void *Ptr);
+};
+
+template <class Allocator>
+thread_local TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
+template <class Allocator>
+thread_local ThreadState TSDRegistryExT<Allocator>::State;
+
+template <class Allocator>
+void teardownThread(void *Ptr) NO_THREAD_SAFETY_ANALYSIS {
+ typedef TSDRegistryExT<Allocator> TSDRegistryT;
+ Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
+  // The glibc POSIX thread-local-storage deallocation routine calls
+  // user-provided destructors in a loop of up to PTHREAD_DESTRUCTOR_ITERATIONS
+  // passes. We want to run last, since other destructors might call free() and
+  // the like, so we keep re-arming ourselves until the final pass before
+  // draining the quarantine and swallowing the cache.
+ if (TSDRegistryT::ThreadTSD.DestructorIterations > 1) {
+ TSDRegistryT::ThreadTSD.DestructorIterations--;
+ // If pthread_setspecific fails, we will go ahead with the teardown.
+ if (LIKELY(pthread_setspecific(Instance->getTSDRegistry()->PThreadKey,
+ Ptr) == 0))
+ return;
+ }
+ TSDRegistryT::ThreadTSD.commitBack(Instance);
+ TSDRegistryT::State.InitState = ThreadState::TornDown;
+}
+
+} // namespace scudo
+
+#endif // SCUDO_TSD_EXCLUSIVE_H_
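
teardownThread() above relies on a standard pthread trick: a TLS destructor can postpone its own work by re-registering a non-null value with pthread_setspecific(), which makes the runtime call it again on the next destructor pass, up to PTHREAD_DESTRUCTOR_ITERATIONS passes in total. A minimal standalone sketch of that re-arming pattern, with illustrative names rather than Scudo's:

#include <limits.h>
#include <pthread.h>
#include <stdio.h>

#ifndef PTHREAD_DESTRUCTOR_ITERATIONS
#define PTHREAD_DESTRUCTOR_ITERATIONS 4
#endif

static pthread_key_t Key;
static int Dummy; // Any non-null value works as the key's payload.
static thread_local int RoundsLeft = PTHREAD_DESTRUCTOR_ITERATIONS;

// Runs once per destructor pass; it re-arms itself until the final pass so
// that destructors registered by other libraries (which may still call free())
// get to run first. The number of re-runs is capped by the implementation at
// PTHREAD_DESTRUCTOR_ITERATIONS.
static void lateTeardown(void *Ptr) {
  if (RoundsLeft > 1) {
    RoundsLeft--;
    // Setting a non-null value again makes pthread call us on the next pass.
    // If that fails, simply tear down now.
    if (pthread_setspecific(Key, Ptr) == 0)
      return;
  }
  fprintf(stderr, "final teardown for this thread\n");
}

static void *threadBody(void *) {
  pthread_setspecific(Key, &Dummy);
  return nullptr; // Destructor passes run after the thread function returns.
}

int main() {
  pthread_key_create(&Key, lateTeardown);
  pthread_t T;
  pthread_create(&T, nullptr, threadBody, nullptr);
  pthread_join(T, nullptr);
  pthread_key_delete(Key);
  return 0;
}
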
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_shared.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_shared.h
new file mode 100644
index 000000000000..dade16dad9f2
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_shared.h
@@ -0,0 +1,269 @@
+//===-- tsd_shared.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TSD_SHARED_H_
+#define SCUDO_TSD_SHARED_H_
+
+#include "tsd.h"
+
+#include "string_utils.h"
+
+#if SCUDO_HAS_PLATFORM_TLS_SLOT
+// This is a platform-provided header that needs to be on the include path when
+// Scudo is compiled. It must declare a function with the prototype:
+// uintptr_t *getPlatformAllocatorTlsSlot()
+// that returns the address of a thread-local word of storage reserved for
+// Scudo, which must be zero-initialized in newly created threads.
+#include "scudo_platform_tls_slot.h"
+#endif
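
// An illustrative sketch (not part of this patch) of the smallest possible
// scudo_platform_tls_slot.h satisfying the contract stated above: return the
// address of a per-thread word reserved for Scudo, zero-initialized in newly
// created threads. A real platform would typically hand out a dedicated slot
// in its thread control block instead of a plain thread_local.
#include <stdint.h>
inline uintptr_t *getPlatformAllocatorTlsSlot() {
  static thread_local uintptr_t ScudoTlsWord = 0;
  return &ScudoTlsWord;
}
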
+
+namespace scudo {
+
+template <class Allocator, u32 TSDsArraySize, u32 DefaultTSDCount>
+struct TSDRegistrySharedT {
+ using ThisT = TSDRegistrySharedT<Allocator, TSDsArraySize, DefaultTSDCount>;
+
+ struct ScopedTSD {
+ ALWAYS_INLINE ScopedTSD(ThisT &TSDRegistry) {
+ CurrentTSD = TSDRegistry.getTSDAndLock();
+ DCHECK_NE(CurrentTSD, nullptr);
+ }
+
+ ~ScopedTSD() { CurrentTSD->unlock(); }
+
+ TSD<Allocator> &operator*() { return *CurrentTSD; }
+
+ TSD<Allocator> *operator->() {
+ CurrentTSD->assertLocked(/*BypassCheck=*/false);
+ return CurrentTSD;
+ }
+
+ private:
+ TSD<Allocator> *CurrentTSD;
+ };
+
+ void init(Allocator *Instance) REQUIRES(Mutex) {
+ DCHECK(!Initialized);
+ Instance->init();
+ for (u32 I = 0; I < TSDsArraySize; I++)
+ TSDs[I].init(Instance);
+ const u32 NumberOfCPUs = getNumberOfCPUs();
+ setNumberOfTSDs((NumberOfCPUs == 0) ? DefaultTSDCount
+ : Min(NumberOfCPUs, DefaultTSDCount));
+ Initialized = true;
+ }
+
+ void initOnceMaybe(Allocator *Instance) EXCLUDES(Mutex) {
+ ScopedLock L(Mutex);
+ if (LIKELY(Initialized))
+ return;
+ init(Instance); // Sets Initialized.
+ }
+
+ void unmapTestOnly(Allocator *Instance) EXCLUDES(Mutex) {
+ for (u32 I = 0; I < TSDsArraySize; I++) {
+ TSDs[I].commitBack(Instance);
+ TSDs[I] = {};
+ }
+ setCurrentTSD(nullptr);
+ ScopedLock L(Mutex);
+ Initialized = false;
+ }
+
+ void drainCaches(Allocator *Instance) {
+ ScopedLock L(MutexTSDs);
+ for (uptr I = 0; I < NumberOfTSDs; ++I) {
+ TSDs[I].lock();
+ Instance->drainCache(&TSDs[I]);
+ TSDs[I].unlock();
+ }
+ }
+
+ ALWAYS_INLINE void initThreadMaybe(Allocator *Instance,
+ UNUSED bool MinimalInit) {
+ if (LIKELY(getCurrentTSD()))
+ return;
+ initThread(Instance);
+ }
+
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
+ Mutex.lock();
+ for (u32 I = 0; I < TSDsArraySize; I++)
+ TSDs[I].lock();
+ }
+
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
+ for (s32 I = static_cast<s32>(TSDsArraySize - 1); I >= 0; I--)
+ TSDs[I].unlock();
+ Mutex.unlock();
+ }
+
+ bool setOption(Option O, sptr Value) {
+ if (O == Option::MaxTSDsCount)
+ return setNumberOfTSDs(static_cast<u32>(Value));
+ if (O == Option::ThreadDisableMemInit)
+ setDisableMemInit(Value);
+ // Not supported by the TSD Registry, but not an error either.
+ return true;
+ }
+
+ bool getDisableMemInit() const { return *getTlsPtr() & 1; }
+
+ void getStats(ScopedString *Str) EXCLUDES(MutexTSDs) {
+ ScopedLock L(MutexTSDs);
+
+ Str->append("Stats: SharedTSDs: %u available; total %u\n", NumberOfTSDs,
+ TSDsArraySize);
+ for (uptr I = 0; I < NumberOfTSDs; ++I) {
+ TSDs[I].lock();
+      // Ideally, TSD::lock()/TSD::unlock() would carry proper thread-safety
+      // annotations. However, since the TSD is only locked on the shared
+      // path, we do the assertion separately here to avoid confusing the
+      // analyzer.
+ TSDs[I].assertLocked(/*BypassCheck=*/true);
+ Str->append(" Shared TSD[%zu]:\n", I);
+ TSDs[I].getCache().getStats(Str);
+ TSDs[I].unlock();
+ }
+ }
+
+private:
+ ALWAYS_INLINE TSD<Allocator> *getTSDAndLock() NO_THREAD_SAFETY_ANALYSIS {
+ TSD<Allocator> *TSD = getCurrentTSD();
+ DCHECK(TSD);
+ // Try to lock the currently associated context.
+ if (TSD->tryLock())
+ return TSD;
+ // If that fails, go down the slow path.
+ if (TSDsArraySize == 1U) {
+      // Only 1 TSD, no need to go any further.
+ // The compiler will optimize this one way or the other.
+ TSD->lock();
+ return TSD;
+ }
+ return getTSDAndLockSlow(TSD);
+ }
+
+ ALWAYS_INLINE uptr *getTlsPtr() const {
+#if SCUDO_HAS_PLATFORM_TLS_SLOT
+ return reinterpret_cast<uptr *>(getPlatformAllocatorTlsSlot());
+#else
+ static thread_local uptr ThreadTSD;
+ return &ThreadTSD;
+#endif
+ }
+
+ static_assert(alignof(TSD<Allocator>) >= 2, "");
+
+ ALWAYS_INLINE void setCurrentTSD(TSD<Allocator> *CurrentTSD) {
+ *getTlsPtr() &= 1;
+ *getTlsPtr() |= reinterpret_cast<uptr>(CurrentTSD);
+ }
+
+ ALWAYS_INLINE TSD<Allocator> *getCurrentTSD() {
+ return reinterpret_cast<TSD<Allocator> *>(*getTlsPtr() & ~1ULL);
+ }
+
+ bool setNumberOfTSDs(u32 N) EXCLUDES(MutexTSDs) {
+ ScopedLock L(MutexTSDs);
+ if (N < NumberOfTSDs)
+ return false;
+ if (N > TSDsArraySize)
+ N = TSDsArraySize;
+ NumberOfTSDs = N;
+ NumberOfCoPrimes = 0;
+ // Compute all the coprimes of NumberOfTSDs. This will be used to walk the
+ // array of TSDs in a random order. For details, see:
+ // https://lemire.me/blog/2017/09/18/visiting-all-values-in-an-array-exactly-once-in-random-order/
+ for (u32 I = 0; I < N; I++) {
+ u32 A = I + 1;
+ u32 B = N;
+      // Find the GCD between I + 1 and N. If it is 1, they are coprime.
+ while (B != 0) {
+ const u32 T = A;
+ A = B;
+ B = T % B;
+ }
+ if (A == 1)
+ CoPrimes[NumberOfCoPrimes++] = I + 1;
+ }
+ return true;
+ }
+
+ void setDisableMemInit(bool B) {
+ *getTlsPtr() &= ~1ULL;
+ *getTlsPtr() |= B;
+ }
+
+ NOINLINE void initThread(Allocator *Instance) NO_THREAD_SAFETY_ANALYSIS {
+ initOnceMaybe(Instance);
+ // Initial context assignment is done in a plain round-robin fashion.
+ const u32 Index = atomic_fetch_add(&CurrentIndex, 1U, memory_order_relaxed);
+ setCurrentTSD(&TSDs[Index % NumberOfTSDs]);
+ Instance->callPostInitCallback();
+ }
+
+  // TSDs is an array of locks, which the thread-safety analysis doesn't
+  // support marking as a capability.
+ NOINLINE TSD<Allocator> *getTSDAndLockSlow(TSD<Allocator> *CurrentTSD)
+ EXCLUDES(MutexTSDs) {
+ // Use the Precedence of the current TSD as our random seed. Since we are
+ // in the slow path, it means that tryLock failed, and as a result it's
+ // very likely that said Precedence is non-zero.
+ const u32 R = static_cast<u32>(CurrentTSD->getPrecedence());
+ u32 N, Inc;
+ {
+ ScopedLock L(MutexTSDs);
+ N = NumberOfTSDs;
+ DCHECK_NE(NumberOfCoPrimes, 0U);
+ Inc = CoPrimes[R % NumberOfCoPrimes];
+ }
+ if (N > 1U) {
+ u32 Index = R % N;
+ uptr LowestPrecedence = UINTPTR_MAX;
+ TSD<Allocator> *CandidateTSD = nullptr;
+ // Go randomly through at most 4 contexts and find a candidate.
+ for (u32 I = 0; I < Min(4U, N); I++) {
+ if (TSDs[Index].tryLock()) {
+ setCurrentTSD(&TSDs[Index]);
+ return &TSDs[Index];
+ }
+ const uptr Precedence = TSDs[Index].getPrecedence();
+ // A 0 precedence here means another thread just locked this TSD.
+ if (Precedence && Precedence < LowestPrecedence) {
+ CandidateTSD = &TSDs[Index];
+ LowestPrecedence = Precedence;
+ }
+ Index += Inc;
+ if (Index >= N)
+ Index -= N;
+ }
+ if (CandidateTSD) {
+ CandidateTSD->lock();
+ setCurrentTSD(CandidateTSD);
+ return CandidateTSD;
+ }
+ }
+ // Last resort, stick with the current one.
+ CurrentTSD->lock();
+ return CurrentTSD;
+ }
+
+ atomic_u32 CurrentIndex = {};
+ u32 NumberOfTSDs GUARDED_BY(MutexTSDs) = 0;
+ u32 NumberOfCoPrimes GUARDED_BY(MutexTSDs) = 0;
+ u32 CoPrimes[TSDsArraySize] GUARDED_BY(MutexTSDs) = {};
+ bool Initialized GUARDED_BY(Mutex) = false;
+ HybridMutex Mutex;
+ HybridMutex MutexTSDs;
+ TSD<Allocator> TSDs[TSDsArraySize];
+};
+
+} // namespace scudo
+
+#endif // SCUDO_TSD_SHARED_H_
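
setNumberOfTSDs() and getTSDAndLockSlow() above use a classic trick for visiting an array in a scrambled order: pick a stride that is coprime with the array length and keep adding it modulo the length; every index is then visited exactly once before the sequence repeats. A small self-contained sketch of that walk (illustrative names, not Scudo code):

#include <cstdio>
#include <vector>

// Greatest common divisor; Stride is coprime with N when gcd(Stride, N) == 1.
static unsigned gcd(unsigned A, unsigned B) {
  while (B != 0) {
    const unsigned T = A % B;
    A = B;
    B = T;
  }
  return A;
}

// Collect all strides in [1, N] that are coprime with N.
static std::vector<unsigned> coprimesOf(unsigned N) {
  std::vector<unsigned> Out;
  for (unsigned I = 1; I <= N; I++)
    if (gcd(I, N) == 1)
      Out.push_back(I);
  return Out;
}

int main() {
  const unsigned N = 8;
  const std::vector<unsigned> CoPrimes = coprimesOf(N);
  // Pretend Seed came from some per-thread value (the registry above uses the
  // TSD's precedence); it selects both the starting index and the stride.
  const unsigned Seed = 5;
  unsigned Index = Seed % N;
  const unsigned Inc = CoPrimes[Seed % CoPrimes.size()];
  // Because Inc is coprime with N, this loop touches each of the N slots
  // exactly once: 5 0 3 6 1 4 7 2 for the values chosen here.
  for (unsigned I = 0; I < N; I++) {
    std::printf("%u ", Index);
    Index += Inc;
    if (Index >= N)
      Index -= N;
  }
  std::printf("\n");
  return 0;
}
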
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/vector.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/vector.h
new file mode 100644
index 000000000000..98b3db4ad698
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/vector.h
@@ -0,0 +1,143 @@
+//===-- vector.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_VECTOR_H_
+#define SCUDO_VECTOR_H_
+
+#include "mem_map.h"
+
+#include <string.h>
+
+namespace scudo {
+
+// A low-level vector based on memory mapping. It stores its contents inline up
+// to a fixed capacity, or in an externally mapped buffer once it grows bigger
+// than that. May incur a significant memory overhead for small vectors. The
+// current implementation supports only POD types.
+//
+// NOTE: This class is not meant to be used directly, use Vector<T> instead.
+template <typename T, size_t StaticNumEntries> class VectorNoCtor {
+public:
+ T &operator[](uptr I) {
+ DCHECK_LT(I, Size);
+ return Data[I];
+ }
+ const T &operator[](uptr I) const {
+ DCHECK_LT(I, Size);
+ return Data[I];
+ }
+ void push_back(const T &Element) {
+ DCHECK_LE(Size, capacity());
+ if (Size == capacity()) {
+ const uptr NewCapacity = roundUpPowerOfTwo(Size + 1);
+ if (!reallocate(NewCapacity)) {
+ return;
+ }
+ }
+ memcpy(&Data[Size++], &Element, sizeof(T));
+ }
+ T &back() {
+ DCHECK_GT(Size, 0);
+ return Data[Size - 1];
+ }
+ void pop_back() {
+ DCHECK_GT(Size, 0);
+ Size--;
+ }
+ uptr size() const { return Size; }
+ const T *data() const { return Data; }
+ T *data() { return Data; }
+ constexpr uptr capacity() const { return CapacityBytes / sizeof(T); }
+ bool reserve(uptr NewSize) {
+ // Never downsize internal buffer.
+ if (NewSize > capacity())
+ return reallocate(NewSize);
+ return true;
+ }
+ void resize(uptr NewSize) {
+ if (NewSize > Size) {
+ if (!reserve(NewSize)) {
+ return;
+ }
+ memset(&Data[Size], 0, sizeof(T) * (NewSize - Size));
+ }
+ Size = NewSize;
+ }
+
+ void clear() { Size = 0; }
+ bool empty() const { return size() == 0; }
+
+ const T *begin() const { return data(); }
+ T *begin() { return data(); }
+ const T *end() const { return data() + size(); }
+ T *end() { return data() + size(); }
+
+protected:
+ constexpr void init(uptr InitialCapacity = 0) {
+ Data = &LocalData[0];
+ CapacityBytes = sizeof(LocalData);
+ if (InitialCapacity > capacity())
+ reserve(InitialCapacity);
+ }
+ void destroy() {
+ if (Data != &LocalData[0])
+ ExternalBuffer.unmap(ExternalBuffer.getBase(),
+ ExternalBuffer.getCapacity());
+ }
+
+private:
+ bool reallocate(uptr NewCapacity) {
+ DCHECK_GT(NewCapacity, 0);
+ DCHECK_LE(Size, NewCapacity);
+
+ MemMapT NewExternalBuffer;
+ NewCapacity = roundUp(NewCapacity * sizeof(T), getPageSizeCached());
+ if (!NewExternalBuffer.map(/*Addr=*/0U, NewCapacity, "scudo:vector",
+ MAP_ALLOWNOMEM)) {
+ return false;
+ }
+ T *NewExternalData = reinterpret_cast<T *>(NewExternalBuffer.getBase());
+
+ memcpy(NewExternalData, Data, Size * sizeof(T));
+ destroy();
+
+ Data = NewExternalData;
+ CapacityBytes = NewCapacity;
+ ExternalBuffer = NewExternalBuffer;
+ return true;
+ }
+
+ T *Data = nullptr;
+ uptr CapacityBytes = 0;
+ uptr Size = 0;
+
+ T LocalData[StaticNumEntries] = {};
+ MemMapT ExternalBuffer;
+};
+
+template <typename T, size_t StaticNumEntries>
+class Vector : public VectorNoCtor<T, StaticNumEntries> {
+public:
+ static_assert(StaticNumEntries > 0U,
+ "Vector must have a non-zero number of static entries.");
+ constexpr Vector() { VectorNoCtor<T, StaticNumEntries>::init(); }
+ explicit Vector(uptr Count) {
+ VectorNoCtor<T, StaticNumEntries>::init(Count);
+ this->resize(Count);
+ }
+ ~Vector() { VectorNoCtor<T, StaticNumEntries>::destroy(); }
+ // Disallow copies and moves.
+ Vector(const Vector &) = delete;
+ Vector &operator=(const Vector &) = delete;
+ Vector(Vector &&) = delete;
+ Vector &operator=(Vector &&) = delete;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_VECTOR_H_
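
A brief usage sketch for the container above; it assumes the code is built alongside the Scudo headers it depends on (vector.h and, transitively, internal_defs.h for scudo::u32), and the element type and counts are arbitrary:

#include "vector.h"

static void vectorUsageSketch() {
  // The first 4 entries live in the inline LocalData buffer; the 5th
  // push_back moves the contents into an external memory mapping (the
  // requested capacity is rounded up to a power of two, then to a page).
  scudo::Vector<scudo::u32, 4> V;
  for (scudo::u32 I = 0; I < 10; I++)
    V.push_back(I * I);
  // resize() zero-fills any newly exposed elements.
  V.resize(16);
  scudo::u32 Sum = 0;
  for (const scudo::u32 &X : V) // iteration via begin()/end()
    Sum += X;
  (void)Sum;
}
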
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.cpp
new file mode 100644
index 000000000000..60014a0f66bf
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.cpp
@@ -0,0 +1,40 @@
+//===-- wrappers_c.cpp ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+// Skip this compilation unit if compiled as part of Bionic.
+#if !SCUDO_ANDROID || !_BIONIC
+
+#include "allocator_config.h"
+#include "internal_defs.h"
+#include "platform.h"
+#include "scudo/interface.h"
+#include "wrappers_c.h"
+#include "wrappers_c_checks.h"
+
+#include <stdint.h>
+#include <stdio.h>
+
+#define SCUDO_PREFIX(name) name
+#define SCUDO_ALLOCATOR Allocator
+
+// Export the static allocator so that the C++ wrappers can access it.
+// Technically we could have a completely separated heap for C & C++ but in
+// reality the amount of cross pollination between the two is staggering.
+SCUDO_REQUIRE_CONSTANT_INITIALIZATION
+scudo::Allocator<scudo::Config, SCUDO_PREFIX(malloc_postinit)> SCUDO_ALLOCATOR;
+
+#include "wrappers_c.inc"
+
+#undef SCUDO_ALLOCATOR
+#undef SCUDO_PREFIX
+
+extern "C" INTERFACE void __scudo_print_stats(void) { Allocator.printStats(); }
+
+#endif // !SCUDO_ANDROID || !_BIONIC
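
This file defines SCUDO_PREFIX(name) as the bare name and then includes wrappers_c.inc, so the wrappers come out under the standard malloc/free symbol names; the Bionic wrapper later in this patch includes the same .inc under a scudo_ prefix instead. A toy illustration of that prefix-and-include technique, with the shared body simulated by a macro (a hypothetical stand-in, not the actual .inc mechanism) so the example fits in one file:

#include <cstdio>

// In the real layout this body lives in an .inc file that is included after
// defining the prefix macro; a function-generating macro keeps the sketch
// self-contained in a single translation unit.
#define DEFINE_WRAPPERS(PREFIX)                                                \
  void PREFIX##_report(const char *What) {                                     \
    std::printf("[%s] %s\n", #PREFIX, What);                                   \
  }

DEFINE_WRAPPERS(plain) // e.g. the exported malloc/free family
DEFINE_WRAPPERS(scudo) // e.g. the Bionic scudo_* dispatch family

int main() {
  plain_report("hello");
  scudo_report("hello");
  return 0;
}
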
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.h
new file mode 100644
index 000000000000..08dc679b34ca
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.h
@@ -0,0 +1,62 @@
+//===-- wrappers_c.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_WRAPPERS_C_H_
+#define SCUDO_WRAPPERS_C_H_
+
+#include "platform.h"
+#include "stats.h"
+
+// Bionic's struct mallinfo consists of size_t fields (mallinfo(3) uses int).
+#if SCUDO_ANDROID
+typedef size_t __scudo_mallinfo_data_t;
+#else
+typedef int __scudo_mallinfo_data_t;
+#endif
+
+struct __scudo_mallinfo {
+ __scudo_mallinfo_data_t arena;
+ __scudo_mallinfo_data_t ordblks;
+ __scudo_mallinfo_data_t smblks;
+ __scudo_mallinfo_data_t hblks;
+ __scudo_mallinfo_data_t hblkhd;
+ __scudo_mallinfo_data_t usmblks;
+ __scudo_mallinfo_data_t fsmblks;
+ __scudo_mallinfo_data_t uordblks;
+ __scudo_mallinfo_data_t fordblks;
+ __scudo_mallinfo_data_t keepcost;
+};
+
+struct __scudo_mallinfo2 {
+ size_t arena;
+ size_t ordblks;
+ size_t smblks;
+ size_t hblks;
+ size_t hblkhd;
+ size_t usmblks;
+ size_t fsmblks;
+ size_t uordblks;
+ size_t fordblks;
+ size_t keepcost;
+};
+
+// Android sometimes includes malloc.h no matter what, which leads to
+// conflicting return types for mallinfo() if we use our own structure. So if
+// struct mallinfo is declared (#define courtesy of malloc.h), use it directly.
+#if STRUCT_MALLINFO_DECLARED
+#define SCUDO_MALLINFO mallinfo
+#else
+#define SCUDO_MALLINFO __scudo_mallinfo
+#endif
+
+#if !SCUDO_ANDROID || !_BIONIC
+extern "C" void malloc_postinit();
+extern HIDDEN scudo::Allocator<scudo::Config, malloc_postinit> Allocator;
+#endif
+
+#endif // SCUDO_WRAPPERS_C_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.inc b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.inc
new file mode 100644
index 000000000000..59f3fb0962f8
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.inc
@@ -0,0 +1,377 @@
+//===-- wrappers_c.inc ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_PREFIX
+#error "Define SCUDO_PREFIX prior to including this file!"
+#endif
+
+// Memory returned by malloc-type functions has to be aligned to
+// alignof(std::max_align_t). This is distinct from
+// (1U << SCUDO_MIN_ALIGNMENT_LOG), since C++ new-type functions do not have to
+// abide by the same requirement.
+#ifndef SCUDO_MALLOC_ALIGNMENT
+#define SCUDO_MALLOC_ALIGNMENT FIRST_32_SECOND_64(8U, 16U)
+#endif
+
+static void reportAllocation(void *ptr, size_t size) {
+ if (SCUDO_ENABLE_HOOKS)
+ if (__scudo_allocate_hook && ptr)
+ __scudo_allocate_hook(ptr, size);
+}
+static void reportDeallocation(void *ptr) {
+ if (SCUDO_ENABLE_HOOKS)
+ if (__scudo_deallocate_hook)
+ __scudo_deallocate_hook(ptr);
+}
+static void reportReallocAllocation(void *old_ptr, void *new_ptr, size_t size) {
+ DCHECK_NE(new_ptr, nullptr);
+
+ if (SCUDO_ENABLE_HOOKS) {
+ if (__scudo_realloc_allocate_hook)
+ __scudo_realloc_allocate_hook(old_ptr, new_ptr, size);
+ else if (__scudo_allocate_hook)
+ __scudo_allocate_hook(new_ptr, size);
+ }
+}
+static void reportReallocDeallocation(void *old_ptr) {
+ if (SCUDO_ENABLE_HOOKS) {
+ if (__scudo_realloc_deallocate_hook)
+ __scudo_realloc_deallocate_hook(old_ptr);
+ else if (__scudo_deallocate_hook)
+ __scudo_deallocate_hook(old_ptr);
+ }
+}
+
+extern "C" {
+
+INTERFACE WEAK void *SCUDO_PREFIX(calloc)(size_t nmemb, size_t size) {
+ scudo::uptr Product;
+ if (UNLIKELY(scudo::checkForCallocOverflow(size, nmemb, &Product))) {
+ if (SCUDO_ALLOCATOR.canReturnNull()) {
+ errno = ENOMEM;
+ return nullptr;
+ }
+ scudo::reportCallocOverflow(nmemb, size);
+ }
+ void *Ptr = SCUDO_ALLOCATOR.allocate(Product, scudo::Chunk::Origin::Malloc,
+ SCUDO_MALLOC_ALIGNMENT, true);
+ reportAllocation(Ptr, Product);
+ return scudo::setErrnoOnNull(Ptr);
+}
+
+INTERFACE WEAK void SCUDO_PREFIX(free)(void *ptr) {
+ reportDeallocation(ptr);
+ SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
+}
+
+INTERFACE WEAK struct SCUDO_MALLINFO SCUDO_PREFIX(mallinfo)(void) {
+ struct SCUDO_MALLINFO Info = {};
+ scudo::StatCounters Stats;
+ SCUDO_ALLOCATOR.getStats(Stats);
+ // Space allocated in mmapped regions (bytes)
+ Info.hblkhd = static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatMapped]);
+ // Maximum total allocated space (bytes)
+ Info.usmblks = Info.hblkhd;
+ // Space in freed fastbin blocks (bytes)
+ Info.fsmblks = static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatFree]);
+ // Total allocated space (bytes)
+ Info.uordblks =
+ static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatAllocated]);
+ // Total free space (bytes)
+ Info.fordblks = Info.fsmblks;
+ return Info;
+}
+
+// On Android, mallinfo2 is an alias of mallinfo, so don't define both.
+#if !SCUDO_ANDROID
+INTERFACE WEAK struct __scudo_mallinfo2 SCUDO_PREFIX(mallinfo2)(void) {
+ struct __scudo_mallinfo2 Info = {};
+ scudo::StatCounters Stats;
+ SCUDO_ALLOCATOR.getStats(Stats);
+ // Space allocated in mmapped regions (bytes)
+ Info.hblkhd = Stats[scudo::StatMapped];
+ // Maximum total allocated space (bytes)
+ Info.usmblks = Info.hblkhd;
+ // Space in freed fastbin blocks (bytes)
+ Info.fsmblks = Stats[scudo::StatFree];
+ // Total allocated space (bytes)
+ Info.uordblks = Stats[scudo::StatAllocated];
+ // Total free space (bytes)
+ Info.fordblks = Info.fsmblks;
+ return Info;
+}
+#endif
+
+INTERFACE WEAK void *SCUDO_PREFIX(malloc)(size_t size) {
+ void *Ptr = SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc,
+ SCUDO_MALLOC_ALIGNMENT);
+ reportAllocation(Ptr, size);
+ return scudo::setErrnoOnNull(Ptr);
+}
+
+#if SCUDO_ANDROID
+INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(const void *ptr) {
+#else
+INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(void *ptr) {
+#endif
+ return SCUDO_ALLOCATOR.getUsableSize(ptr);
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(memalign)(size_t alignment, size_t size) {
+ // Android rounds up the alignment to a power of two if it isn't one.
+ if (SCUDO_ANDROID) {
+ if (UNLIKELY(!alignment)) {
+ alignment = 1U;
+ } else {
+ if (UNLIKELY(!scudo::isPowerOfTwo(alignment)))
+ alignment = scudo::roundUpPowerOfTwo(alignment);
+ }
+ } else {
+ if (UNLIKELY(!scudo::isPowerOfTwo(alignment))) {
+ if (SCUDO_ALLOCATOR.canReturnNull()) {
+ errno = EINVAL;
+ return nullptr;
+ }
+ scudo::reportAlignmentNotPowerOfTwo(alignment);
+ }
+ }
+ void *Ptr =
+ SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
+ reportAllocation(Ptr, size);
+ return Ptr;
+}
+
+INTERFACE WEAK int SCUDO_PREFIX(posix_memalign)(void **memptr, size_t alignment,
+ size_t size) {
+ if (UNLIKELY(scudo::checkPosixMemalignAlignment(alignment))) {
+ if (!SCUDO_ALLOCATOR.canReturnNull())
+ scudo::reportInvalidPosixMemalignAlignment(alignment);
+ return EINVAL;
+ }
+ void *Ptr =
+ SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
+ if (UNLIKELY(!Ptr))
+ return ENOMEM;
+ reportAllocation(Ptr, size);
+
+ *memptr = Ptr;
+ return 0;
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(pvalloc)(size_t size) {
+ const scudo::uptr PageSize = scudo::getPageSizeCached();
+ if (UNLIKELY(scudo::checkForPvallocOverflow(size, PageSize))) {
+ if (SCUDO_ALLOCATOR.canReturnNull()) {
+ errno = ENOMEM;
+ return nullptr;
+ }
+ scudo::reportPvallocOverflow(size);
+ }
+ // pvalloc(0) should allocate one page.
+ void *Ptr =
+ SCUDO_ALLOCATOR.allocate(size ? scudo::roundUp(size, PageSize) : PageSize,
+ scudo::Chunk::Origin::Memalign, PageSize);
+ reportAllocation(Ptr, scudo::roundUp(size, PageSize));
+
+ return scudo::setErrnoOnNull(Ptr);
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(realloc)(void *ptr, size_t size) {
+ if (!ptr) {
+ void *Ptr = SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc,
+ SCUDO_MALLOC_ALIGNMENT);
+ reportAllocation(Ptr, size);
+ return scudo::setErrnoOnNull(Ptr);
+ }
+ if (size == 0) {
+ reportDeallocation(ptr);
+ SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
+ return nullptr;
+ }
+
+  // Given that the reporting of the deallocation and the allocation is not
+  // atomic, we always pretend the old pointer is released, so that the hooks
+  // never have to deal with a spurious double-use of the address.
+  //
+  // For example, assume that `realloc` releases the old pointer and allocates
+  // a new one. Before both operations have been reported, another thread may
+  // get the old pointer back from `malloc`. If this isn't handled carefully on
+  // the hook side, it could be misinterpreted as a double-use.
+ reportReallocDeallocation(ptr);
+ void *NewPtr = SCUDO_ALLOCATOR.reallocate(ptr, size, SCUDO_MALLOC_ALIGNMENT);
+ if (NewPtr != nullptr) {
+ // Note that even if NewPtr == ptr, the size has changed. We still need to
+ // report the new size.
+ reportReallocAllocation(/*OldPtr=*/ptr, NewPtr, size);
+ } else {
+ // If `realloc` fails, the old pointer is not released. Report the old
+ // pointer as allocated again.
+ reportReallocAllocation(/*OldPtr=*/ptr, /*NewPtr=*/ptr,
+ SCUDO_ALLOCATOR.getAllocSize(ptr));
+ }
+
+ return scudo::setErrnoOnNull(NewPtr);
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(valloc)(size_t size) {
+ void *Ptr = SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign,
+ scudo::getPageSizeCached());
+ reportAllocation(Ptr, size);
+
+ return scudo::setErrnoOnNull(Ptr);
+}
+
+INTERFACE WEAK int SCUDO_PREFIX(malloc_iterate)(
+ uintptr_t base, size_t size,
+ void (*callback)(uintptr_t base, size_t size, void *arg), void *arg) {
+ SCUDO_ALLOCATOR.iterateOverChunks(base, size, callback, arg);
+ return 0;
+}
+
+INTERFACE WEAK void SCUDO_PREFIX(malloc_enable)() { SCUDO_ALLOCATOR.enable(); }
+
+INTERFACE WEAK void SCUDO_PREFIX(malloc_disable)() {
+ SCUDO_ALLOCATOR.disable();
+}
+
+void SCUDO_PREFIX(malloc_postinit)() {
+ SCUDO_ALLOCATOR.initGwpAsan();
+ pthread_atfork(SCUDO_PREFIX(malloc_disable), SCUDO_PREFIX(malloc_enable),
+ SCUDO_PREFIX(malloc_enable));
+}
+
+INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, int value) {
+ if (param == M_DECAY_TIME) {
+ if (SCUDO_ANDROID) {
+      // Before changing the interval, reset the memory usage status with the
+      // equivalent of an M_PURGE call, to minimize the impact of any pages
+      // left unreleased across the interval transition.
+ SCUDO_ALLOCATOR.releaseToOS(scudo::ReleaseToOS::Force);
+
+ // The values allowed on Android are {-1, 0, 1}. "1" means the longest
+ // interval.
+ CHECK(value >= -1 && value <= 1);
+ if (value == 1)
+ value = INT32_MAX;
+ }
+
+ SCUDO_ALLOCATOR.setOption(scudo::Option::ReleaseInterval,
+ static_cast<scudo::sptr>(value));
+ return 1;
+ } else if (param == M_PURGE) {
+ SCUDO_ALLOCATOR.releaseToOS(scudo::ReleaseToOS::Force);
+ return 1;
+ } else if (param == M_PURGE_ALL) {
+ SCUDO_ALLOCATOR.releaseToOS(scudo::ReleaseToOS::ForceAll);
+ return 1;
+ } else if (param == M_LOG_STATS) {
+ SCUDO_ALLOCATOR.printStats();
+ SCUDO_ALLOCATOR.printFragmentationInfo();
+ return 1;
+ } else {
+ scudo::Option option;
+ switch (param) {
+ case M_MEMTAG_TUNING:
+ option = scudo::Option::MemtagTuning;
+ break;
+ case M_THREAD_DISABLE_MEM_INIT:
+ option = scudo::Option::ThreadDisableMemInit;
+ break;
+ case M_CACHE_COUNT_MAX:
+ option = scudo::Option::MaxCacheEntriesCount;
+ break;
+ case M_CACHE_SIZE_MAX:
+ option = scudo::Option::MaxCacheEntrySize;
+ break;
+ case M_TSDS_COUNT_MAX:
+ option = scudo::Option::MaxTSDsCount;
+ break;
+ default:
+ return 0;
+ }
+ return SCUDO_ALLOCATOR.setOption(option, static_cast<scudo::sptr>(value));
+ }
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(aligned_alloc)(size_t alignment,
+ size_t size) {
+ if (UNLIKELY(scudo::checkAlignedAllocAlignmentAndSize(alignment, size))) {
+ if (SCUDO_ALLOCATOR.canReturnNull()) {
+ errno = EINVAL;
+ return nullptr;
+ }
+ scudo::reportInvalidAlignedAllocAlignment(alignment, size);
+ }
+
+ void *Ptr =
+ SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc, alignment);
+ reportAllocation(Ptr, size);
+
+ return scudo::setErrnoOnNull(Ptr);
+}
+
+INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(UNUSED int options, FILE *stream) {
+ const scudo::uptr max_size =
+ decltype(SCUDO_ALLOCATOR)::PrimaryT::SizeClassMap::MaxSize;
+ auto *sizes = static_cast<scudo::uptr *>(
+ SCUDO_PREFIX(calloc)(max_size, sizeof(scudo::uptr)));
+ auto callback = [](uintptr_t, size_t size, void *arg) {
+ auto *sizes = reinterpret_cast<scudo::uptr *>(arg);
+ if (size < max_size)
+ sizes[size]++;
+ };
+
+ SCUDO_ALLOCATOR.disable();
+ SCUDO_ALLOCATOR.iterateOverChunks(0, -1ul, callback, sizes);
+ SCUDO_ALLOCATOR.enable();
+
+ fputs("<malloc version=\"scudo-1\">\n", stream);
+ for (scudo::uptr i = 0; i != max_size; ++i)
+ if (sizes[i])
+ fprintf(stream, "<alloc size=\"%zu\" count=\"%zu\"/>\n", i, sizes[i]);
+ fputs("</malloc>\n", stream);
+ SCUDO_PREFIX(free)(sizes);
+ return 0;
+}
+
+// Disable memory tagging for the heap. The caller must disable memory tag
+// checks globally (e.g. by clearing TCF0 on aarch64) before calling this
+// function, and may not re-enable them after calling the function.
+INTERFACE WEAK void SCUDO_PREFIX(malloc_disable_memory_tagging)() {
+ SCUDO_ALLOCATOR.disableMemoryTagging();
+}
+
+// Sets whether scudo records stack traces and other metadata for allocations
+// and deallocations. This function only has an effect if the allocator and
+// hardware support memory tagging.
+INTERFACE WEAK void
+SCUDO_PREFIX(malloc_set_track_allocation_stacks)(int track) {
+ SCUDO_ALLOCATOR.setTrackAllocationStacks(track);
+}
+
+// Sets whether scudo zero-initializes all allocated memory.
+INTERFACE WEAK void SCUDO_PREFIX(malloc_set_zero_contents)(int zero_contents) {
+ SCUDO_ALLOCATOR.setFillContents(zero_contents ? scudo::ZeroFill
+ : scudo::NoFill);
+}
+
+// Sets whether scudo pattern-initializes all allocated memory.
+INTERFACE WEAK void
+SCUDO_PREFIX(malloc_set_pattern_fill_contents)(int pattern_fill_contents) {
+ SCUDO_ALLOCATOR.setFillContents(
+ pattern_fill_contents ? scudo::PatternOrZeroFill : scudo::NoFill);
+}
+
+// Sets whether scudo adds a small amount of slack at the end of large
+// allocations, before the guard page. This can be enabled to work around buggy
+// applications that read a few bytes past the end of their allocation.
+INTERFACE WEAK void
+SCUDO_PREFIX(malloc_set_add_large_allocation_slack)(int add_slack) {
+ SCUDO_ALLOCATOR.setAddLargeAllocationSlack(add_slack);
+}
+
+} // extern "C"
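
The realloc() wrapper above always reports the release of the old pointer before reporting the new allocation, and re-reports the old pointer as allocated if the reallocation fails, so a hook that tracks live pointers never observes an address handed out twice without an intervening release. A toy hook-side tracker built on that ordering; everything here is illustrative and independent of Scudo's real hook declarations:

#include <cassert>
#include <cstddef>
#include <cstdio>
#include <unordered_set>

// Toy live-pointer set. It only stays consistent because "release old" is
// reported strictly before "allocate new", even when realloc returns the same
// address.
static std::unordered_set<void *> Live;

static void onAllocate(void *Ptr, std::size_t) {
  assert(Live.insert(Ptr).second && "address reported live twice");
}
static void onDeallocate(void *Ptr) { Live.erase(Ptr); }

// Mirrors the wrapper's reporting sequence for a successful reallocation.
static void reallocReported(void *OldPtr, void *NewPtr, std::size_t NewSize) {
  onDeallocate(OldPtr);        // old pointer reported released first...
  onAllocate(NewPtr, NewSize); // ...then the new one reported allocated,
                               // which is safe even if NewPtr == OldPtr.
}

int main() {
  int Dummy;
  void *P = &Dummy;
  onAllocate(P, 4);
  reallocReported(P, P, 8); // in-place growth: no spurious double-use
  onDeallocate(P);
  std::printf("live entries: %zu\n", Live.size());
  return 0;
}
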
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
new file mode 100644
index 000000000000..e9d8c1e8d3db
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
@@ -0,0 +1,75 @@
+//===-- wrappers_c_bionic.cpp -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+// This is only used when compiled as part of Bionic.
+#if SCUDO_ANDROID && _BIONIC
+
+#include "allocator_config.h"
+#include "internal_defs.h"
+#include "platform.h"
+#include "scudo/interface.h"
+#include "wrappers_c.h"
+#include "wrappers_c_checks.h"
+
+#include <stdint.h>
+#include <stdio.h>
+
+// Regular MallocDispatch definitions.
+#define SCUDO_PREFIX(name) CONCATENATE(scudo_, name)
+#define SCUDO_ALLOCATOR Allocator
+
+extern "C" void SCUDO_PREFIX(malloc_postinit)();
+SCUDO_REQUIRE_CONSTANT_INITIALIZATION
+static scudo::Allocator<scudo::Config, SCUDO_PREFIX(malloc_postinit)>
+ SCUDO_ALLOCATOR;
+
+#include "wrappers_c.inc"
+
+#undef SCUDO_ALLOCATOR
+#undef SCUDO_PREFIX
+
+// TODO(kostyak): support both allocators.
+INTERFACE void __scudo_print_stats(void) { Allocator.printStats(); }
+
+INTERFACE void __scudo_get_error_info(
+ struct scudo_error_info *error_info, uintptr_t fault_addr,
+ const char *stack_depot, size_t stack_depot_size, const char *region_info,
+ const char *ring_buffer, size_t ring_buffer_size, const char *memory,
+ const char *memory_tags, uintptr_t memory_addr, size_t memory_size) {
+ Allocator.getErrorInfo(error_info, fault_addr, stack_depot, stack_depot_size,
+ region_info, ring_buffer, ring_buffer_size, memory,
+ memory_tags, memory_addr, memory_size);
+}
+
+INTERFACE const char *__scudo_get_stack_depot_addr() {
+ return Allocator.getStackDepotAddress();
+}
+
+INTERFACE size_t __scudo_get_stack_depot_size() {
+ return Allocator.getStackDepotSize();
+}
+
+INTERFACE const char *__scudo_get_region_info_addr() {
+ return Allocator.getRegionInfoArrayAddress();
+}
+
+INTERFACE size_t __scudo_get_region_info_size() {
+ return Allocator.getRegionInfoArraySize();
+}
+
+INTERFACE const char *__scudo_get_ring_buffer_addr() {
+ return Allocator.getRingBufferAddress();
+}
+
+INTERFACE size_t __scudo_get_ring_buffer_size() {
+ return Allocator.getRingBufferSize();
+}
+
+#endif // SCUDO_ANDROID && _BIONIC
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h
new file mode 100644
index 000000000000..d0288699cf1b
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h
@@ -0,0 +1,70 @@
+//===-- wrappers_c_checks.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CHECKS_H_
+#define SCUDO_CHECKS_H_
+
+#include "common.h"
+
+#include <errno.h>
+
+#ifndef __has_builtin
+#define __has_builtin(X) 0
+#endif
+
+namespace scudo {
+
+// A common errno setting logic shared by almost all Scudo C wrappers.
+inline void *setErrnoOnNull(void *Ptr) {
+ if (UNLIKELY(!Ptr))
+ errno = ENOMEM;
+ return Ptr;
+}
+
+// Checks return true on failure.
+
+// Checks aligned_alloc() parameters, verifies that the alignment is a power of
+// two and that the size is a multiple of alignment.
+inline bool checkAlignedAllocAlignmentAndSize(uptr Alignment, uptr Size) {
+ return !isPowerOfTwo(Alignment) || !isAligned(Size, Alignment);
+}
+
+// Checks posix_memalign() parameters, verifies that alignment is a power of two
+// and a multiple of sizeof(void *).
+inline bool checkPosixMemalignAlignment(uptr Alignment) {
+ return !isPowerOfTwo(Alignment) || !isAligned(Alignment, sizeof(void *));
+}
+
+// Returns true if calloc(Size, N) overflows on the Size*N calculation. Use a
+// builtin supported by recent clang & GCC if it exists, otherwise fall back to
+// a costly division.
+inline bool checkForCallocOverflow(uptr Size, uptr N, uptr *Product) {
+#if __has_builtin(__builtin_umull_overflow) && (SCUDO_WORDSIZE == 64U)
+ return __builtin_umull_overflow(Size, N,
+ reinterpret_cast<unsigned long *>(Product));
+#elif __has_builtin(__builtin_umul_overflow) && (SCUDO_WORDSIZE == 32U)
+  // On e.g. armv7, uptr/uintptr_t may be defined as unsigned long.
+ return __builtin_umul_overflow(Size, N,
+ reinterpret_cast<unsigned int *>(Product));
+#else
+ *Product = Size * N;
+ if (!Size)
+ return false;
+ return (*Product / Size) != N;
+#endif
+}
+
+// Returns true if the size passed to pvalloc overflows when rounded to the next
+// multiple of PageSize.
+inline bool checkForPvallocOverflow(uptr Size, uptr PageSize) {
+ return roundUp(Size, PageSize) < Size;
+}
+
+} // namespace scudo
+
+#endif // SCUDO_CHECKS_H_
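
checkForCallocOverflow() above prefers a width-specific compiler builtin and falls back to a division, while checkForPvallocOverflow() detects wrap-around in the page round-up. A small standalone sketch of both checks, using the type-generic __builtin_mul_overflow (available in reasonably recent clang and GCC) instead of the width-specific variants:

#include <cstdint>
#include <cstdio>

// Returns true if Size * N overflows; Product receives the (possibly wrapped)
// result either way, matching the calloc-style contract above.
static bool mulOverflows(uintptr_t Size, uintptr_t N, uintptr_t *Product) {
#if defined(__clang__) || defined(__GNUC__)
  return __builtin_mul_overflow(Size, N, Product);
#else
  *Product = Size * N;
  if (Size == 0)
    return false;
  return (*Product / Size) != N;
#endif
}

// Rounding up to a multiple of PageSize (a power of two) wraps around zero
// exactly when the result ends up smaller than the input.
static bool roundUpOverflows(uintptr_t Size, uintptr_t PageSize) {
  const uintptr_t Rounded = (Size + PageSize - 1) & ~(PageSize - 1);
  return Rounded < Size;
}

int main() {
  uintptr_t P;
  std::printf("calloc overflow: %d\n",
              mulOverflows(UINTPTR_MAX / 2, 3, &P));
  std::printf("pvalloc wrap: %d\n", roundUpOverflows(UINTPTR_MAX - 100, 4096));
  return 0;
}
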
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp
new file mode 100644
index 000000000000..098d4f71acc4
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp
@@ -0,0 +1,150 @@
+//===-- wrappers_cpp.cpp ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+// Skip this compilation unit if compiled as part of Bionic.
+#if !SCUDO_ANDROID || !_BIONIC
+
+#include "allocator_config.h"
+#include "internal_defs.h"
+#include "platform.h"
+#include "scudo/interface.h"
+#include "wrappers_c.h"
+
+#include <stdint.h>
+
+namespace std {
+struct nothrow_t {};
+enum class align_val_t : size_t {};
+} // namespace std
+
+static void reportAllocation(void *ptr, size_t size) {
+ if (SCUDO_ENABLE_HOOKS)
+ if (__scudo_allocate_hook && ptr)
+ __scudo_allocate_hook(ptr, size);
+}
+static void reportDeallocation(void *ptr) {
+ if (SCUDO_ENABLE_HOOKS)
+ if (__scudo_deallocate_hook)
+ __scudo_deallocate_hook(ptr);
+}
+
+INTERFACE WEAK void *operator new(size_t size) {
+ void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::New);
+ reportAllocation(Ptr, size);
+ return Ptr;
+}
+INTERFACE WEAK void *operator new[](size_t size) {
+ void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::NewArray);
+ reportAllocation(Ptr, size);
+ return Ptr;
+}
+INTERFACE WEAK void *operator new(size_t size,
+ std::nothrow_t const &) NOEXCEPT {
+ void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::New);
+ reportAllocation(Ptr, size);
+ return Ptr;
+}
+INTERFACE WEAK void *operator new[](size_t size,
+ std::nothrow_t const &) NOEXCEPT {
+ void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::NewArray);
+ reportAllocation(Ptr, size);
+ return Ptr;
+}
+INTERFACE WEAK void *operator new(size_t size, std::align_val_t align) {
+ void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::New,
+ static_cast<scudo::uptr>(align));
+ reportAllocation(Ptr, size);
+ return Ptr;
+}
+INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align) {
+ void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::NewArray,
+ static_cast<scudo::uptr>(align));
+ reportAllocation(Ptr, size);
+ return Ptr;
+}
+INTERFACE WEAK void *operator new(size_t size, std::align_val_t align,
+ std::nothrow_t const &) NOEXCEPT {
+ void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::New,
+ static_cast<scudo::uptr>(align));
+ reportAllocation(Ptr, size);
+ return Ptr;
+}
+INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align,
+ std::nothrow_t const &) NOEXCEPT {
+ void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::NewArray,
+ static_cast<scudo::uptr>(align));
+ reportAllocation(Ptr, size);
+ return Ptr;
+}
+
+INTERFACE WEAK void operator delete(void *ptr) NOEXCEPT {
+ reportDeallocation(ptr);
+ Allocator.deallocate(ptr, scudo::Chunk::Origin::New);
+}
+INTERFACE WEAK void operator delete[](void *ptr) NOEXCEPT {
+ reportDeallocation(ptr);
+ Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray);
+}
+INTERFACE WEAK void operator delete(void *ptr,
+ std::nothrow_t const &) NOEXCEPT {
+ reportDeallocation(ptr);
+ Allocator.deallocate(ptr, scudo::Chunk::Origin::New);
+}
+INTERFACE WEAK void operator delete[](void *ptr,
+ std::nothrow_t const &) NOEXCEPT {
+ reportDeallocation(ptr);
+ Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray);
+}
+INTERFACE WEAK void operator delete(void *ptr, size_t size) NOEXCEPT {
+ reportDeallocation(ptr);
+ Allocator.deallocate(ptr, scudo::Chunk::Origin::New, size);
+}
+INTERFACE WEAK void operator delete[](void *ptr, size_t size) NOEXCEPT {
+ reportDeallocation(ptr);
+ Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, size);
+}
+INTERFACE WEAK void operator delete(void *ptr,
+ std::align_val_t align) NOEXCEPT {
+ reportDeallocation(ptr);
+ Allocator.deallocate(ptr, scudo::Chunk::Origin::New, 0,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete[](void *ptr,
+ std::align_val_t align) NOEXCEPT {
+ reportDeallocation(ptr);
+ Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete(void *ptr, std::align_val_t align,
+ std::nothrow_t const &) NOEXCEPT {
+ reportDeallocation(ptr);
+ Allocator.deallocate(ptr, scudo::Chunk::Origin::New, 0,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete[](void *ptr, std::align_val_t align,
+ std::nothrow_t const &) NOEXCEPT {
+ reportDeallocation(ptr);
+ Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete(void *ptr, size_t size,
+ std::align_val_t align) NOEXCEPT {
+ reportDeallocation(ptr);
+ Allocator.deallocate(ptr, scudo::Chunk::Origin::New, size,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete[](void *ptr, size_t size,
+ std::align_val_t align) NOEXCEPT {
+ reportDeallocation(ptr);
+ Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, size,
+ static_cast<scudo::uptr>(align));
+}
+
+#endif // !SCUDO_ANDROID || !_BIONIC
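
The operators above cover the C++17 matrix of sized and aligned new/delete forms; for an over-aligned type the compiler routes allocation through the std::align_val_t overloads so the alignment reaches the allocator. A short usage sketch in plain C++ (nothing Scudo-specific beyond linking against these wrappers):

#include <cstdint>
#include <cstdio>
#include <new>

struct alignas(64) CacheLinePadded {
  std::uint64_t Counters[8];
};

int main() {
  // With C++17 or newer, this calls operator new(sizeof(CacheLinePadded),
  // std::align_val_t(64)) and the matching sized+aligned operator delete,
  // because the type's alignment exceeds the default new alignment.
  auto *P = new CacheLinePadded();
  std::printf("aligned to 64: %d\n",
              (reinterpret_cast<std::uintptr_t>(P) % 64) == 0);
  delete P;
  return 0;
}
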