Diffstat (limited to 'compiler-rt/lib/scudo/standalone/combined.h')
-rw-r--r-- | compiler-rt/lib/scudo/standalone/combined.h | 472
1 file changed, 426 insertions, 46 deletions
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h index a0b4b2973e96..3bb41eca88f7 100644 --- a/compiler-rt/lib/scudo/standalone/combined.h +++ b/compiler-rt/lib/scudo/standalone/combined.h @@ -13,28 +13,36 @@ #include "common.h" #include "flags.h" #include "flags_parser.h" -#include "interface.h" #include "local_cache.h" +#include "memtag.h" #include "quarantine.h" #include "report.h" #include "secondary.h" +#include "stack_depot.h" #include "string_utils.h" #include "tsd.h" +#include "scudo/interface.h" + #ifdef GWP_ASAN_HOOKS #include "gwp_asan/guarded_pool_allocator.h" -// GWP-ASan is declared here in order to avoid indirect call overhead. It's also -// instantiated outside of the Allocator class, as the allocator is only -// zero-initialised. GWP-ASan requires constant initialisation, and the Scudo -// allocator doesn't have a constexpr constructor (see discussion here: -// https://reviews.llvm.org/D69265#inline-624315). -static gwp_asan::GuardedPoolAllocator GuardedAlloc; +#include "gwp_asan/optional/backtrace.h" +#include "gwp_asan/optional/segv_handler.h" #endif // GWP_ASAN_HOOKS extern "C" inline void EmptyCallback() {} +#ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE +// This function is not part of the NDK so it does not appear in any public +// header files. We only declare/use it when targeting the platform. +extern "C" size_t android_unsafe_frame_pointer_chase(scudo::uptr *buf, + size_t num_entries); +#endif + namespace scudo { +enum class Option { ReleaseInterval }; + template <class Params, void (*PostInitCallback)(void) = EmptyCallback> class Allocator { public: @@ -139,20 +147,29 @@ public: // Store some flags locally. Options.MayReturnNull = getFlags()->may_return_null; - Options.ZeroContents = getFlags()->zero_contents; + Options.FillContents = + getFlags()->zero_contents + ? ZeroFill + : (getFlags()->pattern_fill_contents ? PatternOrZeroFill : NoFill); Options.DeallocTypeMismatch = getFlags()->dealloc_type_mismatch; Options.DeleteSizeMismatch = getFlags()->delete_size_mismatch; + Options.TrackAllocationStacks = false; Options.QuarantineMaxChunkSize = static_cast<u32>(getFlags()->quarantine_max_chunk_size); Stats.initLinkerInitialized(); - Primary.initLinkerInitialized(getFlags()->release_to_os_interval_ms); - Secondary.initLinkerInitialized(&Stats); + const s32 ReleaseToOsIntervalMs = getFlags()->release_to_os_interval_ms; + Primary.initLinkerInitialized(ReleaseToOsIntervalMs); + Secondary.initLinkerInitialized(&Stats, ReleaseToOsIntervalMs); Quarantine.init( static_cast<uptr>(getFlags()->quarantine_size_kb << 10), static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10)); + } + // Initialize the embedded GWP-ASan instance. Requires the main allocator to + // be functional, best called from PostInitCallback. + void initGwpAsan() { #ifdef GWP_ASAN_HOOKS gwp_asan::options::Options Opt; Opt.Enabled = getFlags()->GWP_ASAN_Enabled; @@ -165,8 +182,17 @@ public: getFlags()->GWP_ASAN_MaxSimultaneousAllocations; Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate; Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers; - Opt.Printf = Printf; + // Embedded GWP-ASan is locked through the Scudo atfork handler (via + // Allocator::disable calling GWPASan.disable). Disable GWP-ASan's atfork + // handler. 
+ Opt.InstallForkHandlers = false; + Opt.Backtrace = gwp_asan::options::getBacktraceFunction(); GuardedAlloc.init(Opt); + + if (Opt.InstallSignalHandlers) + gwp_asan::crash_handler::installSignalHandlers( + &GuardedAlloc, Printf, gwp_asan::options::getPrintBacktraceFunction(), + Opt.Backtrace); #endif // GWP_ASAN_HOOKS } @@ -175,6 +201,11 @@ public: void unmapTestOnly() { TSDRegistry.unmapTestOnly(); Primary.unmapTestOnly(); +#ifdef GWP_ASAN_HOOKS + if (getFlags()->GWP_ASAN_InstallSignalHandlers) + gwp_asan::crash_handler::uninstallSignalHandlers(); + GuardedAlloc.uninitTestOnly(); +#endif // GWP_ASAN_HOOKS } TSDRegistryT *getTSDRegistry() { return &TSDRegistry; } @@ -195,6 +226,27 @@ public: TSD->Cache.destroy(&Stats); } + ALWAYS_INLINE void *untagPointerMaybe(void *Ptr) { + if (Primary.SupportsMemoryTagging) + return reinterpret_cast<void *>( + untagPointer(reinterpret_cast<uptr>(Ptr))); + return Ptr; + } + + NOINLINE u32 collectStackTrace() { +#ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE + // Discard collectStackTrace() frame and allocator function frame. + constexpr uptr DiscardFrames = 2; + uptr Stack[MaxTraceSize + DiscardFrames]; + uptr Size = + android_unsafe_frame_pointer_chase(Stack, MaxTraceSize + DiscardFrames); + Size = Min<uptr>(Size, MaxTraceSize + DiscardFrames); + return Depot.insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size); +#else + return 0; +#endif + } + NOINLINE void *allocate(uptr Size, Chunk::Origin Origin, uptr Alignment = MinAlignment, bool ZeroContents = false) { @@ -207,7 +259,8 @@ public: } #endif // GWP_ASAN_HOOKS - ZeroContents |= static_cast<bool>(Options.ZeroContents); + FillContentsMode FillContents = + ZeroContents ? ZeroFill : Options.FillContents; if (UNLIKELY(Alignment > MaxAlignment)) { if (Options.MayReturnNull) @@ -235,22 +288,36 @@ public: } DCHECK_LE(Size, NeededSize); - void *Block; - uptr ClassId; - uptr BlockEnd; + void *Block = nullptr; + uptr ClassId = 0; + uptr SecondaryBlockEnd; if (LIKELY(PrimaryT::canAllocate(NeededSize))) { ClassId = SizeClassMap::getClassIdBySize(NeededSize); DCHECK_NE(ClassId, 0U); bool UnlockRequired; auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired); Block = TSD->Cache.allocate(ClassId); + // If the allocation failed, the most likely reason with a 32-bit primary + // is the region being full. In that event, retry in each successively + // larger class until it fits. If it fails to fit in the largest class, + // fallback to the Secondary. + if (UNLIKELY(!Block)) { + while (ClassId < SizeClassMap::LargestClassId) { + Block = TSD->Cache.allocate(++ClassId); + if (LIKELY(Block)) { + break; + } + } + if (UNLIKELY(!Block)) { + ClassId = 0; + } + } if (UnlockRequired) TSD->unlock(); - } else { - ClassId = 0; - Block = - Secondary.allocate(NeededSize, Alignment, &BlockEnd, ZeroContents); } + if (UNLIKELY(ClassId == 0)) + Block = Secondary.allocate(NeededSize, Alignment, &SecondaryBlockEnd, + FillContents); if (UNLIKELY(!Block)) { if (Options.MayReturnNull) @@ -258,16 +325,88 @@ public: reportOutOfMemory(NeededSize); } - // We only need to zero the contents for Primary backed allocations. This - // condition is not necessarily unlikely, but since memset is costly, we - // might as well mark it as such. 
- if (UNLIKELY(ZeroContents && ClassId)) - memset(Block, 0, PrimaryT::getSizeByClassId(ClassId)); - - const uptr UnalignedUserPtr = - reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize(); + const uptr BlockUptr = reinterpret_cast<uptr>(Block); + const uptr UnalignedUserPtr = BlockUptr + Chunk::getHeaderSize(); const uptr UserPtr = roundUpTo(UnalignedUserPtr, Alignment); + void *Ptr = reinterpret_cast<void *>(UserPtr); + void *TaggedPtr = Ptr; + if (ClassId) { + // We only need to zero or tag the contents for Primary backed + // allocations. We only set tags for primary allocations in order to avoid + // faulting potentially large numbers of pages for large secondary + // allocations. We assume that guard pages are enough to protect these + // allocations. + // + // FIXME: When the kernel provides a way to set the background tag of a + // mapping, we should be able to tag secondary allocations as well. + // + // When memory tagging is enabled, zeroing the contents is done as part of + // setting the tag. + if (UNLIKELY(useMemoryTagging())) { + uptr PrevUserPtr; + Chunk::UnpackedHeader Header; + const uptr BlockEnd = BlockUptr + PrimaryT::getSizeByClassId(ClassId); + // If possible, try to reuse the UAF tag that was set by deallocate(). + // For simplicity, only reuse tags if we have the same start address as + // the previous allocation. This handles the majority of cases since + // most allocations will not be more aligned than the minimum alignment. + // + // We need to handle situations involving reclaimed chunks, and retag + // the reclaimed portions if necessary. In the case where the chunk is + // fully reclaimed, the chunk's header will be zero, which will trigger + // the code path for new mappings and invalid chunks that prepares the + // chunk from scratch. There are three possibilities for partial + // reclaiming: + // + // (1) Header was reclaimed, data was partially reclaimed. + // (2) Header was not reclaimed, all data was reclaimed (e.g. because + // data started on a page boundary). + // (3) Header was not reclaimed, data was partially reclaimed. + // + // Case (1) will be handled in the same way as for full reclaiming, + // since the header will be zero. + // + // We can detect case (2) by loading the tag from the start + // of the chunk. If it is zero, it means that either all data was + // reclaimed (since we never use zero as the chunk tag), or that the + // previous allocation was of size zero. Either way, we need to prepare + // a new chunk from scratch. + // + // We can detect case (3) by moving to the next page (if covered by the + // chunk) and loading the tag of its first granule. If it is zero, it + // means that all following pages may need to be retagged. On the other + // hand, if it is nonzero, we can assume that all following pages are + // still tagged, according to the logic that if any of the pages + // following the next page were reclaimed, the next page would have been + // reclaimed as well. 
+ uptr TaggedUserPtr; + if (getChunkFromBlock(BlockUptr, &PrevUserPtr, &Header) && + PrevUserPtr == UserPtr && + (TaggedUserPtr = loadTag(UserPtr)) != UserPtr) { + uptr PrevEnd = TaggedUserPtr + Header.SizeOrUnusedBytes; + const uptr NextPage = roundUpTo(TaggedUserPtr, getPageSizeCached()); + if (NextPage < PrevEnd && loadTag(NextPage) != NextPage) + PrevEnd = NextPage; + TaggedPtr = reinterpret_cast<void *>(TaggedUserPtr); + resizeTaggedChunk(PrevEnd, TaggedUserPtr + Size, BlockEnd); + if (Size) { + // Clear any stack metadata that may have previously been stored in + // the chunk data. + memset(TaggedPtr, 0, archMemoryTagGranuleSize()); + } + } else { + TaggedPtr = prepareTaggedChunk(Ptr, Size, BlockEnd); + } + storeAllocationStackMaybe(Ptr); + } else if (UNLIKELY(FillContents != NoFill)) { + // This condition is not necessarily unlikely, but since memset is + // costly, we might as well mark it as such. + memset(Block, FillContents == ZeroFill ? 0 : PatternFillByte, + PrimaryT::getSizeByClassId(ClassId)); + } + } + Chunk::UnpackedHeader Header = {}; if (UNLIKELY(UnalignedUserPtr != UserPtr)) { const uptr Offset = UserPtr - UnalignedUserPtr; @@ -283,15 +422,15 @@ public: Header.ClassId = ClassId & Chunk::ClassIdMask; Header.State = Chunk::State::Allocated; Header.Origin = Origin & Chunk::OriginMask; - Header.SizeOrUnusedBytes = (ClassId ? Size : BlockEnd - (UserPtr + Size)) & - Chunk::SizeOrUnusedBytesMask; - void *Ptr = reinterpret_cast<void *>(UserPtr); + Header.SizeOrUnusedBytes = + (ClassId ? Size : SecondaryBlockEnd - (UserPtr + Size)) & + Chunk::SizeOrUnusedBytesMask; Chunk::storeHeader(Cookie, Ptr, &Header); if (&__scudo_allocate_hook) - __scudo_allocate_hook(Ptr, Size); + __scudo_allocate_hook(TaggedPtr, Size); - return Ptr; + return TaggedPtr; } NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0, @@ -319,6 +458,8 @@ public: if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment))) reportMisalignedPointer(AllocatorAction::Deallocating, Ptr); + Ptr = untagPointerMaybe(Ptr); + Chunk::UnpackedHeader Header; Chunk::loadHeader(Cookie, Ptr, &Header); @@ -346,6 +487,15 @@ public: void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) { initThreadMaybe(); + if (UNLIKELY(NewSize >= MaxAllowedMallocSize)) { + if (Options.MayReturnNull) + return nullptr; + reportAllocationSizeTooBig(NewSize, 0, MaxAllowedMallocSize); + } + + void *OldTaggedPtr = OldPtr; + OldPtr = untagPointerMaybe(OldPtr); + // The following cases are handled by the C wrappers. DCHECK_NE(OldPtr, nullptr); DCHECK_NE(NewSize, 0); @@ -396,16 +546,20 @@ public: // reasonable delta), we just keep the old block, and update the chunk // header to reflect the size change. if (reinterpret_cast<uptr>(OldPtr) + NewSize <= BlockEnd) { - const uptr Delta = - OldSize < NewSize ? NewSize - OldSize : OldSize - NewSize; - if (Delta <= SizeClassMap::MaxSize / 2) { + if (NewSize > OldSize || (OldSize - NewSize) < getPageSizeCached()) { Chunk::UnpackedHeader NewHeader = OldHeader; NewHeader.SizeOrUnusedBytes = (ClassId ? 
NewSize : BlockEnd - (reinterpret_cast<uptr>(OldPtr) + NewSize)) & Chunk::SizeOrUnusedBytesMask; Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader); - return OldPtr; + if (UNLIKELY(ClassId && useMemoryTagging())) { + resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize, + reinterpret_cast<uptr>(OldTaggedPtr) + NewSize, + BlockEnd); + storeAllocationStackMaybe(OldPtr); + } + return OldTaggedPtr; } } @@ -416,7 +570,7 @@ public: void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment); if (NewPtr) { const uptr OldSize = getSize(OldPtr, &OldHeader); - memcpy(NewPtr, OldPtr, Min(NewSize, OldSize)); + memcpy(NewPtr, OldTaggedPtr, Min(NewSize, OldSize)); quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize); } return NewPtr; @@ -427,6 +581,9 @@ public: // this function finishes. We will revisit that later. void disable() { initThreadMaybe(); +#ifdef GWP_ASAN_HOOKS + GuardedAlloc.disable(); +#endif TSDRegistry.disable(); Stats.disable(); Quarantine.disable(); @@ -441,6 +598,9 @@ public: Quarantine.enable(); Stats.enable(); TSDRegistry.enable(); +#ifdef GWP_ASAN_HOOKS + GuardedAlloc.enable(); +#endif } // The function returns the amount of bytes required to store the statistics, @@ -473,6 +633,7 @@ public: void releaseToOS() { initThreadMaybe(); Primary.releaseToOS(); + Secondary.releaseToOS(); } // Iterate over all chunks and call a callback for all busy chunks located @@ -489,11 +650,19 @@ public: uptr Chunk; Chunk::UnpackedHeader Header; if (getChunkFromBlock(Block, &Chunk, &Header) && - Header.State == Chunk::State::Allocated) - Callback(Chunk, getSize(reinterpret_cast<void *>(Chunk), &Header), Arg); + Header.State == Chunk::State::Allocated) { + uptr TaggedChunk = Chunk; + if (useMemoryTagging()) + TaggedChunk = loadTag(Chunk); + Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header), + Arg); + } }; Primary.iterateOverBlocks(Lambda); Secondary.iterateOverBlocks(Lambda); +#ifdef GWP_ASAN_HOOKS + GuardedAlloc.iterate(reinterpret_cast<void *>(Base), Size, Callback, Arg); +#endif } bool canReturnNull() { @@ -501,8 +670,14 @@ public: return Options.MayReturnNull; } - // TODO(kostyak): implement this as a "backend" to mallopt. - bool setOption(UNUSED uptr Option, UNUSED uptr Value) { return false; } + bool setOption(Option O, sptr Value) { + if (O == Option::ReleaseInterval) { + Primary.setReleaseToOsIntervalMs(static_cast<s32>(Value)); + Secondary.setReleaseToOsIntervalMs(static_cast<s32>(Value)); + return true; + } + return false; + } // Return the usable size for a given chunk. Technically we lie, as we just // report the actual size of a chunk. This is done to counteract code actively @@ -519,6 +694,7 @@ public: return GuardedAlloc.getSize(Ptr); #endif // GWP_ASAN_HOOKS + Ptr = untagPointerMaybe(const_cast<void *>(Ptr)); Chunk::UnpackedHeader Header; Chunk::loadHeader(Cookie, Ptr, &Header); // Getting the usable size of a chunk only makes sense if it's allocated. 
@@ -543,11 +719,151 @@ public: #endif // GWP_ASAN_HOOKS if (!Ptr || !isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)) return false; + Ptr = untagPointerMaybe(const_cast<void *>(Ptr)); Chunk::UnpackedHeader Header; return Chunk::isValid(Cookie, Ptr, &Header) && Header.State == Chunk::State::Allocated; } + bool useMemoryTagging() { return Primary.useMemoryTagging(); } + + void disableMemoryTagging() { Primary.disableMemoryTagging(); } + + void setTrackAllocationStacks(bool Track) { + initThreadMaybe(); + Options.TrackAllocationStacks = Track; + } + + void setFillContents(FillContentsMode FillContents) { + initThreadMaybe(); + Options.FillContents = FillContents; + } + + const char *getStackDepotAddress() const { + return reinterpret_cast<const char *>(&Depot); + } + + const char *getRegionInfoArrayAddress() const { + return Primary.getRegionInfoArrayAddress(); + } + + static uptr getRegionInfoArraySize() { + return PrimaryT::getRegionInfoArraySize(); + } + + static void getErrorInfo(struct scudo_error_info *ErrorInfo, + uintptr_t FaultAddr, const char *DepotPtr, + const char *RegionInfoPtr, const char *Memory, + const char *MemoryTags, uintptr_t MemoryAddr, + size_t MemorySize) { + *ErrorInfo = {}; + if (!PrimaryT::SupportsMemoryTagging || + MemoryAddr + MemorySize < MemoryAddr) + return; + + uptr UntaggedFaultAddr = untagPointer(FaultAddr); + u8 FaultAddrTag = extractTag(FaultAddr); + BlockInfo Info = + PrimaryT::findNearestBlock(RegionInfoPtr, UntaggedFaultAddr); + + auto GetGranule = [&](uptr Addr, const char **Data, uint8_t *Tag) -> bool { + if (Addr < MemoryAddr || + Addr + archMemoryTagGranuleSize() < Addr || + Addr + archMemoryTagGranuleSize() > MemoryAddr + MemorySize) + return false; + *Data = &Memory[Addr - MemoryAddr]; + *Tag = static_cast<u8>( + MemoryTags[(Addr - MemoryAddr) / archMemoryTagGranuleSize()]); + return true; + }; + + auto ReadBlock = [&](uptr Addr, uptr *ChunkAddr, + Chunk::UnpackedHeader *Header, const u32 **Data, + u8 *Tag) { + const char *BlockBegin; + u8 BlockBeginTag; + if (!GetGranule(Addr, &BlockBegin, &BlockBeginTag)) + return false; + uptr ChunkOffset = getChunkOffsetFromBlock(BlockBegin); + *ChunkAddr = Addr + ChunkOffset; + + const char *ChunkBegin; + if (!GetGranule(*ChunkAddr, &ChunkBegin, Tag)) + return false; + *Header = *reinterpret_cast<const Chunk::UnpackedHeader *>( + ChunkBegin - Chunk::getHeaderSize()); + *Data = reinterpret_cast<const u32 *>(ChunkBegin); + return true; + }; + + auto *Depot = reinterpret_cast<const StackDepot *>(DepotPtr); + + auto MaybeCollectTrace = [&](uintptr_t(&Trace)[MaxTraceSize], u32 Hash) { + uptr RingPos, Size; + if (!Depot->find(Hash, &RingPos, &Size)) + return; + for (unsigned I = 0; I != Size && I != MaxTraceSize; ++I) + Trace[I] = (*Depot)[RingPos + I]; + }; + + size_t NextErrorReport = 0; + + // First, check for UAF. 
+ { + uptr ChunkAddr; + Chunk::UnpackedHeader Header; + const u32 *Data; + uint8_t Tag; + if (ReadBlock(Info.BlockBegin, &ChunkAddr, &Header, &Data, &Tag) && + Header.State != Chunk::State::Allocated && + Data[MemTagPrevTagIndex] == FaultAddrTag) { + auto *R = &ErrorInfo->reports[NextErrorReport++]; + R->error_type = USE_AFTER_FREE; + R->allocation_address = ChunkAddr; + R->allocation_size = Header.SizeOrUnusedBytes; + MaybeCollectTrace(R->allocation_trace, + Data[MemTagAllocationTraceIndex]); + R->allocation_tid = Data[MemTagAllocationTidIndex]; + MaybeCollectTrace(R->deallocation_trace, + Data[MemTagDeallocationTraceIndex]); + R->deallocation_tid = Data[MemTagDeallocationTidIndex]; + } + } + + auto CheckOOB = [&](uptr BlockAddr) { + if (BlockAddr < Info.RegionBegin || BlockAddr >= Info.RegionEnd) + return false; + + uptr ChunkAddr; + Chunk::UnpackedHeader Header; + const u32 *Data; + uint8_t Tag; + if (!ReadBlock(BlockAddr, &ChunkAddr, &Header, &Data, &Tag) || + Header.State != Chunk::State::Allocated || Tag != FaultAddrTag) + return false; + + auto *R = &ErrorInfo->reports[NextErrorReport++]; + R->error_type = + UntaggedFaultAddr < ChunkAddr ? BUFFER_UNDERFLOW : BUFFER_OVERFLOW; + R->allocation_address = ChunkAddr; + R->allocation_size = Header.SizeOrUnusedBytes; + MaybeCollectTrace(R->allocation_trace, Data[MemTagAllocationTraceIndex]); + R->allocation_tid = Data[MemTagAllocationTidIndex]; + return NextErrorReport == + sizeof(ErrorInfo->reports) / sizeof(ErrorInfo->reports[0]); + }; + + if (CheckOOB(Info.BlockBegin)) + return; + + // Check for OOB in the 30 surrounding blocks. Beyond that we are likely to + // hit false positives. + for (int I = 1; I != 16; ++I) + if (CheckOOB(Info.BlockBegin + I * Info.BlockSize) || + CheckOOB(Info.BlockBegin - I * Info.BlockSize)) + return; + } + private: using SecondaryT = typename Params::Secondary; typedef typename PrimaryT::SizeClassMap SizeClassMap; @@ -561,9 +877,32 @@ private: static_assert(MinAlignment >= sizeof(Chunk::PackedHeader), "Minimal alignment must at least cover a chunk header."); + static_assert(!PrimaryT::SupportsMemoryTagging || + MinAlignment >= archMemoryTagGranuleSize(), + ""); static const u32 BlockMarker = 0x44554353U; + // These are indexes into an "array" of 32-bit values that store information + // inline with a chunk that is relevant to diagnosing memory tag faults, where + // 0 corresponds to the address of the user memory. This means that negative + // indexes may be used to store information about allocations, while positive + // indexes may only be used to store information about deallocations, because + // the user memory is in use until it has been deallocated. The smallest index + // that may be used is -2, which corresponds to 8 bytes before the user + // memory, because the chunk header size is 8 bytes and in allocators that + // support memory tagging the minimum alignment is at least the tag granule + // size (16 on aarch64), and the largest index that may be used is 3 because + // we are only guaranteed to have at least a granule's worth of space in the + // user memory. 
+ static const sptr MemTagAllocationTraceIndex = -2; + static const sptr MemTagAllocationTidIndex = -1; + static const sptr MemTagDeallocationTraceIndex = 0; + static const sptr MemTagDeallocationTidIndex = 1; + static const sptr MemTagPrevTagIndex = 2; + + static const uptr MaxTraceSize = 64; + GlobalStats Stats; TSDRegistryT TSDRegistry; PrimaryT Primary; @@ -574,12 +913,19 @@ private: struct { u8 MayReturnNull : 1; // may_return_null - u8 ZeroContents : 1; // zero_contents + FillContentsMode FillContents : 2; // zero_contents, pattern_fill_contents u8 DeallocTypeMismatch : 1; // dealloc_type_mismatch u8 DeleteSizeMismatch : 1; // delete_size_mismatch + u8 TrackAllocationStacks : 1; u32 QuarantineMaxChunkSize; // quarantine_max_chunk_size } Options; +#ifdef GWP_ASAN_HOOKS + gwp_asan::GuardedPoolAllocator GuardedAlloc; +#endif // GWP_ASAN_HOOKS + + StackDepot Depot; + // The following might get optimized out by the compiler. NOINLINE void performSanityChecks() { // Verify that the header offset field can hold the maximum offset. In the @@ -638,6 +984,14 @@ private: void quarantineOrDeallocateChunk(void *Ptr, Chunk::UnpackedHeader *Header, uptr Size) { Chunk::UnpackedHeader NewHeader = *Header; + if (UNLIKELY(NewHeader.ClassId && useMemoryTagging())) { + u8 PrevTag = extractTag(loadTag(reinterpret_cast<uptr>(Ptr))); + uptr TaggedBegin, TaggedEnd; + // Exclude the previous tag so that immediate use after free is detected + // 100% of the time. + setRandomTag(Ptr, Size, 1UL << PrevTag, &TaggedBegin, &TaggedEnd); + storeDeallocationStackMaybe(Ptr, PrevTag); + } // If the quarantine is disabled, the actual size of a chunk is 0 or larger // than the maximum allowed, we return a chunk directly to the backend. // Logical Or can be short-circuited, which introduces unnecessary @@ -672,13 +1026,39 @@ private: bool getChunkFromBlock(uptr Block, uptr *Chunk, Chunk::UnpackedHeader *Header) { - u32 Offset = 0; - if (reinterpret_cast<u32 *>(Block)[0] == BlockMarker) - Offset = reinterpret_cast<u32 *>(Block)[1]; - *Chunk = Block + Offset + Chunk::getHeaderSize(); + *Chunk = + Block + getChunkOffsetFromBlock(reinterpret_cast<const char *>(Block)); return Chunk::isValid(Cookie, reinterpret_cast<void *>(*Chunk), Header); } + static uptr getChunkOffsetFromBlock(const char *Block) { + u32 Offset = 0; + if (reinterpret_cast<const u32 *>(Block)[0] == BlockMarker) + Offset = reinterpret_cast<const u32 *>(Block)[1]; + return Offset + Chunk::getHeaderSize(); + } + + void storeAllocationStackMaybe(void *Ptr) { + if (!UNLIKELY(Options.TrackAllocationStacks)) + return; + auto *Ptr32 = reinterpret_cast<u32 *>(Ptr); + Ptr32[MemTagAllocationTraceIndex] = collectStackTrace(); + Ptr32[MemTagAllocationTidIndex] = getThreadID(); + } + + void storeDeallocationStackMaybe(void *Ptr, uint8_t PrevTag) { + if (!UNLIKELY(Options.TrackAllocationStacks)) + return; + + // Disable tag checks here so that we don't need to worry about zero sized + // allocations. + ScopedDisableMemoryTagChecks x; + auto *Ptr32 = reinterpret_cast<u32 *>(Ptr); + Ptr32[MemTagDeallocationTraceIndex] = collectStackTrace(); + Ptr32[MemTagDeallocationTidIndex] = getThreadID(); + Ptr32[MemTagPrevTagIndex] = PrevTag; + } + uptr getStats(ScopedString *Str) { Primary.getStats(Str); Secondary.getStats(Str); |
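
A few notes on the mechanisms this change introduces follow, each with a small standalone C++ sketch. Every helper type and name in the sketches is an invented stand-in rather than a scudo API unless the diff itself shows it.

The init() hunk replaces the single ZeroContents bit with a FillContents mode derived from the zero_contents and pattern_fill_contents flags, and allocate() later memsets primary-backed blocks with zero or a pattern byte accordingly. A compilable sketch of that selection and fill, assuming illustrative enum values and pattern byte (the real definitions live outside this diff):

    #include <cstddef>
    #include <cstring>

    // Assumed mode values and pattern byte, mirroring the names used in the diff.
    enum FillContentsMode { NoFill = 0, ZeroFill = 1, PatternOrZeroFill = 2 };
    constexpr unsigned char PatternFillByte = 0xAB;

    FillContentsMode pickFillMode(bool ZeroContentsFlag, bool PatternFillFlag) {
      // zero_contents wins over pattern_fill_contents, matching the init() hunk.
      return ZeroContentsFlag ? ZeroFill
                              : (PatternFillFlag ? PatternOrZeroFill : NoFill);
    }

    void fillBlock(void *Block, std::size_t BlockSize, FillContentsMode Mode) {
      if (Mode == NoFill)
        return;
      std::memset(Block, Mode == ZeroFill ? 0 : PatternFillByte, BlockSize);
    }

    int main() {
      unsigned char Buf[32];
      fillBlock(Buf, sizeof(Buf), pickFillMode(/*zero_contents=*/false,
                                               /*pattern_fill_contents=*/true));
    }

In the diff itself, primary-backed chunks are filled this way inside allocate(), while the secondary receives the mode as a parameter.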
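
collectStackTrace() grabs up to MaxTraceSize plus two frames through android_unsafe_frame_pointer_chase(), then drops the first two (its own frame and the calling allocator function) before inserting the trace into the stack depot. A host-side sketch of the same clamp-and-discard arithmetic, with a fake frame-pointer chase standing in for the bionic call and a std::vector standing in for the depot insertion:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    using uptr = uintptr_t;
    constexpr uptr MaxTraceSize = 64;

    // Stand-in for android_unsafe_frame_pointer_chase(): fills Buf with up to
    // NumEntries return addresses and reports how many frames exist.
    size_t fakeFramePointerChase(uptr *Buf, size_t NumEntries) {
      const size_t Depth = 5; // pretend the call stack is 5 frames deep
      for (size_t I = 0; I < std::min(Depth, NumEntries); ++I)
        Buf[I] = 0x1000 + I;
      return Depth;
    }

    // Mirrors the discard/clamp logic: frame 0 is collectStackTrace() itself and
    // frame 1 is the allocator function that called it, so both are skipped.
    std::vector<uptr> collectTrace() {
      constexpr uptr DiscardFrames = 2;
      uptr Stack[MaxTraceSize + DiscardFrames];
      uptr Size = fakeFramePointerChase(Stack, MaxTraceSize + DiscardFrames);
      Size = std::min<uptr>(Size, MaxTraceSize + DiscardFrames);
      return std::vector<uptr>(Stack + std::min<uptr>(DiscardFrames, Size),
                               Stack + Size);
    }

    int main() {
      std::vector<uptr> Trace = collectTrace();
      std::printf("captured %zu frames\n", Trace.size());
    }

The two slots of slack in the buffer mean a full MaxTraceSize frames can still be recorded after the two internal frames are dropped.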
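
In allocate(), a failed cache allocation no longer falls straight through to the secondary: the code first retries in each successively larger size class (the common case being a full region with a 32-bit primary) and only treats ClassId 0 as "use the secondary" if even the largest class fails. A self-contained toy model of that retry loop; tryAllocateFromClass() and the per-class budgets are invented stand-ins for TSD->Cache.allocate():

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    // Toy model: each size class has a small budget of chunks, so low classes
    // can run out (a "full region") while larger ones still have room.
    constexpr uint32_t LargestClassId = 8;
    static int Budget[LargestClassId + 1] = {0, 0, 0, 2, 4, 4, 4, 4, 4};

    void *tryAllocateFromClass(uint32_t ClassId) {
      if (ClassId == 0 || ClassId > LargestClassId || Budget[ClassId] == 0)
        return nullptr;
      --Budget[ClassId];
      return std::malloc(16u << ClassId); // toy class size
    }

    // Mirrors the retry loop added to Allocator::allocate(): if the initial
    // class is exhausted, walk up the size classes until one fits; leaving
    // ClassId == 0 tells the caller to fall back to the secondary allocator.
    void *allocateFromPrimary(uint32_t &ClassId) {
      void *Block = tryAllocateFromClass(ClassId);
      if (!Block) {
        while (ClassId < LargestClassId) {
          Block = tryAllocateFromClass(++ClassId);
          if (Block)
            break;
        }
        if (!Block)
          ClassId = 0;
      }
      return Block;
    }

    int main() {
      uint32_t ClassId = 2; // class 2's region is "full" in the toy budget above
      void *P = allocateFromPrimary(ClassId);
      std::printf("served from class %u\n", ClassId); // prints class 3
      std::free(P);
    }

Serving from a larger class wastes some bytes per chunk, but avoids failing the allocation or pushing small requests into the secondary.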
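
reallocate() used to keep the old block whenever the size delta was at most SizeClassMap::MaxSize / 2; the new test keeps it for any growth that still fits before BlockEnd, and for shrinks only when less than a page would be given back, so larger shrinks go through the full allocate/copy/deallocate path and can actually return memory. A small predicate capturing the new condition, with an assumed 4 KiB page size:

    #include <cstdint>

    using uptr = uintptr_t;

    // Assumed page size; scudo queries and caches the real value at runtime.
    constexpr uptr PageSize = 4096;

    // True when reallocate() can just rewrite the chunk header in place.
    // Growth is fine as long as the new end still fits in the block; shrinking
    // is only kept in place when less than a page would be released.
    bool canReallocInPlace(uptr Ptr, uptr OldSize, uptr NewSize, uptr BlockEnd) {
      if (Ptr + NewSize > BlockEnd)
        return false;
      return NewSize > OldSize || (OldSize - NewSize) < PageSize;
    }

    int main() {
      // Growing a 100-byte chunk to 200 bytes inside a 512-byte block: in place.
      return canReallocInPlace(0x1000, 100, 200, 0x1000 + 512) ? 0 : 1;
    }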
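
setOption() replaces the old "implement this as a backend to mallopt" stub: Option::ReleaseInterval forwards the new release-to-OS interval to both the primary and the secondary. A minimal sketch of the dispatch with placeholder backend types:

    #include <cstdint>

    using s32 = int32_t;
    using sptr = intptr_t;

    enum class Option { ReleaseInterval };

    struct ToyBackend {
      s32 ReleaseToOsIntervalMs = 0;
      void setReleaseToOsIntervalMs(s32 V) { ReleaseToOsIntervalMs = V; }
    };

    struct ToyAllocator {
      ToyBackend Primary, Secondary;
      // Returns true when the option is recognised, so a mallopt()-style
      // wrapper can report success or failure to its caller.
      bool setOption(Option O, sptr Value) {
        if (O == Option::ReleaseInterval) {
          Primary.setReleaseToOsIntervalMs(static_cast<s32>(Value));
          Secondary.setReleaseToOsIntervalMs(static_cast<s32>(Value));
          return true;
        }
        return false;
      }
    };

    int main() {
      ToyAllocator A;
      return A.setOption(Option::ReleaseInterval, 1000) ? 0 : 1;
    }

releaseToOS() is extended in the same spirit: it now releases from the secondary as well as the primary.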
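
getErrorInfo() attributes a tag-check fault to a chunk by first checking the block that contains the fault address and then walking up to 15 blocks in each direction, reporting allocated chunks whose memory tag matches the tag of the faulting pointer, until the report array fills up. A simplified, runnable model of that scan; ToyBlock, readBlock() and the three-entry block table are invented here, and the real ReadBlock() additionally pulls the chunk header and inline metadata out of the copied memory image:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    using uptr = uintptr_t;
    using u8 = uint8_t;

    constexpr size_t MaxReports = 3; // stand-in for the scudo_error_info array

    struct ToyBlock {
      uptr BlockAddr; // start of the block
      uptr ChunkAddr; // start of the user chunk inside the block
      u8 Tag;         // memory tag of the chunk
      bool Allocated;
    };

    // Three neighbouring blocks of a toy 64-byte size class.
    static const ToyBlock Blocks[] = {
        {0x1000, 0x1010, 0x3, true},
        {0x1040, 0x1050, 0x7, true},
        {0x1080, 0x1090, 0x3, false},
    };

    bool readBlock(uptr BlockAddr, ToyBlock *Out) {
      for (const ToyBlock &B : Blocks)
        if (B.BlockAddr == BlockAddr) {
          *Out = B;
          return true;
        }
      return false;
    }

    // Mirrors the shape of getErrorInfo()'s OOB scan.
    void scanForOOB(uptr UntaggedFaultAddr, u8 FaultAddrTag, uptr BlockBegin,
                    uptr BlockSize, uptr RegionBegin, uptr RegionEnd) {
      size_t NumReports = 0;
      auto CheckOOB = [&](uptr BlockAddr) {
        if (BlockAddr < RegionBegin || BlockAddr >= RegionEnd)
          return false;
        ToyBlock B;
        if (!readBlock(BlockAddr, &B) || !B.Allocated || B.Tag != FaultAddrTag)
          return false;
        std::printf("%s at chunk 0x%zx\n",
                    UntaggedFaultAddr < B.ChunkAddr ? "BUFFER_UNDERFLOW"
                                                    : "BUFFER_OVERFLOW",
                    static_cast<size_t>(B.ChunkAddr));
        return ++NumReports == MaxReports; // true stops the scan (array full)
      };

      if (CheckOOB(BlockBegin))
        return;
      // Beyond ~15 blocks in each direction, matches are likely false positives.
      for (int I = 1; I != 16; ++I)
        if (CheckOOB(BlockBegin + I * BlockSize) ||
            CheckOOB(BlockBegin - I * BlockSize))
          return;
    }

    int main() {
      // A fault at 0x104c with tag 0x3: the previous block holds an allocated
      // chunk with that tag, so it is reported as the overflow source.
      scanForOOB(/*UntaggedFaultAddr=*/0x104c, /*FaultAddrTag=*/0x3,
                 /*BlockBegin=*/0x1040, /*BlockSize=*/0x40,
                 /*RegionBegin=*/0x1000, /*RegionEnd=*/0x10c0);
    }

The "30 surrounding blocks" comment in the diff corresponds to this I = 1..15 loop covering 15 blocks on each side.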
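
The MemTag*Index constants index 32-bit slots relative to the user pointer. Per the in-code comment ("-2 corresponds to 8 bytes before the user memory"), the 16-byte-aligned header region only needs 8 bytes for the packed header, leaving slots -2 and -1 free for the allocation trace hash and thread ID, while slots 0..2 reuse the first granule of the now-dead user memory for the deallocation trace, thread ID and previous tag. A toy layout that makes the slot arithmetic concrete; ToyChunk and the 8-byte packed-header placement are assumptions of this sketch:

    #include <cstdint>

    using u32 = uint32_t;
    using sptr = intptr_t;

    // Slot indexes from the diff, in 32-bit units relative to the user pointer.
    constexpr sptr MemTagAllocationTraceIndex = -2;
    constexpr sptr MemTagAllocationTidIndex = -1;
    constexpr sptr MemTagDeallocationTraceIndex = 0;
    constexpr sptr MemTagDeallocationTidIndex = 1;
    constexpr sptr MemTagPrevTagIndex = 2;

    // Toy chunk: a 16-byte header region (8-byte packed header + 8 spare bytes)
    // followed by one 16-byte granule of user memory. The negative slots land
    // in the spare header bytes; the positive slots reuse the dead user memory.
    struct ToyChunk {
      alignas(16) unsigned char Storage[32];
      u32 *userSlots() { return reinterpret_cast<u32 *>(Storage + 16); }
    };

    void storeAllocationInfo(ToyChunk &C, u32 TraceHash, u32 Tid) {
      u32 *Ptr32 = C.userSlots();
      Ptr32[MemTagAllocationTraceIndex] = TraceHash; // bytes -8..-5
      Ptr32[MemTagAllocationTidIndex] = Tid;         // bytes -4..-1
    }

    void storeDeallocationInfo(ToyChunk &C, u32 TraceHash, u32 Tid, u32 PrevTag) {
      u32 *Ptr32 = C.userSlots();
      Ptr32[MemTagDeallocationTraceIndex] = TraceHash; // bytes 0..3
      Ptr32[MemTagDeallocationTidIndex] = Tid;         // bytes 4..7
      Ptr32[MemTagPrevTagIndex] = PrevTag;             // bytes 8..11
    }

    int main() {
      ToyChunk C{};
      storeAllocationInfo(C, /*TraceHash=*/0x1234u, /*Tid=*/42u);
      storeDeallocationInfo(C, /*TraceHash=*/0x5678u, /*Tid=*/42u, /*PrevTag=*/7u);
    }

This is also why storeDeallocationStackMaybe() in the diff wraps its writes in ScopedDisableMemoryTagChecks: as its comment says, it avoids having to worry about zero-sized allocations when touching the user-memory slots.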
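
On deallocation, quarantineOrDeallocateChunk() re-tags the chunk with setRandomTag() while excluding the chunk's previous tag (1UL << PrevTag), so an immediate use-after-free through a stale pointer always faults instead of being missed whenever the new random tag happens to equal the old one. A host-side model of that tag choice; the real code tags the memory itself with the AArch64 MTE instructions, and per the diff's "we never use zero as the chunk tag" comment, tag 0 is also kept off-limits here:

    #include <cstdint>
    #include <random>

    using uptr = uintptr_t;

    // Pick a random 4-bit memory tag that is not in ExcludeMask. Passing
    // (1UL << PrevTag) at deallocation time guarantees the freed granules never
    // keep the tag the stale pointer still carries. This is only a host-side
    // model of the policy, not of the tagging instructions.
    uint8_t pickRandomTag(uptr ExcludeMask, std::mt19937 &Rng) {
      ExcludeMask |= 1UL << 0; // tag 0 stays reserved for untagged memory
      uint8_t Tag;
      do {
        Tag = static_cast<uint8_t>(Rng() & 0xF);
      } while (ExcludeMask & (1UL << Tag));
      return Tag;
    }

    int main() {
      std::mt19937 Rng(0x5cdu);
      const uint8_t PrevTag = 0x5;
      const uint8_t NewTag = pickRandomTag(1UL << PrevTag, Rng);
      return (NewTag == PrevTag || NewTag == 0) ? 1 : 0; // never fails
    }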
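
getChunkFromBlock() now delegates to a static getChunkOffsetFromBlock() helper so getErrorInfo() can run the same computation on a copied block image: if the block starts with BlockMarker, the following 32-bit word holds the alignment offset, and the user chunk begins after that offset plus the header size. A standalone sketch that uses memcpy for the reads to sidestep aliasing; the 16-byte header-size constant is an assumption for a 64-bit target:

    #include <cstdint>
    #include <cstring>

    using u32 = uint32_t;
    using uptr = uintptr_t;

    constexpr u32 BlockMarker = 0x44554353U; // value from the diff ("SCUD" in little-endian)
    constexpr uptr HeaderSize = 16;          // assumed header space on a 64-bit target

    // If an aligned allocation left a gap at the start of the block, the block
    // begins with {BlockMarker, Offset}; otherwise the header starts right away.
    uptr getChunkOffsetFromBlock(const char *Block) {
      u32 Marker, Offset = 0;
      std::memcpy(&Marker, Block, sizeof(Marker));
      if (Marker == BlockMarker)
        std::memcpy(&Offset, Block + sizeof(u32), sizeof(Offset));
      return Offset + HeaderSize;
    }

    int main() {
      // A block with a 32-byte alignment gap recorded after the marker.
      char Block[64] = {};
      const u32 Offset = 32;
      std::memcpy(Block, &BlockMarker, sizeof(BlockMarker));
      std::memcpy(Block + sizeof(u32), &Offset, sizeof(Offset));
      return getChunkOffsetFromBlock(Block) == Offset + HeaderSize ? 0 : 1;
    }

In the diff itself the reads stay as direct u32 loads from the live block; the memcpy form here is only to keep the sketch self-contained and well-defined.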