diff options
Diffstat (limited to 'lib/Support/Windows/Memory.inc')
-rw-r--r-- | lib/Support/Windows/Memory.inc | 96 |
1 files changed, 66 insertions, 30 deletions
diff --git a/lib/Support/Windows/Memory.inc b/lib/Support/Windows/Memory.inc index 318e65aaa9ee..a67f9c7d0f35 100644 --- a/lib/Support/Windows/Memory.inc +++ b/lib/Support/Windows/Memory.inc @@ -1,9 +1,8 @@ //===- Win32/Memory.cpp - Win32 Memory Implementation -----------*- C++ -*-===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // @@ -23,7 +22,7 @@ namespace { DWORD getWindowsProtectionFlags(unsigned Flags) { - switch (Flags) { + switch (Flags & llvm::sys::Memory::MF_RWE_MASK) { // Contrary to what you might expect, the Windows page protection flags // are not a bitwise combination of RWX values case llvm::sys::Memory::MF_READ: @@ -48,6 +47,9 @@ DWORD getWindowsProtectionFlags(unsigned Flags) { return PAGE_NOACCESS; } +// While we'd be happy to allocate single pages, the Windows allocation +// granularity may be larger than a single page (in practice, it is 64K) +// so mapping less than that will create an unreachable fragment of memory. size_t getAllocationGranularity() { SYSTEM_INFO Info; ::GetSystemInfo(&Info); @@ -57,6 +59,38 @@ size_t getAllocationGranularity() { return Info.dwAllocationGranularity; } +// Large/huge memory pages need explicit process permissions in order to be +// used. See https://blogs.msdn.microsoft.com/oldnewthing/20110128-00/?p=11643 +// Also large pages need to be manually enabled on your OS. If all this is +// successful, we return the minimal large memory page size. 
+static size_t enableProcessLargePages() { + HANDLE Token = 0; + size_t LargePageMin = GetLargePageMinimum(); + if (LargePageMin) + OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, + &Token); + if (!Token) + return 0; + LUID Luid; + if (!LookupPrivilegeValue(0, SE_LOCK_MEMORY_NAME, &Luid)) { + CloseHandle(Token); + return 0; + } + TOKEN_PRIVILEGES TP{}; + TP.PrivilegeCount = 1; + TP.Privileges[0].Luid = Luid; + TP.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; + if (!AdjustTokenPrivileges(Token, FALSE, &TP, 0, 0, 0)) { + CloseHandle(Token); + return 0; + } + DWORD E = GetLastError(); + CloseHandle(Token); + if (E == ERROR_SUCCESS) + return LargePageMin; + return 0; +} + } // namespace namespace llvm { @@ -75,22 +109,23 @@ MemoryBlock Memory::allocateMappedMemory(size_t NumBytes, if (NumBytes == 0) return MemoryBlock(); - // While we'd be happy to allocate single pages, the Windows allocation - // granularity may be larger than a single page (in practice, it is 64K) - // so mapping less than that will create an unreachable fragment of memory. - // Avoid using one-time initialization of static locals here, since they - // aren't thread safe with MSVC. - static volatile size_t GranularityCached; - size_t Granularity = GranularityCached; - if (Granularity == 0) { - Granularity = getAllocationGranularity(); - GranularityCached = Granularity; + static size_t DefaultGranularity = getAllocationGranularity(); + static size_t LargePageGranularity = enableProcessLargePages(); + + DWORD AllocType = MEM_RESERVE | MEM_COMMIT; + bool HugePages = false; + size_t Granularity = DefaultGranularity; + + if ((Flags & MF_HUGE_HINT) && LargePageGranularity > 0) { + AllocType |= MEM_LARGE_PAGES; + HugePages = true; + Granularity = LargePageGranularity; } - const size_t NumBlocks = (NumBytes+Granularity-1)/Granularity; + size_t NumBlocks = (NumBytes + Granularity - 1) / Granularity; uintptr_t Start = NearBlock ? 
reinterpret_cast<uintptr_t>(NearBlock->base()) + - NearBlock->size() + NearBlock->allocatedSize() : 0; // If the requested address is not aligned to the allocation granularity, @@ -100,13 +135,13 @@ MemoryBlock Memory::allocateMappedMemory(size_t NumBytes, DWORD Protect = getWindowsProtectionFlags(Flags); - void *PA = ::VirtualAlloc(reinterpret_cast<void*>(Start), - NumBlocks*Granularity, - MEM_RESERVE | MEM_COMMIT, Protect); + size_t AllocSize = NumBlocks * Granularity; + void *PA = ::VirtualAlloc(reinterpret_cast<void *>(Start), + AllocSize, AllocType, Protect); if (PA == NULL) { - if (NearBlock) { - // Try again without the NearBlock hint - return allocateMappedMemory(NumBytes, NULL, Flags, EC); + if (NearBlock || HugePages) { + // Try again without the NearBlock hint and without large memory pages + return allocateMappedMemory(NumBytes, NULL, Flags & ~MF_HUGE_HINT, EC); } EC = mapWindowsError(::GetLastError()); return MemoryBlock(); @@ -114,40 +149,41 @@ MemoryBlock Memory::allocateMappedMemory(size_t NumBytes, MemoryBlock Result; Result.Address = PA; - Result.Size = NumBlocks*Granularity; + Result.AllocatedSize = AllocSize; + Result.Flags = (Flags & ~MF_HUGE_HINT) | (HugePages ? 
MF_HUGE_HINT : 0); if (Flags & MF_EXEC) - Memory::InvalidateInstructionCache(Result.Address, Result.Size); + Memory::InvalidateInstructionCache(Result.Address, AllocSize); return Result; } std::error_code Memory::releaseMappedMemory(MemoryBlock &M) { - if (M.Address == 0 || M.Size == 0) + if (M.Address == 0 || M.AllocatedSize == 0) return std::error_code(); if (!VirtualFree(M.Address, 0, MEM_RELEASE)) return mapWindowsError(::GetLastError()); M.Address = 0; - M.Size = 0; + M.AllocatedSize = 0; return std::error_code(); } std::error_code Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) { - if (M.Address == 0 || M.Size == 0) + if (M.Address == 0 || M.AllocatedSize == 0) return std::error_code(); DWORD Protect = getWindowsProtectionFlags(Flags); DWORD OldFlags; - if (!VirtualProtect(M.Address, M.Size, Protect, &OldFlags)) + if (!VirtualProtect(M.Address, M.AllocatedSize, Protect, &OldFlags)) return mapWindowsError(::GetLastError()); if (Flags & MF_EXEC) - Memory::InvalidateInstructionCache(M.Address, M.Size); + Memory::InvalidateInstructionCache(M.Address, M.AllocatedSize); return std::error_code(); } |