diff options
Diffstat (limited to 'lib/msan')
| -rw-r--r-- | lib/msan/CMakeLists.txt | 4 | ||||
| -rw-r--r-- | lib/msan/msan.h | 95 | ||||
| -rw-r--r-- | lib/msan/msan_allocator.cc | 48 | ||||
| -rw-r--r-- | lib/msan/msan_interceptors.cc | 86 | ||||
| -rw-r--r-- | lib/msan/msan_interface_internal.h | 10 | ||||
| -rw-r--r-- | lib/msan/msan_linux.cc | 3 |
6 files changed, 177 insertions, 69 deletions
diff --git a/lib/msan/CMakeLists.txt b/lib/msan/CMakeLists.txt index e7f2877d1b2a..598ae54588c1 100644 --- a/lib/msan/CMakeLists.txt +++ b/lib/msan/CMakeLists.txt @@ -25,8 +25,7 @@ append_list_if(COMPILER_RT_HAS_FFREESTANDING_FLAG -ffreestanding MSAN_RTL_CFLAGS set(MSAN_RUNTIME_LIBRARIES) # Static runtime library. -add_custom_target(msan) -set_target_properties(msan PROPERTIES FOLDER "Compiler-RT Misc") +add_compiler_rt_component(msan) foreach(arch ${MSAN_SUPPORTED_ARCH}) add_compiler_rt_runtime(clang_rt.msan @@ -61,7 +60,6 @@ foreach(arch ${MSAN_SUPPORTED_ARCH}) endforeach() add_compiler_rt_resource_file(msan_blacklist msan_blacklist.txt msan) -add_dependencies(compiler-rt msan) if(COMPILER_RT_INCLUDE_TESTS) add_subdirectory(tests) diff --git a/lib/msan/msan.h b/lib/msan/msan.h index 1f2ff59ca686..0709260eebe2 100644 --- a/lib/msan/msan.h +++ b/lib/msan/msan.h @@ -42,27 +42,43 @@ struct MappingDesc { #if SANITIZER_LINUX && defined(__mips64) -// Everything is above 0x00e000000000. +// MIPS64 maps: +// - 0x0000000000-0x0200000000: Program own segments +// - 0xa200000000-0xc000000000: PIE program segments +// - 0xe200000000-0xffffffffff: libraries segments. 
const MappingDesc kMemoryLayout[] = { - {0x000000000000ULL, 0x00a000000000ULL, MappingDesc::INVALID, "invalid"}, - {0x00a000000000ULL, 0x00c000000000ULL, MappingDesc::SHADOW, "shadow"}, - {0x00c000000000ULL, 0x00e000000000ULL, MappingDesc::ORIGIN, "origin"}, - {0x00e000000000ULL, 0x010000000000ULL, MappingDesc::APP, "app"}}; + {0x000000000000ULL, 0x000200000000ULL, MappingDesc::APP, "app-1"}, + {0x000200000000ULL, 0x002200000000ULL, MappingDesc::INVALID, "invalid"}, + {0x002200000000ULL, 0x004000000000ULL, MappingDesc::SHADOW, "shadow-2"}, + {0x004000000000ULL, 0x004200000000ULL, MappingDesc::INVALID, "invalid"}, + {0x004200000000ULL, 0x006000000000ULL, MappingDesc::ORIGIN, "origin-2"}, + {0x006000000000ULL, 0x006200000000ULL, MappingDesc::INVALID, "invalid"}, + {0x006200000000ULL, 0x008000000000ULL, MappingDesc::SHADOW, "shadow-3"}, + {0x008000000000ULL, 0x008200000000ULL, MappingDesc::SHADOW, "shadow-1"}, + {0x008200000000ULL, 0x00a000000000ULL, MappingDesc::ORIGIN, "origin-3"}, + {0x00a000000000ULL, 0x00a200000000ULL, MappingDesc::ORIGIN, "origin-1"}, + {0x00a200000000ULL, 0x00c000000000ULL, MappingDesc::APP, "app-2"}, + {0x00c000000000ULL, 0x00e200000000ULL, MappingDesc::INVALID, "invalid"}, + {0x00e200000000ULL, 0x00ffffffffffULL, MappingDesc::APP, "app-3"}}; -#define MEM_TO_SHADOW(mem) (((uptr)(mem)) & ~0x4000000000ULL) -#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x002000000000) +#define MEM_TO_SHADOW(mem) (((uptr)(mem)) ^ 0x8000000000ULL) +#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x2000000000ULL) #elif SANITIZER_LINUX && defined(__aarch64__) -// The mapping describes both 39-bits and 42-bits. 
AArch64 maps: -// - 0x00000000000-0x00010000000: 39/42-bits program own segments -// - 0x05500000000-0x05600000000: 39-bits PIE program segments -// - 0x07f80000000-0x07fffffffff: 39-bits libraries segments -// - 0x2aa00000000-0x2ab00000000: 42-bits PIE program segments -// - 0x3ff00000000-0x3ffffffffff: 42-bits libraries segments +// The mapping describes the 39-bits, 42-bits, and 48-bits VMA. AArch64 +// maps: +// - 0x0000000000000-0x0000010000000: 39/42/48-bits program own segments +// - 0x0005500000000-0x0005600000000: 39-bits PIE program segments +// - 0x0007f80000000-0x0007fffffffff: 39-bits libraries segments +// - 0x002aa00000000-0x002ab00000000: 42-bits PIE program segments +// - 0x003ff00000000-0x003ffffffffff: 42-bits libraries segments +// - 0x0aaaaa0000000-0x0aaab00000000: 48-bits PIE program segments +// - 0xffff000000000-0x1000000000000: 48-bits libraries segments // It is fragmented in multiples segments to increase the memory available // on 42-bits (12.21% of total VMA available for 42-bits and 13.28% for -// 39 bits). +// 39 bits). The 48-bits segments only cover the usual PIE/default segments +// plus some more segments (262144GB total, 0.39% total VMA). const MappingDesc kMemoryLayout[] = { {0x00000000000ULL, 0x01000000000ULL, MappingDesc::INVALID, "invalid"}, {0x01000000000ULL, 0x02000000000ULL, MappingDesc::SHADOW, "shadow-2"}, @@ -103,6 +119,42 @@ const MappingDesc kMemoryLayout[] = { {0x3D000000000ULL, 0x3E000000000ULL, MappingDesc::SHADOW, "shadow-8"}, {0x3E000000000ULL, 0x3F000000000ULL, MappingDesc::ORIGIN, "origin-8"}, {0x3F000000000ULL, 0x40000000000ULL, MappingDesc::APP, "app-9"}, + // The mappings below are used only for 48-bits VMA. + // TODO(unknown): 48-bit mapping only covers the usual PIE, non-PIE + segments and some more segments totalizing 262144GB of VMA (which cover + only 0.32% of all 48-bit VMA). Memory availability can be increased by + adding multiple application segments like 39 and 42 mapping. 
+ {0x0040000000000ULL, 0x0041000000000ULL, MappingDesc::INVALID, "invalid"}, + {0x0041000000000ULL, 0x0042000000000ULL, MappingDesc::APP, "app-10"}, + {0x0042000000000ULL, 0x0047000000000ULL, MappingDesc::INVALID, "invalid"}, + {0x0047000000000ULL, 0x0048000000000ULL, MappingDesc::SHADOW, "shadow-10"}, + {0x0048000000000ULL, 0x0049000000000ULL, MappingDesc::ORIGIN, "origin-10"}, + {0x0049000000000ULL, 0x0050000000000ULL, MappingDesc::INVALID, "invalid"}, + {0x0050000000000ULL, 0x0051000000000ULL, MappingDesc::APP, "app-11"}, + {0x0051000000000ULL, 0x0056000000000ULL, MappingDesc::INVALID, "invalid"}, + {0x0056000000000ULL, 0x0057000000000ULL, MappingDesc::SHADOW, "shadow-11"}, + {0x0057000000000ULL, 0x0058000000000ULL, MappingDesc::ORIGIN, "origin-11"}, + {0x0058000000000ULL, 0x0059000000000ULL, MappingDesc::APP, "app-12"}, + {0x0059000000000ULL, 0x005E000000000ULL, MappingDesc::INVALID, "invalid"}, + {0x005E000000000ULL, 0x005F000000000ULL, MappingDesc::SHADOW, "shadow-12"}, + {0x005F000000000ULL, 0x0060000000000ULL, MappingDesc::ORIGIN, "origin-12"}, + {0x0060000000000ULL, 0x0061000000000ULL, MappingDesc::INVALID, "invalid"}, + {0x0061000000000ULL, 0x0062000000000ULL, MappingDesc::APP, "app-13"}, + {0x0062000000000ULL, 0x0067000000000ULL, MappingDesc::INVALID, "invalid"}, + {0x0067000000000ULL, 0x0068000000000ULL, MappingDesc::SHADOW, "shadow-13"}, + {0x0068000000000ULL, 0x0069000000000ULL, MappingDesc::ORIGIN, "origin-13"}, + {0x0069000000000ULL, 0x0AAAAA0000000ULL, MappingDesc::INVALID, "invalid"}, + {0x0AAAAA0000000ULL, 0x0AAAB00000000ULL, MappingDesc::APP, "app-14"}, + {0x0AAAB00000000ULL, 0x0AACAA0000000ULL, MappingDesc::INVALID, "invalid"}, + {0x0AACAA0000000ULL, 0x0AACB00000000ULL, MappingDesc::SHADOW, "shadow-14"}, + {0x0AACB00000000ULL, 0x0AADAA0000000ULL, MappingDesc::INVALID, "invalid"}, + {0x0AADAA0000000ULL, 0x0AADB00000000ULL, MappingDesc::ORIGIN, "origin-14"}, + {0x0AADB00000000ULL, 0x0FF9F00000000ULL, MappingDesc::INVALID, "invalid"}, + 
{0x0FF9F00000000ULL, 0x0FFA000000000ULL, MappingDesc::SHADOW, "shadow-15"}, + {0x0FFA000000000ULL, 0x0FFAF00000000ULL, MappingDesc::INVALID, "invalid"}, + {0x0FFAF00000000ULL, 0x0FFB000000000ULL, MappingDesc::ORIGIN, "origin-15"}, + {0x0FFB000000000ULL, 0x0FFFF00000000ULL, MappingDesc::INVALID, "invalid"}, + {0x0FFFF00000000ULL, 0x1000000000000ULL, MappingDesc::APP, "app-15"}, }; # define MEM_TO_SHADOW(mem) ((uptr)mem ^ 0x6000000000ULL) # define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x1000000000ULL) @@ -277,11 +329,20 @@ const int STACK_TRACE_TAG_POISON = StackTrace::TAG_CUSTOM + 1; StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \ common_flags()->fast_unwind_on_malloc) +// For platforms which support slow unwinder only, we restrict the store context +// size to 1, basically only storing the current pc. We do this because the slow +// unwinder which is based on libunwind is not async signal safe and causes +// random freezes in forking applications as well as in signal handlers. #define GET_STORE_STACK_TRACE_PC_BP(pc, bp) \ BufferedStackTrace stack; \ - if (__msan_get_track_origins() > 1 && msan_inited) \ - GetStackTrace(&stack, flags()->store_context_size, pc, bp, \ - common_flags()->fast_unwind_on_malloc) + if (__msan_get_track_origins() > 1 && msan_inited) { \ + if (!SANITIZER_CAN_FAST_UNWIND) \ + GetStackTrace(&stack, Min(1, flags()->store_context_size), pc, bp, \ + false); \ + else \ + GetStackTrace(&stack, flags()->store_context_size, pc, bp, \ + common_flags()->fast_unwind_on_malloc); \ + } #define GET_FATAL_STACK_TRACE_PC_BP(pc, bp) \ BufferedStackTrace stack; \ diff --git a/lib/msan/msan_allocator.cc b/lib/msan/msan_allocator.cc index b7d394729bfc..6c389f008cf7 100644 --- a/lib/msan/msan_allocator.cc +++ b/lib/msan/msan_allocator.cc @@ -33,9 +33,12 @@ struct MsanMapUnmapCallback { // We are about to unmap a chunk of user memory. // Mark the corresponding shadow memory as not needed. 
- FlushUnneededShadowMemory(MEM_TO_SHADOW(p), size); - if (__msan_get_track_origins()) - FlushUnneededShadowMemory(MEM_TO_ORIGIN(p), size); + uptr shadow_p = MEM_TO_SHADOW(p); + ReleaseMemoryPagesToOS(shadow_p, shadow_p + size); + if (__msan_get_track_origins()) { + uptr origin_p = MEM_TO_ORIGIN(p); + ReleaseMemoryPagesToOS(origin_p, origin_p + size); + } } }; @@ -56,23 +59,32 @@ struct MsanMapUnmapCallback { #else static const uptr kAllocatorSpace = 0x600000000000ULL; #endif - static const uptr kAllocatorSize = 0x80000000000; // 8T. - static const uptr kMetadataSize = sizeof(Metadata); static const uptr kMaxAllowedMallocSize = 8UL << 30; - typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, kMetadataSize, - DefaultSizeClassMap, - MsanMapUnmapCallback> PrimaryAllocator; + struct AP64 { // Allocator64 parameters. Deliberately using a short name. + static const uptr kSpaceBeg = kAllocatorSpace; + static const uptr kSpaceSize = 0x40000000000; // 4T. + static const uptr kMetadataSize = sizeof(Metadata); + typedef DefaultSizeClassMap SizeClassMap; + typedef MsanMapUnmapCallback MapUnmapCallback; + static const uptr kFlags = 0; + }; + + typedef SizeClassAllocator64<AP64> PrimaryAllocator; #elif defined(__powerpc64__) - static const uptr kAllocatorSpace = 0x300000000000; - static const uptr kAllocatorSize = 0x020000000000; // 2T - static const uptr kMetadataSize = sizeof(Metadata); static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G - typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, kMetadataSize, - DefaultSizeClassMap, - MsanMapUnmapCallback> PrimaryAllocator; + struct AP64 { // Allocator64 parameters. Deliberately using a short name. + static const uptr kSpaceBeg = 0x300000000000; + static const uptr kSpaceSize = 0x020000000000; // 2T. 
+ static const uptr kMetadataSize = sizeof(Metadata); + typedef DefaultSizeClassMap SizeClassMap; + typedef MsanMapUnmapCallback MapUnmapCallback; + static const uptr kFlags = 0; + }; + + typedef SizeClassAllocator64<AP64> PrimaryAllocator; #elif defined(__aarch64__) static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G static const uptr kRegionSizeLog = 20; @@ -94,7 +106,9 @@ static AllocatorCache fallback_allocator_cache; static SpinMutex fallback_mutex; void MsanAllocatorInit() { - allocator.Init(common_flags()->allocator_may_return_null); + allocator.Init( + common_flags()->allocator_may_return_null, + common_flags()->allocator_release_to_os_interval_ms); } AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) { @@ -112,7 +126,7 @@ static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment, if (size > kMaxAllowedMallocSize) { Report("WARNING: MemorySanitizer failed to allocate %p bytes\n", (void *)size); - return allocator.ReturnNullOrDie(); + return allocator.ReturnNullOrDieOnBadRequest(); } MsanThread *t = GetCurrentThread(); void *allocated; @@ -170,7 +184,7 @@ void MsanDeallocate(StackTrace *stack, void *p) { void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) { if (CallocShouldReturnNullDueToOverflow(size, nmemb)) - return allocator.ReturnNullOrDie(); + return allocator.ReturnNullOrDieOnBadRequest(); return MsanReallocate(stack, nullptr, nmemb * size, sizeof(u64), true); } diff --git a/lib/msan/msan_interceptors.cc b/lib/msan/msan_interceptors.cc index f23d3eeb3eda..6447bb1b270e 100644 --- a/lib/msan/msan_interceptors.cc +++ b/lib/msan/msan_interceptors.cc @@ -45,6 +45,8 @@ using __sanitizer::atomic_uintptr_t; DECLARE_REAL(SIZE_T, strlen, const char *s) DECLARE_REAL(SIZE_T, strnlen, const char *s, SIZE_T maxlen) +DECLARE_REAL(void *, memcpy, void *dest, const void *src, uptr n) +DECLARE_REAL(void *, memset, void *dest, int c, uptr n) #if SANITIZER_FREEBSD #define __errno_location __error @@ -64,6 +66,23 @@ bool 
IsInInterceptorScope() { return in_interceptor_scope; } +static uptr allocated_for_dlsym; +static const uptr kDlsymAllocPoolSize = 1024; +static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize]; + +static bool IsInDlsymAllocPool(const void *ptr) { + uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym; + return off < sizeof(alloc_memory_for_dlsym); +} + +static void *AllocateFromLocalPool(uptr size_in_bytes) { + uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize; + void *mem = (void *)&alloc_memory_for_dlsym[allocated_for_dlsym]; + allocated_for_dlsym += size_in_words; + CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize); + return mem; +} + #define ENSURE_MSAN_INITED() do { \ CHECK(!msan_init_is_running); \ if (!msan_inited) { \ @@ -135,10 +154,6 @@ INTERCEPTOR(SSIZE_T, readlink, const char *path, char *buf, SIZE_T bufsiz) { return res; } -INTERCEPTOR(void *, memcpy, void *dest, const void *src, SIZE_T n) { - return __msan_memcpy(dest, src, n); -} - INTERCEPTOR(void *, mempcpy, void *dest, const void *src, SIZE_T n) { return (char *)__msan_memcpy(dest, src, n) + n; } @@ -153,14 +168,6 @@ INTERCEPTOR(void *, memccpy, void *dest, const void *src, int c, SIZE_T n) { return res; } -INTERCEPTOR(void *, memmove, void *dest, const void *src, SIZE_T n) { - return __msan_memmove(dest, src, n); -} - -INTERCEPTOR(void *, memset, void *s, int c, SIZE_T n) { - return __msan_memset(s, c, n); -} - INTERCEPTOR(void *, bcopy, const void *src, void *dest, SIZE_T n) { return __msan_memmove(dest, src, n); } @@ -227,14 +234,14 @@ INTERCEPTOR(void *, pvalloc, SIZE_T size) { INTERCEPTOR(void, free, void *ptr) { GET_MALLOC_STACK_TRACE; - if (!ptr) return; + if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return; MsanDeallocate(&stack, ptr); } #if !SANITIZER_FREEBSD INTERCEPTOR(void, cfree, void *ptr) { GET_MALLOC_STACK_TRACE; - if (!ptr) return; + if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return; MsanDeallocate(&stack, ptr); } #define MSAN_MAYBE_INTERCEPT_CFREE 
INTERCEPT_FUNCTION(cfree) @@ -907,27 +914,35 @@ INTERCEPTOR(int, epoll_pwait, int epfd, void *events, int maxevents, INTERCEPTOR(void *, calloc, SIZE_T nmemb, SIZE_T size) { GET_MALLOC_STACK_TRACE; - if (UNLIKELY(!msan_inited)) { + if (UNLIKELY(!msan_inited)) // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym. - const SIZE_T kCallocPoolSize = 1024; - static uptr calloc_memory_for_dlsym[kCallocPoolSize]; - static SIZE_T allocated; - SIZE_T size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize; - void *mem = (void*)&calloc_memory_for_dlsym[allocated]; - allocated += size_in_words; - CHECK(allocated < kCallocPoolSize); - return mem; - } + return AllocateFromLocalPool(nmemb * size); return MsanCalloc(&stack, nmemb, size); } INTERCEPTOR(void *, realloc, void *ptr, SIZE_T size) { GET_MALLOC_STACK_TRACE; + if (UNLIKELY(IsInDlsymAllocPool(ptr))) { + uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym; + uptr copy_size = Min(size, kDlsymAllocPoolSize - offset); + void *new_ptr; + if (UNLIKELY(!msan_inited)) { + new_ptr = AllocateFromLocalPool(copy_size); + } else { + copy_size = size; + new_ptr = MsanReallocate(&stack, nullptr, copy_size, sizeof(u64), false); + } + internal_memcpy(new_ptr, ptr, copy_size); + return new_ptr; + } return MsanReallocate(&stack, ptr, size, sizeof(u64), false); } INTERCEPTOR(void *, malloc, SIZE_T size) { GET_MALLOC_STACK_TRACE; + if (UNLIKELY(!msan_inited)) + // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym. 
+ return AllocateFromLocalPool(size); return MsanReallocate(&stack, nullptr, size, sizeof(u64), false); } @@ -1329,11 +1344,23 @@ int OnExit() { *begin = *end = 0; \ } +#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \ + { \ + (void)ctx; \ + return __msan_memset(block, c, size); \ + } +#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \ + { \ + (void)ctx; \ + return __msan_memmove(to, from, size); \ + } +#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \ + { \ + (void)ctx; \ + return __msan_memcpy(to, from, size); \ + } + #include "sanitizer_common/sanitizer_platform_interceptors.h" -// Msan needs custom handling of these: -#undef SANITIZER_INTERCEPT_MEMSET -#undef SANITIZER_INTERCEPT_MEMMOVE -#undef SANITIZER_INTERCEPT_MEMCPY #include "sanitizer_common/sanitizer_common_interceptors.inc" #define COMMON_SYSCALL_PRE_READ_RANGE(p, s) CHECK_UNPOISONED(p, s) @@ -1489,11 +1516,8 @@ void InitializeInterceptors() { INTERCEPT_FUNCTION(fread); MSAN_MAYBE_INTERCEPT_FREAD_UNLOCKED; INTERCEPT_FUNCTION(readlink); - INTERCEPT_FUNCTION(memcpy); INTERCEPT_FUNCTION(memccpy); INTERCEPT_FUNCTION(mempcpy); - INTERCEPT_FUNCTION(memset); - INTERCEPT_FUNCTION(memmove); INTERCEPT_FUNCTION(bcopy); INTERCEPT_FUNCTION(wmemset); INTERCEPT_FUNCTION(wmemcpy); diff --git a/lib/msan/msan_interface_internal.h b/lib/msan/msan_interface_internal.h index c1e02ce72bf4..c6990db243c1 100644 --- a/lib/msan/msan_interface_internal.h +++ b/lib/msan/msan_interface_internal.h @@ -37,6 +37,16 @@ void __msan_warning(); SANITIZER_INTERFACE_ATTRIBUTE __attribute__((noreturn)) void __msan_warning_noreturn(); +using __sanitizer::uptr; +using __sanitizer::sptr; +using __sanitizer::uu64; +using __sanitizer::uu32; +using __sanitizer::uu16; +using __sanitizer::u64; +using __sanitizer::u32; +using __sanitizer::u16; +using __sanitizer::u8; + SANITIZER_INTERFACE_ATTRIBUTE void __msan_maybe_warning_1(u8 s, u32 o); SANITIZER_INTERFACE_ATTRIBUTE diff --git a/lib/msan/msan_linux.cc 
b/lib/msan/msan_linux.cc index d6a95889ad0f..0a687f620c94 100644 --- a/lib/msan/msan_linux.cc +++ b/lib/msan/msan_linux.cc @@ -66,7 +66,8 @@ static bool ProtectMemoryRange(uptr beg, uptr size, const char *name) { } if ((uptr)addr != beg) { uptr end = beg + size - 1; - Printf("FATAL: Cannot protect memory range %p - %p.\n", beg, end); + Printf("FATAL: Cannot protect memory range %p - %p (%s).\n", beg, end, + name); return false; } } |
