Diffstat (limited to 'lib')
39 files changed, 648 insertions, 375 deletions
diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt index 4ab1e933af3a6..90f72d02afa11 100644 --- a/lib/CMakeLists.txt +++ b/lib/CMakeLists.txt @@ -21,18 +21,9 @@ function(compiler_rt_build_runtime runtime) string(TOUPPER ${runtime} runtime_uppercase) if(COMPILER_RT_HAS_${runtime_uppercase}) add_subdirectory(${runtime}) - foreach(directory ${ARGN}) - add_subdirectory(${directory}) - endforeach() - endif() -endfunction() - -function(compiler_rt_build_sanitizer sanitizer) - string(TOUPPER ${sanitizer} sanitizer_uppercase) - string(TOLOWER ${sanitizer} sanitizer_lowercase) - list(FIND COMPILER_RT_SANITIZERS_TO_BUILD ${sanitizer_lowercase} result) - if(NOT ${result} EQUAL -1) - compiler_rt_build_runtime(${sanitizer} ${ARGN}) + if(${runtime} STREQUAL tsan) + add_subdirectory(tsan/dd) + endif() endif() endfunction() @@ -45,14 +36,9 @@ if(COMPILER_RT_BUILD_SANITIZERS) add_subdirectory(ubsan) endif() - compiler_rt_build_sanitizer(asan) - compiler_rt_build_sanitizer(dfsan) - compiler_rt_build_sanitizer(msan) - compiler_rt_build_sanitizer(tsan tsan/dd) - compiler_rt_build_sanitizer(safestack) - compiler_rt_build_sanitizer(cfi) - compiler_rt_build_sanitizer(esan) - compiler_rt_build_sanitizer(scudo) + foreach(sanitizer ${COMPILER_RT_SANITIZERS_TO_BUILD}) + compiler_rt_build_runtime(${sanitizer}) + endforeach() compiler_rt_build_runtime(profile) endif() diff --git a/lib/asan/asan_allocator.cc b/lib/asan/asan_allocator.cc index 57651f49cdb98..de6613f56727a 100644 --- a/lib/asan/asan_allocator.cc +++ b/lib/asan/asan_allocator.cc @@ -160,7 +160,11 @@ struct QuarantineCallback { } void *Allocate(uptr size) { - return get_allocator().Allocate(cache_, size, 1); + void *res = get_allocator().Allocate(cache_, size, 1); + // TODO(alekseys): Consider making quarantine OOM-friendly. + if (UNLIKELY(!res)) + return DieOnFailure::OnOOM(); + return res; } void Deallocate(void *p) { @@ -524,8 +528,7 @@ struct Allocator { // Expects the chunk to already be marked as quarantined by using // AtomicallySetQuarantineFlagIfAllocated. - void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack, - AllocType alloc_type) { + void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) { CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE); CHECK_GE(m->alloc_tid, 0); if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area. 
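The QuarantineCallback::Allocate hunk above shows the recurring theme of this patch set: internal allocations stop silently producing null and instead route failure through a failure-handler policy type. A minimal, self-contained sketch of that pattern — the class and function names below are illustrative stand-ins, not the runtime's actual API:

```cpp
#include <cstdio>
#include <cstdlib>

// Illustrative stand-in for the runtime's DieOnFailure policy.
struct DieOnFailureSketch {
  static void *OnOOM() {
    std::fprintf(stderr, "allocator: out of memory, terminating\n");
    std::abort();  // never returns; the runtime reports and dies here
  }
};

// Mirrors QuarantineCallback::Allocate above: test the raw allocation result
// and let the policy type decide between dying and returning null.
template <class FailurePolicy>
void *AllocateOrHandle(std::size_t size) {
  void *res = std::malloc(size);   // stands in for get_allocator().Allocate()
  if (__builtin_expect(!res, 0))   // mirrors UNLIKELY(!res)
    return FailurePolicy::OnOOM();
  return res;
}
```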
@@ -603,7 +606,7 @@ struct Allocator { ReportNewDeleteSizeMismatch(p, delete_size, stack); } - QuarantineChunk(m, ptr, stack, alloc_type); + QuarantineChunk(m, ptr, stack); } void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) { @@ -632,7 +635,7 @@ struct Allocator { } void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) { - if (CallocShouldReturnNullDueToOverflow(size, nmemb)) + if (CheckForCallocOverflow(size, nmemb)) return AsanAllocator::FailureHandler::OnBadRequest(); void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false); // If the memory comes from the secondary allocator no need to clear it diff --git a/lib/asan/asan_interceptors.cc b/lib/asan/asan_interceptors.cc index 264d5aee8ceb5..ed12a9ac90151 100644 --- a/lib/asan/asan_interceptors.cc +++ b/lib/asan/asan_interceptors.cc @@ -579,17 +579,6 @@ INTERCEPTOR(char*, __strdup, const char *s) { } #endif // ASAN_INTERCEPT___STRDUP -INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) { - void *ctx; - ASAN_INTERCEPTOR_ENTER(ctx, wcslen); - SIZE_T length = internal_wcslen(s); - if (!asan_init_is_running) { - ENSURE_ASAN_INITED(); - ASAN_READ_RANGE(ctx, s, (length + 1) * sizeof(wchar_t)); - } - return length; -} - INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) { void *ctx; ASAN_INTERCEPTOR_ENTER(ctx, strncpy); @@ -722,7 +711,6 @@ void InitializeAsanInterceptors() { // Intercept str* functions. ASAN_INTERCEPT_FUNC(strcat); // NOLINT ASAN_INTERCEPT_FUNC(strcpy); // NOLINT - ASAN_INTERCEPT_FUNC(wcslen); ASAN_INTERCEPT_FUNC(strncat); ASAN_INTERCEPT_FUNC(strncpy); ASAN_INTERCEPT_FUNC(strdup); diff --git a/lib/asan/asan_new_delete.cc b/lib/asan/asan_new_delete.cc index 3283fb3942cf1..942b169d920c8 100644 --- a/lib/asan/asan_new_delete.cc +++ b/lib/asan/asan_new_delete.cc @@ -25,22 +25,26 @@ // dllexport would normally do. We need to export them in order to make the // VS2015 dynamic CRT (MD) work. 
#if SANITIZER_WINDOWS -# define CXX_OPERATOR_ATTRIBUTE -# ifdef _WIN64 -# pragma comment(linker, "/export:??2@YAPEAX_K@Z") // operator new -# pragma comment(linker, "/export:??3@YAXPEAX@Z") // operator delete -# pragma comment(linker, "/export:??3@YAXPEAX_K@Z") // sized operator delete -# pragma comment(linker, "/export:??_U@YAPEAX_K@Z") // operator new[] -# pragma comment(linker, "/export:??_V@YAXPEAX@Z") // operator delete[] -# else -# pragma comment(linker, "/export:??2@YAPAXI@Z") // operator new -# pragma comment(linker, "/export:??3@YAXPAX@Z") // operator delete -# pragma comment(linker, "/export:??3@YAXPAXI@Z") // sized operator delete -# pragma comment(linker, "/export:??_U@YAPAXI@Z") // operator new[] -# pragma comment(linker, "/export:??_V@YAXPAX@Z") // operator delete[] -# endif +#define CXX_OPERATOR_ATTRIBUTE +#define COMMENT_EXPORT(sym) __pragma(comment(linker, "/export:"##sym)) +#ifdef _WIN64 +COMMENT_EXPORT("??2@YAPEAX_K@Z") // operator new +COMMENT_EXPORT("??2@YAPEAX_KAEBUnothrow_t@std@@@Z") // operator new nothrow +COMMENT_EXPORT("??3@YAXPEAX@Z") // operator delete +COMMENT_EXPORT("??3@YAXPEAX_K@Z") // sized operator delete +COMMENT_EXPORT("??_U@YAPEAX_K@Z") // operator new[] +COMMENT_EXPORT("??_V@YAXPEAX@Z") // operator delete[] #else -# define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE +COMMENT_EXPORT("??2@YAPAXI@Z") // operator new +COMMENT_EXPORT("??2@YAPAXIABUnothrow_t@std@@@Z") // operator new nothrow +COMMENT_EXPORT("??3@YAXPAX@Z") // operator delete +COMMENT_EXPORT("??3@YAXPAXI@Z") // sized operator delete +COMMENT_EXPORT("??_U@YAPAXI@Z") // operator new[] +COMMENT_EXPORT("??_V@YAXPAX@Z") // operator delete[] +#endif +#undef COMMENT_EXPORT +#else +#define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE #endif using namespace __asan; // NOLINT @@ -63,12 +67,17 @@ struct nothrow_t {}; enum class align_val_t: size_t {}; } // namespace std -#define OPERATOR_NEW_BODY(type) \ +// TODO(alekseys): throw std::bad_alloc instead of dying on OOM. +#define OPERATOR_NEW_BODY(type, nothrow) \ GET_STACK_TRACE_MALLOC;\ - return asan_memalign(0, size, &stack, type); -#define OPERATOR_NEW_BODY_ALIGN(type) \ + void *res = asan_memalign(0, size, &stack, type);\ + if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM();\ + return res; +#define OPERATOR_NEW_BODY_ALIGN(type, nothrow) \ GET_STACK_TRACE_MALLOC;\ - return asan_memalign((uptr)align, size, &stack, type); + void *res = asan_memalign((uptr)align, size, &stack, type);\ + if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM();\ + return res; // On OS X it's not enough to just provide our own 'operator new' and // 'operator delete' implementations, because they're going to be in the @@ -79,40 +88,42 @@ enum class align_val_t: size_t {}; // OS X we need to intercept them using their mangled names. 
#if !SANITIZER_MAC CXX_OPERATOR_ATTRIBUTE -void *operator new(size_t size) { OPERATOR_NEW_BODY(FROM_NEW); } +void *operator new(size_t size) +{ OPERATOR_NEW_BODY(FROM_NEW, false /*nothrow*/); } CXX_OPERATOR_ATTRIBUTE -void *operator new[](size_t size) { OPERATOR_NEW_BODY(FROM_NEW_BR); } +void *operator new[](size_t size) +{ OPERATOR_NEW_BODY(FROM_NEW_BR, false /*nothrow*/); } CXX_OPERATOR_ATTRIBUTE void *operator new(size_t size, std::nothrow_t const&) -{ OPERATOR_NEW_BODY(FROM_NEW); } +{ OPERATOR_NEW_BODY(FROM_NEW, true /*nothrow*/); } CXX_OPERATOR_ATTRIBUTE void *operator new[](size_t size, std::nothrow_t const&) -{ OPERATOR_NEW_BODY(FROM_NEW_BR); } +{ OPERATOR_NEW_BODY(FROM_NEW_BR, true /*nothrow*/); } CXX_OPERATOR_ATTRIBUTE void *operator new(size_t size, std::align_val_t align) -{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW); } +{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW, false /*nothrow*/); } CXX_OPERATOR_ATTRIBUTE void *operator new[](size_t size, std::align_val_t align) -{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR); } +{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR, false /*nothrow*/); } CXX_OPERATOR_ATTRIBUTE void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&) -{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW); } +{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW, true /*nothrow*/); } CXX_OPERATOR_ATTRIBUTE void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&) -{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR); } +{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR, true /*nothrow*/); } #else // SANITIZER_MAC INTERCEPTOR(void *, _Znwm, size_t size) { - OPERATOR_NEW_BODY(FROM_NEW); + OPERATOR_NEW_BODY(FROM_NEW, false /*nothrow*/); } INTERCEPTOR(void *, _Znam, size_t size) { - OPERATOR_NEW_BODY(FROM_NEW_BR); + OPERATOR_NEW_BODY(FROM_NEW_BR, false /*nothrow*/); } INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&) { - OPERATOR_NEW_BODY(FROM_NEW); + OPERATOR_NEW_BODY(FROM_NEW, true /*nothrow*/); } INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) { - OPERATOR_NEW_BODY(FROM_NEW_BR); + OPERATOR_NEW_BODY(FROM_NEW_BR, true /*nothrow*/); } #endif diff --git a/lib/asan/asan_win_dll_thunk.cc b/lib/asan/asan_win_dll_thunk.cc index 189b4b141bfa1..c67116c42ca2a 100644 --- a/lib/asan/asan_win_dll_thunk.cc +++ b/lib/asan/asan_win_dll_thunk.cc @@ -85,6 +85,7 @@ INTERCEPT_LIBRARY_FUNCTION(strstr); INTERCEPT_LIBRARY_FUNCTION(strtok); INTERCEPT_LIBRARY_FUNCTION(strtol); INTERCEPT_LIBRARY_FUNCTION(wcslen); +INTERCEPT_LIBRARY_FUNCTION(wcsnlen); #ifdef _WIN64 INTERCEPT_LIBRARY_FUNCTION(__C_specific_handler); diff --git a/lib/lsan/lsan_allocator.cc b/lib/lsan/lsan_allocator.cc index f54e953731b4d..6514aea6f6098 100644 --- a/lib/lsan/lsan_allocator.cc +++ b/lib/lsan/lsan_allocator.cc @@ -74,7 +74,7 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment, size = 1; if (size > kMaxAllowedMallocSize) { Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size); - return nullptr; + return Allocator::FailureHandler::OnBadRequest(); } void *p = allocator.Allocate(GetAllocatorCache(), size, alignment); // Do not rely on the allocator to clear the memory (it's slow). 
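The LSan changes here and just below swap bare nullptr returns for Allocator::FailureHandler calls, and lsan_calloc (next hunk) adopts CheckForCallocOverflow, the renamed CallocShouldReturnNullDueToOverflow whose implementation appears in the sanitizer_common hunks further down. The predicate reduces to a single division check; a hedged restatement:

```cpp
#include <cstdint>

// True when n * size would wrap around, in which case the caller should
// invoke its failure handler rather than allocate a truncated buffer.
// Same logic as CheckForCallocOverflow in sanitizer_allocator.cc below.
bool CheckForCallocOverflowSketch(std::uintptr_t size, std::uintptr_t n) {
  if (!size) return false;               // 0 * n can never overflow
  std::uintptr_t max = UINTPTR_MAX;      // (uptr)-1L in the runtime
  return (max / size) < n;               // overflow iff n > max / size
}
```

Callers then follow the pattern `if (CheckForCallocOverflow(size, nmemb)) return Allocator::FailureHandler::OnBadRequest();`, as the lsan_calloc and MsanCalloc hunks do.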
@@ -99,7 +99,7 @@ void *Reallocate(const StackTrace &stack, void *p, uptr new_size, if (new_size > kMaxAllowedMallocSize) { Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size); allocator.Deallocate(GetAllocatorCache(), p); - return nullptr; + return Allocator::FailureHandler::OnBadRequest(); } p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment); RegisterAllocation(stack, p, new_size); @@ -134,6 +134,8 @@ void *lsan_realloc(void *p, uptr size, const StackTrace &stack) { } void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) { + if (CheckForCallocOverflow(size, nmemb)) + return Allocator::FailureHandler::OnBadRequest(); size *= nmemb; return Allocate(stack, size, 1, true); }
diff --git a/lib/lsan/lsan_interceptors.cc b/lib/lsan/lsan_interceptors.cc index 9e39a7d1944da..7d514402ad4b1 100644 --- a/lib/lsan/lsan_interceptors.cc +++ b/lib/lsan/lsan_interceptors.cc @@ -70,7 +70,6 @@ INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) { CHECK(allocated < kCallocPoolSize); return mem; } - if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return nullptr; ENSURE_LSAN_INITED; GET_STACK_TRACE_MALLOC; return lsan_calloc(nmemb, size, stack); @@ -199,24 +198,38 @@ INTERCEPTOR(int, mprobe, void *ptr) { } #endif // SANITIZER_INTERCEPT_MCHECK_MPROBE -#define OPERATOR_NEW_BODY \ - ENSURE_LSAN_INITED; \ - GET_STACK_TRACE_MALLOC; \ - return Allocate(stack, size, 1, kAlwaysClearMemory); -INTERCEPTOR_ATTRIBUTE -void *operator new(size_t size) { OPERATOR_NEW_BODY; } -INTERCEPTOR_ATTRIBUTE -void *operator new[](size_t size) { OPERATOR_NEW_BODY; } -INTERCEPTOR_ATTRIBUTE -void *operator new(size_t size, std::nothrow_t const&) { OPERATOR_NEW_BODY; } -INTERCEPTOR_ATTRIBUTE -void *operator new[](size_t size, std::nothrow_t const&) { OPERATOR_NEW_BODY; } +// TODO(alekseys): throw std::bad_alloc instead of dying on OOM. +#define OPERATOR_NEW_BODY(nothrow) \ + ENSURE_LSAN_INITED; \ + GET_STACK_TRACE_MALLOC; \ + void *res = Allocate(stack, size, 1, kAlwaysClearMemory);\ + if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM();\ + return res; #define OPERATOR_DELETE_BODY \ ENSURE_LSAN_INITED; \ Deallocate(ptr); +// On OS X it's not enough to just provide our own 'operator new' and +// 'operator delete' implementations, because they're going to be in the runtime +// dylib, and the main executable will depend on both the runtime dylib and +// libstdc++, each of which has its own implementation of new and delete. +// To make sure that C++ allocation/deallocation operators are overridden on +// OS X we need to intercept them using their mangled names.
+#if !SANITIZER_MAC + +INTERCEPTOR_ATTRIBUTE +void *operator new(size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); } +INTERCEPTOR_ATTRIBUTE +void *operator new[](size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); } +INTERCEPTOR_ATTRIBUTE +void *operator new(size_t size, std::nothrow_t const&) +{ OPERATOR_NEW_BODY(true /*nothrow*/); } +INTERCEPTOR_ATTRIBUTE +void *operator new[](size_t size, std::nothrow_t const&) +{ OPERATOR_NEW_BODY(true /*nothrow*/); } + INTERCEPTOR_ATTRIBUTE void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; } INTERCEPTOR_ATTRIBUTE @@ -224,9 +237,31 @@ void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; } INTERCEPTOR_ATTRIBUTE void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; } INTERCEPTOR_ATTRIBUTE -void operator delete[](void *ptr, std::nothrow_t const &) { - OPERATOR_DELETE_BODY; -} +void operator delete[](void *ptr, std::nothrow_t const &) +{ OPERATOR_DELETE_BODY; } + +#else // SANITIZER_MAC + +INTERCEPTOR(void *, _Znwm, size_t size) +{ OPERATOR_NEW_BODY(false /*nothrow*/); } +INTERCEPTOR(void *, _Znam, size_t size) +{ OPERATOR_NEW_BODY(false /*nothrow*/); } +INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&) +{ OPERATOR_NEW_BODY(true /*nothrow*/); } +INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) +{ OPERATOR_NEW_BODY(true /*nothrow*/); } + +INTERCEPTOR(void, _ZdlPv, void *ptr) +{ OPERATOR_DELETE_BODY; } +INTERCEPTOR(void, _ZdaPv, void *ptr) +{ OPERATOR_DELETE_BODY; } +INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) +{ OPERATOR_DELETE_BODY; } +INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) +{ OPERATOR_DELETE_BODY; } + +#endif // !SANITIZER_MAC + ///// Thread initialization and finalization. 
///// diff --git a/lib/msan/msan_allocator.cc b/lib/msan/msan_allocator.cc index d0f478afc9ccc..a92b7fd12f92b 100644 --- a/lib/msan/msan_allocator.cc +++ b/lib/msan/msan_allocator.cc @@ -195,7 +195,7 @@ void MsanDeallocate(StackTrace *stack, void *p) { } void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) { - if (CallocShouldReturnNullDueToOverflow(size, nmemb)) + if (CheckForCallocOverflow(size, nmemb)) return Allocator::FailureHandler::OnBadRequest(); return MsanReallocate(stack, nullptr, nmemb * size, sizeof(u64), true); } diff --git a/lib/msan/msan_interceptors.cc b/lib/msan/msan_interceptors.cc index 0f50693441be6..ce8444a3bb2f0 100644 --- a/lib/msan/msan_interceptors.cc +++ b/lib/msan/msan_interceptors.cc @@ -538,49 +538,6 @@ INTERCEPTOR(int, mbrtowc, wchar_t *dest, const char *src, SIZE_T n, void *ps) { return res; } -INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) { - ENSURE_MSAN_INITED(); - SIZE_T res = REAL(wcslen)(s); - CHECK_UNPOISONED(s, sizeof(wchar_t) * (res + 1)); - return res; -} - -INTERCEPTOR(SIZE_T, wcsnlen, const wchar_t *s, SIZE_T n) { - ENSURE_MSAN_INITED(); - SIZE_T res = REAL(wcsnlen)(s, n); - CHECK_UNPOISONED(s, sizeof(wchar_t) * Min(res + 1, n)); - return res; -} - -// wchar_t *wcschr(const wchar_t *wcs, wchar_t wc); -INTERCEPTOR(wchar_t *, wcschr, void *s, wchar_t wc, void *ps) { - ENSURE_MSAN_INITED(); - wchar_t *res = REAL(wcschr)(s, wc, ps); - return res; -} - -// wchar_t *wcscpy(wchar_t *dest, const wchar_t *src); -INTERCEPTOR(wchar_t *, wcscpy, wchar_t *dest, const wchar_t *src) { - ENSURE_MSAN_INITED(); - GET_STORE_STACK_TRACE; - wchar_t *res = REAL(wcscpy)(dest, src); - CopyShadowAndOrigin(dest, src, sizeof(wchar_t) * (REAL(wcslen)(src) + 1), - &stack); - return res; -} - -INTERCEPTOR(wchar_t *, wcsncpy, wchar_t *dest, const wchar_t *src, - SIZE_T n) { // NOLINT - ENSURE_MSAN_INITED(); - GET_STORE_STACK_TRACE; - SIZE_T copy_size = REAL(wcsnlen)(src, n); - if (copy_size < n) copy_size++; // trailing \0 - wchar_t *res = REAL(wcsncpy)(dest, src, n); // NOLINT - CopyShadowAndOrigin(dest, src, copy_size * sizeof(wchar_t), &stack); - __msan_unpoison(dest + copy_size, (n - copy_size) * sizeof(wchar_t)); - return res; -} - // wchar_t *wmemcpy(wchar_t *dest, const wchar_t *src, SIZE_T n); INTERCEPTOR(wchar_t *, wmemcpy, wchar_t *dest, const wchar_t *src, SIZE_T n) { ENSURE_MSAN_INITED(); @@ -1344,11 +1301,11 @@ int OnExit() { return __msan_memcpy(to, from, size); \ } -#define COMMON_INTERCEPTOR_COPY_STRING(ctx, to, from, size) \ - do { \ - GET_STORE_STACK_TRACE; \ - CopyShadowAndOrigin(to, from, size, &stack); \ - __msan_unpoison(to + size, 1); \ +#define COMMON_INTERCEPTOR_COPY_STRING(ctx, to, from, size) \ + do { \ + GET_STORE_STACK_TRACE; \ + CopyShadowAndOrigin(to, from, size, &stack); \ + __msan_unpoison(to + size, 1); \ } while (false) #include "sanitizer_common/sanitizer_platform_interceptors.h" @@ -1424,6 +1381,35 @@ INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb callback, void *data) { return res; } +// wchar_t *wcschr(const wchar_t *wcs, wchar_t wc); +INTERCEPTOR(wchar_t *, wcschr, void *s, wchar_t wc, void *ps) { + ENSURE_MSAN_INITED(); + wchar_t *res = REAL(wcschr)(s, wc, ps); + return res; +} + +// wchar_t *wcscpy(wchar_t *dest, const wchar_t *src); +INTERCEPTOR(wchar_t *, wcscpy, wchar_t *dest, const wchar_t *src) { + ENSURE_MSAN_INITED(); + GET_STORE_STACK_TRACE; + wchar_t *res = REAL(wcscpy)(dest, src); + CopyShadowAndOrigin(dest, src, sizeof(wchar_t) * (REAL(wcslen)(src) + 1), + &stack); + return res; +} + +INTERCEPTOR(wchar_t *, 
wcsncpy, wchar_t *dest, const wchar_t *src, + SIZE_T n) { // NOLINT + ENSURE_MSAN_INITED(); + GET_STORE_STACK_TRACE; + SIZE_T copy_size = REAL(wcsnlen)(src, n); + if (copy_size < n) copy_size++; // trailing \0 + wchar_t *res = REAL(wcsncpy)(dest, src, n); // NOLINT + CopyShadowAndOrigin(dest, src, copy_size * sizeof(wchar_t), &stack); + __msan_unpoison(dest + copy_size, (n - copy_size) * sizeof(wchar_t)); + return res; +} + // These interface functions reside here so that they can use // REAL(memset), etc. void __msan_unpoison(const void *a, uptr size) { diff --git a/lib/msan/msan_new_delete.cc b/lib/msan/msan_new_delete.cc index 540100316693b..c7295feebfe47 100644 --- a/lib/msan/msan_new_delete.cc +++ b/lib/msan/msan_new_delete.cc @@ -14,6 +14,7 @@ #include "msan.h" #include "interception/interception.h" +#include "sanitizer_common/sanitizer_allocator.h" #if MSAN_REPLACE_OPERATORS_NEW_AND_DELETE @@ -27,18 +28,25 @@ namespace std { } // namespace std -#define OPERATOR_NEW_BODY \ +// TODO(alekseys): throw std::bad_alloc instead of dying on OOM. +#define OPERATOR_NEW_BODY(nothrow) \ GET_MALLOC_STACK_TRACE; \ - return MsanReallocate(&stack, 0, size, sizeof(u64), false) + void *res = MsanReallocate(&stack, 0, size, sizeof(u64), false);\ + if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM();\ + return res INTERCEPTOR_ATTRIBUTE -void *operator new(size_t size) { OPERATOR_NEW_BODY; } +void *operator new(size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); } INTERCEPTOR_ATTRIBUTE -void *operator new[](size_t size) { OPERATOR_NEW_BODY; } +void *operator new[](size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); } INTERCEPTOR_ATTRIBUTE -void *operator new(size_t size, std::nothrow_t const&) { OPERATOR_NEW_BODY; } +void *operator new(size_t size, std::nothrow_t const&) { + OPERATOR_NEW_BODY(true /*nothrow*/); +} INTERCEPTOR_ATTRIBUTE -void *operator new[](size_t size, std::nothrow_t const&) { OPERATOR_NEW_BODY; } +void *operator new[](size_t size, std::nothrow_t const&) { + OPERATOR_NEW_BODY(true /*nothrow*/); +} #define OPERATOR_DELETE_BODY \ GET_MALLOC_STACK_TRACE; \ diff --git a/lib/msan/tests/msan_test.cc b/lib/msan/tests/msan_test.cc index 543a7eb98bcc1..b2d5f7c605eef 100644 --- a/lib/msan/tests/msan_test.cc +++ b/lib/msan/tests/msan_test.cc @@ -1707,6 +1707,48 @@ TEST(MemorySanitizer, strncat_overflow) { // NOLINT EXPECT_POISONED(a[7]); } +TEST(MemorySanitizer, wcscat) { + wchar_t a[10]; + wchar_t b[] = L"def"; + wcscpy(a, L"abc"); + + wcscat(a, b); + EXPECT_EQ(6U, wcslen(a)); + EXPECT_POISONED(a[7]); + + a[3] = 0; + __msan_poison(b + 1, sizeof(wchar_t)); + EXPECT_UMR(wcscat(a, b)); + + __msan_unpoison(b + 1, sizeof(wchar_t)); + __msan_poison(a + 2, sizeof(wchar_t)); + EXPECT_UMR(wcscat(a, b)); +} + +TEST(MemorySanitizer, wcsncat) { + wchar_t a[10]; + wchar_t b[] = L"def"; + wcscpy(a, L"abc"); + + wcsncat(a, b, 5); + EXPECT_EQ(6U, wcslen(a)); + EXPECT_POISONED(a[7]); + + a[3] = 0; + __msan_poison(a + 4, sizeof(wchar_t) * 6); + wcsncat(a, b, 2); + EXPECT_EQ(5U, wcslen(a)); + EXPECT_POISONED(a[6]); + + a[3] = 0; + __msan_poison(b + 1, sizeof(wchar_t)); + EXPECT_UMR(wcsncat(a, b, 2)); + + __msan_unpoison(b + 1, sizeof(wchar_t)); + __msan_poison(a + 2, sizeof(wchar_t)); + EXPECT_UMR(wcsncat(a, b, 2)); +} + #define TEST_STRTO_INT(func_name, char_type, str_prefix) \ TEST(MemorySanitizer, func_name) { \ char_type *e; \ diff --git a/lib/profile/CMakeLists.txt b/lib/profile/CMakeLists.txt index 006285b34943c..342f8ee7ebbc5 100644 --- a/lib/profile/CMakeLists.txt +++ b/lib/profile/CMakeLists.txt @@ 
-48,6 +48,7 @@ set(PROFILE_SOURCES InstrProfilingFile.c InstrProfilingMerge.c InstrProfilingMergeFile.c + InstrProfilingNameVar.c InstrProfilingWriter.c InstrProfilingPlatformDarwin.c InstrProfilingPlatformLinux.c diff --git a/lib/profile/InstrProfiling.c b/lib/profile/InstrProfiling.c index 6828a3d27f34c..fe66fec506585 100644 --- a/lib/profile/InstrProfiling.c +++ b/lib/profile/InstrProfiling.c @@ -19,8 +19,6 @@ COMPILER_RT_WEAK uint64_t INSTR_PROF_RAW_VERSION_VAR = INSTR_PROF_RAW_VERSION; -COMPILER_RT_WEAK char INSTR_PROF_PROFILE_NAME_VAR[1] = {0}; - COMPILER_RT_VISIBILITY uint64_t __llvm_profile_get_magic(void) { return sizeof(void *) == sizeof(uint64_t) ? (INSTR_PROF_RAW_MAGIC_64) : (INSTR_PROF_RAW_MAGIC_32); diff --git a/lib/profile/InstrProfilingBuffer.c b/lib/profile/InstrProfilingBuffer.c index ac259e83cbd29..a7e852f53ec39 100644 --- a/lib/profile/InstrProfilingBuffer.c +++ b/lib/profile/InstrProfilingBuffer.c @@ -45,15 +45,24 @@ uint64_t __llvm_profile_get_size_for_buffer_internal( (CountersEnd - CountersBegin) * sizeof(uint64_t) + NamesSize + Padding; } +COMPILER_RT_VISIBILITY +void initBufferWriter(ProfDataWriter *BufferWriter, char *Buffer) { + BufferWriter->Write = lprofBufferWriter; + BufferWriter->WriterCtx = Buffer; +} + COMPILER_RT_VISIBILITY int __llvm_profile_write_buffer(char *Buffer) { - return lprofWriteData(lprofBufferWriter, Buffer, 0); + ProfDataWriter BufferWriter; + initBufferWriter(&BufferWriter, Buffer); + return lprofWriteData(&BufferWriter, 0, 0); } COMPILER_RT_VISIBILITY int __llvm_profile_write_buffer_internal( char *Buffer, const __llvm_profile_data *DataBegin, const __llvm_profile_data *DataEnd, const uint64_t *CountersBegin, const uint64_t *CountersEnd, const char *NamesBegin, const char *NamesEnd) { - return lprofWriteDataImpl(lprofBufferWriter, Buffer, DataBegin, DataEnd, - CountersBegin, CountersEnd, 0, NamesBegin, - NamesEnd); + ProfDataWriter BufferWriter; + initBufferWriter(&BufferWriter, Buffer); + return lprofWriteDataImpl(&BufferWriter, DataBegin, DataEnd, CountersBegin, + CountersEnd, 0, NamesBegin, NamesEnd, 0); } diff --git a/lib/profile/InstrProfilingFile.c b/lib/profile/InstrProfilingFile.c index dfcbe52d7e4fb..d038bb9cb5b0f 100644 --- a/lib/profile/InstrProfilingFile.c +++ b/lib/profile/InstrProfilingFile.c @@ -91,24 +91,39 @@ static const char *getCurFilename(char *FilenameBuf); static unsigned doMerging() { return lprofCurFilename.MergePoolSize; } /* Return 1 if there is an error, otherwise return 0. 
*/ -static uint32_t fileWriter(ProfDataIOVec *IOVecs, uint32_t NumIOVecs, - void **WriterCtx) { +static uint32_t fileWriter(ProfDataWriter *This, ProfDataIOVec *IOVecs, + uint32_t NumIOVecs) { uint32_t I; - FILE *File = (FILE *)*WriterCtx; + FILE *File = (FILE *)This->WriterCtx; for (I = 0; I < NumIOVecs; I++) { - if (fwrite(IOVecs[I].Data, IOVecs[I].ElmSize, IOVecs[I].NumElm, File) != - IOVecs[I].NumElm) - return 1; + if (IOVecs[I].Data) { + if (fwrite(IOVecs[I].Data, IOVecs[I].ElmSize, IOVecs[I].NumElm, File) != + IOVecs[I].NumElm) + return 1; + } else { + if (fseek(File, IOVecs[I].ElmSize * IOVecs[I].NumElm, SEEK_CUR) == -1) + return 1; + } } return 0; } +static void initFileWriter(ProfDataWriter *This, FILE *File) { + This->Write = fileWriter; + This->WriterCtx = File; +} + COMPILER_RT_VISIBILITY ProfBufferIO * lprofCreateBufferIOInternal(void *File, uint32_t BufferSz) { FreeHook = &free; DynamicBufferIOBuffer = (uint8_t *)calloc(BufferSz, 1); VPBufferSize = BufferSz; - return lprofCreateBufferIO(fileWriter, File); + ProfDataWriter *fileWriter = + (ProfDataWriter *)calloc(sizeof(ProfDataWriter), 1); + initFileWriter(fileWriter, File); + ProfBufferIO *IO = lprofCreateBufferIO(fileWriter); + IO->OwnFileWriter = 1; + return IO; } static void setupIOBuffer() { @@ -122,9 +137,10 @@ static void setupIOBuffer() { /* Read profile data in \c ProfileFile and merge with in-memory profile counters. Returns -1 if there is a fatal error, otherwise - 0 is returned. + 0 is returned. Returning 0 does not mean the merge is actually + performed. If the merge is actually done, *MergeDone is set to 1. */ -static int doProfileMerging(FILE *ProfileFile) { +static int doProfileMerging(FILE *ProfileFile, int *MergeDone) { uint64_t ProfileFileSize; char *ProfileBuffer; @@ -169,6 +185,8 @@ static int doProfileMerging(FILE *ProfileFile) { __llvm_profile_merge_from_buffer(ProfileBuffer, ProfileFileSize); (void)munmap(ProfileBuffer, ProfileFileSize); + *MergeDone = 1; + return 0; } @@ -190,7 +208,7 @@ static void createProfileDir(const char *Filename) { * dumper. With profile merging enabled, each executable as well as any of * its instrumented shared libraries dumps profile data into its own data file.
*/ -static FILE *openFileForMerging(const char *ProfileFileName) { +static FILE *openFileForMerging(const char *ProfileFileName, int *MergeDone) { FILE *ProfileFile; int rc; @@ -199,15 +217,14 @@ static FILE *openFileForMerging(const char *ProfileFileName) { if (!ProfileFile) return NULL; - rc = doProfileMerging(ProfileFile); - if (rc || COMPILER_RT_FTRUNCATE(ProfileFile, 0L) || + rc = doProfileMerging(ProfileFile, MergeDone); + if (rc || (!*MergeDone && COMPILER_RT_FTRUNCATE(ProfileFile, 0L)) || fseek(ProfileFile, 0L, SEEK_SET) == -1) { PROF_ERR("Profile Merging of file %s failed: %s\n", ProfileFileName, strerror(errno)); fclose(ProfileFile); return NULL; } - fseek(ProfileFile, 0L, SEEK_SET); return ProfileFile; } @@ -216,17 +233,20 @@ static int writeFile(const char *OutputName) { int RetVal; FILE *OutputFile; + int MergeDone = 0; if (!doMerging()) OutputFile = fopen(OutputName, "ab"); else - OutputFile = openFileForMerging(OutputName); + OutputFile = openFileForMerging(OutputName, &MergeDone); if (!OutputFile) return -1; FreeHook = &free; setupIOBuffer(); - RetVal = lprofWriteData(fileWriter, OutputFile, lprofGetVPDataReader()); + ProfDataWriter fileWriter; + initFileWriter(&fileWriter, OutputFile); + RetVal = lprofWriteData(&fileWriter, lprofGetVPDataReader(), MergeDone); fclose(OutputFile); return RetVal;
diff --git a/lib/profile/InstrProfilingInternal.h b/lib/profile/InstrProfilingInternal.h index c73b291013023..36490ef7d4333 100644 --- a/lib/profile/InstrProfilingInternal.h +++ b/lib/profile/InstrProfilingInternal.h @@ -48,17 +48,21 @@ typedef struct ProfDataIOVec { size_t NumElm; } ProfDataIOVec; -typedef uint32_t (*WriterCallback)(ProfDataIOVec *, uint32_t NumIOVecs, - void **WriterCtx); +struct ProfDataWriter; +typedef uint32_t (*WriterCallback)(struct ProfDataWriter *This, ProfDataIOVec *, + uint32_t NumIOVecs); + +typedef struct ProfDataWriter { + WriterCallback Write; + void *WriterCtx; +} ProfDataWriter; /*! * The data structure for buffered IO of profile data. */ typedef struct ProfBufferIO { - /* File handle. */ - void *File; - /* Low level IO callback. */ - WriterCallback FileWriter; + ProfDataWriter *FileWriter; + uint32_t OwnFileWriter; /* The start of the buffer. */ uint8_t *BufferStart; /* Total size of the buffer. */ @@ -73,7 +77,7 @@ ProfBufferIO *lprofCreateBufferIOInternal(void *File, uint32_t BufferSz); /*! * This is the interface to create a handle for buffered IO. */ -ProfBufferIO *lprofCreateBufferIO(WriterCallback FileWriter, void *File); +ProfBufferIO *lprofCreateBufferIO(ProfDataWriter *FileWriter); /*! * The interface to destroy the bufferIO handle and reclaim @@ -96,8 +100,9 @@ int lprofBufferIOFlush(ProfBufferIO *BufferIO); /* The low level interface to write data into a buffer. It is used as the * callback by other high level writer methods such as buffered IO writer * and profile data writer. */ -uint32_t lprofBufferWriter(ProfDataIOVec *IOVecs, uint32_t NumIOVecs, - void **WriterCtx); +uint32_t lprofBufferWriter(ProfDataWriter *This, ProfDataIOVec *IOVecs, + uint32_t NumIOVecs); +void initBufferWriter(ProfDataWriter *BufferWriter, char *Buffer); struct ValueProfData; struct ValueProfRecord; @@ -133,15 +138,17 @@ typedef struct VPDataReaderType { uint32_t N); } VPDataReaderType; -int lprofWriteData(WriterCallback Writer, void *WriterCtx, - VPDataReaderType *VPDataReader); -int lprofWriteDataImpl(WriterCallback Writer, void *WriterCtx, +/* Write profile data to destination.
If SkipNameDataWrite is set to 1, + the name data is already in the destination; we just skip over it. */ +int lprofWriteData(ProfDataWriter *Writer, VPDataReaderType *VPDataReader, + int SkipNameDataWrite); +int lprofWriteDataImpl(ProfDataWriter *Writer, const __llvm_profile_data *DataBegin, const __llvm_profile_data *DataEnd, const uint64_t *CountersBegin, const uint64_t *CountersEnd, VPDataReaderType *VPDataReader, const char *NamesBegin, - const char *NamesEnd); + const char *NamesEnd, int SkipNameDataWrite); /* Merge value profile data pointed to by SrcValueProfData into * in-memory profile counters pointed to by DstData. */
diff --git a/lib/profile/InstrProfilingNameVar.c b/lib/profile/InstrProfilingNameVar.c new file mode 100644 index 0000000000000..a0c448c679b57 --- /dev/null +++ b/lib/profile/InstrProfilingNameVar.c @@ -0,0 +1,18 @@ +//===- InstrProfilingNameVar.c - profile name variable setup --------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "InstrProfiling.h" + +/* char __llvm_profile_filename[1] + * + * The runtime should only provide its own definition of this symbol when the + * user has not specified one. Set this up by moving the runtime's copy of this + * symbol to an object file within the archive. + */ +COMPILER_RT_WEAK char INSTR_PROF_PROFILE_NAME_VAR[1] = {0};
diff --git a/lib/profile/InstrProfilingWriter.c b/lib/profile/InstrProfilingWriter.c index 95f37e8e9b4fd..d4c9b9bd663c6 100644 --- a/lib/profile/InstrProfilingWriter.c +++ b/lib/profile/InstrProfilingWriter.c @@ -31,41 +31,44 @@ COMPILER_RT_VISIBILITY uint32_t VPBufferSize = 0; /* The buffer writer is responsible for keeping writer state * across the call.
*/ -COMPILER_RT_VISIBILITY uint32_t lprofBufferWriter(ProfDataIOVec *IOVecs, - uint32_t NumIOVecs, - void **WriterCtx) { +COMPILER_RT_VISIBILITY uint32_t lprofBufferWriter(ProfDataWriter *This, + ProfDataIOVec *IOVecs, + uint32_t NumIOVecs) { uint32_t I; - char **Buffer = (char **)WriterCtx; + char **Buffer = (char **)&This->WriterCtx; for (I = 0; I < NumIOVecs; I++) { size_t Length = IOVecs[I].ElmSize * IOVecs[I].NumElm; - memcpy(*Buffer, IOVecs[I].Data, Length); + if (IOVecs[I].Data) + memcpy(*Buffer, IOVecs[I].Data, Length); *Buffer += Length; } return 0; } -static void llvmInitBufferIO(ProfBufferIO *BufferIO, WriterCallback FileWriter, - void *File, uint8_t *Buffer, uint32_t BufferSz) { - BufferIO->File = File; +static void llvmInitBufferIO(ProfBufferIO *BufferIO, ProfDataWriter *FileWriter, + uint8_t *Buffer, uint32_t BufferSz) { BufferIO->FileWriter = FileWriter; + BufferIO->OwnFileWriter = 0; BufferIO->BufferStart = Buffer; BufferIO->BufferSz = BufferSz; BufferIO->CurOffset = 0; } COMPILER_RT_VISIBILITY ProfBufferIO * -lprofCreateBufferIO(WriterCallback FileWriter, void *File) { +lprofCreateBufferIO(ProfDataWriter *FileWriter) { uint8_t *Buffer = DynamicBufferIOBuffer; uint32_t BufferSize = VPBufferSize; if (!Buffer) { Buffer = &BufferIOBuffer[0]; BufferSize = sizeof(BufferIOBuffer); } - llvmInitBufferIO(&TheBufferIO, FileWriter, File, Buffer, BufferSize); + llvmInitBufferIO(&TheBufferIO, FileWriter, Buffer, BufferSize); return &TheBufferIO; } COMPILER_RT_VISIBILITY void lprofDeleteBufferIO(ProfBufferIO *BufferIO) { + if (BufferIO->OwnFileWriter) + FreeHook(BufferIO->FileWriter); if (DynamicBufferIOBuffer) { FreeHook(DynamicBufferIOBuffer); DynamicBufferIOBuffer = 0; @@ -83,13 +86,16 @@ lprofBufferIOWrite(ProfBufferIO *BufferIO, const uint8_t *Data, uint32_t Size) { /* Special case, bypass the buffer completely. 
*/ ProfDataIOVec IO[] = {{Data, sizeof(uint8_t), Size}}; if (Size > BufferIO->BufferSz) { - if (BufferIO->FileWriter(IO, 1, &BufferIO->File)) + if (BufferIO->FileWriter->Write(BufferIO->FileWriter, IO, 1)) return -1; } else { /* Write the data to buffer */ uint8_t *Buffer = BufferIO->BufferStart + BufferIO->CurOffset; - lprofBufferWriter(IO, 1, (void **)&Buffer); - BufferIO->CurOffset = Buffer - BufferIO->BufferStart; + ProfDataWriter BufferWriter; + initBufferWriter(&BufferWriter, (char *)Buffer); + lprofBufferWriter(&BufferWriter, IO, 1); + BufferIO->CurOffset = + (uint8_t *)BufferWriter.WriterCtx - BufferIO->BufferStart; } return 0; } @@ -98,7 +104,7 @@ COMPILER_RT_VISIBILITY int lprofBufferIOFlush(ProfBufferIO *BufferIO) { if (BufferIO->CurOffset) { ProfDataIOVec IO[] = { {BufferIO->BufferStart, sizeof(uint8_t), BufferIO->CurOffset}}; - if (BufferIO->FileWriter(IO, 1, &BufferIO->File)) + if (BufferIO->FileWriter->Write(BufferIO->FileWriter, IO, 1)) return -1; BufferIO->CurOffset = 0; } @@ -201,7 +207,7 @@ static int writeOneValueProfData(ProfBufferIO *BufferIO, return 0; } -static int writeValueProfData(WriterCallback Writer, void *WriterCtx, +static int writeValueProfData(ProfDataWriter *Writer, VPDataReaderType *VPDataReader, const __llvm_profile_data *DataBegin, const __llvm_profile_data *DataEnd) { @@ -211,7 +217,7 @@ static int writeValueProfData(WriterCallback Writer, void *WriterCtx, if (!VPDataReader) return 0; - BufferIO = lprofCreateBufferIO(Writer, WriterCtx); + BufferIO = lprofCreateBufferIO(Writer); for (DI = DataBegin; DI < DataEnd; DI++) { if (writeOneValueProfData(BufferIO, VPDataReader, DI)) @@ -225,9 +231,9 @@ static int writeValueProfData(WriterCallback Writer, void *WriterCtx, return 0; } -COMPILER_RT_VISIBILITY int lprofWriteData(WriterCallback Writer, - void *WriterCtx, - VPDataReaderType *VPDataReader) { +COMPILER_RT_VISIBILITY int lprofWriteData(ProfDataWriter *Writer, + VPDataReaderType *VPDataReader, + int SkipNameDataWrite) { /* Match logic in __llvm_profile_write_buffer(). */ const __llvm_profile_data *DataBegin = __llvm_profile_begin_data(); const __llvm_profile_data *DataEnd = __llvm_profile_end_data(); @@ -235,18 +241,17 @@ COMPILER_RT_VISIBILITY int lprofWriteData(WriterCallback Writer, const uint64_t *CountersEnd = __llvm_profile_end_counters(); const char *NamesBegin = __llvm_profile_begin_names(); const char *NamesEnd = __llvm_profile_end_names(); - return lprofWriteDataImpl(Writer, WriterCtx, DataBegin, DataEnd, - CountersBegin, CountersEnd, VPDataReader, - NamesBegin, NamesEnd); + return lprofWriteDataImpl(Writer, DataBegin, DataEnd, CountersBegin, + CountersEnd, VPDataReader, NamesBegin, NamesEnd, + SkipNameDataWrite); } COMPILER_RT_VISIBILITY int -lprofWriteDataImpl(WriterCallback Writer, void *WriterCtx, - const __llvm_profile_data *DataBegin, +lprofWriteDataImpl(ProfDataWriter *Writer, const __llvm_profile_data *DataBegin, const __llvm_profile_data *DataEnd, const uint64_t *CountersBegin, const uint64_t *CountersEnd, VPDataReaderType *VPDataReader, const char *NamesBegin, - const char *NamesEnd) { + const char *NamesEnd, int SkipNameDataWrite) { /* Calculate size of sections. */ const uint64_t DataSize = __llvm_profile_get_data_size(DataBegin, DataEnd); @@ -268,14 +273,14 @@ lprofWriteDataImpl(WriterCallback Writer, void *WriterCtx, #include "InstrProfData.inc" /* Write the data. 
*/ - ProfDataIOVec IOVec[] = {{&Header, sizeof(__llvm_profile_header), 1}, - {DataBegin, sizeof(__llvm_profile_data), DataSize}, - {CountersBegin, sizeof(uint64_t), CountersSize}, - {NamesBegin, sizeof(uint8_t), NamesSize}, - {Zeroes, sizeof(uint8_t), Padding}}; - if (Writer(IOVec, sizeof(IOVec) / sizeof(*IOVec), &WriterCtx)) + ProfDataIOVec IOVec[] = { + {&Header, sizeof(__llvm_profile_header), 1}, + {DataBegin, sizeof(__llvm_profile_data), DataSize}, + {CountersBegin, sizeof(uint64_t), CountersSize}, + {SkipNameDataWrite ? NULL : NamesBegin, sizeof(uint8_t), NamesSize}, + {Zeroes, sizeof(uint8_t), Padding}}; + if (Writer->Write(Writer, IOVec, sizeof(IOVec) / sizeof(*IOVec))) return -1; - return writeValueProfData(Writer, WriterCtx, VPDataReader, DataBegin, - DataEnd); + return writeValueProfData(Writer, VPDataReader, DataBegin, DataEnd); } diff --git a/lib/sanitizer_common/sanitizer_allocator.cc b/lib/sanitizer_common/sanitizer_allocator.cc index db3ebb0336a9f..2f8f6e3f9aa72 100644 --- a/lib/sanitizer_common/sanitizer_allocator.cc +++ b/lib/sanitizer_common/sanitizer_allocator.cc @@ -160,7 +160,7 @@ void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) { } void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) { - if (CallocShouldReturnNullDueToOverflow(count, size)) + if (CheckForCallocOverflow(count, size)) return InternalAllocator::FailureHandler::OnBadRequest(); void *p = InternalAlloc(count * size, cache); if (p) internal_memset(p, 0, count * size); @@ -202,7 +202,7 @@ void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) { low_level_alloc_callback = callback; } -bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) { +bool CheckForCallocOverflow(uptr size, uptr n) { if (!size) return false; uptr max = (uptr)-1L; return (max / size) < n; @@ -246,11 +246,11 @@ void *ReturnNullOrDieOnFailure::OnOOM() { ReportAllocatorCannotReturnNull(); } -void *DieOnFailure::OnBadRequest() { +void NORETURN *DieOnFailure::OnBadRequest() { ReportAllocatorCannotReturnNull(); } -void *DieOnFailure::OnOOM() { +void NORETURN *DieOnFailure::OnOOM() { atomic_store_relaxed(&allocator_out_of_memory, 1); ReportAllocatorCannotReturnNull(); } diff --git a/lib/sanitizer_common/sanitizer_allocator.h b/lib/sanitizer_common/sanitizer_allocator.h index f59c13d1c5a56..0fb8a087ed6be 100644 --- a/lib/sanitizer_common/sanitizer_allocator.h +++ b/lib/sanitizer_common/sanitizer_allocator.h @@ -39,8 +39,8 @@ struct ReturnNullOrDieOnFailure { }; // Always dies on the failure. struct DieOnFailure { - static void *OnBadRequest(); - static void *OnOOM(); + static void NORETURN *OnBadRequest(); + static void NORETURN *OnOOM(); }; // Returns true if allocator detected OOM condition. Can be used to avoid memory @@ -56,8 +56,10 @@ struct NoOpMapUnmapCallback { // Callback type for iterating over chunks. typedef void (*ForEachChunkCallback)(uptr chunk, void *arg); -// Returns true if calloc(size, n) should return 0 due to overflow in size*n. -bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n); +// Returns true if calloc(size, n) call overflows on size*n calculation. +// The caller should "return POLICY::OnBadRequest();" where POLICY is the +// current allocator failure handling policy. 
+bool CheckForCallocOverflow(uptr size, uptr n); #include "sanitizer_allocator_size_class_map.h" #include "sanitizer_allocator_stats.h" diff --git a/lib/sanitizer_common/sanitizer_allocator_local_cache.h b/lib/sanitizer_common/sanitizer_allocator_local_cache.h index 8fa62a3bf8292..ec0742c20be2e 100644 --- a/lib/sanitizer_common/sanitizer_allocator_local_cache.h +++ b/lib/sanitizer_common/sanitizer_allocator_local_cache.h @@ -46,8 +46,10 @@ struct SizeClassAllocator64LocalCache { CHECK_NE(class_id, 0UL); CHECK_LT(class_id, kNumClasses); PerClass *c = &per_class_[class_id]; - if (UNLIKELY(c->count == 0)) - Refill(c, allocator, class_id); + if (UNLIKELY(c->count == 0)) { + if (UNLIKELY(!Refill(c, allocator, class_id))) + return nullptr; + } stats_.Add(AllocatorStatAllocated, c->class_size); CHECK_GT(c->count, 0); CompactPtrT chunk = c->chunks[--c->count]; @@ -101,13 +103,15 @@ struct SizeClassAllocator64LocalCache { } } - NOINLINE void Refill(PerClass *c, SizeClassAllocator *allocator, + NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator, uptr class_id) { InitCache(); uptr num_requested_chunks = c->max_count / 2; - allocator->GetFromAllocator(&stats_, class_id, c->chunks, - num_requested_chunks); + if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks, + num_requested_chunks))) + return false; c->count = num_requested_chunks; + return true; } NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id, diff --git a/lib/sanitizer_common/sanitizer_allocator_primary32.h b/lib/sanitizer_common/sanitizer_allocator_primary32.h index d3949cc057345..e85821543d271 100644 --- a/lib/sanitizer_common/sanitizer_allocator_primary32.h +++ b/lib/sanitizer_common/sanitizer_allocator_primary32.h @@ -118,7 +118,6 @@ class SizeClassAllocator32 { } void *MapWithCallback(uptr size) { - size = RoundUpTo(size, GetPageSizeCached()); void *res = MmapOrDie(size, "SizeClassAllocator32"); MapUnmapCallback().OnMap((uptr)res, size); return res; @@ -285,7 +284,7 @@ class SizeClassAllocator32 { return 0; MapUnmapCallback().OnMap(res, kRegionSize); stat->Add(AllocatorStatMapped, kRegionSize); - CHECK_EQ(0U, (res & (kRegionSize - 1))); + CHECK(IsAligned(res, kRegionSize)); possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id)); return res; } @@ -303,17 +302,17 @@ class SizeClassAllocator32 { return false; uptr n_chunks = kRegionSize / (size + kMetadataSize); uptr max_count = TransferBatch::MaxCached(class_id); + CHECK_GT(max_count, 0); TransferBatch *b = nullptr; for (uptr i = reg; i < reg + n_chunks * size; i += size) { if (!b) { b = c->CreateBatch(class_id, this, (TransferBatch*)i); - if (!b) + if (UNLIKELY(!b)) return false; b->Clear(); } b->Add((void*)i); if (b->Count() == max_count) { - CHECK_GT(b->Count(), 0); sci->free_list.push_back(b); b = nullptr; } diff --git a/lib/sanitizer_common/sanitizer_allocator_primary64.h b/lib/sanitizer_common/sanitizer_allocator_primary64.h index 035d92b98cfca..0c2e72ce7eb18 100644 --- a/lib/sanitizer_common/sanitizer_allocator_primary64.h +++ b/lib/sanitizer_common/sanitizer_allocator_primary64.h @@ -80,7 +80,7 @@ class SizeClassAllocator64 { CHECK_NE(NonConstSpaceBeg, ~(uptr)0); } SetReleaseToOSIntervalMs(release_to_os_interval_ms); - MapWithCallback(SpaceEnd(), AdditionalSize()); + MapWithCallbackOrDie(SpaceEnd(), AdditionalSize()); } s32 ReleaseToOSIntervalMs() const { @@ -92,16 +92,6 @@ class SizeClassAllocator64 { memory_order_relaxed); } - void MapWithCallback(uptr beg, uptr size) { - CHECK_EQ(beg, 
reinterpret_cast<uptr>(MmapFixedOrDie(beg, size))); - MapUnmapCallback().OnMap(beg, size); - } - - void UnmapWithCallback(uptr beg, uptr size) { - MapUnmapCallback().OnUnmap(beg, size); - UnmapOrDie(reinterpret_cast<void *>(beg), size); - } - static bool CanAllocate(uptr size, uptr alignment) { return size <= SizeClassMap::kMaxSize && alignment <= SizeClassMap::kMaxSize; @@ -116,16 +106,20 @@ class SizeClassAllocator64 { BlockingMutexLock l(®ion->mutex); uptr old_num_chunks = region->num_freed_chunks; uptr new_num_freed_chunks = old_num_chunks + n_chunks; - EnsureFreeArraySpace(region, region_beg, new_num_freed_chunks); + // Failure to allocate free array space while releasing memory is non + // recoverable. + if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, + new_num_freed_chunks))) + DieOnFailure::OnOOM(); for (uptr i = 0; i < n_chunks; i++) free_array[old_num_chunks + i] = chunks[i]; region->num_freed_chunks = new_num_freed_chunks; - region->n_freed += n_chunks; + region->stats.n_freed += n_chunks; MaybeReleaseToOS(class_id); } - NOINLINE void GetFromAllocator(AllocatorStats *stat, uptr class_id, + NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id, CompactPtrT *chunks, uptr n_chunks) { RegionInfo *region = GetRegionInfo(class_id); uptr region_beg = GetRegionBeginBySizeClass(class_id); @@ -133,18 +127,19 @@ class SizeClassAllocator64 { BlockingMutexLock l(®ion->mutex); if (UNLIKELY(region->num_freed_chunks < n_chunks)) { - PopulateFreeArray(stat, class_id, region, - n_chunks - region->num_freed_chunks); + if (UNLIKELY(!PopulateFreeArray(stat, class_id, region, + n_chunks - region->num_freed_chunks))) + return false; CHECK_GE(region->num_freed_chunks, n_chunks); } region->num_freed_chunks -= n_chunks; uptr base_idx = region->num_freed_chunks; for (uptr i = 0; i < n_chunks; i++) chunks[i] = free_array[base_idx + i]; - region->n_allocated += n_chunks; + region->stats.n_allocated += n_chunks; + return true; } - bool PointerIsMine(const void *p) { uptr P = reinterpret_cast<uptr>(p); if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0) @@ -211,7 +206,7 @@ class SizeClassAllocator64 { // Test-only. void TestOnlyUnmap() { - UnmapWithCallback(SpaceBeg(), kSpaceSize + AdditionalSize()); + UnmapWithCallbackOrDie(SpaceBeg(), kSpaceSize + AdditionalSize()); } static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats, @@ -224,15 +219,15 @@ class SizeClassAllocator64 { void PrintStats(uptr class_id, uptr rss) { RegionInfo *region = GetRegionInfo(class_id); if (region->mapped_user == 0) return; - uptr in_use = region->n_allocated - region->n_freed; + uptr in_use = region->stats.n_allocated - region->stats.n_freed; uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id); Printf( - " %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd " + "%s %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd " "num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd\n", - class_id, ClassIdToSize(class_id), region->mapped_user >> 10, - region->n_allocated, region->n_freed, in_use, - region->num_freed_chunks, avail_chunks, rss >> 10, - region->rtoi.num_releases); + region->exhausted ? 
"F" : " ", class_id, ClassIdToSize(class_id), + region->mapped_user >> 10, region->stats.n_allocated, + region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks, + rss >> 10, region->rtoi.num_releases); } void PrintStats() { @@ -242,8 +237,8 @@ class SizeClassAllocator64 { for (uptr class_id = 1; class_id < kNumClasses; class_id++) { RegionInfo *region = GetRegionInfo(class_id); total_mapped += region->mapped_user; - n_allocated += region->n_allocated; - n_freed += region->n_freed; + n_allocated += region->stats.n_allocated; + n_freed += region->stats.n_freed; } Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; " "remains %zd\n", @@ -326,6 +321,11 @@ class SizeClassAllocator64 { atomic_sint32_t release_to_os_interval_ms_; + struct Stats { + uptr n_allocated; + uptr n_freed; + }; + struct ReleaseToOsInfo { uptr n_freed_at_last_release; uptr num_releases; @@ -340,8 +340,9 @@ class SizeClassAllocator64 { uptr allocated_meta; // Bytes allocated for metadata. uptr mapped_user; // Bytes mapped for user memory. uptr mapped_meta; // Bytes mapped for metadata. - u32 rand_state; // Seed for random shuffle, used if kRandomShuffleChunks. - uptr n_allocated, n_freed; // Just stats. + u32 rand_state; // Seed for random shuffle, used if kRandomShuffleChunks. + bool exhausted; // Whether region is out of space for new chunks. + Stats stats; ReleaseToOsInfo rtoi; }; COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize); @@ -386,7 +387,26 @@ class SizeClassAllocator64 { kFreeArraySize); } - void EnsureFreeArraySpace(RegionInfo *region, uptr region_beg, + bool MapWithCallback(uptr beg, uptr size) { + uptr mapped = reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(beg, size)); + if (UNLIKELY(!mapped)) + return false; + CHECK_EQ(beg, mapped); + MapUnmapCallback().OnMap(beg, size); + return true; + } + + void MapWithCallbackOrDie(uptr beg, uptr size) { + CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size))); + MapUnmapCallback().OnMap(beg, size); + } + + void UnmapWithCallbackOrDie(uptr beg, uptr size) { + MapUnmapCallback().OnUnmap(beg, size); + UnmapOrDie(reinterpret_cast<void *>(beg), size); + } + + bool EnsureFreeArraySpace(RegionInfo *region, uptr region_beg, uptr num_freed_chunks) { uptr needed_space = num_freed_chunks * sizeof(CompactPtrT); if (region->mapped_free_array < needed_space) { @@ -395,66 +415,87 @@ class SizeClassAllocator64 { uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) + region->mapped_free_array; uptr new_map_size = new_mapped_free_array - region->mapped_free_array; - MapWithCallback(current_map_end, new_map_size); + if (UNLIKELY(!MapWithCallback(current_map_end, new_map_size))) + return false; region->mapped_free_array = new_mapped_free_array; } + return true; } - - NOINLINE void PopulateFreeArray(AllocatorStats *stat, uptr class_id, + NOINLINE bool PopulateFreeArray(AllocatorStats *stat, uptr class_id, RegionInfo *region, uptr requested_count) { // region->mutex is held. - uptr size = ClassIdToSize(class_id); - uptr beg_idx = region->allocated_user; - uptr end_idx = beg_idx + requested_count * size; - uptr region_beg = GetRegionBeginBySizeClass(class_id); - if (end_idx > region->mapped_user) { + const uptr size = ClassIdToSize(class_id); + const uptr new_space_beg = region->allocated_user; + const uptr new_space_end = new_space_beg + requested_count * size; + const uptr region_beg = GetRegionBeginBySizeClass(class_id); + + // Map more space for chunks, if necessary. 
+ if (new_space_end > region->mapped_user) { if (!kUsingConstantSpaceBeg && region->mapped_user == 0) region->rand_state = static_cast<u32>(region_beg >> 12); // From ASLR. // Do the mmap for the user memory. uptr map_size = kUserMapSize; - while (end_idx > region->mapped_user + map_size) + while (new_space_end > region->mapped_user + map_size) map_size += kUserMapSize; - CHECK_GE(region->mapped_user + map_size, end_idx); - MapWithCallback(region_beg + region->mapped_user, map_size); + CHECK_GE(region->mapped_user + map_size, new_space_end); + if (UNLIKELY(!MapWithCallback(region_beg + region->mapped_user, + map_size))) + return false; stat->Add(AllocatorStatMapped, map_size); region->mapped_user += map_size; } - CompactPtrT *free_array = GetFreeArray(region_beg); - uptr total_count = (region->mapped_user - beg_idx) / size; - uptr num_freed_chunks = region->num_freed_chunks; - EnsureFreeArraySpace(region, region_beg, num_freed_chunks + total_count); - for (uptr i = 0; i < total_count; i++) { - uptr chunk = beg_idx + i * size; - free_array[num_freed_chunks + total_count - 1 - i] = - PointerToCompactPtr(0, chunk); + const uptr new_chunks_count = (region->mapped_user - new_space_beg) / size; + + // Calculate the required space for metadata. + const uptr requested_allocated_meta = + region->allocated_meta + new_chunks_count * kMetadataSize; + uptr requested_mapped_meta = region->mapped_meta; + while (requested_allocated_meta > requested_mapped_meta) + requested_mapped_meta += kMetaMapSize; + // Check whether this size class is exhausted. + if (region->mapped_user + requested_mapped_meta > + kRegionSize - kFreeArraySize) { + if (!region->exhausted) { + region->exhausted = true; + Printf("%s: Out of memory. ", SanitizerToolName); + Printf("The process has exhausted %zuMB for size class %zu.\n", + kRegionSize >> 20, size); + } + return false; + } + // Map more space for metadata, if necessary. + if (requested_mapped_meta > region->mapped_meta) { + if (UNLIKELY(!MapWithCallback( + GetMetadataEnd(region_beg) - requested_mapped_meta, + requested_mapped_meta - region->mapped_meta))) + return false; + region->mapped_meta = requested_mapped_meta; } + + // If necessary, allocate more space for the free array and populate it with + // newly allocated chunks. + const uptr total_freed_chunks = region->num_freed_chunks + new_chunks_count; + if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, total_freed_chunks))) + return false; + CompactPtrT *free_array = GetFreeArray(region_beg); + for (uptr i = 0, chunk = new_space_beg; i < new_chunks_count; + i++, chunk += size) + free_array[total_freed_chunks - 1 - i] = PointerToCompactPtr(0, chunk); if (kRandomShuffleChunks) - RandomShuffle(&free_array[num_freed_chunks], total_count, + RandomShuffle(&free_array[region->num_freed_chunks], new_chunks_count, ®ion->rand_state); - region->num_freed_chunks += total_count; - region->allocated_user += total_count * size; - CHECK_LE(region->allocated_user, region->mapped_user); - region->allocated_meta += total_count * kMetadataSize; - if (region->allocated_meta > region->mapped_meta) { - uptr map_size = kMetaMapSize; - while (region->allocated_meta > region->mapped_meta + map_size) - map_size += kMetaMapSize; - // Do the mmap for the metadata. 
- CHECK_GE(region->mapped_meta + map_size, region->allocated_meta); - MapWithCallback(GetMetadataEnd(region_beg) - - region->mapped_meta - map_size, map_size); - region->mapped_meta += map_size; - } + // All necessary memory is mapped and now it is safe to advance all + // 'allocated_*' counters. + region->num_freed_chunks += new_chunks_count; + region->allocated_user += new_chunks_count * size; + CHECK_LE(region->allocated_user, region->mapped_user); + region->allocated_meta = requested_allocated_meta; CHECK_LE(region->allocated_meta, region->mapped_meta); - if (region->mapped_user + region->mapped_meta > - kRegionSize - kFreeArraySize) { - Printf("%s: Out of memory. Dying. ", SanitizerToolName); - Printf("The process has exhausted %zuMB for size class %zu.\n", - kRegionSize / 1024 / 1024, size); - Die(); - } + region->exhausted = false; + + return true; } void MaybeReleaseChunkRange(uptr region_beg, uptr chunk_size, @@ -478,8 +519,8 @@ class SizeClassAllocator64 { uptr n = region->num_freed_chunks; if (n * chunk_size < page_size) return; // No chance to release anything. - if ((region->n_freed - region->rtoi.n_freed_at_last_release) * chunk_size < - page_size) { + if ((region->stats.n_freed - + region->rtoi.n_freed_at_last_release) * chunk_size < page_size) { return; // Nothing new to release. } @@ -508,7 +549,7 @@ class SizeClassAllocator64 { CHECK_GT(chunk - prev, scaled_chunk_size); if (prev + scaled_chunk_size - range_beg >= kScaledGranularity) { MaybeReleaseChunkRange(region_beg, chunk_size, range_beg, prev); - region->rtoi.n_freed_at_last_release = region->n_freed; + region->rtoi.n_freed_at_last_release = region->stats.n_freed; region->rtoi.num_releases++; } range_beg = chunk; @@ -517,5 +558,3 @@ class SizeClassAllocator64 { } } }; - - diff --git a/lib/sanitizer_common/sanitizer_common.h b/lib/sanitizer_common/sanitizer_common.h index 560c53b6400e5..d44c715138968 100644 --- a/lib/sanitizer_common/sanitizer_common.h +++ b/lib/sanitizer_common/sanitizer_common.h @@ -92,6 +92,9 @@ void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr); void *MmapNoReserveOrDie(uptr size, const char *mem_type); void *MmapFixedOrDie(uptr fixed_addr, uptr size); +// Behaves just like MmapFixedOrDie, but tolerates out of memory condition, in +// that case returns nullptr. +void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size); void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr); void *MmapNoAccess(uptr size); // Map aligned chunk of address space; size and alignment are powers of two. 
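The sanitizer_common.h hunk above ends by declaring MmapFixedOrDieOnFatalError, whose contract is: return nullptr on ENOMEM so callers such as Refill and PopulateFreeArray can back off, and die on any other mmap failure (the POSIX and Windows implementations follow below). A self-contained POSIX approximation of that contract, using plain mmap rather than the runtime's wrappers:

```cpp
#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <sys/mman.h>

void *MmapFixedTolerateEnomemSketch(void *fixed_addr, size_t size) {
  void *p = mmap(fixed_addr, size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
  if (p == MAP_FAILED) {
    if (errno == ENOMEM)
      return nullptr;       // recoverable: the allocator backs off
    std::perror("mmap");    // any other error is fatal, as in the *OrDie path
    std::abort();
  }
  return p;
}
```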
diff --git a/lib/sanitizer_common/sanitizer_common_interceptors.inc b/lib/sanitizer_common/sanitizer_common_interceptors.inc index 6ca431d8ad822..459530aa95bac 100644 --- a/lib/sanitizer_common/sanitizer_common_interceptors.inc +++ b/lib/sanitizer_common/sanitizer_common_interceptors.inc @@ -224,16 +224,16 @@ bool PlatformHasDifferentMemcpyAndMemmove(); #endif #ifndef COMMON_INTERCEPTOR_STRNDUP_IMPL -#define COMMON_INTERCEPTOR_STRNDUP_IMPL(ctx, s, size) \ - COMMON_INTERCEPTOR_ENTER(ctx, strndup, s, size); \ - uptr copy_length = internal_strnlen(s, size); \ - char *new_mem = (char *)WRAP(malloc)(copy_length + 1); \ - if (common_flags()->intercept_strndup) { \ - COMMON_INTERCEPTOR_READ_STRING(ctx, s, Min(size, copy_length + 1)); \ - } \ - COMMON_INTERCEPTOR_COPY_STRING(ctx, new_mem, s, copy_length); \ - internal_memcpy(new_mem, s, copy_length); \ - new_mem[copy_length] = '\0'; \ +#define COMMON_INTERCEPTOR_STRNDUP_IMPL(ctx, s, size) \ + COMMON_INTERCEPTOR_ENTER(ctx, strndup, s, size); \ + uptr copy_length = internal_strnlen(s, size); \ + char *new_mem = (char *)WRAP(malloc)(copy_length + 1); \ + if (common_flags()->intercept_strndup) { \ + COMMON_INTERCEPTOR_READ_STRING(ctx, s, Min(size, copy_length + 1)); \ + } \ + COMMON_INTERCEPTOR_COPY_STRING(ctx, new_mem, s, copy_length); \ + internal_memcpy(new_mem, s, copy_length); \ + new_mem[copy_length] = '\0'; \ return new_mem; #endif @@ -6199,6 +6199,57 @@ INTERCEPTOR(int, mprobe, void *ptr) { } #endif +INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, wcslen, s); + SIZE_T res = REAL(wcslen)(s); + COMMON_INTERCEPTOR_READ_RANGE(ctx, s, sizeof(wchar_t) * (res + 1)); + return res; +} + +INTERCEPTOR(SIZE_T, wcsnlen, const wchar_t *s, SIZE_T n) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, wcsnlen, s, n); + SIZE_T res = REAL(wcsnlen)(s, n); + COMMON_INTERCEPTOR_READ_RANGE(ctx, s, sizeof(wchar_t) * Min(res + 1, n)); + return res; +} +#define INIT_WCSLEN \ + COMMON_INTERCEPT_FUNCTION(wcslen); \ + COMMON_INTERCEPT_FUNCTION(wcsnlen); + +#if SANITIZER_INTERCEPT_WCSCAT +INTERCEPTOR(wchar_t *, wcscat, wchar_t *dst, const wchar_t *src) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, wcscat, dst, src); + SIZE_T src_size = REAL(wcslen)(src); + SIZE_T dst_size = REAL(wcslen)(dst); + COMMON_INTERCEPTOR_READ_RANGE(ctx, src, (src_size + 1) * sizeof(wchar_t)); + COMMON_INTERCEPTOR_READ_RANGE(ctx, dst, (dst_size + 1) * sizeof(wchar_t)); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst + dst_size, + (src_size + 1) * sizeof(wchar_t)); + return REAL(wcscat)(dst, src); // NOLINT +} + +INTERCEPTOR(wchar_t *, wcsncat, wchar_t *dst, const wchar_t *src, SIZE_T n) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, wcsncat, dst, src, n); + SIZE_T src_size = REAL(wcsnlen)(src, n); + SIZE_T dst_size = REAL(wcslen)(dst); + COMMON_INTERCEPTOR_READ_RANGE(ctx, src, + Min(src_size + 1, n) * sizeof(wchar_t)); + COMMON_INTERCEPTOR_READ_RANGE(ctx, dst, (dst_size + 1) * sizeof(wchar_t)); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst + dst_size, + (src_size + 1) * sizeof(wchar_t)); + return REAL(wcsncat)(dst, src, n); // NOLINT +} +#define INIT_WCSCAT \ + COMMON_INTERCEPT_FUNCTION(wcscat); \ + COMMON_INTERCEPT_FUNCTION(wcsncat); +#else +#define INIT_WCSCAT +#endif + static void InitializeCommonInterceptors() { static u64 metadata_mem[sizeof(MetadataHashMap) / sizeof(u64) + 1]; interceptor_metadata_map = new((void *)&metadata_mem) MetadataHashMap(); @@ -6403,4 +6454,6 @@ static void InitializeCommonInterceptors() { INIT_UTMP; INIT_UTMPX; INIT_GETLOADAVG; + 
+  INIT_WCSCAT;
 }
diff --git a/lib/sanitizer_common/sanitizer_platform_interceptors.h b/lib/sanitizer_common/sanitizer_platform_interceptors.h
index a95497467d61b..1bc43e817230b 100644
--- a/lib/sanitizer_common/sanitizer_platform_interceptors.h
+++ b/lib/sanitizer_common/sanitizer_platform_interceptors.h
@@ -354,5 +354,6 @@
 #define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC)
 #define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC)
 #define SANITIZER_INTERCEPT_MCHECK_MPROBE SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_WCSCAT SI_NOT_WINDOWS
 
 #endif  // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H
diff --git a/lib/sanitizer_common/sanitizer_posix.cc b/lib/sanitizer_common/sanitizer_posix.cc
index 87c5b9add5cf7..63f1bf713b245 100644
--- a/lib/sanitizer_common/sanitizer_posix.cc
+++ b/lib/sanitizer_common/sanitizer_posix.cc
@@ -129,7 +129,7 @@ void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
                             PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANON, -1, 0);
   int reserrno;
-  if (internal_iserror(res, &reserrno))
+  if (UNLIKELY(internal_iserror(res, &reserrno)))
     ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno, raw_report);
   IncreaseTotalMmap(size);
   return (void *)res;
@@ -138,7 +138,7 @@ void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
 void UnmapOrDie(void *addr, uptr size) {
   if (!addr || !size) return;
   uptr res = internal_munmap(addr, size);
-  if (internal_iserror(res)) {
+  if (UNLIKELY(internal_iserror(res))) {
     Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
            SanitizerToolName, size, size, addr);
     CHECK("unable to unmap" && 0);
@@ -152,7 +152,7 @@ void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
                             PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANON, -1, 0);
   int reserrno;
-  if (internal_iserror(res, &reserrno)) {
+  if (UNLIKELY(internal_iserror(res, &reserrno))) {
     if (reserrno == ENOMEM)
       return nullptr;
     ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno);
@@ -170,15 +170,15 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
   CHECK(IsPowerOfTwo(alignment));
   uptr map_size = size + alignment;
   uptr map_res = (uptr)MmapOrDieOnFatalError(map_size, mem_type);
-  if (!map_res)
+  if (UNLIKELY(!map_res))
     return nullptr;
   uptr map_end = map_res + map_size;
   uptr res = map_res;
-  if (res & (alignment - 1))  // Not aligned.
-    res = (map_res + alignment) & ~(alignment - 1);
-  uptr end = res + size;
-  if (res != map_res)
+  if (!IsAligned(res, alignment)) {
+    res = (map_res + alignment - 1) & ~(alignment - 1);
     UnmapOrDie((void*)map_res, res - map_res);
+  }
+  uptr end = res + size;
   if (end != map_end)
     UnmapOrDie((void*)end, map_end - end);
   return (void*)res;
@@ -192,13 +192,13 @@ void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
                          MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                          -1, 0);
   int reserrno;
-  if (internal_iserror(p, &reserrno))
+  if (UNLIKELY(internal_iserror(p, &reserrno)))
     ReportMmapFailureAndDie(size, mem_type, "allocate noreserve", reserrno);
   IncreaseTotalMmap(size);
   return (void *)p;
 }
 
-void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
+void *MmapFixedImpl(uptr fixed_addr, uptr size, bool tolerate_enomem) {
   uptr PageSize = GetPageSizeCached();
   uptr p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)),
                          RoundUpTo(size, PageSize),
@@ -206,8 +206,10 @@
                          MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                          -1, 0);
   int reserrno;
-  if (internal_iserror(p, &reserrno)) {
-    char mem_type[30];
+  if (UNLIKELY(internal_iserror(p, &reserrno))) {
+    if (tolerate_enomem && reserrno == ENOMEM)
+      return nullptr;
+    char mem_type[40];
     internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
                       fixed_addr);
     ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno);
@@ -216,6 +218,14 @@
   return (void *)p;
 }
 
+void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
+  return MmapFixedImpl(fixed_addr, size, false /*tolerate_enomem*/);
+}
+
+void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) {
+  return MmapFixedImpl(fixed_addr, size, true /*tolerate_enomem*/);
+}
+
 bool MprotectNoAccess(uptr addr, uptr size) {
   return 0 == internal_mprotect((void*)addr, size, PROT_NONE);
 }
diff --git a/lib/sanitizer_common/sanitizer_win.cc b/lib/sanitizer_common/sanitizer_win.cc
index c6a146553412c..89d9cf61c3e49 100644
--- a/lib/sanitizer_common/sanitizer_win.cc
+++ b/lib/sanitizer_common/sanitizer_win.cc
@@ -235,6 +235,18 @@ void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
   return p;
 }
 
+void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) {
+  void *p = VirtualAlloc((LPVOID)fixed_addr, size,
+                         MEM_COMMIT, PAGE_READWRITE);
+  if (p == 0) {
+    char mem_type[30];
+    internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
+                      fixed_addr);
+    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
+  }
+  return p;
+}
+
 void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
   // FIXME: make this really NoReserve?
   return MmapOrDie(size, mem_type);
diff --git a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
index f256d8776d80d..0def8ee0fd70d 100644
--- a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -436,30 +436,31 @@ TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
   EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
 }
 
-template<class Allocator>
-void FailInAssertionOnOOM() {
-  Allocator a;
+// Don't test OOM conditions on Win64 because it causes other tests on the same
+// machine to OOM.
+#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
+TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
+  Allocator64 a;
   a.Init(kReleaseToOSIntervalNever);
-  SizeClassAllocatorLocalCache<Allocator> cache;
+  SizeClassAllocatorLocalCache<Allocator64> cache;
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);
   AllocatorStats stats;
   stats.Init();
+
   const size_t kNumChunks = 128;
   uint32_t chunks[kNumChunks];
+  bool allocation_failed = false;
   for (int i = 0; i < 1000000; i++) {
-    a.GetFromAllocator(&stats, 52, chunks, kNumChunks);
+    if (!a.GetFromAllocator(&stats, 52, chunks, kNumChunks)) {
+      allocation_failed = true;
+      break;
+    }
   }
+  EXPECT_EQ(allocation_failed, true);
 
   a.TestOnlyUnmap();
 }
-
-// Don't test OOM conditions on Win64 because it causes other tests on the same
-// machine to OOM.
-#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
-TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
-  EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
-}
 #endif
 
 TEST(SanitizerCommon, LargeMmapAllocator) {
@@ -970,9 +971,9 @@ TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
   const uptr kAllocationSize = SpecialSizeClassMap::Size(kClassID);
   ASSERT_LT(2 * kAllocationSize, kRegionSize);
   ASSERT_GT(3 * kAllocationSize, kRegionSize);
-  cache.Allocate(a, kClassID);
-  EXPECT_DEATH(cache.Allocate(a, kClassID) && cache.Allocate(a, kClassID),
-               "The process has exhausted");
+  EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
+  EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
+  EXPECT_EQ(cache.Allocate(a, kClassID), nullptr);
 
   const uptr Class2 = 100;
   const uptr Size2 = SpecialSizeClassMap::Size(Class2);
@@ -980,11 +981,12 @@ TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
   char *p[7];
   for (int i = 0; i < 7; i++) {
     p[i] = (char*)cache.Allocate(a, Class2);
+    EXPECT_NE(p[i], nullptr);
     fprintf(stderr, "p[%d] %p s = %lx\n", i, (void*)p[i], Size2);
     p[i][Size2 - 1] = 42;
     if (i) ASSERT_LT(p[i - 1], p[i]);
   }
-  EXPECT_DEATH(cache.Allocate(a, Class2), "The process has exhausted");
+  EXPECT_EQ(cache.Allocate(a, Class2), nullptr);
   cache.Deallocate(a, Class2, p[0]);
   cache.Drain(a);
   ASSERT_EQ(p[6][Size2 - 1], 42);
diff --git a/lib/scudo/scudo_allocator.cpp b/lib/scudo/scudo_allocator.cpp
index 1d0db84a5aaf4..00fa192181ade 100644
--- a/lib/scudo/scudo_allocator.cpp
+++ b/lib/scudo/scudo_allocator.cpp
@@ -22,8 +22,7 @@
 #include "sanitizer_common/sanitizer_allocator_interface.h"
 #include "sanitizer_common/sanitizer_quarantine.h"
 
-#include <limits.h>
-#include <pthread.h>
+#include <errno.h>
 #include <string.h>
 
 namespace __scudo {
@@ -341,7 +340,7 @@ struct ScudoAllocator {
   // Helper function that checks for a valid Scudo chunk. nullptr isn't.
   bool isValidPointer(const void *UserPtr) {
     initThreadMaybe();
-    if (!UserPtr)
+    if (UNLIKELY(!UserPtr))
       return false;
     uptr UserBeg = reinterpret_cast<uptr>(UserPtr);
     if (!IsAligned(UserBeg, MinAlignment))
@@ -353,22 +352,19 @@ struct ScudoAllocator {
   void *allocate(uptr Size, uptr Alignment, AllocType Type,
                  bool ForceZeroContents = false) {
     initThreadMaybe();
-    if (UNLIKELY(!IsPowerOfTwo(Alignment))) {
-      dieWithMessage("ERROR: alignment is not a power of 2\n");
-    }
-    if (Alignment > MaxAlignment)
+    if (UNLIKELY(Alignment > MaxAlignment))
       return FailureHandler::OnBadRequest();
-    if (Alignment < MinAlignment)
+    if (UNLIKELY(Alignment < MinAlignment))
       Alignment = MinAlignment;
-    if (Size >= MaxAllowedMallocSize)
+    if (UNLIKELY(Size >= MaxAllowedMallocSize))
       return FailureHandler::OnBadRequest();
-    if (Size == 0)
+    if (UNLIKELY(Size == 0))
       Size = 1;
 
     uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
     uptr AlignedSize = (Alignment > MinAlignment) ?
         NeededSize + (Alignment - AlignedChunkHeaderSize) : NeededSize;
-    if (AlignedSize >= MaxAllowedMallocSize)
+    if (UNLIKELY(AlignedSize >= MaxAllowedMallocSize))
       return FailureHandler::OnBadRequest();
 
     // Primary and Secondary backed allocations have a different treatment. We
@@ -393,7 +389,7 @@ struct ScudoAllocator {
       Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, AllocationSize,
                                       AllocationAlignment, FromPrimary);
     }
-    if (!Ptr)
+    if (UNLIKELY(!Ptr))
       return FailureHandler::OnOOM();
 
     // If requested, we will zero out the entire contents of the returned chunk.
@@ -404,7 +400,7 @@ struct ScudoAllocator {
     UnpackedHeader Header = {};
     uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
     uptr UserBeg = AllocBeg + AlignedChunkHeaderSize;
-    if (!IsAligned(UserBeg, Alignment)) {
+    if (UNLIKELY(!IsAligned(UserBeg, Alignment))) {
       // Since the Secondary takes care of alignment, a non-aligned pointer
       // means it is from the Primary. It is also the only case where the offset
       // field of the header would be non-zero.
@@ -481,7 +477,7 @@ struct ScudoAllocator {
   void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) {
     initThreadMaybe();
     // if (&__sanitizer_free_hook) __sanitizer_free_hook(UserPtr);
-    if (!UserPtr)
+    if (UNLIKELY(!UserPtr))
       return;
     uptr UserBeg = reinterpret_cast<uptr>(UserPtr);
     if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) {
@@ -568,7 +564,7 @@ struct ScudoAllocator {
   // Helper function that returns the actual usable size of a chunk.
   uptr getUsableSize(const void *Ptr) {
     initThreadMaybe();
-    if (!Ptr)
+    if (UNLIKELY(!Ptr))
       return 0;
     uptr UserBeg = reinterpret_cast<uptr>(Ptr);
     ScudoChunk *Chunk = getScudoChunk(UserBeg);
@@ -584,10 +580,9 @@ struct ScudoAllocator {
 
   void *calloc(uptr NMemB, uptr Size) {
     initThreadMaybe();
-    uptr Total = NMemB * Size;
-    if (Size != 0 && Total / Size != NMemB)  // Overflow check
+    if (CheckForCallocOverflow(NMemB, Size))
       return FailureHandler::OnBadRequest();
-    return allocate(Total, MinAlignment, FromMalloc, true);
+    return allocate(NMemB * Size, MinAlignment, FromMalloc, true);
   }
 
   void commitBack(ScudoThreadContext *ThreadContext) {
@@ -655,10 +650,6 @@ void *scudoValloc(uptr Size) {
   return Instance.allocate(Size, GetPageSizeCached(), FromMemalign);
 }
 
-void *scudoMemalign(uptr Alignment, uptr Size) {
-  return Instance.allocate(Size, Alignment, FromMemalign);
-}
-
 void *scudoPvalloc(uptr Size) {
   uptr PageSize = GetPageSizeCached();
   Size = RoundUpTo(Size, PageSize);
@@ -669,16 +660,27 @@ void *scudoPvalloc(uptr Size) {
   return Instance.allocate(Size, PageSize, FromMemalign);
 }
 
+void *scudoMemalign(uptr Alignment, uptr Size) {
+  if (UNLIKELY(!IsPowerOfTwo(Alignment)))
+    return ScudoAllocator::FailureHandler::OnBadRequest();
+  return Instance.allocate(Size, Alignment, FromMemalign);
+}
+
 int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
+  if (UNLIKELY(!IsPowerOfTwo(Alignment) || (Alignment % sizeof(void *)) != 0)) {
+    *MemPtr = ScudoAllocator::FailureHandler::OnBadRequest();
+    return EINVAL;
+  }
   *MemPtr = Instance.allocate(Size, Alignment, FromMemalign);
+  if (!*MemPtr)
+    return ENOMEM;
   return 0;
 }
 
 void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
-  // size must be a multiple of the alignment. To avoid a division, we first
-  // make sure that alignment is a power of 2.
-  CHECK(IsPowerOfTwo(Alignment));
-  CHECK_EQ((Size & (Alignment - 1)), 0);
+  // Alignment must be a power of 2, Size must be a multiple of Alignment.
+  if (UNLIKELY(!IsPowerOfTwo(Alignment) || (Size & (Alignment - 1)) != 0))
+    return ScudoAllocator::FailureHandler::OnBadRequest();
   return Instance.allocate(Size, Alignment, FromMalloc);
 }
diff --git a/lib/scudo/scudo_allocator.h b/lib/scudo/scudo_allocator.h
index 523808750eec2..29d85995a3eea 100644
--- a/lib/scudo/scudo_allocator.h
+++ b/lib/scudo/scudo_allocator.h
@@ -116,6 +116,11 @@ struct AP32 {
 typedef SizeClassAllocator32<AP32> PrimaryAllocator;
 #endif  // SANITIZER_CAN_USE_ALLOCATOR64
 
+// __sanitizer::RoundUp has a CHECK that is extraneous for us. Use our own.
+INLINE uptr RoundUpTo(uptr Size, uptr Boundary) {
+  return (Size + Boundary - 1) & ~(Boundary - 1);
+}
+
 #include "scudo_allocator_secondary.h"
 #include "scudo_allocator_combined.h"
diff --git a/lib/scudo/scudo_allocator_combined.h b/lib/scudo/scudo_allocator_combined.h
index 21c45897b94ed..8182728688801 100644
--- a/lib/scudo/scudo_allocator_combined.h
+++ b/lib/scudo/scudo_allocator_combined.h
@@ -45,7 +45,7 @@ class ScudoCombinedAllocator {
 
   uptr GetActuallyAllocatedSize(void *Ptr, bool FromPrimary) {
     if (FromPrimary)
-      return Primary.GetActuallyAllocatedSize(Ptr);
+      return PrimaryAllocator::ClassIdToSize(Primary.GetSizeClass(Ptr));
     return Secondary.GetActuallyAllocatedSize(Ptr);
   }
diff --git a/lib/scudo/scudo_new_delete.cpp b/lib/scudo/scudo_new_delete.cpp
index c022bd0acbe75..cdefb127b9651 100644
--- a/lib/scudo/scudo_new_delete.cpp
+++ b/lib/scudo/scudo_new_delete.cpp
@@ -26,13 +26,18 @@ namespace std {
 struct nothrow_t {};
 }  // namespace std
 
+// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
 CXX_OPERATOR_ATTRIBUTE
 void *operator new(size_t size) {
-  return scudoMalloc(size, FromNew);
+  void *res = scudoMalloc(size, FromNew);
+  if (UNLIKELY(!res)) DieOnFailure::OnOOM();
+  return res;
 }
 CXX_OPERATOR_ATTRIBUTE
 void *operator new[](size_t size) {
-  return scudoMalloc(size, FromNewArray);
+  void *res = scudoMalloc(size, FromNewArray);
+  if (UNLIKELY(!res)) DieOnFailure::OnOOM();
+  return res;
 }
 CXX_OPERATOR_ATTRIBUTE
 void *operator new(size_t size, std::nothrow_t const&) {
diff --git a/lib/scudo/scudo_tls_android.cpp b/lib/scudo/scudo_tls_android.cpp
index 0e3602b2faf01..ec74e37c8dbc0 100644
--- a/lib/scudo/scudo_tls_android.cpp
+++ b/lib/scudo/scudo_tls_android.cpp
@@ -45,7 +45,7 @@ static void initOnce() {
   NumberOfContexts = getNumberOfCPUs();
   ThreadContexts = reinterpret_cast<ScudoThreadContext *>(
      MmapOrDie(sizeof(ScudoThreadContext) * NumberOfContexts, __func__));
-  for (int i = 0; i < NumberOfContexts; i++)
+  for (uptr i = 0; i < NumberOfContexts; i++)
     ThreadContexts[i].init();
 }
diff --git a/lib/tsan/rtl/tsan_mman.cc b/lib/tsan/rtl/tsan_mman.cc
index fa0d0cafe9b63..7169d5b02c044 100644
--- a/lib/tsan/rtl/tsan_mman.cc
+++ b/lib/tsan/rtl/tsan_mman.cc
@@ -162,7 +162,7 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
 }
 
 void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
-  if (CallocShouldReturnNullDueToOverflow(size, n))
+  if (CheckForCallocOverflow(size, n))
     return Allocator::FailureHandler::OnBadRequest();
   void *p = user_alloc(thr, pc, n * size);
   if (p)
diff --git a/lib/tsan/rtl/tsan_new_delete.cc b/lib/tsan/rtl/tsan_new_delete.cc
index b6478bb08c572..4d03145c16ad5 100644
--- a/lib/tsan/rtl/tsan_new_delete.cc
+++ b/lib/tsan/rtl/tsan_new_delete.cc
@@ -12,6 +12,7 @@
 // Interceptors for operators new and delete.
 //===----------------------------------------------------------------------===//
 #include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator.h"
 #include "sanitizer_common/sanitizer_internal_defs.h"
 #include "tsan_interceptors.h"
 
@@ -24,13 +25,15 @@ struct nothrow_t {};
 
 DECLARE_REAL(void *, malloc, uptr size)
 DECLARE_REAL(void, free, void *ptr)
 
-#define OPERATOR_NEW_BODY(mangled_name) \
+// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
+#define OPERATOR_NEW_BODY(mangled_name, nothrow) \
   if (cur_thread()->in_symbolizer) \
     return InternalAlloc(size); \
   void *p = 0; \
   { \
     SCOPED_INTERCEPTOR_RAW(mangled_name, size); \
     p = user_alloc(thr, pc, size); \
+    if (!nothrow && UNLIKELY(!p)) DieOnFailure::OnOOM(); \
   } \
   invoke_malloc_hook(p, size); \
   return p;
@@ -38,25 +41,25 @@ DECLARE_REAL(void, free, void *ptr)
 SANITIZER_INTERFACE_ATTRIBUTE
 void *operator new(__sanitizer::uptr size);
 void *operator new(__sanitizer::uptr size) {
-  OPERATOR_NEW_BODY(_Znwm);
+  OPERATOR_NEW_BODY(_Znwm, false /*nothrow*/);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void *operator new[](__sanitizer::uptr size);
 void *operator new[](__sanitizer::uptr size) {
-  OPERATOR_NEW_BODY(_Znam);
+  OPERATOR_NEW_BODY(_Znam, false /*nothrow*/);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void *operator new(__sanitizer::uptr size, std::nothrow_t const&);
 void *operator new(__sanitizer::uptr size, std::nothrow_t const&) {
-  OPERATOR_NEW_BODY(_ZnwmRKSt9nothrow_t);
+  OPERATOR_NEW_BODY(_ZnwmRKSt9nothrow_t, true /*nothrow*/);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void *operator new[](__sanitizer::uptr size, std::nothrow_t const&);
 void *operator new[](__sanitizer::uptr size, std::nothrow_t const&) {
-  OPERATOR_NEW_BODY(_ZnamRKSt9nothrow_t);
+  OPERATOR_NEW_BODY(_ZnamRKSt9nothrow_t, true /*nothrow*/);
 }
 
 #define OPERATOR_DELETE_BODY(mangled_name) \
diff --git a/lib/tsan/tests/CMakeLists.txt b/lib/tsan/tests/CMakeLists.txt
index 87e14174ad1f0..ca43a928d1b8a 100644
--- a/lib/tsan/tests/CMakeLists.txt
+++ b/lib/tsan/tests/CMakeLists.txt
@@ -8,6 +8,7 @@ set(TSAN_UNITTEST_CFLAGS
   ${TSAN_CFLAGS}
   ${COMPILER_RT_UNITTEST_CFLAGS}
   ${COMPILER_RT_GTEST_CFLAGS}
+  -I${COMPILER_RT_SOURCE_DIR}/include
   -I${COMPILER_RT_SOURCE_DIR}/lib
   -I${COMPILER_RT_SOURCE_DIR}/lib/tsan/rtl
   -DGTEST_HAS_RTTI=0)
diff --git a/lib/xray/xray_always_instrument.txt b/lib/xray/xray_always_instrument.txt
new file mode 100644
index 0000000000000..151ed703dd567
--- /dev/null
+++ b/lib/xray/xray_always_instrument.txt
@@ -0,0 +1,6 @@
+# List of function matchers common to C/C++ applications that make sense to
+# always instrument. You can use this as an argument to
+# -fxray-always-instrument=<path> along with your project-specific lists.
+
+# Always instrument the main function.
+fun:main
diff --git a/lib/xray/xray_never_instrument.txt b/lib/xray/xray_never_instrument.txt
new file mode 100644
index 0000000000000..7fa48dda7e168
--- /dev/null
+++ b/lib/xray/xray_never_instrument.txt
@@ -0,0 +1,6 @@
+# List of function matchers common to C/C++ applications that make sense to
+# never instrument. You can use this as an argument to
+# -fxray-never-instrument=<path> along with your project-specific lists.
+
+# Never instrument any function whose symbol starts with __xray.
+fun:__xray*
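For reference, the two new XRay lists are consumed at build time via the flags named in their headers; a typical invocation might look like this (a minimal sketch; the source file and output names are placeholders, not part of this patch):

    clang++ -fxray-instrument \
      -fxray-always-instrument=lib/xray/xray_always_instrument.txt \
      -fxray-never-instrument=lib/xray/xray_never_instrument.txt \
      -o app app.cc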